Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v3.13-rc1 (4666 lines, 123 kB)

/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)

#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}
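
/*
 * Illustration (not load-bearing): with the leftmost node cached,
 * repeated minimum extraction is O(1). cfq_rb_first() (further down)
 * returns ->left when it is set and only falls back to an O(log n)
 * rb_first() walk after cfq_rb_erase() of the cached node has cleared
 * the cache.
 */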

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has negative index
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};
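
/*
 * Illustration: the two enums above jointly index a group's
 * service_trees[class][type] array (see struct cfq_group below), e.g.
 *
 *	service_trees[BE_WORKLOAD][SYNC_WORKLOAD]	sync best-effort
 *	service_trees[RT_WORKLOAD][ASYNC_WORKLOAD]	async realtime
 *
 * IDLE_WORKLOAD has no per-type split and maps to service_tree_idle
 * instead (see st_for() below).
 */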

struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
	/* number of ios merged */
	struct blkg_rwstat merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat queued;
	/* total sectors transferred */
	struct blkg_stat sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;

	/*
	 * The number of active cfqgs and sum of their weights under this
	 * cfqg. This covers this cfqg's leaf_weight and all children's
	 * weights, but does not cover weights of further descendants.
	 *
	 * If a cfqg is on the service tree, it's active. An active cfqg
	 * also activates its parent and contributes to the children_weight
	 * of the parent.
	 */
	int nr_active;
	unsigned int children_weight;

	/*
	 * vfraction is the fraction of vdisktime that the tasks in this
	 * cfqg are entitled to. This is determined by compounding the
	 * ratios walking up from this cfqg to the root.
	 *
	 * It is in fixed point w/ CFQ_SERVICE_SHIFT and the sum of all
	 * vfractions on a service tree is approximately 1. The sum may
	 * deviate a bit due to rounding errors and fluctuations caused by
	 * cfqgs entering and leaving the service tree.
	 */
	unsigned int vfraction;

	/*
	 * There are two weights - (internal) weight is the weight of this
	 * cfqg against the sibling cfqgs. leaf_weight is the weight of
	 * this cfqg against the child cfqgs. For the root cfqg, both
	 * weights are kept in sync for backward compatibility.
	 */
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	unsigned int leaf_weight;
	unsigned int new_leaf_weight;
	unsigned int dev_leaf_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;	/* stats for this cfqg */
	struct cfqg_stats dead_stats;	/* stats pushed from dead children */
};
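
/*
 * Illustration of weight vs leaf_weight: a group competes against its
 * sibling groups using ->weight, while the tasks inside it compete
 * against its own child groups using ->leaf_weight. E.g. with
 *
 *	parent (leaf_weight 500)
 *	  +-- tasks in parent
 *	  +-- child group (weight 500)
 *
 * parent->children_weight is 1000 when both are active, so the parent's
 * own tasks and the child group each get half of the parent's share.
 */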

struct cfq_io_cq {
	struct io_cq icq;	/* must be the first member */
	struct cfq_queue *cfqq[2];
	struct cfq_ttime ttime;
	int ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t blkcg_id;	/* the current blkcg ID */
#endif
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;
	unsigned int cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};
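
/*
 * Illustration: CFQ_CFQQ_FNS() (below) generates a mark/clear/test
 * helper triple per flag. CFQ_CFQQ_FNS(on_rr) expands to roughly:
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */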

#define CFQ_CFQQ_FNS(name) \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
{ \
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
{ \
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
{ \
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS

static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}

#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name) \
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
{ \
	stats->flags |= (1 << CFQG_stats_##name); \
} \
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
{ \
	stats->flags &= ~(1 << CFQG_stats_##name); \
} \
static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
{ \
	return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
} \

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_total(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_total(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

#ifdef CONFIG_CFQ_GROUP_IOSCHED

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg)
{
	struct blkcg_gq *pblkg = cfqg_to_blkg(cfqg)->parent;

	return pblkg ? blkg_to_cfqg(pblkg) : NULL;
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	do { \
	char __pbuf[128]; \
\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
			  cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			  __pbuf, ##args); \
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)	do { \
	char __pbuf[128]; \
\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
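
/*
 * Illustration of the accounting above: a 4 KiB request adds
 * 4096 >> 9 = 8 to the sectors stat in cfqg_stats_update_dispatch().
 * In cfqg_stats_update_completion(), io_start_time - start_time is
 * charged as scheduler wait_time and now - io_start_time as device
 * service_time, both in sched_clock() nanoseconds.
 */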

/* @stats = 0 */
static void cfqg_stats_reset(struct cfqg_stats *stats)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}

/* @to += @from */
static void cfqg_stats_merge(struct cfqg_stats *to, struct cfqg_stats *from)
{
	/* queued stats shouldn't be cleared */
	blkg_rwstat_merge(&to->service_bytes, &from->service_bytes);
	blkg_rwstat_merge(&to->serviced, &from->serviced);
	blkg_rwstat_merge(&to->merged, &from->merged);
	blkg_rwstat_merge(&to->service_time, &from->service_time);
	blkg_rwstat_merge(&to->wait_time, &from->wait_time);
	blkg_stat_merge(&to->time, &from->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_merge(&to->unaccounted_time, &from->unaccounted_time);
	blkg_stat_merge(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
	blkg_stat_merge(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
	blkg_stat_merge(&to->dequeue, &from->dequeue);
	blkg_stat_merge(&to->group_wait_time, &from->group_wait_time);
	blkg_stat_merge(&to->idle_time, &from->idle_time);
	blkg_stat_merge(&to->empty_time, &from->empty_time);
#endif
}

/*
 * Transfer @cfqg's stats to its parent's dead_stats so that the ancestors'
 * recursive stats can still account for the amount used by this cfqg after
 * it's gone.
 */
static void cfqg_stats_xfer_dead(struct cfq_group *cfqg)
{
	struct cfq_group *parent = cfqg_parent(cfqg);

	lockdep_assert_held(cfqg_to_blkg(cfqg)->q->queue_lock);

	if (unlikely(!parent))
		return;

	cfqg_stats_merge(&parent->dead_stats, &cfqg->stats);
	cfqg_stats_merge(&parent->dead_stats, &cfqg->dead_stats);
	cfqg_stats_reset(&cfqg->stats);
	cfqg_stats_reset(&cfqg->dead_stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *cfqg_parent(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c%c " fmt, (cfqq)->pid, \
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
			cfqq_type((cfqq)) == SYNC_NOIDLE_WORKLOAD ? 'N' : ' ',\
			##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
			uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j] \
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j] : NULL) \

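/*
 * Illustration: for_each_cfqg_st() visits a group's seven trees in this
 * order:
 *
 *	(i=BE,   j=ASYNC..SYNC)	service_trees[0][0..2]
 *	(i=RT,   j=ASYNC..SYNC)	service_trees[1][0..2]
 *	(i=IDLE, j=0)		service_tree_idle
 */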

static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
			struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most cases unless we drive shallow queue depths, which then
	 * becomes a performance bottleneck. In such cases, switch to
	 * providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}


static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
	       cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
	       cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
	       cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio,
				       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}
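
/*
 * Illustration for cfq_bio_sync() above: all reads count as sync, while
 * a write counts as sync only when REQ_SYNC is set (e.g. O_DIRECT or
 * O_SYNC style writes); plain buffered writeback is treated as async.
 */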

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get a full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}

static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
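
/*
 * Worked example for cfq_prio_slice(), assuming HZ=1000 so the sync
 * base slice cfq_slice_sync is 100 jiffies: prio 4 gets the base 100ms
 * slice, prio 0 gets 100 + (100/5) * 4 = 180ms, and prio 7 gets
 * 100 + (100/5) * (4 - 7) = 40ms.
 */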

/**
 * cfqg_scale_charge - scale disk time charge according to cfqg weight
 * @charge: disk time being charged
 * @vfraction: vfraction of the cfqg, fixed point w/ CFQ_SERVICE_SHIFT
 *
 * Scale @charge according to @vfraction, which is in range (0, 1]. The
 * scaling is inversely proportional.
 *
 * scaled = charge / vfraction
 *
 * The result is also in fixed point w/ CFQ_SERVICE_SHIFT.
 */
static inline u64 cfqg_scale_charge(unsigned long charge,
				    unsigned int vfraction)
{
	u64 c = charge << CFQ_SERVICE_SHIFT;	/* make it fixed point */

	/* charge / vfraction */
	c <<= CFQ_SERVICE_SHIFT;
	do_div(c, vfraction);
	return c;
}

static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * Get the averaged number of queues of RT/BE priority. The average is
 * updated with a formula that gives more weight to higher numbers, so it
 * follows sudden increases quickly and decreases slowly.
 */

static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
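
/*
 * Worked example for cfq_group_get_avg_queues(): with cfq_hist_divisor
 * of 4, the update is (3 * max(avg, busy) + min(avg, busy) + 2) / 4.
 * Because the larger of the two values gets the 3x weight, a jump from
 * avg=1 to busy=9 yields (27 + 1 + 2) / 4 = 7 (fast follow), while a
 * drop from avg=9 to busy=1 also yields 7 (slow decay).
 */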

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	return cfqd->cfq_target_latency * cfqg->vfraction >> CFQ_SERVICE_SHIFT;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
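
/*
 * Worked example for cfq_choose_req(): with the default cfq_back_max of
 * 16384 KiB, back_max is 32768 sectors. If the head is at sector 1000,
 * rq1 at 2000 and rq2 at 600, then d1 = 1000 (forward) while rq2 lies
 * behind the head but within back_max, so d2 = (1000 - 600) * 2 = 800
 * with the default penalty of 2; rq2 wins despite the backward seek.
 */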

/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	if (cfqg->new_weight) {
		cfqg->weight = cfqg->new_weight;
		cfqg->new_weight = 0;
	}

	if (cfqg->new_leaf_weight) {
		cfqg->leaf_weight = cfqg->new_leaf_weight;
		cfqg->new_leaf_weight = 0;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	unsigned int vfr = 1 << CFQ_SERVICE_SHIFT;	/* start with 1 */
	struct cfq_group *pos = cfqg;
	struct cfq_group *parent;
	bool propagate;

	/* add to the service tree */
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);

	/*
	 * Activate @cfqg and calculate the portion of vfraction @cfqg is
	 * entitled to. vfraction is calculated by walking the tree
	 * towards the root calculating the fraction it has at each level.
	 * The compounded ratio is how much vfraction @cfqg owns.
	 *
	 * Start with the proportion tasks in this cfqg has against active
	 * children cfqgs - its leaf_weight against children_weight.
	 */
	propagate = !pos->nr_active++;
	pos->children_weight += pos->leaf_weight;
	vfr = vfr * pos->leaf_weight / pos->children_weight;

	/*
	 * Compound ->weight walking up the tree. Both activation and
	 * vfraction calculation are done in the same loop. Propagation
	 * stops once an already activated node is met. vfraction
	 * calculation should always continue to the root.
	 */
	while ((parent = cfqg_parent(pos))) {
		if (propagate) {
			propagate = !parent->nr_active++;
			parent->children_weight += pos->weight;
		}
		vfr = vfr * pos->weight / parent->children_weight;
		pos = parent;
	}

	cfqg->vfraction = max_t(unsigned, vfr, 1);
}
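
/*
 * Worked example of the vfraction calculation above: a cfqg with
 * leaf_weight 100 and no active children starts with
 * vfr = 100/100 = 1 at its own level. If its ->weight is 500 and the
 * parent's children_weight then totals 1000 (say the parent's own
 * leaf_weight 500 plus this group's 500), one step up gives
 * vfr = 1 * 500/1000, i.e. vfraction = (1 << CFQ_SERVICE_SHIFT) / 2 =
 * 2048: the group's tasks are entitled to half of the disk time.
 */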

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose everything if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct cfq_group *pos = cfqg;
	bool propagate;

	/*
	 * Undo activation from cfq_group_service_tree_add(). Deactivate
	 * @cfqg and propagate deactivation upwards.
	 */
	propagate = !--pos->nr_active;
	pos->children_weight -= pos->leaf_weight;

	while (propagate) {
		struct cfq_group *parent = cfqg_parent(pos);

		/* @pos has 0 nr_active at this point */
		WARN_ON_ONCE(pos->children_weight);
		pos->vfraction = 0;

		if (!parent)
			break;

		propagate = !--parent->nr_active;
		parent->children_weight -= pos->weight;
		pos = parent;
	}

	/* remove from the service tree */
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_wl_slice = 0;
	cfqg_stats_update_dequeue(cfqg);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;
	unsigned int vfr;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/*
	 * Can't update vdisktime while on service tree and cfqg->vfraction
	 * is valid only while on it. Cache vfr, leave the service tree,
	 * update vdisktime and go back on. The re-addition to the tree
	 * will also update the weights as necessary.
	 */
	vfr = cfqg->vfraction;
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfqg_scale_charge(charge, vfr);
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_wl_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_wl_type = cfqd->serving_wl_type;
		cfqg->saved_wl_class = cfqd->serving_wl_class;
	} else
		cfqg->saved_wl_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
			st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
	cfqg_stats_set_start_empty_time(cfqg);
}

/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void cfqg_stats_init(struct cfqg_stats *stats)
{
	blkg_rwstat_init(&stats->service_bytes);
	blkg_rwstat_init(&stats->serviced);
	blkg_rwstat_init(&stats->merged);
	blkg_rwstat_init(&stats->service_time);
	blkg_rwstat_init(&stats->wait_time);
	blkg_rwstat_init(&stats->queued);

	blkg_stat_init(&stats->sectors);
	blkg_stat_init(&stats->time);

#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_init(&stats->unaccounted_time);
	blkg_stat_init(&stats->avg_queue_size_sum);
	blkg_stat_init(&stats->avg_queue_size_samples);
	blkg_stat_init(&stats->dequeue);
	blkg_stat_init(&stats->group_wait_time);
	blkg_stat_init(&stats->idle_time);
	blkg_stat_init(&stats->empty_time);
#endif
}

static void cfq_pd_init(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfq_init_cfqg_base(cfqg);
	cfqg->weight = blkg->blkcg->cfq_weight;
	cfqg->leaf_weight = blkg->blkcg->cfq_leaf_weight;
	cfqg_stats_init(&cfqg->stats);
	cfqg_stats_init(&cfqg->dead_stats);
}

static void cfq_pd_offline(struct blkcg_gq *blkg)
{
	/*
	 * @blkg is going offline and will be ignored by
	 * blkg_[rw]stat_recursive_sum(). Transfer stats to the parent so
	 * that they don't get lost. If IOs complete after this point, the
	 * stats for them will be lost. Oh well...
	 */
	cfqg_stats_xfer_dead(blkg_to_cfqg(blkg));
}

/* offset delta from cfqg->stats to cfqg->dead_stats */
static const int dead_stats_off_delta = offsetof(struct cfq_group, dead_stats) -
					offsetof(struct cfq_group, stats);

/* to be used by recursive prfill, sums live and dead stats recursively */
static u64 cfqg_stat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
{
	u64 sum = 0;

	sum += blkg_stat_recursive_sum(pd, off);
	sum += blkg_stat_recursive_sum(pd, off + dead_stats_off_delta);
	return sum;
}

/* to be used by recursive prfill, sums live and dead rwstats recursively */
static struct blkg_rwstat cfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
						       int off)
{
	struct blkg_rwstat a, b;

	a = blkg_rwstat_recursive_sum(pd, off);
	b = blkg_rwstat_recursive_sum(pd, off + dead_stats_off_delta);
	blkg_rwstat_merge(&a, &b);
	return a;
}

static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfqg_stats_reset(&cfqg->stats);
	cfqg_stats_reset(&cfqg->dead_stats);
}

/*
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
 */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkcg *blkcg)
{
	struct request_queue *q = cfqd->queue;
	struct cfq_group *cfqg = NULL;

	/* avoid lookup for the common case where there's no blkcg */
	if (blkcg == &blkcg_root) {
		cfqg = cfqd->root_group;
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);
		if (!IS_ERR(blkg))
			cfqg = blkg_to_cfqg(blkg);
	}

	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqg_get(cfqg);
}

static u64 cfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);

	if (!cfqg->dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}

static int cfqg_print_weight_device(struct cgroup_subsys_state *css,
				    struct cftype *cft, struct seq_file *sf)
{
	blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_weight_device,
			  &blkcg_policy_cfq, 0, false);
	return 0;
}

static u64 cfqg_prfill_leaf_weight_device(struct seq_file *sf,
					  struct blkg_policy_data *pd, int off)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);

	if (!cfqg->dev_leaf_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, cfqg->dev_leaf_weight);
}

static int cfqg_print_leaf_weight_device(struct cgroup_subsys_state *css,
					 struct cftype *cft,
					 struct seq_file *sf)
{
	blkcg_print_blkgs(sf, css_to_blkcg(css), cfqg_prfill_leaf_weight_device,
			  &blkcg_policy_cfq, 0, false);
	return 0;
}

static int cfq_print_weight(struct cgroup_subsys_state *css, struct cftype *cft,
			    struct seq_file *sf)
{
	seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_weight);
	return 0;
}

static int cfq_print_leaf_weight(struct cgroup_subsys_state *css,
				 struct cftype *cft, struct seq_file *sf)
{
	seq_printf(sf, "%u\n", css_to_blkcg(css)->cfq_leaf_weight);
	return 0;
}

static int __cfqg_set_weight_device(struct cgroup_subsys_state *css,
				    struct cftype *cft, const char *buf,
				    bool is_leaf_weight)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkg_conf_ctx ctx;
	struct cfq_group *cfqg;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	cfqg = blkg_to_cfqg(ctx.blkg);
	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
		if (!is_leaf_weight) {
			cfqg->dev_weight = ctx.v;
			cfqg->new_weight = ctx.v ?: blkcg->cfq_weight;
		} else {
			cfqg->dev_leaf_weight = ctx.v;
			cfqg->new_leaf_weight = ctx.v ?: blkcg->cfq_leaf_weight;
		}
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int cfqg_set_weight_device(struct cgroup_subsys_state *css,
				  struct cftype *cft, const char *buf)
{
	return __cfqg_set_weight_device(css, cft, buf, false);
}

static int cfqg_set_leaf_weight_device(struct cgroup_subsys_state *css,
				       struct cftype *cft, const char *buf)
{
	return __cfqg_set_weight_device(css, cft, buf, true);
}
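
/*
 * Usage sketch for the device-weight interface above (the path assumes
 * a cgroup v1 blkio hierarchy mounted at /sys/fs/cgroup/blkio; "grp" is
 * a hypothetical cgroup):
 *
 *	# give this group weight 200 on device 8:16; the value must be 0
 *	# or within [CFQ_WEIGHT_MIN, CFQ_WEIGHT_MAX]
 *	echo "8:16 200" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *	# 0 clears the per-device override; the group then falls back to
 *	# the blkcg-wide blkio.weight (the "?:" fallback above)
 *	echo "8:16 0" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 */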

static int __cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
			    u64 val, bool is_leaf_weight)
{
	struct blkcg *blkcg = css_to_blkcg(css);
	struct blkcg_gq *blkg;

	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
		return -EINVAL;

	spin_lock_irq(&blkcg->lock);

	if (!is_leaf_weight)
		blkcg->cfq_weight = val;
	else
		blkcg->cfq_leaf_weight = val;

	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
		struct cfq_group *cfqg = blkg_to_cfqg(blkg);

		if (!cfqg)
			continue;

		if (!is_leaf_weight) {
			if (!cfqg->dev_weight)
				cfqg->new_weight = blkcg->cfq_weight;
		} else {
			if (!cfqg->dev_leaf_weight)
				cfqg->new_leaf_weight = blkcg->cfq_leaf_weight;
		}
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

static int cfq_set_weight(struct cgroup_subsys_state *css, struct cftype *cft,
			  u64 val)
{
	return __cfq_set_weight(css, cft, val, false);
}

static int cfq_set_leaf_weight(struct cgroup_subsys_state *css,
			       struct cftype *cft, u64 val)
{
	return __cfq_set_weight(css, cft, val, true);
}

static int cfqg_print_stat(struct cgroup_subsys_state *css, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
			  cft->private, false);
	return 0;
}

static int cfqg_print_rwstat(struct cgroup_subsys_state *css,
			     struct cftype *cft, struct seq_file *sf)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
			  cft->private, true);
	return 0;
}

static u64 cfqg_prfill_stat_recursive(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	u64 sum = cfqg_stat_pd_recursive_sum(pd, off);

	return __blkg_prfill_u64(sf, pd, sum);
}

static u64 cfqg_prfill_rwstat_recursive(struct seq_file *sf,
					struct blkg_policy_data *pd, int off)
{
	struct blkg_rwstat sum = cfqg_rwstat_pd_recursive_sum(pd, off);

	return __blkg_prfill_rwstat(sf, pd, &sum);
}

static int cfqg_print_stat_recursive(struct cgroup_subsys_state *css,
				     struct cftype *cft, struct seq_file *sf)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_stat_recursive,
			  &blkcg_policy_cfq, cft->private, false);
	return 0;
}

static int cfqg_print_rwstat_recursive(struct cgroup_subsys_state *css,
				       struct cftype *cft, struct seq_file *sf)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_rwstat_recursive,
			  &blkcg_policy_cfq, cft->private, true);
	return 0;
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);
	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
		v = div64_u64(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int cfqg_print_avg_queue_size(struct cgroup_subsys_state *css,
				     struct cftype *cft, struct seq_file *sf)
{
	struct blkcg *blkcg = css_to_blkcg(css);

	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
			  &blkcg_policy_cfq, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */

static struct cftype cfq_blkcg_files[] = {
	/* on root, weight is mapped to leaf_weight */
	{
		.name = "weight_device",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_seq_string = cfqg_print_leaf_weight_device,
		.write_string = cfqg_set_leaf_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.flags = CFTYPE_ONLY_ON_ROOT,
		.read_seq_string = cfq_print_leaf_weight,
		.write_u64 = cfq_set_leaf_weight,
	},

	/* no such mapping necessary for !roots */
	{
		.name = "weight_device",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_seq_string = cfqg_print_weight_device,
		.write_string = cfqg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.flags = CFTYPE_NOT_ON_ROOT,
		.read_seq_string = cfq_print_weight,
		.write_u64 = cfq_set_weight,
	},

	{
		.name = "leaf_weight_device",
		.read_seq_string = cfqg_print_leaf_weight_device,
		.write_string = cfqg_set_leaf_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "leaf_weight",
		.read_seq_string = cfq_print_leaf_weight,
		.write_u64 = cfq_set_leaf_weight,
	},

	/* statistics, covers only the tasks in the cfqg */
	{
		.name = "time",
		.private = offsetof(struct cfq_group, stats.time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "sectors",
		.private = offsetof(struct cfq_group, stats.sectors),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "io_service_bytes",
		.private = offsetof(struct cfq_group, stats.service_bytes),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_serviced",
		.private = offsetof(struct cfq_group, stats.serviced),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_service_time",
		.private = offsetof(struct cfq_group, stats.service_time),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = offsetof(struct cfq_group, stats.wait_time),
		.read_seq_string = cfqg_print_rwstat,
	},
.read_seq_string = cfqg_print_rwstat, 1922 }, 1923 { 1924 .name = "io_merged", 1925 .private = offsetof(struct cfq_group, stats.merged), 1926 .read_seq_string = cfqg_print_rwstat, 1927 }, 1928 { 1929 .name = "io_queued", 1930 .private = offsetof(struct cfq_group, stats.queued), 1931 .read_seq_string = cfqg_print_rwstat, 1932 }, 1933 1934 /* the same statistics which cover the cfqg and its descendants */ 1935 { 1936 .name = "time_recursive", 1937 .private = offsetof(struct cfq_group, stats.time), 1938 .read_seq_string = cfqg_print_stat_recursive, 1939 }, 1940 { 1941 .name = "sectors_recursive", 1942 .private = offsetof(struct cfq_group, stats.sectors), 1943 .read_seq_string = cfqg_print_stat_recursive, 1944 }, 1945 { 1946 .name = "io_service_bytes_recursive", 1947 .private = offsetof(struct cfq_group, stats.service_bytes), 1948 .read_seq_string = cfqg_print_rwstat_recursive, 1949 }, 1950 { 1951 .name = "io_serviced_recursive", 1952 .private = offsetof(struct cfq_group, stats.serviced), 1953 .read_seq_string = cfqg_print_rwstat_recursive, 1954 }, 1955 { 1956 .name = "io_service_time_recursive", 1957 .private = offsetof(struct cfq_group, stats.service_time), 1958 .read_seq_string = cfqg_print_rwstat_recursive, 1959 }, 1960 { 1961 .name = "io_wait_time_recursive", 1962 .private = offsetof(struct cfq_group, stats.wait_time), 1963 .read_seq_string = cfqg_print_rwstat_recursive, 1964 }, 1965 { 1966 .name = "io_merged_recursive", 1967 .private = offsetof(struct cfq_group, stats.merged), 1968 .read_seq_string = cfqg_print_rwstat_recursive, 1969 }, 1970 { 1971 .name = "io_queued_recursive", 1972 .private = offsetof(struct cfq_group, stats.queued), 1973 .read_seq_string = cfqg_print_rwstat_recursive, 1974 }, 1975#ifdef CONFIG_DEBUG_BLK_CGROUP 1976 { 1977 .name = "avg_queue_size", 1978 .read_seq_string = cfqg_print_avg_queue_size, 1979 }, 1980 { 1981 .name = "group_wait_time", 1982 .private = offsetof(struct cfq_group, stats.group_wait_time), 1983 .read_seq_string = cfqg_print_stat, 1984 }, 1985 { 1986 .name = "idle_time", 1987 .private = offsetof(struct cfq_group, stats.idle_time), 1988 .read_seq_string = cfqg_print_stat, 1989 }, 1990 { 1991 .name = "empty_time", 1992 .private = offsetof(struct cfq_group, stats.empty_time), 1993 .read_seq_string = cfqg_print_stat, 1994 }, 1995 { 1996 .name = "dequeue", 1997 .private = offsetof(struct cfq_group, stats.dequeue), 1998 .read_seq_string = cfqg_print_stat, 1999 }, 2000 { 2001 .name = "unaccounted_time", 2002 .private = offsetof(struct cfq_group, stats.unaccounted_time), 2003 .read_seq_string = cfqg_print_stat, 2004 }, 2005#endif /* CONFIG_DEBUG_BLK_CGROUP */ 2006 { } /* terminate */ 2007}; 2008#else /* GROUP_IOSCHED */ 2009static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd, 2010 struct blkcg *blkcg) 2011{ 2012 return cfqd->root_group; 2013} 2014 2015static inline void 2016cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) { 2017 cfqq->cfqg = cfqg; 2018} 2019 2020#endif /* GROUP_IOSCHED */ 2021 2022/* 2023 * The cfqd->service_trees array holds all pending cfq_queues that have 2024 * requests waiting to be processed. It is sorted in the order that 2025 * we will service the queues. 
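 * (There is one such tree per (workload class, workload type) pair
 * within each group; see st_for().)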
2026 */ 2027static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2028 bool add_front) 2029{ 2030 struct rb_node **p, *parent; 2031 struct cfq_queue *__cfqq; 2032 unsigned long rb_key; 2033 struct cfq_rb_root *st; 2034 int left; 2035 int new_cfqq = 1; 2036 2037 st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq)); 2038 if (cfq_class_idle(cfqq)) { 2039 rb_key = CFQ_IDLE_DELAY; 2040 parent = rb_last(&st->rb); 2041 if (parent && parent != &cfqq->rb_node) { 2042 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 2043 rb_key += __cfqq->rb_key; 2044 } else 2045 rb_key += jiffies; 2046 } else if (!add_front) { 2047 /* 2048 * Get our rb key offset. Subtract any residual slice 2049 * value carried from last service. A negative resid 2050 * count indicates slice overrun, and this should position 2051 * the next service time further away in the tree. 2052 */ 2053 rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies; 2054 rb_key -= cfqq->slice_resid; 2055 cfqq->slice_resid = 0; 2056 } else { 2057 rb_key = -HZ; 2058 __cfqq = cfq_rb_first(st); 2059 rb_key += __cfqq ? __cfqq->rb_key : jiffies; 2060 } 2061 2062 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 2063 new_cfqq = 0; 2064 /* 2065 * same position, nothing more to do 2066 */ 2067 if (rb_key == cfqq->rb_key && cfqq->service_tree == st) 2068 return; 2069 2070 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); 2071 cfqq->service_tree = NULL; 2072 } 2073 2074 left = 1; 2075 parent = NULL; 2076 cfqq->service_tree = st; 2077 p = &st->rb.rb_node; 2078 while (*p) { 2079 parent = *p; 2080 __cfqq = rb_entry(parent, struct cfq_queue, rb_node); 2081 2082 /* 2083 * sort by key, that represents service time. 2084 */ 2085 if (time_before(rb_key, __cfqq->rb_key)) 2086 p = &parent->rb_left; 2087 else { 2088 p = &parent->rb_right; 2089 left = 0; 2090 } 2091 } 2092 2093 if (left) 2094 st->left = &cfqq->rb_node; 2095 2096 cfqq->rb_key = rb_key; 2097 rb_link_node(&cfqq->rb_node, parent, p); 2098 rb_insert_color(&cfqq->rb_node, &st->rb); 2099 st->count++; 2100 if (add_front || !new_cfqq) 2101 return; 2102 cfq_group_notify_queue_add(cfqd, cfqq->cfqg); 2103} 2104 2105static struct cfq_queue * 2106cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root, 2107 sector_t sector, struct rb_node **ret_parent, 2108 struct rb_node ***rb_link) 2109{ 2110 struct rb_node **p, *parent; 2111 struct cfq_queue *cfqq = NULL; 2112 2113 parent = NULL; 2114 p = &root->rb_node; 2115 while (*p) { 2116 struct rb_node **n; 2117 2118 parent = *p; 2119 cfqq = rb_entry(parent, struct cfq_queue, p_node); 2120 2121 /* 2122 * Sort strictly based on sector. Smallest to the left, 2123 * largest to the right. 
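 * (These per-ioprio prio trees index busy queues by the position of
 * their next pending request, so cfqq_close() can find a queue working
 * near the current head position.)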
2124 */ 2125 if (sector > blk_rq_pos(cfqq->next_rq)) 2126 n = &(*p)->rb_right; 2127 else if (sector < blk_rq_pos(cfqq->next_rq)) 2128 n = &(*p)->rb_left; 2129 else 2130 break; 2131 p = n; 2132 cfqq = NULL; 2133 } 2134 2135 *ret_parent = parent; 2136 if (rb_link) 2137 *rb_link = p; 2138 return cfqq; 2139} 2140 2141static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2142{ 2143 struct rb_node **p, *parent; 2144 struct cfq_queue *__cfqq; 2145 2146 if (cfqq->p_root) { 2147 rb_erase(&cfqq->p_node, cfqq->p_root); 2148 cfqq->p_root = NULL; 2149 } 2150 2151 if (cfq_class_idle(cfqq)) 2152 return; 2153 if (!cfqq->next_rq) 2154 return; 2155 2156 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; 2157 __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, 2158 blk_rq_pos(cfqq->next_rq), &parent, &p); 2159 if (!__cfqq) { 2160 rb_link_node(&cfqq->p_node, parent, p); 2161 rb_insert_color(&cfqq->p_node, cfqq->p_root); 2162 } else 2163 cfqq->p_root = NULL; 2164} 2165 2166/* 2167 * Update cfqq's position in the service tree. 2168 */ 2169static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2170{ 2171 /* 2172 * Resorting requires the cfqq to be on the RR list already. 2173 */ 2174 if (cfq_cfqq_on_rr(cfqq)) { 2175 cfq_service_tree_add(cfqd, cfqq, 0); 2176 cfq_prio_tree_add(cfqd, cfqq); 2177 } 2178} 2179 2180/* 2181 * add to busy list of queues for service, trying to be fair in ordering 2182 * the pending list according to last request service 2183 */ 2184static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2185{ 2186 cfq_log_cfqq(cfqd, cfqq, "add_to_rr"); 2187 BUG_ON(cfq_cfqq_on_rr(cfqq)); 2188 cfq_mark_cfqq_on_rr(cfqq); 2189 cfqd->busy_queues++; 2190 if (cfq_cfqq_sync(cfqq)) 2191 cfqd->busy_sync_queues++; 2192 2193 cfq_resort_rr_list(cfqd, cfqq); 2194} 2195 2196/* 2197 * Called when the cfqq no longer has requests pending, remove it from 2198 * the service tree. 2199 */ 2200static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2201{ 2202 cfq_log_cfqq(cfqd, cfqq, "del_from_rr"); 2203 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 2204 cfq_clear_cfqq_on_rr(cfqq); 2205 2206 if (!RB_EMPTY_NODE(&cfqq->rb_node)) { 2207 cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree); 2208 cfqq->service_tree = NULL; 2209 } 2210 if (cfqq->p_root) { 2211 rb_erase(&cfqq->p_node, cfqq->p_root); 2212 cfqq->p_root = NULL; 2213 } 2214 2215 cfq_group_notify_queue_del(cfqd, cfqq->cfqg); 2216 BUG_ON(!cfqd->busy_queues); 2217 cfqd->busy_queues--; 2218 if (cfq_cfqq_sync(cfqq)) 2219 cfqd->busy_sync_queues--; 2220} 2221 2222/* 2223 * rb tree support functions 2224 */ 2225static void cfq_del_rq_rb(struct request *rq) 2226{ 2227 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2228 const int sync = rq_is_sync(rq); 2229 2230 BUG_ON(!cfqq->queued[sync]); 2231 cfqq->queued[sync]--; 2232 2233 elv_rb_del(&cfqq->sort_list, rq); 2234 2235 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) { 2236 /* 2237 * Queue will be deleted from service tree when we actually 2238 * expire it later. Right now just remove it from prio tree 2239 * as it is empty. 
2240 */ 2241 if (cfqq->p_root) { 2242 rb_erase(&cfqq->p_node, cfqq->p_root); 2243 cfqq->p_root = NULL; 2244 } 2245 } 2246} 2247 2248static void cfq_add_rq_rb(struct request *rq) 2249{ 2250 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2251 struct cfq_data *cfqd = cfqq->cfqd; 2252 struct request *prev; 2253 2254 cfqq->queued[rq_is_sync(rq)]++; 2255 2256 elv_rb_add(&cfqq->sort_list, rq); 2257 2258 if (!cfq_cfqq_on_rr(cfqq)) 2259 cfq_add_cfqq_rr(cfqd, cfqq); 2260 2261 /* 2262 * check if this request is a better next-serve candidate 2263 */ 2264 prev = cfqq->next_rq; 2265 cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position); 2266 2267 /* 2268 * adjust priority tree position, if ->next_rq changes 2269 */ 2270 if (prev != cfqq->next_rq) 2271 cfq_prio_tree_add(cfqd, cfqq); 2272 2273 BUG_ON(!cfqq->next_rq); 2274} 2275 2276static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq) 2277{ 2278 elv_rb_del(&cfqq->sort_list, rq); 2279 cfqq->queued[rq_is_sync(rq)]--; 2280 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); 2281 cfq_add_rq_rb(rq); 2282 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group, 2283 rq->cmd_flags); 2284} 2285 2286static struct request * 2287cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio) 2288{ 2289 struct task_struct *tsk = current; 2290 struct cfq_io_cq *cic; 2291 struct cfq_queue *cfqq; 2292 2293 cic = cfq_cic_lookup(cfqd, tsk->io_context); 2294 if (!cic) 2295 return NULL; 2296 2297 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 2298 if (cfqq) 2299 return elv_rb_find(&cfqq->sort_list, bio_end_sector(bio)); 2300 2301 return NULL; 2302} 2303 2304static void cfq_activate_request(struct request_queue *q, struct request *rq) 2305{ 2306 struct cfq_data *cfqd = q->elevator->elevator_data; 2307 2308 cfqd->rq_in_driver++; 2309 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 2310 cfqd->rq_in_driver); 2311 2312 cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); 2313} 2314 2315static void cfq_deactivate_request(struct request_queue *q, struct request *rq) 2316{ 2317 struct cfq_data *cfqd = q->elevator->elevator_data; 2318 2319 WARN_ON(!cfqd->rq_in_driver); 2320 cfqd->rq_in_driver--; 2321 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d", 2322 cfqd->rq_in_driver); 2323} 2324 2325static void cfq_remove_request(struct request *rq) 2326{ 2327 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2328 2329 if (cfqq->next_rq == rq) 2330 cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq); 2331 2332 list_del_init(&rq->queuelist); 2333 cfq_del_rq_rb(rq); 2334 2335 cfqq->cfqd->rq_queued--; 2336 cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags); 2337 if (rq->cmd_flags & REQ_PRIO) { 2338 WARN_ON(!cfqq->prio_pending); 2339 cfqq->prio_pending--; 2340 } 2341} 2342 2343static int cfq_merge(struct request_queue *q, struct request **req, 2344 struct bio *bio) 2345{ 2346 struct cfq_data *cfqd = q->elevator->elevator_data; 2347 struct request *__rq; 2348 2349 __rq = cfq_find_rq_fmerge(cfqd, bio); 2350 if (__rq && elv_rq_merge_ok(__rq, bio)) { 2351 *req = __rq; 2352 return ELEVATOR_FRONT_MERGE; 2353 } 2354 2355 return ELEVATOR_NO_MERGE; 2356} 2357 2358static void cfq_merged_request(struct request_queue *q, struct request *req, 2359 int type) 2360{ 2361 if (type == ELEVATOR_FRONT_MERGE) { 2362 struct cfq_queue *cfqq = RQ_CFQQ(req); 2363 2364 cfq_reposition_rq_rb(cfqq, req); 2365 } 2366} 2367 2368static void cfq_bio_merged(struct request_queue *q, struct request *req, 2369 struct bio *bio) 2370{ 2371 
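 /* a bio was merged into @req: account it in the group's "merged" stat */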
cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw); 2372} 2373 2374static void 2375cfq_merged_requests(struct request_queue *q, struct request *rq, 2376 struct request *next) 2377{ 2378 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2379 struct cfq_data *cfqd = q->elevator->elevator_data; 2380 2381 /* 2382 * reposition in fifo if next is older than rq 2383 */ 2384 if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && 2385 time_before(rq_fifo_time(next), rq_fifo_time(rq)) && 2386 cfqq == RQ_CFQQ(next)) { 2387 list_move(&rq->queuelist, &next->queuelist); 2388 rq_set_fifo_time(rq, rq_fifo_time(next)); 2389 } 2390 2391 if (cfqq->next_rq == next) 2392 cfqq->next_rq = rq; 2393 cfq_remove_request(next); 2394 cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags); 2395 2396 cfqq = RQ_CFQQ(next); 2397 /* 2398 * all requests of this queue are merged to other queues, delete it 2399 * from the service tree. If it's the active_queue, 2400 * cfq_dispatch_requests() will choose to expire it or do idle 2401 */ 2402 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) && 2403 cfqq != cfqd->active_queue) 2404 cfq_del_cfqq_rr(cfqd, cfqq); 2405} 2406 2407static int cfq_allow_merge(struct request_queue *q, struct request *rq, 2408 struct bio *bio) 2409{ 2410 struct cfq_data *cfqd = q->elevator->elevator_data; 2411 struct cfq_io_cq *cic; 2412 struct cfq_queue *cfqq; 2413 2414 /* 2415 * Disallow merge of a sync bio into an async request. 2416 */ 2417 if (cfq_bio_sync(bio) && !rq_is_sync(rq)) 2418 return false; 2419 2420 /* 2421 * Lookup the cfqq that this bio will be queued with and allow 2422 * merge only if rq is queued there. 2423 */ 2424 cic = cfq_cic_lookup(cfqd, current->io_context); 2425 if (!cic) 2426 return false; 2427 2428 cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio)); 2429 return cfqq == RQ_CFQQ(rq); 2430} 2431 2432static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2433{ 2434 del_timer(&cfqd->idle_slice_timer); 2435 cfqg_stats_update_idle_time(cfqq->cfqg); 2436} 2437 2438static void __cfq_set_active_queue(struct cfq_data *cfqd, 2439 struct cfq_queue *cfqq) 2440{ 2441 if (cfqq) { 2442 cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d", 2443 cfqd->serving_wl_class, cfqd->serving_wl_type); 2444 cfqg_stats_update_avg_queue_size(cfqq->cfqg); 2445 cfqq->slice_start = 0; 2446 cfqq->dispatch_start = jiffies; 2447 cfqq->allocated_slice = 0; 2448 cfqq->slice_end = 0; 2449 cfqq->slice_dispatch = 0; 2450 cfqq->nr_sectors = 0; 2451 2452 cfq_clear_cfqq_wait_request(cfqq); 2453 cfq_clear_cfqq_must_dispatch(cfqq); 2454 cfq_clear_cfqq_must_alloc_slice(cfqq); 2455 cfq_clear_cfqq_fifo_expire(cfqq); 2456 cfq_mark_cfqq_slice_new(cfqq); 2457 2458 cfq_del_timer(cfqd, cfqq); 2459 } 2460 2461 cfqd->active_queue = cfqq; 2462} 2463 2464/* 2465 * current cfqq expired its slice (or was too idle), select new one 2466 */ 2467static void 2468__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2469 bool timed_out) 2470{ 2471 cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out); 2472 2473 if (cfq_cfqq_wait_request(cfqq)) 2474 cfq_del_timer(cfqd, cfqq); 2475 2476 cfq_clear_cfqq_wait_request(cfqq); 2477 cfq_clear_cfqq_wait_busy(cfqq); 2478 2479 /* 2480 * If this cfqq is shared between multiple processes, check to 2481 * make sure that those processes are still issuing I/Os within 2482 * the mean seek distance. If not, it may be time to break the 2483 * queues apart again. 
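 * (The actual split happens later, in process context, when the next
 * request from the task is set up -- see split_cfqq().)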
2484 */ 2485 if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq)) 2486 cfq_mark_cfqq_split_coop(cfqq); 2487 2488 /* 2489 * store what was left of this slice, if the queue idled/timed out 2490 */ 2491 if (timed_out) { 2492 if (cfq_cfqq_slice_new(cfqq)) 2493 cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq); 2494 else 2495 cfqq->slice_resid = cfqq->slice_end - jiffies; 2496 cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid); 2497 } 2498 2499 cfq_group_served(cfqd, cfqq->cfqg, cfqq); 2500 2501 if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) 2502 cfq_del_cfqq_rr(cfqd, cfqq); 2503 2504 cfq_resort_rr_list(cfqd, cfqq); 2505 2506 if (cfqq == cfqd->active_queue) 2507 cfqd->active_queue = NULL; 2508 2509 if (cfqd->active_cic) { 2510 put_io_context(cfqd->active_cic->icq.ioc); 2511 cfqd->active_cic = NULL; 2512 } 2513} 2514 2515static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out) 2516{ 2517 struct cfq_queue *cfqq = cfqd->active_queue; 2518 2519 if (cfqq) 2520 __cfq_slice_expired(cfqd, cfqq, timed_out); 2521} 2522 2523/* 2524 * Get next queue for service. Unless we have a queue preemption, 2525 * we'll simply select the first cfqq in the service tree. 2526 */ 2527static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd) 2528{ 2529 struct cfq_rb_root *st = st_for(cfqd->serving_group, 2530 cfqd->serving_wl_class, cfqd->serving_wl_type); 2531 2532 if (!cfqd->rq_queued) 2533 return NULL; 2534 2535 /* There is nothing to dispatch */ 2536 if (!st) 2537 return NULL; 2538 if (RB_EMPTY_ROOT(&st->rb)) 2539 return NULL; 2540 return cfq_rb_first(st); 2541} 2542 2543static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd) 2544{ 2545 struct cfq_group *cfqg; 2546 struct cfq_queue *cfqq; 2547 int i, j; 2548 struct cfq_rb_root *st; 2549 2550 if (!cfqd->rq_queued) 2551 return NULL; 2552 2553 cfqg = cfq_get_next_cfqg(cfqd); 2554 if (!cfqg) 2555 return NULL; 2556 2557 for_each_cfqg_st(cfqg, i, j, st) 2558 if ((cfqq = cfq_rb_first(st)) != NULL) 2559 return cfqq; 2560 return NULL; 2561} 2562 2563/* 2564 * Get and set a new active queue for service. 2565 */ 2566static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd, 2567 struct cfq_queue *cfqq) 2568{ 2569 if (!cfqq) 2570 cfqq = cfq_get_next_queue(cfqd); 2571 2572 __cfq_set_active_queue(cfqd, cfqq); 2573 return cfqq; 2574} 2575 2576static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, 2577 struct request *rq) 2578{ 2579 if (blk_rq_pos(rq) >= cfqd->last_position) 2580 return blk_rq_pos(rq) - cfqd->last_position; 2581 else 2582 return cfqd->last_position - blk_rq_pos(rq); 2583} 2584 2585static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq, 2586 struct request *rq) 2587{ 2588 return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR; 2589} 2590 2591static struct cfq_queue *cfqq_close(struct cfq_data *cfqd, 2592 struct cfq_queue *cur_cfqq) 2593{ 2594 struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio]; 2595 struct rb_node *parent, *node; 2596 struct cfq_queue *__cfqq; 2597 sector_t sector = cfqd->last_position; 2598 2599 if (RB_EMPTY_ROOT(root)) 2600 return NULL; 2601 2602 /* 2603 * First, if we find a request starting at the end of the last 2604 * request, choose it. 2605 */ 2606 __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL); 2607 if (__cfqq) 2608 return __cfqq; 2609 2610 /* 2611 * If the exact sector wasn't found, the parent of the NULL leaf 2612 * will contain the closest sector. 
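 * (It may lie on either side of the lookup point, so the neighbour in
 * the other direction is checked as well before giving up.)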
2613 */ 2614 __cfqq = rb_entry(parent, struct cfq_queue, p_node); 2615 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) 2616 return __cfqq; 2617 2618 if (blk_rq_pos(__cfqq->next_rq) < sector) 2619 node = rb_next(&__cfqq->p_node); 2620 else 2621 node = rb_prev(&__cfqq->p_node); 2622 if (!node) 2623 return NULL; 2624 2625 __cfqq = rb_entry(node, struct cfq_queue, p_node); 2626 if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq)) 2627 return __cfqq; 2628 2629 return NULL; 2630} 2631 2632/* 2633 * cfqd - obvious 2634 * cur_cfqq - passed in so that we don't decide that the current queue is 2635 * closely cooperating with itself. 2636 * 2637 * So, basically we're assuming that cur_cfqq has dispatched at least 2638 * one request, and that cfqd->last_position reflects a position on the disk 2639 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid 2640 * assumption. 2641 */ 2642static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd, 2643 struct cfq_queue *cur_cfqq) 2644{ 2645 struct cfq_queue *cfqq; 2646 2647 if (cfq_class_idle(cur_cfqq)) 2648 return NULL; 2649 if (!cfq_cfqq_sync(cur_cfqq)) 2650 return NULL; 2651 if (CFQQ_SEEKY(cur_cfqq)) 2652 return NULL; 2653 2654 /* 2655 * Don't search the priority tree if it's the only queue in the group. 2656 */ 2657 if (cur_cfqq->cfqg->nr_cfqq == 1) 2658 return NULL; 2659 2660 /* 2661 * We should notice if some of the queues are cooperating, e.g. 2662 * working closely on the same area of the disk. In that case, 2663 * we can group them together and not waste time idling. 2664 */ 2665 cfqq = cfqq_close(cfqd, cur_cfqq); 2666 if (!cfqq) 2667 return NULL; 2668 2669 /* If the new queue belongs to a different cfq_group, don't choose it */ 2670 if (cur_cfqq->cfqg != cfqq->cfqg) 2671 return NULL; 2672 2673 /* 2674 * It only makes sense to merge sync queues. 2675 */ 2676 if (!cfq_cfqq_sync(cfqq)) 2677 return NULL; 2678 if (CFQQ_SEEKY(cfqq)) 2679 return NULL; 2680 2681 /* 2682 * Do not merge queues of different priority classes 2683 */ 2684 if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq)) 2685 return NULL; 2686 2687 return cfqq; 2688} 2689 2690/* 2691 * Determine whether we should enforce the idle window for this queue. 2692 */ 2693 2694static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2695{ 2696 enum wl_class_t wl_class = cfqq_class(cfqq); 2697 struct cfq_rb_root *st = cfqq->service_tree; 2698 2699 BUG_ON(!st); 2700 BUG_ON(!st->count); 2701 2702 if (!cfqd->cfq_slice_idle) 2703 return false; 2704 2705 /* We never do for idle class queues. */ 2706 if (wl_class == IDLE_WORKLOAD) 2707 return false; 2708 2709 /* We do for queues that were marked with idle window flag. */ 2710 if (cfq_cfqq_idle_window(cfqq) && 2711 !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)) 2712 return true; 2713 2714 /* 2715 * Otherwise, we do only if they are the last ones 2716 * in their service tree. 2717 */ 2718 if (st->count == 1 && cfq_cfqq_sync(cfqq) && 2719 !cfq_io_thinktime_big(cfqd, &st->ttime, false)) 2720 return true; 2721 cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count); 2722 return false; 2723} 2724 2725static void cfq_arm_slice_timer(struct cfq_data *cfqd) 2726{ 2727 struct cfq_queue *cfqq = cfqd->active_queue; 2728 struct cfq_io_cq *cic; 2729 unsigned long sl, group_idle = 0; 2730 2731 /* 2732 * SSD device without seek penalty, disable idling. But only do so 2733 * for devices that support queuing, otherwise we still have a problem 2734 * with sync vs async workloads. 
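 * (hw_tag is a runtime estimate of whether the device does command
 * queueing; see cfq_update_hw_tag().)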
2735 */ 2736 if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag) 2737 return; 2738 2739 WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list)); 2740 WARN_ON(cfq_cfqq_slice_new(cfqq)); 2741 2742 /* 2743 * idle is disabled, either manually or by past process history 2744 */ 2745 if (!cfq_should_idle(cfqd, cfqq)) { 2746 /* no queue idling. Check for group idling */ 2747 if (cfqd->cfq_group_idle) 2748 group_idle = cfqd->cfq_group_idle; 2749 else 2750 return; 2751 } 2752 2753 /* 2754 * still active requests from this queue, don't idle 2755 */ 2756 if (cfqq->dispatched) 2757 return; 2758 2759 /* 2760 * task has exited, don't wait 2761 */ 2762 cic = cfqd->active_cic; 2763 if (!cic || !atomic_read(&cic->icq.ioc->active_ref)) 2764 return; 2765 2766 /* 2767 * If our average think time is larger than the remaining time 2768 * slice, then don't idle. This avoids overrunning the allotted 2769 * time slice. 2770 */ 2771 if (sample_valid(cic->ttime.ttime_samples) && 2772 (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) { 2773 cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu", 2774 cic->ttime.ttime_mean); 2775 return; 2776 } 2777 2778 /* There are other queues in the group, don't do group idle */ 2779 if (group_idle && cfqq->cfqg->nr_cfqq > 1) 2780 return; 2781 2782 cfq_mark_cfqq_wait_request(cfqq); 2783 2784 if (group_idle) 2785 sl = cfqd->cfq_group_idle; 2786 else 2787 sl = cfqd->cfq_slice_idle; 2788 2789 mod_timer(&cfqd->idle_slice_timer, jiffies + sl); 2790 cfqg_stats_set_start_idle_time(cfqq->cfqg); 2791 cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl, 2792 group_idle ? 1 : 0); 2793} 2794 2795/* 2796 * Move request from internal lists to the request queue dispatch list. 2797 */ 2798static void cfq_dispatch_insert(struct request_queue *q, struct request *rq) 2799{ 2800 struct cfq_data *cfqd = q->elevator->elevator_data; 2801 struct cfq_queue *cfqq = RQ_CFQQ(rq); 2802 2803 cfq_log_cfqq(cfqd, cfqq, "dispatch_insert"); 2804 2805 cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq); 2806 cfq_remove_request(rq); 2807 cfqq->dispatched++; 2808 (RQ_CFQG(rq))->dispatched++; 2809 elv_dispatch_sort(q, rq); 2810 2811 cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++; 2812 cfqq->nr_sectors += blk_rq_sectors(rq); 2813 cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags); 2814} 2815 2816/* 2817 * return expired entry, or NULL to just start from scratch in rbtree 2818 */ 2819static struct request *cfq_check_fifo(struct cfq_queue *cfqq) 2820{ 2821 struct request *rq = NULL; 2822 2823 if (cfq_cfqq_fifo_expire(cfqq)) 2824 return NULL; 2825 2826 cfq_mark_cfqq_fifo_expire(cfqq); 2827 2828 if (list_empty(&cfqq->fifo)) 2829 return NULL; 2830 2831 rq = rq_entry_fifo(cfqq->fifo.next); 2832 if (time_before(jiffies, rq_fifo_time(rq))) 2833 rq = NULL; 2834 2835 cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq); 2836 return rq; 2837} 2838 2839static inline int 2840cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 2841{ 2842 const int base_rq = cfqd->cfq_slice_async_rq; 2843 2844 WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR); 2845 2846 return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio); 2847} 2848 2849/* 2850 * Must be called with the queue_lock held. 
2851 */ 2852static int cfqq_process_refs(struct cfq_queue *cfqq) 2853{ 2854 int process_refs, io_refs; 2855 2856 io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE]; 2857 process_refs = cfqq->ref - io_refs; 2858 BUG_ON(process_refs < 0); 2859 return process_refs; 2860} 2861 2862static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq) 2863{ 2864 int process_refs, new_process_refs; 2865 struct cfq_queue *__cfqq; 2866 2867 /* 2868 * If there are no process references on the new_cfqq, then it is 2869 * unsafe to follow the ->new_cfqq chain as other cfqq's in the 2870 * chain may have dropped their last reference (not just their 2871 * last process reference). 2872 */ 2873 if (!cfqq_process_refs(new_cfqq)) 2874 return; 2875 2876 /* Avoid a circular list and skip interim queue merges */ 2877 while ((__cfqq = new_cfqq->new_cfqq)) { 2878 if (__cfqq == cfqq) 2879 return; 2880 new_cfqq = __cfqq; 2881 } 2882 2883 process_refs = cfqq_process_refs(cfqq); 2884 new_process_refs = cfqq_process_refs(new_cfqq); 2885 /* 2886 * If the process for the cfqq has gone away, there is no 2887 * sense in merging the queues. 2888 */ 2889 if (process_refs == 0 || new_process_refs == 0) 2890 return; 2891 2892 /* 2893 * Merge in the direction of the lesser amount of work. 2894 */ 2895 if (new_process_refs >= process_refs) { 2896 cfqq->new_cfqq = new_cfqq; 2897 new_cfqq->ref += process_refs; 2898 } else { 2899 new_cfqq->new_cfqq = cfqq; 2900 cfqq->ref += new_process_refs; 2901 } 2902} 2903 2904static enum wl_type_t cfq_choose_wl_type(struct cfq_data *cfqd, 2905 struct cfq_group *cfqg, enum wl_class_t wl_class) 2906{ 2907 struct cfq_queue *queue; 2908 int i; 2909 bool key_valid = false; 2910 unsigned long lowest_key = 0; 2911 enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD; 2912 2913 for (i = 0; i <= SYNC_WORKLOAD; ++i) { 2914 /* select the one with lowest rb_key */ 2915 queue = cfq_rb_first(st_for(cfqg, wl_class, i)); 2916 if (queue && 2917 (!key_valid || time_before(queue->rb_key, lowest_key))) { 2918 lowest_key = queue->rb_key; 2919 cur_best = i; 2920 key_valid = true; 2921 } 2922 } 2923 2924 return cur_best; 2925} 2926 2927static void 2928choose_wl_class_and_type(struct cfq_data *cfqd, struct cfq_group *cfqg) 2929{ 2930 unsigned slice; 2931 unsigned count; 2932 struct cfq_rb_root *st; 2933 unsigned group_slice; 2934 enum wl_class_t original_class = cfqd->serving_wl_class; 2935 2936 /* Choose next priority. 
RT > BE > IDLE */ 2937 if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg)) 2938 cfqd->serving_wl_class = RT_WORKLOAD; 2939 else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg)) 2940 cfqd->serving_wl_class = BE_WORKLOAD; 2941 else { 2942 cfqd->serving_wl_class = IDLE_WORKLOAD; 2943 cfqd->workload_expires = jiffies + 1; 2944 return; 2945 } 2946 2947 if (original_class != cfqd->serving_wl_class) 2948 goto new_workload; 2949 2950 /* 2951 * For RT and BE, we also have to choose the type 2952 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload 2953 * expiration time 2954 */ 2955 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); 2956 count = st->count; 2957 2958 /* 2959 * check workload expiration, and that we still have other queues ready 2960 */ 2961 if (count && !time_after(jiffies, cfqd->workload_expires)) 2962 return; 2963 2964new_workload: 2965 /* otherwise select new workload type */ 2966 cfqd->serving_wl_type = cfq_choose_wl_type(cfqd, cfqg, 2967 cfqd->serving_wl_class); 2968 st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type); 2969 count = st->count; 2970 2971 /* 2972 * the workload slice is computed as a fraction of target latency 2973 * proportional to the number of queues in that workload, over 2974 * all the queues in the same priority class 2975 */ 2976 group_slice = cfq_group_slice(cfqd, cfqg); 2977 2978 slice = group_slice * count / 2979 max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class], 2980 cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd, 2981 cfqg)); 2982 2983 if (cfqd->serving_wl_type == ASYNC_WORKLOAD) { 2984 unsigned int tmp; 2985 2986 /* 2987 * Async queues are currently system wide. Just taking the 2988 * proportion of queues within the same group will lead to a higher 2989 * async ratio system wide as generally the root group is going 2990 * to have higher weight. A more accurate thing would be to 2991 * calculate the system wide async/sync ratio. 2992 */ 2993 tmp = cfqd->cfq_target_latency * 2994 cfqg_busy_async_queues(cfqd, cfqg); 2995 tmp = tmp/cfqd->busy_queues; 2996 slice = min_t(unsigned, slice, tmp); 2997 2998 /* async workload slice is scaled down according to 2999 * the sync/async slice ratio. */ 3000 slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1]; 3001 } else 3002 /* sync workload slice is at least 2 * cfq_slice_idle */ 3003 slice = max(slice, 2 * cfqd->cfq_slice_idle); 3004 3005 slice = max_t(unsigned, slice, CFQ_MIN_TT); 3006 cfq_log(cfqd, "workload slice:%d", slice); 3007 cfqd->workload_expires = jiffies + slice; 3008} 3009 3010static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd) 3011{ 3012 struct cfq_rb_root *st = &cfqd->grp_service_tree; 3013 struct cfq_group *cfqg; 3014 3015 if (RB_EMPTY_ROOT(&st->rb)) 3016 return NULL; 3017 cfqg = cfq_rb_first_group(st); 3018 update_min_vdisktime(st); 3019 return cfqg; 3020} 3021 3022static void cfq_choose_cfqg(struct cfq_data *cfqd) 3023{ 3024 struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd); 3025 3026 cfqd->serving_group = cfqg; 3027 3028 /* Restore the workload type data */ 3029 if (cfqg->saved_wl_slice) { 3030 cfqd->workload_expires = jiffies + cfqg->saved_wl_slice; 3031 cfqd->serving_wl_type = cfqg->saved_wl_type; 3032 cfqd->serving_wl_class = cfqg->saved_wl_class; 3033 } else 3034 cfqd->workload_expires = jiffies - 1; 3035 3036 choose_wl_class_and_type(cfqd, cfqg); 3037} 3038 3039/* 3040 * Select a queue for service. If we have a current active queue, 3041 * check whether to continue servicing it, or retrieve and set a new one. 
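 * Roughly: expire the queue on slice overrun, switch to a close
 * cooperator if one exists, and otherwise keep waiting (per-queue or
 * per-group idling) while a new request may still arrive.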
3042 */ 3043static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) 3044{ 3045 struct cfq_queue *cfqq, *new_cfqq = NULL; 3046 3047 cfqq = cfqd->active_queue; 3048 if (!cfqq) 3049 goto new_queue; 3050 3051 if (!cfqd->rq_queued) 3052 return NULL; 3053 3054 /* 3055 * We were waiting for the group to get backlogged. Expire the queue 3056 */ 3057 if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list)) 3058 goto expire; 3059 3060 /* 3061 * The active queue has run out of time, expire it and select a new one. 3062 */ 3063 if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) { 3064 /* 3065 * If the slice had not expired at the completion of the last 3066 * request we might not have turned on the wait_busy flag. Don't 3067 * expire the queue yet. Allow the group to get backlogged. 3068 * 3069 * The very fact that we have used the slice means we 3070 * have been idling all along on this queue and it should be 3071 * ok to wait for this request to complete. 3072 */ 3073 if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list) 3074 && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { 3075 cfqq = NULL; 3076 goto keep_queue; 3077 } else 3078 goto check_group_idle; 3079 } 3080 3081 /* 3082 * The active queue has requests and isn't expired, allow it to 3083 * dispatch. 3084 */ 3085 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 3086 goto keep_queue; 3087 3088 /* 3089 * If another queue has a request waiting within our mean seek 3090 * distance, let it run. The expire code will check for close 3091 * cooperators and put the close queue at the front of the service 3092 * tree. If possible, merge the expiring queue with the new cfqq. 3093 */ 3094 new_cfqq = cfq_close_cooperator(cfqd, cfqq); 3095 if (new_cfqq) { 3096 if (!cfqq->new_cfqq) 3097 cfq_setup_merge(cfqq, new_cfqq); 3098 goto expire; 3099 } 3100 3101 /* 3102 * No requests pending. If the active queue still has requests in 3103 * flight or is idling for a new request, allow either of these 3104 * conditions to happen (or time out) before selecting a new queue. 3105 */ 3106 if (timer_pending(&cfqd->idle_slice_timer)) { 3107 cfqq = NULL; 3108 goto keep_queue; 3109 } 3110 3111 /* 3112 * This is a deep seek queue, but the device is much faster than 3113 * the queue can deliver, don't idle 3114 */ 3115 if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) && 3116 (cfq_cfqq_slice_new(cfqq) || 3117 (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) { 3118 cfq_clear_cfqq_deep(cfqq); 3119 cfq_clear_cfqq_idle_window(cfqq); 3120 } 3121 3122 if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) { 3123 cfqq = NULL; 3124 goto keep_queue; 3125 } 3126 3127 /* 3128 * If group idle is enabled and there are requests dispatched from 3129 * this group, wait for requests to complete. 3130 */ 3131check_group_idle: 3132 if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 && 3133 cfqq->cfqg->dispatched && 3134 !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) { 3135 cfqq = NULL; 3136 goto keep_queue; 3137 } 3138 3139expire: 3140 cfq_slice_expired(cfqd, 0); 3141new_queue: 3142 /* 3143 * Current queue expired. 
Check if we have to switch to a new 3144 * service tree 3145 */ 3146 if (!new_cfqq) 3147 cfq_choose_cfqg(cfqd); 3148 3149 cfqq = cfq_set_active_queue(cfqd, new_cfqq); 3150keep_queue: 3151 return cfqq; 3152} 3153 3154static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq) 3155{ 3156 int dispatched = 0; 3157 3158 while (cfqq->next_rq) { 3159 cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq); 3160 dispatched++; 3161 } 3162 3163 BUG_ON(!list_empty(&cfqq->fifo)); 3164 3165 /* By default cfqq is not expired if it is empty. Do it explicitly */ 3166 __cfq_slice_expired(cfqq->cfqd, cfqq, 0); 3167 return dispatched; 3168} 3169 3170/* 3171 * Drain our current requests. Used for barriers and when switching 3172 * io schedulers on-the-fly. 3173 */ 3174static int cfq_forced_dispatch(struct cfq_data *cfqd) 3175{ 3176 struct cfq_queue *cfqq; 3177 int dispatched = 0; 3178 3179 /* Expire the timeslice of the current active queue first */ 3180 cfq_slice_expired(cfqd, 0); 3181 while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) { 3182 __cfq_set_active_queue(cfqd, cfqq); 3183 dispatched += __cfq_forced_dispatch_cfqq(cfqq); 3184 } 3185 3186 BUG_ON(cfqd->busy_queues); 3187 3188 cfq_log(cfqd, "forced_dispatch=%d", dispatched); 3189 return dispatched; 3190} 3191 3192static inline bool cfq_slice_used_soon(struct cfq_data *cfqd, 3193 struct cfq_queue *cfqq) 3194{ 3195 /* the queue hasn't finished any request, can't estimate */ 3196 if (cfq_cfqq_slice_new(cfqq)) 3197 return true; 3198 if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched, 3199 cfqq->slice_end)) 3200 return true; 3201 3202 return false; 3203} 3204 3205static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3206{ 3207 unsigned int max_dispatch; 3208 3209 /* 3210 * Drain async requests before we start sync IO 3211 */ 3212 if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC]) 3213 return false; 3214 3215 /* 3216 * If this is an async queue and we have sync IO in flight, let it wait 3217 */ 3218 if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)) 3219 return false; 3220 3221 max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1); 3222 if (cfq_class_idle(cfqq)) 3223 max_dispatch = 1; 3224 3225 /* 3226 * Does this cfqq already have too much IO in flight? 3227 */ 3228 if (cfqq->dispatched >= max_dispatch) { 3229 bool promote_sync = false; 3230 /* 3231 * idle queue must always only have a single IO in flight 3232 */ 3233 if (cfq_class_idle(cfqq)) 3234 return false; 3235 3236 /* 3237 * If there is only one sync queue 3238 * we can ignore async queue here and give the sync 3239 * queue no dispatch limit. The reason is a sync queue can 3240 * preempt async queue, limiting the sync queue doesn't make 3241 * sense. This is useful for aiostress test. 3242 */ 3243 if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1) 3244 promote_sync = true; 3245 3246 /* 3247 * We have other queues, don't allow more IO from this one 3248 */ 3249 if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) && 3250 !promote_sync) 3251 return false; 3252 3253 /* 3254 * Sole queue user, no limit 3255 */ 3256 if (cfqd->busy_queues == 1 || promote_sync) 3257 max_dispatch = -1; 3258 else 3259 /* 3260 * Normally we start throttling cfqq when cfq_quantum/2 3261 * requests have been dispatched. But we can drive 3262 * deeper queue depths at the beginning of slice 3263 * subjected to upper limit of cfq_quantum. 
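 * (i.e. early in a slice we may allow up to cfq_quantum requests in
 * flight even though the steady-state cap is cfq_quantum/2)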
3264 */ 3265 max_dispatch = cfqd->cfq_quantum; 3266 } 3267 3268 /* 3269 * Async queues must wait a bit before being allowed dispatch. 3270 * We also ramp up the dispatch depth gradually for async IO, 3271 * based on the last sync IO we serviced 3272 */ 3273 if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { 3274 unsigned long last_sync = jiffies - cfqd->last_delayed_sync; 3275 unsigned int depth; 3276 3277 depth = last_sync / cfqd->cfq_slice[1]; 3278 if (!depth && !cfqq->dispatched) 3279 depth = 1; 3280 if (depth < max_dispatch) 3281 max_dispatch = depth; 3282 } 3283 3284 /* 3285 * If we're below the current max, allow a dispatch 3286 */ 3287 return cfqq->dispatched < max_dispatch; 3288} 3289 3290/* 3291 * Dispatch a request from cfqq, moving it to the request queue 3292 * dispatch list. 3293 */ 3294static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3295{ 3296 struct request *rq; 3297 3298 BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list)); 3299 3300 if (!cfq_may_dispatch(cfqd, cfqq)) 3301 return false; 3302 3303 /* 3304 * follow expired path, else get first next available 3305 */ 3306 rq = cfq_check_fifo(cfqq); 3307 if (!rq) 3308 rq = cfqq->next_rq; 3309 3310 /* 3311 * insert request into driver dispatch list 3312 */ 3313 cfq_dispatch_insert(cfqd->queue, rq); 3314 3315 if (!cfqd->active_cic) { 3316 struct cfq_io_cq *cic = RQ_CIC(rq); 3317 3318 atomic_long_inc(&cic->icq.ioc->refcount); 3319 cfqd->active_cic = cic; 3320 } 3321 3322 return true; 3323} 3324 3325/* 3326 * Find the cfqq that we need to service and move a request from that to the 3327 * dispatch list 3328 */ 3329static int cfq_dispatch_requests(struct request_queue *q, int force) 3330{ 3331 struct cfq_data *cfqd = q->elevator->elevator_data; 3332 struct cfq_queue *cfqq; 3333 3334 if (!cfqd->busy_queues) 3335 return 0; 3336 3337 if (unlikely(force)) 3338 return cfq_forced_dispatch(cfqd); 3339 3340 cfqq = cfq_select_queue(cfqd); 3341 if (!cfqq) 3342 return 0; 3343 3344 /* 3345 * Dispatch a request from this cfqq, if it is allowed 3346 */ 3347 if (!cfq_dispatch_request(cfqd, cfqq)) 3348 return 0; 3349 3350 cfqq->slice_dispatch++; 3351 cfq_clear_cfqq_must_dispatch(cfqq); 3352 3353 /* 3354 * expire an async queue immediately if it has used up its slice. An idle 3355 * queue always expires after 1 dispatch round. 3356 */ 3357 if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) && 3358 cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) || 3359 cfq_class_idle(cfqq))) { 3360 cfqq->slice_end = jiffies + 1; 3361 cfq_slice_expired(cfqd, 0); 3362 } 3363 3364 cfq_log_cfqq(cfqd, cfqq, "dispatched a request"); 3365 return 1; 3366} 3367 3368/* 3369 * task holds one reference to the queue, dropped when task exits. Each rq 3370 * in-flight on this queue also holds a reference, dropped when rq is freed. 3371 * 3372 * Each cfq queue took a reference on the parent group. Drop it now. 3373 * queue lock must be held here. 
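 * (cfqq_process_refs() above derives the pure process reference count
 * by subtracting the per-request references.)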
3374 */ 3375static void cfq_put_queue(struct cfq_queue *cfqq) 3376{ 3377 struct cfq_data *cfqd = cfqq->cfqd; 3378 struct cfq_group *cfqg; 3379 3380 BUG_ON(cfqq->ref <= 0); 3381 3382 cfqq->ref--; 3383 if (cfqq->ref) 3384 return; 3385 3386 cfq_log_cfqq(cfqd, cfqq, "put_queue"); 3387 BUG_ON(rb_first(&cfqq->sort_list)); 3388 BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); 3389 cfqg = cfqq->cfqg; 3390 3391 if (unlikely(cfqd->active_queue == cfqq)) { 3392 __cfq_slice_expired(cfqd, cfqq, 0); 3393 cfq_schedule_dispatch(cfqd); 3394 } 3395 3396 BUG_ON(cfq_cfqq_on_rr(cfqq)); 3397 kmem_cache_free(cfq_pool, cfqq); 3398 cfqg_put(cfqg); 3399} 3400 3401static void cfq_put_cooperator(struct cfq_queue *cfqq) 3402{ 3403 struct cfq_queue *__cfqq, *next; 3404 3405 /* 3406 * If this queue was scheduled to merge with another queue, be 3407 * sure to drop the reference taken on that queue (and others in 3408 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs. 3409 */ 3410 __cfqq = cfqq->new_cfqq; 3411 while (__cfqq) { 3412 if (__cfqq == cfqq) { 3413 WARN(1, "cfqq->new_cfqq loop detected\n"); 3414 break; 3415 } 3416 next = __cfqq->new_cfqq; 3417 cfq_put_queue(__cfqq); 3418 __cfqq = next; 3419 } 3420} 3421 3422static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3423{ 3424 if (unlikely(cfqq == cfqd->active_queue)) { 3425 __cfq_slice_expired(cfqd, cfqq, 0); 3426 cfq_schedule_dispatch(cfqd); 3427 } 3428 3429 cfq_put_cooperator(cfqq); 3430 3431 cfq_put_queue(cfqq); 3432} 3433 3434static void cfq_init_icq(struct io_cq *icq) 3435{ 3436 struct cfq_io_cq *cic = icq_to_cic(icq); 3437 3438 cic->ttime.last_end_request = jiffies; 3439} 3440 3441static void cfq_exit_icq(struct io_cq *icq) 3442{ 3443 struct cfq_io_cq *cic = icq_to_cic(icq); 3444 struct cfq_data *cfqd = cic_to_cfqd(cic); 3445 3446 if (cic->cfqq[BLK_RW_ASYNC]) { 3447 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); 3448 cic->cfqq[BLK_RW_ASYNC] = NULL; 3449 } 3450 3451 if (cic->cfqq[BLK_RW_SYNC]) { 3452 cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]); 3453 cic->cfqq[BLK_RW_SYNC] = NULL; 3454 } 3455} 3456 3457static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic) 3458{ 3459 struct task_struct *tsk = current; 3460 int ioprio_class; 3461 3462 if (!cfq_cfqq_prio_changed(cfqq)) 3463 return; 3464 3465 ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); 3466 switch (ioprio_class) { 3467 default: 3468 printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 3469 case IOPRIO_CLASS_NONE: 3470 /* 3471 * no prio set, inherit CPU scheduling settings 3472 */ 3473 cfqq->ioprio = task_nice_ioprio(tsk); 3474 cfqq->ioprio_class = task_nice_ioclass(tsk); 3475 break; 3476 case IOPRIO_CLASS_RT: 3477 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio); 3478 cfqq->ioprio_class = IOPRIO_CLASS_RT; 3479 break; 3480 case IOPRIO_CLASS_BE: 3481 cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio); 3482 cfqq->ioprio_class = IOPRIO_CLASS_BE; 3483 break; 3484 case IOPRIO_CLASS_IDLE: 3485 cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 3486 cfqq->ioprio = 7; 3487 cfq_clear_cfqq_idle_window(cfqq); 3488 break; 3489 } 3490 3491 /* 3492 * keep track of original prio settings in case we have to temporarily 3493 * elevate the priority of this queue 3494 */ 3495 cfqq->org_ioprio = cfqq->ioprio; 3496 cfq_clear_cfqq_prio_changed(cfqq); 3497} 3498 3499static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio) 3500{ 3501 int ioprio = cic->icq.ioc->ioprio; 3502 struct cfq_data *cfqd = cic_to_cfqd(cic); 3503 struct cfq_queue *cfqq; 3504 3505 /* 3506 * Check whether ioprio 
has changed. The condition may trigger 3507 * spuriously on a newly created cic but there's no harm. 3508 */ 3509 if (unlikely(!cfqd) || likely(cic->ioprio == ioprio)) 3510 return; 3511 3512 cfqq = cic->cfqq[BLK_RW_ASYNC]; 3513 if (cfqq) { 3514 struct cfq_queue *new_cfqq; 3515 new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio, 3516 GFP_ATOMIC); 3517 if (new_cfqq) { 3518 cic->cfqq[BLK_RW_ASYNC] = new_cfqq; 3519 cfq_put_queue(cfqq); 3520 } 3521 } 3522 3523 cfqq = cic->cfqq[BLK_RW_SYNC]; 3524 if (cfqq) 3525 cfq_mark_cfqq_prio_changed(cfqq); 3526 3527 cic->ioprio = ioprio; 3528} 3529 3530static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3531 pid_t pid, bool is_sync) 3532{ 3533 RB_CLEAR_NODE(&cfqq->rb_node); 3534 RB_CLEAR_NODE(&cfqq->p_node); 3535 INIT_LIST_HEAD(&cfqq->fifo); 3536 3537 cfqq->ref = 0; 3538 cfqq->cfqd = cfqd; 3539 3540 cfq_mark_cfqq_prio_changed(cfqq); 3541 3542 if (is_sync) { 3543 if (!cfq_class_idle(cfqq)) 3544 cfq_mark_cfqq_idle_window(cfqq); 3545 cfq_mark_cfqq_sync(cfqq); 3546 } 3547 cfqq->pid = pid; 3548} 3549 3550#ifdef CONFIG_CFQ_GROUP_IOSCHED 3551static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) 3552{ 3553 struct cfq_data *cfqd = cic_to_cfqd(cic); 3554 struct cfq_queue *sync_cfqq; 3555 uint64_t id; 3556 3557 rcu_read_lock(); 3558 id = bio_blkcg(bio)->id; 3559 rcu_read_unlock(); 3560 3561 /* 3562 * Check whether blkcg has changed. The condition may trigger 3563 * spuriously on a newly created cic but there's no harm. 3564 */ 3565 if (unlikely(!cfqd) || likely(cic->blkcg_id == id)) 3566 return; 3567 3568 sync_cfqq = cic_to_cfqq(cic, 1); 3569 if (sync_cfqq) { 3570 /* 3571 * Drop reference to sync queue. A new sync queue will be 3572 * assigned in new group upon arrival of a fresh request. 3573 */ 3574 cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup"); 3575 cic_set_cfqq(cic, NULL, 1); 3576 cfq_put_queue(sync_cfqq); 3577 } 3578 3579 cic->blkcg_id = id; 3580} 3581#else 3582static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { } 3583#endif /* CONFIG_CFQ_GROUP_IOSCHED */ 3584 3585static struct cfq_queue * 3586cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, 3587 struct bio *bio, gfp_t gfp_mask) 3588{ 3589 struct blkcg *blkcg; 3590 struct cfq_queue *cfqq, *new_cfqq = NULL; 3591 struct cfq_group *cfqg; 3592 3593retry: 3594 rcu_read_lock(); 3595 3596 blkcg = bio_blkcg(bio); 3597 cfqg = cfq_lookup_create_cfqg(cfqd, blkcg); 3598 cfqq = cic_to_cfqq(cic, is_sync); 3599 3600 /* 3601 * Always try a new alloc if we fell back to the OOM cfqq 3602 * originally, since it should just be a temporary situation. 
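 * (oom_cfqq is a fallback queue embedded in cfq_data and used when
 * allocation fails; being stuck on it should never become permanent.)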
3603 */ 3604 if (!cfqq || cfqq == &cfqd->oom_cfqq) { 3605 cfqq = NULL; 3606 if (new_cfqq) { 3607 cfqq = new_cfqq; 3608 new_cfqq = NULL; 3609 } else if (gfp_mask & __GFP_WAIT) { 3610 rcu_read_unlock(); 3611 spin_unlock_irq(cfqd->queue->queue_lock); 3612 new_cfqq = kmem_cache_alloc_node(cfq_pool, 3613 gfp_mask | __GFP_ZERO, 3614 cfqd->queue->node); 3615 spin_lock_irq(cfqd->queue->queue_lock); 3616 if (new_cfqq) 3617 goto retry; 3618 else 3619 return &cfqd->oom_cfqq; 3620 } else { 3621 cfqq = kmem_cache_alloc_node(cfq_pool, 3622 gfp_mask | __GFP_ZERO, 3623 cfqd->queue->node); 3624 } 3625 3626 if (cfqq) { 3627 cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync); 3628 cfq_init_prio_data(cfqq, cic); 3629 cfq_link_cfqq_cfqg(cfqq, cfqg); 3630 cfq_log_cfqq(cfqd, cfqq, "alloced"); 3631 } else 3632 cfqq = &cfqd->oom_cfqq; 3633 } 3634 3635 if (new_cfqq) 3636 kmem_cache_free(cfq_pool, new_cfqq); 3637 3638 rcu_read_unlock(); 3639 return cfqq; 3640} 3641 3642static struct cfq_queue ** 3643cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) 3644{ 3645 switch (ioprio_class) { 3646 case IOPRIO_CLASS_RT: 3647 return &cfqd->async_cfqq[0][ioprio]; 3648 case IOPRIO_CLASS_NONE: 3649 ioprio = IOPRIO_NORM; 3650 /* fall through */ 3651 case IOPRIO_CLASS_BE: 3652 return &cfqd->async_cfqq[1][ioprio]; 3653 case IOPRIO_CLASS_IDLE: 3654 return &cfqd->async_idle_cfqq; 3655 default: 3656 BUG(); 3657 } 3658} 3659 3660static struct cfq_queue * 3661cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic, 3662 struct bio *bio, gfp_t gfp_mask) 3663{ 3664 const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio); 3665 const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio); 3666 struct cfq_queue **async_cfqq = NULL; 3667 struct cfq_queue *cfqq = NULL; 3668 3669 if (!is_sync) { 3670 async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio); 3671 cfqq = *async_cfqq; 3672 } 3673 3674 if (!cfqq) 3675 cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask); 3676 3677 /* 3678 * pin the queue now that it's allocated, scheduler exit will prune it 3679 */ 3680 if (!is_sync && !(*async_cfqq)) { 3681 cfqq->ref++; 3682 *async_cfqq = cfqq; 3683 } 3684 3685 cfqq->ref++; 3686 return cfqq; 3687} 3688 3689static void 3690__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle) 3691{ 3692 unsigned long elapsed = jiffies - ttime->last_end_request; 3693 elapsed = min(elapsed, 2UL * slice_idle); 3694 3695 ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8; 3696 ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8; 3697 ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples; 3698} 3699 3700static void 3701cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3702 struct cfq_io_cq *cic) 3703{ 3704 if (cfq_cfqq_sync(cfqq)) { 3705 __cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle); 3706 __cfq_update_io_thinktime(&cfqq->service_tree->ttime, 3707 cfqd->cfq_slice_idle); 3708 } 3709#ifdef CONFIG_CFQ_GROUP_IOSCHED 3710 __cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle); 3711#endif 3712} 3713 3714static void 3715cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3716 struct request *rq) 3717{ 3718 sector_t sdist = 0; 3719 sector_t n_sec = blk_rq_sectors(rq); 3720 if (cfqq->last_request_pos) { 3721 if (cfqq->last_request_pos < blk_rq_pos(rq)) 3722 sdist = blk_rq_pos(rq) - cfqq->last_request_pos; 3723 else 3724 sdist = cfqq->last_request_pos - blk_rq_pos(rq); 3725 } 3726 3727 cfqq->seek_history <<= 1; 3728 if 
(blk_queue_nonrot(cfqd->queue)) 3729 cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT); 3730 else 3731 cfqq->seek_history |= (sdist > CFQQ_SEEK_THR); 3732} 3733 3734/* 3735 * Disable idle window if the process thinks too long or seeks so much that 3736 * it doesn't matter 3737 */ 3738static void 3739cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3740 struct cfq_io_cq *cic) 3741{ 3742 int old_idle, enable_idle; 3743 3744 /* 3745 * Don't idle for async or idle io prio class 3746 */ 3747 if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq)) 3748 return; 3749 3750 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); 3751 3752 if (cfqq->queued[0] + cfqq->queued[1] >= 4) 3753 cfq_mark_cfqq_deep(cfqq); 3754 3755 if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE)) 3756 enable_idle = 0; 3757 else if (!atomic_read(&cic->icq.ioc->active_ref) || 3758 !cfqd->cfq_slice_idle || 3759 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq))) 3760 enable_idle = 0; 3761 else if (sample_valid(cic->ttime.ttime_samples)) { 3762 if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle) 3763 enable_idle = 0; 3764 else 3765 enable_idle = 1; 3766 } 3767 3768 if (old_idle != enable_idle) { 3769 cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle); 3770 if (enable_idle) 3771 cfq_mark_cfqq_idle_window(cfqq); 3772 else 3773 cfq_clear_cfqq_idle_window(cfqq); 3774 } 3775} 3776 3777/* 3778 * Check if new_cfqq should preempt the currently active queue. Returns false 3779 * for no (or if we aren't sure); true will cause a preempt. 3780 */ 3781static bool 3782cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, 3783 struct request *rq) 3784{ 3785 struct cfq_queue *cfqq; 3786 3787 cfqq = cfqd->active_queue; 3788 if (!cfqq) 3789 return false; 3790 3791 if (cfq_class_idle(new_cfqq)) 3792 return false; 3793 3794 if (cfq_class_idle(cfqq)) 3795 return true; 3796 3797 /* 3798 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice. 3799 */ 3800 if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq)) 3801 return false; 3802 3803 /* 3804 * if the new request is sync, but the currently running queue is 3805 * not, let the sync request have priority. 3806 */ 3807 if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq)) 3808 return true; 3809 3810 if (new_cfqq->cfqg != cfqq->cfqg) 3811 return false; 3812 3813 if (cfq_slice_used(cfqq)) 3814 return true; 3815 3816 /* Allow preemption only if we are idling on sync-noidle tree */ 3817 if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD && 3818 cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD && 3819 new_cfqq->service_tree->count == 2 && 3820 RB_EMPTY_ROOT(&cfqq->sort_list)) 3821 return true; 3822 3823 /* 3824 * So both queues are sync. Let the new request get disk time if 3825 * it's a metadata request and the current queue is doing regular IO. 3826 */ 3827 if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending) 3828 return true; 3829 3830 /* 3831 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice. 
3832 */ 3833 if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq)) 3834 return true; 3835 3836 /* The active queue is empty and we are not going to idle for it; allow preemption */ 3837 if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq)) 3838 return true; 3839 3840 if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq)) 3841 return false; 3842 3843 /* 3844 * if this request is as good as one we would expect from the 3845 * current cfqq, let it preempt 3846 */ 3847 if (cfq_rq_close(cfqd, cfqq, rq)) 3848 return true; 3849 3850 return false; 3851} 3852 3853/* 3854 * cfqq preempts the active queue. If we allowed a preempt with no slice left, 3855 * let it have half of its nominal slice. 3856 */ 3857static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3858{ 3859 enum wl_type_t old_type = cfqq_type(cfqd->active_queue); 3860 3861 cfq_log_cfqq(cfqd, cfqq, "preempt"); 3862 cfq_slice_expired(cfqd, 1); 3863 3864 /* 3865 * The workload type has changed; don't save the slice, otherwise the 3866 * preempt doesn't happen 3867 */ 3868 if (old_type != cfqq_type(cfqq)) 3869 cfqq->cfqg->saved_wl_slice = 0; 3870 3871 /* 3872 * Put the new queue at the front of the current list, 3873 * so we know that it will be selected next. 3874 */ 3875 BUG_ON(!cfq_cfqq_on_rr(cfqq)); 3876 3877 cfq_service_tree_add(cfqd, cfqq, 1); 3878 3879 cfqq->slice_end = 0; 3880 cfq_mark_cfqq_slice_new(cfqq); 3881} 3882 3883/* 3884 * Called when a new fs request (rq) is added (to cfqq). Check if there's 3885 * something we should do about it 3886 */ 3887static void 3888cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, 3889 struct request *rq) 3890{ 3891 struct cfq_io_cq *cic = RQ_CIC(rq); 3892 3893 cfqd->rq_queued++; 3894 if (rq->cmd_flags & REQ_PRIO) 3895 cfqq->prio_pending++; 3896 3897 cfq_update_io_thinktime(cfqd, cfqq, cic); 3898 cfq_update_io_seektime(cfqd, cfqq, rq); 3899 cfq_update_idle_window(cfqd, cfqq, cic); 3900 3901 cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 3902 3903 if (cfqq == cfqd->active_queue) { 3904 /* 3905 * Remember that we saw a request from this process, but 3906 * don't start queuing just yet. Otherwise we risk seeing lots 3907 * of tiny requests, because we disrupt the normal plugging 3908 * and merging. If the request is already larger than a single 3909 * page, let it rip immediately. For that case we assume that 3910 * merging is already done. Ditto for a busy system that 3911 * has other work pending, don't risk delaying until the 3912 * idle timer unplug to continue working. 
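 * (e.g. a task issuing small sequential writes: leaving the idle timer
 * armed gives the block layer a chance to merge the follow-ups instead
 * of dispatching them one page at a time)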
3913 */ 3914 if (cfq_cfqq_wait_request(cfqq)) { 3915 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 3916 cfqd->busy_queues > 1) { 3917 cfq_del_timer(cfqd, cfqq); 3918 cfq_clear_cfqq_wait_request(cfqq); 3919 __blk_run_queue(cfqd->queue); 3920 } else { 3921 cfqg_stats_update_idle_time(cfqq->cfqg); 3922 cfq_mark_cfqq_must_dispatch(cfqq); 3923 } 3924 } 3925 } else if (cfq_should_preempt(cfqd, cfqq, rq)) { 3926 /* 3927 * not the active queue - expire the current slice if it is 3928 * idle and has expired its mean thinktime, or if this new queue 3929 * has some old slice time left and is of higher priority, or if 3930 * this new queue is RT and the current one is BE 3931 */ 3932 cfq_preempt_queue(cfqd, cfqq); 3933 __blk_run_queue(cfqd->queue); 3934 } 3935} 3936 3937static void cfq_insert_request(struct request_queue *q, struct request *rq) 3938{ 3939 struct cfq_data *cfqd = q->elevator->elevator_data; 3940 struct cfq_queue *cfqq = RQ_CFQQ(rq); 3941 3942 cfq_log_cfqq(cfqd, cfqq, "insert_request"); 3943 cfq_init_prio_data(cfqq, RQ_CIC(rq)); 3944 3945 rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]); 3946 list_add_tail(&rq->queuelist, &cfqq->fifo); 3947 cfq_add_rq_rb(rq); 3948 cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group, 3949 rq->cmd_flags); 3950 cfq_rq_enqueued(cfqd, cfqq, rq); 3951} 3952 3953/* 3954 * Update hw_tag based on peak queue depth over 50 samples under 3955 * sufficient load. 3956 */ 3957static void cfq_update_hw_tag(struct cfq_data *cfqd) 3958{ 3959 struct cfq_queue *cfqq = cfqd->active_queue; 3960 3961 if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth) 3962 cfqd->hw_tag_est_depth = cfqd->rq_in_driver; 3963 3964 if (cfqd->hw_tag == 1) 3965 return; 3966 3967 if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN && 3968 cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN) 3969 return; 3970 3971 /* 3972 * If the active queue doesn't have enough requests and can idle, cfq 3973 * might not dispatch sufficient requests to the hardware. Don't zero 3974 * hw_tag in this case 3975 */ 3976 if (cfqq && cfq_cfqq_idle_window(cfqq) && 3977 cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] < 3978 CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN) 3979 return; 3980 3981 if (cfqd->hw_tag_samples++ < 50) 3982 return; 3983 3984 if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) 3985 cfqd->hw_tag = 1; 3986 else 3987 cfqd->hw_tag = 0; 3988} 3989 3990static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq) 3991{ 3992 struct cfq_io_cq *cic = cfqd->active_cic; 3993 3994 /* If the queue already has requests, don't wait */ 3995 if (!RB_EMPTY_ROOT(&cfqq->sort_list)) 3996 return false; 3997 3998 /* If there are other queues in the group, don't wait */ 3999 if (cfqq->cfqg->nr_cfqq > 1) 4000 return false; 4001 4002 /* the only queue in the group, but think time is big */ 4003 if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) 4004 return false; 4005 4006 if (cfq_slice_used(cfqq)) 4007 return true; 4008 4009 /* if slice left is less than think time, wait busy */ 4010 if (cic && sample_valid(cic->ttime.ttime_samples) 4011 && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) 4012 return true; 4013 4014 /* 4015 * If the think time is less than a jiffy then ttime_mean=0 and the 4016 * check above will not be true. It might happen that the slice has not 4017 * expired yet but will expire soon (4-5 ns) during select_queue(). To 4018 * cover the case where the think time is less than a jiffy, mark the 4019 * queue wait busy if only 1 jiffy is left in the slice. 
static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but its think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if the slice left is less than the think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If the think time is less than a jiffy, then ttime_mean = 0 and
	 * the check above will not be true. It might happen that the slice
	 * has not expired yet but will expire soon (perhaps only a few ns
	 * later) during select_queue(). To cover the case where the think
	 * time is less than a jiffy, mark the queue wait busy if only
	 * 1 jiffy is left in the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
				     rq_io_start_time_ns(rq), rq->cmd_flags);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *st;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			st = cfqq->service_tree;
		else
			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
				    cfqq_type(cfqq));

		st->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for the next request to come in before we
		 * expire the queue?
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}
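/*
 * Editorial note: the wait-busy extension above simply adds
 * cfq_slice_idle (or cfq_group_idle when slice idling is disabled) to
 * the clock. With the defaults and HZ == 1000, cfq_slice_idle is
 * HZ / 125 = 8 jiffies, so a queue that earns wait-busy keeps its slice
 * alive for roughly another 8 ms in the hope that its next sync request
 * arrives before the extension runs out.
 */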
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just look up a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down the rq reference on the cfqg */
		cfqg_put(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}
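/*
 * Editorial sketch (not part of CFQ): cfq_merge_cfqqs() above is a
 * classic reference handoff -- the io context's pointer is switched to
 * the new queue (whose reference was already taken when new_cfqq was
 * set up) and only then is the old queue's reference dropped, so there
 * is no window where the cic points at a queue it does not hold. In
 * miniature, for a hypothetical refcounted object with get/put helpers:
 *
 *	// ref on 'replacement' was taken earlier, as for new_cfqq
 *	old = slot;		// remember the current target
 *	slot = replacement;	// repoint first ...
 *	obj_put(old);		// ... drop the old reference last
 *
 * split_cfqq() is the inverse decision: a sole owner (process_refs == 1)
 * reclaims the queue in place; any other caller drops its reference and
 * returns NULL to force allocation of a fresh queue.
 */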
/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);
new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue. The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke the request handler if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
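/*
 * Editorial sketch (not part of CFQ): cfq_idle_slice_timer() follows the
 * classic pre-4.15 kernel timer idiom -- the handler receives an opaque
 * 'unsigned long data' cookie, casts it back to the owning structure,
 * and touches all state under a lock with IRQs saved because the timer
 * fires in softirq context. The pattern in isolation, for a hypothetical
 * context structure:
 *
 *	static void my_timer_fn(unsigned long data)
 *	{
 *		struct my_ctx *ctx = (struct my_ctx *)data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&ctx->lock, flags);
 *		// ... inspect and update ctx state ...
 *		spin_unlock_irqrestore(&ctx->lock, flags);
 *	}
 *
 * with matching setup, as done for idle_slice_timer in cfq_init_queue()
 * below:
 *
 *	init_timer(&ctx->timer);
 *	ctx->timer.function = my_timer_fn;
 *	ctx->timer.data = (unsigned long)ctx;
 */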
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#else
	kfree(cfqd->root_group);
#endif
	kfree(cfqd);
}

static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
{
	struct cfq_data *cfqd;
	struct blkcg_gq *blkg __maybe_unused;
	int i, ret;
	struct elevator_queue *eq;

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	cfqd = kzalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
	if (!cfqd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = cfqd;

	cfqd->queue = q;
	spin_lock_irq(q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(q->queue_lock);

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
	if (ret)
		goto out_free;

	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
#else
	ret = -ENOMEM;
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (!cfqd->root_group)
		goto out_free;

	cfq_init_cfqg_base(cfqd->root_group);
#endif
	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;
	cfqd->root_group->leaf_weight = 2 * CFQ_WEIGHT_DEFAULT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it. oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked. Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;

	spin_lock_irq(q->queue_lock);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfqg_put(cfqd->root_group);
	spin_unlock_irq(q->queue_lock);

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_target_latency = cfq_target_latency;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * we optimistically start assuming sync ops weren't delayed in the
	 * last second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return 0;

out_free:
	kfree(cfqd);
	kobject_put(&eq->kobj);
	return ret;
}
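/*
 * Editorial note: with HZ == 1000 the jiffies-based defaults installed
 * above work out to round millisecond values:
 *
 *	cfq_slice_sync     = HZ / 10     = 100 ms per sync slice
 *	cfq_slice_async    = HZ / 25     =  40 ms per async slice
 *	cfq_slice_idle     = HZ / 125    =   8 ms of idle wait
 *	cfq_group_idle     = HZ / 125    =   8 ms of group idle
 *	cfq_fifo_expire    = HZ/4, HZ/8  = 250 ms async, 125 ms sync
 *	cfq_target_latency = HZ * 3/10   = 300 ms latency target
 *
 * On low-HZ builds some of these divisions round down to 0, which is
 * why cfq_init() below bumps zero values up to a single jiffy.
 */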
/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1,
		UINT_MAX, 1);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	__ATTR_NULL
};
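/*
 * Editorial note: each CFQ_ATTR(name) entry above wires one sysfs file
 * (e.g. /sys/block/<dev>/queue/iosched/slice_idle) to a show/store pair
 * generated by the macros. For instance, the preprocessor expands
 * SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1) to:
 *
 *	static ssize_t cfq_slice_idle_show(struct elevator_queue *e,
 *					   char *page)
 *	{
 *		struct cfq_data *cfqd = e->elevator_data;
 *		unsigned int __data = cfqd->cfq_slice_idle;
 *		if (1)
 *			__data = jiffies_to_msecs(__data);
 *		return cfq_var_show(__data, (page));
 *	}
 *
 * so tunables built with __CONV == 1 are kept in jiffies internally but
 * exported to (and accepted from) userspace in milliseconds.
 */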
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.icq_size	= sizeof(struct cfq_io_cq),
	.icq_align	= __alignof__(struct cfq_io_cq),
	.elevator_attrs	= cfq_attrs,
	.elevator_name	= "cfq",
	.elevator_owner	= THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
	.pd_size		= sizeof(struct cfq_group),
	.cftypes		= cfq_blkcg_files,

	.pd_init_fn		= cfq_pd_init,
	.pd_offline_fn		= cfq_pd_offline,
	.pd_reset_stats_fn	= cfq_pd_reset_stats,
};
#endif

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;

	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;

	ret = elv_register(&iosched_cfq);
	if (ret)
		goto err_free_pool;

	return 0;

err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");
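/*
 * Editorial note: once registered via elv_register() above, the
 * scheduler is selected and tuned entirely through sysfs. Assuming a
 * disk named sda:
 *
 *	# echo cfq > /sys/block/sda/queue/scheduler
 *	# cat /sys/block/sda/queue/scheduler
 *	noop deadline [cfq]
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * The tunable file names match the CFQ_ATTR() list above; the last
 * command disables slice idling, a common tweak for fast SSDs.
 */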