#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
	BLKG_RWSTAT_READ,
	BLKG_RWSTAT_WRITE,
	BLKG_RWSTAT_SYNC,
	BLKG_RWSTAT_ASYNC,

	BLKG_RWSTAT_NR,
	BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq __rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
	struct percpu_counter		cpu_cnt;
	atomic64_t			aux_cnt;
};

struct blkg_rwstat {
	struct percpu_counter		cpu_cnt[BLKG_RWSTAT_NR];
	atomic64_t			aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
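
/*
 * Example (illustrative sketch, not part of the kernel API): a policy
 * typically embeds blkg_policy_data at the start of its own per-blkg
 * structure and recovers it with container_of().  The names
 * "my_blkg_data" and "my_weight" below are hypothetical.
 *
 *	struct my_blkg_data {
 *		struct blkg_policy_data	pd;	// must be the first member
 *		u64			my_weight;
 *	};
 *
 *	static struct my_blkg_data *pd_to_my(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct my_blkg_data, pd) : NULL;
 *	}
 */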

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/*
	 * Each blkg gets congested separately and the congestion state is
	 * propagated to the matching bdi_writeback_congested.
	 */
	struct bdi_writeback_congested	*wb_congested;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* request allocation list for this blkcg-q pair */
	struct request_list		rl;

	/* reference count */
	atomic_t			refcnt;

	/* is this blkg online?  protected by both blkcg and q locks */
	bool				online;

	struct blkg_rwstat		stat_bytes;
	struct blkg_rwstat		stat_ios;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
				    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
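
/*
 * Example (hedged sketch): defining and registering a minimal policy.
 * "my_pol", my_pd_alloc_fn() and my_pd_free_fn() are hypothetical;
 * pd_alloc_fn()/pd_free_fn() allocate and free the per-blkg data and
 * blkcg_policy_register() assigns ->plid on success.
 *
 *	static struct blkcg_policy my_pol = {
 *		.pd_alloc_fn	= my_pd_alloc_fn,
 *		.pd_free_fn	= my_pd_free_fn,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		// queues opt in later via blkcg_activate_policy(q, &my_pol)
 *		return blkcg_policy_register(&my_pol);
 *	}
 */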

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
			 const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
		       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
			    struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
					     struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
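
/*
 * Example (illustrative sketch): a cftype write handler usually brackets
 * its body with blkg_conf_prep()/blkg_conf_finish().  "my_pol" and the
 * parsing step are hypothetical.
 *
 *	static ssize_t my_write(struct kernfs_open_file *of, char *buf,
 *				size_t nbytes, loff_t off)
 *	{
 *		struct blkcg *blkcg = css_to_blkcg(of_css(of));
 *		struct blkg_conf_ctx ctx;
 *		int ret;
 *
 *		ret = blkg_conf_prep(blkcg, &my_pol, buf, &ctx);
 *		if (ret)
 *			return ret;
 *		// parse ctx.body and update blkg_to_pd(ctx.blkg, &my_pol)
 *		blkg_conf_finish(&ctx);
 *		return nbytes;
 *	}
 */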

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
	return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_css)
		return css_to_blkcg(bio->bi_css);
	return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}
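
/*
 * Example (sketch): walking from a blkcg up to the root, e.g. to apply
 * hierarchical limits.  do_something_with() is a hypothetical helper.
 *
 *	struct blkcg *pos;
 *
 *	for (pos = blkcg; pos; pos = blkcg_parent(pos))
 *		do_something_with(pos);
 */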

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());

	if (unlikely(blk_queue_bypass(q)))
		return NULL;
	return __blkg_lookup(blkcg, q, false);
}
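
/*
 * Example (sketch): a lookup must be wrapped in rcu_read_lock(); the
 * returned blkg is only guaranteed to stay alive within the RCU section
 * unless it is pinned with blkg_get().
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		blkg_get(blkg);		// keep it past the RCU section
 *	rcu_read_unlock();
 */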

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}
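
/*
 * Example (sketch): formatting the cgroup path for a debug message; the
 * buffer size is arbitrary and error handling is simplified.
 *
 *	char path[128];
 *
 *	if (blkg_path(blkg, path, sizeof(path)) >= 0)
 *		pr_debug("blkg %s\n", path);
 */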

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
	if (atomic_dec_and_test(&blkg->refcnt))
		call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
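
/*
 * Example (sketch): visiting @blkg and all of its online descendants under
 * the RCU read lock.  update_stats() is a hypothetical per-blkg helper.
 *
 *	struct blkcg_gq *pos;
 *	struct cgroup_subsys_state *pos_css;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(pos, pos_css, blkg)
 *		update_stats(pos);
 *	rcu_read_unlock();
 */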

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;

	rcu_read_lock();

	blkcg = bio_blkcg(bio);

	/* bypass blkg lookup and use @q->root_rl directly for root */
	if (blkcg == &blkcg_root)
		goto root_rl;

	/*
	 * Try to use blkg->rl.  blkg lookup may fail under memory pressure
	 * or if either the blkcg or queue is going away.  Fall back to
	 * root_rl in such cases.
	 */
	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg))
		goto root_rl;

	blkg_get(blkg);
	rcu_read_unlock();
	return &blkg->rl;
root_rl:
	rcu_read_unlock();
	return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
	if (rl->blkg->blkcg != &blkcg_root)
		blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
	rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
	return rq->rl;
}
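
/*
 * Example (sketch): how the allocation path pairs these helpers; the
 * request allocation itself is elided.
 *
 *	struct request_list *rl = blk_get_rl(q, bio);	// under queue_lock
 *	struct request *rq = ...;			// allocate from @rl
 *
 *	blk_rq_set_rl(rq, rl);
 *	// ... when the request is freed:
 *	blk_put_rl(blk_rq_rl(rq));
 */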

struct request_list *__blk_queue_next_rl(struct request_list *rl,
					 struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
	int ret;

	ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
	if (ret)
		return ret;

	atomic64_set(&stat->aux_cnt, 0);
	return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
	percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
	percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
	return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
	percpu_counter_set(&stat->cpu_cnt, 0);
	atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
				     struct blkg_stat *from)
{
	atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
		     &to->aux_cnt);
}
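
/*
 * Example (sketch): typical blkg_stat lifecycle inside a policy's per-blkg
 * data; the 4096 byte sample value is arbitrary.
 *
 *	struct blkg_stat st;
 *
 *	if (blkg_stat_init(&st, GFP_KERNEL))
 *		return -ENOMEM;
 *	blkg_stat_add(&st, 4096);
 *	pr_debug("count=%llu\n", blkg_stat_read(&st));
 *	blkg_stat_exit(&st);
 */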

static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
	int i, ret;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
		if (ret) {
			while (--i >= 0)
				percpu_counter_destroy(&rwstat->cpu_cnt[i]);
			return ret;
		}
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
	return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
				   unsigned int op, uint64_t val)
{
	struct percpu_counter *cnt;

	if (op_is_write(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

	if (op_is_sync(op))
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
	else
		cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

	percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it in the aux counts.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat result;
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_set(&result.aux_cnt[i],
			     percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
	return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
	struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

	return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
		atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++) {
		percpu_counter_set(&rwstat->cpu_cnt[i], 0);
		atomic64_set(&rwstat->aux_cnt[i], 0);
	}
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
				       struct blkg_rwstat *from)
{
	struct blkg_rwstat v = blkg_rwstat_read(from);
	int i;

	for (i = 0; i < BLKG_RWSTAT_NR; i++)
		atomic64_add(atomic64_read(&v.aux_cnt[i]) +
			     atomic64_read(&from->aux_cnt[i]),
			     &to->aux_cnt[i]);
}
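
/*
 * Example (sketch): accounting a bio against a blkg_rwstat; the op flags
 * pick the READ/WRITE and SYNC/ASYNC counters.
 *
 *	blkg_rwstat_add(&rwstat, bio->bi_opf, bio->bi_iter.bi_size);
 *	pr_debug("total bytes=%llu\n", blkg_rwstat_total(&rwstat));
 */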

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
			   struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q,
				  struct blkcg_gq *blkg,
				  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio)
{
	struct blkcg *blkcg;
	struct blkcg_gq *blkg;
	bool throtl = false;

	rcu_read_lock();
	blkcg = bio_blkcg(bio);

	blkg = blkg_lookup(blkcg, q);
	if (unlikely(!blkg)) {
		spin_lock_irq(q->queue_lock);
		blkg = blkg_lookup_create(blkcg, q);
		if (IS_ERR(blkg))
			blkg = NULL;
		spin_unlock_irq(q->queue_lock);
	}

	throtl = blk_throtl_bio(q, blkg, bio);

	if (!throtl) {
		blkg = blkg ?: q->root_blkg;
		blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
				bio->bi_iter.bi_size);
		blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
	}

	rcu_read_unlock();
	return !throtl;
}
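
/*
 * Example (sketch): the submission path calls blkcg_bio_issue_check()
 * before handing a bio to the queue; a %false return means the bio was
 * consumed by the throttler and must not be issued now.
 *
 *	if (!blkcg_bio_issue_check(q, bio))
 *		return BLK_QC_T_NONE;	// bio is held by blk-throttle
 */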

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
	return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) { return 0; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
					      struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
					 struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */