/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *                    Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu_counter.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH     (INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX         UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_rwstat_type {
        BLKG_RWSTAT_READ,
        BLKG_RWSTAT_WRITE,
        BLKG_RWSTAT_SYNC,
        BLKG_RWSTAT_ASYNC,

        BLKG_RWSTAT_NR,
        BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
        struct cgroup_subsys_state      css;
        spinlock_t                      lock;

        struct radix_tree_root          blkg_tree;
        struct blkcg_gq __rcu           *blkg_hint;
        struct hlist_head               blkg_list;

        struct blkcg_policy_data        *cpd[BLKCG_MAX_POLS];

        struct list_head                all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
        struct list_head                cgwb_list;
#endif
};

/*
 * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
 * recursive.  Used to carry stats of dead children, and, for blkg_rwstat,
 * to carry result values from read and sum operations.
 */
struct blkg_stat {
        struct percpu_counter           cpu_cnt;
        atomic64_t                      aux_cnt;
};

struct blkg_rwstat {
        struct percpu_counter           cpu_cnt[BLKG_RWSTAT_NR];
        atomic64_t                      aux_cnt[BLKG_RWSTAT_NR];
};

/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
        /* the blkg and policy id this per-policy data belongs to */
        struct blkcg_gq                 *blkg;
        int                             plid;
};
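
/*
 * Illustrative sketch (hypothetical, not part of this header): a policy
 * embeds blkg_policy_data at the start of its own per-blkg structure, as
 * described above, and recovers the outer structure with container_of().
 * "example_pd" and "example_limit" are made-up names.
 *
 *      struct example_pd {
 *              struct blkg_policy_data pd;     // must be the first member
 *              u64 example_limit;
 *      };
 *
 *      static inline struct example_pd *pd_to_example(struct blkg_policy_data *pd)
 *      {
 *              return pd ? container_of(pd, struct example_pd, pd) : NULL;
 *      }
 */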

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
        /* the blkcg and policy id this per-policy data belongs to */
        struct blkcg                    *blkcg;
        int                             plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
        /* Pointer to the associated request_queue */
        struct request_queue            *q;
        struct list_head                q_node;
        struct hlist_node               blkcg_node;
        struct blkcg                    *blkcg;

        /*
         * Each blkg gets congested separately and the congestion state is
         * propagated to the matching bdi_writeback_congested.
         */
        struct bdi_writeback_congested  *wb_congested;

        /* all non-root blkcg_gq's are guaranteed to have access to parent */
        struct blkcg_gq                 *parent;

        /* request allocation list for this blkcg-q pair */
        struct request_list             rl;

        /* reference count */
        atomic_t                        refcnt;

        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;

        struct blkg_rwstat              stat_bytes;
        struct blkg_rwstat              stat_ios;

        struct blkg_policy_data         *pd[BLKCG_MAX_POLS];

        struct rcu_head                 rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp, int node);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);

struct blkcg_policy {
        int                             plid;
        /* cgroup files for the policy */
        struct cftype                   *dfl_cftypes;
        struct cftype                   *legacy_cftypes;

        /* operations */
        blkcg_pol_alloc_cpd_fn          *cpd_alloc_fn;
        blkcg_pol_init_cpd_fn           *cpd_init_fn;
        blkcg_pol_free_cpd_fn           *cpd_free_fn;
        blkcg_pol_bind_cpd_fn           *cpd_bind_fn;

        blkcg_pol_alloc_pd_fn           *pd_alloc_fn;
        blkcg_pol_init_pd_fn            *pd_init_fn;
        blkcg_pol_online_pd_fn          *pd_online_fn;
        blkcg_pol_offline_pd_fn         *pd_offline_fn;
        blkcg_pol_free_pd_fn            *pd_free_fn;
        blkcg_pol_reset_pd_stats_fn     *pd_reset_stats_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
                                      struct request_queue *q, bool update_hint);
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
                                    struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
                             const struct blkcg_policy *pol);
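
/*
 * Illustrative sketch (hypothetical, reusing the made-up example_pd from
 * above): a policy fills in a blkcg_policy with its pd methods and
 * registers it once, typically from module init.  ->plid is assigned by
 * the core during registration.
 *
 *      static struct blkg_policy_data *example_pd_alloc(gfp_t gfp, int node)
 *      {
 *              struct example_pd *epd = kzalloc_node(sizeof(*epd), gfp, node);
 *
 *              return epd ? &epd->pd : NULL;
 *      }
 *
 *      static void example_pd_free(struct blkg_policy_data *pd)
 *      {
 *              kfree(pd_to_example(pd));
 *      }
 *
 *      static struct blkcg_policy blkcg_policy_example = {
 *              .pd_alloc_fn    = example_pd_alloc,
 *              .pd_free_fn     = example_pd_free,
 *      };
 *
 *      // module init: if (blkcg_policy_register(&blkcg_policy_example)) ...
 *      // module exit: blkcg_policy_unregister(&blkcg_policy_example);
 */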

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
                       u64 (*prfill)(struct seq_file *,
                                     struct blkg_policy_data *, int),
                       const struct blkcg_policy *pol, int data,
                       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                         const struct blkg_rwstat *rwstat);
u64 blkg_prfill_stat(struct seq_file *sf, struct blkg_policy_data *pd, int off);
u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
                       int off);
int blkg_print_stat_bytes(struct seq_file *sf, void *v);
int blkg_print_stat_ios(struct seq_file *sf, void *v);
int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);

u64 blkg_stat_recursive_sum(struct blkcg_gq *blkg,
                            struct blkcg_policy *pol, int off);
struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkcg_gq *blkg,
                                             struct blkcg_policy *pol, int off);

struct blkg_conf_ctx {
        struct gendisk                  *disk;
        struct blkcg_gq                 *blkg;
        char                            *body;
};

int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
                   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
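
/*
 * Illustrative usage sketch (hypothetical write handler, reusing the
 * made-up blkcg_policy_example): a policy's cftype write callback
 * brackets its parsing with blkg_conf_prep() and blkg_conf_finish().
 * On success, prep has resolved the "MAJ:MIN" prefix of @input to
 * ctx.blkg and left the remainder of the line in ctx.body.
 *
 *      struct blkg_conf_ctx ctx;
 *      int ret;
 *
 *      ret = blkg_conf_prep(blkcg, &blkcg_policy_example, input, &ctx);
 *      if (ret)
 *              return ret;
 *      // parse ctx.body and apply the result to ctx.blkg's policy data
 *      blkg_conf_finish(&ctx);
 */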

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
        return css ? container_of(css, struct blkcg, css) : NULL;
}

static inline struct blkcg *task_blkcg(struct task_struct *tsk)
{
        return css_to_blkcg(task_css(tsk, io_cgrp_id));
}

static inline struct blkcg *bio_blkcg(struct bio *bio)
{
        if (bio && bio->bi_css)
                return css_to_blkcg(bio->bi_css);
        return task_blkcg(current);
}

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return task_get_css(task, io_cgrp_id);
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
        return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and the lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
                                             struct request_queue *q,
                                             bool update_hint)
{
        struct blkcg_gq *blkg;

        if (blkcg == &blkcg_root)
                return q->root_blkg;

        blkg = rcu_dereference(blkcg->blkg_hint);
        if (blkg && blkg->q == q)
                return blkg;

        return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock and is guaranteed to return %NULL if @q is bypassing
 * - see blk_queue_bypass_start() for details.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q)
{
        WARN_ON_ONCE(!rcu_read_lock_held());

        if (unlikely(blk_queue_bypass(q)))
                return NULL;
        return __blkg_lookup(blkcg, q, false);
}
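
/*
 * Illustrative sketch: a typical caller brackets the lookup with
 * rcu_read_lock()/rcu_read_unlock() and, if the blkg must outlive the
 * RCU section, takes a reference while still inside it (this mirrors
 * what blk_get_rl() below does):
 *
 *      rcu_read_lock();
 *      blkg = blkg_lookup(blkcg, q);
 *      if (blkg)
 *              blkg_get(blkg);
 *      rcu_read_unlock();
 */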

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol)
{
        return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
                                                     struct blkcg_policy *pol)
{
        return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
        return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
        return cpd ? cpd->blkcg : NULL;
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
        return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        atomic_inc(&blkg->refcnt);
}

void __blkg_release_rcu(struct rcu_head *rcu);

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
        WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
        if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)          \
        css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)  \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

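/*
 * Illustrative sketch: visiting a hypothetical per-blkg value across
 * @blkg's subtree under the RCU read lock:
 *
 *      struct cgroup_subsys_state *pos_css;
 *      struct blkcg_gq *d_blkg;
 *
 *      rcu_read_lock();
 *      blkg_for_each_descendant_pre(d_blkg, pos_css, blkg) {
 *              // @blkg itself is visited first, then each descendant
 *      }
 *      rcu_read_unlock();
 */
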
/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)         \
        css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css) \
                if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),   \
                                              (p_blkg)->q, false)))

/**
 * blk_get_rl - get request_list to use
 * @q: request_queue of interest
 * @bio: bio which will be attached to the allocated request (may be %NULL)
 *
 * The caller wants to allocate a request from @q to use for @bio.  Find
 * the request_list to use and obtain a reference on it.  Should be called
 * under queue_lock.  This function is guaranteed to return a non-%NULL
 * request_list.
 */
static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;

        rcu_read_lock();

        blkcg = bio_blkcg(bio);

        /* bypass blkg lookup and use @q->root_rl directly for root */
        if (blkcg == &blkcg_root)
                goto root_rl;

        /*
         * Try to use blkg->rl.  blkg lookup may fail under memory pressure
         * or if either the blkcg or queue is going away.  Fall back to
         * root_rl in such cases.
         */
        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg))
                goto root_rl;

        blkg_get(blkg);
        rcu_read_unlock();
        return &blkg->rl;
root_rl:
        rcu_read_unlock();
        return &q->root_rl;
}

/**
 * blk_put_rl - put request_list
 * @rl: request_list to put
 *
 * Put the reference acquired by blk_get_rl().  Should be called under
 * queue_lock.
 */
static inline void blk_put_rl(struct request_list *rl)
{
        if (rl->blkg->blkcg != &blkcg_root)
                blkg_put(rl->blkg);
}

/**
 * blk_rq_set_rl - associate a request with a request_list
 * @rq: request of interest
 * @rl: target request_list
 *
 * Associate @rq with @rl so that accounting and freeing can know the
 * request_list @rq came from.
 */
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl)
{
        rq->rl = rl;
}

/**
 * blk_rq_rl - return the request_list a request came from
 * @rq: request of interest
 *
 * Return the request_list @rq is allocated from.
 */
static inline struct request_list *blk_rq_rl(struct request *rq)
{
        return rq->rl;
}
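
/*
 * Illustrative sketch of the request_list lifecycle around a request,
 * mirroring how the helpers above are meant to be paired:
 *
 *      rl = blk_get_rl(q, bio);        // under queue_lock
 *      blk_rq_set_rl(rq, rl);          // remember where rq came from
 *      ...
 *      blk_put_rl(blk_rq_rl(rq));      // at request free time
 */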

struct request_list *__blk_queue_next_rl(struct request_list *rl,
                                         struct request_queue *q);
/**
 * blk_queue_for_each_rl - iterate through all request_lists of a request_queue
 * @rl: loop cursor (struct request_list *)
 * @q: request_queue of interest
 *
 * Should be used under queue_lock.
 */
#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = __blk_queue_next_rl((rl), (q)))
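
/*
 * Illustrative sketch: visiting every request_list of @q with queue_lock
 * held; iteration starts from &q->root_rl:
 *
 *      struct request_list *rl;
 *
 *      blk_queue_for_each_rl(rl, q) {
 *              // operate on rl
 *      }
 */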

static inline int blkg_stat_init(struct blkg_stat *stat, gfp_t gfp)
{
        int ret;

        ret = percpu_counter_init(&stat->cpu_cnt, 0, gfp);
        if (ret)
                return ret;

        atomic64_set(&stat->aux_cnt, 0);
        return 0;
}

static inline void blkg_stat_exit(struct blkg_stat *stat)
{
        percpu_counter_destroy(&stat->cpu_cnt);
}

/**
 * blkg_stat_add - add a value to a blkg_stat
 * @stat: target blkg_stat
 * @val: value to add
 *
 * Add @val to @stat.  The caller must ensure that IRQs on the same CPU
 * don't re-enter this function for the same counter.
 */
static inline void blkg_stat_add(struct blkg_stat *stat, uint64_t val)
{
        percpu_counter_add_batch(&stat->cpu_cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_stat_read - read the current value of a blkg_stat
 * @stat: blkg_stat to read
 */
static inline uint64_t blkg_stat_read(struct blkg_stat *stat)
{
        return percpu_counter_sum_positive(&stat->cpu_cnt);
}

/**
 * blkg_stat_reset - reset a blkg_stat
 * @stat: blkg_stat to reset
 */
static inline void blkg_stat_reset(struct blkg_stat *stat)
{
        percpu_counter_set(&stat->cpu_cnt, 0);
        atomic64_set(&stat->aux_cnt, 0);
}

/**
 * blkg_stat_add_aux - add a blkg_stat into another's aux count
 * @to: the destination blkg_stat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_stat_add_aux(struct blkg_stat *to,
                                     struct blkg_stat *from)
{
        atomic64_add(blkg_stat_read(from) + atomic64_read(&from->aux_cnt),
                     &to->aux_cnt);
}

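/*
 * Illustrative sketch of a blkg_stat's lifecycle (names are made up):
 *
 *      struct blkg_stat st;
 *
 *      if (blkg_stat_init(&st, GFP_KERNEL))
 *              return -ENOMEM;
 *      blkg_stat_add(&st, nbytes);     // hot path, per-cpu
 *      total = blkg_stat_read(&st);    // slow path, sums all CPUs
 *      blkg_stat_exit(&st);
 */
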
static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
{
        int i, ret;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
                if (ret) {
                        while (--i >= 0)
                                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
                        return ret;
                }
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
        return 0;
}

static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                percpu_counter_destroy(&rwstat->cpu_cnt[i]);
}

/**
 * blkg_rwstat_add - add a value to a blkg_rwstat
 * @rwstat: target blkg_rwstat
 * @op: REQ_OP and flags
 * @val: value to add
 *
 * Add @val to @rwstat.  The counters are chosen according to @op.  The
 * caller is responsible for synchronizing calls to this function.
 */
static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
                                   unsigned int op, uint64_t val)
{
        struct percpu_counter *cnt;

        if (op_is_write(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];

        percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);

        if (op_is_sync(op))
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
        else
                cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];

        percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
}

/**
 * blkg_rwstat_read - read the current values of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Read the current snapshot of @rwstat and return it.  The per-CPU sums
 * are carried in the aux counters of the returned structure.
 */
static inline struct blkg_rwstat blkg_rwstat_read(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat result;
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_set(&result.aux_cnt[i],
                             percpu_counter_sum_positive(&rwstat->cpu_cnt[i]));
        return result;
}

/**
 * blkg_rwstat_total - read the total count of a blkg_rwstat
 * @rwstat: blkg_rwstat to read
 *
 * Return the total count of @rwstat regardless of the IO direction.  This
 * function can be called without synchronization and takes care of u64
 * atomicity.
 */
static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
{
        struct blkg_rwstat tmp = blkg_rwstat_read(rwstat);

        return atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_READ]) +
               atomic64_read(&tmp.aux_cnt[BLKG_RWSTAT_WRITE]);
}

/**
 * blkg_rwstat_reset - reset a blkg_rwstat
 * @rwstat: blkg_rwstat to reset
 */
static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
{
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++) {
                percpu_counter_set(&rwstat->cpu_cnt[i], 0);
                atomic64_set(&rwstat->aux_cnt[i], 0);
        }
}

/**
 * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
 * @to: the destination blkg_rwstat
 * @from: the source
 *
 * Add @from's count including the aux one to @to's aux count.
 */
static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
                                       struct blkg_rwstat *from)
{
        struct blkg_rwstat v = blkg_rwstat_read(from);
        int i;

        for (i = 0; i < BLKG_RWSTAT_NR; i++)
                atomic64_add(atomic64_read(&v.aux_cnt[i]) +
                             atomic64_read(&from->aux_cnt[i]),
                             &to->aux_cnt[i]);
}

#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                           struct bio *bio);
#else
static inline bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
                                  struct bio *bio) { return false; }
#endif

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio)
{
        struct blkcg *blkcg;
        struct blkcg_gq *blkg;
        bool throtl = false;

        rcu_read_lock();
        blkcg = bio_blkcg(bio);

        /* associate blkcg if bio hasn't attached one */
        bio_associate_blkcg(bio, &blkcg->css);

        blkg = blkg_lookup(blkcg, q);
        if (unlikely(!blkg)) {
                spin_lock_irq(q->queue_lock);
                blkg = blkg_lookup_create(blkcg, q);
                if (IS_ERR(blkg))
                        blkg = NULL;
                spin_unlock_irq(q->queue_lock);
        }

        throtl = blk_throtl_bio(q, blkg, bio);

        if (!throtl) {
                blkg = blkg ?: q->root_blkg;
                blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
                                bio->bi_iter.bi_size);
                blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
        }

        rcu_read_unlock();
        return !throtl;
}

#else   /* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css  ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline struct cgroup_subsys_state *
task_get_blkcg_css(struct task_struct *task)
{
        return NULL;
}

#ifdef CONFIG_BLOCK

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
                                           struct request_queue *q) { return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
                                           const struct blkcg_policy *pol) { }

static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
                                                  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) { return 0; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline struct request_list *blk_get_rl(struct request_queue *q,
                                              struct bio *bio) { return &q->root_rl; }
static inline void blk_put_rl(struct request_list *rl) { }
static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { }
static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; }

static inline bool blkcg_bio_issue_check(struct request_queue *q,
                                         struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)    \
        for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif  /* CONFIG_BLOCK */
#endif  /* CONFIG_BLK_CGROUP */
#endif  /* _BLK_CGROUP_H */