/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BACKING_DEV_DEFS_H
#define __LINUX_BACKING_DEV_DEFS_H

#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/percpu_counter.h>
#include <linux/percpu-refcount.h>
#include <linux/flex_proportions.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/refcount.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in bdi_writeback.state
 */
enum wb_state {
	WB_registered,		/* bdi_register() was done */
	WB_writeback_running,	/* Writeback is in progress */
	WB_has_dirty_io,	/* Dirty inodes on ->b_{dirty|io|more_io} */
	WB_start_all,		/* nr_pages == 0 (all) work pending */
};
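
/*
 * Illustrative sketch (not part of this header): wb->state is a plain
 * unsigned long manipulated only with atomic bitops, using the enum
 * values above as bit numbers. The wb pointer here is a hypothetical
 * struct bdi_writeback *.
 *
 *	set_bit(WB_has_dirty_io, &wb->state);
 *	if (test_bit(WB_registered, &wb->state))
 *		...
 *	clear_bit(WB_writeback_running, &wb->state);
 */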

enum wb_stat_item {
	WB_RECLAIMABLE,
	WB_WRITEBACK,
	WB_DIRTIED,
	WB_WRITTEN,
	NR_WB_STAT_ITEMS
};

#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
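
/*
 * Worked example (illustrative, not from the source): with nr_cpu_ids
 * == 8, ilog2(8) == 3, so WB_STAT_BATCH == 8 * (1 + 3) == 32. Each CPU
 * may accumulate up to the batch size in its local counter before the
 * delta is folded into the global percpu_counter sum, so a cheap
 * approximate read of a wb stat can be off by up to roughly
 * nr_cpu_ids * WB_STAT_BATCH.
 */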

/*
 * Why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_VMSCAN,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and works are done
	 * by emergency workers. However, this reason is visible to
	 * userland through tracepoints, and we keep exposing exactly
	 * the same information, so the name no longer matches the
	 * mechanism.
	 */
	WB_REASON_FORKER_THREAD,
	WB_REASON_FOREIGN_FLUSH,

	WB_REASON_MAX,
};

struct wb_completion {
	atomic_t		cnt;
	wait_queue_head_t	*waitq;
	unsigned long		progress_stamp;	/* jiffies when slow progress was detected */
	unsigned long		wait_start;	/* jiffies when the wait for writeback work began */
};

#define __WB_COMPLETION_INIT(_waitq)	\
	(struct wb_completion){ .cnt = ATOMIC_INIT(1), .waitq = (_waitq) }

/*
 * If one wants to wait for one or more wb_writeback_works, each work's
 * ->done should be set to a wb_completion defined using the following
 * macro. Once all work items are issued with wb_queue_work(), the caller
 * can wait for the completion of all using wb_wait_for_completion(). Work
 * items which are waited upon aren't freed automatically on completion.
 */
#define WB_COMPLETION_INIT(bdi)	__WB_COMPLETION_INIT(&(bdi)->wb_waitq)

#define DEFINE_WB_COMPLETION(cmpl, bdi)	\
	struct wb_completion cmpl = WB_COMPLETION_INIT(bdi)
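
/*
 * Illustrative sketch (an assumption, not from this header) of the
 * pattern the comment above describes; "work" stands for a
 * wb_writeback_work being prepared by the caller:
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *
 *	work->done = &done;
 *	wb_queue_work(wb, work);
 *	wb_wait_for_completion(&done);
 */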

/*
 * Each wb (bdi_writeback) performs writeback operations, and is measured
 * and throttled, independently. Without cgroup writeback, each bdi
 * (backing_dev_info) is served by its embedded bdi->wb.
 *
 * On the default hierarchy, blkcg implicitly enables memcg. This allows
 * using memcg's page ownership for attributing writeback IOs, and every
 * memcg-blkcg combination can be served by its own wb by assigning a
 * dedicated wb to each memcg, which enables isolation across different
 * cgroups and propagation of IO back pressure down from the IO layer up
 * to the tasks which are generating the dirty pages to be written back.
 *
 * A cgroup wb is indexed on its bdi by the ID of the associated memcg,
 * refcounted with the number of inodes attached to it, and pins the memcg
 * and the corresponding blkcg. As the corresponding blkcg for a memcg may
 * change as blkcg is disabled and enabled higher up in the hierarchy, a wb
 * is tested for blkcg after lookup and removed from the index on mismatch
 * so that a new wb for the combination can be created (see the sketch
 * after the struct definition below).
 *
 * Each bdi_writeback that is not embedded into the backing_dev_info must
 * hold a reference to the parent backing_dev_info. See cgwb_create() for
 * details.
 */
struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */

	unsigned long state;		/* Always use atomic bitops on this */
	unsigned long last_old_flush;	/* last old data flush */

	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	struct list_head b_dirty_time;	/* time stamps are dirty */
	spinlock_t list_lock;		/* protects the b_* lists */

	atomic_t writeback_inodes;	/* number of inodes under writeback */
	struct percpu_counter stat[NR_WB_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;	/* pages dirtied at bw_time_stamp */
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw, > 0 */

	/*
	 * The base dirty throttle rate, recalculated every 200ms.
	 * All the bdi tasks' dirty rates are curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much smoother and more stable than the
	 * latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;
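
	/*
	 * Illustrative numbers (hypothetical, not from the source): if
	 * @balanced_dirty_ratelimit jumps from 1000 to 2000 pages/s
	 * between 200ms updates, @dirty_ratelimit does not follow at
	 * once. Tracking, say, a quarter of the remaining gap per update
	 * gives 1000 -> 1250 -> 1438 -> 1578 -> ..., converging on the
	 * balanced value while staying smooth enough to throttle tasks.
	 */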

	struct fprop_local_percpu completions;
	int dirty_exceeded;
	enum wb_reason start_all_reason;

	spinlock_t work_lock;		/* protects work_list & dwork scheduling */
	struct list_head work_list;
	struct delayed_work dwork;	/* work item used for writeback */
	struct delayed_work bw_dwork;	/* work item used for bandwidth estimate */

	struct list_head bdi_node;	/* anchored at bdi->wb_list */

#ifdef CONFIG_CGROUP_WRITEBACK
	struct percpu_ref refcnt;	/* used only for !root wb's */
	struct fprop_local_percpu memcg_completions;
	struct cgroup_subsys_state *memcg_css;	/* the associated memcg */
	struct cgroup_subsys_state *blkcg_css;	/* and blkcg */
	struct list_head memcg_node;	/* anchored at memcg->cgwb_list */
	struct list_head blkcg_node;	/* anchored at blkcg->cgwb_list */
	struct list_head b_attached;	/* attached inodes, protected by list_lock */
	struct list_head offline_node;	/* anchored at offline_cgwbs */
	struct work_struct switch_work;	/* work used to perform inode
					 * switching to this wb */
	struct llist_head switch_wbs_ctxs; /* queued contexts for
					    * writeback switching */

	union {
		struct work_struct release_work;
		struct rcu_head rcu;
	};
#endif
};
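
/*
 * Illustrative sketch (an assumption, not the kernel's actual code) of
 * the lookup-and-verify step described above the struct: a wb found in
 * bdi->cgwb_tree is only reused if its blkcg_css still matches the
 * memcg's current blkcg; kill_stale_wb() and create_wb() stand for
 * hypothetical helpers in the spirit of cgwb_create().
 *
 *	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);
 *	if (wb && wb->blkcg_css != current_blkcg_css) {
 *		kill_stale_wb(wb);
 *		wb = NULL;
 *	}
 *	if (!wb)
 *		wb = create_wb(bdi, memcg_css);
 */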

struct backing_dev_info {
	u64 id;
	struct rb_node rb_node;		/* keyed by ->id */
	struct list_head bdi_list;
	/* max readahead in PAGE_SIZE units */
	unsigned long __data_racy ra_pages;

	unsigned long io_pages;		/* max allowed IO size */

	struct kref refcnt;		/* Reference counter for the structure */
	unsigned int capabilities;	/* Device capabilities */
	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	/*
	 * Sum of avg_write_bandwidth of wbs with dirty inodes. > 0 if
	 * there are any dirty wbs, which is depended upon by
	 * bdi_has_dirty_io().
	 */
	atomic_long_t tot_write_bandwidth;
	/*
	 * Jiffies when the last process was dirty throttled on this bdi.
	 * Used by blk-wbt.
	 */
	unsigned long last_bdp_sleep;

	struct bdi_writeback wb;	/* the root writeback info for this bdi */
	struct list_head wb_list;	/* list of all wbs */
#ifdef CONFIG_CGROUP_WRITEBACK
	struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
	struct mutex cgwb_release_mutex;  /* protect shutdown of wb structs */
	struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */
#endif
	wait_queue_head_t wb_waitq;

	struct device *dev;
	char dev_name[64];
	struct device *owner;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
#endif
};
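
/*
 * Note (an assumption for illustration, not from this header): min_ratio
 * and max_ratio bound this bdi's share of the global dirty threshold and
 * are tunable via /sys/class/bdi/<dev>/{min,max}_ratio or, from code:
 *
 *	bdi_set_min_ratio(bdi, 5);
 *	bdi_set_max_ratio(bdi, 50);
 */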

struct wb_lock_cookie {
	bool locked;
	unsigned long flags;
};
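
/*
 * Sketch of the intended usage (based on the unlocked_inode_to_wb_begin()
 * and unlocked_inode_to_wb_end() helpers in backing-dev.h; shown here as
 * an illustration): the cookie records whether the begin helper had to
 * take a lock, and the irq flags, so the end helper can undo exactly
 * that.
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	... access wb-related fields ...
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */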

#ifdef CONFIG_CGROUP_WRITEBACK

/**
 * wb_tryget - try to increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline bool wb_tryget(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		return percpu_ref_tryget(&wb->refcnt);
	return true;
}

/**
 * wb_get - increment a wb's refcount
 * @wb: bdi_writeback to get
 */
static inline void wb_get(struct bdi_writeback *wb)
{
	if (wb != &wb->bdi->wb)
		percpu_ref_get(&wb->refcnt);
}

/**
 * wb_put_many - decrement a wb's refcount
 * @wb: bdi_writeback to put
 * @nr: number of references to put
 */
static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
	if (WARN_ON_ONCE(!wb->bdi)) {
		/*
		 * A driver bug might cause a file to be removed before bdi
		 * was initialized.
		 */
		return;
	}

	if (wb != &wb->bdi->wb)
		percpu_ref_put_many(&wb->refcnt, nr);
}

/**
 * wb_put - decrement a wb's refcount
 * @wb: bdi_writeback to put
 */
static inline void wb_put(struct bdi_writeback *wb)
{
	wb_put_many(wb, 1);
}
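
/*
 * Illustrative pairing (an assumption, not from this header): the root
 * wb is embedded in its bdi, so these helpers are no-ops for it; only
 * cgroup wbs are percpu-ref counted.
 *
 *	if (wb_tryget(wb)) {
 *		... wb cannot be released while the reference is held ...
 *		wb_put(wb);
 *	}
 */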

/**
 * wb_dying - is a wb dying?
 * @wb: bdi_writeback of interest
 *
 * Returns whether @wb is unlinked and being drained.
 */
static inline bool wb_dying(struct bdi_writeback *wb)
{
	return percpu_ref_is_dying(&wb->refcnt);
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool wb_tryget(struct bdi_writeback *wb)
{
	return true;
}

static inline void wb_get(struct bdi_writeback *wb)
{
}

static inline void wb_put(struct bdi_writeback *wb)
{
}

static inline void wb_put_many(struct bdi_writeback *wb, unsigned long nr)
{
}

static inline bool wb_dying(struct bdi_writeback *wb)
{
	return false;
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

#endif	/* __LINUX_BACKING_DEV_DEFS_H */