/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);
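
/*
 * Illustrative example (not part of the mainline header): bdi_get_by_id()
 * returns a bdi with an elevated refcount, so a caller that only needs the
 * bdi briefly would pair it with bdi_put(), roughly:
 *
 *	struct backing_dev_info *bdi;
 *
 *	bdi = bdi_get_by_id(id);
 *	if (bdi) {
 *		... inspect bdi state ...
 *		bdi_put(bdi);
 *	}
 */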

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs. See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
			       enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}
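
/*
 * Illustrative example (not part of the mainline header): the per-wb
 * counters are indexed by enum wb_stat_item from backing-dev-defs.h, and
 * accounting paths bump them as pages change state, roughly:
 *
 *	inc_wb_stat(wb, WB_RECLAIMABLE);	a page became dirty
 *	dec_wb_stat(wb, WB_RECLAIMABLE);	it moved on to writeback
 *	inc_wb_stat(wb, WB_WRITEBACK);
 *
 * wb_stat() gives a cheap, possibly slightly stale read; wb_stat_sum()
 * gives an exact sum at the cost of touching every CPU's counter.
 */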

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
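
/*
 * Illustrative example (assumed caller, not part of this header): since a
 * wb_stat() read can be off by up to wb_stat_error(), throttling code that
 * compares a counter against a small threshold pads the comparison by that
 * margin, along the lines of:
 *
 *	if (wb_stat(wb, WB_WRITEBACK) <= wb_stat_error())
 *		break;
 */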

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capability
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)
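
/*
 * Illustrative example (assumed driver code, not part of this header): a
 * driver that wants normal dirty-page writeback and accounting would set up
 * its bdi roughly as follows ("mydev%d" and @minor are made up here):
 *
 *	struct backing_dev_info *bdi;
 *	int err;
 *
 *	bdi = bdi_alloc(NUMA_NO_NODE);
 *	if (!bdi)
 *		return -ENOMEM;
 *	bdi->capabilities = BDI_CAP_WRITEBACK | BDI_CAP_WRITEBACK_ACCT;
 *	err = bdi_register(bdi, "mydev%d", minor);
 *	if (err)
 *		bdi_put(bdi);
 */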

extern struct backing_dev_info noop_backing_dev_info;

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem. Also, both memcg and
 * iocg have to be on the default hierarchy. Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * Returns NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg. No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg. This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}
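
/*
 * Illustrative example (not part of the mainline header): a caller that only
 * needs %current's wb for a short while would drop the reference with
 * wb_put() from backing-dev-defs.h when done, roughly:
 *
 *	struct bdi_writeback *wb;
 *
 *	wb = wb_get_create_current(bdi, GFP_KERNEL);
 *	if (wb) {
 *		... charge the writeback work to wb ...
 *		wb_put(wb);
 *	}
 */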

/**
 * inode_to_wb_is_valid - test whether an inode has a wb associated
 * @inode: inode of interest
 *
 * Returns %true if @inode has a wb associated. May be called without any
 * locking.
 */
static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return inode->i_wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with. The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started. Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock. This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction. IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock. inode_to_wb()'s lockdep check would trip here, so
	 * dereference ->i_wb directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}
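
/*
 * Illustrative example (not part of the mainline header, simplified from the
 * page accounting paths): the begin/end pair keeps the inode->wb association
 * stable without taking i_lock while the wb's counters are updated:
 *
 *	struct wb_lock_cookie cookie = {};
 *	struct bdi_writeback *wb;
 *
 *	wb = unlocked_inode_to_wb_begin(inode, &cookie);
 *	if (mapping_can_writeback(inode->i_mapping))
 *		dec_wb_stat(wb, WB_RECLAIMABLE);
 *	unlocked_inode_to_wb_end(inode, &cookie);
 */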

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline bool inode_to_wb_is_valid(struct inode *inode)
{
	return true;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */