include/linux/backing-dev.h at v6.0-rc1
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);
void wb_wakeup_delayed(struct bdi_writeback *wb);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;
extern struct workqueue_struct *bdi_async_bio_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
			       enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, 1);
}

static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	wb_stat_mod(wb, item, -1);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * BDI_CAP_WRITEBACK:		Supports dirty page writeback, and dirty pages
 *				should contribute to accounting
 * BDI_CAP_WRITEBACK_ACCT:	Automatically account writeback pages
 * BDI_CAP_STRICTLIMIT:		Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK		(1 << 0)
#define BDI_CAP_WRITEBACK_ACCT		(1 << 1)
#define BDI_CAP_STRICTLIMIT		(1 << 2)

extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg and
 * iocg have to be on the default hierarchy.  Test whether all conditions are
 * met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}

/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started. Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}

/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
 * can't sleep during the transaction.  IRQs may or may not be disabled on
 * return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(
				struct inode *inode,
				struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */
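
A minimal usage sketch, assuming a driver context, of how the bdi_alloc()/bdi_register()/bdi_put() declarations in this header are typically combined. It is not part of the header or the kernel tree: the "example_" names are hypothetical and error handling is abbreviated.

/*
 * Hypothetical sketch of BDI lifecycle management; not part of
 * include/linux/backing-dev.h.
 */
#include <linux/backing-dev.h>
#include <linux/device.h>
#include <linux/err.h>

static struct backing_dev_info *example_bdi_setup(struct device *owner, int node)
{
	struct backing_dev_info *bdi;
	int err;

	bdi = bdi_alloc(node);		/* returns an initialised, refcounted BDI */
	if (!bdi)
		return ERR_PTR(-ENOMEM);

	err = bdi_register(bdi, "example-%s", dev_name(owner));
	if (err) {
		bdi_put(bdi);		/* drop the reference taken by bdi_alloc() */
		return ERR_PTR(err);
	}
	bdi_set_owner(bdi, owner);	/* optionally associate an owning device */
	return bdi;
}

static void example_bdi_teardown(struct backing_dev_info *bdi)
{
	bdi_unregister(bdi);
	bdi_put(bdi);
}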
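
A second sketch showing the unlocked wb access transaction documented at unlocked_inode_to_wb_begin()/unlocked_inode_to_wb_end(), here reading one of the per-cpu wb stats. The "example_" helper is hypothetical; as the kernel-doc above notes, the caller may not sleep inside the transaction and IRQs may be disabled on return from the begin call.

/*
 * Hypothetical sketch of the unlocked inode-to-wb transaction; not part of
 * include/linux/backing-dev.h.
 */
static inline s64 example_inode_wb_reclaimable(struct inode *inode)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;
	s64 nr;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	nr = wb_stat(wb, WB_RECLAIMABLE);	/* approximate, non-negative per-cpu read */
	unlocked_inode_to_wb_end(inode, &cookie);

	return nr;
}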