/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>

static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
	kref_get(&bdi->refcnt);
	return bdi;
}

struct backing_dev_info *bdi_get_by_id(u64 id);
void bdi_put(struct backing_dev_info *bdi);

__printf(2, 3)
int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
__printf(2, 0)
int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
		    va_list args);
void bdi_set_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);

struct backing_dev_info *bdi_alloc(int node_id);

void wb_start_background_writeback(struct bdi_writeback *wb);
void wb_workfn(struct work_struct *work);

void wb_wait_for_completion(struct wb_completion *done);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;

extern struct workqueue_struct *bdi_wq;

static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
	return test_bit(WB_has_dirty_io, &wb->state);
}

static inline bool bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	/*
	 * @bdi->tot_write_bandwidth is guaranteed to be > 0 if there are
	 * any dirty wbs.  See wb_update_write_bandwidth().
	 */
	return atomic_long_read(&bdi->tot_write_bandwidth);
}

static inline void wb_stat_mod(struct bdi_writeback *wb,
			       enum wb_stat_item item, s64 amount)
{
	percpu_counter_add_batch(&wb->stat[item], amount, WB_STAT_BATCH);
}

static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_read_positive(&wb->stat[item]);
}

static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
{
	return percpu_counter_sum_positive(&wb->stat[item]);
}

extern void wb_writeout_inc(struct bdi_writeback *wb);

/*
 * maximal error of a stat counter.
 */
static inline unsigned long wb_stat_error(void)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * WB_STAT_BATCH;
#else
	return 1;
#endif
}
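/*
 * Example (editor's sketch, not part of the upstream header): the per-wb
 * counters above are percpu counters batched by WB_STAT_BATCH, so a plain
 * wb_stat() read may be off by up to wb_stat_error().  Callers that need
 * precision near a threshold fall back to the exact, more expensive
 * wb_stat_sum().  The helper name and @limit parameter below are made up
 * for illustration:
 */
#if 0
static bool wb_writeback_over_limit(struct bdi_writeback *wb, s64 limit)
{
	/* cheap approximate read first */
	if (wb_stat(wb, WB_WRITEBACK) + wb_stat_error() < limit)
		return false;
	/* within the error margin: take the exact per-CPU sum */
	return wb_stat_sum(wb, WB_WRITEBACK) >= limit;
}
#endif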
/* BDI ratio is expressed as part per 1000000 for finer granularity. */
#define BDI_RATIO_SCALE 10000

u64 bdi_get_min_bytes(struct backing_dev_info *bdi);
u64 bdi_get_max_bytes(struct backing_dev_info *bdi);
int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_ratio_no_scale(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio_no_scale(struct backing_dev_info *bdi, unsigned int max_ratio);
int bdi_set_min_bytes(struct backing_dev_info *bdi, u64 min_bytes);
int bdi_set_max_bytes(struct backing_dev_info *bdi, u64 max_bytes);
int bdi_set_strict_limit(struct backing_dev_info *bdi, unsigned int strict_limit);

/*
 * Flags in backing_dev_info::capability
 *
 * BDI_CAP_WRITEBACK:   Supports dirty page writeback, and dirty pages
 *                      should contribute to accounting
 * BDI_CAP_STRICTLIMIT: Keep number of dirty pages below bdi threshold
 */
#define BDI_CAP_WRITEBACK	(1 << 0)
#define BDI_CAP_STRICTLIMIT	(1 << 1)

extern struct backing_dev_info noop_backing_dev_info;

int bdi_init(struct backing_dev_info *bdi);

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @wb: bdi_writeback of interest
 *
 * Determine whether there is writeback waiting to be handled against a
 * bdi_writeback.
 */
static inline bool writeback_in_progress(struct bdi_writeback *wb)
{
	return test_bit(WB_writeback_running, &wb->state);
}

struct backing_dev_info *inode_to_bdi(struct inode *inode);

static inline bool mapping_can_writeback(struct address_space *mapping)
{
	return inode_to_bdi(mapping->host)->capabilities & BDI_CAP_WRITEBACK;
}

#ifdef CONFIG_CGROUP_WRITEBACK

struct bdi_writeback *wb_get_lookup(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css);
struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
				    struct cgroup_subsys_state *memcg_css,
				    gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
void wb_blkcg_offline(struct cgroup_subsys_state *css);

/**
 * inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
 * @inode: inode of interest
 *
 * Cgroup writeback requires support from the filesystem.  Also, both memcg
 * and iocg have to be on the default hierarchy.  Test whether all conditions
 * are met.
 *
 * Note that the test result may change dynamically on the same inode
 * depending on how memcg and iocg are configured.
 */
static inline bool inode_cgwb_enabled(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	return cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
		cgroup_subsys_on_dfl(io_cgrp_subsys) &&
		(bdi->capabilities & BDI_CAP_WRITEBACK) &&
		(inode->i_sb->s_iflags & SB_I_CGROUPWB);
}
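/*
 * Example (editor's sketch, not part of the upstream header): of the four
 * conditions tested by inode_cgwb_enabled(), only SB_I_CGROUPWB is under
 * filesystem control; a filesystem opts in by setting it on its superblock
 * at mount time (as ext4 and btrfs do).  Hypothetical fill_super excerpt:
 */
#if 0
static int examplefs_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_iflags |= SB_I_CGROUPWB;	/* enable cgroup writeback */
	/* ... the rest of the usual superblock setup ... */
	return 0;
}
#endif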
/**
 * wb_find_current - find wb for %current on a bdi
 * @bdi: bdi of interest
 *
 * Find the wb of @bdi which matches both the memcg and blkcg of %current.
 * Must be called under rcu_read_lock() which protects the returned wb.
 * NULL if not found.
 */
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	struct cgroup_subsys_state *memcg_css;
	struct bdi_writeback *wb;

	memcg_css = task_css(current, memory_cgrp_id);
	if (!memcg_css->parent)
		return &bdi->wb;

	wb = radix_tree_lookup(&bdi->cgwb_tree, memcg_css->id);

	/*
	 * %current's blkcg equals the effective blkcg of its memcg.  No
	 * need to use the relatively expensive cgroup_get_e_css().
	 */
	if (likely(wb && wb->blkcg_css == task_css(current, io_cgrp_id)))
		return wb;
	return NULL;
}

/**
 * wb_get_create_current - get or create wb for %current on a bdi
 * @bdi: bdi of interest
 * @gfp: allocation mask
 *
 * Equivalent to wb_get_create() on %current's memcg.  This function is
 * called from a relatively hot path and optimizes the common cases using
 * wb_find_current().
 */
static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	struct bdi_writeback *wb;

	rcu_read_lock();
	wb = wb_find_current(bdi);
	if (wb && unlikely(!wb_tryget(wb)))
		wb = NULL;
	rcu_read_unlock();

	if (unlikely(!wb)) {
		struct cgroup_subsys_state *memcg_css;

		memcg_css = task_get_css(current, memory_cgrp_id);
		wb = wb_get_create(bdi, memcg_css, gfp);
		css_put(memcg_css);
	}
	return wb;
}

/**
 * inode_to_wb - determine the wb of an inode
 * @inode: inode of interest
 *
 * Returns the wb @inode is currently associated with.  The caller must be
 * holding either @inode->i_lock, the i_pages lock, or the
 * associated wb's list_lock.
 */
static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
{
#ifdef CONFIG_LOCKDEP
	WARN_ON_ONCE(debug_locks &&
		     (inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
		     (!lockdep_is_held(&inode->i_lock) &&
		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
		      !lockdep_is_held(&inode->i_wb->list_lock)));
#endif
	return inode->i_wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(struct inode *inode,
						    struct writeback_control *wbc)
{
	/*
	 * If wbc does not have inode attached, it means cgroup writeback was
	 * disabled when wbc started.  Just use the default wb in that case.
	 */
	return wbc->wb ? wbc->wb : &inode_to_bdi(inode)->wb;
}
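/*
 * Example (editor's sketch, hypothetical helper): inode_to_wb() is only
 * safe while one of the locks listed in its comment pins the inode->wb
 * association, e.g. under @inode->i_lock:
 */
#if 0
static void example_with_wb(struct inode *inode)
{
	struct bdi_writeback *wb;

	spin_lock(&inode->i_lock);
	wb = inode_to_wb(inode);	/* association stable under i_lock */
	/* ... use wb ... */
	spin_unlock(&inode->i_lock);
}
#endif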
/**
 * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
 * @inode: target inode
 * @cookie: output param, to be passed to the end function
 *
 * The caller wants to access the wb associated with @inode but isn't
 * holding inode->i_lock, the i_pages lock or wb->list_lock.  This
 * function determines the wb associated with @inode and ensures that the
 * association doesn't change until the transaction is finished with
 * unlocked_inode_to_wb_end().
 *
 * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards
 * and can't sleep during the transaction.  IRQs may or may not be disabled
 * on return.
 */
static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	rcu_read_lock();

	/*
	 * Paired with store_release in inode_switch_wbs_work_fn() and
	 * ensures that we see the new wb if we see cleared I_WB_SWITCH.
	 */
	cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;

	if (unlikely(cookie->locked))
		xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags);

	/*
	 * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages
	 * lock.  inode_to_wb() will bark.  Deref directly.
	 */
	return inode->i_wb;
}

/**
 * unlocked_inode_to_wb_end - end inode wb access transaction
 * @inode: target inode
 * @cookie: @cookie from unlocked_inode_to_wb_begin()
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
	if (unlikely(cookie->locked))
		xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags);

	rcu_read_unlock();
}

#else	/* CONFIG_CGROUP_WRITEBACK */

static inline bool inode_cgwb_enabled(struct inode *inode)
{
	return false;
}

static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *
wb_get_create_current(struct backing_dev_info *bdi, gfp_t gfp)
{
	return &bdi->wb;
}

static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
	return &inode_to_bdi(inode)->wb;
}

static inline struct bdi_writeback *inode_to_wb_wbc(struct inode *inode,
						    struct writeback_control *wbc)
{
	return inode_to_wb(inode);
}

static inline struct bdi_writeback *
unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
{
	return inode_to_wb(inode);
}

static inline void unlocked_inode_to_wb_end(struct inode *inode,
					    struct wb_lock_cookie *cookie)
{
}

static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}

static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */

const char *bdi_dev_name(struct backing_dev_info *bdi);

#endif	/* _LINUX_BACKING_DEV_H */
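/*
 * Usage example (editor's appendix, not part of the header; the helper name
 * is made up): the typical pattern for the unlocked wb transaction above,
 * as used by the dirty accounting code in mm/page-writeback.c.  No sleeping
 * is allowed between begin and end:
 */
#if 0
static void example_account_undirty(struct inode *inode)
{
	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);
	wb_stat_mod(wb, WB_DIRTIED, -1);	/* wb association is pinned here */
	unlocked_inode_to_wb_end(inode, &cookie);
}
#endif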