include/linux/backing-dev.h at v3.3-rc1
/*
 * include/linux/backing-dev.h
 *
 * low-level device information and state which is propagated up through
 * to high-level code.
 */

#ifndef _LINUX_BACKING_DEV_H
#define _LINUX_BACKING_DEV_H

#include <linux/percpu_counter.h>
#include <linux/log2.h>
#include <linux/proportions.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/writeback.h>
#include <linux/atomic.h>

struct page;
struct device;
struct dentry;

/*
 * Bits in backing_dev_info.state
 */
enum bdi_state {
	BDI_pending,		/* On its way to being activated */
	BDI_wb_alloc,		/* Default embedded wb allocated */
	BDI_async_congested,	/* The async (write) queue is getting full */
	BDI_sync_congested,	/* The sync queue is getting full */
	BDI_registered,		/* bdi_register() was done */
	BDI_writeback_running,	/* Writeback is in progress */
	BDI_unused,		/* Available bits start here */
};

typedef int (congested_fn)(void *, int);

enum bdi_stat_item {
	BDI_RECLAIMABLE,
	BDI_WRITEBACK,
	BDI_DIRTIED,
	BDI_WRITTEN,
	NR_BDI_STAT_ITEMS
};

#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))

struct bdi_writeback {
	struct backing_dev_info *bdi;	/* our parent bdi */
	unsigned int nr;

	unsigned long last_old_flush;	/* last old data flush */
	unsigned long last_active;	/* last time bdi thread was active */

	struct task_struct *task;	/* writeback thread */
	struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */
	struct list_head b_dirty;	/* dirty inodes */
	struct list_head b_io;		/* parked for writeback */
	struct list_head b_more_io;	/* parked for more writeback */
	spinlock_t list_lock;		/* protects the b_* lists */
};

struct backing_dev_info {
	struct list_head bdi_list;
	unsigned long ra_pages;	/* max readahead in PAGE_CACHE_SIZE units */
	unsigned long state;	/* Always use atomic bitops on this */
	unsigned int capabilities; /* Device capabilities */
	congested_fn *congested_fn; /* Function pointer if device is md/dm */
	void *congested_data;	/* Pointer to aux data for congested func */

	char *name;

	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];

	unsigned long bw_time_stamp;	/* last time write bw was updated */
	unsigned long dirtied_stamp;
	unsigned long written_stamp;	/* pages written at bw_time_stamp */
	unsigned long write_bandwidth;	/* the estimated write bandwidth */
	unsigned long avg_write_bandwidth; /* further smoothed write bw */

	/*
	 * The base dirty throttle rate, re-calculated every 200ms.
	 * All the bdi tasks' dirty rates will be curbed under it.
	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
	 * in small steps and is much more smooth/stable than the latter.
	 */
	unsigned long dirty_ratelimit;
	unsigned long balanced_dirty_ratelimit;

	struct prop_local_percpu completions;
	int dirty_exceeded;

	unsigned int min_ratio;
	unsigned int max_ratio, max_prop_frac;

	struct bdi_writeback wb;  /* default writeback info for this bdi */
	spinlock_t wb_lock;	  /* protects work_list */

	struct list_head work_list;

	struct device *dev;

	struct timer_list laptop_mode_wb_timer;

#ifdef CONFIG_DEBUG_FS
	struct dentry *debug_dir;
	struct dentry *debug_stats;
#endif
};
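/*
 * Illustrative sketch (not part of the original header): a driver that
 * embeds a backing_dev_info would typically set it up and tear it down
 * with the helpers declared below; the device name "mydev" and the
 * "minor" variable here are hypothetical:
 *
 *	err = bdi_init(&mydev->bdi);
 *	if (!err)
 *		err = bdi_register(&mydev->bdi, NULL, "mydev-%d", minor);
 *	...
 *	bdi_unregister(&mydev->bdi);
 *	bdi_destroy(&mydev->bdi);
 *
 * bdi_setup_and_register() bundles the init and register steps for
 * simple embedded users.
 */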
int bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi);

int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
void bdi_unregister(struct backing_dev_info *bdi);
int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			enum wb_reason reason);
void bdi_start_background_writeback(struct backing_dev_info *bdi);
int bdi_writeback_thread(void *data);
int bdi_has_dirty_io(struct backing_dev_info *bdi);
void bdi_arm_supers_timer(void);
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi);
void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2);

extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct list_head bdi_pending_list;

static inline int wb_has_dirty_io(struct bdi_writeback *wb)
{
	return !list_empty(&wb->b_dirty) ||
	       !list_empty(&wb->b_io) ||
	       !list_empty(&wb->b_more_io);
}

static inline void __add_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item, s64 amount)
{
	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
}

static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, 1);
}

static inline void inc_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__inc_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	__add_bdi_stat(bdi, item, -1);
}

static inline void dec_bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	unsigned long flags;

	local_irq_save(flags);
	__dec_bdi_stat(bdi, item);
	local_irq_restore(flags);
}

static inline s64 bdi_stat(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
}

static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
}

static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
		enum bdi_stat_item item)
{
	s64 sum;
	unsigned long flags;

	local_irq_save(flags);
	sum = __bdi_stat_sum(bdi, item);
	local_irq_restore(flags);

	return sum;
}

extern void bdi_writeout_inc(struct backing_dev_info *bdi);
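/*
 * Illustrative sketch (not part of the original header): writeback
 * accounting code would typically bump these per-bdi counters with the
 * irq-safe variants and read them back with bdi_stat():
 *
 *	inc_bdi_stat(bdi, BDI_WRITEBACK);	(page enters writeback)
 *	...
 *	dec_bdi_stat(bdi, BDI_WRITEBACK);	(writeback completes)
 *	nr = bdi_stat(bdi, BDI_WRITEBACK);	(fast, approximate read)
 *
 * The approximate read may be off by up to bdi_stat_error() (defined
 * below); bdi_stat_sum() returns the exact value at higher cost.  The
 * __-prefixed variants assume interrupts are already disabled.
 */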
/*
 * maximal error of a stat counter.
 */
static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
{
#ifdef CONFIG_SMP
	return nr_cpu_ids * BDI_STAT_BATCH;
#else
	return 1;
#endif
}

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio);
int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ratio);

/*
 * Flags in backing_dev_info::capabilities
 *
 * The first three flags control whether dirty pages will contribute to the
 * VM's accounting and whether writepages() should be called for dirty pages
 * (something that would not, for example, be appropriate for ramfs)
 *
 * WARNING: these flags are closely related and should not normally be
 * used separately.  BDI_CAP_NO_ACCT_AND_WRITEBACK combines the three
 * into a single convenience macro.
 *
 * BDI_CAP_NO_ACCT_DIRTY:  Dirty pages shouldn't contribute to accounting
 * BDI_CAP_NO_WRITEBACK:   Don't write pages back
 * BDI_CAP_NO_ACCT_WB:     Don't automatically account writeback pages
 *
 * These flags let !MMU mmap() govern direct device mapping vs immediate
 * copying more easily for MAP_PRIVATE, especially for ROM filesystems.
 *
 * BDI_CAP_MAP_COPY:       Copy can be mapped (MAP_PRIVATE)
 * BDI_CAP_MAP_DIRECT:     Can be mapped directly (MAP_SHARED)
 * BDI_CAP_READ_MAP:       Can be mapped for reading
 * BDI_CAP_WRITE_MAP:      Can be mapped for writing
 * BDI_CAP_EXEC_MAP:       Can be mapped for execution
 *
 * BDI_CAP_SWAP_BACKED:    Count shmem/tmpfs objects as swap-backed.
 */
#define BDI_CAP_NO_ACCT_DIRTY	0x00000001
#define BDI_CAP_NO_WRITEBACK	0x00000002
#define BDI_CAP_MAP_COPY	0x00000004
#define BDI_CAP_MAP_DIRECT	0x00000008
#define BDI_CAP_READ_MAP	0x00000010
#define BDI_CAP_WRITE_MAP	0x00000020
#define BDI_CAP_EXEC_MAP	0x00000040
#define BDI_CAP_NO_ACCT_WB	0x00000080
#define BDI_CAP_SWAP_BACKED	0x00000100

#define BDI_CAP_VMFLAGS \
	(BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP | BDI_CAP_EXEC_MAP)

#define BDI_CAP_NO_ACCT_AND_WRITEBACK \
	(BDI_CAP_NO_WRITEBACK | BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_ACCT_WB)

#if defined(VM_MAYREAD) && \
	(BDI_CAP_READ_MAP != VM_MAYREAD || \
	 BDI_CAP_WRITE_MAP != VM_MAYWRITE || \
	 BDI_CAP_EXEC_MAP != VM_MAYEXEC)
#error please change backing_dev_info::capabilities flags
#endif

extern struct backing_dev_info default_backing_dev_info;
extern struct backing_dev_info noop_backing_dev_info;

int writeback_in_progress(struct backing_dev_info *bdi);

static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits)
{
	if (bdi->congested_fn)
		return bdi->congested_fn(bdi->congested_data, bdi_bits);
	return (bdi->state & bdi_bits);
}

static inline int bdi_read_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_sync_congested);
}

static inline int bdi_write_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, 1 << BDI_async_congested);
}

static inline int bdi_rw_congested(struct backing_dev_info *bdi)
{
	return bdi_congested(bdi, (1 << BDI_sync_congested) |
				  (1 << BDI_async_congested));
}

enum {
	BLK_RW_ASYNC	= 0,
	BLK_RW_SYNC	= 1,
};

void clear_bdi_congested(struct backing_dev_info *bdi, int sync);
void set_bdi_congested(struct backing_dev_info *bdi, int sync);
long congestion_wait(int sync, long timeout);
long wait_iff_congested(struct zone *zone, int sync, long timeout);
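/*
 * Illustrative sketch (not part of the original header): a writer that
 * wants to throttle itself when the device queue backs up would test
 * the congestion state and sleep; the HZ/50 timeout is only an example:
 *
 *	if (bdi_write_congested(bdi))
 *		congestion_wait(BLK_RW_ASYNC, HZ / 50);
 *
 * congestion_wait() may return before the timeout expires if some bdi
 * exits the congested state in the meantime.
 */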
static inline bool bdi_cap_writeback_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_WRITEBACK);
}

static inline bool bdi_cap_account_dirty(struct backing_dev_info *bdi)
{
	return !(bdi->capabilities & BDI_CAP_NO_ACCT_DIRTY);
}

static inline bool bdi_cap_account_writeback(struct backing_dev_info *bdi)
{
	/* Paranoia: BDI_CAP_NO_WRITEBACK implies BDI_CAP_NO_ACCT_WB */
	return !(bdi->capabilities & (BDI_CAP_NO_ACCT_WB |
				      BDI_CAP_NO_WRITEBACK));
}

static inline bool bdi_cap_swap_backed(struct backing_dev_info *bdi)
{
	return bdi->capabilities & BDI_CAP_SWAP_BACKED;
}

static inline bool bdi_cap_flush_forker(struct backing_dev_info *bdi)
{
	return bdi == &default_backing_dev_info;
}

static inline bool mapping_cap_writeback_dirty(struct address_space *mapping)
{
	return bdi_cap_writeback_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_account_dirty(struct address_space *mapping)
{
	return bdi_cap_account_dirty(mapping->backing_dev_info);
}

static inline bool mapping_cap_swap_backed(struct address_space *mapping)
{
	return bdi_cap_swap_backed(mapping->backing_dev_info);
}

static inline int bdi_sched_wait(void *word)
{
	schedule();
	return 0;
}

#endif	/* _LINUX_BACKING_DEV_H */
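/*
 * Illustrative sketch (not part of the original header): bdi_sched_wait()
 * is shaped as an action callback for this kernel era's wait_on_bit(),
 * which takes an action function.  A caller waiting for a state bit to
 * clear (the BDI_pending bit here is only an example) would do:
 *
 *	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
 *		    TASK_UNINTERRUPTIBLE);
 */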