#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
#include <trace/events/writeback.h>

static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

struct backing_dev_info noop_backing_dev_info = {
	.name		= "noop",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
};
EXPORT_SYMBOL_GPL(noop_backing_dev_info);

static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);

/* bdi_wq serves all asynchronous writeback tasks */
struct workqueue_struct *bdi_wq;

void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	if (wb1 < wb2) {
		spin_lock(&wb1->list_lock);
		spin_lock_nested(&wb2->list_lock, 1);
	} else {
		spin_lock(&wb2->list_lock);
		spin_lock_nested(&wb1->list_lock, 1);
	}
}
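/*
 * Illustrative sketch (not part of the original file): bdi_lock_two()
 * avoids AB-BA deadlocks by always acquiring the two list_locks in
 * ascending address order.  A caller pairs it with two plain
 * spin_unlock() calls, in either order, as bdi_destroy() below does.
 * The helper name here is hypothetical.
 */
static inline void __maybe_unused
example_bdi_unlock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
{
	/* Unlock order is irrelevant for deadlock avoidance */
	spin_unlock(&wb1->list_lock);
	spin_unlock(&wb2->list_lock);
}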
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}

static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb = &bdi->wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io;
	struct inode *inode;

	nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&wb->list_lock);
	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
		nr_dirty++;
	list_for_each_entry(inode, &wb->b_io, i_wb_list)
		nr_io++;
	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
		nr_more_io++;
	spin_unlock(&wb->list_lock);

	global_dirty_limits(&background_thresh, &dirty_thresh);
	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:       %10lu kB\n"
		   "BdiReclaimable:     %10lu kB\n"
		   "BdiDirtyThresh:     %10lu kB\n"
		   "DirtyThresh:        %10lu kB\n"
		   "BackgroundThresh:   %10lu kB\n"
		   "BdiDirtied:         %10lu kB\n"
		   "BdiWritten:         %10lu kB\n"
		   "BdiWriteBandwidth:  %10lu kBps\n"
		   "b_dirty:            %10lu\n"
		   "b_io:               %10lu\n"
		   "b_more_io:          %10lu\n"
		   "bdi_list:           %10u\n"
		   "state:              %10lx\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh),
		   K(dirty_thresh),
		   K(background_thresh),
		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
		   (unsigned long) K(bdi->write_bandwidth),
		   nr_dirty,
		   nr_io,
		   nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state);
#undef K

	return 0;
}

static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif

static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned long read_ahead_kb;
	ssize_t ret;

	ret = kstrtoul(buf, 10, &read_ahead_kb);
	if (ret < 0)
		return ret;

	bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);

	return count;
}

#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))

static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_min_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)

static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	unsigned int ratio;
	ssize_t ret;

	ret = kstrtouint(buf, 10, &ratio);
	if (ret < 0)
		return ret;

	ret = bdi_set_max_ratio(bdi, ratio);
	if (!ret)
		ret = count;

	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)

static ssize_t stable_pages_required_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n",
			bdi_cap_stable_pages_required(bdi) ? 1 : 0);
}
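/*
 * Illustrative sketch (not part of the original file): BDI_SHOW(min_ratio,
 * bdi->min_ratio) above expands to roughly the following show routine; it
 * is spelled out here, under a hypothetical name, only to make the macro's
 * effect concrete.
 */
static ssize_t __maybe_unused example_min_ratio_show(struct device *dev,
						     struct device_attribute *attr,
						     char *page)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);

	/* Same body the macro generates for min_ratio_show() */
	return snprintf(page, PAGE_SIZE-1, "%lld\n",
			(long long)bdi->min_ratio);
}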
static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_RO(stable_pages_required),
	__ATTR_NULL,
};

static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);

static int __init default_bdi_init(void)
{
	int err;

	bdi_wq = alloc_workqueue("writeback", WQ_MEM_RECLAIM | WQ_FREEZABLE |
					      WQ_UNBOUND | WQ_SYSFS, 0);
	if (!bdi_wq)
		return -ENOMEM;

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");
	err = bdi_init(&noop_backing_dev_info);

	return err;
}
subsys_initcall(default_bdi_init);

int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}

/*
 * This function is used when the first inode for this bdi is marked dirty. It
 * wakes up the corresponding bdi thread, which should then take care of the
 * periodic background write-out of dirty inodes. Since the write-out would
 * start only 'dirty_writeback_interval' centisecs from now anyway, we just
 * set up a timer which wakes the bdi thread up later.
 *
 * Note, we wouldn't bother setting up the timer, but this function is on the
 * fast-path (used by '__mark_inode_dirty()'), so we save a few context
 * switches by delaying the wake-up.
 */
void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, timeout);
}

/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu_expedited();

	/* bdi_list is now unused, clear it to mark @bdi dying */
	INIT_LIST_HEAD(&bdi->bdi_list);
}
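/*
 * Illustrative sketch (not part of the original file): the RCU reader side
 * that the synchronize_rcu_expedited() in bdi_remove_from_list() waits for
 * walks bdi_list like this (compare wakeup_flusher_threads() in
 * fs/fs-writeback.c).  The function name here is hypothetical.
 */
static void __maybe_unused example_walk_bdi_list(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		/* @bdi may be unregistered as soon as rcu_read_unlock() runs */
		;
	}
	rcu_read_unlock();
}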
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		 const char *fmt, ...)
{
	va_list args;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		return 0;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	bdi->dev = dev;

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	trace_writeback_bdi_register(bdi);
	return 0;
}
EXPORT_SYMBOL(bdi_register);

int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);

/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * Drain work list and shutdown the delayed_work. At this point,
	 * @bdi->bdi_list is empty, telling bdi_writeback_workfn() that @bdi
	 * is dying and its work_list needs to be drained no matter what.
	 */
	mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
	flush_delayed_work(&bdi->wb.dwork);
	WARN_ON(!list_empty(&bdi->work_list));

	/*
	 * This shouldn't be necessary unless @bdi for some reason has
	 * unflushed dirty IO after work_list is drained. Do it anyway
	 * just in case.
	 */
	cancel_delayed_work_sync(&bdi->wb.dwork);
}

/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = &default_backing_dev_info;
	}
	spin_unlock(&sb_lock);
}

void bdi_unregister(struct backing_dev_info *bdi)
{
	struct device *dev = bdi->dev;

	if (dev) {
		bdi_set_min_ratio(bdi, 0);
		trace_writeback_bdi_unregister(bdi);
		bdi_prune_sb(bdi);

		bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);

		spin_lock_bh(&bdi->wb_lock);
		bdi->dev = NULL;
		spin_unlock_bh(&bdi->wb_lock);

		device_unregister(dev);
	}
}
EXPORT_SYMBOL(bdi_unregister);

static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
	spin_lock_init(&wb->list_lock);
	INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
}

/*
 * Initial write bandwidth: 100 MB/s
 */
#define INIT_BW	(100 << (20 - PAGE_SHIFT))
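/*
 * Illustrative sketch (not part of the original file): the usual pairing of
 * the init/register calls above in a block-device-style driver, naming the
 * bdi "MAJOR:MINOR" exactly as bdi_register_dev() does.  The function name
 * is hypothetical; bdi_init() and bdi_destroy() are defined below and
 * declared in <linux/backing-dev.h>.
 */
static int __maybe_unused example_setup_bdi(struct backing_dev_info *bdi,
					    dev_t devt)
{
	int err;

	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register_dev(bdi, devt);	/* registers as "MAJOR:MINOR" */
	if (err)
		bdi_destroy(bdi);		/* tears down the half-built bdi */
	return err;
}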
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = FPROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;

	bdi->bw_time_stamp = jiffies;
	bdi->written_stamp = 0;

	bdi->balanced_dirty_ratelimit = INIT_BW;
	bdi->dirty_ratelimit = INIT_BW;
	bdi->write_bandwidth = INIT_BW;
	bdi->avg_write_bandwidth = INIT_BW;

	err = fprop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);

void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		bdi_lock_two(&bdi->wb, dst);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&bdi->wb.list_lock);
		spin_unlock(&dst->list_lock);
	}

	bdi_unregister(bdi);

	/*
	 * If bdi_unregister() had already been called earlier, the dwork
	 * could still be pending because bdi_prune_sb() can race with the
	 * bdi_wakeup_thread_delayed() calls from __mark_inode_dirty().
	 */
	cancel_delayed_work_sync(&bdi->wb.dwork);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	fprop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);

/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	err = bdi_register(bdi, NULL, "%.28s-%ld", name,
			   atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);

static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
static atomic_t nr_bdi_congested[2];

void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (test_and_clear_bit(bit, &bdi->state))
		atomic_dec(&nr_bdi_congested[sync]);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);

void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	if (!test_and_set_bit(bit, &bdi->state))
		atomic_inc(&nr_bdi_congested[sync]);
}
EXPORT_SYMBOL(set_bdi_congested);
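/*
 * Illustrative sketch (not part of the original file): the fill_super()-time
 * usage that bdi_setup_and_register() above is meant for, modelled on
 * callers like fuse.  The function name and the capability choice here are
 * hypothetical.
 */
static int __maybe_unused example_fill_super_bdi(struct super_block *sb,
						 struct backing_dev_info *bdi)
{
	int err;

	err = bdi_setup_and_register(bdi, "examplefs", BDI_CAP_MAP_COPY);
	if (err)
		return err;

	sb->s_bdi = bdi;	/* bdi_prune_sb() resets this at unregister time */
	return 0;
}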
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion.  If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

	trace_writeback_congestion_wait(jiffies_to_usecs(timeout),
					jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(congestion_wait);

/**
 * wait_iff_congested - Conditionally wait for a backing_dev to become uncongested or a zone to complete writes
 * @zone: A zone to check if it is heavily congested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * If a backing_dev (any backing_dev) is congested and the given @zone has
 * experienced recent congestion, this waits for up to @timeout jiffies for
 * either a BDI to exit congestion of the given @sync queue or a write to
 * complete.
 *
 * In the absence of zone congestion, this function calls cond_resched() to
 * yield the processor if necessary but otherwise does not sleep.
 *
 * The return value is 0 if the sleep is for the full timeout. Otherwise,
 * it is the number of jiffies that were still remaining when the function
 * returned. return_value == timeout implies the function did not sleep.
 */
long wait_iff_congested(struct zone *zone, int sync, long timeout)
{
	long ret;
	unsigned long start = jiffies;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	/*
	 * If there is no congestion, or heavy congestion is not being
	 * encountered in the current zone, yield if necessary instead
	 * of sleeping on the congestion queue
	 */
	if (atomic_read(&nr_bdi_congested[sync]) == 0 ||
	    !zone_is_reclaim_congested(zone)) {
		cond_resched();

		/* In case we scheduled, work out time remaining */
		ret = timeout - (jiffies - start);
		if (ret < 0)
			ret = 0;

		goto out;
	}

	/* Sleep until uncongested or a write happens */
	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);

out:
	trace_writeback_wait_iff_congested(jiffies_to_usecs(timeout),
					   jiffies_to_usecs(jiffies - start));

	return ret;
}
EXPORT_SYMBOL(wait_iff_congested);

int pdflush_proc_obsolete(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{
	char kbuf[] = "0\n";

	if (*ppos) {
		*lenp = 0;
		return 0;
	}

	if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
		return -EFAULT;
	printk_once(KERN_WARNING "%s exported in /proc is scheduled for removal\n",
		    table->procname);

	*lenp = 2;
	*ppos += *lenp;
	return 2;
}
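/*
 * Illustrative sketch (not part of the original file): the typical
 * reclaim-style call site for congestion_wait(), throttling for up to
 * 100ms until async write congestion clears (compare the callers in
 * mm/vmscan.c).  The function name here is hypothetical.
 */
static void __maybe_unused example_throttle_writeout(void)
{
	congestion_wait(BLK_RW_ASYNC, HZ/10);
}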