Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vfs: add support for a lazytime mount option

Add a new mount option which enables a new "lazytime" mode. This mode
causes atime, mtime, and ctime updates to only be made to the
in-memory version of the inode. The on-disk times will only get
updated when (a) the inode needs to be updated for some non-time-related
change, (b) userspace calls fsync(), syncfs() or sync(), or (c) an
undeleted inode is about to be evicted from memory.

This is OK according to POSIX because it makes no guarantees after a
crash unless userspace explicitly requests them via an fsync(2) call.

For workloads which feature a large number of random writes to a
preallocated file, the lazytime mount option significantly reduces
writes to the inode table. The repeated 4k writes to a single block
will result in undesirable stress on flash devices and SMR disk
drives. Even on conventional HDDs, the repeated writes to the inode
table block will trigger Adjacent Track Interference (ATI) remediation
latencies, which severely degrade long-tail latencies --- a serious
problem for web serving tiers, for example.

Google-Bug-Id: 18297052

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

authored by

Theodore Ts'o and committed by
Al Viro
0ae45f63 e36f014e

+186 -35
+6
fs/ext4/inode.c
··· 4840 4840 * If the inode is marked synchronous, we don't honour that here - doing 4841 4841 * so would cause a commit on atime updates, which we don't bother doing. 4842 4842 * We handle synchronous inodes at the highest possible level. 4843 + * 4844 + * If only the I_DIRTY_TIME flag is set, we can skip everything. If 4845 + * I_DIRTY_TIME and I_DIRTY_SYNC is set, the only inode fields we need 4846 + * to copy into the on-disk inode structure are the timestamp files. 4843 4847 */ 4844 4848 void ext4_dirty_inode(struct inode *inode, int flags) 4845 4849 { 4846 4850 handle_t *handle; 4847 4851 4852 + if (flags == I_DIRTY_TIME) 4853 + return; 4848 4854 handle = ext4_journal_start(inode, EXT4_HT_INODE, 2); 4849 4855 if (IS_ERR(handle)) 4850 4856 goto out;
+51 -11
fs/fs-writeback.c
··· 247 247 return ret; 248 248 } 249 249 250 + #define EXPIRE_DIRTY_ATIME 0x0001 251 + 250 252 /* 251 253 * Move expired (dirtied before work->older_than_this) dirty inodes from 252 254 * @delaying_queue to @dispatch_queue. 253 255 */ 254 256 static int move_expired_inodes(struct list_head *delaying_queue, 255 257 struct list_head *dispatch_queue, 258 + int flags, 256 259 struct wb_writeback_work *work) 257 260 { 261 + unsigned long *older_than_this = NULL; 262 + unsigned long expire_time; 258 263 LIST_HEAD(tmp); 259 264 struct list_head *pos, *node; 260 265 struct super_block *sb = NULL; ··· 267 262 int do_sb_sort = 0; 268 263 int moved = 0; 269 264 265 + if ((flags & EXPIRE_DIRTY_ATIME) == 0) 266 + older_than_this = work->older_than_this; 267 + else if ((work->reason == WB_REASON_SYNC) == 0) { 268 + expire_time = jiffies - (HZ * 86400); 269 + older_than_this = &expire_time; 270 + } 270 271 while (!list_empty(delaying_queue)) { 271 272 inode = wb_inode(delaying_queue->prev); 272 - if (work->older_than_this && 273 - inode_dirtied_after(inode, *work->older_than_this)) 273 + if (older_than_this && 274 + inode_dirtied_after(inode, *older_than_this)) 274 275 break; 275 276 list_move(&inode->i_wb_list, &tmp); 276 277 moved++; 278 + if (flags & EXPIRE_DIRTY_ATIME) 279 + set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state); 277 280 if (sb_is_blkdev_sb(inode->i_sb)) 278 281 continue; 279 282 if (sb && sb != inode->i_sb) ··· 322 309 static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work) 323 310 { 324 311 int moved; 312 + 325 313 assert_spin_locked(&wb->list_lock); 326 314 list_splice_init(&wb->b_more_io, &wb->b_io); 327 - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work); 315 + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work); 316 + moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io, 317 + EXPIRE_DIRTY_ATIME, work); 328 318 trace_writeback_queue_io(wb, work, moved); 329 319 } 330 320 ··· 451 435 * updates after data IO 
completion. 452 436 */ 453 437 redirty_tail(inode, wb); 438 + } else if (inode->i_state & I_DIRTY_TIME) { 439 + list_move(&inode->i_wb_list, &wb->b_dirty_time); 454 440 } else { 455 441 /* The inode is clean. Remove from writeback lists. */ 456 442 list_del_init(&inode->i_wb_list); ··· 499 481 spin_lock(&inode->i_lock); 500 482 501 483 dirty = inode->i_state & I_DIRTY; 502 - inode->i_state &= ~I_DIRTY; 484 + if (((dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) && 485 + (inode->i_state & I_DIRTY_TIME)) || 486 + (inode->i_state & I_DIRTY_TIME_EXPIRED)) { 487 + dirty |= I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED; 488 + trace_writeback_lazytime(inode); 489 + } 490 + inode->i_state &= ~dirty; 503 491 504 492 /* 505 493 * Paired with smp_mb() in __mark_inode_dirty(). This allows ··· 525 501 526 502 spin_unlock(&inode->i_lock); 527 503 504 + if (dirty & I_DIRTY_TIME) 505 + mark_inode_dirty_sync(inode); 528 506 /* Don't write the inode if only I_DIRTY_PAGES was set */ 529 - if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { 507 + if (dirty & ~I_DIRTY_PAGES) { 530 508 int err = write_inode(inode, wbc); 531 509 if (ret == 0) 532 510 ret = err; ··· 576 550 * make sure inode is on some writeback list and leave it there unless 577 551 * we have completely cleaned the inode. 578 552 */ 579 - if (!(inode->i_state & I_DIRTY) && 553 + if (!(inode->i_state & I_DIRTY_ALL) && 580 554 (wbc->sync_mode != WB_SYNC_ALL || 581 555 !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))) 582 556 goto out; ··· 591 565 * If inode is clean, remove it from writeback lists. Otherwise don't 592 566 * touch it. See comment above for explanation. 
593 567 */ 594 - if (!(inode->i_state & I_DIRTY)) 568 + if (!(inode->i_state & I_DIRTY_ALL)) 595 569 list_del_init(&inode->i_wb_list); 596 570 spin_unlock(&wb->list_lock); 597 571 inode_sync_complete(inode); ··· 733 707 wrote += write_chunk - wbc.nr_to_write; 734 708 spin_lock(&wb->list_lock); 735 709 spin_lock(&inode->i_lock); 736 - if (!(inode->i_state & I_DIRTY)) 710 + if (!(inode->i_state & I_DIRTY_ALL)) 737 711 wrote++; 738 712 requeue_inode(inode, wb, &wbc); 739 713 inode_sync_complete(inode); ··· 1171 1145 * page->mapping->host, so the page-dirtying time is recorded in the internal 1172 1146 * blockdev inode. 1173 1147 */ 1148 + #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) 1174 1149 void __mark_inode_dirty(struct inode *inode, int flags) 1175 1150 { 1176 1151 struct super_block *sb = inode->i_sb; 1177 1152 struct backing_dev_info *bdi = NULL; 1153 + int dirtytime; 1154 + 1155 + trace_writeback_mark_inode_dirty(inode, flags); 1178 1156 1179 1157 /* 1180 1158 * Don't do this for I_DIRTY_PAGES - that doesn't actually 1181 1159 * dirty the inode itself 1182 1160 */ 1183 - if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) { 1161 + if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_TIME)) { 1184 1162 trace_writeback_dirty_inode_start(inode, flags); 1185 1163 1186 1164 if (sb->s_op->dirty_inode) ··· 1192 1162 1193 1163 trace_writeback_dirty_inode(inode, flags); 1194 1164 } 1165 + if (flags & I_DIRTY_INODE) 1166 + flags &= ~I_DIRTY_TIME; 1167 + dirtytime = flags & I_DIRTY_TIME; 1195 1168 1196 1169 /* 1197 1170 * Paired with smp_mb() in __writeback_single_inode() for the ··· 1202 1169 */ 1203 1170 smp_mb(); 1204 1171 1205 - if ((inode->i_state & flags) == flags) 1172 + if (((inode->i_state & flags) == flags) || 1173 + (dirtytime && (inode->i_state & I_DIRTY_INODE))) 1206 1174 return; 1207 1175 1208 1176 if (unlikely(block_dump)) 1209 1177 block_dump___mark_inode_dirty(inode); 1210 1178 1211 1179 spin_lock(&inode->i_lock); 1180 + if (dirtytime && 
(inode->i_state & I_DIRTY_INODE)) 1181 + goto out_unlock_inode; 1212 1182 if ((inode->i_state & flags) != flags) { 1213 1183 const int was_dirty = inode->i_state & I_DIRTY; 1214 1184 1185 + if (flags & I_DIRTY_INODE) 1186 + inode->i_state &= ~I_DIRTY_TIME; 1215 1187 inode->i_state |= flags; 1216 1188 1217 1189 /* ··· 1263 1225 } 1264 1226 1265 1227 inode->dirtied_when = jiffies; 1266 - list_move(&inode->i_wb_list, &bdi->wb.b_dirty); 1228 + list_move(&inode->i_wb_list, dirtytime ? 1229 + &bdi->wb.b_dirty_time : &bdi->wb.b_dirty); 1267 1230 spin_unlock(&bdi->wb.list_lock); 1231 + trace_writeback_dirty_inode_enqueue(inode); 1268 1232 1269 1233 if (wakeup_bdi) 1270 1234 bdi_wakeup_thread_delayed(bdi);
+2 -2
fs/gfs2/file.c
··· 655 655 { 656 656 struct address_space *mapping = file->f_mapping; 657 657 struct inode *inode = mapping->host; 658 - int sync_state = inode->i_state & I_DIRTY; 658 + int sync_state = inode->i_state & I_DIRTY_ALL; 659 659 struct gfs2_inode *ip = GFS2_I(inode); 660 660 int ret = 0, ret1 = 0; 661 661 ··· 668 668 if (!gfs2_is_jdata(ip)) 669 669 sync_state &= ~I_DIRTY_PAGES; 670 670 if (datasync) 671 - sync_state &= ~I_DIRTY_SYNC; 671 + sync_state &= ~(I_DIRTY_SYNC | I_DIRTY_TIME); 672 672 673 673 if (sync_state) { 674 674 ret = sync_inode_metadata(inode, 1);
+40 -16
fs/inode.c
··· 18 18 #include <linux/buffer_head.h> /* for inode_has_buffers */ 19 19 #include <linux/ratelimit.h> 20 20 #include <linux/list_lru.h> 21 + #include <trace/events/writeback.h> 21 22 #include "internal.h" 22 23 23 24 /* ··· 31 30 * inode_sb_list_lock protects: 32 31 * sb->s_inodes, inode->i_sb_list 33 32 * bdi->wb.list_lock protects: 34 - * bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list 33 + * bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_wb_list 35 34 * inode_hash_lock protects: 36 35 * inode_hashtable, inode->i_hash 37 36 * ··· 417 416 */ 418 417 void inode_add_lru(struct inode *inode) 419 418 { 420 - if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) && 419 + if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC | 420 + I_FREEING | I_WILL_FREE)) && 421 421 !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE) 422 422 inode_lru_list_add(inode); 423 423 } ··· 649 647 spin_unlock(&inode->i_lock); 650 648 continue; 651 649 } 652 - if (inode->i_state & I_DIRTY && !kill_dirty) { 650 + if (inode->i_state & I_DIRTY_ALL && !kill_dirty) { 653 651 spin_unlock(&inode->i_lock); 654 652 busy = 1; 655 653 continue; ··· 1434 1432 */ 1435 1433 void iput(struct inode *inode) 1436 1434 { 1437 - if (inode) { 1438 - BUG_ON(inode->i_state & I_CLEAR); 1439 - 1440 - if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) 1441 - iput_final(inode); 1435 + if (!inode) 1436 + return; 1437 + BUG_ON(inode->i_state & I_CLEAR); 1438 + retry: 1439 + if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) { 1440 + if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) { 1441 + atomic_inc(&inode->i_count); 1442 + inode->i_state &= ~I_DIRTY_TIME; 1443 + spin_unlock(&inode->i_lock); 1444 + trace_writeback_lazytime_iput(inode); 1445 + mark_inode_dirty_sync(inode); 1446 + goto retry; 1447 + } 1448 + iput_final(inode); 1442 1449 } 1443 1450 } 1444 1451 EXPORT_SYMBOL(iput); ··· 1506 1495 return 0; 1507 1496 } 1508 1497 1509 - /* 1510 - * This does the actual work of 
updating an inodes time or version. Must have 1511 - * had called mnt_want_write() before calling this. 1512 - */ 1513 - static int update_time(struct inode *inode, struct timespec *time, int flags) 1498 + int generic_update_time(struct inode *inode, struct timespec *time, int flags) 1514 1499 { 1515 - if (inode->i_op->update_time) 1516 - return inode->i_op->update_time(inode, time, flags); 1500 + int iflags = I_DIRTY_TIME; 1517 1501 1518 1502 if (flags & S_ATIME) 1519 1503 inode->i_atime = *time; ··· 1518 1512 inode->i_ctime = *time; 1519 1513 if (flags & S_MTIME) 1520 1514 inode->i_mtime = *time; 1521 - mark_inode_dirty_sync(inode); 1515 + 1516 + if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION)) 1517 + iflags |= I_DIRTY_SYNC; 1518 + __mark_inode_dirty(inode, iflags); 1522 1519 return 0; 1520 + } 1521 + EXPORT_SYMBOL(generic_update_time); 1522 + 1523 + /* 1524 + * This does the actual work of updating an inodes time or version. Must have 1525 + * had called mnt_want_write() before calling this. 1526 + */ 1527 + static int update_time(struct inode *inode, struct timespec *time, int flags) 1528 + { 1529 + int (*update_time)(struct inode *, struct timespec *, int); 1530 + 1531 + update_time = inode->i_op->update_time ? inode->i_op->update_time : 1532 + generic_update_time; 1533 + 1534 + return update_time(inode, time, flags); 1523 1535 } 1524 1536 1525 1537 /**
+1 -1
fs/jfs/file.c
··· 39 39 return rc; 40 40 41 41 mutex_lock(&inode->i_mutex); 42 - if (!(inode->i_state & I_DIRTY) || 42 + if (!(inode->i_state & I_DIRTY_ALL) || 43 43 (datasync && !(inode->i_state & I_DIRTY_DATASYNC))) { 44 44 /* Make sure committed changes hit the disk */ 45 45 jfs_flush_journal(JFS_SBI(inode->i_sb)->log, 1);
+1 -1
fs/libfs.c
··· 948 948 949 949 mutex_lock(&inode->i_mutex); 950 950 ret = sync_mapping_buffers(inode->i_mapping); 951 - if (!(inode->i_state & I_DIRTY)) 951 + if (!(inode->i_state & I_DIRTY_ALL)) 952 952 goto out; 953 953 if (datasync && !(inode->i_state & I_DIRTY_DATASYNC)) 954 954 goto out;
+1
fs/proc_namespace.c
··· 44 44 { MS_SYNCHRONOUS, ",sync" }, 45 45 { MS_DIRSYNC, ",dirsync" }, 46 46 { MS_MANDLOCK, ",mand" }, 47 + { MS_LAZYTIME, ",lazytime" }, 47 48 { 0, NULL } 48 49 }; 49 50 const struct proc_fs_info *fs_infop;
+8
fs/sync.c
··· 177 177 */ 178 178 int vfs_fsync_range(struct file *file, loff_t start, loff_t end, int datasync) 179 179 { 180 + struct inode *inode = file->f_mapping->host; 181 + 180 182 if (!file->f_op->fsync) 181 183 return -EINVAL; 184 + if (!datasync && (inode->i_state & I_DIRTY_TIME)) { 185 + spin_lock(&inode->i_lock); 186 + inode->i_state &= ~I_DIRTY_TIME; 187 + spin_unlock(&inode->i_lock); 188 + mark_inode_dirty_sync(inode); 189 + } 182 190 return file->f_op->fsync(file, start, end, datasync); 183 191 } 184 192 EXPORT_SYMBOL(vfs_fsync_range);
+1
include/linux/backing-dev.h
··· 55 55 struct list_head b_dirty; /* dirty inodes */ 56 56 struct list_head b_io; /* parked for writeback */ 57 57 struct list_head b_more_io; /* parked for more writeback */ 58 + struct list_head b_dirty_time; /* time stamps are dirty */ 58 59 spinlock_t list_lock; /* protects the b_* lists */ 59 60 }; 60 61
+5
include/linux/fs.h
··· 1746 1746 #define __I_DIO_WAKEUP 9 1747 1747 #define I_DIO_WAKEUP (1 << I_DIO_WAKEUP) 1748 1748 #define I_LINKABLE (1 << 10) 1749 + #define I_DIRTY_TIME (1 << 11) 1750 + #define __I_DIRTY_TIME_EXPIRED 12 1751 + #define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED) 1749 1752 1750 1753 #define I_DIRTY (I_DIRTY_SYNC | I_DIRTY_DATASYNC | I_DIRTY_PAGES) 1754 + #define I_DIRTY_ALL (I_DIRTY | I_DIRTY_TIME) 1751 1755 1752 1756 extern void __mark_inode_dirty(struct inode *, int); 1753 1757 static inline void mark_inode_dirty(struct inode *inode) ··· 1914 1910 1915 1911 extern void ihold(struct inode * inode); 1916 1912 extern void iput(struct inode *); 1913 + extern int generic_update_time(struct inode *, struct timespec *, int); 1917 1914 1918 1915 static inline struct inode *file_inode(const struct file *f) 1919 1916 {
+59 -1
include/trace/events/writeback.h
··· 18 18 {I_FREEING, "I_FREEING"}, \ 19 19 {I_CLEAR, "I_CLEAR"}, \ 20 20 {I_SYNC, "I_SYNC"}, \ 21 + {I_DIRTY_TIME, "I_DIRTY_TIME"}, \ 22 + {I_DIRTY_TIME_EXPIRED, "I_DIRTY_TIME_EXPIRED"}, \ 21 23 {I_REFERENCED, "I_REFERENCED"} \ 22 24 ) 23 25 ··· 70 68 TP_STRUCT__entry ( 71 69 __array(char, name, 32) 72 70 __field(unsigned long, ino) 71 + __field(unsigned long, state) 73 72 __field(unsigned long, flags) 74 73 ), 75 74 ··· 81 78 strncpy(__entry->name, 82 79 bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32); 83 80 __entry->ino = inode->i_ino; 81 + __entry->state = inode->i_state; 84 82 __entry->flags = flags; 85 83 ), 86 84 87 - TP_printk("bdi %s: ino=%lu flags=%s", 85 + TP_printk("bdi %s: ino=%lu state=%s flags=%s", 88 86 __entry->name, 89 87 __entry->ino, 88 + show_inode_state(__entry->state), 90 89 show_inode_state(__entry->flags) 91 90 ) 91 + ); 92 + 93 + DEFINE_EVENT(writeback_dirty_inode_template, writeback_mark_inode_dirty, 94 + 95 + TP_PROTO(struct inode *inode, int flags), 96 + 97 + TP_ARGS(inode, flags) 92 98 ); 93 99 94 100 DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode_start, ··· 608 596 struct writeback_control *wbc, 609 597 unsigned long nr_to_write), 610 598 TP_ARGS(inode, wbc, nr_to_write) 599 + ); 600 + 601 + DECLARE_EVENT_CLASS(writeback_lazytime_template, 602 + TP_PROTO(struct inode *inode), 603 + 604 + TP_ARGS(inode), 605 + 606 + TP_STRUCT__entry( 607 + __field( dev_t, dev ) 608 + __field(unsigned long, ino ) 609 + __field(unsigned long, state ) 610 + __field( __u16, mode ) 611 + __field(unsigned long, dirtied_when ) 612 + ), 613 + 614 + TP_fast_assign( 615 + __entry->dev = inode->i_sb->s_dev; 616 + __entry->ino = inode->i_ino; 617 + __entry->state = inode->i_state; 618 + __entry->mode = inode->i_mode; 619 + __entry->dirtied_when = inode->dirtied_when; 620 + ), 621 + 622 + TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o", 623 + MAJOR(__entry->dev), MINOR(__entry->dev), 624 + __entry->ino, __entry->dirtied_when, 625 + 
show_inode_state(__entry->state), __entry->mode) 626 + ); 627 + 628 + DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime, 629 + TP_PROTO(struct inode *inode), 630 + 631 + TP_ARGS(inode) 632 + ); 633 + 634 + DEFINE_EVENT(writeback_lazytime_template, writeback_lazytime_iput, 635 + TP_PROTO(struct inode *inode), 636 + 637 + TP_ARGS(inode) 638 + ); 639 + 640 + DEFINE_EVENT(writeback_lazytime_template, writeback_dirty_inode_enqueue, 641 + 642 + TP_PROTO(struct inode *inode), 643 + 644 + TP_ARGS(inode) 611 645 ); 612 646 613 647 #endif /* _TRACE_WRITEBACK_H */
+3 -1
include/uapi/linux/fs.h
··· 90 90 #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */ 91 91 #define MS_I_VERSION (1<<23) /* Update inode I_version field */ 92 92 #define MS_STRICTATIME (1<<24) /* Always perform atime updates */ 93 + #define MS_LAZYTIME (1<<25) /* Update the on-disk [acm]times lazily */ 93 94 94 95 /* These sb flags are internal to the kernel */ 95 96 #define MS_NOSEC (1<<28) ··· 101 100 /* 102 101 * Superblock flags that can be altered by MS_REMOUNT 103 102 */ 104 - #define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION) 103 + #define MS_RMT_MASK (MS_RDONLY|MS_SYNCHRONOUS|MS_MANDLOCK|MS_I_VERSION|\ 104 + MS_LAZYTIME) 105 105 106 106 /* 107 107 * Old magic mount flag and mask
+8 -2
mm/backing-dev.c
··· 69 69 unsigned long background_thresh; 70 70 unsigned long dirty_thresh; 71 71 unsigned long bdi_thresh; 72 - unsigned long nr_dirty, nr_io, nr_more_io; 72 + unsigned long nr_dirty, nr_io, nr_more_io, nr_dirty_time; 73 73 struct inode *inode; 74 74 75 - nr_dirty = nr_io = nr_more_io = 0; 75 + nr_dirty = nr_io = nr_more_io = nr_dirty_time = 0; 76 76 spin_lock(&wb->list_lock); 77 77 list_for_each_entry(inode, &wb->b_dirty, i_wb_list) 78 78 nr_dirty++; ··· 80 80 nr_io++; 81 81 list_for_each_entry(inode, &wb->b_more_io, i_wb_list) 82 82 nr_more_io++; 83 + list_for_each_entry(inode, &wb->b_dirty_time, i_wb_list) 84 + if (inode->i_state & I_DIRTY_TIME) 85 + nr_dirty_time++; 83 86 spin_unlock(&wb->list_lock); 84 87 85 88 global_dirty_limits(&background_thresh, &dirty_thresh); ··· 101 98 "b_dirty: %10lu\n" 102 99 "b_io: %10lu\n" 103 100 "b_more_io: %10lu\n" 101 + "b_dirty_time: %10lu\n" 104 102 "bdi_list: %10u\n" 105 103 "state: %10lx\n", 106 104 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)), ··· 115 111 nr_dirty, 116 112 nr_io, 117 113 nr_more_io, 114 + nr_dirty_time, 118 115 !list_empty(&bdi->bdi_list), bdi->state); 119 116 #undef K 120 117 ··· 423 418 INIT_LIST_HEAD(&wb->b_dirty); 424 419 INIT_LIST_HEAD(&wb->b_io); 425 420 INIT_LIST_HEAD(&wb->b_more_io); 421 + INIT_LIST_HEAD(&wb->b_dirty_time); 426 422 spin_lock_init(&wb->list_lock); 427 423 INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn); 428 424 }