fs: move i_wb_list out from under inode_lock

Protect the inode writeback list with a new global lock,
inode_wb_list_lock, and use it for all manipulations and traversals of
the list. This lock replaces inode_lock for this purpose: inodes on the
list can be validity-checked while holding inode->i_lock, so inode_lock
is no longer needed to protect the list.
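
As a minimal sketch of the new nesting (illustration only, not part of
the patch; walk_dirty_list() is a made-up helper and the state checks
are just an example), a list traversal now looks like:

    static void walk_dirty_list(struct bdi_writeback *wb)
    {
            struct inode *inode;

            spin_lock(&inode_wb_list_lock);
            list_for_each_entry(inode, &wb->b_dirty, i_wb_list) {
                    /* i_lock nests inside inode_wb_list_lock */
                    spin_lock(&inode->i_lock);
                    if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                            /* skip inodes being set up or torn down */
                            spin_unlock(&inode->i_lock);
                            continue;
                    }
                    /* inode is validated here; safe to inspect or requeue */
                    spin_unlock(&inode->i_lock);
            }
            spin_unlock(&inode_wb_list_lock);
    }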

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>


+70 -48
+2 -2
fs/block_dev.c
···
 static void bdev_inode_switch_bdi(struct inode *inode,
                         struct backing_dev_info *dst)
 {
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         spin_lock(&inode->i_lock);
         inode->i_data.backing_dev_info = dst;
         if (inode->i_state & I_DIRTY)
                 list_move(&inode->i_wb_list, &dst->wb.b_dirty);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
 }
 
 static sector_t max_block(struct block_device *bdev)
+44 -32
fs/fs-writeback.c
···
 }
 
 /*
+ * Remove the inode from the writeback list it is on.
+ */
+void inode_wb_list_del(struct inode *inode)
+{
+        spin_lock(&inode_wb_list_lock);
+        list_del_init(&inode->i_wb_list);
+        spin_unlock(&inode_wb_list_lock);
+}
+
+
+/*
  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
  * furthest end of its superblock's dirty-inode list.
  *
···
 {
         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
 
+        assert_spin_locked(&inode_wb_list_lock);
         if (!list_empty(&wb->b_dirty)) {
                 struct inode *tail;
 
···
 {
         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
 
+        assert_spin_locked(&inode_wb_list_lock);
         list_move(&inode->i_wb_list, &wb->b_more_io);
 }
 
 static void inode_sync_complete(struct inode *inode)
 {
         /*
-         * Prevent speculative execution through spin_unlock(&inode_lock);
+         * Prevent speculative execution through
+         * spin_unlock(&inode_wb_list_lock);
          */
+
         smp_mb();
         wake_up_bit(&inode->i_state, __I_SYNC);
 }
···
  */
 static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
 {
+        assert_spin_locked(&inode_wb_list_lock);
         list_splice_init(&wb->b_more_io, &wb->b_io);
         move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
 }
···
         wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
         while (inode->i_state & I_SYNC) {
                 spin_unlock(&inode->i_lock);
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode_wb_list_lock);
                 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
-                spin_lock(&inode_lock);
+                spin_lock(&inode_wb_list_lock);
                 spin_lock(&inode->i_lock);
         }
 }
 
 /*
- * Write out an inode's dirty pages. Called under inode_lock. Either the
- * caller has ref on the inode (either via __iget or via syscall against an fd)
- * or the inode has I_WILL_FREE set (via generic_forget_inode)
+ * Write out an inode's dirty pages. Called under inode_wb_list_lock. Either
+ * the caller has an active reference on the inode or the inode has I_WILL_FREE
+ * set.
  *
  * If `wait' is set, wait on the writeout.
  *
  * The whole writeout design is quite complex and fragile. We want to avoid
  * starvation of particular inodes when others are being redirtied, prevent
  * livelocks, etc.
- *
- * Called under inode_lock.
  */
 static int
 writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
···
         inode->i_state |= I_SYNC;
         inode->i_state &= ~I_DIRTY_PAGES;
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
 
         ret = do_writepages(mapping, wbc);
 
···
          * due to delalloc, clear dirty metadata flags right before
          * write_inode()
          */
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         dirty = inode->i_state & I_DIRTY;
         inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
         /* Don't write the inode if only I_DIRTY_PAGES was set */
         if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
                 int err = write_inode(inode, wbc);
···
                         ret = err;
         }
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         spin_lock(&inode->i_lock);
         inode->i_state &= ~I_SYNC;
         if (!(inode->i_state & I_FREEING)) {
···
                          */
                         redirty_tail(inode);
                 }
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode_wb_list_lock);
                 iput(inode);
                 cond_resched();
-                spin_lock(&inode_lock);
+                spin_lock(&inode_wb_list_lock);
                 if (wbc->nr_to_write <= 0) {
                         wbc->more_io = 1;
                         return 1;
···
 
         if (!wbc->wb_start)
                 wbc->wb_start = jiffies; /* livelock avoidance */
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         if (!wbc->for_kupdate || list_empty(&wb->b_io))
                 queue_io(wb, wbc->older_than_this);
 
···
                 if (ret)
                         break;
         }
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
         /* Leave any unwritten inodes on b_io */
 }
 
···
 {
         WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         if (!wbc->for_kupdate || list_empty(&wb->b_io))
                 queue_io(wb, wbc->older_than_this);
         writeback_sb_inodes(sb, wb, wbc, true);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
 }
 
 /*
···
                  * become available for writeback. Otherwise
                  * we'll just busyloop.
                  */
-                spin_lock(&inode_lock);
+                spin_lock(&inode_wb_list_lock);
                 if (!list_empty(&wb->b_more_io)) {
                         inode = wb_inode(wb->b_more_io.prev);
                         trace_wbc_writeback_wait(&wbc, wb->bdi);
···
                         inode_wait_for_writeback(inode);
                         spin_unlock(&inode->i_lock);
                 }
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode_wb_list_lock);
         }
 
         return wrote;
···
 {
         struct super_block *sb = inode->i_sb;
         struct backing_dev_info *bdi = NULL;
-        bool wakeup_bdi = false;
 
         /*
          * Don't do this for I_DIRTY_PAGES - that doesn't actually
···
         if (unlikely(block_dump))
                 block_dump___mark_inode_dirty(inode);
 
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         if ((inode->i_state & flags) != flags) {
                 const int was_dirty = inode->i_state & I_DIRTY;
···
                 if (inode->i_state & I_FREEING)
                         goto out_unlock_inode;
 
-                spin_unlock(&inode->i_lock);
                 /*
                  * If the inode was already on b_dirty/b_io/b_more_io, don't
                  * reposition it (that would break b_dirty time-ordering).
                  */
                 if (!was_dirty) {
+                        bool wakeup_bdi = false;
                         bdi = inode_to_bdi(inode);
 
                         if (bdi_cap_writeback_dirty(bdi)) {
···
                                 wakeup_bdi = true;
                         }
 
+                        spin_unlock(&inode->i_lock);
+                        spin_lock(&inode_wb_list_lock);
                         inode->dirtied_when = jiffies;
                         list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
+                        spin_unlock(&inode_wb_list_lock);
+
+                        if (wakeup_bdi)
+                                bdi_wakeup_thread_delayed(bdi);
+                        return;
                 }
-                goto out;
         }
 out_unlock_inode:
         spin_unlock(&inode->i_lock);
-out:
-        spin_unlock(&inode_lock);
 
-        if (wakeup_bdi)
-                bdi_wakeup_thread_delayed(bdi);
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 
···
         wbc.nr_to_write = 0;
 
         might_sleep();
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         ret = writeback_single_inode(inode, &wbc);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
         if (sync)
                 inode_sync_wait(inode);
         return ret;
···
 {
         int ret;
 
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         ret = writeback_single_inode(inode, wbc);
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
         return ret;
 }
 EXPORT_SYMBOL(sync_inode);
+8 -4
fs/inode.c
···
 #include <linux/posix_acl.h>
 #include <linux/ima.h>
 #include <linux/cred.h>
+#include "internal.h"
 
 /*
  * inode locking rules.
···
  *   inode_lru, inode->i_lru
  * inode_sb_list_lock protects:
  *   sb->s_inodes, inode->i_sb_list
+ * inode_wb_list_lock protects:
+ *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
  *
  * Lock ordering:
  * inode_lock
···
  *   inode_sb_list_lock
  *     inode->i_lock
  *       inode_lru_lock
+ *
+ * inode_wb_list_lock
+ *   inode->i_lock
  */
 
 /*
···
 DEFINE_SPINLOCK(inode_lock);
 
 __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_wb_list_lock);
 
 /*
  * iprune_sem provides exclusion between the icache shrinking and the
···
         BUG_ON(!(inode->i_state & I_FREEING));
         BUG_ON(!list_empty(&inode->i_lru));
 
-        spin_lock(&inode_lock);
-        list_del_init(&inode->i_wb_list);
-        spin_unlock(&inode_lock);
-
+        inode_wb_list_del(inode);
         inode_sb_list_del(inode);
 
         if (op->evict_inode) {
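
The ordering documented above (inode_wb_list_lock taken before
inode->i_lock) is why __mark_inode_dirty() now drops i_lock before it
queues the inode. A rough sketch of that pattern, assuming the locking
rules in this patch (example_queue_dirty() is hypothetical, not code
from the patch):

    static void example_queue_dirty(struct inode *inode, struct bdi_writeback *wb)
    {
            spin_lock(&inode->i_lock);
            inode->i_state |= I_DIRTY_SYNC;
            /* drop i_lock: it must not be held when taking the list lock */
            spin_unlock(&inode->i_lock);

            spin_lock(&inode_wb_list_lock);
            inode->dirtied_when = jiffies;
            list_move(&inode->i_wb_list, &wb->b_dirty);
            spin_unlock(&inode_wb_list_lock);
    }
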
+5
fs/internal.h
···
  */
 extern spinlock_t inode_sb_list_lock;
 
+/*
+ * fs-writeback.c
+ */
+extern void inode_wb_list_del(struct inode *inode);
+
 extern int get_nr_dirty_inodes(void);
 extern void evict_inodes(struct super_block *);
 extern int invalidate_inodes(struct super_block *, bool);
+1
include/linux/writeback.h
···
 struct backing_dev_info;
 
 extern spinlock_t inode_lock;
+extern spinlock_t inode_wb_list_lock;
 
 /*
  * fs/fs-writeback.c
+4 -4
mm/backing-dev.c
···
         struct inode *inode;
 
         nr_wb = nr_dirty = nr_io = nr_more_io = 0;
-        spin_lock(&inode_lock);
+        spin_lock(&inode_wb_list_lock);
         list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
                 nr_dirty++;
         list_for_each_entry(inode, &wb->b_io, i_wb_list)
                 nr_io++;
         list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
                 nr_more_io++;
-        spin_unlock(&inode_lock);
+        spin_unlock(&inode_wb_list_lock);
 
         global_dirty_limits(&background_thresh, &dirty_thresh);
         bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
···
         if (bdi_has_dirty_io(bdi)) {
                 struct bdi_writeback *dst = &default_backing_dev_info.wb;
 
-                spin_lock(&inode_lock);
+                spin_lock(&inode_wb_list_lock);
                 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                 list_splice(&bdi->wb.b_io, &dst->b_io);
                 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-                spin_unlock(&inode_lock);
+                spin_unlock(&inode_wb_list_lock);
         }
 
         bdi_unregister(bdi);
+4 -4
mm/filemap.c
···
  *  ->i_mutex
  *    ->i_alloc_sem             (various)
  *
- *  ->inode_lock
- *    ->sb_lock                 (fs/fs-writeback.c)
+ *  inode_wb_list_lock
+ *    sb_lock                   (fs/fs-writeback.c)
  *    ->mapping->tree_lock      (__sync_single_inode)
  *
  *  ->i_mmap_lock
···
  *    ->zone.lru_lock           (check_pte_range->isolate_lru_page)
  *    ->private_lock            (page_remove_rmap->set_page_dirty)
  *    ->tree_lock               (page_remove_rmap->set_page_dirty)
- *    ->inode_lock              (page_remove_rmap->set_page_dirty)
+ *    inode_wb_list_lock        (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock           (page_remove_rmap->set_page_dirty)
- *    ->inode_lock              (zap_pte_range->set_page_dirty)
+ *    inode_wb_list_lock        (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock           (zap_pte_range->set_page_dirty)
  *    ->private_lock            (zap_pte_range->__set_page_dirty_buffers)
  *
+2 -2
mm/rmap.c
···
  *             swap_lock (in swap_duplicate, swap_info_get)
  *               mmlist_lock (in mmput, drain_mmlist and others)
  *               mapping->private_lock (in __set_page_dirty_buffers)
- *               inode_lock (in set_page_dirty's __mark_inode_dirty)
  *               inode->i_lock (in set_page_dirty's __mark_inode_dirty)
+ *               inode_wb_list_lock (in set_page_dirty's __mark_inode_dirty)
  *                 sb_lock (within inode_lock in fs/fs-writeback.c)
  *                 mapping->tree_lock (widely used, in set_page_dirty,
  *                           in arch-dependent flush_dcache_mmap_lock,
- *                           within inode_lock in __sync_single_inode)
+ *                           within inode_wb_list_lock in __sync_single_inode)
  *
  * (code doesn't rely on that order so it could be switched around)
  * ->tasklist_lock