Merge branch 'reiserfs/kill-bkl' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing

* 'reiserfs/kill-bkl' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing:
reiserfs: Safely acquire i_mutex from xattr_rmdir
reiserfs: Safely acquire i_mutex from reiserfs_for_each_xattr
reiserfs: Fix journal mutex <-> inode mutex lock inversion
reiserfs: Fix unwanted recursive reiserfs lock in reiserfs_unlink()
reiserfs: Relax lock before open xattr dir in reiserfs_xattr_set_handle()
reiserfs: Relax reiserfs lock while freeing the journal
reiserfs: Fix reiserfs lock <-> i_mutex dependency inversion on xattr
reiserfs: Warn on lock relax if taken recursively
reiserfs: Fix reiserfs lock <-> i_xattr_sem dependency inversion
reiserfs: Fix remaining in-reclaim-fs <-> reclaim-fs-on locking inversion
reiserfs: Fix reiserfs lock <-> inode mutex dependency inversion
reiserfs: Fix reiserfs lock and journal lock inversion dependency
reiserfs: Fix possible recursive lock

+79 -15
+3
fs/reiserfs/bitmap.c
··· 1277 1277 struct reiserfs_bitmap_info *bitmap; 1278 1278 unsigned int bmap_nr = reiserfs_bmap_count(sb); 1279 1279 1280 + /* Avoid lock recursion in fault case */ 1281 + reiserfs_write_unlock(sb); 1280 1282 bitmap = vmalloc(sizeof(*bitmap) * bmap_nr); 1283 + reiserfs_write_lock(sb); 1281 1284 if (bitmap == NULL) 1282 1285 return -ENOMEM; 1283 1286
+3 -2
fs/reiserfs/inode.c
··· 31 31 JOURNAL_PER_BALANCE_CNT * 2 + 32 32 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb); 33 33 struct reiserfs_transaction_handle th; 34 + int depth; 34 35 int err; 35 36 36 37 truncate_inode_pages(&inode->i_data, 0); 37 38 38 - reiserfs_write_lock(inode->i_sb); 39 + depth = reiserfs_write_lock_once(inode->i_sb); 39 40 40 41 /* The = 0 happens when we abort creating a new inode for some reason like lack of space.. */ 41 42 if (!(inode->i_state & I_NEW) && INODE_PKEY(inode)->k_objectid != 0) { /* also handles bad_inode case */ ··· 75 74 out: 76 75 clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */ 77 76 inode->i_blocks = 0; 78 - reiserfs_write_unlock(inode->i_sb); 77 + reiserfs_write_unlock_once(inode->i_sb, depth); 79 78 } 80 79 81 80 static void _make_cpu_key(struct cpu_key *key, int version, __u32 dirid,
+14 -4
fs/reiserfs/journal.c
··· 2009 2009 destroy_workqueue(commit_wq); 2010 2010 commit_wq = NULL; 2011 2011 } 2012 - reiserfs_write_lock(sb); 2013 2012 2014 2013 free_journal_ram(sb); 2014 + 2015 + reiserfs_write_lock(sb); 2015 2016 2016 2017 return 0; 2017 2018 } ··· 2759 2758 struct reiserfs_journal *journal; 2760 2759 struct reiserfs_journal_list *jl; 2761 2760 char b[BDEVNAME_SIZE]; 2761 + int ret; 2762 2762 2763 + /* 2764 + * Unlock here to avoid various RECLAIM-FS-ON <-> IN-RECLAIM-FS 2765 + * dependency inversion warnings. 2766 + */ 2767 + reiserfs_write_unlock(sb); 2763 2768 journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal)); 2764 2769 if (!journal) { 2765 2770 reiserfs_warning(sb, "journal-1256", 2766 2771 "unable to get memory for journal structure"); 2772 + reiserfs_write_lock(sb); 2767 2773 return 1; 2768 2774 } 2769 2775 memset(journal, 0, sizeof(struct reiserfs_journal)); ··· 2779 2771 INIT_LIST_HEAD(&journal->j_working_list); 2780 2772 INIT_LIST_HEAD(&journal->j_journal_list); 2781 2773 journal->j_persistent_trans = 0; 2782 - if (reiserfs_allocate_list_bitmaps(sb, 2783 - journal->j_list_bitmap, 2784 - reiserfs_bmap_count(sb))) 2774 + ret = reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap, 2775 + reiserfs_bmap_count(sb)); 2776 + reiserfs_write_lock(sb); 2777 + if (ret) 2785 2778 goto free_and_return; 2779 + 2786 2780 allocate_bitmap_nodes(sb); 2787 2781 2788 2782 /* reserved for journal area support */
+9
fs/reiserfs/lock.c
··· 86 86 reiserfs_panic(sb, "%s called without kernel lock held %d", 87 87 caller); 88 88 } 89 + 90 + #ifdef CONFIG_REISERFS_CHECK 91 + void reiserfs_lock_check_recursive(struct super_block *sb) 92 + { 93 + struct reiserfs_sb_info *sb_i = REISERFS_SB(sb); 94 + 95 + WARN_ONCE((sb_i->lock_depth > 0), "Unwanted recursive reiserfs lock!\n"); 96 + } 97 + #endif
+4 -3
fs/reiserfs/namei.c
··· 921 921 struct reiserfs_transaction_handle th; 922 922 int jbegin_count; 923 923 unsigned long savelink; 924 + int depth; 924 925 925 926 inode = dentry->d_inode; 926 927 ··· 933 932 JOURNAL_PER_BALANCE_CNT * 2 + 2 + 934 933 4 * REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb); 935 934 936 - reiserfs_write_lock(dir->i_sb); 935 + depth = reiserfs_write_lock_once(dir->i_sb); 937 936 retval = journal_begin(&th, dir->i_sb, jbegin_count); 938 937 if (retval) 939 938 goto out_unlink; ··· 994 993 995 994 retval = journal_end(&th, dir->i_sb, jbegin_count); 996 995 reiserfs_check_path(&path); 997 - reiserfs_write_unlock(dir->i_sb); 996 + reiserfs_write_unlock_once(dir->i_sb, depth); 998 997 return retval; 999 998 1000 999 end_unlink: ··· 1004 1003 if (err) 1005 1004 retval = err; 1006 1005 out_unlink: 1007 - reiserfs_write_unlock(dir->i_sb); 1006 + reiserfs_write_unlock_once(dir->i_sb, depth); 1008 1007 return retval; 1009 1008 } 1010 1009
+20 -6
fs/reiserfs/xattr.c
··· 83 83 BUG_ON(!mutex_is_locked(&dir->i_mutex)); 84 84 vfs_dq_init(dir); 85 85 86 - mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); 86 + reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, 87 + I_MUTEX_CHILD, dir->i_sb); 87 88 error = dir->i_op->unlink(dir, dentry); 88 89 mutex_unlock(&dentry->d_inode->i_mutex); 89 90 ··· 99 98 BUG_ON(!mutex_is_locked(&dir->i_mutex)); 100 99 vfs_dq_init(dir); 101 100 102 - mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD); 101 + reiserfs_mutex_lock_nested_safe(&dentry->d_inode->i_mutex, 102 + I_MUTEX_CHILD, dir->i_sb); 103 103 dentry_unhash(dentry); 104 104 error = dir->i_op->rmdir(dir, dentry); 105 105 if (!error) ··· 237 235 if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1) 238 236 return 0; 239 237 238 + reiserfs_write_unlock(inode->i_sb); 240 239 dir = open_xa_dir(inode, XATTR_REPLACE); 241 240 if (IS_ERR(dir)) { 242 241 err = PTR_ERR(dir); 242 + reiserfs_write_lock(inode->i_sb); 243 243 goto out; 244 244 } else if (!dir->d_inode) { 245 245 err = 0; 246 + reiserfs_write_lock(inode->i_sb); 246 247 goto out_dir; 247 248 } 248 249 249 250 mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR); 251 + 252 + reiserfs_write_lock(inode->i_sb); 253 + 250 254 buf.xadir = dir; 251 255 err = reiserfs_readdir_dentry(dir, &buf, fill_with_dentries, &pos); 252 256 while ((err == 0 || err == -ENOSPC) && buf.count) { ··· 291 283 err = journal_begin(&th, inode->i_sb, blocks); 292 284 if (!err) { 293 285 int jerror; 294 - mutex_lock_nested(&dir->d_parent->d_inode->i_mutex, 295 - I_MUTEX_XATTR); 286 + reiserfs_mutex_lock_nested_safe( 287 + &dir->d_parent->d_inode->i_mutex, 288 + I_MUTEX_XATTR, inode->i_sb); 296 289 err = action(dir, data); 297 290 jerror = journal_end(&th, inode->i_sb, blocks); 298 291 mutex_unlock(&dir->d_parent->d_inode->i_mutex); ··· 489 480 if (!buffer) 490 481 return lookup_and_delete_xattr(inode, name); 491 482 483 + reiserfs_write_unlock(inode->i_sb); 492 484 dentry = xattr_lookup(inode, name, flags); 493 - if (IS_ERR(dentry)) 485 + if (IS_ERR(dentry)) { 486 + reiserfs_write_lock(inode->i_sb); 494 487 return PTR_ERR(dentry); 488 + } 495 489 496 - down_write(&REISERFS_I(inode)->i_xattr_sem); 490 + down_read(&REISERFS_I(inode)->i_xattr_sem); 491 + 492 + reiserfs_write_lock(inode->i_sb); 497 493 498 494 xahash = xattr_hash(buffer, buffer_size); 499 495 while (buffer_pos < buffer_size || buffer_pos == 0) {
+26
include/linux/reiserfs_fs.h
··· 62 62 int reiserfs_write_lock_once(struct super_block *s); 63 63 void reiserfs_write_unlock_once(struct super_block *s, int lock_depth); 64 64 65 + #ifdef CONFIG_REISERFS_CHECK 66 + void reiserfs_lock_check_recursive(struct super_block *s); 67 + #else 68 + static inline void reiserfs_lock_check_recursive(struct super_block *s) { } 69 + #endif 70 + 65 71 /* 66 72 * Several mutexes depend on the write lock. 67 73 * However sometimes we want to relax the write lock while we hold ··· 98 92 static inline void reiserfs_mutex_lock_safe(struct mutex *m, 99 93 struct super_block *s) 100 94 { 95 + reiserfs_lock_check_recursive(s); 101 96 reiserfs_write_unlock(s); 102 97 mutex_lock(m); 98 + reiserfs_write_lock(s); 99 + } 100 + 101 + static inline void 102 + reiserfs_mutex_lock_nested_safe(struct mutex *m, unsigned int subclass, 103 + struct super_block *s) 104 + { 105 + reiserfs_lock_check_recursive(s); 106 + reiserfs_write_unlock(s); 107 + mutex_lock_nested(m, subclass); 108 + reiserfs_write_lock(s); 109 + } 110 + 111 + static inline void 112 + reiserfs_down_read_safe(struct rw_semaphore *sem, struct super_block *s) 113 + { 114 + reiserfs_lock_check_recursive(s); 115 + reiserfs_write_unlock(s); 116 + down_read(sem); 103 117 reiserfs_write_lock(s); 104 118 } 105 119