Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes

Pull gfs2 fixes from Steven Whitehouse:
"Out of these five patches, the one for ensuring that the number of
revokes is not exceeded, and the one for checking the glock is not
already held in gfs2_getxattr are the two most important. The latter
can be triggered by selinux.

The other three patches are very small and fix mostly fairly trivial
issues"

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes:
GFS2: Check for glock already held in gfs2_getxattr
GFS2: alloc_workqueue() doesn't return an ERR_PTR
GFS2: don't overrun reserved revokes
GFS2: WQ_NON_REENTRANT is meaningless and going away
GFS2: Fix typo in gfs2_create_inode()

+23 -11
+4 -4
fs/gfs2/glock.c
@@ -1838,14 +1838,14 @@
 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
-	if (IS_ERR(glock_workqueue))
-		return PTR_ERR(glock_workqueue);
+	if (!glock_workqueue)
+		return -ENOMEM;
 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
						WQ_MEM_RECLAIM | WQ_FREEZABLE,
						0);
-	if (IS_ERR(gfs2_delete_workqueue)) {
+	if (!gfs2_delete_workqueue) {
		destroy_workqueue(glock_workqueue);
-		return PTR_ERR(gfs2_delete_workqueue);
+		return -ENOMEM;
	}

	register_shrinker(&glock_shrinker);
+13 -5
fs/gfs2/glops.c
@@ -47,7 +47,8 @@
  * None of the buffers should be dirty, locked, or pinned.
  */

-static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
+static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
+			     unsigned int nr_revokes)
 {
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct list_head *head = &gl->gl_ail_list;
@@ -58,7 +59,9 @@
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
-	list_for_each_entry_safe(bd, tmp, head, bd_ail_gl_list) {
+	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
+		if (nr_revokes == 0)
+			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
@@ -68,6 +71,7 @@
				gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
+		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
@@ -95,7 +99,7 @@
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

-	__gfs2_ail_flush(gl, 0);
+	__gfs2_ail_flush(gl, 0, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
@@ -105,14 +109,18 @@
 {
	struct gfs2_sbd *sdp = gl->gl_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
+	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

-	ret = gfs2_trans_begin(sdp, 0, revokes);
+	while (revokes > max_revokes)
+		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);
+
+	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
-	__gfs2_ail_flush(gl, fsync);
+	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL);
 }
+5 -1
fs/gfs2/inode.c
@@ -594,7 +594,7 @@
		}
		gfs2_glock_dq_uninit(ghs);
		if (IS_ERR(d))
-			return PTR_RET(d);
+			return PTR_ERR(d);
		return error;
	} else if (error != -ENOENT) {
		goto fail_gunlock;
@@ -1749,6 +1749,10 @@
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;
+
+	/* For selinux during lookup */
+	if (gfs2_glock_is_locked_by_me(ip->i_gl))
+		return generic_getxattr(dentry, name, data, size);

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
	ret = gfs2_glock_nq(&gh);
+1 -1
fs/gfs2/main.c
@@ -155,7 +155,7 @@
		goto fail_wq;

	gfs2_control_wq = alloc_workqueue("gfs2_control",
-					  WQ_NON_REENTRANT | WQ_UNBOUND | WQ_FREEZABLE, 0);
+					  WQ_UNBOUND | WQ_FREEZABLE, 0);
	if (!gfs2_control_wq)
		goto fail_recovery;