Merge git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw

* git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-2.6-nmw: (27 commits)
GFS2: Use DEFINE_SPINLOCK
GFS2: Fix use-after-free bug on umount (try #2)
Revert "GFS2: Fix use-after-free bug on umount"
GFS2: Streamline alloc calculations for writes
GFS2: Send useful information with uevent messages
GFS2: Fix use-after-free bug on umount
GFS2: Remove ancient, unused code
GFS2: Move four functions from super.c
GFS2: Fix bug in gfs2_lock_fs_check_clean()
GFS2: Send some sensible sysfs stuff
GFS2: Kill two daemons with one patch
GFS2: Move gfs2_recoverd into recovery.c
GFS2: Fix "truncate in progress" hang
GFS2: Clean up & move gfs2_quotad
GFS2: Add more detail to debugfs glock dumps
GFS2: Banish struct gfs2_rgrpd_host
GFS2: Move rg_free from gfs2_rgrpd_host to gfs2_rgrpd
GFS2: Move rg_igeneration into struct gfs2_rgrpd
GFS2: Banish struct gfs2_dinode_host
GFS2: Move i_size from gfs2_dinode_host and rename it to i_disksize
...

+967 -1087
+1 -1
fs/gfs2/Makefile
···
 obj-$(CONFIG_GFS2_FS) += gfs2.o
-gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
+gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \
 	glops.o inode.o log.o lops.o locking.o main.o meta_io.o \
 	mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \
 	ops_fstype.o ops_inode.o ops_super.o quota.o \
+1 -1
fs/gfs2/acl.c
···
 	struct gfs2_ea_location el_this;
 	int error;
 
-	if (!ip->i_di.di_eattr)
+	if (!ip->i_eattr)
 		return 0;
 
 	memset(&er, 0, sizeof(struct gfs2_ea_request));
+21 -56
fs/gfs2/bmap.c
···
 	void *kaddr = kmap(page);
 
 	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode),
-	       ip->i_di.di_size);
-	memset(kaddr + ip->i_di.di_size, 0,
-	       PAGE_CACHE_SIZE - ip->i_di.di_size);
+	       ip->i_disksize);
+	memset(kaddr + ip->i_disksize, 0,
+	       PAGE_CACHE_SIZE - ip->i_disksize);
 	kunmap(page);
 
 	SetPageUptodate(page);
···
 	if (error)
 		goto out;
 
-	if (ip->i_di.di_size) {
+	if (ip->i_disksize) {
 		/* Get a free block, fill it with the stuffed data,
 		   and write it out to disk */
 
···
 	di = (struct gfs2_dinode *)dibh->b_data;
 	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 
-	if (ip->i_di.di_size) {
+	if (ip->i_disksize) {
 		*(__be64 *)(di + 1) = cpu_to_be64(block);
 		gfs2_add_inode_blocks(&ip->i_inode, 1);
 		di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
···
 		}
 	}
 
-	ip->i_di.di_size = size;
+	ip->i_disksize = size;
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 	gfs2_dinode_out(ip, dibh->b_data);
···
 		goto out;
 
 	if (gfs2_is_stuffed(ip)) {
-		ip->i_di.di_size = size;
+		ip->i_disksize = size;
 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 		gfs2_dinode_out(ip, dibh->b_data);
···
 		error = gfs2_block_truncate_page(ip->i_inode.i_mapping);
 
 	if (!error) {
-		ip->i_di.di_size = size;
+		ip->i_disksize = size;
 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-		ip->i_di.di_flags |= GFS2_DIF_TRUNC_IN_PROG;
+		ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
 		gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 		gfs2_dinode_out(ip, dibh->b_data);
 	}
···
 	if (error)
 		goto out;
 
-	if (!ip->i_di.di_size) {
+	if (!ip->i_disksize) {
 		ip->i_height = 0;
 		ip->i_goal = ip->i_no_addr;
 		gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));
 	}
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
-	ip->i_di.di_flags &= ~GFS2_DIF_TRUNC_IN_PROG;
+	ip->i_diskflags &= ~GFS2_DIF_TRUNC_IN_PROG;
 
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 	gfs2_dinode_out(ip, dibh->b_data);
···
 	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), S_ISREG(ip->i_inode.i_mode)))
 		return -EINVAL;
 
-	if (size > ip->i_di.di_size)
+	if (size > ip->i_disksize)
 		error = do_grow(ip, size);
-	else if (size < ip->i_di.di_size)
+	else if (size < ip->i_disksize)
 		error = do_shrink(ip, size);
 	else
 		/* update time stamps */
···
 int gfs2_truncatei_resume(struct gfs2_inode *ip)
 {
 	int error;
-	error = trunc_dealloc(ip, ip->i_di.di_size);
+	error = trunc_dealloc(ip, ip->i_disksize);
 	if (!error)
 		error = trunc_end(ip);
 	return error;
···
 int gfs2_file_dealloc(struct gfs2_inode *ip)
 {
 	return trunc_dealloc(ip, 0);
-}
-
-/**
- * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
- * @ip: the file
- * @len: the number of bytes to be written to the file
- * @data_blocks: returns the number of data blocks required
- * @ind_blocks: returns the number of indirect blocks required
- *
- */
-
-void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len,
-			    unsigned int *data_blocks, unsigned int *ind_blocks)
-{
-	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
-	unsigned int tmp;
-
-	if (gfs2_is_dir(ip)) {
-		*data_blocks = DIV_ROUND_UP(len, sdp->sd_jbsize) + 2;
-		*ind_blocks = 3 * (sdp->sd_max_jheight - 1);
-	} else {
-		*data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3;
-		*ind_blocks = 3 * (sdp->sd_max_height - 1);
-	}
-
-	for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) {
-		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
-		*ind_blocks += tmp;
-	}
 }
 
 /**
···
 	struct buffer_head bh;
 	unsigned int shift;
 	u64 lblock, lblock_stop, size;
+	u64 end_of_file;
 
 	*alloc_required = 0;
 
···
 
 	*alloc_required = 1;
 	shift = sdp->sd_sb.sb_bsize_shift;
-	if (gfs2_is_dir(ip)) {
-		unsigned int bsize = sdp->sd_jbsize;
-		lblock = offset;
-		do_div(lblock, bsize);
-		lblock_stop = offset + len + bsize - 1;
-		do_div(lblock_stop, bsize);
-	} else {
-		u64 end_of_file = (ip->i_di.di_size + sdp->sd_sb.sb_bsize - 1) >> shift;
-		lblock = offset >> shift;
-		lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
-		if (lblock_stop > end_of_file)
-			return 0;
-	}
+	BUG_ON(gfs2_is_dir(ip));
+	end_of_file = (ip->i_disksize + sdp->sd_sb.sb_bsize - 1) >> shift;
+	lblock = offset >> shift;
+	lblock_stop = (offset + len + sdp->sd_sb.sb_bsize - 1) >> shift;
+	if (lblock_stop > end_of_file)
+		return 0;
 
 	size = (lblock_stop - lblock) << shift;
 	do {
+30 -4
fs/gfs2/bmap.h
···
 #ifndef __BMAP_DOT_H__
 #define __BMAP_DOT_H__
 
+#include "inode.h"
+
 struct inode;
 struct gfs2_inode;
 struct page;
+
+
+/**
+ * gfs2_write_calc_reserv - calculate number of blocks needed to write to a file
+ * @ip: the file
+ * @len: the number of bytes to be written to the file
+ * @data_blocks: returns the number of data blocks required
+ * @ind_blocks: returns the number of indirect blocks required
+ *
+ */
+
+static inline void gfs2_write_calc_reserv(const struct gfs2_inode *ip,
+					  unsigned int len,
+					  unsigned int *data_blocks,
+					  unsigned int *ind_blocks)
+{
+	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
+	unsigned int tmp;
+
+	BUG_ON(gfs2_is_dir(ip));
+	*data_blocks = (len >> sdp->sd_sb.sb_bsize_shift) + 3;
+	*ind_blocks = 3 * (sdp->sd_max_height - 1);
+
+	for (tmp = *data_blocks; tmp > sdp->sd_diptrs;) {
+		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
+		*ind_blocks += tmp;
+	}
+}
 
 int gfs2_unstuff_dinode(struct gfs2_inode *ip, struct page *page);
 int gfs2_block_map(struct inode *inode, sector_t lblock, struct buffer_head *bh, int create);
···
 int gfs2_truncatei(struct gfs2_inode *ip, u64 size);
 int gfs2_truncatei_resume(struct gfs2_inode *ip);
 int gfs2_file_dealloc(struct gfs2_inode *ip);
-
-void gfs2_write_calc_reserv(struct gfs2_inode *ip, unsigned int len,
-			    unsigned int *data_blocks,
-			    unsigned int *ind_blocks);
 int gfs2_write_alloc_required(struct gfs2_inode *ip, u64 offset,
 			      unsigned int len, int *alloc_required);
 
-136
fs/gfs2/daemon.c
···
-/*
- * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/completion.h>
-#include <linux/buffer_head.h>
-#include <linux/kthread.h>
-#include <linux/delay.h>
-#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
-#include <linux/freezer.h>
-
-#include "gfs2.h"
-#include "incore.h"
-#include "daemon.h"
-#include "glock.h"
-#include "log.h"
-#include "quota.h"
-#include "recovery.h"
-#include "super.h"
-#include "util.h"
-
-/* This uses schedule_timeout() instead of msleep() because it's good for
-   the daemons to wake up more often than the timeout when unmounting so
-   the user's unmount doesn't sit there forever.
-
-   The kthread functions used to start these daemons block and flush signals. */
-
-/**
- * gfs2_glockd - Reclaim unused glock structures
- * @sdp: Pointer to GFS2 superblock
- *
- * One or more of these daemons run, reclaiming glocks on sd_reclaim_list.
- * Number of daemons can be set by user, with num_glockd mount option.
- */
-
-int gfs2_glockd(void *data)
-{
-	struct gfs2_sbd *sdp = data;
-
-	while (!kthread_should_stop()) {
-		while (atomic_read(&sdp->sd_reclaim_count))
-			gfs2_reclaim_glock(sdp);
-
-		wait_event_interruptible(sdp->sd_reclaim_wq,
-					 (atomic_read(&sdp->sd_reclaim_count) ||
-					 kthread_should_stop()));
-		if (freezing(current))
-			refrigerator();
-	}
-
-	return 0;
-}
-
-/**
- * gfs2_recoverd - Recover dead machine's journals
- * @sdp: Pointer to GFS2 superblock
- *
- */
-
-int gfs2_recoverd(void *data)
-{
-	struct gfs2_sbd *sdp = data;
-	unsigned long t;
-
-	while (!kthread_should_stop()) {
-		gfs2_check_journals(sdp);
-		t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
-		schedule_timeout_interruptible(t);
-	}
-
-	return 0;
-}
-
-/**
- * gfs2_quotad - Write cached quota changes into the quota file
- * @sdp: Pointer to GFS2 superblock
- *
- */
-
-int gfs2_quotad(void *data)
-{
-	struct gfs2_sbd *sdp = data;
-	unsigned long t;
-	int error;
-
-	while (!kthread_should_stop()) {
-		/* Update the master statfs file */
-
-		t = sdp->sd_statfs_sync_time +
-		    gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
-
-		if (time_after_eq(jiffies, t)) {
-			error = gfs2_statfs_sync(sdp);
-			if (error &&
-			    error != -EROFS &&
-			    !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
-				fs_err(sdp, "quotad: (1) error=%d\n", error);
-			sdp->sd_statfs_sync_time = jiffies;
-		}
-
-		/* Update quota file */
-
-		t = sdp->sd_quota_sync_time +
-		    gfs2_tune_get(sdp, gt_quota_quantum) * HZ;
-
-		if (time_after_eq(jiffies, t)) {
-			error = gfs2_quota_sync(sdp);
-			if (error &&
-			    error != -EROFS &&
-			    !test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
-				fs_err(sdp, "quotad: (2) error=%d\n", error);
-			sdp->sd_quota_sync_time = jiffies;
-		}
-
-		gfs2_quota_scan(sdp);
-
-		t = gfs2_tune_get(sdp, gt_quotad_secs) * HZ;
-		if (freezing(current))
-			refrigerator();
-		schedule_timeout_interruptible(t);
-	}
-
-	return 0;
-}
-
-17
fs/gfs2/daemon.h
···
-/*
- * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#ifndef __DAEMON_DOT_H__
-#define __DAEMON_DOT_H__
-
-int gfs2_glockd(void *data);
-int gfs2_recoverd(void *data);
-int gfs2_quotad(void *data);
-
-#endif /* __DAEMON_DOT_H__ */
+31 -31
fs/gfs2/dir.c
···
  * the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from the
  * beginning of the leaf block. The dirents reside in leaves when
  *
- * dip->i_di.di_flags & GFS2_DIF_EXHASH is true
+ * dip->i_diskflags & GFS2_DIF_EXHASH is true
  *
  * Otherwise, the dirents are "linear", within a single stuffed dinode block.
  *
···
 
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
 	memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
-	if (ip->i_di.di_size < offset + size)
-		ip->i_di.di_size = offset + size;
+	if (ip->i_disksize < offset + size)
+		ip->i_disksize = offset + size;
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 	gfs2_dinode_out(ip, dibh->b_data);
 
···
 	if (error)
 		return error;
 
-	if (ip->i_di.di_size < offset + copied)
-		ip->i_di.di_size = offset + copied;
+	if (ip->i_disksize < offset + copied)
+		ip->i_disksize = offset + copied;
 	ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 
 	gfs2_trans_add_bh(ip->i_gl, dibh, 1);
···
 	int copied = 0;
 	int error = 0;
 
-	if (offset >= ip->i_di.di_size)
+	if (offset >= ip->i_disksize)
 		return 0;
 
-	if (offset + size > ip->i_di.di_size)
-		size = ip->i_di.di_size - offset;
+	if (offset + size > ip->i_disksize)
+		size = ip->i_disksize - offset;
 
 	if (!size)
 		return 0;
···
 	struct gfs2_inode *ip = GFS2_I(inode);
 	int error;
 
-	if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
+	if (ip->i_diskflags & GFS2_DIF_EXHASH) {
 		struct gfs2_leaf *leaf;
 		unsigned hsize = 1 << ip->i_depth;
 		unsigned index;
 		u64 ln;
-		if (hsize * sizeof(u64) != ip->i_di.di_size) {
+		if (hsize * sizeof(u64) != ip->i_disksize) {
 			gfs2_consist_inode(ip);
 			return ERR_PTR(-EIO);
 		}
···
 		return -ENOSPC;
 	bn = bh->b_blocknr;
 
-	gfs2_assert(sdp, dip->i_di.di_entries < (1 << 16));
-	leaf->lf_entries = cpu_to_be16(dip->i_di.di_entries);
+	gfs2_assert(sdp, dip->i_entries < (1 << 16));
+	leaf->lf_entries = cpu_to_be16(dip->i_entries);
 
 	/* Copy dirents */
 
···
 	for (x = sdp->sd_hash_ptrs; x--; lp++)
 		*lp = cpu_to_be64(bn);
 
-	dip->i_di.di_size = sdp->sd_sb.sb_bsize / 2;
+	dip->i_disksize = sdp->sd_sb.sb_bsize / 2;
 	gfs2_add_inode_blocks(&dip->i_inode, 1);
-	dip->i_di.di_flags |= GFS2_DIF_EXHASH;
+	dip->i_diskflags |= GFS2_DIF_EXHASH;
 
 	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
 	dip->i_depth = y;
···
 	int error = 0;
 
 	hsize = 1 << dip->i_depth;
-	if (hsize * sizeof(u64) != dip->i_di.di_size) {
+	if (hsize * sizeof(u64) != dip->i_disksize) {
 		gfs2_consist_inode(dip);
 		return -EIO;
 	}
···
 
 	buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS | __GFP_NOFAIL);
 
-	for (block = dip->i_di.di_size >> sdp->sd_hash_bsize_shift; block--;) {
+	for (block = dip->i_disksize >> sdp->sd_hash_bsize_shift; block--;) {
 		error = gfs2_dir_read_data(dip, (char *)buf,
 					   block * sdp->sd_hash_bsize,
 					   sdp->sd_hash_bsize, 1);
···
 	unsigned depth = 0;
 
 	hsize = 1 << dip->i_depth;
-	if (hsize * sizeof(u64) != dip->i_di.di_size) {
+	if (hsize * sizeof(u64) != dip->i_disksize) {
 		gfs2_consist_inode(dip);
 		return -EIO;
 	}
···
 	int copied = 0;
 	int error;
 
-	if (!dip->i_di.di_entries)
+	if (!dip->i_entries)
 		return 0;
 
-	if (dip->i_di.di_flags & GFS2_DIF_EXHASH)
+	if (dip->i_diskflags & GFS2_DIF_EXHASH)
 		return dir_e_read(inode, offset, opaque, filldir);
 
 	if (!gfs2_is_stuffed(dip)) {
···
 		error = PTR_ERR(dent);
 		goto out;
 	}
-	if (dip->i_di.di_entries != g.offset) {
+	if (dip->i_entries != g.offset) {
 		fs_warn(sdp, "Number of entries corrupt in dir %llu, "
-			"ip->i_di.di_entries (%u) != g.offset (%u)\n",
+			"ip->i_entries (%u) != g.offset (%u)\n",
 			(unsigned long long)dip->i_no_addr,
-			dip->i_di.di_entries,
+			dip->i_entries,
 			g.offset);
 		error = -EIO;
 		goto out;
 	}
 	error = do_filldir_main(dip, offset, opaque, filldir, darr,
-				dip->i_di.di_entries, &copied);
+				dip->i_entries, &copied);
 out:
 	kfree(darr);
 }
···
 	dent = gfs2_init_dirent(inode, dent, name, bh);
 	gfs2_inum_out(nip, dent);
 	dent->de_type = cpu_to_be16(type);
-	if (ip->i_di.di_flags & GFS2_DIF_EXHASH) {
+	if (ip->i_diskflags & GFS2_DIF_EXHASH) {
 		leaf = (struct gfs2_leaf *)bh->b_data;
 		be16_add_cpu(&leaf->lf_entries, 1);
 	}
···
 		if (error)
 			break;
 		gfs2_trans_add_bh(ip->i_gl, bh, 1);
-		ip->i_di.di_entries++;
+		ip->i_entries++;
 		ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
 		gfs2_dinode_out(ip, bh->b_data);
 		brelse(bh);
 		error = 0;
 		break;
 	}
-	if (!(ip->i_di.di_flags & GFS2_DIF_EXHASH)) {
+	if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) {
 		error = dir_make_exhash(inode);
 		if (error)
 			break;
···
 	}
 
 	dirent_del(dip, bh, prev, dent);
-	if (dip->i_di.di_flags & GFS2_DIF_EXHASH) {
+	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
 		struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
 		u16 entries = be16_to_cpu(leaf->lf_entries);
 		if (!entries)
···
 	if (error)
 		return error;
 
-	if (!dip->i_di.di_entries)
+	if (!dip->i_entries)
 		gfs2_consist_inode(dip);
 	gfs2_trans_add_bh(dip->i_gl, bh, 1);
-	dip->i_di.di_entries--;
+	dip->i_entries--;
 	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
 	gfs2_dinode_out(dip, bh->b_data);
 	brelse(bh);
···
 	gfs2_inum_out(nip, dent);
 	dent->de_type = cpu_to_be16(new_type);
 
-	if (dip->i_di.di_flags & GFS2_DIF_EXHASH) {
+	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
 		brelse(bh);
 		error = gfs2_meta_inode_buffer(dip, &bh);
 		if (error)
···
 	int error = 0;
 
 	hsize = 1 << dip->i_depth;
-	if (hsize * sizeof(u64) != dip->i_di.di_size) {
+	if (hsize * sizeof(u64) != dip->i_disksize) {
 		gfs2_consist_inode(dip);
 		return -EIO;
 	}
+1
fs/gfs2/dir.h
···
 #define __DIR_DOT_H__
 
 #include <linux/dcache.h>
+#include <linux/crc32.h>
 
 struct inode;
 struct gfs2_inode;
+20 -20
fs/gfs2/eattr.c
···
 	__be64 *eablk, *end;
 	int error;
 
-	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &bh);
+	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &bh);
 	if (error)
 		return error;
 
-	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT)) {
+	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
 		error = ea_foreach_i(ip, bh, ea_call, data);
 		goto out;
 	}
···
 	if (error)
 		return error;
 
-	if (ip->i_di.di_eattr) {
+	if (ip->i_eattr) {
 		struct ea_list ei = { .ei_er = er, .ei_size = 0 };
 
 		error = ea_foreach(ip, ea_list_i, &ei);
···
 	struct gfs2_ea_location el;
 	int error;
 
-	if (!ip->i_di.di_eattr)
+	if (!ip->i_eattr)
 		return -ENODATA;
 
 	error = gfs2_ea_find(ip, er, &el);
···
 	if (error)
 		return error;
 
-	ip->i_di.di_eattr = bh->b_blocknr;
+	ip->i_eattr = bh->b_blocknr;
 	error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);
 
 	brelse(bh);
···
 	int error;
 	int mh_size = sizeof(struct gfs2_meta_header);
 
-	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
+	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
 		__be64 *end;
 
-		error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT,
+		error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT,
 				       &indbh);
 		if (error)
 			return error;
···
 		gfs2_buffer_clear_tail(indbh, mh_size);
 
 		eablk = (__be64 *)(indbh->b_data + mh_size);
-		*eablk = cpu_to_be64(ip->i_di.di_eattr);
-		ip->i_di.di_eattr = blk;
-		ip->i_di.di_flags |= GFS2_DIF_EA_INDIRECT;
+		*eablk = cpu_to_be64(ip->i_eattr);
+		ip->i_eattr = blk;
+		ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
 		gfs2_add_inode_blocks(&ip->i_inode, 1);
 
 		eablk++;
···
 	if (error)
 		return error;
 
-	if (!(ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT))
+	if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
 		blks++;
 	if (GFS2_EAREQ_SIZE_STUFFED(er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
 		blks += DIV_ROUND_UP(er->er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);
···
 	struct gfs2_ea_location el;
 	int error;
 
-	if (!ip->i_di.di_eattr) {
+	if (!ip->i_eattr) {
 		if (er->er_flags & XATTR_REPLACE)
 			return -ENODATA;
 		return ea_init(ip, er);
···
 		return error;
 
 	if (el.el_ea) {
-		if (ip->i_di.di_flags & GFS2_DIF_APPENDONLY) {
+		if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
 			brelse(el.el_bh);
 			return -EPERM;
 		}
···
 	struct gfs2_ea_location el;
 	int error;
 
-	if (!ip->i_di.di_eattr)
+	if (!ip->i_eattr)
 		return -ENODATA;
 
 	error = gfs2_ea_find(ip, er, &el);
···
 
 	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));
 
-	error = gfs2_meta_read(ip->i_gl, ip->i_di.di_eattr, DIO_WAIT, &indbh);
+	error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, &indbh);
 	if (error)
 		return error;
 
···
 	if (bstart)
 		gfs2_free_meta(ip, bstart, blen);
 
-	ip->i_di.di_flags &= ~GFS2_DIF_EA_INDIRECT;
+	ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
 	if (!error) {
···
 	struct buffer_head *dibh;
 	int error;
 
-	rgd = gfs2_blk2rgrpd(sdp, ip->i_di.di_eattr);
+	rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr);
 	if (!rgd) {
 		gfs2_consist_inode(ip);
 		return -EIO;
···
 	if (error)
 		goto out_gunlock;
 
-	gfs2_free_meta(ip, ip->i_di.di_eattr, 1);
+	gfs2_free_meta(ip, ip->i_eattr, 1);
 
-	ip->i_di.di_eattr = 0;
+	ip->i_eattr = 0;
 	gfs2_add_inode_blocks(&ip->i_inode, -1);
 
 	error = gfs2_meta_inode_buffer(ip, &dibh);
···
 	if (error)
 		goto out_rindex;
 
-	if (ip->i_di.di_flags & GFS2_DIF_EA_INDIRECT) {
+	if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
 		error = ea_dealloc_indirect(ip);
 		if (error)
 			goto out_rindex;
+158 -149
fs/gfs2/glock.c
··· 40 40 #include "quota.h" 41 41 #include "super.h" 42 42 #include "util.h" 43 + #include "bmap.h" 43 44 44 45 struct gfs2_gl_hash_bucket { 45 46 struct hlist_head hb_list; ··· 62 61 63 62 static DECLARE_RWSEM(gfs2_umount_flush_sem); 64 63 static struct dentry *gfs2_root; 65 - static struct task_struct *scand_process; 66 - static unsigned int scand_secs = 5; 67 64 static struct workqueue_struct *glock_workqueue; 65 + static LIST_HEAD(lru_list); 66 + static atomic_t lru_count = ATOMIC_INIT(0); 67 + static DEFINE_SPINLOCK(lru_lock); 68 68 69 69 #define GFS2_GL_HASH_SHIFT 15 70 70 #define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT) ··· 176 174 } 177 175 178 176 /** 177 + * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list 178 + * @gl: the glock 179 + * 180 + */ 181 + 182 + static void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) 183 + { 184 + spin_lock(&lru_lock); 185 + if (list_empty(&gl->gl_lru) && gl->gl_state != LM_ST_UNLOCKED) { 186 + list_add_tail(&gl->gl_lru, &lru_list); 187 + atomic_inc(&lru_count); 188 + } 189 + spin_unlock(&lru_lock); 190 + } 191 + 192 + /** 179 193 * gfs2_glock_put() - Decrement reference count on glock 180 194 * @gl: The glock to put 181 195 * ··· 205 187 if (atomic_dec_and_test(&gl->gl_ref)) { 206 188 hlist_del(&gl->gl_list); 207 189 write_unlock(gl_lock_addr(gl->gl_hash)); 190 + spin_lock(&lru_lock); 191 + if (!list_empty(&gl->gl_lru)) { 192 + list_del_init(&gl->gl_lru); 193 + atomic_dec(&lru_count); 194 + } 195 + spin_unlock(&lru_lock); 208 196 GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED); 209 - GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim)); 197 + GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru)); 210 198 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders)); 211 199 glock_free(gl); 212 200 rv = 1; 213 201 goto out; 214 202 } 215 203 write_unlock(gl_lock_addr(gl->gl_hash)); 204 + /* 1 for being hashed, 1 for having state != LM_ST_UNLOCKED */ 205 + if (atomic_read(&gl->gl_ref) == 2) 206 + gfs2_glock_schedule_for_reclaim(gl); 216 207 out: 217 208 return rv; 218 209 } ··· 316 289 * do_promote - promote as many requests as possible on the current queue 317 290 * @gl: The glock 318 291 * 319 - * Returns: true if there is a blocked holder at the head of the list 292 + * Returns: 1 if there is a blocked holder at the head of the list, or 2 293 + * if a type specific operation is underway. 
320 294 */ 321 295 322 296 static int do_promote(struct gfs2_glock *gl) 297 + __releases(&gl->gl_spin) 298 + __acquires(&gl->gl_spin) 323 299 { 324 300 const struct gfs2_glock_operations *glops = gl->gl_ops; 325 301 struct gfs2_holder *gh, *tmp; ··· 340 310 ret = glops->go_lock(gh); 341 311 spin_lock(&gl->gl_spin); 342 312 if (ret) { 313 + if (ret == 1) 314 + return 2; 343 315 gh->gh_error = ret; 344 316 list_del_init(&gh->gh_list); 345 317 gfs2_holder_wake(gh); ··· 446 414 const struct gfs2_glock_operations *glops = gl->gl_ops; 447 415 struct gfs2_holder *gh; 448 416 unsigned state = ret & LM_OUT_ST_MASK; 417 + int rv; 449 418 450 419 spin_lock(&gl->gl_spin); 451 420 state_change(gl, state); ··· 501 468 gfs2_demote_wake(gl); 502 469 if (state != LM_ST_UNLOCKED) { 503 470 if (glops->go_xmote_bh) { 504 - int rv; 505 471 spin_unlock(&gl->gl_spin); 506 472 rv = glops->go_xmote_bh(gl, gh); 507 473 if (rv == -EAGAIN) ··· 511 479 goto out; 512 480 } 513 481 } 514 - do_promote(gl); 482 + rv = do_promote(gl); 483 + if (rv == 2) 484 + goto out_locked; 515 485 } 516 486 out: 517 487 clear_bit(GLF_LOCK, &gl->gl_flags); 488 + out_locked: 518 489 spin_unlock(&gl->gl_spin); 519 490 gfs2_glock_put(gl); 520 491 } ··· 546 511 */ 547 512 548 513 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target) 514 + __releases(&gl->gl_spin) 515 + __acquires(&gl->gl_spin) 549 516 { 550 517 const struct gfs2_glock_operations *glops = gl->gl_ops; 551 518 struct gfs2_sbd *sdp = gl->gl_sbd; ··· 613 576 */ 614 577 615 578 static void run_queue(struct gfs2_glock *gl, const int nonblock) 579 + __releases(&gl->gl_spin) 580 + __acquires(&gl->gl_spin) 616 581 { 617 582 struct gfs2_holder *gh = NULL; 583 + int ret; 618 584 619 585 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) 620 586 return; ··· 636 596 } else { 637 597 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) 638 598 gfs2_demote_wake(gl); 639 - if (do_promote(gl) == 0) 599 + ret = do_promote(gl); 600 + if (ret == 0) 640 601 goto out; 602 + if (ret == 2) 603 + return; 641 604 gh = find_first_waiter(gl); 642 605 gl->gl_target = gh->gh_state; 643 606 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) ··· 863 820 */ 864 821 865 822 static void handle_callback(struct gfs2_glock *gl, unsigned int state, 866 - int remote, unsigned long delay) 823 + unsigned long delay) 867 824 { 868 825 int bit = delay ? 
GLF_PENDING_DEMOTE : GLF_DEMOTE; 869 826 ··· 871 828 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) { 872 829 gl->gl_demote_state = state; 873 830 gl->gl_demote_time = jiffies; 874 - if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN && 875 - gl->gl_object) 876 - gfs2_glock_schedule_for_reclaim(gl); 877 831 } else if (gl->gl_demote_state != LM_ST_UNLOCKED && 878 832 gl->gl_demote_state != state) { 879 833 gl->gl_demote_state = LM_ST_UNLOCKED; ··· 917 877 */ 918 878 919 879 static inline void add_to_queue(struct gfs2_holder *gh) 880 + __releases(&gl->gl_spin) 881 + __acquires(&gl->gl_spin) 920 882 { 921 883 struct gfs2_glock *gl = gh->gh_gl; 922 884 struct gfs2_sbd *sdp = gl->gl_sbd; ··· 1040 998 1041 999 spin_lock(&gl->gl_spin); 1042 1000 if (gh->gh_flags & GL_NOCACHE) 1043 - handle_callback(gl, LM_ST_UNLOCKED, 0, 0); 1001 + handle_callback(gl, LM_ST_UNLOCKED, 0); 1044 1002 1045 1003 list_del_init(&gh->gh_list); 1046 1004 if (find_first_holder(gl) == NULL) { ··· 1311 1269 delay = gl->gl_ops->go_min_hold_time; 1312 1270 1313 1271 spin_lock(&gl->gl_spin); 1314 - handle_callback(gl, state, 1, delay); 1272 + handle_callback(gl, state, delay); 1315 1273 spin_unlock(&gl->gl_spin); 1316 1274 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0) 1317 1275 gfs2_glock_put(gl); 1276 + } 1277 + 1278 + static void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid) 1279 + { 1280 + struct gfs2_jdesc *jd; 1281 + 1282 + spin_lock(&sdp->sd_jindex_spin); 1283 + list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { 1284 + if (jd->jd_jid != jid) 1285 + continue; 1286 + jd->jd_dirty = 1; 1287 + break; 1288 + } 1289 + spin_unlock(&sdp->sd_jindex_spin); 1318 1290 } 1319 1291 1320 1292 /** ··· 1394 1338 * Returns: 1 if it's ok 1395 1339 */ 1396 1340 1397 - static int demote_ok(struct gfs2_glock *gl) 1341 + static int demote_ok(const struct gfs2_glock *gl) 1398 1342 { 1399 1343 const struct gfs2_glock_operations *glops = gl->gl_ops; 1400 - int demote = 1; 1401 1344 1402 - if (test_bit(GLF_STICKY, &gl->gl_flags)) 1403 - demote = 0; 1404 - else if (glops->go_demote_ok) 1405 - demote = glops->go_demote_ok(gl); 1406 - 1407 - return demote; 1345 + if (gl->gl_state == LM_ST_UNLOCKED) 1346 + return 0; 1347 + if (!list_empty(&gl->gl_holders)) 1348 + return 0; 1349 + if (glops->go_demote_ok) 1350 + return glops->go_demote_ok(gl); 1351 + return 1; 1408 1352 } 1409 1353 1410 - /** 1411 - * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list 1412 - * @gl: the glock 1413 - * 1414 - */ 1415 1354 1416 - void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl) 1417 - { 1418 - struct gfs2_sbd *sdp = gl->gl_sbd; 1419 - 1420 - spin_lock(&sdp->sd_reclaim_lock); 1421 - if (list_empty(&gl->gl_reclaim)) { 1422 - gfs2_glock_hold(gl); 1423 - list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list); 1424 - atomic_inc(&sdp->sd_reclaim_count); 1425 - spin_unlock(&sdp->sd_reclaim_lock); 1426 - wake_up(&sdp->sd_reclaim_wq); 1427 - } else 1428 - spin_unlock(&sdp->sd_reclaim_lock); 1429 - } 1430 - 1431 - /** 1432 - * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list 1433 - * @sdp: the filesystem 1434 - * 1435 - * Called from gfs2_glockd() glock reclaim daemon, or when promoting a 1436 - * different glock and we notice that there are a lot of glocks in the 1437 - * reclaim list. 
1438 - * 1439 - */ 1440 - 1441 - void gfs2_reclaim_glock(struct gfs2_sbd *sdp) 1355 + static int gfs2_shrink_glock_memory(int nr, gfp_t gfp_mask) 1442 1356 { 1443 1357 struct gfs2_glock *gl; 1444 - int done_callback = 0; 1358 + int may_demote; 1359 + int nr_skipped = 0; 1360 + int got_ref = 0; 1361 + LIST_HEAD(skipped); 1445 1362 1446 - spin_lock(&sdp->sd_reclaim_lock); 1447 - if (list_empty(&sdp->sd_reclaim_list)) { 1448 - spin_unlock(&sdp->sd_reclaim_lock); 1449 - return; 1363 + if (nr == 0) 1364 + goto out; 1365 + 1366 + if (!(gfp_mask & __GFP_FS)) 1367 + return -1; 1368 + 1369 + spin_lock(&lru_lock); 1370 + while(nr && !list_empty(&lru_list)) { 1371 + gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); 1372 + list_del_init(&gl->gl_lru); 1373 + atomic_dec(&lru_count); 1374 + 1375 + /* Test for being demotable */ 1376 + if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1377 + gfs2_glock_hold(gl); 1378 + got_ref = 1; 1379 + spin_unlock(&lru_lock); 1380 + spin_lock(&gl->gl_spin); 1381 + may_demote = demote_ok(gl); 1382 + spin_unlock(&gl->gl_spin); 1383 + clear_bit(GLF_LOCK, &gl->gl_flags); 1384 + if (may_demote) { 1385 + handle_callback(gl, LM_ST_UNLOCKED, 0); 1386 + nr--; 1387 + if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1388 + gfs2_glock_put(gl); 1389 + } 1390 + spin_lock(&lru_lock); 1391 + if (may_demote) 1392 + continue; 1393 + } 1394 + if (list_empty(&gl->gl_lru) && 1395 + (atomic_read(&gl->gl_ref) <= (2 + got_ref))) { 1396 + nr_skipped++; 1397 + list_add(&gl->gl_lru, &skipped); 1398 + } 1399 + if (got_ref) { 1400 + spin_unlock(&lru_lock); 1401 + gfs2_glock_put(gl); 1402 + spin_lock(&lru_lock); 1403 + got_ref = 0; 1404 + } 1450 1405 } 1451 - gl = list_entry(sdp->sd_reclaim_list.next, 1452 - struct gfs2_glock, gl_reclaim); 1453 - list_del_init(&gl->gl_reclaim); 1454 - spin_unlock(&sdp->sd_reclaim_lock); 1455 - 1456 - atomic_dec(&sdp->sd_reclaim_count); 1457 - atomic_inc(&sdp->sd_reclaimed); 1458 - 1459 - spin_lock(&gl->gl_spin); 1460 - if (find_first_holder(gl) == NULL && 1461 - gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) { 1462 - handle_callback(gl, LM_ST_UNLOCKED, 0, 0); 1463 - done_callback = 1; 1464 - } 1465 - spin_unlock(&gl->gl_spin); 1466 - if (!done_callback || 1467 - queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1468 - gfs2_glock_put(gl); 1406 + list_splice(&skipped, &lru_list); 1407 + atomic_add(nr_skipped, &lru_count); 1408 + spin_unlock(&lru_lock); 1409 + out: 1410 + return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure; 1469 1411 } 1412 + 1413 + static struct shrinker glock_shrinker = { 1414 + .shrink = gfs2_shrink_glock_memory, 1415 + .seeks = DEFAULT_SEEKS, 1416 + }; 1470 1417 1471 1418 /** 1472 1419 * examine_bucket - Call a function for glock in a hash bucket ··· 1516 1457 } 1517 1458 1518 1459 /** 1519 - * scan_glock - look at a glock and see if we can reclaim it 1520 - * @gl: the glock to look at 1521 - * 1522 - */ 1523 - 1524 - static void scan_glock(struct gfs2_glock *gl) 1525 - { 1526 - if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) 1527 - return; 1528 - if (test_bit(GLF_LOCK, &gl->gl_flags)) 1529 - return; 1530 - 1531 - spin_lock(&gl->gl_spin); 1532 - if (find_first_holder(gl) == NULL && 1533 - gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) 1534 - gfs2_glock_schedule_for_reclaim(gl); 1535 - spin_unlock(&gl->gl_spin); 1536 - } 1537 - 1538 - /** 1539 1460 * clear_glock - look at a glock and see if we can free it from glock cache 1540 1461 * @gl: the glock to look at 1541 1462 * ··· 1523 1484 1524 
1485 static void clear_glock(struct gfs2_glock *gl) 1525 1486 { 1526 - struct gfs2_sbd *sdp = gl->gl_sbd; 1527 - int released; 1528 - 1529 - spin_lock(&sdp->sd_reclaim_lock); 1530 - if (!list_empty(&gl->gl_reclaim)) { 1531 - list_del_init(&gl->gl_reclaim); 1532 - atomic_dec(&sdp->sd_reclaim_count); 1533 - spin_unlock(&sdp->sd_reclaim_lock); 1534 - released = gfs2_glock_put(gl); 1535 - gfs2_assert(sdp, !released); 1536 - } else { 1537 - spin_unlock(&sdp->sd_reclaim_lock); 1487 + spin_lock(&lru_lock); 1488 + if (!list_empty(&gl->gl_lru)) { 1489 + list_del_init(&gl->gl_lru); 1490 + atomic_dec(&lru_count); 1538 1491 } 1492 + spin_unlock(&lru_lock); 1539 1493 1540 1494 spin_lock(&gl->gl_spin); 1541 1495 if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED) 1542 - handle_callback(gl, LM_ST_UNLOCKED, 0, 0); 1496 + handle_callback(gl, LM_ST_UNLOCKED, 0); 1543 1497 spin_unlock(&gl->gl_spin); 1544 1498 gfs2_glock_hold(gl); 1545 1499 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) ··· 1578 1546 up_write(&gfs2_umount_flush_sem); 1579 1547 msleep(10); 1580 1548 } 1549 + } 1550 + 1551 + void gfs2_glock_finish_truncate(struct gfs2_inode *ip) 1552 + { 1553 + struct gfs2_glock *gl = ip->i_gl; 1554 + int ret; 1555 + 1556 + ret = gfs2_truncatei_resume(ip); 1557 + gfs2_assert_withdraw(gl->gl_sbd, ret == 0); 1558 + 1559 + spin_lock(&gl->gl_spin); 1560 + clear_bit(GLF_LOCK, &gl->gl_flags); 1561 + run_queue(gl, 1); 1562 + spin_unlock(&gl->gl_spin); 1581 1563 } 1582 1564 1583 1565 static const char *state2str(unsigned state) ··· 1669 1623 char *p = buf; 1670 1624 if (test_bit(GLF_LOCK, gflags)) 1671 1625 *p++ = 'l'; 1672 - if (test_bit(GLF_STICKY, gflags)) 1673 - *p++ = 's'; 1674 1626 if (test_bit(GLF_DEMOTE, gflags)) 1675 1627 *p++ = 'D'; 1676 1628 if (test_bit(GLF_PENDING_DEMOTE, gflags)) ··· 1787 1743 return error; 1788 1744 } 1789 1745 1790 - /** 1791 - * gfs2_scand - Look for cached glocks and inodes to toss from memory 1792 - * @sdp: Pointer to GFS2 superblock 1793 - * 1794 - * One of these daemons runs, finding candidates to add to sd_reclaim_list. 
1795 - * See gfs2_glockd() 1796 - */ 1797 - 1798 - static int gfs2_scand(void *data) 1799 - { 1800 - unsigned x; 1801 - unsigned delay; 1802 - 1803 - while (!kthread_should_stop()) { 1804 - for (x = 0; x < GFS2_GL_HASH_SIZE; x++) 1805 - examine_bucket(scan_glock, NULL, x); 1806 - if (freezing(current)) 1807 - refrigerator(); 1808 - delay = scand_secs; 1809 - if (delay < 1) 1810 - delay = 1; 1811 - schedule_timeout_interruptible(delay * HZ); 1812 - } 1813 - 1814 - return 0; 1815 - } 1816 - 1817 - 1818 1746 1819 1747 int __init gfs2_glock_init(void) 1820 1748 { ··· 1800 1784 } 1801 1785 #endif 1802 1786 1803 - scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand"); 1804 - if (IS_ERR(scand_process)) 1805 - return PTR_ERR(scand_process); 1806 - 1807 1787 glock_workqueue = create_workqueue("glock_workqueue"); 1808 - if (IS_ERR(glock_workqueue)) { 1809 - kthread_stop(scand_process); 1788 + if (IS_ERR(glock_workqueue)) 1810 1789 return PTR_ERR(glock_workqueue); 1811 - } 1790 + 1791 + register_shrinker(&glock_shrinker); 1812 1792 1813 1793 return 0; 1814 1794 } 1815 1795 1816 1796 void gfs2_glock_exit(void) 1817 1797 { 1798 + unregister_shrinker(&glock_shrinker); 1818 1799 destroy_workqueue(glock_workqueue); 1819 - kthread_stop(scand_process); 1820 1800 } 1821 - 1822 - module_param(scand_secs, uint, S_IRUGO|S_IWUSR); 1823 - MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs"); 1824 1801 1825 1802 static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi) 1826 1803 {
+1 -1
fs/gfs2/glock.h
···
 void gfs2_lvb_unhold(struct gfs2_glock *gl);
 
 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
-void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
 void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
 
 int __init gfs2_glock_init(void);
 void gfs2_glock_exit(void);
+37 -19
fs/gfs2/glops.c
···
  * Returns: 1 if it's ok
  */
 
-static int inode_go_demote_ok(struct gfs2_glock *gl)
+static int inode_go_demote_ok(const struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
-	int demote = 0;
-
-	if (!gl->gl_object && !gl->gl_aspace->i_mapping->nrpages)
-		demote = 1;
-	else if (!sdp->sd_args.ar_localcaching &&
-		 time_after_eq(jiffies, gl->gl_stamp +
-			       gfs2_tune_get(sdp, gt_demote_secs) * HZ))
-		demote = 1;
-
-	return demote;
+	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
+		return 0;
+	return 1;
 }
 
 /**
···
 static int inode_go_lock(struct gfs2_holder *gh)
 {
 	struct gfs2_glock *gl = gh->gh_gl;
+	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct gfs2_inode *ip = gl->gl_object;
 	int error = 0;
 
···
 		return error;
 	}
 
-	if ((ip->i_di.di_flags & GFS2_DIF_TRUNC_IN_PROG) &&
+	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
 	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
-	    (gh->gh_state == LM_ST_EXCLUSIVE))
-		error = gfs2_truncatei_resume(ip);
+	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
+		spin_lock(&sdp->sd_trunc_lock);
+		if (list_empty(&ip->i_trunc_list))
+			list_add(&sdp->sd_trunc_list, &ip->i_trunc_list);
+		spin_unlock(&sdp->sd_trunc_lock);
+		wake_up(&sdp->sd_quota_wait);
+		return 1;
+	}
 
 	return error;
 }
···
 	const struct gfs2_inode *ip = gl->gl_object;
 	if (ip == NULL)
 		return 0;
-	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n",
+	gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu/%llu\n",
 		  (unsigned long long)ip->i_no_formal_ino,
 		  (unsigned long long)ip->i_no_addr,
-		  IF2DT(ip->i_inode.i_mode), ip->i_flags);
+		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
+		  (unsigned int)ip->i_diskflags,
+		  (unsigned long long)ip->i_inode.i_size,
+		  (unsigned long long)ip->i_disksize);
 	return 0;
 }
···
  * Returns: 1 if it's ok
  */
 
-static int rgrp_go_demote_ok(struct gfs2_glock *gl)
+static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
 {
 	return !gl->gl_aspace->i_mapping->nrpages;
 }
···
 	const struct gfs2_rgrpd *rgd = gl->gl_object;
 	if (rgd == NULL)
 		return 0;
-	gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr);
+	gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u\n",
+		       (unsigned long long)rgd->rd_addr, rgd->rd_flags,
+		       rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes);
 	return 0;
 }
···
 }
 
 /**
+ * trans_go_demote_ok
+ * @gl: the glock
+ *
+ * Always returns 0
+ */
+
+static int trans_go_demote_ok(const struct gfs2_glock *gl)
+{
+	return 0;
+}
+
+/**
  * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
  * @gl: the glock
  *
  * Returns: 1 if it's ok
  */
 
-static int quota_go_demote_ok(struct gfs2_glock *gl)
+static int quota_go_demote_ok(const struct gfs2_glock *gl)
 {
 	return !atomic_read(&gl->gl_lvb_count);
 }
···
 const struct gfs2_glock_operations gfs2_trans_glops = {
 	.go_xmote_th = trans_go_sync,
 	.go_xmote_bh = trans_go_xmote_bh,
+	.go_demote_ok = trans_go_demote_ok,
 	.go_type = LM_TYPE_NONDISK,
 };
 
+18 -39
fs/gfs2/incore.h
···
 	u32 bi_len;
 };
 
-struct gfs2_rgrp_host {
-	u32 rg_free;
-	u32 rg_dinodes;
-	u64 rg_igeneration;
-};
-
 struct gfs2_rgrpd {
 	struct list_head rd_list;	/* Link with superblock */
 	struct list_head rd_list_mru;
···
 	u32 rd_length;			/* length of rgrp header in fs blocks */
 	u32 rd_data;			/* num of data blocks in rgrp */
 	u32 rd_bitbytes;		/* number of bytes in data bitmaps */
-	struct gfs2_rgrp_host rd_rg;
-	struct gfs2_bitmap *rd_bits;
-	unsigned int rd_bh_count;
-	struct mutex rd_mutex;
+	u32 rd_free;
 	u32 rd_free_clone;
+	u32 rd_dinodes;
+	u64 rd_igeneration;
+	struct gfs2_bitmap *rd_bits;
+	struct mutex rd_mutex;
 	struct gfs2_log_element rd_le;
-	u32 rd_last_alloc;
 	struct gfs2_sbd *rd_sbd;
+	unsigned int rd_bh_count;
+	u32 rd_last_alloc;
 	unsigned char rd_flags;
 #define GFS2_RDF_CHECK        0x01	/* Need to check for unlinked inodes */
 #define GFS2_RDF_NOALLOC      0x02	/* rg prohibits allocation */
···
 	void (*go_xmote_th) (struct gfs2_glock *gl);
 	int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
 	void (*go_inval) (struct gfs2_glock *gl, int flags);
-	int (*go_demote_ok) (struct gfs2_glock *gl);
+	int (*go_demote_ok) (const struct gfs2_glock *gl);
 	int (*go_lock) (struct gfs2_holder *gh);
 	void (*go_unlock) (struct gfs2_holder *gh);
 	int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
···
 
 enum {
 	GLF_LOCK			= 1,
-	GLF_STICKY			= 2,
 	GLF_DEMOTE			= 3,
 	GLF_PENDING_DEMOTE		= 4,
 	GLF_DEMOTE_IN_PROGRESS		= 5,
···
 	unsigned long gl_tchange;
 	void *gl_object;
 
-	struct list_head gl_reclaim;
+	struct list_head gl_lru;
 
 	struct gfs2_sbd *gl_sbd;
 
···
 	GIF_USER = 4, /* user inode, not metadata addr space */
 };
 
-struct gfs2_dinode_host {
-	u64 di_size;		/* number of bytes in file */
-	u64 di_generation;	/* generation number for NFS */
-	u32 di_flags;		/* GFS2_DIF_... */
-	/* These only apply to directories */
-	u32 di_entries;		/* The number of entries in the directory */
-	u64 di_eattr;		/* extended attribute block number */
-};
 
 struct gfs2_inode {
 	struct inode i_inode;
 	u64 i_no_addr;
 	u64 i_no_formal_ino;
+	u64 i_generation;
+	u64 i_eattr;
+	loff_t i_disksize;
 	unsigned long i_flags;		/* GIF_... */
-
-	struct gfs2_dinode_host i_di; /* To be replaced by ref to block */
-
 	struct gfs2_glock *i_gl; /* Move into i_gh? */
 	struct gfs2_holder i_iopen_gh;
 	struct gfs2_holder i_gh; /* for prepare/commit_write only */
 	struct gfs2_alloc *i_alloc;
 	u64 i_goal;	/* goal block for allocations */
 	struct rw_semaphore i_rw_mutex;
+	struct list_head i_trunc_list;
+	u32 i_entries;
+	u32 i_diskflags;
 	u8 i_height;
 	u8 i_depth;
 };
···
 struct gfs2_tune {
 	spinlock_t gt_spin;
 
-	unsigned int gt_demote_secs; /* Cache retention for unheld glock */
 	unsigned int gt_incore_log_blocks;
 	unsigned int gt_log_flush_secs;
 
 	unsigned int gt_recoverd_secs;
 	unsigned int gt_logd_secs;
-	unsigned int gt_quotad_secs;
 
 	unsigned int gt_quota_simul_sync; /* Max quotavals to sync at once */
 	unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
···
 	/* Lock Stuff */
 
 	struct lm_lockstruct sd_lockstruct;
-	struct list_head sd_reclaim_list;
-	spinlock_t sd_reclaim_lock;
-	wait_queue_head_t sd_reclaim_wq;
-	atomic_t sd_reclaim_count;
 	struct gfs2_holder sd_live_gh;
 	struct gfs2_glock *sd_rename_gl;
 	struct gfs2_glock *sd_trans_gl;
···
 	spinlock_t sd_statfs_spin;
 	struct gfs2_statfs_change_host sd_statfs_master;
 	struct gfs2_statfs_change_host sd_statfs_local;
-	unsigned long sd_statfs_sync_time;
 
 	/* Resource group stuff */
 
···
 	struct task_struct *sd_recoverd_process;
 	struct task_struct *sd_logd_process;
 	struct task_struct *sd_quotad_process;
-	struct task_struct *sd_glockd_process[GFS2_GLOCKD_MAX];
-	unsigned int sd_glockd_num;
 
 	/* Quota stuff */
 
···
 	atomic_t sd_quota_count;
 	spinlock_t sd_quota_spin;
 	struct mutex sd_quota_mutex;
+	wait_queue_head_t sd_quota_wait;
+	struct list_head sd_trunc_list;
+	spinlock_t sd_trunc_lock;
 
 	unsigned int sd_quota_slots;
 	unsigned int sd_quota_chunks;
 	unsigned char **sd_quota_bitmap;
 
 	u64 sd_quota_sync_gen;
-	unsigned long sd_quota_sync_time;
 
 	/* Log stuff */
 
···
 	struct gfs2_holder sd_freeze_gh;
 	struct mutex sd_freeze_lock;
 	unsigned int sd_freeze_count;
-
-	/* Counters */
-
-	atomic_t sd_reclaimed;
 
 	char sd_fsname[GFS2_FSNAME_LEN];
 	char sd_table_name[GFS2_FSNAME_LEN];
+24 -29
fs/gfs2/inode.c
··· 32 32 #include "log.h" 33 33 #include "meta_io.h" 34 34 #include "ops_address.h" 35 - #include "ops_inode.h" 36 35 #include "quota.h" 37 36 #include "rgrp.h" 38 37 #include "trans.h" ··· 247 248 248 249 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) 249 250 { 250 - struct gfs2_dinode_host *di = &ip->i_di; 251 251 const struct gfs2_dinode *str = buf; 252 252 struct timespec atime; 253 253 u16 height, depth; ··· 272 274 * to do that. 273 275 */ 274 276 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink); 275 - di->di_size = be64_to_cpu(str->di_size); 276 - i_size_write(&ip->i_inode, di->di_size); 277 + ip->i_disksize = be64_to_cpu(str->di_size); 278 + i_size_write(&ip->i_inode, ip->i_disksize); 277 279 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); 278 280 atime.tv_sec = be64_to_cpu(str->di_atime); 279 281 atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); ··· 285 287 ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec); 286 288 287 289 ip->i_goal = be64_to_cpu(str->di_goal_meta); 288 - di->di_generation = be64_to_cpu(str->di_generation); 290 + ip->i_generation = be64_to_cpu(str->di_generation); 289 291 290 - di->di_flags = be32_to_cpu(str->di_flags); 292 + ip->i_diskflags = be32_to_cpu(str->di_flags); 291 293 gfs2_set_inode_flags(&ip->i_inode); 292 294 height = be16_to_cpu(str->di_height); 293 295 if (unlikely(height > GFS2_MAX_META_HEIGHT)) ··· 298 300 if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) 299 301 goto corrupt; 300 302 ip->i_depth = (u8)depth; 301 - di->di_entries = be32_to_cpu(str->di_entries); 303 + ip->i_entries = be32_to_cpu(str->di_entries); 302 304 303 - di->di_eattr = be64_to_cpu(str->di_eattr); 305 + ip->i_eattr = be64_to_cpu(str->di_eattr); 304 306 if (S_ISREG(ip->i_inode.i_mode)) 305 307 gfs2_set_aops(&ip->i_inode); 306 308 ··· 386 388 gfs2_free_di(rgd, ip); 387 389 388 390 gfs2_trans_end(sdp); 389 - clear_bit(GLF_STICKY, &ip->i_gl->gl_flags); 390 391 391 392 out_rg_gunlock: 392 393 gfs2_glock_dq_uninit(&al->al_rgd_gh); ··· 687 690 return error; 688 691 } 689 692 690 - if (dip->i_di.di_entries == (u32)-1) 693 + if (dip->i_entries == (u32)-1) 691 694 return -EFBIG; 692 695 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1) 693 696 return -EMLINK; ··· 787 790 di->di_flags = 0; 788 791 789 792 if (S_ISREG(mode)) { 790 - if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) || 793 + if ((dip->i_diskflags & GFS2_DIF_INHERIT_JDATA) || 791 794 gfs2_tune_get(sdp, gt_new_files_jdata)) 792 795 di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA); 793 796 } else if (S_ISDIR(mode)) { 794 - di->di_flags |= cpu_to_be32(dip->i_di.di_flags & 797 + di->di_flags |= cpu_to_be32(dip->i_diskflags & 795 798 GFS2_DIF_INHERIT_JDATA); 796 799 } 797 800 ··· 1065 1068 struct qstr dotname; 1066 1069 int error; 1067 1070 1068 - if (ip->i_di.di_entries != 2) { 1071 + if (ip->i_entries != 2) { 1069 1072 if (gfs2_consist_inode(ip)) 1070 1073 gfs2_dinode_print(ip); 1071 1074 return -EIO; ··· 1165 1168 return error; 1166 1169 } 1167 1170 1168 - if (!ip->i_di.di_size) { 1171 + if (!ip->i_disksize) { 1169 1172 gfs2_consist_inode(ip); 1170 1173 error = -EIO; 1171 1174 goto out; ··· 1175 1178 if (error) 1176 1179 goto out; 1177 1180 1178 - x = ip->i_di.di_size + 1; 1181 + x = ip->i_disksize + 1; 1179 1182 if (x > *len) { 1180 1183 *buf = kmalloc(x, GFP_NOFS); 1181 1184 if (!*buf) { ··· 1239 1242 1240 1243 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) 1241 1244 { 1242 - const struct gfs2_dinode_host *di = &ip->i_di; 1243 1245 struct gfs2_dinode *str = buf; 1244 
1246 1245 1247 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); ··· 1252 1256 str->di_uid = cpu_to_be32(ip->i_inode.i_uid); 1253 1257 str->di_gid = cpu_to_be32(ip->i_inode.i_gid); 1254 1258 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); 1255 - str->di_size = cpu_to_be64(di->di_size); 1259 + str->di_size = cpu_to_be64(ip->i_disksize); 1256 1260 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); 1257 1261 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); 1258 1262 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); ··· 1260 1264 1261 1265 str->di_goal_meta = cpu_to_be64(ip->i_goal); 1262 1266 str->di_goal_data = cpu_to_be64(ip->i_goal); 1263 - str->di_generation = cpu_to_be64(di->di_generation); 1267 + str->di_generation = cpu_to_be64(ip->i_generation); 1264 1268 1265 - str->di_flags = cpu_to_be32(di->di_flags); 1269 + str->di_flags = cpu_to_be32(ip->i_diskflags); 1266 1270 str->di_height = cpu_to_be16(ip->i_height); 1267 1271 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) && 1268 - !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ? 1272 + !(ip->i_diskflags & GFS2_DIF_EXHASH) ? 1269 1273 GFS2_FORMAT_DE : 0); 1270 1274 str->di_depth = cpu_to_be16(ip->i_depth); 1271 - str->di_entries = cpu_to_be32(di->di_entries); 1275 + str->di_entries = cpu_to_be32(ip->i_entries); 1272 1276 1273 - str->di_eattr = cpu_to_be64(di->di_eattr); 1277 + str->di_eattr = cpu_to_be64(ip->i_eattr); 1274 1278 str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec); 1275 1279 str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec); 1276 1280 str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec); ··· 1278 1282 1279 1283 void gfs2_dinode_print(const struct gfs2_inode *ip) 1280 1284 { 1281 - const struct gfs2_dinode_host *di = &ip->i_di; 1282 - 1283 1285 printk(KERN_INFO " no_formal_ino = %llu\n", 1284 1286 (unsigned long long)ip->i_no_formal_ino); 1285 1287 printk(KERN_INFO " no_addr = %llu\n", 1286 1288 (unsigned long long)ip->i_no_addr); 1287 - printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size); 1289 + printk(KERN_INFO " i_disksize = %llu\n", 1290 + (unsigned long long)ip->i_disksize); 1288 1291 printk(KERN_INFO " blocks = %llu\n", 1289 1292 (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode)); 1290 1293 printk(KERN_INFO " i_goal = %llu\n", 1291 1294 (unsigned long long)ip->i_goal); 1292 - printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags); 1295 + printk(KERN_INFO " i_diskflags = 0x%.8X\n", ip->i_diskflags); 1293 1296 printk(KERN_INFO " i_height = %u\n", ip->i_height); 1294 1297 printk(KERN_INFO " i_depth = %u\n", ip->i_depth); 1295 - printk(KERN_INFO " di_entries = %u\n", di->di_entries); 1296 - printk(KERN_INFO " di_eattr = %llu\n", 1297 - (unsigned long long)di->di_eattr); 1298 + printk(KERN_INFO " i_entries = %u\n", ip->i_entries); 1299 + printk(KERN_INFO " i_eattr = %llu\n", 1300 + (unsigned long long)ip->i_eattr); 1298 1301 } 1299 1302
+12 -1
fs/gfs2/inode.h
···
 #ifndef __INODE_DOT_H__
 #define __INODE_DOT_H__
 
+#include <linux/fs.h>
 #include "util.h"
 
 static inline int gfs2_is_stuffed(const struct gfs2_inode *ip)
···
 
 static inline int gfs2_is_jdata(const struct gfs2_inode *ip)
 {
-	return ip->i_di.di_flags & GFS2_DIF_JDATA;
+	return ip->i_diskflags & GFS2_DIF_JDATA;
 }
 
 static inline int gfs2_is_writeback(const struct gfs2_inode *ip)
···
 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name);
 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf);
 void gfs2_dinode_print(const struct gfs2_inode *ip);
+
+extern const struct inode_operations gfs2_file_iops;
+extern const struct inode_operations gfs2_dir_iops;
+extern const struct inode_operations gfs2_symlink_iops;
+extern const struct file_operations gfs2_file_fops;
+extern const struct file_operations gfs2_dir_fops;
+extern const struct file_operations gfs2_file_fops_nolock;
+extern const struct file_operations gfs2_dir_fops_nolock;
+
+extern void gfs2_set_inode_flags(struct inode *inode);
 
 #endif /* __INODE_DOT_H__ */
 
+10 -2
fs/gfs2/locking/dlm/mount.c
···
 static void gdlm_recovery_done(void *lockspace, unsigned int jid,
 			       unsigned int message)
 {
+	char env_jid[20];
+	char env_status[20];
+	char *envp[] = { env_jid, env_status, NULL };
 	struct gdlm_ls *ls = lockspace;
 	ls->recover_jid_done = jid;
 	ls->recover_jid_status = message;
-	kobject_uevent(&ls->kobj, KOBJ_CHANGE);
+	sprintf(env_jid, "JID=%d", jid);
+	sprintf(env_status, "RECOVERY=%s",
+		message == LM_RD_SUCCESS ? "Done" : "Failed");
+	kobject_uevent_env(&ls->kobj, KOBJ_CHANGE, envp);
 }
 
 static void gdlm_others_may_mount(void *lockspace)
 {
+	char *message = "FIRSTMOUNT=Done";
+	char *envp[] = { message, NULL };
 	struct gdlm_ls *ls = lockspace;
 	ls->first_done = 1;
-	kobject_uevent(&ls->kobj, KOBJ_CHANGE);
+	kobject_uevent_env(&ls->kobj, KOBJ_CHANGE, envp);
 }
 
 /* Userspace gets the offline uevent, blocks new gfs locks on
+15 -1
fs/gfs2/locking/dlm/sysfs.c
··· 195 195 kobject_put(&ls->kobj); 196 196 } 197 197 198 + static int gdlm_uevent(struct kset *kset, struct kobject *kobj, 199 + struct kobj_uevent_env *env) 200 + { 201 + struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj); 202 + add_uevent_var(env, "LOCKTABLE=%s:%s", ls->clustername, ls->fsname); 203 + add_uevent_var(env, "LOCKPROTO=lock_dlm"); 204 + return 0; 205 + } 206 + 207 + static struct kset_uevent_ops gdlm_uevent_ops = { 208 + .uevent = gdlm_uevent, 209 + }; 210 + 211 + 198 212 int gdlm_sysfs_init(void) 199 213 { 200 - gdlm_kset = kset_create_and_add("lock_dlm", NULL, kernel_kobj); 214 + gdlm_kset = kset_create_and_add("lock_dlm", &gdlm_uevent_ops, kernel_kobj); 201 215 if (!gdlm_kset) { 202 216 printk(KERN_WARNING "%s: can not create kset\n", __func__); 203 217 return -ENOMEM;
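Registering the kset with a kset_uevent_ops is what lets LOCKTABLE and LOCKPROTO ride along on every uevent raised from this lockspace: the .uevent callback runs once per event for every kobject in the set, so shared variables are added in one place instead of at each emit site. A minimal sketch of the same pattern, with hypothetical "demo" names:

    #include <linux/kobject.h>

    /* Stamp every uevent emitted from this kset with its origin. */
    static int demo_uevent(struct kset *kset, struct kobject *kobj,
                           struct kobj_uevent_env *env)
    {
            /* add_uevent_var() returns 0 on success, -ENOMEM when full */
            return add_uevent_var(env, "DEMO_ORIGIN=%s", kobject_name(kobj));
    }

    static struct kset_uevent_ops demo_uevent_ops = {
            .uevent = demo_uevent,
    };

    static struct kset *demo_kset;

    static int __init demo_init(void)
    {
            demo_kset = kset_create_and_add("demo", &demo_uevent_ops,
                                            kernel_kobj);
            return demo_kset ? 0 : -ENOMEM;
    }

Userspace listening on the uevent netlink socket (udev, or a cluster control daemon) then sees the extra KEY=value pairs alongside the usual ACTION and DEVPATH.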
+13 -2
fs/gfs2/main.c
··· 19 19 20 20 #include "gfs2.h" 21 21 #include "incore.h" 22 - #include "ops_fstype.h" 22 + #include "super.h" 23 23 #include "sys.h" 24 24 #include "util.h" 25 25 #include "glock.h" ··· 30 30 31 31 inode_init_once(&ip->i_inode); 32 32 init_rwsem(&ip->i_rw_mutex); 33 + INIT_LIST_HEAD(&ip->i_trunc_list); 33 34 ip->i_alloc = NULL; 34 35 } 35 36 ··· 43 42 INIT_LIST_HEAD(&gl->gl_holders); 44 43 gl->gl_lvb = NULL; 45 44 atomic_set(&gl->gl_lvb_count, 0); 46 - INIT_LIST_HEAD(&gl->gl_reclaim); 45 + INIT_LIST_HEAD(&gl->gl_lru); 47 46 INIT_LIST_HEAD(&gl->gl_ail_list); 48 47 atomic_set(&gl->gl_ail_count, 0); 49 48 } ··· 94 93 if (!gfs2_rgrpd_cachep) 95 94 goto fail; 96 95 96 + gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad", 97 + sizeof(struct gfs2_quota_data), 98 + 0, 0, NULL); 99 + if (!gfs2_quotad_cachep) 100 + goto fail; 101 + 97 102 error = register_filesystem(&gfs2_fs_type); 98 103 if (error) 99 104 goto fail; ··· 118 111 unregister_filesystem(&gfs2_fs_type); 119 112 fail: 120 113 gfs2_glock_exit(); 114 + 115 + if (gfs2_quotad_cachep) 116 + kmem_cache_destroy(gfs2_quotad_cachep); 121 117 122 118 if (gfs2_rgrpd_cachep) 123 119 kmem_cache_destroy(gfs2_rgrpd_cachep); ··· 150 140 unregister_filesystem(&gfs2_fs_type); 151 141 unregister_filesystem(&gfs2meta_fs_type); 152 142 143 + kmem_cache_destroy(gfs2_quotad_cachep); 153 144 kmem_cache_destroy(gfs2_rgrpd_cachep); 154 145 kmem_cache_destroy(gfs2_bufdata_cachep); 155 146 kmem_cache_destroy(gfs2_inode_cachep);
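main.c now backs struct gfs2_quota_data with its own slab cache; the quota.c hunks further down switch the call sites from kzalloc/kfree to kmem_cache_zalloc/kmem_cache_free accordingly. The lifecycle follows the usual module pattern: create the cache at init, fail the load if that fails, and destroy it at exit after every object has been freed back. A self-contained sketch with a hypothetical object type:

    #include <linux/module.h>
    #include <linux/slab.h>

    struct demo_item {
            int id;
    };

    static struct kmem_cache *demo_cachep;

    /* Zero-filled allocation, matching the kzalloc calls it replaces. */
    static struct demo_item *demo_alloc(void)
    {
            return kmem_cache_zalloc(demo_cachep, GFP_NOFS);
    }

    static void demo_free(struct demo_item *item)
    {
            kmem_cache_free(demo_cachep, item);
    }

    static int __init demo_init(void)
    {
            demo_cachep = kmem_cache_create("demo_item",
                                            sizeof(struct demo_item),
                                            0, 0, NULL);
            return demo_cachep ? 0 : -ENOMEM;
    }

    static void __exit demo_exit(void)
    {
            /* every object must be back in the cache before this */
            kmem_cache_destroy(demo_cachep);
    }

    module_init(demo_init);
    module_exit(demo_exit);

A dedicated cache also gives the objects their own line in /proc/slabinfo, which is why the cache gets the visible name "gfs2_quotad" rather than coming out of a generic kmalloc bucket.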
+1 -28
fs/gfs2/mount.c
··· 32 32 Opt_debug, 33 33 Opt_nodebug, 34 34 Opt_upgrade, 35 - Opt_num_glockd, 36 35 Opt_acl, 37 36 Opt_noacl, 38 37 Opt_quota_off, ··· 56 57 {Opt_debug, "debug"}, 57 58 {Opt_nodebug, "nodebug"}, 58 59 {Opt_upgrade, "upgrade"}, 59 - {Opt_num_glockd, "num_glockd=%d"}, 60 60 {Opt_acl, "acl"}, 61 61 {Opt_noacl, "noacl"}, 62 62 {Opt_quota_off, "quota=off"}, ··· 85 87 int error = 0; 86 88 87 89 if (!remount) { 88 - /* If someone preloaded options, use those instead */ 89 - spin_lock(&gfs2_sys_margs_lock); 90 - if (gfs2_sys_margs) { 91 - data = gfs2_sys_margs; 92 - gfs2_sys_margs = NULL; 93 - } 94 - spin_unlock(&gfs2_sys_margs_lock); 95 - 96 90 /* Set some defaults */ 97 - args->ar_num_glockd = GFS2_GLOCKD_DEFAULT; 98 91 args->ar_quota = GFS2_QUOTA_DEFAULT; 99 92 args->ar_data = GFS2_DATA_DEFAULT; 100 93 } ··· 94 105 process them */ 95 106 96 107 for (options = data; (o = strsep(&options, ",")); ) { 97 - int token, option; 108 + int token; 98 109 substring_t tmp[MAX_OPT_ARGS]; 99 110 100 111 if (!*o) ··· 184 195 if (remount && !args->ar_upgrade) 185 196 goto cant_remount; 186 197 args->ar_upgrade = 1; 187 - break; 188 - case Opt_num_glockd: 189 - if ((error = match_int(&tmp[0], &option))) { 190 - fs_info(sdp, "problem getting num_glockd\n"); 191 - goto out_error; 192 - } 193 - 194 - if (remount && option != args->ar_num_glockd) 195 - goto cant_remount; 196 - if (!option || option > GFS2_GLOCKD_MAX) { 197 - fs_info(sdp, "0 < num_glockd <= %u (not %u)\n", 198 - GFS2_GLOCKD_MAX, option); 199 - error = -EINVAL; 200 - goto out_error; 201 - } 202 - args->ar_num_glockd = option; 203 198 break; 204 199 case Opt_acl: 205 200 args->ar_posix_acl = 1;
+16 -16
fs/gfs2/ops_address.c
··· 210 210 { 211 211 struct inode *inode = page->mapping->host; 212 212 struct gfs2_sbd *sdp = GFS2_SB(inode); 213 - int error; 213 + int ret; 214 214 int done_trans = 0; 215 - 216 - error = gfs2_writepage_common(page, wbc); 217 - if (error <= 0) 218 - return error; 219 215 220 216 if (PageChecked(page)) { 221 217 if (wbc->sync_mode != WB_SYNC_ALL) 222 218 goto out_ignore; 223 - error = gfs2_trans_begin(sdp, RES_DINODE + 1, 0); 224 - if (error) 219 + ret = gfs2_trans_begin(sdp, RES_DINODE + 1, 0); 220 + if (ret) 225 221 goto out_ignore; 226 222 done_trans = 1; 227 223 } 228 - error = __gfs2_jdata_writepage(page, wbc); 224 + ret = gfs2_writepage_common(page, wbc); 225 + if (ret > 0) 226 + ret = __gfs2_jdata_writepage(page, wbc); 229 227 if (done_trans) 230 228 gfs2_trans_end(sdp); 231 - return error; 229 + return ret; 232 230 233 231 out_ignore: 234 232 redirty_page_for_writepage(wbc, page); ··· 451 453 452 454 kaddr = kmap_atomic(page, KM_USER0); 453 455 memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), 454 - ip->i_di.di_size); 455 - memset(kaddr + ip->i_di.di_size, 0, PAGE_CACHE_SIZE - ip->i_di.di_size); 456 + ip->i_disksize); 457 + memset(kaddr + ip->i_disksize, 0, PAGE_CACHE_SIZE - ip->i_disksize); 456 458 kunmap_atomic(kaddr, KM_USER0); 457 459 flush_dcache_page(page); 458 460 brelse(dibh); ··· 625 627 { 626 628 struct gfs2_inode *ip = GFS2_I(mapping->host); 627 629 struct gfs2_sbd *sdp = GFS2_SB(mapping->host); 628 - unsigned int data_blocks, ind_blocks, rblocks; 630 + unsigned int data_blocks = 0, ind_blocks = 0, rblocks; 629 631 int alloc_required; 630 632 int error = 0; 631 633 struct gfs2_alloc *al; ··· 639 641 if (unlikely(error)) 640 642 goto out_uninit; 641 643 642 - gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); 643 644 error = gfs2_write_alloc_required(ip, pos, len, &alloc_required); 644 645 if (error) 645 646 goto out_unlock; 647 + 648 + if (alloc_required || gfs2_is_jdata(ip)) 649 + gfs2_write_calc_reserv(ip, len, &data_blocks, &ind_blocks); 646 650 647 651 if (alloc_required) { 648 652 al = gfs2_alloc_get(ip); ··· 782 782 783 783 if (inode->i_size < to) { 784 784 i_size_write(inode, to); 785 - ip->i_di.di_size = inode->i_size; 785 + ip->i_disksize = inode->i_size; 786 786 di->di_size = cpu_to_be64(inode->i_size); 787 787 mark_inode_dirty(inode); 788 788 } ··· 847 847 848 848 ret = generic_write_end(file, mapping, pos, len, copied, page, fsdata); 849 849 850 - if (likely(ret >= 0) && (inode->i_size > ip->i_di.di_size)) { 850 + if (likely(ret >= 0) && (inode->i_size > ip->i_disksize)) { 851 851 di = (struct gfs2_dinode *)dibh->b_data; 852 - ip->i_di.di_size = inode->i_size; 852 + ip->i_disksize = inode->i_size; 853 853 di->di_size = cpu_to_be64(inode->i_size); 854 854 mark_inode_dirty(inode); 855 855 }
+1 -1
fs/gfs2/ops_dentry.c
··· 19 19 #include "incore.h" 20 20 #include "dir.h" 21 21 #include "glock.h" 22 - #include "ops_dentry.h" 22 + #include "super.h" 23 23 #include "util.h" 24 24 #include "inode.h" 25 25
-17
fs/gfs2/ops_dentry.h
··· 1 - /* 2 - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 4 - * 5 - * This copyrighted material is made available to anyone wishing to use, 6 - * modify, copy, or redistribute it subject to the terms and conditions 7 - * of the GNU General Public License version 2. 8 - */ 9 - 10 - #ifndef __OPS_DENTRY_DOT_H__ 11 - #define __OPS_DENTRY_DOT_H__ 12 - 13 - #include <linux/dcache.h> 14 - 15 - extern struct dentry_operations gfs2_dops; 16 - 17 - #endif /* __OPS_DENTRY_DOT_H__ */
+2 -3
fs/gfs2/ops_export.c
··· 22 22 #include "glock.h" 23 23 #include "glops.h" 24 24 #include "inode.h" 25 - #include "ops_dentry.h" 26 - #include "ops_fstype.h" 25 + #include "super.h" 27 26 #include "rgrp.h" 28 27 #include "util.h" 29 28 ··· 213 214 } 214 215 215 216 error = -EIO; 216 - if (GFS2_I(inode)->i_di.di_flags & GFS2_DIF_SYSTEM) { 217 + if (GFS2_I(inode)->i_diskflags & GFS2_DIF_SYSTEM) { 217 218 iput(inode); 218 219 goto fail; 219 220 }
+10 -12
fs/gfs2/ops_file.c
··· 39 39 #include "util.h" 40 40 #include "eaops.h" 41 41 #include "ops_address.h" 42 - #include "ops_inode.h" 43 42 44 43 /** 45 44 * gfs2_llseek - seek to a location in a file ··· 157 158 if (error) 158 159 return error; 159 160 160 - fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_di.di_flags); 161 - if (!S_ISDIR(inode->i_mode) && ip->i_di.di_flags & GFS2_DIF_JDATA) 161 + fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags); 162 + if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA) 162 163 fsflags |= FS_JOURNAL_DATA_FL; 163 164 if (put_user(fsflags, ptr)) 164 165 error = -EFAULT; ··· 171 172 void gfs2_set_inode_flags(struct inode *inode) 172 173 { 173 174 struct gfs2_inode *ip = GFS2_I(inode); 174 - struct gfs2_dinode_host *di = &ip->i_di; 175 175 unsigned int flags = inode->i_flags; 176 176 177 177 flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC); 178 - if (di->di_flags & GFS2_DIF_IMMUTABLE) 178 + if (ip->i_diskflags & GFS2_DIF_IMMUTABLE) 179 179 flags |= S_IMMUTABLE; 180 - if (di->di_flags & GFS2_DIF_APPENDONLY) 180 + if (ip->i_diskflags & GFS2_DIF_APPENDONLY) 181 181 flags |= S_APPEND; 182 - if (di->di_flags & GFS2_DIF_NOATIME) 182 + if (ip->i_diskflags & GFS2_DIF_NOATIME) 183 183 flags |= S_NOATIME; 184 - if (di->di_flags & GFS2_DIF_SYNC) 184 + if (ip->i_diskflags & GFS2_DIF_SYNC) 185 185 flags |= S_SYNC; 186 186 inode->i_flags = flags; 187 187 } ··· 219 221 if (error) 220 222 goto out_drop_write; 221 223 222 - flags = ip->i_di.di_flags; 224 + flags = ip->i_diskflags; 223 225 new_flags = (flags & ~mask) | (reqflags & mask); 224 226 if ((new_flags ^ flags) == 0) 225 227 goto out; ··· 258 260 if (error) 259 261 goto out_trans_end; 260 262 gfs2_trans_add_bh(ip->i_gl, bh, 1); 261 - ip->i_di.di_flags = new_flags; 263 + ip->i_diskflags = new_flags; 262 264 gfs2_dinode_out(ip, bh->b_data); 263 265 brelse(bh); 264 266 gfs2_set_inode_flags(inode); ··· 355 357 goto out; 356 358 357 359 set_bit(GIF_SW_PAGED, &ip->i_flags); 358 - gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); 359 360 ret = gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE, &alloc_required); 360 361 if (ret || !alloc_required) 361 362 goto out_unlock; ··· 366 369 ret = gfs2_quota_lock_check(ip); 367 370 if (ret) 368 371 goto out_alloc_put; 372 + gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks); 369 373 al->al_requested = data_blocks + ind_blocks; 370 374 ret = gfs2_inplace_reserve(ip); 371 375 if (ret) ··· 477 479 goto fail; 478 480 479 481 if (!(file->f_flags & O_LARGEFILE) && 480 - ip->i_di.di_size > MAX_NON_LFS) { 482 + ip->i_disksize > MAX_NON_LFS) { 481 483 error = -EOVERFLOW; 482 484 goto fail_gunlock; 483 485 }
+84 -41
fs/gfs2/ops_fstype.c
··· 22 22 #include "gfs2.h" 23 23 #include "incore.h" 24 24 #include "bmap.h" 25 - #include "daemon.h" 26 25 #include "glock.h" 27 26 #include "glops.h" 28 27 #include "inode.h" 29 28 #include "mount.h" 30 - #include "ops_fstype.h" 31 - #include "ops_dentry.h" 32 - #include "ops_super.h" 33 29 #include "recovery.h" 34 30 #include "rgrp.h" 35 31 #include "super.h" 36 32 #include "sys.h" 37 33 #include "util.h" 38 34 #include "log.h" 35 + #include "quota.h" 36 + #include "dir.h" 39 37 40 38 #define DO 0 41 39 #define UNDO 1 ··· 56 58 { 57 59 spin_lock_init(&gt->gt_spin); 58 60 59 - gt->gt_demote_secs = 300; 60 61 gt->gt_incore_log_blocks = 1024; 61 62 gt->gt_log_flush_secs = 60; 62 63 gt->gt_recoverd_secs = 60; 63 64 gt->gt_logd_secs = 1; 64 - gt->gt_quotad_secs = 5; 65 65 gt->gt_quota_simul_sync = 64; 66 66 gt->gt_quota_warn_period = 10; 67 67 gt->gt_quota_scale_num = 1; ··· 87 91 88 92 gfs2_tune_init(&sdp->sd_tune); 89 93 90 - INIT_LIST_HEAD(&sdp->sd_reclaim_list); 91 - spin_lock_init(&sdp->sd_reclaim_lock); 92 - init_waitqueue_head(&sdp->sd_reclaim_wq); 93 - 94 94 mutex_init(&sdp->sd_inum_mutex); 95 95 spin_lock_init(&sdp->sd_statfs_spin); 96 96 ··· 102 110 INIT_LIST_HEAD(&sdp->sd_quota_list); 103 111 spin_lock_init(&sdp->sd_quota_spin); 104 112 mutex_init(&sdp->sd_quota_mutex); 113 + init_waitqueue_head(&sdp->sd_quota_wait); 114 + INIT_LIST_HEAD(&sdp->sd_trunc_list); 115 + spin_lock_init(&sdp->sd_trunc_lock); 105 116 106 117 spin_lock_init(&sdp->sd_log_lock); 107 118 ··· 438 443 static int init_locking(struct gfs2_sbd *sdp, struct gfs2_holder *mount_gh, 439 444 int undo) 440 445 { 441 - struct task_struct *p; 442 446 int error = 0; 443 447 444 448 if (undo) 445 449 goto fail_trans; 446 - 447 - for (sdp->sd_glockd_num = 0; 448 - sdp->sd_glockd_num < sdp->sd_args.ar_num_glockd; 449 - sdp->sd_glockd_num++) { 450 - p = kthread_run(gfs2_glockd, sdp, "gfs2_glockd"); 451 - error = IS_ERR(p); 452 - if (error) { 453 - fs_err(sdp, "can't start glockd thread: %d\n", error); 454 - goto fail; 455 - } 456 - sdp->sd_glockd_process[sdp->sd_glockd_num] = p; 457 - } 458 450 459 451 error = gfs2_glock_nq_num(sdp, 460 452 GFS2_MOUNT_LOCK, &gfs2_nondisk_glops, ··· 475 493 fs_err(sdp, "can't create transaction glock: %d\n", error); 476 494 goto fail_rename; 477 495 } 478 - set_bit(GLF_STICKY, &sdp->sd_trans_gl->gl_flags); 479 496 480 497 return 0; 481 498 ··· 487 506 fail_mount: 488 507 gfs2_glock_dq_uninit(mount_gh); 489 508 fail: 490 - while (sdp->sd_glockd_num--) 491 - kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]); 492 - 493 509 return error; 494 510 } 495 511 ··· 598 620 599 621 prev_db = 0; 600 622 601 - for (lb = 0; lb < ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift; lb++) { 623 + for (lb = 0; lb < ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; lb++) { 602 624 bh.b_state = 0; 603 625 bh.b_blocknr = 0; 604 626 bh.b_size = 1 << ip->i_inode.i_blkbits; ··· 639 661 sdp->sd_lockstruct.ls_lockspace); 640 662 } 641 663 664 + /** 665 + * gfs2_jindex_hold - Grab a lock on the jindex 666 + * @sdp: The GFS2 superblock 667 + * @ji_gh: the holder for the jindex glock 668 + * 669 + * Returns: errno 670 + */ 671 + 672 + static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) 673 + { 674 + struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex); 675 + struct qstr name; 676 + char buf[20]; 677 + struct gfs2_jdesc *jd; 678 + int error; 679 + 680 + name.name = buf; 681 + 682 + mutex_lock(&sdp->sd_jindex_mutex); 683 + 684 + for (;;) { 685 + error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh); 
686 + if (error) 687 + break; 688 + 689 + name.len = sprintf(buf, "journal%u", sdp->sd_journals); 690 + name.hash = gfs2_disk_hash(name.name, name.len); 691 + 692 + error = gfs2_dir_check(sdp->sd_jindex, &name, NULL); 693 + if (error == -ENOENT) { 694 + error = 0; 695 + break; 696 + } 697 + 698 + gfs2_glock_dq_uninit(ji_gh); 699 + 700 + if (error) 701 + break; 702 + 703 + error = -ENOMEM; 704 + jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL); 705 + if (!jd) 706 + break; 707 + 708 + INIT_LIST_HEAD(&jd->extent_list); 709 + jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); 710 + if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { 711 + if (!jd->jd_inode) 712 + error = -ENOENT; 713 + else 714 + error = PTR_ERR(jd->jd_inode); 715 + kfree(jd); 716 + break; 717 + } 718 + 719 + spin_lock(&sdp->sd_jindex_spin); 720 + jd->jd_jid = sdp->sd_journals++; 721 + list_add_tail(&jd->jd_list, &sdp->sd_jindex_list); 722 + spin_unlock(&sdp->sd_jindex_spin); 723 + } 724 + 725 + mutex_unlock(&sdp->sd_jindex_mutex); 726 + 727 + return error; 728 + } 729 + 642 730 static int init_journal(struct gfs2_sbd *sdp, int undo) 643 731 { 644 732 struct inode *master = sdp->sd_master_dir->d_inode; ··· 725 681 return PTR_ERR(sdp->sd_jindex); 726 682 } 727 683 ip = GFS2_I(sdp->sd_jindex); 728 - set_bit(GLF_STICKY, &ip->i_gl->gl_flags); 729 684 730 685 /* Load in the journal index special file */ 731 686 ··· 875 832 goto fail_statfs; 876 833 } 877 834 ip = GFS2_I(sdp->sd_rindex); 878 - set_bit(GLF_STICKY, &ip->i_gl->gl_flags); 879 835 sdp->sd_rindex_uptodate = 0; 880 836 881 837 /* Read in the quota inode */ ··· 1014 972 return error; 1015 973 } 1016 974 sdp->sd_logd_process = p; 1017 - 1018 - sdp->sd_statfs_sync_time = jiffies; 1019 - sdp->sd_quota_sync_time = jiffies; 1020 975 1021 976 p = kthread_run(gfs2_quotad, sdp, "gfs2_quotad"); 1022 977 error = IS_ERR(p); ··· 1263 1224 static void gfs2_kill_sb(struct super_block *sb) 1264 1225 { 1265 1226 struct gfs2_sbd *sdp = sb->s_fs_info; 1266 - if (sdp) { 1267 - gfs2_meta_syncfs(sdp); 1268 - dput(sdp->sd_root_dir); 1269 - dput(sdp->sd_master_dir); 1270 - sdp->sd_root_dir = NULL; 1271 - sdp->sd_master_dir = NULL; 1227 + 1228 + if (sdp == NULL) { 1229 + kill_block_super(sb); 1230 + return; 1272 1231 } 1232 + 1233 + gfs2_meta_syncfs(sdp); 1234 + dput(sdp->sd_root_dir); 1235 + dput(sdp->sd_master_dir); 1236 + sdp->sd_root_dir = NULL; 1237 + sdp->sd_master_dir = NULL; 1273 1238 shrink_dcache_sb(sb); 1274 1239 kill_block_super(sb); 1275 - if (sdp) 1276 - gfs2_delete_debugfs_file(sdp); 1240 + gfs2_delete_debugfs_file(sdp); 1241 + kfree(sdp); 1277 1242 } 1278 1243 1279 1244 struct file_system_type gfs2_fs_type = {
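The gfs2_kill_sb rewrite at the end of this hunk, paired with the removal of kfree(sdp) from put_super in the ops_super.c hunk below, addresses a use-after-free on umount: the superblock private data must stay alive until kill_block_super() and the debugfs teardown are done with it, so it is now freed last, in kill_sb. The ordering, reduced to a sketch with a hypothetical private struct:

    #include <linux/fs.h>
    #include <linux/slab.h>

    struct demo_sbd {
            int placeholder;        /* fs-private state lives here */
    };

    /* s_fs_info must outlive kill_block_super(), so free it last. */
    static void demo_kill_sb(struct super_block *sb)
    {
            struct demo_sbd *sdp = sb->s_fs_info;

            if (sdp == NULL) {
                    kill_block_super(sb);   /* mount never got that far */
                    return;
            }

            /* ... flush and drop the references that still need sdp ... */
            kill_block_super(sb);           /* ->put_super may run here */
            kfree(sdp);                     /* only now is it unreachable */
    }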
-19
fs/gfs2/ops_fstype.h
··· 1 - /* 2 - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 4 - * 5 - * This copyrighted material is made available to anyone wishing to use, 6 - * modify, copy, or redistribute it subject to the terms and conditions 7 - * of the GNU General Public License version 2. 8 - */ 9 - 10 - #ifndef __OPS_FSTYPE_DOT_H__ 11 - #define __OPS_FSTYPE_DOT_H__ 12 - 13 - #include <linux/fs.h> 14 - 15 - extern struct file_system_type gfs2_fs_type; 16 - extern struct file_system_type gfs2meta_fs_type; 17 - extern const struct export_operations gfs2_export_ops; 18 - 19 - #endif /* __OPS_FSTYPE_DOT_H__ */
+60 -15
fs/gfs2/ops_inode.c
··· 19 19 #include <linux/gfs2_ondisk.h> 20 20 #include <linux/crc32.h> 21 21 #include <linux/lm_interface.h> 22 + #include <linux/fiemap.h> 22 23 #include <asm/uaccess.h> 23 24 24 25 #include "gfs2.h" ··· 32 31 #include "glock.h" 33 32 #include "inode.h" 34 33 #include "meta_io.h" 35 - #include "ops_dentry.h" 36 - #include "ops_inode.h" 37 34 #include "quota.h" 38 35 #include "rgrp.h" 39 36 #include "trans.h" 40 37 #include "util.h" 38 + #include "super.h" 41 39 42 40 /** 43 41 * gfs2_create - Create a file ··· 185 185 if (!dip->i_inode.i_nlink) 186 186 goto out_gunlock; 187 187 error = -EFBIG; 188 - if (dip->i_di.di_entries == (u32)-1) 188 + if (dip->i_entries == (u32)-1) 189 189 goto out_gunlock; 190 190 error = -EPERM; 191 191 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) ··· 371 371 372 372 ip = ghs[1].gh_gl->gl_object; 373 373 374 - ip->i_di.di_size = size; 374 + ip->i_disksize = size; 375 375 376 376 error = gfs2_meta_inode_buffer(ip, &dibh); 377 377 ··· 425 425 ip = ghs[1].gh_gl->gl_object; 426 426 427 427 ip->i_inode.i_nlink = 2; 428 - ip->i_di.di_size = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); 429 - ip->i_di.di_flags |= GFS2_DIF_JDATA; 430 - ip->i_di.di_entries = 2; 428 + ip->i_disksize = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode); 429 + ip->i_diskflags |= GFS2_DIF_JDATA; 430 + ip->i_entries = 2; 431 431 432 432 error = gfs2_meta_inode_buffer(ip, &dibh); 433 433 ··· 517 517 if (error) 518 518 goto out_gunlock; 519 519 520 - if (ip->i_di.di_entries < 2) { 520 + if (ip->i_entries < 2) { 521 521 if (gfs2_consist_inode(ip)) 522 522 gfs2_dinode_print(ip); 523 523 error = -EIO; 524 524 goto out_gunlock; 525 525 } 526 - if (ip->i_di.di_entries > 2) { 526 + if (ip->i_entries > 2) { 527 527 error = -ENOTEMPTY; 528 528 goto out_gunlock; 529 529 } ··· 726 726 goto out_gunlock; 727 727 728 728 if (S_ISDIR(nip->i_inode.i_mode)) { 729 - if (nip->i_di.di_entries < 2) { 729 + if (nip->i_entries < 2) { 730 730 if (gfs2_consist_inode(nip)) 731 731 gfs2_dinode_print(nip); 732 732 error = -EIO; 733 733 goto out_gunlock; 734 734 } 735 - if (nip->i_di.di_entries > 2) { 735 + if (nip->i_entries > 2) { 736 736 error = -ENOTEMPTY; 737 737 goto out_gunlock; 738 738 } ··· 758 758 error = -EINVAL; 759 759 goto out_gunlock; 760 760 } 761 - if (ndip->i_di.di_entries == (u32)-1) { 761 + if (ndip->i_entries == (u32)-1) { 762 762 error = -EFBIG; 763 763 goto out_gunlock; 764 764 } ··· 990 990 struct gfs2_sbd *sdp = GFS2_SB(inode); 991 991 int error; 992 992 993 - if (attr->ia_size != ip->i_di.di_size) { 993 + if (attr->ia_size != ip->i_disksize) { 994 994 error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); 995 995 if (error) 996 996 return error; ··· 1001 1001 } 1002 1002 1003 1003 error = gfs2_truncatei(ip, attr->ia_size); 1004 - if (error && (inode->i_size != ip->i_di.di_size)) 1005 - i_size_write(inode, ip->i_di.di_size); 1004 + if (error && (inode->i_size != ip->i_disksize)) 1005 + i_size_write(inode, ip->i_disksize); 1006 1006 1007 1007 return error; 1008 1008 } ··· 1212 1212 return gfs2_ea_remove(GFS2_I(dentry->d_inode), &er); 1213 1213 } 1214 1214 1215 + static int gfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 1216 + u64 start, u64 len) 1217 + { 1218 + struct gfs2_inode *ip = GFS2_I(inode); 1219 + struct gfs2_holder gh; 1220 + int ret; 1221 + 1222 + ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC); 1223 + if (ret) 1224 + return ret; 1225 + 1226 + mutex_lock(&inode->i_mutex); 1227 + 1228 + ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); 1229 + if 
(ret) 1230 + goto out; 1231 + 1232 + if (gfs2_is_stuffed(ip)) { 1233 + u64 phys = ip->i_no_addr << inode->i_blkbits; 1234 + u64 size = i_size_read(inode); 1235 + u32 flags = FIEMAP_EXTENT_LAST|FIEMAP_EXTENT_NOT_ALIGNED| 1236 + FIEMAP_EXTENT_DATA_INLINE; 1237 + phys += sizeof(struct gfs2_dinode); 1238 + phys += start; 1239 + if (start + len > size) 1240 + len = size - start; 1241 + if (start < size) 1242 + ret = fiemap_fill_next_extent(fieinfo, start, phys, 1243 + len, flags); 1244 + if (ret == 1) 1245 + ret = 0; 1246 + } else { 1247 + ret = __generic_block_fiemap(inode, fieinfo, start, len, 1248 + gfs2_block_map); 1249 + } 1250 + 1251 + gfs2_glock_dq_uninit(&gh); 1252 + out: 1253 + mutex_unlock(&inode->i_mutex); 1254 + return ret; 1255 + } 1256 + 1215 1257 const struct inode_operations gfs2_file_iops = { 1216 1258 .permission = gfs2_permission, 1217 1259 .setattr = gfs2_setattr, ··· 1262 1220 .getxattr = gfs2_getxattr, 1263 1221 .listxattr = gfs2_listxattr, 1264 1222 .removexattr = gfs2_removexattr, 1223 + .fiemap = gfs2_fiemap, 1265 1224 }; 1266 1225 1267 1226 const struct inode_operations gfs2_dir_iops = { ··· 1282 1239 .getxattr = gfs2_getxattr, 1283 1240 .listxattr = gfs2_listxattr, 1284 1241 .removexattr = gfs2_removexattr, 1242 + .fiemap = gfs2_fiemap, 1285 1243 }; 1286 1244 1287 1245 const struct inode_operations gfs2_symlink_iops = { ··· 1295 1251 .getxattr = gfs2_getxattr, 1296 1252 .listxattr = gfs2_listxattr, 1297 1253 .removexattr = gfs2_removexattr, 1254 + .fiemap = gfs2_fiemap, 1298 1255 }; 1299 1256
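The stuffed branch of the new gfs2_fiemap is worth unpacking: for a stuffed inode the file contents live inside the dinode block itself, so a single extent is reported with the DATA_INLINE, NOT_ALIGNED and LAST flags, and its physical position is the dinode's disk address in bytes plus the on-disk header. Just that arithmetic, as a sketch:

    #include <linux/types.h>
    #include <linux/gfs2_ondisk.h>

    /* Physical byte offset of stuffed data at logical offset "start":
     * no_addr is the dinode's block number, blkbits converts blocks to
     * bytes, and the data begins right after the gfs2_dinode header. */
    static u64 stuffed_phys(u64 no_addr, unsigned int blkbits, u64 start)
    {
            return (no_addr << blkbits) + sizeof(struct gfs2_dinode) + start;
    }

A return of 1 from fiemap_fill_next_extent() only means the caller's extent array is full; since the inline extent is the last one anyway, gfs2_fiemap folds that case into success.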
-25
fs/gfs2/ops_inode.h
··· 1 - /* 2 - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 4 - * 5 - * This copyrighted material is made available to anyone wishing to use, 6 - * modify, copy, or redistribute it subject to the terms and conditions 7 - * of the GNU General Public License version 2. 8 - */ 9 - 10 - #ifndef __OPS_INODE_DOT_H__ 11 - #define __OPS_INODE_DOT_H__ 12 - 13 - #include <linux/fs.h> 14 - 15 - extern const struct inode_operations gfs2_file_iops; 16 - extern const struct inode_operations gfs2_dir_iops; 17 - extern const struct inode_operations gfs2_symlink_iops; 18 - extern const struct file_operations gfs2_file_fops; 19 - extern const struct file_operations gfs2_dir_fops; 20 - extern const struct file_operations gfs2_file_fops_nolock; 21 - extern const struct file_operations gfs2_dir_fops_nolock; 22 - 23 - extern void gfs2_set_inode_flags(struct inode *inode); 24 - 25 - #endif /* __OPS_INODE_DOT_H__ */
+137 -12
fs/gfs2/ops_super.c
··· 28 28 #include "inode.h" 29 29 #include "log.h" 30 30 #include "mount.h" 31 - #include "ops_super.h" 32 31 #include "quota.h" 33 32 #include "recovery.h" 34 33 #include "rgrp.h" ··· 142 143 kthread_stop(sdp->sd_quotad_process); 143 144 kthread_stop(sdp->sd_logd_process); 144 145 kthread_stop(sdp->sd_recoverd_process); 145 - while (sdp->sd_glockd_num--) 146 - kthread_stop(sdp->sd_glockd_process[sdp->sd_glockd_num]); 147 146 148 147 if (!(sb->s_flags & MS_RDONLY)) { 149 148 error = gfs2_make_fs_ro(sdp); ··· 182 185 183 186 /* At this point, we're through participating in the lockspace */ 184 187 gfs2_sys_fs_del(sdp); 185 - kfree(sdp); 186 188 } 187 189 188 190 /** ··· 253 257 static void gfs2_unlockfs(struct super_block *sb) 254 258 { 255 259 gfs2_unfreeze_fs(sb->s_fs_info); 260 + } 261 + 262 + /** 263 + * statfs_fill - fill in the sg for a given RG 264 + * @rgd: the RG 265 + * @sc: the sc structure 266 + * 267 + * Returns: 0 on success, -ESTALE if the LVB is invalid 268 + */ 269 + 270 + static int statfs_slow_fill(struct gfs2_rgrpd *rgd, 271 + struct gfs2_statfs_change_host *sc) 272 + { 273 + gfs2_rgrp_verify(rgd); 274 + sc->sc_total += rgd->rd_data; 275 + sc->sc_free += rgd->rd_free; 276 + sc->sc_dinodes += rgd->rd_dinodes; 277 + return 0; 278 + } 279 + 280 + /** 281 + * gfs2_statfs_slow - Stat a filesystem using asynchronous locking 282 + * @sdp: the filesystem 283 + * @sc: the sc info that will be returned 284 + * 285 + * Any error (other than a signal) will cause this routine to fall back 286 + * to the synchronous version. 287 + * 288 + * FIXME: This really shouldn't busy wait like this. 289 + * 290 + * Returns: errno 291 + */ 292 + 293 + static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) 294 + { 295 + struct gfs2_holder ri_gh; 296 + struct gfs2_rgrpd *rgd_next; 297 + struct gfs2_holder *gha, *gh; 298 + unsigned int slots = 64; 299 + unsigned int x; 300 + int done; 301 + int error = 0, err; 302 + 303 + memset(sc, 0, sizeof(struct gfs2_statfs_change_host)); 304 + gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL); 305 + if (!gha) 306 + return -ENOMEM; 307 + 308 + error = gfs2_rindex_hold(sdp, &ri_gh); 309 + if (error) 310 + goto out; 311 + 312 + rgd_next = gfs2_rgrpd_get_first(sdp); 313 + 314 + for (;;) { 315 + done = 1; 316 + 317 + for (x = 0; x < slots; x++) { 318 + gh = gha + x; 319 + 320 + if (gh->gh_gl && gfs2_glock_poll(gh)) { 321 + err = gfs2_glock_wait(gh); 322 + if (err) { 323 + gfs2_holder_uninit(gh); 324 + error = err; 325 + } else { 326 + if (!error) 327 + error = statfs_slow_fill( 328 + gh->gh_gl->gl_object, sc); 329 + gfs2_glock_dq_uninit(gh); 330 + } 331 + } 332 + 333 + if (gh->gh_gl) 334 + done = 0; 335 + else if (rgd_next && !error) { 336 + error = gfs2_glock_nq_init(rgd_next->rd_gl, 337 + LM_ST_SHARED, 338 + GL_ASYNC, 339 + gh); 340 + rgd_next = gfs2_rgrpd_get_next(rgd_next); 341 + done = 0; 342 + } 343 + 344 + if (signal_pending(current)) 345 + error = -ERESTARTSYS; 346 + } 347 + 348 + if (done) 349 + break; 350 + 351 + yield(); 352 + } 353 + 354 + gfs2_glock_dq_uninit(&ri_gh); 355 + 356 + out: 357 + kfree(gha); 358 + return error; 359 + } 360 + 361 + /** 362 + * gfs2_statfs_i - Do a statfs 363 + * @sdp: the filesystem 364 + * @sg: the sg structure 365 + * 366 + * Returns: errno 367 + */ 368 + 369 + static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) 370 + { 371 + struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 372 + struct gfs2_statfs_change_host *l_sc = 
&sdp->sd_statfs_local; 373 + 374 + spin_lock(&sdp->sd_statfs_spin); 375 + 376 + *sc = *m_sc; 377 + sc->sc_total += l_sc->sc_total; 378 + sc->sc_free += l_sc->sc_free; 379 + sc->sc_dinodes += l_sc->sc_dinodes; 380 + 381 + spin_unlock(&sdp->sd_statfs_spin); 382 + 383 + if (sc->sc_free < 0) 384 + sc->sc_free = 0; 385 + if (sc->sc_free > sc->sc_total) 386 + sc->sc_free = sc->sc_total; 387 + if (sc->sc_dinodes < 0) 388 + sc->sc_dinodes = 0; 389 + 390 + return 0; 256 391 } 257 392 258 393 /** ··· 497 370 */ 498 371 if (test_bit(GIF_USER, &ip->i_flags)) { 499 372 ip->i_gl->gl_object = NULL; 500 - gfs2_glock_schedule_for_reclaim(ip->i_gl); 501 373 gfs2_glock_put(ip->i_gl); 502 374 ip->i_gl = NULL; 503 375 if (ip->i_iopen_gh.gh_gl) { ··· 549 423 seq_printf(s, ",debug"); 550 424 if (args->ar_upgrade) 551 425 seq_printf(s, ",upgrade"); 552 - if (args->ar_num_glockd != GFS2_GLOCKD_DEFAULT) 553 - seq_printf(s, ",num_glockd=%u", args->ar_num_glockd); 554 426 if (args->ar_posix_acl) 555 427 seq_printf(s, ",acl"); 556 428 if (args->ar_quota != GFS2_QUOTA_DEFAULT) { ··· 618 494 gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, &ip->i_iopen_gh); 619 495 error = gfs2_glock_nq(&ip->i_iopen_gh); 620 496 if (error) 621 - goto out_uninit; 497 + goto out_truncate; 622 498 623 499 if (S_ISDIR(inode->i_mode) && 624 - (ip->i_di.di_flags & GFS2_DIF_EXHASH)) { 500 + (ip->i_diskflags & GFS2_DIF_EXHASH)) { 625 501 error = gfs2_dir_exhash_dealloc(ip); 626 502 if (error) 627 503 goto out_unlock; 628 504 } 629 505 630 - if (ip->i_di.di_eattr) { 506 + if (ip->i_eattr) { 631 507 error = gfs2_ea_dealloc(ip); 632 508 if (error) 633 509 goto out_unlock; ··· 643 519 if (error) 644 520 goto out_unlock; 645 521 522 + out_truncate: 646 523 error = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks); 647 524 if (error) 648 525 goto out_unlock; ··· 652 527 gfs2_trans_end(sdp); 653 528 654 529 out_unlock: 655 - gfs2_glock_dq(&ip->i_iopen_gh); 656 - out_uninit: 530 + if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) 531 + gfs2_glock_dq(&ip->i_iopen_gh); 657 532 gfs2_holder_uninit(&ip->i_iopen_gh); 658 533 gfs2_glock_dq_uninit(&gh); 659 534 if (error && error != GLR_TRYFAILED)
-17
fs/gfs2/ops_super.h
··· 1 - /* 2 - * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 - * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 4 - * 5 - * This copyrighted material is made available to anyone wishing to use, 6 - * modify, copy, or redistribute it subject to the terms and conditions 7 - * of the GNU General Public License version 2. 8 - */ 9 - 10 - #ifndef __OPS_SUPER_DOT_H__ 11 - #define __OPS_SUPER_DOT_H__ 12 - 13 - #include <linux/fs.h> 14 - 15 - extern const struct super_operations gfs2_super_ops; 16 - 17 - #endif /* __OPS_SUPER_DOT_H__ */
+103 -10
fs/gfs2/quota.c
··· 46 46 #include <linux/bio.h> 47 47 #include <linux/gfs2_ondisk.h> 48 48 #include <linux/lm_interface.h> 49 + #include <linux/kthread.h> 50 + #include <linux/freezer.h> 49 51 50 52 #include "gfs2.h" 51 53 #include "incore.h" ··· 96 94 struct gfs2_quota_data *qd; 97 95 int error; 98 96 99 - qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_NOFS); 97 + qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS); 100 98 if (!qd) 101 99 return -ENOMEM; 102 100 ··· 121 119 return 0; 122 120 123 121 fail: 124 - kfree(qd); 122 + kmem_cache_free(gfs2_quotad_cachep, qd); 125 123 return error; 126 124 } 127 125 ··· 160 158 if (qd || !create) { 161 159 if (new_qd) { 162 160 gfs2_lvb_unhold(new_qd->qd_gl); 163 - kfree(new_qd); 161 + kmem_cache_free(gfs2_quotad_cachep, new_qd); 164 162 } 165 163 *qdp = qd; 166 164 return 0; ··· 1015 1013 1016 1014 if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change)) 1017 1015 return; 1018 - if (ip->i_di.di_flags & GFS2_DIF_SYSTEM) 1016 + if (ip->i_diskflags & GFS2_DIF_SYSTEM) 1019 1017 return; 1020 1018 1021 1019 for (x = 0; x < al->al_qd_num; x++) { ··· 1102 1100 int gfs2_quota_init(struct gfs2_sbd *sdp) 1103 1101 { 1104 1102 struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode); 1105 - unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift; 1103 + unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; 1106 1104 unsigned int x, slot = 0; 1107 1105 unsigned int found = 0; 1108 1106 u64 dblock; 1109 1107 u32 extlen = 0; 1110 1108 int error; 1111 1109 1112 - if (!ip->i_di.di_size || ip->i_di.di_size > (64 << 20) || 1113 - ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) { 1110 + if (!ip->i_disksize || ip->i_disksize > (64 << 20) || 1111 + ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) { 1114 1112 gfs2_consist_inode(ip); 1115 1113 return -EIO; 1116 1114 } ··· 1197 1195 return error; 1198 1196 } 1199 1197 1200 - void gfs2_quota_scan(struct gfs2_sbd *sdp) 1198 + static void gfs2_quota_scan(struct gfs2_sbd *sdp) 1201 1199 { 1202 1200 struct gfs2_quota_data *qd, *safe; 1203 1201 LIST_HEAD(dead); ··· 1224 1222 gfs2_assert_warn(sdp, !qd->qd_bh_count); 1225 1223 1226 1224 gfs2_lvb_unhold(qd->qd_gl); 1227 - kfree(qd); 1225 + kmem_cache_free(gfs2_quotad_cachep, qd); 1228 1226 } 1229 1227 } 1230 1228 ··· 1259 1257 gfs2_assert_warn(sdp, !qd->qd_bh_count); 1260 1258 1261 1259 gfs2_lvb_unhold(qd->qd_gl); 1262 - kfree(qd); 1260 + kmem_cache_free(gfs2_quotad_cachep, qd); 1263 1261 1264 1262 spin_lock(&sdp->sd_quota_spin); 1265 1263 } ··· 1272 1270 kfree(sdp->sd_quota_bitmap[x]); 1273 1271 kfree(sdp->sd_quota_bitmap); 1274 1272 } 1273 + } 1274 + 1275 + static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error) 1276 + { 1277 + if (error == 0 || error == -EROFS) 1278 + return; 1279 + if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) 1280 + fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error); 1281 + } 1282 + 1283 + static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, 1284 + int (*fxn)(struct gfs2_sbd *sdp), 1285 + unsigned long t, unsigned long *timeo, 1286 + unsigned int *new_timeo) 1287 + { 1288 + if (t >= *timeo) { 1289 + int error = fxn(sdp); 1290 + quotad_error(sdp, msg, error); 1291 + *timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ; 1292 + } else { 1293 + *timeo -= t; 1294 + } 1295 + } 1296 + 1297 + static void quotad_check_trunc_list(struct gfs2_sbd *sdp) 1298 + { 1299 + struct gfs2_inode *ip; 1300 + 1301 + while(1) { 1302 + ip = NULL; 1303 + spin_lock(&sdp->sd_trunc_lock); 1304 + if (!list_empty(&sdp->sd_trunc_list)) { 1305 + ip = 
list_entry(sdp->sd_trunc_list.next, 1306 + struct gfs2_inode, i_trunc_list); 1307 + list_del_init(&ip->i_trunc_list); 1308 + } 1309 + spin_unlock(&sdp->sd_trunc_lock); 1310 + if (ip == NULL) 1311 + return; 1312 + gfs2_glock_finish_truncate(ip); 1313 + } 1314 + } 1315 + 1316 + /** 1317 + * gfs2_quotad - Write cached quota changes into the quota file 1318 + * @sdp: Pointer to GFS2 superblock 1319 + * 1320 + */ 1321 + 1322 + int gfs2_quotad(void *data) 1323 + { 1324 + struct gfs2_sbd *sdp = data; 1325 + struct gfs2_tune *tune = &sdp->sd_tune; 1326 + unsigned long statfs_timeo = 0; 1327 + unsigned long quotad_timeo = 0; 1328 + unsigned long t = 0; 1329 + DEFINE_WAIT(wait); 1330 + int empty; 1331 + 1332 + while (!kthread_should_stop()) { 1333 + 1334 + /* Update the master statfs file */ 1335 + quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t, 1336 + &statfs_timeo, &tune->gt_statfs_quantum); 1337 + 1338 + /* Update quota file */ 1339 + quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, 1340 + &quotad_timeo, &tune->gt_quota_quantum); 1341 + 1342 + /* FIXME: This should be turned into a shrinker */ 1343 + gfs2_quota_scan(sdp); 1344 + 1345 + /* Check for & recover partially truncated inodes */ 1346 + quotad_check_trunc_list(sdp); 1347 + 1348 + if (freezing(current)) 1349 + refrigerator(); 1350 + t = min(quotad_timeo, statfs_timeo); 1351 + 1352 + prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_UNINTERRUPTIBLE); 1353 + spin_lock(&sdp->sd_trunc_lock); 1354 + empty = list_empty(&sdp->sd_trunc_list); 1355 + spin_unlock(&sdp->sd_trunc_lock); 1356 + if (empty) 1357 + t -= schedule_timeout(t); 1358 + else 1359 + t = 0; 1360 + finish_wait(&sdp->sd_quota_wait, &wait); 1361 + } 1362 + 1363 + return 0; 1275 1364 } 1276 1365
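quotad_check_timeo is the scheduler of the merged daemon: gfs2_quotad now absorbs the statfs sync, quota sync, quota scanning and truncate-recovery work in one loop, subtracting the jiffies actually slept from a per-job countdown each pass, running any job whose countdown has expired, and reloading that countdown from its tunable. The pattern in isolation, with hypothetical names:

    /* Countdown-driven periodic job; t is the jiffies slept last round. */
    static void check_timeo(unsigned long t, unsigned long *timeo,
                            unsigned long period, void (*job)(void))
    {
            if (t >= *timeo) {
                    job();
                    *timeo = period;        /* job ran: reload countdown */
            } else {
                    *timeo -= t;            /* not due yet: count down */
            }
    }

Sleeping for min(quotad_timeo, statfs_timeo) then wakes the thread exactly when the nearest job falls due, and the sd_trunc_list check cuts the sleep to zero when a partially truncated inode is waiting.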
+12 -12
fs/gfs2/quota.h
··· 15 15 16 16 #define NO_QUOTA_CHANGE ((u32)-1) 17 17 18 - int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid); 19 - void gfs2_quota_unhold(struct gfs2_inode *ip); 18 + extern int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid); 19 + extern void gfs2_quota_unhold(struct gfs2_inode *ip); 20 20 21 - int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid); 22 - void gfs2_quota_unlock(struct gfs2_inode *ip); 21 + extern int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid); 22 + extern void gfs2_quota_unlock(struct gfs2_inode *ip); 23 23 24 - int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); 25 - void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 26 - u32 uid, u32 gid); 24 + extern int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid); 25 + extern void gfs2_quota_change(struct gfs2_inode *ip, s64 change, 26 + u32 uid, u32 gid); 27 27 28 - int gfs2_quota_sync(struct gfs2_sbd *sdp); 29 - int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); 28 + extern int gfs2_quota_sync(struct gfs2_sbd *sdp); 29 + extern int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id); 30 30 31 - int gfs2_quota_init(struct gfs2_sbd *sdp); 32 - void gfs2_quota_scan(struct gfs2_sbd *sdp); 33 - void gfs2_quota_cleanup(struct gfs2_sbd *sdp); 31 + extern int gfs2_quota_init(struct gfs2_sbd *sdp); 32 + extern void gfs2_quota_cleanup(struct gfs2_sbd *sdp); 33 + extern int gfs2_quotad(void *data); 34 34 35 35 static inline int gfs2_quota_lock_check(struct gfs2_inode *ip) 36 36 {
+47 -1
fs/gfs2/recovery.c
··· 14 14 #include <linux/gfs2_ondisk.h> 15 15 #include <linux/crc32.h> 16 16 #include <linux/lm_interface.h> 17 + #include <linux/kthread.h> 18 + #include <linux/freezer.h> 17 19 18 20 #include "gfs2.h" 19 21 #include "incore.h" ··· 585 583 return error; 586 584 } 587 585 586 + static struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp) 587 + { 588 + struct gfs2_jdesc *jd; 589 + int found = 0; 590 + 591 + spin_lock(&sdp->sd_jindex_spin); 592 + 593 + list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { 594 + if (jd->jd_dirty) { 595 + jd->jd_dirty = 0; 596 + found = 1; 597 + break; 598 + } 599 + } 600 + spin_unlock(&sdp->sd_jindex_spin); 601 + 602 + if (!found) 603 + jd = NULL; 604 + 605 + return jd; 606 + } 607 + 588 608 /** 589 609 * gfs2_check_journals - Recover any dirty journals 590 610 * @sdp: the filesystem 591 611 * 592 612 */ 593 613 594 - void gfs2_check_journals(struct gfs2_sbd *sdp) 614 + static void gfs2_check_journals(struct gfs2_sbd *sdp) 595 615 { 596 616 struct gfs2_jdesc *jd; 597 617 ··· 625 601 if (jd != sdp->sd_jdesc) 626 602 gfs2_recover_journal(jd); 627 603 } 604 + } 605 + 606 + /** 607 + * gfs2_recoverd - Recover dead machine's journals 608 + * @sdp: Pointer to GFS2 superblock 609 + * 610 + */ 611 + 612 + int gfs2_recoverd(void *data) 613 + { 614 + struct gfs2_sbd *sdp = data; 615 + unsigned long t; 616 + 617 + while (!kthread_should_stop()) { 618 + gfs2_check_journals(sdp); 619 + t = gfs2_tune_get(sdp, gt_recoverd_secs) * HZ; 620 + if (freezing(current)) 621 + refrigerator(); 622 + schedule_timeout_interruptible(t); 623 + } 624 + 625 + return 0; 628 626 } 629 627
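gfs2_recoverd, now a static kthread body here in recovery.c, follows the same freezable-kthread shape as gfs2_quotad above: do the work, enter the refrigerator while the system is freezing tasks for suspend, sleep for the tuned interval, and exit once kthread_stop() is called. The skeleton, assuming a fixed 60-second period:

    #include <linux/kthread.h>
    #include <linux/freezer.h>

    static int demo_daemon(void *data)
    {
            while (!kthread_should_stop()) {
                    /* ... periodic work ... */
                    if (freezing(current))
                            refrigerator();
                    schedule_timeout_interruptible(60 * HZ);
            }
            return 0;
    }

Such a thread is started with kthread_run(demo_daemon, sdp, "demo_daemon") and stopped with kthread_stop(), which wakes the sleep and makes kthread_should_stop() return true.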
+7 -7
fs/gfs2/recovery.h
··· 18 18 *blk = 0; 19 19 } 20 20 21 - int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, 21 + extern int gfs2_replay_read_block(struct gfs2_jdesc *jd, unsigned int blk, 22 22 struct buffer_head **bh); 23 23 24 - int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); 25 - int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); 26 - void gfs2_revoke_clean(struct gfs2_sbd *sdp); 24 + extern int gfs2_revoke_add(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); 25 + extern int gfs2_revoke_check(struct gfs2_sbd *sdp, u64 blkno, unsigned int where); 26 + extern void gfs2_revoke_clean(struct gfs2_sbd *sdp); 27 27 28 - int gfs2_find_jhead(struct gfs2_jdesc *jd, 28 + extern int gfs2_find_jhead(struct gfs2_jdesc *jd, 29 29 struct gfs2_log_header_host *head); 30 - int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); 31 - void gfs2_check_journals(struct gfs2_sbd *sdp); 30 + extern int gfs2_recover_journal(struct gfs2_jdesc *gfs2_jd); 31 + extern int gfs2_recoverd(void *data); 32 32 33 33 #endif /* __RECOVERY_DOT_H__ */ 34 34
+27 -31
fs/gfs2/rgrp.c
··· 269 269 bi->bi_len, x); 270 270 } 271 271 272 - if (count[0] != rgd->rd_rg.rg_free) { 272 + if (count[0] != rgd->rd_free) { 273 273 if (gfs2_consist_rgrpd(rgd)) 274 274 fs_err(sdp, "free data mismatch: %u != %u\n", 275 - count[0], rgd->rd_rg.rg_free); 275 + count[0], rgd->rd_free); 276 276 return; 277 277 } 278 278 279 - tmp = rgd->rd_data - 280 - rgd->rd_rg.rg_free - 281 - rgd->rd_rg.rg_dinodes; 279 + tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes; 282 280 if (count[1] + count[2] != tmp) { 283 281 if (gfs2_consist_rgrpd(rgd)) 284 282 fs_err(sdp, "used data mismatch: %u != %u\n", ··· 284 286 return; 285 287 } 286 288 287 - if (count[3] != rgd->rd_rg.rg_dinodes) { 289 + if (count[3] != rgd->rd_dinodes) { 288 290 if (gfs2_consist_rgrpd(rgd)) 289 291 fs_err(sdp, "used metadata mismatch: %u != %u\n", 290 - count[3], rgd->rd_rg.rg_dinodes); 292 + count[3], rgd->rd_dinodes); 291 293 return; 292 294 } 293 295 ··· 499 501 for (rgrps = 0;; rgrps++) { 500 502 loff_t pos = rgrps * sizeof(struct gfs2_rindex); 501 503 502 - if (pos + sizeof(struct gfs2_rindex) >= ip->i_di.di_size) 504 + if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize) 503 505 break; 504 506 error = gfs2_internal_read(ip, &ra_state, buf, &pos, 505 507 sizeof(struct gfs2_rindex)); ··· 588 590 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 589 591 struct inode *inode = &ip->i_inode; 590 592 struct file_ra_state ra_state; 591 - u64 rgrp_count = ip->i_di.di_size; 593 + u64 rgrp_count = ip->i_disksize; 592 594 int error; 593 595 594 596 if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) { ··· 632 634 for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) { 633 635 /* Ignore partials */ 634 636 if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) > 635 - ip->i_di.di_size) 637 + ip->i_disksize) 636 638 break; 637 639 error = read_rindex_entry(ip, &ra_state); 638 640 if (error) { ··· 690 692 static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf) 691 693 { 692 694 const struct gfs2_rgrp *str = buf; 693 - struct gfs2_rgrp_host *rg = &rgd->rd_rg; 694 695 u32 rg_flags; 695 696 696 697 rg_flags = be32_to_cpu(str->rg_flags); ··· 697 700 rgd->rd_flags |= GFS2_RDF_NOALLOC; 698 701 else 699 702 rgd->rd_flags &= ~GFS2_RDF_NOALLOC; 700 - rg->rg_free = be32_to_cpu(str->rg_free); 701 - rg->rg_dinodes = be32_to_cpu(str->rg_dinodes); 702 - rg->rg_igeneration = be64_to_cpu(str->rg_igeneration); 703 + rgd->rd_free = be32_to_cpu(str->rg_free); 704 + rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes); 705 + rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration); 703 706 } 704 707 705 708 static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf) 706 709 { 707 710 struct gfs2_rgrp *str = buf; 708 - struct gfs2_rgrp_host *rg = &rgd->rd_rg; 709 711 u32 rg_flags = 0; 710 712 711 713 if (rgd->rd_flags & GFS2_RDF_NOALLOC) 712 714 rg_flags |= GFS2_RGF_NOALLOC; 713 715 str->rg_flags = cpu_to_be32(rg_flags); 714 - str->rg_free = cpu_to_be32(rg->rg_free); 715 - str->rg_dinodes = cpu_to_be32(rg->rg_dinodes); 716 + str->rg_free = cpu_to_be32(rgd->rd_free); 717 + str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes); 716 718 str->__pad = cpu_to_be32(0); 717 - str->rg_igeneration = cpu_to_be64(rg->rg_igeneration); 719 + str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration); 718 720 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved)); 719 721 } 720 722 ··· 772 776 } 773 777 774 778 spin_lock(&sdp->sd_rindex_spin); 775 - rgd->rd_free_clone = rgd->rd_rg.rg_free; 779 + rgd->rd_free_clone = rgd->rd_free; 776 780 rgd->rd_bh_count++; 777 781 
spin_unlock(&sdp->sd_rindex_spin); 778 782 ··· 846 850 } 847 851 848 852 spin_lock(&sdp->sd_rindex_spin); 849 - rgd->rd_free_clone = rgd->rd_rg.rg_free; 853 + rgd->rd_free_clone = rgd->rd_free; 850 854 spin_unlock(&sdp->sd_rindex_spin); 851 855 } 852 856 ··· 1399 1403 block = rgd->rd_data0 + blk; 1400 1404 ip->i_goal = block; 1401 1405 1402 - gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free >= *n); 1403 - rgd->rd_rg.rg_free -= *n; 1406 + gfs2_assert_withdraw(sdp, rgd->rd_free >= *n); 1407 + rgd->rd_free -= *n; 1404 1408 1405 1409 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1406 1410 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); ··· 1441 1445 1442 1446 block = rgd->rd_data0 + blk; 1443 1447 1444 - gfs2_assert_withdraw(sdp, rgd->rd_rg.rg_free); 1445 - rgd->rd_rg.rg_free--; 1446 - rgd->rd_rg.rg_dinodes++; 1447 - *generation = rgd->rd_rg.rg_igeneration++; 1448 + gfs2_assert_withdraw(sdp, rgd->rd_free); 1449 + rgd->rd_free--; 1450 + rgd->rd_dinodes++; 1451 + *generation = rgd->rd_igeneration++; 1448 1452 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1449 1453 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); 1450 1454 ··· 1477 1481 if (!rgd) 1478 1482 return; 1479 1483 1480 - rgd->rd_rg.rg_free += blen; 1484 + rgd->rd_free += blen; 1481 1485 1482 1486 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1483 1487 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); ··· 1505 1509 if (!rgd) 1506 1510 return; 1507 1511 1508 - rgd->rd_rg.rg_free += blen; 1512 + rgd->rd_free += blen; 1509 1513 1510 1514 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1511 1515 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data); ··· 1542 1546 return; 1543 1547 gfs2_assert_withdraw(sdp, rgd == tmp_rgd); 1544 1548 1545 - if (!rgd->rd_rg.rg_dinodes) 1549 + if (!rgd->rd_dinodes) 1546 1550 gfs2_consist_rgrpd(rgd); 1547 - rgd->rd_rg.rg_dinodes--; 1548 - rgd->rd_rg.rg_free++; 1551 + rgd->rd_dinodes--; 1552 + rgd->rd_free++; 1549 1553 1550 1554 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1); 1551 1555 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
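These rgrp.c hunks are the mechanical side of removing the rgrpd host structure: the nested rd_rg is dissolved and its three fields are promoted into struct gfs2_rgrpd itself, turning every rgd->rd_rg.rg_free into rgd->rd_free. An abridged, illustrative sketch of the resulting shape (the real struct carries many more fields):

    #include <linux/types.h>

    struct gfs2_rgrpd_sketch {
            u32 rd_data;            /* total data blocks in the rgrp */
            u32 rd_free;            /* was rd_rg.rg_free */
            u32 rd_dinodes;         /* was rd_rg.rg_dinodes */
            u64 rd_igeneration;     /* was rd_rg.rg_igeneration */
    };

Dropping the host struct removes a level of naming and lets the endianness conversions in gfs2_rgrp_in()/gfs2_rgrp_out() read straight into the in-core fields.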
+4 -242
fs/gfs2/super.c
··· 34 34 #include "util.h" 35 35 36 36 /** 37 - * gfs2_jindex_hold - Grab a lock on the jindex 38 - * @sdp: The GFS2 superblock 39 - * @ji_gh: the holder for the jindex glock 40 - * 41 - * This is very similar to the gfs2_rindex_hold() function, except that 42 - * in general we hold the jindex lock for longer periods of time and 43 - * we grab it far less frequently (in general) then the rgrp lock. 44 - * 45 - * Returns: errno 46 - */ 47 - 48 - int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh) 49 - { 50 - struct gfs2_inode *dip = GFS2_I(sdp->sd_jindex); 51 - struct qstr name; 52 - char buf[20]; 53 - struct gfs2_jdesc *jd; 54 - int error; 55 - 56 - name.name = buf; 57 - 58 - mutex_lock(&sdp->sd_jindex_mutex); 59 - 60 - for (;;) { 61 - error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh); 62 - if (error) 63 - break; 64 - 65 - name.len = sprintf(buf, "journal%u", sdp->sd_journals); 66 - name.hash = gfs2_disk_hash(name.name, name.len); 67 - 68 - error = gfs2_dir_check(sdp->sd_jindex, &name, NULL); 69 - if (error == -ENOENT) { 70 - error = 0; 71 - break; 72 - } 73 - 74 - gfs2_glock_dq_uninit(ji_gh); 75 - 76 - if (error) 77 - break; 78 - 79 - error = -ENOMEM; 80 - jd = kzalloc(sizeof(struct gfs2_jdesc), GFP_KERNEL); 81 - if (!jd) 82 - break; 83 - 84 - INIT_LIST_HEAD(&jd->extent_list); 85 - jd->jd_inode = gfs2_lookupi(sdp->sd_jindex, &name, 1); 86 - if (!jd->jd_inode || IS_ERR(jd->jd_inode)) { 87 - if (!jd->jd_inode) 88 - error = -ENOENT; 89 - else 90 - error = PTR_ERR(jd->jd_inode); 91 - kfree(jd); 92 - break; 93 - } 94 - 95 - spin_lock(&sdp->sd_jindex_spin); 96 - jd->jd_jid = sdp->sd_journals++; 97 - list_add_tail(&jd->jd_list, &sdp->sd_jindex_list); 98 - spin_unlock(&sdp->sd_jindex_spin); 99 - } 100 - 101 - mutex_unlock(&sdp->sd_jindex_mutex); 102 - 103 - return error; 104 - } 105 - 106 - /** 107 37 * gfs2_jindex_free - Clear all the journal index information 108 38 * @sdp: The GFS2 superblock 109 39 * ··· 96 166 return jd; 97 167 } 98 168 99 - void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid) 100 - { 101 - struct gfs2_jdesc *jd; 102 - 103 - spin_lock(&sdp->sd_jindex_spin); 104 - jd = jdesc_find_i(&sdp->sd_jindex_list, jid); 105 - if (jd) 106 - jd->jd_dirty = 1; 107 - spin_unlock(&sdp->sd_jindex_spin); 108 - } 109 - 110 - struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp) 111 - { 112 - struct gfs2_jdesc *jd; 113 - int found = 0; 114 - 115 - spin_lock(&sdp->sd_jindex_spin); 116 - 117 - list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { 118 - if (jd->jd_dirty) { 119 - jd->jd_dirty = 0; 120 - found = 1; 121 - break; 122 - } 123 - } 124 - spin_unlock(&sdp->sd_jindex_spin); 125 - 126 - if (!found) 127 - jd = NULL; 128 - 129 - return jd; 130 - } 131 - 132 169 int gfs2_jdesc_check(struct gfs2_jdesc *jd) 133 170 { 134 171 struct gfs2_inode *ip = GFS2_I(jd->jd_inode); ··· 103 206 int ar; 104 207 int error; 105 208 106 - if (ip->i_di.di_size < (8 << 20) || ip->i_di.di_size > (1 << 30) || 107 - (ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1))) { 209 + if (ip->i_disksize < (8 << 20) || ip->i_disksize > (1 << 30) || 210 + (ip->i_disksize & (sdp->sd_sb.sb_bsize - 1))) { 108 211 gfs2_consist_inode(ip); 109 212 return -EIO; 110 213 } 111 - jd->jd_blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift; 214 + jd->jd_blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift; 112 215 113 - error = gfs2_write_alloc_required(ip, 0, ip->i_di.di_size, &ar); 216 + error = gfs2_write_alloc_required(ip, 0, ip->i_disksize, &ar); 114 217 if (!error && ar) { 115 218 
gfs2_consist_inode(ip); 116 219 error = -EIO; ··· 320 423 return error; 321 424 } 322 425 323 - /** 324 - * gfs2_statfs_i - Do a statfs 325 - * @sdp: the filesystem 326 - * @sg: the sg structure 327 - * 328 - * Returns: errno 329 - */ 330 - 331 - int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) 332 - { 333 - struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master; 334 - struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local; 335 - 336 - spin_lock(&sdp->sd_statfs_spin); 337 - 338 - *sc = *m_sc; 339 - sc->sc_total += l_sc->sc_total; 340 - sc->sc_free += l_sc->sc_free; 341 - sc->sc_dinodes += l_sc->sc_dinodes; 342 - 343 - spin_unlock(&sdp->sd_statfs_spin); 344 - 345 - if (sc->sc_free < 0) 346 - sc->sc_free = 0; 347 - if (sc->sc_free > sc->sc_total) 348 - sc->sc_free = sc->sc_total; 349 - if (sc->sc_dinodes < 0) 350 - sc->sc_dinodes = 0; 351 - 352 - return 0; 353 - } 354 - 355 - /** 356 - * statfs_fill - fill in the sg for a given RG 357 - * @rgd: the RG 358 - * @sc: the sc structure 359 - * 360 - * Returns: 0 on success, -ESTALE if the LVB is invalid 361 - */ 362 - 363 - static int statfs_slow_fill(struct gfs2_rgrpd *rgd, 364 - struct gfs2_statfs_change_host *sc) 365 - { 366 - gfs2_rgrp_verify(rgd); 367 - sc->sc_total += rgd->rd_data; 368 - sc->sc_free += rgd->rd_rg.rg_free; 369 - sc->sc_dinodes += rgd->rd_rg.rg_dinodes; 370 - return 0; 371 - } 372 - 373 - /** 374 - * gfs2_statfs_slow - Stat a filesystem using asynchronous locking 375 - * @sdp: the filesystem 376 - * @sc: the sc info that will be returned 377 - * 378 - * Any error (other than a signal) will cause this routine to fall back 379 - * to the synchronous version. 380 - * 381 - * FIXME: This really shouldn't busy wait like this. 382 - * 383 - * Returns: errno 384 - */ 385 - 386 - int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc) 387 - { 388 - struct gfs2_holder ri_gh; 389 - struct gfs2_rgrpd *rgd_next; 390 - struct gfs2_holder *gha, *gh; 391 - unsigned int slots = 64; 392 - unsigned int x; 393 - int done; 394 - int error = 0, err; 395 - 396 - memset(sc, 0, sizeof(struct gfs2_statfs_change_host)); 397 - gha = kcalloc(slots, sizeof(struct gfs2_holder), GFP_KERNEL); 398 - if (!gha) 399 - return -ENOMEM; 400 - 401 - error = gfs2_rindex_hold(sdp, &ri_gh); 402 - if (error) 403 - goto out; 404 - 405 - rgd_next = gfs2_rgrpd_get_first(sdp); 406 - 407 - for (;;) { 408 - done = 1; 409 - 410 - for (x = 0; x < slots; x++) { 411 - gh = gha + x; 412 - 413 - if (gh->gh_gl && gfs2_glock_poll(gh)) { 414 - err = gfs2_glock_wait(gh); 415 - if (err) { 416 - gfs2_holder_uninit(gh); 417 - error = err; 418 - } else { 419 - if (!error) 420 - error = statfs_slow_fill( 421 - gh->gh_gl->gl_object, sc); 422 - gfs2_glock_dq_uninit(gh); 423 - } 424 - } 425 - 426 - if (gh->gh_gl) 427 - done = 0; 428 - else if (rgd_next && !error) { 429 - error = gfs2_glock_nq_init(rgd_next->rd_gl, 430 - LM_ST_SHARED, 431 - GL_ASYNC, 432 - gh); 433 - rgd_next = gfs2_rgrpd_get_next(rgd_next); 434 - done = 0; 435 - } 436 - 437 - if (signal_pending(current)) 438 - error = -ERESTARTSYS; 439 - } 440 - 441 - if (done) 442 - break; 443 - 444 - yield(); 445 - } 446 - 447 - gfs2_glock_dq_uninit(&ri_gh); 448 - 449 - out: 450 - kfree(gha); 451 - return error; 452 - } 453 - 454 426 struct lfcc { 455 427 struct list_head list; 456 428 struct gfs2_holder gh; ··· 345 579 LIST_HEAD(list); 346 580 struct gfs2_log_header_host lh; 347 581 int error; 348 - 349 - error = gfs2_jindex_hold(sdp, &ji_gh); 350 - if (error) 351 - return 
error; 352 582 353 583 list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) { 354 584 lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
+8 -5
fs/gfs2/super.h
··· 10 10 #ifndef __SUPER_DOT_H__ 11 11 #define __SUPER_DOT_H__ 12 12 13 + #include <linux/fs.h> 14 + #include <linux/dcache.h> 13 15 #include "incore.h" 14 16 15 17 void gfs2_lm_unmount(struct gfs2_sbd *sdp); ··· 25 23 return x; 26 24 } 27 25 28 - int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh); 29 26 void gfs2_jindex_free(struct gfs2_sbd *sdp); 30 27 31 28 struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid); 32 - void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid); 33 - struct gfs2_jdesc *gfs2_jdesc_find_dirty(struct gfs2_sbd *sdp); 34 29 int gfs2_jdesc_check(struct gfs2_jdesc *jd); 35 30 36 31 int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename, ··· 39 40 void gfs2_statfs_change(struct gfs2_sbd *sdp, 40 41 s64 total, s64 free, s64 dinodes); 41 42 int gfs2_statfs_sync(struct gfs2_sbd *sdp); 42 - int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc); 43 - int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc); 44 43 45 44 int gfs2_freeze_fs(struct gfs2_sbd *sdp); 46 45 void gfs2_unfreeze_fs(struct gfs2_sbd *sdp); 46 + 47 + extern struct file_system_type gfs2_fs_type; 48 + extern struct file_system_type gfs2meta_fs_type; 49 + extern const struct export_operations gfs2_export_ops; 50 + extern const struct super_operations gfs2_super_ops; 51 + extern struct dentry_operations gfs2_dops; 47 52 48 53 #endif /* __SUPER_DOT_H__ */ 49 54
+16 -50
fs/gfs2/sys.c
··· 26 26 #include "quota.h"
 27 27 #include "util.h"
 28 28 
 29 - char *gfs2_sys_margs;
 30 - spinlock_t gfs2_sys_margs_lock;
 31 - 
 32 29 static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
 33 30 {
 34 31 return snprintf(buf, PAGE_SIZE, "%u:%u\n",
··· 260 263 ARGS_ATTR(localflocks, "%d\n");
 261 264 ARGS_ATTR(debug, "%d\n");
 262 265 ARGS_ATTR(upgrade, "%d\n");
 263 - ARGS_ATTR(num_glockd, "%u\n");
 264 266 ARGS_ATTR(posix_acl, "%d\n");
 265 267 ARGS_ATTR(quota, "%u\n");
 266 268 ARGS_ATTR(suiddir, "%d\n");
··· 275 279 &args_attr_localflocks.attr,
 276 280 &args_attr_debug.attr,
 277 281 &args_attr_upgrade.attr,
 278 - &args_attr_num_glockd.attr,
 279 282 &args_attr_posix_acl.attr,
 280 283 &args_attr_quota.attr,
 281 284 &args_attr_suiddir.attr,
 282 285 &args_attr_data.attr,
 283 - NULL,
 284 - };
 285 - 
 286 - /*
 287 - * display counters from superblock
 288 - */
 289 - 
 290 - struct counters_attr {
 291 - struct attribute attr;
 292 - ssize_t (*show)(struct gfs2_sbd *, char *);
 293 - };
 294 - 
 295 - #define COUNTERS_ATTR(name, fmt) \
 296 - static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
 297 - { \
 298 - return snprintf(buf, PAGE_SIZE, fmt, \
 299 - (unsigned int)atomic_read(&sdp->sd_##name)); \
 300 - } \
 301 - static struct counters_attr counters_attr_##name = __ATTR_RO(name)
 302 - 
 303 - COUNTERS_ATTR(reclaimed, "%u\n");
 304 - 
 305 - static struct attribute *counters_attrs[] = {
 306 - &counters_attr_reclaimed.attr,
 307 286 NULL,
 308 287 };
··· 364 393 } \
 365 394 TUNE_ATTR_2(name, name##_store)
 
 367 - TUNE_ATTR(demote_secs, 0);
 368 396 TUNE_ATTR(incore_log_blocks, 0);
 369 397 TUNE_ATTR(log_flush_secs, 0);
 370 398 TUNE_ATTR(quota_warn_period, 0);
··· 378 408 TUNE_ATTR(statfs_quantum, 1);
 379 409 TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
 380 410 TUNE_ATTR_DAEMON(logd_secs, logd_process);
 381 - TUNE_ATTR_DAEMON(quotad_secs, quotad_process);
 382 411 TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);
 383 412 
 384 413 static struct attribute *tune_attrs[] = {
 385 - &tune_attr_demote_secs.attr,
 386 414 &tune_attr_incore_log_blocks.attr,
 387 415 &tune_attr_log_flush_secs.attr,
 388 416 &tune_attr_quota_warn_period.attr,
··· 394 426 &tune_attr_statfs_quantum.attr,
 395 427 &tune_attr_recoverd_secs.attr,
 396 428 &tune_attr_logd_secs.attr,
 397 - &tune_attr_quotad_secs.attr,
 398 429 &tune_attr_quota_scale.attr,
 399 430 &tune_attr_new_files_jdata.attr,
 400 431 NULL,
··· 402 435 static struct attribute_group lockstruct_group = {
 403 436 .name = "lockstruct",
 404 437 .attrs = lockstruct_attrs,
 405 - };
 406 - 
 407 - static struct attribute_group counters_group = {
 408 - .name = "counters",
 409 - .attrs = counters_attrs,
 410 438 };
 411 439 
 412 440 static struct attribute_group args_group = {
··· 428 466 if (error)
 429 467 goto fail_reg;
 430 468 
 431 - error = sysfs_create_group(&sdp->sd_kobj, &counters_group);
 432 - if (error)
 433 - goto fail_lockstruct;
 434 - 
 435 469 error = sysfs_create_group(&sdp->sd_kobj, &args_group);
 436 470 if (error)
 437 - goto fail_counters;
 471 + goto fail_lockstruct;
 438 472 
 439 473 error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
 440 474 if (error)
··· 441 483 
 442 484 fail_args:
 443 485 sysfs_remove_group(&sdp->sd_kobj, &args_group);
 444 - fail_counters:
 445 - sysfs_remove_group(&sdp->sd_kobj, &counters_group);
 446 486 fail_lockstruct:
 447 487 sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group);
 448 488 fail_reg:
··· 454 498 {
 455 499 sysfs_remove_group(&sdp->sd_kobj, &tune_group);
 456 500 sysfs_remove_group(&sdp->sd_kobj, &args_group);
 457 - sysfs_remove_group(&sdp->sd_kobj, &counters_group);
 458 501 sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group);
 459 502 kobject_put(&sdp->sd_kobj);
 460 503 }
 461 504 
 505 + static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
 506 + struct kobj_uevent_env *env)
 507 + {
 508 + struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
 509 + add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
 510 + add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
 511 + return 0;
 512 + }
 513 + 
 514 + static struct kset_uevent_ops gfs2_uevent_ops = {
 515 + .uevent = gfs2_uevent,
 516 + };
 517 + 
 518 + 
 462 519 int gfs2_sys_init(void)
 463 520 {
 464 - gfs2_sys_margs = NULL;
 465 - spin_lock_init(&gfs2_sys_margs_lock);
 466 - gfs2_kset = kset_create_and_add("gfs2", NULL, fs_kobj);
 521 + gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
 467 522 if (!gfs2_kset)
 468 523 return -ENOMEM;
 469 524 return 0;
··· 482 515 
 483 516 void gfs2_sys_uninit(void)
 484 517 {
 485 - kfree(gfs2_sys_margs);
 486 518 kset_unregister(gfs2_kset);
 487 519 }
 488 520 
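The sysfs change above is the mechanics behind "GFS2: Send useful information with uevent messages": gfs2_sys_init() now passes a struct kset_uevent_ops to kset_create_and_add() instead of NULL, so every uevent sent for a GFS2 superblock kobject carries LOCKTABLE= and LOCKPROTO= variables that udev rules can match on. Below is a minimal sketch of the same pattern for a hypothetical kset; the example_* names are illustrative only, while kset_create_and_add(), add_uevent_var() and struct kset_uevent_ops are the real interfaces of this kernel generation.

#include <linux/fs.h>		/* fs_kobj */
#include <linux/kobject.h>
#include <linux/module.h>

/* Add one extra environment variable to every uevent emitted for a
 * kobject in this kset, as gfs2_uevent() does with LOCKTABLE/LOCKPROTO. */
static int example_uevent(struct kset *kset, struct kobject *kobj,
			  struct kobj_uevent_env *env)
{
	add_uevent_var(env, "EXAMPLE_NAME=%s", kobject_name(kobj));
	return 0;	/* a non-zero return would suppress the event */
}

static struct kset_uevent_ops example_uevent_ops = {
	.uevent = example_uevent,
};

static struct kset *example_kset;

static int __init example_init(void)
{
	/* Passing the ops is the whole trick; with NULL here the
	 * uevents would carry no extra variables. */
	example_kset = kset_create_and_add("example", &example_uevent_ops,
					   fs_kobj);
	if (!example_kset)
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	kset_unregister(example_kset);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");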
-4
fs/gfs2/sys.h
··· 13 13 #include <linux/spinlock.h> 14 14 struct gfs2_sbd; 15 15 16 - /* Allow args to be passed to GFS2 when using an initial ram disk */ 17 - extern char *gfs2_sys_margs; 18 - extern spinlock_t gfs2_sys_margs_lock; 19 - 20 16 int gfs2_sys_fs_add(struct gfs2_sbd *sdp); 21 17 void gfs2_sys_fs_del(struct gfs2_sbd *sdp); 22 18
+1
fs/gfs2/util.c
··· 25 25 struct kmem_cache *gfs2_inode_cachep __read_mostly; 26 26 struct kmem_cache *gfs2_bufdata_cachep __read_mostly; 27 27 struct kmem_cache *gfs2_rgrpd_cachep __read_mostly; 28 + struct kmem_cache *gfs2_quotad_cachep __read_mostly; 28 29 29 30 void gfs2_assert_i(struct gfs2_sbd *sdp) 30 31 {
+1
fs/gfs2/util.h
··· 148 148 extern struct kmem_cache *gfs2_inode_cachep; 149 149 extern struct kmem_cache *gfs2_bufdata_cachep; 150 150 extern struct kmem_cache *gfs2_rgrpd_cachep; 151 + extern struct kmem_cache *gfs2_quotad_cachep; 151 152 152 153 static inline unsigned int gfs2_tune_get_i(struct gfs2_tune *gt, 153 154 unsigned int *p)
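The gfs2_quotad_cachep declarations added to util.c and util.h above pair with "GFS2: Clean up & move gfs2_quotad" elsewhere in this merge. The slab cache itself is created and destroyed in fs/gfs2/main.c next to the other gfs2_*_cachep caches; the sketch below shows the likely wiring, with the object type (struct gfs2_quota_data) and the helper names being assumptions rather than quotes from the merge.

#include <linux/slab.h>

#include "incore.h"	/* struct gfs2_quota_data (assumed definition site) */
#include "util.h"	/* gfs2_quotad_cachep, declared above */

/* Mirrors how gfs2_inode_cachep and friends are registered from
 * init_gfs2_fs() at module load. */
static int gfs2_quotad_cache_init(void)
{
	gfs2_quotad_cachep = kmem_cache_create("gfs2_quotad",
					       sizeof(struct gfs2_quota_data),
					       0, 0, NULL);
	if (!gfs2_quotad_cachep)
		return -ENOMEM;	/* caller unwinds the earlier caches */
	return 0;
}

/* Mirrors the kmem_cache_destroy() calls in exit_gfs2_fs(); only safe
 * once every gfs2_quota_data object has been freed. */
static void gfs2_quotad_cache_fini(void)
{
	kmem_cache_destroy(gfs2_quotad_cachep);
}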
+34 -10
fs/ioctl.c
··· 231 231 #define blk_to_logical(inode, blk) (blk << (inode)->i_blkbits)
 232 232 #define logical_to_blk(inode, offset) (offset >> (inode)->i_blkbits);
 233 233 
 234 - /*
 234 + /**
 235 + * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
 235 236 * @inode - the inode to map
 236 237 * @arg - the pointer to userspace where we copy everything to
 237 238 * @get_block - the fs's get_block function
··· 243 242 *
 244 243 * If it is possible to have data blocks beyond a hole past @inode->i_size, then
 245 244 * please do not use this function, it will stop at the first unmapped block
 246 - * beyond i_size
 245 + * beyond i_size.
 246 + *
 247 + * If you use this function directly, you need to do your own locking. Use
 248 + * generic_block_fiemap if you want the locking done for you.
 247 249 */
 248 - int generic_block_fiemap(struct inode *inode,
 249 - struct fiemap_extent_info *fieinfo, u64 start,
 250 - u64 len, get_block_t *get_block)
 250 + 
 251 + int __generic_block_fiemap(struct inode *inode,
 252 + struct fiemap_extent_info *fieinfo, u64 start,
 253 + u64 len, get_block_t *get_block)
 251 254 {
 252 255 struct buffer_head tmp;
 253 256 unsigned int start_blk;
··· 264 259 return ret;
 265 260 
 266 261 start_blk = logical_to_blk(inode, start);
 267 - 
 268 - /* guard against change */
 269 - mutex_lock(&inode->i_mutex);
 270 262 
 271 263 length = (long long)min_t(u64, len, i_size_read(inode));
 272 264 map_len = length;
··· 336 334 cond_resched();
 337 335 } while (1);
 338 336 
 339 - mutex_unlock(&inode->i_mutex);
 340 - 
 341 337 /* if ret is 1 then we just hit the end of the extent array */
 342 338 if (ret == 1)
 343 339 ret = 0;
 344 340 
 341 + return ret;
 342 + }
 343 + EXPORT_SYMBOL(__generic_block_fiemap);
 344 + 
 345 + /**
 346 + * generic_block_fiemap - FIEMAP for block based inodes
 347 + * @inode: The inode to map
 348 + * @fieinfo: The mapping information
 349 + * @start: The initial block to map
 350 + * @len: The length of the extent to attempt to map
 351 + * @get_block: The block mapping function for the fs
 352 + *
 353 + * Calls __generic_block_fiemap to map the inode, after taking
 354 + * the inode's mutex lock.
 355 + */
 356 + 
 357 + int generic_block_fiemap(struct inode *inode,
 358 + struct fiemap_extent_info *fieinfo, u64 start,
 359 + u64 len, get_block_t *get_block)
 360 + {
 361 + int ret;
 362 + mutex_lock(&inode->i_mutex);
 363 + ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
 364 + mutex_unlock(&inode->i_mutex);
 345 365 return ret;
 346 366 }
 347 367 EXPORT_SYMBOL(generic_block_fiemap);
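The fs/ioctl.c hunk splits the old helper in two: __generic_block_fiemap() walks the block mapping without taking any locks, and generic_block_fiemap() is now a thin wrapper that takes i_mutex around the walk. The split exists for callers with stricter lock ordering; GFS2, for instance, needs to acquire its cluster lock under i_mutex before mapping extents. A hedged sketch of both call styles for a hypothetical filesystem, where myfs_get_block stands in for the fs's ordinary get_block_t, follows.

#include <linux/fs.h>
#include <linux/mutex.h>

/* Assumed: the filesystem's usual block-mapping callback. */
extern int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create);

/* Common case: nothing held on entry, so let the wrapper take i_mutex.
 * This is what a block-based fs would point its ->fiemap at. */
static int myfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		       u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo, start, len,
				    myfs_get_block);
}

/* Ordering-sensitive case: take i_mutex first, then any fs-internal
 * locks (GFS2 takes a glock here), then call the unlocked variant and
 * unwind in reverse order. */
static int myfs_fiemap_nested(struct inode *inode,
			      struct fiemap_extent_info *fieinfo,
			      u64 start, u64 len)
{
	int ret;

	mutex_lock(&inode->i_mutex);
	/* ... acquire fs-internal locks ... */
	ret = __generic_block_fiemap(inode, fieinfo, start, len,
				     myfs_get_block);
	/* ... release fs-internal locks ... */
	mutex_unlock(&inode->i_mutex);
	return ret;
}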
+3
include/linux/fs.h
··· 2059 2059 2060 2060 extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, 2061 2061 unsigned long arg); 2062 + extern int __generic_block_fiemap(struct inode *inode, 2063 + struct fiemap_extent_info *fieinfo, u64 start, 2064 + u64 len, get_block_t *get_block); 2062 2065 extern int generic_block_fiemap(struct inode *inode, 2063 2066 struct fiemap_extent_info *fieinfo, u64 start, 2064 2067 u64 len, get_block_t *get_block);