+21
-2
fs/9p/v9fs_vfs.h
+21
-2
fs/9p/v9fs_vfs.h
···
40
40
*/
41
41
#define P9_LOCK_TIMEOUT (30*HZ)
42
42
43
+
/* flags for v9fs_stat2inode() & v9fs_stat2inode_dotl() */
44
+
#define V9FS_STAT2INODE_KEEP_ISIZE 1
45
+
43
46
extern struct file_system_type v9fs_fs_type;
44
47
extern const struct address_space_operations v9fs_addr_operations;
45
48
extern const struct file_operations v9fs_file_operations;
···
64
61
struct inode *inode, umode_t mode, dev_t);
65
62
void v9fs_evict_inode(struct inode *inode);
66
63
ino_t v9fs_qid2ino(struct p9_qid *qid);
67
-
void v9fs_stat2inode(struct p9_wstat *, struct inode *, struct super_block *);
68
-
void v9fs_stat2inode_dotl(struct p9_stat_dotl *, struct inode *);
64
+
void v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
65
+
struct super_block *sb, unsigned int flags);
66
+
void v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
67
+
unsigned int flags);
69
68
int v9fs_dir_release(struct inode *inode, struct file *filp);
70
69
int v9fs_file_open(struct inode *inode, struct file *file);
71
70
void v9fs_inode2stat(struct inode *inode, struct p9_wstat *stat);
···
88
83
}
89
84
90
85
int v9fs_open_to_dotl_flags(int flags);
86
+
87
+
/*
 * v9fs_i_size_write - store a new i_size, serializing where required
 * @inode:  inode whose size is being updated
 * @i_size: new size to publish via i_size_write()
 *
 * On 32-bit architectures concurrent i_size updates can corrupt the
 * i_size seqcount and make i_size_read() spin forever, so the store
 * must be done under inode->i_lock there.  On 64-bit the store is
 * atomic and the lock can be skipped; the sizeof() comparison lets the
 * compiler discard the dead branch at build time.
 */
static inline void v9fs_i_size_write(struct inode *inode, loff_t i_size)
{
	const bool need_lock = sizeof(i_size) > sizeof(long);

	if (need_lock)
		spin_lock(&inode->i_lock);
	i_size_write(inode, i_size);
	if (need_lock)
		spin_unlock(&inode->i_lock);
}
91
100
#endif
+5
-1
fs/9p/vfs_file.c
+5
-1
fs/9p/vfs_file.c
···
446
446
i_size = i_size_read(inode);
447
447
if (iocb->ki_pos > i_size) {
448
448
inode_add_bytes(inode, iocb->ki_pos - i_size);
449
-
i_size_write(inode, iocb->ki_pos);
449
+
/*
450
+
* Need to serialize against i_size_write() in
451
+
* v9fs_stat2inode()
452
+
*/
453
+
v9fs_i_size_write(inode, iocb->ki_pos);
450
454
}
451
455
return retval;
452
456
}
+11
-12
fs/9p/vfs_inode.c
+11
-12
fs/9p/vfs_inode.c
···
538
538
if (retval)
539
539
goto error;
540
540
541
-
v9fs_stat2inode(st, inode, sb);
541
+
v9fs_stat2inode(st, inode, sb, 0);
542
542
v9fs_cache_inode_get_cookie(inode);
543
543
unlock_new_inode(inode);
544
544
return inode;
···
1092
1092
if (IS_ERR(st))
1093
1093
return PTR_ERR(st);
1094
1094
1095
-
v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb);
1095
+
v9fs_stat2inode(st, d_inode(dentry), dentry->d_sb, 0);
1096
1096
generic_fillattr(d_inode(dentry), stat);
1097
1097
1098
1098
p9stat_free(st);
···
1170
1170
* @stat: Plan 9 metadata (mistat) structure
1171
1171
* @inode: inode to populate
1172
1172
* @sb: superblock of filesystem
1173
+
* @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
1173
1174
*
1174
1175
*/
1175
1176
1176
1177
void
1177
1178
v9fs_stat2inode(struct p9_wstat *stat, struct inode *inode,
1178
-
struct super_block *sb)
1179
+
struct super_block *sb, unsigned int flags)
1179
1180
{
1180
1181
umode_t mode;
1181
1182
char ext[32];
···
1217
1216
mode = p9mode2perm(v9ses, stat);
1218
1217
mode |= inode->i_mode & ~S_IALLUGO;
1219
1218
inode->i_mode = mode;
1220
-
i_size_write(inode, stat->length);
1221
1219
1220
+
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
1221
+
v9fs_i_size_write(inode, stat->length);
1222
1222
/* not real number of blocks, but 512 byte ones ... */
1223
-
inode->i_blocks = (i_size_read(inode) + 512 - 1) >> 9;
1223
+
inode->i_blocks = (stat->length + 512 - 1) >> 9;
1224
1224
v9inode->cache_validity &= ~V9FS_INO_INVALID_ATTR;
1225
1225
}
1226
1226
···
1418
1416
{
1419
1417
int umode;
1420
1418
dev_t rdev;
1421
-
loff_t i_size;
1422
1419
struct p9_wstat *st;
1423
1420
struct v9fs_session_info *v9ses;
1421
+
unsigned int flags;
1424
1422
1425
1423
v9ses = v9fs_inode2v9ses(inode);
1426
1424
st = p9_client_stat(fid);
···
1433
1431
if ((inode->i_mode & S_IFMT) != (umode & S_IFMT))
1434
1432
goto out;
1435
1433
1436
-
spin_lock(&inode->i_lock);
1437
1434
/*
1438
1435
* We don't want to refresh inode->i_size,
1439
1436
* because we may have cached data
1440
1437
*/
1441
-
i_size = inode->i_size;
1442
-
v9fs_stat2inode(st, inode, inode->i_sb);
1443
-
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
1444
-
inode->i_size = i_size;
1445
-
spin_unlock(&inode->i_lock);
1438
+
flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
1439
+
V9FS_STAT2INODE_KEEP_ISIZE : 0;
1440
+
v9fs_stat2inode(st, inode, inode->i_sb, flags);
1446
1441
out:
1447
1442
p9stat_free(st);
1448
1443
kfree(st);
+14
-13
fs/9p/vfs_inode_dotl.c
+14
-13
fs/9p/vfs_inode_dotl.c
···
143
143
if (retval)
144
144
goto error;
145
145
146
-
v9fs_stat2inode_dotl(st, inode);
146
+
v9fs_stat2inode_dotl(st, inode, 0);
147
147
v9fs_cache_inode_get_cookie(inode);
148
148
retval = v9fs_get_acl(inode, fid);
149
149
if (retval)
···
496
496
if (IS_ERR(st))
497
497
return PTR_ERR(st);
498
498
499
-
v9fs_stat2inode_dotl(st, d_inode(dentry));
499
+
v9fs_stat2inode_dotl(st, d_inode(dentry), 0);
500
500
generic_fillattr(d_inode(dentry), stat);
501
501
/* Change block size to what the server returned */
502
502
stat->blksize = st->st_blksize;
···
607
607
* v9fs_stat2inode_dotl - populate an inode structure with stat info
608
608
* @stat: stat structure
609
609
* @inode: inode to populate
610
+
* @flags: control flags (e.g. V9FS_STAT2INODE_KEEP_ISIZE)
610
611
*
611
612
*/
612
613
613
614
void
614
-
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode)
615
+
v9fs_stat2inode_dotl(struct p9_stat_dotl *stat, struct inode *inode,
616
+
unsigned int flags)
615
617
{
616
618
umode_t mode;
617
619
struct v9fs_inode *v9inode = V9FS_I(inode);
···
633
631
mode |= inode->i_mode & ~S_IALLUGO;
634
632
inode->i_mode = mode;
635
633
636
-
i_size_write(inode, stat->st_size);
634
+
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE))
635
+
v9fs_i_size_write(inode, stat->st_size);
637
636
inode->i_blocks = stat->st_blocks;
638
637
} else {
639
638
if (stat->st_result_mask & P9_STATS_ATIME) {
···
664
661
}
665
662
if (stat->st_result_mask & P9_STATS_RDEV)
666
663
inode->i_rdev = new_decode_dev(stat->st_rdev);
667
-
if (stat->st_result_mask & P9_STATS_SIZE)
668
-
i_size_write(inode, stat->st_size);
664
+
if (!(flags & V9FS_STAT2INODE_KEEP_ISIZE) &&
665
+
stat->st_result_mask & P9_STATS_SIZE)
666
+
v9fs_i_size_write(inode, stat->st_size);
669
667
if (stat->st_result_mask & P9_STATS_BLOCKS)
670
668
inode->i_blocks = stat->st_blocks;
671
669
}
···
932
928
933
929
int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
934
930
{
935
-
loff_t i_size;
936
931
struct p9_stat_dotl *st;
937
932
struct v9fs_session_info *v9ses;
933
+
unsigned int flags;
938
934
939
935
v9ses = v9fs_inode2v9ses(inode);
940
936
st = p9_client_getattr_dotl(fid, P9_STATS_ALL);
···
946
942
if ((inode->i_mode & S_IFMT) != (st->st_mode & S_IFMT))
947
943
goto out;
948
944
949
-
spin_lock(&inode->i_lock);
950
945
/*
951
946
* We don't want to refresh inode->i_size,
952
947
* because we may have cached data
953
948
*/
954
-
i_size = inode->i_size;
955
-
v9fs_stat2inode_dotl(st, inode);
956
-
if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)
957
-
inode->i_size = i_size;
958
-
spin_unlock(&inode->i_lock);
949
+
flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?
950
+
V9FS_STAT2INODE_KEEP_ISIZE : 0;
951
+
v9fs_stat2inode_dotl(st, inode, flags);
959
952
out:
960
953
kfree(st);
961
954
return 0;
+2
-2
fs/9p/vfs_super.c
+2
-2
fs/9p/vfs_super.c
···
172
172
goto release_sb;
173
173
}
174
174
d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
175
-
v9fs_stat2inode_dotl(st, d_inode(root));
175
+
v9fs_stat2inode_dotl(st, d_inode(root), 0);
176
176
kfree(st);
177
177
} else {
178
178
struct p9_wstat *st = NULL;
···
183
183
}
184
184
185
185
d_inode(root)->i_ino = v9fs_qid2ino(&st->qid);
186
-
v9fs_stat2inode(st, d_inode(root), sb);
186
+
v9fs_stat2inode(st, d_inode(root), sb, 0);
187
187
188
188
p9stat_free(st);
189
189
kfree(st);
+1
-1
net/9p/client.c
+1
-1
net/9p/client.c
+1
-1
net/9p/trans_xen.c
+1
-1
net/9p/trans_xen.c
···
513
513
case XenbusStateClosed:
514
514
if (dev->state == XenbusStateClosed)
515
515
break;
516
-
/* Missed the backend's CLOSING state -- fallthrough */
516
+
/* fall through - Missed the backend's CLOSING state */
517
517
case XenbusStateClosing:
518
518
xenbus_frontend_closed(dev);
519
519
break;