Merge git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/sfrench/cifs-2.6:
cifs: make cifs_set_oplock_level() take a cifsInodeInfo pointer
cifs: dereferencing first then checking
cifs: trivial comment fix: tlink_tree is now a rbtree
[CIFS] Cleanup unused variable build warning
cifs: convert tlink_tree to a rbtree
cifs: store pointer to master tlink in superblock (try #2)
cifs: trivial doc fix: note setlease implemented
CIFS: Add cifs_set_oplock_level
FS: cifs, remove unneeded NULL tests

+152 -161
+1 -1
fs/cifs/TODO
@@ -81,7 +81,7 @@
 
 v) mount check for unmatched uids
 
-w) Add support for new vfs entry points for setlease and fallocate
+w) Add support for new vfs entry point for fallocate
 
 x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of
 processes can proceed better in parallel (on the server)
+3 -3
fs/cifs/cifs_fs_sb.h
@@ -15,7 +15,7 @@
  *   the GNU Lesser General Public License for more details.
  *
  */
-#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
 
 #ifndef _CIFS_FS_SB_H
 #define _CIFS_FS_SB_H
@@ -42,9 +42,9 @@
 #define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
 
 struct cifs_sb_info {
-	struct radix_tree_root tlink_tree;
-#define CIFS_TLINK_MASTER_TAG	0 /* is "master" (mount) tcon */
+	struct rb_root tlink_tree;
 	spinlock_t tlink_tree_lock;
+	struct tcon_link *master_tlink;
 	struct nls_table *local_nls;
 	unsigned int rsize;
 	unsigned int wsize;
+2 -3
fs/cifs/cifsfs.c
@@ -116,7 +116,7 @@
 		return -ENOMEM;
 
 	spin_lock_init(&cifs_sb->tlink_tree_lock);
-	INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
+	cifs_sb->tlink_tree = RB_ROOT;
 
 	rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
 	if (rc) {
@@ -321,8 +321,7 @@
 	/* Until the file is open and we have gotten oplock
 	info back from the server, can not assume caching of
 	file data or metadata */
-	cifs_inode->clientCanCacheRead = false;
-	cifs_inode->clientCanCacheAll = false;
+	cifs_set_oplock_level(cifs_inode, 0);
 	cifs_inode->delete_pending = false;
 	cifs_inode->invalid_mapping = false;
 	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
+2 -1
fs/cifs/cifsglob.h
··· 336 336 * "get" on the container. 337 337 */ 338 338 struct tcon_link { 339 - unsigned long tl_index; 339 + struct rb_node tl_rbnode; 340 + uid_t tl_uid; 340 341 unsigned long tl_flags; 341 342 #define TCON_LINK_MASTER 0 342 343 #define TCON_LINK_PENDING 1
+1
fs/cifs/cifsproto.h
@@ -104,6 +104,7 @@
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
 				      int offset);
+extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
 
 extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
 				struct file *file, struct tcon_link *tlink,
+99 -96
fs/cifs/connect.c
@@ -116,6 +116,7 @@
 
 static int ipv4_connect(struct TCP_Server_Info *server);
 static int ipv6_connect(struct TCP_Server_Info *server);
+static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
 static void cifs_prune_tlinks(struct work_struct *work);
 
 /*
@@ -2901,24 +2900,16 @@
 		goto mount_fail_check;
 	}
 
-	tlink->tl_index = pSesInfo->linux_uid;
+	tlink->tl_uid = pSesInfo->linux_uid;
 	tlink->tl_tcon = tcon;
 	tlink->tl_time = jiffies;
 	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
 	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
 
-	rc = radix_tree_preload(GFP_KERNEL);
-	if (rc == -ENOMEM) {
-		kfree(tlink);
-		goto mount_fail_check;
-	}
-
+	cifs_sb->master_tlink = tlink;
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
-	radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
-			   CIFS_TLINK_MASTER_TAG);
+	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
-	radix_tree_preload_end();
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
@@ -3100,32 +3107,25 @@
 int
 cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
 {
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node;
+	struct tcon_link *tlink;
 	char *tmp;
-	struct tcon_link *tlink[8];
-	unsigned long index = 0;
 
 	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-					  tlink[i]->tl_index);
-		}
-		spin_unlock(&cifs_sb->tlink_tree_lock);
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	while ((node = rb_first(root))) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(node, root);
 
-		for (i = 0; i < ret; i++)
-			cifs_put_tlink(tlink[i]);
-	} while (ret != 0);
+		spin_unlock(&cifs_sb->tlink_tree_lock);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	tmp = cifs_sb->prepath;
 	cifs_sb->prepathlen = 0;
@@ -3257,22 +3271,10 @@
 	return tcon;
 }
 
-static struct tcon_link *
+static inline struct tcon_link *
 cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
 {
-	struct tcon_link *tlink;
-	unsigned int ret;
-
-	spin_lock(&cifs_sb->tlink_tree_lock);
-	ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
-					0, 1, CIFS_TLINK_MASTER_TAG);
-	spin_unlock(&cifs_sb->tlink_tree_lock);
-
-	/* the master tcon should always be present */
-	if (ret == 0)
-		BUG();
-
-	return tlink;
+	return cifs_sb->master_tlink;
 }
 
 struct cifsTconInfo *
@@ -3276,6 +3302,47 @@
 	return signal_pending(current) ? -ERESTARTSYS : 0;
 }
 
+/* find and return a tlink with given uid */
+static struct tcon_link *
+tlink_rb_search(struct rb_root *root, uid_t uid)
+{
+	struct rb_node *node = root->rb_node;
+	struct tcon_link *tlink;
+
+	while (node) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+
+		if (tlink->tl_uid > uid)
+			node = node->rb_left;
+		else if (tlink->tl_uid < uid)
+			node = node->rb_right;
+		else
+			return tlink;
+	}
+	return NULL;
+}
+
+/* insert a tcon_link into the tree */
+static void
+tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+	struct tcon_link *tlink;
+
+	while (*new) {
+		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
+		parent = *new;
+
+		if (tlink->tl_uid > new_tlink->tl_uid)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&new_tlink->tl_rbnode, parent, new);
+	rb_insert_color(&new_tlink->tl_rbnode, root);
+}
+
 /*
  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
  * current task.
@@ -3324,7 +3309,7 @@
  * If the superblock doesn't refer to a multiuser mount, then just return
  * the master tcon for the mount.
  *
- * First, search the radix tree for an existing tcon for this fsuid. If one
+ * First, search the rbtree for an existing tcon for this fsuid. If one
  * exists, then check to see if it's pending construction. If it is then wait
  * for construction to complete. Once it's no longer pending, check to see if
  * it failed and either return an error or retry construction, depending on
@@ -3337,14 +3322,14 @@
 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 {
 	int ret;
-	unsigned long fsuid = (unsigned long) current_fsuid();
+	uid_t fsuid = current_fsuid();
 	struct tcon_link *tlink, *newtlink;
 
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
 
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 	if (tlink)
 		cifs_get_tlink(tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3353,36 +3338,24 @@
 	newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
 	if (newtlink == NULL)
 		return ERR_PTR(-ENOMEM);
-	newtlink->tl_index = fsuid;
+	newtlink->tl_uid = fsuid;
 	newtlink->tl_tcon = ERR_PTR(-EACCES);
 	set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
 	set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
 	cifs_get_tlink(newtlink);
 
-	ret = radix_tree_preload(GFP_KERNEL);
-	if (ret != 0) {
-		kfree(newtlink);
-		return ERR_PTR(ret);
-	}
-
 	spin_lock(&cifs_sb->tlink_tree_lock);
 	/* was one inserted after previous search? */
-	tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 	if (tlink) {
 		cifs_get_tlink(tlink);
 		spin_unlock(&cifs_sb->tlink_tree_lock);
-		radix_tree_preload_end();
 		kfree(newtlink);
 		goto wait_for_construction;
 	}
-	ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
-	spin_unlock(&cifs_sb->tlink_tree_lock);
-	radix_tree_preload_end();
-	if (ret) {
-		kfree(newtlink);
-		return ERR_PTR(ret);
-	}
 	tlink = newtlink;
+	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 	} else {
 wait_for_construction:
 	ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3416,39 +3413,39 @@
 {
 	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
 						    prune_tlinks.work);
-	struct tcon_link *tlink[8];
-	unsigned long now = jiffies;
-	unsigned long index = 0;
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node = rb_first(root);
+	struct rb_node *tmp;
+	struct tcon_link *tlink;
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
-			    atomic_read(&tlink[i]->tl_count) != 0 ||
-			    time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
-				       now)) {
-				tlink[i] = NULL;
-				continue;
-			}
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-					  tlink[i]->tl_index);
-		}
+	/*
+	 * Because we drop the spinlock in the loop in order to put the tlink
+	 * it's not guarded against removal of links from the tree. The only
+	 * places that remove entries from the tree are this function and
+	 * umounts. Because this function is non-reentrant and is canceled
+	 * before umount can proceed, this is safe.
+	 */
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	node = rb_first(root);
+	while (node != NULL) {
+		tmp = node;
+		node = rb_next(tmp);
+		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
+
+		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
+		    atomic_read(&tlink->tl_count) != 0 ||
+		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
+			continue;
+
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(tmp, root);
+
 		spin_unlock(&cifs_sb->tlink_tree_lock);
-
-		for (i = 0; i < ret; i++) {
-			if (tlink[i] != NULL)
-				cifs_put_tlink(tlink[i]);
-		}
-	} while (ret != 0);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
+22 -50
fs/cifs/file.c
@@ -146,12 +146,7 @@
 	rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
 				 xid, NULL);
 
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock granted on inode %p", inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ)
-		pCifsInode->clientCanCacheRead = true;
+	cifs_set_oplock_level(pCifsInode, oplock);
 
 	return rc;
 }
@@ -248,12 +253,7 @@
 	list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
 	spin_unlock(&cifs_file_list_lock);
 
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock inode %p", inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ)
-		pCifsInode->clientCanCacheRead = true;
+	cifs_set_oplock_level(pCifsInode, oplock);
 
 	file->private_data = pCifsFile;
 	return pCifsFile;
@@ -261,8 +271,9 @@
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
+	struct inode *inode = cifs_file->dentry->d_inode;
 	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
-	struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 	struct cifsLockInfo *li, *tmp;
 
 	spin_lock(&cifs_file_list_lock);
@@ -279,8 +288,7 @@
 	if (list_empty(&cifsi->openFileList)) {
 		cFYI(1, "closing last open instance for inode %p",
 			cifs_file->dentry->d_inode);
-		cifsi->clientCanCacheRead = false;
-		cifsi->clientCanCacheAll = false;
+		cifs_set_oplock_level(cifsi, 0);
 	}
 	spin_unlock(&cifs_file_list_lock);
 
@@ -597,8 +607,6 @@
 		rc = filemap_write_and_wait(inode->i_mapping);
 		mapping_set_error(inode->i_mapping, rc);
 
-		pCifsInode->clientCanCacheAll = false;
-		pCifsInode->clientCanCacheRead = false;
 		if (tcon->unix_ext)
 			rc = cifs_get_inode_info_unix(&inode,
 				full_path, inode->i_sb, xid);
@@ -610,18 +622,9 @@
 	   invalidate the current end of file on the server
 	   we can not go to the server to get the new inod
 	   info */
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock granted on inode %p",
-			pCifsFile->dentry->d_inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ) {
-		pCifsInode->clientCanCacheRead = true;
-		pCifsInode->clientCanCacheAll = false;
-	} else {
-		pCifsInode->clientCanCacheRead = false;
-		pCifsInode->clientCanCacheAll = false;
-	}
+
+	cifs_set_oplock_level(pCifsInode, oplock);
+
 	cifs_relock_file(pCifsFile);
 
 reopen_error_exit:
@@ -754,12 +775,6 @@
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
-
-	if (file->private_data == NULL) {
-		rc = -EBADF;
-		FreeXid(xid);
-		return rc;
-	}
 	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
 
 	if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -929,13 +956,14 @@
 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 	size_t write_size, loff_t *poffset)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	int rc = 0;
 	unsigned int bytes_written = 0;
 	unsigned int total_written;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *pTcon;
 	int xid, long_op;
 	struct cifsFileInfo *open_file;
-	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
@@ -1003,21 +1029,17 @@
 
 	cifs_stats_bytes_written(pTcon, total_written);
 
-	/* since the write may have blocked check these pointers again */
-	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
-		struct inode *inode = file->f_path.dentry->d_inode;
 /* Do not update local mtime - server will set its actual value on write
-	 * inode->i_ctime = inode->i_mtime =
-	 * current_fs_time(inode->i_sb);*/
-		if (total_written > 0) {
-			spin_lock(&inode->i_lock);
-			if (*poffset > file->f_path.dentry->d_inode->i_size)
-				i_size_write(file->f_path.dentry->d_inode,
-					*poffset);
-			spin_unlock(&inode->i_lock);
-		}
-		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+ * inode->i_ctime = inode->i_mtime =
+ * current_fs_time(inode->i_sb);*/
+	if (total_written > 0) {
+		spin_lock(&inode->i_lock);
+		if (*poffset > inode->i_size)
+			i_size_write(inode, *poffset);
+		spin_unlock(&inode->i_lock);
 	}
+	mark_inode_dirty_sync(inode);
+
 	FreeXid(xid);
 	return total_written;
 }
@@ -1148,6 +1178,6 @@
 					bool fsuid_only)
 {
 	struct cifsFileInfo *open_file;
-	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+	struct cifs_sb_info *cifs_sb;
 	bool any_available = false;
 	int rc;
@@ -1161,6 +1191,8 @@
 		dump_stack();
 		return NULL;
 	}
+
+	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
 
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
-4
fs/cifs/ioctl.c
@@ -63,8 +63,6 @@
 #ifdef CONFIG_CIFS_POSIX
 		case FS_IOC_GETFLAGS:
 			if (CIFS_UNIX_EXTATTR_CAP & caps) {
-				if (pSMBFile == NULL)
-					break;
 				rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
 					&ExtAttrBits, &ExtAttrMask);
 				if (rc == 0)
@@ -78,8 +76,6 @@
 					rc = -EFAULT;
 					break;
 				}
-				if (pSMBFile == NULL)
-					break;
 				/* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
 					extAttrBits, &ExtAttrMask);*/
 			}
+22 -3
fs/cifs/misc.c
@@ -569,10 +569,9 @@
 
 			cFYI(1, "file id match, oplock break");
 			pCifsInode = CIFS_I(netfile->dentry->d_inode);
-			pCifsInode->clientCanCacheAll = false;
-			if (pSMB->OplockLevel == 0)
-				pCifsInode->clientCanCacheRead = false;
 
+			cifs_set_oplock_level(pCifsInode,
+					      pSMB->OplockLevel);
 			/*
 			 * cifs_oplock_break_put() can't be called
 			 * from here. Get reference after queueing
@@ -719,5 +720,25 @@
 			"mount. Consider mounting with the \"noserverino\" "
 			"option to silence this message.",
 			cifs_sb_master_tcon(cifs_sb)->treeName);
+	}
+}
+
+void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
+{
+	oplock &= 0xF;
+
+	if (oplock == OPLOCK_EXCLUSIVE) {
+		cinode->clientCanCacheAll = true;
+		cinode->clientCanCacheRead = true;
+		cFYI(1, "Exclusive Oplock granted on inode %p",
+		     &cinode->vfs_inode);
+	} else if (oplock == OPLOCK_READ) {
+		cinode->clientCanCacheAll = false;
+		cinode->clientCanCacheRead = true;
+		cFYI(1, "Level II Oplock granted on inode %p",
+		     &cinode->vfs_inode);
+	} else {
+		cinode->clientCanCacheAll = false;
+		cinode->clientCanCacheRead = false;
 	}
 }