Merge tag '5.20-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6

Pull more cifs updates from Steve French:

- two fixes for stable: one for a lock length miscalculation, and
  another for a lease break timeout bug

- a lease handling improvement that allows the deferred close timeout to
  be configured more safely

- five restructuring/cleanup patches

* tag '5.20-rc-smb3-client-fixes-part2' of git://git.samba.org/sfrench/cifs-2.6:
cifs: Do not access tcon->cfids->cfid directly from is_path_accessible
cifs: Add constructor/destructors for tcon->cfid
SMB3: fix lease break timeout when multiple deferred close handles for the same file.
smb3: allow deferred close timeout to be configurable
cifs: Do not use tcon->cfid directly, use the cfid we get from open_cached_dir
cifs: Move cached-dir functions into a separate file
cifs: Remove {cifs,nfs}_fscache_release_page()
cifs: fix lock length calculation

+1 -1
fs/cifs/Makefile
··· 7 7 8 8 cifs-y := trace.o cifsfs.o cifs_debug.o connect.o dir.o file.o \ 9 9 inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \ 10 - cifs_unicode.o nterr.o cifsencrypt.o \ 10 + cached_dir.o cifs_unicode.o nterr.o cifsencrypt.o \ 11 11 readdir.o ioctl.o sess.o export.o unc.o winucase.o \ 12 12 smb2ops.o smb2maperror.o smb2transport.o \ 13 13 smb2misc.o smb2pdu.o smb2inode.o smb2file.o cifsacl.o fs_context.o \
+388
fs/cifs/cached_dir.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* 3 + * Functions to handle the cached directory entries 4 + * 5 + * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com> 6 + */ 7 + 8 + #include "cifsglob.h" 9 + #include "cifsproto.h" 10 + #include "cifs_debug.h" 11 + #include "smb2proto.h" 12 + #include "cached_dir.h" 13 + 14 + /* 15 + * Open the and cache a directory handle. 16 + * If error then *cfid is not initialized. 17 + */ 18 + int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, 19 + const char *path, 20 + struct cifs_sb_info *cifs_sb, 21 + bool lookup_only, struct cached_fid **ret_cfid) 22 + { 23 + struct cifs_ses *ses; 24 + struct TCP_Server_Info *server; 25 + struct cifs_open_parms oparms; 26 + struct smb2_create_rsp *o_rsp = NULL; 27 + struct smb2_query_info_rsp *qi_rsp = NULL; 28 + int resp_buftype[2]; 29 + struct smb_rqst rqst[2]; 30 + struct kvec rsp_iov[2]; 31 + struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; 32 + struct kvec qi_iov[1]; 33 + int rc, flags = 0; 34 + __le16 utf16_path = 0; /* Null - since an open of top of share */ 35 + u8 oplock = SMB2_OPLOCK_LEVEL_II; 36 + struct cifs_fid *pfid; 37 + struct dentry *dentry; 38 + struct cached_fid *cfid; 39 + 40 + if (tcon == NULL || tcon->nohandlecache || 41 + is_smb1_server(tcon->ses->server)) 42 + return -EOPNOTSUPP; 43 + 44 + ses = tcon->ses; 45 + server = ses->server; 46 + 47 + if (cifs_sb->root == NULL) 48 + return -ENOENT; 49 + 50 + if (strlen(path)) 51 + return -ENOENT; 52 + 53 + dentry = cifs_sb->root; 54 + 55 + cfid = tcon->cfid; 56 + mutex_lock(&cfid->fid_mutex); 57 + if (cfid->is_valid) { 58 + cifs_dbg(FYI, "found a cached root file handle\n"); 59 + *ret_cfid = cfid; 60 + kref_get(&cfid->refcount); 61 + mutex_unlock(&cfid->fid_mutex); 62 + return 0; 63 + } 64 + 65 + /* 66 + * We do not hold the lock for the open because in case 67 + * SMB2_open needs to reconnect, it will end up calling 68 + * cifs_mark_open_files_invalid() which takes the lock again 69 + * thus causing a deadlock 70 + */ 71 + mutex_unlock(&cfid->fid_mutex); 72 + 73 + if (lookup_only) 74 + return -ENOENT; 75 + 76 + if (smb3_encryption_required(tcon)) 77 + flags |= CIFS_TRANSFORM_REQ; 78 + 79 + if (!server->ops->new_lease_key) 80 + return -EIO; 81 + 82 + pfid = &cfid->fid; 83 + server->ops->new_lease_key(pfid); 84 + 85 + memset(rqst, 0, sizeof(rqst)); 86 + resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; 87 + memset(rsp_iov, 0, sizeof(rsp_iov)); 88 + 89 + /* Open */ 90 + memset(&open_iov, 0, sizeof(open_iov)); 91 + rqst[0].rq_iov = open_iov; 92 + rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; 93 + 94 + oparms.tcon = tcon; 95 + oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE); 96 + oparms.desired_access = FILE_READ_ATTRIBUTES; 97 + oparms.disposition = FILE_OPEN; 98 + oparms.fid = pfid; 99 + oparms.reconnect = false; 100 + 101 + rc = SMB2_open_init(tcon, server, 102 + &rqst[0], &oplock, &oparms, &utf16_path); 103 + if (rc) 104 + goto oshr_free; 105 + smb2_set_next_command(tcon, &rqst[0]); 106 + 107 + memset(&qi_iov, 0, sizeof(qi_iov)); 108 + rqst[1].rq_iov = qi_iov; 109 + rqst[1].rq_nvec = 1; 110 + 111 + rc = SMB2_query_info_init(tcon, server, 112 + &rqst[1], COMPOUND_FID, 113 + COMPOUND_FID, FILE_ALL_INFORMATION, 114 + SMB2_O_INFO_FILE, 0, 115 + sizeof(struct smb2_file_all_info) + 116 + PATH_MAX * 2, 0, NULL); 117 + if (rc) 118 + goto oshr_free; 119 + 120 + smb2_set_related(&rqst[1]); 121 + 122 + rc = compound_send_recv(xid, ses, server, 123 + flags, 2, rqst, 124 + resp_buftype, rsp_iov); 125 + 
mutex_lock(&cfid->fid_mutex); 126 + 127 + /* 128 + * Now we need to check again as the cached root might have 129 + * been successfully re-opened from a concurrent process 130 + */ 131 + 132 + if (cfid->is_valid) { 133 + /* work was already done */ 134 + 135 + /* stash fids for close() later */ 136 + struct cifs_fid fid = { 137 + .persistent_fid = pfid->persistent_fid, 138 + .volatile_fid = pfid->volatile_fid, 139 + }; 140 + 141 + /* 142 + * caller expects this func to set the fid in cfid to valid 143 + * cached root, so increment the refcount. 144 + */ 145 + kref_get(&cfid->refcount); 146 + 147 + mutex_unlock(&cfid->fid_mutex); 148 + 149 + if (rc == 0) { 150 + /* close extra handle outside of crit sec */ 151 + SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 152 + } 153 + rc = 0; 154 + goto oshr_free; 155 + } 156 + 157 + /* Cached root is still invalid, continue normaly */ 158 + 159 + if (rc) { 160 + if (rc == -EREMCHG) { 161 + tcon->need_reconnect = true; 162 + pr_warn_once("server share %s deleted\n", 163 + tcon->treeName); 164 + } 165 + goto oshr_exit; 166 + } 167 + 168 + atomic_inc(&tcon->num_remote_opens); 169 + 170 + o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; 171 + oparms.fid->persistent_fid = o_rsp->PersistentFileId; 172 + oparms.fid->volatile_fid = o_rsp->VolatileFileId; 173 + #ifdef CONFIG_CIFS_DEBUG2 174 + oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId); 175 + #endif /* CIFS_DEBUG2 */ 176 + 177 + cfid->tcon = tcon; 178 + cfid->is_valid = true; 179 + cfid->dentry = dentry; 180 + dget(dentry); 181 + kref_init(&cfid->refcount); 182 + 183 + /* BB TBD check to see if oplock level check can be removed below */ 184 + if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) { 185 + /* 186 + * See commit 2f94a3125b87. Increment the refcount when we 187 + * get a lease for root, release it if lease break occurs 188 + */ 189 + kref_get(&cfid->refcount); 190 + cfid->has_lease = true; 191 + smb2_parse_contexts(server, o_rsp, 192 + &oparms.fid->epoch, 193 + oparms.fid->lease_key, &oplock, 194 + NULL, NULL); 195 + } else 196 + goto oshr_exit; 197 + 198 + qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; 199 + if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) 200 + goto oshr_exit; 201 + if (!smb2_validate_and_copy_iov( 202 + le16_to_cpu(qi_rsp->OutputBufferOffset), 203 + sizeof(struct smb2_file_all_info), 204 + &rsp_iov[1], sizeof(struct smb2_file_all_info), 205 + (char *)&cfid->file_all_info)) 206 + cfid->file_all_info_is_valid = true; 207 + 208 + cfid->time = jiffies; 209 + 210 + oshr_exit: 211 + mutex_unlock(&cfid->fid_mutex); 212 + oshr_free: 213 + SMB2_open_free(&rqst[0]); 214 + SMB2_query_info_free(&rqst[1]); 215 + free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); 216 + free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); 217 + if (rc == 0) 218 + *ret_cfid = cfid; 219 + 220 + return rc; 221 + } 222 + 223 + int open_cached_dir_by_dentry(struct cifs_tcon *tcon, 224 + struct dentry *dentry, 225 + struct cached_fid **ret_cfid) 226 + { 227 + struct cached_fid *cfid; 228 + 229 + cfid = tcon->cfid; 230 + 231 + mutex_lock(&cfid->fid_mutex); 232 + if (cfid->dentry == dentry) { 233 + cifs_dbg(FYI, "found a cached root file handle by dentry\n"); 234 + *ret_cfid = cfid; 235 + kref_get(&cfid->refcount); 236 + mutex_unlock(&cfid->fid_mutex); 237 + return 0; 238 + } 239 + mutex_unlock(&cfid->fid_mutex); 240 + return -ENOENT; 241 + } 242 + 243 + static void 244 + smb2_close_cached_fid(struct kref *ref) 245 + { 246 + struct cached_fid *cfid = 
container_of(ref, struct cached_fid, 247 + refcount); 248 + struct cached_dirent *dirent, *q; 249 + 250 + if (cfid->is_valid) { 251 + cifs_dbg(FYI, "clear cached root file handle\n"); 252 + SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid, 253 + cfid->fid.volatile_fid); 254 + } 255 + 256 + /* 257 + * We only check validity above to send SMB2_close, 258 + * but we still need to invalidate these entries 259 + * when this function is called 260 + */ 261 + cfid->is_valid = false; 262 + cfid->file_all_info_is_valid = false; 263 + cfid->has_lease = false; 264 + if (cfid->dentry) { 265 + dput(cfid->dentry); 266 + cfid->dentry = NULL; 267 + } 268 + /* 269 + * Delete all cached dirent names 270 + */ 271 + mutex_lock(&cfid->dirents.de_mutex); 272 + list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) { 273 + list_del(&dirent->entry); 274 + kfree(dirent->name); 275 + kfree(dirent); 276 + } 277 + cfid->dirents.is_valid = 0; 278 + cfid->dirents.is_failed = 0; 279 + cfid->dirents.ctx = NULL; 280 + cfid->dirents.pos = 0; 281 + mutex_unlock(&cfid->dirents.de_mutex); 282 + 283 + } 284 + 285 + void close_cached_dir(struct cached_fid *cfid) 286 + { 287 + mutex_lock(&cfid->fid_mutex); 288 + kref_put(&cfid->refcount, smb2_close_cached_fid); 289 + mutex_unlock(&cfid->fid_mutex); 290 + } 291 + 292 + void close_cached_dir_lease_locked(struct cached_fid *cfid) 293 + { 294 + if (cfid->has_lease) { 295 + cfid->has_lease = false; 296 + kref_put(&cfid->refcount, smb2_close_cached_fid); 297 + } 298 + } 299 + 300 + void close_cached_dir_lease(struct cached_fid *cfid) 301 + { 302 + mutex_lock(&cfid->fid_mutex); 303 + close_cached_dir_lease_locked(cfid); 304 + mutex_unlock(&cfid->fid_mutex); 305 + } 306 + 307 + /* 308 + * Called from cifs_kill_sb when we unmount a share 309 + */ 310 + void close_all_cached_dirs(struct cifs_sb_info *cifs_sb) 311 + { 312 + struct rb_root *root = &cifs_sb->tlink_tree; 313 + struct rb_node *node; 314 + struct cached_fid *cfid; 315 + struct cifs_tcon *tcon; 316 + struct tcon_link *tlink; 317 + 318 + for (node = rb_first(root); node; node = rb_next(node)) { 319 + tlink = rb_entry(node, struct tcon_link, tl_rbnode); 320 + tcon = tlink_tcon(tlink); 321 + if (IS_ERR(tcon)) 322 + continue; 323 + cfid = tcon->cfid; 324 + mutex_lock(&cfid->fid_mutex); 325 + if (cfid->dentry) { 326 + dput(cfid->dentry); 327 + cfid->dentry = NULL; 328 + } 329 + mutex_unlock(&cfid->fid_mutex); 330 + } 331 + } 332 + 333 + /* 334 + * Invalidate and close all cached dirs when a TCON has been reset 335 + * due to a session loss. 
336 + */ 337 + void invalidate_all_cached_dirs(struct cifs_tcon *tcon) 338 + { 339 + mutex_lock(&tcon->cfid->fid_mutex); 340 + tcon->cfid->is_valid = false; 341 + /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ 342 + close_cached_dir_lease_locked(tcon->cfid); 343 + memset(&tcon->cfid->fid, 0, sizeof(struct cifs_fid)); 344 + mutex_unlock(&tcon->cfid->fid_mutex); 345 + } 346 + 347 + static void 348 + smb2_cached_lease_break(struct work_struct *work) 349 + { 350 + struct cached_fid *cfid = container_of(work, 351 + struct cached_fid, lease_break); 352 + 353 + close_cached_dir_lease(cfid); 354 + } 355 + 356 + int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]) 357 + { 358 + if (tcon->cfid->is_valid && 359 + !memcmp(lease_key, 360 + tcon->cfid->fid.lease_key, 361 + SMB2_LEASE_KEY_SIZE)) { 362 + tcon->cfid->time = 0; 363 + INIT_WORK(&tcon->cfid->lease_break, 364 + smb2_cached_lease_break); 365 + queue_work(cifsiod_wq, 366 + &tcon->cfid->lease_break); 367 + return true; 368 + } 369 + return false; 370 + } 371 + 372 + struct cached_fid *init_cached_dir(void) 373 + { 374 + struct cached_fid *cfid; 375 + 376 + cfid = kzalloc(sizeof(*cfid), GFP_KERNEL); 377 + if (!cfid) 378 + return NULL; 379 + INIT_LIST_HEAD(&cfid->dirents.entries); 380 + mutex_init(&cfid->dirents.de_mutex); 381 + mutex_init(&cfid->fid_mutex); 382 + return cfid; 383 + } 384 + 385 + void free_cached_dir(struct cifs_tcon *tcon) 386 + { 387 + kfree(tcon->cfid); 388 + }
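
For orientation, below is a minimal sketch of how a caller is expected to use this consolidated API, based on the open_cached_dir()/close_cached_dir() signatures in cached_dir.h and the call sites updated later in this series; the wrapper function itself is hypothetical and error handling is trimmed.

	/* Hypothetical caller, illustrative only; assumes the cifs headers above */
	#include "cifsglob.h"
	#include "cached_dir.h"

	static int example_use_cached_root(unsigned int xid, struct cifs_tcon *tcon,
					   struct cifs_sb_info *cifs_sb)
	{
		struct cached_fid *cfid;
		int rc;

		/* "" means the root of the share; lookup_only=false opens and caches it */
		rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid);
		if (rc)
			return rc;

		/* use cfid->fid.persistent_fid / cfid->fid.volatile_fid in compounds */

		/* drop the reference taken for us by open_cached_dir() */
		close_cached_dir(cfid);
		return 0;
	}
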
+64
fs/cifs/cached_dir.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * Functions to handle the cached directory entries 4 + * 5 + * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com> 6 + */ 7 + 8 + #ifndef _CACHED_DIR_H 9 + #define _CACHED_DIR_H 10 + 11 + 12 + struct cached_dirent { 13 + struct list_head entry; 14 + char *name; 15 + int namelen; 16 + loff_t pos; 17 + 18 + struct cifs_fattr fattr; 19 + }; 20 + 21 + struct cached_dirents { 22 + bool is_valid:1; 23 + bool is_failed:1; 24 + struct dir_context *ctx; /* 25 + * Only used to make sure we only take entries 26 + * from a single context. Never dereferenced. 27 + */ 28 + struct mutex de_mutex; 29 + int pos; /* Expected ctx->pos */ 30 + struct list_head entries; 31 + }; 32 + 33 + struct cached_fid { 34 + bool is_valid:1; /* Do we have a useable root fid */ 35 + bool file_all_info_is_valid:1; 36 + bool has_lease:1; 37 + unsigned long time; /* jiffies of when lease was taken */ 38 + struct kref refcount; 39 + struct cifs_fid fid; 40 + struct mutex fid_mutex; 41 + struct cifs_tcon *tcon; 42 + struct dentry *dentry; 43 + struct work_struct lease_break; 44 + struct smb2_file_all_info file_all_info; 45 + struct cached_dirents dirents; 46 + }; 47 + 48 + extern struct cached_fid *init_cached_dir(void); 49 + extern void free_cached_dir(struct cifs_tcon *tcon); 50 + extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, 51 + const char *path, 52 + struct cifs_sb_info *cifs_sb, 53 + bool lookup_only, struct cached_fid **cfid); 54 + extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon, 55 + struct dentry *dentry, 56 + struct cached_fid **cfid); 57 + extern void close_cached_dir(struct cached_fid *cfid); 58 + extern void close_cached_dir_lease(struct cached_fid *cfid); 59 + extern void close_cached_dir_lease_locked(struct cached_fid *cfid); 60 + extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb); 61 + extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon); 62 + extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]); 63 + 64 + #endif /* _CACHED_DIR_H */
+3 -18
fs/cifs/cifsfs.c
··· 46 46 #include "netlink.h" 47 47 #endif 48 48 #include "fs_context.h" 49 + #include "cached_dir.h" 49 50 50 51 /* 51 52 * DOS dates from 1980/1/1 through 2107/12/31 ··· 284 283 static void cifs_kill_sb(struct super_block *sb) 285 284 { 286 285 struct cifs_sb_info *cifs_sb = CIFS_SB(sb); 287 - struct cifs_tcon *tcon; 288 - struct cached_fid *cfid; 289 - struct rb_root *root = &cifs_sb->tlink_tree; 290 - struct rb_node *node; 291 - struct tcon_link *tlink; 292 286 293 287 /* 294 288 * We ned to release all dentries for the cached directories 295 289 * before we kill the sb. 296 290 */ 297 291 if (cifs_sb->root) { 298 - for (node = rb_first(root); node; node = rb_next(node)) { 299 - tlink = rb_entry(node, struct tcon_link, tl_rbnode); 300 - tcon = tlink_tcon(tlink); 301 - if (IS_ERR(tcon)) 302 - continue; 303 - cfid = &tcon->crfid; 304 - mutex_lock(&cfid->fid_mutex); 305 - if (cfid->dentry) { 306 - dput(cfid->dentry); 307 - cfid->dentry = NULL; 308 - } 309 - mutex_unlock(&cfid->fid_mutex); 310 - } 292 + close_all_cached_dirs(cifs_sb); 311 293 312 294 /* finally release root dentry */ 313 295 dput(cifs_sb->root); ··· 693 709 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ); 694 710 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ); 695 711 } 712 + seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ); 696 713 697 714 if (tcon->ses->chan_max > 1) 698 715 seq_printf(s, ",multichannel,max_channels=%zu",
+3 -39
fs/cifs/cifsglob.h
··· 1128 1128 u32 cf_cifstag; 1129 1129 }; 1130 1130 1131 - struct cached_dirent { 1132 - struct list_head entry; 1133 - char *name; 1134 - int namelen; 1135 - loff_t pos; 1136 - 1137 - struct cifs_fattr fattr; 1138 - }; 1139 - 1140 - struct cached_dirents { 1141 - bool is_valid:1; 1142 - bool is_failed:1; 1143 - struct dir_context *ctx; /* 1144 - * Only used to make sure we only take entries 1145 - * from a single context. Never dereferenced. 1146 - */ 1147 - struct mutex de_mutex; 1148 - int pos; /* Expected ctx->pos */ 1149 - struct list_head entries; 1150 - }; 1151 - 1152 - struct cached_fid { 1153 - bool is_valid:1; /* Do we have a useable root fid */ 1154 - bool file_all_info_is_valid:1; 1155 - bool has_lease:1; 1156 - unsigned long time; /* jiffies of when lease was taken */ 1157 - struct kref refcount; 1158 - struct cifs_fid *fid; 1159 - struct mutex fid_mutex; 1160 - struct cifs_tcon *tcon; 1161 - struct dentry *dentry; 1162 - struct work_struct lease_break; 1163 - struct smb2_file_all_info file_all_info; 1164 - struct cached_dirents dirents; 1165 - }; 1166 - 1167 1131 /* 1168 1132 * there is one of these for each connection to a resource on a particular 1169 1133 * session ··· 1221 1257 struct fscache_volume *fscache; /* cookie for share */ 1222 1258 #endif 1223 1259 struct list_head pending_opens; /* list of incomplete opens */ 1224 - struct cached_fid crfid; /* Cached root fid */ 1260 + struct cached_fid *cfid; /* Cached root fid */ 1225 1261 /* BB add field for back pointer to sb struct(s)? */ 1226 1262 #ifdef CONFIG_CIFS_DFS_UPCALL 1227 1263 struct list_head ulist; /* cache update list */ ··· 2096 2132 return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER)); 2097 2133 } 2098 2134 2099 - static inline u64 cifs_flock_len(struct file_lock *fl) 2135 + static inline u64 cifs_flock_len(const struct file_lock *fl) 2100 2136 { 2101 - return fl->fl_end == OFFSET_MAX ? 0 : fl->fl_end - fl->fl_start + 1; 2137 + return (u64)fl->fl_end - fl->fl_start + 1; 2102 2138 } 2103 2139 2104 2140 static inline size_t ntlmssp_workstation_name_size(const struct cifs_ses *ses)
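
The cifs_flock_len() change above is easiest to see with concrete numbers. The userspace sketch below (the loff_t/OFFSET_MAX stand-ins are assumptions mirroring the kernel definitions) compares the old special case with the new unsigned calculation for a whole-file lock.

	#include <stdio.h>
	#include <stdint.h>
	#include <limits.h>

	#define OFFSET_MAX LLONG_MAX	/* stand-in for the kernel's OFFSET_MAX (2^63 - 1) */

	static uint64_t flock_len_old(long long start, long long end)
	{
		return end == OFFSET_MAX ? 0 : end - start + 1;
	}

	static uint64_t flock_len_new(long long start, long long end)
	{
		return (uint64_t)end - start + 1;
	}

	int main(void)
	{
		/* a whole-file lock: fl_start = 0, fl_end = OFFSET_MAX */
		printf("old: %llu\n", (unsigned long long)flock_len_old(0, OFFSET_MAX)); /* 0 */
		printf("new: %llu\n", (unsigned long long)flock_len_new(0, OFFSET_MAX)); /* 2^63 */
		return 0;
	}
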
-1
fs/cifs/cifsproto.h
··· 597 597 struct cifs_aio_ctx *cifs_aio_ctx_alloc(void); 598 598 void cifs_aio_ctx_release(struct kref *refcount); 599 599 int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw); 600 - void smb2_cached_lease_break(struct work_struct *work); 601 600 602 601 int cifs_alloc_hash(const char *name, struct crypto_shash **shash, 603 602 struct sdesc **sdesc);
+2
fs/cifs/connect.c
··· 2681 2681 return 0; 2682 2682 if (old->ctx->acdirmax != new->ctx->acdirmax) 2683 2683 return 0; 2684 + if (old->ctx->closetimeo != new->ctx->closetimeo) 2685 + return 0; 2684 2686 2685 2687 return 1; 2686 2688 }
+8 -31
fs/cifs/file.c
··· 34 34 #include "smbdirect.h" 35 35 #include "fs_context.h" 36 36 #include "cifs_ioctl.h" 37 + #include "cached_dir.h" 37 38 38 39 /* 39 40 * Mark as invalid, all open files on tree connections since they ··· 65 64 } 66 65 spin_unlock(&tcon->open_file_lock); 67 66 68 - mutex_lock(&tcon->crfid.fid_mutex); 69 - tcon->crfid.is_valid = false; 70 - /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ 71 - close_cached_dir_lease_locked(&tcon->crfid); 72 - memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid)); 73 - mutex_unlock(&tcon->crfid.fid_mutex); 74 - 67 + invalidate_all_cached_dirs(tcon); 75 68 spin_lock(&tcon->tc_lock); 76 69 if (tcon->status == TID_IN_FILES_INVALIDATE) 77 70 tcon->status = TID_NEED_TCON; ··· 964 969 * So, Increase the ref count to avoid use-after-free. 965 970 */ 966 971 if (!mod_delayed_work(deferredclose_wq, 967 - &cfile->deferred, cifs_sb->ctx->acregmax)) 972 + &cfile->deferred, cifs_sb->ctx->closetimeo)) 968 973 cifsFileInfo_get(cfile); 969 974 } else { 970 975 /* Deferred close for files */ 971 976 queue_delayed_work(deferredclose_wq, 972 - &cfile->deferred, cifs_sb->ctx->acregmax); 977 + &cfile->deferred, cifs_sb->ctx->closetimeo); 973 978 cfile->deferred_close_scheduled = true; 974 979 spin_unlock(&cinode->deferred_lock); 975 980 return 0; ··· 1931 1936 rc = -EACCES; 1932 1937 xid = get_xid(); 1933 1938 1934 - cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n", 1935 - cmd, flock->fl_flags, flock->fl_type, 1936 - flock->fl_start, flock->fl_end); 1939 + cifs_dbg(FYI, "%s: %pD2 cmd=0x%x type=0x%x flags=0x%x r=%lld:%lld\n", __func__, file, cmd, 1940 + flock->fl_flags, flock->fl_type, (long long)flock->fl_start, 1941 + (long long)flock->fl_end); 1937 1942 1938 1943 cfile = (struct cifsFileInfo *)file->private_data; 1939 1944 tcon = tlink_tcon(cfile->tlink); ··· 5059 5064 struct TCP_Server_Info *server = tcon->ses->server; 5060 5065 int rc = 0; 5061 5066 bool purge_cache = false; 5062 - bool is_deferred = false; 5063 - struct cifs_deferred_close *dclose; 5064 5067 5065 5068 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, 5066 5069 TASK_UNINTERRUPTIBLE); ··· 5095 5102 5096 5103 oplock_break_ack: 5097 5104 /* 5098 - * When oplock break is received and there are no active 5099 - * file handles but cached, then schedule deferred close immediately. 5100 - * So, new open will not use cached handle. 5101 - */ 5102 - spin_lock(&CIFS_I(inode)->deferred_lock); 5103 - is_deferred = cifs_is_deferred_close(cfile, &dclose); 5104 - spin_unlock(&CIFS_I(inode)->deferred_lock); 5105 - if (is_deferred && 5106 - cfile->deferred_close_scheduled && 5107 - delayed_work_pending(&cfile->deferred)) { 5108 - if (cancel_delayed_work(&cfile->deferred)) { 5109 - _cifsFileInfo_put(cfile, false, false); 5110 - goto oplock_break_done; 5111 - } 5112 - } 5113 - /* 5114 5105 * releasing stale oplock after recent reconnect of smb session using 5115 5106 * a now incorrect file handle is not a data integrity issue but do 5116 5107 * not bother sending an oplock release if session to server still is ··· 5105 5128 cinode); 5106 5129 cifs_dbg(FYI, "Oplock release rc = %d\n", rc); 5107 5130 } 5108 - oplock_break_done: 5131 + 5109 5132 _cifsFileInfo_put(cfile, false /* do not wait for ourself */, false); 5110 5133 cifs_done_oplock_break(cinode); 5111 5134 }
+9
fs/cifs/fs_context.c
··· 147 147 fsparam_u32("actimeo", Opt_actimeo), 148 148 fsparam_u32("acdirmax", Opt_acdirmax), 149 149 fsparam_u32("acregmax", Opt_acregmax), 150 + fsparam_u32("closetimeo", Opt_closetimeo), 150 151 fsparam_u32("echo_interval", Opt_echo_interval), 151 152 fsparam_u32("max_credits", Opt_max_credits), 152 153 fsparam_u32("handletimeout", Opt_handletimeout), ··· 1075 1074 } 1076 1075 ctx->acdirmax = ctx->acregmax = HZ * result.uint_32; 1077 1076 break; 1077 + case Opt_closetimeo: 1078 + ctx->closetimeo = HZ * result.uint_32; 1079 + if (ctx->closetimeo > SMB3_MAX_DCLOSETIMEO) { 1080 + cifs_errorf(fc, "closetimeo too large\n"); 1081 + goto cifs_parse_mount_err; 1082 + } 1083 + break; 1078 1084 case Opt_echo_interval: 1079 1085 ctx->echo_interval = result.uint_32; 1080 1086 break; ··· 1529 1521 1530 1522 ctx->acregmax = CIFS_DEF_ACTIMEO; 1531 1523 ctx->acdirmax = CIFS_DEF_ACTIMEO; 1524 + ctx->closetimeo = SMB3_DEF_DCLOSETIMEO; 1532 1525 1533 1526 /* Most clients set timeout to 0, allows server to use its default */ 1534 1527 ctx->handle_timeout = 0; /* See MS-SMB2 spec section 2.2.14.2.12 */
+8
fs/cifs/fs_context.h
··· 125 125 Opt_actimeo, 126 126 Opt_acdirmax, 127 127 Opt_acregmax, 128 + Opt_closetimeo, 128 129 Opt_echo_interval, 129 130 Opt_max_credits, 130 131 Opt_snapshot, ··· 248 247 /* attribute cache timemout for files and directories in jiffies */ 249 248 unsigned long acregmax; 250 249 unsigned long acdirmax; 250 + /* timeout for deferred close of files in jiffies */ 251 + unsigned long closetimeo; 251 252 struct smb_version_operations *ops; 252 253 struct smb_version_values *vals; 253 254 char *prepath; ··· 282 279 extern int smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx); 283 280 extern void smb3_update_mnt_flags(struct cifs_sb_info *cifs_sb); 284 281 282 + /* 283 + * max deferred close timeout (jiffies) - 2^30 284 + */ 285 + #define SMB3_MAX_DCLOSETIMEO (1 << 30) 286 + #define SMB3_DEF_DCLOSETIMEO (5 * HZ) /* Can increase later, other clients use larger */ 285 287 #endif
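
Pieced together from the file.c and fs_context hunks above, here is a condensed sketch of how the new option takes effect; the helper function is hypothetical, the identifiers are the ones added by the diffs.

	/* illustrative only; assumes fs_context.h above */
	static int apply_closetimeo(struct smb3_fs_context *ctx, unsigned int secs)
	{
		unsigned long timeo = HZ * secs;	/* mount option is in seconds, stored in jiffies */

		if (timeo > SMB3_MAX_DCLOSETIMEO)	/* capped at 1 << 30 jiffies */
			return -EINVAL;
		ctx->closetimeo = timeo;		/* replaces the SMB3_DEF_DCLOSETIMEO (5 * HZ) default */
		return 0;
	}

	/*
	 * The deferred close path in cifs_close() then schedules the delayed work
	 * with cifs_sb->ctx->closetimeo instead of the acregmax attribute timeout.
	 */
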
-16
fs/cifs/fscache.h
··· 108 108 __cifs_readpage_to_fscache(inode, page); 109 109 } 110 110 111 - static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp) 112 - { 113 - if (PageFsCache(page)) { 114 - if (current_is_kswapd() || !(gfp & __GFP_FS)) 115 - return false; 116 - wait_on_page_fscache(page); 117 - fscache_note_page_release(cifs_inode_cookie(page->mapping->host)); 118 - } 119 - return true; 120 - } 121 - 122 111 #else /* CONFIG_CIFS_FSCACHE */ 123 112 static inline 124 113 void cifs_fscache_fill_coherency(struct inode *inode, ··· 142 153 143 154 static inline 144 155 void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {} 145 - 146 - static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp) 147 - { 148 - return true; /* May release page */ 149 - } 150 156 151 157 #endif /* CONFIG_CIFS_FSCACHE */ 152 158
+1
fs/cifs/inode.c
··· 25 25 #include "fscache.h" 26 26 #include "fs_context.h" 27 27 #include "cifs_ioctl.h" 28 + #include "cached_dir.h" 28 29 29 30 static void cifs_set_ops(struct inode *inode) 30 31 {
+9 -11
fs/cifs/misc.c
··· 23 23 #include "dns_resolve.h" 24 24 #endif 25 25 #include "fs_context.h" 26 + #include "cached_dir.h" 26 27 27 28 extern mempool_t *cifs_sm_req_poolp; 28 29 extern mempool_t *cifs_req_poolp; ··· 117 116 ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL); 118 117 if (!ret_buf) 119 118 return NULL; 120 - ret_buf->crfid.fid = kzalloc(sizeof(*ret_buf->crfid.fid), GFP_KERNEL); 121 - if (!ret_buf->crfid.fid) { 119 + ret_buf->cfid = init_cached_dir(); 120 + if (!ret_buf->cfid) { 122 121 kfree(ret_buf); 123 122 return NULL; 124 123 } 125 - INIT_LIST_HEAD(&ret_buf->crfid.dirents.entries); 126 - mutex_init(&ret_buf->crfid.dirents.de_mutex); 127 124 128 125 atomic_inc(&tconInfoAllocCount); 129 126 ret_buf->status = TID_NEW; ··· 130 131 INIT_LIST_HEAD(&ret_buf->openFileList); 131 132 INIT_LIST_HEAD(&ret_buf->tcon_list); 132 133 spin_lock_init(&ret_buf->open_file_lock); 133 - mutex_init(&ret_buf->crfid.fid_mutex); 134 134 spin_lock_init(&ret_buf->stat_lock); 135 135 atomic_set(&ret_buf->num_local_opens, 0); 136 136 atomic_set(&ret_buf->num_remote_opens, 0); ··· 138 140 } 139 141 140 142 void 141 - tconInfoFree(struct cifs_tcon *buf_to_free) 143 + tconInfoFree(struct cifs_tcon *tcon) 142 144 { 143 - if (buf_to_free == NULL) { 145 + if (tcon == NULL) { 144 146 cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n"); 145 147 return; 146 148 } 149 + free_cached_dir(tcon); 147 150 atomic_dec(&tconInfoAllocCount); 148 - kfree(buf_to_free->nativeFileSystem); 149 - kfree_sensitive(buf_to_free->password); 150 - kfree(buf_to_free->crfid.fid); 151 - kfree(buf_to_free); 151 + kfree(tcon->nativeFileSystem); 152 + kfree_sensitive(tcon->password); 153 + kfree(tcon); 152 154 } 153 155 154 156 struct smb_hdr *
+3 -2
fs/cifs/readdir.c
··· 21 21 #include "cifsfs.h" 22 22 #include "smb2proto.h" 23 23 #include "fs_context.h" 24 + #include "cached_dir.h" 24 25 25 26 /* 26 27 * To be safe - for UCS to UTF-8 with strings loaded with the rare long ··· 1072 1071 tcon = tlink_tcon(cifsFile->tlink); 1073 1072 } 1074 1073 1075 - rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid); 1074 + rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); 1076 1075 cifs_put_tlink(tlink); 1077 1076 if (rc) 1078 1077 goto cache_not_found; ··· 1143 1142 tcon = tlink_tcon(cifsFile->tlink); 1144 1143 rc = find_cifs_entry(xid, tcon, ctx->pos, file, full_path, 1145 1144 &current_entry, &num_to_fill); 1146 - open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid); 1145 + open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); 1147 1146 if (rc) { 1148 1147 cifs_dbg(FYI, "fce error %d\n", rc); 1149 1148 goto rddir2_exit;
+6 -5
fs/cifs/smb2inode.c
··· 23 23 #include "smb2glob.h" 24 24 #include "smb2pdu.h" 25 25 #include "smb2proto.h" 26 + #include "cached_dir.h" 26 27 27 28 static void 28 29 free_set_inf_compound(struct smb_rqst *rqst) ··· 516 515 if (strcmp(full_path, "")) 517 516 rc = -ENOENT; 518 517 else 519 - rc = open_cached_dir(xid, tcon, full_path, cifs_sb, &cfid); 518 + rc = open_cached_dir(xid, tcon, full_path, cifs_sb, false, &cfid); 520 519 /* If it is a root and its handle is cached then use it */ 521 520 if (!rc) { 522 - if (tcon->crfid.file_all_info_is_valid) { 521 + if (cfid->file_all_info_is_valid) { 523 522 move_smb2_info_to_cifs(data, 524 - &tcon->crfid.file_all_info); 523 + &cfid->file_all_info); 525 524 } else { 526 525 rc = SMB2_query_info(xid, tcon, 527 - cfid->fid->persistent_fid, 528 - cfid->fid->volatile_fid, smb2_data); 526 + cfid->fid.persistent_fid, 527 + cfid->fid.volatile_fid, smb2_data); 529 528 if (!rc) 530 529 move_smb2_info_to_cifs(data, smb2_data); 531 530 }
+2 -9
fs/cifs/smb2misc.c
··· 16 16 #include "smb2status.h" 17 17 #include "smb2glob.h" 18 18 #include "nterr.h" 19 + #include "cached_dir.h" 19 20 20 21 static int 21 22 check_smb2_hdr(struct smb2_hdr *shdr, __u64 mid) ··· 649 648 } 650 649 spin_unlock(&tcon->open_file_lock); 651 650 652 - if (tcon->crfid.is_valid && 653 - !memcmp(rsp->LeaseKey, 654 - tcon->crfid.fid->lease_key, 655 - SMB2_LEASE_KEY_SIZE)) { 656 - tcon->crfid.time = 0; 657 - INIT_WORK(&tcon->crfid.lease_break, 658 - smb2_cached_lease_break); 659 - queue_work(cifsiod_wq, 660 - &tcon->crfid.lease_break); 651 + if (cached_dir_lease_break(tcon, rsp->LeaseKey)) { 661 652 spin_unlock(&cifs_tcp_ses_lock); 662 653 return true; 663 654 }
+19 -301
fs/cifs/smb2ops.c
··· 27 27 #include "smbdirect.h" 28 28 #include "fscache.h" 29 29 #include "fs_context.h" 30 + #include "cached_dir.h" 30 31 31 32 /* Change credits for different ops and return the total number of credits */ 32 33 static int ··· 703 702 } 704 703 705 704 static void 706 - smb2_close_cached_fid(struct kref *ref) 707 - { 708 - struct cached_fid *cfid = container_of(ref, struct cached_fid, 709 - refcount); 710 - struct cached_dirent *dirent, *q; 711 - 712 - if (cfid->is_valid) { 713 - cifs_dbg(FYI, "clear cached root file handle\n"); 714 - SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid, 715 - cfid->fid->volatile_fid); 716 - } 717 - 718 - /* 719 - * We only check validity above to send SMB2_close, 720 - * but we still need to invalidate these entries 721 - * when this function is called 722 - */ 723 - cfid->is_valid = false; 724 - cfid->file_all_info_is_valid = false; 725 - cfid->has_lease = false; 726 - if (cfid->dentry) { 727 - dput(cfid->dentry); 728 - cfid->dentry = NULL; 729 - } 730 - /* 731 - * Delete all cached dirent names 732 - */ 733 - mutex_lock(&cfid->dirents.de_mutex); 734 - list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) { 735 - list_del(&dirent->entry); 736 - kfree(dirent->name); 737 - kfree(dirent); 738 - } 739 - cfid->dirents.is_valid = 0; 740 - cfid->dirents.is_failed = 0; 741 - cfid->dirents.ctx = NULL; 742 - cfid->dirents.pos = 0; 743 - mutex_unlock(&cfid->dirents.de_mutex); 744 - 745 - } 746 - 747 - void close_cached_dir(struct cached_fid *cfid) 748 - { 749 - mutex_lock(&cfid->fid_mutex); 750 - kref_put(&cfid->refcount, smb2_close_cached_fid); 751 - mutex_unlock(&cfid->fid_mutex); 752 - } 753 - 754 - void close_cached_dir_lease_locked(struct cached_fid *cfid) 755 - { 756 - if (cfid->has_lease) { 757 - cfid->has_lease = false; 758 - kref_put(&cfid->refcount, smb2_close_cached_fid); 759 - } 760 - } 761 - 762 - void close_cached_dir_lease(struct cached_fid *cfid) 763 - { 764 - mutex_lock(&cfid->fid_mutex); 765 - close_cached_dir_lease_locked(cfid); 766 - mutex_unlock(&cfid->fid_mutex); 767 - } 768 - 769 - void 770 - smb2_cached_lease_break(struct work_struct *work) 771 - { 772 - struct cached_fid *cfid = container_of(work, 773 - struct cached_fid, lease_break); 774 - 775 - close_cached_dir_lease(cfid); 776 - } 777 - 778 - /* 779 - * Open the and cache a directory handle. 780 - * Only supported for the root handle. 781 - * If error then *cfid is not initialized. 
782 - */ 783 - int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, 784 - const char *path, 785 - struct cifs_sb_info *cifs_sb, 786 - struct cached_fid **cfid) 787 - { 788 - struct cifs_ses *ses; 789 - struct TCP_Server_Info *server; 790 - struct cifs_open_parms oparms; 791 - struct smb2_create_rsp *o_rsp = NULL; 792 - struct smb2_query_info_rsp *qi_rsp = NULL; 793 - int resp_buftype[2]; 794 - struct smb_rqst rqst[2]; 795 - struct kvec rsp_iov[2]; 796 - struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; 797 - struct kvec qi_iov[1]; 798 - int rc, flags = 0; 799 - __le16 utf16_path = 0; /* Null - since an open of top of share */ 800 - u8 oplock = SMB2_OPLOCK_LEVEL_II; 801 - struct cifs_fid *pfid; 802 - struct dentry *dentry; 803 - 804 - if (tcon == NULL || tcon->nohandlecache || 805 - is_smb1_server(tcon->ses->server)) 806 - return -ENOTSUPP; 807 - 808 - ses = tcon->ses; 809 - server = ses->server; 810 - 811 - if (cifs_sb->root == NULL) 812 - return -ENOENT; 813 - 814 - if (strlen(path)) 815 - return -ENOENT; 816 - 817 - dentry = cifs_sb->root; 818 - 819 - mutex_lock(&tcon->crfid.fid_mutex); 820 - if (tcon->crfid.is_valid) { 821 - cifs_dbg(FYI, "found a cached root file handle\n"); 822 - *cfid = &tcon->crfid; 823 - kref_get(&tcon->crfid.refcount); 824 - mutex_unlock(&tcon->crfid.fid_mutex); 825 - return 0; 826 - } 827 - 828 - /* 829 - * We do not hold the lock for the open because in case 830 - * SMB2_open needs to reconnect, it will end up calling 831 - * cifs_mark_open_files_invalid() which takes the lock again 832 - * thus causing a deadlock 833 - */ 834 - 835 - mutex_unlock(&tcon->crfid.fid_mutex); 836 - 837 - if (smb3_encryption_required(tcon)) 838 - flags |= CIFS_TRANSFORM_REQ; 839 - 840 - if (!server->ops->new_lease_key) 841 - return -EIO; 842 - 843 - pfid = tcon->crfid.fid; 844 - server->ops->new_lease_key(pfid); 845 - 846 - memset(rqst, 0, sizeof(rqst)); 847 - resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER; 848 - memset(rsp_iov, 0, sizeof(rsp_iov)); 849 - 850 - /* Open */ 851 - memset(&open_iov, 0, sizeof(open_iov)); 852 - rqst[0].rq_iov = open_iov; 853 - rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE; 854 - 855 - oparms.tcon = tcon; 856 - oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE); 857 - oparms.desired_access = FILE_READ_ATTRIBUTES; 858 - oparms.disposition = FILE_OPEN; 859 - oparms.fid = pfid; 860 - oparms.reconnect = false; 861 - 862 - rc = SMB2_open_init(tcon, server, 863 - &rqst[0], &oplock, &oparms, &utf16_path); 864 - if (rc) 865 - goto oshr_free; 866 - smb2_set_next_command(tcon, &rqst[0]); 867 - 868 - memset(&qi_iov, 0, sizeof(qi_iov)); 869 - rqst[1].rq_iov = qi_iov; 870 - rqst[1].rq_nvec = 1; 871 - 872 - rc = SMB2_query_info_init(tcon, server, 873 - &rqst[1], COMPOUND_FID, 874 - COMPOUND_FID, FILE_ALL_INFORMATION, 875 - SMB2_O_INFO_FILE, 0, 876 - sizeof(struct smb2_file_all_info) + 877 - PATH_MAX * 2, 0, NULL); 878 - if (rc) 879 - goto oshr_free; 880 - 881 - smb2_set_related(&rqst[1]); 882 - 883 - rc = compound_send_recv(xid, ses, server, 884 - flags, 2, rqst, 885 - resp_buftype, rsp_iov); 886 - mutex_lock(&tcon->crfid.fid_mutex); 887 - 888 - /* 889 - * Now we need to check again as the cached root might have 890 - * been successfully re-opened from a concurrent process 891 - */ 892 - 893 - if (tcon->crfid.is_valid) { 894 - /* work was already done */ 895 - 896 - /* stash fids for close() later */ 897 - struct cifs_fid fid = { 898 - .persistent_fid = pfid->persistent_fid, 899 - .volatile_fid = pfid->volatile_fid, 900 - }; 901 - 902 - /* 903 - * 
caller expects this func to set the fid in crfid to valid 904 - * cached root, so increment the refcount. 905 - */ 906 - kref_get(&tcon->crfid.refcount); 907 - 908 - mutex_unlock(&tcon->crfid.fid_mutex); 909 - 910 - if (rc == 0) { 911 - /* close extra handle outside of crit sec */ 912 - SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 913 - } 914 - rc = 0; 915 - goto oshr_free; 916 - } 917 - 918 - /* Cached root is still invalid, continue normaly */ 919 - 920 - if (rc) { 921 - if (rc == -EREMCHG) { 922 - tcon->need_reconnect = true; 923 - pr_warn_once("server share %s deleted\n", 924 - tcon->treeName); 925 - } 926 - goto oshr_exit; 927 - } 928 - 929 - atomic_inc(&tcon->num_remote_opens); 930 - 931 - o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base; 932 - oparms.fid->persistent_fid = o_rsp->PersistentFileId; 933 - oparms.fid->volatile_fid = o_rsp->VolatileFileId; 934 - #ifdef CONFIG_CIFS_DEBUG2 935 - oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId); 936 - #endif /* CIFS_DEBUG2 */ 937 - 938 - tcon->crfid.tcon = tcon; 939 - tcon->crfid.is_valid = true; 940 - tcon->crfid.dentry = dentry; 941 - dget(dentry); 942 - kref_init(&tcon->crfid.refcount); 943 - 944 - /* BB TBD check to see if oplock level check can be removed below */ 945 - if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) { 946 - /* 947 - * See commit 2f94a3125b87. Increment the refcount when we 948 - * get a lease for root, release it if lease break occurs 949 - */ 950 - kref_get(&tcon->crfid.refcount); 951 - tcon->crfid.has_lease = true; 952 - smb2_parse_contexts(server, o_rsp, 953 - &oparms.fid->epoch, 954 - oparms.fid->lease_key, &oplock, 955 - NULL, NULL); 956 - } else 957 - goto oshr_exit; 958 - 959 - qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; 960 - if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) 961 - goto oshr_exit; 962 - if (!smb2_validate_and_copy_iov( 963 - le16_to_cpu(qi_rsp->OutputBufferOffset), 964 - sizeof(struct smb2_file_all_info), 965 - &rsp_iov[1], sizeof(struct smb2_file_all_info), 966 - (char *)&tcon->crfid.file_all_info)) 967 - tcon->crfid.file_all_info_is_valid = true; 968 - tcon->crfid.time = jiffies; 969 - 970 - 971 - oshr_exit: 972 - mutex_unlock(&tcon->crfid.fid_mutex); 973 - oshr_free: 974 - SMB2_open_free(&rqst[0]); 975 - SMB2_query_info_free(&rqst[1]); 976 - free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); 977 - free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); 978 - if (rc == 0) 979 - *cfid = &tcon->crfid; 980 - return rc; 981 - } 982 - 983 - int open_cached_dir_by_dentry(struct cifs_tcon *tcon, 984 - struct dentry *dentry, 985 - struct cached_fid **cfid) 986 - { 987 - mutex_lock(&tcon->crfid.fid_mutex); 988 - if (tcon->crfid.dentry == dentry) { 989 - cifs_dbg(FYI, "found a cached root file handle by dentry\n"); 990 - *cfid = &tcon->crfid; 991 - kref_get(&tcon->crfid.refcount); 992 - mutex_unlock(&tcon->crfid.fid_mutex); 993 - return 0; 994 - } 995 - mutex_unlock(&tcon->crfid.fid_mutex); 996 - return -ENOENT; 997 - } 998 - 999 - static void 1000 705 smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon, 1001 706 struct cifs_sb_info *cifs_sb) 1002 707 { ··· 720 1013 oparms.fid = &fid; 721 1014 oparms.reconnect = false; 722 1015 723 - rc = open_cached_dir(xid, tcon, "", cifs_sb, &cfid); 1016 + rc = open_cached_dir(xid, tcon, "", cifs_sb, false, &cfid); 724 1017 if (rc == 0) 725 - memcpy(&fid, cfid->fid, sizeof(struct cifs_fid)); 1018 + memcpy(&fid, &cfid->fid, sizeof(struct cifs_fid)); 726 1019 else 727 1020 rc = 
SMB2_open(xid, &oparms, &srch_path, &oplock, NULL, NULL, 728 1021 NULL, NULL); ··· 783 1076 __u8 oplock = SMB2_OPLOCK_LEVEL_NONE; 784 1077 struct cifs_open_parms oparms; 785 1078 struct cifs_fid fid; 1079 + struct cached_fid *cfid; 786 1080 787 - if ((*full_path == 0) && tcon->crfid.is_valid) 788 - return 0; 1081 + rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid); 1082 + if (!rc) { 1083 + if (cfid->is_valid) { 1084 + close_cached_dir(cfid); 1085 + return 0; 1086 + } 1087 + close_cached_dir(cfid); 1088 + } 789 1089 790 1090 utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb); 791 1091 if (!utf16_path) ··· 2437 2723 resp_buftype[0] = resp_buftype[1] = resp_buftype[2] = CIFS_NO_BUFFER; 2438 2724 memset(rsp_iov, 0, sizeof(rsp_iov)); 2439 2725 2726 + /* 2727 + * We can only call this for things we know are directories. 2728 + */ 2440 2729 if (!strcmp(path, "")) 2441 - open_cached_dir(xid, tcon, path, cifs_sb, &cfid); /* cfid null if open dir failed */ 2730 + open_cached_dir(xid, tcon, path, cifs_sb, false, 2731 + &cfid); /* cfid null if open dir failed */ 2442 2732 2443 2733 memset(&open_iov, 0, sizeof(open_iov)); 2444 2734 rqst[0].rq_iov = open_iov; ··· 2468 2750 if (cfid) { 2469 2751 rc = SMB2_query_info_init(tcon, server, 2470 2752 &rqst[1], 2471 - cfid->fid->persistent_fid, 2472 - cfid->fid->volatile_fid, 2753 + cfid->fid.persistent_fid, 2754 + cfid->fid.volatile_fid, 2473 2755 class, type, 0, 2474 2756 output_len, 0, 2475 2757 NULL);
+2 -1
fs/cifs/smb2pdu.c
··· 39 39 #ifdef CONFIG_CIFS_DFS_UPCALL 40 40 #include "dfs_cache.h" 41 41 #endif 42 + #include "cached_dir.h" 42 43 43 44 /* 44 45 * The following table defines the expected "StructureSize" of SMB2 requests ··· 1979 1978 } 1980 1979 spin_unlock(&ses->chan_lock); 1981 1980 1982 - close_cached_dir_lease(&tcon->crfid); 1981 + invalidate_all_cached_dirs(tcon); 1983 1982 1984 1983 rc = smb2_plain_req_init(SMB2_TREE_DISCONNECT, tcon, ses->server, 1985 1984 (void **) &req,
-10
fs/cifs/smb2proto.h
··· 54 54 extern int smb3_handle_read_data(struct TCP_Server_Info *server, 55 55 struct mid_q_entry *mid); 56 56 57 - extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon, 58 - const char *path, 59 - struct cifs_sb_info *cifs_sb, 60 - struct cached_fid **cfid); 61 - extern int open_cached_dir_by_dentry(struct cifs_tcon *tcon, 62 - struct dentry *dentry, 63 - struct cached_fid **cfid); 64 - extern void close_cached_dir(struct cached_fid *cfid); 65 - extern void close_cached_dir_lease(struct cached_fid *cfid); 66 - extern void close_cached_dir_lease_locked(struct cached_fid *cfid); 67 57 extern void move_smb2_info_to_cifs(FILE_ALL_INFO *dst, 68 58 struct smb2_file_all_info *src); 69 59 extern int smb2_query_reparse_tag(const unsigned int xid, struct cifs_tcon *tcon,