Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: enable caching of directories for which a lease is held

This expands the directory caching to now cache an open handle for all
directories (up to a maximum) and not just the root directory.

In this patch, locking and refcounting is intended to work as so:

The main function to get a reference to a cached handle is
find_or_create_cached_dir() called from open_cached_dir()
These functions are protected under the cfid_list_lock spin-lock
to make sure we do not race creating new references for cached dirs
with deletion of expired ones.

A successful open_cached_dir() will take out 2 references to the cfid if
this was the very first and successful call to open the directory and
it acquired a lease from the server.
One reference is for the lease and the other is for the cfid that we
return. This lease reference is tracked by cfid->has_lease.
If the directory already has a handle with an active lease, then we just
take out one new reference for the cfid and return it.
It can happen that we have a thread that tries to open a cached directory
where we have a cfid already but we do not, yet, have a working lease. In
this case we will just return NULL, and thus the caller will fall back to
the case when no handle was available.

In this model the total number of references we have on a cfid is
1 while the handle is open and we have a lease, and one additional
reference for each open instance of the cfid.

Once we get a lease break (cached_dir_lease_break()) we remove the
cfid from the list under the spinlock. This prevents any new threads
from using it, and we also call smb2_cached_lease_break() via the work_queue
in order to drop the reference we got for the lease (we drop it outside
of the spin-lock.)
Anytime a thread calls close_cached_dir() we also drop a reference to the
cfid.
When the last reference to the cfid is released smb2_close_cached_fid()
will be invoked which will drop the reference to the dentry we held for
this cfid and it will also, if the handle is open/has a lease,
call SMB2_close() to close the handle on the server.

Two events require special handling:
invalidate_all_cached_dirs() this function is called from SMB2_tdis()
and cifs_mark_open_files_invalid().
In both cases the tcon is either gone already or will be shortly so
we do not need to actually close the handles. They will be dropped
server side as part of the tcon dropping.
But we have to be careful about a potential race with a concurrent
lease break, so we need to take out additional references to prevent the
cfid from being freed while we are still referencing it.

free_cached_dirs() which is called from tconInfoFree().
This is called quite late in the umount process so there should no longer
be any open handles or files and we can just free all the remaining data.

Signed-off-by: Ronnie Sahlberg <lsahlber@redhat.com>
Signed-off-by: Steve French <stfrench@microsoft.com>

authored by

Ronnie Sahlberg and committed by
Steve French
ebe98f14 9ee2afe5

+261 -191
+244 -180
fs/cifs/cached_dir.c
··· 11 11 #include "smb2proto.h" 12 12 #include "cached_dir.h" 13 13 14 - struct cached_fid *init_cached_dir(const char *path); 14 + static struct cached_fid *init_cached_dir(const char *path); 15 + static void free_cached_dir(struct cached_fid *cfid); 16 + 17 + static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, 18 + const char *path, 19 + bool lookup_only) 20 + { 21 + struct cached_fid *cfid; 22 + 23 + spin_lock(&cfids->cfid_list_lock); 24 + list_for_each_entry(cfid, &cfids->entries, entry) { 25 + if (!strcmp(cfid->path, path)) { 26 + /* 27 + * If it doesn't have a lease it is either not yet 28 + * fully cached or it may be in the process of 29 + * being deleted due to a lease break. 30 + */ 31 + if (!cfid->has_lease) { 32 + spin_unlock(&cfids->cfid_list_lock); 33 + return NULL; 34 + } 35 + kref_get(&cfid->refcount); 36 + spin_unlock(&cfids->cfid_list_lock); 37 + return cfid; 38 + } 39 + } 40 + if (lookup_only) { 41 + spin_unlock(&cfids->cfid_list_lock); 42 + return NULL; 43 + } 44 + if (cfids->num_entries >= MAX_CACHED_FIDS) { 45 + spin_unlock(&cfids->cfid_list_lock); 46 + return NULL; 47 + } 48 + cfid = init_cached_dir(path); 49 + if (cfid == NULL) { 50 + spin_unlock(&cfids->cfid_list_lock); 51 + return NULL; 52 + } 53 + cfid->cfids = cfids; 54 + cfids->num_entries++; 55 + list_add(&cfid->entry, &cfids->entries); 56 + cfid->on_list = true; 57 + kref_get(&cfid->refcount); 58 + spin_unlock(&cfids->cfid_list_lock); 59 + return cfid; 60 + } 15 61 16 62 /* 17 63 * Open the and cache a directory handle. 
··· 79 33 struct kvec open_iov[SMB2_CREATE_IOV_SIZE]; 80 34 struct kvec qi_iov[1]; 81 35 int rc, flags = 0; 82 - __le16 utf16_path = 0; /* Null - since an open of top of share */ 36 + __le16 *utf16_path = NULL; 83 37 u8 oplock = SMB2_OPLOCK_LEVEL_II; 84 38 struct cifs_fid *pfid; 85 - struct dentry *dentry; 39 + struct dentry *dentry = NULL; 86 40 struct cached_fid *cfid; 41 + struct cached_fids *cfids; 87 42 88 - if (tcon == NULL || tcon->nohandlecache || 43 + 44 + if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache || 89 45 is_smb1_server(tcon->ses->server)) 90 46 return -EOPNOTSUPP; 91 47 92 48 ses = tcon->ses; 93 49 server = ses->server; 50 + cfids = tcon->cfids; 51 + 52 + if (!server->ops->new_lease_key) 53 + return -EIO; 94 54 95 55 if (cifs_sb->root == NULL) 96 56 return -ENOENT; 97 57 58 + /* 59 + * TODO: for better caching we need to find and use the dentry also 60 + * for non-root directories. 61 + */ 98 62 if (!path[0]) 99 63 dentry = cifs_sb->root; 100 - else 101 - return -ENOENT; 102 64 103 - cfid = tcon->cfids->cfid; 104 - if (cfid == NULL) { 105 - cfid = init_cached_dir(path); 106 - tcon->cfids->cfid = cfid; 107 - } 108 - if (cfid == NULL) 65 + utf16_path = cifs_convert_path_to_utf16(path, cifs_sb); 66 + if (!utf16_path) 109 67 return -ENOMEM; 110 68 111 - mutex_lock(&cfid->fid_mutex); 112 - if (cfid->is_valid) { 113 - cifs_dbg(FYI, "found a cached root file handle\n"); 69 + cfid = find_or_create_cached_dir(cfids, path, lookup_only); 70 + if (cfid == NULL) { 71 + kfree(utf16_path); 72 + return -ENOENT; 73 + } 74 + /* 75 + * At this point we either have a lease already and we can just 76 + * return it. If not we are guaranteed to be the only thread accessing 77 + * this cfid. 
78 + */ 79 + if (cfid->has_lease) { 114 80 *ret_cfid = cfid; 115 - kref_get(&cfid->refcount); 116 - mutex_unlock(&cfid->fid_mutex); 81 + kfree(utf16_path); 117 82 return 0; 118 83 } 119 84 120 85 /* 121 86 * We do not hold the lock for the open because in case 122 - * SMB2_open needs to reconnect, it will end up calling 123 - * cifs_mark_open_files_invalid() which takes the lock again 124 - * thus causing a deadlock 87 + * SMB2_open needs to reconnect. 88 + * This is safe because no other thread will be able to get a ref 89 + * to the cfid until we have finished opening the file and (possibly) 90 + * acquired a lease. 125 91 */ 126 - mutex_unlock(&cfid->fid_mutex); 127 - 128 - if (lookup_only) 129 - return -ENOENT; 130 - 131 92 if (smb3_encryption_required(tcon)) 132 93 flags |= CIFS_TRANSFORM_REQ; 133 - 134 - if (!server->ops->new_lease_key) 135 - return -EIO; 136 94 137 95 pfid = &cfid->fid; 138 96 server->ops->new_lease_key(pfid); ··· 158 108 oparms.reconnect = false; 159 109 160 110 rc = SMB2_open_init(tcon, server, 161 - &rqst[0], &oplock, &oparms, &utf16_path); 111 + &rqst[0], &oplock, &oparms, utf16_path); 162 112 if (rc) 163 113 goto oshr_free; 164 114 smb2_set_next_command(tcon, &rqst[0]); ··· 181 131 rc = compound_send_recv(xid, ses, server, 182 132 flags, 2, rqst, 183 133 resp_buftype, rsp_iov); 184 - mutex_lock(&cfid->fid_mutex); 185 - 186 - /* 187 - * Now we need to check again as the cached root might have 188 - * been successfully re-opened from a concurrent process 189 - */ 190 - 191 - if (cfid->is_valid) { 192 - /* work was already done */ 193 - 194 - /* stash fids for close() later */ 195 - struct cifs_fid fid = { 196 - .persistent_fid = pfid->persistent_fid, 197 - .volatile_fid = pfid->volatile_fid, 198 - }; 199 - 200 - /* 201 - * caller expects this func to set the fid in cfid to valid 202 - * cached root, so increment the refcount. 
203 - */ 204 - kref_get(&cfid->refcount); 205 - 206 - mutex_unlock(&cfid->fid_mutex); 207 - 208 - if (rc == 0) { 209 - /* close extra handle outside of crit sec */ 210 - SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid); 211 - } 212 - rc = 0; 213 - goto oshr_free; 214 - } 215 - 216 - /* Cached root is still invalid, continue normaly */ 217 - 218 134 if (rc) { 219 135 if (rc == -EREMCHG) { 220 136 tcon->need_reconnect = true; 221 137 pr_warn_once("server share %s deleted\n", 222 138 tcon->tree_name); 223 139 } 224 - goto oshr_exit; 140 + goto oshr_free; 225 141 } 226 142 227 143 atomic_inc(&tcon->num_remote_opens); ··· 200 184 #endif /* CIFS_DEBUG2 */ 201 185 202 186 cfid->tcon = tcon; 203 - cfid->is_valid = true; 204 - cfid->dentry = dentry; 205 - if (dentry) 187 + if (dentry) { 188 + cfid->dentry = dentry; 206 189 dget(dentry); 207 - kref_init(&cfid->refcount); 208 - 190 + } 209 191 /* BB TBD check to see if oplock level check can be removed below */ 210 - if (o_rsp->OplockLevel == SMB2_OPLOCK_LEVEL_LEASE) { 211 - /* 212 - * See commit 2f94a3125b87. 
Increment the refcount when we 213 - * get a lease for root, release it if lease break occurs 214 - */ 215 - kref_get(&cfid->refcount); 216 - cfid->has_lease = true; 217 - smb2_parse_contexts(server, o_rsp, 218 - &oparms.fid->epoch, 219 - oparms.fid->lease_key, &oplock, 220 - NULL, NULL); 221 - } else 222 - goto oshr_exit; 192 + if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) 193 + goto oshr_free; 194 + 195 + 196 + smb2_parse_contexts(server, o_rsp, 197 + &oparms.fid->epoch, 198 + oparms.fid->lease_key, &oplock, 199 + NULL, NULL); 223 200 224 201 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base; 225 202 if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) 226 - goto oshr_exit; 203 + goto oshr_free; 227 204 if (!smb2_validate_and_copy_iov( 228 205 le16_to_cpu(qi_rsp->OutputBufferOffset), 229 206 sizeof(struct smb2_file_all_info), 230 207 &rsp_iov[1], sizeof(struct smb2_file_all_info), 231 208 (char *)&cfid->file_all_info)) 232 209 cfid->file_all_info_is_valid = true; 233 - 234 210 cfid->time = jiffies; 211 + cfid->is_open = true; 212 + cfid->has_lease = true; 235 213 236 - oshr_exit: 237 - mutex_unlock(&cfid->fid_mutex); 238 214 oshr_free: 215 + kfree(utf16_path); 239 216 SMB2_open_free(&rqst[0]); 240 217 SMB2_query_info_free(&rqst[1]); 241 218 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); 242 219 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); 220 + spin_lock(&cfids->cfid_list_lock); 221 + if (!cfid->has_lease) { 222 + if (cfid->on_list) { 223 + list_del(&cfid->entry); 224 + cfid->on_list = false; 225 + cfids->num_entries--; 226 + } 227 + rc = -ENOENT; 228 + } 229 + spin_unlock(&cfids->cfid_list_lock); 230 + if (rc) { 231 + free_cached_dir(cfid); 232 + cfid = NULL; 233 + } 234 + 243 235 if (rc == 0) 244 236 *ret_cfid = cfid; 245 237 ··· 259 235 struct cached_fid **ret_cfid) 260 236 { 261 237 struct cached_fid *cfid; 238 + struct cached_fids *cfids = tcon->cfids; 262 239 263 - cfid = tcon->cfids->cfid; 264 - if 
(cfid == NULL) 240 + if (cfids == NULL) 265 241 return -ENOENT; 266 242 267 - mutex_lock(&cfid->fid_mutex); 268 - if (cfid->dentry == dentry) { 269 - cifs_dbg(FYI, "found a cached root file handle by dentry\n"); 270 - *ret_cfid = cfid; 271 - kref_get(&cfid->refcount); 272 - mutex_unlock(&cfid->fid_mutex); 273 - return 0; 243 + spin_lock(&cfids->cfid_list_lock); 244 + list_for_each_entry(cfid, &cfids->entries, entry) { 245 + if (dentry && cfid->dentry == dentry) { 246 + cifs_dbg(FYI, "found a cached root file handle by dentry\n"); 247 + kref_get(&cfid->refcount); 248 + *ret_cfid = cfid; 249 + spin_unlock(&cfids->cfid_list_lock); 250 + return 0; 251 + } 274 252 } 275 - mutex_unlock(&cfid->fid_mutex); 253 + spin_unlock(&cfids->cfid_list_lock); 276 254 return -ENOENT; 277 255 } 278 256 ··· 283 257 { 284 258 struct cached_fid *cfid = container_of(ref, struct cached_fid, 285 259 refcount); 286 - struct cached_dirent *dirent, *q; 287 260 288 - if (cfid->is_valid) { 289 - cifs_dbg(FYI, "clear cached root file handle\n"); 261 + spin_lock(&cfid->cfids->cfid_list_lock); 262 + if (cfid->on_list) { 263 + list_del(&cfid->entry); 264 + cfid->on_list = false; 265 + cfid->cfids->num_entries--; 266 + } 267 + spin_unlock(&cfid->cfids->cfid_list_lock); 268 + 269 + dput(cfid->dentry); 270 + cfid->dentry = NULL; 271 + 272 + if (cfid->is_open) { 290 273 SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid, 291 274 cfid->fid.volatile_fid); 292 275 } 293 276 294 - /* 295 - * We only check validity above to send SMB2_close, 296 - * but we still need to invalidate these entries 297 - * when this function is called 298 - */ 299 - cfid->is_valid = false; 300 - cfid->file_all_info_is_valid = false; 301 - cfid->has_lease = false; 302 - if (cfid->dentry) { 303 - dput(cfid->dentry); 304 - cfid->dentry = NULL; 305 - } 306 - /* 307 - * Delete all cached dirent names 308 - */ 309 - mutex_lock(&cfid->dirents.de_mutex); 310 - list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) { 311 - 
list_del(&dirent->entry); 312 - kfree(dirent->name); 313 - kfree(dirent); 314 - } 315 - cfid->dirents.is_valid = 0; 316 - cfid->dirents.is_failed = 0; 317 - cfid->dirents.ctx = NULL; 318 - cfid->dirents.pos = 0; 319 - mutex_unlock(&cfid->dirents.de_mutex); 320 - 277 + free_cached_dir(cfid); 321 278 } 322 279 323 280 void close_cached_dir(struct cached_fid *cfid) 324 281 { 325 - mutex_lock(&cfid->fid_mutex); 326 282 kref_put(&cfid->refcount, smb2_close_cached_fid); 327 - mutex_unlock(&cfid->fid_mutex); 328 - } 329 - 330 - void close_cached_dir_lease_locked(struct cached_fid *cfid) 331 - { 332 - if (cfid->has_lease) { 333 - cfid->has_lease = false; 334 - kref_put(&cfid->refcount, smb2_close_cached_fid); 335 - } 336 - } 337 - 338 - void close_cached_dir_lease(struct cached_fid *cfid) 339 - { 340 - mutex_lock(&cfid->fid_mutex); 341 - close_cached_dir_lease_locked(cfid); 342 - mutex_unlock(&cfid->fid_mutex); 343 283 } 344 284 345 285 /* ··· 318 326 struct cached_fid *cfid; 319 327 struct cifs_tcon *tcon; 320 328 struct tcon_link *tlink; 329 + struct cached_fids *cfids; 321 330 322 331 for (node = rb_first(root); node; node = rb_next(node)) { 323 332 tlink = rb_entry(node, struct tcon_link, tl_rbnode); 324 333 tcon = tlink_tcon(tlink); 325 334 if (IS_ERR(tcon)) 326 335 continue; 327 - cfid = tcon->cfids->cfid; 328 - if (cfid == NULL) 336 + cfids = tcon->cfids; 337 + if (cfids == NULL) 329 338 continue; 330 - mutex_lock(&cfid->fid_mutex); 331 - if (cfid->dentry) { 339 + list_for_each_entry(cfid, &cfids->entries, entry) { 332 340 dput(cfid->dentry); 333 341 cfid->dentry = NULL; 334 342 } 335 - mutex_unlock(&cfid->fid_mutex); 336 343 } 337 344 } 338 345 339 346 /* 340 - * Invalidate and close all cached dirs when a TCON has been reset 347 + * Invalidate all cached dirs when a TCON has been reset 341 348 * due to a session loss. 
342 349 */ 343 350 void invalidate_all_cached_dirs(struct cifs_tcon *tcon) 344 351 { 345 - struct cached_fid *cfid = tcon->cfids->cfid; 352 + struct cached_fids *cfids = tcon->cfids; 353 + struct cached_fid *cfid, *q; 354 + struct list_head entry; 346 355 347 - if (cfid == NULL) 348 - return; 356 + INIT_LIST_HEAD(&entry); 357 + spin_lock(&cfids->cfid_list_lock); 358 + list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { 359 + list_del(&cfid->entry); 360 + list_add(&cfid->entry, &entry); 361 + cfids->num_entries--; 362 + cfid->is_open = false; 363 + /* To prevent race with smb2_cached_lease_break() */ 364 + kref_get(&cfid->refcount); 365 + } 366 + spin_unlock(&cfids->cfid_list_lock); 349 367 350 - mutex_lock(&cfid->fid_mutex); 351 - cfid->is_valid = false; 352 - /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ 353 - close_cached_dir_lease_locked(cfid); 354 - memset(&cfid->fid, 0, sizeof(struct cifs_fid)); 355 - mutex_unlock(&cfid->fid_mutex); 368 + list_for_each_entry_safe(cfid, q, &entry, entry) { 369 + cfid->on_list = false; 370 + list_del(&cfid->entry); 371 + cancel_work_sync(&cfid->lease_break); 372 + if (cfid->has_lease) { 373 + /* 374 + * We lease was never cancelled from the server so we 375 + * need to drop the reference. 
376 + */ 377 + spin_lock(&cfids->cfid_list_lock); 378 + cfid->has_lease = false; 379 + spin_unlock(&cfids->cfid_list_lock); 380 + kref_put(&cfid->refcount, smb2_close_cached_fid); 381 + } 382 + /* Drop the extra reference opened above*/ 383 + kref_put(&cfid->refcount, smb2_close_cached_fid); 384 + } 356 385 } 357 386 358 387 static void ··· 382 369 struct cached_fid *cfid = container_of(work, 383 370 struct cached_fid, lease_break); 384 371 385 - close_cached_dir_lease(cfid); 372 + spin_lock(&cfid->cfids->cfid_list_lock); 373 + cfid->has_lease = false; 374 + spin_unlock(&cfid->cfids->cfid_list_lock); 375 + kref_put(&cfid->refcount, smb2_close_cached_fid); 386 376 } 387 377 388 378 int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]) 389 379 { 390 - struct cached_fid *cfid = tcon->cfids->cfid; 380 + struct cached_fids *cfids = tcon->cfids; 381 + struct cached_fid *cfid; 391 382 392 - if (cfid == NULL) 383 + if (cfids == NULL) 393 384 return false; 394 385 395 - if (cfid->is_valid && 396 - !memcmp(lease_key, 397 - cfid->fid.lease_key, 398 - SMB2_LEASE_KEY_SIZE)) { 399 - cfid->time = 0; 400 - INIT_WORK(&cfid->lease_break, 401 - smb2_cached_lease_break); 402 - queue_work(cifsiod_wq, 403 - &cfid->lease_break); 404 - return true; 386 + spin_lock(&cfids->cfid_list_lock); 387 + list_for_each_entry(cfid, &cfids->entries, entry) { 388 + if (cfid->has_lease && 389 + !memcmp(lease_key, 390 + cfid->fid.lease_key, 391 + SMB2_LEASE_KEY_SIZE)) { 392 + cfid->time = 0; 393 + /* 394 + * We found a lease remove it from the list 395 + * so no threads can access it. 
396 + */ 397 + list_del(&cfid->entry); 398 + cfid->on_list = false; 399 + cfids->num_entries--; 400 + 401 + queue_work(cifsiod_wq, 402 + &cfid->lease_break); 403 + spin_unlock(&cfids->cfid_list_lock); 404 + return true; 405 + } 405 406 } 407 + spin_unlock(&cfids->cfid_list_lock); 406 408 return false; 407 409 } 408 410 409 - struct cached_fid *init_cached_dir(const char *path) 411 + static struct cached_fid *init_cached_dir(const char *path) 410 412 { 411 413 struct cached_fid *cfid; 412 414 413 - cfid = kzalloc(sizeof(*cfid), GFP_KERNEL); 415 + cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC); 414 416 if (!cfid) 415 417 return NULL; 416 - cfid->path = kstrdup(path, GFP_KERNEL); 418 + cfid->path = kstrdup(path, GFP_ATOMIC); 417 419 if (!cfid->path) { 418 420 kfree(cfid); 419 421 return NULL; 420 422 } 421 423 424 + INIT_WORK(&cfid->lease_break, smb2_cached_lease_break); 425 + INIT_LIST_HEAD(&cfid->entry); 422 426 INIT_LIST_HEAD(&cfid->dirents.entries); 423 427 mutex_init(&cfid->dirents.de_mutex); 424 - mutex_init(&cfid->fid_mutex); 428 + spin_lock_init(&cfid->fid_lock); 429 + kref_init(&cfid->refcount); 425 430 return cfid; 426 431 } 427 432 428 - void free_cached_dir(struct cached_fid *cfid) 433 + static void free_cached_dir(struct cached_fid *cfid) 429 434 { 435 + struct cached_dirent *dirent, *q; 436 + 437 + dput(cfid->dentry); 438 + cfid->dentry = NULL; 439 + 440 + /* 441 + * Delete all cached dirent names 442 + */ 443 + list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) { 444 + list_del(&dirent->entry); 445 + kfree(dirent->name); 446 + kfree(dirent); 447 + } 448 + 430 449 kfree(cfid->path); 431 450 cfid->path = NULL; 432 451 kfree(cfid); ··· 471 426 cfids = kzalloc(sizeof(*cfids), GFP_KERNEL); 472 427 if (!cfids) 473 428 return NULL; 474 - mutex_init(&cfids->cfid_list_mutex); 429 + spin_lock_init(&cfids->cfid_list_lock); 430 + INIT_LIST_HEAD(&cfids->entries); 475 431 return cfids; 476 432 } 477 433 434 + /* 435 + * Called from tconInfoFree when we are 
tearing down the tcon. 436 + * There are no active users or open files/directories at this point. 437 + */ 478 438 void free_cached_dirs(struct cached_fids *cfids) 479 439 { 480 - if (cfids->cfid) { 481 - free_cached_dir(cfids->cfid); 482 - cfids->cfid = NULL; 440 + struct cached_fid *cfid, *q; 441 + struct list_head entry; 442 + 443 + INIT_LIST_HEAD(&entry); 444 + spin_lock(&cfids->cfid_list_lock); 445 + list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { 446 + cfid->on_list = false; 447 + cfid->is_open = false; 448 + list_del(&cfid->entry); 449 + list_add(&cfid->entry, &entry); 483 450 } 451 + spin_unlock(&cfids->cfid_list_lock); 452 + 453 + list_for_each_entry_safe(cfid, q, &entry, entry) { 454 + list_del(&cfid->entry); 455 + free_cached_dir(cfid); 456 + } 457 + 484 458 kfree(cfids); 485 459 }
+13 -7
fs/cifs/cached_dir.h
··· 31 31 }; 32 32 33 33 struct cached_fid { 34 + struct list_head entry; 35 + struct cached_fids *cfids; 34 36 const char *path; 35 - bool is_valid:1; /* Do we have a useable root fid */ 36 - bool file_all_info_is_valid:1; 37 37 bool has_lease:1; 38 + bool is_open:1; 39 + bool on_list:1; 40 + bool file_all_info_is_valid:1; 38 41 unsigned long time; /* jiffies of when lease was taken */ 39 42 struct kref refcount; 40 43 struct cifs_fid fid; 41 - struct mutex fid_mutex; 44 + spinlock_t fid_lock; 42 45 struct cifs_tcon *tcon; 43 46 struct dentry *dentry; 44 47 struct work_struct lease_break; ··· 49 46 struct cached_dirents dirents; 50 47 }; 51 48 49 + #define MAX_CACHED_FIDS 16 52 50 struct cached_fids { 53 - struct mutex cfid_list_mutex; 54 - struct cached_fid *cfid; 51 + /* Must be held when: 52 + * - accessing the cfids->entries list 53 + */ 54 + spinlock_t cfid_list_lock; 55 + int num_entries; 56 + struct list_head entries; 55 57 }; 56 58 57 59 extern struct cached_fids *init_cached_dirs(void); ··· 69 61 struct dentry *dentry, 70 62 struct cached_fid **cfid); 71 63 extern void close_cached_dir(struct cached_fid *cfid); 72 - extern void close_cached_dir_lease(struct cached_fid *cfid); 73 - extern void close_cached_dir_lease_locked(struct cached_fid *cfid); 74 64 extern void close_all_cached_dirs(struct cifs_sb_info *cifs_sb); 75 65 extern void invalidate_all_cached_dirs(struct cifs_tcon *tcon); 76 66 extern int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16]);
+3 -3
fs/cifs/inode.c
··· 2299 2299 return true; 2300 2300 2301 2301 if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) { 2302 - mutex_lock(&cfid->fid_mutex); 2302 + spin_lock(&cfid->fid_lock); 2303 2303 if (cfid->time && cifs_i->time > cfid->time) { 2304 - mutex_unlock(&cfid->fid_mutex); 2304 + spin_unlock(&cfid->fid_lock); 2305 2305 close_cached_dir(cfid); 2306 2306 return false; 2307 2307 } 2308 - mutex_unlock(&cfid->fid_mutex); 2308 + spin_unlock(&cfid->fid_lock); 2309 2309 close_cached_dir(cfid); 2310 2310 } 2311 2311 /*
+1 -1
fs/cifs/smb2ops.c
··· 801 801 802 802 rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid); 803 803 if (!rc) { 804 - if (cfid->is_valid) { 804 + if (cfid->has_lease) { 805 805 close_cached_dir(cfid); 806 806 return 0; 807 807 }