Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag '6.6-rc5-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6

Pull smb client fixes from Steve French:

- fix caching race with open_cached_dir and laundromat cleanup of
cached dirs (addresses a problem spotted in an xfstests run with
directory leases enabled)

- reduce excessive resource usage of laundromat threads

* tag '6.6-rc5-smb3-client-fixes' of git://git.samba.org/sfrench/cifs-2.6:
smb: client: prevent new fids from being removed by laundromat
smb: client: make laundromat a delayed worker

+70 -75
+69 -74
fs/smb/client/cached_dir.c
··· 15 15 static struct cached_fid *init_cached_dir(const char *path); 16 16 static void free_cached_dir(struct cached_fid *cfid); 17 17 static void smb2_close_cached_fid(struct kref *ref); 18 + static void cfids_laundromat_worker(struct work_struct *work); 18 19 19 20 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids, 20 21 const char *path, ··· 170 169 return -ENOENT; 171 170 } 172 171 /* 173 - * At this point we either have a lease already and we can just 174 - * return it. If not we are guaranteed to be the only thread accessing 175 - * this cfid. 172 + * Return cached fid if it has a lease. Otherwise, it is either a new 173 + * entry or laundromat worker removed it from @cfids->entries. Caller 174 + * will put last reference if the latter. 176 175 */ 176 + spin_lock(&cfids->cfid_list_lock); 177 177 if (cfid->has_lease) { 178 + spin_unlock(&cfids->cfid_list_lock); 178 179 *ret_cfid = cfid; 179 180 kfree(utf16_path); 180 181 return 0; 181 182 } 183 + spin_unlock(&cfids->cfid_list_lock); 182 184 183 185 /* 184 186 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up ··· 298 294 goto oshr_free; 299 295 } 300 296 } 297 + spin_lock(&cfids->cfid_list_lock); 301 298 cfid->dentry = dentry; 302 299 cfid->time = jiffies; 303 300 cfid->has_lease = true; 301 + spin_unlock(&cfids->cfid_list_lock); 304 302 305 303 oshr_free: 306 304 kfree(utf16_path); ··· 311 305 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base); 312 306 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base); 313 307 spin_lock(&cfids->cfid_list_lock); 314 - if (rc && !cfid->has_lease) { 315 - if (cfid->on_list) { 316 - list_del(&cfid->entry); 317 - cfid->on_list = false; 318 - cfids->num_entries--; 308 + if (!cfid->has_lease) { 309 + if (rc) { 310 + if (cfid->on_list) { 311 + list_del(&cfid->entry); 312 + cfid->on_list = false; 313 + cfids->num_entries--; 314 + } 315 + rc = -ENOENT; 316 + } else { 317 + /* 318 + * We are guaranteed to have two references at this 319 
+ * point. One for the caller and one for a potential 320 + * lease. Release the Lease-ref so that the directory 321 + * will be closed when the caller closes the cached 322 + * handle. 323 + */ 324 + spin_unlock(&cfids->cfid_list_lock); 325 + kref_put(&cfid->refcount, smb2_close_cached_fid); 326 + goto out; 319 327 } 320 - rc = -ENOENT; 321 328 } 322 329 spin_unlock(&cfids->cfid_list_lock); 323 - if (!rc && !cfid->has_lease) { 324 - /* 325 - * We are guaranteed to have two references at this point. 326 - * One for the caller and one for a potential lease. 327 - * Release the Lease-ref so that the directory will be closed 328 - * when the caller closes the cached handle. 329 - */ 330 - kref_put(&cfid->refcount, smb2_close_cached_fid); 331 - } 332 330 if (rc) { 333 331 if (cfid->is_open) 334 332 SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid, ··· 340 330 free_cached_dir(cfid); 341 331 cfid = NULL; 342 332 } 343 - 333 + out: 344 334 if (rc == 0) { 345 335 *ret_cfid = cfid; 346 336 atomic_inc(&tcon->num_remote_opens); ··· 582 572 kfree(cfid); 583 573 } 584 574 585 - static int 586 - cifs_cfids_laundromat_thread(void *p) 575 + static void cfids_laundromat_worker(struct work_struct *work) 587 576 { 588 - struct cached_fids *cfids = p; 577 + struct cached_fids *cfids; 589 578 struct cached_fid *cfid, *q; 590 - struct list_head entry; 579 + LIST_HEAD(entry); 591 580 592 - while (!kthread_should_stop()) { 593 - ssleep(1); 594 - INIT_LIST_HEAD(&entry); 595 - if (kthread_should_stop()) 596 - return 0; 597 - spin_lock(&cfids->cfid_list_lock); 598 - list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { 599 - if (time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) { 600 - list_del(&cfid->entry); 601 - list_add(&cfid->entry, &entry); 602 - cfids->num_entries--; 603 - } 604 - } 605 - spin_unlock(&cfids->cfid_list_lock); 581 + cfids = container_of(work, struct cached_fids, laundromat_work.work); 606 582 607 - list_for_each_entry_safe(cfid, q, &entry, entry) { 583 + 
spin_lock(&cfids->cfid_list_lock); 584 + list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { 585 + if (cfid->time && 586 + time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) { 608 587 cfid->on_list = false; 609 - list_del(&cfid->entry); 610 - /* 611 - * Cancel, and wait for the work to finish in 612 - * case we are racing with it. 613 - */ 614 - cancel_work_sync(&cfid->lease_break); 615 - if (cfid->has_lease) { 616 - /* 617 - * We lease has not yet been cancelled from 618 - * the server so we need to drop the reference. 619 - */ 620 - spin_lock(&cfids->cfid_list_lock); 621 - cfid->has_lease = false; 622 - spin_unlock(&cfids->cfid_list_lock); 623 - kref_put(&cfid->refcount, smb2_close_cached_fid); 624 - } 588 + list_move(&cfid->entry, &entry); 589 + cfids->num_entries--; 590 + /* To prevent race with smb2_cached_lease_break() */ 591 + kref_get(&cfid->refcount); 625 592 } 626 593 } 594 + spin_unlock(&cfids->cfid_list_lock); 627 595 628 - return 0; 596 + list_for_each_entry_safe(cfid, q, &entry, entry) { 597 + list_del(&cfid->entry); 598 + /* 599 + * Cancel and wait for the work to finish in case we are racing 600 + * with it. 601 + */ 602 + cancel_work_sync(&cfid->lease_break); 603 + if (cfid->has_lease) { 604 + /* 605 + * Our lease has not yet been cancelled from the server 606 + * so we need to drop the reference. 
607 + */ 608 + spin_lock(&cfids->cfid_list_lock); 609 + cfid->has_lease = false; 610 + spin_unlock(&cfids->cfid_list_lock); 611 + kref_put(&cfid->refcount, smb2_close_cached_fid); 612 + } 613 + /* Drop the extra reference opened above */ 614 + kref_put(&cfid->refcount, smb2_close_cached_fid); 615 + } 616 + queue_delayed_work(cifsiod_wq, &cfids->laundromat_work, 617 + dir_cache_timeout * HZ); 629 618 } 630 - 631 619 632 620 struct cached_fids *init_cached_dirs(void) 633 621 { ··· 637 629 spin_lock_init(&cfids->cfid_list_lock); 638 630 INIT_LIST_HEAD(&cfids->entries); 639 631 640 - /* 641 - * since we're in a cifs function already, we know that 642 - * this will succeed. No need for try_module_get(). 643 - */ 644 - __module_get(THIS_MODULE); 645 - cfids->laundromat = kthread_run(cifs_cfids_laundromat_thread, 646 - cfids, "cifsd-cfid-laundromat"); 647 - if (IS_ERR(cfids->laundromat)) { 648 - cifs_dbg(VFS, "Failed to start cfids laundromat thread.\n"); 649 - kfree(cfids); 650 - module_put(THIS_MODULE); 651 - return NULL; 652 - } 632 + INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker); 633 + queue_delayed_work(cifsiod_wq, &cfids->laundromat_work, 634 + dir_cache_timeout * HZ); 635 + 653 636 return cfids; 654 637 } 655 638 ··· 656 657 if (cfids == NULL) 657 658 return; 658 659 659 - if (cfids->laundromat) { 660 - kthread_stop(cfids->laundromat); 661 - cfids->laundromat = NULL; 662 - module_put(THIS_MODULE); 663 - } 660 + cancel_delayed_work_sync(&cfids->laundromat_work); 664 661 665 662 spin_lock(&cfids->cfid_list_lock); 666 663 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
+1 -1
fs/smb/client/cached_dir.h
··· 57 57 spinlock_t cfid_list_lock; 58 58 int num_entries; 59 59 struct list_head entries; 60 - struct task_struct *laundromat; 60 + struct delayed_work laundromat_work; 61 61 }; 62 62 63 63 extern struct cached_fids *init_cached_dirs(void);