Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

smb: client: remove cfids_invalidation_worker

We can do the same cleanup in the laundromat worker.

On invalidate_all_cached_dirs(), run laundromat worker with 0 timeout
and flush it for immediate + sync cleanup.

Signed-off-by: Enzo Matsumiya <ematsumiya@suse.de>
Signed-off-by: Steve French <stfrench@microsoft.com>

Authored by Enzo Matsumiya and committed by Steve French
7ae6152b be3898a3

+9 -29
+9 -28
fs/smb/client/cached_dir.c
··· 562 562 563 563 /* 564 564 * Mark all the cfids as closed, and move them to the cfids->dying list. 565 - * They'll be cleaned up later by cfids_invalidation_worker. Take 566 - * a reference to each cfid during this process. 565 + * They'll be cleaned up by laundromat. Take a reference to each cfid 566 + * during this process. 567 567 */ 568 568 spin_lock(&cfids->cfid_list_lock); 569 569 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { ··· 580 580 } else 581 581 kref_get(&cfid->refcount); 582 582 } 583 - /* 584 - * Queue dropping of the dentries once locks have been dropped 585 - */ 586 - if (!list_empty(&cfids->dying)) 587 - queue_work(cfid_put_wq, &cfids->invalidation_work); 588 583 spin_unlock(&cfids->cfid_list_lock); 584 + 585 + /* run laundromat unconditionally now as there might have been previously queued work */ 586 + mod_delayed_work(cfid_put_wq, &cfids->laundromat_work, 0); 587 + flush_delayed_work(&cfids->laundromat_work); 589 588 } 590 589 591 590 static void ··· 714 715 kfree(cfid); 715 716 } 716 717 717 - static void cfids_invalidation_worker(struct work_struct *work) 718 - { 719 - struct cached_fids *cfids = container_of(work, struct cached_fids, 720 - invalidation_work); 721 - struct cached_fid *cfid, *q; 722 - LIST_HEAD(entry); 723 - 724 - spin_lock(&cfids->cfid_list_lock); 725 - /* move cfids->dying to the local list */ 726 - list_cut_before(&entry, &cfids->dying, &cfids->dying); 727 - spin_unlock(&cfids->cfid_list_lock); 728 - 729 - list_for_each_entry_safe(cfid, q, &entry, entry) { 730 - list_del(&cfid->entry); 731 - /* Drop the ref-count acquired in invalidate_all_cached_dirs */ 732 - kref_put(&cfid->refcount, smb2_close_cached_fid); 733 - } 734 - } 735 - 736 718 static void cfids_laundromat_worker(struct work_struct *work) 737 719 { 738 720 struct cached_fids *cfids; ··· 723 743 cfids = container_of(work, struct cached_fids, laundromat_work.work); 724 744 725 745 spin_lock(&cfids->cfid_list_lock); 746 + /* move cfids->dying to the local list */ 747 + list_cut_before(&entry, &cfids->dying, &cfids->dying); 748 + 726 749 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) { 727 750 if (cfid->last_access_time && 728 751 time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) { ··· 779 796 INIT_LIST_HEAD(&cfids->entries); 780 797 INIT_LIST_HEAD(&cfids->dying); 781 798 782 - INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker); 783 799 INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker); 784 800 queue_delayed_work(cfid_put_wq, &cfids->laundromat_work, 785 801 dir_cache_timeout * HZ); ··· 802 820 return; 803 821 804 822 cancel_delayed_work_sync(&cfids->laundromat_work); 805 - cancel_work_sync(&cfids->invalidation_work); 806 823 807 824 spin_lock(&cfids->cfid_list_lock); 808 825 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
-1
fs/smb/client/cached_dir.h
··· 62 62 int num_entries; 63 63 struct list_head entries; 64 64 struct list_head dying; 65 - struct work_struct invalidation_work; 66 65 struct delayed_work laundromat_work; 67 66 /* aggregate accounting for all cached dirents under this tcon */ 68 67 atomic_long_t total_dirents_entries;