// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))

struct cache_dfs_tgt {
	char *name;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int ttl;
	int srvtype;
	int flags;
	struct timespec64 etime;
	int path_consumed;
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

struct vol_info {
	char *fullpath;
	spinlock_t smb_vol_lock;
	struct smb_vol smb_vol;
	char *mntdata;
	struct list_head list;
	struct list_head rlist;
	struct kref refcnt;
};

static struct kmem_cache *cache_slab __read_mostly;
static struct workqueue_struct *dfscache_wq __read_mostly;

static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);

static struct nls_table *cache_nlsc;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static LIST_HEAD(vol_list);
static DEFINE_SPINLOCK(vol_list_lock);

static void refresh_cache_worker(struct work_struct *work);

static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);

static int get_normalized_path(const char *path, char **npath)
{
	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return -EINVAL;

	if (*path == '\\') {
		*npath = (char *)path;
	} else {
		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
		if (!*npath)
			return -ENOMEM;
		convert_delimiter(*npath, '\\');
	}
	return 0;
}

static inline void free_normalized_path(const char *path, char *npath)
{
	if (path != npath)
		kfree(npath);
}

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

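	/*
	 * Readers of the cache (including this dump) take htable_rw_lock for
	 * reading; entry creation, update and flushing take it for writing.
	 */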
	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
				   "interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path,
				   ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec,
				   IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
				   ce->path_consumed,
				   cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, "  %s%s\n",
					   t->name,
					   ce->tgthint == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct proc_ops dfscache_proc_ops = {
	.proc_open	= dfscache_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= dfscache_proc_write,
};

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, "  %s%s\n", t->name,
			 ce->tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
		 "interlink=%s,path_consumed=%d,expired=%s\n", ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 IS_INTERLINK_SET(ce->flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags: 0x%x\n"
			 "path_consumed: %d\n"
			 "server_type: 0x%x\n"
			 "ref_flag: 0x%x\n"
			 "path_name: %s\n"
			 "node_name: %s\n"
			 "ttl: %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif

/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
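 *
 * Expected to run once at module load time (assumption: called from the cifs
 * init path when CONFIG_CIFS_DFS_UPCALL is enabled).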
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_FREEZABLE | WQ_MEM_RECLAIM, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	cache_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (CACHE_HTABLE_SIZE - 1);
}

/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = ce->tgthint;

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expiry time computed from a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrndup(name, strlen(name), GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
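 *
 * If @tgthint matches one of the new targets, that target is placed at the
 * head of @ce->tlist so it becomes the entry's target hint; otherwise the
 * first target in the list is used.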
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ttl = refs[0].ttl;
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	ce->tgthint = list_first_entry_or_null(&ce->tlist,
					       struct cache_dfs_tgt, list);

	return 0;
}

/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(const char *path,
					     const struct dfs_info3_param *refs,
					     int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!ce->path) {
		kmem_cache_free(cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

/* Must be called with htable_rw_lock held */
static void remove_oldest_entry(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}

/* Add a new DFS cache entry */
static int add_cache_entry(const char *path, unsigned int hash,
			   struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	down_write(&htable_rw_lock);
	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);
	up_write(&htable_rw_lock);

	return 0;
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Must be called with htable_rw_lock held.
 *
 * Return ERR_PTR(-ENOENT) if the entry is not found.
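 *
 * For example (illustrative), looking up \\domain\root only matches a cached
 * entry whose whole path compares case-insensitively equal to it.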
 */
static struct cache_entry *lookup_cache_entry(const char *path,
					      unsigned int *hash)
{
	struct cache_entry *ce;
	unsigned int h;
	bool found = false;

	h = cache_entry_hash(path, strlen(path));

	hlist_for_each_entry(ce, &cache_htable[h], hlist) {
		if (!strcasecmp(path, ce->path)) {
			found = true;
			dump_ce(ce);
			break;
		}
	}

	if (!found)
		ce = ERR_PTR(-ENOENT);
	if (hash)
		*hash = h;

	return ce;
}

static void __vol_release(struct vol_info *vi)
{
	kfree(vi->fullpath);
	kfree(vi->mntdata);
	cifs_cleanup_volume_info_contents(&vi->smb_vol);
	kfree(vi);
}

static void vol_release(struct kref *kref)
{
	struct vol_info *vi = container_of(kref, struct vol_info, refcnt);

	spin_lock(&vol_list_lock);
	list_del(&vi->list);
	spin_unlock(&vol_list_lock);
	__vol_release(vi);
}

static inline void free_vol_list(void)
{
	struct vol_info *vi, *nvi;

	list_for_each_entry_safe(vi, nvi, &vol_list, list) {
		list_del_init(&vi->list);
		__vol_release(vi);
	}
}

/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_nlsc);
	free_vol_list();
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/* Must be called with htable_rw_lock held */
static int __update_cache_entry(const char *path,
				const struct dfs_info3_param *refs,
				int numrefs)
{
	int rc;
	struct cache_entry *ce;
	char *s, *th = NULL;

	ce = lookup_cache_entry(path, NULL);
	if (IS_ERR(ce))
		return PTR_ERR(ce);

	if (ce->tgthint) {
		s = ce->tgthint->name;
		th = kstrndup(s, strlen(s), GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, struct dfs_info3_param **refs,
			    int *numrefs)
{
	cifs_dbg(FYI, "%s: get a DFS referral for %s\n", __func__, path);

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!nls_codepage))
		return -EINVAL;

	*refs = NULL;
	*numrefs = 0;

	return ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs,
					       nls_codepage, remap);
}

/* Update an expired cache entry by getting a new DFS referral from server */
static int update_cache_entry(const char *path,
			      const struct dfs_info3_param *refs,
			      int numrefs)
{
	int rc;

	down_write(&htable_rw_lock);
	rc = __update_cache_entry(path, refs, numrefs);
	up_write(&htable_rw_lock);

	return rc;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, a new one is created. If it was found but has
 * expired, it is updated with a fresh referral.
 *
 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
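 *
 * The lookup itself runs under the read side of htable_rw_lock; creating or
 * refreshing an entry drops the lock and re-takes it for writing via the
 * add_cache_entry()/update_cache_entry() helpers.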
 */
static int __dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
			    const struct nls_table *nls_codepage, int remap,
			    const char *path, bool noreq)
{
	int rc;
	unsigned int hash;
	struct cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;
	bool newent = false;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path, &hash);

	/*
	 * If @noreq is set, no requests will be sent to the server. Just return
	 * the cache entry.
	 */
	if (noreq) {
		up_read(&htable_rw_lock);
		return PTR_ERR_OR_ZERO(ce);
	}

	if (!IS_ERR(ce)) {
		if (!cache_entry_expired(ce)) {
			dump_ce(ce);
			up_read(&htable_rw_lock);
			return 0;
		}
	} else {
		newent = true;
	}

	up_read(&htable_rw_lock);

	/*
	 * Either no entry was found, or the one found has expired.
	 *
	 * Request a new DFS referral in order to create a new cache entry, or
	 * update the existing one.
	 */
	rc = get_dfs_referral(xid, ses, nls_codepage, remap, path,
			      &refs, &numrefs);
	if (rc)
		return rc;

	dump_refs(refs, numrefs);

	if (!newent) {
		rc = update_cache_entry(path, refs, numrefs);
		goto out_free_refs;
	}

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)", __func__,
			 CACHE_MAX_ENTRIES);
		down_write(&htable_rw_lock);
		remove_oldest_entry();
		up_write(&htable_rw_lock);
	}

	rc = add_cache_entry(path, hash, refs, numrefs);
	if (!rc)
		atomic_inc(&cache_count);

out_free_refs:
	free_dfs_info_array(refs, numrefs);
	return rc;
}

/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
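 *
 * @ref->path_name and @ref->node_name are allocated here (GFP_ATOMIC) and
 * must be released by the caller with free_dfs_info_param().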
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrndup(path, strlen(path), GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrndup(target, strlen(target), GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrndup(t->name, strlen(t->name), GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}

		if (ce->tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}

/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @nls_codepage: charset conversion
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
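 *
 * Illustrative use (hypothetical caller; @path would be a UNC path such as
 * \\domain\dfsroot):
 *
 *	struct dfs_cache_tgt_list tl;
 *	int rc = dfs_cache_find(xid, ses, cifs_sb->local_nls,
 *				cifs_remap(cifs_sb), path, NULL, &tl);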
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	free_normalized_path(path, npath);
	return rc;
}

/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @nls_codepage: charset conversion
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
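 *
 * The new hint is matched against @it->it_name case-insensitively; if no
 * target with that name exists in the entry, the hint is left unchanged.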
 */
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *nls_codepage, int remap,
			     const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: update target hint - path: %s\n", __func__, npath);

	rc = __dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (rc)
		goto out_free_path;

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	t = ce->tgthint;

	if (likely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
out_free_path:
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_noreq_update_tgthint(const char *path,
				   const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	if (!it)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_write(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	rc = 0;
	t = ce->tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			ce->tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_write(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
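 *
 * On success, the caller owns the strings in @ref and must release them with
 * free_dfs_info_param().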
 */
int dfs_cache_get_tgt_referral(const char *path,
			       const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	char *npath;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	free_normalized_path(path, npath);

	return rc;
}

static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
{
	memcpy(new, vol, sizeof(*new));

	if (vol->username) {
		new->username = kstrndup(vol->username, strlen(vol->username),
					 GFP_KERNEL);
		if (!new->username)
			return -ENOMEM;
	}
	if (vol->password) {
		new->password = kstrndup(vol->password, strlen(vol->password),
					 GFP_KERNEL);
		if (!new->password)
			goto err_free_username;
	}
	if (vol->UNC) {
		cifs_dbg(FYI, "%s: vol->UNC: %s\n", __func__, vol->UNC);
		new->UNC = kstrndup(vol->UNC, strlen(vol->UNC), GFP_KERNEL);
		if (!new->UNC)
			goto err_free_password;
	}
	if (vol->domainname) {
		new->domainname = kstrndup(vol->domainname,
					   strlen(vol->domainname), GFP_KERNEL);
		if (!new->domainname)
			goto err_free_unc;
	}
	if (vol->iocharset) {
		new->iocharset = kstrndup(vol->iocharset,
					  strlen(vol->iocharset), GFP_KERNEL);
		if (!new->iocharset)
			goto err_free_domainname;
	}
	if (vol->prepath) {
		cifs_dbg(FYI, "%s: vol->prepath: %s\n", __func__, vol->prepath);
		new->prepath = kstrndup(vol->prepath, strlen(vol->prepath),
					GFP_KERNEL);
		if (!new->prepath)
			goto err_free_iocharset;
	}

	return 0;

err_free_iocharset:
	kfree(new->iocharset);
err_free_domainname:
	kfree(new->domainname);
err_free_unc:
	kfree(new->UNC);
err_free_password:
	kzfree(new->password);
err_free_username:
	kfree(new->username);
	/* @new is embedded in the caller's struct vol_info; do not free it */
	return -ENOMEM;
}

/**
 * dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
 * DFS cache refresh worker.
 *
 * @mntdata: mount data.
 * @vol: cifs volume.
 * @fullpath: origin full path.
 *
 * Return zero if volume was set up correctly, otherwise non-zero.
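 *
 * Note: on success the cache takes ownership of @mntdata and frees it when
 * the volume is released.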
 */
int dfs_cache_add_vol(char *mntdata, struct smb_vol *vol, const char *fullpath)
{
	int rc;
	struct vol_info *vi;

	if (!vol || !fullpath || !mntdata)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi)
		return -ENOMEM;

	vi->fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
	if (!vi->fullpath) {
		rc = -ENOMEM;
		goto err_free_vi;
	}

	rc = dup_vol(vol, &vi->smb_vol);
	if (rc)
		goto err_free_fullpath;

	vi->mntdata = mntdata;
	spin_lock_init(&vi->smb_vol_lock);
	kref_init(&vi->refcnt);

	spin_lock(&vol_list_lock);
	list_add_tail(&vi->list, &vol_list);
	spin_unlock(&vol_list_lock);

	return 0;

err_free_fullpath:
	kfree(vi->fullpath);
err_free_vi:
	kfree(vi);
	return rc;
}

/* Must be called with vol_list_lock held */
static struct vol_info *find_vol(const char *fullpath)
{
	struct vol_info *vi;

	list_for_each_entry(vi, &vol_list, list) {
		cifs_dbg(FYI, "%s: vi->fullpath: %s\n", __func__, vi->fullpath);
		if (!strcasecmp(vi->fullpath, fullpath))
			return vi;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * dfs_cache_update_vol - update vol info in DFS cache after failover
 *
 * @fullpath: fullpath to look up in volume list.
 * @server: TCP ses pointer.
 *
 * Return zero if volume was updated, otherwise non-zero.
 */
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
	struct vol_info *vi;

	if (!fullpath || !server)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	if (IS_ERR(vi)) {
		spin_unlock(&vol_list_lock);
		return PTR_ERR(vi);
	}
	kref_get(&vi->refcnt);
	spin_unlock(&vol_list_lock);

	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
	spin_lock(&vi->smb_vol_lock);
	memcpy(&vi->smb_vol.dstaddr, &server->dstaddr,
	       sizeof(vi->smb_vol.dstaddr));
	spin_unlock(&vi->smb_vol_lock);

	kref_put(&vi->refcnt, vol_release);

	return 0;
}

/**
 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
 *
 * @fullpath: fullpath to look up in volume list.
 */
void dfs_cache_del_vol(const char *fullpath)
{
	struct vol_info *vi;

	if (!fullpath || !*fullpath)
		return;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	spin_lock(&vol_list_lock);
	vi = find_vol(fullpath);
	spin_unlock(&vol_list_lock);

	/* find_vol() returns ERR_PTR(-ENOENT) when no volume matches */
	if (!IS_ERR(vi))
		kref_put(&vi->refcnt, vol_release);
}

/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @it: DFS target iterator.
 * @share: tree name.
 * @share_len: length of tree name.
 * @prefix: prefix path.
 * @prefix_len: length of prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
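 *
 * For example (illustrative), a target of \\srv\share\a\b is split into the
 * tree name \\srv\share (share_len 11) and the prefix path a\b.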
 */
int dfs_cache_get_tgt_share(const struct dfs_cache_tgt_iterator *it,
			    const char **share, size_t *share_len,
			    const char **prefix, size_t *prefix_len)
{
	char *s, sep;

	if (!it || !share || !share_len || !prefix || !prefix_len)
		return -EINVAL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	s = strchr(it->it_name + 1, sep);
	if (!s)
		return -EINVAL;

	s = strchrnul(s + 1, sep);

	*share = it->it_name;
	*share_len = s - it->it_name;
	*prefix = *s ? s + 1 : s;
	*prefix_len = &it->it_name[strlen(it->it_name)] - *prefix;

	return 0;
}

/* Get all tcons that are within a DFS namespace and can be refreshed */
static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	INIT_LIST_HEAD(head);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
			    tcon->dfs_path) {
				tcon->tc_count++;
				list_add_tail(&tcon->ulist, head);
			}
		}
		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
		    ses->tcon_ipc->dfs_path) {
			list_add_tail(&ses->tcon_ipc->ulist, head);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

static bool is_dfs_link(const char *path)
{
	char *s;

	s = strchr(path + 1, '\\');
	if (!s)
		return false;
	return !!strchr(s + 1, '\\');
}

static char *get_dfs_root(const char *path)
{
	char *s, *npath;

	s = strchr(path + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	s = strchr(s + 1, '\\');
	if (!s)
		return ERR_PTR(-EINVAL);

	npath = kstrndup(path, s - path, GFP_KERNEL);
	if (!npath)
		return ERR_PTR(-ENOMEM);

	return npath;
}

static inline void put_tcp_server(struct TCP_Server_Info *server)
{
	cifs_put_tcp_session(server, 0);
}

static struct TCP_Server_Info *get_tcp_server(struct smb_vol *vol)
{
	struct TCP_Server_Info *server;

	server = cifs_find_tcp_session(vol);
	if (IS_ERR_OR_NULL(server))
		return NULL;

	spin_lock(&GlobalMid_Lock);
	if (server->tcpStatus != CifsGood) {
		spin_unlock(&GlobalMid_Lock);
		put_tcp_server(server);
		return NULL;
	}
	spin_unlock(&GlobalMid_Lock);

	return server;
}

/* Find root SMB session out of a DFS link path */
static struct cifs_ses *find_root_ses(struct vol_info *vi,
				      struct cifs_tcon *tcon,
				      const char *path)
{
	char *rpath;
	int rc;
	struct cache_entry *ce;
	struct dfs_info3_param ref = {0};
	char *mdata = NULL, *devname = NULL;
	struct TCP_Server_Info *server;
	struct cifs_ses *ses;
	struct smb_vol vol = {NULL};

	rpath = get_dfs_root(path);
	if (IS_ERR(rpath))
		return ERR_CAST(rpath);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(rpath, NULL);
	if (IS_ERR(ce)) {
		up_read(&htable_rw_lock);
		ses = ERR_CAST(ce);
		goto out;
	}

	rc = setup_referral(path, ce, &ref, get_tgt_name(ce));
	if (rc) {
		up_read(&htable_rw_lock);
		ses = ERR_PTR(rc);
		goto out;
	}

	up_read(&htable_rw_lock);

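	/*
	 * Compose mount options for the root share out of the cached referral;
	 * cifs_compose_mount_options() also hands back the device name used to
	 * set up the volume info below.
	 */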
	mdata = cifs_compose_mount_options(vi->mntdata, rpath, &ref,
					   &devname);
	free_dfs_info_param(&ref);

	if (IS_ERR(mdata)) {
		ses = ERR_CAST(mdata);
		mdata = NULL;
		goto out;
	}

	rc = cifs_setup_volume_info(&vol, mdata, devname, false);
	kfree(devname);

	if (rc) {
		ses = ERR_PTR(rc);
		goto out;
	}

	server = get_tcp_server(&vol);
	if (!server) {
		ses = ERR_PTR(-EHOSTDOWN);
		goto out;
	}

	ses = cifs_get_smb_ses(server, &vol);

out:
	cifs_cleanup_volume_info_contents(&vol);
	kfree(mdata);
	kfree(rpath);

	return ses;
}

/* Refresh DFS cache entry from a given tcon */
static int refresh_tcon(struct vol_info *vi, struct cifs_tcon *tcon)
{
	int rc = 0;
	unsigned int xid;
	char *path, *npath;
	struct cache_entry *ce;
	struct cifs_ses *root_ses = NULL, *ses;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;

	xid = get_xid();

	path = tcon->dfs_path + 1;

	rc = get_normalized_path(path, &npath);
	if (rc)
		goto out_free_xid;

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(npath, NULL);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	if (!cache_entry_expired(ce)) {
		up_read(&htable_rw_lock);
		goto out_free_path;
	}

	up_read(&htable_rw_lock);

	/* If it's a DFS Link, then use root SMB session for refreshing it */
	if (is_dfs_link(npath)) {
		ses = root_ses = find_root_ses(vi, tcon, npath);
		if (IS_ERR(ses)) {
			rc = PTR_ERR(ses);
			root_ses = NULL;
			goto out_free_path;
		}
	} else {
		ses = tcon->ses;
	}

	rc = get_dfs_referral(xid, ses, cache_nlsc, tcon->remap, npath, &refs,
			      &numrefs);
	if (!rc) {
		dump_refs(refs, numrefs);
		rc = update_cache_entry(npath, refs, numrefs);
		free_dfs_info_array(refs, numrefs);
	}

	if (root_ses)
		cifs_put_smb_ses(root_ses);

out_free_path:
	free_normalized_path(path, npath);

out_free_xid:
	free_xid(xid);
	return rc;
}

/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct vol_info *vi, *nvi;
	struct TCP_Server_Info *server;
	LIST_HEAD(vols);
	LIST_HEAD(tcons);
	struct cifs_tcon *tcon, *ntcon;
	int rc;

	/*
	 * Find SMB volumes that are eligible (server->tcpStatus == CifsGood)
	 * for refreshing.
	 */
	spin_lock(&vol_list_lock);
	list_for_each_entry(vi, &vol_list, list) {
		server = get_tcp_server(&vi->smb_vol);
		if (!server)
			continue;

		kref_get(&vi->refcnt);
		list_add_tail(&vi->rlist, &vols);
		put_tcp_server(server);
	}
	spin_unlock(&vol_list_lock);

	/* Walk through all TCONs and refresh any expired cache entry */
	list_for_each_entry_safe(vi, nvi, &vols, rlist) {
		spin_lock(&vi->smb_vol_lock);
		server = get_tcp_server(&vi->smb_vol);
		spin_unlock(&vi->smb_vol_lock);

		if (!server)
			goto next_vol;

		get_tcons(server, &tcons);
		rc = 0;

		list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
			/*
			 * Skip tcp server if any of its tcons failed to refresh
			 * (possibly due to reconnects).
			 */
			if (!rc)
				rc = refresh_tcon(vi, tcon);

			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}

		put_tcp_server(server);

next_vol:
		list_del_init(&vi->rlist);
		kref_put(&vi->refcnt, vol_release);
	}

	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}