// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"

#include "dfs_cache.h"

#define DFS_CACHE_HTABLE_SIZE 32
#define DFS_CACHE_MAX_ENTRIES 64

#define IS_INTERLINK_SET(v) ((v) & (DFSREF_REFERRAL_SERVER | \
				    DFSREF_STORAGE_SERVER))

struct dfs_cache_tgt {
	char *t_name;
	struct list_head t_list;
};

struct dfs_cache_entry {
	struct hlist_node ce_hlist;
	const char *ce_path;
	int ce_ttl;
	int ce_srvtype;
	int ce_flags;
	struct timespec64 ce_etime;
	int ce_path_consumed;
	int ce_numtgts;
	struct list_head ce_tlist;
	struct dfs_cache_tgt *ce_tgthint;
	struct rcu_head ce_rcu;
};

static struct kmem_cache *dfs_cache_slab __read_mostly;

struct dfs_cache_vol_info {
	char *vi_fullpath;
	struct smb_vol vi_vol;
	struct list_head vi_list;
};

struct dfs_cache {
	struct mutex dc_lock;
	struct nls_table *dc_nlsc;
	struct list_head dc_vol_list;
	int dc_ttl;
	struct delayed_work dc_refresh;
};

static struct dfs_cache dfs_cache;

/*
 * Number of entries in the cache
 */
static size_t dfs_cache_count;

static DEFINE_MUTEX(dfs_cache_list_lock);
static struct hlist_head dfs_cache_htable[DFS_CACHE_HTABLE_SIZE];

static void refresh_cache_worker(struct work_struct *work);

static inline bool is_path_valid(const char *path)
{
	return path && (strchr(path + 1, '\\') || strchr(path + 1, '/'));
}

static inline int get_normalized_path(const char *path, char **npath)
{
	if (*path == '\\') {
		*npath = (char *)path;
	} else {
		*npath = kstrndup(path, strlen(path), GFP_KERNEL);
		if (!*npath)
			return -ENOMEM;
		convert_delimiter(*npath, '\\');
	}
	return 0;
}

static inline void free_normalized_path(const char *path, char *npath)
{
	if (path != npath)
		kfree(npath);
}

static inline bool cache_entry_expired(const struct dfs_cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->ce_etime) >= 0;
}

static inline void free_tgts(struct dfs_cache_entry *ce)
{
	struct dfs_cache_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->ce_tlist, t_list) {
		list_del(&t->t_list);
		kfree(t->t_name);
		kfree(t);
	}
}

static void free_cache_entry(struct rcu_head *rcu)
{
	struct dfs_cache_entry *ce = container_of(rcu, struct dfs_cache_entry,
						  ce_rcu);
	kmem_cache_free(dfs_cache_slab, ce);
}

static inline void flush_cache_ent(struct dfs_cache_entry *ce)
{
	if (hlist_unhashed(&ce->ce_hlist))
		return;

	hlist_del_init_rcu(&ce->ce_hlist);
	kfree(ce->ce_path);
	free_tgts(ce);
	dfs_cache_count--;
	call_rcu(&ce->ce_rcu, free_cache_entry);
}

static void flush_cache_ents(void)
{
	int i;

	rcu_read_lock();
	for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &dfs_cache_htable[i];
		struct dfs_cache_entry *ce;

		hlist_for_each_entry_rcu(ce, l, ce_hlist)
			flush_cache_ent(ce);
	}
	rcu_read_unlock();
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int bucket;
	struct dfs_cache_entry *ce;
	struct dfs_cache_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	mutex_lock(&dfs_cache_list_lock);

	rcu_read_lock();
	hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
		seq_printf(m,
			   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
			   "interlink=%s,path_consumed=%d,expired=%s\n",
			   ce->ce_path,
			   ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link",
			   ce->ce_ttl, ce->ce_etime.tv_nsec,
			   IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
			   ce->ce_path_consumed,
			   cache_entry_expired(ce) ? "yes" : "no");

		list_for_each_entry(t, &ce->ce_tlist, t_list) {
			seq_printf(m, " %s%s\n",
				   t->t_name,
				   ce->ce_tgthint == t ? " (target hint)" : "");
		}

	}
	rcu_read_unlock();

	mutex_unlock(&dfs_cache_list_lock);
	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache");
	mutex_lock(&dfs_cache_list_lock);
	flush_cache_ents();
	mutex_unlock(&dfs_cache_list_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

const struct file_operations dfscache_proc_fops = {
	.open		= dfscache_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= dfscache_proc_write,
};
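
/*
 * The cache can be inspected and flushed from userspace through the procfs
 * entry created for these fops by the cifs debug code (typically
 * /proc/fs/cifs/dfscache); writing "0" drops every cached referral, e.g.:
 *
 *	echo 0 > /proc/fs/cifs/dfscache
 */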

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct dfs_cache_entry *ce)
{
	struct dfs_cache_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->ce_tlist, t_list) {
		cifs_dbg(FYI, " %s%s\n", t->t_name,
			 ce->ce_tgthint == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct dfs_cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,"
		 "interlink=%s,path_consumed=%d,expired=%s\n", ce->ce_path,
		 ce->ce_srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ce_ttl,
		 ce->ce_etime.tv_nsec,
		 IS_INTERLINK_SET(ce->ce_flags) ? "yes" : "no",
		 ce->ce_path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags: 0x%x\n"
			 "path_consumed: %d\n"
			 "server_type: 0x%x\n"
			 "ref_flag: 0x%x\n"
			 "path_name: %s\n"
			 "node_name: %s\n"
			 "ttl: %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif

/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int i;

	dfs_cache_slab = kmem_cache_create("cifs_dfs_cache",
					   sizeof(struct dfs_cache_entry), 0,
					   SLAB_HWCACHE_ALIGN, NULL);
	if (!dfs_cache_slab)
		return -ENOMEM;

	for (i = 0; i < DFS_CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&dfs_cache_htable[i]);

	INIT_LIST_HEAD(&dfs_cache.dc_vol_list);
	mutex_init(&dfs_cache.dc_lock);
	INIT_DELAYED_WORK(&dfs_cache.dc_refresh, refresh_cache_worker);
	dfs_cache.dc_ttl = -1;
	dfs_cache.dc_nlsc = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;
}

static inline unsigned int cache_entry_hash(const void *data, int size)
{
	unsigned int h;

	h = jhash(data, size, 0);
	return h & (DFS_CACHE_HTABLE_SIZE - 1);
}

/* Check whether second path component of @path is SYSVOL or NETLOGON */
static inline bool is_sysvol_or_netlogon(const char *path)
{
	const char *s;
	char sep = path[0];

	s = strchr(path + 1, sep) + 1;
	return !strncasecmp(s, "sysvol", strlen("sysvol")) ||
		!strncasecmp(s, "netlogon", strlen("netlogon"));
}

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct dfs_cache_entry *ce)
{
	struct dfs_cache_tgt *t = ce->ce_tgthint;

	return t ? t->t_name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static inline struct dfs_cache_tgt *alloc_tgt(const char *name)
{
	struct dfs_cache_tgt *t;

	t = kmalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->t_name = kstrndup(name, strlen(name), GFP_KERNEL);
	if (!t->t_name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&t->t_list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct dfs_cache_entry *ce, const char *tgthint)
{
	int i;

	ce->ce_ttl = refs[0].ttl;
	ce->ce_etime = get_expire_time(ce->ce_ttl);
	ce->ce_srvtype = refs[0].server_type;
	ce->ce_flags = refs[0].ref_flag;
	ce->ce_path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct dfs_cache_tgt *t;

		t = alloc_tgt(refs[i].node_name);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->t_name, tgthint)) {
			list_add(&t->t_list, &ce->ce_tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->t_list, &ce->ce_tlist);
		}
		ce->ce_numtgts++;
	}

	ce->ce_tgthint = list_first_entry_or_null(&ce->ce_tlist,
						  struct dfs_cache_tgt, t_list);

	return 0;
}

/* Allocate a new cache entry */
static struct dfs_cache_entry *
alloc_cache_entry(const char *path, const struct dfs_info3_param *refs,
		  int numrefs)
{
	struct dfs_cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(dfs_cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->ce_path = kstrdup_const(path, GFP_KERNEL);
	if (!ce->ce_path) {
		kmem_cache_free(dfs_cache_slab, ce);
		return ERR_PTR(-ENOMEM);
	}
	INIT_HLIST_NODE(&ce->ce_hlist);
	INIT_LIST_HEAD(&ce->ce_tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->ce_path);
		kmem_cache_free(dfs_cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

static void remove_oldest_entry(void)
{
	int bucket;
	struct dfs_cache_entry *ce;
	struct dfs_cache_entry *to_del = NULL;

	rcu_read_lock();
	hash_for_each_rcu(dfs_cache_htable, bucket, ce, ce_hlist) {
		if (!to_del || timespec64_compare(&ce->ce_etime,
						  &to_del->ce_etime) < 0)
			to_del = ce;
	}
	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove", __func__);
		goto out;
	}
	cifs_dbg(FYI, "%s: removing entry", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
out:
	rcu_read_unlock();
}

/* Add a new DFS cache entry */
static inline struct dfs_cache_entry *
add_cache_entry(unsigned int hash, const char *path,
		const struct dfs_info3_param *refs, int numrefs)
{
	struct dfs_cache_entry *ce;

	ce = alloc_cache_entry(path, refs, numrefs);
	if (IS_ERR(ce))
		return ce;

	hlist_add_head_rcu(&ce->ce_hlist, &dfs_cache_htable[hash]);

	mutex_lock(&dfs_cache.dc_lock);
	if (dfs_cache.dc_ttl < 0) {
		dfs_cache.dc_ttl = ce->ce_ttl;
		queue_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
				   dfs_cache.dc_ttl * HZ);
	} else {
		dfs_cache.dc_ttl = min_t(int, dfs_cache.dc_ttl, ce->ce_ttl);
		mod_delayed_work(cifsiod_wq, &dfs_cache.dc_refresh,
				 dfs_cache.dc_ttl * HZ);
	}
	mutex_unlock(&dfs_cache.dc_lock);

	return ce;
}

static struct dfs_cache_entry *__find_cache_entry(unsigned int hash,
						  const char *path)
{
	struct dfs_cache_entry *ce;
	bool found = false;

	rcu_read_lock();
	hlist_for_each_entry_rcu(ce, &dfs_cache_htable[hash], ce_hlist) {
		if (!strcasecmp(path, ce->ce_path)) {
#ifdef CONFIG_CIFS_DEBUG2
			char *name = get_tgt_name(ce);

			if (unlikely(IS_ERR(name))) {
				rcu_read_unlock();
				return ERR_CAST(name);
			}
			cifs_dbg(FYI, "%s: cache hit\n", __func__);
			cifs_dbg(FYI, "%s: target hint: %s\n", __func__, name);
#endif
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	return found ? ce : ERR_PTR(-ENOENT);
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against
 * @path.
 * Use whole path components in the match.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 */
static inline struct dfs_cache_entry *find_cache_entry(const char *path,
							unsigned int *hash)
{
	*hash = cache_entry_hash(path, strlen(path));
	return __find_cache_entry(*hash, path);
}

static inline void destroy_slab_cache(void)
{
	rcu_barrier();
	kmem_cache_destroy(dfs_cache_slab);
}

static inline void free_vol(struct dfs_cache_vol_info *vi)
{
	list_del(&vi->vi_list);
	kfree(vi->vi_fullpath);
	cifs_cleanup_volume_info_contents(&vi->vi_vol);
	kfree(vi);
}

static inline void free_vol_list(void)
{
	struct dfs_cache_vol_info *vi, *nvi;

	list_for_each_entry_safe(vi, nvi, &dfs_cache.dc_vol_list, vi_list)
		free_vol(vi);
}

/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	cancel_delayed_work_sync(&dfs_cache.dc_refresh);
	unload_nls(dfs_cache.dc_nlsc);
	free_vol_list();
	mutex_destroy(&dfs_cache.dc_lock);

	flush_cache_ents();
	destroy_slab_cache();
	mutex_destroy(&dfs_cache_list_lock);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

static inline struct dfs_cache_entry *
__update_cache_entry(const char *path, const struct dfs_info3_param *refs,
		     int numrefs)
{
	int rc;
	unsigned int h;
	struct dfs_cache_entry *ce;
	char *s, *th = NULL;

	ce = find_cache_entry(path, &h);
	if (IS_ERR(ce))
		return ce;

	if (ce->ce_tgthint) {
		s = ce->ce_tgthint->t_name;
		th = kstrndup(s, strlen(s), GFP_KERNEL);
		if (!th)
			return ERR_PTR(-ENOMEM);
	}

	free_tgts(ce);
	ce->ce_numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);
	kfree(th);

	if (rc)
		ce = ERR_PTR(rc);

	return ce;
}

/* Update an expired cache entry by getting a new DFS referral from server */
static struct dfs_cache_entry *
update_cache_entry(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_cache_entry *ce)
{
	int rc;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;

	cifs_dbg(FYI, "%s: update expired cache entry\n", __func__);
	/*
	 * Check if caller provided enough parameters to update an expired
	 * entry.
	 */
	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return ERR_PTR(-ETIME);
	if (unlikely(!nls_codepage))
		return ERR_PTR(-ETIME);

	cifs_dbg(FYI, "%s: DFS referral request for %s\n", __func__, path);

	rc = ses->server->ops->get_dfs_refer(xid, ses, path, &refs, &numrefs,
					     nls_codepage, remap);
	if (rc)
		ce = ERR_PTR(rc);
	else
		ce = __update_cache_entry(path, refs, numrefs);

	dump_refs(refs, numrefs);
	free_dfs_info_array(refs, numrefs);

	return ce;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, __cifs_dfs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 */
static struct dfs_cache_entry *
do_dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		  const struct nls_table *nls_codepage, int remap,
		  const char *path, bool noreq)
{
	int rc;
	unsigned int h;
	struct dfs_cache_entry *ce;
	struct dfs_info3_param *nrefs;
	int numnrefs;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	ce = find_cache_entry(path, &h);
	if (IS_ERR(ce)) {
		cifs_dbg(FYI, "%s: cache miss\n", __func__);
		/*
		 * If @noreq is set, no requests will be sent to the server for
		 * either updating or getting a new DFS referral.
		 */
		if (noreq)
			return ce;
		/*
		 * No cache entry was found, so check for valid parameters that
		 * will be required to get a new DFS referral and then create a
		 * new cache entry.
		 */
		if (!ses || !ses->server || !ses->server->ops->get_dfs_refer) {
			ce = ERR_PTR(-EOPNOTSUPP);
			return ce;
		}
		if (unlikely(!nls_codepage)) {
			ce = ERR_PTR(-EINVAL);
			return ce;
		}

		nrefs = NULL;
		numnrefs = 0;

		cifs_dbg(FYI, "%s: DFS referral request for %s\n", __func__,
			 path);

		rc = ses->server->ops->get_dfs_refer(xid, ses, path, &nrefs,
						     &numnrefs, nls_codepage,
						     remap);
		if (rc) {
			ce = ERR_PTR(rc);
			return ce;
		}

		dump_refs(nrefs, numnrefs);

		cifs_dbg(FYI, "%s: new cache entry\n", __func__);

		if (dfs_cache_count >= DFS_CACHE_MAX_ENTRIES) {
			cifs_dbg(FYI, "%s: reached max cache size (%d)",
				 __func__, DFS_CACHE_MAX_ENTRIES);
			remove_oldest_entry();
		}
		ce = add_cache_entry(h, path, nrefs, numnrefs);
		free_dfs_info_array(nrefs, numnrefs);

		if (IS_ERR(ce))
			return ce;

		dfs_cache_count++;
	}

	dump_ce(ce);

	/* Just return the found cache entry in case @noreq is set */
	if (noreq)
		return ce;

	if (cache_entry_expired(ce)) {
		cifs_dbg(FYI, "%s: expired cache entry\n", __func__);
		ce = update_cache_entry(xid, ses, nls_codepage, remap, path,
					ce);
		if (IS_ERR(ce)) {
			cifs_dbg(FYI, "%s: failed to update expired entry\n",
				 __func__);
		}
	}
	return ce;
}

/* Set up a new DFS referral from a given cache entry */
static int setup_ref(const char *path, const struct dfs_cache_entry *ce,
		     struct dfs_info3_param *ref, const char *tgt)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrndup(path, strlen(path), GFP_KERNEL);
	if (!ref->path_name)
		return -ENOMEM;

	ref->path_consumed = ce->ce_path_consumed;

	ref->node_name = kstrndup(tgt, strlen(tgt), GFP_KERNEL);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->ttl = ce->ce_ttl;
	ref->server_type = ce->ce_srvtype;
	ref->ref_flag = ce->ce_flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Return target list of a DFS cache entry */
static int get_tgt_list(const struct dfs_cache_entry *ce,
			struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct dfs_cache_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->ce_tlist, t_list) {
		it = kzalloc(sizeof(*it), GFP_KERNEL);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrndup(t->t_name, strlen(t->t_name),
				       GFP_KERNEL);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}

		if (ce->ce_tgthint == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}
	tl->tl_numtgts = ce->ce_numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}

/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @nls_codepage: charset conversion
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses,
		   const struct nls_table *nls_codepage, int remap,
		   const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct dfs_cache_entry *ce;

	if (unlikely(!is_path_valid(path)))
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	mutex_lock(&dfs_cache_list_lock);
	ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (!IS_ERR(ce)) {
		if (ref)
			rc = setup_ref(path, ce, ref, get_tgt_name(ce));
		else
			rc = 0;
		if (!rc && tgt_list)
			rc = get_tgt_list(ce, tgt_list);
	} else {
		rc = PTR_ERR(ce);
	}
	mutex_unlock(&dfs_cache_list_lock);
	free_normalized_path(path, npath);
	return rc;
}
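
/*
 * Minimal, illustrative sketch of how a mount-time caller might use
 * dfs_cache_find(); xid, ses, nls, remap and full_path stand for
 * caller-supplied context and are not defined here:
 *
 *	struct dfs_info3_param ref = {0};
 *	int rc;
 *
 *	rc = dfs_cache_find(xid, ses, nls, remap, full_path, &ref, NULL);
 *	if (!rc) {
 *		(ref.node_name now holds the target hint to chase next)
 *		free_dfs_info_param(&ref);
 *	}
 */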

/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	char *npath;
	struct dfs_cache_entry *ce;

	if (unlikely(!is_path_valid(path)))
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	mutex_lock(&dfs_cache_list_lock);
	ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out;
	}

	if (ref)
		rc = setup_ref(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_tgt_list(ce, tgt_list);
out:
	mutex_unlock(&dfs_cache_list_lock);
	free_normalized_path(path, npath);
	return rc;
}

/**
 * dfs_cache_update_tgthint - update target hint of a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral for @path
 * and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * @xid: syscall id
 * @ses: smb session
 * @nls_codepage: charset conversion
 * @remap: type of character remapping for paths
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_update_tgthint(const unsigned int xid, struct cifs_ses *ses,
			     const struct nls_table *nls_codepage, int remap,
			     const char *path,
			     const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct dfs_cache_entry *ce;
	struct dfs_cache_tgt *t;

	if (unlikely(!is_path_valid(path)))
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	mutex_lock(&dfs_cache_list_lock);
	ce = do_dfs_cache_find(xid, ses, nls_codepage, remap, npath, false);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out;
	}

	rc = 0;

	t = ce->ce_tgthint;

	if (likely(!strcasecmp(it->it_name, t->t_name)))
		goto out;

	list_for_each_entry(t, &ce->ce_tlist, t_list) {
		if (!strcasecmp(t->t_name, it->it_name)) {
			ce->ce_tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out:
	mutex_unlock(&dfs_cache_list_lock);
	free_normalized_path(path, npath);
	return rc;
}
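
/*
 * Illustrative sketch (not lifted from the actual callers) of walking a
 * target list built by dfs_cache_find() and recording whichever target worked
 * as the new hint; the iterator helpers are assumed to be the inline ones
 * declared in dfs_cache.h, and try_connect_target() is a hypothetical
 * stand-in for the caller's connect routine:
 *
 *	struct dfs_cache_tgt_list tl = {0};
 *	struct dfs_cache_tgt_iterator *it;
 *	int rc;
 *
 *	rc = dfs_cache_find(xid, ses, nls, remap, path, NULL, &tl);
 *	if (rc)
 *		return rc;
 *
 *	for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *	     it = dfs_cache_get_next_tgt(&tl, it)) {
 *		rc = try_connect_target(dfs_cache_get_tgt_name(it));
 *		if (!rc) {
 *			dfs_cache_update_tgthint(xid, ses, nls, remap, path, it);
 *			break;
 *		}
 *	}
 *	dfs_cache_free_tgts(&tl);
 */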

/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * Return zero if the target hint was updated successfully, otherwise non-zero.
 */
int dfs_cache_noreq_update_tgthint(const char *path,
				   const struct dfs_cache_tgt_iterator *it)
{
	int rc;
	char *npath;
	struct dfs_cache_entry *ce;
	struct dfs_cache_tgt *t;

	if (unlikely(!is_path_valid(path)) || !it)
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	mutex_lock(&dfs_cache_list_lock);

	ce = do_dfs_cache_find(0, NULL, NULL, 0, npath, true);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out;
	}

	rc = 0;

	t = ce->ce_tgthint;

	if (unlikely(!strcasecmp(it->it_name, t->t_name)))
		goto out;

	list_for_each_entry(t, &ce->ce_tlist, t_list) {
		if (!strcasecmp(t->t_name, it->it_name)) {
			ce->ce_tgthint = t;
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out:
	mutex_unlock(&dfs_cache_list_lock);
	free_normalized_path(path, npath);
	return rc;
}

/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path,
			       const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	char *npath;
	struct dfs_cache_entry *ce;
	unsigned int h;

	if (!it || !ref)
		return -EINVAL;
	if (unlikely(!is_path_valid(path)))
		return -EINVAL;

	rc = get_normalized_path(path, &npath);
	if (rc)
		return rc;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, npath);

	mutex_lock(&dfs_cache_list_lock);

	ce = find_cache_entry(npath, &h);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_ref(path, ce, ref, it->it_name);

out:
	mutex_unlock(&dfs_cache_list_lock);
	free_normalized_path(path, npath);
	return rc;
}

static int dup_vol(struct smb_vol *vol, struct smb_vol *new)
{
	memcpy(new, vol, sizeof(*new));

	if (vol->username) {
		new->username = kstrndup(vol->username, strlen(vol->username),
					 GFP_KERNEL);
		if (!new->username)
			return -ENOMEM;
	}
	if (vol->password) {
		new->password = kstrndup(vol->password, strlen(vol->password),
					 GFP_KERNEL);
		if (!new->password)
			goto err_free_username;
	}
	if (vol->UNC) {
		cifs_dbg(FYI, "%s: vol->UNC: %s\n", __func__, vol->UNC);
		new->UNC = kstrndup(vol->UNC, strlen(vol->UNC), GFP_KERNEL);
		if (!new->UNC)
			goto err_free_password;
	}
	if (vol->domainname) {
		new->domainname = kstrndup(vol->domainname,
					   strlen(vol->domainname), GFP_KERNEL);
		if (!new->domainname)
			goto err_free_unc;
	}
	if (vol->iocharset) {
		new->iocharset = kstrndup(vol->iocharset,
					  strlen(vol->iocharset), GFP_KERNEL);
		if (!new->iocharset)
			goto err_free_domainname;
	}
	if (vol->prepath) {
		cifs_dbg(FYI, "%s: vol->prepath: %s\n", __func__, vol->prepath);
		new->prepath = kstrndup(vol->prepath, strlen(vol->prepath),
					GFP_KERNEL);
		if (!new->prepath)
			goto err_free_iocharset;
	}

	return 0;

err_free_iocharset:
	kfree(new->iocharset);
err_free_domainname:
	kfree(new->domainname);
err_free_unc:
	kfree(new->UNC);
err_free_password:
	kzfree(new->password);
err_free_username:
	kfree(new->username);
	/* @new is embedded in the caller's dfs_cache_vol_info; do not kfree it */
	return -ENOMEM;
}

/**
 * dfs_cache_add_vol - add a cifs volume during mount() that will be handled by
 * DFS cache refresh worker.
 *
 * @vol: cifs volume.
 * @fullpath: origin full path.
 *
 * Return zero if volume was set up correctly, otherwise non-zero.
 */
int dfs_cache_add_vol(struct smb_vol *vol, const char *fullpath)
{
	int rc;
	struct dfs_cache_vol_info *vi;

	if (!vol || !fullpath)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	vi = kzalloc(sizeof(*vi), GFP_KERNEL);
	if (!vi)
		return -ENOMEM;

	vi->vi_fullpath = kstrndup(fullpath, strlen(fullpath), GFP_KERNEL);
	if (!vi->vi_fullpath) {
		rc = -ENOMEM;
		goto err_free_vi;
	}

	rc = dup_vol(vol, &vi->vi_vol);
	if (rc)
		goto err_free_fullpath;

	mutex_lock(&dfs_cache.dc_lock);
	list_add_tail(&vi->vi_list, &dfs_cache.dc_vol_list);
	mutex_unlock(&dfs_cache.dc_lock);
	return 0;

err_free_fullpath:
	kfree(vi->vi_fullpath);
err_free_vi:
	kfree(vi);
	return rc;
}

static inline struct dfs_cache_vol_info *find_vol(const char *fullpath)
{
	struct dfs_cache_vol_info *vi;

	list_for_each_entry(vi, &dfs_cache.dc_vol_list, vi_list) {
		cifs_dbg(FYI, "%s: vi->vi_fullpath: %s\n", __func__,
			 vi->vi_fullpath);
		if (!strcasecmp(vi->vi_fullpath, fullpath))
			return vi;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * dfs_cache_update_vol - update vol info in DFS cache after failover
 *
 * @fullpath: fullpath to look up in volume list.
 * @server: TCP server pointer.
 *
 * Return zero if volume was updated, otherwise non-zero.
 */
int dfs_cache_update_vol(const char *fullpath, struct TCP_Server_Info *server)
{
	int rc;
	struct dfs_cache_vol_info *vi;

	if (!fullpath || !server)
		return -EINVAL;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	mutex_lock(&dfs_cache.dc_lock);

	vi = find_vol(fullpath);
	if (IS_ERR(vi)) {
		rc = PTR_ERR(vi);
		goto out;
	}

	cifs_dbg(FYI, "%s: updating volume info\n", __func__);
	memcpy(&vi->vi_vol.dstaddr, &server->dstaddr,
	       sizeof(vi->vi_vol.dstaddr));
	rc = 0;

out:
	mutex_unlock(&dfs_cache.dc_lock);
	return rc;
}
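
/*
 * Summary of the volume list lifecycle (no additional API implied):
 * dfs_cache_add_vol() registers a DFS-rooted mount so the refresh worker can
 * later find a matching TCP session, dfs_cache_update_vol() rewrites the
 * stored destination address after failing over to another target server, and
 * dfs_cache_del_vol() below drops the entry at umount time.
 */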

/**
 * dfs_cache_del_vol - remove volume info in DFS cache during umount()
 *
 * @fullpath: fullpath to look up in volume list.
 */
void dfs_cache_del_vol(const char *fullpath)
{
	struct dfs_cache_vol_info *vi;

	if (!fullpath || !*fullpath)
		return;

	cifs_dbg(FYI, "%s: fullpath: %s\n", __func__, fullpath);

	mutex_lock(&dfs_cache.dc_lock);
	vi = find_vol(fullpath);
	if (!IS_ERR(vi))
		free_vol(vi);
	mutex_unlock(&dfs_cache.dc_lock);
}

/* Get all tcons that are within a DFS namespace and can be refreshed */
static void get_tcons(struct TCP_Server_Info *server, struct list_head *head)
{
	struct cifs_ses *ses;
	struct cifs_tcon *tcon;

	INIT_LIST_HEAD(head);

	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
		list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
			if (!tcon->need_reconnect && !tcon->need_reopen_files &&
			    tcon->dfs_path) {
				tcon->tc_count++;
				list_add_tail(&tcon->ulist, head);
			}
		}
		if (ses->tcon_ipc && !ses->tcon_ipc->need_reconnect &&
		    ses->tcon_ipc->dfs_path) {
			list_add_tail(&ses->tcon_ipc->ulist, head);
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);
}

/* Refresh DFS cache entry from a given tcon */
static void do_refresh_tcon(struct dfs_cache *dc, struct cifs_tcon *tcon)
{
	int rc = 0;
	unsigned int xid;
	char *path, *npath;
	unsigned int h;
	struct dfs_cache_entry *ce;
	struct dfs_info3_param *refs = NULL;
	int numrefs = 0;

	xid = get_xid();

	path = tcon->dfs_path + 1;

	rc = get_normalized_path(path, &npath);
	if (rc)
		goto out;

	mutex_lock(&dfs_cache_list_lock);
	ce = find_cache_entry(npath, &h);
	mutex_unlock(&dfs_cache_list_lock);

	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out;
	}

	if (!cache_entry_expired(ce))
		goto out;

	if (unlikely(!tcon->ses->server->ops->get_dfs_refer)) {
		rc = -EOPNOTSUPP;
	} else {
		rc = tcon->ses->server->ops->get_dfs_refer(xid, tcon->ses, path,
							   &refs, &numrefs,
							   dc->dc_nlsc,
							   tcon->remap);
		if (!rc) {
			mutex_lock(&dfs_cache_list_lock);
			ce = __update_cache_entry(npath, refs, numrefs);
			mutex_unlock(&dfs_cache_list_lock);
			dump_refs(refs, numrefs);
			free_dfs_info_array(refs, numrefs);
			if (IS_ERR(ce))
				rc = PTR_ERR(ce);
		}
	}
	if (rc)
		cifs_dbg(FYI, "%s: failed to update expired entry\n", __func__);
out:
	free_xid(xid);
	free_normalized_path(path, npath);
}

/*
 * Worker that will refresh DFS cache based on lowest TTL value from a DFS
 * referral.
 *
 * FIXME: ensure that all requests are sent to DFS root for refreshing the
 * cache.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct dfs_cache *dc = container_of(work, struct dfs_cache,
					    dc_refresh.work);
	struct dfs_cache_vol_info *vi;
	struct TCP_Server_Info *server;
	LIST_HEAD(list);
	struct cifs_tcon *tcon, *ntcon;

	mutex_lock(&dc->dc_lock);

	list_for_each_entry(vi, &dc->dc_vol_list, vi_list) {
		server = cifs_find_tcp_session(&vi->vi_vol);
		if (IS_ERR_OR_NULL(server))
			continue;
		if (server->tcpStatus != CifsGood)
			goto next;
		get_tcons(server, &list);
		list_for_each_entry_safe(tcon, ntcon, &list, ulist) {
			do_refresh_tcon(dc, tcon);
			list_del_init(&tcon->ulist);
			cifs_put_tcon(tcon);
		}
next:
		cifs_put_tcp_session(server, 0);
	}
	queue_delayed_work(cifsiod_wq, &dc->dc_refresh, dc->dc_ttl * HZ);
	mutex_unlock(&dc->dc_lock);
}