// SPDX-License-Identifier: GPL-2.0
/*
 * DFS referral cache routines
 *
 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
 */

#include <linux/jhash.h>
#include <linux/ktime.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/nls.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include "cifsglob.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "cifs_unicode.h"
#include "smb2glob.h"
#include "dns_resolve.h"
#include "dfs.h"

#include "dfs_cache.h"

#define CACHE_HTABLE_SIZE 32
#define CACHE_MAX_ENTRIES 64
#define CACHE_MIN_TTL 120 /* 2 minutes */
#define CACHE_DEFAULT_TTL 300 /* 5 minutes */

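/*
 * A referral is treated as an interlink when the server sets the
 * ReferralServers bit but not the StorageServers bit in ReferralHeaderFlags,
 * i.e. the targets hand out further referrals instead of hosting the data.
 */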
#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))

struct cache_dfs_tgt {
	char *name;
	int path_consumed;
	struct list_head list;
};

struct cache_entry {
	struct hlist_node hlist;
	const char *path;
	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;
	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;
	struct list_head tlist;
	struct cache_dfs_tgt *tgthint;
};

static struct kmem_cache *cache_slab __read_mostly;
struct workqueue_struct *dfscache_wq;

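/* Lowest TTL (in seconds) seen across cache entries; used as the refresh worker's requeue interval */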
atomic_t dfs_cache_ttl;

static struct nls_table *cache_cp;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);
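/*
 * htable_rw_lock protects cache_htable and the entries hanging off it: lookups
 * take it shared, while entry addition, update and removal take it exclusive.
 * cache_refresh_path() returns with it held for reading on success.
 */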

/**
 * dfs_cache_canonical_path - get a canonical DFS path
 *
 * @path: DFS path
 * @cp: codepage
 * @remap: mapping type
 *
 * Return canonical path on success, otherwise an error pointer.
 */
char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
{
	char *tmp;
	int plen = 0;
	char *npath;

	if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
		return ERR_PTR(-EINVAL);

	if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
		tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
		if (!tmp) {
			cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}

		npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
		kfree(tmp);

		if (!npath) {
			cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
			return ERR_PTR(-EINVAL);
		}
	} else {
		npath = kstrdup(path, GFP_KERNEL);
		if (!npath)
			return ERR_PTR(-ENOMEM);
	}
	convert_delimiter(npath, '\\');
	return npath;
}

static inline bool cache_entry_expired(const struct cache_entry *ce)
{
	struct timespec64 ts;

	ktime_get_coarse_real_ts64(&ts);
	return timespec64_compare(&ts, &ce->etime) >= 0;
}

static inline void free_tgts(struct cache_entry *ce)
{
	struct cache_dfs_tgt *t, *n;

	list_for_each_entry_safe(t, n, &ce->tlist, list) {
		list_del(&t->list);
		kfree(t->name);
		kfree(t);
	}
}

static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}

static void flush_cache_ents(void)
{
	int i;

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];
		struct hlist_node *n;
		struct cache_entry *ce;

		hlist_for_each_entry_safe(ce, n, l, hlist) {
			if (!hlist_unhashed(&ce->hlist))
				flush_cache_ent(ce);
		}
	}
}

/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, " %s%s\n",
					   t->name,
					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}

static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
				   size_t count, loff_t *ppos)
{
	char c;
	int rc;

	rc = get_user(c, buffer);
	if (rc)
		return rc;

	if (c != '0')
		return -EINVAL;

	cifs_dbg(FYI, "clearing dfs cache\n");

	down_write(&htable_rw_lock);
	flush_cache_ents();
	up_write(&htable_rw_lock);

	return count;
}

static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

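/*
 * File operations for the DFS cache proc entry ("dfscache" under /proc/fs/cifs);
 * writing '0' to it drops every cached referral (see dfscache_proc_write()).
 */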
const struct proc_ops dfscache_proc_ops = {
	.proc_open = dfscache_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = dfscache_proc_write,
};

#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, " %s%s\n", t->name,
			 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags: 0x%x\n"
			 "path_consumed: %d\n"
			 "server_type: 0x%x\n"
			 "ref_flag: 0x%x\n"
			 "path_name: %s\n"
			 "node_name: %s\n"
			 "ttl: %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif

/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	dfscache_wq = alloc_workqueue("cifs-dfscache",
				      WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM,
				      0);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	atomic_set(&dfs_cache_ttl, CACHE_DEFAULT_TTL);
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}

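/* Case-insensitive hash of a DFS path (in @cache_cp charset), reduced to a hash table bucket index */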
static int cache_entry_hash(const void *data, int size, unsigned int *hash)
{
	int i, clen;
	const unsigned char *s = data;
	wchar_t c;
	unsigned int h = 0;

	for (i = 0; i < size; i += clen) {
		clen = cache_cp->char2uni(&s[i], size - i, &c);
		if (unlikely(clen < 0)) {
			cifs_dbg(VFS, "%s: can't convert char\n", __func__);
			return clen;
		}
		c = cifs_toupper(c);
		h = jhash(&c, sizeof(c), h);
	}
	*hash = h % CACHE_HTABLE_SIZE;
	return 0;
}

/* Return target hint of a DFS cache entry */
static inline char *get_tgt_name(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);

	return t ? t->name : ERR_PTR(-ENOENT);
}

/* Return expire time out of a new entry's TTL */
static inline struct timespec64 get_expire_time(int ttl)
{
	struct timespec64 ts = {
		.tv_sec = ttl,
		.tv_nsec = 0,
	};
	struct timespec64 now;

	ktime_get_coarse_real_ts64(&now);
	return timespec64_add(now, ts);
}

/* Allocate a new DFS target */
static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
{
	struct cache_dfs_tgt *t;

	t = kmalloc(sizeof(*t), GFP_ATOMIC);
	if (!t)
		return ERR_PTR(-ENOMEM);
	t->name = kstrdup(name, GFP_ATOMIC);
	if (!t->name) {
		kfree(t);
		return ERR_PTR(-ENOMEM);
	}
	t->path_consumed = path_consumed;
	INIT_LIST_HEAD(&t->list);
	return t;
}

/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	struct cache_dfs_tgt *target;
	int i;

	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			free_tgts(ce);
			return PTR_ERR(t);
		}
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
					  list);
	WRITE_ONCE(ce->tgthint, target);

	return 0;
}

/* Allocate a new cache entry */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}

static void remove_oldest_entry_locked(void)
{
	int i;
	struct cache_entry *ce;
	struct cache_entry *to_del = NULL;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;
			if (!to_del || timespec64_compare(&ce->etime,
							  &to_del->etime) < 0)
				to_del = ce;
		}
	}

	if (!to_del) {
		cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
		return;
	}

	cifs_dbg(FYI, "%s: removing entry\n", __func__);
	dump_ce(to_del);
	flush_cache_ent(to_del);
}

/* Add a new DFS cache entry */
static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
						  int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;
	int ttl;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		remove_oldest_entry_locked();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return ERR_PTR(rc);

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return ce;

	ttl = min_t(int, atomic_read(&dfs_cache_ttl), ce->ttl);
	atomic_set(&dfs_cache_ttl, ttl);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return ce;
}

/* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		if (unlikely(l1 < 0 && l2 < 0)) {
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}

static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
{
	struct cache_entry *ce;

	hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
		if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
			dump_ce(ce);
			return ce;
		}
	}
	return ERR_PTR(-ENOENT);
}

/*
 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 *
 * Use whole path components in the match. Must be called with htable_rw_lock held.
 *
 * Return cached entry if successful.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 * Return error ptr otherwise.
 */
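/*
 * For example, with "\domain\root\link\dir" lookups are attempted for
 * "\domain\root\link\dir" and then "\domain\root\link"; prefixes shorter than
 * three path components are not tried by the walk below.
 */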
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	unload_nls(cache_cp);
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}

/* Update a cache entry with the new referral in @refs */
static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
				     int numrefs)
{
	struct cache_dfs_tgt *target;
	char *th = NULL;
	int rc;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	target = READ_ONCE(ce->tgthint);
	if (target) {
		th = kstrdup(target->name, GFP_ATOMIC);
		if (!th)
			return -ENOMEM;
	}

	free_tgts(ce);
	ce->numtgts = 0;

	rc = copy_ref_data(refs, numrefs, ce, th);

	kfree(th);

	return rc;
}

static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
			    struct dfs_info3_param **refs, int *numrefs)
{
	int rc;
	int i;

	*refs = NULL;
	*numrefs = 0;

	if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
		return -EOPNOTSUPP;
	if (unlikely(!cache_cp))
		return -EINVAL;

	cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
	rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
					     NO_MAP_UNI_RSVD);
	if (!rc) {
		struct dfs_info3_param *ref = *refs;

		for (i = 0; i < *numrefs; i++)
			convert_delimiter(ref[i].path_name, '\\');
	}
	return rc;
}

/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, a new one is created. If it was found but
 * expired, the entry is updated accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 *
 * On success, return entry with acquired lock for reading, otherwise error ptr.
 */
static struct cache_entry *cache_refresh_path(const unsigned int xid,
					      struct cifs_ses *ses,
					      const char *path,
					      bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	int numrefs = 0;
	int rc;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (!force_refresh && !cache_entry_expired(ce))
			return ce;
	} else if (PTR_ERR(ce) != -ENOENT) {
		up_read(&htable_rw_lock);
		return ce;
	}

	/*
	 * Unlock shared access as we don't want to hold any locks while getting
	 * a new referral. The @ses used for performing the I/O could be
	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
	 * in order to failover -- if necessary.
	 */
	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired, or it is a forced
	 * refresh.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc) {
		ce = ERR_PTR(rc);
		goto out;
	}

	dump_refs(refs, numrefs);

	down_write(&htable_rw_lock);
	/* Re-check as another task might have it added or refreshed already */
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce)) {
			rc = update_cache_entry_locked(ce, refs, numrefs);
			if (rc)
				ce = ERR_PTR(rc);
		}
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}

	if (IS_ERR(ce)) {
		up_write(&htable_rw_lock);
		goto out;
	}

	downgrade_write(&htable_rw_lock);
out:
	free_dfs_info_array(refs, numrefs);
	return ce;
}

/*
 * Set up a DFS referral from a given cache entry.
 *
 * Must be called with htable_rw_lock held.
 */
static int setup_referral(const char *path, struct cache_entry *ce,
			  struct dfs_info3_param *ref, const char *target)
{
	int rc;

	cifs_dbg(FYI, "%s: set up new ref\n", __func__);

	memset(ref, 0, sizeof(*ref));

	ref->path_name = kstrdup(path, GFP_ATOMIC);
	if (!ref->path_name)
		return -ENOMEM;

	ref->node_name = kstrdup(target, GFP_ATOMIC);
	if (!ref->node_name) {
		rc = -ENOMEM;
		goto err_free_path;
	}

	ref->path_consumed = ce->path_consumed;
	ref->ttl = ce->ttl;
	ref->server_type = ce->srvtype;
	ref->ref_flag = ce->ref_flags;
	ref->flags = ce->hdr_flags;

	return 0;

err_free_path:
	kfree(ref->path_name);
	ref->path_name = NULL;
	return rc;
}

/* Return target list of a DFS cache entry */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		if (READ_ONCE(ce->tgthint) == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}

/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	ce = cache_refresh_path(xid, ses, npath, false);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
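/*
 * Illustrative call sequence (not taken from a specific caller; assumes the
 * usual cifs_sb context providing local_nls and cifs_remap()): look up
 * "\domain\dfsroot\link" and walk the returned target list.
 *
 *	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
 *	struct dfs_cache_tgt_iterator *it;
 *
 *	if (!dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
 *			    "\\domain\\dfsroot\\link", NULL, &tl)) {
 *		for (it = dfs_cache_get_tgt_iterator(&tl); it;
 *		     it = dfs_cache_get_next_tgt(&tl, it))
 *			cifs_dbg(FYI, "target: %s\n", dfs_cache_get_tgt_name(it));
 *		dfs_cache_free_tgts(&tl);
 *	}
 */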

/**
 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
 * the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in the DFS referral cache.
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return 0 if successful.
 * Return -ENOENT if the entry was not found.
 * Return non-zero for other errors.
 */
int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
			 struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	struct cache_entry *ce;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}

/**
 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
 * without sending any requests to the currently connected server.
 *
 * NOTE: This function will neither update a cache entry in case it was
 * expired, nor create a new cache entry if @path hasn't been found. It heavily
 * relies on an existing cache entry.
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: target iterator which contains the target hint to update the cache
 * entry with.
 *
 * No return value. The hint is only updated when @it matches one of the
 * entry's targets.
 */
void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
{
	struct cache_dfs_tgt *t;
	struct cache_entry *ce;

	if (!path || !it)
		return;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce))
		goto out_unlock;

	t = READ_ONCE(ce->tgthint);

	if (unlikely(!strcasecmp(it->it_name, t->name)))
		goto out_unlock;

	list_for_each_entry(t, &ce->tlist, list) {
		if (!strcasecmp(t->name, it->it_name)) {
			WRITE_ONCE(ce->tgthint, t);
			cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
				 it->it_name);
			break;
		}
	}

out_unlock:
	up_read(&htable_rw_lock);
}

/**
 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
 * target iterator (@it).
 *
 * @path: canonical DFS path to lookup in DFS referral cache.
 * @it: DFS target iterator.
 * @ref: DFS referral pointer to set up the gathered information.
 *
 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
			       struct dfs_info3_param *ref)
{
	int rc;
	struct cache_entry *ce;

	if (!it || !ref)
		return -EINVAL;

	cifs_dbg(FYI, "%s: path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_unlock;
	}

	cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);

	rc = setup_referral(path, ce, ref, it->it_name);

out_unlock:
	up_read(&htable_rw_lock);
	return rc;
}

/* Extract share from DFS target and return a pointer to prefix path or NULL */
static const char *parse_target_share(const char *target, char **share)
{
	const char *s, *seps = "/\\";
	size_t len;

	s = strpbrk(target + 1, seps);
	if (!s)
		return ERR_PTR(-EINVAL);

	len = strcspn(s + 1, seps);
	if (!len)
		return ERR_PTR(-EINVAL);
	s += len;

	len = s - target + 1;
	*share = kstrndup(target, len, GFP_KERNEL);
	if (!*share)
		return ERR_PTR(-ENOMEM);

	s = target + len;
	return s + strspn(s, seps);
}
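/*
 * For example, given the target "\srv\share\dir1\dir2", parse_target_share()
 * sets *share to "\srv\share" and returns a pointer to "dir1\dir2".
 */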

/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char sep;
	char *target_share;
	char *ppath = NULL;
	const char *target_ppath, *dfsref_ppath;
	size_t target_pplen, dfsref_pplen;
	size_t len, c;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	target_ppath = parse_target_share(it->it_name, &target_share);
	if (IS_ERR(target_ppath))
		return PTR_ERR(target_ppath);

	/* point to prefix in DFS referral path */
	dfsref_ppath = path + it->it_path_consumed;
	dfsref_ppath += strspn(dfsref_ppath, "/\\");

	target_pplen = strlen(target_ppath);
	dfsref_pplen = strlen(dfsref_ppath);

	/* merge prefix paths from DFS referral path and target node */
	if (target_pplen || dfsref_pplen) {
		len = target_pplen + dfsref_pplen + 2;
		ppath = kzalloc(len, GFP_KERNEL);
		if (!ppath) {
			kfree(target_share);
			return -ENOMEM;
		}
		c = strscpy(ppath, target_ppath, len);
		if (c && dfsref_pplen)
			ppath[c] = sep;
		strlcat(ppath, dfsref_ppath, len);
	}
	*share = target_share;
	*prefix = ppath;
	return 0;
}
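/*
 * For example, with it->it_name "\srv\share\a" and it->it_path_consumed
 * matching the length of "\domain\dfsroot" in @path "\domain\dfsroot\b\c",
 * the above yields *share = "\srv\share" and *prefix = "a\b\c".
 */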

static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
{
	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
	const char *host;
	size_t hostlen;
	struct sockaddr_storage ss;
	bool match;
	int rc;

	if (strcasecmp(s1, s2))
		return false;

	/*
	 * Resolve the share's hostname and check whether the server address matches.
	 * Otherwise just ignore it, as we either have no upcall to resolve the
	 * hostname or failed to convert the ip address.
	 */
	extract_unc_hostname(s1, &host, &hostlen);
	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	if (rc < 0) {
		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
			 __func__, (int)hostlen, host);
		return true;
	}

	cifs_server_lock(server);
	match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	cifs_server_unlock(server);

	return match;
}

/*
 * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
 * target shares in @new_tl.
 */
static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
					 const char *path,
					 struct dfs_cache_tgt_list *old_tl,
					 struct dfs_cache_tgt_list *new_tl)
{
	struct dfs_cache_tgt_iterator *oit, *nit;

	for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
	     oit = dfs_cache_get_next_tgt(old_tl, oit)) {
		for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
			if (target_share_equal(server,
					       dfs_cache_get_tgt_name(oit),
					       dfs_cache_get_tgt_name(nit))) {
				dfs_cache_noreq_update_tgthint(path, nit);
				return;
			}
		}
	}

	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
	cifs_signal_cifsd_for_reconnect(server, true);
}

static bool is_ses_good(struct cifs_ses *ses)
{
	struct TCP_Server_Info *server = ses->server;
	struct cifs_tcon *tcon = ses->tcon_ipc;
	bool ret;

	spin_lock(&ses->ses_lock);
	spin_lock(&ses->chan_lock);
	ret = !cifs_chan_needs_reconnect(ses, server) &&
		ses->ses_status == SES_GOOD &&
		!tcon->need_reconnect;
	spin_unlock(&ses->chan_lock);
	spin_unlock(&ses->ses_lock);
	return ret;
}

/* Refresh dfs referral of tcon and mark it for reconnect if needed */
static int __refresh_tcon(const char *path, struct cifs_ses *ses, bool force_refresh)
{
	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
	struct TCP_Server_Info *server = ses->server;
	bool needs_refresh = false;
	struct cache_entry *ce;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	down_read(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &old_tl);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
	}
	up_read(&htable_rw_lock);

	if (!needs_refresh) {
		rc = 0;
		goto out;
	}

	ses = CIFS_DFS_ROOT_SES(ses);
	if (!is_ses_good(ses)) {
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n",
			 __func__);
		goto out;
	}

	ce = cache_refresh_path(xid, ses, path, true);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &new_tl);
		up_read(&htable_rw_lock);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
		mark_for_reconnect_if_needed(server, path, &old_tl, &new_tl);
	}

out:
	free_xid(xid);
	dfs_cache_free_tgts(&old_tl);
	dfs_cache_free_tgts(&new_tl);
	return rc;
}

static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_ses *ses = tcon->ses;

	mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath)
		__refresh_tcon(server->leaf_fullpath + 1, ses, force_refresh);
	mutex_unlock(&server->refpath_lock);
	return 0;
}

/**
 * dfs_cache_remount_fs - remount a DFS share
 *
 * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
 * match any of the new targets, mark it for reconnect.
 *
 * @cifs_sb: cifs superblock.
 *
 * Return zero if remounted, otherwise non-zero.
 */
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;

	if (!cifs_sb || !cifs_sb->master_tlink)
		return -EINVAL;

	tcon = cifs_sb_master_tcon(cifs_sb);
	server = tcon->ses->server;

	if (!server->origin_fullpath) {
		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
		return 0;
	}
	/*
	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
	 * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE).
	 */
	cifs_autodisable_serverino(cifs_sb);
	/*
	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
	 * that have different prefix paths.
	 */
	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;

	return refresh_tcon(tcon, true);
}

/* Refresh all DFS referrals related to DFS tcon */
void dfs_cache_refresh(struct work_struct *work)
{
	struct TCP_Server_Info *server;
	struct dfs_root_ses *rses;
	struct cifs_tcon *tcon;
	struct cifs_ses *ses;

	tcon = container_of(work, struct cifs_tcon, dfs_cache_work.work);
	ses = tcon->ses;
	server = ses->server;

	mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath)
		__refresh_tcon(server->leaf_fullpath + 1, ses, false);
	mutex_unlock(&server->refpath_lock);

	list_for_each_entry(rses, &tcon->dfs_ses_list, list) {
		ses = rses->ses;
		server = ses->server;
		mutex_lock(&server->refpath_lock);
		if (server->leaf_fullpath)
			__refresh_tcon(server->leaf_fullpath + 1, ses, false);
		mutex_unlock(&server->refpath_lock);
	}

	queue_delayed_work(dfscache_wq, &tcon->dfs_cache_work,
			   atomic_read(&dfs_cache_ttl) * HZ);
1305}