Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * DFS referral cache routines
4 *
5 * Copyright (c) 2018-2019 Paulo Alcantara <palcantara@suse.de>
6 */
7
8#include <linux/jhash.h>
9#include <linux/ktime.h>
10#include <linux/slab.h>
11#include <linux/proc_fs.h>
12#include <linux/nls.h>
13#include <linux/workqueue.h>
14#include <linux/uuid.h>
15#include "cifsglob.h"
16#include "smb2pdu.h"
17#include "smb2proto.h"
18#include "cifsproto.h"
19#include "cifs_debug.h"
20#include "cifs_unicode.h"
21#include "smb2glob.h"
22#include "dns_resolve.h"
23
24#include "dfs_cache.h"
25
/* Cache sizing/expiration tuning knobs */
#define CACHE_HTABLE_SIZE 32	/* number of hash buckets in cache_htable */
#define CACHE_MAX_ENTRIES 64	/* evict the soonest-to-expire entry past this */
#define CACHE_MIN_TTL 120 /* 2 minutes */

/*
 * An interlink is a referral whose server is a referral server but not a
 * storage server, i.e. the target lives in another DFS namespace.
 */
#define IS_DFS_INTERLINK(v) (((v) & DFSREF_REFERRAL_SERVER) && !((v) & DFSREF_STORAGE_SERVER))
31
/* One DFS target within a cache entry's target list */
struct cache_dfs_tgt {
	char *name;		/* target name, owned by this struct */
	int path_consumed;	/* per-target RESP_GET_DFS_REFERRAL.PathConsumed */
	struct list_head list;	/* linked into cache_entry->tlist */
};
37
/*
 * One cached DFS referral, keyed by canonical DFS path and hashed into
 * cache_htable.  Protected by htable_rw_lock; @tgthint is additionally
 * accessed with READ_ONCE/WRITE_ONCE so readers may sample it locklessly.
 */
struct cache_entry {
	struct hlist_node hlist;	/* link in a cache_htable bucket */
	const char *path;		/* canonical DFS path (owned) */
	int hdr_flags; /* RESP_GET_DFS_REFERRAL.ReferralHeaderFlags */
	int ttl; /* DFS_REREFERRAL_V3.TimeToLive */
	int srvtype; /* DFS_REREFERRAL_V3.ServerType */
	int ref_flags; /* DFS_REREFERRAL_V3.ReferralEntryFlags */
	struct timespec64 etime;	/* absolute expiration time */
	int path_consumed; /* RESP_GET_DFS_REFERRAL.PathConsumed */
	int numtgts;			/* number of entries in @tlist */
	struct list_head tlist;		/* list of struct cache_dfs_tgt */
	struct cache_dfs_tgt *tgthint;	/* preferred target, or NULL */
};
51
static struct kmem_cache *cache_slab __read_mostly;	/* slab for struct cache_entry */
static struct workqueue_struct *dfscache_wq __read_mostly; /* runs refresh_task */

/* Smallest TTL seen so far; used as the refresh worker period (seconds) */
static int cache_ttl;
static DEFINE_SPINLOCK(cache_ttl_lock);	/* protects cache_ttl */

/* Codepage used for hashing and comparing cached DFS paths */
static struct nls_table *cache_cp;

/*
 * Number of entries in the cache
 */
static atomic_t cache_count;

/* Hash table of cached referrals, protected by htable_rw_lock */
static struct hlist_head cache_htable[CACHE_HTABLE_SIZE];
static DECLARE_RWSEM(htable_rw_lock);

static void refresh_cache_worker(struct work_struct *work);

/* Periodic worker refreshing referrals for all active DFS mounts */
static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker);
71
72/**
73 * dfs_cache_canonical_path - get a canonical DFS path
74 *
75 * @path: DFS path
76 * @cp: codepage
77 * @remap: mapping type
78 *
79 * Return canonical path if success, otherwise error.
80 */
81char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap)
82{
83 char *tmp;
84 int plen = 0;
85 char *npath;
86
87 if (!path || strlen(path) < 3 || (*path != '\\' && *path != '/'))
88 return ERR_PTR(-EINVAL);
89
90 if (unlikely(strcmp(cp->charset, cache_cp->charset))) {
91 tmp = (char *)cifs_strndup_to_utf16(path, strlen(path), &plen, cp, remap);
92 if (!tmp) {
93 cifs_dbg(VFS, "%s: failed to convert path to utf16\n", __func__);
94 return ERR_PTR(-EINVAL);
95 }
96
97 npath = cifs_strndup_from_utf16(tmp, plen, true, cache_cp);
98 kfree(tmp);
99
100 if (!npath) {
101 cifs_dbg(VFS, "%s: failed to convert path from utf16\n", __func__);
102 return ERR_PTR(-EINVAL);
103 }
104 } else {
105 npath = kstrdup(path, GFP_KERNEL);
106 if (!npath)
107 return ERR_PTR(-ENOMEM);
108 }
109 convert_delimiter(npath, '\\');
110 return npath;
111}
112
113static inline bool cache_entry_expired(const struct cache_entry *ce)
114{
115 struct timespec64 ts;
116
117 ktime_get_coarse_real_ts64(&ts);
118 return timespec64_compare(&ts, &ce->etime) >= 0;
119}
120
121static inline void free_tgts(struct cache_entry *ce)
122{
123 struct cache_dfs_tgt *t, *n;
124
125 list_for_each_entry_safe(t, n, &ce->tlist, list) {
126 list_del(&t->list);
127 kfree(t->name);
128 kfree(t);
129 }
130}
131
/*
 * Unhash @ce, free its path and targets, and return it to the slab.
 * Called with htable_rw_lock held for writing, or during cache teardown.
 */
static inline void flush_cache_ent(struct cache_entry *ce)
{
	hlist_del_init(&ce->hlist);
	kfree(ce->path);
	free_tgts(ce);
	atomic_dec(&cache_count);
	kmem_cache_free(cache_slab, ce);
}
140
141static void flush_cache_ents(void)
142{
143 int i;
144
145 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
146 struct hlist_head *l = &cache_htable[i];
147 struct hlist_node *n;
148 struct cache_entry *ce;
149
150 hlist_for_each_entry_safe(ce, n, l, hlist) {
151 if (!hlist_unhashed(&ce->hlist))
152 flush_cache_ent(ce);
153 }
154 }
155}
156
/*
 * dfs cache /proc file
 */
static int dfscache_proc_show(struct seq_file *m, void *v)
{
	int i;
	struct cache_entry *ce;
	struct cache_dfs_tgt *t;

	seq_puts(m, "DFS cache\n---------\n");

	/* Shared lock: dumping must not race with entries being flushed */
	down_read(&htable_rw_lock);
	for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
		struct hlist_head *l = &cache_htable[i];

		hlist_for_each_entry(ce, l, hlist) {
			if (hlist_unhashed(&ce->hlist))
				continue;

			seq_printf(m,
				   "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
				   ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
				   ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
				   IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
				   ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");

			/* One line per target; the hint is flagged explicitly */
			list_for_each_entry(t, &ce->tlist, list) {
				seq_printf(m, " %s%s\n",
					   t->name,
					   READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
			}
		}
	}
	up_read(&htable_rw_lock);

	return 0;
}
194
195static ssize_t dfscache_proc_write(struct file *file, const char __user *buffer,
196 size_t count, loff_t *ppos)
197{
198 char c;
199 int rc;
200
201 rc = get_user(c, buffer);
202 if (rc)
203 return rc;
204
205 if (c != '0')
206 return -EINVAL;
207
208 cifs_dbg(FYI, "clearing dfs cache\n");
209
210 down_write(&htable_rw_lock);
211 flush_cache_ents();
212 up_write(&htable_rw_lock);
213
214 return count;
215}
216
static int dfscache_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, dfscache_proc_show, NULL);
}

/* /proc/fs/cifs/dfscache: reading dumps the cache, writing '0' clears it */
const struct proc_ops dfscache_proc_ops = {
	.proc_open = dfscache_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = dfscache_proc_write,
};
229
/* Debug-only helpers dumping cache entries and raw referrals via cifs_dbg */
#ifdef CONFIG_CIFS_DEBUG2
static inline void dump_tgts(const struct cache_entry *ce)
{
	struct cache_dfs_tgt *t;

	cifs_dbg(FYI, "target list:\n");
	list_for_each_entry(t, &ce->tlist, list) {
		cifs_dbg(FYI, " %s%s\n", t->name,
			 READ_ONCE(ce->tgthint) == t ? " (target hint)" : "");
	}
}

static inline void dump_ce(const struct cache_entry *ce)
{
	cifs_dbg(FYI, "cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
		 ce->path,
		 ce->srvtype == DFS_TYPE_ROOT ? "root" : "link", ce->ttl,
		 ce->etime.tv_nsec,
		 ce->hdr_flags, ce->ref_flags,
		 IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
		 ce->path_consumed,
		 cache_entry_expired(ce) ? "yes" : "no");
	dump_tgts(ce);
}

static inline void dump_refs(const struct dfs_info3_param *refs, int numrefs)
{
	int i;

	cifs_dbg(FYI, "DFS referrals returned by the server:\n");
	for (i = 0; i < numrefs; i++) {
		const struct dfs_info3_param *ref = &refs[i];

		cifs_dbg(FYI,
			 "\n"
			 "flags: 0x%x\n"
			 "path_consumed: %d\n"
			 "server_type: 0x%x\n"
			 "ref_flag: 0x%x\n"
			 "path_name: %s\n"
			 "node_name: %s\n"
			 "ttl: %d (%dm)\n",
			 ref->flags, ref->path_consumed, ref->server_type,
			 ref->ref_flag, ref->path_name, ref->node_name,
			 ref->ttl, ref->ttl / 60);
	}
}
#else
/* No-op stubs when extra debugging is compiled out */
#define dump_tgts(e)
#define dump_ce(e)
#define dump_refs(r, n)
#endif
282
/**
 * dfs_cache_init - Initialize DFS referral cache.
 *
 * Return zero if initialized successfully, otherwise non-zero.
 */
int dfs_cache_init(void)
{
	int rc;
	int i;

	/* Single-threaded, freezable workqueue for the periodic refresher */
	dfscache_wq = alloc_workqueue("cifs-dfscache", WQ_FREEZABLE | WQ_UNBOUND, 1);
	if (!dfscache_wq)
		return -ENOMEM;

	cache_slab = kmem_cache_create("cifs_dfs_cache",
				       sizeof(struct cache_entry), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!cache_slab) {
		rc = -ENOMEM;
		goto out_destroy_wq;
	}

	for (i = 0; i < CACHE_HTABLE_SIZE; i++)
		INIT_HLIST_HEAD(&cache_htable[i]);

	atomic_set(&cache_count, 0);
	/* Prefer utf8 for path hashing/comparison; fall back to default NLS */
	cache_cp = load_nls("utf8");
	if (!cache_cp)
		cache_cp = load_nls_default();

	cifs_dbg(FYI, "%s: initialized DFS referral cache\n", __func__);
	return 0;

out_destroy_wq:
	destroy_workqueue(dfscache_wq);
	return rc;
}
320
321static int cache_entry_hash(const void *data, int size, unsigned int *hash)
322{
323 int i, clen;
324 const unsigned char *s = data;
325 wchar_t c;
326 unsigned int h = 0;
327
328 for (i = 0; i < size; i += clen) {
329 clen = cache_cp->char2uni(&s[i], size - i, &c);
330 if (unlikely(clen < 0)) {
331 cifs_dbg(VFS, "%s: can't convert char\n", __func__);
332 return clen;
333 }
334 c = cifs_toupper(c);
335 h = jhash(&c, sizeof(c), h);
336 }
337 *hash = h % CACHE_HTABLE_SIZE;
338 return 0;
339}
340
341/* Return target hint of a DFS cache entry */
342static inline char *get_tgt_name(const struct cache_entry *ce)
343{
344 struct cache_dfs_tgt *t = READ_ONCE(ce->tgthint);
345
346 return t ? t->name : ERR_PTR(-ENOENT);
347}
348
349/* Return expire time out of a new entry's TTL */
350static inline struct timespec64 get_expire_time(int ttl)
351{
352 struct timespec64 ts = {
353 .tv_sec = ttl,
354 .tv_nsec = 0,
355 };
356 struct timespec64 now;
357
358 ktime_get_coarse_real_ts64(&now);
359 return timespec64_add(now, ts);
360}
361
362/* Allocate a new DFS target */
363static struct cache_dfs_tgt *alloc_target(const char *name, int path_consumed)
364{
365 struct cache_dfs_tgt *t;
366
367 t = kmalloc(sizeof(*t), GFP_ATOMIC);
368 if (!t)
369 return ERR_PTR(-ENOMEM);
370 t->name = kstrdup(name, GFP_ATOMIC);
371 if (!t->name) {
372 kfree(t);
373 return ERR_PTR(-ENOMEM);
374 }
375 t->path_consumed = path_consumed;
376 INIT_LIST_HEAD(&t->list);
377 return t;
378}
379
/*
 * Copy DFS referral information to a cache entry and conditionally update
 * target hint.
 */
static int copy_ref_data(const struct dfs_info3_param *refs, int numrefs,
			 struct cache_entry *ce, const char *tgthint)
{
	struct cache_dfs_tgt *target;
	int i;

	/* Header fields come from the first referral; clamp to a minimum TTL */
	ce->ttl = max_t(int, refs[0].ttl, CACHE_MIN_TTL);
	ce->etime = get_expire_time(ce->ttl);
	ce->srvtype = refs[0].server_type;
	ce->hdr_flags = refs[0].flags;
	ce->ref_flags = refs[0].ref_flag;
	ce->path_consumed = refs[0].path_consumed;

	for (i = 0; i < numrefs; i++) {
		struct cache_dfs_tgt *t;

		t = alloc_target(refs[i].node_name, refs[i].path_consumed);
		if (IS_ERR(t)) {
			/* Tear down any targets already added to @ce */
			free_tgts(ce);
			return PTR_ERR(t);
		}
		/* The previous hint (matched by name) goes to the list head */
		if (tgthint && !strcasecmp(t->name, tgthint)) {
			list_add(&t->list, &ce->tlist);
			tgthint = NULL;
		} else {
			list_add_tail(&t->list, &ce->tlist);
		}
		ce->numtgts++;
	}

	/* First target becomes the hint; NULL when @refs had no targets */
	target = list_first_entry_or_null(&ce->tlist, struct cache_dfs_tgt,
					  list);
	WRITE_ONCE(ce->tgthint, target);

	return 0;
}
420
/* Allocate a new cache entry; takes ownership of refs[0].path_name */
static struct cache_entry *alloc_cache_entry(struct dfs_info3_param *refs, int numrefs)
{
	struct cache_entry *ce;
	int rc;

	ce = kmem_cache_zalloc(cache_slab, GFP_KERNEL);
	if (!ce)
		return ERR_PTR(-ENOMEM);

	/* Steal the path string rather than duplicating it */
	ce->path = refs[0].path_name;
	refs[0].path_name = NULL;

	INIT_HLIST_NODE(&ce->hlist);
	INIT_LIST_HEAD(&ce->tlist);

	rc = copy_ref_data(refs, numrefs, ce, NULL);
	if (rc) {
		kfree(ce->path);
		kmem_cache_free(cache_slab, ce);
		ce = ERR_PTR(rc);
	}
	return ce;
}
445
446static void remove_oldest_entry_locked(void)
447{
448 int i;
449 struct cache_entry *ce;
450 struct cache_entry *to_del = NULL;
451
452 WARN_ON(!rwsem_is_locked(&htable_rw_lock));
453
454 for (i = 0; i < CACHE_HTABLE_SIZE; i++) {
455 struct hlist_head *l = &cache_htable[i];
456
457 hlist_for_each_entry(ce, l, hlist) {
458 if (hlist_unhashed(&ce->hlist))
459 continue;
460 if (!to_del || timespec64_compare(&ce->etime,
461 &to_del->etime) < 0)
462 to_del = ce;
463 }
464 }
465
466 if (!to_del) {
467 cifs_dbg(FYI, "%s: no entry to remove\n", __func__);
468 return;
469 }
470
471 cifs_dbg(FYI, "%s: removing entry\n", __func__);
472 dump_ce(to_del);
473 flush_cache_ent(to_del);
474}
475
/* Add a new DFS cache entry.  Caller must hold htable_rw_lock for writing. */
static struct cache_entry *add_cache_entry_locked(struct dfs_info3_param *refs,
						  int numrefs)
{
	int rc;
	struct cache_entry *ce;
	unsigned int hash;

	WARN_ON(!rwsem_is_locked(&htable_rw_lock));

	/* Make room by evicting the soonest-to-expire entry when full */
	if (atomic_read(&cache_count) >= CACHE_MAX_ENTRIES) {
		cifs_dbg(FYI, "%s: reached max cache size (%d)\n", __func__, CACHE_MAX_ENTRIES);
		remove_oldest_entry_locked();
	}

	rc = cache_entry_hash(refs[0].path_name, strlen(refs[0].path_name), &hash);
	if (rc)
		return ERR_PTR(rc);

	ce = alloc_cache_entry(refs, numrefs);
	if (IS_ERR(ce))
		return ce;

	/* Track the smallest TTL seen and use it as the refresh period */
	spin_lock(&cache_ttl_lock);
	if (!cache_ttl) {
		cache_ttl = ce->ttl;
		queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	} else {
		cache_ttl = min_t(int, cache_ttl, ce->ttl);
		mod_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	}
	spin_unlock(&cache_ttl_lock);

	hlist_add_head(&ce->hlist, &cache_htable[hash]);
	dump_ce(ce);

	atomic_inc(&cache_count);

	return ce;
}
516
/* Check if two DFS paths are equal. @s1 and @s2 are expected to be in @cache_cp's charset */
static bool dfs_path_equal(const char *s1, int len1, const char *s2, int len2)
{
	int i, l1, l2;
	wchar_t c1, c2;

	if (len1 != len2)
		return false;

	/* Decode both strings in lockstep and compare case-insensitively */
	for (i = 0; i < len1; i += l1) {
		l1 = cache_cp->char2uni(&s1[i], len1 - i, &c1);
		l2 = cache_cp->char2uni(&s2[i], len2 - i, &c2);
		/* If neither byte decodes, fall back to a raw byte compare */
		if (unlikely(l1 < 0 && l2 < 0)) {
			if (s1[i] != s2[i])
				return false;
			l1 = 1;
			continue;
		}
		if (l1 != l2)
			return false;
		if (cifs_toupper(c1) != cifs_toupper(c2))
			return false;
	}
	return true;
}
542
543static struct cache_entry *__lookup_cache_entry(const char *path, unsigned int hash, int len)
544{
545 struct cache_entry *ce;
546
547 hlist_for_each_entry(ce, &cache_htable[hash], hlist) {
548 if (dfs_path_equal(ce->path, strlen(ce->path), path, len)) {
549 dump_ce(ce);
550 return ce;
551 }
552 }
553 return ERR_PTR(-ENOENT);
554}
555
/*
 * Find a DFS cache entry in hash table and optionally check prefix path against normalized @path.
 *
 * Use whole path components in the match. Must be called with htable_rw_lock held.
 *
 * Return cached entry if successful.
 * Return ERR_PTR(-ENOENT) if the entry is not found.
 * Return error ptr otherwise.
 */
static struct cache_entry *lookup_cache_entry(const char *path)
{
	struct cache_entry *ce;
	int cnt = 0;
	const char *s = path, *e;
	char sep = *s;
	unsigned int hash;
	int rc;

	/* Count separators to see if @path has more than two components */
	while ((s = strchr(s, sep)) && ++cnt < 3)
		s++;

	if (cnt < 3) {
		/* Plain \server\share path: match it whole */
		rc = cache_entry_hash(path, strlen(path), &hash);
		if (rc)
			return ERR_PTR(rc);
		return __lookup_cache_entry(path, hash, strlen(path));
	}
	/*
	 * Handle paths that have more than two path components and are a complete prefix of the DFS
	 * referral request path (@path).
	 *
	 * See MS-DFSC 3.2.5.5 "Receiving a Root Referral Request or Link Referral Request".
	 */
	e = path + strlen(path) - 1;
	while (e > s) {
		int len;

		/* skip separators */
		while (e > s && *e == sep)
			e--;
		if (e == s)
			break;

		/* Try the prefix of @path up to and including *e */
		len = e + 1 - path;
		rc = cache_entry_hash(path, len, &hash);
		if (rc)
			return ERR_PTR(rc);
		ce = __lookup_cache_entry(path, hash, len);
		if (!IS_ERR(ce))
			return ce;

		/* backward until separator, dropping the last component */
		while (e > s && *e != sep)
			e--;
	}
	return ERR_PTR(-ENOENT);
}
613
/**
 * dfs_cache_destroy - destroy DFS referral cache
 */
void dfs_cache_destroy(void)
{
	/* Stop the periodic refresher before tearing anything down */
	cancel_delayed_work_sync(&refresh_task);
	unload_nls(cache_cp);
	flush_cache_ents();
	kmem_cache_destroy(cache_slab);
	destroy_workqueue(dfscache_wq);

	cifs_dbg(FYI, "%s: destroyed DFS referral cache\n", __func__);
}
627
628/* Update a cache entry with the new referral in @refs */
629static int update_cache_entry_locked(struct cache_entry *ce, const struct dfs_info3_param *refs,
630 int numrefs)
631{
632 struct cache_dfs_tgt *target;
633 char *th = NULL;
634 int rc;
635
636 WARN_ON(!rwsem_is_locked(&htable_rw_lock));
637
638 target = READ_ONCE(ce->tgthint);
639 if (target) {
640 th = kstrdup(target->name, GFP_ATOMIC);
641 if (!th)
642 return -ENOMEM;
643 }
644
645 free_tgts(ce);
646 ce->numtgts = 0;
647
648 rc = copy_ref_data(refs, numrefs, ce, th);
649
650 kfree(th);
651
652 return rc;
653}
654
655static int get_dfs_referral(const unsigned int xid, struct cifs_ses *ses, const char *path,
656 struct dfs_info3_param **refs, int *numrefs)
657{
658 int rc;
659 int i;
660
661 *refs = NULL;
662 *numrefs = 0;
663
664 if (!ses || !ses->server || !ses->server->ops->get_dfs_refer)
665 return -EOPNOTSUPP;
666 if (unlikely(!cache_cp))
667 return -EINVAL;
668
669 cifs_dbg(FYI, "%s: ipc=%s referral=%s\n", __func__, ses->tcon_ipc->tree_name, path);
670 rc = ses->server->ops->get_dfs_refer(xid, ses, path, refs, numrefs, cache_cp,
671 NO_MAP_UNI_RSVD);
672 if (!rc) {
673 struct dfs_info3_param *ref = *refs;
674
675 for (i = 0; i < *numrefs; i++)
676 convert_delimiter(ref[i].path_name, '\\');
677 }
678 return rc;
679}
680
/*
 * Find, create or update a DFS cache entry.
 *
 * If the entry wasn't found, it will create a new one. Or if it was found but
 * expired, then it will update the entry accordingly.
 *
 * For interlinks, cifs_mount() and expand_dfs_referral() are supposed to
 * handle them properly.
 *
 * On success, return entry with acquired lock for reading, otherwise error ptr.
 */
static struct cache_entry *cache_refresh_path(const unsigned int xid,
					      struct cifs_ses *ses,
					      const char *path,
					      bool force_refresh)
{
	struct dfs_info3_param *refs = NULL;
	struct cache_entry *ce;
	int numrefs = 0;
	int rc;

	cifs_dbg(FYI, "%s: search path: %s\n", __func__, path);

	down_read(&htable_rw_lock);

	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		/* Fresh hit: return with the read lock still held */
		if (!force_refresh && !cache_entry_expired(ce))
			return ce;
	} else if (PTR_ERR(ce) != -ENOENT) {
		up_read(&htable_rw_lock);
		return ce;
	}

	/*
	 * Unlock shared access as we don't want to hold any locks while getting
	 * a new referral. The @ses used for performing the I/O could be
	 * reconnecting and it acquires @htable_rw_lock to look up the dfs cache
	 * in order to failover -- if necessary.
	 */
	up_read(&htable_rw_lock);

	/*
	 * Either the entry was not found, or it is expired, or it is a forced
	 * refresh.
	 * Request a new DFS referral in order to create or update a cache entry.
	 */
	rc = get_dfs_referral(xid, ses, path, &refs, &numrefs);
	if (rc) {
		ce = ERR_PTR(rc);
		goto out;
	}

	dump_refs(refs, numrefs);

	down_write(&htable_rw_lock);
	/* Re-check as another task might have it added or refreshed already */
	ce = lookup_cache_entry(path);
	if (!IS_ERR(ce)) {
		if (force_refresh || cache_entry_expired(ce)) {
			rc = update_cache_entry_locked(ce, refs, numrefs);
			if (rc)
				ce = ERR_PTR(rc);
		}
	} else if (PTR_ERR(ce) == -ENOENT) {
		ce = add_cache_entry_locked(refs, numrefs);
	}

	if (IS_ERR(ce)) {
		up_write(&htable_rw_lock);
		goto out;
	}

	/* Success: hand the entry back to the caller under the read lock */
	downgrade_write(&htable_rw_lock);
out:
	free_dfs_info_array(refs, numrefs);
	return ce;
}
759
760/*
761 * Set up a DFS referral from a given cache entry.
762 *
763 * Must be called with htable_rw_lock held.
764 */
765static int setup_referral(const char *path, struct cache_entry *ce,
766 struct dfs_info3_param *ref, const char *target)
767{
768 int rc;
769
770 cifs_dbg(FYI, "%s: set up new ref\n", __func__);
771
772 memset(ref, 0, sizeof(*ref));
773
774 ref->path_name = kstrdup(path, GFP_ATOMIC);
775 if (!ref->path_name)
776 return -ENOMEM;
777
778 ref->node_name = kstrdup(target, GFP_ATOMIC);
779 if (!ref->node_name) {
780 rc = -ENOMEM;
781 goto err_free_path;
782 }
783
784 ref->path_consumed = ce->path_consumed;
785 ref->ttl = ce->ttl;
786 ref->server_type = ce->srvtype;
787 ref->ref_flag = ce->ref_flags;
788 ref->flags = ce->hdr_flags;
789
790 return 0;
791
792err_free_path:
793 kfree(ref->path_name);
794 ref->path_name = NULL;
795 return rc;
796}
797
/*
 * Return target list of a DFS cache entry.
 *
 * The hinted target is placed at the head of @tl; on failure the partially
 * built list is torn down.  Must be called with htable_rw_lock held.
 */
static int get_targets(struct cache_entry *ce, struct dfs_cache_tgt_list *tl)
{
	int rc;
	struct list_head *head = &tl->tl_list;
	struct cache_dfs_tgt *t;
	struct dfs_cache_tgt_iterator *it, *nit;

	memset(tl, 0, sizeof(*tl));
	INIT_LIST_HEAD(head);

	list_for_each_entry(t, &ce->tlist, list) {
		it = kzalloc(sizeof(*it), GFP_ATOMIC);
		if (!it) {
			rc = -ENOMEM;
			goto err_free_it;
		}

		it->it_name = kstrdup(t->name, GFP_ATOMIC);
		if (!it->it_name) {
			kfree(it);
			rc = -ENOMEM;
			goto err_free_it;
		}
		it->it_path_consumed = t->path_consumed;

		/* Hint goes first so callers try it before other targets */
		if (READ_ONCE(ce->tgthint) == t)
			list_add(&it->it_list, head);
		else
			list_add_tail(&it->it_list, head);
	}

	tl->tl_numtgts = ce->numtgts;

	return 0;

err_free_it:
	list_for_each_entry_safe(it, nit, head, it_list) {
		list_del(&it->it_list);
		kfree(it->it_name);
		kfree(it);
	}
	return rc;
}
842
/**
 * dfs_cache_find - find a DFS cache entry
 *
 * If it doesn't find the cache entry, then it will get a DFS referral
 * for @path and create a new entry.
 *
 * In case the cache entry exists but expired, it will get a DFS referral
 * for @path and then update the respective cache entry.
 *
 * These parameters are passed down to the get_dfs_refer() call if it
 * needs to be issued:
 * @xid: syscall xid
 * @ses: smb session to issue the request on
 * @cp: codepage
 * @remap: path character remapping type
 * @path: path to lookup in DFS referral cache.
 *
 * @ref: when non-NULL, store single DFS referral result in it.
 * @tgt_list: when non-NULL, store complete DFS target list in it.
 *
 * Return zero if the target was found, otherwise non-zero.
 */
int dfs_cache_find(const unsigned int xid, struct cifs_ses *ses, const struct nls_table *cp,
		   int remap, const char *path, struct dfs_info3_param *ref,
		   struct dfs_cache_tgt_list *tgt_list)
{
	int rc;
	const char *npath;
	struct cache_entry *ce;

	npath = dfs_cache_canonical_path(path, cp, remap);
	if (IS_ERR(npath))
		return PTR_ERR(npath);

	/* On success @ce is returned with htable_rw_lock held for reading */
	ce = cache_refresh_path(xid, ses, npath, false);
	if (IS_ERR(ce)) {
		rc = PTR_ERR(ce);
		goto out_free_path;
	}

	if (ref)
		rc = setup_referral(path, ce, ref, get_tgt_name(ce));
	else
		rc = 0;
	if (!rc && tgt_list)
		rc = get_targets(ce, tgt_list);

	up_read(&htable_rw_lock);

out_free_path:
	kfree(npath);
	return rc;
}
896
897/**
898 * dfs_cache_noreq_find - find a DFS cache entry without sending any requests to
899 * the currently connected server.
900 *
901 * NOTE: This function will neither update a cache entry in case it was
902 * expired, nor create a new cache entry if @path hasn't been found. It heavily
903 * relies on an existing cache entry.
904 *
905 * @path: canonical DFS path to lookup in the DFS referral cache.
906 * @ref: when non-NULL, store single DFS referral result in it.
907 * @tgt_list: when non-NULL, store complete DFS target list in it.
908 *
909 * Return 0 if successful.
910 * Return -ENOENT if the entry was not found.
911 * Return non-zero for other errors.
912 */
913int dfs_cache_noreq_find(const char *path, struct dfs_info3_param *ref,
914 struct dfs_cache_tgt_list *tgt_list)
915{
916 int rc;
917 struct cache_entry *ce;
918
919 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
920
921 down_read(&htable_rw_lock);
922
923 ce = lookup_cache_entry(path);
924 if (IS_ERR(ce)) {
925 rc = PTR_ERR(ce);
926 goto out_unlock;
927 }
928
929 if (ref)
930 rc = setup_referral(path, ce, ref, get_tgt_name(ce));
931 else
932 rc = 0;
933 if (!rc && tgt_list)
934 rc = get_targets(ce, tgt_list);
935
936out_unlock:
937 up_read(&htable_rw_lock);
938 return rc;
939}
940
941/**
942 * dfs_cache_noreq_update_tgthint - update target hint of a DFS cache entry
943 * without sending any requests to the currently connected server.
944 *
945 * NOTE: This function will neither update a cache entry in case it was
946 * expired, nor create a new cache entry if @path hasn't been found. It heavily
947 * relies on an existing cache entry.
948 *
949 * @path: canonical DFS path to lookup in DFS referral cache.
950 * @it: target iterator which contains the target hint to update the cache
951 * entry with.
952 *
953 * Return zero if the target hint was updated successfully, otherwise non-zero.
954 */
955void dfs_cache_noreq_update_tgthint(const char *path, const struct dfs_cache_tgt_iterator *it)
956{
957 struct cache_dfs_tgt *t;
958 struct cache_entry *ce;
959
960 if (!path || !it)
961 return;
962
963 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
964
965 down_read(&htable_rw_lock);
966
967 ce = lookup_cache_entry(path);
968 if (IS_ERR(ce))
969 goto out_unlock;
970
971 t = READ_ONCE(ce->tgthint);
972
973 if (unlikely(!strcasecmp(it->it_name, t->name)))
974 goto out_unlock;
975
976 list_for_each_entry(t, &ce->tlist, list) {
977 if (!strcasecmp(t->name, it->it_name)) {
978 WRITE_ONCE(ce->tgthint, t);
979 cifs_dbg(FYI, "%s: new target hint: %s\n", __func__,
980 it->it_name);
981 break;
982 }
983 }
984
985out_unlock:
986 up_read(&htable_rw_lock);
987}
988
989/**
990 * dfs_cache_get_tgt_referral - returns a DFS referral (@ref) from a given
991 * target iterator (@it).
992 *
993 * @path: canonical DFS path to lookup in DFS referral cache.
994 * @it: DFS target iterator.
995 * @ref: DFS referral pointer to set up the gathered information.
996 *
997 * Return zero if the DFS referral was set up correctly, otherwise non-zero.
998 */
999int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iterator *it,
1000 struct dfs_info3_param *ref)
1001{
1002 int rc;
1003 struct cache_entry *ce;
1004
1005 if (!it || !ref)
1006 return -EINVAL;
1007
1008 cifs_dbg(FYI, "%s: path: %s\n", __func__, path);
1009
1010 down_read(&htable_rw_lock);
1011
1012 ce = lookup_cache_entry(path);
1013 if (IS_ERR(ce)) {
1014 rc = PTR_ERR(ce);
1015 goto out_unlock;
1016 }
1017
1018 cifs_dbg(FYI, "%s: target name: %s\n", __func__, it->it_name);
1019
1020 rc = setup_referral(path, ce, ref, it->it_name);
1021
1022out_unlock:
1023 up_read(&htable_rw_lock);
1024 return rc;
1025}
1026
/* Extract share from DFS target and return a pointer to prefix path or NULL */
static const char *parse_target_share(const char *target, char **share)
{
	const char *s, *seps = "/\\";
	size_t len;

	/* Find the separator following the server component */
	s = strpbrk(target + 1, seps);
	if (!s)
		return ERR_PTR(-EINVAL);

	/* Length of the share component after that separator */
	len = strcspn(s + 1, seps);
	if (!len)
		return ERR_PTR(-EINVAL);
	s += len;

	/* Duplicate "\server\share" into *share (caller frees) */
	len = s - target + 1;
	*share = kstrndup(target, len, GFP_KERNEL);
	if (!*share)
		return ERR_PTR(-ENOMEM);

	/* The remainder, past any separators, is the prefix path */
	s = target + len;
	return s + strspn(s, seps);
}
1050
/**
 * dfs_cache_get_tgt_share - parse a DFS target
 *
 * @path: DFS full path
 * @it: DFS target iterator.
 * @share: tree name.
 * @prefix: prefix path.
 *
 * Return zero if target was parsed correctly, otherwise non-zero.
 */
int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share,
			    char **prefix)
{
	char sep;
	char *target_share;
	char *ppath = NULL;
	const char *target_ppath, *dfsref_ppath;
	size_t target_pplen, dfsref_pplen;
	size_t len, c;

	if (!it || !path || !share || !prefix || strlen(path) < it->it_path_consumed)
		return -EINVAL;

	sep = it->it_name[0];
	if (sep != '\\' && sep != '/')
		return -EINVAL;

	target_ppath = parse_target_share(it->it_name, &target_share);
	if (IS_ERR(target_ppath))
		return PTR_ERR(target_ppath);

	/* point to prefix in DFS referral path */
	dfsref_ppath = path + it->it_path_consumed;
	dfsref_ppath += strspn(dfsref_ppath, "/\\");

	target_pplen = strlen(target_ppath);
	dfsref_pplen = strlen(dfsref_ppath);

	/* merge prefix paths from DFS referral path and target node */
	if (target_pplen || dfsref_pplen) {
		/* +2: one separator plus the trailing NUL */
		len = target_pplen + dfsref_pplen + 2;
		ppath = kzalloc(len, GFP_KERNEL);
		if (!ppath) {
			kfree(target_share);
			return -ENOMEM;
		}
		c = strscpy(ppath, target_ppath, len);
		/* Join the two prefixes with the target's separator */
		if (c && dfsref_pplen)
			ppath[c] = sep;
		strlcat(ppath, dfsref_ppath, len);
	}
	/* Ownership of both strings passes to the caller */
	*share = target_share;
	*prefix = ppath;
	return 0;
}
1106
/*
 * Check whether target shares @s1 and @s2 refer to the same server: equal by
 * name, and (when resolvable) the resolved address matches @server's.
 */
static bool target_share_equal(struct TCP_Server_Info *server, const char *s1, const char *s2)
{
	char unc[sizeof("\\\\") + SERVER_NAME_LENGTH] = {0};
	const char *host;
	size_t hostlen;
	struct sockaddr_storage ss;
	bool match;
	int rc;

	if (strcasecmp(s1, s2))
		return false;

	/*
	 * Resolve share's hostname and check if server address matches. Otherwise just ignore it
	 * as we could not have upcall to resolve hostname or failed to convert ip address.
	 */
	extract_unc_hostname(s1, &host, &hostlen);
	scnprintf(unc, sizeof(unc), "\\\\%.*s", (int)hostlen, host);

	rc = dns_resolve_server_name_to_ip(unc, (struct sockaddr *)&ss, NULL);
	if (rc < 0) {
		cifs_dbg(FYI, "%s: could not resolve %.*s. assuming server address matches.\n",
			 __func__, (int)hostlen, host);
		return true;
	}

	/* Compare against the currently connected address under the lock */
	cifs_server_lock(server);
	match = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	cifs_server_unlock(server);

	return match;
}
1139
/*
 * Mark dfs tcon for reconnecting when the currently connected tcon does not match any of the new
 * target shares in @refs.
 */
static void mark_for_reconnect_if_needed(struct TCP_Server_Info *server,
					 struct dfs_cache_tgt_list *old_tl,
					 struct dfs_cache_tgt_list *new_tl)
{
	struct dfs_cache_tgt_iterator *oit, *nit;

	/* Any old/new target pair that matches means the share is still valid */
	for (oit = dfs_cache_get_tgt_iterator(old_tl); oit;
	     oit = dfs_cache_get_next_tgt(old_tl, oit)) {
		for (nit = dfs_cache_get_tgt_iterator(new_tl); nit;
		     nit = dfs_cache_get_next_tgt(new_tl, nit)) {
			if (target_share_equal(server,
					       dfs_cache_get_tgt_name(oit),
					       dfs_cache_get_tgt_name(nit)))
				return;
		}
	}

	cifs_dbg(FYI, "%s: no cached or matched targets. mark dfs share for reconnect.\n", __func__);
	cifs_signal_cifsd_for_reconnect(server, true);
}
1164
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
static int __refresh_tcon(const char *path, struct cifs_tcon *tcon, bool force_refresh)
{
	struct dfs_cache_tgt_list old_tl = DFS_CACHE_TGT_LIST_INIT(old_tl);
	struct dfs_cache_tgt_list new_tl = DFS_CACHE_TGT_LIST_INIT(new_tl);
	struct cifs_ses *ses = CIFS_DFS_ROOT_SES(tcon->ses);
	struct cifs_tcon *ipc = ses->tcon_ipc;
	bool needs_refresh = false;
	struct cache_entry *ce;
	unsigned int xid;
	int rc = 0;

	xid = get_xid();

	/* Snapshot the current target list before any refresh happens */
	down_read(&htable_rw_lock);
	ce = lookup_cache_entry(path);
	needs_refresh = force_refresh || IS_ERR(ce) || cache_entry_expired(ce);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &old_tl);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
	}
	up_read(&htable_rw_lock);

	if (!needs_refresh) {
		rc = 0;
		goto out;
	}

	/* The referral request goes over the IPC tcon; skip if disconnected */
	spin_lock(&ipc->tc_lock);
	if (ipc->status != TID_GOOD) {
		spin_unlock(&ipc->tc_lock);
		cifs_dbg(FYI, "%s: skip cache refresh due to disconnected ipc\n", __func__);
		goto out;
	}
	spin_unlock(&ipc->tc_lock);

	/* On success @ce is returned with htable_rw_lock held for reading */
	ce = cache_refresh_path(xid, ses, path, true);
	if (!IS_ERR(ce)) {
		rc = get_targets(ce, &new_tl);
		up_read(&htable_rw_lock);
		cifs_dbg(FYI, "%s: get_targets: %d\n", __func__, rc);
		mark_for_reconnect_if_needed(tcon->ses->server, &old_tl, &new_tl);
	}

out:
	free_xid(xid);
	dfs_cache_free_tgts(&old_tl);
	dfs_cache_free_tgts(&new_tl);
	return rc;
}
1215
/*
 * Refresh the referral for @tcon's server leaf path.  The __refresh_tcon()
 * result is intentionally ignored; always returns 0.
 */
static int refresh_tcon(struct cifs_tcon *tcon, bool force_refresh)
{
	struct TCP_Server_Info *server = tcon->ses->server;

	/* refpath_lock keeps leaf_fullpath stable while it is used */
	mutex_lock(&server->refpath_lock);
	if (server->leaf_fullpath)
		__refresh_tcon(server->leaf_fullpath + 1, tcon, force_refresh);
	mutex_unlock(&server->refpath_lock);
	return 0;
}
1226
/**
 * dfs_cache_remount_fs - remount a DFS share
 *
 * Reconfigure dfs mount by forcing a new DFS referral and if the currently cached targets do not
 * match any of the new targets, mark it for reconnect.
 *
 * @cifs_sb: cifs superblock.
 *
 * Return zero if remounted, otherwise non-zero.
 */
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;

	if (!cifs_sb || !cifs_sb->master_tlink)
		return -EINVAL;

	tcon = cifs_sb_master_tcon(cifs_sb);
	server = tcon->ses->server;

	/* origin_fullpath is only set on DFS mounts */
	if (!server->origin_fullpath) {
		cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
		return 0;
	}
	/*
	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
	 * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE).
	 */
	cifs_autodisable_serverino(cifs_sb);
	/*
	 * Force the use of prefix path to support failover on DFS paths that resolve to targets
	 * that have different prefix paths.
	 */
	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;

	return refresh_tcon(tcon, true);
}
1265
/*
 * Worker that will refresh DFS cache from all active mounts based on lowest TTL value
 * from a DFS referral.
 */
static void refresh_cache_worker(struct work_struct *work)
{
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon, *ntcon;
	struct list_head tcons;
	struct cifs_ses *ses;

	INIT_LIST_HEAD(&tcons);

	/*
	 * Collect all tcons (and IPC tcons) under the global lock, pinning
	 * each with a reference so they survive after the lock is dropped.
	 */
	spin_lock(&cifs_tcp_ses_lock);
	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
		if (!server->leaf_fullpath)
			continue;

		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
			if (ses->tcon_ipc) {
				/* IPC tcons are pinned via their session */
				ses->ses_count++;
				list_add_tail(&ses->tcon_ipc->ulist, &tcons);
			}
			list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
				if (!tcon->ipc) {
					tcon->tc_count++;
					list_add_tail(&tcon->ulist, &tcons);
				}
			}
		}
	}
	spin_unlock(&cifs_tcp_ses_lock);

	/* Refresh each collected tcon, then drop the reference taken above */
	list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
		struct TCP_Server_Info *server = tcon->ses->server;

		list_del_init(&tcon->ulist);

		mutex_lock(&server->refpath_lock);
		if (server->leaf_fullpath)
			__refresh_tcon(server->leaf_fullpath + 1, tcon, false);
		mutex_unlock(&server->refpath_lock);

		if (tcon->ipc)
			cifs_put_smb_ses(tcon->ses);
		else
			cifs_put_tcon(tcon);
	}

	/* Re-arm using the smallest TTL tracked in cache_ttl */
	spin_lock(&cache_ttl_lock);
	queue_delayed_work(dfscache_wq, &refresh_task, cache_ttl * HZ);
	spin_unlock(&cache_ttl_lock);
}