// SPDX-License-Identifier: LGPL-2.1
/*
 *
 * Copyright (C) International Business Machines Corp., 2002,2008
 * Author(s): Steve French (sfrench@us.ibm.com)
 *
 */

#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/mempool.h>
#include <linux/vmalloc.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smberr.h"
#include "nterr.h"
#include "cifs_unicode.h"
#include "smb2pdu.h"
#include "smb2proto.h"
#include "smb1proto.h"
#include "cifsfs.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dns_resolve.h"
#include "dfs_cache.h"
#include "dfs.h"
#endif
#include "fs_context.h"
#include "cached_dir.h"

/* The xid serves as a useful identifier for each incoming vfs request,
   in a similar way to the mid which is useful to track each sent smb,
   and CurrentXid can also provide a running counter (although it
   will eventually wrap past zero) of the total vfs operations handled
   since the cifs fs was mounted */

unsigned int
_get_xid(void)
{
	unsigned int xid;

	spin_lock(&GlobalMid_Lock);
	GlobalTotalActiveXid++;

	/* keep high water mark for number of simultaneous ops in filesystem */
	if (GlobalTotalActiveXid > GlobalMaxActiveXid)
		GlobalMaxActiveXid = GlobalTotalActiveXid;
	if (GlobalTotalActiveXid > 65000)
		cifs_dbg(FYI, "warning: more than 65000 requests active\n");
	xid = GlobalCurrentXid++;
	spin_unlock(&GlobalMid_Lock);
	return xid;
}

void
_free_xid(unsigned int xid)
{
	spin_lock(&GlobalMid_Lock);
	/* if (GlobalTotalActiveXid == 0)
		BUG(); */
	GlobalTotalActiveXid--;
	spin_unlock(&GlobalMid_Lock);
}

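/*
 * Illustrative pairing only (not a call site in this file): vfs entry
 * points take an xid before issuing SMB requests on behalf of an
 * operation and release it on return, assuming the usual get_xid()/
 * free_xid() wrappers around _get_xid()/_free_xid() from the cifs headers:
 *
 *	unsigned int xid = get_xid();
 *	...issue request(s) for this vfs operation...
 *	free_xid(xid);
 */
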
struct cifs_ses *
sesInfoAlloc(void)
{
	struct cifs_ses *ret_buf;

	ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
	if (ret_buf) {
		atomic_inc(&sesInfoAllocCount);
		spin_lock_init(&ret_buf->ses_lock);
		ret_buf->ses_status = SES_NEW;
		++ret_buf->ses_count;
		INIT_LIST_HEAD(&ret_buf->smb_ses_list);
		INIT_LIST_HEAD(&ret_buf->tcon_list);
		mutex_init(&ret_buf->session_mutex);
		spin_lock_init(&ret_buf->iface_lock);
		INIT_LIST_HEAD(&ret_buf->iface_list);
		spin_lock_init(&ret_buf->chan_lock);
	}
	return ret_buf;
}

void
sesInfoFree(struct cifs_ses *buf_to_free)
{
	struct cifs_server_iface *iface = NULL, *niface = NULL;

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
		return;
	}

	unload_nls(buf_to_free->local_nls);
	atomic_dec(&sesInfoAllocCount);
	kfree(buf_to_free->serverOS);
	kfree(buf_to_free->serverDomain);
	kfree(buf_to_free->serverNOS);
	kfree_sensitive(buf_to_free->password);
	kfree_sensitive(buf_to_free->password2);
	kfree(buf_to_free->user_name);
	kfree(buf_to_free->domainName);
	kfree(buf_to_free->dns_dom);
	kfree_sensitive(buf_to_free->auth_key.response);
	spin_lock(&buf_to_free->iface_lock);
	list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
				 iface_head)
		kref_put(&iface->refcount, release_iface);
	spin_unlock(&buf_to_free->iface_lock);
	kfree_sensitive(buf_to_free);
}

struct cifs_tcon *
tcon_info_alloc(bool dir_leases_enabled, enum smb3_tcon_ref_trace trace)
{
	struct cifs_tcon *ret_buf;
	static atomic_t tcon_debug_id;

	ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
	if (!ret_buf)
		return NULL;

	if (dir_leases_enabled == true) {
		ret_buf->cfids = init_cached_dirs();
		if (!ret_buf->cfids) {
			kfree(ret_buf);
			return NULL;
		}
	}
	/* else ret_buf->cfids is already set to NULL above */

	atomic_inc(&tconInfoAllocCount);
	ret_buf->status = TID_NEW;
	ret_buf->debug_id = atomic_inc_return(&tcon_debug_id);
	ret_buf->tc_count = 1;
	spin_lock_init(&ret_buf->tc_lock);
	INIT_LIST_HEAD(&ret_buf->openFileList);
	INIT_LIST_HEAD(&ret_buf->tcon_list);
	INIT_LIST_HEAD(&ret_buf->cifs_sb_list);
	spin_lock_init(&ret_buf->open_file_lock);
	spin_lock_init(&ret_buf->stat_lock);
	spin_lock_init(&ret_buf->sb_list_lock);
	atomic_set(&ret_buf->num_local_opens, 0);
	atomic_set(&ret_buf->num_remote_opens, 0);
	ret_buf->stats_from_time = ktime_get_real_seconds();
#ifdef CONFIG_CIFS_FSCACHE
	mutex_init(&ret_buf->fscache_lock);
#endif
	trace_smb3_tcon_ref(ret_buf->debug_id, ret_buf->tc_count, trace);
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_LIST_HEAD(&ret_buf->dfs_ses_list);
#endif
	INIT_LIST_HEAD(&ret_buf->pending_opens);
	INIT_DELAYED_WORK(&ret_buf->query_interfaces,
			  smb2_query_server_interfaces);
#ifdef CONFIG_CIFS_DFS_UPCALL
	INIT_DELAYED_WORK(&ret_buf->dfs_cache_work, dfs_cache_refresh);
#endif

	return ret_buf;
}

void
tconInfoFree(struct cifs_tcon *tcon, enum smb3_tcon_ref_trace trace)
{
	if (tcon == NULL) {
		cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
		return;
	}
	trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count, trace);
	free_cached_dirs(tcon->cfids);
	atomic_dec(&tconInfoAllocCount);
	kfree(tcon->nativeFileSystem);
	kfree_sensitive(tcon->password);
	kfree(tcon->origin_fullpath);
	kfree(tcon);
}

void *
cifs_buf_get(void)
{
	void *ret_buf = NULL;
	/*
	 * SMB2 header is bigger than CIFS one - no problems to clean some
	 * more bytes for CIFS.
	 */
	size_t buf_size = sizeof(struct smb2_hdr);

	/*
	 * We could use negotiated size instead of max_msgsize -
	 * but it may be more efficient to always alloc same size
	 * albeit slightly larger than necessary and maxbuffersize
	 * defaults to this and can not be bigger.
	 */
	ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);

	/* clear the first few header bytes */
	/* for most paths, more is cleared in header_assemble */
	memset(ret_buf, 0, buf_size + 3);
	atomic_inc(&buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_buf_release(void *buf_to_free)
{
	if (buf_to_free == NULL) {
		/* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
		return;
	}
	mempool_free(buf_to_free, cifs_req_poolp);

	atomic_dec(&buf_alloc_count);
	return;
}

void *
cifs_small_buf_get(void)
{
	void *ret_buf = NULL;

/* We could use negotiated size instead of max_msgsize -
   but it may be more efficient to always alloc same size
   albeit slightly larger than necessary and maxbuffersize
   defaults to this and can not be bigger */
	ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
	/* No need to clear memory here, cleared in header assemble */
	atomic_inc(&small_buf_alloc_count);
#ifdef CONFIG_CIFS_STATS2
	atomic_inc(&total_small_buf_alloc_count);
#endif /* CONFIG_CIFS_STATS2 */

	return ret_buf;
}

void
cifs_small_buf_release(void *buf_to_free)
{

	if (buf_to_free == NULL) {
		cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
		return;
	}
	mempool_free(buf_to_free, cifs_sm_req_poolp);

	atomic_dec(&small_buf_alloc_count);
	return;
}

void
free_rsp_buf(int resp_buftype, void *rsp)
{
	if (resp_buftype == CIFS_SMALL_BUFFER)
		cifs_small_buf_release(rsp);
	else if (resp_buftype == CIFS_LARGE_BUFFER)
		cifs_buf_release(rsp);
}

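/*
 * Illustrative lifecycle only (not a call site in this file): buffers taken
 * from the two mempools above are returned to the same pool, either directly
 * via the matching *_release() helper or through free_rsp_buf() with the
 * corresponding buffer type, e.g.:
 *
 *	void *buf = cifs_small_buf_get();
 *	...use buf...
 *	free_rsp_buf(CIFS_SMALL_BUFFER, buf);
 */
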
void
dump_smb(void *buf, int smb_buf_length)
{
	if (traceSMB == 0)
		return;

	print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
		       smb_buf_length, true);
}

void
cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
		struct cifs_tcon *tcon = NULL;

		if (cifs_sb->master_tlink)
			tcon = cifs_sb_master_tcon(cifs_sb);

		cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
		cifs_sb->mnt_cifs_serverino_autodisabled = true;
		cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
			 tcon ? tcon->tree_name : "new server");
		cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
		cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");

	}
}

void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
{
	oplock &= 0xF;

	if (oplock == OPLOCK_EXCLUSIVE) {
		cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else if (oplock == OPLOCK_READ) {
		cinode->oplock = CIFS_CACHE_READ_FLG;
		cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
			 &cinode->netfs.inode);
	} else
		cinode->oplock = 0;
}

/*
 * We wait for oplock breaks to be processed before we attempt to perform
 * writes.
 */
int cifs_get_writer(struct cifsInodeInfo *cinode)
{
	int rc;

start:
	rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
			 TASK_KILLABLE);
	if (rc)
		return rc;

	spin_lock(&cinode->writers_lock);
	if (!cinode->writers)
		set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
	cinode->writers++;
	/* Check to see if we have started servicing an oplock break */
	if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
		cinode->writers--;
		if (cinode->writers == 0) {
			clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
			wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
		}
		spin_unlock(&cinode->writers_lock);
		goto start;
	}
	spin_unlock(&cinode->writers_lock);
	return 0;
}

void cifs_put_writer(struct cifsInodeInfo *cinode)
{
	spin_lock(&cinode->writers_lock);
	cinode->writers--;
	if (cinode->writers == 0) {
		clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
		wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
	}
	spin_unlock(&cinode->writers_lock);
}

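/*
 * Illustrative pairing only (not a call site in this file): write paths are
 * expected to bracket the actual write with the two helpers above so that a
 * write never races with an in-progress oplock break:
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	...perform the write...
 *	cifs_put_writer(cinode);
 */
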
/**
 * cifs_queue_oplock_break - queue the oplock break handler for cfile
 * @cfile: The file to break the oplock on
 *
 * This function is called from the demultiplex thread when it
 * receives an oplock break for @cfile.
 *
 * Assumes the tcon->open_file_lock is held.
 * Assumes cfile->file_info_lock is NOT held.
 */
void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	/*
	 * Bump the handle refcount now while we hold the
	 * open_file_lock to enforce the validity of it for the oplock
	 * break handler. The matching put is done at the end of the
	 * handler.
	 */
	cifsFileInfo_get(cfile);

	queue_work(cifsoplockd_wq, &cfile->oplock_break);
}

void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
{
	clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
	wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->ctx->backupgid))
			return true;
	}

	return false;
}

void
cifs_del_pending_open(struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
	list_del(&open->olist);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

void
cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
			     struct cifs_pending_open *open)
{
	memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
	open->oplock = CIFS_OPLOCK_NO_CHANGE;
	open->tlink = tlink;
	fid->pending_open = open;
	list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
}

void
cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
		      struct cifs_pending_open *open)
{
	spin_lock(&tlink_tcon(tlink)->open_file_lock);
	cifs_add_pending_open_locked(fid, tlink, open);
	spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
}

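/*
 * Illustrative use only (not a call site in this file): open paths typically
 * register a pending open before sending the create request, so that a lease
 * break arriving before the open completes can still be matched to it, and
 * remove it once the open has finished:
 *
 *	struct cifs_pending_open open;
 *
 *	cifs_add_pending_open(&fid, tlink, &open);
 *	...send the create request and wait for the response...
 *	cifs_del_pending_open(&open);
 */
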
/*
 * Critical section which runs after acquiring deferred_lock.
 * As there is no reference count on cifs_deferred_close, pdclose
 * should not be used outside deferred_lock.
 */
bool
cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
{
	struct cifs_deferred_close *dclose;

	list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
		if ((dclose->netfid == cfile->fid.netfid) &&
		    (dclose->persistent_fid == cfile->fid.persistent_fid) &&
		    (dclose->volatile_fid == cfile->fid.volatile_fid)) {
			*pdclose = dclose;
			return true;
		}
	}
	return false;
}

/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
{
	bool is_deferred = false;
	struct cifs_deferred_close *pdclose;

	is_deferred = cifs_is_deferred_close(cfile, &pdclose);
	if (is_deferred) {
		kfree(dclose);
		return;
	}

	dclose->tlink = cfile->tlink;
	dclose->netfid = cfile->fid.netfid;
	dclose->persistent_fid = cfile->fid.persistent_fid;
	dclose->volatile_fid = cfile->fid.volatile_fid;
	list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
}

/*
 * Critical section which runs after acquiring deferred_lock.
 */
void
cifs_del_deferred_close(struct cifsFileInfo *cfile)
{
	bool is_deferred = false;
	struct cifs_deferred_close *dclose;

	is_deferred = cifs_is_deferred_close(cfile, &dclose);
	if (!is_deferred)
		return;
	list_del(&dclose->dlist);
	kfree(dclose);
}

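/*
 * Illustrative locking pattern only (not a call site in this file): as the
 * comments above note, all three deferred-close helpers expect the caller to
 * hold the inode's deferred_lock, e.g.:
 *
 *	spin_lock(&cinode->deferred_lock);
 *	cifs_add_deferred_close(cfile, dclose);
 *	spin_unlock(&cinode->deferred_lock);
 */
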
void
cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
{
	struct cifsFileInfo *cfile = NULL;
	struct file_list *tmp_list, *tmp_next_list;
	LIST_HEAD(file_head);

	if (cifs_inode == NULL)
		return;

	spin_lock(&cifs_inode->open_file_lock);
	list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&cifs_inode->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&cifs_inode->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&cifs_inode->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, false, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void
cifs_close_all_deferred_files(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *cfile;
	struct file_list *tmp_list, *tmp_next_list;
	LIST_HEAD(file_head);

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		if (delayed_work_pending(&cfile->deferred)) {
			if (cancel_delayed_work(&cfile->deferred)) {
				spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
				cifs_del_deferred_close(cfile);
				spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

				tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
				if (tmp_list == NULL)
					break;
				tmp_list->cfile = cfile;
				list_add_tail(&tmp_list->list, &file_head);
			}
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

void cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon,
					   struct dentry *dentry)
{
	struct file_list *tmp_list, *tmp_next_list;
	struct cifsFileInfo *cfile;
	LIST_HEAD(file_head);

	spin_lock(&tcon->open_file_lock);
	list_for_each_entry(cfile, &tcon->openFileList, tlist) {
		if ((cfile->dentry == dentry) &&
		    delayed_work_pending(&cfile->deferred) &&
		    cancel_delayed_work(&cfile->deferred)) {
			spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
			cifs_del_deferred_close(cfile);
			spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);

			tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
			if (tmp_list == NULL)
				break;
			tmp_list->cfile = cfile;
			list_add_tail(&tmp_list->list, &file_head);
		}
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
		_cifsFileInfo_put(tmp_list->cfile, true, false);
		list_del(&tmp_list->list);
		kfree(tmp_list);
	}
}

/*
 * If a dentry has been deleted, all corresponding open handles should know
 * that, so that we do not defer closing them.
 */
void cifs_mark_open_handles_for_deleted_file(struct inode *inode,
					     const char *path)
{
	struct cifsFileInfo *cfile;
	void *page;
	const char *full_path;
	struct cifsInodeInfo *cinode = CIFS_I(inode);

	page = alloc_dentry_path();
	spin_lock(&cinode->open_file_lock);

	/*
	 * note: we need to construct path from dentry and compare only if the
	 * inode has any hardlinks. When number of hardlinks is 1, we can just
	 * mark all open handles since they are going to be from the same file.
	 */
	if (inode->i_nlink > 1) {
		list_for_each_entry(cfile, &cinode->openFileList, flist) {
			full_path = build_path_from_dentry(cfile->dentry, page);
			if (!IS_ERR(full_path) && strcmp(full_path, path) == 0)
				cfile->status_file_deleted = true;
		}
	} else {
		list_for_each_entry(cfile, &cinode->openFileList, flist)
			cfile->status_file_deleted = true;
	}
	spin_unlock(&cinode->open_file_lock);
	free_dentry_path(page);
}

/* parses DFS referral V3 structure
 * caller is responsible for freeing target_nodes
 * returns:
 * - on success - 0
 * - on failure - errno
 */
int
parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
		    unsigned int *num_of_nodes,
		    struct dfs_info3_param **target_nodes,
		    const struct nls_table *nls_codepage, int remap,
		    const char *searchName, bool is_unicode)
{
	int i, rc = 0;
	char *data_end;
	struct dfs_referral_level_3 *ref;

	if (rsp_size < sizeof(*rsp)) {
		cifs_dbg(VFS | ONCE,
			 "%s: header is malformed (size is %u, must be %zu)\n",
			 __func__, rsp_size, sizeof(*rsp));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

	if (*num_of_nodes < 1) {
		cifs_dbg(VFS | ONCE, "%s: [path=%s] num_referrals must be at least > 0, but we got %d\n",
			 __func__, searchName, *num_of_nodes);
		rc = -ENOENT;
		goto parse_DFS_referrals_exit;
	}

	if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) {
		cifs_dbg(VFS | ONCE,
			 "%s: malformed buffer (size is %u, must be at least %zu)\n",
			 __func__, rsp_size,
			 sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
	if (ref->VersionNumber != cpu_to_le16(3)) {
		cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
			 le16_to_cpu(ref->VersionNumber));
		rc = -EINVAL;
		goto parse_DFS_referrals_exit;
	}

	/* get the upper boundary of the resp buffer */
	data_end = (char *)rsp + rsp_size;

	cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
		 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));

	*target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
				GFP_KERNEL);
	if (*target_nodes == NULL) {
		rc = -ENOMEM;
		goto parse_DFS_referrals_exit;
	}

	/* collect necessary data from referrals */
	for (i = 0; i < *num_of_nodes; i++) {
		char *temp;
		int max_len;
		struct dfs_info3_param *node = (*target_nodes)+i;

		node->flags = le32_to_cpu(rsp->DFSFlags);
		if (is_unicode) {
			__le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
					      GFP_KERNEL);
			if (tmp == NULL) {
				rc = -ENOMEM;
				goto parse_DFS_referrals_exit;
			}
			cifsConvertToUTF16((__le16 *) tmp, searchName,
					   PATH_MAX, nls_codepage, remap);
			node->path_consumed = cifs_utf16_bytes(tmp,
					le16_to_cpu(rsp->PathConsumed),
					nls_codepage);
			kfree(tmp);
		} else
			node->path_consumed = le16_to_cpu(rsp->PathConsumed);

		node->server_type = le16_to_cpu(ref->ServerType);
		node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);

		/* copy DfsPath */
		temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
		max_len = data_end - temp;
		node->path_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->path_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		/* copy link target UNC */
		temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
		max_len = data_end - temp;
		node->node_name = cifs_strndup_from_utf16(temp, max_len,
						is_unicode, nls_codepage);
		if (!node->node_name) {
			rc = -ENOMEM;
			goto parse_DFS_referrals_exit;
		}

		node->ttl = le32_to_cpu(ref->TimeToLive);

		ref++;
	}

parse_DFS_referrals_exit:
	if (rc) {
		free_dfs_info_array(*target_nodes, *num_of_nodes);
		*target_nodes = NULL;
		*num_of_nodes = 0;
	}
	return rc;
}

/**
 * cifs_alloc_hash - allocate hash and hash context together
 * @name: The name of the crypto hash algo
 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
 *
 * The caller has to make sure @sdesc is initialized to either NULL or
 * a valid context. It can be freed via cifs_free_hash().
 */
int
cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
{
	int rc = 0;
	struct crypto_shash *alg = NULL;

	if (*sdesc)
		return 0;

	alg = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(alg)) {
		cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
		rc = PTR_ERR(alg);
		*sdesc = NULL;
		return rc;
	}

	*sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
	if (*sdesc == NULL) {
		cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
		crypto_free_shash(alg);
		return -ENOMEM;
	}

	(*sdesc)->tfm = alg;
	return 0;
}

/**
 * cifs_free_hash - free hash and hash context together
 * @sdesc: Where to find the pointer to the hash TFM
 *
 * Freeing a NULL descriptor is safe.
 */
void
cifs_free_hash(struct shash_desc **sdesc)
{
	if (unlikely(!sdesc) || !*sdesc)
		return;

	if ((*sdesc)->tfm) {
		crypto_free_shash((*sdesc)->tfm);
		(*sdesc)->tfm = NULL;
	}

	kfree_sensitive(*sdesc);
	*sdesc = NULL;
}

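/*
 * Illustrative pairing only (not a call site in this file): a caller
 * allocates the descriptor once, runs one or more digests through the
 * generic shash API, and frees both pieces together, e.g.:
 *
 *	struct shash_desc *sdesc = NULL;
 *	int rc = cifs_alloc_hash("sha256", &sdesc);
 *
 *	if (!rc) {
 *		rc = crypto_shash_digest(sdesc, data, len, hash);
 *		cifs_free_hash(&sdesc);
 *	}
 */
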
void extract_unc_hostname(const char *unc, const char **h, size_t *len)
{
	const char *end;

	/* skip initial slashes */
	while (*unc && (*unc == '\\' || *unc == '/'))
		unc++;

	end = unc;

	while (*end && !(*end == '\\' || *end == '/'))
		end++;

	*h = unc;
	*len = end - unc;
}

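/*
 * Worked example (informational): for a UNC string such as \\srv1\share,
 * *h ends up pointing at the 's' of "srv1" and *len is 4, i.e. the hostname
 * is the first *len bytes at *h, with the leading and trailing separators
 * excluded. Forward slashes are treated the same way as backslashes.
 */
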
/**
 * copy_path_name - copy src path to dst, possibly truncating
 * @dst: The destination buffer
 * @src: The source name
 *
 * returns number of bytes written (including trailing nul)
 */
int copy_path_name(char *dst, const char *src)
{
	int name_len;

	/*
	 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
	 * will truncate and strlen(dst) will be PATH_MAX-1
	 */
	name_len = strscpy(dst, src, PATH_MAX);
	if (WARN_ON_ONCE(name_len < 0))
		name_len = PATH_MAX-1;

	/* we count the trailing nul */
	name_len++;
	return name_len;
}

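/*
 * Worked example (informational): copy_path_name(dst, "\dir\file") copies
 * the 9-character path plus its trailing nul and returns 10. A source that
 * does not fit in PATH_MAX bytes is truncated (strscpy() returns a negative
 * value, so the WARN fires) and the function returns PATH_MAX.
 */
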
struct super_cb_data {
	void *data;
	struct super_block *sb;
};

static void tcon_super_cb(struct super_block *sb, void *arg)
{
	struct super_cb_data *sd = arg;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *t1 = sd->data, *t2;

	if (sd->sb)
		return;

	cifs_sb = CIFS_SB(sb);
	t2 = cifs_sb_master_tcon(cifs_sb);

	spin_lock(&t2->tc_lock);
	if ((t1->ses == t2->ses ||
	     t1->ses->dfs_root_ses == t2->ses->dfs_root_ses) &&
	    t1->ses->server == t2->ses->server &&
	    t2->origin_fullpath &&
	    dfs_src_pathname_equal(t2->origin_fullpath, t1->origin_fullpath))
		sd->sb = sb;
	spin_unlock(&t2->tc_lock);
}

static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
					    void *data)
{
	struct super_cb_data sd = {
		.data = data,
		.sb = NULL,
	};
	struct file_system_type **fs_type = (struct file_system_type *[]) {
		&cifs_fs_type, &smb3_fs_type, NULL,
	};

	for (; *fs_type; fs_type++) {
		iterate_supers_type(*fs_type, f, &sd);
		if (sd.sb) {
			/*
			 * Grab an active reference in order to prevent automounts (DFS links)
			 * from expiring and then freeing up our cifs superblock pointer while
			 * we're doing failover.
			 */
			cifs_sb_active(sd.sb);
			return sd.sb;
		}
	}
	pr_warn_once("%s: could not find dfs superblock\n", __func__);
	return ERR_PTR(-EINVAL);
}

static void __cifs_put_super(struct super_block *sb)
{
	if (!IS_ERR_OR_NULL(sb))
		cifs_sb_deactive(sb);
}

struct super_block *cifs_get_dfs_tcon_super(struct cifs_tcon *tcon)
{
	spin_lock(&tcon->tc_lock);
	if (!tcon->origin_fullpath) {
		spin_unlock(&tcon->tc_lock);
		return ERR_PTR(-ENOENT);
	}
	spin_unlock(&tcon->tc_lock);
	return __cifs_get_super(tcon_super_cb, tcon);
}

void cifs_put_tcp_super(struct super_block *sb)
{
	__cifs_put_super(sb);
}

#ifdef CONFIG_CIFS_DFS_UPCALL
int match_target_ip(struct TCP_Server_Info *server,
		    const char *host, size_t hostlen,
		    bool *result)
{
	struct sockaddr_storage ss;
	int rc;

	cifs_dbg(FYI, "%s: hostname=%.*s\n", __func__, (int)hostlen, host);

	*result = false;

	rc = dns_resolve_name(server->dns_dom, host, hostlen,
			      (struct sockaddr *)&ss);
	if (rc < 0)
		return rc;

	spin_lock(&server->srv_lock);
	*result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr, (struct sockaddr *)&ss);
	spin_unlock(&server->srv_lock);
	cifs_dbg(FYI, "%s: ip addresses matched: %s\n", __func__, str_yes_no(*result));
	return 0;
}

int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
	int rc;

	kfree(cifs_sb->prepath);
	cifs_sb->prepath = NULL;

	if (prefix && *prefix) {
		cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
		if (IS_ERR(cifs_sb->prepath)) {
			rc = PTR_ERR(cifs_sb->prepath);
			cifs_sb->prepath = NULL;
			return rc;
		}
		if (cifs_sb->prepath)
			convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
	}

	cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
	return 0;
}

/*
 * Handle weird Windows SMB server behaviour. It responds with
 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
 * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
 * non-ASCII unicode symbols.
 */
int cifs_inval_name_dfs_link_error(const unsigned int xid,
				   struct cifs_tcon *tcon,
				   struct cifs_sb_info *cifs_sb,
				   const char *full_path,
				   bool *islink)
{
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_ses *ses = tcon->ses;
	size_t len;
	char *path;
	char *ref_path;

	*islink = false;

	/*
	 * Fast path - skip check when @full_path doesn't have a prefix path to
	 * look up or tcon is not DFS.
	 */
	if (strlen(full_path) < 2 || !cifs_sb ||
	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
	    !is_tcon_dfs(tcon))
		return 0;

	spin_lock(&server->srv_lock);
	if (!server->leaf_fullpath) {
		spin_unlock(&server->srv_lock);
		return 0;
	}
	spin_unlock(&server->srv_lock);

	/*
	 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
	 * to get a referral to figure out whether it is a DFS link.
	 */
	len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
	path = kmalloc(len, GFP_KERNEL);
	if (!path)
		return -ENOMEM;

	scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
	ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
					    cifs_remap(cifs_sb));
	kfree(path);

	if (IS_ERR(ref_path)) {
		if (PTR_ERR(ref_path) != -EINVAL)
			return PTR_ERR(ref_path);
	} else {
		struct dfs_info3_param *refs = NULL;
		int num_refs = 0;

		/*
		 * XXX: we are not using dfs_cache_find() here because we might
		 * end up filling all the DFS cache and thus potentially
		 * removing cached DFS targets that the client would eventually
		 * need during failover.
		 */
		ses = CIFS_DFS_ROOT_SES(ses);
		if (ses->server->ops->get_dfs_refer &&
		    !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
						     &num_refs, cifs_sb->local_nls,
						     cifs_remap(cifs_sb)))
			*islink = refs[0].server_type == DFS_TYPE_LINK;
		free_dfs_info_array(refs, num_refs);
		kfree(ref_path);
	}
	return 0;
}
#endif

int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
{
	int timeout = 10;
	int rc;

	spin_lock(&server->srv_lock);
	if (server->tcpStatus != CifsNeedReconnect) {
		spin_unlock(&server->srv_lock);
		return 0;
	}
	timeout *= server->nr_targets;
	spin_unlock(&server->srv_lock);

	/*
	 * Give the demultiplex thread up to 10 seconds for each target
	 * available for reconnect -- this should be greater than the cifs
	 * socket timeout, which is 7 seconds.
	 *
	 * On "soft" mounts we wait once. Hard mounts keep retrying until the
	 * process is killed or the server comes back on-line.
	 */
	do {
		rc = wait_event_interruptible_timeout(server->response_q,
						      (server->tcpStatus != CifsNeedReconnect),
						      timeout * HZ);
		if (rc < 0) {
			cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
				 __func__);
			return -ERESTARTSYS;
		}

		/* are we still trying to reconnect? */
		spin_lock(&server->srv_lock);
		if (server->tcpStatus != CifsNeedReconnect) {
			spin_unlock(&server->srv_lock);
			return 0;
		}
		spin_unlock(&server->srv_lock);
	} while (retry);

	cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);
	return -EHOSTDOWN;
}