// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
 * Copyright (C) 2019 Samsung Electronics Co., Ltd.
 */

#include <linux/fs.h>
#include <linux/filelock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "glob.h"
#include "vfs_cache.h"
#include "oplock.h"
#include "vfs.h"
#include "connection.h"
#include "mgmt/tree_connect.h"
#include "mgmt/user_session.h"
#include "smb_common.h"
#include "server.h"

#define S_DEL_PENDING		1
#define S_DEL_ON_CLS		2
#define S_DEL_ON_CLS_STREAM	8

static unsigned int inode_hash_mask __read_mostly;
static unsigned int inode_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static DEFINE_RWLOCK(inode_hash_lock);

static struct ksmbd_file_table global_ft;
static atomic_long_t fd_limit;
static struct kmem_cache *filp_cache;

static bool durable_scavenger_running;
static DEFINE_MUTEX(durable_scavenger_lock);
static wait_queue_head_t dh_wq;

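/*
 * Publish the server-wide open-file budget, capped at the system-wide
 * limit reported by get_max_files().
 */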
void ksmbd_set_fd_limit(unsigned long limit)
{
	limit = min(limit, get_max_files());
	atomic_long_set(&fd_limit, limit);
}

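/*
 * Try to reserve one slot from fd_limit. The counter is decremented
 * optimistically; if it goes negative the decrement is undone and
 * true ("depleted") is returned.
 */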
static bool fd_limit_depleted(void)
{
	long v = atomic_long_dec_return(&fd_limit);

	if (v >= 0)
		return false;
	atomic_long_inc(&fd_limit);
	return true;
}

static void fd_limit_close(void)
{
	atomic_long_inc(&fd_limit);
}

/*
 * INODE hash
 */

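/*
 * Hash a dentry address into the inode hash table, mixing in the
 * superblock pointer so entries from different filesystems spread
 * across different buckets.
 */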
static unsigned long inode_hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
		L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> inode_hash_shift);
	return tmp & inode_hash_mask;
}

static struct ksmbd_inode *__ksmbd_inode_lookup(struct dentry *de)
{
	struct hlist_head *head = inode_hashtable +
		inode_hash(d_inode(de)->i_sb, (unsigned long)de);
	struct ksmbd_inode *ci = NULL, *ret_ci = NULL;

	hlist_for_each_entry(ci, head, m_hash) {
		if (ci->m_de == de) {
			if (atomic_inc_not_zero(&ci->m_count))
				ret_ci = ci;
			break;
		}
	}
	return ret_ci;
}

static struct ksmbd_inode *ksmbd_inode_lookup(struct ksmbd_file *fp)
{
	return __ksmbd_inode_lookup(fp->filp->f_path.dentry);
}

struct ksmbd_inode *ksmbd_inode_lookup_lock(struct dentry *d)
{
	struct ksmbd_inode *ci;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(d);
	read_unlock(&inode_hash_lock);

	return ci;
}

int ksmbd_query_inode_status(struct dentry *dentry)
{
	struct ksmbd_inode *ci;
	int ret = KSMBD_INODE_STATUS_UNKNOWN;

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(dentry);
	read_unlock(&inode_hash_lock);
	if (!ci)
		return ret;

	down_read(&ci->m_lock);
	if (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS))
		ret = KSMBD_INODE_STATUS_PENDING_DELETE;
	else
		ret = KSMBD_INODE_STATUS_OK;
	up_read(&ci->m_lock);

	atomic_dec(&ci->m_count);
	return ret;
}

bool ksmbd_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;
	int ret;

	down_read(&ci->m_lock);
	ret = (ci->m_flags & (S_DEL_PENDING | S_DEL_ON_CLS));
	up_read(&ci->m_lock);

	return ret;
}

void ksmbd_set_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	ci->m_flags |= S_DEL_PENDING;
	up_write(&ci->m_lock);
}

void ksmbd_clear_inode_pending_delete(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	ci->m_flags &= ~S_DEL_PENDING;
	up_write(&ci->m_lock);
}

void ksmbd_fd_set_delete_on_close(struct ksmbd_file *fp,
				  int file_info)
{
	struct ksmbd_inode *ci = fp->f_ci;

	down_write(&ci->m_lock);
	if (ksmbd_stream_fd(fp))
		ci->m_flags |= S_DEL_ON_CLS_STREAM;
	else
		ci->m_flags |= S_DEL_ON_CLS;
	up_write(&ci->m_lock);
}

static void ksmbd_inode_hash(struct ksmbd_inode *ci)
{
	struct hlist_head *b = inode_hashtable +
		inode_hash(d_inode(ci->m_de)->i_sb, (unsigned long)ci->m_de);

	hlist_add_head(&ci->m_hash, b);
}

static void ksmbd_inode_unhash(struct ksmbd_inode *ci)
{
	write_lock(&inode_hash_lock);
	hlist_del_init(&ci->m_hash);
	write_unlock(&inode_hash_lock);
}

static int ksmbd_inode_init(struct ksmbd_inode *ci, struct ksmbd_file *fp)
{
	atomic_set(&ci->m_count, 1);
	atomic_set(&ci->op_count, 0);
	atomic_set(&ci->sop_count, 0);
	ci->m_flags = 0;
	ci->m_fattr = 0;
	INIT_LIST_HEAD(&ci->m_fp_list);
	INIT_LIST_HEAD(&ci->m_op_list);
	init_rwsem(&ci->m_lock);
	ci->m_de = fp->filp->f_path.dentry;
	return 0;
}

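/*
 * Look up the ksmbd_inode for fp's dentry, allocating a new one on a
 * miss. The lookup is repeated under the write lock so that two racing
 * openers cannot both insert an entry for the same dentry; the loser
 * frees its allocation and takes a reference on the winner's entry.
 */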
static struct ksmbd_inode *ksmbd_inode_get(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci, *tmpci;
	int rc;

	read_lock(&inode_hash_lock);
	ci = ksmbd_inode_lookup(fp);
	read_unlock(&inode_hash_lock);
	if (ci)
		return ci;

	ci = kmalloc(sizeof(struct ksmbd_inode), KSMBD_DEFAULT_GFP);
	if (!ci)
		return NULL;

	rc = ksmbd_inode_init(ci, fp);
	if (rc) {
		pr_err("inode initialization failed\n");
		kfree(ci);
		return NULL;
	}

	write_lock(&inode_hash_lock);
	tmpci = ksmbd_inode_lookup(fp);
	if (!tmpci) {
		ksmbd_inode_hash(ci);
	} else {
		kfree(ci);
		ci = tmpci;
	}
	write_unlock(&inode_hash_lock);
	return ci;
}

static void ksmbd_inode_free(struct ksmbd_inode *ci)
{
	ksmbd_inode_unhash(ci);
	kfree(ci);
}

void ksmbd_inode_put(struct ksmbd_inode *ci)
{
	if (atomic_dec_and_test(&ci->m_count))
		ksmbd_inode_free(ci);
}

int __init ksmbd_inode_hash_init(void)
{
	unsigned int loop;
	unsigned long numentries = 16384;
	unsigned long bucketsize = sizeof(struct hlist_head);
	unsigned long size;

	inode_hash_shift = ilog2(numentries);
	inode_hash_mask = (1 << inode_hash_shift) - 1;

	size = bucketsize << inode_hash_shift;

	/* init master fp hash table */
	inode_hashtable = vmalloc(size);
	if (!inode_hashtable)
		return -ENOMEM;

	for (loop = 0; loop < (1U << inode_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
	return 0;
}

void ksmbd_release_inode_hash(void)
{
	vfree(inode_hashtable);
}

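/*
 * Drop fp's reference on its ksmbd_inode. For stream fds, a pending
 * S_DEL_ON_CLS_STREAM is honoured by removing the stream xattr. When
 * the last reference goes away, a pending delete-on-close turns into
 * the actual unlink before the ksmbd_inode is freed.
 */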
static void __ksmbd_inode_close(struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci = fp->f_ci;
	int err;
	struct file *filp;

	filp = fp->filp;

	if (ksmbd_stream_fd(fp)) {
		bool remove_stream_xattr = false;

		down_write(&ci->m_lock);
		if (ci->m_flags & S_DEL_ON_CLS_STREAM) {
			ci->m_flags &= ~S_DEL_ON_CLS_STREAM;
			remove_stream_xattr = true;
		}
		up_write(&ci->m_lock);

		if (remove_stream_xattr) {
			err = ksmbd_vfs_remove_xattr(file_mnt_idmap(filp),
						     &filp->f_path,
						     fp->stream.name,
						     true);
			if (err)
				pr_err("remove xattr failed : %s\n",
				       fp->stream.name);
		}
	}

	if (atomic_dec_and_test(&ci->m_count)) {
		bool do_unlink = false;

		down_write(&ci->m_lock);
		if (ci->m_flags & (S_DEL_ON_CLS | S_DEL_PENDING)) {
			ci->m_flags &= ~(S_DEL_ON_CLS | S_DEL_PENDING);
			do_unlink = true;
		}
		up_write(&ci->m_lock);

		if (do_unlink)
			ksmbd_vfs_unlink(filp);

		ksmbd_inode_free(ci);
	}
}

static void __ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	if (!has_file_id(fp->persistent_id))
		return;

	idr_remove(global_ft.idr, fp->persistent_id);
}

static void ksmbd_remove_durable_fd(struct ksmbd_file *fp)
{
	write_lock(&global_ft.lock);
	__ksmbd_remove_durable_fd(fp);
	write_unlock(&global_ft.lock);
	if (waitqueue_active(&dh_wq))
		wake_up(&dh_wq);
}

static void __ksmbd_remove_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	if (!has_file_id(fp->volatile_id))
		return;

	down_write(&fp->f_ci->m_lock);
	list_del_init(&fp->node);
	up_write(&fp->f_ci->m_lock);

	write_lock(&ft->lock);
	idr_remove(ft->idr, fp->volatile_id);
	write_unlock(&ft->lock);
}

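/*
 * Tear down a ksmbd_file whose refcount has reached zero: return its
 * slot to the fd limit, drop it from the durable and per-session
 * tables, release the oplock and the underlying struct file, free any
 * byte-range locks still attached to it, and finally free fp itself.
 */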
static void __ksmbd_close_fd(struct ksmbd_file_table *ft, struct ksmbd_file *fp)
{
	struct file *filp;
	struct ksmbd_lock *smb_lock, *tmp_lock;

	fd_limit_close();
	ksmbd_remove_durable_fd(fp);
	if (ft)
		__ksmbd_remove_fd(ft, fp);

	close_id_del_oplock(fp);
	filp = fp->filp;

	__ksmbd_inode_close(fp);
	if (!IS_ERR_OR_NULL(filp))
		fput(filp);

	/* because the reference count of fp is 0, it is guaranteed that
	 * there are no accesses to fp->lock_list.
	 */
	list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
		spin_lock(&fp->conn->llist_lock);
		list_del(&smb_lock->clist);
		spin_unlock(&fp->conn->llist_lock);

		list_del(&smb_lock->flist);
		locks_free_lock(smb_lock->fl);
		kfree(smb_lock);
	}

	if (ksmbd_stream_fd(fp))
		kfree(fp->stream.name);
	kmem_cache_free(filp_cache, fp);
}

static struct ksmbd_file *ksmbd_fp_get(struct ksmbd_file *fp)
{
	if (fp->f_state != FP_INITED)
		return NULL;

	if (!atomic_inc_not_zero(&fp->refcount))
		return NULL;
	return fp;
}

static struct ksmbd_file *__ksmbd_lookup_fd(struct ksmbd_file_table *ft,
					    u64 id)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id))
		return NULL;

	read_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp)
		fp = ksmbd_fp_get(fp);
	read_unlock(&ft->lock);
	return fp;
}

static void __put_fd_final(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	__ksmbd_close_fd(&work->sess->file_table, fp);
	atomic_dec(&work->conn->stats.open_files_count);
}

static void set_close_state_blocked_works(struct ksmbd_file *fp)
{
	struct ksmbd_work *cancel_work;

	spin_lock(&fp->f_lock);
	list_for_each_entry(cancel_work, &fp->blocked_works,
			    fp_entry) {
		cancel_work->state = KSMBD_WORK_CLOSED;
		cancel_work->cancel_fn(cancel_work->cancel_argv);
	}
	spin_unlock(&fp->f_lock);
}

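/*
 * Close a volatile fd on behalf of a client request. The fd is marked
 * FP_CLOSED under the table lock so concurrent lookups fail, blocked
 * works are cancelled, and the final teardown runs only if this caller
 * drops the last reference.
 */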
int ksmbd_close_fd(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp;
	struct ksmbd_file_table *ft;

	if (!has_file_id(id))
		return 0;

	ft = &work->sess->file_table;
	write_lock(&ft->lock);
	fp = idr_find(ft->idr, id);
	if (fp) {
		set_close_state_blocked_works(fp);

		if (fp->f_state != FP_INITED)
			fp = NULL;
		else {
			fp->f_state = FP_CLOSED;
			if (!atomic_dec_and_test(&fp->refcount))
				fp = NULL;
		}
	}
	write_unlock(&ft->lock);

	if (!fp)
		return -EINVAL;

	__put_fd_final(work, fp);
	return 0;
}

void ksmbd_fd_put(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	if (!fp)
		return;

	if (!atomic_dec_and_test(&fp->refcount))
		return;
	__put_fd_final(work, fp);
}

static bool __sanity_check(struct ksmbd_tree_connect *tcon, struct ksmbd_file *fp)
{
	if (!fp)
		return false;
	if (fp->tcon != tcon)
		return false;
	return true;
}

struct ksmbd_file *ksmbd_lookup_foreign_fd(struct ksmbd_work *work, u64 id)
{
	return __ksmbd_lookup_fd(&work->sess->file_table, id);
}

struct ksmbd_file *ksmbd_lookup_fd_fast(struct ksmbd_work *work, u64 id)
{
	struct ksmbd_file *fp = __ksmbd_lookup_fd(&work->sess->file_table, id);

	if (__sanity_check(work->tcon, fp))
		return fp;

	ksmbd_fd_put(work, fp);
	return NULL;
}

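/*
 * Resolve a volatile/persistent id pair to a ksmbd_file. When the
 * request carries no id of its own (a later operation in a compound
 * request), fall back to the compound's cached ids, then verify both
 * the tree connection and the persistent id before returning the fd.
 */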
struct ksmbd_file *ksmbd_lookup_fd_slow(struct ksmbd_work *work, u64 id,
					u64 pid)
{
	struct ksmbd_file *fp;

	if (!has_file_id(id)) {
		id = work->compound_fid;
		pid = work->compound_pfid;
	}

	fp = __ksmbd_lookup_fd(&work->sess->file_table, id);
	if (!__sanity_check(work->tcon, fp)) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	if (fp->persistent_id != pid) {
		ksmbd_fd_put(work, fp);
		return NULL;
	}
	return fp;
}

struct ksmbd_file *ksmbd_lookup_global_fd(unsigned long long id)
{
	return __ksmbd_lookup_fd(&global_ft, id);
}

struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id)
{
	struct ksmbd_file *fp;

	fp = __ksmbd_lookup_fd(&global_ft, id);
	if (fp && (fp->conn ||
		   (fp->durable_scavenger_timeout &&
		    (fp->durable_scavenger_timeout <
		     jiffies_to_msecs(jiffies))))) {
		ksmbd_put_durable_fd(fp);
		fp = NULL;
	}

	return fp;
}

void ksmbd_put_durable_fd(struct ksmbd_file *fp)
{
	if (!atomic_dec_and_test(&fp->refcount))
		return;

	__ksmbd_close_fd(NULL, fp);
}

struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;

	read_lock(&global_ft.lock);
	idr_for_each_entry(global_ft.idr, fp, id) {
		if (!memcmp(fp->create_guid,
			    cguid,
			    SMB2_CREATE_GUID_SIZE)) {
			fp = ksmbd_fp_get(fp);
			break;
		}
	}
	read_unlock(&global_ft.lock);

	return fp;
}

struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
{
	struct ksmbd_file *lfp;
	struct ksmbd_inode *ci;
	struct inode *inode = d_inode(dentry);

	read_lock(&inode_hash_lock);
	ci = __ksmbd_inode_lookup(dentry);
	read_unlock(&inode_hash_lock);
	if (!ci)
		return NULL;

	down_read(&ci->m_lock);
	list_for_each_entry(lfp, &ci->m_fp_list, node) {
		if (inode == file_inode(lfp->filp)) {
			atomic_dec(&ci->m_count);
			lfp = ksmbd_fp_get(lfp);
			up_read(&ci->m_lock);
			return lfp;
		}
	}
	atomic_dec(&ci->m_count);
	up_read(&ci->m_lock);
	return NULL;
}

#define OPEN_ID_TYPE_VOLATILE_ID	(0)
#define OPEN_ID_TYPE_PERSISTENT_ID	(1)

static void __open_id_set(struct ksmbd_file *fp, u64 id, int type)
{
	if (type == OPEN_ID_TYPE_VOLATILE_ID)
		fp->volatile_id = id;
	if (type == OPEN_ID_TYPE_PERSISTENT_ID)
		fp->persistent_id = id;
}

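/*
 * Allocate a volatile or persistent id for fp from the given table.
 * A volatile id first has to win a slot from the fd limit. The IDR
 * allocation is cyclic, so ids are not reused immediately, and it runs
 * under idr_preload() because the table lock cannot be held across a
 * sleeping allocation.
 */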
static int __open_id(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
		     int type)
{
	u64 id = 0;
	int ret;

	if (type == OPEN_ID_TYPE_VOLATILE_ID && fd_limit_depleted()) {
		__open_id_set(fp, KSMBD_NO_FID, type);
		return -EMFILE;
	}

	idr_preload(KSMBD_DEFAULT_GFP);
	write_lock(&ft->lock);
	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
	if (ret >= 0) {
		id = ret;
		ret = 0;
	} else {
		id = KSMBD_NO_FID;
		fd_limit_close();
	}

	__open_id_set(fp, id, type);
	write_unlock(&ft->lock);
	idr_preload_end();
	return ret;
}

unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp)
{
	__open_id(&global_ft, fp, OPEN_ID_TYPE_PERSISTENT_ID);
	return fp->persistent_id;
}

struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp)
{
	struct ksmbd_file *fp;
	int ret;

	fp = kmem_cache_zalloc(filp_cache, KSMBD_DEFAULT_GFP);
	if (!fp) {
		pr_err("Failed to allocate memory\n");
		return ERR_PTR(-ENOMEM);
	}

	INIT_LIST_HEAD(&fp->blocked_works);
	INIT_LIST_HEAD(&fp->node);
	INIT_LIST_HEAD(&fp->lock_list);
	spin_lock_init(&fp->f_lock);
	atomic_set(&fp->refcount, 1);

	fp->filp = filp;
	fp->conn = work->conn;
	fp->tcon = work->tcon;
	fp->volatile_id = KSMBD_NO_FID;
	fp->persistent_id = KSMBD_NO_FID;
	fp->f_state = FP_NEW;
	fp->f_ci = ksmbd_inode_get(fp);

	if (!fp->f_ci) {
		ret = -ENOMEM;
		goto err_out;
	}

	ret = __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (ret) {
		ksmbd_inode_put(fp->f_ci);
		goto err_out;
	}

	atomic_inc(&work->conn->stats.open_files_count);
	return fp;

err_out:
	kmem_cache_free(filp_cache, fp);
	return ERR_PTR(ret);
}

void ksmbd_update_fstate(struct ksmbd_file_table *ft, struct ksmbd_file *fp,
			 unsigned int state)
{
	if (!fp)
		return;

	write_lock(&ft->lock);
	fp->f_state = state;
	write_unlock(&ft->lock);
}

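/*
 * Walk a file table and close every fd that the skip() predicate does
 * not exempt and whose reference count drops to zero here. Returns the
 * number of fds closed so the caller can adjust its open-file
 * statistics.
 */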
static int
__close_file_table_ids(struct ksmbd_file_table *ft,
		       struct ksmbd_tree_connect *tcon,
		       bool (*skip)(struct ksmbd_tree_connect *tcon,
				    struct ksmbd_file *fp))
{
	struct ksmbd_file *fp;
	unsigned int id = 0;
	int num = 0;

	while (1) {
		write_lock(&ft->lock);
		fp = idr_get_next(ft->idr, &id);
		if (!fp) {
			write_unlock(&ft->lock);
			break;
		}

		if (skip(tcon, fp) ||
		    !atomic_dec_and_test(&fp->refcount)) {
			id++;
			write_unlock(&ft->lock);
			continue;
		}

		set_close_state_blocked_works(fp);
		idr_remove(ft->idr, fp->volatile_id);
		fp->volatile_id = KSMBD_NO_FID;
		write_unlock(&ft->lock);

		down_write(&fp->f_ci->m_lock);
		list_del_init(&fp->node);
		up_write(&fp->f_ci->m_lock);

		__ksmbd_close_fd(ft, fp);

		num++;
		id++;
	}

	return num;
}

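/*
 * A disconnected fd may survive for reconnection only while its oplock
 * state is quiescent, and then only if it is resilient, persistent, or
 * durable with a handle-caching lease or batch oplock.
 */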
static inline bool is_reconnectable(struct ksmbd_file *fp)
{
	struct oplock_info *opinfo = opinfo_get(fp);
	bool reconn = false;

	if (!opinfo)
		return false;

	if (opinfo->op_state != OPLOCK_STATE_NONE) {
		opinfo_put(opinfo);
		return false;
	}

	if (fp->is_resilient || fp->is_persistent)
		reconn = true;
	else if (fp->is_durable && opinfo->is_lease &&
		 opinfo->o_lease->state & SMB2_LEASE_HANDLE_CACHING_LE)
		reconn = true;
	else if (fp->is_durable && opinfo->level == SMB2_OPLOCK_LEVEL_BATCH)
		reconn = true;

	opinfo_put(opinfo);
	return reconn;
}

static bool tree_conn_fd_check(struct ksmbd_tree_connect *tcon,
			       struct ksmbd_file *fp)
{
	return fp->tcon != tcon;
}

static bool ksmbd_durable_scavenger_alive(void)
{
	if (!durable_scavenger_running)
		return false;

	if (kthread_should_stop())
		return false;

	if (idr_is_empty(global_ft.idr))
		return false;

	return true;
}

static void ksmbd_scavenger_dispose_dh(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ksmbd_file *fp;

		fp = list_first_entry(head, struct ksmbd_file, node);
		list_del_init(&fp->node);
		__ksmbd_close_fd(NULL, fp);
	}
}

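/*
 * Kernel thread that reaps disconnected durable handles. It sleeps on
 * dh_wq for the shortest outstanding timeout, then moves every
 * expired, unreferenced handle onto a local list and disposes of it
 * outside the table lock. The thread exits once no handle is left
 * waiting on a timeout.
 */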
static int ksmbd_durable_scavenger(void *dummy)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;
	unsigned int min_timeout = 1;
	bool found_fp_timeout;
	LIST_HEAD(scavenger_list);
	unsigned long remaining_jiffies;

	__module_get(THIS_MODULE);

	set_freezable();
	while (ksmbd_durable_scavenger_alive()) {
		if (try_to_freeze())
			continue;

		found_fp_timeout = false;

		remaining_jiffies = wait_event_timeout(dh_wq,
				ksmbd_durable_scavenger_alive() == false,
				__msecs_to_jiffies(min_timeout));
		if (remaining_jiffies)
			min_timeout = jiffies_to_msecs(remaining_jiffies);
		else
			min_timeout = DURABLE_HANDLE_MAX_TIMEOUT;

		write_lock(&global_ft.lock);
		idr_for_each_entry(global_ft.idr, fp, id) {
			if (!fp->durable_timeout)
				continue;

			if (atomic_read(&fp->refcount) > 1 ||
			    fp->conn)
				continue;

			found_fp_timeout = true;
			if (fp->durable_scavenger_timeout <=
			    jiffies_to_msecs(jiffies)) {
				__ksmbd_remove_durable_fd(fp);
				list_add(&fp->node, &scavenger_list);
			} else {
				unsigned long durable_timeout;

				durable_timeout =
					fp->durable_scavenger_timeout -
					jiffies_to_msecs(jiffies);

				if (min_timeout > durable_timeout)
					min_timeout = durable_timeout;
			}
		}
		write_unlock(&global_ft.lock);

		ksmbd_scavenger_dispose_dh(&scavenger_list);

		if (found_fp_timeout == false)
			break;
	}

	durable_scavenger_running = false;

	module_put(THIS_MODULE);

	return 0;
}

void ksmbd_launch_ksmbd_durable_scavenger(void)
{
	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
		return;

	mutex_lock(&durable_scavenger_lock);
	if (durable_scavenger_running == true) {
		mutex_unlock(&durable_scavenger_lock);
		return;
	}

	durable_scavenger_running = true;

	server_conf.dh_task = kthread_run(ksmbd_durable_scavenger,
					  (void *)NULL, "ksmbd-durable-scavenger");
	if (IS_ERR(server_conf.dh_task))
		pr_err("cannot start durable scavenger thread, err : %ld\n",
		       PTR_ERR(server_conf.dh_task));
	mutex_unlock(&durable_scavenger_lock);
}

void ksmbd_stop_durable_scavenger(void)
{
	if (!(server_conf.flags & KSMBD_GLOBAL_FLAG_DURABLE_HANDLE))
		return;

	mutex_lock(&durable_scavenger_lock);
	if (!durable_scavenger_running) {
		mutex_unlock(&durable_scavenger_lock);
		return;
	}

	durable_scavenger_running = false;
	if (waitqueue_active(&dh_wq))
		wake_up(&dh_wq);
	mutex_unlock(&durable_scavenger_lock);
	kthread_stop(server_conf.dh_task);
}

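/*
 * Used as the skip() predicate when a session dies: reconnectable fds
 * are detached from their connection and tree connect (and given a
 * scavenger deadline) instead of being closed, so a later durable
 * reconnect can revive them.
 */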
static bool session_fd_check(struct ksmbd_tree_connect *tcon,
			     struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci;
	struct oplock_info *op;
	struct ksmbd_conn *conn;

	if (!is_reconnectable(fp))
		return false;

	conn = fp->conn;
	ci = fp->f_ci;
	down_write(&ci->m_lock);
	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
		if (op->conn != conn)
			continue;
		if (op->conn && atomic_dec_and_test(&op->conn->refcnt))
			kfree(op->conn);
		op->conn = NULL;
	}
	up_write(&ci->m_lock);

	fp->conn = NULL;
	fp->tcon = NULL;
	fp->volatile_id = KSMBD_NO_FID;

	if (fp->durable_timeout)
		fp->durable_scavenger_timeout =
			jiffies_to_msecs(jiffies) + fp->durable_timeout;

	return true;
}

void ksmbd_close_tree_conn_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 tree_conn_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

void ksmbd_close_session_fds(struct ksmbd_work *work)
{
	int num = __close_file_table_ids(&work->sess->file_table,
					 work->tcon,
					 session_fd_check);

	atomic_sub(num, &work->conn->stats.open_files_count);
}

int ksmbd_init_global_file_table(void)
{
	return ksmbd_init_file_table(&global_ft);
}

void ksmbd_free_global_file_table(void)
{
	struct ksmbd_file *fp = NULL;
	unsigned int id;

	idr_for_each_entry(global_ft.idr, fp, id) {
		ksmbd_remove_durable_fd(fp);
		__ksmbd_close_fd(NULL, fp);
	}

	idr_destroy(global_ft.idr);
	kfree(global_ft.idr);
}

int ksmbd_validate_name_reconnect(struct ksmbd_share_config *share,
				  struct ksmbd_file *fp, char *name)
{
	char *pathname, *ab_pathname;
	int ret = 0;

	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
	if (!pathname)
		return -EACCES;

	ab_pathname = d_path(&fp->filp->f_path, pathname, PATH_MAX);
	if (IS_ERR(ab_pathname)) {
		kfree(pathname);
		return -EACCES;
	}

	if (name && strcmp(&ab_pathname[share->path_sz + 1], name)) {
		ksmbd_debug(SMB, "invalid name reconnect %s\n", name);
		ret = -EINVAL;
	}

	kfree(pathname);

	return ret;
}

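/*
 * Reattach a previously disconnected durable fd to the reconnecting
 * session: restore the connection and tree connect, hand the new
 * connection back to the inode's oplocks, and allocate a fresh
 * volatile id in the session's file table.
 */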
int ksmbd_reopen_durable_fd(struct ksmbd_work *work, struct ksmbd_file *fp)
{
	struct ksmbd_inode *ci;
	struct oplock_info *op;

	if (!fp->is_durable || fp->conn || fp->tcon) {
		pr_err("Invalid durable fd [%p:%p]\n", fp->conn, fp->tcon);
		return -EBADF;
	}

	if (has_file_id(fp->volatile_id)) {
		pr_err("Still in use durable fd: %llu\n", fp->volatile_id);
		return -EBADF;
	}

	fp->conn = work->conn;
	fp->tcon = work->tcon;

	ci = fp->f_ci;
	down_write(&ci->m_lock);
	list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
		if (op->conn)
			continue;
		op->conn = fp->conn;
		atomic_inc(&op->conn->refcnt);
	}
	up_write(&ci->m_lock);

	fp->f_state = FP_NEW;
	__open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
	if (!has_file_id(fp->volatile_id)) {
		fp->conn = NULL;
		fp->tcon = NULL;
		return -EBADF;
	}
	return 0;
}

int ksmbd_init_file_table(struct ksmbd_file_table *ft)
{
	ft->idr = kzalloc(sizeof(struct idr), KSMBD_DEFAULT_GFP);
	if (!ft->idr)
		return -ENOMEM;

	idr_init(ft->idr);
	rwlock_init(&ft->lock);
	return 0;
}

void ksmbd_destroy_file_table(struct ksmbd_file_table *ft)
{
	if (!ft->idr)
		return;

	__close_file_table_ids(ft, NULL, session_fd_check);
	idr_destroy(ft->idr);
	kfree(ft->idr);
	ft->idr = NULL;
}

int ksmbd_init_file_cache(void)
{
	filp_cache = kmem_cache_create("ksmbd_file_cache",
				       sizeof(struct ksmbd_file), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!filp_cache)
		goto out;

	init_waitqueue_head(&dh_wq);

	return 0;

out:
	pr_err("failed to allocate file cache\n");
	return -ENOMEM;
}

void ksmbd_exit_file_cache(void)
{
	kmem_cache_destroy(filp_cache);
}