Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: fix use-after-free bug in refresh_cache_worker()

The UAF bug occurred because we were putting DFS root sessions in
cifs_umount() while DFS cache refresher was being executed.

Make DFS root sessions have the same lifetime as DFS tcons so we can
avoid the use-after-free bug in the DFS cache refresher and other
places that require IPCs to get new DFS referrals. Also, get rid of
mount group handling in DFS cache as we no longer need it.

This fixes the below use-after-free bug caught by KASAN

[ 379.946955] BUG: KASAN: use-after-free in __refresh_tcon.isra.0+0x10b/0xc10 [cifs]
[ 379.947642] Read of size 8 at addr ffff888018f57030 by task kworker/u4:3/56
[ 379.948096]
[ 379.948208] CPU: 0 PID: 56 Comm: kworker/u4:3 Not tainted 6.2.0-rc7-lku #23
[ 379.948661] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS
rel-1.16.0-0-gd239552-rebuilt.opensuse.org 04/01/2014
[ 379.949368] Workqueue: cifs-dfscache refresh_cache_worker [cifs]
[ 379.949942] Call Trace:
[ 379.950113] <TASK>
[ 379.950260] dump_stack_lvl+0x50/0x67
[ 379.950510] print_report+0x16a/0x48e
[ 379.950759] ? __virt_addr_valid+0xd8/0x160
[ 379.951040] ? __phys_addr+0x41/0x80
[ 379.951285] kasan_report+0xdb/0x110
[ 379.951533] ? __refresh_tcon.isra.0+0x10b/0xc10 [cifs]
[ 379.952056] ? __refresh_tcon.isra.0+0x10b/0xc10 [cifs]
[ 379.952585] __refresh_tcon.isra.0+0x10b/0xc10 [cifs]
[ 379.953096] ? __pfx___refresh_tcon.isra.0+0x10/0x10 [cifs]
[ 379.953637] ? __pfx___mutex_lock+0x10/0x10
[ 379.953915] ? lock_release+0xb6/0x720
[ 379.954167] ? __pfx_lock_acquire+0x10/0x10
[ 379.954443] ? refresh_cache_worker+0x34e/0x6d0 [cifs]
[ 379.954960] ? __pfx_wb_workfn+0x10/0x10
[ 379.955239] refresh_cache_worker+0x4ad/0x6d0 [cifs]
[ 379.955755] ? __pfx_refresh_cache_worker+0x10/0x10 [cifs]
[ 379.956323] ? __pfx_lock_acquired+0x10/0x10
[ 379.956615] ? read_word_at_a_time+0xe/0x20
[ 379.956898] ? lockdep_hardirqs_on_prepare+0x12/0x220
[ 379.957235] process_one_work+0x535/0x990
[ 379.957509] ? __pfx_process_one_work+0x10/0x10
[ 379.957812] ? lock_acquired+0xb7/0x5f0
[ 379.958069] ? __list_add_valid+0x37/0xd0
[ 379.958341] ? __list_add_valid+0x37/0xd0
[ 379.958611] worker_thread+0x8e/0x630
[ 379.958861] ? __pfx_worker_thread+0x10/0x10
[ 379.959148] kthread+0x17d/0x1b0
[ 379.959369] ? __pfx_kthread+0x10/0x10
[ 379.959630] ret_from_fork+0x2c/0x50
[ 379.959879] </TASK>

Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
Cc: stable@vger.kernel.org # 6.2
Signed-off-by: Steve French <stfrench@microsoft.com>

authored by

Paulo Alcantara and committed by
Steve French
396935de b56bce50

+67 -164
-2
fs/cifs/cifs_fs_sb.h
··· 61 61 /* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */ 62 62 char *prepath; 63 63 64 - /* randomly generated 128-bit number for indexing dfs mount groups in referral cache */ 65 - uuid_t dfs_mount_id; 66 64 /* 67 65 * Indicate whether serverino option was turned off later 68 66 * (cifs_autodisable_serverino) in order to match new mounts.
+2 -1
fs/cifs/cifsglob.h
··· 1233 1233 /* BB add field for back pointer to sb struct(s)? */ 1234 1234 #ifdef CONFIG_CIFS_DFS_UPCALL 1235 1235 struct list_head ulist; /* cache update list */ 1236 + struct list_head dfs_ses_list; 1236 1237 #endif 1237 1238 struct delayed_work query_interfaces; /* query interfaces workqueue job */ 1238 1239 }; ··· 1750 1749 struct TCP_Server_Info *server; 1751 1750 struct cifs_ses *ses; 1752 1751 struct cifs_tcon *tcon; 1753 - uuid_t mount_id; 1754 1752 char *origin_fullpath, *leaf_fullpath; 1753 + struct list_head dfs_ses_list; 1755 1754 }; 1756 1755 1757 1756 static inline void free_dfs_info_param(struct dfs_info3_param *param)
+3 -6
fs/cifs/connect.c
··· 3408 3408 bool isdfs; 3409 3409 int rc; 3410 3410 3411 - uuid_gen(&mnt_ctx.mount_id); 3411 + INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list); 3412 + 3412 3413 rc = dfs_mount_share(&mnt_ctx, &isdfs); 3413 3414 if (rc) 3414 3415 goto error; ··· 3429 3428 kfree(cifs_sb->prepath); 3430 3429 cifs_sb->prepath = ctx->prepath; 3431 3430 ctx->prepath = NULL; 3432 - uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id); 3433 3431 3434 3432 out: 3435 3433 cifs_try_adding_channels(cifs_sb, mnt_ctx.ses); ··· 3440 3440 return rc; 3441 3441 3442 3442 error: 3443 - dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id); 3443 + dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list); 3444 3444 kfree(mnt_ctx.origin_fullpath); 3445 3445 kfree(mnt_ctx.leaf_fullpath); 3446 3446 cifs_mount_put_conns(&mnt_ctx); ··· 3638 3638 spin_unlock(&cifs_sb->tlink_tree_lock); 3639 3639 3640 3640 kfree(cifs_sb->prepath); 3641 - #ifdef CONFIG_CIFS_DFS_UPCALL 3642 - dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id); 3643 - #endif 3644 3641 call_rcu(&cifs_sb->rcu, delayed_free); 3645 3642 } 3646 3643
+39 -13
fs/cifs/dfs.c
··· 99 99 return rc; 100 100 } 101 101 102 - static void set_root_ses(struct cifs_mount_ctx *mnt_ctx) 102 + static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx) 103 103 { 104 104 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; 105 + struct dfs_root_ses *root_ses; 105 106 struct cifs_ses *ses = mnt_ctx->ses; 106 107 107 108 if (ses) { 109 + root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL); 110 + if (!root_ses) 111 + return -ENOMEM; 112 + 113 + INIT_LIST_HEAD(&root_ses->list); 114 + 108 115 spin_lock(&cifs_tcp_ses_lock); 109 116 ses->ses_count++; 110 117 spin_unlock(&cifs_tcp_ses_lock); 111 - dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, ses); 118 + root_ses->ses = ses; 119 + list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list); 112 120 } 113 - ctx->dfs_root_ses = mnt_ctx->ses; 121 + ctx->dfs_root_ses = ses; 122 + return 0; 114 123 } 115 124 116 125 static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, const char *full_path, ··· 127 118 { 128 119 struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; 129 120 struct dfs_info3_param ref = {}; 130 - int rc; 121 + bool is_refsrv = false; 122 + int rc, rc2; 131 123 132 124 rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref); 133 125 if (rc) ··· 143 133 if (rc) 144 134 goto out; 145 135 146 - if (ref.flags & DFSREF_REFERRAL_SERVER) 147 - set_root_ses(mnt_ctx); 136 + is_refsrv = !!(ref.flags & DFSREF_REFERRAL_SERVER); 148 137 149 138 rc = -EREMOTE; 150 139 if (ref.flags & DFSREF_STORAGE_SERVER) { ··· 152 143 goto out; 153 144 154 145 /* some servers may not advertise referral capability under ref.flags */ 155 - if (!(ref.flags & DFSREF_REFERRAL_SERVER) && 156 - is_tcon_dfs(mnt_ctx->tcon)) 157 - set_root_ses(mnt_ctx); 146 + is_refsrv |= is_tcon_dfs(mnt_ctx->tcon); 158 147 159 148 rc = cifs_is_path_remote(mnt_ctx); 149 + } 150 + 151 + if (rc == -EREMOTE && is_refsrv) { 152 + rc2 = get_root_smb_session(mnt_ctx); 153 + if (rc2) 154 + rc = rc2; 160 155 } 161 156 162 157 out: ··· 175 162 char 
*ref_path = NULL, *full_path = NULL; 176 163 struct dfs_cache_tgt_iterator *tit; 177 164 struct TCP_Server_Info *server; 165 + struct cifs_tcon *tcon; 178 166 char *origin_fullpath = NULL; 179 167 int num_links = 0; 180 168 int rc; ··· 245 231 246 232 if (!rc) { 247 233 server = mnt_ctx->server; 234 + tcon = mnt_ctx->tcon; 248 235 249 236 mutex_lock(&server->refpath_lock); 250 - server->origin_fullpath = origin_fullpath; 251 - server->current_fullpath = server->leaf_fullpath; 237 + if (!server->origin_fullpath) { 238 + server->origin_fullpath = origin_fullpath; 239 + server->current_fullpath = server->leaf_fullpath; 240 + origin_fullpath = NULL; 241 + } 252 242 mutex_unlock(&server->refpath_lock); 253 - origin_fullpath = NULL; 243 + 244 + if (list_empty(&tcon->dfs_ses_list)) { 245 + list_replace_init(&mnt_ctx->dfs_ses_list, 246 + &tcon->dfs_ses_list); 247 + } else { 248 + dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list); 249 + } 254 250 } 255 251 256 252 out: ··· 301 277 } 302 278 303 279 *isdfs = true; 304 - set_root_ses(mnt_ctx); 280 + rc = get_root_smb_session(mnt_ctx); 281 + if (rc) 282 + return rc; 305 283 306 284 return __dfs_mount_share(mnt_ctx); 307 285 }
+16
fs/cifs/dfs.h
··· 10 10 #include "fs_context.h" 11 11 #include "cifs_unicode.h" 12 12 13 + struct dfs_root_ses { 14 + struct list_head list; 15 + struct cifs_ses *ses; 16 + }; 17 + 13 18 int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref, 14 19 struct smb3_fs_context *ctx); 15 20 int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs); ··· 47 42 server->origin_fullpath, 48 43 strlen(server->origin_fullpath), 49 44 true); 45 + } 46 + 47 + static inline void dfs_put_root_smb_sessions(struct list_head *head) 48 + { 49 + struct dfs_root_ses *root, *tmp; 50 + 51 + list_for_each_entry_safe(root, tmp, head, list) { 52 + list_del_init(&root->list); 53 + cifs_put_smb_ses(root->ses); 54 + kfree(root); 55 + } 50 56 } 51 57 52 58 #endif /* _CIFS_DFS_H */
-140
fs/cifs/dfs_cache.c
··· 49 49 struct cache_dfs_tgt *tgthint; 50 50 }; 51 51 52 - /* List of referral server sessions per dfs mount */ 53 - struct mount_group { 54 - struct list_head list; 55 - uuid_t id; 56 - struct cifs_ses *sessions[CACHE_MAX_ENTRIES]; 57 - int num_sessions; 58 - spinlock_t lock; 59 - struct list_head refresh_list; 60 - struct kref refcount; 61 - }; 62 - 63 52 static struct kmem_cache *cache_slab __read_mostly; 64 53 static struct workqueue_struct *dfscache_wq __read_mostly; 65 54 ··· 65 76 static struct hlist_head cache_htable[CACHE_HTABLE_SIZE]; 66 77 static DECLARE_RWSEM(htable_rw_lock); 67 78 68 - static LIST_HEAD(mount_group_list); 69 - static DEFINE_MUTEX(mount_group_list_lock); 70 - 71 79 static void refresh_cache_worker(struct work_struct *work); 72 80 73 81 static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker); 74 - 75 - static void __mount_group_release(struct mount_group *mg) 76 - { 77 - int i; 78 - 79 - for (i = 0; i < mg->num_sessions; i++) 80 - cifs_put_smb_ses(mg->sessions[i]); 81 - kfree(mg); 82 - } 83 - 84 - static void mount_group_release(struct kref *kref) 85 - { 86 - struct mount_group *mg = container_of(kref, struct mount_group, refcount); 87 - 88 - mutex_lock(&mount_group_list_lock); 89 - list_del(&mg->list); 90 - mutex_unlock(&mount_group_list_lock); 91 - __mount_group_release(mg); 92 - } 93 - 94 - static struct mount_group *find_mount_group_locked(const uuid_t *id) 95 - { 96 - struct mount_group *mg; 97 - 98 - list_for_each_entry(mg, &mount_group_list, list) { 99 - if (uuid_equal(&mg->id, id)) 100 - return mg; 101 - } 102 - return ERR_PTR(-ENOENT); 103 - } 104 - 105 - static struct mount_group *__get_mount_group_locked(const uuid_t *id) 106 - { 107 - struct mount_group *mg; 108 - 109 - mg = find_mount_group_locked(id); 110 - if (!IS_ERR(mg)) 111 - return mg; 112 - 113 - mg = kmalloc(sizeof(*mg), GFP_KERNEL); 114 - if (!mg) 115 - return ERR_PTR(-ENOMEM); 116 - kref_init(&mg->refcount); 117 - uuid_copy(&mg->id, id); 118 - 
mg->num_sessions = 0; 119 - spin_lock_init(&mg->lock); 120 - list_add(&mg->list, &mount_group_list); 121 - return mg; 122 - } 123 - 124 - static struct mount_group *get_mount_group(const uuid_t *id) 125 - { 126 - struct mount_group *mg; 127 - 128 - mutex_lock(&mount_group_list_lock); 129 - mg = __get_mount_group_locked(id); 130 - if (!IS_ERR(mg)) 131 - kref_get(&mg->refcount); 132 - mutex_unlock(&mount_group_list_lock); 133 - 134 - return mg; 135 - } 136 - 137 - static void free_mount_group_list(void) 138 - { 139 - struct mount_group *mg, *tmp_mg; 140 - 141 - list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) { 142 - list_del_init(&mg->list); 143 - __mount_group_release(mg); 144 - } 145 - } 146 82 147 83 /** 148 84 * dfs_cache_canonical_path - get a canonical DFS path ··· 618 704 { 619 705 cancel_delayed_work_sync(&refresh_task); 620 706 unload_nls(cache_cp); 621 - free_mount_group_list(); 622 707 flush_cache_ents(); 623 708 kmem_cache_destroy(cache_slab); 624 709 destroy_workqueue(dfscache_wq); ··· 1024 1111 return rc; 1025 1112 } 1026 1113 1027 - /** 1028 - * dfs_cache_add_refsrv_session - add SMB session of referral server 1029 - * 1030 - * @mount_id: mount group uuid to lookup. 1031 - * @ses: reference counted SMB session of referral server. 1032 - */ 1033 - void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses) 1034 - { 1035 - struct mount_group *mg; 1036 - 1037 - if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses)) 1038 - return; 1039 - 1040 - mg = get_mount_group(mount_id); 1041 - if (WARN_ON_ONCE(IS_ERR(mg))) 1042 - return; 1043 - 1044 - spin_lock(&mg->lock); 1045 - if (mg->num_sessions < ARRAY_SIZE(mg->sessions)) 1046 - mg->sessions[mg->num_sessions++] = ses; 1047 - spin_unlock(&mg->lock); 1048 - kref_put(&mg->refcount, mount_group_release); 1049 - } 1050 - 1051 - /** 1052 - * dfs_cache_put_refsrv_sessions - put all referral server sessions 1053 - * 1054 - * Put all SMB sessions from the given mount group id. 
1055 - * 1056 - * @mount_id: mount group uuid to lookup. 1057 - */ 1058 - void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id) 1059 - { 1060 - struct mount_group *mg; 1061 - 1062 - if (!mount_id || uuid_is_null(mount_id)) 1063 - return; 1064 - 1065 - mutex_lock(&mount_group_list_lock); 1066 - mg = find_mount_group_locked(mount_id); 1067 - if (IS_ERR(mg)) { 1068 - mutex_unlock(&mount_group_list_lock); 1069 - return; 1070 - } 1071 - mutex_unlock(&mount_group_list_lock); 1072 - kref_put(&mg->refcount, mount_group_release); 1073 - } 1074 - 1075 1114 /* Extract share from DFS target and return a pointer to prefix path or NULL */ 1076 1115 static const char *parse_target_share(const char *target, char **share) 1077 1116 { ··· 1248 1383 if (!server->origin_fullpath) { 1249 1384 cifs_dbg(FYI, "%s: not a dfs mount\n", __func__); 1250 1385 return 0; 1251 - } 1252 - 1253 - if (uuid_is_null(&cifs_sb->dfs_mount_id)) { 1254 - cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__); 1255 - return -EINVAL; 1256 1386 } 1257 1387 /* 1258 1388 * After reconnecting to a different server, unique ids won't match anymore, so we disable
-2
fs/cifs/dfs_cache.h
··· 40 40 struct dfs_info3_param *ref); 41 41 int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share, 42 42 char **prefix); 43 - void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id); 44 - void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses); 45 43 char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap); 46 44 int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb); 47 45
+7
fs/cifs/misc.c
··· 22 22 #ifdef CONFIG_CIFS_DFS_UPCALL 23 23 #include "dns_resolve.h" 24 24 #include "dfs_cache.h" 25 + #include "dfs.h" 25 26 #endif 26 27 #include "fs_context.h" 27 28 #include "cached_dir.h" ··· 135 134 spin_lock_init(&ret_buf->stat_lock); 136 135 atomic_set(&ret_buf->num_local_opens, 0); 137 136 atomic_set(&ret_buf->num_remote_opens, 0); 137 + #ifdef CONFIG_CIFS_DFS_UPCALL 138 + INIT_LIST_HEAD(&ret_buf->dfs_ses_list); 139 + #endif 138 140 139 141 return ret_buf; 140 142 } ··· 153 149 atomic_dec(&tconInfoAllocCount); 154 150 kfree(tcon->nativeFileSystem); 155 151 kfree_sensitive(tcon->password); 152 + #ifdef CONFIG_CIFS_DFS_UPCALL 153 + dfs_put_root_smb_sessions(&tcon->dfs_ses_list); 154 + #endif 156 155 kfree(tcon); 157 156 } 158 157