Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
fork

Configure Feed

Select the types of activity you want to include in your feed.

cifs: protect access of TCP_Server_Info::{origin,leaf}_fullpath

Protect access of TCP_Server_Info::{origin,leaf}_fullpath when
matching DFS connections, and get rid of
TCP_Server_Info::current_fullpath while we're at it.

Cc: stable@vger.kernel.org # v6.2+
Signed-off-by: Paulo Alcantara (SUSE) <pc@manguebit.com>
Signed-off-by: Steve French <stfrench@microsoft.com>

Authored by Paulo Alcantara and committed by Steve French
3dc9c433 ee20d7c6

+43 -20
+13 -7
fs/cifs/cifsglob.h
@@ … @@
 #endif
 	struct mutex refpath_lock; /* protects leaf_fullpath */
 	/*
-	 * Canonical DFS full paths that were used to chase referrals in mount and reconnect.
+	 * origin_fullpath: Canonical copy of smb3_fs_context::source.
+	 *                  It is used for matching existing DFS tcons.
 	 *
-	 * origin_fullpath: first or original referral path
-	 * leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
+	 * leaf_fullpath: Canonical DFS referral path related to this
+	 *                connection.
+	 *                It is used in DFS cache refresher, reconnect and may
+	 *                change due to nested DFS links.
 	 *
-	 * current_fullpath: pointer to either origin_fullpath or leaf_fullpath
-	 * NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
+	 * Both protected by @refpath_lock and @srv_lock. The @refpath_lock is
+	 * mosly used for not requiring a copy of @leaf_fullpath when getting
+	 * cached or new DFS referrals (which might also sleep during I/O).
+	 * While @srv_lock is held for making string and NULL comparions against
+	 * both fields as in mount(2) and cache refresh.
 	 *
-	 * format: \\HOST\SHARE\[OPTIONAL PATH]
+	 * format: \\HOST\SHARE[\OPTIONAL PATH]
 	 */
-	char *origin_fullpath, *leaf_fullpath, *current_fullpath;
+	char *origin_fullpath, *leaf_fullpath;
 };

 static inline bool is_smb1(struct TCP_Server_Info *server)
+6 -4
fs/cifs/connect.c
@@ … @@
 static int reconnect_dfs_server(struct TCP_Server_Info *server)
 {
 	int rc = 0;
-	const char *refpath = server->current_fullpath + 1;
 	struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
 	struct dfs_cache_tgt_iterator *target_hint = NULL;
 	int num_targets = 0;
@@ … @@
 	 * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
 	 * refreshing the referral, so, in this case, default it to 1.
 	 */
-	if (!dfs_cache_noreq_find(refpath, NULL, &tl))
+	mutex_lock(&server->refpath_lock);
+	if (!dfs_cache_noreq_find(server->leaf_fullpath + 1, NULL, &tl))
 		num_targets = dfs_cache_get_nr_tgts(&tl);
+	mutex_unlock(&server->refpath_lock);
 	if (!num_targets)
 		num_targets = 1;
@@ … @@
 		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
 	} while (server->tcpStatus == CifsNeedReconnect);

-	dfs_cache_noreq_update_tgthint(refpath, target_hint);
+	mutex_lock(&server->refpath_lock);
+	dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, target_hint);
+	mutex_unlock(&server->refpath_lock);
 	dfs_cache_free_tgts(&tl);

 	/* Need to set up echo worker again once connection has been established */
@@ … @@
 			rc = -ENOMEM;
 			goto out_err;
 		}
-		tcp_ses->current_fullpath = tcp_ses->leaf_fullpath;
 	}

 	if (ctx->nosharesock)
+8 -6
fs/cifs/dfs.c
@@ … @@
 	tcon = mnt_ctx->tcon;

 	mutex_lock(&server->refpath_lock);
+	spin_lock(&server->srv_lock);
 	if (!server->origin_fullpath) {
 		server->origin_fullpath = origin_fullpath;
-		server->current_fullpath = server->leaf_fullpath;
 		origin_fullpath = NULL;
 	}
+	spin_unlock(&server->srv_lock);
 	mutex_unlock(&server->refpath_lock);

 	if (list_empty(&tcon->dfs_ses_list)) {
@@ … @@
 		rc = PTR_ERR(npath);
 	} else {
 		mutex_lock(&server->refpath_lock);
+		spin_lock(&server->srv_lock);
 		kfree(server->leaf_fullpath);
 		server->leaf_fullpath = npath;
+		spin_unlock(&server->srv_lock);
 		mutex_unlock(&server->refpath_lock);
-		server->current_fullpath = server->leaf_fullpath;
 	}
 	return rc;
 }
@@ … @@
 		share = prefix = NULL;

 		/* Check if share matches with tcp ses */
-		rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
+		rc = dfs_cache_get_tgt_share(server->leaf_fullpath + 1, tit, &share, &prefix);
 		if (rc) {
 			cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
 			break;
@@ … @@
 			continue;
 		}

-		dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
+		dfs_cache_noreq_update_tgthint(server->leaf_fullpath + 1, tit);
 		tree_connect_ipc(xid, tree, cifs_sb, tcon);

 		scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
@@ … @@
 	cifs_sb = CIFS_SB(sb);

 	/* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
-	if (!server->current_fullpath ||
-	    dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
+	if (!server->leaf_fullpath ||
+	    dfs_cache_noreq_find(server->leaf_fullpath + 1, &ref, &tl)) {
 		rc = ops->tree_connect(xid, tcon->ses, tcon->tree_name, tcon, cifs_sb->local_nls);
 		goto out;
 	}
+11 -2
fs/cifs/dfs.h
@@ … @@
 	size_t len;
 	char *s;

-	if (unlikely(!server->origin_fullpath))
+	spin_lock(&server->srv_lock);
+	if (unlikely(!server->origin_fullpath)) {
+		spin_unlock(&server->srv_lock);
 		return ERR_PTR(-EREMOTE);
+	}
+	spin_unlock(&server->srv_lock);

 	s = dentry_path_raw(dentry, page, PATH_MAX);
 	if (IS_ERR(s))
@@ … @@
 	if (!s[1])
 		s++;

+	spin_lock(&server->srv_lock);
 	len = strlen(server->origin_fullpath);
-	if (s < (char *)page + len)
+	if (s < (char *)page + len) {
+		spin_unlock(&server->srv_lock);
 		return ERR_PTR(-ENAMETOOLONG);
+	}

 	s -= len;
 	memcpy(s, server->origin_fullpath, len);
+	spin_unlock(&server->srv_lock);
 	convert_delimiter(s, '/');
+
 	return s;
 }
+5 -1
fs/cifs/dfs_cache.c
@@ … @@
 	spin_lock(&cifs_tcp_ses_lock);
 	list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
-		if (!server->leaf_fullpath)
+		spin_lock(&server->srv_lock);
+		if (!server->leaf_fullpath) {
+			spin_unlock(&server->srv_lock);
 			continue;
+		}
+		spin_unlock(&server->srv_lock);

 		list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
 			if (ses->tcon_ipc) {