cifs: disable sharing session and tcon and add new TCP sharing code

The code that allows these structs to be shared is extremely racy.
Disable the sharing of SMB session and tcon structs for now until we
can come up with a way to do this that's race-free.

We do want to continue to share TCP sessions, however, since they are
required for multiuser mounts. For that, implement a new (hopefully
race-free) scheme: add a new global list of TCP sessions, and take care
to get a reference to a session on that list whenever we're dealing
with one.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
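
To make the scheme concrete, the pattern the rest of the patch follows is
roughly the sketch below (a condensed illustration, not literal patch code;
alloc_and_connect() is a hypothetical stand-in for the socket setup done in
cifs_mount()). Mount either finds an existing TCP_Server_Info on the global
list and takes a reference, or creates one, takes the first reference and
adds it to the list; every teardown path then drops its reference through
cifs_put_tcp_session().

    /* condensed illustration of the new reference scheme (not literal
       patch code); alloc_and_connect() is a hypothetical stand-in for
       the socket setup in cifs_mount() */
    struct TCP_Server_Info *srvTcp;

    srvTcp = cifs_find_tcp_session(&addr);  /* bumps srv_count under cifs_tcp_ses_lock */
    if (srvTcp == NULL) {
            srvTcp = alloc_and_connect(&addr);      /* hypothetical */
            ++srvTcp->srv_count;                    /* first reference */
            write_lock(&cifs_tcp_ses_lock);
            list_add(&srvTcp->tcp_ses_list, &cifs_tcp_ses_list);
            write_unlock(&cifs_tcp_ses_lock);
    }

    /* ... SMB session setup, tree connect, I/O ... */

    cifs_put_tcp_session(srvTcp);   /* last put marks the socket CifsExiting
                                       and signals the cifsd thread to exit */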

Authored by Jeff Layton; committed by Steve French (e7ddee90, 3ec332ef)

+95 -149
+1 -1
fs/cifs/cifs_debug.c
··· 144 seq_printf(m, "TCP status: %d\n\tLocal Users To " 145 "Server: %d SecMode: 0x%x Req On Wire: %d", 146 ses->server->tcpStatus, 147 - atomic_read(&ses->server->socketUseCount), 148 ses->server->secMode, 149 atomic_read(&ses->server->inFlight)); 150
··· 144 seq_printf(m, "TCP status: %d\n\tLocal Users To " 145 "Server: %d SecMode: 0x%x Req On Wire: %d", 146 ses->server->tcpStatus, 147 + ses->server->srv_count, 148 ses->server->secMode, 149 atomic_read(&ses->server->inFlight)); 150
+2 -1
fs/cifs/cifsfs.c
··· 1059 { 1060 int rc = 0; 1061 cifs_proc_init(); 1062 - INIT_LIST_HEAD(&global_cifs_sock_list); 1063 INIT_LIST_HEAD(&GlobalSMBSessionList); /* BB to be removed by jl */ 1064 INIT_LIST_HEAD(&GlobalTreeConnectionList); /* BB to be removed by jl */ 1065 INIT_LIST_HEAD(&GlobalOplock_Q); ··· 1089 GlobalMaxActiveXid = 0; 1090 memset(Local_System_Name, 0, 15); 1091 rwlock_init(&GlobalSMBSeslock); 1092 spin_lock_init(&GlobalMid_Lock); 1093 1094 if (cifs_max_pending < 2) {
··· 1059 { 1060 int rc = 0; 1061 cifs_proc_init(); 1062 + INIT_LIST_HEAD(&cifs_tcp_ses_list); 1063 INIT_LIST_HEAD(&GlobalSMBSessionList); /* BB to be removed by jl */ 1064 INIT_LIST_HEAD(&GlobalTreeConnectionList); /* BB to be removed by jl */ 1065 INIT_LIST_HEAD(&GlobalOplock_Q); ··· 1089 GlobalMaxActiveXid = 0; 1090 memset(Local_System_Name, 0, 15); 1091 rwlock_init(&GlobalSMBSeslock); 1092 + rwlock_init(&cifs_tcp_ses_lock); 1093 spin_lock_init(&GlobalMid_Lock); 1094 1095 if (cifs_max_pending < 2) {
+11 -6
fs/cifs/cifsglob.h
··· 123 struct TCP_Server_Info { 124 struct list_head tcp_ses_list; 125 struct list_head smb_ses_list; 126 /* 15 character server name + 0x20 16th byte indicating type = srv */ 127 char server_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; 128 char unicode_server_Name[SERVER_NAME_LEN_WITH_NULL * 2]; ··· 145 bool svlocal:1; /* local server or remote */ 146 bool noblocksnd; /* use blocking sendmsg */ 147 bool noautotune; /* do not autotune send buf sizes */ 148 - atomic_t socketUseCount; /* number of open cifs sessions on socket */ 149 atomic_t inFlight; /* number of requests on the wire to server */ 150 #ifdef CONFIG_CIFS_STATS2 151 atomic_t inSend; /* requests trying to send */ ··· 591 #define GLOBAL_EXTERN extern 592 #endif 593 594 - 595 - /* the list of TCP_Server_Info structures, ie each of the sockets 596 * connecting our client to a distinct server (ip address), is 597 - * chained together by global_cifs_sock_list. The list of all our SMB 598 * sessions (and from that the tree connections) can be found 599 - * by iterating over global_cifs_sock_list */ 600 - GLOBAL_EXTERN struct list_head global_cifs_sock_list; 601 GLOBAL_EXTERN struct list_head GlobalSMBSessionList; /* BB to be removed by jl*/ 602 GLOBAL_EXTERN struct list_head GlobalTreeConnectionList; /* BB to be removed */ 603 GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
··· 123 struct TCP_Server_Info { 124 struct list_head tcp_ses_list; 125 struct list_head smb_ses_list; 126 + int srv_count; /* reference counter */ 127 /* 15 character server name + 0x20 16th byte indicating type = srv */ 128 char server_RFC1001_name[SERVER_NAME_LEN_WITH_NULL]; 129 char unicode_server_Name[SERVER_NAME_LEN_WITH_NULL * 2]; ··· 144 bool svlocal:1; /* local server or remote */ 145 bool noblocksnd; /* use blocking sendmsg */ 146 bool noautotune; /* do not autotune send buf sizes */ 147 atomic_t inFlight; /* number of requests on the wire to server */ 148 #ifdef CONFIG_CIFS_STATS2 149 atomic_t inSend; /* requests trying to send */ ··· 591 #define GLOBAL_EXTERN extern 592 #endif 593 594 + /* 595 + * the list of TCP_Server_Info structures, ie each of the sockets 596 * connecting our client to a distinct server (ip address), is 597 + * chained together by cifs_tcp_ses_list. The list of all our SMB 598 * sessions (and from that the tree connections) can be found 599 + * by iterating over cifs_tcp_ses_list 600 + */ 601 + GLOBAL_EXTERN struct list_head cifs_tcp_ses_list; 602 + 603 + /* protects cifs_tcp_ses_list and srv_count for each tcp session */ 604 + GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock; 605 + 606 GLOBAL_EXTERN struct list_head GlobalSMBSessionList; /* BB to be removed by jl*/ 607 GLOBAL_EXTERN struct list_head GlobalTreeConnectionList; /* BB to be removed */ 608 GLOBAL_EXTERN rwlock_t GlobalSMBSeslock; /* protects list inserts on 3 above */
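
Because the hunks here are shown flattened, the locking convention this header
change introduces is worth spelling out: cifs_tcp_ses_lock protects both
cifs_tcp_ses_list and each server's srv_count, so any path that modifies either
takes the write lock, while read-only checks (such as the srv_count check in
the cifssmb.c hunk below) take the read lock. A minimal sketch of the get/put
halves under that assumption follows; the helper names are illustrative, and
the real put side is cifs_put_tcp_session() in connect.c:

    /* illustrative helpers (names are not part of the patch): the list
       and the counter are only touched while holding cifs_tcp_ses_lock */
    static void example_get_tcp_session(struct TCP_Server_Info *server)
    {
            write_lock(&cifs_tcp_ses_lock);
            ++server->srv_count;
            write_unlock(&cifs_tcp_ses_lock);
    }

    static void example_put_tcp_session(struct TCP_Server_Info *server)
    {
            write_lock(&cifs_tcp_ses_lock);
            if (--server->srv_count > 0) {
                    write_unlock(&cifs_tcp_ses_lock);
                    return;
            }
            /* last reference: unhook from the list and let the demux
               thread exit, as cifs_put_tcp_session() does via
               CifsExiting and SIGKILL */
            list_del_init(&server->tcp_ses_list);
            write_unlock(&cifs_tcp_ses_lock);
    }
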
+1
fs/cifs/cifsproto.h
··· 102 const __u16 *pfid); 103 extern int mode_to_acl(struct inode *inode, const char *path, __u64); 104 105 extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *, 106 const char *); 107 extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
··· 102 const __u16 *pfid); 103 extern int mode_to_acl(struct inode *inode, const char *path, __u64); 104 105 + extern void cifs_put_tcp_session(struct TCP_Server_Info *server); 106 extern int cifs_mount(struct super_block *, struct cifs_sb_info *, char *, 107 const char *); 108 extern int cifs_umount(struct super_block *, struct cifs_sb_info *);
+9 -9
fs/cifs/cifssmb.c
··· 664 rc = -EIO; 665 goto neg_err_exit; 666 } 667 - 668 - if (server->socketUseCount.counter > 1) { 669 if (memcmp(server->server_GUID, 670 pSMBr->u.extended_response. 671 GUID, 16) != 0) { ··· 675 pSMBr->u.extended_response.GUID, 676 16); 677 } 678 - } else 679 memcpy(server->server_GUID, 680 pSMBr->u.extended_response.GUID, 16); 681 682 if (count == 16) { 683 server->secType = RawNTLMSSP; ··· 833 pSMB->AndXCommand = 0xFF; 834 rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0); 835 session_already_dead: 836 - atomic_dec(&ses->server->socketUseCount); 837 - if (atomic_read(&ses->server->socketUseCount) == 0) { 838 - spin_lock(&GlobalMid_Lock); 839 - ses->server->tcpStatus = CifsExiting; 840 - spin_unlock(&GlobalMid_Lock); 841 - rc = -ESHUTDOWN; 842 } 843 up(&ses->sesSem); 844
··· 664 rc = -EIO; 665 goto neg_err_exit; 666 } 667 + read_lock(&cifs_tcp_ses_lock); 668 + if (server->srv_count > 1) { 669 + read_unlock(&cifs_tcp_ses_lock); 670 if (memcmp(server->server_GUID, 671 pSMBr->u.extended_response. 672 GUID, 16) != 0) { ··· 674 pSMBr->u.extended_response.GUID, 675 16); 676 } 677 + } else { 678 + read_unlock(&cifs_tcp_ses_lock); 679 memcpy(server->server_GUID, 680 pSMBr->u.extended_response.GUID, 16); 681 + } 682 683 if (count == 16) { 684 server->secType = RawNTLMSSP; ··· 830 pSMB->AndXCommand = 0xFF; 831 rc = SendReceiveNoRsp(xid, ses, (struct smb_hdr *) pSMB, 0); 832 session_already_dead: 833 + if (ses->server) { 834 + cifs_put_tcp_session(ses->server); 835 + rc = 0; 836 } 837 up(&ses->sesSem); 838
+71 -132
fs/cifs/connect.c
··· 659 } 660 } /* end while !EXITING */ 661 662 spin_lock(&GlobalMid_Lock); 663 server->tcpStatus = CifsExiting; 664 spin_unlock(&GlobalMid_Lock); ··· 1362 return 0; 1363 } 1364 1365 - static struct cifsSesInfo * 1366 - cifs_find_tcp_session(struct in_addr *target_ip_addr, 1367 - struct in6_addr *target_ip6_addr, 1368 - char *userName, struct TCP_Server_Info **psrvTcp) 1369 { 1370 struct list_head *tmp; 1371 - struct cifsSesInfo *ses; 1372 1373 - *psrvTcp = NULL; 1374 1375 - read_lock(&GlobalSMBSeslock); 1376 - list_for_each(tmp, &GlobalSMBSessionList) { 1377 - ses = list_entry(tmp, struct cifsSesInfo, cifsSessionList); 1378 - if (!ses->server) 1379 continue; 1380 1381 - if (target_ip_addr && 1382 - ses->server->addr.sockAddr.sin_addr.s_addr != target_ip_addr->s_addr) 1383 - continue; 1384 - else if (target_ip6_addr && 1385 - memcmp(&ses->server->addr.sockAddr6.sin6_addr, 1386 - target_ip6_addr, sizeof(*target_ip6_addr))) 1387 - continue; 1388 - /* BB lock server and tcp session; increment use count here?? */ 1389 1390 - /* found a match on the TCP session */ 1391 - *psrvTcp = ses->server; 1392 - 1393 - /* BB check if reconnection needed */ 1394 - if (strncmp(ses->userName, userName, MAX_USERNAME_SIZE) == 0) { 1395 - read_unlock(&GlobalSMBSeslock); 1396 - /* Found exact match on both TCP and 1397 - SMB sessions */ 1398 - return ses; 1399 - } 1400 - /* else tcp and smb sessions need reconnection */ 1401 } 1402 - read_unlock(&GlobalSMBSeslock); 1403 - 1404 return NULL; 1405 } 1406 1407 - static struct cifsTconInfo * 1408 - find_unc(__be32 new_target_ip_addr, char *uncName, char *userName) 1409 { 1410 - struct list_head *tmp; 1411 - struct cifsTconInfo *tcon; 1412 - __be32 old_ip; 1413 1414 - read_lock(&GlobalSMBSeslock); 1415 - 1416 - list_for_each(tmp, &GlobalTreeConnectionList) { 1417 - cFYI(1, ("Next tcon")); 1418 - tcon = list_entry(tmp, struct cifsTconInfo, cifsConnectionList); 1419 - if (!tcon->ses || !tcon->ses->server) 1420 - continue; 1421 - 1422 - old_ip = tcon->ses->server->addr.sockAddr.sin_addr.s_addr; 1423 - cFYI(1, ("old ip addr: %x == new ip %x ?", 1424 - old_ip, new_target_ip_addr)); 1425 - 1426 - if (old_ip != new_target_ip_addr) 1427 - continue; 1428 - 1429 - /* BB lock tcon, server, tcp session and increment use count? 
*/ 1430 - /* found a match on the TCP session */ 1431 - /* BB check if reconnection needed */ 1432 - cFYI(1, ("IP match, old UNC: %s new: %s", 1433 - tcon->treeName, uncName)); 1434 - 1435 - if (strncmp(tcon->treeName, uncName, MAX_TREE_SIZE)) 1436 - continue; 1437 - 1438 - cFYI(1, ("and old usr: %s new: %s", 1439 - tcon->treeName, uncName)); 1440 - 1441 - if (strncmp(tcon->ses->userName, userName, MAX_USERNAME_SIZE)) 1442 - continue; 1443 - 1444 - /* matched smb session (user name) */ 1445 - read_unlock(&GlobalSMBSeslock); 1446 - return tcon; 1447 } 1448 1449 - read_unlock(&GlobalSMBSeslock); 1450 - return NULL; 1451 } 1452 1453 int ··· 1860 } 1861 } 1862 1863 - static void 1864 - kill_cifsd(struct TCP_Server_Info *server) 1865 - { 1866 - struct task_struct *task; 1867 - 1868 - task = xchg(&server->tsk, NULL); 1869 - if (task) 1870 - force_sig(SIGKILL, task); 1871 - } 1872 - 1873 static void setup_cifs_sb(struct smb_vol *pvolume_info, 1874 struct cifs_sb_info *cifs_sb) 1875 { ··· 2038 } 2039 } 2040 2041 - if (addr.sa_family == AF_INET) 2042 - existingCifsSes = cifs_find_tcp_session(&sin_server->sin_addr, 2043 - NULL /* no ipv6 addr */, 2044 - volume_info.username, &srvTcp); 2045 - else if (addr.sa_family == AF_INET6) { 2046 - cFYI(1, ("looking for ipv6 address")); 2047 - existingCifsSes = cifs_find_tcp_session(NULL /* no ipv4 addr */, 2048 - &sin_server6->sin6_addr, 2049 - volume_info.username, &srvTcp); 2050 - } else { 2051 - rc = -EINVAL; 2052 - goto out; 2053 - } 2054 - 2055 - if (!srvTcp) { 2056 if (addr.sa_family == AF_INET6) { 2057 cFYI(1, ("attempting ipv6 connect")); 2058 /* BB should we allow ipv6 on port 139? */ ··· 2111 memcpy(srvTcp->server_RFC1001_name, 2112 volume_info.target_rfc1001_name, 16); 2113 srvTcp->sequence_number = 0; 2114 } 2115 } 2116 ··· 2168 rc = cifs_setup_session(xid, pSesInfo, 2169 cifs_sb->local_nls); 2170 up(&pSesInfo->sesSem); 2171 - if (!rc) 2172 - atomic_inc(&srvTcp->socketUseCount); 2173 } 2174 } 2175 ··· 2175 if (!rc) { 2176 setup_cifs_sb(&volume_info, cifs_sb); 2177 2178 - tcon = 2179 - find_unc(sin_server->sin_addr.s_addr, volume_info.UNC, 2180 - volume_info.username); 2181 if (tcon) { 2182 cFYI(1, ("Found match on UNC path")); 2183 if (tcon->seal != volume_info.seal) ··· 2237 /* on error free sesinfo and tcon struct if needed */ 2238 mount_fail_check: 2239 if (rc) { 2240 - /* if session setup failed, use count is zero but 2241 - we still need to free cifsd thread */ 2242 - if (atomic_read(&srvTcp->socketUseCount) == 0) { 2243 - spin_lock(&GlobalMid_Lock); 2244 - srvTcp->tcpStatus = CifsExiting; 2245 - spin_unlock(&GlobalMid_Lock); 2246 - kill_cifsd(srvTcp); 2247 - } 2248 - /* If find_unc succeeded then rc == 0 so we can not end */ 2249 - if (tcon) /* up accidently freeing someone elses tcon struct */ 2250 tconInfoFree(tcon); 2251 if (existingCifsSes == NULL) { 2252 if (pSesInfo) { 2253 if ((pSesInfo->server) && 2254 - (pSesInfo->status == CifsGood)) { 2255 - int temp_rc; 2256 - temp_rc = CIFSSMBLogoff(xid, pSesInfo); 2257 - /* if the socketUseCount is now zero */ 2258 - if ((temp_rc == -ESHUTDOWN) && 2259 - (pSesInfo->server)) 2260 - kill_cifsd(pSesInfo->server); 2261 - } else { 2262 cFYI(1, ("No session or bad tcon")); 2263 - if (pSesInfo->server) { 2264 - spin_lock(&GlobalMid_Lock); 2265 - srvTcp->tcpStatus = CifsExiting; 2266 - spin_unlock(&GlobalMid_Lock); 2267 - kill_cifsd(pSesInfo->server); 2268 - } 2269 } 2270 sesInfoFree(pSesInfo); 2271 /* pSesInfo = NULL; */ ··· 3558 if (rc == -EBUSY) { 3559 FreeXid(xid); 3560 return 0; 3561 - } else if 
(rc == -ESHUTDOWN) { 3562 - cFYI(1, ("Waking up socket by sending signal")); 3563 - if (ses->server) 3564 - kill_cifsd(ses->server); 3565 - rc = 0; 3566 - } /* else - we have an smb session 3567 - left on this socket do not kill cifsd */ 3568 } else 3569 cFYI(1, ("No session or bad tcon")); 3570 }
··· 659 } 660 } /* end while !EXITING */ 661 662 + /* take it off the list, if it's not already */ 663 + write_lock(&cifs_tcp_ses_lock); 664 + list_del_init(&server->tcp_ses_list); 665 + write_unlock(&cifs_tcp_ses_lock); 666 + 667 spin_lock(&GlobalMid_Lock); 668 server->tcpStatus = CifsExiting; 669 spin_unlock(&GlobalMid_Lock); ··· 1357 return 0; 1358 } 1359 1360 + static struct TCP_Server_Info * 1361 + cifs_find_tcp_session(struct sockaddr *addr) 1362 { 1363 struct list_head *tmp; 1364 + struct TCP_Server_Info *server; 1365 + struct sockaddr_in *addr4 = (struct sockaddr_in *) addr; 1366 + struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *) addr; 1367 1368 + write_lock(&cifs_tcp_ses_lock); 1369 + list_for_each(tmp, &cifs_tcp_ses_list) { 1370 + server = list_entry(tmp, struct TCP_Server_Info, 1371 + tcp_ses_list); 1372 1373 + /* 1374 + * the demux thread can exit on its own while still in CifsNew 1375 + * so don't accept any sockets in that state. Since the 1376 + * tcpStatus never changes back to CifsNew it's safe to check 1377 + * for this without a lock. 1378 + */ 1379 + if (server->tcpStatus == CifsNew) 1380 continue; 1381 1382 + if (addr->sa_family == AF_INET && 1383 + (addr4->sin_addr.s_addr != 1384 + server->addr.sockAddr.sin_addr.s_addr)) 1385 + continue; 1386 + else if (addr->sa_family == AF_INET6 && 1387 + memcmp(&server->addr.sockAddr6.sin6_addr, 1388 + &addr6->sin6_addr, sizeof(addr6->sin6_addr))) 1389 + continue; 1390 1391 + ++server->srv_count; 1392 + write_unlock(&cifs_tcp_ses_lock); 1393 + return server; 1394 } 1395 + write_unlock(&cifs_tcp_ses_lock); 1396 return NULL; 1397 } 1398 1399 + void 1400 + cifs_put_tcp_session(struct TCP_Server_Info *server) 1401 { 1402 + struct task_struct *task; 1403 1404 + write_lock(&cifs_tcp_ses_lock); 1405 + if (--server->srv_count > 0) { 1406 + write_unlock(&cifs_tcp_ses_lock); 1407 + return; 1408 } 1409 1410 + list_del_init(&server->tcp_ses_list); 1411 + write_unlock(&cifs_tcp_ses_lock); 1412 + 1413 + spin_lock(&GlobalMid_Lock); 1414 + server->tcpStatus = CifsExiting; 1415 + spin_unlock(&GlobalMid_Lock); 1416 + 1417 + task = xchg(&server->tsk, NULL); 1418 + if (task) 1419 + force_sig(SIGKILL, task); 1420 } 1421 1422 int ··· 1881 } 1882 } 1883 1884 static void setup_cifs_sb(struct smb_vol *pvolume_info, 1885 struct cifs_sb_info *cifs_sb) 1886 { ··· 2069 } 2070 } 2071 2072 + srvTcp = cifs_find_tcp_session(&addr); 2073 + if (srvTcp) { 2074 + cFYI(1, ("Existing tcp session with server found")); 2075 + } else { /* create socket */ 2076 if (addr.sa_family == AF_INET6) { 2077 cFYI(1, ("attempting ipv6 connect")); 2078 /* BB should we allow ipv6 on port 139? 
*/ ··· 2153 memcpy(srvTcp->server_RFC1001_name, 2154 volume_info.target_rfc1001_name, 16); 2155 srvTcp->sequence_number = 0; 2156 + INIT_LIST_HEAD(&srvTcp->tcp_ses_list); 2157 + ++srvTcp->srv_count; 2158 + write_lock(&cifs_tcp_ses_lock); 2159 + list_add(&srvTcp->tcp_ses_list, 2160 + &cifs_tcp_ses_list); 2161 + write_unlock(&cifs_tcp_ses_lock); 2162 } 2163 } 2164 ··· 2204 rc = cifs_setup_session(xid, pSesInfo, 2205 cifs_sb->local_nls); 2206 up(&pSesInfo->sesSem); 2207 } 2208 } 2209 ··· 2213 if (!rc) { 2214 setup_cifs_sb(&volume_info, cifs_sb); 2215 2216 if (tcon) { 2217 cFYI(1, ("Found match on UNC path")); 2218 if (tcon->seal != volume_info.seal) ··· 2278 /* on error free sesinfo and tcon struct if needed */ 2279 mount_fail_check: 2280 if (rc) { 2281 + /* If find_unc succeeded then rc == 0 so we can not end */ 2282 + /* up accidently freeing someone elses tcon struct */ 2283 + if (tcon) 2284 tconInfoFree(tcon); 2285 + 2286 if (existingCifsSes == NULL) { 2287 if (pSesInfo) { 2288 if ((pSesInfo->server) && 2289 + (pSesInfo->status == CifsGood)) 2290 + CIFSSMBLogoff(xid, pSesInfo); 2291 + else { 2292 cFYI(1, ("No session or bad tcon")); 2293 + if (pSesInfo->server) 2294 + cifs_put_tcp_session( 2295 + pSesInfo->server); 2296 } 2297 sesInfoFree(pSesInfo); 2298 /* pSesInfo = NULL; */ ··· 3613 if (rc == -EBUSY) { 3614 FreeXid(xid); 3615 return 0; 3616 + } 3617 } else 3618 cFYI(1, ("No session or bad tcon")); 3619 }