Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag '5.20-rc-smb3-client-fixes-part1' of git://git.samba.org/sfrench/cifs-2.6

Pull cifs updates from Steve French:
"Mostly cleanup, including smb1 refactoring:

- multichannel perf improvement

- move additional SMB1 code to not be compiled in when legacy support
is disabled.

- bug fixes, including one important one for a memory leak

- various cleanup patches

We are still working on and testing some deferred close improvements,
including an important lease break fix for the case when multiple deferred
closes are still open, and also some additional perf improvements -
those are not included here"

* tag '5.20-rc-smb3-client-fixes-part1' of git://git.samba.org/sfrench/cifs-2.6:
cifs: update internal module number
cifs: alloc_mid function should be marked as static
cifs: remove "cifs_" prefix from init/destroy mids functions
cifs: remove useless DeleteMidQEntry()
cifs: when insecure legacy is disabled shrink amount of SMB1 code
cifs: trivial style fixup
cifs: fix wrong unlock before return from cifs_tree_connect()
cifs: avoid use of global locks for high contention data
cifs: remove remaining build warnings
cifs: list_for_each() -> list_for_each_entry()
cifs: update MAINTAINERS file with reviewers
smb2: small refactor in smb2_check_message()
cifs: Fix memory leak when using fscache
cifs: remove minor build warning
cifs: remove some camelCase and also some static build warnings
cifs: remove unnecessary (void*) conversions.
cifs: remove unnecessary type castings
cifs: remove redundant initialization to variable mnt_sign_enabled
smb3: check xattr value length earlier

+1104 -907
+5 -2
MAINTAINERS
··· 5123 5123 F: include/linux/of_clk.h 5124 5124 X: drivers/clk/clkdev.c 5125 5125 5126 - COMMON INTERNET FILE SYSTEM CLIENT (CIFS) 5126 + COMMON INTERNET FILE SYSTEM CLIENT (CIFS and SMB3) 5127 5127 M: Steve French <sfrench@samba.org> 5128 + R: Paulo Alcantara <pc@cjr.nz> (DFS, global name space) 5129 + R: Ronnie Sahlberg <lsahlber@redhat.com> (directory leases, sparse files) 5130 + R: Shyam Prasad N <sprasad@microsoft.com> (multichannel) 5128 5131 L: linux-cifs@vger.kernel.org 5129 5132 L: samba-technical@lists.samba.org (moderated for non-subscribers) 5130 5133 S: Supported 5131 - W: http://linux-cifs.samba.org/ 5134 + W: https://wiki.samba.org/index.php/LinuxCIFS 5132 5135 T: git git://git.samba.org/sfrench/cifs-2.6.git 5133 5136 F: Documentation/admin-guide/cifs/ 5134 5137 F: fs/cifs/
+2 -2
fs/cifs/Makefile
··· 5 5 ccflags-y += -I$(src) # needed for trace events 6 6 obj-$(CONFIG_CIFS) += cifs.o 7 7 8 - cifs-y := trace.o cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o \ 8 + cifs-y := trace.o cifsfs.o cifs_debug.o connect.o dir.o file.o \ 9 9 inode.o link.o misc.o netmisc.o smbencrypt.o transport.o \ 10 10 cifs_unicode.o nterr.o cifsencrypt.o \ 11 11 readdir.o ioctl.o sess.o export.o unc.o winucase.o \ ··· 31 31 32 32 cifs-$(CONFIG_CIFS_ROOT) += cifsroot.o 33 33 34 - cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o 34 + cifs-$(CONFIG_CIFS_ALLOW_INSECURE_LEGACY) += smb1ops.o cifssmb.o
+23 -49
fs/cifs/cifs_debug.c
··· 36 36 void cifs_dump_detail(void *buf, struct TCP_Server_Info *server) 37 37 { 38 38 #ifdef CONFIG_CIFS_DEBUG2 39 - struct smb_hdr *smb = (struct smb_hdr *)buf; 39 + struct smb_hdr *smb = buf; 40 40 41 41 cifs_dbg(VFS, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d\n", 42 42 smb->Command, smb->Status.CifsError, ··· 55 55 return; 56 56 57 57 cifs_dbg(VFS, "Dump pending requests:\n"); 58 - spin_lock(&GlobalMid_Lock); 58 + spin_lock(&server->mid_lock); 59 59 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { 60 60 cifs_dbg(VFS, "State: %d Cmd: %d Pid: %d Cbdata: %p Mid %llu\n", 61 61 mid_entry->mid_state, ··· 78 78 mid_entry->resp_buf, 62); 79 79 } 80 80 } 81 - spin_unlock(&GlobalMid_Lock); 81 + spin_unlock(&server->mid_lock); 82 82 #endif /* CONFIG_CIFS_DEBUG2 */ 83 83 } 84 84 ··· 168 168 169 169 static int cifs_debug_files_proc_show(struct seq_file *m, void *v) 170 170 { 171 - struct list_head *tmp, *tmp1, *tmp2; 172 171 struct TCP_Server_Info *server; 173 172 struct cifs_ses *ses; 174 173 struct cifs_tcon *tcon; ··· 183 184 #endif /* CIFS_DEBUG2 */ 184 185 spin_lock(&cifs_tcp_ses_lock); 185 186 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 186 - list_for_each(tmp, &server->smb_ses_list) { 187 - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); 188 - list_for_each(tmp1, &ses->tcon_list) { 189 - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); 187 + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 188 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 190 189 spin_lock(&tcon->open_file_lock); 191 - list_for_each(tmp2, &tcon->openFileList) { 192 - cfile = list_entry(tmp2, struct cifsFileInfo, 193 - tlist); 190 + list_for_each_entry(cfile, &tcon->openFileList, tlist) { 194 191 seq_printf(m, 195 192 "0x%x 0x%llx 0x%x %d %d %d %pd", 196 193 tcon->tid, ··· 213 218 214 219 static int cifs_debug_data_proc_show(struct seq_file *m, void *v) 215 220 { 216 - struct list_head *tmp2, *tmp3; 217 221 
struct mid_q_entry *mid_entry; 218 222 struct TCP_Server_Info *server; 219 223 struct cifs_ses *ses; ··· 375 381 376 382 seq_printf(m, "\n\n\tSessions: "); 377 383 i = 0; 378 - list_for_each(tmp2, &server->smb_ses_list) { 379 - ses = list_entry(tmp2, struct cifs_ses, 380 - smb_ses_list); 384 + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 381 385 i++; 382 386 if ((ses->serverDomain == NULL) || 383 387 (ses->serverOS == NULL) || ··· 439 447 else 440 448 seq_puts(m, "none\n"); 441 449 442 - list_for_each(tmp3, &ses->tcon_list) { 443 - tcon = list_entry(tmp3, struct cifs_tcon, 444 - tcon_list); 450 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 445 451 ++j; 446 452 seq_printf(m, "\n\t%d) ", j); 447 453 cifs_debug_tcon(m, tcon); ··· 463 473 seq_printf(m, "\n\t\t[NONE]"); 464 474 465 475 seq_puts(m, "\n\n\tMIDs: "); 466 - spin_lock(&GlobalMid_Lock); 467 - list_for_each(tmp3, &server->pending_mid_q) { 468 - mid_entry = list_entry(tmp3, struct mid_q_entry, 469 - qhead); 476 + spin_lock(&server->mid_lock); 477 + list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { 470 478 seq_printf(m, "\n\tState: %d com: %d pid:" 471 479 " %d cbdata: %p mid %llu\n", 472 480 mid_entry->mid_state, ··· 473 485 mid_entry->callback_data, 474 486 mid_entry->mid); 475 487 } 476 - spin_unlock(&GlobalMid_Lock); 488 + spin_unlock(&server->mid_lock); 477 489 seq_printf(m, "\n--\n"); 478 490 } 479 491 if (c == 0) ··· 492 504 { 493 505 bool bv; 494 506 int rc; 495 - struct list_head *tmp1, *tmp2, *tmp3; 496 507 struct TCP_Server_Info *server; 497 508 struct cifs_ses *ses; 498 509 struct cifs_tcon *tcon; ··· 501 514 #ifdef CONFIG_CIFS_STATS2 502 515 int i; 503 516 504 - atomic_set(&totBufAllocCount, 0); 505 - atomic_set(&totSmBufAllocCount, 0); 517 + atomic_set(&total_buf_alloc_count, 0); 518 + atomic_set(&total_small_buf_alloc_count, 0); 506 519 #endif /* CONFIG_CIFS_STATS2 */ 507 520 atomic_set(&tcpSesReconnectCount, 0); 508 521 
atomic_set(&tconInfoReconnectCount, 0); ··· 512 525 GlobalCurrentXid = 0; 513 526 spin_unlock(&GlobalMid_Lock); 514 527 spin_lock(&cifs_tcp_ses_lock); 515 - list_for_each(tmp1, &cifs_tcp_ses_list) { 516 - server = list_entry(tmp1, struct TCP_Server_Info, 517 - tcp_ses_list); 528 + list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 518 529 server->max_in_flight = 0; 519 530 #ifdef CONFIG_CIFS_STATS2 520 531 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) { ··· 523 538 server->fastest_cmd[0] = 0; 524 539 } 525 540 #endif /* CONFIG_CIFS_STATS2 */ 526 - list_for_each(tmp2, &server->smb_ses_list) { 527 - ses = list_entry(tmp2, struct cifs_ses, 528 - smb_ses_list); 529 - list_for_each(tmp3, &ses->tcon_list) { 530 - tcon = list_entry(tmp3, 531 - struct cifs_tcon, 532 - tcon_list); 541 + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 542 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 533 543 atomic_set(&tcon->num_smbs_sent, 0); 534 544 spin_lock(&tcon->stat_lock); 535 545 tcon->bytes_read = 0; ··· 549 569 #ifdef CONFIG_CIFS_STATS2 550 570 int j; 551 571 #endif /* STATS2 */ 552 - struct list_head *tmp2, *tmp3; 553 572 struct TCP_Server_Info *server; 554 573 struct cifs_ses *ses; 555 574 struct cifs_tcon *tcon; ··· 558 579 seq_printf(m, "Share (unique mount targets): %d\n", 559 580 tconInfoAllocCount.counter); 560 581 seq_printf(m, "SMB Request/Response Buffer: %d Pool size: %d\n", 561 - bufAllocCount.counter, 582 + buf_alloc_count.counter, 562 583 cifs_min_rcv + tcpSesAllocCount.counter); 563 584 seq_printf(m, "SMB Small Req/Resp Buffer: %d Pool size: %d\n", 564 - smBufAllocCount.counter, cifs_min_small); 585 + small_buf_alloc_count.counter, cifs_min_small); 565 586 #ifdef CONFIG_CIFS_STATS2 566 587 seq_printf(m, "Total Large %d Small %d Allocations\n", 567 - atomic_read(&totBufAllocCount), 568 - atomic_read(&totSmBufAllocCount)); 588 + atomic_read(&total_buf_alloc_count), 589 + atomic_read(&total_small_buf_alloc_count)); 569 590 
#endif /* CONFIG_CIFS_STATS2 */ 570 591 571 - seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount)); 592 + seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&mid_count)); 572 593 seq_printf(m, 573 594 "\n%d session %d share reconnects\n", 574 595 tcpSesReconnectCount.counter, tconInfoReconnectCount.counter); ··· 598 619 atomic_read(&server->smb2slowcmd[j]), 599 620 server->hostname, j); 600 621 #endif /* STATS2 */ 601 - list_for_each(tmp2, &server->smb_ses_list) { 602 - ses = list_entry(tmp2, struct cifs_ses, 603 - smb_ses_list); 604 - list_for_each(tmp3, &ses->tcon_list) { 605 - tcon = list_entry(tmp3, 606 - struct cifs_tcon, 607 - tcon_list); 622 + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 623 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 608 624 i++; 609 625 seq_printf(m, "\n%d) %s", i, tcon->treeName); 610 626 if (tcon->need_reconnect)
+2
fs/cifs/cifsacl.c
··· 1379 1379 return rc; 1380 1380 } 1381 1381 1382 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1382 1383 struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb, 1383 1384 const struct cifs_fid *cifsfid, u32 *pacllen, 1384 1385 u32 __maybe_unused unused) ··· 1513 1512 cifs_put_tlink(tlink); 1514 1513 return rc; 1515 1514 } 1515 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1516 1516 1517 1517 /* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */ 1518 1518 int
+3 -3
fs/cifs/cifsencrypt.c
··· 141 141 if ((cifs_pdu == NULL) || (server == NULL)) 142 142 return -EINVAL; 143 143 144 - spin_lock(&cifs_tcp_ses_lock); 144 + spin_lock(&server->srv_lock); 145 145 if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) || 146 146 server->tcpStatus == CifsNeedNegotiate) { 147 - spin_unlock(&cifs_tcp_ses_lock); 147 + spin_unlock(&server->srv_lock); 148 148 return rc; 149 149 } 150 - spin_unlock(&cifs_tcp_ses_lock); 150 + spin_unlock(&server->srv_lock); 151 151 152 152 if (!server->session_estab) { 153 153 memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
+41 -12
fs/cifs/cifsfs.c
··· 68 68 unsigned int global_secflags = CIFSSEC_DEF; 69 69 /* unsigned int ntlmv2_support = 0; */ 70 70 unsigned int sign_CIFS_PDUs = 1; 71 + 72 + /* 73 + * Global transaction id (XID) information 74 + */ 75 + unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ 76 + unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ 77 + unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 78 + spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ 79 + 80 + /* 81 + * Global counters, updated atomically 82 + */ 83 + atomic_t sesInfoAllocCount; 84 + atomic_t tconInfoAllocCount; 85 + atomic_t tcpSesNextId; 86 + atomic_t tcpSesAllocCount; 87 + atomic_t tcpSesReconnectCount; 88 + atomic_t tconInfoReconnectCount; 89 + 90 + atomic_t mid_count; 91 + atomic_t buf_alloc_count; 92 + atomic_t small_buf_alloc_count; 93 + #ifdef CONFIG_CIFS_STATS2 94 + atomic_t total_buf_alloc_count; 95 + atomic_t total_small_buf_alloc_count; 96 + #endif/* STATS2 */ 97 + struct list_head cifs_tcp_ses_list; 98 + spinlock_t cifs_tcp_ses_lock; 71 99 static const struct super_operations cifs_super_ops; 72 100 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE; 73 101 module_param(CIFSMaxBufSize, uint, 0444); ··· 731 703 tcon = cifs_sb_master_tcon(cifs_sb); 732 704 733 705 spin_lock(&cifs_tcp_ses_lock); 706 + spin_lock(&tcon->tc_lock); 734 707 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) { 735 708 /* we have other mounts to same share or we have 736 709 already tried to force umount this and woken up 737 710 all waiting network requests, nothing to do */ 711 + spin_unlock(&tcon->tc_lock); 738 712 spin_unlock(&cifs_tcp_ses_lock); 739 713 return; 740 714 } else if (tcon->tc_count == 1) 741 715 tcon->status = TID_EXITING; 716 + spin_unlock(&tcon->tc_lock); 742 717 spin_unlock(&cifs_tcp_ses_lock); 743 718 744 719 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */ ··· 1568 1537 kmem_cache_destroy(cifs_sm_req_cachep); 1569 1538 } 
1570 1539 1571 - static int 1572 - cifs_init_mids(void) 1540 + static int init_mids(void) 1573 1541 { 1574 1542 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids", 1575 1543 sizeof(struct mid_q_entry), 0, ··· 1586 1556 return 0; 1587 1557 } 1588 1558 1589 - static void 1590 - cifs_destroy_mids(void) 1559 + static void destroy_mids(void) 1591 1560 { 1592 1561 mempool_destroy(cifs_mid_poolp); 1593 1562 kmem_cache_destroy(cifs_mid_cachep); ··· 1608 1579 atomic_set(&tcpSesReconnectCount, 0); 1609 1580 atomic_set(&tconInfoReconnectCount, 0); 1610 1581 1611 - atomic_set(&bufAllocCount, 0); 1612 - atomic_set(&smBufAllocCount, 0); 1582 + atomic_set(&buf_alloc_count, 0); 1583 + atomic_set(&small_buf_alloc_count, 0); 1613 1584 #ifdef CONFIG_CIFS_STATS2 1614 - atomic_set(&totBufAllocCount, 0); 1615 - atomic_set(&totSmBufAllocCount, 0); 1585 + atomic_set(&total_buf_alloc_count, 0); 1586 + atomic_set(&total_small_buf_alloc_count, 0); 1616 1587 if (slow_rsp_threshold < 1) 1617 1588 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n"); 1618 1589 else if (slow_rsp_threshold > 32767) ··· 1620 1591 "slow response threshold set higher than recommended (0 to 32767)\n"); 1621 1592 #endif /* CONFIG_CIFS_STATS2 */ 1622 1593 1623 - atomic_set(&midCount, 0); 1594 + atomic_set(&mid_count, 0); 1624 1595 GlobalCurrentXid = 0; 1625 1596 GlobalTotalActiveXid = 0; 1626 1597 GlobalMaxActiveXid = 0; ··· 1683 1654 if (rc) 1684 1655 goto out_destroy_deferredclose_wq; 1685 1656 1686 - rc = cifs_init_mids(); 1657 + rc = init_mids(); 1687 1658 if (rc) 1688 1659 goto out_destroy_inodecache; 1689 1660 ··· 1740 1711 #endif 1741 1712 cifs_destroy_request_bufs(); 1742 1713 out_destroy_mids: 1743 - cifs_destroy_mids(); 1714 + destroy_mids(); 1744 1715 out_destroy_inodecache: 1745 1716 cifs_destroy_inodecache(); 1746 1717 out_destroy_deferredclose_wq: ··· 1776 1747 dfs_cache_destroy(); 1777 1748 #endif 1778 1749 cifs_destroy_request_bufs(); 1779 - cifs_destroy_mids(); 1750 + destroy_mids(); 1780 1751 
cifs_destroy_inodecache(); 1781 1752 destroy_workqueue(deferredclose_wq); 1782 1753 destroy_workqueue(cifsoplockd_wq);
+2 -2
fs/cifs/cifsfs.h
··· 153 153 #endif /* CONFIG_CIFS_NFSD_EXPORT */ 154 154 155 155 /* when changing internal version - update following two lines at same time */ 156 - #define SMB3_PRODUCT_BUILD 37 157 - #define CIFS_VERSION "2.37" 156 + #define SMB3_PRODUCT_BUILD 38 157 + #define CIFS_VERSION "2.38" 158 158 #endif /* _CIFSFS_H */
+91 -44
fs/cifs/cifsglob.h
··· 605 605 struct TCP_Server_Info { 606 606 struct list_head tcp_ses_list; 607 607 struct list_head smb_ses_list; 608 + spinlock_t srv_lock; /* protect anything here that is not protected */ 608 609 __u64 conn_id; /* connection identifier (useful for debugging) */ 609 610 int srv_count; /* reference counter */ 610 611 /* 15 character server name + 0x20 16th byte indicating type = srv */ ··· 623 622 #endif 624 623 wait_queue_head_t response_q; 625 624 wait_queue_head_t request_q; /* if more than maxmpx to srvr must block*/ 625 + spinlock_t mid_lock; /* protect mid queue and it's entries */ 626 626 struct list_head pending_mid_q; 627 627 bool noblocksnd; /* use blocking sendmsg */ 628 628 bool noautotune; /* do not autotune send buf sizes */ ··· 1010 1008 struct list_head rlist; /* reconnect list */ 1011 1009 struct list_head tcon_list; 1012 1010 struct cifs_tcon *tcon_ipc; 1011 + spinlock_t ses_lock; /* protect anything here that is not protected */ 1013 1012 struct mutex session_mutex; 1014 1013 struct TCP_Server_Info *server; /* pointer to server info */ 1015 1014 int ses_count; /* reference counter */ ··· 1172 1169 struct list_head tcon_list; 1173 1170 int tc_count; 1174 1171 struct list_head rlist; /* reconnect list */ 1172 + spinlock_t tc_lock; /* protect anything here that is not protected */ 1175 1173 atomic_t num_local_opens; /* num of all opens including disconnected */ 1176 1174 atomic_t num_remote_opens; /* num of all network opens on server */ 1177 1175 struct list_head openFileList; ··· 1903 1899 */ 1904 1900 1905 1901 /**************************************************************************** 1906 - * Locking notes. All updates to global variables and lists should be 1907 - * protected by spinlocks or semaphores. 1902 + * Here are all the locks (spinlock, mutex, semaphore) in cifs.ko, arranged according 1903 + * to the locking order. i.e. 
if two locks are to be held together, the lock that 1904 + * appears higher in this list needs to be taken before the other. 1908 1905 * 1909 - * Spinlocks 1910 - * --------- 1911 - * GlobalMid_Lock protects: 1912 - * list operations on pending_mid_q and oplockQ 1913 - * updates to XID counters, multiplex id and SMB sequence numbers 1914 - * list operations on global DnotifyReqList 1915 - * updates to ses->status and TCP_Server_Info->tcpStatus 1916 - * updates to server->CurrentMid 1917 - * tcp_ses_lock protects: 1918 - * list operations on tcp and SMB session lists 1919 - * tcon->open_file_lock protects the list of open files hanging off the tcon 1920 - * inode->open_file_lock protects the openFileList hanging off the inode 1921 - * cfile->file_info_lock protects counters and fields in cifs file struct 1922 - * f_owner.lock protects certain per file struct operations 1923 - * mapping->page_lock protects certain per page operations 1906 + * If you hold a lock that is lower in this list, and you need to take a higher lock 1907 + * (or if you think that one of the functions that you're calling may need to), first 1908 + * drop the lock you hold, pick up the higher lock, then the lower one. This will 1909 + * ensure that locks are picked up only in one direction in the below table 1910 + * (top to bottom). 1924 1911 * 1925 - * Note that the cifs_tcon.open_file_lock should be taken before 1926 - * not after the cifsInodeInfo.open_file_lock 1912 + * Also, if you expect a function to be called with a lock held, explicitly document 1913 + * this in the comments on top of your function definition. 1927 1914 * 1928 - * Semaphores 1929 - * ---------- 1930 - * cifsInodeInfo->lock_sem protects: 1931 - * the list of locks held by the inode 1915 + * And also, try to keep the critical sections (lock hold time) to be as minimal as 1916 + * possible. Blocking / calling other functions with a lock held always increase 1917 + * the risk of a possible deadlock. 
1932 1918 * 1919 + * Following this rule will avoid unnecessary deadlocks, which can get really hard to 1920 + * debug. Also, any new lock that you introduce, please add to this list in the correct 1921 + * order. 1922 + * 1923 + * Please populate this list whenever you introduce new locks in your changes. Or in 1924 + * case I've missed some existing locks. Please ensure that it's added in the list 1925 + * based on the locking order expected. 1926 + * 1927 + * ===================================================================================== 1928 + * Lock Protects Initialization fn 1929 + * ===================================================================================== 1930 + * vol_list_lock 1931 + * vol_info->ctx_lock vol_info->ctx 1932 + * cifs_sb_info->tlink_tree_lock cifs_sb_info->tlink_tree cifs_setup_cifs_sb 1933 + * TCP_Server_Info-> TCP_Server_Info cifs_get_tcp_session 1934 + * reconnect_mutex 1935 + * TCP_Server_Info->srv_mutex TCP_Server_Info cifs_get_tcp_session 1936 + * cifs_ses->session_mutex cifs_ses sesInfoAlloc 1937 + * cifs_tcon 1938 + * cifs_tcon->open_file_lock cifs_tcon->openFileList tconInfoAlloc 1939 + * cifs_tcon->pending_opens 1940 + * cifs_tcon->stat_lock cifs_tcon->bytes_read tconInfoAlloc 1941 + * cifs_tcon->bytes_written 1942 + * cifs_tcp_ses_lock cifs_tcp_ses_list sesInfoAlloc 1943 + * GlobalMid_Lock GlobalMaxActiveXid init_cifs 1944 + * GlobalCurrentXid 1945 + * GlobalTotalActiveXid 1946 + * TCP_Server_Info->srv_lock (anything in struct not protected by another lock and can change) 1947 + * TCP_Server_Info->mid_lock TCP_Server_Info->pending_mid_q cifs_get_tcp_session 1948 + * ->CurrentMid 1949 + * (any changes in mid_q_entry fields) 1950 + * TCP_Server_Info->req_lock TCP_Server_Info->in_flight cifs_get_tcp_session 1951 + * ->credits 1952 + * ->echo_credits 1953 + * ->oplock_credits 1954 + * ->reconnect_instance 1955 + * cifs_ses->ses_lock (anything that is not protected by another lock and can change) 1956 + * 
cifs_ses->iface_lock cifs_ses->iface_list sesInfoAlloc 1957 + * ->iface_count 1958 + * ->iface_last_update 1959 + * cifs_ses->chan_lock cifs_ses->chans 1960 + * ->chans_need_reconnect 1961 + * ->chans_in_reconnect 1962 + * cifs_tcon->tc_lock (anything that is not protected by another lock and can change) 1963 + * cifsInodeInfo->open_file_lock cifsInodeInfo->openFileList cifs_alloc_inode 1964 + * cifsInodeInfo->writers_lock cifsInodeInfo->writers cifsInodeInfo_alloc 1965 + * cifsInodeInfo->lock_sem cifsInodeInfo->llist cifs_init_once 1966 + * ->can_cache_brlcks 1967 + * cifsInodeInfo->deferred_lock cifsInodeInfo->deferred_closes cifsInodeInfo_alloc 1968 + * cached_fid->fid_mutex cifs_tcon->crfid tconInfoAlloc 1969 + * cifsFileInfo->fh_mutex cifsFileInfo cifs_new_fileinfo 1970 + * cifsFileInfo->file_info_lock cifsFileInfo->count cifs_new_fileinfo 1971 + * ->invalidHandle initiate_cifs_search 1972 + * ->oplock_break_cancelled 1973 + * cifs_aio_ctx->aio_mutex cifs_aio_ctx cifs_aio_ctx_alloc 1933 1974 ****************************************************************************/ 1934 1975 1935 1976 #ifdef DECLARE_GLOBALS_HERE ··· 1990 1941 * sessions (and from that the tree connections) can be found 1991 1942 * by iterating over cifs_tcp_ses_list 1992 1943 */ 1993 - GLOBAL_EXTERN struct list_head cifs_tcp_ses_list; 1944 + extern struct list_head cifs_tcp_ses_list; 1994 1945 1995 1946 /* 1996 1947 * This lock protects the cifs_tcp_ses_list, the list of smb sessions per 1997 1948 * tcp session, and the list of tcon's per smb session. It also protects 1998 - * the reference counters for the server, smb session, and tcon. It also 1999 - * protects some fields in the TCP_Server_Info struct such as dstaddr. Finally, 2000 - * changes to the tcon->tidStatus should be done while holding this lock. 1949 + * the reference counters for the server, smb session, and tcon. 
2001 1950 * generally the locks should be taken in order tcp_ses_lock before 2002 1951 * tcon->open_file_lock and that before file->file_info_lock since the 2003 1952 * structure order is cifs_socket-->cifs_ses-->cifs_tcon-->cifs_file 2004 1953 */ 2005 - GLOBAL_EXTERN spinlock_t cifs_tcp_ses_lock; 1954 + extern spinlock_t cifs_tcp_ses_lock; 2006 1955 2007 1956 /* 2008 1957 * Global transaction id (XID) information 2009 1958 */ 2010 - GLOBAL_EXTERN unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ 2011 - GLOBAL_EXTERN unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ 2012 - GLOBAL_EXTERN unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 2013 - GLOBAL_EXTERN spinlock_t GlobalMid_Lock; /* protects above & list operations */ 2014 - /* on midQ entries */ 1959 + extern unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */ 1960 + extern unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */ 1961 + extern unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */ 1962 + extern spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */ 1963 + 2015 1964 /* 2016 1965 * Global counters, updated atomically 2017 1966 */ 2018 - GLOBAL_EXTERN atomic_t sesInfoAllocCount; 2019 - GLOBAL_EXTERN atomic_t tconInfoAllocCount; 2020 - GLOBAL_EXTERN atomic_t tcpSesNextId; 2021 - GLOBAL_EXTERN atomic_t tcpSesAllocCount; 2022 - GLOBAL_EXTERN atomic_t tcpSesReconnectCount; 2023 - GLOBAL_EXTERN atomic_t tconInfoReconnectCount; 1967 + extern atomic_t sesInfoAllocCount; 1968 + extern atomic_t tconInfoAllocCount; 1969 + extern atomic_t tcpSesNextId; 1970 + extern atomic_t tcpSesAllocCount; 1971 + extern atomic_t tcpSesReconnectCount; 1972 + extern atomic_t tconInfoReconnectCount; 2024 1973 2025 1974 /* Various Debug counters */ 2026 - GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */ 1975 + extern atomic_t buf_alloc_count; /* current number allocated */ 1976 + extern atomic_t small_buf_alloc_count; 
2027 1977 #ifdef CONFIG_CIFS_STATS2 2028 - GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */ 2029 - GLOBAL_EXTERN atomic_t totSmBufAllocCount; 1978 + extern atomic_t total_buf_alloc_count; /* total allocated over all time */ 1979 + extern atomic_t total_small_buf_alloc_count; 2030 1980 extern unsigned int slow_rsp_threshold; /* number of secs before logging */ 2031 1981 #endif 2032 - GLOBAL_EXTERN atomic_t smBufAllocCount; 2033 - GLOBAL_EXTERN atomic_t midCount; 2034 1982 2035 1983 /* Misc globals */ 2036 1984 extern bool enable_oplocks; /* enable or disable oplocks */ ··· 2044 1998 extern unsigned int cifs_min_small; /* min size of small buf pool */ 2045 1999 extern unsigned int cifs_max_pending; /* MAX requests at once to server*/ 2046 2000 extern bool disable_legacy_dialects; /* forbid vers=1.0 and vers=2.0 mounts */ 2001 + extern atomic_t mid_count; 2047 2002 2048 2003 void cifs_oplock_break(struct work_struct *work); 2049 2004 void cifs_queue_oplock_break(struct cifsFileInfo *cfile);
+4 -6
fs/cifs/cifsproto.h
··· 78 78 extern char *cifs_compose_mount_options(const char *sb_mountdata, 79 79 const char *fullpath, const struct dfs_info3_param *ref, 80 80 char **devname); 81 - /* extern void renew_parental_timestamps(struct dentry *direntry);*/ 82 - extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, 83 - struct TCP_Server_Info *server); 84 - extern void DeleteMidQEntry(struct mid_q_entry *midEntry); 85 - extern void cifs_delete_mid(struct mid_q_entry *mid); 86 - extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry); 81 + extern void delete_mid(struct mid_q_entry *mid); 82 + extern void release_mid(struct mid_q_entry *mid); 87 83 extern void cifs_wake_up_task(struct mid_q_entry *mid); 88 84 extern int cifs_handle_standard(struct TCP_Server_Info *server, 89 85 struct mid_q_entry *mid); ··· 517 521 extern int generate_smb311signingkey(struct cifs_ses *ses, 518 522 struct TCP_Server_Info *server); 519 523 524 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 520 525 extern int CIFSSMBCopy(unsigned int xid, 521 526 struct cifs_tcon *source_tcon, 522 527 const char *fromName, ··· 548 551 const struct nls_table *nls_codepage, int remap_special_chars); 549 552 extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, 550 553 const int netfid, __u64 *pExtAttrBits, __u64 *pMask); 554 + #endif /* CIFS_ALLOW_INSECURE_LEGACY */ 551 555 extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); 552 556 extern bool couldbe_mf_symlink(const struct cifs_fattr *fattr); 553 557 extern int check_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
+14 -463
fs/cifs/cifssmb.c
··· 29 29 #include "cifsproto.h" 30 30 #include "cifs_unicode.h" 31 31 #include "cifs_debug.h" 32 - #include "smb2proto.h" 33 32 #include "fscache.h" 34 33 #include "smbdirect.h" 35 34 #ifdef CONFIG_CIFS_DFS_UPCALL ··· 61 62 #define CIFS_NUM_PROT 1 62 63 #endif /* CIFS_POSIX */ 63 64 64 - /* 65 - * Mark as invalid, all open files on tree connections since they 66 - * were closed when session to server was lost. 67 - */ 68 - void 69 - cifs_mark_open_files_invalid(struct cifs_tcon *tcon) 70 - { 71 - struct cifsFileInfo *open_file = NULL; 72 - struct list_head *tmp; 73 - struct list_head *tmp1; 74 - 75 - /* only send once per connect */ 76 - spin_lock(&cifs_tcp_ses_lock); 77 - if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) { 78 - spin_unlock(&cifs_tcp_ses_lock); 79 - return; 80 - } 81 - tcon->status = TID_IN_FILES_INVALIDATE; 82 - spin_unlock(&cifs_tcp_ses_lock); 83 - 84 - /* list all files open on tree connection and mark them invalid */ 85 - spin_lock(&tcon->open_file_lock); 86 - list_for_each_safe(tmp, tmp1, &tcon->openFileList) { 87 - open_file = list_entry(tmp, struct cifsFileInfo, tlist); 88 - open_file->invalidHandle = true; 89 - open_file->oplock_break_cancelled = true; 90 - } 91 - spin_unlock(&tcon->open_file_lock); 92 - 93 - mutex_lock(&tcon->crfid.fid_mutex); 94 - tcon->crfid.is_valid = false; 95 - /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ 96 - close_cached_dir_lease_locked(&tcon->crfid); 97 - memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid)); 98 - mutex_unlock(&tcon->crfid.fid_mutex); 99 - 100 - spin_lock(&cifs_tcp_ses_lock); 101 - if (tcon->status == TID_IN_FILES_INVALIDATE) 102 - tcon->status = TID_NEED_TCON; 103 - spin_unlock(&cifs_tcp_ses_lock); 104 - 105 - /* 106 - * BB Add call to invalidate_inodes(sb) for all superblocks mounted 107 - * to this tcon. 
108 - */ 109 - } 110 65 111 66 /* reconnect the socket, tcon, and smb session if needed */ 112 67 static int ··· 87 134 * only tree disconnect, open, and write, (and ulogoff which does not 88 135 * have tcon) are allowed as we start force umount 89 136 */ 90 - spin_lock(&cifs_tcp_ses_lock); 137 + spin_lock(&tcon->tc_lock); 91 138 if (tcon->status == TID_EXITING) { 92 139 if (smb_command != SMB_COM_WRITE_ANDX && 93 140 smb_command != SMB_COM_OPEN_ANDX && 94 141 smb_command != SMB_COM_TREE_DISCONNECT) { 95 - spin_unlock(&cifs_tcp_ses_lock); 142 + spin_unlock(&tcon->tc_lock); 96 143 cifs_dbg(FYI, "can not send cmd %d while umounting\n", 97 144 smb_command); 98 145 return -ENODEV; 99 146 } 100 147 } 101 - spin_unlock(&cifs_tcp_ses_lock); 148 + spin_unlock(&tcon->tc_lock); 102 149 103 150 retries = server->nr_targets; 104 151 ··· 118 165 } 119 166 120 167 /* are we still trying to reconnect? */ 121 - spin_lock(&cifs_tcp_ses_lock); 168 + spin_lock(&server->srv_lock); 122 169 if (server->tcpStatus != CifsNeedReconnect) { 123 - spin_unlock(&cifs_tcp_ses_lock); 170 + spin_unlock(&server->srv_lock); 124 171 break; 125 172 } 126 - spin_unlock(&cifs_tcp_ses_lock); 173 + spin_unlock(&server->srv_lock); 127 174 128 175 if (retries && --retries) 129 176 continue; ··· 154 201 * and the server never sends an answer the socket will be closed 155 202 * and tcpStatus set to reconnect. 
156 203 */ 157 - spin_lock(&cifs_tcp_ses_lock); 204 + spin_lock(&server->srv_lock); 158 205 if (server->tcpStatus == CifsNeedReconnect) { 159 - spin_unlock(&cifs_tcp_ses_lock); 206 + spin_unlock(&server->srv_lock); 160 207 rc = -EHOSTDOWN; 161 208 goto out; 162 209 } 163 - spin_unlock(&cifs_tcp_ses_lock); 210 + spin_unlock(&server->srv_lock); 164 211 165 212 /* 166 213 * need to prevent multiple threads trying to simultaneously ··· 410 457 return 0; 411 458 } 412 459 413 - int 414 - cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) 415 - { 416 - bool srv_sign_required = server->sec_mode & server->vals->signing_required; 417 - bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; 418 - bool mnt_sign_enabled = global_secflags & CIFSSEC_MAY_SIGN; 419 - 420 - /* 421 - * Is signing required by mnt options? If not then check 422 - * global_secflags to see if it is there. 423 - */ 424 - if (!mnt_sign_required) 425 - mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == 426 - CIFSSEC_MUST_SIGN); 427 - 428 - /* 429 - * If signing is required then it's automatically enabled too, 430 - * otherwise, check to see if the secflags allow it. 431 - */ 432 - mnt_sign_enabled = mnt_sign_required ? mnt_sign_required : 433 - (global_secflags & CIFSSEC_MAY_SIGN); 434 - 435 - /* If server requires signing, does client allow it? */ 436 - if (srv_sign_required) { 437 - if (!mnt_sign_enabled) { 438 - cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n"); 439 - return -ENOTSUPP; 440 - } 441 - server->sign = true; 442 - } 443 - 444 - /* If client requires signing, does server allow it? 
*/ 445 - if (mnt_sign_required) { 446 - if (!srv_sign_enabled) { 447 - cifs_dbg(VFS, "Server does not support signing!\n"); 448 - return -ENOTSUPP; 449 - } 450 - server->sign = true; 451 - } 452 - 453 - if (cifs_rdma_enabled(server) && server->sign) 454 - cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n"); 455 - 456 - return 0; 457 - } 458 - 459 460 static bool 460 461 should_set_ext_sec_flag(enum securityEnum sectype) 461 462 { ··· 591 684 struct TCP_Server_Info *server = mid->callback_data; 592 685 struct cifs_credits credits = { .value = 1, .instance = 0 }; 593 686 594 - DeleteMidQEntry(mid); 687 + release_mid(mid); 595 688 add_credits(server, &credits, CIFS_ECHO_OP); 596 689 } 597 690 ··· 1286 1379 return rc; 1287 1380 } 1288 1381 1289 - /* 1290 - * Discard any remaining data in the current SMB. To do this, we borrow the 1291 - * current bigbuf. 1292 - */ 1293 - int 1294 - cifs_discard_remaining_data(struct TCP_Server_Info *server) 1295 - { 1296 - unsigned int rfclen = server->pdu_size; 1297 - int remaining = rfclen + server->vals->header_preamble_size - 1298 - server->total_read; 1299 - 1300 - while (remaining > 0) { 1301 - int length; 1302 - 1303 - length = cifs_discard_from_socket(server, 1304 - min_t(size_t, remaining, 1305 - CIFSMaxBufSize + MAX_HEADER_SIZE(server))); 1306 - if (length < 0) 1307 - return length; 1308 - server->total_read += length; 1309 - remaining -= length; 1310 - } 1311 - 1312 - return 0; 1313 - } 1314 - 1315 - static int 1316 - __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, 1317 - bool malformed) 1318 - { 1319 - int length; 1320 - 1321 - length = cifs_discard_remaining_data(server); 1322 - dequeue_mid(mid, malformed); 1323 - mid->resp_buf = server->smallbuf; 1324 - server->smallbuf = NULL; 1325 - return length; 1326 - } 1327 - 1328 - static int 1329 - cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1330 - { 1331 - struct cifs_readdata *rdata = 
mid->callback_data; 1332 - 1333 - return __cifs_readv_discard(server, mid, rdata->result); 1334 - } 1335 - 1336 - int 1337 - cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1338 - { 1339 - int length, len; 1340 - unsigned int data_offset, data_len; 1341 - struct cifs_readdata *rdata = mid->callback_data; 1342 - char *buf = server->smallbuf; 1343 - unsigned int buflen = server->pdu_size + 1344 - server->vals->header_preamble_size; 1345 - bool use_rdma_mr = false; 1346 - 1347 - cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", 1348 - __func__, mid->mid, rdata->offset, rdata->bytes); 1349 - 1350 - /* 1351 - * read the rest of READ_RSP header (sans Data array), or whatever we 1352 - * can if there's not enough data. At this point, we've read down to 1353 - * the Mid. 1354 - */ 1355 - len = min_t(unsigned int, buflen, server->vals->read_rsp_size) - 1356 - HEADER_SIZE(server) + 1; 1357 - 1358 - length = cifs_read_from_socket(server, 1359 - buf + HEADER_SIZE(server) - 1, len); 1360 - if (length < 0) 1361 - return length; 1362 - server->total_read += length; 1363 - 1364 - if (server->ops->is_session_expired && 1365 - server->ops->is_session_expired(buf)) { 1366 - cifs_reconnect(server, true); 1367 - return -1; 1368 - } 1369 - 1370 - if (server->ops->is_status_pending && 1371 - server->ops->is_status_pending(buf, server)) { 1372 - cifs_discard_remaining_data(server); 1373 - return -1; 1374 - } 1375 - 1376 - /* set up first two iov for signature check and to get credits */ 1377 - rdata->iov[0].iov_base = buf; 1378 - rdata->iov[0].iov_len = server->vals->header_preamble_size; 1379 - rdata->iov[1].iov_base = buf + server->vals->header_preamble_size; 1380 - rdata->iov[1].iov_len = 1381 - server->total_read - server->vals->header_preamble_size; 1382 - cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", 1383 - rdata->iov[0].iov_base, rdata->iov[0].iov_len); 1384 - cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", 1385 - rdata->iov[1].iov_base, 
rdata->iov[1].iov_len); 1386 - 1387 - /* Was the SMB read successful? */ 1388 - rdata->result = server->ops->map_error(buf, false); 1389 - if (rdata->result != 0) { 1390 - cifs_dbg(FYI, "%s: server returned error %d\n", 1391 - __func__, rdata->result); 1392 - /* normal error on read response */ 1393 - return __cifs_readv_discard(server, mid, false); 1394 - } 1395 - 1396 - /* Is there enough to get to the rest of the READ_RSP header? */ 1397 - if (server->total_read < server->vals->read_rsp_size) { 1398 - cifs_dbg(FYI, "%s: server returned short header. got=%u expected=%zu\n", 1399 - __func__, server->total_read, 1400 - server->vals->read_rsp_size); 1401 - rdata->result = -EIO; 1402 - return cifs_readv_discard(server, mid); 1403 - } 1404 - 1405 - data_offset = server->ops->read_data_offset(buf) + 1406 - server->vals->header_preamble_size; 1407 - if (data_offset < server->total_read) { 1408 - /* 1409 - * win2k8 sometimes sends an offset of 0 when the read 1410 - * is beyond the EOF. Treat it as if the data starts just after 1411 - * the header. 
1412 - */ 1413 - cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", 1414 - __func__, data_offset); 1415 - data_offset = server->total_read; 1416 - } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { 1417 - /* data_offset is beyond the end of smallbuf */ 1418 - cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", 1419 - __func__, data_offset); 1420 - rdata->result = -EIO; 1421 - return cifs_readv_discard(server, mid); 1422 - } 1423 - 1424 - cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n", 1425 - __func__, server->total_read, data_offset); 1426 - 1427 - len = data_offset - server->total_read; 1428 - if (len > 0) { 1429 - /* read any junk before data into the rest of smallbuf */ 1430 - length = cifs_read_from_socket(server, 1431 - buf + server->total_read, len); 1432 - if (length < 0) 1433 - return length; 1434 - server->total_read += length; 1435 - } 1436 - 1437 - /* how much data is in the response? */ 1438 - #ifdef CONFIG_CIFS_SMB_DIRECT 1439 - use_rdma_mr = rdata->mr; 1440 - #endif 1441 - data_len = server->ops->read_data_length(buf, use_rdma_mr); 1442 - if (!use_rdma_mr && (data_offset + data_len > buflen)) { 1443 - /* data_len is corrupt -- discard frame */ 1444 - rdata->result = -EIO; 1445 - return cifs_readv_discard(server, mid); 1446 - } 1447 - 1448 - length = rdata->read_into_pages(server, rdata, data_len); 1449 - if (length < 0) 1450 - return length; 1451 - 1452 - server->total_read += length; 1453 - 1454 - cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", 1455 - server->total_read, buflen, data_len); 1456 - 1457 - /* discard anything left over */ 1458 - if (server->total_read < buflen) 1459 - return cifs_readv_discard(server, mid); 1460 - 1461 - dequeue_mid(mid, false); 1462 - mid->resp_buf = server->smallbuf; 1463 - server->smallbuf = NULL; 1464 - return length; 1465 - } 1466 - 1467 1382 static void 1468 1383 cifs_readv_callback(struct mid_q_entry *mid) 1469 1384 { ··· 1336 1607 } 1337 1608 1338 1609 
queue_work(cifsiod_wq, &rdata->work); 1339 - DeleteMidQEntry(mid); 1610 + release_mid(mid); 1340 1611 add_credits(server, &credits, 0); 1341 1612 } 1342 1613 ··· 1638 1909 return rc; 1639 1910 } 1640 1911 1641 - void 1642 - cifs_writedata_release(struct kref *refcount) 1643 - { 1644 - struct cifs_writedata *wdata = container_of(refcount, 1645 - struct cifs_writedata, refcount); 1646 - #ifdef CONFIG_CIFS_SMB_DIRECT 1647 - if (wdata->mr) { 1648 - smbd_deregister_mr(wdata->mr); 1649 - wdata->mr = NULL; 1650 - } 1651 - #endif 1652 - 1653 - if (wdata->cfile) 1654 - cifsFileInfo_put(wdata->cfile); 1655 - 1656 - kvfree(wdata->pages); 1657 - kfree(wdata); 1658 - } 1659 - 1660 - /* 1661 - * Write failed with a retryable error. Resend the write request. It's also 1662 - * possible that the page was redirtied so re-clean the page. 1663 - */ 1664 - static void 1665 - cifs_writev_requeue(struct cifs_writedata *wdata) 1666 - { 1667 - int i, rc = 0; 1668 - struct inode *inode = d_inode(wdata->cfile->dentry); 1669 - struct TCP_Server_Info *server; 1670 - unsigned int rest_len; 1671 - 1672 - server = tlink_tcon(wdata->cfile->tlink)->ses->server; 1673 - i = 0; 1674 - rest_len = wdata->bytes; 1675 - do { 1676 - struct cifs_writedata *wdata2; 1677 - unsigned int j, nr_pages, wsize, tailsz, cur_len; 1678 - 1679 - wsize = server->ops->wp_retry_size(inode); 1680 - if (wsize < rest_len) { 1681 - nr_pages = wsize / PAGE_SIZE; 1682 - if (!nr_pages) { 1683 - rc = -ENOTSUPP; 1684 - break; 1685 - } 1686 - cur_len = nr_pages * PAGE_SIZE; 1687 - tailsz = PAGE_SIZE; 1688 - } else { 1689 - nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE); 1690 - cur_len = rest_len; 1691 - tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; 1692 - } 1693 - 1694 - wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); 1695 - if (!wdata2) { 1696 - rc = -ENOMEM; 1697 - break; 1698 - } 1699 - 1700 - for (j = 0; j < nr_pages; j++) { 1701 - wdata2->pages[j] = wdata->pages[i + j]; 1702 - lock_page(wdata2->pages[j]); 1703 
- clear_page_dirty_for_io(wdata2->pages[j]); 1704 - } 1705 - 1706 - wdata2->sync_mode = wdata->sync_mode; 1707 - wdata2->nr_pages = nr_pages; 1708 - wdata2->offset = page_offset(wdata2->pages[0]); 1709 - wdata2->pagesz = PAGE_SIZE; 1710 - wdata2->tailsz = tailsz; 1711 - wdata2->bytes = cur_len; 1712 - 1713 - rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, 1714 - &wdata2->cfile); 1715 - if (!wdata2->cfile) { 1716 - cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n", 1717 - rc); 1718 - if (!is_retryable_error(rc)) 1719 - rc = -EBADF; 1720 - } else { 1721 - wdata2->pid = wdata2->cfile->pid; 1722 - rc = server->ops->async_writev(wdata2, 1723 - cifs_writedata_release); 1724 - } 1725 - 1726 - for (j = 0; j < nr_pages; j++) { 1727 - unlock_page(wdata2->pages[j]); 1728 - if (rc != 0 && !is_retryable_error(rc)) { 1729 - SetPageError(wdata2->pages[j]); 1730 - end_page_writeback(wdata2->pages[j]); 1731 - put_page(wdata2->pages[j]); 1732 - } 1733 - } 1734 - 1735 - kref_put(&wdata2->refcount, cifs_writedata_release); 1736 - if (rc) { 1737 - if (is_retryable_error(rc)) 1738 - continue; 1739 - i += nr_pages; 1740 - break; 1741 - } 1742 - 1743 - rest_len -= cur_len; 1744 - i += nr_pages; 1745 - } while (i < wdata->nr_pages); 1746 - 1747 - /* cleanup remaining pages from the original wdata */ 1748 - for (; i < wdata->nr_pages; i++) { 1749 - SetPageError(wdata->pages[i]); 1750 - end_page_writeback(wdata->pages[i]); 1751 - put_page(wdata->pages[i]); 1752 - } 1753 - 1754 - if (rc != 0 && !is_retryable_error(rc)) 1755 - mapping_set_error(inode->i_mapping, rc); 1756 - kref_put(&wdata->refcount, cifs_writedata_release); 1757 - } 1758 - 1759 - void 1760 - cifs_writev_complete(struct work_struct *work) 1761 - { 1762 - struct cifs_writedata *wdata = container_of(work, 1763 - struct cifs_writedata, work); 1764 - struct inode *inode = d_inode(wdata->cfile->dentry); 1765 - int i = 0; 1766 - 1767 - if (wdata->result == 0) { 1768 - spin_lock(&inode->i_lock); 1769 - 
cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); 1770 - spin_unlock(&inode->i_lock); 1771 - cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), 1772 - wdata->bytes); 1773 - } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) 1774 - return cifs_writev_requeue(wdata); 1775 - 1776 - for (i = 0; i < wdata->nr_pages; i++) { 1777 - struct page *page = wdata->pages[i]; 1778 - if (wdata->result == -EAGAIN) 1779 - __set_page_dirty_nobuffers(page); 1780 - else if (wdata->result < 0) 1781 - SetPageError(page); 1782 - end_page_writeback(page); 1783 - cifs_readpage_to_fscache(inode, page); 1784 - put_page(page); 1785 - } 1786 - if (wdata->result != -EAGAIN) 1787 - mapping_set_error(inode->i_mapping, wdata->result); 1788 - kref_put(&wdata->refcount, cifs_writedata_release); 1789 - } 1790 - 1791 - struct cifs_writedata * 1792 - cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete) 1793 - { 1794 - struct page **pages = 1795 - kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 1796 - if (pages) 1797 - return cifs_writedata_direct_alloc(pages, complete); 1798 - 1799 - return NULL; 1800 - } 1801 - 1802 - struct cifs_writedata * 1803 - cifs_writedata_direct_alloc(struct page **pages, work_func_t complete) 1804 - { 1805 - struct cifs_writedata *wdata; 1806 - 1807 - wdata = kzalloc(sizeof(*wdata), GFP_NOFS); 1808 - if (wdata != NULL) { 1809 - wdata->pages = pages; 1810 - kref_init(&wdata->refcount); 1811 - INIT_LIST_HEAD(&wdata->list); 1812 - init_completion(&wdata->done); 1813 - INIT_WORK(&wdata->work, complete); 1814 - } 1815 - return wdata; 1816 - } 1817 - 1818 1912 /* 1819 1913 * Check the mid_state and signature on received buffer (if any), and queue the 1820 1914 * workqueue completion task. 
··· 1684 2132 } 1685 2133 1686 2134 queue_work(cifsiod_wq, &wdata->work); 1687 - DeleteMidQEntry(mid); 2135 + release_mid(mid); 1688 2136 add_credits(tcon->ses->server, &credits, 0); 1689 2137 } 1690 2138 ··· 3212 3660 return rc; 3213 3661 } 3214 3662 3215 - /* BB fix tabs in this function FIXME BB */ 3216 3663 int 3217 3664 CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon, 3218 3665 const int netfid, __u64 *pExtAttrBits, __u64 *pMask) ··· 3228 3677 3229 3678 GetExtAttrRetry: 3230 3679 rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB, 3231 - (void **) &pSMBr); 3680 + (void **) &pSMBr); 3232 3681 if (rc) 3233 3682 return rc; 3234 3683 ··· 3274 3723 __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset); 3275 3724 __u16 count = le16_to_cpu(pSMBr->t2.DataCount); 3276 3725 struct file_chattr_info *pfinfo; 3277 - /* BB Do we need a cast or hash here ? */ 3726 + 3278 3727 if (count != 16) { 3279 3728 cifs_dbg(FYI, "Invalid size ret in GetExtAttr\n"); 3280 3729 rc = -EIO;
+187 -112
fs/cifs/connect.c
··· 119 119 goto requeue_resolve; 120 120 } 121 121 122 - spin_lock(&cifs_tcp_ses_lock); 122 + spin_lock(&server->srv_lock); 123 123 rc = cifs_convert_address((struct sockaddr *)&server->dstaddr, ipaddr, 124 124 strlen(ipaddr)); 125 - spin_unlock(&cifs_tcp_ses_lock); 125 + spin_unlock(&server->srv_lock); 126 126 kfree(ipaddr); 127 127 128 128 /* rc == 1 means success here */ ··· 205 205 /* If server is a channel, select the primary channel */ 206 206 pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server; 207 207 208 - spin_lock(&cifs_tcp_ses_lock); 208 + spin_lock(&pserver->srv_lock); 209 209 if (!all_channels) { 210 210 pserver->tcpStatus = CifsNeedReconnect; 211 - spin_unlock(&cifs_tcp_ses_lock); 211 + spin_unlock(&pserver->srv_lock); 212 212 return; 213 213 } 214 + spin_unlock(&pserver->srv_lock); 214 215 216 + spin_lock(&cifs_tcp_ses_lock); 215 217 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) { 216 218 spin_lock(&ses->chan_lock); 217 - for (i = 0; i < ses->chan_count; i++) 219 + for (i = 0; i < ses->chan_count; i++) { 220 + spin_lock(&ses->chans[i].server->srv_lock); 218 221 ses->chans[i].server->tcpStatus = CifsNeedReconnect; 222 + spin_unlock(&ses->chans[i].server->srv_lock); 223 + } 219 224 spin_unlock(&ses->chan_lock); 220 225 } 221 226 spin_unlock(&cifs_tcp_ses_lock); ··· 257 252 spin_lock(&cifs_tcp_ses_lock); 258 253 list_for_each_entry_safe(ses, nses, &pserver->smb_ses_list, smb_ses_list) { 259 254 /* check if iface is still active */ 260 - if (!cifs_chan_is_iface_active(ses, server)) { 261 - /* 262 - * HACK: drop the lock before calling 263 - * cifs_chan_update_iface to avoid deadlock 264 - */ 265 - ses->ses_count++; 266 - spin_unlock(&cifs_tcp_ses_lock); 255 + if (!cifs_chan_is_iface_active(ses, server)) 267 256 cifs_chan_update_iface(ses, server); 268 - spin_lock(&cifs_tcp_ses_lock); 269 - ses->ses_count--; 270 - } 271 257 272 258 spin_lock(&ses->chan_lock); 273 259 if (!mark_smb_session && 
cifs_chan_needs_reconnect(ses, server)) ··· 319 323 /* mark submitted MIDs for retry and issue callback */ 320 324 INIT_LIST_HEAD(&retry_list); 321 325 cifs_dbg(FYI, "%s: moving mids to private list\n", __func__); 322 - spin_lock(&GlobalMid_Lock); 326 + spin_lock(&server->mid_lock); 323 327 list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) { 324 328 kref_get(&mid->refcount); 325 329 if (mid->mid_state == MID_REQUEST_SUBMITTED) ··· 327 331 list_move(&mid->qhead, &retry_list); 328 332 mid->mid_flags |= MID_DELETED; 329 333 } 330 - spin_unlock(&GlobalMid_Lock); 334 + spin_unlock(&server->mid_lock); 331 335 cifs_server_unlock(server); 332 336 333 337 cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__); 334 338 list_for_each_entry_safe(mid, nmid, &retry_list, qhead) { 335 339 list_del_init(&mid->qhead); 336 340 mid->callback(mid); 337 - cifs_mid_q_entry_release(mid); 341 + release_mid(mid); 338 342 } 339 343 340 344 if (cifs_rdma_enabled(server)) { ··· 346 350 347 351 static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets) 348 352 { 349 - spin_lock(&cifs_tcp_ses_lock); 353 + spin_lock(&server->srv_lock); 350 354 server->nr_targets = num_targets; 351 355 if (server->tcpStatus == CifsExiting) { 352 356 /* the demux thread will exit normally next time through the loop */ 353 - spin_unlock(&cifs_tcp_ses_lock); 357 + spin_unlock(&server->srv_lock); 354 358 wake_up(&server->response_q); 355 359 return false; 356 360 } ··· 360 364 server->hostname); 361 365 server->tcpStatus = CifsNeedReconnect; 362 366 363 - spin_unlock(&cifs_tcp_ses_lock); 367 + spin_unlock(&server->srv_lock); 364 368 return true; 365 369 } 366 370 ··· 410 414 } else { 411 415 atomic_inc(&tcpSesReconnectCount); 412 416 set_credits(server, 1); 413 - spin_lock(&cifs_tcp_ses_lock); 417 + spin_lock(&server->srv_lock); 414 418 if (server->tcpStatus != CifsExiting) 415 419 server->tcpStatus = CifsNeedNegotiate; 416 - spin_unlock(&cifs_tcp_ses_lock); 420 + 
spin_unlock(&server->srv_lock); 417 421 cifs_swn_reset_server_dstaddr(server); 418 422 cifs_server_unlock(server); 419 423 mod_delayed_work(cifsiod_wq, &server->reconnect, 0); 420 424 } 421 425 } while (server->tcpStatus == CifsNeedReconnect); 422 426 423 - spin_lock(&cifs_tcp_ses_lock); 427 + spin_lock(&server->srv_lock); 424 428 if (server->tcpStatus == CifsNeedNegotiate) 425 429 mod_delayed_work(cifsiod_wq, &server->echo, 0); 426 - spin_unlock(&cifs_tcp_ses_lock); 430 + spin_unlock(&server->srv_lock); 427 431 428 432 wake_up(&server->response_q); 429 433 return rc; ··· 537 541 */ 538 542 atomic_inc(&tcpSesReconnectCount); 539 543 set_credits(server, 1); 540 - spin_lock(&cifs_tcp_ses_lock); 544 + spin_lock(&server->srv_lock); 541 545 if (server->tcpStatus != CifsExiting) 542 546 server->tcpStatus = CifsNeedNegotiate; 543 - spin_unlock(&cifs_tcp_ses_lock); 547 + spin_unlock(&server->srv_lock); 544 548 cifs_swn_reset_server_dstaddr(server); 545 549 cifs_server_unlock(server); 546 550 mod_delayed_work(cifsiod_wq, &server->reconnect, 0); ··· 552 556 dfs_cache_free_tgts(&tl); 553 557 554 558 /* Need to set up echo worker again once connection has been established */ 555 - spin_lock(&cifs_tcp_ses_lock); 559 + spin_lock(&server->srv_lock); 556 560 if (server->tcpStatus == CifsNeedNegotiate) 557 561 mod_delayed_work(cifsiod_wq, &server->echo, 0); 558 - 559 - spin_unlock(&cifs_tcp_ses_lock); 562 + spin_unlock(&server->srv_lock); 560 563 561 564 wake_up(&server->response_q); 562 565 return rc; ··· 564 569 int cifs_reconnect(struct TCP_Server_Info *server, bool mark_smb_session) 565 570 { 566 571 /* If tcp session is not an dfs connection, then reconnect to last target server */ 567 - spin_lock(&cifs_tcp_ses_lock); 572 + spin_lock(&server->srv_lock); 568 573 if (!server->is_dfs_conn) { 569 - spin_unlock(&cifs_tcp_ses_lock); 574 + spin_unlock(&server->srv_lock); 570 575 return __cifs_reconnect(server, mark_smb_session); 571 576 } 572 - spin_unlock(&cifs_tcp_ses_lock); 577 + 
spin_unlock(&server->srv_lock); 573 578 574 579 mutex_lock(&server->refpath_lock); 575 580 if (!server->origin_fullpath || !server->leaf_fullpath) { ··· 665 670 * 65s kernel_recvmsg times out, and we see that we haven't gotten 666 671 * a response in >60s. 667 672 */ 668 - spin_lock(&cifs_tcp_ses_lock); 673 + spin_lock(&server->srv_lock); 669 674 if ((server->tcpStatus == CifsGood || 670 675 server->tcpStatus == CifsNeedNegotiate) && 671 676 (!server->ops->can_echo || server->ops->can_echo(server)) && 672 677 time_after(jiffies, server->lstrp + 3 * server->echo_interval)) { 673 - spin_unlock(&cifs_tcp_ses_lock); 678 + spin_unlock(&server->srv_lock); 674 679 cifs_server_dbg(VFS, "has not responded in %lu seconds. Reconnecting...\n", 675 680 (3 * server->echo_interval) / HZ); 676 681 cifs_reconnect(server, false); 677 682 return true; 678 683 } 679 - spin_unlock(&cifs_tcp_ses_lock); 684 + spin_unlock(&server->srv_lock); 680 685 681 686 return false; 682 687 } ··· 721 726 else 722 727 length = sock_recvmsg(server->ssocket, smb_msg, 0); 723 728 724 - spin_lock(&cifs_tcp_ses_lock); 729 + spin_lock(&server->srv_lock); 725 730 if (server->tcpStatus == CifsExiting) { 726 - spin_unlock(&cifs_tcp_ses_lock); 731 + spin_unlock(&server->srv_lock); 727 732 return -ESHUTDOWN; 728 733 } 729 734 730 735 if (server->tcpStatus == CifsNeedReconnect) { 731 - spin_unlock(&cifs_tcp_ses_lock); 736 + spin_unlock(&server->srv_lock); 732 737 cifs_reconnect(server, false); 733 738 return -ECONNABORTED; 734 739 } 735 - spin_unlock(&cifs_tcp_ses_lock); 740 + spin_unlock(&server->srv_lock); 736 741 737 742 if (length == -ERESTARTSYS || 738 743 length == -EAGAIN || ··· 844 849 #ifdef CONFIG_CIFS_STATS2 845 850 mid->when_received = jiffies; 846 851 #endif 847 - spin_lock(&GlobalMid_Lock); 852 + spin_lock(&mid->server->mid_lock); 848 853 if (!malformed) 849 854 mid->mid_state = MID_RESPONSE_RECEIVED; 850 855 else ··· 854 859 * function has finished processing it is a bug. 
855 860 */ 856 861 if (mid->mid_flags & MID_DELETED) { 857 - spin_unlock(&GlobalMid_Lock); 862 + spin_unlock(&mid->server->mid_lock); 858 863 pr_warn_once("trying to dequeue a deleted mid\n"); 859 864 } else { 860 865 list_del_init(&mid->qhead); 861 866 mid->mid_flags |= MID_DELETED; 862 - spin_unlock(&GlobalMid_Lock); 867 + spin_unlock(&mid->server->mid_lock); 863 868 } 864 869 } 865 870 ··· 898 903 dequeue_mid(mid, malformed); 899 904 } 900 905 906 + int 907 + cifs_enable_signing(struct TCP_Server_Info *server, bool mnt_sign_required) 908 + { 909 + bool srv_sign_required = server->sec_mode & server->vals->signing_required; 910 + bool srv_sign_enabled = server->sec_mode & server->vals->signing_enabled; 911 + bool mnt_sign_enabled; 912 + 913 + /* 914 + * Is signing required by mnt options? If not then check 915 + * global_secflags to see if it is there. 916 + */ 917 + if (!mnt_sign_required) 918 + mnt_sign_required = ((global_secflags & CIFSSEC_MUST_SIGN) == 919 + CIFSSEC_MUST_SIGN); 920 + 921 + /* 922 + * If signing is required then it's automatically enabled too, 923 + * otherwise, check to see if the secflags allow it. 924 + */ 925 + mnt_sign_enabled = mnt_sign_required ? mnt_sign_required : 926 + (global_secflags & CIFSSEC_MAY_SIGN); 927 + 928 + /* If server requires signing, does client allow it? */ 929 + if (srv_sign_required) { 930 + if (!mnt_sign_enabled) { 931 + cifs_dbg(VFS, "Server requires signing, but it's disabled in SecurityFlags!\n"); 932 + return -EOPNOTSUPP; 933 + } 934 + server->sign = true; 935 + } 936 + 937 + /* If client requires signing, does server allow it? 
*/ 938 + if (mnt_sign_required) { 939 + if (!srv_sign_enabled) { 940 + cifs_dbg(VFS, "Server does not support signing!\n"); 941 + return -EOPNOTSUPP; 942 + } 943 + server->sign = true; 944 + } 945 + 946 + if (cifs_rdma_enabled(server) && server->sign) 947 + cifs_dbg(VFS, "Signing is enabled, and RDMA read/write will be disabled\n"); 948 + 949 + return 0; 950 + } 951 + 952 + 901 953 static void clean_demultiplex_info(struct TCP_Server_Info *server) 902 954 { 903 955 int length; 904 956 905 957 /* take it off the list, if it's not already */ 906 - spin_lock(&cifs_tcp_ses_lock); 958 + spin_lock(&server->srv_lock); 907 959 list_del_init(&server->tcp_ses_list); 908 - spin_unlock(&cifs_tcp_ses_lock); 960 + spin_unlock(&server->srv_lock); 909 961 910 962 cancel_delayed_work_sync(&server->echo); 911 963 cancel_delayed_work_sync(&server->resolve); 912 964 913 - spin_lock(&cifs_tcp_ses_lock); 965 + spin_lock(&server->srv_lock); 914 966 server->tcpStatus = CifsExiting; 915 - spin_unlock(&cifs_tcp_ses_lock); 967 + spin_unlock(&server->srv_lock); 916 968 wake_up_all(&server->response_q); 917 969 918 970 /* check if we have blocked requests that need to free */ ··· 990 948 struct list_head *tmp, *tmp2; 991 949 992 950 INIT_LIST_HEAD(&dispose_list); 993 - spin_lock(&GlobalMid_Lock); 951 + spin_lock(&server->mid_lock); 994 952 list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { 995 953 mid_entry = list_entry(tmp, struct mid_q_entry, qhead); 996 954 cifs_dbg(FYI, "Clearing mid %llu\n", mid_entry->mid); ··· 999 957 list_move(&mid_entry->qhead, &dispose_list); 1000 958 mid_entry->mid_flags |= MID_DELETED; 1001 959 } 1002 - spin_unlock(&GlobalMid_Lock); 960 + spin_unlock(&server->mid_lock); 1003 961 1004 962 /* now walk dispose list and issue callbacks */ 1005 963 list_for_each_safe(tmp, tmp2, &dispose_list) { ··· 1007 965 cifs_dbg(FYI, "Callback mid %llu\n", mid_entry->mid); 1008 966 list_del_init(&mid_entry->qhead); 1009 967 mid_entry->callback(mid_entry); 1010 - 
cifs_mid_q_entry_release(mid_entry); 968 + release_mid(mid_entry); 1011 969 } 1012 970 /* 1/8th of sec is more than enough time for them to exit */ 1013 971 msleep(125); ··· 1081 1039 cifs_handle_standard(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1082 1040 { 1083 1041 char *buf = server->large_buf ? server->bigbuf : server->smallbuf; 1084 - int length; 1042 + int rc; 1085 1043 1086 1044 /* 1087 1045 * We know that we received enough to get to the MID as we 1088 1046 * checked the pdu_length earlier. Now check to see 1089 - * if the rest of the header is OK. We borrow the length 1090 - * var for the rest of the loop to avoid a new stack var. 1047 + * if the rest of the header is OK. 1091 1048 * 1092 1049 * 48 bytes is enough to display the header and a little bit 1093 1050 * into the payload for debugging purposes. 1094 1051 */ 1095 - length = server->ops->check_message(buf, server->total_read, server); 1096 - if (length != 0) 1052 + rc = server->ops->check_message(buf, server->total_read, server); 1053 + if (rc) 1097 1054 cifs_dump_mem("Bad SMB: ", buf, 1098 1055 min_t(unsigned int, server->total_read, 48)); 1099 1056 ··· 1107 1066 return -1; 1108 1067 1109 1068 if (!mid) 1110 - return length; 1069 + return rc; 1111 1070 1112 - handle_mid(mid, server, buf, length); 1071 + handle_mid(mid, server, buf, rc); 1113 1072 return 0; 1114 1073 } 1115 1074 ··· 1246 1205 if (length < 0) { 1247 1206 for (i = 0; i < num_mids; i++) 1248 1207 if (mids[i]) 1249 - cifs_mid_q_entry_release(mids[i]); 1208 + release_mid(mids[i]); 1250 1209 continue; 1251 1210 } 1252 1211 ··· 1273 1232 if (!mids[i]->multiRsp || mids[i]->multiEnd) 1274 1233 mids[i]->callback(mids[i]); 1275 1234 1276 - cifs_mid_q_entry_release(mids[i]); 1235 + release_mid(mids[i]); 1277 1236 } else if (server->ops->is_oplock_break && 1278 1237 server->ops->is_oplock_break(bufs[i], 1279 1238 server)) { ··· 1281 1240 cifs_dbg(FYI, "Received oplock break\n"); 1282 1241 } else { 1283 1242 cifs_server_dbg(VFS, 
"No task to wake, unknown frame received! NumMids %d\n", 1284 - atomic_read(&midCount)); 1243 + atomic_read(&mid_count)); 1285 1244 cifs_dump_mem("Received Data is: ", bufs[i], 1286 1245 HEADER_SIZE(server)); 1287 1246 smb2_add_credits_from_hdr(bufs[i], server); ··· 1452 1411 return true; 1453 1412 } 1454 1413 1414 + /* this function must be called with srv_lock held */ 1455 1415 static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) 1456 1416 { 1457 1417 struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr; ··· 1513 1471 1514 1472 spin_lock(&cifs_tcp_ses_lock); 1515 1473 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 1474 + spin_lock(&server->srv_lock); 1516 1475 #ifdef CONFIG_CIFS_DFS_UPCALL 1517 1476 /* 1518 1477 * DFS failover implementation in cifs_reconnect() requires unique tcp sessions for ··· 1521 1478 * shares or even links that may connect to same server but having completely 1522 1479 * different failover targets. 1523 1480 */ 1524 - if (server->is_dfs_conn) 1481 + if (server->is_dfs_conn) { 1482 + spin_unlock(&server->srv_lock); 1525 1483 continue; 1484 + } 1526 1485 #endif 1527 1486 /* 1528 1487 * Skip ses channels since they're only handled in lower layers 1529 1488 * (e.g. cifs_send_recv). 
1530 1489 */ 1531 - if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) 1490 + if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx)) { 1491 + spin_unlock(&server->srv_lock); 1532 1492 continue; 1493 + } 1494 + spin_unlock(&server->srv_lock); 1533 1495 1534 1496 ++server->srv_count; 1535 1497 spin_unlock(&cifs_tcp_ses_lock); ··· 1582 1534 else 1583 1535 cancel_delayed_work_sync(&server->reconnect); 1584 1536 1585 - spin_lock(&cifs_tcp_ses_lock); 1537 + spin_lock(&server->srv_lock); 1586 1538 server->tcpStatus = CifsExiting; 1587 - spin_unlock(&cifs_tcp_ses_lock); 1539 + spin_unlock(&server->srv_lock); 1588 1540 1589 1541 cifs_crypto_secmech_release(server); 1590 1542 ··· 1643 1595 if (primary_server) { 1644 1596 spin_lock(&cifs_tcp_ses_lock); 1645 1597 ++primary_server->srv_count; 1646 - tcp_ses->primary_server = primary_server; 1647 1598 spin_unlock(&cifs_tcp_ses_lock); 1599 + tcp_ses->primary_server = primary_server; 1648 1600 } 1649 1601 init_waitqueue_head(&tcp_ses->response_q); 1650 1602 init_waitqueue_head(&tcp_ses->request_q); ··· 1660 1612 tcp_ses->lstrp = jiffies; 1661 1613 tcp_ses->compress_algorithm = cpu_to_le16(ctx->compression); 1662 1614 spin_lock_init(&tcp_ses->req_lock); 1615 + spin_lock_init(&tcp_ses->srv_lock); 1616 + spin_lock_init(&tcp_ses->mid_lock); 1663 1617 INIT_LIST_HEAD(&tcp_ses->tcp_ses_list); 1664 1618 INIT_LIST_HEAD(&tcp_ses->smb_ses_list); 1665 1619 INIT_DELAYED_WORK(&tcp_ses->echo, cifs_echo_request); ··· 1735 1685 * to the struct since the kernel thread not created yet 1736 1686 * no need to spinlock this update of tcpStatus 1737 1687 */ 1738 - spin_lock(&cifs_tcp_ses_lock); 1688 + spin_lock(&tcp_ses->srv_lock); 1739 1689 tcp_ses->tcpStatus = CifsNeedNegotiate; 1740 - spin_unlock(&cifs_tcp_ses_lock); 1690 + spin_unlock(&tcp_ses->srv_lock); 1741 1691 1742 1692 if ((ctx->max_credits < 20) || (ctx->max_credits > 60000)) 1743 1693 tcp_ses->max_credits = SMB2_MAX_CREDITS_AVAILABLE; ··· 1779 1729 return ERR_PTR(rc); 
1780 1730 } 1781 1731 1732 + /* this function must be called with ses_lock held */ 1782 1733 static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx) 1783 1734 { 1784 1735 if (ctx->sectype != Unspecified && ··· 1915 1864 1916 1865 spin_lock(&cifs_tcp_ses_lock); 1917 1866 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 1918 - if (ses->ses_status == SES_EXITING) 1867 + spin_lock(&ses->ses_lock); 1868 + if (ses->ses_status == SES_EXITING) { 1869 + spin_unlock(&ses->ses_lock); 1919 1870 continue; 1920 - if (!match_session(ses, ctx)) 1871 + } 1872 + if (!match_session(ses, ctx)) { 1873 + spin_unlock(&ses->ses_lock); 1921 1874 continue; 1875 + } 1876 + spin_unlock(&ses->ses_lock); 1877 + 1922 1878 ++ses->ses_count; 1923 1879 spin_unlock(&cifs_tcp_ses_lock); 1924 1880 return ses; ··· 1940 1882 unsigned int chan_count; 1941 1883 struct TCP_Server_Info *server = ses->server; 1942 1884 1943 - spin_lock(&cifs_tcp_ses_lock); 1885 + spin_lock(&ses->ses_lock); 1944 1886 if (ses->ses_status == SES_EXITING) { 1945 - spin_unlock(&cifs_tcp_ses_lock); 1887 + spin_unlock(&ses->ses_lock); 1946 1888 return; 1947 1889 } 1890 + spin_unlock(&ses->ses_lock); 1948 1891 1949 1892 cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count); 1950 1893 cifs_dbg(FYI, "%s: ses ipc: %s\n", __func__, ses->tcon_ipc ? 
ses->tcon_ipc->treeName : "NONE"); 1951 1894 1895 + spin_lock(&cifs_tcp_ses_lock); 1952 1896 if (--ses->ses_count > 0) { 1953 1897 spin_unlock(&cifs_tcp_ses_lock); 1954 1898 return; 1955 1899 } 1900 + spin_unlock(&cifs_tcp_ses_lock); 1956 1901 1957 1902 /* ses_count can never go negative */ 1958 1903 WARN_ON(ses->ses_count < 0); 1959 1904 1960 1905 if (ses->ses_status == SES_GOOD) 1961 1906 ses->ses_status = SES_EXITING; 1962 - spin_unlock(&cifs_tcp_ses_lock); 1963 1907 1964 1908 cifs_free_ipc(ses); 1965 1909 ··· 2296 2236 return ERR_PTR(rc); 2297 2237 } 2298 2238 2239 + /* this function must be called with tc_lock held */ 2299 2240 static int match_tcon(struct cifs_tcon *tcon, struct smb3_fs_context *ctx) 2300 2241 { 2301 2242 if (tcon->status == TID_EXITING) ··· 2319 2258 static struct cifs_tcon * 2320 2259 cifs_find_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx) 2321 2260 { 2322 - struct list_head *tmp; 2323 2261 struct cifs_tcon *tcon; 2324 2262 2325 2263 spin_lock(&cifs_tcp_ses_lock); 2326 - list_for_each(tmp, &ses->tcon_list) { 2327 - tcon = list_entry(tmp, struct cifs_tcon, tcon_list); 2328 - 2329 - if (!match_tcon(tcon, ctx)) 2264 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 2265 + spin_lock(&tcon->tc_lock); 2266 + if (!match_tcon(tcon, ctx)) { 2267 + spin_unlock(&tcon->tc_lock); 2330 2268 continue; 2269 + } 2331 2270 ++tcon->tc_count; 2271 + spin_unlock(&tcon->tc_lock); 2332 2272 spin_unlock(&cifs_tcp_ses_lock); 2333 2273 return tcon; 2334 2274 } ··· 2706 2644 int 2707 2645 cifs_match_super(struct super_block *sb, void *data) 2708 2646 { 2709 - struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data; 2647 + struct cifs_mnt_data *mnt_data = data; 2710 2648 struct smb3_fs_context *ctx; 2711 2649 struct cifs_sb_info *cifs_sb; 2712 2650 struct TCP_Server_Info *tcp_srv; ··· 2729 2667 2730 2668 ctx = mnt_data->ctx; 2731 2669 2670 + spin_lock(&tcp_srv->srv_lock); 2671 + spin_lock(&ses->ses_lock); 2672 + spin_lock(&tcon->tc_lock); 2732 
2673 if (!match_server(tcp_srv, ctx) || 2733 2674 !match_session(ses, ctx) || 2734 2675 !match_tcon(tcon, ctx) || ··· 2742 2677 2743 2678 rc = compare_mount_options(sb, mnt_data); 2744 2679 out: 2680 + spin_unlock(&tcon->tc_lock); 2681 + spin_unlock(&ses->ses_lock); 2682 + spin_unlock(&tcp_srv->srv_lock); 2683 + 2745 2684 spin_unlock(&cifs_tcp_ses_lock); 2746 2685 cifs_put_tlink(tlink); 2747 2686 return rc; ··· 3023 2954 return generic_ip_connect(server); 3024 2955 } 3025 2956 2957 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3026 2958 void reset_cifs_unix_caps(unsigned int xid, struct cifs_tcon *tcon, 3027 2959 struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) 3028 2960 { ··· 3129 3059 } 3130 3060 } 3131 3061 } 3062 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3132 3063 3133 3064 int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb) 3134 3065 { ··· 3246 3175 if (tcon->posix_extensions) 3247 3176 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS; 3248 3177 3178 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3249 3179 /* tell server which Unix caps we support */ 3250 3180 if (cap_unix(tcon->ses)) { 3251 3181 /* ··· 3254 3182 * for just this mount. 
3255 3183 */ 3256 3184 reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx); 3257 - spin_lock(&cifs_tcp_ses_lock); 3185 + spin_lock(&tcon->ses->server->srv_lock); 3258 3186 if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && 3259 3187 (le64_to_cpu(tcon->fsUnixInfo.Capability) & 3260 3188 CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { 3261 - spin_unlock(&cifs_tcp_ses_lock); 3189 + spin_unlock(&tcon->ses->server->srv_lock); 3262 3190 rc = -EACCES; 3263 3191 goto out; 3264 3192 } 3265 - spin_unlock(&cifs_tcp_ses_lock); 3193 + spin_unlock(&tcon->ses->server->srv_lock); 3266 3194 } else 3195 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3267 3196 tcon->unix_ext = 0; /* server does not support them */ 3268 3197 3269 3198 /* do not care if a following call succeed - informational */ ··· 3346 3273 rc = mount_get_conns(mnt_ctx); 3347 3274 if (mnt_ctx->server) { 3348 3275 cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__); 3349 - spin_lock(&cifs_tcp_ses_lock); 3276 + spin_lock(&mnt_ctx->server->srv_lock); 3350 3277 mnt_ctx->server->is_dfs_conn = true; 3351 - spin_unlock(&cifs_tcp_ses_lock); 3278 + spin_unlock(&mnt_ctx->server->srv_lock); 3352 3279 } 3353 3280 return rc; 3354 3281 } ··· 4063 3990 return -ENOSYS; 4064 3991 4065 3992 /* only send once per connect */ 4066 - spin_lock(&cifs_tcp_ses_lock); 3993 + spin_lock(&server->srv_lock); 4067 3994 if (!server->ops->need_neg(server) || 4068 3995 server->tcpStatus != CifsNeedNegotiate) { 4069 - spin_unlock(&cifs_tcp_ses_lock); 3996 + spin_unlock(&server->srv_lock); 4070 3997 return 0; 4071 3998 } 4072 3999 server->tcpStatus = CifsInNegotiate; 4073 - spin_unlock(&cifs_tcp_ses_lock); 4000 + spin_unlock(&server->srv_lock); 4074 4001 4075 4002 rc = server->ops->negotiate(xid, ses, server); 4076 4003 if (rc == 0) { 4077 - spin_lock(&cifs_tcp_ses_lock); 4004 + spin_lock(&server->srv_lock); 4078 4005 if (server->tcpStatus == CifsInNegotiate) 4079 4006 server->tcpStatus = CifsGood; 4080 4007 else 4081 4008 rc 
= -EHOSTDOWN; 4082 - spin_unlock(&cifs_tcp_ses_lock); 4009 + spin_unlock(&server->srv_lock); 4083 4010 } else { 4084 - spin_lock(&cifs_tcp_ses_lock); 4011 + spin_lock(&server->srv_lock); 4085 4012 if (server->tcpStatus == CifsInNegotiate) 4086 4013 server->tcpStatus = CifsNeedNegotiate; 4087 - spin_unlock(&cifs_tcp_ses_lock); 4014 + spin_unlock(&server->srv_lock); 4088 4015 } 4089 4016 4090 4017 return rc; ··· 4100 4027 struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; 4101 4028 bool is_binding = false; 4102 4029 4103 - spin_lock(&cifs_tcp_ses_lock); 4030 + spin_lock(&ses->ses_lock); 4104 4031 if (server->dstaddr.ss_family == AF_INET6) 4105 4032 scnprintf(ses->ip_addr, sizeof(ses->ip_addr), "%pI6", &addr6->sin6_addr); 4106 4033 else ··· 4109 4036 if (ses->ses_status != SES_GOOD && 4110 4037 ses->ses_status != SES_NEW && 4111 4038 ses->ses_status != SES_NEED_RECON) { 4112 - spin_unlock(&cifs_tcp_ses_lock); 4039 + spin_unlock(&ses->ses_lock); 4113 4040 return 0; 4114 4041 } 4115 4042 ··· 4118 4045 if (CIFS_ALL_CHANS_GOOD(ses) || 4119 4046 cifs_chan_in_reconnect(ses, server)) { 4120 4047 spin_unlock(&ses->chan_lock); 4121 - spin_unlock(&cifs_tcp_ses_lock); 4048 + spin_unlock(&ses->ses_lock); 4122 4049 return 0; 4123 4050 } 4124 4051 is_binding = !CIFS_ALL_CHANS_NEED_RECONNECT(ses); ··· 4127 4054 4128 4055 if (!is_binding) 4129 4056 ses->ses_status = SES_IN_SETUP; 4130 - spin_unlock(&cifs_tcp_ses_lock); 4057 + spin_unlock(&ses->ses_lock); 4131 4058 4132 4059 if (!is_binding) { 4133 4060 ses->capabilities = server->capabilities; ··· 4151 4078 4152 4079 if (rc) { 4153 4080 cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc); 4154 - spin_lock(&cifs_tcp_ses_lock); 4081 + spin_lock(&ses->ses_lock); 4155 4082 if (ses->ses_status == SES_IN_SETUP) 4156 4083 ses->ses_status = SES_NEED_RECON; 4157 4084 spin_lock(&ses->chan_lock); 4158 4085 cifs_chan_clear_in_reconnect(ses, server); 4159 4086 spin_unlock(&ses->chan_lock); 4160 - 
spin_unlock(&cifs_tcp_ses_lock); 4087 + spin_unlock(&ses->ses_lock); 4161 4088 } else { 4162 - spin_lock(&cifs_tcp_ses_lock); 4089 + spin_lock(&ses->ses_lock); 4163 4090 if (ses->ses_status == SES_IN_SETUP) 4164 4091 ses->ses_status = SES_GOOD; 4165 4092 spin_lock(&ses->chan_lock); 4166 4093 cifs_chan_clear_in_reconnect(ses, server); 4167 4094 cifs_chan_clear_need_reconnect(ses, server); 4168 4095 spin_unlock(&ses->chan_lock); 4169 - spin_unlock(&cifs_tcp_ses_lock); 4096 + spin_unlock(&ses->ses_lock); 4170 4097 } 4171 4098 4172 4099 return rc; ··· 4240 4167 goto out; 4241 4168 } 4242 4169 4170 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 4243 4171 if (cap_unix(ses)) 4244 4172 reset_cifs_unix_caps(0, tcon, NULL, ctx); 4173 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 4245 4174 4246 4175 out: 4247 4176 kfree(ctx->username); ··· 4632 4557 struct dfs_info3_param ref = {0}; 4633 4558 4634 4559 /* only send once per connect */ 4635 - spin_lock(&cifs_tcp_ses_lock); 4560 + spin_lock(&tcon->tc_lock); 4636 4561 if (tcon->ses->ses_status != SES_GOOD || 4637 4562 (tcon->status != TID_NEW && 4638 4563 tcon->status != TID_NEED_TCON)) { 4639 - spin_unlock(&cifs_tcp_ses_lock); 4564 + spin_unlock(&tcon->tc_lock); 4640 4565 return 0; 4641 4566 } 4642 4567 tcon->status = TID_IN_TCON; 4643 - spin_unlock(&cifs_tcp_ses_lock); 4568 + spin_unlock(&tcon->tc_lock); 4644 4569 4645 4570 tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL); 4646 4571 if (!tree) { ··· 4679 4604 cifs_put_tcp_super(sb); 4680 4605 4681 4606 if (rc) { 4682 - spin_lock(&cifs_tcp_ses_lock); 4607 + spin_lock(&tcon->tc_lock); 4683 4608 if (tcon->status == TID_IN_TCON) 4684 4609 tcon->status = TID_NEED_TCON; 4685 - spin_unlock(&cifs_tcp_ses_lock); 4610 + spin_unlock(&tcon->tc_lock); 4686 4611 } else { 4687 - spin_lock(&cifs_tcp_ses_lock); 4612 + spin_lock(&tcon->tc_lock); 4688 4613 if (tcon->status == TID_IN_TCON) 4689 4614 tcon->status = TID_GOOD; 4690 - spin_unlock(&cifs_tcp_ses_lock); 4615 + spin_unlock(&tcon->tc_lock); 4691 
4616 tcon->need_reconnect = false; 4692 4617 } 4693 4618 ··· 4700 4625 const struct smb_version_operations *ops = tcon->ses->server->ops; 4701 4626 4702 4627 /* only send once per connect */ 4703 - spin_lock(&cifs_tcp_ses_lock); 4628 + spin_lock(&tcon->tc_lock); 4704 4629 if (tcon->ses->ses_status != SES_GOOD || 4705 4630 (tcon->status != TID_NEW && 4706 4631 tcon->status != TID_NEED_TCON)) { 4707 - spin_unlock(&cifs_tcp_ses_lock); 4632 + spin_unlock(&tcon->tc_lock); 4708 4633 return 0; 4709 4634 } 4710 4635 tcon->status = TID_IN_TCON; 4711 - spin_unlock(&cifs_tcp_ses_lock); 4636 + spin_unlock(&tcon->tc_lock); 4712 4637 4713 4638 rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc); 4714 4639 if (rc) { 4715 - spin_lock(&cifs_tcp_ses_lock); 4640 + spin_lock(&tcon->tc_lock); 4716 4641 if (tcon->status == TID_IN_TCON) 4717 4642 tcon->status = TID_NEED_TCON; 4718 - spin_unlock(&cifs_tcp_ses_lock); 4643 + spin_unlock(&tcon->tc_lock); 4719 4644 } else { 4720 - spin_lock(&cifs_tcp_ses_lock); 4645 + spin_lock(&tcon->tc_lock); 4721 4646 if (tcon->status == TID_IN_TCON) 4722 4647 tcon->status = TID_GOOD; 4723 - spin_unlock(&cifs_tcp_ses_lock); 4724 4648 tcon->need_reconnect = false; 4649 + spin_unlock(&tcon->tc_lock); 4725 4650 } 4726 4651 4727 4652 return rc;
+7 -1
fs/cifs/dfs_cache.c
··· 1526 1526 1527 1527 spin_lock(&cifs_tcp_ses_lock); 1528 1528 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 1529 - if (!server->is_dfs_conn) 1529 + spin_lock(&server->srv_lock); 1530 + if (!server->is_dfs_conn) { 1531 + spin_unlock(&server->srv_lock); 1530 1532 continue; 1533 + } 1534 + spin_unlock(&server->srv_lock); 1531 1535 1532 1536 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 1533 1537 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 1538 + spin_lock(&tcon->tc_lock); 1534 1539 if (!tcon->ipc && !tcon->need_reconnect) { 1535 1540 tcon->tc_count++; 1536 1541 list_add_tail(&tcon->ulist, &tcons); 1537 1542 } 1543 + spin_unlock(&tcon->tc_lock); 1538 1544 } 1539 1545 } 1540 1546 }
+8
fs/cifs/dir.c
··· 193 193 return PTR_ERR(full_path); 194 194 } 195 195 196 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 196 197 if (tcon->unix_ext && cap_unix(tcon->ses) && !tcon->broken_posix_open && 197 198 (CIFS_UNIX_POSIX_PATH_OPS_CAP & 198 199 le64_to_cpu(tcon->fsUnixInfo.Capability))) { ··· 262 261 * rare for path not covered on files) 263 262 */ 264 263 } 264 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 265 265 266 266 desired_access = 0; 267 267 if (OPEN_FMODE(oflags) & FMODE_READ) ··· 318 316 goto out; 319 317 } 320 318 319 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 321 320 /* 322 321 * If Open reported that we actually created a file then we now have to 323 322 * set the mode if possible. ··· 360 357 rc = cifs_get_inode_info_unix(&newinode, full_path, inode->i_sb, 361 358 xid); 362 359 else { 360 + #else 361 + { 362 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 363 363 /* TODO: Add support for calling POSIX query info here, but passing in fid */ 364 364 rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb, 365 365 xid, fid); ··· 383 377 } 384 378 } 385 379 380 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 386 381 cifs_create_set_dentry: 382 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 387 383 if (rc != 0) { 388 384 cifs_dbg(FYI, "Create worked, get_inode_info failed rc = %d\n", 389 385 rc);
+269 -14
fs/cifs/file.c
··· 26 26 #include "cifspdu.h" 27 27 #include "cifsglob.h" 28 28 #include "cifsproto.h" 29 + #include "smb2proto.h" 29 30 #include "cifs_unicode.h" 30 31 #include "cifs_debug.h" 31 32 #include "cifs_fs_sb.h" ··· 34 33 #include "smbdirect.h" 35 34 #include "fs_context.h" 36 35 #include "cifs_ioctl.h" 36 + 37 + /* 38 + * Mark as invalid, all open files on tree connections since they 39 + * were closed when session to server was lost. 40 + */ 41 + void 42 + cifs_mark_open_files_invalid(struct cifs_tcon *tcon) 43 + { 44 + struct cifsFileInfo *open_file = NULL; 45 + struct list_head *tmp; 46 + struct list_head *tmp1; 47 + 48 + /* only send once per connect */ 49 + spin_lock(&tcon->ses->ses_lock); 50 + if ((tcon->ses->ses_status != SES_GOOD) || (tcon->status != TID_NEED_RECON)) { 51 + spin_unlock(&tcon->ses->ses_lock); 52 + return; 53 + } 54 + tcon->status = TID_IN_FILES_INVALIDATE; 55 + spin_unlock(&tcon->ses->ses_lock); 56 + 57 + /* list all files open on tree connection and mark them invalid */ 58 + spin_lock(&tcon->open_file_lock); 59 + list_for_each_safe(tmp, tmp1, &tcon->openFileList) { 60 + open_file = list_entry(tmp, struct cifsFileInfo, tlist); 61 + open_file->invalidHandle = true; 62 + open_file->oplock_break_cancelled = true; 63 + } 64 + spin_unlock(&tcon->open_file_lock); 65 + 66 + mutex_lock(&tcon->crfid.fid_mutex); 67 + tcon->crfid.is_valid = false; 68 + /* cached handle is not valid, so SMB2_CLOSE won't be sent below */ 69 + close_cached_dir_lease_locked(&tcon->crfid); 70 + memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid)); 71 + mutex_unlock(&tcon->crfid.fid_mutex); 72 + 73 + spin_lock(&tcon->tc_lock); 74 + if (tcon->status == TID_IN_FILES_INVALIDATE) 75 + tcon->status = TID_NEED_TCON; 76 + spin_unlock(&tcon->tc_lock); 77 + 78 + /* 79 + * BB Add call to invalidate_inodes(sb) for all superblocks mounted 80 + * to this tcon. 
81 + */ 82 + } 37 83 38 84 static inline int cifs_convert_flags(unsigned int flags) 39 85 { ··· 100 52 FILE_READ_DATA); 101 53 } 102 54 55 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 103 56 static u32 cifs_posix_convert_flags(unsigned int flags) 104 57 { 105 58 u32 posix_flags = 0; ··· 134 85 135 86 return posix_flags; 136 87 } 88 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 137 89 138 90 static inline int cifs_get_disposition(unsigned int flags) 139 91 { ··· 150 100 return FILE_OPEN; 151 101 } 152 102 103 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 153 104 int cifs_posix_open(const char *full_path, struct inode **pinode, 154 105 struct super_block *sb, int mode, unsigned int f_flags, 155 106 __u32 *poplock, __u16 *pnetfid, unsigned int xid) ··· 212 161 kfree(presp_data); 213 162 return rc; 214 163 } 164 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 215 165 216 166 static int 217 167 cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, ··· 631 579 else 632 580 oplock = 0; 633 581 582 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 634 583 if (!tcon->broken_posix_open && tcon->unix_ext && 635 584 cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 636 585 le64_to_cpu(tcon->fsUnixInfo.Capability))) { ··· 656 603 * or DFS errors. 
657 604 */ 658 605 } 606 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 659 607 660 608 if (server->ops->get_lease_key) 661 609 server->ops->get_lease_key(inode, &fid); ··· 684 630 goto out; 685 631 } 686 632 633 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 687 634 if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) { 688 635 /* 689 636 * Time to set mode which we can not set earlier due to ··· 702 647 CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid, 703 648 cfile->pid); 704 649 } 650 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 705 651 706 652 use_cache: 707 653 fscache_use_cookie(cifs_inode_cookie(file_inode(file)), ··· 720 664 return rc; 721 665 } 722 666 667 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 723 668 static int cifs_push_posix_locks(struct cifsFileInfo *cfile); 669 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 724 670 725 671 /* 726 672 * Try to reacquire byte range locks that were released when session ··· 731 673 static int 732 674 cifs_relock_file(struct cifsFileInfo *cfile) 733 675 { 734 - struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); 735 676 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 736 677 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 737 678 int rc = 0; 679 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 680 + struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); 681 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 738 682 739 683 down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING); 740 684 if (cinode->can_cache_brlcks) { ··· 745 685 return rc; 746 686 } 747 687 688 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 748 689 if (cap_unix(tcon->ses) && 749 690 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 750 691 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 751 692 rc = cifs_push_posix_locks(cfile); 752 693 else 694 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 753 695 rc = tcon->ses->server->ops->push_mand_locks(cfile); 754 696 755 697 
up_read(&cinode->lock_sem); ··· 812 750 else 813 751 oplock = 0; 814 752 753 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 815 754 if (tcon->unix_ext && cap_unix(tcon->ses) && 816 755 (CIFS_UNIX_POSIX_PATH_OPS_CAP & 817 756 le64_to_cpu(tcon->fsUnixInfo.Capability))) { ··· 836 773 * in the reconnect path it is important to retry hard 837 774 */ 838 775 } 776 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 839 777 840 778 desired_access = cifs_convert_flags(cfile->f_flags); 841 779 ··· 881 817 goto reopen_error_exit; 882 818 } 883 819 820 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 884 821 reopen_success: 822 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 885 823 cfile->invalidHandle = false; 886 824 mutex_unlock(&cfile->fh_mutex); 887 825 cinode = CIFS_I(inode); ··· 994 928 void 995 929 cifs_reopen_persistent_handles(struct cifs_tcon *tcon) 996 930 { 997 - struct cifsFileInfo *open_file; 998 - struct list_head *tmp; 999 - struct list_head *tmp1; 931 + struct cifsFileInfo *open_file, *tmp; 1000 932 struct list_head tmp_list; 1001 933 1002 934 if (!tcon->use_persistent || !tcon->need_reopen_files) ··· 1007 943 1008 944 /* list all files open on tree connection, reopen resilient handles */ 1009 945 spin_lock(&tcon->open_file_lock); 1010 - list_for_each(tmp, &tcon->openFileList) { 1011 - open_file = list_entry(tmp, struct cifsFileInfo, tlist); 946 + list_for_each_entry(open_file, &tcon->openFileList, tlist) { 1012 947 if (!open_file->invalidHandle) 1013 948 continue; 1014 949 cifsFileInfo_get(open_file); ··· 1015 952 } 1016 953 spin_unlock(&tcon->open_file_lock); 1017 954 1018 - list_for_each_safe(tmp, tmp1, &tmp_list) { 1019 - open_file = list_entry(tmp, struct cifsFileInfo, rlist); 955 + list_for_each_entry_safe(open_file, tmp, &tmp_list, rlist) { 1020 956 if (cifs_reopen_file(open_file, false /* do not flush */)) 1021 957 tcon->need_reopen_files = true; 1022 958 list_del_init(&open_file->rlist); ··· 1258 1196 return rc; 1259 1197 } 1260 1198 1199 + #ifdef 
CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1261 1200 /* 1262 1201 * Check if there is another lock that prevents us to set the lock (posix 1263 1202 * style). If such a lock exists, update the flock structure with its ··· 1397 1334 { 1398 1335 return cifs_lock_secret ^ hash32_ptr((const void *)owner); 1399 1336 } 1337 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1400 1338 1401 1339 struct lock_to_push { 1402 1340 struct list_head llist; ··· 1408 1344 __u8 type; 1409 1345 }; 1410 1346 1347 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1411 1348 static int 1412 1349 cifs_push_posix_locks(struct cifsFileInfo *cfile) 1413 1350 { ··· 1496 1431 } 1497 1432 goto out; 1498 1433 } 1434 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1499 1435 1500 1436 static int 1501 1437 cifs_push_locks(struct cifsFileInfo *cfile) 1502 1438 { 1503 - struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); 1504 1439 struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry)); 1505 1440 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1506 1441 int rc = 0; 1442 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1443 + struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); 1444 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1507 1445 1508 1446 /* we are going to update can_cache_brlcks here - need a write access */ 1509 1447 cifs_down_write(&cinode->lock_sem); ··· 1515 1447 return rc; 1516 1448 } 1517 1449 1450 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1518 1451 if (cap_unix(tcon->ses) && 1519 1452 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) && 1520 1453 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) 1521 1454 rc = cifs_push_posix_locks(cfile); 1522 1455 else 1456 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1523 1457 rc = tcon->ses->server->ops->push_mand_locks(cfile); 1524 1458 1525 1459 cinode->can_cache_brlcks = false; ··· 1585 1515 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 1586 1516 struct cifs_tcon *tcon = 
tlink_tcon(cfile->tlink); 1587 1517 struct TCP_Server_Info *server = tcon->ses->server; 1518 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1588 1519 __u16 netfid = cfile->fid.netfid; 1589 1520 1590 1521 if (posix_lck) { ··· 1605 1534 posix_lock_type, wait_flag); 1606 1535 return rc; 1607 1536 } 1537 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1608 1538 1609 1539 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock); 1610 1540 if (!rc) ··· 1666 1594 } 1667 1595 } 1668 1596 1597 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1669 1598 int 1670 1599 cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, 1671 1600 unsigned int xid) ··· 1779 1706 kfree(buf); 1780 1707 return rc; 1781 1708 } 1709 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1782 1710 1783 1711 static int 1784 1712 cifs_setlk(struct file *file, struct file_lock *flock, __u32 type, ··· 1793 1719 struct TCP_Server_Info *server = tcon->ses->server; 1794 1720 struct inode *inode = d_inode(cfile->dentry); 1795 1721 1722 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1796 1723 if (posix_lck) { 1797 1724 int posix_lock_type; 1798 1725 ··· 1815 1740 NULL, posix_lock_type, wait_flag); 1816 1741 goto out; 1817 1742 } 1818 - 1743 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1819 1744 if (lock) { 1820 1745 struct cifsLockInfo *lock; 1821 1746 ··· 2278 2203 free_dentry_path(page); 2279 2204 return -ENOENT; 2280 2205 } 2206 + 2207 + void 2208 + cifs_writedata_release(struct kref *refcount) 2209 + { 2210 + struct cifs_writedata *wdata = container_of(refcount, 2211 + struct cifs_writedata, refcount); 2212 + #ifdef CONFIG_CIFS_SMB_DIRECT 2213 + if (wdata->mr) { 2214 + smbd_deregister_mr(wdata->mr); 2215 + wdata->mr = NULL; 2216 + } 2217 + #endif 2218 + 2219 + if (wdata->cfile) 2220 + cifsFileInfo_put(wdata->cfile); 2221 + 2222 + kvfree(wdata->pages); 2223 + kfree(wdata); 2224 + } 2225 + 2226 + /* 2227 + * Write failed with a retryable error. Resend the write request. 
It's also 2228 + * possible that the page was redirtied so re-clean the page. 2229 + */ 2230 + static void 2231 + cifs_writev_requeue(struct cifs_writedata *wdata) 2232 + { 2233 + int i, rc = 0; 2234 + struct inode *inode = d_inode(wdata->cfile->dentry); 2235 + struct TCP_Server_Info *server; 2236 + unsigned int rest_len; 2237 + 2238 + server = tlink_tcon(wdata->cfile->tlink)->ses->server; 2239 + i = 0; 2240 + rest_len = wdata->bytes; 2241 + do { 2242 + struct cifs_writedata *wdata2; 2243 + unsigned int j, nr_pages, wsize, tailsz, cur_len; 2244 + 2245 + wsize = server->ops->wp_retry_size(inode); 2246 + if (wsize < rest_len) { 2247 + nr_pages = wsize / PAGE_SIZE; 2248 + if (!nr_pages) { 2249 + rc = -EOPNOTSUPP; 2250 + break; 2251 + } 2252 + cur_len = nr_pages * PAGE_SIZE; 2253 + tailsz = PAGE_SIZE; 2254 + } else { 2255 + nr_pages = DIV_ROUND_UP(rest_len, PAGE_SIZE); 2256 + cur_len = rest_len; 2257 + tailsz = rest_len - (nr_pages - 1) * PAGE_SIZE; 2258 + } 2259 + 2260 + wdata2 = cifs_writedata_alloc(nr_pages, cifs_writev_complete); 2261 + if (!wdata2) { 2262 + rc = -ENOMEM; 2263 + break; 2264 + } 2265 + 2266 + for (j = 0; j < nr_pages; j++) { 2267 + wdata2->pages[j] = wdata->pages[i + j]; 2268 + lock_page(wdata2->pages[j]); 2269 + clear_page_dirty_for_io(wdata2->pages[j]); 2270 + } 2271 + 2272 + wdata2->sync_mode = wdata->sync_mode; 2273 + wdata2->nr_pages = nr_pages; 2274 + wdata2->offset = page_offset(wdata2->pages[0]); 2275 + wdata2->pagesz = PAGE_SIZE; 2276 + wdata2->tailsz = tailsz; 2277 + wdata2->bytes = cur_len; 2278 + 2279 + rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, 2280 + &wdata2->cfile); 2281 + if (!wdata2->cfile) { 2282 + cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n", 2283 + rc); 2284 + if (!is_retryable_error(rc)) 2285 + rc = -EBADF; 2286 + } else { 2287 + wdata2->pid = wdata2->cfile->pid; 2288 + rc = server->ops->async_writev(wdata2, 2289 + cifs_writedata_release); 2290 + } 2291 + 2292 + for (j = 0; j < nr_pages; j++) { 
2293 + unlock_page(wdata2->pages[j]); 2294 + if (rc != 0 && !is_retryable_error(rc)) { 2295 + SetPageError(wdata2->pages[j]); 2296 + end_page_writeback(wdata2->pages[j]); 2297 + put_page(wdata2->pages[j]); 2298 + } 2299 + } 2300 + 2301 + kref_put(&wdata2->refcount, cifs_writedata_release); 2302 + if (rc) { 2303 + if (is_retryable_error(rc)) 2304 + continue; 2305 + i += nr_pages; 2306 + break; 2307 + } 2308 + 2309 + rest_len -= cur_len; 2310 + i += nr_pages; 2311 + } while (i < wdata->nr_pages); 2312 + 2313 + /* cleanup remaining pages from the original wdata */ 2314 + for (; i < wdata->nr_pages; i++) { 2315 + SetPageError(wdata->pages[i]); 2316 + end_page_writeback(wdata->pages[i]); 2317 + put_page(wdata->pages[i]); 2318 + } 2319 + 2320 + if (rc != 0 && !is_retryable_error(rc)) 2321 + mapping_set_error(inode->i_mapping, rc); 2322 + kref_put(&wdata->refcount, cifs_writedata_release); 2323 + } 2324 + 2325 + void 2326 + cifs_writev_complete(struct work_struct *work) 2327 + { 2328 + struct cifs_writedata *wdata = container_of(work, 2329 + struct cifs_writedata, work); 2330 + struct inode *inode = d_inode(wdata->cfile->dentry); 2331 + int i = 0; 2332 + 2333 + if (wdata->result == 0) { 2334 + spin_lock(&inode->i_lock); 2335 + cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); 2336 + spin_unlock(&inode->i_lock); 2337 + cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), 2338 + wdata->bytes); 2339 + } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) 2340 + return cifs_writev_requeue(wdata); 2341 + 2342 + for (i = 0; i < wdata->nr_pages; i++) { 2343 + struct page *page = wdata->pages[i]; 2344 + 2345 + if (wdata->result == -EAGAIN) 2346 + __set_page_dirty_nobuffers(page); 2347 + else if (wdata->result < 0) 2348 + SetPageError(page); 2349 + end_page_writeback(page); 2350 + cifs_readpage_to_fscache(inode, page); 2351 + put_page(page); 2352 + } 2353 + if (wdata->result != -EAGAIN) 2354 + mapping_set_error(inode->i_mapping, wdata->result); 
2355 + kref_put(&wdata->refcount, cifs_writedata_release); 2356 + } 2357 + 2358 + struct cifs_writedata * 2359 + cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete) 2360 + { 2361 + struct page **pages = 2362 + kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS); 2363 + if (pages) 2364 + return cifs_writedata_direct_alloc(pages, complete); 2365 + 2366 + return NULL; 2367 + } 2368 + 2369 + struct cifs_writedata * 2370 + cifs_writedata_direct_alloc(struct page **pages, work_func_t complete) 2371 + { 2372 + struct cifs_writedata *wdata; 2373 + 2374 + wdata = kzalloc(sizeof(*wdata), GFP_NOFS); 2375 + if (wdata != NULL) { 2376 + wdata->pages = pages; 2377 + kref_init(&wdata->refcount); 2378 + INIT_LIST_HEAD(&wdata->list); 2379 + init_completion(&wdata->done); 2380 + INIT_WORK(&wdata->work, complete); 2381 + } 2382 + return wdata; 2383 + } 2384 + 2281 2385 2282 2386 static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) 2283 2387 { ··· 4713 4459 * TODO: Send a whole batch of pages to be read 4714 4460 * by the cache. 4715 4461 */ 4716 - page = readahead_page(ractl); 4717 - last_batch_size = 1 << thp_order(page); 4462 + struct folio *folio = readahead_folio(ractl); 4463 + 4464 + last_batch_size = folio_nr_pages(folio); 4718 4465 if (cifs_readpage_from_fscache(ractl->mapping->host, 4719 - page) < 0) { 4466 + &folio->page) < 0) { 4720 4467 /* 4721 4468 * TODO: Deal with cache read failure 4722 4469 * here, but for the moment, delegate ··· 4725 4470 */ 4726 4471 caching = false; 4727 4472 } 4728 - unlock_page(page); 4473 + folio_unlock(folio); 4729 4474 next_cached++; 4730 4475 cache_nr_pages--; 4731 4476 if (cache_nr_pages == 0)
+57 -7
fs/cifs/inode.c
··· 339 339 fattr->cf_flags = CIFS_FATTR_DFS_REFERRAL; 340 340 } 341 341 342 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 342 343 static int 343 344 cifs_get_file_info_unix(struct file *filp) 344 345 { ··· 433 432 cgiiu_exit: 434 433 return rc; 435 434 } 435 + #else 436 + int cifs_get_inode_info_unix(struct inode **pinode, 437 + const unsigned char *full_path, 438 + struct super_block *sb, unsigned int xid) 439 + { 440 + return -EOPNOTSUPP; 441 + } 442 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 436 443 437 444 static int 438 445 cifs_sfu_type(struct cifs_fattr *fattr, const char *path, ··· 804 795 return hash; 805 796 } 806 797 798 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 807 799 /** 808 800 * cifs_backup_query_path_info - SMB1 fallback code to get ino 809 801 * ··· 857 847 *data = (FILE_ALL_INFO *)info.srch_entries_start; 858 848 return 0; 859 849 } 850 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 860 851 861 852 static void 862 853 cifs_set_fattr_ino(int xid, ··· 1002 991 rc = 0; 1003 992 break; 1004 993 case -EACCES: 994 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1005 995 /* 1006 996 * perm errors, try again with backup flags if possible 1007 997 * ··· 1034 1022 /* nothing we can do, bail out */ 1035 1023 goto out; 1036 1024 } 1025 + #else 1026 + goto out; 1027 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1037 1028 break; 1038 1029 default: 1039 1030 cifs_dbg(FYI, "%s: unhandled err rc %d\n", __func__, rc); ··· 1052 1037 /* 1053 1038 * 4. 
Tweak fattr based on mount options 1054 1039 */ 1055 - 1040 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1056 1041 handle_mnt_opt: 1042 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1057 1043 /* query for SFU type info if supported and needed */ 1058 1044 if (fattr.cf_cifsattrs & ATTR_SYSTEM && 1059 1045 cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) { ··· 1239 1223 static int 1240 1224 cifs_find_inode(struct inode *inode, void *opaque) 1241 1225 { 1242 - struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; 1226 + struct cifs_fattr *fattr = opaque; 1243 1227 1244 1228 /* don't match inode with different uniqueid */ 1245 1229 if (CIFS_I(inode)->uniqueid != fattr->cf_uniqueid) ··· 1263 1247 static int 1264 1248 cifs_init_inode(struct inode *inode, void *opaque) 1265 1249 { 1266 - struct cifs_fattr *fattr = (struct cifs_fattr *) opaque; 1250 + struct cifs_fattr *fattr = opaque; 1267 1251 1268 1252 CIFS_I(inode)->uniqueid = fattr->cf_uniqueid; 1269 1253 CIFS_I(inode)->createtime = fattr->cf_createtime; ··· 1451 1435 return server->ops->set_file_info(inode, full_path, &info_buf, xid); 1452 1436 } 1453 1437 1438 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1454 1439 /* 1455 1440 * Open the given file (if it isn't already), set the DELETE_ON_CLOSE bit 1456 1441 * and rename it to a random name that hopefully won't conflict with ··· 1582 1565 1583 1566 goto out_close; 1584 1567 } 1568 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1585 1569 1586 1570 /* copied from fs/nfs/dir.c with small changes */ 1587 1571 static void ··· 1645 1627 } 1646 1628 1647 1629 cifs_close_deferred_file_under_dentry(tcon, full_path); 1630 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1648 1631 if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1649 1632 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 1650 1633 rc = CIFSPOSIXDelFile(xid, tcon, full_path, ··· 1655 1636 if ((rc == 0) || (rc == -ENOENT)) 1656 1637 goto psx_del_no_retry; 1657 1638 } 1639 + #endif /* 
CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1658 1640 1659 1641 retry_std_delete: 1660 1642 if (!server->ops->unlink) { ··· 1734 1714 1735 1715 if (tcon->posix_extensions) 1736 1716 rc = smb311_posix_get_inode_info(&inode, full_path, parent->i_sb, xid); 1717 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1737 1718 else if (tcon->unix_ext) 1738 1719 rc = cifs_get_inode_info_unix(&inode, full_path, parent->i_sb, 1739 1720 xid); 1721 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1740 1722 else 1741 1723 rc = cifs_get_inode_info(&inode, full_path, NULL, parent->i_sb, 1742 1724 xid, NULL); ··· 1768 1746 if (parent->i_mode & S_ISGID) 1769 1747 mode |= S_ISGID; 1770 1748 1749 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1771 1750 if (tcon->unix_ext) { 1772 1751 struct cifs_unix_set_info_args args = { 1773 1752 .mode = mode, ··· 1791 1768 cifs_sb->local_nls, 1792 1769 cifs_remap(cifs_sb)); 1793 1770 } else { 1771 + #else 1772 + { 1773 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1794 1774 struct TCP_Server_Info *server = tcon->ses->server; 1795 1775 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) && 1796 1776 (mode & S_IWUGO) == 0 && server->ops->mkdir_setinfo) ··· 1814 1788 return 0; 1815 1789 } 1816 1790 1791 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1817 1792 static int 1818 1793 cifs_posix_mkdir(struct inode *inode, struct dentry *dentry, umode_t mode, 1819 1794 const char *full_path, struct cifs_sb_info *cifs_sb, ··· 1877 1850 xid); 1878 1851 goto posix_mkdir_out; 1879 1852 } 1853 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1880 1854 1881 1855 int cifs_mkdir(struct user_namespace *mnt_userns, struct inode *inode, 1882 1856 struct dentry *direntry, umode_t mode) ··· 1920 1892 goto mkdir_out; 1921 1893 } 1922 1894 1895 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1923 1896 if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP & 1924 1897 le64_to_cpu(tcon->fsUnixInfo.Capability))) { 1925 1898 rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb, ··· 1928 
1899 if (rc != -EOPNOTSUPP) 1929 1900 goto mkdir_out; 1930 1901 } 1902 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 1931 1903 1932 1904 if (!server->ops->mkdir) { 1933 1905 rc = -ENOSYS; ··· 2045 2015 struct tcon_link *tlink; 2046 2016 struct cifs_tcon *tcon; 2047 2017 struct TCP_Server_Info *server; 2018 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2048 2019 struct cifs_fid fid; 2049 2020 struct cifs_open_parms oparms; 2050 - int oplock, rc; 2021 + int oplock; 2022 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2023 + int rc; 2051 2024 2052 2025 tlink = cifs_sb_tlink(cifs_sb); 2053 2026 if (IS_ERR(tlink)) ··· 2076 2043 if (server->vals->protocol_id != 0) 2077 2044 goto do_rename_exit; 2078 2045 2046 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2079 2047 /* open-file renames don't work across directories */ 2080 2048 if (to_dentry->d_parent != from_dentry->d_parent) 2081 2049 goto do_rename_exit; ··· 2098 2064 cifs_sb->local_nls, cifs_remap(cifs_sb)); 2099 2065 CIFSSMBClose(xid, tcon, fid.netfid); 2100 2066 } 2067 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2101 2068 do_rename_exit: 2102 2069 if (rc == 0) 2103 2070 d_move(from_dentry, to_dentry); ··· 2116 2081 struct cifs_sb_info *cifs_sb; 2117 2082 struct tcon_link *tlink; 2118 2083 struct cifs_tcon *tcon; 2119 - FILE_UNIX_BASIC_INFO *info_buf_source = NULL; 2120 - FILE_UNIX_BASIC_INFO *info_buf_target; 2121 2084 unsigned int xid; 2122 2085 int rc, tmprc; 2123 2086 int retry_count = 0; 2087 + FILE_UNIX_BASIC_INFO *info_buf_source = NULL; 2088 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2089 + FILE_UNIX_BASIC_INFO *info_buf_target; 2090 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2124 2091 2125 2092 if (flags & ~RENAME_NOREPLACE) 2126 2093 return -EINVAL; ··· 2176 2139 if (flags & RENAME_NOREPLACE) 2177 2140 goto cifs_rename_exit; 2178 2141 2142 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2179 2143 if (rc == -EEXIST && tcon->unix_ext) { 2180 2144 /* 2181 2145 * Are src and dst hardlinks of same inode? 
We can only tell ··· 2216 2178 */ 2217 2179 2218 2180 unlink_target: 2181 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2182 + 2219 2183 /* Try unlinking the target dentry if it's not negative */ 2220 2184 if (d_really_is_positive(target_dentry) && (rc == -EACCES || rc == -EEXIST)) { 2221 2185 if (d_is_dir(target_dentry)) ··· 2377 2337 { 2378 2338 int rc = 0; 2379 2339 struct dentry *dentry = file_dentry(filp); 2340 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2380 2341 struct cifsFileInfo *cfile = (struct cifsFileInfo *) filp->private_data; 2342 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2381 2343 2382 2344 if (!cifs_dentry_needs_reval(dentry)) 2383 2345 return rc; 2384 2346 2347 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2385 2348 if (tlink_tcon(cfile->tlink)->unix_ext) 2386 2349 rc = cifs_get_file_info_unix(filp); 2387 2350 else 2351 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2388 2352 rc = cifs_get_file_info(filp); 2389 2353 2390 2354 return rc; ··· 2697 2653 return rc; 2698 2654 } 2699 2655 2656 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 2700 2657 static int 2701 2658 cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) 2702 2659 { ··· 2845 2800 free_xid(xid); 2846 2801 return rc; 2847 2802 } 2803 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 2848 2804 2849 2805 static int 2850 2806 cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) ··· 3041 2995 struct iattr *attrs) 3042 2996 { 3043 2997 struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); 3044 - struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); 3045 2998 int rc, retries = 0; 2999 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3000 + struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); 3001 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3046 3002 3047 3003 if (unlikely(cifs_forced_shutdown(cifs_sb))) 3048 3004 return -EIO; 3049 3005 3050 3006 do { 3007 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 3051 3008 if (pTcon->unix_ext) 3052 3009 rc = 
cifs_setattr_unix(direntry, attrs); 3053 3010 else 3011 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 3054 3012 rc = cifs_setattr_nounix(direntry, attrs); 3055 3013 retries++; 3056 3014 } while (is_retryable_error(rc) && retries < 2);
+2
fs/cifs/ioctl.c
··· 333 333 tcon = tlink_tcon(pSMBFile->tlink); 334 334 caps = le64_to_cpu(tcon->fsUnixInfo.Capability); 335 335 #ifdef CONFIG_CIFS_POSIX 336 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 336 337 if (CIFS_UNIX_EXTATTR_CAP & caps) { 337 338 __u64 ExtAttrMask = 0; 338 339 rc = CIFSGetExtAttr(xid, tcon, ··· 346 345 if (rc != EOPNOTSUPP) 347 346 break; 348 347 } 348 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 349 349 #endif /* CONFIG_CIFS_POSIX */ 350 350 rc = 0; 351 351 if (CIFS_I(inode)->cifsAttrs & ATTR_COMPRESSED) {
+8
fs/cifs/link.c
··· 286 286 return rc; 287 287 } 288 288 289 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 289 290 /* 290 291 * SMB 1.0 Protocol specific functions 291 292 */ ··· 369 368 CIFSSMBClose(xid, tcon, fid.netfid); 370 369 return rc; 371 370 } 371 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 372 372 373 373 /* 374 374 * SMB 2.1/SMB3 Protocol specific functions ··· 534 532 goto cifs_hl_exit; 535 533 } 536 534 535 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 537 536 if (tcon->unix_ext) 538 537 rc = CIFSUnixCreateHardLink(xid, tcon, from_name, to_name, 539 538 cifs_sb->local_nls, 540 539 cifs_remap(cifs_sb)); 541 540 else { 541 + #else 542 + { 543 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 542 544 server = tcon->ses->server; 543 545 if (!server->ops->create_hardlink) { 544 546 rc = -ENOSYS; ··· 710 704 /* BB what if DFS and this volume is on different share? BB */ 711 705 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) 712 706 rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname); 707 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 713 708 else if (pTcon->unix_ext) 714 709 rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, 715 710 cifs_sb->local_nls, 716 711 cifs_remap(cifs_sb)); 712 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 717 713 /* else 718 714 rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName, 719 715 cifs_sb_target->local_nls); */
+13 -20
fs/cifs/misc.c
··· 69 69 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL); 70 70 if (ret_buf) { 71 71 atomic_inc(&sesInfoAllocCount); 72 + spin_lock_init(&ret_buf->ses_lock); 72 73 ret_buf->ses_status = SES_NEW; 73 74 ++ret_buf->ses_count; 74 75 INIT_LIST_HEAD(&ret_buf->smb_ses_list); ··· 127 126 atomic_inc(&tconInfoAllocCount); 128 127 ret_buf->status = TID_NEW; 129 128 ++ret_buf->tc_count; 129 + spin_lock_init(&ret_buf->tc_lock); 130 130 INIT_LIST_HEAD(&ret_buf->openFileList); 131 131 INIT_LIST_HEAD(&ret_buf->tcon_list); 132 132 spin_lock_init(&ret_buf->open_file_lock); ··· 174 172 /* clear the first few header bytes */ 175 173 /* for most paths, more is cleared in header_assemble */ 176 174 memset(ret_buf, 0, buf_size + 3); 177 - atomic_inc(&bufAllocCount); 175 + atomic_inc(&buf_alloc_count); 178 176 #ifdef CONFIG_CIFS_STATS2 179 - atomic_inc(&totBufAllocCount); 177 + atomic_inc(&total_buf_alloc_count); 180 178 #endif /* CONFIG_CIFS_STATS2 */ 181 179 182 180 return ret_buf; ··· 191 189 } 192 190 mempool_free(buf_to_free, cifs_req_poolp); 193 191 194 - atomic_dec(&bufAllocCount); 192 + atomic_dec(&buf_alloc_count); 195 193 return; 196 194 } 197 195 ··· 207 205 ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS); 208 206 /* No need to clear memory here, cleared in header assemble */ 209 207 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/ 210 - atomic_inc(&smBufAllocCount); 208 + atomic_inc(&small_buf_alloc_count); 211 209 #ifdef CONFIG_CIFS_STATS2 212 - atomic_inc(&totSmBufAllocCount); 210 + atomic_inc(&total_small_buf_alloc_count); 213 211 #endif /* CONFIG_CIFS_STATS2 */ 214 212 215 213 return ret_buf; ··· 225 223 } 226 224 mempool_free(buf_to_free, cifs_sm_req_poolp); 227 225 228 - atomic_dec(&smBufAllocCount); 226 + atomic_dec(&small_buf_alloc_count); 229 227 return; 230 228 } 231 229 ··· 402 400 { 403 401 struct smb_hdr *buf = (struct smb_hdr *)buffer; 404 402 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; 405 - struct list_head *tmp, *tmp1, 
*tmp2; 406 403 struct cifs_ses *ses; 407 404 struct cifs_tcon *tcon; 408 405 struct cifsInodeInfo *pCifsInode; ··· 468 467 469 468 /* look up tcon based on tid & uid */ 470 469 spin_lock(&cifs_tcp_ses_lock); 471 - list_for_each(tmp, &srv->smb_ses_list) { 472 - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); 473 - list_for_each(tmp1, &ses->tcon_list) { 474 - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); 470 + list_for_each_entry(ses, &srv->smb_ses_list, smb_ses_list) { 471 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 475 472 if (tcon->tid != buf->Tid) 476 473 continue; 477 474 478 475 cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks); 479 476 spin_lock(&tcon->open_file_lock); 480 - list_for_each(tmp2, &tcon->openFileList) { 481 - netfile = list_entry(tmp2, struct cifsFileInfo, 482 - tlist); 477 + list_for_each_entry(netfile, &tcon->openFileList, tlist) { 483 478 if (pSMB->Fid != netfile->fid.netfid) 484 479 continue; 485 480 ··· 760 763 cifs_close_all_deferred_files(struct cifs_tcon *tcon) 761 764 { 762 765 struct cifsFileInfo *cfile; 763 - struct list_head *tmp; 764 766 struct file_list *tmp_list, *tmp_next_list; 765 767 struct list_head file_head; 766 768 767 769 INIT_LIST_HEAD(&file_head); 768 770 spin_lock(&tcon->open_file_lock); 769 - list_for_each(tmp, &tcon->openFileList) { 770 - cfile = list_entry(tmp, struct cifsFileInfo, tlist); 771 + list_for_each_entry(cfile, &tcon->openFileList, tlist) { 771 772 if (delayed_work_pending(&cfile->deferred)) { 772 773 if (cancel_delayed_work(&cfile->deferred)) { 773 774 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC); ··· 788 793 cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path) 789 794 { 790 795 struct cifsFileInfo *cfile; 791 - struct list_head *tmp; 792 796 struct file_list *tmp_list, *tmp_next_list; 793 797 struct list_head file_head; 794 798 void *page; ··· 796 802 INIT_LIST_HEAD(&file_head); 797 803 page = alloc_dentry_path(); 798 804 
spin_lock(&tcon->open_file_lock); 799 - list_for_each(tmp, &tcon->openFileList) { 800 - cfile = list_entry(tmp, struct cifsFileInfo, tlist); 805 + list_for_each_entry(cfile, &tcon->openFileList, tlist) { 801 806 full_path = build_path_from_dentry(cfile->dentry, page); 802 807 if (strstr(full_path, path)) { 803 808 if (delayed_work_pending(&cfile->deferred)) {
+1 -1
fs/cifs/netmisc.c
··· 911 911 unsigned int 912 912 smbCalcSize(void *buf, struct TCP_Server_Info *server) 913 913 { 914 - struct smb_hdr *ptr = (struct smb_hdr *)buf; 914 + struct smb_hdr *ptr = buf; 915 915 return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + 916 916 2 /* size of the bcc field */ + get_bcc(ptr)); 917 917 }
+4 -1
fs/cifs/sess.c
··· 499 499 return rc; 500 500 } 501 501 502 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 502 503 static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, 503 504 struct TCP_Server_Info *server, 504 505 SESSION_SETUP_ANDX *pSMB) ··· 591 590 592 591 *pbcc_area = bcc_ptr; 593 592 } 594 - 595 593 596 594 static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, 597 595 const struct nls_table *nls_cp) ··· 753 753 for it later, but it is not very important */ 754 754 cifs_dbg(FYI, "ascii: bytes left %d\n", bleft); 755 755 } 756 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 756 757 757 758 int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, 758 759 struct cifs_ses *ses) ··· 1171 1170 struct kvec iov[3]; 1172 1171 }; 1173 1172 1173 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 1174 1174 static int 1175 1175 sess_alloc_buffer(struct sess_data *sess_data, int wct) 1176 1176 { ··· 1848 1846 kfree(sess_data); 1849 1847 return rc; 1850 1848 } 1849 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */
+5 -5
fs/cifs/smb1ops.c
··· 92 92 struct smb_hdr *buf = (struct smb_hdr *)buffer; 93 93 struct mid_q_entry *mid; 94 94 95 - spin_lock(&GlobalMid_Lock); 95 + spin_lock(&server->mid_lock); 96 96 list_for_each_entry(mid, &server->pending_mid_q, qhead) { 97 97 if (compare_mid(mid->mid, buf) && 98 98 mid->mid_state == MID_REQUEST_SUBMITTED && 99 99 le16_to_cpu(mid->command) == buf->Command) { 100 100 kref_get(&mid->refcount); 101 - spin_unlock(&GlobalMid_Lock); 101 + spin_unlock(&server->mid_lock); 102 102 return mid; 103 103 } 104 104 } 105 - spin_unlock(&GlobalMid_Lock); 105 + spin_unlock(&server->mid_lock); 106 106 return NULL; 107 107 } 108 108 ··· 166 166 __u16 last_mid, cur_mid; 167 167 bool collision, reconnect = false; 168 168 169 - spin_lock(&GlobalMid_Lock); 169 + spin_lock(&server->mid_lock); 170 170 171 171 /* mid is 16 bit only for CIFS/SMB */ 172 172 cur_mid = (__u16)((server->CurrentMid) & 0xffff); ··· 225 225 } 226 226 cur_mid++; 227 227 } 228 - spin_unlock(&GlobalMid_Lock); 228 + spin_unlock(&server->mid_lock); 229 229 230 230 if (reconnect) { 231 231 cifs_signal_cifsd_for_reconnect(server, false);
+29 -20
fs/cifs/smb2misc.c
··· 132 132 } 133 133 134 134 int 135 - smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *srvr) 135 + smb2_check_message(char *buf, unsigned int len, struct TCP_Server_Info *server) 136 136 { 137 137 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 138 138 struct smb2_pdu *pdu = (struct smb2_pdu *)shdr; 139 - __u64 mid; 140 - __u32 clc_len; /* calculated length */ 141 - int command; 142 - int pdu_size = sizeof(struct smb2_pdu); 143 139 int hdr_size = sizeof(struct smb2_hdr); 140 + int pdu_size = sizeof(struct smb2_pdu); 141 + int command; 142 + __u32 calc_len; /* calculated length */ 143 + __u64 mid; 144 144 145 145 /* 146 146 * Add function to do table lookup of StructureSize by command ··· 154 154 155 155 /* decrypt frame now that it is completely read in */ 156 156 spin_lock(&cifs_tcp_ses_lock); 157 - list_for_each_entry(iter, &srvr->smb_ses_list, smb_ses_list) { 157 + list_for_each_entry(iter, &server->smb_ses_list, smb_ses_list) { 158 158 if (iter->Suid == le64_to_cpu(thdr->SessionId)) { 159 159 ses = iter; 160 160 break; ··· 221 221 } 222 222 } 223 223 224 - clc_len = smb2_calc_size(buf, srvr); 224 + calc_len = smb2_calc_size(buf, server); 225 225 226 - if (shdr->Command == SMB2_NEGOTIATE) 227 - clc_len += get_neg_ctxt_len(shdr, len, clc_len); 226 + /* For SMB2_IOCTL, OutputOffset and OutputLength are optional, so might 227 + * be 0, and not a real miscalculation */ 228 + if (command == SMB2_IOCTL_HE && calc_len == 0) 229 + return 0; 228 230 229 - if (len != clc_len) { 230 - cifs_dbg(FYI, "Calculated size %u length %u mismatch mid %llu\n", 231 - clc_len, len, mid); 231 + if (command == SMB2_NEGOTIATE_HE) 232 + calc_len += get_neg_ctxt_len(shdr, len, calc_len); 233 + 234 + if (len != calc_len) { 232 235 /* create failed on symlink */ 233 236 if (command == SMB2_CREATE_HE && 234 237 shdr->Status == STATUS_STOPPED_ON_SYMLINK) 235 238 return 0; 236 239 /* Windows 7 server returns 24 bytes more */ 237 - if (clc_len + 24 == len && command == 
SMB2_OPLOCK_BREAK_HE) 240 + if (calc_len + 24 == len && command == SMB2_OPLOCK_BREAK_HE) 238 241 return 0; 239 242 /* server can return one byte more due to implied bcc[0] */ 240 - if (clc_len == len + 1) 243 + if (calc_len == len + 1) 241 244 return 0; 242 245 243 246 /* 244 247 * Some windows servers (win2016) will pad also the final 245 248 * PDU in a compound to 8 bytes. 246 249 */ 247 - if (((clc_len + 7) & ~7) == len) 250 + if (((calc_len + 7) & ~7) == len) 248 251 return 0; 249 252 250 253 /* ··· 256 253 * SMB2/SMB3 frame length (header + smb2 response specific data) 257 254 * Some windows servers also pad up to 8 bytes when compounding. 258 255 */ 259 - if (clc_len < len) 256 + if (calc_len < len) 260 257 return 0; 261 258 262 - pr_warn_once( 263 - "srv rsp too short, len %d not %d. cmd:%d mid:%llu\n", 264 - len, clc_len, command, mid); 259 + /* Only log a message if len was really miscalculated */ 260 + if (unlikely(cifsFYI)) 261 + cifs_dbg(FYI, "Server response too short: calculated " 262 + "length %u doesn't match read length %u (cmd=%d, mid=%llu)\n", 263 + calc_len, len, command, mid); 264 + else 265 + pr_warn("Server response too short: calculated length " 266 + "%u doesn't match read length %u (cmd=%d, mid=%llu)\n", 267 + calc_len, len, command, mid); 265 268 266 269 return 1; 267 270 } ··· 411 402 unsigned int 412 403 smb2_calc_size(void *buf, struct TCP_Server_Info *srvr) 413 404 { 414 - struct smb2_pdu *pdu = (struct smb2_pdu *)buf; 405 + struct smb2_pdu *pdu = buf; 415 406 struct smb2_hdr *shdr = &pdu->hdr; 416 407 int offset; /* the offset from the beginning of SMB to data area */ 417 408 int data_length; /* the length of the variable length data area */
+32 -29
fs/cifs/smb2ops.c
··· 126 126 optype, scredits, add); 127 127 } 128 128 129 - spin_lock(&cifs_tcp_ses_lock); 129 + spin_lock(&server->srv_lock); 130 130 if (server->tcpStatus == CifsNeedReconnect 131 131 || server->tcpStatus == CifsExiting) { 132 - spin_unlock(&cifs_tcp_ses_lock); 132 + spin_unlock(&server->srv_lock); 133 133 return; 134 134 } 135 - spin_unlock(&cifs_tcp_ses_lock); 135 + spin_unlock(&server->srv_lock); 136 136 137 137 switch (rc) { 138 138 case -1: ··· 218 218 spin_lock(&server->req_lock); 219 219 } else { 220 220 spin_unlock(&server->req_lock); 221 - spin_lock(&cifs_tcp_ses_lock); 221 + spin_lock(&server->srv_lock); 222 222 if (server->tcpStatus == CifsExiting) { 223 - spin_unlock(&cifs_tcp_ses_lock); 223 + spin_unlock(&server->srv_lock); 224 224 return -ENOENT; 225 225 } 226 - spin_unlock(&cifs_tcp_ses_lock); 226 + spin_unlock(&server->srv_lock); 227 227 228 228 spin_lock(&server->req_lock); 229 229 scredits = server->credits; ··· 319 319 { 320 320 __u64 mid; 321 321 /* for SMB2 we need the current value */ 322 - spin_lock(&GlobalMid_Lock); 322 + spin_lock(&server->mid_lock); 323 323 mid = server->CurrentMid++; 324 - spin_unlock(&GlobalMid_Lock); 324 + spin_unlock(&server->mid_lock); 325 325 return mid; 326 326 } 327 327 328 328 static void 329 329 smb2_revert_current_mid(struct TCP_Server_Info *server, const unsigned int val) 330 330 { 331 - spin_lock(&GlobalMid_Lock); 331 + spin_lock(&server->mid_lock); 332 332 if (server->CurrentMid >= val) 333 333 server->CurrentMid -= val; 334 - spin_unlock(&GlobalMid_Lock); 334 + spin_unlock(&server->mid_lock); 335 335 } 336 336 337 337 static struct mid_q_entry * ··· 346 346 return NULL; 347 347 } 348 348 349 - spin_lock(&GlobalMid_Lock); 349 + spin_lock(&server->mid_lock); 350 350 list_for_each_entry(mid, &server->pending_mid_q, qhead) { 351 351 if ((mid->mid == wire_mid) && 352 352 (mid->mid_state == MID_REQUEST_SUBMITTED) && ··· 356 356 list_del_init(&mid->qhead); 357 357 mid->mid_flags |= MID_DELETED; 358 358 } 359 - 
spin_unlock(&GlobalMid_Lock); 359 + spin_unlock(&server->mid_lock); 360 360 return mid; 361 361 } 362 362 } 363 - spin_unlock(&GlobalMid_Lock); 363 + spin_unlock(&server->mid_lock); 364 364 return NULL; 365 365 } 366 366 ··· 403 403 { 404 404 int rc; 405 405 406 - spin_lock(&GlobalMid_Lock); 406 + spin_lock(&server->mid_lock); 407 407 server->CurrentMid = 0; 408 - spin_unlock(&GlobalMid_Lock); 408 + spin_unlock(&server->mid_lock); 409 409 rc = SMB2_negotiate(xid, ses, server); 410 410 /* BB we probably don't need to retry with modern servers */ 411 411 if (rc == -EAGAIN) ··· 1145 1145 size_t name_len, value_len, user_name_len; 1146 1146 1147 1147 while (src_size > 0) { 1148 - name = &src->ea_data[0]; 1149 1148 name_len = (size_t)src->ea_name_length; 1150 - value = &src->ea_data[src->ea_name_length + 1]; 1151 1149 value_len = (size_t)le16_to_cpu(src->ea_value_length); 1152 1150 1153 1151 if (name_len == 0) ··· 1156 1158 rc = -EIO; 1157 1159 goto out; 1158 1160 } 1161 + 1162 + name = &src->ea_data[0]; 1163 + value = &src->ea_data[src->ea_name_length + 1]; 1159 1164 1160 1165 if (ea_name) { 1161 1166 if (ea_name_len == name_len && ··· 2575 2574 smb2_is_network_name_deleted(char *buf, struct TCP_Server_Info *server) 2576 2575 { 2577 2576 struct smb2_hdr *shdr = (struct smb2_hdr *)buf; 2578 - struct list_head *tmp, *tmp1; 2579 2577 struct cifs_ses *ses; 2580 2578 struct cifs_tcon *tcon; 2581 2579 ··· 2582 2582 return; 2583 2583 2584 2584 spin_lock(&cifs_tcp_ses_lock); 2585 - list_for_each(tmp, &server->smb_ses_list) { 2586 - ses = list_entry(tmp, struct cifs_ses, smb_ses_list); 2587 - list_for_each(tmp1, &ses->tcon_list) { 2588 - tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); 2585 + list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 2586 + list_for_each_entry(tcon, &ses->tcon_list, tcon_list) { 2589 2587 if (tcon->tid == le32_to_cpu(shdr->Id.SyncId.TreeId)) { 2588 + spin_lock(&tcon->tc_lock); 2590 2589 tcon->need_reconnect = true; 2590 + 
spin_unlock(&tcon->tc_lock); 2591 2591 spin_unlock(&cifs_tcp_ses_lock); 2592 2592 pr_warn_once("Server share %s deleted.\n", 2593 2593 tcon->treeName); ··· 4563 4563 list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { 4564 4564 list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { 4565 4565 if (ses->Suid == ses_id) { 4566 + spin_lock(&ses->ses_lock); 4566 4567 ses_enc_key = enc ? ses->smb3encryptionkey : 4567 4568 ses->smb3decryptionkey; 4568 4569 memcpy(key, ses_enc_key, SMB3_ENC_DEC_KEY_SIZE); 4570 + spin_unlock(&ses->ses_lock); 4569 4571 spin_unlock(&cifs_tcp_ses_lock); 4570 4572 return 0; 4571 4573 } ··· 5082 5080 5083 5081 mid->callback(mid); 5084 5082 } else { 5085 - spin_lock(&cifs_tcp_ses_lock); 5086 - spin_lock(&GlobalMid_Lock); 5083 + spin_lock(&dw->server->srv_lock); 5087 5084 if (dw->server->tcpStatus == CifsNeedReconnect) { 5085 + spin_lock(&dw->server->mid_lock); 5088 5086 mid->mid_state = MID_RETRY_NEEDED; 5089 - spin_unlock(&GlobalMid_Lock); 5090 - spin_unlock(&cifs_tcp_ses_lock); 5087 + spin_unlock(&dw->server->mid_lock); 5088 + spin_unlock(&dw->server->srv_lock); 5091 5089 mid->callback(mid); 5092 5090 } else { 5091 + spin_lock(&dw->server->mid_lock); 5093 5092 mid->mid_state = MID_REQUEST_SUBMITTED; 5094 5093 mid->mid_flags &= ~(MID_DELETED); 5095 5094 list_add_tail(&mid->qhead, 5096 5095 &dw->server->pending_mid_q); 5097 - spin_unlock(&GlobalMid_Lock); 5098 - spin_unlock(&cifs_tcp_ses_lock); 5096 + spin_unlock(&dw->server->mid_lock); 5097 + spin_unlock(&dw->server->srv_lock); 5099 5098 } 5100 5099 } 5101 - cifs_mid_q_entry_release(mid); 5100 + release_mid(mid); 5102 5101 } 5103 5102 5104 5103 free_pages:
+16 -16
fs/cifs/smb2pdu.c
··· 162 162 if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL) 163 163 return 0; 164 164 165 - spin_lock(&cifs_tcp_ses_lock); 165 + spin_lock(&tcon->tc_lock); 166 166 if (tcon->status == TID_EXITING) { 167 167 /* 168 168 * only tree disconnect, open, and write, ··· 172 172 if ((smb2_command != SMB2_WRITE) && 173 173 (smb2_command != SMB2_CREATE) && 174 174 (smb2_command != SMB2_TREE_DISCONNECT)) { 175 - spin_unlock(&cifs_tcp_ses_lock); 175 + spin_unlock(&tcon->tc_lock); 176 176 cifs_dbg(FYI, "can not send cmd %d while umounting\n", 177 177 smb2_command); 178 178 return -ENODEV; 179 179 } 180 180 } 181 - spin_unlock(&cifs_tcp_ses_lock); 181 + spin_unlock(&tcon->tc_lock); 182 182 if ((!tcon->ses) || (tcon->ses->ses_status == SES_EXITING) || 183 183 (!tcon->ses->server) || !server) 184 184 return -EIO; ··· 217 217 } 218 218 219 219 /* are we still trying to reconnect? */ 220 - spin_lock(&cifs_tcp_ses_lock); 220 + spin_lock(&server->srv_lock); 221 221 if (server->tcpStatus != CifsNeedReconnect) { 222 - spin_unlock(&cifs_tcp_ses_lock); 222 + spin_unlock(&server->srv_lock); 223 223 break; 224 224 } 225 - spin_unlock(&cifs_tcp_ses_lock); 225 + spin_unlock(&server->srv_lock); 226 226 227 227 if (retries && --retries) 228 228 continue; ··· 256 256 * and the server never sends an answer the socket will be closed 257 257 * and tcpStatus set to reconnect. 
258 258 */ 259 - spin_lock(&cifs_tcp_ses_lock); 259 + spin_lock(&server->srv_lock); 260 260 if (server->tcpStatus == CifsNeedReconnect) { 261 - spin_unlock(&cifs_tcp_ses_lock); 261 + spin_unlock(&server->srv_lock); 262 262 rc = -EHOSTDOWN; 263 263 goto out; 264 264 } 265 - spin_unlock(&cifs_tcp_ses_lock); 265 + spin_unlock(&server->srv_lock); 266 266 267 267 /* 268 268 * need to prevent multiple threads trying to simultaneously ··· 354 354 void *buf, 355 355 unsigned int *total_len) 356 356 { 357 - struct smb2_pdu *spdu = (struct smb2_pdu *)buf; 357 + struct smb2_pdu *spdu = buf; 358 358 /* lookup word count ie StructureSize from table */ 359 359 __u16 parmsize = smb2_req_struct_sizes[le16_to_cpu(smb2_command)]; 360 360 ··· 3776 3776 credits.instance = server->reconnect_instance; 3777 3777 } 3778 3778 3779 - DeleteMidQEntry(mid); 3779 + release_mid(mid); 3780 3780 add_credits(server, &credits, CIFS_ECHO_OP); 3781 3781 } 3782 3782 ··· 3911 3911 3912 3912 cifs_dbg(FYI, "In echo request for conn_id %lld\n", server->conn_id); 3913 3913 3914 - spin_lock(&cifs_tcp_ses_lock); 3914 + spin_lock(&server->srv_lock); 3915 3915 if (server->ops->need_neg && 3916 3916 server->ops->need_neg(server)) { 3917 - spin_unlock(&cifs_tcp_ses_lock); 3917 + spin_unlock(&server->srv_lock); 3918 3918 /* No need to send echo on newly established connections */ 3919 3919 mod_delayed_work(cifsiod_wq, &server->reconnect, 0); 3920 3920 return rc; 3921 3921 } 3922 - spin_unlock(&cifs_tcp_ses_lock); 3922 + spin_unlock(&server->srv_lock); 3923 3923 3924 3924 rc = smb2_plain_req_init(SMB2_ECHO, NULL, server, 3925 3925 (void **)&req, &total_len); ··· 4201 4201 rdata->offset, rdata->got_bytes); 4202 4202 4203 4203 queue_work(cifsiod_wq, &rdata->work); 4204 - DeleteMidQEntry(mid); 4204 + release_mid(mid); 4205 4205 add_credits(server, &credits, 0); 4206 4206 } 4207 4207 ··· 4440 4440 wdata->offset, wdata->bytes); 4441 4441 4442 4442 queue_work(cifsiod_wq, &wdata->work); 4443 - DeleteMidQEntry(mid); 4443 
+ release_mid(mid); 4444 4444 add_credits(server, &credits, 0); 4445 4445 } 4446 4446
+20 -18
fs/cifs/smb2transport.c
··· 640 640 641 641 if (!is_signed) 642 642 return 0; 643 - spin_lock(&cifs_tcp_ses_lock); 643 + spin_lock(&server->srv_lock); 644 644 if (server->ops->need_neg && 645 645 server->ops->need_neg(server)) { 646 - spin_unlock(&cifs_tcp_ses_lock); 646 + spin_unlock(&server->srv_lock); 647 647 return 0; 648 648 } 649 - spin_unlock(&cifs_tcp_ses_lock); 649 + spin_unlock(&server->srv_lock); 650 650 if (!is_binding && !server->session_estab) { 651 651 strncpy(shdr->Signature, "BSRSPYL", 8); 652 652 return 0; ··· 750 750 temp->callback = cifs_wake_up_task; 751 751 temp->callback_data = current; 752 752 753 - atomic_inc(&midCount); 753 + atomic_inc(&mid_count); 754 754 temp->mid_state = MID_REQUEST_ALLOCATED; 755 755 trace_smb3_cmd_enter(le32_to_cpu(shdr->Id.SyncId.TreeId), 756 756 le64_to_cpu(shdr->SessionId), ··· 762 762 smb2_get_mid_entry(struct cifs_ses *ses, struct TCP_Server_Info *server, 763 763 struct smb2_hdr *shdr, struct mid_q_entry **mid) 764 764 { 765 - spin_lock(&cifs_tcp_ses_lock); 765 + spin_lock(&server->srv_lock); 766 766 if (server->tcpStatus == CifsExiting) { 767 - spin_unlock(&cifs_tcp_ses_lock); 767 + spin_unlock(&server->srv_lock); 768 768 return -ENOENT; 769 769 } 770 770 771 771 if (server->tcpStatus == CifsNeedReconnect) { 772 - spin_unlock(&cifs_tcp_ses_lock); 772 + spin_unlock(&server->srv_lock); 773 773 cifs_dbg(FYI, "tcp session dead - return to caller to retry\n"); 774 774 return -EAGAIN; 775 775 } 776 776 777 777 if (server->tcpStatus == CifsNeedNegotiate && 778 778 shdr->Command != SMB2_NEGOTIATE) { 779 - spin_unlock(&cifs_tcp_ses_lock); 779 + spin_unlock(&server->srv_lock); 780 780 return -EAGAIN; 781 781 } 782 + spin_unlock(&server->srv_lock); 782 783 784 + spin_lock(&ses->ses_lock); 783 785 if (ses->ses_status == SES_NEW) { 784 786 if ((shdr->Command != SMB2_SESSION_SETUP) && 785 787 (shdr->Command != SMB2_NEGOTIATE)) { 786 - spin_unlock(&cifs_tcp_ses_lock); 788 + spin_unlock(&ses->ses_lock); 787 789 return -EAGAIN; 788 790 } 789 791 /* 
else ok - we are setting up session */ ··· 793 791 794 792 if (ses->ses_status == SES_EXITING) { 795 793 if (shdr->Command != SMB2_LOGOFF) { 796 - spin_unlock(&cifs_tcp_ses_lock); 794 + spin_unlock(&ses->ses_lock); 797 795 return -EAGAIN; 798 796 } 799 797 /* else ok - we are shutting down the session */ 800 798 } 801 - spin_unlock(&cifs_tcp_ses_lock); 799 + spin_unlock(&ses->ses_lock); 802 800 803 801 *mid = smb2_mid_entry_alloc(shdr, server); 804 802 if (*mid == NULL) 805 803 return -ENOMEM; 806 - spin_lock(&GlobalMid_Lock); 804 + spin_lock(&server->mid_lock); 807 805 list_add_tail(&(*mid)->qhead, &server->pending_mid_q); 808 - spin_unlock(&GlobalMid_Lock); 806 + spin_unlock(&server->mid_lock); 809 807 810 808 return 0; 811 809 } ··· 856 854 rc = smb2_sign_rqst(rqst, server); 857 855 if (rc) { 858 856 revert_current_mid_from_hdr(server, shdr); 859 - cifs_delete_mid(mid); 857 + delete_mid(mid); 860 858 return ERR_PTR(rc); 861 859 } 862 860 ··· 871 869 (struct smb2_hdr *)rqst->rq_iov[0].iov_base; 872 870 struct mid_q_entry *mid; 873 871 874 - spin_lock(&cifs_tcp_ses_lock); 872 + spin_lock(&server->srv_lock); 875 873 if (server->tcpStatus == CifsNeedNegotiate && 876 874 shdr->Command != SMB2_NEGOTIATE) { 877 - spin_unlock(&cifs_tcp_ses_lock); 875 + spin_unlock(&server->srv_lock); 878 876 return ERR_PTR(-EAGAIN); 879 877 } 880 - spin_unlock(&cifs_tcp_ses_lock); 878 + spin_unlock(&server->srv_lock); 881 879 882 880 smb2_seq_num_into_buf(server, shdr); 883 881 ··· 890 888 rc = smb2_sign_rqst(rqst, server); 891 889 if (rc) { 892 890 revert_current_mid_from_hdr(server, shdr); 893 - DeleteMidQEntry(mid); 891 + release_mid(mid); 894 892 return ERR_PTR(rc); 895 893 } 896 894
+255 -79
fs/cifs/transport.c
··· 21 21 #include <asm/processor.h> 22 22 #include <linux/mempool.h> 23 23 #include <linux/sched/signal.h> 24 + #include <linux/task_io_accounting_ops.h> 24 25 #include "cifspdu.h" 25 26 #include "cifsglob.h" 26 27 #include "cifsproto.h" ··· 38 37 wake_up_process(mid->callback_data); 39 38 } 40 39 41 - struct mid_q_entry * 42 - AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) 40 + static struct mid_q_entry * 41 + alloc_mid(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server) 43 42 { 44 43 struct mid_q_entry *temp; 45 44 46 45 if (server == NULL) { 47 - cifs_dbg(VFS, "Null TCP session in AllocMidQEntry\n"); 46 + cifs_dbg(VFS, "%s: null TCP session\n", __func__); 48 47 return NULL; 49 48 } 50 49 ··· 69 68 temp->callback = cifs_wake_up_task; 70 69 temp->callback_data = current; 71 70 72 - atomic_inc(&midCount); 71 + atomic_inc(&mid_count); 73 72 temp->mid_state = MID_REQUEST_ALLOCATED; 74 73 return temp; 75 74 } 76 75 77 - static void _cifs_mid_q_entry_release(struct kref *refcount) 76 + static void __release_mid(struct kref *refcount) 78 77 { 79 78 struct mid_q_entry *midEntry = 80 79 container_of(refcount, struct mid_q_entry, refcount); ··· 92 91 server->ops->handle_cancelled_mid(midEntry, server); 93 92 94 93 midEntry->mid_state = MID_FREE; 95 - atomic_dec(&midCount); 94 + atomic_dec(&mid_count); 96 95 if (midEntry->large_buf) 97 96 cifs_buf_release(midEntry->resp_buf); 98 97 else ··· 153 152 mempool_free(midEntry, cifs_mid_poolp); 154 153 } 155 154 156 - void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) 155 + void release_mid(struct mid_q_entry *mid) 157 156 { 158 - spin_lock(&GlobalMid_Lock); 159 - kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); 160 - spin_unlock(&GlobalMid_Lock); 161 - } 157 + struct TCP_Server_Info *server = mid->server; 162 158 163 - void DeleteMidQEntry(struct mid_q_entry *midEntry) 164 - { 165 - cifs_mid_q_entry_release(midEntry); 159 + spin_lock(&server->mid_lock); 160 + 
kref_put(&mid->refcount, __release_mid); 161 + spin_unlock(&server->mid_lock); 166 162 } 167 163 168 164 void 169 - cifs_delete_mid(struct mid_q_entry *mid) 165 + delete_mid(struct mid_q_entry *mid) 170 166 { 171 - spin_lock(&GlobalMid_Lock); 167 + spin_lock(&mid->server->mid_lock); 172 168 if (!(mid->mid_flags & MID_DELETED)) { 173 169 list_del_init(&mid->qhead); 174 170 mid->mid_flags |= MID_DELETED; 175 171 } 176 - spin_unlock(&GlobalMid_Lock); 172 + spin_unlock(&mid->server->mid_lock); 177 173 178 - DeleteMidQEntry(mid); 174 + release_mid(mid); 179 175 } 180 176 181 177 /* ··· 575 577 } else { 576 578 spin_unlock(&server->req_lock); 577 579 578 - spin_lock(&cifs_tcp_ses_lock); 580 + spin_lock(&server->srv_lock); 579 581 if (server->tcpStatus == CifsExiting) { 580 - spin_unlock(&cifs_tcp_ses_lock); 582 + spin_unlock(&server->srv_lock); 581 583 return -ENOENT; 582 584 } 583 - spin_unlock(&cifs_tcp_ses_lock); 585 + spin_unlock(&server->srv_lock); 584 586 585 587 /* 586 588 * For normal commands, reserve the last MAX_COMPOUND ··· 723 725 static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, 724 726 struct mid_q_entry **ppmidQ) 725 727 { 726 - spin_lock(&cifs_tcp_ses_lock); 728 + spin_lock(&ses->ses_lock); 727 729 if (ses->ses_status == SES_NEW) { 728 730 if ((in_buf->Command != SMB_COM_SESSION_SETUP_ANDX) && 729 731 (in_buf->Command != SMB_COM_NEGOTIATE)) { 730 - spin_unlock(&cifs_tcp_ses_lock); 732 + spin_unlock(&ses->ses_lock); 731 733 return -EAGAIN; 732 734 } 733 735 /* else ok - we are setting up session */ ··· 736 738 if (ses->ses_status == SES_EXITING) { 737 739 /* check if SMB session is bad because we are setting it up */ 738 740 if (in_buf->Command != SMB_COM_LOGOFF_ANDX) { 739 - spin_unlock(&cifs_tcp_ses_lock); 741 + spin_unlock(&ses->ses_lock); 740 742 return -EAGAIN; 741 743 } 742 744 /* else ok - we are shutting down session */ 743 745 } 744 - spin_unlock(&cifs_tcp_ses_lock); 746 + spin_unlock(&ses->ses_lock); 745 747 746 - *ppmidQ = 
AllocMidQEntry(in_buf, ses->server); 748 + *ppmidQ = alloc_mid(in_buf, ses->server); 747 749 if (*ppmidQ == NULL) 748 750 return -ENOMEM; 749 - spin_lock(&GlobalMid_Lock); 751 + spin_lock(&ses->server->mid_lock); 750 752 list_add_tail(&(*ppmidQ)->qhead, &ses->server->pending_mid_q); 751 - spin_unlock(&GlobalMid_Lock); 753 + spin_unlock(&ses->server->mid_lock); 752 754 return 0; 753 755 } 754 756 ··· 780 782 if (server->sign) 781 783 hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; 782 784 783 - mid = AllocMidQEntry(hdr, server); 785 + mid = alloc_mid(hdr, server); 784 786 if (mid == NULL) 785 787 return ERR_PTR(-ENOMEM); 786 788 787 789 rc = cifs_sign_rqst(rqst, server, &mid->sequence_number); 788 790 if (rc) { 789 - DeleteMidQEntry(mid); 791 + release_mid(mid); 790 792 return ERR_PTR(rc); 791 793 } 792 794 ··· 847 849 mid->mid_state = MID_REQUEST_SUBMITTED; 848 850 849 851 /* put it on the pending_mid_q */ 850 - spin_lock(&GlobalMid_Lock); 852 + spin_lock(&server->mid_lock); 851 853 list_add_tail(&mid->qhead, &server->pending_mid_q); 852 - spin_unlock(&GlobalMid_Lock); 854 + spin_unlock(&server->mid_lock); 853 855 854 856 /* 855 857 * Need to store the time in mid before calling I/O. 
For call_async, ··· 863 865 if (rc < 0) { 864 866 revert_current_mid(server, mid->credits); 865 867 server->sequence_number -= 2; 866 - cifs_delete_mid(mid); 868 + delete_mid(mid); 867 869 } 868 870 869 871 cifs_server_unlock(server); ··· 910 912 cifs_dbg(FYI, "%s: cmd=%d mid=%llu state=%d\n", 911 913 __func__, le16_to_cpu(mid->command), mid->mid, mid->mid_state); 912 914 913 - spin_lock(&GlobalMid_Lock); 915 + spin_lock(&server->mid_lock); 914 916 switch (mid->mid_state) { 915 917 case MID_RESPONSE_RECEIVED: 916 - spin_unlock(&GlobalMid_Lock); 918 + spin_unlock(&server->mid_lock); 917 919 return rc; 918 920 case MID_RETRY_NEEDED: 919 921 rc = -EAGAIN; ··· 933 935 __func__, mid->mid, mid->mid_state); 934 936 rc = -EIO; 935 937 } 936 - spin_unlock(&GlobalMid_Lock); 938 + spin_unlock(&server->mid_lock); 937 939 938 - DeleteMidQEntry(mid); 940 + release_mid(mid); 939 941 return rc; 940 942 } 941 943 ··· 995 997 return ERR_PTR(rc); 996 998 rc = cifs_sign_rqst(rqst, ses->server, &mid->sequence_number); 997 999 if (rc) { 998 - cifs_delete_mid(mid); 1000 + delete_mid(mid); 999 1001 return ERR_PTR(rc); 1000 1002 } 1001 1003 return mid; ··· 1024 1026 cifs_cancelled_callback(struct mid_q_entry *mid) 1025 1027 { 1026 1028 cifs_compound_callback(mid); 1027 - DeleteMidQEntry(mid); 1029 + release_mid(mid); 1028 1030 } 1029 1031 1030 1032 /* ··· 1076 1078 return -EIO; 1077 1079 } 1078 1080 1079 - spin_lock(&cifs_tcp_ses_lock); 1081 + spin_lock(&server->srv_lock); 1080 1082 if (server->tcpStatus == CifsExiting) { 1081 - spin_unlock(&cifs_tcp_ses_lock); 1083 + spin_unlock(&server->srv_lock); 1082 1084 return -ENOENT; 1083 1085 } 1084 - spin_unlock(&cifs_tcp_ses_lock); 1086 + spin_unlock(&server->srv_lock); 1085 1087 1086 1088 /* 1087 1089 * Wait for all the requests to become available. 
··· 1128 1130 if (IS_ERR(midQ[i])) { 1129 1131 revert_current_mid(server, i); 1130 1132 for (j = 0; j < i; j++) 1131 - cifs_delete_mid(midQ[j]); 1133 + delete_mid(midQ[j]); 1132 1134 cifs_server_unlock(server); 1133 1135 1134 1136 /* Update # of requests on wire to server */ ··· 1184 1186 /* 1185 1187 * Compounding is never used during session establish. 1186 1188 */ 1187 - spin_lock(&cifs_tcp_ses_lock); 1189 + spin_lock(&ses->ses_lock); 1188 1190 if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { 1189 - spin_unlock(&cifs_tcp_ses_lock); 1191 + spin_unlock(&ses->ses_lock); 1190 1192 1191 1193 cifs_server_lock(server); 1192 1194 smb311_update_preauth_hash(ses, server, rqst[0].rq_iov, rqst[0].rq_nvec); 1193 1195 cifs_server_unlock(server); 1194 1196 1195 - spin_lock(&cifs_tcp_ses_lock); 1197 + spin_lock(&ses->ses_lock); 1196 1198 } 1197 - spin_unlock(&cifs_tcp_ses_lock); 1199 + spin_unlock(&ses->ses_lock); 1198 1200 1199 1201 for (i = 0; i < num_rqst; i++) { 1200 1202 rc = wait_for_response(server, midQ[i]); ··· 1206 1208 cifs_server_dbg(FYI, "Cancelling wait for mid %llu cmd: %d\n", 1207 1209 midQ[i]->mid, le16_to_cpu(midQ[i]->command)); 1208 1210 send_cancel(server, &rqst[i], midQ[i]); 1209 - spin_lock(&GlobalMid_Lock); 1211 + spin_lock(&server->mid_lock); 1210 1212 midQ[i]->mid_flags |= MID_WAIT_CANCELLED; 1211 1213 if (midQ[i]->mid_state == MID_REQUEST_SUBMITTED) { 1212 1214 midQ[i]->callback = cifs_cancelled_callback; 1213 1215 cancelled_mid[i] = true; 1214 1216 credits[i].value = 0; 1215 1217 } 1216 - spin_unlock(&GlobalMid_Lock); 1218 + spin_unlock(&server->mid_lock); 1217 1219 } 1218 1220 } 1219 1221 ··· 1248 1250 rc = server->ops->check_receive(midQ[i], server, 1249 1251 flags & CIFS_LOG_ERROR); 1250 1252 1251 - /* mark it so buf will not be freed by cifs_delete_mid */ 1253 + /* mark it so buf will not be freed by delete_mid */ 1252 1254 if ((flags & CIFS_NO_RSP_BUF) == 0) 1253 1255 midQ[i]->resp_buf = NULL; 1254 1256 ··· 
1257 1259 /* 1258 1260 * Compounding is never used during session establish. 1259 1261 */ 1260 - spin_lock(&cifs_tcp_ses_lock); 1262 + spin_lock(&ses->ses_lock); 1261 1263 if ((ses->ses_status == SES_NEW) || (optype & CIFS_NEG_OP) || (optype & CIFS_SESS_OP)) { 1262 1264 struct kvec iov = { 1263 1265 .iov_base = resp_iov[0].iov_base, 1264 1266 .iov_len = resp_iov[0].iov_len 1265 1267 }; 1266 - spin_unlock(&cifs_tcp_ses_lock); 1268 + spin_unlock(&ses->ses_lock); 1267 1269 cifs_server_lock(server); 1268 1270 smb311_update_preauth_hash(ses, server, &iov, 1); 1269 1271 cifs_server_unlock(server); 1270 - spin_lock(&cifs_tcp_ses_lock); 1272 + spin_lock(&ses->ses_lock); 1271 1273 } 1272 - spin_unlock(&cifs_tcp_ses_lock); 1274 + spin_unlock(&ses->ses_lock); 1273 1275 1274 1276 out: 1275 1277 /* ··· 1280 1282 */ 1281 1283 for (i = 0; i < num_rqst; i++) { 1282 1284 if (!cancelled_mid[i]) 1283 - cifs_delete_mid(midQ[i]); 1285 + delete_mid(midQ[i]); 1284 1286 } 1285 1287 1286 1288 return rc; ··· 1358 1360 return -EIO; 1359 1361 } 1360 1362 1361 - spin_lock(&cifs_tcp_ses_lock); 1363 + spin_lock(&server->srv_lock); 1362 1364 if (server->tcpStatus == CifsExiting) { 1363 - spin_unlock(&cifs_tcp_ses_lock); 1365 + spin_unlock(&server->srv_lock); 1364 1366 return -ENOENT; 1365 1367 } 1366 - spin_unlock(&cifs_tcp_ses_lock); 1368 + spin_unlock(&server->srv_lock); 1367 1369 1368 1370 /* Ensure that we do not send more than 50 overlapping requests 1369 1371 to the same server. 
We may make this configurable later or ··· 1417 1419 rc = wait_for_response(server, midQ); 1418 1420 if (rc != 0) { 1419 1421 send_cancel(server, &rqst, midQ); 1420 - spin_lock(&GlobalMid_Lock); 1422 + spin_lock(&server->mid_lock); 1421 1423 if (midQ->mid_state == MID_REQUEST_SUBMITTED) { 1422 1424 /* no longer considered to be "in-flight" */ 1423 - midQ->callback = DeleteMidQEntry; 1424 - spin_unlock(&GlobalMid_Lock); 1425 + midQ->callback = release_mid; 1426 + spin_unlock(&server->mid_lock); 1425 1427 add_credits(server, &credits, 0); 1426 1428 return rc; 1427 1429 } 1428 - spin_unlock(&GlobalMid_Lock); 1430 + spin_unlock(&server->mid_lock); 1429 1431 } 1430 1432 1431 1433 rc = cifs_sync_mid_result(midQ, server); ··· 1445 1447 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 1446 1448 rc = cifs_check_receive(midQ, server, 0); 1447 1449 out: 1448 - cifs_delete_mid(midQ); 1450 + delete_mid(midQ); 1449 1451 add_credits(server, &credits, 0); 1450 1452 1451 1453 return rc; ··· 1503 1505 return -EIO; 1504 1506 } 1505 1507 1506 - spin_lock(&cifs_tcp_ses_lock); 1508 + spin_lock(&server->srv_lock); 1507 1509 if (server->tcpStatus == CifsExiting) { 1508 - spin_unlock(&cifs_tcp_ses_lock); 1510 + spin_unlock(&server->srv_lock); 1509 1511 return -ENOENT; 1510 1512 } 1511 - spin_unlock(&cifs_tcp_ses_lock); 1513 + spin_unlock(&server->srv_lock); 1512 1514 1513 1515 /* Ensure that we do not send more than 50 overlapping requests 1514 1516 to the same server. 
We may make this configurable later or ··· 1538 1540 1539 1541 rc = cifs_sign_smb(in_buf, server, &midQ->sequence_number); 1540 1542 if (rc) { 1541 - cifs_delete_mid(midQ); 1543 + delete_mid(midQ); 1542 1544 cifs_server_unlock(server); 1543 1545 return rc; 1544 1546 } ··· 1555 1557 cifs_server_unlock(server); 1556 1558 1557 1559 if (rc < 0) { 1558 - cifs_delete_mid(midQ); 1560 + delete_mid(midQ); 1559 1561 return rc; 1560 1562 } 1561 1563 ··· 1566 1568 (server->tcpStatus != CifsNew))); 1567 1569 1568 1570 /* Were we interrupted by a signal ? */ 1569 - spin_lock(&cifs_tcp_ses_lock); 1571 + spin_lock(&server->srv_lock); 1570 1572 if ((rc == -ERESTARTSYS) && 1571 1573 (midQ->mid_state == MID_REQUEST_SUBMITTED) && 1572 1574 ((server->tcpStatus == CifsGood) || 1573 1575 (server->tcpStatus == CifsNew))) { 1574 - spin_unlock(&cifs_tcp_ses_lock); 1576 + spin_unlock(&server->srv_lock); 1575 1577 1576 1578 if (in_buf->Command == SMB_COM_TRANSACTION2) { 1577 1579 /* POSIX lock. We send a NT_CANCEL SMB to cause the 1578 1580 blocking lock to return. */ 1579 1581 rc = send_cancel(server, &rqst, midQ); 1580 1582 if (rc) { 1581 - cifs_delete_mid(midQ); 1583 + delete_mid(midQ); 1582 1584 return rc; 1583 1585 } 1584 1586 } else { ··· 1590 1592 /* If we get -ENOLCK back the lock may have 1591 1593 already been removed. Don't exit in this case. 
*/ 1592 1594 if (rc && rc != -ENOLCK) { 1593 - cifs_delete_mid(midQ); 1595 + delete_mid(midQ); 1594 1596 return rc; 1595 1597 } 1596 1598 } ··· 1598 1600 rc = wait_for_response(server, midQ); 1599 1601 if (rc) { 1600 1602 send_cancel(server, &rqst, midQ); 1601 - spin_lock(&GlobalMid_Lock); 1603 + spin_lock(&server->mid_lock); 1602 1604 if (midQ->mid_state == MID_REQUEST_SUBMITTED) { 1603 1605 /* no longer considered to be "in-flight" */ 1604 - midQ->callback = DeleteMidQEntry; 1605 - spin_unlock(&GlobalMid_Lock); 1606 + midQ->callback = release_mid; 1607 + spin_unlock(&server->mid_lock); 1606 1608 return rc; 1607 1609 } 1608 - spin_unlock(&GlobalMid_Lock); 1610 + spin_unlock(&server->mid_lock); 1609 1611 } 1610 1612 1611 1613 /* We got the response - restart system call. */ 1612 1614 rstart = 1; 1613 - spin_lock(&cifs_tcp_ses_lock); 1615 + spin_lock(&server->srv_lock); 1614 1616 } 1615 - spin_unlock(&cifs_tcp_ses_lock); 1617 + spin_unlock(&server->srv_lock); 1616 1618 1617 1619 rc = cifs_sync_mid_result(midQ, server); 1618 1620 if (rc != 0) ··· 1629 1631 memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); 1630 1632 rc = cifs_check_receive(midQ, server, 0); 1631 1633 out: 1632 - cifs_delete_mid(midQ); 1634 + delete_mid(midQ); 1633 1635 if (rstart && rc == -EACCES) 1634 1636 return -ERESTARTSYS; 1635 1637 return rc; 1638 + } 1639 + 1640 + /* 1641 + * Discard any remaining data in the current SMB. To do this, we borrow the 1642 + * current bigbuf. 
1643 + */ 1644 + int 1645 + cifs_discard_remaining_data(struct TCP_Server_Info *server) 1646 + { 1647 + unsigned int rfclen = server->pdu_size; 1648 + int remaining = rfclen + server->vals->header_preamble_size - 1649 + server->total_read; 1650 + 1651 + while (remaining > 0) { 1652 + int length; 1653 + 1654 + length = cifs_discard_from_socket(server, 1655 + min_t(size_t, remaining, 1656 + CIFSMaxBufSize + MAX_HEADER_SIZE(server))); 1657 + if (length < 0) 1658 + return length; 1659 + server->total_read += length; 1660 + remaining -= length; 1661 + } 1662 + 1663 + return 0; 1664 + } 1665 + 1666 + static int 1667 + __cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid, 1668 + bool malformed) 1669 + { 1670 + int length; 1671 + 1672 + length = cifs_discard_remaining_data(server); 1673 + dequeue_mid(mid, malformed); 1674 + mid->resp_buf = server->smallbuf; 1675 + server->smallbuf = NULL; 1676 + return length; 1677 + } 1678 + 1679 + static int 1680 + cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1681 + { 1682 + struct cifs_readdata *rdata = mid->callback_data; 1683 + 1684 + return __cifs_readv_discard(server, mid, rdata->result); 1685 + } 1686 + 1687 + int 1688 + cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1689 + { 1690 + int length, len; 1691 + unsigned int data_offset, data_len; 1692 + struct cifs_readdata *rdata = mid->callback_data; 1693 + char *buf = server->smallbuf; 1694 + unsigned int buflen = server->pdu_size + 1695 + server->vals->header_preamble_size; 1696 + bool use_rdma_mr = false; 1697 + 1698 + cifs_dbg(FYI, "%s: mid=%llu offset=%llu bytes=%u\n", 1699 + __func__, mid->mid, rdata->offset, rdata->bytes); 1700 + 1701 + /* 1702 + * read the rest of READ_RSP header (sans Data array), or whatever we 1703 + * can if there's not enough data. At this point, we've read down to 1704 + * the Mid. 
1705 + */ 1706 + len = min_t(unsigned int, buflen, server->vals->read_rsp_size) - 1707 + HEADER_SIZE(server) + 1; 1708 + 1709 + length = cifs_read_from_socket(server, 1710 + buf + HEADER_SIZE(server) - 1, len); 1711 + if (length < 0) 1712 + return length; 1713 + server->total_read += length; 1714 + 1715 + if (server->ops->is_session_expired && 1716 + server->ops->is_session_expired(buf)) { 1717 + cifs_reconnect(server, true); 1718 + return -1; 1719 + } 1720 + 1721 + if (server->ops->is_status_pending && 1722 + server->ops->is_status_pending(buf, server)) { 1723 + cifs_discard_remaining_data(server); 1724 + return -1; 1725 + } 1726 + 1727 + /* set up first two iov for signature check and to get credits */ 1728 + rdata->iov[0].iov_base = buf; 1729 + rdata->iov[0].iov_len = server->vals->header_preamble_size; 1730 + rdata->iov[1].iov_base = buf + server->vals->header_preamble_size; 1731 + rdata->iov[1].iov_len = 1732 + server->total_read - server->vals->header_preamble_size; 1733 + cifs_dbg(FYI, "0: iov_base=%p iov_len=%zu\n", 1734 + rdata->iov[0].iov_base, rdata->iov[0].iov_len); 1735 + cifs_dbg(FYI, "1: iov_base=%p iov_len=%zu\n", 1736 + rdata->iov[1].iov_base, rdata->iov[1].iov_len); 1737 + 1738 + /* Was the SMB read successful? */ 1739 + rdata->result = server->ops->map_error(buf, false); 1740 + if (rdata->result != 0) { 1741 + cifs_dbg(FYI, "%s: server returned error %d\n", 1742 + __func__, rdata->result); 1743 + /* normal error on read response */ 1744 + return __cifs_readv_discard(server, mid, false); 1745 + } 1746 + 1747 + /* Is there enough to get to the rest of the READ_RSP header? */ 1748 + if (server->total_read < server->vals->read_rsp_size) { 1749 + cifs_dbg(FYI, "%s: server returned short header. 
got=%u expected=%zu\n", 1750 + __func__, server->total_read, 1751 + server->vals->read_rsp_size); 1752 + rdata->result = -EIO; 1753 + return cifs_readv_discard(server, mid); 1754 + } 1755 + 1756 + data_offset = server->ops->read_data_offset(buf) + 1757 + server->vals->header_preamble_size; 1758 + if (data_offset < server->total_read) { 1759 + /* 1760 + * win2k8 sometimes sends an offset of 0 when the read 1761 + * is beyond the EOF. Treat it as if the data starts just after 1762 + * the header. 1763 + */ 1764 + cifs_dbg(FYI, "%s: data offset (%u) inside read response header\n", 1765 + __func__, data_offset); 1766 + data_offset = server->total_read; 1767 + } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) { 1768 + /* data_offset is beyond the end of smallbuf */ 1769 + cifs_dbg(FYI, "%s: data offset (%u) beyond end of smallbuf\n", 1770 + __func__, data_offset); 1771 + rdata->result = -EIO; 1772 + return cifs_readv_discard(server, mid); 1773 + } 1774 + 1775 + cifs_dbg(FYI, "%s: total_read=%u data_offset=%u\n", 1776 + __func__, server->total_read, data_offset); 1777 + 1778 + len = data_offset - server->total_read; 1779 + if (len > 0) { 1780 + /* read any junk before data into the rest of smallbuf */ 1781 + length = cifs_read_from_socket(server, 1782 + buf + server->total_read, len); 1783 + if (length < 0) 1784 + return length; 1785 + server->total_read += length; 1786 + } 1787 + 1788 + /* how much data is in the response? 
*/ 1789 + #ifdef CONFIG_CIFS_SMB_DIRECT 1790 + use_rdma_mr = rdata->mr; 1791 + #endif 1792 + data_len = server->ops->read_data_length(buf, use_rdma_mr); 1793 + if (!use_rdma_mr && (data_offset + data_len > buflen)) { 1794 + /* data_len is corrupt -- discard frame */ 1795 + rdata->result = -EIO; 1796 + return cifs_readv_discard(server, mid); 1797 + } 1798 + 1799 + length = rdata->read_into_pages(server, rdata, data_len); 1800 + if (length < 0) 1801 + return length; 1802 + 1803 + server->total_read += length; 1804 + 1805 + cifs_dbg(FYI, "total_read=%u buflen=%u remaining=%u\n", 1806 + server->total_read, buflen, data_len); 1807 + 1808 + /* discard anything left over */ 1809 + if (server->total_read < buflen) 1810 + return cifs_readv_discard(server, mid); 1811 + 1812 + dequeue_mid(mid, false); 1813 + mid->resp_buf = server->smallbuf; 1814 + server->smallbuf = NULL; 1815 + return length; 1636 1816 }
+4 -1
fs/cifs/xattr.c
··· 201 201 break; 202 202 } 203 203 204 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 204 205 case XATTR_ACL_ACCESS: 205 206 #ifdef CONFIG_CIFS_POSIX 206 207 if (!value) ··· 225 224 cifs_remap(cifs_sb)); 226 225 #endif /* CONFIG_CIFS_POSIX */ 227 226 break; 227 + #endif /* CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 228 228 } 229 229 230 230 out: ··· 366 364 } 367 365 break; 368 366 } 369 - 367 + #ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY 370 368 case XATTR_ACL_ACCESS: 371 369 #ifdef CONFIG_CIFS_POSIX 372 370 if (sb->s_flags & SB_POSIXACL) ··· 386 384 cifs_remap(cifs_sb)); 387 385 #endif /* CONFIG_CIFS_POSIX */ 388 386 break; 387 + #endif /* ifdef CONFIG_CIFS_ALLOW_INSECURE_LEGACY */ 389 388 } 390 389 391 390 /* We could add an additional check for streams ie