Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

cifs: Fix use after free of a mid_q_entry

With protocol version 2.0 mounts we have seen crashes with corrupt mid
entries. Either the server->pending_mid_q list becomes corrupt with a
cyclic reference in one element or a mid object fetched by the
demultiplexer thread becomes overwritten during use.

Code review identified a race between the demultiplexer thread and the
request issuing thread. The demultiplexer thread seems to be written
with the assumption that it is the sole user of the mid object until
it calls the mid callback which either wakes the issuer task or
deletes the mid.

This assumption is not true because the issuer task can be woken up
earlier by a signal. If the demultiplexer thread has proceeded as far
as setting the mid_state to MID_RESPONSE_RECEIVED, then the issuer
thread will happily end up calling cifs_delete_mid while the
demultiplexer thread is still using the mid object.

Inserting a delay in the cifs demultiplexer thread widens the race
window and makes reproduction of the race very easy:

if (server->large_buf)
buf = server->bigbuf;

+ usleep_range(500, 4000);

server->lstrp = jiffies;

To resolve this, the proper solution is to put a reference count on
the mid object. This patch makes sure that the demultiplexer thread
holds a reference until it has finished processing the transaction.

Cc: stable@vger.kernel.org
Signed-off-by: Lars Persson <larper@axis.com>
Acked-by: Paulo Alcantara <palcantara@suse.de>
Reviewed-by: Ronnie Sahlberg <lsahlber@redhat.com>
Reviewed-by: Pavel Shilovsky <pshilov@microsoft.com>
Signed-off-by: Steve French <stfrench@microsoft.com>

Authored by Lars Persson; committed by Steve French.

Commit 696e420b (parent 06c85639)

+29 -2
+1
fs/cifs/cifsglob.h
··· 1416 1416 /* one of these for every pending CIFS request to the server */ 1417 1417 struct mid_q_entry { 1418 1418 struct list_head qhead; /* mids waiting on reply from this server */ 1419 + struct kref refcount; 1419 1420 struct TCP_Server_Info *server; /* server corresponding to this mid */ 1420 1421 __u64 mid; /* multiplex id */ 1421 1422 __u32 pid; /* process id */
+1
fs/cifs/cifsproto.h
··· 82 82 struct TCP_Server_Info *server); 83 83 extern void DeleteMidQEntry(struct mid_q_entry *midEntry); 84 84 extern void cifs_delete_mid(struct mid_q_entry *mid); 85 + extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry); 85 86 extern void cifs_wake_up_task(struct mid_q_entry *mid); 86 87 extern int cifs_handle_standard(struct TCP_Server_Info *server, 87 88 struct mid_q_entry *mid);
+7 -1
fs/cifs/connect.c
··· 924 924 server->pdu_size = next_offset; 925 925 } 926 926 927 + mid_entry = NULL; 927 928 if (server->ops->is_transform_hdr && 928 929 server->ops->receive_transform && 929 930 server->ops->is_transform_hdr(buf)) { ··· 939 938 length = mid_entry->receive(server, mid_entry); 940 939 } 941 940 942 - if (length < 0) 941 + if (length < 0) { 942 + if (mid_entry) 943 + cifs_mid_q_entry_release(mid_entry); 943 944 continue; 945 + } 944 946 945 947 if (server->large_buf) 946 948 buf = server->bigbuf; ··· 960 956 961 957 if (!mid_entry->multiRsp || mid_entry->multiEnd) 962 958 mid_entry->callback(mid_entry); 959 + 960 + cifs_mid_q_entry_release(mid_entry); 963 961 } else if (server->ops->is_oplock_break && 964 962 server->ops->is_oplock_break(buf, server)) { 965 963 cifs_dbg(FYI, "Received oplock break\n");
+1
fs/cifs/smb1ops.c
··· 107 107 if (compare_mid(mid->mid, buf) && 108 108 mid->mid_state == MID_REQUEST_SUBMITTED && 109 109 le16_to_cpu(mid->command) == buf->Command) { 110 + kref_get(&mid->refcount); 110 111 spin_unlock(&GlobalMid_Lock); 111 112 return mid; 112 113 }
+1
fs/cifs/smb2ops.c
··· 203 203 if ((mid->mid == wire_mid) && 204 204 (mid->mid_state == MID_REQUEST_SUBMITTED) && 205 205 (mid->command == shdr->Command)) { 206 + kref_get(&mid->refcount); 206 207 spin_unlock(&GlobalMid_Lock); 207 208 return mid; 208 209 }
+1
fs/cifs/smb2transport.c
··· 548 548 549 549 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); 550 550 memset(temp, 0, sizeof(struct mid_q_entry)); 551 + kref_init(&temp->refcount); 551 552 temp->mid = le64_to_cpu(shdr->MessageId); 552 553 temp->pid = current->pid; 553 554 temp->command = shdr->Command; /* Always LE */
+17 -1
fs/cifs/transport.c
··· 61 61 62 62 temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS); 63 63 memset(temp, 0, sizeof(struct mid_q_entry)); 64 + kref_init(&temp->refcount); 64 65 temp->mid = get_mid(smb_buffer); 65 66 temp->pid = current->pid; 66 67 temp->command = cpu_to_le16(smb_buffer->Command); ··· 81 80 atomic_inc(&midCount); 82 81 temp->mid_state = MID_REQUEST_ALLOCATED; 83 82 return temp; 83 + } 84 + 85 + static void _cifs_mid_q_entry_release(struct kref *refcount) 86 + { 87 + struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry, 88 + refcount); 89 + 90 + mempool_free(mid, cifs_mid_poolp); 91 + } 92 + 93 + void cifs_mid_q_entry_release(struct mid_q_entry *midEntry) 94 + { 95 + spin_lock(&GlobalMid_Lock); 96 + kref_put(&midEntry->refcount, _cifs_mid_q_entry_release); 97 + spin_unlock(&GlobalMid_Lock); 84 98 } 85 99 86 100 void ··· 126 110 } 127 111 } 128 112 #endif 129 - mempool_free(midEntry, cifs_mid_poolp); 113 + cifs_mid_q_entry_release(midEntry); 130 114 } 131 115 132 116 void