[INET]: Fix potential kfree on vmalloc-ed area of request_sock_queue

The request_sock_queue's listen_opt is either vmalloc-ed or
kmalloc-ed depending on the number of table entries. Thus it
is expected to be handled properly on free, which is done in
the reqsk_queue_destroy().

However, the error path in inet_csk_listen_start() calls
the lite version of reqsk_queue_destroy(), called
__reqsk_queue_destroy(), which calls kfree() unconditionally.

Fix this and move __reqsk_queue_destroy() into a .c file, as
it looks too big to be inlined.

As David also noticed, this is an error recovery path only,
so no locking is required and the lopt is known to be non-NULL.

reqsk_queue_yank_listen_sk() is also now used only in
net/core/request_sock.c, so move it there too.

Signed-off-by: Pavel Emelyanov <xemul@openvz.org>
Acked-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by Pavel Emelyanov and committed by David S. Miller dab6ba36 bd7b3f34

+36 -17
+1 -17
include/net/request_sock.h
··· 124 124 extern int reqsk_queue_alloc(struct request_sock_queue *queue, 125 125 unsigned int nr_table_entries); 126 126 127 - static inline struct listen_sock *reqsk_queue_yank_listen_sk(struct request_sock_queue *queue) 128 - { 129 - struct listen_sock *lopt; 130 - 131 - write_lock_bh(&queue->syn_wait_lock); 132 - lopt = queue->listen_opt; 133 - queue->listen_opt = NULL; 134 - write_unlock_bh(&queue->syn_wait_lock); 135 - 136 - return lopt; 137 - } 138 - 139 - static inline void __reqsk_queue_destroy(struct request_sock_queue *queue) 140 - { 141 - kfree(reqsk_queue_yank_listen_sk(queue)); 142 - } 143 - 127 + extern void __reqsk_queue_destroy(struct request_sock_queue *queue); 144 128 extern void reqsk_queue_destroy(struct request_sock_queue *queue); 145 129 146 130 static inline struct request_sock *
+35
net/core/request_sock.c
··· 71 71 72 72 EXPORT_SYMBOL(reqsk_queue_alloc); 73 73 74 + void __reqsk_queue_destroy(struct request_sock_queue *queue) 75 + { 76 + struct listen_sock *lopt; 77 + size_t lopt_size; 78 + 79 + /* 80 + * this is an error recovery path only 81 + * no locking needed and the lopt is not NULL 82 + */ 83 + 84 + lopt = queue->listen_opt; 85 + lopt_size = sizeof(struct listen_sock) + 86 + lopt->nr_table_entries * sizeof(struct request_sock *); 87 + 88 + if (lopt_size > PAGE_SIZE) 89 + vfree(lopt); 90 + else 91 + kfree(lopt); 92 + } 93 + 94 + EXPORT_SYMBOL(__reqsk_queue_destroy); 95 + 96 + static inline struct listen_sock *reqsk_queue_yank_listen_sk( 97 + struct request_sock_queue *queue) 98 + { 99 + struct listen_sock *lopt; 100 + 101 + write_lock_bh(&queue->syn_wait_lock); 102 + lopt = queue->listen_opt; 103 + queue->listen_opt = NULL; 104 + write_unlock_bh(&queue->syn_wait_lock); 105 + 106 + return lopt; 107 + } 108 + 74 109 void reqsk_queue_destroy(struct request_sock_queue *queue) 75 110 { 76 111 /* make all the listen_opt local to us */