Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ovpn: ensure sk is still valid during cleanup

Removing a peer while userspace attempts to close its transport
socket triggers a race condition resulting in the following
crash:

Oops: general protection fault, probably for non-canonical address 0xdffffc0000000077: 0000 [#1] SMP KASAN
KASAN: null-ptr-deref in range [0x00000000000003b8-0x00000000000003bf]
CPU: 12 UID: 0 PID: 162 Comm: kworker/12:1 Tainted: G O 6.15.0-rc2-00635-g521139ac3840 #272 PREEMPT(full)
Tainted: [O]=OOT_MODULE
Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-20240910_120124-localhost 04/01/2014
Workqueue: events ovpn_peer_keepalive_work [ovpn]
RIP: 0010:ovpn_socket_release+0x23c/0x500 [ovpn]
Code: ea 03 80 3c 02 00 0f 85 71 02 00 00 48 b8 00 00 00 00 00 fc ff df 4d 8b 64 24 18 49 8d bc 24 be 03 00 00 48 89 fa 48 c1 ea 03 <0f> b6 14 02 48 89 f8 83 e0 07 83 c0 01 38 d0 7c 08 84 d2 0f 85 30
RSP: 0018:ffffc90000c9fb18 EFLAGS: 00010217
RAX: dffffc0000000000 RBX: ffff8881148d7940 RCX: ffffffff817787bb
RDX: 0000000000000077 RSI: 0000000000000008 RDI: 00000000000003be
RBP: ffffc90000c9fb30 R08: 0000000000000000 R09: fffffbfff0d3e840
R10: ffffffff869f4207 R11: 0000000000000000 R12: 0000000000000000
R13: ffff888115eb9300 R14: ffffc90000c9fbc8 R15: 000000000000000c
FS: 0000000000000000(0000) GS:ffff8882b0151000(0000) knlGS:0000000000000000
CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
CR2: 00007f37266b6114 CR3: 00000000054a8000 CR4: 0000000000750ef0
PKRU: 55555554
Call Trace:
<TASK>
unlock_ovpn+0x8b/0xe0 [ovpn]
ovpn_peer_keepalive_work+0xe3/0x540 [ovpn]
? ovpn_peers_free+0x780/0x780 [ovpn]
? lock_acquire+0x56/0x70
? process_one_work+0x888/0x1740
process_one_work+0x933/0x1740
? pwq_dec_nr_in_flight+0x10b0/0x10b0
? move_linked_works+0x12d/0x2c0
? assign_work+0x163/0x270
worker_thread+0x4d6/0xd90
? preempt_count_sub+0x4c/0x70
? process_one_work+0x1740/0x1740
kthread+0x36c/0x710
? trace_preempt_on+0x8c/0x1e0
? kthread_is_per_cpu+0xc0/0xc0
? preempt_count_sub+0x4c/0x70
? _raw_spin_unlock_irq+0x36/0x60
? calculate_sigpending+0x7b/0xa0
? kthread_is_per_cpu+0xc0/0xc0
ret_from_fork+0x3a/0x80
? kthread_is_per_cpu+0xc0/0xc0
ret_from_fork_asm+0x11/0x20
</TASK>
Modules linked in: ovpn(O)

This happens because the peer deletion operation reaches
ovpn_socket_release() while ovpn_sock->sock (struct socket *)
and its sk member (struct sock *) are still both valid.
Here synchronize_rcu() is invoked, after which ovpn_sock->sock->sk
becomes NULL, due to the concurrent socket closing triggered
from userspace.

After having invoked synchronize_rcu(), ovpn_socket_release() will
attempt dereferencing ovpn_sock->sock->sk, triggering the crash
reported above.

The reason for accessing sk is that we need to retrieve its
protocol and continue the cleanup routine accordingly.

This crash can be easily produced by running openvpn userspace in
client mode with `--keepalive 10 20`, while entirely omitting this
option on the server side.
After 20 seconds ovpn will assume the peer (server) to be dead,
will start removing it and will notify userspace. The latter will
receive the notification and close the transport socket, thus
triggering the crash.

To fix the race condition for good, we need to refactor struct ovpn_socket.
Since ovpn is always only interested in the sock->sk member (struct sock *)
we can directly hold a reference to it, rather than accessing it via
its struct socket container.

This means changing "struct socket *ovpn_socket->sock" to
"struct sock *ovpn_socket->sk".

While acquiring a reference to sk, we can increase its refcounter
without affecting the socket close()/destroy() notification
(which we rely on when userspace closes a socket we are using).

By increasing sk's refcounter we know we can dereference it
in ovpn_socket_release() without running into any race condition
anymore.

ovpn_socket_release() will ultimately decrease the reference
counter.

Cc: Oleksandr Natalenko <oleksandr@natalenko.name>
Fixes: 11851cbd60ea ("ovpn: implement TCP transport")
Reported-by: Qingfang Deng <dqfext@gmail.com>
Closes: https://github.com/OpenVPN/ovpn-net-next/issues/1
Tested-by: Gert Doering <gert@greenie.muc.de>
Link: https://www.mail-archive.com/openvpn-devel@lists.sourceforge.net/msg31575.html
Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Signed-off-by: Antonio Quartulli <antonio@openvpn.net>

+102 -104
+4 -4
drivers/net/ovpn/io.c
··· 134 134 135 135 rcu_read_lock(); 136 136 sock = rcu_dereference(peer->sock); 137 - if (sock && sock->sock->sk->sk_protocol == IPPROTO_UDP) 137 + if (sock && sock->sk->sk_protocol == IPPROTO_UDP) 138 138 /* check if this peer changed local or remote endpoint */ 139 139 ovpn_peer_endpoints_update(peer, skb); 140 140 rcu_read_unlock(); ··· 270 270 if (unlikely(!sock)) 271 271 goto err_unlock; 272 272 273 - switch (sock->sock->sk->sk_protocol) { 273 + switch (sock->sk->sk_protocol) { 274 274 case IPPROTO_UDP: 275 - ovpn_udp_send_skb(peer, sock->sock, skb); 275 + ovpn_udp_send_skb(peer, sock->sk, skb); 276 276 break; 277 277 case IPPROTO_TCP: 278 - ovpn_tcp_send_skb(peer, sock->sock, skb); 278 + ovpn_tcp_send_skb(peer, sock->sk, skb); 279 279 break; 280 280 default: 281 281 /* no transport configured yet */
+8 -8
drivers/net/ovpn/netlink.c
··· 501 501 /* when using a TCP socket the remote IP is not expected */ 502 502 rcu_read_lock(); 503 503 sock = rcu_dereference(peer->sock); 504 - if (sock && sock->sock->sk->sk_protocol == IPPROTO_TCP && 504 + if (sock && sock->sk->sk_protocol == IPPROTO_TCP && 505 505 (attrs[OVPN_A_PEER_REMOTE_IPV4] || 506 506 attrs[OVPN_A_PEER_REMOTE_IPV6])) { 507 507 rcu_read_unlock(); ··· 559 559 goto err_unlock; 560 560 } 561 561 562 - if (!net_eq(genl_info_net(info), sock_net(sock->sock->sk))) { 562 + if (!net_eq(genl_info_net(info), sock_net(sock->sk))) { 563 563 id = peernet2id_alloc(genl_info_net(info), 564 - sock_net(sock->sock->sk), 564 + sock_net(sock->sk), 565 565 GFP_ATOMIC); 566 566 if (nla_put_s32(skb, OVPN_A_PEER_SOCKET_NETNSID, id)) 567 567 goto err_unlock; 568 568 } 569 - local_port = inet_sk(sock->sock->sk)->inet_sport; 569 + local_port = inet_sk(sock->sk)->inet_sport; 570 570 rcu_read_unlock(); 571 571 572 572 if (nla_put_u32(skb, OVPN_A_PEER_ID, peer->id)) ··· 1153 1153 ret = -EINVAL; 1154 1154 goto err_unlock; 1155 1155 } 1156 - genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk), 1157 - msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC); 1156 + genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0, 1157 + OVPN_NLGRP_PEERS, GFP_ATOMIC); 1158 1158 rcu_read_unlock(); 1159 1159 1160 1160 return 0; ··· 1218 1218 ret = -EINVAL; 1219 1219 goto err_unlock; 1220 1220 } 1221 - genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sock->sk), 1222 - msg, 0, OVPN_NLGRP_PEERS, GFP_ATOMIC); 1221 + genlmsg_multicast_netns(&ovpn_nl_family, sock_net(sock->sk), msg, 0, 1222 + OVPN_NLGRP_PEERS, GFP_ATOMIC); 1223 1223 rcu_read_unlock(); 1224 1224 1225 1225 return 0;
+2 -2
drivers/net/ovpn/peer.c
··· 1145 1145 1146 1146 if (sk) { 1147 1147 ovpn_sock = rcu_access_pointer(peer->sock); 1148 - if (!ovpn_sock || ovpn_sock->sock->sk != sk) { 1148 + if (!ovpn_sock || ovpn_sock->sk != sk) { 1149 1149 spin_unlock_bh(&ovpn->lock); 1150 1150 ovpn_peer_put(peer); 1151 1151 return; ··· 1175 1175 if (sk) { 1176 1176 rcu_read_lock(); 1177 1177 ovpn_sock = rcu_dereference(peer->sock); 1178 - remove = ovpn_sock && ovpn_sock->sock->sk == sk; 1178 + remove = ovpn_sock && ovpn_sock->sk == sk; 1179 1179 rcu_read_unlock(); 1180 1180 } 1181 1181
+38 -30
drivers/net/ovpn/socket.c
··· 24 24 struct ovpn_socket *sock = container_of(kref, struct ovpn_socket, 25 25 refcount); 26 26 27 - if (sock->sock->sk->sk_protocol == IPPROTO_UDP) 27 + if (sock->sk->sk_protocol == IPPROTO_UDP) 28 28 ovpn_udp_socket_detach(sock); 29 - else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) 29 + else if (sock->sk->sk_protocol == IPPROTO_TCP) 30 30 ovpn_tcp_socket_detach(sock); 31 31 } 32 32 ··· 75 75 if (!sock) 76 76 return; 77 77 78 - /* sanity check: we should not end up here if the socket 79 - * was already closed 80 - */ 81 - if (!sock->sock->sk) { 82 - DEBUG_NET_WARN_ON_ONCE(1); 83 - return; 84 - } 85 - 86 78 /* Drop the reference while holding the sock lock to avoid 87 79 * concurrent ovpn_socket_new call to mess up with a partially 88 80 * detached socket. ··· 82 90 * Holding the lock ensures that a socket with refcnt 0 is fully 83 91 * detached before it can be picked by a concurrent reader. 84 92 */ 85 - lock_sock(sock->sock->sk); 93 + lock_sock(sock->sk); 86 94 released = ovpn_socket_put(peer, sock); 87 - release_sock(sock->sock->sk); 95 + release_sock(sock->sk); 88 96 89 97 /* align all readers with sk_user_data being NULL */ 90 98 synchronize_rcu(); 91 99 92 100 /* following cleanup should happen with lock released */ 93 101 if (released) { 94 - if (sock->sock->sk->sk_protocol == IPPROTO_UDP) { 102 + if (sock->sk->sk_protocol == IPPROTO_UDP) { 95 103 netdev_put(sock->ovpn->dev, &sock->dev_tracker); 96 - } else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) { 104 + } else if (sock->sk->sk_protocol == IPPROTO_TCP) { 97 105 /* wait for TCP jobs to terminate */ 98 106 ovpn_tcp_socket_wait_finish(sock); 99 107 ovpn_peer_put(sock->peer); 100 108 } 109 + /* drop reference acquired in ovpn_socket_new() */ 110 + sock_put(sock->sk); 101 111 /* we can call plain kfree() because we already waited one RCU 102 112 * period due to synchronize_rcu() 103 113 */ ··· 112 118 return kref_get_unless_zero(&sock->refcount); 113 119 } 114 120 115 - static int 
ovpn_socket_attach(struct ovpn_socket *sock, struct ovpn_peer *peer) 121 + static int ovpn_socket_attach(struct ovpn_socket *ovpn_sock, 122 + struct socket *sock, 123 + struct ovpn_peer *peer) 116 124 { 117 - if (sock->sock->sk->sk_protocol == IPPROTO_UDP) 118 - return ovpn_udp_socket_attach(sock, peer->ovpn); 119 - else if (sock->sock->sk->sk_protocol == IPPROTO_TCP) 120 - return ovpn_tcp_socket_attach(sock, peer); 125 + if (sock->sk->sk_protocol == IPPROTO_UDP) 126 + return ovpn_udp_socket_attach(ovpn_sock, sock, peer->ovpn); 127 + else if (sock->sk->sk_protocol == IPPROTO_TCP) 128 + return ovpn_tcp_socket_attach(ovpn_sock, peer); 121 129 122 130 return -EOPNOTSUPP; 123 131 } ··· 134 138 struct ovpn_socket *ovpn_socket_new(struct socket *sock, struct ovpn_peer *peer) 135 139 { 136 140 struct ovpn_socket *ovpn_sock; 141 + struct sock *sk = sock->sk; 137 142 int ret; 138 143 139 - lock_sock(sock->sk); 144 + lock_sock(sk); 140 145 141 146 /* a TCP socket can only be owned by a single peer, therefore there 142 147 * can't be any other user 143 148 */ 144 - if (sock->sk->sk_protocol == IPPROTO_TCP && sock->sk->sk_user_data) { 149 + if (sk->sk_protocol == IPPROTO_TCP && sk->sk_user_data) { 145 150 ovpn_sock = ERR_PTR(-EBUSY); 146 151 goto sock_release; 147 152 } ··· 150 153 /* a UDP socket can be shared across multiple peers, but we must make 151 154 * sure it is not owned by something else 152 155 */ 153 - if (sock->sk->sk_protocol == IPPROTO_UDP) { 154 - u8 type = READ_ONCE(udp_sk(sock->sk)->encap_type); 156 + if (sk->sk_protocol == IPPROTO_UDP) { 157 + u8 type = READ_ONCE(udp_sk(sk)->encap_type); 155 158 156 159 /* socket owned by other encapsulation module */ 157 160 if (type && type != UDP_ENCAP_OVPNINUDP) { ··· 160 163 } 161 164 162 165 rcu_read_lock(); 163 - ovpn_sock = rcu_dereference_sk_user_data(sock->sk); 166 + ovpn_sock = rcu_dereference_sk_user_data(sk); 164 167 if (ovpn_sock) { 165 168 /* socket owned by another ovpn instance, we can't use it */ 166 169 
if (ovpn_sock->ovpn != peer->ovpn) { ··· 197 200 goto sock_release; 198 201 } 199 202 200 - ovpn_sock->sock = sock; 203 + ovpn_sock->sk = sk; 201 204 kref_init(&ovpn_sock->refcount); 202 205 203 - ret = ovpn_socket_attach(ovpn_sock, peer); 206 + /* the newly created ovpn_socket is holding reference to sk, 207 + * therefore we increase its refcounter. 208 + * 209 + * This ovpn_socket instance is referenced by all peers 210 + * using the same socket. 211 + * 212 + * ovpn_socket_release() will take care of dropping the reference. 213 + */ 214 + sock_hold(sk); 215 + 216 + ret = ovpn_socket_attach(ovpn_sock, sock, peer); 204 217 if (ret < 0) { 218 + sock_put(sk); 205 219 kfree(ovpn_sock); 206 220 ovpn_sock = ERR_PTR(ret); 207 221 goto sock_release; ··· 221 213 /* TCP sockets are per-peer, therefore they are linked to their unique 222 214 * peer 223 215 */ 224 - if (sock->sk->sk_protocol == IPPROTO_TCP) { 216 + if (sk->sk_protocol == IPPROTO_TCP) { 225 217 INIT_WORK(&ovpn_sock->tcp_tx_work, ovpn_tcp_tx_work); 226 218 ovpn_sock->peer = peer; 227 219 ovpn_peer_hold(peer); 228 - } else if (sock->sk->sk_protocol == IPPROTO_UDP) { 220 + } else if (sk->sk_protocol == IPPROTO_UDP) { 229 221 /* in UDP we only link the ovpn instance since the socket is 230 222 * shared among multiple peers 231 223 */ ··· 234 226 GFP_KERNEL); 235 227 } 236 228 237 - rcu_assign_sk_user_data(sock->sk, ovpn_sock); 229 + rcu_assign_sk_user_data(sk, ovpn_sock); 238 230 sock_release: 239 - release_sock(sock->sk); 231 + release_sock(sk); 240 232 return ovpn_sock; 241 233 }
+2 -2
drivers/net/ovpn/socket.h
··· 22 22 * @ovpn: ovpn instance owning this socket (UDP only) 23 23 * @dev_tracker: reference tracker for associated dev (UDP only) 24 24 * @peer: unique peer transmitting over this socket (TCP only) 25 - * @sock: the low level sock object 25 + * @sk: the low level sock object 26 26 * @refcount: amount of contexts currently referencing this object 27 27 * @work: member used to schedule release routine (it may block) 28 28 * @tcp_tx_work: work for deferring outgoing packet processing (TCP only) ··· 36 36 struct ovpn_peer *peer; 37 37 }; 38 38 39 - struct socket *sock; 39 + struct sock *sk; 40 40 struct kref refcount; 41 41 struct work_struct work; 42 42 struct work_struct tcp_tx_work;
+32 -33
drivers/net/ovpn/tcp.c
··· 186 186 void ovpn_tcp_socket_detach(struct ovpn_socket *ovpn_sock) 187 187 { 188 188 struct ovpn_peer *peer = ovpn_sock->peer; 189 - struct socket *sock = ovpn_sock->sock; 189 + struct sock *sk = ovpn_sock->sk; 190 190 191 191 strp_stop(&peer->tcp.strp); 192 192 skb_queue_purge(&peer->tcp.user_queue); 193 193 194 194 /* restore CBs that were saved in ovpn_sock_set_tcp_cb() */ 195 - sock->sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready; 196 - sock->sk->sk_write_space = peer->tcp.sk_cb.sk_write_space; 197 - sock->sk->sk_prot = peer->tcp.sk_cb.prot; 198 - sock->sk->sk_socket->ops = peer->tcp.sk_cb.ops; 195 + sk->sk_data_ready = peer->tcp.sk_cb.sk_data_ready; 196 + sk->sk_write_space = peer->tcp.sk_cb.sk_write_space; 197 + sk->sk_prot = peer->tcp.sk_cb.prot; 198 + sk->sk_socket->ops = peer->tcp.sk_cb.ops; 199 199 200 - rcu_assign_sk_user_data(sock->sk, NULL); 200 + rcu_assign_sk_user_data(sk, NULL); 201 201 } 202 202 203 203 void ovpn_tcp_socket_wait_finish(struct ovpn_socket *sock) ··· 283 283 284 284 sock = container_of(work, struct ovpn_socket, tcp_tx_work); 285 285 286 - lock_sock(sock->sock->sk); 286 + lock_sock(sock->sk); 287 287 if (sock->peer) 288 - ovpn_tcp_send_sock(sock->peer, sock->sock->sk); 289 - release_sock(sock->sock->sk); 288 + ovpn_tcp_send_sock(sock->peer, sock->sk); 289 + release_sock(sock->sk); 290 290 } 291 291 292 292 static void ovpn_tcp_send_sock_skb(struct ovpn_peer *peer, struct sock *sk, ··· 307 307 ovpn_tcp_send_sock(peer, sk); 308 308 } 309 309 310 - void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock, 310 + void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk, 311 311 struct sk_buff *skb) 312 312 { 313 313 u16 len = skb->len; 314 314 315 315 *(__be16 *)__skb_push(skb, sizeof(u16)) = htons(len); 316 316 317 - spin_lock_nested(&sock->sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING); 318 - if (sock_owned_by_user(sock->sk)) { 317 + spin_lock_nested(&sk->sk_lock.slock, OVPN_TCP_DEPTH_NESTING); 318 + if 
(sock_owned_by_user(sk)) { 319 319 if (skb_queue_len(&peer->tcp.out_queue) >= 320 320 READ_ONCE(net_hotdata.max_backlog)) { 321 321 dev_dstats_tx_dropped(peer->ovpn->dev); ··· 324 324 } 325 325 __skb_queue_tail(&peer->tcp.out_queue, skb); 326 326 } else { 327 - ovpn_tcp_send_sock_skb(peer, sock->sk, skb); 327 + ovpn_tcp_send_sock_skb(peer, sk, skb); 328 328 } 329 329 unlock: 330 - spin_unlock(&sock->sk->sk_lock.slock); 330 + spin_unlock(&sk->sk_lock.slock); 331 331 } 332 332 333 333 static void ovpn_tcp_release(struct sock *sk) ··· 474 474 int ovpn_tcp_socket_attach(struct ovpn_socket *ovpn_sock, 475 475 struct ovpn_peer *peer) 476 476 { 477 - struct socket *sock = ovpn_sock->sock; 478 477 struct strp_callbacks cb = { 479 478 .rcv_msg = ovpn_tcp_rcv, 480 479 .parse_msg = ovpn_tcp_parse, ··· 481 482 int ret; 482 483 483 484 /* make sure no pre-existing encapsulation handler exists */ 484 - if (sock->sk->sk_user_data) 485 + if (ovpn_sock->sk->sk_user_data) 485 486 return -EBUSY; 486 487 487 488 /* only a fully connected socket is expected. 
Connection should be 488 489 * handled in userspace 489 490 */ 490 - if (sock->sk->sk_state != TCP_ESTABLISHED) { 491 + if (ovpn_sock->sk->sk_state != TCP_ESTABLISHED) { 491 492 net_err_ratelimited("%s: provided TCP socket is not in ESTABLISHED state: %d\n", 492 493 netdev_name(peer->ovpn->dev), 493 - sock->sk->sk_state); 494 + ovpn_sock->sk->sk_state); 494 495 return -EINVAL; 495 496 } 496 497 497 - ret = strp_init(&peer->tcp.strp, sock->sk, &cb); 498 + ret = strp_init(&peer->tcp.strp, ovpn_sock->sk, &cb); 498 499 if (ret < 0) { 499 500 DEBUG_NET_WARN_ON_ONCE(1); 500 501 return ret; ··· 502 503 503 504 INIT_WORK(&peer->tcp.defer_del_work, ovpn_tcp_peer_del_work); 504 505 505 - __sk_dst_reset(sock->sk); 506 + __sk_dst_reset(ovpn_sock->sk); 506 507 skb_queue_head_init(&peer->tcp.user_queue); 507 508 skb_queue_head_init(&peer->tcp.out_queue); 508 509 509 510 /* save current CBs so that they can be restored upon socket release */ 510 - peer->tcp.sk_cb.sk_data_ready = sock->sk->sk_data_ready; 511 - peer->tcp.sk_cb.sk_write_space = sock->sk->sk_write_space; 512 - peer->tcp.sk_cb.prot = sock->sk->sk_prot; 513 - peer->tcp.sk_cb.ops = sock->sk->sk_socket->ops; 511 + peer->tcp.sk_cb.sk_data_ready = ovpn_sock->sk->sk_data_ready; 512 + peer->tcp.sk_cb.sk_write_space = ovpn_sock->sk->sk_write_space; 513 + peer->tcp.sk_cb.prot = ovpn_sock->sk->sk_prot; 514 + peer->tcp.sk_cb.ops = ovpn_sock->sk->sk_socket->ops; 514 515 515 516 /* assign our static CBs and prot/ops */ 516 - sock->sk->sk_data_ready = ovpn_tcp_data_ready; 517 - sock->sk->sk_write_space = ovpn_tcp_write_space; 517 + ovpn_sock->sk->sk_data_ready = ovpn_tcp_data_ready; 518 + ovpn_sock->sk->sk_write_space = ovpn_tcp_write_space; 518 519 519 - if (sock->sk->sk_family == AF_INET) { 520 - sock->sk->sk_prot = &ovpn_tcp_prot; 521 - sock->sk->sk_socket->ops = &ovpn_tcp_ops; 520 + if (ovpn_sock->sk->sk_family == AF_INET) { 521 + ovpn_sock->sk->sk_prot = &ovpn_tcp_prot; 522 + ovpn_sock->sk->sk_socket->ops = &ovpn_tcp_ops; 522 
523 } else { 523 - sock->sk->sk_prot = &ovpn_tcp6_prot; 524 - sock->sk->sk_socket->ops = &ovpn_tcp6_ops; 524 + ovpn_sock->sk->sk_prot = &ovpn_tcp6_prot; 525 + ovpn_sock->sk->sk_socket->ops = &ovpn_tcp6_ops; 525 526 } 526 527 527 528 /* avoid using task_frag */ 528 - sock->sk->sk_allocation = GFP_ATOMIC; 529 - sock->sk->sk_use_task_frag = false; 529 + ovpn_sock->sk->sk_allocation = GFP_ATOMIC; 530 + ovpn_sock->sk->sk_use_task_frag = false; 530 531 531 532 /* enqueue the RX worker */ 532 533 strp_check_rcv(&peer->tcp.strp);
+2 -1
drivers/net/ovpn/tcp.h
··· 30 30 * Required by the OpenVPN protocol in order to extract packets from 31 31 * the TCP stream on the receiver side. 32 32 */ 33 - void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct socket *sock, struct sk_buff *skb); 33 + void ovpn_tcp_send_skb(struct ovpn_peer *peer, struct sock *sk, 34 + struct sk_buff *skb); 34 35 void ovpn_tcp_tx_work(struct work_struct *work); 35 36 36 37 #endif /* _NET_OVPN_TCP_H_ */
+12 -22
drivers/net/ovpn/udp.c
··· 43 43 return NULL; 44 44 45 45 /* make sure that sk matches our stored transport socket */ 46 - if (unlikely(!ovpn_sock->sock || sk != ovpn_sock->sock->sk)) 46 + if (unlikely(!ovpn_sock->sk || sk != ovpn_sock->sk)) 47 47 return NULL; 48 48 49 49 return ovpn_sock; ··· 335 335 /** 336 336 * ovpn_udp_send_skb - prepare skb and send it over via UDP 337 337 * @peer: the destination peer 338 - * @sock: the RCU protected peer socket 338 + * @sk: peer socket 339 339 * @skb: the packet to send 340 340 */ 341 - void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock, 341 + void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk, 342 342 struct sk_buff *skb) 343 343 { 344 - int ret = -1; 344 + int ret; 345 345 346 346 skb->dev = peer->ovpn->dev; 347 347 /* no checksum performed at this layer */ 348 348 skb->ip_summed = CHECKSUM_NONE; 349 349 350 - /* get socket info */ 351 - if (unlikely(!sock)) { 352 - net_warn_ratelimited("%s: no sock for remote peer %u\n", 353 - netdev_name(peer->ovpn->dev), peer->id); 354 - goto out; 355 - } 356 - 357 350 /* crypto layer -> transport (UDP) */ 358 - ret = ovpn_udp_output(peer, &peer->dst_cache, sock->sk, skb); 359 - out: 360 - if (unlikely(ret < 0)) { 351 + ret = ovpn_udp_output(peer, &peer->dst_cache, sk, skb); 352 + if (unlikely(ret < 0)) 361 353 kfree_skb(skb); 362 - return; 363 - } 364 354 } 365 355 366 356 static void ovpn_udp_encap_destroy(struct sock *sk) ··· 373 383 /** 374 384 * ovpn_udp_socket_attach - set udp-tunnel CBs on socket and link it to ovpn 375 385 * @ovpn_sock: socket to configure 386 + * @sock: the socket container to be passed to setup_udp_tunnel_sock() 376 387 * @ovpn: the openvp instance to link 377 388 * 378 389 * After invoking this function, the sock will be controlled by ovpn so that ··· 381 390 * 382 391 * Return: 0 on success or a negative error code otherwise 383 392 */ 384 - int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, 393 + int ovpn_udp_socket_attach(struct ovpn_socket 
*ovpn_sock, struct socket *sock, 385 394 struct ovpn_priv *ovpn) 386 395 { 387 396 struct udp_tunnel_sock_cfg cfg = { ··· 389 398 .encap_rcv = ovpn_udp_encap_recv, 390 399 .encap_destroy = ovpn_udp_encap_destroy, 391 400 }; 392 - struct socket *sock = ovpn_sock->sock; 393 401 struct ovpn_socket *old_data; 394 402 int ret; 395 403 396 404 /* make sure no pre-existing encapsulation handler exists */ 397 405 rcu_read_lock(); 398 - old_data = rcu_dereference_sk_user_data(sock->sk); 406 + old_data = rcu_dereference_sk_user_data(ovpn_sock->sk); 399 407 if (!old_data) { 400 408 /* socket is currently unused - we can take it */ 401 409 rcu_read_unlock(); 402 - setup_udp_tunnel_sock(sock_net(sock->sk), sock, &cfg); 410 + setup_udp_tunnel_sock(sock_net(ovpn_sock->sk), sock, &cfg); 403 411 return 0; 404 412 } 405 413 ··· 411 421 * Unlikely TCP, a single UDP socket can be used to talk to many remote 412 422 * hosts and therefore openvpn instantiates one only for all its peers 413 423 */ 414 - if ((READ_ONCE(udp_sk(sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) && 424 + if ((READ_ONCE(udp_sk(ovpn_sock->sk)->encap_type) == UDP_ENCAP_OVPNINUDP) && 415 425 old_data->ovpn == ovpn) { 416 426 netdev_dbg(ovpn->dev, 417 427 "provided socket already owned by this interface\n"); ··· 432 442 */ 433 443 void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock) 434 444 { 435 - struct sock *sk = ovpn_sock->sock->sk; 445 + struct sock *sk = ovpn_sock->sk; 436 446 437 447 /* Re-enable multicast loopback */ 438 448 inet_set_bit(MC_LOOP, sk);
+2 -2
drivers/net/ovpn/udp.h
··· 15 15 struct ovpn_priv; 16 16 struct socket; 17 17 18 - int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, 18 + int ovpn_udp_socket_attach(struct ovpn_socket *ovpn_sock, struct socket *sock, 19 19 struct ovpn_priv *ovpn); 20 20 void ovpn_udp_socket_detach(struct ovpn_socket *ovpn_sock); 21 21 22 - void ovpn_udp_send_skb(struct ovpn_peer *peer, struct socket *sock, 22 + void ovpn_udp_send_skb(struct ovpn_peer *peer, struct sock *sk, 23 23 struct sk_buff *skb); 24 24 25 25 #endif /* _NET_OVPN_UDP_H_ */