// SPDX-License-Identifier: GPL-2.0-or-later
/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/udp.h>
#include <net/udp_tunnel.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Handle an ICMP/ICMP6 error turning up at the tunnel.  Push it through the
 * usual mechanism so that it gets parsed and presented through the UDP
 * socket's error_report().
 */
static void rxrpc_encap_err_rcv(struct sock *sk, struct sk_buff *skb, int err,
				__be16 port, u32 info, u8 *payload)
{
	if (ip_hdr(skb)->version == IPVERSION)
		return ip_icmp_error(sk, skb, err, port, info, payload);
	if (IS_ENABLED(CONFIG_AF_RXRPC_IPV6))
		return ipv6_icmp_error(sk, skb, err, port, info, payload);
}
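
/* A note on the dispatch above: ip_hdr(skb)->version distinguishes errors
 * that arrived via ICMP (IPv4) from those that arrived via ICMPv6, and
 * IS_ENABLED() lets the compiler discard the IPv6 branch entirely when
 * CONFIG_AF_RXRPC_IPV6 is not configured.
 */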

/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}
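
/* A note on the comparisons above: "a ?: b" is the GCC conditional-operator
 * extension that evaluates to a if a is non-zero and to b otherwise, so each
 * chain yields the first non-zero field difference.  Endpoints are thus
 * compared by transport type, then address length, then family, and within a
 * family by port and then by raw address bytes.
 */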

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		refcount_set(&local->ref, 1);
		atomic_set(&local->active_users, 1);
		local->rxnet = rxnet;
		INIT_HLIST_NODE(&local->link);
		init_rwsem(&local->defrag_sem);
		init_completion(&local->io_thread_ready);
		skb_queue_head_init(&local->rx_queue);
		INIT_LIST_HEAD(&local->call_attend_q);
		local->client_bundles = RB_ROOT;
		spin_lock_init(&local->client_bundles_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		local->srx.srx_service = 0;
		trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1);
	}

	_leave(" = %p", local);
	return local;
}
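
/* A freshly allocated endpoint starts with one reference and one active user,
 * both owned by the caller, and its service ID is cleared so that
 * rxrpc_local_cmp_key() keys endpoints purely on the transport address.
 */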

/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct udp_tunnel_sock_cfg tuncfg = {NULL};
	struct sockaddr_rxrpc *srx = &local->srx;
	struct udp_port_cfg udp_conf = {0};
	struct task_struct *io_thread;
	struct sock *usk;
	int ret;

	_enter("%p{%d,%d}",
	       local, srx->transport_type, srx->transport.family);

	udp_conf.family = srx->transport.family;
	udp_conf.use_udp_checksums = true;
	if (udp_conf.family == AF_INET) {
		udp_conf.local_ip = srx->transport.sin.sin_addr;
		udp_conf.local_udp_port = srx->transport.sin.sin_port;
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
	} else {
		udp_conf.local_ip6 = srx->transport.sin6.sin6_addr;
		udp_conf.local_udp_port = srx->transport.sin6.sin6_port;
		udp_conf.use_udp6_tx_checksums = true;
		udp_conf.use_udp6_rx_checksums = true;
#endif
	}
	ret = udp_sock_create(net, &udp_conf, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	tuncfg.encap_type = UDP_ENCAP_RXRPC;
	tuncfg.encap_rcv = rxrpc_encap_rcv;
	tuncfg.encap_err_rcv = rxrpc_encap_err_rcv;
	tuncfg.sk_user_data = local;
	setup_udp_tunnel_sock(net, local->socket, &tuncfg);

	/* set the socket up */
	usk = local->socket->sk;
	usk->sk_error_report = rxrpc_error_report;

	switch (srx->transport.family) {
	case AF_INET6:
		/* we want to receive ICMPv6 errors */
		ip6_sock_set_recverr(usk);

		/* Fall through and set IPv4 options too, otherwise we don't
		 * get errors from IPv4 packets sent through the IPv6 socket.
		 */
		fallthrough;
	case AF_INET:
		/* we want to receive ICMP errors */
		ip_sock_set_recverr(usk);

		/* we want to set the don't fragment bit */
		ip_sock_set_mtu_discover(usk, IP_PMTUDISC_DO);

		/* We want receive timestamps. */
		sock_enable_timestamps(usk);
		break;

	default:
		BUG();
	}

	io_thread = kthread_run(rxrpc_io_thread, local,
				"krxrpcio/%u", ntohs(udp_conf.local_udp_port));
	if (IS_ERR(io_thread)) {
		ret = PTR_ERR(io_thread);
		goto error_sock;
	}

	wait_for_completion(&local->io_thread_ready);
	local->io_thread = io_thread;
	_leave(" = 0");
	return 0;

error_sock:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;
	return ret;
}
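
/* The sequence above: bind a kernel UDP socket to the requested address, flip
 * it into RxRPC encap-tunnel mode so incoming packets are handed to
 * rxrpc_encap_rcv() and ICMP errors to rxrpc_encap_err_rcv(), enable error
 * reporting, path-MTU discovery and receive timestamps, then start the
 * per-endpoint I/O thread and wait for it to signal readiness.
 */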

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct hlist_node *cursor;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	hlist_for_each(cursor, &rxnet->local_endpoints) {
		local = hlist_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff != 0)
			continue;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We want to replace a dying object.
		 * Attempting to bind the transport socket may still fail if
		 * we're attempting to use a local address that the dying
		 * object is still using.
		 */
		if (!rxrpc_use_local(local, rxrpc_local_use_lookup))
			break;

		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

	if (cursor) {
		hlist_replace_rcu(cursor, &local->link);
		cursor->pprev = NULL;
	} else {
		hlist_add_head_rcu(&local->link, &rxnet->local_endpoints);
	}

found:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	if (local)
		call_rcu(&local->rcu, rxrpc_local_rcu);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}
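
/* An illustrative (not in-tree) sketch of how a caller might obtain and later
 * release an endpoint; the "why_*" trace values are placeholders for real
 * enum rxrpc_local_trace entries:
 *
 *	struct rxrpc_local *local;
 *
 *	local = rxrpc_lookup_local(net, &srx);
 *	if (IS_ERR(local))
 *		return PTR_ERR(local);
 *	...
 *	rxrpc_unuse_local(local, why_unuse);
 *	rxrpc_put_local(local, why_put);
 */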

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local,
				    enum rxrpc_local_trace why)
{
	int r, u;

	u = atomic_read(&local->active_users);
	__refcount_inc(&local->ref, &r);
	trace_rxrpc_local(local->debug_id, why, r + 1, u);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local,
					  enum rxrpc_local_trace why)
{
	int r, u;

	if (local && __refcount_inc_not_zero(&local->ref, &r)) {
		u = atomic_read(&local->active_users);
		trace_rxrpc_local(local->debug_id, why, r + 1, u);
		return local;
	}

	return NULL;
}

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r, u;

	if (local) {
		debug_id = local->debug_id;

		u = atomic_read(&local->active_users);
		dead = __refcount_dec_and_test(&local->ref, &r);
		trace_rxrpc_local(debug_id, why, r, u);

		if (dead)
			call_rcu(&local->rcu, rxrpc_local_rcu);
	}
}
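
/* Two counters govern an endpoint's lifetime: ->ref keeps the memory alive
 * (when it drops to zero the object is freed after an RCU grace period via
 * rxrpc_local_rcu()), while ->active_users keeps the transport socket and I/O
 * thread alive (when it drops to zero, rxrpc_unuse_local() below stops the
 * I/O thread, which then performs the closure of the transport - see
 * rxrpc_destroy_local()).
 */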

/*
 * Start using a local endpoint.
 */
struct rxrpc_local *rxrpc_use_local(struct rxrpc_local *local,
				    enum rxrpc_local_trace why)
{
	local = rxrpc_get_local_maybe(local, rxrpc_local_get_for_use);
	if (!local)
		return NULL;

	if (!__rxrpc_use_local(local, why)) {
		rxrpc_put_local(local, rxrpc_local_put_for_use);
		return NULL;
	}

	return local;
}

/*
 * Cease using a local endpoint.  Once the number of active users reaches 0,
 * we start the closure of the transport in the I/O thread.
 */
void rxrpc_unuse_local(struct rxrpc_local *local, enum rxrpc_local_trace why)
{
	unsigned int debug_id;
	int r, u;

	if (local) {
		debug_id = local->debug_id;
		r = refcount_read(&local->ref);
		u = atomic_dec_return(&local->active_users);
		trace_rxrpc_local(debug_id, why, r, u);
		if (u == 0)
			kthread_stop(local->io_thread);
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
void rxrpc_destroy_local(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	local->dead = true;

	mutex_lock(&rxnet->local_mutex);
	hlist_del_init_rcu(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	rxrpc_clean_up_local_conns(local);
	rxrpc_service_connection_reaper(&rxnet->service_conn_reaper);
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->rx_queue);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	rxrpc_see_local(local, rxrpc_local_free);
	kfree(local);
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

	flush_workqueue(rxrpc_workqueue);

	if (!hlist_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		hlist_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, refcount_read(&local->ref));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}