// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC virtual connection handler, common bits.
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;

static void rxrpc_clean_up_connection(struct work_struct *work);
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at);

void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	struct rxrpc_local *local = conn->local;
	bool busy;

	if (WARN_ON_ONCE(!local))
		return;

	spin_lock_bh(&local->lock);
	busy = !list_empty(&conn->attend_link);
	if (!busy) {
		rxrpc_get_connection(conn, why);
		list_add_tail(&conn->attend_link, &local->conn_attend_q);
	}
	spin_unlock_bh(&local->lock);
	rxrpc_wake_up_io_thread(local);
}
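
/*
 * Illustrative sketch, not part of the RxRPC code: one way the attend
 * queue armed by rxrpc_poke_conn() might be drained.  The ref taken by
 * the poke is consumed by whoever dequeues the connection; the helper
 * name and the caller-supplied 'put_why' trace value are assumptions
 * made for this example only.
 */
static void __maybe_unused
rxrpc_example_drain_attend_q(struct rxrpc_local *local,
			     enum rxrpc_conn_trace put_why)
{
	struct rxrpc_connection *conn;

	spin_lock_bh(&local->lock);
	while ((conn = list_first_entry_or_null(&local->conn_attend_q,
						struct rxrpc_connection,
						attend_link))) {
		list_del_init(&conn->attend_link);
		spin_unlock_bh(&local->lock);
		/* ... service the connection here ... */
		rxrpc_put_connection(conn, put_why); /* drop the poke's ref */
		spin_lock_bh(&local->lock);
	}
	spin_unlock_bh(&local->lock);
}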

static void rxrpc_connection_timer(struct timer_list *timer)
{
	struct rxrpc_connection *conn =
		container_of(timer, struct rxrpc_connection, timer);

	rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer);
}

/*
 * allocate a new connection
 */
struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *rxnet,
						gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_LIST_HEAD(&conn->cache_link);
		timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
		INIT_WORK(&conn->processor, rxrpc_process_connection);
		INIT_WORK(&conn->destructor, rxrpc_clean_up_connection);
		INIT_LIST_HEAD(&conn->proc_link);
		INIT_LIST_HEAD(&conn->link);
		mutex_init(&conn->security_lock);
		mutex_init(&conn->tx_data_alloc_lock);
		skb_queue_head_init(&conn->rx_queue);
		conn->rxnet = rxnet;
		conn->security = &rxrpc_no_security;
		spin_lock_init(&conn->state_lock);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->idle_timestamp = jiffies;
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * Look up a client connection in the cache by protocol parameters.
 *
 * If successful, a pointer to the connection is returned, but no ref is
 * taken.  NULL is returned if there is no match.
 *
 * The caller must be holding the RCU read lock.
 */
struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *local,
							   struct sockaddr_rxrpc *srx,
							   struct sk_buff *skb)
{
	struct rxrpc_connection *conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;

	_enter(",%x", sp->hdr.cid & RXRPC_CIDMASK);

	/* Look up client connections by connection ID alone as their
	 * IDs are unique for this machine.
	 */
	conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT);
	if (!conn || refcount_read(&conn->ref) == 0) {
		_debug("no conn");
		goto not_found;
	}

	if (conn->proto.epoch != sp->hdr.epoch ||
	    conn->local != local)
		goto not_found;

	peer = conn->peer;
	switch (srx->transport.family) {
	case AF_INET:
		if (peer->srx.transport.sin.sin_port !=
		    srx->transport.sin.sin_port)
			goto not_found;
		break;
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		if (peer->srx.transport.sin6.sin6_port !=
		    srx->transport.sin6.sin6_port)
			goto not_found;
		break;
#endif
	default:
		BUG();
	}

	_leave(" = %p", conn);
	return conn;

not_found:
	_leave(" = NULL");
	return NULL;
}
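
/*
 * Illustrative sketch, not part of the RxRPC code: how a caller might pin
 * a connection returned by the unreferenced RCU lookup above.  The ref
 * must be taken before the RCU read lock is dropped; the helper name and
 * the caller-supplied 'why' trace value are assumptions for this example.
 */
static __maybe_unused struct rxrpc_connection *
rxrpc_example_lookup_and_pin(struct rxrpc_local *local,
			     struct sockaddr_rxrpc *srx,
			     struct sk_buff *skb,
			     enum rxrpc_conn_trace why)
{
	struct rxrpc_connection *conn;

	rcu_read_lock();
	conn = rxrpc_find_client_connection_rcu(local, srx, skb);
	/* Upgrade to a counted ref; fails if the conn is being freed. */
	if (conn)
		conn = rxrpc_get_connection_maybe(conn, why);
	rcu_read_unlock();
	return conn; /* Released by the caller with rxrpc_put_connection(). */
}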

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.  The caller must release the call's ref on the connection.
 */
void __rxrpc_disconnect_call(struct rxrpc_connection *conn,
			     struct rxrpc_call *call)
{
	struct rxrpc_channel *chan =
		&conn->channels[call->cid & RXRPC_CHANNELMASK];

	_enter("%d,%x", conn->debug_id, call->cid);

	if (chan->call == call) {
		/* Save the result of the call so that we can repeat it if necessary
		 * through the channel, whilst disposing of the actual call record.
		 */
		trace_rxrpc_disconnect_call(call);
		switch (call->completion) {
		case RXRPC_CALL_SUCCEEDED:
			chan->last_seq = call->rx_highest_seq;
			chan->last_type = RXRPC_PACKET_TYPE_ACK;
			break;
		case RXRPC_CALL_LOCALLY_ABORTED:
			chan->last_abort = call->abort_code;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		default:
			chan->last_abort = RX_CALL_DEAD;
			chan->last_type = RXRPC_PACKET_TYPE_ABORT;
			break;
		}

		chan->last_call = chan->call_id;
		chan->call_id = chan->call_counter;
		chan->call = NULL;
	}

	_leave("");
}
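
/*
 * Illustrative sketch, not the real retransmission code: the per-channel
 * state saved above is what lets the endpoint answer a retransmit for an
 * already-completed call without the call record.  The helper name is
 * hypothetical; only the chan->last_* fields come from the code above.
 */
static bool __maybe_unused
rxrpc_example_channel_can_replay(const struct rxrpc_channel *chan, u32 call_id)
{
	/* A packet bearing the channel's previous call ID should get the
	 * saved final ACK or ABORT replayed, not a fresh call.
	 */
	return call_id == chan->last_call &&
	       (chan->last_type == RXRPC_PACKET_TYPE_ACK ||
		chan->last_type == RXRPC_PACKET_TYPE_ABORT);
}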

/*
 * Disconnect a call and clear any channel it occupies when that call
 * terminates.
 */
void rxrpc_disconnect_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;

	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	rxrpc_see_call(call, rxrpc_call_see_disconnected);

	call->peer->cong_ssthresh = call->cong_ssthresh;

	if (!hlist_unhashed(&call->error_link)) {
		spin_lock(&call->peer->lock);
		hlist_del_init(&call->error_link);
		spin_unlock(&call->peer->lock);
	}

	if (rxrpc_is_client_call(call)) {
		rxrpc_disconnect_client_call(call->bundle, call);
	} else {
		__rxrpc_disconnect_call(conn, call);
		conn->idle_timestamp = jiffies;
		if (atomic_dec_and_test(&conn->active))
			rxrpc_set_service_reap_timer(conn->rxnet,
						     jiffies + rxrpc_connection_expiry * HZ);
	}

	rxrpc_put_call(call, rxrpc_call_put_io_thread);
}

/*
 * Queue a connection's work processor if the connection is still active.
 * The queuing is noted in the trace log; no extra ref is taken here.
 */
void rxrpc_queue_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why)
{
	if (atomic_read(&conn->active) >= 0 &&
	    rxrpc_queue_work(&conn->processor))
		rxrpc_see_connection(conn, why);
}

/*
 * Note the re-emergence of a connection.
 */
void rxrpc_see_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	if (conn) {
		int r = refcount_read(&conn->ref);

		trace_rxrpc_conn(conn->debug_id, r, why);
	}
}

/*
 * Get a ref on a connection.
 */
struct rxrpc_connection *rxrpc_get_connection(struct rxrpc_connection *conn,
					      enum rxrpc_conn_trace why)
{
	int r;

	__refcount_inc(&conn->ref, &r);
	trace_rxrpc_conn(conn->debug_id, r + 1, why);
	return conn;
}

/*
 * Try to get a ref on a connection.
 */
struct rxrpc_connection *
rxrpc_get_connection_maybe(struct rxrpc_connection *conn,
			   enum rxrpc_conn_trace why)
{
	int r;

	if (conn) {
		if (__refcount_inc_not_zero(&conn->ref, &r))
			trace_rxrpc_conn(conn->debug_id, r + 1, why);
		else
			conn = NULL;
	}
	return conn;
}

/*
 * Set the service connection reap timer.
 */
static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
					 unsigned long reap_at)
{
	if (rxnet->live)
		timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
}
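
/*
 * Illustrative sketch, not part of the driver: the expiry tunables above
 * are in seconds and are converted to a jiffies deadline when the reap
 * timer is armed.  timer_reduce() only ever brings the pending expiry
 * forward, so repeated calls keep the earliest requested deadline.  The
 * helper name is hypothetical.
 */
static void __maybe_unused rxrpc_example_rearm_reaper(struct rxrpc_net *rxnet)
{
	/* Seconds -> jiffies deadline, as in rxrpc_disconnect_call(). */
	unsigned long reap_at = jiffies + rxrpc_connection_expiry * HZ;

	rxrpc_set_service_reap_timer(rxnet, reap_at);
}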

/*
 * destroy a virtual connection
 */
static void rxrpc_rcu_free_connection(struct rcu_head *rcu)
{
	struct rxrpc_connection *conn =
		container_of(rcu, struct rxrpc_connection, rcu);
	struct rxrpc_net *rxnet = conn->rxnet;

	_enter("{%d,u=%d}", conn->debug_id, refcount_read(&conn->ref));

	trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref),
			 rxrpc_conn_free);
	kfree(conn);

	if (atomic_dec_and_test(&rxnet->nr_conns))
		wake_up_var(&rxnet->nr_conns);
}

/*
 * Clean up a dead connection.
 */
static void rxrpc_clean_up_connection(struct work_struct *work)
{
	struct rxrpc_connection *conn =
		container_of(work, struct rxrpc_connection, destructor);
	struct rxrpc_net *rxnet = conn->rxnet;

	ASSERT(!conn->channels[0].call &&
	       !conn->channels[1].call &&
	       !conn->channels[2].call &&
	       !conn->channels[3].call);
	ASSERT(list_empty(&conn->cache_link));

	del_timer_sync(&conn->timer);
	cancel_work_sync(&conn->processor); /* Processing may restart the timer */
	del_timer_sync(&conn->timer);

	write_lock(&rxnet->conn_lock);
	list_del_init(&conn->proc_link);
	write_unlock(&rxnet->conn_lock);

	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_kill_client_conn(conn);

	conn->security->clear(conn);
	key_put(conn->key);
	rxrpc_put_bundle(conn->bundle, rxrpc_bundle_put_conn);
	rxrpc_put_peer(conn->peer, rxrpc_peer_put_conn);
	rxrpc_put_local(conn->local, rxrpc_local_put_kill_conn);

	/* Drain the Rx queue again.  Even though the connection has been
	 * unpublished, an incoming packet could still have been added to
	 * the queue in the meantime, so purge it once more before the
	 * final RCU free.
	 */
	rxrpc_purge_queue(&conn->rx_queue);

	page_frag_cache_drain(&conn->tx_data_alloc);
	call_rcu(&conn->rcu, rxrpc_rcu_free_connection);
}

/*
 * Drop a ref on a connection.
 */
void rxrpc_put_connection(struct rxrpc_connection *conn,
			  enum rxrpc_conn_trace why)
{
	unsigned int debug_id;
	bool dead;
	int r;

	if (!conn)
		return;

	debug_id = conn->debug_id;
	dead = __refcount_dec_and_test(&conn->ref, &r);
	trace_rxrpc_conn(debug_id, r - 1, why);
	if (dead) {
		del_timer(&conn->timer);
		cancel_work(&conn->processor);

		if (in_softirq() || work_busy(&conn->processor) ||
		    timer_pending(&conn->timer))
			/* Can't use the rxrpc workqueue as we need to cancel/flush
			 * something that may be running/waiting there.
			 */
			schedule_work(&conn->destructor);
		else
			rxrpc_clean_up_connection(&conn->destructor);
	}
}

/*
 * reap dead service connections
 */
void rxrpc_service_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, service_conn_reaper);
	unsigned long expire_at, earliest, idle_timestamp, now;
	int active;

	LIST_HEAD(graveyard);

	_enter("");

	now = jiffies;
	earliest = now + MAX_JIFFY_OFFSET;

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		ASSERTCMP(atomic_read(&conn->active), >=, 0);
		if (likely(atomic_read(&conn->active) > 0))
			continue;
		if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
			continue;

		if (rxnet->live && !conn->local->dead) {
			idle_timestamp = READ_ONCE(conn->idle_timestamp);
			expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
			if (conn->local->service_closed)
				expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;

			_debug("reap CONN %d { a=%d,t=%ld }",
			       conn->debug_id, atomic_read(&conn->active),
			       (long)expire_at - (long)now);

			if (time_before(now, expire_at)) {
				if (time_before(expire_at, earliest))
					earliest = expire_at;
				continue;
			}
		}

		/* The activity count sits at 0 whilst the conn is unused on
		 * the list; we reduce that to -1 to make the conn unavailable.
		 */
		active = 0;
		if (!atomic_try_cmpxchg(&conn->active, &active, -1))
			continue;
		rxrpc_see_connection(conn, rxrpc_conn_see_reap_service);

		if (rxrpc_conn_is_client(conn))
			BUG();
		else
			rxrpc_unpublish_service_conn(conn);

		list_move_tail(&conn->link, &graveyard);
	}
	write_unlock(&rxnet->conn_lock);

	if (earliest != now + MAX_JIFFY_OFFSET) {
		_debug("reschedule reaper %ld", (long)earliest - (long)now);
		ASSERT(time_after(earliest, now));
		rxrpc_set_service_reap_timer(rxnet, earliest);
	}

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->active), ==, -1);
		rxrpc_put_connection(conn, rxrpc_conn_put_service_reaped);
	}

	_leave("");
}
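
/*
 * Illustrative sketch of the claim pattern used by the reaper above: move
 * the activity count atomically from 0 (idle) to -1 (dead) so that a
 * concurrent user, who would bump it from 0 upwards, can no longer win.
 * Hypothetical helper, for exposition only.
 */
static bool __maybe_unused rxrpc_example_claim_idle_conn(atomic_t *active)
{
	int idle = 0;

	/* Succeeds only if *active is still 0; otherwise 'idle' is
	 * overwritten with the observed value and the conn is left alone.
	 */
	return atomic_try_cmpxchg(active, &idle, -1);
}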

/*
 * preemptively destroy all the service connection records rather than
 * waiting for them to time out
 */
void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn, *_p;
	bool leak = false;

	_enter("");

	atomic_dec(&rxnet->nr_conns);

	del_timer_sync(&rxnet->service_conn_reap_timer);
	rxrpc_queue_work(&rxnet->service_conn_reaper);
	flush_workqueue(rxrpc_workqueue);

	write_lock(&rxnet->conn_lock);
	list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
		pr_err("AF_RXRPC: Leaked conn %p {%d}\n",
		       conn, refcount_read(&conn->ref));
		leak = true;
	}
	write_unlock(&rxnet->conn_lock);
	BUG_ON(leak);

	ASSERT(list_empty(&rxnet->conn_proc_list));

	/* We need to wait for the connections to be destroyed by RCU as they
	 * pin things that we still need to get rid of.
	 */
	wait_var_event(&rxnet->nr_conns, !atomic_read(&rxnet->nr_conns));
	_leave("");
}