// SPDX-License-Identifier: GPL-2.0-or-later
/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

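/*
 * Display strings for the call states and completion reasons, padded to
 * eight characters for fixed-width output (e.g. /proc/net/rxrpc/calls).
 */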
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_SERVER_PREALLOC]		= "SvPrealc",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

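/*
 * Slab cache for call objects, plus semaphores capping the number of
 * concurrently active calls (1000 each for userspace-initiated and
 * kernel-initiated calls).
 */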
struct kmem_cache *rxrpc_call_jar;

static struct semaphore rxrpc_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_call_limiter, 1000);
static struct semaphore rxrpc_kernel_call_limiter =
	__SEMAPHORE_INITIALIZER(rxrpc_kernel_call_limiter, 1000);

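/*
 * Handle expiry of the call timer.  If the call is still live, pass the
 * timer's ref on the call to the work queue; otherwise just drop that ref.
 */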
static void rxrpc_call_timer_expired(struct timer_list *t)
{
	struct rxrpc_call *call = from_timer(call, t, timer);

	_enter("%d", call->debug_id);

	if (call->state < RXRPC_CALL_COMPLETE) {
		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
		__rxrpc_queue_call(call);
	} else {
		rxrpc_put_call(call, rxrpc_call_put);
	}
}

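/*
 * Bring the call timer forward so that it expires no later than expire_at.
 * A ref is taken in case the timer needs arming; if timer_reduce() finds the
 * timer already pending (and thus already holding a ref), the extra ref is
 * dropped again.
 */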
void rxrpc_reduce_call_timer(struct rxrpc_call *call,
			     unsigned long expire_at,
			     unsigned long now,
			     enum rxrpc_timer_trace why)
{
	if (rxrpc_try_get_call(call, rxrpc_call_got_timer)) {
		trace_rxrpc_timer(call, why, now);
		if (timer_reduce(&call->timer, expire_at))
			rxrpc_put_call(call, rxrpc_call_put_notimer);
	}
}

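/*
 * Cancel the call timer, dropping the ref it held on the call if it was
 * still pending.
 */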
void rxrpc_delete_call_timer(struct rxrpc_call *call)
{
	if (del_timer_sync(&call->timer))
		rxrpc_put_call(call, rxrpc_call_put_timer);
}

static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call, rxrpc_call_got);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp,
				    unsigned int debug_id)
{
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk));

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->rxtx_buffer = kcalloc(RXRPC_RXTX_BUFF_SIZE,
				    sizeof(struct sk_buff *),
				    gfp);
	if (!call->rxtx_buffer)
		goto nomem;

	call->rxtx_annotations = kcalloc(RXRPC_RXTX_BUFF_SIZE, sizeof(u8), gfp);
	if (!call->rxtx_annotations)
		goto nomem_2;

	mutex_init(&call->user_mutex);

	/* Prevent lockdep reporting a deadlock false positive between the afs
	 * filesystem and sys_sendmsg() via the mmap sem.
	 */
	if (rx->sk.sk_kern_sock)
		lockdep_set_class(&call->user_mutex,
				  &rxrpc_call_user_mutex_lock_class_key);

	timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	INIT_LIST_HEAD(&call->recvmsg_link);
	INIT_LIST_HEAD(&call->sock_link);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	spin_lock_init(&call->notify_lock);
	spin_lock_init(&call->input_lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = debug_id;
	call->tx_total_len = -1;
	call->next_rx_timo = 20 * HZ;
	call->next_req_timo = 1 * HZ;

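	/* Poison the unused rbtree node, presumably to make erroneous use
	 * before the call is inserted into a socket's call tree show up.
	 */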
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	/* Leave space in the ring to handle a maxed-out jumbo packet */
	call->rx_winsize = rxrpc_rx_window_size;
	call->tx_winsize = 16;
	call->rx_expect_next = 1;

	call->cong_cwnd = 2;
	call->cong_ssthresh = RXRPC_RXTX_BUFF_SIZE - 1;

	call->rxnet = rxnet;
	call->rtt_avail = RXRPC_CALL_RTT_AVAIL_MASK;
	atomic_inc(&rxnet->nr_calls);
	return call;

nomem_2:
	kfree(call->rxtx_buffer);
nomem:
	kmem_cache_free(rxrpc_call_jar, call);
	return NULL;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp,
						  unsigned int debug_id)
{
	struct rxrpc_call *call;
	ktime_t now;

	_enter("");

	call = rxrpc_alloc_call(rx, gfp, debug_id);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->service_id = srx->srx_service;
	call->tx_phase = true;
	now = ktime_get_real();
	call->acks_latest_ts = now;
	call->cong_tstamp = now;

	_leave(" = %p", call);
	return call;
}

/*
 * Initiate the call ack/resend/expiry timer.
 */
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
	unsigned long now = jiffies;
	unsigned long j = now + MAX_JIFFY_OFFSET;

	call->ack_at = j;
	call->ack_lost_at = j;
	call->resend_at = j;
	call->ping_at = j;
	call->expect_rx_by = j;
	call->expect_req_by = j;
	call->expect_term_by = j;
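	/* No timeout is actually scheduled yet: each of the above is brought
	 * forward later (see rxrpc_reduce_call_timer()) as the corresponding
	 * event is set up.
	 */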
	call->timer.expires = now;
}

/*
 * Wait for a call slot to become available.
 */
static struct semaphore *rxrpc_get_call_slot(struct rxrpc_call_params *p, gfp_t gfp)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (p->kernel)
		limiter = &rxrpc_kernel_call_limiter;
	if (p->interruptibility == RXRPC_UNINTERRUPTIBLE) {
		down(limiter);
		return limiter;
	}
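	/* An interruptible wait can be cut short by a signal; the caller then
	 * sees NULL and backs out with -ERESTARTSYS.
	 */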
	return down_interruptible(limiter) < 0 ? NULL : limiter;
}

/*
 * Release a call slot.
 */
static void rxrpc_put_call_slot(struct rxrpc_call *call)
{
	struct semaphore *limiter = &rxrpc_call_limiter;

	if (test_bit(RXRPC_CALL_KERNEL, &call->flags))
		limiter = &rxrpc_kernel_call_limiter;
	up(limiter);
}

/*
 * Set up a call for the given parameters.
 * - Called with the socket lock held, which it must release.
 * - If it returns a call, the call's lock will need releasing by the caller.
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 struct rxrpc_call_params *p,
					 gfp_t gfp,
					 unsigned int debug_id)
	__releases(&rx->sk.sk_lock.slock)
	__acquires(&call->user_mutex)
{
	struct rxrpc_call *call, *xcall;
	struct rxrpc_net *rxnet;
	struct semaphore *limiter;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, p->user_call_ID);

	limiter = rxrpc_get_call_slot(p, gfp);
	if (!limiter)
		return ERR_PTR(-ERESTARTSYS);

	call = rxrpc_alloc_client_call(rx, srx, gfp, debug_id);
	if (IS_ERR(call)) {
		release_sock(&rx->sk);
		up(limiter);
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	call->interruptibility = p->interruptibility;
	call->tx_total_len = p->tx_total_len;
	trace_rxrpc_call(call->debug_id, rxrpc_call_new_client,
			 atomic_read(&call->usage),
			 here, (const void *)p->user_call_ID);
	if (p->kernel)
		__set_bit(RXRPC_CALL_KERNEL, &call->flags);

	/* We need to protect a partially set up call against the user as we
	 * will be acting outside the socket lock.
	 */
	mutex_lock(&call->user_mutex);

	/* Publish the call, even though it is incompletely set up as yet */
	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (p->user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (p->user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto error_dup_user_ID;
	}

	rcu_assign_pointer(call->socket, rx);
	call->user_call_ID = p->user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	rxrpc_get_call(call, rxrpc_call_got_userid);
	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	list_add(&call->sock_link, &rx->sock_calls);

	write_unlock(&rx->call_lock);

	rxnet = call->rxnet;
	write_lock(&rxnet->call_lock);
	list_add_tail(&call->link, &rxnet->calls);
	write_unlock(&rxnet->call_lock);

	/* From this point on, the call is protected by its own lock. */
	release_sock(&rx->sk);

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto error_attached_to_socket;

	trace_rxrpc_call(call->debug_id, rxrpc_call_connected,
			 atomic_read(&call->usage), here, NULL);

	rxrpc_start_call_timer(call);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
error_dup_user_ID:
	write_unlock(&rx->call_lock);
	release_sock(&rx->sk);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, -EEXIST);
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 atomic_read(&call->usage), here, ERR_PTR(-EEXIST));
	rxrpc_release_call(rx, call);
	mutex_unlock(&call->user_mutex);
	rxrpc_put_call(call, rxrpc_call_put);
	_leave(" = -EEXIST");
	return ERR_PTR(-EEXIST);

	/* We got an error, but the call is attached to the socket and is in
	 * need of release.  However, we might now race with recvmsg() since
	 * completing the call queues it for recvmsg().  Return 0 from
	 * sys_sendmsg() and leave the error to recvmsg() to deal with.
	 */
error_attached_to_socket:
	trace_rxrpc_call(call->debug_id, rxrpc_call_error,
			 atomic_read(&call->usage), here, ERR_PTR(ret));
	set_bit(RXRPC_CALL_DISCONNECTED, &call->flags);
	__rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR,
				    RX_CALL_DEAD, ret);
	_leave(" = c=%08x [err]", call->debug_id);
	return call;
}

/*
 * Set up an incoming call.  call->conn points to the connection.
 * This is called in BH context and isn't allowed to fail.
 */
void rxrpc_incoming_call(struct rxrpc_sock *rx,
			 struct rxrpc_call *call,
			 struct sk_buff *skb)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	u32 chan;

	_enter(",%d", call->conn->debug_id);

	rcu_assign_pointer(call->socket, rx);
	call->call_id = sp->hdr.callNumber;
	call->service_id = sp->hdr.serviceId;
	call->cid = sp->hdr.cid;
	call->state = RXRPC_CALL_SERVER_SECURING;
	call->cong_tstamp = skb->tstamp;

	/* Set the channel for this call.  We don't get channel_lock as we're
	 * only defending against the data_ready handler (which we're called
	 * from) and the RESPONSE packet parser (which is only really
	 * interested in call_counter and can cope with a disagreement with the
	 * call pointer).
	 */
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	conn->channels[chan].call_counter = call->call_id;
	conn->channels[chan].call_id = call->call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	rxrpc_start_call_timer(call);
	_leave("");
}

/*
 * Queue a call's work processor, getting a ref to pass to the work queue.
 */
bool rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_fetch_add_unless(&call->usage, 1, 0);
	if (n == 0)
		return false;
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued, n + 1,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Queue a call's work processor, passing the caller's ref to the work queue.
 */
bool __rxrpc_queue_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_read(&call->usage);
	ASSERTCMP(n, >=, 1);
	if (rxrpc_queue_work(&call->processor))
		trace_rxrpc_call(call->debug_id, rxrpc_call_queued_ref, n,
				 here, NULL);
	else
		rxrpc_put_call(call, rxrpc_call_put_noqueue);
	return true;
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	if (call) {
		int n = atomic_read(&call->usage);

		trace_rxrpc_call(call->debug_id, rxrpc_call_seen, n,
				 here, NULL);
	}
}

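/*
 * Attempt to get a ref on a call, failing if the refcount has already
 * reached zero.
 */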
bool rxrpc_try_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_fetch_add_unless(&call->usage, 1, 0);

	if (n == 0)
		return false;
	trace_rxrpc_call(call->debug_id, op, n, here, NULL);
	return true;
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);

	trace_rxrpc_call(call->debug_id, op, n, here, NULL);
}

/*
 * Clean up the RxTx skb ring.
 */
static void rxrpc_cleanup_ring(struct rxrpc_call *call)
{
	int i;

	for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) {
		rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned);
		call->rxtx_buffer[i] = NULL;
	}
}

/*
 * Detach a call from its owning socket.
 */
void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	struct rxrpc_connection *conn = call->conn;
	bool put = false;

	_enter("{%d,%d}", call->debug_id, atomic_read(&call->usage));

	trace_rxrpc_call(call->debug_id, rxrpc_call_release,
			 atomic_read(&call->usage),
			 here, (const void *)call->flags);

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	rxrpc_put_call_slot(call);
	rxrpc_delete_call_timer(call);

	/* Make sure we don't get any more notifications */
	write_lock_bh(&rx->recvmsg_lock);

	if (!list_empty(&call->recvmsg_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		list_del(&call->recvmsg_link);
		put = true;
	}

	/* list_empty() must return false in rxrpc_notify_socket() */
	call->recvmsg_link.next = NULL;
	call->recvmsg_link.prev = NULL;

	write_unlock_bh(&rx->recvmsg_lock);
	if (put)
		rxrpc_put_call(call, rxrpc_call_put);

	write_lock(&rx->call_lock);

	if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		rxrpc_put_call(call, rxrpc_call_put_userid);
	}

	list_del(&call->sock_link);
	write_unlock(&rx->call_lock);

	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags))
		rxrpc_disconnect_call(call);
	if (call->security)
		call->security->free_call_crypto(call);
	_leave("");
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;

	_enter("%p", rx);

	while (!list_empty(&rx->to_be_accepted)) {
		call = list_entry(rx->to_be_accepted.next,
				  struct rxrpc_call, accept_link);
		list_del(&call->accept_link);
		rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	while (!list_empty(&rx->sock_calls)) {
		call = list_entry(rx->sock_calls.next,
				  struct rxrpc_call, sock_link);
		rxrpc_get_call(call, rxrpc_call_got);
		rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET);
		rxrpc_send_abort_packet(call);
		rxrpc_release_call(rx, call);
		rxrpc_put_call(call, rxrpc_call_put);
	}

	_leave("");
}

/*
 * Drop a ref on a call, cleaning it up when the last ref is gone.
 */
void rxrpc_put_call(struct rxrpc_call *call, enum rxrpc_call_trace op)
{
	struct rxrpc_net *rxnet = call->rxnet;
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = call->debug_id;
	int n;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	trace_rxrpc_call(debug_id, op, n, here, NULL);
	ASSERTCMP(n, >=, 0);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);

		if (!list_empty(&call->link)) {
			write_lock(&rxnet->call_lock);
			list_del_init(&call->link);
			write_unlock(&rxnet->call_lock);
		}

		rxrpc_cleanup_call(call);
	}
}

/*
 * Final call destruction - but must be done in process context.
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
	struct rxrpc_net *rxnet = call->rxnet;

	rxrpc_delete_call_timer(call);

	rxrpc_put_connection(call->conn);
	rxrpc_put_peer(call->peer);
	kfree(call->rxtx_buffer);
	kfree(call->rxtx_annotations);
	kmem_cache_free(rxrpc_call_jar, call);
	if (atomic_dec_and_test(&rxnet->nr_calls))
		wake_up_var(&rxnet->nr_calls);
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

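	/* Final destruction needs process context (rxrpc_destroy_call() does
	 * del_timer_sync() among other things), so punt it to a workqueue if
	 * we're called from the RCU softirq.
	 */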
	if (in_softirq()) {
		INIT_WORK(&call->processor, rxrpc_destroy_call);
		if (!rxrpc_queue_work(&call->processor))
			BUG();
	} else {
		rxrpc_destroy_call(&call->processor);
	}
}

/*
 * clean up a call
 */
void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

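	/* Repoison the rbtree node; the call should have been removed from
	 * every socket's call tree by this point.
	 */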
	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE);
	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));

	rxrpc_cleanup_ring(call);
	rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned);

	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * Make sure that all calls are gone from a network namespace.  To reach this
 * point, any open UDP sockets in that namespace must have been closed, so any
 * outstanding calls cannot be doing I/O.
 */
void rxrpc_destroy_all_calls(struct rxrpc_net *rxnet)
{
	struct rxrpc_call *call;

	_enter("");

	if (!list_empty(&rxnet->calls)) {
		write_lock(&rxnet->call_lock);

		while (!list_empty(&rxnet->calls)) {
			call = list_entry(rxnet->calls.next,
					  struct rxrpc_call, link);
			_debug("Zapping call %p", call);

			rxrpc_see_call(call);
			list_del_init(&call->link);

			pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);

			write_unlock(&rxnet->call_lock);
			cond_resched();
			write_lock(&rxnet->call_lock);
		}

		write_unlock(&rxnet->call_lock);
	}

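	/* nr_calls is presumably biased by one at namespace init; drop that
	 * bias here and wait for any remaining calls to finish being
	 * destroyed.
	 */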
	atomic_dec(&rxnet->nr_calls);
	wait_var_event(&rxnet->nr_calls, !atomic_read(&rxnet->nr_calls));
}