// SPDX-License-Identifier: GPL-2.0-or-later
/* Processing of received RxRPC packets
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include "ar-internal.h"

/* Override priority when generating ACKs for received DATA */
static const u8 rxrpc_ack_priority[RXRPC_ACK__INVALID] = {
	[RXRPC_ACK_IDLE]		= 1,
	[RXRPC_ACK_DELAY]		= 2,
	[RXRPC_ACK_REQUESTED]		= 3,
	[RXRPC_ACK_DUPLICATE]		= 4,
	[RXRPC_ACK_EXCEEDS_WINDOW]	= 5,
	[RXRPC_ACK_NOSPACE]		= 6,
	[RXRPC_ACK_OUT_OF_SEQUENCE]	= 7,
};
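
/* Reader's note: this table ranks competing ACK proposals.  When the
 * subpackets of a jumbo DATA packet each want to generate an ACK, the reason
 * with the higher priority value wins (see the send_ack handling in
 * rxrpc_input_data_one() below).
 */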

static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq,
			      enum rxrpc_abort_reason why)
{
	rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why);
}

/*
 * Do TCP-style congestion management [RFC5681].
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct rxrpc_ack_summary *summary)
{
	summary->change = rxrpc_cong_no_change;
	summary->in_flight = rxrpc_tx_in_flight(call);

	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
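		/* As in RFC 5681 s3.1 (there in bytes, here in packets):
		 * on a retransmission timeout, ssthresh becomes
		 * max(FlightSize / 2, 2) and transmission restarts in
		 * slow-start from a window of 1.
		 */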
		call->cong_ssthresh = umax(summary->in_flight / 2, 2);
		call->cong_cwnd = 1;
		if (call->cong_cwnd >= call->cong_ssthresh &&
		    call->cong_ca_state == RXRPC_CA_SLOW_START) {
			call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
			call->cong_tstamp = call->acks_latest_ts;
			call->cong_cumul_acks = 0;
		}
	}

	call->cong_cumul_acks += summary->nr_new_sacks;
	call->cong_cumul_acks += summary->nr_new_hacks;
	if (call->cong_cumul_acks > 255)
		call->cong_cumul_acks = 255;

	switch (call->cong_ca_state) {
	case RXRPC_CA_SLOW_START:
		if (call->acks_nr_snacks > 0)
			goto packet_loss_detected;
		if (call->cong_cumul_acks > 0)
			call->cong_cwnd += 1;
		if (call->cong_cwnd >= call->cong_ssthresh) {
			call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
			call->cong_tstamp = call->acks_latest_ts;
		}
		goto out;

	case RXRPC_CA_CONGEST_AVOIDANCE:
		if (call->acks_nr_snacks > 0)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->rtt_count == 0)
			goto out;
		if (ktime_before(call->acks_latest_ts,
				 ktime_add_us(call->cong_tstamp,
					      call->srtt_us >> 3)))
			goto out_no_clear_ca;
		summary->change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = call->acks_latest_ts;
		if (call->cong_cumul_acks >= call->cong_cwnd)
			call->cong_cwnd++;
		goto out;

	case RXRPC_CA_PACKET_LOSS:
		if (call->acks_nr_snacks == 0)
			goto resume_normality;

		if (summary->new_low_snack) {
			summary->change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		summary->change = rxrpc_cong_begin_retransmission;
		call->cong_ca_state = RXRPC_CA_FAST_RETRANSMIT;
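		/* Fast retransmit per RFC 5681 s3.2: halve the in-flight
		 * count to get ssthresh, then inflate cwnd by 3 for the three
		 * duplicate ACKs that triggered this (those segments have
		 * left the network).
		 */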
		call->cong_ssthresh = umax(summary->in_flight / 2, 2);
		call->cong_cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		summary->need_retransmit = true;
		summary->in_fast_or_rto_recovery = true;
		goto out;

	case RXRPC_CA_FAST_RETRANSMIT:
		rxrpc_tlp_init(call);
		summary->in_fast_or_rto_recovery = true;
		if (!summary->new_low_snack) {
			if (summary->nr_new_sacks == 0)
				call->cong_cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				summary->change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				summary->need_retransmit = true;
			}
		} else {
			summary->change = rxrpc_cong_progress;
			call->cong_cwnd = call->cong_ssthresh;
			if (call->acks_nr_snacks == 0) {
				summary->exiting_fast_or_rto_recovery = true;
				goto resume_normality;
			}
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	summary->change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = call->acks_latest_ts;
	if (call->cong_cwnd < call->cong_ssthresh)
		call->cong_ca_state = RXRPC_CA_SLOW_START;
	else
		call->cong_ca_state = RXRPC_CA_CONGEST_AVOIDANCE;
out:
	call->cong_cumul_acks = 0;
out_no_clear_ca:
	if (call->cong_cwnd >= RXRPC_TX_MAX_WINDOW)
		call->cong_cwnd = RXRPC_TX_MAX_WINDOW;
	trace_rxrpc_congest(call, summary);
	return;

packet_loss_detected:
	summary->change = rxrpc_cong_saw_nack;
	call->cong_ca_state = RXRPC_CA_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) ||
	    call->acks_nr_sacks != call->tx_top - call->tx_bottom) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}

/*
 * Degrade the congestion window if we haven't transmitted a packet for >1RTT.
 */
void rxrpc_congestion_degrade(struct rxrpc_call *call)
{
	ktime_t rtt, now, time_since;

	if (call->cong_ca_state != RXRPC_CA_SLOW_START &&
	    call->cong_ca_state != RXRPC_CA_CONGEST_AVOIDANCE)
		return;
	if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY)
		return;

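	/* call->srtt_us appears to hold the smoothed RTT as a TCP-style
	 * fixed-point value scaled by 8, hence the division by 8 here and the
	 * ">> 3" in the congestion-avoidance test above.
	 */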
	rtt = ns_to_ktime(call->srtt_us * (NSEC_PER_USEC / 8));
	now = ktime_get_real();
	time_since = ktime_sub(now, call->tx_last_sent);
	if (ktime_before(time_since, rtt))
		return;

	trace_rxrpc_reset_cwnd(call, time_since, rtt);
	rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
	call->tx_last_sent = now;
	call->cong_ca_state = RXRPC_CA_SLOW_START;
	call->cong_ssthresh = umax(call->cong_ssthresh, call->cong_cwnd * 3 / 4);
	call->cong_cwnd = umax(call->cong_cwnd / 2, RXRPC_MIN_CWND);
}

/*
 * Add an RTT sample derived from an ACK'd DATA packet.
 */
static void rxrpc_add_data_rtt_sample(struct rxrpc_call *call,
				      struct rxrpc_ack_summary *summary,
				      struct rxrpc_txqueue *tq,
				      int ix)
{
	ktime_t xmit_ts = ktime_add_us(tq->xmit_ts_base, tq->segment_xmit_ts[ix]);

	rxrpc_call_add_rtt(call, rxrpc_rtt_rx_data_ack, -1,
			   summary->acked_serial, summary->ack_serial,
			   xmit_ts, call->acks_latest_ts);
	__clear_bit(ix, &tq->rtt_samples); /* Prevent repeat RTT sample */
}

/*
 * Apply a hard ACK by advancing the Tx window.
 */
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct rxrpc_txqueue *tq = call->tx_queue;
	rxrpc_seq_t seq = call->tx_bottom + 1;
	bool rot_last = false, trace = false;

	_enter("%x,%x", call->tx_bottom, to);

	trace_rxrpc_tx_rotate(call, seq, to);
	trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate);

	if (call->acks_lowest_nak == call->tx_bottom) {
		call->acks_lowest_nak = to;
	} else if (after(to, call->acks_lowest_nak)) {
		summary->new_low_snack = true;
		call->acks_lowest_nak = to;
	}

	/* We may have a left-over fully-consumed buffer at the front that we
	 * couldn't drop before (rotate_and_keep below).
	 */
	if (seq == call->tx_qbase + RXRPC_NR_TXQUEUE) {
		call->tx_qbase += RXRPC_NR_TXQUEUE;
		call->tx_queue = tq->next;
		trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate_and_free);
		kfree(tq);
		tq = call->tx_queue;
	}

	do {
		unsigned int ix = seq - call->tx_qbase;

		_debug("tq=%x seq=%x i=%d f=%x", tq->qbase, seq, ix, tq->bufs[ix]->flags);
		if (tq->bufs[ix]->flags & RXRPC_LAST_PACKET) {
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
			rot_last = true;
		}

		if (summary->acked_serial == tq->segment_serial[ix] &&
		    test_bit(ix, &tq->rtt_samples))
			rxrpc_add_data_rtt_sample(call, summary, tq, ix);

		if (ix == tq->nr_reported_acks) {
			/* Packet directly hard ACK'd. */
			tq->nr_reported_acks++;
			rxrpc_input_rack_one(call, summary, tq, ix);
			if (seq == call->tlp_seq)
				summary->tlp_probe_acked = true;
			summary->nr_new_hacks++;
			__set_bit(ix, &tq->segment_acked);
			trace_rxrpc_rotate(call, tq, summary, seq, rxrpc_rotate_trace_hack);
		} else if (test_bit(ix, &tq->segment_acked)) {
			/* Soft ACK -> hard ACK. */
			call->acks_nr_sacks--;
			trace_rxrpc_rotate(call, tq, summary, seq, rxrpc_rotate_trace_sack);
		} else {
			/* Soft NAK -> hard ACK. */
			call->acks_nr_snacks--;
			rxrpc_input_rack_one(call, summary, tq, ix);
			if (seq == call->tlp_seq)
				summary->tlp_probe_acked = true;
			summary->nr_new_hacks++;
			__set_bit(ix, &tq->segment_acked);
			trace_rxrpc_rotate(call, tq, summary, seq, rxrpc_rotate_trace_snak);
		}

		call->tx_nr_sent--;
		if (__test_and_clear_bit(ix, &tq->segment_lost))
			call->tx_nr_lost--;
		if (__test_and_clear_bit(ix, &tq->segment_retransmitted))
			call->tx_nr_resent--;
		__clear_bit(ix, &tq->ever_retransmitted);

		rxrpc_put_txbuf(tq->bufs[ix], rxrpc_txbuf_put_rotated);
		tq->bufs[ix] = NULL;

		WRITE_ONCE(call->tx_bottom, seq);
		trace_rxrpc_txqueue(call, (rot_last ?
					   rxrpc_txqueue_rotate_last :
					   rxrpc_txqueue_rotate));

		seq++;
		trace = true;
		if (!(seq & RXRPC_TXQ_MASK)) {
			trace_rxrpc_rack_update(call, summary);
			trace = false;
			prefetch(tq->next);
			if (tq != call->tx_qtail) {
				call->tx_qbase += RXRPC_NR_TXQUEUE;
				call->tx_queue = tq->next;
				trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate_and_free);
				kfree(tq);
				tq = call->tx_queue;
			} else {
				trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate_and_keep);
				tq = NULL;
				break;
			}
		}

	} while (before_eq(seq, to));

	if (trace)
		trace_rxrpc_rack_update(call, summary);

	if (rot_last) {
		set_bit(RXRPC_CALL_TX_ALL_ACKED, &call->flags);
		if (tq) {
			trace_rxrpc_tq(call, tq, seq, rxrpc_tq_rotate_and_free);
			kfree(tq);
			call->tx_queue = NULL;
		}
	}

	_debug("%x,%x,%x,%d", to, call->tx_bottom, call->tx_top, rot_last);

	wake_up(&call->waitq);
	return rot_last;
}

/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       enum rxrpc_abort_reason abort_why)
{
	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	call->rack_timer_mode = RXRPC_CALL_RACKTIMER_OFF;
	call->rack_timo_at = KTIME_MAX;
	trace_rxrpc_rack_timer(call, 0, false);
	trace_rxrpc_timer_can(call, rxrpc_timer_trace_rack_off + call->rack_timer_mode);

	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		if (reply_begun) {
			rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_RECV_REPLY);
			trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
			break;
		}

		rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY);
		trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply);
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		trace_rxrpc_txqueue(call, rxrpc_txqueue_end);
		break;

	default:
		kdebug("end_tx %s", rxrpc_call_states[__rxrpc_call_state(call)]);
		rxrpc_proto_abort(call, call->tx_top, abort_why);
		break;
	}
}

/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		call->delay_ack_at = KTIME_MAX;
		trace_rxrpc_timer_can(call, rxrpc_timer_trace_delayed_ack);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
			rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply);
			return false;
		}
	}

	rxrpc_end_tx_phase(call, true, rxrpc_eproto_unexpected_reply);
	return true;
}

/*
 * End the packet reception phase.
 */
static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial)
{
	rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq);

	_enter("%d,%s", call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)]);

	trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh);

	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack);
		rxrpc_call_completed(call);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST);
		call->expect_req_by = KTIME_MAX;
		rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op);
		break;

	default:
		break;
	}
}

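/* Reader's note on the window terms used below: ackr_window is the sequence
 * number of the first packet not yet queued for recvmsg and ackr_wtop is one
 * past the highest sequence received so far; the two differ only while
 * out-of-order packets sit on rx_oos_queue.
 */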
static void rxrpc_input_update_ack_window(struct rxrpc_call *call,
					  rxrpc_seq_t window, rxrpc_seq_t wtop)
{
	call->ackr_window = window;
	call->ackr_wtop = wtop;
}

/*
 * Push a DATA packet onto the Rx queue.
 */
static void rxrpc_input_queue_data(struct rxrpc_call *call, struct sk_buff *skb,
				   rxrpc_seq_t window, rxrpc_seq_t wtop,
				   enum rxrpc_receive_trace why)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;

	spin_lock_irq(&call->recvmsg_queue.lock);

	__skb_queue_tail(&call->recvmsg_queue, skb);
	rxrpc_input_update_ack_window(call, window, wtop);
	trace_rxrpc_receive(call, last ? why + 1 : why, sp->hdr.serial, sp->hdr.seq);
	if (last)
		/* Change the state inside the lock so that recvmsg syncs
		 * correctly with it and using sendmsg() to send a reply
		 * doesn't race.
		 */
		rxrpc_end_rx_phase(call, sp->hdr.serial);

	spin_unlock_irq(&call->recvmsg_queue.lock);
}

/*
 * Process a DATA packet.
 */
static void rxrpc_input_data_one(struct rxrpc_call *call, struct sk_buff *skb,
				 bool *_notify, rxrpc_serial_t *_ack_serial, int *_ack_reason)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct sk_buff *oos;
	rxrpc_serial_t serial = sp->hdr.serial;
	unsigned int sack = call->ackr_sack_base;
	rxrpc_seq_t window = call->ackr_window;
	rxrpc_seq_t wtop = call->ackr_wtop;
	rxrpc_seq_t wlimit = window + call->rx_winsize - 1;
	rxrpc_seq_t seq = sp->hdr.seq;
	bool last = sp->hdr.flags & RXRPC_LAST_PACKET;
	int ack_reason = -1;

	rxrpc_inc_stat(call->rxnet, stat_rx_data);
	if (sp->hdr.flags & RXRPC_REQUEST_ACK)
		rxrpc_inc_stat(call->rxnet, stat_rx_data_reqack);
	if (sp->hdr.flags & RXRPC_JUMBO_PACKET)
		rxrpc_inc_stat(call->rxnet, stat_rx_data_jumbo);

	if (last) {
		if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq + 1 != wtop)
			return rxrpc_proto_abort(call, seq, rxrpc_eproto_different_last);
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, wtop)) {
			pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n",
				call->debug_id, seq, window, wtop, wlimit);
			return rxrpc_proto_abort(call, seq, rxrpc_eproto_data_after_last);
		}
	}

	if (after(seq, call->rx_highest_seq))
		call->rx_highest_seq = seq;

	trace_rxrpc_rx_data(call->debug_id, seq, serial, sp->hdr.flags);

	if (before(seq, window)) {
		ack_reason = RXRPC_ACK_DUPLICATE;
		goto send_ack;
	}
	if (after(seq, wlimit)) {
		ack_reason = RXRPC_ACK_EXCEEDS_WINDOW;
		goto send_ack;
	}

	/* Queue the packet. */
	if (seq == window) {
		if (sp->hdr.flags & RXRPC_REQUEST_ACK)
			ack_reason = RXRPC_ACK_REQUESTED;
		/* Send an immediate ACK if we fill in a hole */
		else if (!skb_queue_empty(&call->rx_oos_queue))
			ack_reason = RXRPC_ACK_DELAY;

		window++;
		if (after(window, wtop)) {
			trace_rxrpc_sack(call, seq, sack, rxrpc_sack_none);
			wtop = window;
		} else {
			trace_rxrpc_sack(call, seq, sack, rxrpc_sack_advance);
			sack = (sack + 1) % RXRPC_SACK_SIZE;
		}

		rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg);

		rxrpc_input_queue_data(call, skb, window, wtop, rxrpc_receive_queue);
		*_notify = true;

		while ((oos = skb_peek(&call->rx_oos_queue))) {
			struct rxrpc_skb_priv *osp = rxrpc_skb(oos);

			if (after(osp->hdr.seq, window))
				break;

			__skb_unlink(oos, &call->rx_oos_queue);
			last = osp->hdr.flags & RXRPC_LAST_PACKET;
			seq = osp->hdr.seq;
			call->ackr_sack_table[sack] = 0;
			trace_rxrpc_sack(call, seq, sack, rxrpc_sack_fill);
			sack = (sack + 1) % RXRPC_SACK_SIZE;

			window++;
			rxrpc_input_queue_data(call, oos, window, wtop,
					       rxrpc_receive_queue_oos);
		}

		call->ackr_sack_base = sack;
	} else {
		unsigned int slot;

		ack_reason = RXRPC_ACK_OUT_OF_SEQUENCE;

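		/* The SACK table is a ring buffer: ackr_sack_base is the slot
		 * for the first sequence in the window, so an out-of-order
		 * packet lands (seq - window) slots further on, modulo the
		 * table size.
		 */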
		slot = seq - window;
		sack = (sack + slot) % RXRPC_SACK_SIZE;

		if (call->ackr_sack_table[sack % RXRPC_SACK_SIZE]) {
			ack_reason = RXRPC_ACK_DUPLICATE;
			goto send_ack;
		}

		call->ackr_sack_table[sack % RXRPC_SACK_SIZE] |= 1;
		trace_rxrpc_sack(call, seq, sack, rxrpc_sack_oos);

		if (after(seq + 1, wtop)) {
			wtop = seq + 1;
			rxrpc_input_update_ack_window(call, window, wtop);
		}

		skb_queue_walk(&call->rx_oos_queue, oos) {
			struct rxrpc_skb_priv *osp = rxrpc_skb(oos);

			if (after(osp->hdr.seq, seq)) {
				rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg_oos);
				__skb_queue_before(&call->rx_oos_queue, oos, skb);
				goto oos_queued;
			}
		}

		rxrpc_get_skb(skb, rxrpc_skb_get_to_recvmsg_oos);
		__skb_queue_tail(&call->rx_oos_queue, skb);
oos_queued:
		trace_rxrpc_receive(call, last ? rxrpc_receive_oos_last : rxrpc_receive_oos,
				    sp->hdr.serial, sp->hdr.seq);
	}

send_ack:
	if (ack_reason >= 0) {
		if (rxrpc_ack_priority[ack_reason] > rxrpc_ack_priority[*_ack_reason]) {
			*_ack_serial = serial;
			*_ack_reason = ack_reason;
		} else if (rxrpc_ack_priority[ack_reason] == rxrpc_ack_priority[*_ack_reason] &&
			   ack_reason == RXRPC_ACK_REQUESTED) {
			*_ack_serial = serial;
			*_ack_reason = ack_reason;
		}
	}
}

/*
 * Split a jumbo packet and file the bits separately.
 */
static bool rxrpc_input_split_jumbo(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_jumbo_header jhdr;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb), *jsp;
	struct sk_buff *jskb;
	rxrpc_serial_t ack_serial = 0;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len - offset;
	bool notify = false;
	int ack_reason = 0, count = 1, stat_ix;

	while (sp->hdr.flags & RXRPC_JUMBO_PACKET) {
		if (len < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (sp->hdr.flags & RXRPC_LAST_PACKET)
			goto protocol_error;
		if (skb_copy_bits(skb, offset + RXRPC_JUMBO_DATALEN,
				  &jhdr, sizeof(jhdr)) < 0)
			goto protocol_error;

		jskb = skb_clone(skb, GFP_NOFS);
		if (!jskb) {
			kdebug("couldn't clone");
			return false;
		}
		rxrpc_new_skb(jskb, rxrpc_skb_new_jumbo_subpacket);
		jsp = rxrpc_skb(jskb);
		jsp->offset = offset;
		jsp->len = RXRPC_JUMBO_DATALEN;
		rxrpc_input_data_one(call, jskb, &notify, &ack_serial, &ack_reason);
		rxrpc_free_skb(jskb, rxrpc_skb_put_jumbo_subpacket);

		sp->hdr.flags = jhdr.flags;
		sp->hdr._rsvd = ntohs(jhdr._rsvd);
		sp->hdr.seq++;
		sp->hdr.serial++;
		offset += RXRPC_JUMBO_SUBPKTLEN;
		len -= RXRPC_JUMBO_SUBPKTLEN;
		count++;
	}

	sp->offset = offset;
	sp->len = len;
	rxrpc_input_data_one(call, skb, &notify, &ack_serial, &ack_reason);

	stat_ix = umin(count, ARRAY_SIZE(call->rxnet->stat_rx_jumbo)) - 1;
	atomic_inc(&call->rxnet->stat_rx_jumbo[stat_ix]);

	if (ack_reason > 0) {
		rxrpc_send_ACK(call, ack_reason, ack_serial,
			       rxrpc_propose_ack_input_data);
	} else {
		call->ackr_nr_unacked++;
		rxrpc_propose_delay_ACK(call, sp->hdr.serial,
					rxrpc_propose_ack_input_data);
	}
	if (notify && !test_bit(RXRPC_CALL_CONN_CHALLENGING, &call->flags)) {
		trace_rxrpc_notify_socket(call->debug_id, sp->hdr.serial);
		rxrpc_notify_socket(call);
	}
	return true;

protocol_error:
	return false;
}

/*
 * Process a DATA packet, adding the packet to the Rx ring.  The caller's
 * packet ref must be passed on or discarded.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_serial_t serial = sp->hdr.serial;
	rxrpc_seq_t seq0 = sp->hdr.seq;

	_enter("{%x,%x,%x},{%u,%x}",
	       call->ackr_window, call->ackr_wtop, call->rx_highest_seq,
	       skb->len, seq0);

	if (__rxrpc_call_is_complete(call))
		return;

	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		/* Received data implicitly ACKs all of the request
		 * packets we sent when we're acting as a client.
		 */
		if (!rxrpc_receiving_reply(call))
			goto out_notify;
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST: {
		unsigned long timo = READ_ONCE(call->next_req_timo);

		if (timo) {
			ktime_t delay = ms_to_ktime(timo);

			call->expect_req_by = ktime_add(ktime_get_real(), delay);
			trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_idle);
		}
		break;
	}

	default:
		break;
	}

	if (!rxrpc_input_split_jumbo(call, skb)) {
		rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo);
		goto out_notify;
	}
	return;

out_notify:
	trace_rxrpc_notify_socket(call->debug_id, serial);
	rxrpc_notify_socket(call);
	_leave(" [queued]");
}

/*
 * See if there's a cached RTT probe to complete.
 */
static void rxrpc_complete_rtt_probe(struct rxrpc_call *call,
				     ktime_t resp_time,
				     rxrpc_serial_t acked_serial,
				     rxrpc_serial_t ack_serial,
				     enum rxrpc_rtt_rx_trace type)
{
	rxrpc_serial_t orig_serial;
	unsigned long avail;
	ktime_t sent_at;
	bool matched = false;
	int i;

	avail = READ_ONCE(call->rtt_avail);
	smp_rmb(); /* Read avail bits before accessing data. */

	for (i = 0; i < ARRAY_SIZE(call->rtt_serial); i++) {
		if (!test_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &avail))
			continue;

		sent_at = call->rtt_sent_at[i];
		orig_serial = call->rtt_serial[i];

		if (orig_serial == acked_serial) {
			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
			smp_mb(); /* Read data before setting avail bit */
			set_bit(i, &call->rtt_avail);
			rxrpc_call_add_rtt(call, type, i, acked_serial, ack_serial,
					   sent_at, resp_time);
			matched = true;
		}

		/* If a later serial is being acked, then mark this slot as
		 * being available.
		 */
		if (after(acked_serial, orig_serial)) {
			trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_obsolete, i,
					   orig_serial, acked_serial, 0, 0, 0);
			clear_bit(i + RXRPC_CALL_RTT_PEND_SHIFT, &call->rtt_avail);
			smp_wmb();
			set_bit(i, &call->rtt_avail);
		}
	}

	if (!matched)
		trace_rxrpc_rtt_rx(call, rxrpc_rtt_rx_lost, 9, 0, acked_serial, 0, 0, 0);
}

/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb,
				    struct rxrpc_acktrailer *trailer)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer = call->peer;
	unsigned int max_data, capacity;
	bool wake = false;
	u32 max_mtu = ntohl(trailer->maxMTU);
	//u32 if_mtu = ntohl(trailer->ifMTU);
	u32 rwind = ntohl(trailer->rwind);
	u32 jumbo_max = ntohl(trailer->jumbo_max);

	if (rwind > RXRPC_TX_MAX_WINDOW)
		rwind = RXRPC_TX_MAX_WINDOW;
	if (call->tx_winsize != rwind) {
		if (rwind > call->tx_winsize)
			wake = true;
		trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
		call->tx_winsize = rwind;
	}

	max_mtu = clamp(max_mtu, 500, 65535);
	peer->ackr_max_data = max_mtu;

	if (max_mtu < peer->max_data) {
		trace_rxrpc_pmtud_reduce(peer, sp->hdr.serial, max_mtu,
					 rxrpc_pmtud_reduce_ack);
		write_seqcount_begin(&peer->mtu_lock);
		peer->max_data = max_mtu;
		write_seqcount_end(&peer->mtu_lock);
	}

	max_data = umin(max_mtu, peer->max_data);
	capacity = max_data;
	capacity += sizeof(struct rxrpc_jumbo_header); /* First subpacket has main hdr, not jumbo */
	capacity /= sizeof(struct rxrpc_jumbo_header) + RXRPC_JUMBO_DATALEN;
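	/* For example, assuming 1412-byte jumbo segments and a 4-byte jumbo
	 * header, a max_data of 65535 would give (65535 + 4) / 1416 = 46
	 * segments per packet before the jumbo_max clamp below.
	 */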

	if (jumbo_max == 0) {
		/* The peer says it supports pmtu discovery */
		peer->ackr_adv_pmtud = true;
	} else {
		peer->ackr_adv_pmtud = false;
		capacity = clamp(capacity, 1, jumbo_max);
	}

	call->tx_jumbo_max = capacity;

	if (wake)
		wake_up(&call->waitq);
}

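/* Both variants of the macro below do the same thing: move the low bit of the
 * next byte in the soft-ACK array into the top bit of the accumulated bitmask
 * (shifting earlier bits down) and advance the byte pointer, so that a
 * txqueue's worth of one-byte ACK/NAK flags packs into one unsigned long with
 * the first ACK ending up at bit 0.
 */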
#if defined(CONFIG_X86) && __GNUC__ && !defined(__clang__)
/* Clang doesn't support the %z constraint modifier */
#define shiftr_adv_rotr(shift_from, rotate_into) ({ \
		asm(" shr%z1 %1\n" \
		    " inc %0\n" \
		    " rcr%z2 %2\n" \
		    : "+d"(shift_from), "+m"(*(shift_from)), "+rm"(rotate_into) \
		    ); \
	})
#else
#define shiftr_adv_rotr(shift_from, rotate_into) ({ \
		typeof(rotate_into) __bit0 = *(shift_from) & 1; \
		*(shift_from) >>= 1; \
		shift_from++; \
		rotate_into >>= 1; \
		rotate_into |= __bit0 << (sizeof(rotate_into) * 8 - 1); \
	})
#endif

/*
 * Deal with RTT samples from soft ACKs.
 */
static void rxrpc_input_soft_rtt(struct rxrpc_call *call,
				 struct rxrpc_ack_summary *summary,
				 struct rxrpc_txqueue *tq)
{
	for (int ix = 0; ix < RXRPC_NR_TXQUEUE; ix++)
		if (summary->acked_serial == tq->segment_serial[ix])
			return rxrpc_add_data_rtt_sample(call, summary, tq, ix);
}

/*
 * Process a batch of soft ACKs specific to a transmission queue segment.
 */
static void rxrpc_input_soft_ack_tq(struct rxrpc_call *call,
				    struct rxrpc_ack_summary *summary,
				    struct rxrpc_txqueue *tq,
				    unsigned long extracted_acks,
				    int nr_reported,
				    rxrpc_seq_t seq,
				    rxrpc_seq_t *lowest_nak)
{
	unsigned long old_reported = 0, flipped, new_acks = 0;
	unsigned long a_to_n, n_to_a = 0;
	int new, a, n;

	if (tq->nr_reported_acks > 0)
		old_reported = ~0UL >> (RXRPC_NR_TXQUEUE - tq->nr_reported_acks);

	_enter("{%x,%lx,%d},%lx,%d,%x",
	       tq->qbase, tq->segment_acked, tq->nr_reported_acks,
	       extracted_acks, nr_reported, seq);

	_debug("[%x]", tq->qbase);
	_debug("tq %16lx %u", tq->segment_acked, tq->nr_reported_acks);
	_debug("sack %16lx %u", extracted_acks, nr_reported);

	/* See how many previously logged ACKs/NAKs have flipped. */
	flipped = (tq->segment_acked ^ extracted_acks) & old_reported;
	if (flipped) {
		n_to_a = ~tq->segment_acked & flipped; /* Old NAK -> ACK */
		a_to_n = tq->segment_acked & flipped; /* Old ACK -> NAK */
		a = hweight_long(n_to_a);
		n = hweight_long(a_to_n);
		_debug("flip %16lx", flipped);
		_debug("ntoa %16lx %d", n_to_a, a);
		_debug("aton %16lx %d", a_to_n, n);
		call->acks_nr_sacks += a - n;
		call->acks_nr_snacks += n - a;
		summary->nr_new_sacks += a;
		summary->nr_new_snacks += n;
	}

	/* See how many new ACKs/NAKs have been acquired. */
	new = nr_reported - tq->nr_reported_acks;
	if (new > 0) {
		new_acks = extracted_acks & ~old_reported;
		if (new_acks) {
			a = hweight_long(new_acks);
			n = new - a;
			_debug("new_a %16lx new=%d a=%d n=%d", new_acks, new, a, n);
			call->acks_nr_sacks += a;
			call->acks_nr_snacks += n;
			summary->nr_new_sacks += a;
			summary->nr_new_snacks += n;
		} else {
			call->acks_nr_snacks += new;
			summary->nr_new_snacks += new;
		}
	}

	tq->nr_reported_acks = nr_reported;
	tq->segment_acked = extracted_acks;
	trace_rxrpc_apply_acks(call, tq);

	if (extracted_acks != ~0UL) {
		rxrpc_seq_t lowest = seq + ffz(extracted_acks);

		if (before(lowest, *lowest_nak))
			*lowest_nak = lowest;
	}

	if (summary->acked_serial)
		rxrpc_input_soft_rtt(call, summary, tq);

	new_acks |= n_to_a;
	if (new_acks)
		rxrpc_input_rack(call, summary, tq, new_acks);

	if (call->tlp_serial &&
	    rxrpc_seq_in_txq(tq, call->tlp_seq) &&
	    test_bit(call->tlp_seq - tq->qbase, &new_acks))
		summary->tlp_probe_acked = true;
}

/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend it immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call,
				  struct rxrpc_ack_summary *summary,
				  struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_txqueue *tq = call->tx_queue;
	unsigned long extracted = ~0UL;
	unsigned int nr = 0;
	rxrpc_seq_t seq = call->acks_hard_ack + 1;
	rxrpc_seq_t lowest_nak = seq + sp->ack.nr_acks;
	u8 *acks = skb->data + sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);

	_enter("%x,%x,%u", tq->qbase, seq, sp->ack.nr_acks);

	while (after(seq, tq->qbase + RXRPC_NR_TXQUEUE - 1))
		tq = tq->next;

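	/* On the wire each soft ACK occupies one byte (assuming the usual Rx
	 * encoding: RXRPC_ACK_TYPE_ACK = 1, RXRPC_ACK_TYPE_NACK = 0); the
	 * loop below decants them into per-txqueue bitmasks, a txqueue's
	 * worth at a time, via shiftr_adv_rotr().
	 */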
	for (unsigned int i = 0; i < sp->ack.nr_acks; i++) {
		/* Decant ACKs until we hit a txqueue boundary. */
		shiftr_adv_rotr(acks, extracted);
		if (i == 256) {
			acks -= i;
			i = 0;
		}
		seq++;
		nr++;
		if ((seq & RXRPC_TXQ_MASK) != 0)
			continue;

		_debug("bound %16lx %u", extracted, nr);

		rxrpc_input_soft_ack_tq(call, summary, tq, extracted, RXRPC_NR_TXQUEUE,
					seq - RXRPC_NR_TXQUEUE, &lowest_nak);
		extracted = ~0UL;
		nr = 0;
		tq = tq->next;
		prefetch(tq);
	}

	if (nr) {
		unsigned int nr_reported = seq & RXRPC_TXQ_MASK;

		extracted >>= RXRPC_NR_TXQUEUE - nr_reported;
		_debug("tail %16lx %u", extracted, nr_reported);
		rxrpc_input_soft_ack_tq(call, summary, tq, extracted, nr_reported,
					seq & ~RXRPC_TXQ_MASK, &lowest_nak);
	}

	/* We *can* have more nacks than we did - the peer is permitted to drop
	 * packets it has soft-acked and re-request them.  Further, it is
	 * possible for the nack distribution to change whilst the number of
	 * nacks stays the same or goes down.
	 */
	if (lowest_nak != call->acks_lowest_nak) {
		call->acks_lowest_nak = lowest_nak;
		summary->new_low_snack = true;
	}

	_debug("summary A=%d+%d N=%d+%d",
	       call->acks_nr_sacks, summary->nr_new_sacks,
	       call->acks_nr_snacks, summary->nr_new_snacks);
}

/*
 * Return true if the ACK is valid - ie. it doesn't appear to have regressed
 * with respect to the ack state conveyed by preceding ACKs.
 */
static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
			       rxrpc_seq_t hard_ack, rxrpc_seq_t prev_pkt)
{
	rxrpc_seq_t base = READ_ONCE(call->acks_hard_ack);

	if (after(hard_ack, base))
		return true; /* The window advanced */

	if (before(hard_ack, base))
		return false; /* firstPacket regressed */

	if (after_eq(prev_pkt, call->acks_prev_seq))
		return true; /* previousPacket hasn't regressed. */

	/* Some rx implementations put a serial number in previousPacket. */
	if (after(prev_pkt, base + call->tx_winsize))
		return false;
	return true;
}

/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet may be discarded and retransmission
 * requested.  A phase is complete when all packets are hard-ACK'd.
 */
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_acktrailer trailer;
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
	int nr_acks, offset, ioffset;

	_enter("");

	offset = sizeof(struct rxrpc_wire_header) + sizeof(struct rxrpc_ackpacket);

	summary.ack_serial = sp->hdr.serial;
	first_soft_ack = sp->ack.first_ack;
	prev_pkt = sp->ack.prev_ack;
	nr_acks = sp->ack.nr_acks;
	hard_ack = first_soft_ack - 1;
	summary.acked_serial = sp->ack.acked_serial;
	summary.ack_reason = (sp->ack.reason < RXRPC_ACK__INVALID ?
			      sp->ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, sp);
	rxrpc_inc_stat(call->rxnet, stat_rx_acks[summary.ack_reason]);
	prefetch(call->tx_queue);

	/* If we get an EXCEEDS_WINDOW ACK from the server, it probably
	 * indicates that the client address changed due to NAT.  The server
	 * lost the call because it switched to a different peer.
	 */
	if (unlikely(summary.ack_reason == RXRPC_ACK_EXCEEDS_WINDOW) &&
	    hard_ack == 0 &&
	    prev_pkt == 0 &&
	    rxrpc_is_client_call(call)) {
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  0, -ENETRESET);
		goto send_response;
	}

	/* If we get an OUT_OF_SEQUENCE ACK from the server, that can also
	 * indicate a change of address.  However, we can retransmit the call
	 * if we still have it buffered to the beginning.
	 */
	if (unlikely(summary.ack_reason == RXRPC_ACK_OUT_OF_SEQUENCE) &&
	    hard_ack == 0 &&
	    prev_pkt == 0 &&
	    call->tx_bottom == 0 &&
	    rxrpc_is_client_call(call)) {
		rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
					  0, -ENETRESET);
		goto send_response;
	}

	/* Discard any out-of-order or duplicate ACKs (outside lock). */
	if (!rxrpc_is_ack_valid(call, hard_ack, prev_pkt)) {
		trace_rxrpc_rx_discard_ack(call, summary.ack_serial, hard_ack, prev_pkt);
		goto send_response; /* Still respond if requested. */
	}

	trailer.maxMTU = 0;
	ioffset = offset + nr_acks + 3;
	if (skb->len >= ioffset + sizeof(trailer) &&
	    skb_copy_bits(skb, ioffset, &trailer, sizeof(trailer)) < 0)
		return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_trailer);

	if (nr_acks > 0)
		skb_condense(skb);

	call->acks_latest_ts = ktime_get_real();
	call->acks_hard_ack = hard_ack;
	call->acks_prev_seq = prev_pkt;

	if (summary.acked_serial) {
		switch (summary.ack_reason) {
		case RXRPC_ACK_PING_RESPONSE:
			rxrpc_complete_rtt_probe(call, call->acks_latest_ts,
						 summary.acked_serial, summary.ack_serial,
						 rxrpc_rtt_rx_ping_response);
			break;
		default:
			if (after(summary.acked_serial, call->acks_highest_serial))
				call->acks_highest_serial = summary.acked_serial;
			summary.rtt_sample_avail = true;
			break;
		}
	}

	/* Parse rwind and mtu sizes if provided. */
	if (trailer.maxMTU)
		rxrpc_input_ack_trailer(call, skb, &trailer);

	if (hard_ack + 1 == 0)
		return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero);

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		goto send_response;
	}

	if (before(hard_ack, call->tx_bottom) ||
	    after(hard_ack, call->tx_top))
		return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window);
	if (nr_acks > call->tx_top - hard_ack)
		return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow);

	if (after(hard_ack, call->tx_bottom)) {
		if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
			rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack);
			goto send_response;
		}
	}

	if (nr_acks > 0) {
		if (offset > (int)skb->len - nr_acks)
			return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack);
		rxrpc_input_soft_acks(call, &summary, skb);
	}

	if (test_bit(RXRPC_CALL_TX_LAST, &call->flags) &&
	    call->acks_nr_sacks == call->tx_top - hard_ack &&
	    rxrpc_is_client_call(call))
		rxrpc_propose_ping(call, summary.ack_serial,
				   rxrpc_propose_ack_ping_for_lost_reply);

	/* Drive the congestion management algorithm first and then RACK-TLP as
	 * the latter depends on the state/change in state in the former.
	 */
	rxrpc_congestion_management(call, &summary);
	rxrpc_rack_detect_loss_and_arm_timer(call, &summary);
	rxrpc_tlp_process_ack(call, &summary);
	if (call->tlp_serial && after_eq(summary.acked_serial, call->tlp_serial))
		call->tlp_serial = 0;

send_response:
	if (summary.ack_reason == RXRPC_ACK_PING)
		rxrpc_send_ACK(call, RXRPC_ACK_PING_RESPONSE, summary.ack_serial,
			       rxrpc_propose_ack_respond_to_ping);
	else if (sp->hdr.flags & RXRPC_REQUEST_ACK)
		rxrpc_send_ACK(call, RXRPC_ACK_REQUESTED, summary.ack_serial,
			       rxrpc_propose_ack_respond_to_ack);
}

/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };

	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
		rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall);
}

/*
 * Process an ABORT packet directed at a call.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	trace_rxrpc_rx_abort(call, sp->hdr.serial, skb->priority);

	rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
				  skb->priority, -ECONNABORTED);
}

/*
 * Process an incoming call packet.
 */
void rxrpc_input_call_packet(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long timo;

	_enter("%p,%p", call, skb);

	if (sp->hdr.serviceId != call->dest_srx.srx_service)
		call->dest_srx.srx_service = sp->hdr.serviceId;
	if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
		call->rx_serial = sp->hdr.serial;
	if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
		set_bit(RXRPC_CALL_RX_HEARD, &call->flags);

	timo = READ_ONCE(call->next_rx_timo);
	if (timo) {
		ktime_t delay = ms_to_ktime(timo);

		call->expect_rx_by = ktime_add(ktime_get_real(), delay);
		trace_rxrpc_timer_set(call, delay, rxrpc_timer_trace_expect_rx);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		return rxrpc_input_data(call, skb);

	case RXRPC_PACKET_TYPE_ACK:
		return rxrpc_input_ack(call, skb);

	case RXRPC_PACKET_TYPE_BUSY:
		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business.  BUSY packets
		 * from the client don't make sense.
		 */
		return;

	case RXRPC_PACKET_TYPE_ABORT:
		return rxrpc_input_abort(call, skb);

	case RXRPC_PACKET_TYPE_ACKALL:
		return rxrpc_input_ackall(call, skb);

	default:
		break;
	}
}

/*
 * Handle a new service call on a channel implicitly completing the preceding
 * call on that channel.  This does not apply to client conns.
 *
 * TODO: If callNumber > call_id + 1, renegotiate security.
 */
void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb)
{
	switch (__rxrpc_call_state(call)) {
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		fallthrough;
	case RXRPC_CALL_COMPLETE:
		break;
	default:
		rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ESHUTDOWN,
				 rxrpc_eproto_improper_term);
		trace_rxrpc_improper_term(call);
		break;
	}

	rxrpc_input_call_event(call);
}