/* SCTP kernel implementation
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 International Business Machines Corp.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This file is part of the SCTP kernel implementation
 *
 * This module provides the abstraction for an SCTP transport representing
 * a remote transport address.  For local transport addresses, we just use
 * union sctp_addr.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson <karl@athena.chicago.il.us>
 *    Jon Grimm <jgrimm@us.ibm.com>
 *    Xingang Guo <xingang.guo@intel.com>
 *    Hui Huang <hui.huang@nokia.com>
 *    Sridhar Samudrala <sri@us.ibm.com>
 *    Ardelle Fan <ardelle.fan@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/random.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* 1st Level Abstractions.  */

/* Initialize a new transport from provided memory.  */
static struct sctp_transport *sctp_transport_init(struct net *net,
						  struct sctp_transport *peer,
						  const union sctp_addr *addr,
						  gfp_t gfp)
{
	/* Copy in the address.  */
	peer->ipaddr = *addr;
	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
	memset(&peer->saddr, 0, sizeof(union sctp_addr));

	peer->sack_generation = 0;

	/* From 6.3.1 RTO Calculation:
	 *
	 * C1) Until an RTT measurement has been made for a packet sent to the
	 * given destination transport address, set RTO to the protocol
	 * parameter 'RTO.Initial'.
	 */
	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);

	peer->last_time_heard = 0;
	peer->last_time_ecne_reduced = jiffies;

	peer->param_flags = SPP_HB_DISABLE |
			    SPP_PMTUD_ENABLE |
			    SPP_SACKDELAY_ENABLE;

	/* Initialize the default path max_retrans.  */
	peer->pathmaxrxt = net->sctp.max_retrans_path;
	peer->pf_retrans = net->sctp.pf_retrans;

	INIT_LIST_HEAD(&peer->transmitted);
	INIT_LIST_HEAD(&peer->send_ready);
	INIT_LIST_HEAD(&peer->transports);

	timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
	timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
	timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
	timer_setup(&peer->proto_unreach_timer,
		    sctp_generate_proto_unreach_event, 0);

	/* Initialize the 64-bit random nonce sent with heartbeat. */
	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));

	refcount_set(&peer->refcnt, 1);

	return peer;
}

/* Allocate and initialize a new transport.  */
struct sctp_transport *sctp_transport_new(struct net *net,
					  const union sctp_addr *addr,
					  gfp_t gfp)
{
	struct sctp_transport *transport;

	transport = kzalloc(sizeof(*transport), gfp);
	if (!transport)
		goto fail;

	if (!sctp_transport_init(net, transport, addr, gfp))
		goto fail_init;

	SCTP_DBG_OBJCNT_INC(transport);

	return transport;

fail_init:
	kfree(transport);

fail:
	return NULL;
}
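
/* Reference counting overview (added comment, not part of the original file):
 * sctp_transport_new() returns a transport with refcnt == 1.  Each armed
 * timer takes an extra reference via sctp_transport_hold(), which is dropped
 * either by the timer handler or by the del_timer() calls in
 * sctp_transport_free().  sctp_transport_free() releases the initial
 * reference; the final sctp_transport_put() triggers sctp_transport_destroy()
 * and frees the structure via RCU.
 */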

/* This transport is no longer needed.  Free up if possible, or
 * delay until its last reference is dropped.
 */
void sctp_transport_free(struct sctp_transport *transport)
{
	/* Try to delete the heartbeat timer. */
	if (del_timer(&transport->hb_timer))
		sctp_transport_put(transport);

	/* Delete the T3_rtx timer if it's active.
	 * There is no point in not doing this now and letting
	 * the structure hang around in memory since we know
	 * the transport is going away.
	 */
	if (del_timer(&transport->T3_rtx_timer))
		sctp_transport_put(transport);

	if (del_timer(&transport->reconf_timer))
		sctp_transport_put(transport);

	/* Delete the ICMP proto unreachable timer if it's active. */
	if (del_timer(&transport->proto_unreach_timer))
		sctp_association_put(transport->asoc);

	sctp_transport_put(transport);
}

static void sctp_transport_destroy_rcu(struct rcu_head *head)
{
	struct sctp_transport *transport;

	transport = container_of(head, struct sctp_transport, rcu);

	dst_release(transport->dst);
	kfree(transport);
	SCTP_DBG_OBJCNT_DEC(transport);
}

/* Destroy the transport data structure.
 * Assumes there are no more users of this structure.
 */
static void sctp_transport_destroy(struct sctp_transport *transport)
{
	if (unlikely(refcount_read(&transport->refcnt))) {
		WARN(1, "Attempt to destroy undead transport %p!\n", transport);
		return;
	}

	sctp_packet_free(&transport->packet);

	if (transport->asoc)
		sctp_association_put(transport->asoc);

	call_rcu(&transport->rcu, sctp_transport_destroy_rcu);
}

/* Start T3_rtx timer if it is not already running and update the heartbeat
 * timer.  This routine is called every time a DATA chunk is sent.
 */
void sctp_transport_reset_t3_rtx(struct sctp_transport *transport)
{
	/* RFC 2960 6.3.2 Retransmission Timer Rules
	 *
	 * R1) Every time a DATA chunk is sent to any address (including a
	 * retransmission), if the T3-rtx timer of that address is not running
	 * start it running so that it will expire after the RTO of that
	 * address.
	 */

	if (!timer_pending(&transport->T3_rtx_timer))
		if (!mod_timer(&transport->T3_rtx_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}

void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
{
	unsigned long expires;

	/* When a data chunk is sent, reset the heartbeat interval. */
	expires = jiffies + sctp_transport_timeout(transport);
	if (time_before(transport->hb_timer.expires, expires) &&
	    !mod_timer(&transport->hb_timer,
		       expires + prandom_u32_max(transport->rto)))
		sctp_transport_hold(transport);
}

void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
{
	if (!timer_pending(&transport->reconf_timer))
		if (!mod_timer(&transport->reconf_timer,
			       jiffies + transport->rto))
			sctp_transport_hold(transport);
}

/* This transport has been assigned to an association.
 * Initialize fields from the association or from the sock itself.
 * Register the reference count in the association.
 */
void sctp_transport_set_owner(struct sctp_transport *transport,
			      struct sctp_association *asoc)
{
	transport->asoc = asoc;
	sctp_association_hold(asoc);
}

/* Initialize the pmtu of a transport. */
void sctp_transport_pmtu(struct sctp_transport *transport, struct sock *sk)
{
	/* If we don't have a fresh route, look one up */
	if (!transport->dst || transport->dst->obsolete) {
		sctp_transport_dst_release(transport);
		transport->af_specific->get_dst(transport, &transport->saddr,
						&transport->fl, sk);
	}

	if (transport->param_flags & SPP_PMTUD_DISABLE) {
		struct sctp_association *asoc = transport->asoc;

		if (!transport->pathmtu && asoc && asoc->pathmtu)
			transport->pathmtu = asoc->pathmtu;
		if (transport->pathmtu)
			return;
	}

	if (transport->dst)
		transport->pathmtu = sctp_dst_mtu(transport->dst);
	else
		transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
}

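/* Update the cached path MTU of this transport (added comment, not part of
 * the original file).  The reported value is clamped to at least
 * SCTP_DEFAULT_MINSEGMENT, pushed down to the route via update_pmtu(), and
 * then re-read from the dst in case lower layers enforce a larger minimum.
 * Returns true when the cached pathmtu changed, or when no route was
 * available to verify it.
 */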
bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
{
	struct dst_entry *dst = sctp_transport_dst_check(t);
	bool change = true;

	if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) {
		pr_warn_ratelimited("%s: Reported pmtu %d too low, using default minimum of %d\n",
				    __func__, pmtu, SCTP_DEFAULT_MINSEGMENT);
		/* Use default minimum segment instead */
		pmtu = SCTP_DEFAULT_MINSEGMENT;
	}
	pmtu = SCTP_TRUNC4(pmtu);

	if (dst) {
		dst->ops->update_pmtu(dst, t->asoc->base.sk, NULL, pmtu);
		dst = sctp_transport_dst_check(t);
	}

	if (!dst) {
		t->af_specific->get_dst(t, &t->saddr, &t->fl, t->asoc->base.sk);
		dst = t->dst;
	}

	if (dst) {
		/* Re-fetch, as lower layers may enforce a higher minimum size */
		pmtu = sctp_dst_mtu(dst);
		change = t->pathmtu != pmtu;
	}
	t->pathmtu = pmtu;

	return change;
}

/* Caches the dst entry and source address for a transport's destination
 * address.
 */
void sctp_transport_route(struct sctp_transport *transport,
			  union sctp_addr *saddr, struct sctp_sock *opt)
{
	struct sctp_association *asoc = transport->asoc;
	struct sctp_af *af = transport->af_specific;

	sctp_transport_dst_release(transport);
	af->get_dst(transport, saddr, &transport->fl, sctp_opt2sk(opt));

	if (saddr)
		memcpy(&transport->saddr, saddr, sizeof(union sctp_addr));
	else
		af->get_saddr(opt, transport, &transport->fl);

	sctp_transport_pmtu(transport, sctp_opt2sk(opt));

	/* Initialize sk->sk_rcv_saddr, if the transport is the
	 * association's active path for getsockname().
	 */
	if (transport->dst && asoc &&
	    (!asoc->peer.primary_path || transport == asoc->peer.active_path))
		opt->pf->to_sk_saddr(&transport->saddr, asoc->base.sk);
}

/* Hold a reference to a transport. */
int sctp_transport_hold(struct sctp_transport *transport)
{
	return refcount_inc_not_zero(&transport->refcnt);
}

/* Release a reference to a transport and clean up
 * if there are no more references.
 */
void sctp_transport_put(struct sctp_transport *transport)
{
	if (refcount_dec_and_test(&transport->refcnt))
		sctp_transport_destroy(transport);
}

/* Update transport's RTO based on the newly calculated RTT. */
void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
{
	if (unlikely(!tp->rto_pending))
		/* We should not be doing any RTO updates unless rto_pending is set. */
		pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp);

	if (tp->rttvar || tp->srtt) {
		struct net *net = sock_net(tp->asoc->base.sk);
		/* 6.3.1 C3) When a new RTT measurement R' is made, set
		 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'|
		 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
		 */

		/* Note: The above algorithm has been rewritten to
		 * express rto_beta and rto_alpha as inverse powers
		 * of two.
		 * For example, assuming the default value of RTO.Alpha of
		 * 1/8, rto_alpha would be expressed as 3.
		 */
		tp->rttvar = tp->rttvar - (tp->rttvar >> net->sctp.rto_beta)
			+ (((__u32)abs((__s64)tp->srtt - (__s64)rtt)) >> net->sctp.rto_beta);
		tp->srtt = tp->srtt - (tp->srtt >> net->sctp.rto_alpha)
			+ (rtt >> net->sctp.rto_alpha);
	} else {
		/* 6.3.1 C2) When the first RTT measurement R is made, set
		 * SRTT <- R, RTTVAR <- R/2.
		 */
		tp->srtt = rtt;
		tp->rttvar = rtt >> 1;
	}

	/* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then
	 * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY.
	 */
	if (tp->rttvar == 0)
		tp->rttvar = SCTP_CLOCK_GRANULARITY;

	/* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */
	tp->rto = tp->srtt + (tp->rttvar << 2);

	/* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min
	 * seconds then it is rounded up to RTO.Min seconds.
	 */
	if (tp->rto < tp->asoc->rto_min)
		tp->rto = tp->asoc->rto_min;

	/* 6.3.1 C7) A maximum value may be placed on RTO provided it is
	 * at least RTO.max seconds.
	 */
	if (tp->rto > tp->asoc->rto_max)
		tp->rto = tp->asoc->rto_max;

	sctp_max_rto(tp->asoc, tp);
	tp->rtt = rtt;

	/* Reset rto_pending so that a new RTT measurement is started when a
	 * new data chunk is sent.
	 */
	tp->rto_pending = 0;

	pr_debug("%s: transport:%p, rtt:%d, srtt:%d rttvar:%d, rto:%ld\n",
		 __func__, tp, rtt, tp->srtt, tp->rttvar, tp->rto);
}
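
/* Worked example (added for illustration, not part of the original file):
 * with the RFC 4960 defaults RTO.Alpha = 1/8 and RTO.Beta = 1/4, i.e.
 * rto_alpha = 3 and rto_beta = 2, the shift-based updates above reduce to
 *
 *	rttvar = rttvar - (rttvar >> 2) + (|srtt - rtt| >> 2)
 *	srtt   = srtt   - (srtt >> 3)   + (rtt >> 3)
 *
 * which is the integer form of RTTVAR <- 3/4 * RTTVAR + 1/4 * |SRTT - R'|
 * and SRTT <- 7/8 * SRTT + 1/8 * R', with RTTVAR computed from the old SRTT.
 */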

/* This routine updates the transport's cwnd and partial_bytes_acked
 * parameters based on the bytes acked in the received SACK.
 */
void sctp_transport_raise_cwnd(struct sctp_transport *transport,
			       __u32 sack_ctsn, __u32 bytes_acked)
{
	struct sctp_association *asoc = transport->asoc;
	__u32 cwnd, ssthresh, flight_size, pba, pmtu;

	cwnd = transport->cwnd;
	flight_size = transport->flight_size;

	/* See if we need to exit Fast Recovery first */
	if (asoc->fast_recovery &&
	    TSN_lte(asoc->fast_recovery_exit, sack_ctsn))
		asoc->fast_recovery = 0;

	ssthresh = transport->ssthresh;
	pba = transport->partial_bytes_acked;
	pmtu = transport->asoc->pathmtu;

	if (cwnd <= ssthresh) {
		/* RFC 4960 7.2.1
		 * o  When cwnd is less than or equal to ssthresh, an SCTP
		 *    endpoint MUST use the slow-start algorithm to increase
		 *    cwnd only if the current congestion window is being fully
		 *    utilized, an incoming SACK advances the Cumulative TSN
		 *    Ack Point, and the data sender is not in Fast Recovery.
		 *    Only when these three conditions are met can the cwnd be
		 *    increased; otherwise, the cwnd MUST not be increased.
		 *    If these conditions are met, then cwnd MUST be increased
		 *    by, at most, the lesser of 1) the total size of the
		 *    previously outstanding DATA chunk(s) acknowledged, and
		 *    2) the destination's path MTU.  This upper bound protects
		 *    against the ACK-Splitting attack outlined in [SAVAGE99].
		 */
		if (asoc->fast_recovery)
			return;

		/* The appropriate cwnd increase algorithm is performed
		 * if, and only if the congestion window is being fully
		 * utilized.  Note that RFC4960 Errata 3.22 removed the
		 * other condition on ctsn moving.
		 */
		if (flight_size < cwnd)
			return;

		if (bytes_acked > pmtu)
			cwnd += pmtu;
		else
			cwnd += bytes_acked;

		pr_debug("%s: slow start: transport:%p, bytes_acked:%d, "
			 "cwnd:%d, ssthresh:%d, flight_size:%d, pba:%d\n",
			 __func__, transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	} else {
		/* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh,
		 * upon each SACK arrival, increase partial_bytes_acked
		 * by the total number of bytes of all new chunks
		 * acknowledged in that SACK including chunks
		 * acknowledged by the new Cumulative TSN Ack and by Gap
		 * Ack Blocks. (updated by RFC4960 Errata 3.22)
		 *
		 * When partial_bytes_acked is greater than cwnd and
		 * before the arrival of the SACK the sender had less
		 * bytes of data outstanding than cwnd (i.e., before
		 * arrival of the SACK, flightsize was less than cwnd),
		 * reset partial_bytes_acked to cwnd. (RFC 4960 Errata
		 * 3.26)
		 *
		 * When partial_bytes_acked is equal to or greater than
		 * cwnd and before the arrival of the SACK the sender
		 * had cwnd or more bytes of data outstanding (i.e.,
		 * before arrival of the SACK, flightsize was greater
		 * than or equal to cwnd), partial_bytes_acked is reset
		 * to (partial_bytes_acked - cwnd).  Next, cwnd is
		 * increased by MTU. (RFC 4960 Errata 3.12)
		 */
		pba += bytes_acked;
		if (pba > cwnd && flight_size < cwnd)
			pba = cwnd;
		if (pba >= cwnd && flight_size >= cwnd) {
			pba = pba - cwnd;
			cwnd += pmtu;
		}

		pr_debug("%s: congestion avoidance: transport:%p, "
			 "bytes_acked:%d, cwnd:%d, ssthresh:%d, "
			 "flight_size:%d, pba:%d\n", __func__,
			 transport, bytes_acked, cwnd, ssthresh,
			 flight_size, pba);
	}

	transport->cwnd = cwnd;
	transport->partial_bytes_acked = pba;
}
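
/* Illustrative example (added, not part of the original file): with
 * pathmtu = 1500, a fully utilized window (flight_size >= cwnd) and the
 * sender not in Fast Recovery, a SACK newly acking 4380 bytes grows cwnd by
 * min(4380, 1500) = 1500 bytes while cwnd <= ssthresh (slow start).  Above
 * ssthresh (congestion avoidance) the same 4380 bytes only accumulate in
 * partial_bytes_acked, and cwnd grows by one MTU each time a full cwnd's
 * worth of data has been acked with the window still fully utilized.
 */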

/* This routine is used to lower the transport's cwnd when congestion is
 * detected.
 */
void sctp_transport_lower_cwnd(struct sctp_transport *transport,
			       enum sctp_lower_cwnd reason)
{
	struct sctp_association *asoc = transport->asoc;

	switch (reason) {
	case SCTP_LOWER_CWND_T3_RTX:
		/* RFC 2960 Section 7.2.3, sctpimpguide
		 * When the T3-rtx timer expires on an address, SCTP should
		 * perform slow start by:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = 1*MTU
		 *      partial_bytes_acked = 0
		 */
		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = asoc->pathmtu;

		/* T3-rtx also clears fast recovery */
		asoc->fast_recovery = 0;
		break;

	case SCTP_LOWER_CWND_FAST_RTX:
		/* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the
		 * destination address(es) to which the missing DATA chunks
		 * were last sent, according to the formula described in
		 * Section 7.2.3.
		 *
		 * RFC 2960 7.2.3, sctpimpguide Upon detection of packet
		 * losses from SACK (see Section 7.2.4), An endpoint
		 * should do the following:
		 *      ssthresh = max(cwnd/2, 4*MTU)
		 *      cwnd = ssthresh
		 *      partial_bytes_acked = 0
		 */
		if (asoc->fast_recovery)
			return;

		/* Mark Fast recovery */
		asoc->fast_recovery = 1;
		asoc->fast_recovery_exit = asoc->next_tsn - 1;

		transport->ssthresh = max(transport->cwnd/2,
					  4*asoc->pathmtu);
		transport->cwnd = transport->ssthresh;
		break;

	case SCTP_LOWER_CWND_ECNE:
		/* RFC 2481 Section 6.1.2.
		 * If the sender receives an ECN-Echo ACK packet
		 * then the sender knows that congestion was encountered in the
		 * network on the path from the sender to the receiver. The
		 * indication of congestion should be treated just as a
		 * congestion loss in non-ECN Capable TCP. That is, the TCP
		 * source halves the congestion window "cwnd" and reduces the
		 * slow start threshold "ssthresh".
		 * A critical condition is that TCP does not react to
		 * congestion indications more than once every window of
		 * data (or more loosely more than once every round-trip time).
		 */
		if (time_after(jiffies, transport->last_time_ecne_reduced +
					transport->rtt)) {
			transport->ssthresh = max(transport->cwnd/2,
						  4*asoc->pathmtu);
			transport->cwnd = transport->ssthresh;
			transport->last_time_ecne_reduced = jiffies;
		}
		break;

	case SCTP_LOWER_CWND_INACTIVE:
		/* RFC 2960 Section 7.2.1, sctpimpguide
		 * When the endpoint does not transmit data on a given
		 * transport address, the cwnd of the transport address
		 * should be adjusted to max(cwnd/2, 4*MTU) per RTO.
		 * NOTE: Although the draft recommends that this check needs
		 * to be done every RTO interval, we do it every heartbeat
		 * interval.
		 */
		transport->cwnd = max(transport->cwnd/2,
				      4*asoc->pathmtu);
		/* RFC 4960 Errata 3.27.2: also adjust ssthresh */
		transport->ssthresh = transport->cwnd;
		break;
	}

	transport->partial_bytes_acked = 0;

	pr_debug("%s: transport:%p, reason:%d, cwnd:%d, ssthresh:%d\n",
		 __func__, transport, reason, transport->cwnd,
		 transport->ssthresh);
}

/* Apply Max.Burst limit to the congestion window:
 * sctpimpguide-05 2.14.2
 * D) When the time comes for the sender to
 * transmit new DATA chunks, the protocol parameter Max.Burst MUST
 * first be applied to limit how many new DATA chunks may be sent.
 * The limit is applied by adjusting cwnd as follows:
 *	if ((flightsize + Max.Burst * MTU) < cwnd)
 *		cwnd = flightsize + Max.Burst * MTU
 */

void sctp_transport_burst_limited(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;
	u32 old_cwnd = t->cwnd;
	u32 max_burst_bytes;

	if (t->burst_limited || asoc->max_burst == 0)
		return;

	max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
	if (max_burst_bytes < old_cwnd) {
		t->cwnd = max_burst_bytes;
		t->burst_limited = old_cwnd;
	}
}
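
/* Illustrative example (added, not part of the original file): with
 * max_burst = 4, pathmtu = 1500 and flight_size = 3000, the cap is
 * 3000 + 4 * 1500 = 9000 bytes; a cwnd of 20000 would be clamped to 9000
 * until sctp_transport_burst_reset() restores the saved value.
 */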

/* Restore the old cwnd congestion window, after the burst had its
 * desired effect.
 */
void sctp_transport_burst_reset(struct sctp_transport *t)
{
	if (t->burst_limited) {
		t->cwnd = t->burst_limited;
		t->burst_limited = 0;
	}
}

/* What is the next timeout value for this transport? */
unsigned long sctp_transport_timeout(struct sctp_transport *trans)
{
	/* RTO + timer slack +/- 50% of RTO */
	unsigned long timeout = trans->rto >> 1;

	if (trans->state != SCTP_UNCONFIRMED &&
	    trans->state != SCTP_PF)
		timeout += trans->hbinterval;

	return max_t(unsigned long, timeout, HZ / 5);
}
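
/* Note (added for clarity): the "+/- 50% of RTO" slack comes from combining
 * the RTO/2 returned here with the random offset of up to one full RTO that
 * sctp_transport_reset_hb_timer() adds when arming the heartbeat timer, so
 * the effective heartbeat delay is roughly hbinterval + RTO +/- RTO/2.
 */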

/* Reset transport variables to their initial values */
void sctp_transport_reset(struct sctp_transport *t)
{
	struct sctp_association *asoc = t->asoc;

	/* RFC 2960 (bis), Section 5.2.4
	 * All the congestion control parameters (e.g., cwnd, ssthresh)
	 * related to this peer MUST be reset to their initial values
	 * (see Section 6.2.1)
	 */
	t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
	t->burst_limited = 0;
	t->ssthresh = asoc->peer.i.a_rwnd;
	t->rto = asoc->rto_initial;
	sctp_max_rto(asoc, t);
	t->rtt = 0;
	t->srtt = 0;
	t->rttvar = 0;

	/* Reset these additional variables so that we have a clean slate. */
	t->partial_bytes_acked = 0;
	t->flight_size = 0;
	t->error_count = 0;
	t->rto_pending = 0;
	t->hb_sent = 0;

	/* Initialize the state information for SFR-CACC */
	t->cacc.changeover_active = 0;
	t->cacc.cycling_changeover = 0;
	t->cacc.next_tsn_at_change = 0;
	t->cacc.cacc_saw_newack = 0;
}
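
/* Illustrative example (added, not part of the original file): the initial
 * cwnd formula above follows RFC 4960 7.2.1; with pathmtu = 1500 it yields
 * min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380 bytes.
 */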

/* Schedule retransmission on the given transport */
void sctp_transport_immediate_rtx(struct sctp_transport *t)
{
	/* Stop pending T3_rtx_timer */
	if (del_timer(&t->T3_rtx_timer))
		sctp_transport_put(t);

	sctp_retransmit(&t->asoc->outqueue, t, SCTP_RTXR_T3_RTX);
	if (!timer_pending(&t->T3_rtx_timer)) {
		if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
			sctp_transport_hold(t);
	}
}

/* Drop dst */
void sctp_transport_dst_release(struct sctp_transport *t)
{
	dst_release(t->dst);
	t->dst = NULL;
	t->dst_pending_confirm = 0;
}

/* Schedule neighbour confirm */
void sctp_transport_dst_confirm(struct sctp_transport *t)
{
	t->dst_pending_confirm = 1;
}