Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: remove Appropriate Byte Count support

TCP Appropriate Byte Count was added by me, but later disabled.
There is no point in maintaining it since it is a potential source
of bugs and Linux already implements other better window protection
heuristics.

Signed-off-by: Stephen Hemminger <stephen@networkplumber.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Stephen Hemminger and committed by David S. Miller.
ca2eb567 547472b8

+1 -67
-11
Documentation/networking/ip-sysctl.txt
··· 130 130 Defaults to 128. See also tcp_max_syn_backlog for additional tuning 131 131 for TCP sockets. 132 132 133 - tcp_abc - INTEGER 134 - Controls Appropriate Byte Count (ABC) defined in RFC3465. 135 - ABC is a way of increasing congestion window (cwnd) more slowly 136 - in response to partial acknowledgments. 137 - Possible values are: 138 - 0 increase cwnd once per acknowledgment (no ABC) 139 - 1 increase cwnd once per acknowledgment of full sized segment 140 - 2 allow increase cwnd by two if acknowledgment is 141 - of two segments to compensate for delayed acknowledgments. 142 - Default: 0 (off) 143 - 144 133 tcp_abort_on_overflow - BOOLEAN 145 134 If listening service is too slow to accept new connections, 146 135 reset them. Default state is FALSE. It means that if overflow
-1
include/linux/tcp.h
··· 246 246 u32 sacked_out; /* SACK'd packets */ 247 247 u32 fackets_out; /* FACK'd packets */ 248 248 u32 tso_deferred; 249 - u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */ 250 249 251 250 /* from STCP, retrans queue hinting */ 252 251 struct sk_buff* lost_skb_hint;
-1
include/net/tcp.h
··· 279 279 extern int sysctl_tcp_nometrics_save; 280 280 extern int sysctl_tcp_moderate_rcvbuf; 281 281 extern int sysctl_tcp_tso_win_divisor; 282 - extern int sysctl_tcp_abc; 283 282 extern int sysctl_tcp_mtu_probing; 284 283 extern int sysctl_tcp_base_mss; 285 284 extern int sysctl_tcp_workaround_signed_windows;
-1
kernel/sysctl_binary.c
··· 387 387 { CTL_INT, NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" }, 388 388 { CTL_INT, NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" }, 389 389 { CTL_STR, NET_TCP_CONG_CONTROL, "tcp_congestion_control" }, 390 - { CTL_INT, NET_TCP_ABC, "tcp_abc" }, 391 390 { CTL_INT, NET_TCP_MTU_PROBING, "tcp_mtu_probing" }, 392 391 { CTL_INT, NET_TCP_BASE_MSS, "tcp_base_mss" }, 393 392 { CTL_INT, NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
-7
net/ipv4/sysctl_net_ipv4.c
··· 633 633 .proc_handler = proc_tcp_congestion_control, 634 634 }, 635 635 { 636 - .procname = "tcp_abc", 637 - .data = &sysctl_tcp_abc, 638 - .maxlen = sizeof(int), 639 - .mode = 0644, 640 - .proc_handler = proc_dointvec, 641 - }, 642 - { 643 636 .procname = "tcp_mtu_probing", 644 637 .data = &sysctl_tcp_mtu_probing, 645 638 .maxlen = sizeof(int),
-1
net/ipv4/tcp.c
··· 2289 2289 tp->packets_out = 0; 2290 2290 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 2291 2291 tp->snd_cwnd_cnt = 0; 2292 - tp->bytes_acked = 0; 2293 2292 tp->window_clamp = 0; 2294 2293 tcp_set_ca_state(sk, TCP_CA_Open); 2295 2294 tcp_clear_retrans(tp);
+1 -29
net/ipv4/tcp_cong.c
··· 317 317 snd_cwnd = 1U; 318 318 } 319 319 320 - /* RFC3465: ABC Slow start 321 - * Increase only after a full MSS of bytes is acked 322 - * 323 - * TCP sender SHOULD increase cwnd by the number of 324 - * previously unacknowledged bytes ACKed by each incoming 325 - * acknowledgment, provided the increase is not more than L 326 - */ 327 - if (sysctl_tcp_abc && tp->bytes_acked < tp->mss_cache) 328 - return; 329 - 330 320 if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh) 331 321 cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */ 332 322 else 333 323 cnt = snd_cwnd; /* exponential increase */ 334 - 335 - /* RFC3465: ABC 336 - * We MAY increase by 2 if discovered delayed ack 337 - */ 338 - if (sysctl_tcp_abc > 1 && tp->bytes_acked >= 2*tp->mss_cache) 339 - cnt <<= 1; 340 - tp->bytes_acked = 0; 341 324 342 325 tp->snd_cwnd_cnt += cnt; 343 326 while (tp->snd_cwnd_cnt >= snd_cwnd) { ··· 361 378 /* In "safe" area, increase. */ 362 379 if (tp->snd_cwnd <= tp->snd_ssthresh) 363 380 tcp_slow_start(tp); 364 - 365 381 /* In dangerous area, increase slowly. */ 366 - else if (sysctl_tcp_abc) { 367 - /* RFC3465: Appropriate Byte Count 368 - * increase once for each full cwnd acked 369 - */ 370 - if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) { 371 - tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache; 372 - if (tp->snd_cwnd < tp->snd_cwnd_clamp) 373 - tp->snd_cwnd++; 374 - } 375 - } else { 382 + else 376 383 tcp_cong_avoid_ai(tp, tp->snd_cwnd); 377 - } 378 384 } 379 385 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 380 386
-15
net/ipv4/tcp_input.c
··· 98 98 int sysctl_tcp_thin_dupack __read_mostly; 99 99 100 100 int sysctl_tcp_moderate_rcvbuf __read_mostly = 1; 101 - int sysctl_tcp_abc __read_mostly; 102 101 int sysctl_tcp_early_retrans __read_mostly = 2; 103 102 104 103 #define FLAG_DATA 0x01 /* Incoming frame contained data. */ ··· 2006 2007 tp->snd_cwnd_cnt = 0; 2007 2008 tp->snd_cwnd_stamp = tcp_time_stamp; 2008 2009 tp->frto_counter = 0; 2009 - tp->bytes_acked = 0; 2010 2010 2011 2011 tp->reordering = min_t(unsigned int, tp->reordering, 2012 2012 sysctl_tcp_reordering); ··· 2054 2056 tp->snd_cwnd_cnt = 0; 2055 2057 tp->snd_cwnd_stamp = tcp_time_stamp; 2056 2058 2057 - tp->bytes_acked = 0; 2058 2059 tcp_clear_retrans_partial(tp); 2059 2060 2060 2061 if (tcp_is_reno(tp)) ··· 2681 2684 struct tcp_sock *tp = tcp_sk(sk); 2682 2685 2683 2686 tp->high_seq = tp->snd_nxt; 2684 - tp->bytes_acked = 0; 2685 2687 tp->snd_cwnd_cnt = 0; 2686 2688 tp->prior_cwnd = tp->snd_cwnd; 2687 2689 tp->prr_delivered = 0; ··· 2731 2735 struct tcp_sock *tp = tcp_sk(sk); 2732 2736 2733 2737 tp->prior_ssthresh = 0; 2734 - tp->bytes_acked = 0; 2735 2738 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2736 2739 tp->undo_marker = 0; 2737 2740 tcp_init_cwnd_reduction(sk, set_ssthresh); ··· 3412 3417 { 3413 3418 tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 3414 3419 tp->snd_cwnd_cnt = 0; 3415 - tp->bytes_acked = 0; 3416 3420 TCP_ECN_queue_cwr(tp); 3417 3421 tcp_moderate_cwnd(tp); 3418 3422 } ··· 3602 3608 3603 3609 if (after(ack, prior_snd_una)) 3604 3610 flag |= FLAG_SND_UNA_ADVANCED; 3605 - 3606 - if (sysctl_tcp_abc) { 3607 - if (icsk->icsk_ca_state < TCP_CA_CWR) 3608 - tp->bytes_acked += ack - prior_snd_una; 3609 - else if (icsk->icsk_ca_state == TCP_CA_Loss) 3610 - /* we assume just one segment left network */ 3611 - tp->bytes_acked += min(ack - prior_snd_una, 3612 - tp->mss_cache); 3613 - } 3614 3611 3615 3612 prior_fackets = tp->fackets_out; 3616 3613 prior_in_flight = tcp_packets_in_flight(tp);
-1
net/ipv4/tcp_minisocks.c
··· 446 446 */ 447 447 newtp->snd_cwnd = TCP_INIT_CWND; 448 448 newtp->snd_cwnd_cnt = 0; 449 - newtp->bytes_acked = 0; 450 449 451 450 newtp->frto_counter = 0; 452 451 newtp->frto_highmark = 0;