Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: remove in_flight parameter from cong_avoid() methods

Commit e114a710aa505 ("tcp: fix cwnd limited checking to improve
congestion control") obsoleted the in_flight parameter of
tcp_is_cwnd_limited() and its callers.

This patch does the removal as promised.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet and committed by David S. Miller
24901551 e114a710

+36 -48
+3 -5
include/net/tcp.h
··· 796 796 /* return slow start threshold (required) */ 797 797 u32 (*ssthresh)(struct sock *sk); 798 798 /* do new cwnd calculation (required) */ 799 - void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight); 799 + void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked); 800 800 /* call before changing ca_state (optional) */ 801 801 void (*set_state)(struct sock *sk, u8 new_state); 802 802 /* call when cwnd event occurs (optional) */ ··· 828 828 829 829 extern struct tcp_congestion_ops tcp_init_congestion_ops; 830 830 u32 tcp_reno_ssthresh(struct sock *sk); 831 - void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight); 831 + void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked); 832 832 extern struct tcp_congestion_ops tcp_reno; 833 833 834 834 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) ··· 986 986 * risks 100% overshoot. The advantage is that we discourage application to 987 987 * either send more filler packets or data to artificially blow up the cwnd 988 988 * usage, and allow application-limited process to probe bw more aggressively. 989 - * 990 - * TODO: remove in_flight once we can fix all callers, and their callers... 991 989 */ 992 - static inline bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight) 990 + static inline bool tcp_is_cwnd_limited(const struct sock *sk) 993 991 { 994 992 const struct tcp_sock *tp = tcp_sk(sk); 995 993
+2 -3
net/ipv4/tcp_bic.c
··· 140 140 ca->cnt = 1; 141 141 } 142 142 143 - static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, 144 - u32 in_flight) 143 + static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 145 144 { 146 145 struct tcp_sock *tp = tcp_sk(sk); 147 146 struct bictcp *ca = inet_csk_ca(sk); 148 147 149 - if (!tcp_is_cwnd_limited(sk, in_flight)) 148 + if (!tcp_is_cwnd_limited(sk)) 150 149 return; 151 150 152 151 if (tp->snd_cwnd <= tp->snd_ssthresh)
+2 -2
net/ipv4/tcp_cong.c
··· 317 317 /* This is Jacobson's slow start and congestion avoidance. 318 318 * SIGCOMM '88, p. 328. 319 319 */ 320 - void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) 320 + void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked) 321 321 { 322 322 struct tcp_sock *tp = tcp_sk(sk); 323 323 324 - if (!tcp_is_cwnd_limited(sk, in_flight)) 324 + if (!tcp_is_cwnd_limited(sk)) 325 325 return; 326 326 327 327 /* In "safe" area, increase. */
+2 -3
net/ipv4/tcp_cubic.c
··· 304 304 ca->cnt = 1; 305 305 } 306 306 307 - static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, 308 - u32 in_flight) 307 + static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 309 308 { 310 309 struct tcp_sock *tp = tcp_sk(sk); 311 310 struct bictcp *ca = inet_csk_ca(sk); 312 311 313 - if (!tcp_is_cwnd_limited(sk, in_flight)) 312 + if (!tcp_is_cwnd_limited(sk)) 314 313 return; 315 314 316 315 if (tp->snd_cwnd <= tp->snd_ssthresh) {
+2 -2
net/ipv4/tcp_highspeed.c
··· 109 109 tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128); 110 110 } 111 111 112 - static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) 112 + static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 113 113 { 114 114 struct tcp_sock *tp = tcp_sk(sk); 115 115 struct hstcp *ca = inet_csk_ca(sk); 116 116 117 - if (!tcp_is_cwnd_limited(sk, in_flight)) 117 + if (!tcp_is_cwnd_limited(sk)) 118 118 return; 119 119 120 120 if (tp->snd_cwnd <= tp->snd_ssthresh)
+2 -2
net/ipv4/tcp_htcp.c
··· 227 227 return max((tp->snd_cwnd * ca->beta) >> 7, 2U); 228 228 } 229 229 230 - static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) 230 + static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 231 231 { 232 232 struct tcp_sock *tp = tcp_sk(sk); 233 233 struct htcp *ca = inet_csk_ca(sk); 234 234 235 - if (!tcp_is_cwnd_limited(sk, in_flight)) 235 + if (!tcp_is_cwnd_limited(sk)) 236 236 return; 237 237 238 238 if (tp->snd_cwnd <= tp->snd_ssthresh)
+3 -4
net/ipv4/tcp_hybla.c
··· 87 87 * o Give cwnd a new value based on the model proposed 88 88 * o remember increments <1 89 89 */ 90 - static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked, 91 - u32 in_flight) 90 + static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked) 92 91 { 93 92 struct tcp_sock *tp = tcp_sk(sk); 94 93 struct hybla *ca = inet_csk_ca(sk); ··· 100 101 ca->minrtt_us = tp->srtt_us; 101 102 } 102 103 103 - if (!tcp_is_cwnd_limited(sk, in_flight)) 104 + if (!tcp_is_cwnd_limited(sk)) 104 105 return; 105 106 106 107 if (!ca->hybla_en) { 107 - tcp_reno_cong_avoid(sk, ack, acked, in_flight); 108 + tcp_reno_cong_avoid(sk, ack, acked); 108 109 return; 109 110 } 110 111
+2 -3
net/ipv4/tcp_illinois.c
··· 255 255 /* 256 256 * Increase window in response to successful acknowledgment. 257 257 */ 258 - static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked, 259 - u32 in_flight) 258 + static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked) 260 259 { 261 260 struct tcp_sock *tp = tcp_sk(sk); 262 261 struct illinois *ca = inet_csk_ca(sk); ··· 264 265 update_params(sk); 265 266 266 267 /* RFC2861 only increase cwnd if fully utilized */ 267 - if (!tcp_is_cwnd_limited(sk, in_flight)) 268 + if (!tcp_is_cwnd_limited(sk)) 268 269 return; 269 270 270 271 /* In slow start */
+4 -5
net/ipv4/tcp_input.c
··· 2938 2938 tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L); 2939 2939 } 2940 2940 2941 - static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight) 2941 + static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 2942 2942 { 2943 2943 const struct inet_connection_sock *icsk = inet_csk(sk); 2944 - icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight); 2944 + 2945 + icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); 2945 2946 tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; 2946 2947 } 2947 2948 ··· 3365 3364 u32 ack_seq = TCP_SKB_CB(skb)->seq; 3366 3365 u32 ack = TCP_SKB_CB(skb)->ack_seq; 3367 3366 bool is_dupack = false; 3368 - u32 prior_in_flight; 3369 3367 u32 prior_fackets; 3370 3368 int prior_packets = tp->packets_out; 3371 3369 const int prior_unsacked = tp->packets_out - tp->sacked_out; ··· 3397 3397 flag |= FLAG_SND_UNA_ADVANCED; 3398 3398 3399 3399 prior_fackets = tp->fackets_out; 3400 - prior_in_flight = tcp_packets_in_flight(tp); 3401 3400 3402 3401 /* ts_recent update must be made after we are sure that the packet 3403 3402 * is in window. ··· 3451 3452 3452 3453 /* Advance cwnd if state allows */ 3453 3454 if (tcp_may_raise_cwnd(sk, flag)) 3454 - tcp_cong_avoid(sk, ack, acked, prior_in_flight); 3455 + tcp_cong_avoid(sk, ack, acked); 3455 3456 3456 3457 if (tcp_ack_is_dubious(sk, flag)) { 3457 3458 is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
+2 -3
net/ipv4/tcp_lp.c
··· 115 115 * Will only call newReno CA when away from inference. 116 116 * From TCP-LP's paper, this will be handled in additive increasement. 117 117 */ 118 - static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked, 119 - u32 in_flight) 118 + static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked) 120 119 { 121 120 struct lp *lp = inet_csk_ca(sk); 122 121 123 122 if (!(lp->flag & LP_WITHIN_INF)) 124 - tcp_reno_cong_avoid(sk, ack, acked, in_flight); 123 + tcp_reno_cong_avoid(sk, ack, acked); 125 124 } 126 125 127 126 /**
+1 -1
net/ipv4/tcp_output.c
··· 1408 1408 1409 1409 tp->lsnd_pending = tp->packets_out + unsent_segs; 1410 1410 1411 - if (tcp_is_cwnd_limited(sk, 0)) { 1411 + if (tcp_is_cwnd_limited(sk)) { 1412 1412 /* Network is feed fully. */ 1413 1413 tp->snd_cwnd_used = 0; 1414 1414 tp->snd_cwnd_stamp = tcp_time_stamp;
+2 -3
net/ipv4/tcp_scalable.c
··· 15 15 #define TCP_SCALABLE_AI_CNT 50U 16 16 #define TCP_SCALABLE_MD_SCALE 3 17 17 18 - static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked, 19 - u32 in_flight) 18 + static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) 20 19 { 21 20 struct tcp_sock *tp = tcp_sk(sk); 22 21 23 - if (!tcp_is_cwnd_limited(sk, in_flight)) 22 + if (!tcp_is_cwnd_limited(sk)) 24 23 return; 25 24 26 25 if (tp->snd_cwnd <= tp->snd_ssthresh)
+3 -4
net/ipv4/tcp_vegas.c
··· 163 163 return min(tp->snd_ssthresh, tp->snd_cwnd-1); 164 164 } 165 165 166 - static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked, 167 - u32 in_flight) 166 + static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) 168 167 { 169 168 struct tcp_sock *tp = tcp_sk(sk); 170 169 struct vegas *vegas = inet_csk_ca(sk); 171 170 172 171 if (!vegas->doing_vegas_now) { 173 - tcp_reno_cong_avoid(sk, ack, acked, in_flight); 172 + tcp_reno_cong_avoid(sk, ack, acked); 174 173 return; 175 174 } 176 175 ··· 194 195 /* We don't have enough RTT samples to do the Vegas 195 196 * calculation, so we'll behave like Reno. 196 197 */ 197 - tcp_reno_cong_avoid(sk, ack, acked, in_flight); 198 + tcp_reno_cong_avoid(sk, ack, acked); 198 199 } else { 199 200 u32 rtt, diff; 200 201 u64 target_cwnd;
+4 -5
net/ipv4/tcp_veno.c
··· 114 114 tcp_veno_init(sk); 115 115 } 116 116 117 - static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked, 118 - u32 in_flight) 117 + static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked) 119 118 { 120 119 struct tcp_sock *tp = tcp_sk(sk); 121 120 struct veno *veno = inet_csk_ca(sk); 122 121 123 122 if (!veno->doing_veno_now) { 124 - tcp_reno_cong_avoid(sk, ack, acked, in_flight); 123 + tcp_reno_cong_avoid(sk, ack, acked); 125 124 return; 126 125 } 127 126 128 127 /* limited by applications */ 129 - if (!tcp_is_cwnd_limited(sk, in_flight)) 128 + if (!tcp_is_cwnd_limited(sk)) 130 129 return; 131 130 132 131 /* We do the Veno calculations only if we got enough rtt samples */ ··· 133 134 /* We don't have enough rtt samples to do the Veno 134 135 * calculation, so we'll behave like Reno. 135 136 */ 136 - tcp_reno_cong_avoid(sk, ack, acked, in_flight); 137 + tcp_reno_cong_avoid(sk, ack, acked); 137 138 } else { 138 139 u64 target_cwnd; 139 140 u32 rtt;
+2 -3
net/ipv4/tcp_yeah.c
··· 69 69 tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us); 70 70 } 71 71 72 - static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked, 73 - u32 in_flight) 72 + static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked) 74 73 { 75 74 struct tcp_sock *tp = tcp_sk(sk); 76 75 struct yeah *yeah = inet_csk_ca(sk); 77 76 78 - if (!tcp_is_cwnd_limited(sk, in_flight)) 77 + if (!tcp_is_cwnd_limited(sk)) 79 78 return; 80 79 81 80 if (tp->snd_cwnd <= tp->snd_ssthresh)