Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: consolidate congestion control undo functions

Most TCP congestion controls are using identical logic to undo
cwnd except BBR. This patch consolidates these similar functions
to the one used currently by Reno and others.

Suggested-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by Yuchung Cheng and committed by David S. Miller
f1722a1b 4faf7839

+9 -103
+1 -13
net/ipv4/tcp_bic.c
··· 49 49 struct bictcp { 50 50 u32 cnt; /* increase cwnd by 1 after ACKs */ 51 51 u32 last_max_cwnd; /* last maximum snd_cwnd */ 52 - u32 loss_cwnd; /* congestion window at last loss */ 53 52 u32 last_cwnd; /* the last snd_cwnd */ 54 53 u32 last_time; /* time when updated last_cwnd */ 55 54 u32 epoch_start; /* beginning of an epoch */ ··· 71 72 struct bictcp *ca = inet_csk_ca(sk); 72 73 73 74 bictcp_reset(ca); 74 - ca->loss_cwnd = 0; 75 75 76 76 if (initial_ssthresh) 77 77 tcp_sk(sk)->snd_ssthresh = initial_ssthresh; ··· 170 172 else 171 173 ca->last_max_cwnd = tp->snd_cwnd; 172 174 173 - ca->loss_cwnd = tp->snd_cwnd; 174 - 175 175 if (tp->snd_cwnd <= low_window) 176 176 return max(tp->snd_cwnd >> 1U, 2U); 177 177 else 178 178 return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); 179 - } 180 - 181 - static u32 bictcp_undo_cwnd(struct sock *sk) 182 - { 183 - const struct tcp_sock *tp = tcp_sk(sk); 184 - const struct bictcp *ca = inet_csk_ca(sk); 185 - 186 - return max(tp->snd_cwnd, ca->loss_cwnd); 187 179 } 188 180 189 181 static void bictcp_state(struct sock *sk, u8 new_state) ··· 202 214 .ssthresh = bictcp_recalc_ssthresh, 203 215 .cong_avoid = bictcp_cong_avoid, 204 216 .set_state = bictcp_state, 205 - .undo_cwnd = bictcp_undo_cwnd, 217 + .undo_cwnd = tcp_reno_undo_cwnd, 206 218 .pkts_acked = bictcp_acked, 207 219 .owner = THIS_MODULE, 208 220 .name = "bic",
+1 -11
net/ipv4/tcp_cdg.c
··· 85 85 u8 state; 86 86 u8 delack; 87 87 u32 rtt_seq; 88 - u32 undo_cwnd; 89 88 u32 shadow_wnd; 90 89 u16 backoff_cnt; 91 90 u16 sample_cnt; ··· 329 330 struct cdg *ca = inet_csk_ca(sk); 330 331 struct tcp_sock *tp = tcp_sk(sk); 331 332 332 - ca->undo_cwnd = tp->snd_cwnd; 333 - 334 333 if (ca->state == CDG_BACKOFF) 335 334 return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10); 336 335 ··· 339 342 if (use_shadow) 340 343 return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1); 341 344 return max(2U, tp->snd_cwnd >> 1); 342 - } 343 - 344 - static u32 tcp_cdg_undo_cwnd(struct sock *sk) 345 - { 346 - struct cdg *ca = inet_csk_ca(sk); 347 - 348 - return max(tcp_sk(sk)->snd_cwnd, ca->undo_cwnd); 349 345 } 350 346 351 347 static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev) ··· 393 403 .cong_avoid = tcp_cdg_cong_avoid, 394 404 .cwnd_event = tcp_cdg_cwnd_event, 395 405 .pkts_acked = tcp_cdg_acked, 396 - .undo_cwnd = tcp_cdg_undo_cwnd, 406 + .undo_cwnd = tcp_reno_undo_cwnd, 397 407 .ssthresh = tcp_cdg_ssthresh, 398 408 .release = tcp_cdg_release, 399 409 .init = tcp_cdg_init,
+1 -12
net/ipv4/tcp_cubic.c
··· 83 83 struct bictcp { 84 84 u32 cnt; /* increase cwnd by 1 after ACKs */ 85 85 u32 last_max_cwnd; /* last maximum snd_cwnd */ 86 - u32 loss_cwnd; /* congestion window at last loss */ 87 86 u32 last_cwnd; /* the last snd_cwnd */ 88 87 u32 last_time; /* time when updated last_cwnd */ 89 88 u32 bic_origin_point;/* origin point of bic function */ ··· 141 142 struct bictcp *ca = inet_csk_ca(sk); 142 143 143 144 bictcp_reset(ca); 144 - ca->loss_cwnd = 0; 145 145 146 146 if (hystart) 147 147 bictcp_hystart_reset(sk); ··· 364 366 else 365 367 ca->last_max_cwnd = tp->snd_cwnd; 366 368 367 - ca->loss_cwnd = tp->snd_cwnd; 368 - 369 369 return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); 370 - } 371 - 372 - static u32 bictcp_undo_cwnd(struct sock *sk) 373 - { 374 - struct bictcp *ca = inet_csk_ca(sk); 375 - 376 - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); 377 370 } 378 371 379 372 static void bictcp_state(struct sock *sk, u8 new_state) ··· 459 470 .ssthresh = bictcp_recalc_ssthresh, 460 471 .cong_avoid = bictcp_cong_avoid, 461 472 .set_state = bictcp_state, 462 - .undo_cwnd = bictcp_undo_cwnd, 473 + .undo_cwnd = tcp_reno_undo_cwnd, 463 474 .cwnd_event = bictcp_cwnd_event, 464 475 .pkts_acked = bictcp_acked, 465 476 .owner = THIS_MODULE,
+1 -10
net/ipv4/tcp_highspeed.c
··· 94 94 95 95 struct hstcp { 96 96 u32 ai; 97 - u32 loss_cwnd; 98 97 }; 99 98 100 99 static void hstcp_init(struct sock *sk) ··· 152 153 const struct tcp_sock *tp = tcp_sk(sk); 153 154 struct hstcp *ca = inet_csk_ca(sk); 154 155 155 - ca->loss_cwnd = tp->snd_cwnd; 156 156 /* Do multiplicative decrease */ 157 157 return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); 158 - } 159 - 160 - static u32 hstcp_cwnd_undo(struct sock *sk) 161 - { 162 - const struct hstcp *ca = inet_csk_ca(sk); 163 - 164 - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); 165 158 } 166 159 167 160 static struct tcp_congestion_ops tcp_highspeed __read_mostly = { 168 161 .init = hstcp_init, 169 162 .ssthresh = hstcp_ssthresh, 170 - .undo_cwnd = hstcp_cwnd_undo, 163 + .undo_cwnd = tcp_reno_undo_cwnd, 171 164 .cong_avoid = hstcp_cong_avoid, 172 165 173 166 .owner = THIS_MODULE,
+1 -10
net/ipv4/tcp_illinois.c
··· 48 48 u32 end_seq; /* right edge of current RTT */ 49 49 u32 alpha; /* Additive increase */ 50 50 u32 beta; /* Muliplicative decrease */ 51 - u32 loss_cwnd; /* cwnd on loss */ 52 51 u16 acked; /* # packets acked by current ACK */ 53 52 u8 rtt_above; /* average rtt has gone above threshold */ 54 53 u8 rtt_low; /* # of rtts measurements below threshold */ ··· 296 297 struct tcp_sock *tp = tcp_sk(sk); 297 298 struct illinois *ca = inet_csk_ca(sk); 298 299 299 - ca->loss_cwnd = tp->snd_cwnd; 300 300 /* Multiplicative decrease */ 301 301 return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); 302 - } 303 - 304 - static u32 tcp_illinois_cwnd_undo(struct sock *sk) 305 - { 306 - const struct illinois *ca = inet_csk_ca(sk); 307 - 308 - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); 309 302 } 310 303 311 304 /* Extract info for Tcp socket info provided via netlink. */ ··· 327 336 static struct tcp_congestion_ops tcp_illinois __read_mostly = { 328 337 .init = tcp_illinois_init, 329 338 .ssthresh = tcp_illinois_ssthresh, 330 - .undo_cwnd = tcp_illinois_cwnd_undo, 339 + .undo_cwnd = tcp_reno_undo_cwnd, 331 340 .cong_avoid = tcp_illinois_cong_avoid, 332 341 .set_state = tcp_illinois_state, 333 342 .get_info = tcp_illinois_info,
+1 -12
net/ipv4/tcp_nv.c
··· 86 86 * < 0 => less than 1 packet/RTT */ 87 87 u8 available8; 88 88 u16 available16; 89 - u32 loss_cwnd; /* cwnd at last loss */ 90 89 u8 nv_allow_cwnd_growth:1, /* whether cwnd can grow */ 91 90 nv_reset:1, /* whether to reset values */ 92 91 nv_catchup:1; /* whether we are growing because ··· 120 121 struct tcp_sock *tp = tcp_sk(sk); 121 122 122 123 ca->nv_reset = 0; 123 - ca->loss_cwnd = 0; 124 124 ca->nv_no_cong_cnt = 0; 125 125 ca->nv_rtt_cnt = 0; 126 126 ca->nv_last_rtt = 0; ··· 175 177 static u32 tcpnv_recalc_ssthresh(struct sock *sk) 176 178 { 177 179 const struct tcp_sock *tp = tcp_sk(sk); 178 - struct tcpnv *ca = inet_csk_ca(sk); 179 180 180 - ca->loss_cwnd = tp->snd_cwnd; 181 181 return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U); 182 - } 183 - 184 - static u32 tcpnv_undo_cwnd(struct sock *sk) 185 - { 186 - struct tcpnv *ca = inet_csk_ca(sk); 187 - 188 - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); 189 182 } 190 183 191 184 static void tcpnv_state(struct sock *sk, u8 new_state) ··· 435 446 .ssthresh = tcpnv_recalc_ssthresh, 436 447 .cong_avoid = tcpnv_cong_avoid, 437 448 .set_state = tcpnv_state, 438 - .undo_cwnd = tcpnv_undo_cwnd, 449 + .undo_cwnd = tcp_reno_undo_cwnd, 439 450 .pkts_acked = tcpnv_acked, 440 451 .get_info = tcpnv_get_info, 441 452
+1 -15
net/ipv4/tcp_scalable.c
··· 15 15 #define TCP_SCALABLE_AI_CNT 50U 16 16 #define TCP_SCALABLE_MD_SCALE 3 17 17 18 - struct scalable { 19 - u32 loss_cwnd; 20 - }; 21 - 22 18 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) 23 19 { 24 20 struct tcp_sock *tp = tcp_sk(sk); ··· 32 36 static u32 tcp_scalable_ssthresh(struct sock *sk) 33 37 { 34 38 const struct tcp_sock *tp = tcp_sk(sk); 35 - struct scalable *ca = inet_csk_ca(sk); 36 - 37 - ca->loss_cwnd = tp->snd_cwnd; 38 39 39 40 return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); 40 41 } 41 42 42 - static u32 tcp_scalable_cwnd_undo(struct sock *sk) 43 - { 44 - const struct scalable *ca = inet_csk_ca(sk); 45 - 46 - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); 47 - } 48 - 49 43 static struct tcp_congestion_ops tcp_scalable __read_mostly = { 50 44 .ssthresh = tcp_scalable_ssthresh, 51 - .undo_cwnd = tcp_scalable_cwnd_undo, 45 + .undo_cwnd = tcp_reno_undo_cwnd, 52 46 .cong_avoid = tcp_scalable_cong_avoid, 53 47 54 48 .owner = THIS_MODULE,
+1 -10
net/ipv4/tcp_veno.c
··· 30 30 u32 basertt; /* the min of all Veno rtt measurements seen (in usec) */ 31 31 u32 inc; /* decide whether to increase cwnd */ 32 32 u32 diff; /* calculate the diff rate */ 33 - u32 loss_cwnd; /* cwnd when loss occured */ 34 33 }; 35 34 36 35 /* There are several situations when we must "re-start" Veno: ··· 193 194 const struct tcp_sock *tp = tcp_sk(sk); 194 195 struct veno *veno = inet_csk_ca(sk); 195 196 196 - veno->loss_cwnd = tp->snd_cwnd; 197 197 if (veno->diff < beta) 198 198 /* in "non-congestive state", cut cwnd by 1/5 */ 199 199 return max(tp->snd_cwnd * 4 / 5, 2U); ··· 201 203 return max(tp->snd_cwnd >> 1U, 2U); 202 204 } 203 205 204 - static u32 tcp_veno_cwnd_undo(struct sock *sk) 205 - { 206 - const struct veno *veno = inet_csk_ca(sk); 207 - 208 - return max(tcp_sk(sk)->snd_cwnd, veno->loss_cwnd); 209 - } 210 - 211 206 static struct tcp_congestion_ops tcp_veno __read_mostly = { 212 207 .init = tcp_veno_init, 213 208 .ssthresh = tcp_veno_ssthresh, 214 - .undo_cwnd = tcp_veno_cwnd_undo, 209 + .undo_cwnd = tcp_reno_undo_cwnd, 215 210 .cong_avoid = tcp_veno_cong_avoid, 216 211 .pkts_acked = tcp_veno_pkts_acked, 217 212 .set_state = tcp_veno_state,
+1 -10
net/ipv4/tcp_yeah.c
··· 37 37 u32 fast_count; 38 38 39 39 u32 pkts_acked; 40 - u32 loss_cwnd; 41 40 }; 42 41 43 42 static void tcp_yeah_init(struct sock *sk) ··· 219 220 220 221 yeah->fast_count = 0; 221 222 yeah->reno_count = max(yeah->reno_count>>1, 2U); 222 - yeah->loss_cwnd = tp->snd_cwnd; 223 223 224 224 return max_t(int, tp->snd_cwnd - reduction, 2); 225 - } 226 - 227 - static u32 tcp_yeah_cwnd_undo(struct sock *sk) 228 - { 229 - const struct yeah *yeah = inet_csk_ca(sk); 230 - 231 - return max(tcp_sk(sk)->snd_cwnd, yeah->loss_cwnd); 232 225 } 233 226 234 227 static struct tcp_congestion_ops tcp_yeah __read_mostly = { 235 228 .init = tcp_yeah_init, 236 229 .ssthresh = tcp_yeah_ssthresh, 237 - .undo_cwnd = tcp_yeah_cwnd_undo, 230 + .undo_cwnd = tcp_reno_undo_cwnd, 238 231 .cong_avoid = tcp_yeah_cong_avoid, 239 232 .set_state = tcp_vegas_state, 240 233 .cwnd_event = tcp_vegas_cwnd_event,