Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: add accessors to read/set tp->snd_cwnd

We had various bugs over the years with code
breaking the assumption that tp->snd_cwnd is greater
than zero.

Lately, syzbot reported the WARN_ON_ONCE(!tp->prior_cwnd) added
in commit 8b8a321ff72c ("tcp: fix zero cwnd in tcp_cwnd_reduction")
can trigger, and without a repro we would have to spend
considerable time finding the bug.

Instead of complaining too late, we want to catch where
and when tp->snd_cwnd is set to an illegal value.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Link: https://lore.kernel.org/r/20220405233538.947344-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Eric Dumazet and committed by Jakub Kicinski.
40570375 487dc3ca

+208 -192
+15 -4
include/net/tcp.h
··· 1207 1207 1208 1208 #define TCP_INFINITE_SSTHRESH 0x7fffffff 1209 1209 1210 + static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp) 1211 + { 1212 + return tp->snd_cwnd; 1213 + } 1214 + 1215 + static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val) 1216 + { 1217 + WARN_ON_ONCE((int)val <= 0); 1218 + tp->snd_cwnd = val; 1219 + } 1220 + 1210 1221 static inline bool tcp_in_slow_start(const struct tcp_sock *tp) 1211 1222 { 1212 - return tp->snd_cwnd < tp->snd_ssthresh; 1223 + return tcp_snd_cwnd(tp) < tp->snd_ssthresh; 1213 1224 } 1214 1225 1215 1226 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp) ··· 1246 1235 return tp->snd_ssthresh; 1247 1236 else 1248 1237 return max(tp->snd_ssthresh, 1249 - ((tp->snd_cwnd >> 1) + 1250 - (tp->snd_cwnd >> 2))); 1238 + ((tcp_snd_cwnd(tp) >> 1) + 1239 + (tcp_snd_cwnd(tp) >> 2))); 1251 1240 } 1252 1241 1253 1242 /* Use define here intentionally to get WARN_ON location shown at the caller */ ··· 1289 1278 1290 1279 /* If in slow start, ensure cwnd grows to twice what was ACKed. */ 1291 1280 if (tcp_in_slow_start(tp)) 1292 - return tp->snd_cwnd < 2 * tp->max_packets_out; 1281 + return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out; 1293 1282 1294 1283 return tp->is_cwnd_limited; 1295 1284 }
+1 -1
include/trace/events/tcp.h
··· 279 279 __entry->data_len = skb->len - __tcp_hdrlen(th); 280 280 __entry->snd_nxt = tp->snd_nxt; 281 281 __entry->snd_una = tp->snd_una; 282 - __entry->snd_cwnd = tp->snd_cwnd; 282 + __entry->snd_cwnd = tcp_snd_cwnd(tp); 283 283 __entry->snd_wnd = tp->snd_wnd; 284 284 __entry->rcv_wnd = tp->rcv_wnd; 285 285 __entry->ssthresh = tcp_current_ssthresh(sk);
+1 -1
net/core/filter.c
··· 5173 5173 if (val <= 0 || tp->data_segs_out > tp->syn_data) 5174 5174 ret = -EINVAL; 5175 5175 else 5176 - tp->snd_cwnd = val; 5176 + tcp_snd_cwnd_set(tp, val); 5177 5177 break; 5178 5178 case TCP_BPF_SNDCWND_CLAMP: 5179 5179 if (val <= 0) {
+4 -4
net/ipv4/tcp.c
··· 429 429 * algorithms that we must have the following bandaid to talk 430 430 * efficiently to them. -DaveM 431 431 */ 432 - tp->snd_cwnd = TCP_INIT_CWND; 432 + tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 433 433 434 434 /* There's a bubble in the pipe until at least the first ACK. */ 435 435 tp->app_limited = ~0U; ··· 3033 3033 icsk->icsk_rto_min = TCP_RTO_MIN; 3034 3034 icsk->icsk_delack_max = TCP_DELACK_MAX; 3035 3035 tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; 3036 - tp->snd_cwnd = TCP_INIT_CWND; 3036 + tcp_snd_cwnd_set(tp, TCP_INIT_CWND); 3037 3037 tp->snd_cwnd_cnt = 0; 3038 3038 tp->window_clamp = 0; 3039 3039 tp->delivered = 0; ··· 3744 3744 info->tcpi_max_pacing_rate = rate64; 3745 3745 3746 3746 info->tcpi_reordering = tp->reordering; 3747 - info->tcpi_snd_cwnd = tp->snd_cwnd; 3747 + info->tcpi_snd_cwnd = tcp_snd_cwnd(tp); 3748 3748 3749 3749 if (info->tcpi_state == TCP_LISTEN) { 3750 3750 /* listeners aliased fields : ··· 3915 3915 rate64 = tcp_compute_delivery_rate(tp); 3916 3916 nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD); 3917 3917 3918 - nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd); 3918 + nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp)); 3919 3919 nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering); 3920 3920 nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp)); 3921 3921
+10 -10
net/ipv4/tcp_bbr.c
··· 276 276 } else { /* no RTT sample yet */ 277 277 rtt_us = USEC_PER_MSEC; /* use nominal default RTT */ 278 278 } 279 - bw = (u64)tp->snd_cwnd * BW_UNIT; 279 + bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT; 280 280 do_div(bw, rtt_us); 281 281 sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain); 282 282 } ··· 323 323 struct bbr *bbr = inet_csk_ca(sk); 324 324 325 325 if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT) 326 - bbr->prior_cwnd = tp->snd_cwnd; /* this cwnd is good enough */ 326 + bbr->prior_cwnd = tcp_snd_cwnd(tp); /* this cwnd is good enough */ 327 327 else /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */ 328 - bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd); 328 + bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp)); 329 329 } 330 330 331 331 static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event) ··· 482 482 struct tcp_sock *tp = tcp_sk(sk); 483 483 struct bbr *bbr = inet_csk_ca(sk); 484 484 u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state; 485 - u32 cwnd = tp->snd_cwnd; 485 + u32 cwnd = tcp_snd_cwnd(tp); 486 486 487 487 /* An ACK for P pkts should release at most 2*P packets. We do this 488 488 * in two steps. First, here we deduct the number of lost packets. 
··· 520 520 { 521 521 struct tcp_sock *tp = tcp_sk(sk); 522 522 struct bbr *bbr = inet_csk_ca(sk); 523 - u32 cwnd = tp->snd_cwnd, target_cwnd = 0; 523 + u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0; 524 524 525 525 if (!acked) 526 526 goto done; /* no packet fully ACKed; just apply caps */ ··· 544 544 cwnd = max(cwnd, bbr_cwnd_min_target); 545 545 546 546 done: 547 - tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); /* apply global cap */ 547 + tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); /* apply global cap */ 548 548 if (bbr->mode == BBR_PROBE_RTT) /* drain queue, refresh min_rtt */ 549 - tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target); 549 + tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), bbr_cwnd_min_target)); 550 550 } 551 551 552 552 /* End cycle phase if it's time and/or we hit the phase's in-flight target. */ ··· 856 856 bbr->ack_epoch_acked = min_t(u32, 0xFFFFF, 857 857 bbr->ack_epoch_acked + rs->acked_sacked); 858 858 extra_acked = bbr->ack_epoch_acked - expected_acked; 859 - extra_acked = min(extra_acked, tp->snd_cwnd); 859 + extra_acked = min(extra_acked, tcp_snd_cwnd(tp)); 860 860 if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx]) 861 861 bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked; 862 862 } ··· 914 914 return; 915 915 916 916 bbr->min_rtt_stamp = tcp_jiffies32; /* wait a while until PROBE_RTT */ 917 - tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd); 917 + tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd)); 918 918 bbr_reset_mode(sk); 919 919 } 920 920 ··· 1093 1093 bbr->full_bw = 0; /* spurious slow-down; reset full pipe detection */ 1094 1094 bbr->full_bw_cnt = 0; 1095 1095 bbr_reset_lt_bw_sampling(sk); 1096 - return tcp_sk(sk)->snd_cwnd; 1096 + return tcp_snd_cwnd(tcp_sk(sk)); 1097 1097 } 1098 1098 1099 1099 /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */
+7 -7
net/ipv4/tcp_bic.c
··· 150 150 if (!acked) 151 151 return; 152 152 } 153 - bictcp_update(ca, tp->snd_cwnd); 153 + bictcp_update(ca, tcp_snd_cwnd(tp)); 154 154 tcp_cong_avoid_ai(tp, ca->cnt, acked); 155 155 } 156 156 ··· 166 166 ca->epoch_start = 0; /* end of epoch */ 167 167 168 168 /* Wmax and fast convergence */ 169 - if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence) 170 - ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta)) 169 + if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence) 170 + ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta)) 171 171 / (2 * BICTCP_BETA_SCALE); 172 172 else 173 - ca->last_max_cwnd = tp->snd_cwnd; 173 + ca->last_max_cwnd = tcp_snd_cwnd(tp); 174 174 175 - if (tp->snd_cwnd <= low_window) 176 - return max(tp->snd_cwnd >> 1U, 2U); 175 + if (tcp_snd_cwnd(tp) <= low_window) 176 + return max(tcp_snd_cwnd(tp) >> 1U, 2U); 177 177 else 178 - return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); 178 + return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U); 179 179 } 180 180 181 181 static void bictcp_state(struct sock *sk, u8 new_state)
+15 -15
net/ipv4/tcp_cdg.c
··· 161 161 LINUX_MIB_TCPHYSTARTTRAINDETECT); 162 162 NET_ADD_STATS(sock_net(sk), 163 163 LINUX_MIB_TCPHYSTARTTRAINCWND, 164 - tp->snd_cwnd); 165 - tp->snd_ssthresh = tp->snd_cwnd; 164 + tcp_snd_cwnd(tp)); 165 + tp->snd_ssthresh = tcp_snd_cwnd(tp); 166 166 return; 167 167 } 168 168 } ··· 180 180 LINUX_MIB_TCPHYSTARTDELAYDETECT); 181 181 NET_ADD_STATS(sock_net(sk), 182 182 LINUX_MIB_TCPHYSTARTDELAYCWND, 183 - tp->snd_cwnd); 184 - tp->snd_ssthresh = tp->snd_cwnd; 183 + tcp_snd_cwnd(tp)); 184 + tp->snd_ssthresh = tcp_snd_cwnd(tp); 185 185 } 186 186 } 187 187 } ··· 252 252 return false; 253 253 } 254 254 255 - ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd); 255 + ca->shadow_wnd = max(ca->shadow_wnd, tcp_snd_cwnd(tp)); 256 256 ca->state = CDG_BACKOFF; 257 257 tcp_enter_cwr(sk); 258 258 return true; ··· 285 285 } 286 286 287 287 if (!tcp_is_cwnd_limited(sk)) { 288 - ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd); 288 + ca->shadow_wnd = min(ca->shadow_wnd, tcp_snd_cwnd(tp)); 289 289 return; 290 290 } 291 291 292 - prior_snd_cwnd = tp->snd_cwnd; 292 + prior_snd_cwnd = tcp_snd_cwnd(tp); 293 293 tcp_reno_cong_avoid(sk, ack, acked); 294 294 295 - incr = tp->snd_cwnd - prior_snd_cwnd; 295 + incr = tcp_snd_cwnd(tp) - prior_snd_cwnd; 296 296 ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr); 297 297 } 298 298 ··· 331 331 struct tcp_sock *tp = tcp_sk(sk); 332 332 333 333 if (ca->state == CDG_BACKOFF) 334 - return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10); 334 + return max(2U, (tcp_snd_cwnd(tp) * min(1024U, backoff_beta)) >> 10); 335 335 336 336 if (ca->state == CDG_NONFULL && use_tolerance) 337 - return tp->snd_cwnd; 337 + return tcp_snd_cwnd(tp); 338 338 339 - ca->shadow_wnd = min(ca->shadow_wnd >> 1, tp->snd_cwnd); 339 + ca->shadow_wnd = min(ca->shadow_wnd >> 1, tcp_snd_cwnd(tp)); 340 340 if (use_shadow) 341 - return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1); 342 - return max(2U, tp->snd_cwnd >> 1); 341 + return max3(2U, ca->shadow_wnd, 
tcp_snd_cwnd(tp) >> 1); 342 + return max(2U, tcp_snd_cwnd(tp) >> 1); 343 343 } 344 344 345 345 static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev) ··· 357 357 358 358 ca->gradients = gradients; 359 359 ca->rtt_seq = tp->snd_nxt; 360 - ca->shadow_wnd = tp->snd_cwnd; 360 + ca->shadow_wnd = tcp_snd_cwnd(tp); 361 361 break; 362 362 case CA_EVENT_COMPLETE_CWR: 363 363 ca->state = CDG_UNKNOWN; ··· 380 380 ca->gradients = kcalloc(window, sizeof(ca->gradients[0]), 381 381 GFP_NOWAIT | __GFP_NOWARN); 382 382 ca->rtt_seq = tp->snd_nxt; 383 - ca->shadow_wnd = tp->snd_cwnd; 383 + ca->shadow_wnd = tcp_snd_cwnd(tp); 384 384 } 385 385 386 386 static void tcp_cdg_release(struct sock *sk)
+9 -9
net/ipv4/tcp_cong.c
··· 393 393 */ 394 394 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked) 395 395 { 396 - u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh); 396 + u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh); 397 397 398 - acked -= cwnd - tp->snd_cwnd; 399 - tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp); 398 + acked -= cwnd - tcp_snd_cwnd(tp); 399 + tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp)); 400 400 401 401 return acked; 402 402 } ··· 410 410 /* If credits accumulated at a higher w, apply them gently now. */ 411 411 if (tp->snd_cwnd_cnt >= w) { 412 412 tp->snd_cwnd_cnt = 0; 413 - tp->snd_cwnd++; 413 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); 414 414 } 415 415 416 416 tp->snd_cwnd_cnt += acked; ··· 418 418 u32 delta = tp->snd_cwnd_cnt / w; 419 419 420 420 tp->snd_cwnd_cnt -= delta * w; 421 - tp->snd_cwnd += delta; 421 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta); 422 422 } 423 - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp); 423 + tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp)); 424 424 } 425 425 EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai); 426 426 ··· 445 445 return; 446 446 } 447 447 /* In dangerous area, increase slowly. */ 448 - tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked); 448 + tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); 449 449 } 450 450 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid); 451 451 ··· 454 454 { 455 455 const struct tcp_sock *tp = tcp_sk(sk); 456 456 457 - return max(tp->snd_cwnd >> 1U, 2U); 457 + return max(tcp_snd_cwnd(tp) >> 1U, 2U); 458 458 } 459 459 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh); 460 460 ··· 462 462 { 463 463 const struct tcp_sock *tp = tcp_sk(sk); 464 464 465 - return max(tp->snd_cwnd, tp->prior_cwnd); 465 + return max(tcp_snd_cwnd(tp), tp->prior_cwnd); 466 466 } 467 467 EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd); 468 468
+11 -11
net/ipv4/tcp_cubic.c
··· 334 334 if (!acked) 335 335 return; 336 336 } 337 - bictcp_update(ca, tp->snd_cwnd, acked); 337 + bictcp_update(ca, tcp_snd_cwnd(tp), acked); 338 338 tcp_cong_avoid_ai(tp, ca->cnt, acked); 339 339 } 340 340 ··· 346 346 ca->epoch_start = 0; /* end of epoch */ 347 347 348 348 /* Wmax and fast convergence */ 349 - if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence) 350 - ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta)) 349 + if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence) 350 + ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta)) 351 351 / (2 * BICTCP_BETA_SCALE); 352 352 else 353 - ca->last_max_cwnd = tp->snd_cwnd; 353 + ca->last_max_cwnd = tcp_snd_cwnd(tp); 354 354 355 - return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); 355 + return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U); 356 356 } 357 357 358 358 static void cubictcp_state(struct sock *sk, u8 new_state) ··· 413 413 ca->found = 1; 414 414 pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n", 415 415 now - ca->round_start, threshold, 416 - ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd); 416 + ca->delay_min, hystart_ack_delay(sk), tcp_snd_cwnd(tp)); 417 417 NET_INC_STATS(sock_net(sk), 418 418 LINUX_MIB_TCPHYSTARTTRAINDETECT); 419 419 NET_ADD_STATS(sock_net(sk), 420 420 LINUX_MIB_TCPHYSTARTTRAINCWND, 421 - tp->snd_cwnd); 422 - tp->snd_ssthresh = tp->snd_cwnd; 421 + tcp_snd_cwnd(tp)); 422 + tp->snd_ssthresh = tcp_snd_cwnd(tp); 423 423 } 424 424 } 425 425 } ··· 438 438 LINUX_MIB_TCPHYSTARTDELAYDETECT); 439 439 NET_ADD_STATS(sock_net(sk), 440 440 LINUX_MIB_TCPHYSTARTDELAYCWND, 441 - tp->snd_cwnd); 442 - tp->snd_ssthresh = tp->snd_cwnd; 441 + tcp_snd_cwnd(tp)); 442 + tp->snd_ssthresh = tcp_snd_cwnd(tp); 443 443 } 444 444 } 445 445 } ··· 469 469 470 470 /* hystart triggers when cwnd is larger than some threshold */ 471 471 if (!ca->found && tcp_in_slow_start(tp) && hystart && 472 - tp->snd_cwnd >= 
hystart_low_window) 472 + tcp_snd_cwnd(tp) >= hystart_low_window) 473 473 hystart_update(sk, delay); 474 474 } 475 475
+6 -5
net/ipv4/tcp_dctcp.c
··· 106 106 struct dctcp *ca = inet_csk_ca(sk); 107 107 struct tcp_sock *tp = tcp_sk(sk); 108 108 109 - ca->loss_cwnd = tp->snd_cwnd; 110 - return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U); 109 + ca->loss_cwnd = tcp_snd_cwnd(tp); 110 + return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U); 111 111 } 112 112 113 113 static void dctcp_update_alpha(struct sock *sk, u32 flags) ··· 148 148 struct dctcp *ca = inet_csk_ca(sk); 149 149 struct tcp_sock *tp = tcp_sk(sk); 150 150 151 - ca->loss_cwnd = tp->snd_cwnd; 152 - tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U); 151 + ca->loss_cwnd = tcp_snd_cwnd(tp); 152 + tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U); 153 153 } 154 154 155 155 static void dctcp_state(struct sock *sk, u8 new_state) ··· 211 211 static u32 dctcp_cwnd_undo(struct sock *sk) 212 212 { 213 213 const struct dctcp *ca = inet_csk_ca(sk); 214 + struct tcp_sock *tp = tcp_sk(sk); 214 215 215 - return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd); 216 + return max(tcp_snd_cwnd(tp), ca->loss_cwnd); 216 217 } 217 218 218 219 static struct tcp_congestion_ops dctcp __read_mostly = {
+9 -9
net/ipv4/tcp_highspeed.c
··· 127 127 * snd_cwnd <= 128 128 * hstcp_aimd_vals[ca->ai].cwnd 129 129 */ 130 - if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) { 131 - while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd && 130 + if (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd) { 131 + while (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd && 132 132 ca->ai < HSTCP_AIMD_MAX - 1) 133 133 ca->ai++; 134 - } else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) { 135 - while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) 134 + } else if (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) { 135 + while (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) 136 136 ca->ai--; 137 137 } 138 138 139 139 /* Do additive increase */ 140 - if (tp->snd_cwnd < tp->snd_cwnd_clamp) { 140 + if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) { 141 141 /* cwnd = cwnd + a(w) / cwnd */ 142 142 tp->snd_cwnd_cnt += ca->ai + 1; 143 - if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 144 - tp->snd_cwnd_cnt -= tp->snd_cwnd; 145 - tp->snd_cwnd++; 143 + if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) { 144 + tp->snd_cwnd_cnt -= tcp_snd_cwnd(tp); 145 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); 146 146 } 147 147 } 148 148 } ··· 154 154 struct hstcp *ca = inet_csk_ca(sk); 155 155 156 156 /* Do multiplicative decrease */ 157 - return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); 157 + return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); 158 158 } 159 159 160 160 static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
+5 -5
net/ipv4/tcp_htcp.c
··· 124 124 125 125 ca->packetcount += sample->pkts_acked; 126 126 127 - if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) && 127 + if (ca->packetcount >= tcp_snd_cwnd(tp) - (ca->alpha >> 7 ? : 1) && 128 128 now - ca->lasttime >= ca->minRTT && 129 129 ca->minRTT > 0) { 130 130 __u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime); ··· 225 225 const struct htcp *ca = inet_csk_ca(sk); 226 226 227 227 htcp_param_update(sk); 228 - return max((tp->snd_cwnd * ca->beta) >> 7, 2U); 228 + return max((tcp_snd_cwnd(tp) * ca->beta) >> 7, 2U); 229 229 } 230 230 231 231 static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) ··· 242 242 /* In dangerous area, increase slowly. 243 243 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd 244 244 */ 245 - if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) { 246 - if (tp->snd_cwnd < tp->snd_cwnd_clamp) 247 - tp->snd_cwnd++; 245 + if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tcp_snd_cwnd(tp)) { 246 + if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) 247 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); 248 248 tp->snd_cwnd_cnt = 0; 249 249 htcp_alpha_update(ca); 250 250 } else
+9 -9
net/ipv4/tcp_hybla.c
··· 54 54 ca->rho2_7ls = 0; 55 55 ca->snd_cwnd_cents = 0; 56 56 ca->hybla_en = true; 57 - tp->snd_cwnd = 2; 57 + tcp_snd_cwnd_set(tp, 2); 58 58 tp->snd_cwnd_clamp = 65535; 59 59 60 60 /* 1st Rho measurement based on initial srtt */ ··· 62 62 63 63 /* set minimum rtt as this is the 1st ever seen */ 64 64 ca->minrtt_us = tp->srtt_us; 65 - tp->snd_cwnd = ca->rho; 65 + tcp_snd_cwnd_set(tp, ca->rho); 66 66 } 67 67 68 68 static void hybla_state(struct sock *sk, u8 ca_state) ··· 137 137 * as long as increment is estimated as (rho<<7)/window 138 138 * it already is <<7 and we can easily count its fractions. 139 139 */ 140 - increment = ca->rho2_7ls / tp->snd_cwnd; 140 + increment = ca->rho2_7ls / tcp_snd_cwnd(tp); 141 141 if (increment < 128) 142 142 tp->snd_cwnd_cnt++; 143 143 } 144 144 145 145 odd = increment % 128; 146 - tp->snd_cwnd += increment >> 7; 146 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + (increment >> 7)); 147 147 ca->snd_cwnd_cents += odd; 148 148 149 149 /* check when fractions goes >=128 and increase cwnd by 1. */ 150 150 while (ca->snd_cwnd_cents >= 128) { 151 - tp->snd_cwnd++; 151 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); 152 152 ca->snd_cwnd_cents -= 128; 153 153 tp->snd_cwnd_cnt = 0; 154 154 } 155 155 /* check when cwnd has not been incremented for a while */ 156 - if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) { 157 - tp->snd_cwnd++; 156 + if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) { 157 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); 158 158 tp->snd_cwnd_cnt = 0; 159 159 } 160 160 /* clamp down slowstart cwnd to ssthresh value. 
*/ 161 161 if (is_slowstart) 162 - tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh); 162 + tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh)); 163 163 164 - tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp); 164 + tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp)); 165 165 } 166 166 167 167 static struct tcp_congestion_ops tcp_hybla __read_mostly = {
+7 -5
net/ipv4/tcp_illinois.c
··· 224 224 struct tcp_sock *tp = tcp_sk(sk); 225 225 struct illinois *ca = inet_csk_ca(sk); 226 226 227 - if (tp->snd_cwnd < win_thresh) { 227 + if (tcp_snd_cwnd(tp) < win_thresh) { 228 228 ca->alpha = ALPHA_BASE; 229 229 ca->beta = BETA_BASE; 230 230 } else if (ca->cnt_rtt > 0) { ··· 284 284 * tp->snd_cwnd += alpha/tp->snd_cwnd 285 285 */ 286 286 delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT; 287 - if (delta >= tp->snd_cwnd) { 288 - tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd, 289 - (u32)tp->snd_cwnd_clamp); 287 + if (delta >= tcp_snd_cwnd(tp)) { 288 + tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp) + delta / tcp_snd_cwnd(tp), 289 + (u32)tp->snd_cwnd_clamp)); 290 290 tp->snd_cwnd_cnt = 0; 291 291 } 292 292 } ··· 296 296 { 297 297 struct tcp_sock *tp = tcp_sk(sk); 298 298 struct illinois *ca = inet_csk_ca(sk); 299 + u32 decr; 299 300 300 301 /* Multiplicative decrease */ 301 - return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); 302 + decr = (tcp_snd_cwnd(tp) * ca->beta) >> BETA_SHIFT; 303 + return max(tcp_snd_cwnd(tp) - decr, 2U); 302 304 } 303 305 304 306 /* Extract info for Tcp socket info provided via netlink. */
+18 -18
net/ipv4/tcp_input.c
··· 414 414 per_mss = roundup_pow_of_two(per_mss) + 415 415 SKB_DATA_ALIGN(sizeof(struct sk_buff)); 416 416 417 - nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd); 417 + nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp)); 418 418 nr_segs = max_t(u32, nr_segs, tp->reordering + 1); 419 419 420 420 /* Fast Recovery (RFC 5681 3.2) : ··· 909 909 * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching 910 910 * end of slow start and should slow down. 911 911 */ 912 - if (tp->snd_cwnd < tp->snd_ssthresh / 2) 912 + if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2) 913 913 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio; 914 914 else 915 915 rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio; 916 916 917 - rate *= max(tp->snd_cwnd, tp->packets_out); 917 + rate *= max(tcp_snd_cwnd(tp), tp->packets_out); 918 918 919 919 if (likely(tp->srtt_us)) 920 920 do_div(rate, tp->srtt_us); ··· 2147 2147 !after(tp->high_seq, tp->snd_una) || 2148 2148 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 2149 2149 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2150 - tp->prior_cwnd = tp->snd_cwnd; 2150 + tp->prior_cwnd = tcp_snd_cwnd(tp); 2151 2151 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 2152 2152 tcp_ca_event(sk, CA_EVENT_LOSS); 2153 2153 tcp_init_undo(tp); 2154 2154 } 2155 - tp->snd_cwnd = tcp_packets_in_flight(tp) + 1; 2155 + tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1); 2156 2156 tp->snd_cwnd_cnt = 0; 2157 2157 tp->snd_cwnd_stamp = tcp_jiffies32; 2158 2158 ··· 2458 2458 pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", 2459 2459 msg, 2460 2460 &inet->inet_daddr, ntohs(inet->inet_dport), 2461 - tp->snd_cwnd, tcp_left_out(tp), 2461 + tcp_snd_cwnd(tp), tcp_left_out(tp), 2462 2462 tp->snd_ssthresh, tp->prior_ssthresh, 2463 2463 tp->packets_out); 2464 2464 } ··· 2467 2467 pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", 2468 2468 msg, 2469 2469 &sk->sk_v6_daddr, ntohs(inet->inet_dport), 2470 - tp->snd_cwnd, tcp_left_out(tp), 2470 + 
tcp_snd_cwnd(tp), tcp_left_out(tp), 2471 2471 tp->snd_ssthresh, tp->prior_ssthresh, 2472 2472 tp->packets_out); 2473 2473 } ··· 2492 2492 if (tp->prior_ssthresh) { 2493 2493 const struct inet_connection_sock *icsk = inet_csk(sk); 2494 2494 2495 - tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); 2495 + tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk)); 2496 2496 2497 2497 if (tp->prior_ssthresh > tp->snd_ssthresh) { 2498 2498 tp->snd_ssthresh = tp->prior_ssthresh; ··· 2599 2599 tp->high_seq = tp->snd_nxt; 2600 2600 tp->tlp_high_seq = 0; 2601 2601 tp->snd_cwnd_cnt = 0; 2602 - tp->prior_cwnd = tp->snd_cwnd; 2602 + tp->prior_cwnd = tcp_snd_cwnd(tp); 2603 2603 tp->prr_delivered = 0; 2604 2604 tp->prr_out = 0; 2605 2605 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); ··· 2629 2629 } 2630 2630 /* Force a fast retransmit upon entering fast recovery */ 2631 2631 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1)); 2632 - tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; 2632 + tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt); 2633 2633 } 2634 2634 2635 2635 static inline void tcp_end_cwnd_reduction(struct sock *sk) ··· 2642 2642 /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ 2643 2643 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && 2644 2644 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { 2645 - tp->snd_cwnd = tp->snd_ssthresh; 2645 + tcp_snd_cwnd_set(tp, tp->snd_ssthresh); 2646 2646 tp->snd_cwnd_stamp = tcp_jiffies32; 2647 2647 } 2648 2648 tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); ··· 2709 2709 2710 2710 /* FIXME: breaks with very large cwnd */ 2711 2711 tp->prior_ssthresh = tcp_current_ssthresh(sk); 2712 - tp->snd_cwnd = tp->snd_cwnd * 2713 - tcp_mss_to_mtu(sk, tp->mss_cache) / 2714 - icsk->icsk_mtup.probe_size; 2712 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) * 2713 + tcp_mss_to_mtu(sk, tp->mss_cache) / 2714 + icsk->icsk_mtup.probe_size); 2715 2715 tp->snd_cwnd_cnt = 0; 2716 2716 tp->snd_cwnd_stamp = tcp_jiffies32; 
2717 2717 tp->snd_ssthresh = tcp_current_ssthresh(sk); ··· 3034 3034 tp->snd_una == tp->mtu_probe.probe_seq_start) { 3035 3035 tcp_mtup_probe_failed(sk); 3036 3036 /* Restores the reduction we did in tcp_mtup_probe() */ 3037 - tp->snd_cwnd++; 3037 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); 3038 3038 tcp_simple_retransmit(sk); 3039 3039 return; 3040 3040 } ··· 5436 5436 return false; 5437 5437 5438 5438 /* If we filled the congestion window, do not expand. */ 5439 - if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 5439 + if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)) 5440 5440 return false; 5441 5441 5442 5442 return true; ··· 5998 5998 * retransmission has occurred. 5999 5999 */ 6000 6000 if (tp->total_retrans > 1 && tp->undo_marker) 6001 - tp->snd_cwnd = 1; 6001 + tcp_snd_cwnd_set(tp, 1); 6002 6002 else 6003 - tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); 6003 + tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk))); 6004 6004 tp->snd_cwnd_stamp = tcp_jiffies32; 6005 6005 6006 6006 bpf_skops_established(sk, bpf_op, skb);
+1 -1
net/ipv4/tcp_ipv4.c
··· 2621 2621 jiffies_to_clock_t(icsk->icsk_rto), 2622 2622 jiffies_to_clock_t(icsk->icsk_ack.ato), 2623 2623 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk), 2624 - tp->snd_cwnd, 2624 + tcp_snd_cwnd(tp), 2625 2625 state == TCP_LISTEN ? 2626 2626 fastopenq->max_qlen : 2627 2627 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
+3 -3
net/ipv4/tcp_lp.c
··· 297 297 lp->flag &= ~LP_WITHIN_THR; 298 298 299 299 pr_debug("TCP-LP: %05o|%5u|%5u|%15u|%15u|%15u\n", lp->flag, 300 - tp->snd_cwnd, lp->remote_hz, lp->owd_min, lp->owd_max, 300 + tcp_snd_cwnd(tp), lp->remote_hz, lp->owd_min, lp->owd_max, 301 301 lp->sowd >> 3); 302 302 303 303 if (lp->flag & LP_WITHIN_THR) ··· 313 313 /* happened within inference 314 314 * drop snd_cwnd into 1 */ 315 315 if (lp->flag & LP_WITHIN_INF) 316 - tp->snd_cwnd = 1U; 316 + tcp_snd_cwnd_set(tp, 1U); 317 317 318 318 /* happened after inference 319 319 * cut snd_cwnd into half */ 320 320 else 321 - tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U); 321 + tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp) >> 1U, 1U)); 322 322 323 323 /* record this drop time */ 324 324 lp->last_drop = now;
+6 -6
net/ipv4/tcp_metrics.c
··· 388 388 if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save && 389 389 !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) { 390 390 val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH); 391 - if (val && (tp->snd_cwnd >> 1) > val) 391 + if (val && (tcp_snd_cwnd(tp) >> 1) > val) 392 392 tcp_metric_set(tm, TCP_METRIC_SSTHRESH, 393 - tp->snd_cwnd >> 1); 393 + tcp_snd_cwnd(tp) >> 1); 394 394 } 395 395 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) { 396 396 val = tcp_metric_get(tm, TCP_METRIC_CWND); 397 - if (tp->snd_cwnd > val) 397 + if (tcp_snd_cwnd(tp) > val) 398 398 tcp_metric_set(tm, TCP_METRIC_CWND, 399 - tp->snd_cwnd); 399 + tcp_snd_cwnd(tp)); 400 400 } 401 401 } else if (!tcp_in_slow_start(tp) && 402 402 icsk->icsk_ca_state == TCP_CA_Open) { ··· 404 404 if (!net->ipv4.sysctl_tcp_no_ssthresh_metrics_save && 405 405 !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) 406 406 tcp_metric_set(tm, TCP_METRIC_SSTHRESH, 407 - max(tp->snd_cwnd >> 1, tp->snd_ssthresh)); 407 + max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh)); 408 408 if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) { 409 409 val = tcp_metric_get(tm, TCP_METRIC_CWND); 410 - tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1); 410 + tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1); 411 411 } 412 412 } else { 413 413 /* Else slow start did not finish, cwnd is non-sense,
+12 -12
net/ipv4/tcp_nv.c
··· 197 197 } 198 198 199 199 if (ca->cwnd_growth_factor < 0) { 200 - cnt = tp->snd_cwnd << -ca->cwnd_growth_factor; 200 + cnt = tcp_snd_cwnd(tp) << -ca->cwnd_growth_factor; 201 201 tcp_cong_avoid_ai(tp, cnt, acked); 202 202 } else { 203 - cnt = max(4U, tp->snd_cwnd >> ca->cwnd_growth_factor); 203 + cnt = max(4U, tcp_snd_cwnd(tp) >> ca->cwnd_growth_factor); 204 204 tcp_cong_avoid_ai(tp, cnt, acked); 205 205 } 206 206 } ··· 209 209 { 210 210 const struct tcp_sock *tp = tcp_sk(sk); 211 211 212 - return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U); 212 + return max((tcp_snd_cwnd(tp) * nv_loss_dec_factor) >> 10, 2U); 213 213 } 214 214 215 215 static void tcpnv_state(struct sock *sk, u8 new_state) ··· 257 257 return; 258 258 259 259 /* Stop cwnd growth if we were in catch up mode */ 260 - if (ca->nv_catchup && tp->snd_cwnd >= nv_min_cwnd) { 260 + if (ca->nv_catchup && tcp_snd_cwnd(tp) >= nv_min_cwnd) { 261 261 ca->nv_catchup = 0; 262 262 ca->nv_allow_cwnd_growth = 0; 263 263 } ··· 371 371 * if cwnd < max_win, grow cwnd 372 372 * else leave the same 373 373 */ 374 - if (tp->snd_cwnd > max_win) { 374 + if (tcp_snd_cwnd(tp) > max_win) { 375 375 /* there is congestion, check that it is ok 376 376 * to make a CA decision 377 377 * 1. 
We should have at least nv_dec_eval_min_calls ··· 398 398 ca->nv_allow_cwnd_growth = 0; 399 399 tp->snd_ssthresh = 400 400 (nv_ssthresh_factor * max_win) >> 3; 401 - if (tp->snd_cwnd - max_win > 2) { 401 + if (tcp_snd_cwnd(tp) - max_win > 2) { 402 402 /* gap > 2, we do exponential cwnd decrease */ 403 403 int dec; 404 404 405 - dec = max(2U, ((tp->snd_cwnd - max_win) * 405 + dec = max(2U, ((tcp_snd_cwnd(tp) - max_win) * 406 406 nv_cong_dec_mult) >> 7); 407 - tp->snd_cwnd -= dec; 407 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - dec); 408 408 } else if (nv_cong_dec_mult > 0) { 409 - tp->snd_cwnd = max_win; 409 + tcp_snd_cwnd_set(tp, max_win); 410 410 } 411 411 if (ca->cwnd_growth_factor > 0) 412 412 ca->cwnd_growth_factor = 0; 413 413 ca->nv_no_cong_cnt = 0; 414 - } else if (tp->snd_cwnd <= max_win - nv_pad_buffer) { 414 + } else if (tcp_snd_cwnd(tp) <= max_win - nv_pad_buffer) { 415 415 /* There is no congestion, grow cwnd if allowed*/ 416 416 if (ca->nv_eval_call_cnt < nv_inc_eval_min_calls) 417 417 return; ··· 444 444 * (it wasn't before, if it is now is because nv 445 445 * decreased it). 446 446 */ 447 - if (tp->snd_cwnd < nv_min_cwnd) 448 - tp->snd_cwnd = nv_min_cwnd; 447 + if (tcp_snd_cwnd(tp) < nv_min_cwnd) 448 + tcp_snd_cwnd_set(tp, nv_min_cwnd); 449 449 } 450 450 } 451 451
+15 -15
net/ipv4/tcp_output.c
··· 142 142 { 143 143 struct tcp_sock *tp = tcp_sk(sk); 144 144 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); 145 - u32 cwnd = tp->snd_cwnd; 145 + u32 cwnd = tcp_snd_cwnd(tp); 146 146 147 147 tcp_ca_event(sk, CA_EVENT_CWND_RESTART); 148 148 ··· 151 151 152 152 while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd) 153 153 cwnd >>= 1; 154 - tp->snd_cwnd = max(cwnd, restart_cwnd); 154 + tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd)); 155 155 tp->snd_cwnd_stamp = tcp_jiffies32; 156 156 tp->snd_cwnd_used = 0; 157 157 } ··· 1013 1013 struct tcp_sock *tp = tcp_sk(sk); 1014 1014 1015 1015 if (tp->lost_out > tp->retrans_out && 1016 - tp->snd_cwnd > tcp_packets_in_flight(tp)) { 1016 + tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) { 1017 1017 tcp_mstamp_refresh(tp); 1018 1018 tcp_xmit_retransmit_queue(sk); 1019 1019 } ··· 1860 1860 /* Limited by application or receiver window. */ 1861 1861 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); 1862 1862 u32 win_used = max(tp->snd_cwnd_used, init_win); 1863 - if (win_used < tp->snd_cwnd) { 1863 + if (win_used < tcp_snd_cwnd(tp)) { 1864 1864 tp->snd_ssthresh = tcp_current_ssthresh(sk); 1865 - tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1; 1865 + tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1); 1866 1866 } 1867 1867 tp->snd_cwnd_used = 0; 1868 1868 } ··· 2043 2043 return 1; 2044 2044 2045 2045 in_flight = tcp_packets_in_flight(tp); 2046 - cwnd = tp->snd_cwnd; 2046 + cwnd = tcp_snd_cwnd(tp); 2047 2047 if (in_flight >= cwnd) 2048 2048 return 0; 2049 2049 ··· 2196 2196 in_flight = tcp_packets_in_flight(tp); 2197 2197 2198 2198 BUG_ON(tcp_skb_pcount(skb) <= 1); 2199 - BUG_ON(tp->snd_cwnd <= in_flight); 2199 + BUG_ON(tcp_snd_cwnd(tp) <= in_flight); 2200 2200 2201 2201 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; 2202 2202 2203 2203 /* From in_flight test above, we know that cwnd > in_flight. 
*/ 2204 - cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache; 2204 + cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; 2205 2205 2206 2206 limit = min(send_win, cong_win); 2207 2207 ··· 2215 2215 2216 2216 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); 2217 2217 if (win_divisor) { 2218 - u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 2218 + u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); 2219 2219 2220 2220 /* If at least some fraction of a window is available, 2221 2221 * just use it. ··· 2345 2345 if (likely(!icsk->icsk_mtup.enabled || 2346 2346 icsk->icsk_mtup.probe_size || 2347 2347 inet_csk(sk)->icsk_ca_state != TCP_CA_Open || 2348 - tp->snd_cwnd < 11 || 2348 + tcp_snd_cwnd(tp) < 11 || 2349 2349 tp->rx_opt.num_sacks || tp->rx_opt.dsack)) 2350 2350 return -1; 2351 2351 ··· 2381 2381 return 0; 2382 2382 2383 2383 /* Do we need to wait to drain cwnd? With none in flight, don't stall */ 2384 - if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) { 2384 + if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) { 2385 2385 if (!tcp_packets_in_flight(tp)) 2386 2386 return -1; 2387 2387 else ··· 2450 2450 if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) { 2451 2451 /* Decrement cwnd here because we are sending 2452 2452 * effectively two packets. 
*/ 2453 - tp->snd_cwnd--; 2453 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); 2454 2454 tcp_event_new_data_sent(sk, nskb); 2455 2455 2456 2456 icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); ··· 2708 2708 else 2709 2709 tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED); 2710 2710 2711 - is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd); 2711 + is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)); 2712 2712 if (likely(sent_pkts || is_cwnd_limited)) 2713 2713 tcp_cwnd_validate(sk, is_cwnd_limited); 2714 2714 ··· 2818 2818 if (unlikely(!skb)) { 2819 2819 WARN_ONCE(tp->packets_out, 2820 2820 "invalid inflight: %u state %u cwnd %u mss %d\n", 2821 - tp->packets_out, sk->sk_state, tp->snd_cwnd, mss); 2821 + tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss); 2822 2822 inet_csk(sk)->icsk_pending = 0; 2823 2823 return; 2824 2824 } ··· 3302 3302 if (!hole) 3303 3303 tp->retransmit_skb_hint = skb; 3304 3304 3305 - segs = tp->snd_cwnd - tcp_packets_in_flight(tp); 3305 + segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); 3306 3306 if (segs <= 0) 3307 3307 break; 3308 3308 sacked = TCP_SKB_CB(skb)->sacked;
+1 -1
net/ipv4/tcp_rate.c
··· 195 195 /* Nothing in sending host's qdisc queues or NIC tx queue. */ 196 196 sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) && 197 197 /* We are not limited by CWND. */ 198 - tcp_packets_in_flight(tp) < tp->snd_cwnd && 198 + tcp_packets_in_flight(tp) < tcp_snd_cwnd(tp) && 199 199 /* All lost packets have been retransmitted. */ 200 200 tp->lost_out <= tp->retrans_out) 201 201 tp->app_limited =
+2 -2
net/ipv4/tcp_scalable.c
··· 27 27 if (!acked) 28 28 return; 29 29 } 30 - tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), 30 + tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT), 31 31 acked); 32 32 } 33 33 ··· 35 35 { 36 36 const struct tcp_sock *tp = tcp_sk(sk); 37 37 38 - return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); 38 + return max(tcp_snd_cwnd(tp) - (tcp_snd_cwnd(tp)>>TCP_SCALABLE_MD_SCALE), 2U); 39 39 } 40 40 41 41 static struct tcp_congestion_ops tcp_scalable __read_mostly = {
+11 -10
net/ipv4/tcp_vegas.c
··· 159 159 160 160 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp) 161 161 { 162 - return min(tp->snd_ssthresh, tp->snd_cwnd); 162 + return min(tp->snd_ssthresh, tcp_snd_cwnd(tp)); 163 163 } 164 164 165 165 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked) ··· 217 217 * This is: 218 218 * (actual rate in segments) * baseRTT 219 219 */ 220 - target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT; 220 + target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT; 221 221 do_div(target_cwnd, rtt); 222 222 223 223 /* Calculate the difference between the window we had, 224 224 * and the window we would like to have. This quantity 225 225 * is the "Diff" from the Arizona Vegas papers. 226 226 */ 227 - diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT; 227 + diff = tcp_snd_cwnd(tp) * (rtt-vegas->baseRTT) / vegas->baseRTT; 228 228 229 229 if (diff > gamma && tcp_in_slow_start(tp)) { 230 230 /* Going too fast. Time to slow down ··· 238 238 * truncation robs us of full link 239 239 * utilization. 240 240 */ 241 - tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1); 241 + tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), 242 + (u32)target_cwnd + 1)); 242 243 tp->snd_ssthresh = tcp_vegas_ssthresh(tp); 243 244 244 245 } else if (tcp_in_slow_start(tp)) { ··· 255 254 /* The old window was too fast, so 256 255 * we slow down. 257 256 */ 258 - tp->snd_cwnd--; 257 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); 259 258 tp->snd_ssthresh 260 259 = tcp_vegas_ssthresh(tp); 261 260 } else if (diff < alpha) { 262 261 /* We don't have enough extra packets 263 262 * in the network, so speed up. 264 263 */ 265 - tp->snd_cwnd++; 264 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); 266 265 } else { 267 266 /* Sending just as fast as we 268 267 * should be. 
··· 270 269 } 271 270 } 272 271 273 - if (tp->snd_cwnd < 2) 274 - tp->snd_cwnd = 2; 275 - else if (tp->snd_cwnd > tp->snd_cwnd_clamp) 276 - tp->snd_cwnd = tp->snd_cwnd_clamp; 272 + if (tcp_snd_cwnd(tp) < 2) 273 + tcp_snd_cwnd_set(tp, 2); 274 + else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp) 275 + tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp); 277 276 278 277 tp->snd_ssthresh = tcp_current_ssthresh(sk); 279 278 }
+12 -12
net/ipv4/tcp_veno.c
··· 146 146 147 147 rtt = veno->minrtt; 148 148 149 - target_cwnd = (u64)tp->snd_cwnd * veno->basertt; 149 + target_cwnd = (u64)tcp_snd_cwnd(tp) * veno->basertt; 150 150 target_cwnd <<= V_PARAM_SHIFT; 151 151 do_div(target_cwnd, rtt); 152 152 153 - veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd; 153 + veno->diff = (tcp_snd_cwnd(tp) << V_PARAM_SHIFT) - target_cwnd; 154 154 155 155 if (tcp_in_slow_start(tp)) { 156 156 /* Slow start. */ ··· 164 164 /* In the "non-congestive state", increase cwnd 165 165 * every rtt. 166 166 */ 167 - tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked); 167 + tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); 168 168 } else { 169 169 /* In the "congestive state", increase cwnd 170 170 * every other rtt. 171 171 */ 172 - if (tp->snd_cwnd_cnt >= tp->snd_cwnd) { 172 + if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) { 173 173 if (veno->inc && 174 - tp->snd_cwnd < tp->snd_cwnd_clamp) { 175 - tp->snd_cwnd++; 174 + tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) { 175 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); 176 176 veno->inc = 0; 177 177 } else 178 178 veno->inc = 1; ··· 181 181 tp->snd_cwnd_cnt += acked; 182 182 } 183 183 done: 184 - if (tp->snd_cwnd < 2) 185 - tp->snd_cwnd = 2; 186 - else if (tp->snd_cwnd > tp->snd_cwnd_clamp) 187 - tp->snd_cwnd = tp->snd_cwnd_clamp; 184 + if (tcp_snd_cwnd(tp) < 2) 185 + tcp_snd_cwnd_set(tp, 2); 186 + else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp) 187 + tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp); 188 188 } 189 189 /* Wipe the slate clean for the next rtt. */ 190 190 /* veno->cntrtt = 0; */ ··· 199 199 200 200 if (veno->diff < beta) 201 201 /* in "non-congestive state", cut cwnd by 1/5 */ 202 - return max(tp->snd_cwnd * 4 / 5, 2U); 202 + return max(tcp_snd_cwnd(tp) * 4 / 5, 2U); 203 203 else 204 204 /* in "congestive state", cut cwnd by 1/2 */ 205 - return max(tp->snd_cwnd >> 1U, 2U); 205 + return max(tcp_snd_cwnd(tp) >> 1U, 2U); 206 206 } 207 207 208 208 static struct tcp_congestion_ops tcp_veno __read_mostly = {
+2 -1
net/ipv4/tcp_westwood.c
··· 244 244 245 245 switch (event) { 246 246 case CA_EVENT_COMPLETE_CWR: 247 - tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); 247 + tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk); 248 + tcp_snd_cwnd_set(tp, tp->snd_ssthresh); 248 249 break; 249 250 case CA_EVENT_LOSS: 250 251 tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+15 -15
net/ipv4/tcp_yeah.c
··· 71 71 72 72 if (!yeah->doing_reno_now) { 73 73 /* Scalable */ 74 - tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT), 74 + tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT), 75 75 acked); 76 76 } else { 77 77 /* Reno */ 78 - tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked); 78 + tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked); 79 79 } 80 80 81 81 /* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt. ··· 130 130 /* Compute excess number of packets above bandwidth 131 131 * Avoid doing full 64 bit divide. 132 132 */ 133 - bw = tp->snd_cwnd; 133 + bw = tcp_snd_cwnd(tp); 134 134 bw *= rtt - yeah->vegas.baseRTT; 135 135 do_div(bw, rtt); 136 136 queue = bw; ··· 138 138 if (queue > TCP_YEAH_ALPHA || 139 139 rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) { 140 140 if (queue > TCP_YEAH_ALPHA && 141 - tp->snd_cwnd > yeah->reno_count) { 141 + tcp_snd_cwnd(tp) > yeah->reno_count) { 142 142 u32 reduction = min(queue / TCP_YEAH_GAMMA , 143 - tp->snd_cwnd >> TCP_YEAH_EPSILON); 143 + tcp_snd_cwnd(tp) >> TCP_YEAH_EPSILON); 144 144 145 - tp->snd_cwnd -= reduction; 145 + tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - reduction); 146 146 147 - tp->snd_cwnd = max(tp->snd_cwnd, 148 - yeah->reno_count); 147 + tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), 148 + yeah->reno_count)); 149 149 150 - tp->snd_ssthresh = tp->snd_cwnd; 150 + tp->snd_ssthresh = tcp_snd_cwnd(tp); 151 151 } 152 152 153 153 if (yeah->reno_count <= 2) 154 - yeah->reno_count = max(tp->snd_cwnd>>1, 2U); 154 + yeah->reno_count = max(tcp_snd_cwnd(tp)>>1, 2U); 155 155 else 156 156 yeah->reno_count++; 157 157 ··· 176 176 */ 177 177 yeah->vegas.beg_snd_una = yeah->vegas.beg_snd_nxt; 178 178 yeah->vegas.beg_snd_nxt = tp->snd_nxt; 179 - yeah->vegas.beg_snd_cwnd = tp->snd_cwnd; 179 + yeah->vegas.beg_snd_cwnd = tcp_snd_cwnd(tp); 180 180 181 181 /* Wipe the slate clean for the next RTT. 
*/ 182 182 yeah->vegas.cntRTT = 0; ··· 193 193 if (yeah->doing_reno_now < TCP_YEAH_RHO) { 194 194 reduction = yeah->lastQ; 195 195 196 - reduction = min(reduction, max(tp->snd_cwnd>>1, 2U)); 196 + reduction = min(reduction, max(tcp_snd_cwnd(tp)>>1, 2U)); 197 197 198 - reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA); 198 + reduction = max(reduction, tcp_snd_cwnd(tp) >> TCP_YEAH_DELTA); 199 199 } else 200 - reduction = max(tp->snd_cwnd>>1, 2U); 200 + reduction = max(tcp_snd_cwnd(tp)>>1, 2U); 201 201 202 202 yeah->fast_count = 0; 203 203 yeah->reno_count = max(yeah->reno_count>>1, 2U); 204 204 205 - return max_t(int, tp->snd_cwnd - reduction, 2); 205 + return max_t(int, tcp_snd_cwnd(tp) - reduction, 2); 206 206 } 207 207 208 208 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
+1 -1
net/ipv6/tcp_ipv6.c
··· 2044 2044 jiffies_to_clock_t(icsk->icsk_rto), 2045 2045 jiffies_to_clock_t(icsk->icsk_ack.ato), 2046 2046 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp), 2047 - tp->snd_cwnd, 2047 + tcp_snd_cwnd(tp), 2048 2048 state == TCP_LISTEN ? 2049 2049 fastopenq->max_qlen : 2050 2050 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)