Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: avoid possible arithmetic overflows

icsk_rto is a 32bit field, and icsk_backoff can reach 15 by default,
or more if some sysctls (e.g. tcp_retries2) are changed.

Better to use 64bit arithmetic to perform the icsk_rto << icsk_backoff operations.

As Joe Perches suggested, add a helper for this.

Yuchung spotted the tcp_v4_err() case.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Eric Dumazet and committed by
David S. Miller
fcdd1cf4 35f7aa53

+23 -14
+9
include/net/inet_connection_sock.h
··· 242 242 #endif 243 243 } 244 244 245 + static inline unsigned long 246 + inet_csk_rto_backoff(const struct inet_connection_sock *icsk, 247 + unsigned long max_when) 248 + { 249 + u64 when = (u64)icsk->icsk_rto << icsk->icsk_backoff; 250 + 251 + return (unsigned long)min_t(u64, when, max_when); 252 + } 253 + 245 254 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err); 246 255 247 256 struct request_sock *inet_csk_search_req(const struct sock *sk,
+3 -2
net/ipv4/tcp_input.c
··· 3208 3208 * This function is not for random using! 3209 3209 */ 3210 3210 } else { 3211 + unsigned long when = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); 3212 + 3211 3213 inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3212 - min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 3213 - TCP_RTO_MAX); 3214 + when, TCP_RTO_MAX); 3214 3215 } 3215 3216 } 3216 3217
+3 -3
net/ipv4/tcp_ipv4.c
··· 430 430 break; 431 431 432 432 icsk->icsk_backoff--; 433 - inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) : 434 - TCP_TIMEOUT_INIT) << icsk->icsk_backoff; 435 - tcp_bound_rto(sk); 433 + icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : 434 + TCP_TIMEOUT_INIT; 435 + icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); 436 436 437 437 skb = tcp_write_queue_head(sk); 438 438 BUG_ON(!skb);
+6 -7
net/ipv4/tcp_output.c
··· 3279 3279 { 3280 3280 struct inet_connection_sock *icsk = inet_csk(sk); 3281 3281 struct tcp_sock *tp = tcp_sk(sk); 3282 + unsigned long probe_max; 3282 3283 int err; 3283 3284 3284 3285 err = tcp_write_wakeup(sk); ··· 3295 3294 if (icsk->icsk_backoff < sysctl_tcp_retries2) 3296 3295 icsk->icsk_backoff++; 3297 3296 icsk->icsk_probes_out++; 3298 - inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3299 - min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), 3300 - TCP_RTO_MAX); 3297 + probe_max = TCP_RTO_MAX; 3301 3298 } else { 3302 3299 /* If packet was not sent due to local congestion, 3303 3300 * do not backoff and do not remember icsk_probes_out. ··· 3305 3306 */ 3306 3307 if (!icsk->icsk_probes_out) 3307 3308 icsk->icsk_probes_out = 1; 3308 - inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3309 - min(icsk->icsk_rto << icsk->icsk_backoff, 3310 - TCP_RESOURCE_PROBE_INTERVAL), 3311 - TCP_RTO_MAX); 3309 + probe_max = TCP_RESOURCE_PROBE_INTERVAL; 3312 3310 } 3311 + inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, 3312 + inet_csk_rto_backoff(icsk, probe_max), 3313 + TCP_RTO_MAX); 3313 3314 } 3314 3315 3315 3316 int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
+2 -2
net/ipv4/tcp_timer.c
··· 180 180 181 181 retry_until = sysctl_tcp_retries2; 182 182 if (sock_flag(sk, SOCK_DEAD)) { 183 - const int alive = (icsk->icsk_rto < TCP_RTO_MAX); 183 + const int alive = icsk->icsk_rto < TCP_RTO_MAX; 184 184 185 185 retry_until = tcp_orphan_retries(sk, alive); 186 186 do_reset = alive || ··· 294 294 max_probes = sysctl_tcp_retries2; 295 295 296 296 if (sock_flag(sk, SOCK_DEAD)) { 297 - const int alive = ((icsk->icsk_rto << icsk->icsk_backoff) < TCP_RTO_MAX); 297 + const int alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX; 298 298 299 299 max_probes = tcp_orphan_retries(sk, alive); 300 300