Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tcp: introduce tcp_skb_timestamp_us() helper

There are a few places where TCP reads skb->skb_mstamp expecting
a value in usec units.

skb->tstamp (aka skb->skb_mstamp) will soon store CLOCK_TAI nsec value.

Add tcp_skb_timestamp_us() to provide proper conversion when needed.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Eric Dumazet and committed by
David S. Miller
2fd66ffb 72b0094f

+27 -18
+7 -1
include/net/tcp.h
··· 774 774 return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ); 775 775 } 776 776 777 + /* provide the departure time in us unit */ 778 + static inline u64 tcp_skb_timestamp_us(const struct sk_buff *skb) 779 + { 780 + return skb->skb_mstamp; 781 + } 782 + 777 783 778 784 #define tcp_flag_byte(th) (((u_int8_t *)th)[13]) 779 785 ··· 1946 1940 { 1947 1941 const struct sk_buff *skb = tcp_rtx_queue_head(sk); 1948 1942 u32 rto = inet_csk(sk)->icsk_rto; 1949 - u64 rto_time_stamp_us = skb->skb_mstamp + jiffies_to_usecs(rto); 1943 + u64 rto_time_stamp_us = tcp_skb_timestamp_us(skb) + jiffies_to_usecs(rto); 1950 1944 1951 1945 return rto_time_stamp_us - tcp_sk(sk)->tcp_mstamp; 1952 1946 }
+6 -5
net/ipv4/tcp_input.c
··· 1305 1305 */ 1306 1306 tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, 1307 1307 start_seq, end_seq, dup_sack, pcount, 1308 - skb->skb_mstamp); 1308 + tcp_skb_timestamp_us(skb)); 1309 1309 tcp_rate_skb_delivered(sk, skb, state->rate); 1310 1310 1311 1311 if (skb == tp->lost_skb_hint) ··· 1580 1580 TCP_SKB_CB(skb)->end_seq, 1581 1581 dup_sack, 1582 1582 tcp_skb_pcount(skb), 1583 - skb->skb_mstamp); 1583 + tcp_skb_timestamp_us(skb)); 1584 1584 tcp_rate_skb_delivered(sk, skb, state->rate); 1585 1585 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) 1586 1586 list_del_init(&skb->tcp_tsorted_anchor); ··· 3103 3103 tp->retrans_out -= acked_pcount; 3104 3104 flag |= FLAG_RETRANS_DATA_ACKED; 3105 3105 } else if (!(sacked & TCPCB_SACKED_ACKED)) { 3106 - last_ackt = skb->skb_mstamp; 3106 + last_ackt = tcp_skb_timestamp_us(skb); 3107 3107 WARN_ON_ONCE(last_ackt == 0); 3108 3108 if (!first_ackt) 3109 3109 first_ackt = last_ackt; ··· 3121 3121 tp->delivered += acked_pcount; 3122 3122 if (!tcp_skb_spurious_retrans(tp, skb)) 3123 3123 tcp_rack_advance(tp, sacked, scb->end_seq, 3124 - skb->skb_mstamp); 3124 + tcp_skb_timestamp_us(skb)); 3125 3125 } 3126 3126 if (sacked & TCPCB_LOST) 3127 3127 tp->lost_out -= acked_pcount; ··· 3215 3215 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); 3216 3216 } 3217 3217 } else if (skb && rtt_update && sack_rtt_us >= 0 && 3218 - sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp)) { 3218 + sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, 3219 + tcp_skb_timestamp_us(skb))) { 3219 3220 /* Do not re-arm RTO if the sack RTT is measured from data sent 3220 3221 * after when the head was last (re)transmitted. Otherwise the 3221 3222 * timeout may continue to extend in loss recovery.
+1 -1
net/ipv4/tcp_ipv4.c
··· 544 544 BUG_ON(!skb); 545 545 546 546 tcp_mstamp_refresh(tp); 547 - delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp); 547 + delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb)); 548 548 remaining = icsk->icsk_rto - 549 549 usecs_to_jiffies(delta_us); 550 550
+1 -1
net/ipv4/tcp_output.c
··· 1966 1966 head = tcp_rtx_queue_head(sk); 1967 1967 if (!head) 1968 1968 goto send_now; 1969 - age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp); 1969 + age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head)); 1970 1970 /* If next ACK is likely to come too late (half srtt), do not defer */ 1971 1971 if (age < (tp->srtt_us >> 4)) 1972 1972 goto send_now;
+9 -8
net/ipv4/tcp_rate.c
··· 55 55 * bandwidth estimate. 56 56 */ 57 57 if (!tp->packets_out) { 58 - tp->first_tx_mstamp = skb->skb_mstamp; 59 - tp->delivered_mstamp = skb->skb_mstamp; 58 + u64 tstamp_us = tcp_skb_timestamp_us(skb); 59 + 60 + tp->first_tx_mstamp = tstamp_us; 61 + tp->delivered_mstamp = tstamp_us; 60 62 } 61 63 62 64 TCP_SKB_CB(skb)->tx.first_tx_mstamp = tp->first_tx_mstamp; ··· 90 88 rs->is_app_limited = scb->tx.is_app_limited; 91 89 rs->is_retrans = scb->sacked & TCPCB_RETRANS; 92 90 93 - /* Find the duration of the "send phase" of this window: */ 94 - rs->interval_us = tcp_stamp_us_delta( 95 - skb->skb_mstamp, 96 - scb->tx.first_tx_mstamp); 97 - 98 91 /* Record send time of most recently ACKed packet: */ 99 - tp->first_tx_mstamp = skb->skb_mstamp; 92 + tp->first_tx_mstamp = tcp_skb_timestamp_us(skb); 93 + /* Find the duration of the "send phase" of this window: */ 94 + rs->interval_us = tcp_stamp_us_delta(tp->first_tx_mstamp, 95 + scb->tx.first_tx_mstamp); 96 + 100 97 } 101 98 /* Mark off the skb delivered once it's sacked to avoid being 102 99 * used again when it's cumulatively acked. For acked packets
+3 -2
net/ipv4/tcp_recovery.c
··· 50 50 s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb, u32 reo_wnd) 51 51 { 52 52 return tp->rack.rtt_us + reo_wnd - 53 - tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp); 53 + tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb)); 54 54 } 55 55 56 56 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01): ··· 91 91 !(scb->sacked & TCPCB_SACKED_RETRANS)) 92 92 continue; 93 93 94 - if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp, 94 + if (!tcp_rack_sent_after(tp->rack.mstamp, 95 + tcp_skb_timestamp_us(skb), 95 96 tp->rack.end_seq, scb->end_seq)) 96 97 break; 97 98