[TCP]: Eliminate redundant computations in tcp_write_xmit().

tcp_snd_test() is re-run in full for every packet output by a
single call to tcp_write_xmit(), but this is not necessary.
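For context, the per-packet check decomposes into three independent
tests, which this patch calls separately. A rough sketch of that
decomposition (illustrative only, not the kernel's exact code; the
helper names match the ones the patch uses below):

    /* Illustrative sketch of what tcp_snd_test() folds together. */
    static inline int tcp_snd_test_sketch(struct sock *sk, struct sk_buff *skb,
                                          unsigned int mss_now, int nonagle)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            return tcp_cwnd_test(tp, skb) >= tcp_skb_pcount(skb) && /* cwnd space */
                   tcp_nagle_test(tp, skb, mss_now, nonagle) &&     /* Nagle rules */
                   tcp_snd_wnd_test(tp, skb, mss_now);              /* peer's window */
    }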

For one, the available congestion window space need only be
calculated once, then drawn down over the course of the loop.
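In outline, the rewritten loop computes that quota once up front and
pays it down per transmitted packet (a simplified excerpt of the diff
below, not a complete listing):

    cwnd_quota = tcp_cwnd_test(tp, skb);    /* computed once, before the loop */
    while (cwnd_quota >= tso_segs) {
            /* ... Nagle test, send-window test, transmit ... */
            cwnd_quota -= tcp_skb_pcount(skb);      /* pay down the quota */
            skb = sk->sk_send_head;
            if (!skb)
                    break;
            tso_segs = tcp_init_tso_segs(sk, skb);
    }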

This cleanup also makes experimenting with different TSO
packetization schemes much easier.

Signed-off-by: David S. Miller <davem@davemloft.net>

+31 -9
net/ipv4/tcp_output.c
···
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb;
+        unsigned int tso_segs, cwnd_quota;
         int sent_pkts;

         /* If we are closed, the bytes will have to remain here.
···
         if (unlikely(sk->sk_state == TCP_CLOSE))
                 return 0;

+        skb = sk->sk_send_head;
+        if (unlikely(!skb))
+                return 0;
+
+        tso_segs = tcp_init_tso_segs(sk, skb);
+        cwnd_quota = tcp_cwnd_test(tp, skb);
         sent_pkts = 0;
-        while ((skb = sk->sk_send_head) &&
-               tcp_snd_test(sk, skb, mss_now,
-                            tcp_skb_is_last(sk, skb) ? nonagle :
-                                                       TCP_NAGLE_PUSH)) {
-                if (skb->len > mss_now) {
-                        if (tcp_fragment(sk, skb, mss_now))
+
+        while (cwnd_quota >= tso_segs) {
+                if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
+                                             (tcp_skb_is_last(sk, skb) ?
+                                              nonagle : TCP_NAGLE_PUSH))))
+                        break;
+
+                if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
+                        break;
+
+                if (unlikely(skb->len > mss_now)) {
+                        if (unlikely(tcp_fragment(sk, skb, mss_now)))
                                 break;
                 }

                 TCP_SKB_CB(skb)->when = tcp_time_stamp;
                 tcp_tso_set_push(skb);
-                if (tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)))
+                if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
                         break;

                 /* Advance the send_head. This one is sent out.
···
                 update_send_head(sk, tp, skb);

                 tcp_minshall_update(tp, mss_now, skb);
-                sent_pkts = 1;
+                sent_pkts++;
+
+                /* Do not optimize this to use tso_segs. If we chopped up
+                 * the packet above, tso_segs will no longer be valid.
+                 */
+                cwnd_quota -= tcp_skb_pcount(skb);
+                skb = sk->sk_send_head;
+                if (!skb)
+                        break;
+                tso_segs = tcp_init_tso_segs(sk, skb);
         }

-        if (sent_pkts) {
+        if (likely(sent_pkts)) {
                 tcp_cwnd_validate(sk, tp);
                 return 0;
         }
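Note the quota is decremented by tcp_skb_pcount(skb) rather than by
the cached tso_segs, since tcp_fragment() may have changed the
packet's segment count after tso_segs was read; that is what the
in-line comment in the loop warns about. A minimal standalone model
of the loop's accounting, where every type and value is a
hypothetical stand-in rather than kernel code:

    #include <stdio.h>

    /* Hypothetical stand-in for a queued packet; 'segs' models
     * what tcp_skb_pcount() would report at transmit time.
     */
    struct pkt {
            unsigned int segs;
    };

    int main(void)
    {
            struct pkt queue[] = { {2}, {3}, {1}, {4} };
            unsigned int n = sizeof(queue) / sizeof(queue[0]);
            unsigned int cwnd_quota = 6;    /* computed once, before the loop */
            unsigned int i;

            for (i = 0; i < n; i++) {
                    unsigned int tso_segs = queue[i].segs;

                    if (cwnd_quota < tso_segs)
                            break;  /* no congestion window space left */

                    printf("sent packet %u (%u segs)\n", i, tso_segs);
                    cwnd_quota -= queue[i].segs;    /* pay down the quota */
            }
            printf("quota remaining: %u\n", cwnd_quota);
            return 0;
    }

With a quota of 6 the model sends the 2-, 3-, and 1-segment packets
and stops at the 4-segment one, mirroring how the real loop exits
once cwnd_quota drops below the next packet's segment count.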