[TCP]: Break out tcp_snd_test() into its constituent parts.

tcp_snd_test() does several different things; use inline
functions to express each of them more clearly.

1) It initializes the TSO count of SKB, if necessary.
2) It performs the Nagle test.
3) It makes sure the congestion window is adhered to.
4) It makes sure SKB fits into the send window.

This cleanup also sets things up so that quantities such as
the number of packets available in the congestion window do
not need to be calculated multiple times by packet sending
loops such as tcp_write_xmit().

Signed-off-by: David S. Miller <davem@davemloft.net>

+83 -39
+83 -39
net/ipv4/tcp_output.c
··· 434 434 } 435 435 } 436 436 437 + /* Does SKB fit into the send window? */ 438 + static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss) 439 + { 440 + u32 end_seq = TCP_SKB_CB(skb)->end_seq; 441 + 442 + return !after(end_seq, tp->snd_una + tp->snd_wnd); 443 + } 444 + 445 + /* Can at least one segment of SKB be sent right now, according to the 446 + * congestion window rules? If so, return how many segments are allowed. 447 + */ 448 + static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb) 449 + { 450 + u32 in_flight, cwnd; 451 + 452 + /* Don't be strict about the congestion window for the final FIN. */ 453 + if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) 454 + return 1; 455 + 456 + in_flight = tcp_packets_in_flight(tp); 457 + cwnd = tp->snd_cwnd; 458 + if (in_flight < cwnd) 459 + return (cwnd - in_flight); 460 + 461 + return 0; 462 + } 463 + 437 464 static inline int tcp_minshall_check(const struct tcp_sock *tp) 438 465 { 439 466 return after(tp->snd_sml,tp->snd_una) && ··· 469 442 470 443 /* Return 0, if packet can be sent now without violation Nagle's rules: 471 444 * 1. It is full sized. 472 - * 2. Or it contains FIN. 445 + * 2. Or it contains FIN. (already checked by caller) 473 446 * 3. Or TCP_NODELAY was set. 474 447 * 4. Or TCP_CORK is not set, and all sent packets are ACKed. 475 448 * With Minshall's modification: all sent small packets are ACKed. ··· 480 453 unsigned mss_now, int nonagle) 481 454 { 482 455 return (skb->len < mss_now && 483 - !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && 484 456 ((nonagle&TCP_NAGLE_CORK) || 485 457 (!nonagle && 486 458 tp->packets_out && 487 459 tcp_minshall_check(tp)))); 488 460 } 489 461 490 - /* This checks if the data bearing packet SKB (usually sk->sk_send_head) 491 - * should be put on the wire right now. 462 + /* Return non-zero if the Nagle test allows this packet to be 463 + * sent now. 
492 464 */ 493 - static int tcp_snd_test(struct sock *sk, struct sk_buff *skb, 494 - unsigned cur_mss, int nonagle) 465 + static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb, 466 + unsigned int cur_mss, int nonagle) 467 + { 468 + /* Nagle rule does not apply to frames, which sit in the middle of the 469 + * write_queue (they have no chances to get new data). 470 + * 471 + * This is implemented in the callers, where they modify the 'nonagle' 472 + * argument based upon the location of SKB in the send queue. 473 + */ 474 + if (nonagle & TCP_NAGLE_PUSH) 475 + return 1; 476 + 477 + /* Don't use the nagle rule for urgent data (or for the final FIN). */ 478 + if (tp->urg_mode || 479 + (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) 480 + return 1; 481 + 482 + if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) 483 + return 1; 484 + 485 + return 0; 486 + } 487 + 488 + /* This must be invoked the first time we consider transmitting 489 + * SKB onto the wire. 490 + */ 491 + static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb) 492 + { 493 + int tso_segs = tcp_skb_pcount(skb); 494 + 495 + if (!tso_segs) { 496 + tcp_set_skb_tso_segs(sk, skb); 497 + tso_segs = tcp_skb_pcount(skb); 498 + } 499 + return tso_segs; 500 + } 501 + 502 + /* This checks if the data bearing packet SKB (usually sk->sk_send_head) 503 + * should be put on the wire right now. If so, it returns the number of 504 + * packets allowed by the congestion window. 
505 + */ 506 + static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, 507 + unsigned int cur_mss, int nonagle) 495 508 { 496 509 struct tcp_sock *tp = tcp_sk(sk); 497 - int pkts = tcp_skb_pcount(skb); 510 + unsigned int cwnd_quota; 498 511 499 - if (!pkts) { 500 - tcp_set_skb_tso_segs(sk, skb); 501 - pkts = tcp_skb_pcount(skb); 502 - } 512 + tcp_init_tso_segs(sk, skb); 503 513 504 - /* RFC 1122 - section 4.2.3.4 505 - * 506 - * We must queue if 507 - * 508 - * a) The right edge of this frame exceeds the window 509 - * b) There are packets in flight and we have a small segment 510 - * [SWS avoidance and Nagle algorithm] 511 - * (part of SWS is done on packetization) 512 - * Minshall version sounds: there are no _small_ 513 - * segments in flight. (tcp_nagle_check) 514 - * c) We have too many packets 'in flight' 515 - * 516 - * Don't use the nagle rule for urgent data (or 517 - * for the final FIN -DaveM). 518 - * 519 - * Also, Nagle rule does not apply to frames, which 520 - * sit in the middle of queue (they have no chances 521 - * to get new data) and if room at tail of skb is 522 - * not enough to save something seriously (<32 for now). 523 - */ 514 + if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) 515 + return 0; 524 516 525 - /* Don't be strict about the congestion window for the 526 - * final FIN frame. -DaveM 527 - */ 528 - return (((nonagle&TCP_NAGLE_PUSH) || tp->urg_mode 529 - || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) && 530 - (((tcp_packets_in_flight(tp) + (pkts-1)) < tp->snd_cwnd) || 531 - (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) && 532 - !after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd)); 517 + cwnd_quota = tcp_cwnd_test(tp, skb); 518 + if (cwnd_quota && 519 + !tcp_snd_wnd_test(tp, skb, cur_mss)) 520 + cwnd_quota = 0; 521 + 522 + return cwnd_quota; 533 523 } 534 524 535 525 static inline int tcp_skb_is_last(const struct sock *sk,