Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: tso: cache transport header length

Add tlen field into struct tso_t, and change tso_start()
to return skb_transport_offset(skb) + tso->tlen

This removes the need for callers to use tcp_hdrlen(skb) and
will ease the addition of UDP segmentation offload.

v2: calls tso_start() earlier in otx2_sq_append_tso() [Jakub]

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet; committed by David S. Miller.
Commit IDs: 761b331c 504b9121

+24 -22
+3 -2
drivers/net/ethernet/cavium/thunder/nicvf_queues.c
··· 1489 1489 int seg_subdescs = 0, desc_cnt = 0; 1490 1490 int seg_len, total_len, data_left; 1491 1491 int hdr_qentry = qentry; 1492 - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1492 + int hdr_len; 1493 1493 1494 - tso_start(skb, &tso); 1494 + hdr_len = tso_start(skb, &tso); 1495 + 1495 1496 total_len = skb->len - hdr_len; 1496 1497 while (total_len > 0) { 1497 1498 char *hdr;
+2 -3
drivers/net/ethernet/freescale/fec_main.c
··· 710 710 struct net_device *ndev) 711 711 { 712 712 struct fec_enet_private *fep = netdev_priv(ndev); 713 - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 714 - int total_len, data_left; 713 + int hdr_len, total_len, data_left; 715 714 struct bufdesc *bdp = txq->bd.cur; 716 715 struct tso_t tso; 717 716 unsigned int index = 0; ··· 730 731 } 731 732 732 733 /* Initialize the TSO handler, and prepare the first payload */ 733 - tso_start(skb, &tso); 734 + hdr_len = tso_start(skb, &tso); 734 735 735 736 total_len = skb->len - hdr_len; 736 737 while (total_len > 0) {
+2 -3
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 816 816 struct net_device *dev) 817 817 { 818 818 struct mv643xx_eth_private *mp = txq_to_mp(txq); 819 - int total_len, data_left, ret; 819 + int hdr_len, total_len, data_left, ret; 820 820 int desc_count = 0; 821 821 struct tso_t tso; 822 - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 823 822 struct tx_desc *first_tx_desc; 824 823 u32 first_cmd_sts = 0; 825 824 ··· 831 832 first_tx_desc = &txq->tx_desc_area[txq->tx_curr_desc]; 832 833 833 834 /* Initialize the TSO handler, and prepare the first payload */ 834 - tso_start(skb, &tso); 835 + hdr_len = tso_start(skb, &tso); 835 836 836 837 total_len = skb->len - hdr_len; 837 838 while (total_len > 0) {
+2 -3
drivers/net/ethernet/marvell/mvneta.c
··· 2604 2604 static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, 2605 2605 struct mvneta_tx_queue *txq) 2606 2606 { 2607 - int total_len, data_left; 2607 + int hdr_len, total_len, data_left; 2608 2608 int desc_count = 0; 2609 2609 struct mvneta_port *pp = netdev_priv(dev); 2610 2610 struct tso_t tso; 2611 - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 2612 2611 int i; 2613 2612 2614 2613 /* Count needed descriptors */ ··· 2620 2621 } 2621 2622 2622 2623 /* Initialize the TSO handler, and prepare the first payload */ 2623 - tso_start(skb, &tso); 2624 + hdr_len = tso_start(skb, &tso); 2624 2625 2625 2626 total_len = skb->len - hdr_len; 2626 2627 while (total_len > 0) {
+3 -3
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 3160 3160 struct mvpp2_txq_pcpu *txq_pcpu) 3161 3161 { 3162 3162 struct mvpp2_port *port = netdev_priv(dev); 3163 + int hdr_sz, i, len, descs = 0; 3163 3164 struct tso_t tso; 3164 - int hdr_sz = skb_transport_offset(skb) + tcp_hdrlen(skb); 3165 - int i, len, descs = 0; 3166 3165 3167 3166 /* Check number of available descriptors */ 3168 3167 if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) || ··· 3169 3170 tso_count_descs(skb))) 3170 3171 return 0; 3171 3172 3172 - tso_start(skb, &tso); 3173 + hdr_sz = tso_start(skb, &tso); 3174 + 3173 3175 len = skb->len - hdr_sz; 3174 3176 while (len > 0) { 3175 3177 int left = min_t(int, skb_shinfo(skb)->gso_size, len);
+3 -3
drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
··· 619 619 struct sk_buff *skb, u16 qidx) 620 620 { 621 621 struct netdev_queue *txq = netdev_get_tx_queue(pfvf->netdev, qidx); 622 - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 623 - int tcp_data, seg_len, pkt_len, offset; 622 + int hdr_len, tcp_data, seg_len, pkt_len, offset; 624 623 struct nix_sqe_hdr_s *sqe_hdr; 625 624 int first_sqe = sq->head; 626 625 struct sg_list list; 627 626 struct tso_t tso; 627 + 628 + hdr_len = tso_start(skb, &tso); 628 629 629 630 /* Map SKB's fragments to DMA. 630 631 * It's done here to avoid mapping for every TSO segment's packet. ··· 637 636 638 637 netdev_tx_sent_queue(txq, skb->len); 639 638 640 - tso_start(skb, &tso); 641 639 tcp_data = skb->len - hdr_len; 642 640 while (tcp_data > 0) { 643 641 char *hdr;
+2 -1
include/net/tso.h
··· 11 11 int size; 12 12 void *data; 13 13 u16 ip_id; 14 + u8 tlen; /* transport header len */ 14 15 bool ipv6; 15 16 u32 tcp_seq; 16 17 }; ··· 20 19 void tso_build_hdr(const struct sk_buff *skb, char *hdr, struct tso_t *tso, 21 20 int size, bool is_last); 22 21 void tso_build_data(const struct sk_buff *skb, struct tso_t *tso, int size); 23 - void tso_start(struct sk_buff *skb, struct tso_t *tso); 22 + int tso_start(struct sk_buff *skb, struct tso_t *tso); 24 23 25 24 #endif /* _TSO_H */
+7 -4
net/core/tso.c
··· 17 17 int size, bool is_last) 18 18 { 19 19 struct tcphdr *tcph; 20 - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 20 + int hdr_len = skb_transport_offset(skb) + tso->tlen; 21 21 int mac_hdr_len = skb_network_offset(skb); 22 22 23 23 memcpy(hdr, skb->data, hdr_len); ··· 30 30 } else { 31 31 struct ipv6hdr *iph = (void *)(hdr + mac_hdr_len); 32 32 33 - iph->payload_len = htons(size + tcp_hdrlen(skb)); 33 + iph->payload_len = htons(size + tso->tlen); 34 34 } 35 35 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); 36 36 put_unaligned_be32(tso->tcp_seq, &tcph->seq); ··· 62 62 } 63 63 EXPORT_SYMBOL(tso_build_data); 64 64 65 - void tso_start(struct sk_buff *skb, struct tso_t *tso) 65 + int tso_start(struct sk_buff *skb, struct tso_t *tso) 66 66 { 67 - int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 67 + int tlen = tcp_hdrlen(skb); 68 + int hdr_len = skb_transport_offset(skb) + tlen; 68 69 70 + tso->tlen = tlen; 69 71 tso->ip_id = ntohs(ip_hdr(skb)->id); 70 72 tso->tcp_seq = ntohl(tcp_hdr(skb)->seq); 71 73 tso->next_frag_idx = 0; ··· 85 83 tso->data = skb_frag_address(frag); 86 84 tso->next_frag_idx++; 87 85 } 86 + return hdr_len; 88 87 } 89 88 EXPORT_SYMBOL(tso_start);