Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

vmxnet3: must split too big fragments

vmxnet3 has a 16Kbytes limit per tx descriptor, that happened to work
as long as we provided PAGE_SIZE fragments.

Our stack can now build larger fragments, so we need to split them to
the 16kbytes boundary.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: jongman heo <jongman.heo@samsung.com>
Tested-by: jongman heo <jongman.heo@samsung.com>
Cc: Shreyas Bhatewara <sbhatewara@vmware.com>
Reviewed-by: Bhavesh Davda <bhavesh@vmware.com>
Signed-off-by: Shreyas Bhatewara <sbhatewara@vmware.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet; committed by David S. Miller.
Commit: a4d7e485  Parent: 78933636

+45 -20
drivers/net/vmxnet3/vmxnet3_drv.c
···
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		u32 buf_size;
 
-		tbi = tq->buf_info + tq->tx_ring.next2fill;
-		tbi->map_type = VMXNET3_MAP_PAGE;
-		tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
-						 0, skb_frag_size(frag),
-						 DMA_TO_DEVICE);
+		buf_offset = 0;
+		len = skb_frag_size(frag);
+		while (len) {
+			tbi = tq->buf_info + tq->tx_ring.next2fill;
+			if (len < VMXNET3_MAX_TX_BUF_SIZE) {
+				buf_size = len;
+				dw2 |= len;
+			} else {
+				buf_size = VMXNET3_MAX_TX_BUF_SIZE;
+				/* spec says that for TxDesc.len, 0 == 2^14 */
+			}
+			tbi->map_type = VMXNET3_MAP_PAGE;
+			tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
+							 buf_offset, buf_size,
+							 DMA_TO_DEVICE);
 
-		tbi->len = skb_frag_size(frag);
+			tbi->len = buf_size;
 
-		gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
-		BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
+			gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
+			BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);
 
-		gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
-		gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
-		gdesc->dword[3] = 0;
+			gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
+			gdesc->dword[2] = cpu_to_le32(dw2);
+			gdesc->dword[3] = 0;
 
-		dev_dbg(&adapter->netdev->dev,
-			"txd[%u]: 0x%llu %u %u\n",
-			tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
-			le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
-		vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
-		dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+			dev_dbg(&adapter->netdev->dev,
+				"txd[%u]: 0x%llu %u %u\n",
+				tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
+				le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
+			vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
+			dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
+
+			len -= buf_size;
+			buf_offset += buf_size;
+		}
 	}
 
 	ctx->eop_txd = gdesc;
···
 	}
 }
 
+static int txd_estimate(const struct sk_buff *skb)
+{
+	int count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
+	int i;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+
+		count += VMXNET3_TXD_NEEDED(skb_frag_size(frag));
+	}
+	return count;
+}
 
 /*
  * Transmits a pkt thru a given tq
···
 	union Vmxnet3_GenericDesc tempTxDesc;
 #endif
 
-	/* conservatively estimate # of descriptors to use */
-	count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
-		skb_shinfo(skb)->nr_frags + 1;
+	count = txd_estimate(skb);
 
 	ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));