Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

amd8111e: add GRO support

Use napi_complete_done() instead of __napi_complete() to:

1) Get support for gro_flush_timeout when opted in.
2) Avoid rearming interrupts for busy-polling users.
3) Use the standard NAPI API.
4) Get rid of baroque code and ease maintenance.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Eric Dumazet; committed by David S. Miller.
c46e9907 1fa8c5f3

+72 -92
+72 -92
drivers/net/ethernet/amd/amd8111e.c
··· 695 695 void __iomem *mmio = lp->mmio; 696 696 struct sk_buff *skb,*new_skb; 697 697 int min_pkt_len, status; 698 - unsigned int intr0; 699 698 int num_rx_pkt = 0; 700 699 short pkt_len; 701 700 #if AMD8111E_VLAN_TAG_USED 702 701 short vtag; 703 702 #endif 704 - int rx_pkt_limit = budget; 705 - unsigned long flags; 706 703 707 - if (rx_pkt_limit <= 0) 708 - goto rx_not_empty; 704 + while (num_rx_pkt < budget) { 705 + status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); 706 + if (status & OWN_BIT) 707 + break; 709 708 710 - do{ 711 - /* process receive packets until we use the quota. 712 - * If we own the next entry, it's a new packet. Send it up. 709 + /* There is a tricky error noted by John Murphy, 710 + * <murf@perftech.com> to Russ Nelson: Even with 711 + * full-sized * buffers it's possible for a 712 + * jabber packet to use two buffers, with only 713 + * the last correctly noting the error. 713 714 */ 714 - while(1) { 715 - status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags); 716 - if (status & OWN_BIT) 717 - break; 718 - 719 - /* There is a tricky error noted by John Murphy, 720 - * <murf@perftech.com> to Russ Nelson: Even with 721 - * full-sized * buffers it's possible for a 722 - * jabber packet to use two buffers, with only 723 - * the last correctly noting the error. 
724 - */ 725 - if(status & ERR_BIT) { 726 - /* resetting flags */ 727 - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 728 - goto err_next_pkt; 729 - } 730 - /* check for STP and ENP */ 731 - if(!((status & STP_BIT) && (status & ENP_BIT))){ 732 - /* resetting flags */ 733 - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 734 - goto err_next_pkt; 735 - } 736 - pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; 715 + if (status & ERR_BIT) { 716 + /* resetting flags */ 717 + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 718 + goto err_next_pkt; 719 + } 720 + /* check for STP and ENP */ 721 + if (!((status & STP_BIT) && (status & ENP_BIT))){ 722 + /* resetting flags */ 723 + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 724 + goto err_next_pkt; 725 + } 726 + pkt_len = le16_to_cpu(lp->rx_ring[rx_index].msg_count) - 4; 737 727 738 728 #if AMD8111E_VLAN_TAG_USED 739 - vtag = status & TT_MASK; 740 - /*MAC will strip vlan tag*/ 741 - if (vtag != 0) 742 - min_pkt_len =MIN_PKT_LEN - 4; 729 + vtag = status & TT_MASK; 730 + /* MAC will strip vlan tag */ 731 + if (vtag != 0) 732 + min_pkt_len = MIN_PKT_LEN - 4; 743 733 else 744 734 #endif 745 - min_pkt_len =MIN_PKT_LEN; 735 + min_pkt_len = MIN_PKT_LEN; 746 736 747 - if (pkt_len < min_pkt_len) { 748 - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 749 - lp->drv_rx_errors++; 750 - goto err_next_pkt; 751 - } 752 - if(--rx_pkt_limit < 0) 753 - goto rx_not_empty; 754 - new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); 755 - if (!new_skb) { 756 - /* if allocation fail, 757 - * ignore that pkt and go to next one 758 - */ 759 - lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 760 - lp->drv_rx_errors++; 761 - goto err_next_pkt; 762 - } 737 + if (pkt_len < min_pkt_len) { 738 + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 739 + lp->drv_rx_errors++; 740 + goto err_next_pkt; 741 + } 742 + new_skb = netdev_alloc_skb(dev, lp->rx_buff_len); 743 + if (!new_skb) { 744 + /* if allocation fail, 745 + * ignore that pkt 
and go to next one 746 + */ 747 + lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS; 748 + lp->drv_rx_errors++; 749 + goto err_next_pkt; 750 + } 763 751 764 - skb_reserve(new_skb, 2); 765 - skb = lp->rx_skbuff[rx_index]; 766 - pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], 767 - lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); 768 - skb_put(skb, pkt_len); 769 - lp->rx_skbuff[rx_index] = new_skb; 770 - lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, 771 - new_skb->data, 772 - lp->rx_buff_len-2, 773 - PCI_DMA_FROMDEVICE); 752 + skb_reserve(new_skb, 2); 753 + skb = lp->rx_skbuff[rx_index]; 754 + pci_unmap_single(lp->pci_dev,lp->rx_dma_addr[rx_index], 755 + lp->rx_buff_len-2, PCI_DMA_FROMDEVICE); 756 + skb_put(skb, pkt_len); 757 + lp->rx_skbuff[rx_index] = new_skb; 758 + lp->rx_dma_addr[rx_index] = pci_map_single(lp->pci_dev, 759 + new_skb->data, 760 + lp->rx_buff_len-2, 761 + PCI_DMA_FROMDEVICE); 774 762 775 - skb->protocol = eth_type_trans(skb, dev); 763 + skb->protocol = eth_type_trans(skb, dev); 776 764 777 765 #if AMD8111E_VLAN_TAG_USED 778 - if (vtag == TT_VLAN_TAGGED){ 779 - u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); 780 - __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 781 - } 782 - #endif 783 - netif_receive_skb(skb); 784 - /*COAL update rx coalescing parameters*/ 785 - lp->coal_conf.rx_packets++; 786 - lp->coal_conf.rx_bytes += pkt_len; 787 - num_rx_pkt++; 788 - 789 - err_next_pkt: 790 - lp->rx_ring[rx_index].buff_phy_addr 791 - = cpu_to_le32(lp->rx_dma_addr[rx_index]); 792 - lp->rx_ring[rx_index].buff_count = 793 - cpu_to_le16(lp->rx_buff_len-2); 794 - wmb(); 795 - lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); 796 - rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; 766 + if (vtag == TT_VLAN_TAGGED){ 767 + u16 vlan_tag = le16_to_cpu(lp->rx_ring[rx_index].tag_ctrl_info); 768 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag); 797 769 } 798 - /* Check the interrupt status register for more packets in the 
799 - * mean time. Process them since we have not used up our quota. 800 - */ 801 - intr0 = readl(mmio + INT0); 802 - /*Ack receive packets */ 803 - writel(intr0 & RINT0,mmio + INT0); 770 + #endif 771 + napi_gro_receive(napi, skb); 772 + /* COAL update rx coalescing parameters */ 773 + lp->coal_conf.rx_packets++; 774 + lp->coal_conf.rx_bytes += pkt_len; 775 + num_rx_pkt++; 804 776 805 - } while(intr0 & RINT0); 777 + err_next_pkt: 778 + lp->rx_ring[rx_index].buff_phy_addr 779 + = cpu_to_le32(lp->rx_dma_addr[rx_index]); 780 + lp->rx_ring[rx_index].buff_count = 781 + cpu_to_le16(lp->rx_buff_len-2); 782 + wmb(); 783 + lp->rx_ring[rx_index].rx_flags |= cpu_to_le16(OWN_BIT); 784 + rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK; 785 + } 806 786 807 - if (rx_pkt_limit > 0) { 787 + if (num_rx_pkt < budget && napi_complete_done(napi, num_rx_pkt)) { 788 + unsigned long flags; 789 + 808 790 /* Receive descriptor is empty now */ 809 791 spin_lock_irqsave(&lp->lock, flags); 810 - __napi_complete(napi); 811 792 writel(VAL0|RINTEN0, mmio + INTEN0); 812 793 writel(VAL2 | RDMD0, mmio + CMD0); 813 794 spin_unlock_irqrestore(&lp->lock, flags); 814 795 } 815 796 816 - rx_not_empty: 817 797 return num_rx_pkt; 818 798 } 819 799