Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[netdrvr forcedeth] scatter gather and segmentation offload support

also:
- eliminate use of pointless get_nvpriv() wrapper,
and use netdev_priv() directly.
- use NETDEV_TX_xxx return codes

Authored by Ayaz Abdulla; committed by Jeff Garzik.
ac9c1897 97890897

+166 -91
+166 -91
drivers/net/forcedeth.c
··· 96 96 * 0.42: 06 Aug 2005: Fix lack of link speed initialization 97 97 * in the second (and later) nv_open call 98 98 * 0.43: 10 Aug 2005: Add support for tx checksum. 99 + * 0.44: 20 Aug 2005: Add support for scatter gather and segmentation. 99 100 * 100 101 * Known bugs: 101 102 * We suspect that on some hardware no TX done interrupts are generated. ··· 108 107 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 109 108 * superfluous timer interrupts from the nic. 110 109 */ 111 - #define FORCEDETH_VERSION "0.43" 110 + #define FORCEDETH_VERSION "0.44" 112 111 #define DRV_NAME "forcedeth" 113 112 114 113 #include <linux/module.h> ··· 341 340 /* error and valid are the same for both */ 342 341 #define NV_TX2_ERROR (1<<30) 343 342 #define NV_TX2_VALID (1<<31) 343 + #define NV_TX2_TSO (1<<28) 344 + #define NV_TX2_TSO_SHIFT 14 344 345 #define NV_TX2_CHECKSUM_L3 (1<<27) 345 346 #define NV_TX2_CHECKSUM_L4 (1<<26) 346 347 ··· 545 542 546 543 static inline u8 __iomem *get_hwbase(struct net_device *dev) 547 544 { 548 - return get_nvpriv(dev)->base; 545 + return ((struct fe_priv *)netdev_priv(dev))->base; 549 546 } 550 547 551 548 static inline void pci_push(u8 __iomem *base) ··· 634 631 635 632 static int phy_reset(struct net_device *dev) 636 633 { 637 - struct fe_priv *np = get_nvpriv(dev); 634 + struct fe_priv *np = netdev_priv(dev); 638 635 u32 miicontrol; 639 636 unsigned int tries = 0; 640 637 ··· 737 734 738 735 static void nv_start_rx(struct net_device *dev) 739 736 { 740 - struct fe_priv *np = get_nvpriv(dev); 737 + struct fe_priv *np = netdev_priv(dev); 741 738 u8 __iomem *base = get_hwbase(dev); 742 739 743 740 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name); ··· 793 790 794 791 static void nv_txrx_reset(struct net_device *dev) 795 792 { 796 - struct fe_priv *np = get_nvpriv(dev); 793 + struct fe_priv *np = netdev_priv(dev); 797 794 u8 __iomem *base = get_hwbase(dev); 798 795 799 796 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", 
dev->name); ··· 812 809 */ 813 810 static struct net_device_stats *nv_get_stats(struct net_device *dev) 814 811 { 815 - struct fe_priv *np = get_nvpriv(dev); 812 + struct fe_priv *np = netdev_priv(dev); 816 813 817 814 /* It seems that the nic always generates interrupts and doesn't 818 815 * accumulate errors internally. Thus the current values in np->stats ··· 828 825 */ 829 826 static int nv_alloc_rx(struct net_device *dev) 830 827 { 831 - struct fe_priv *np = get_nvpriv(dev); 828 + struct fe_priv *np = netdev_priv(dev); 832 829 unsigned int refill_rx = np->refill_rx; 833 830 int nr; 834 831 ··· 872 869 static void nv_do_rx_refill(unsigned long data) 873 870 { 874 871 struct net_device *dev = (struct net_device *) data; 875 - struct fe_priv *np = get_nvpriv(dev); 872 + struct fe_priv *np = netdev_priv(dev); 876 873 877 874 disable_irq(dev->irq); 878 875 if (nv_alloc_rx(dev)) { ··· 886 883 887 884 static void nv_init_rx(struct net_device *dev) 888 885 { 889 - struct fe_priv *np = get_nvpriv(dev); 886 + struct fe_priv *np = netdev_priv(dev); 890 887 int i; 891 888 892 889 np->cur_rx = RX_RING; ··· 900 897 901 898 static void nv_init_tx(struct net_device *dev) 902 899 { 903 - struct fe_priv *np = get_nvpriv(dev); 900 + struct fe_priv *np = netdev_priv(dev); 904 901 int i; 905 902 906 903 np->next_tx = np->nic_tx = 0; 907 - for (i = 0; i < TX_RING; i++) 904 + for (i = 0; i < TX_RING; i++) { 908 905 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 909 906 np->tx_ring.orig[i].FlagLen = 0; 910 907 else 911 908 np->tx_ring.ex[i].FlagLen = 0; 909 + np->tx_skbuff[i] = NULL; 910 + } 912 911 } 913 912 914 913 static int nv_init_ring(struct net_device *dev) ··· 920 915 return nv_alloc_rx(dev); 921 916 } 922 917 918 + static void nv_release_txskb(struct net_device *dev, unsigned int skbnr) 919 + { 920 + struct fe_priv *np = netdev_priv(dev); 921 + struct sk_buff *skb = np->tx_skbuff[skbnr]; 922 + unsigned int j, entry, fragments; 923 + 924 + dprintk(KERN_INFO 
"%s: nv_release_txskb for skbnr %d, skb %p\n", 925 + dev->name, skbnr, np->tx_skbuff[skbnr]); 926 + 927 + entry = skbnr; 928 + if ((fragments = skb_shinfo(skb)->nr_frags) != 0) { 929 + for (j = fragments; j >= 1; j--) { 930 + skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1]; 931 + pci_unmap_page(np->pci_dev, np->tx_dma[entry], 932 + frag->size, 933 + PCI_DMA_TODEVICE); 934 + entry = (entry - 1) % TX_RING; 935 + } 936 + } 937 + pci_unmap_single(np->pci_dev, np->tx_dma[entry], 938 + skb->len - skb->data_len, 939 + PCI_DMA_TODEVICE); 940 + dev_kfree_skb_irq(skb); 941 + np->tx_skbuff[skbnr] = NULL; 942 + } 943 + 923 944 static void nv_drain_tx(struct net_device *dev) 924 945 { 925 - struct fe_priv *np = get_nvpriv(dev); 926 - int i; 946 + struct fe_priv *np = netdev_priv(dev); 947 + unsigned int i; 948 + 927 949 for (i = 0; i < TX_RING; i++) { 928 950 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 929 951 np->tx_ring.orig[i].FlagLen = 0; 930 952 else 931 953 np->tx_ring.ex[i].FlagLen = 0; 932 954 if (np->tx_skbuff[i]) { 933 - pci_unmap_single(np->pci_dev, np->tx_dma[i], 934 - np->tx_skbuff[i]->len, 935 - PCI_DMA_TODEVICE); 936 - dev_kfree_skb(np->tx_skbuff[i]); 937 - np->tx_skbuff[i] = NULL; 955 + nv_release_txskb(dev, i); 938 956 np->stats.tx_dropped++; 939 957 } 940 958 } ··· 965 937 966 938 static void nv_drain_rx(struct net_device *dev) 967 939 { 968 - struct fe_priv *np = get_nvpriv(dev); 940 + struct fe_priv *np = netdev_priv(dev); 969 941 int i; 970 942 for (i = 0; i < RX_RING; i++) { 971 943 if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) ··· 995 967 */ 996 968 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) 997 969 { 998 - struct fe_priv *np = get_nvpriv(dev); 999 - int nr = np->next_tx % TX_RING; 1000 - u32 tx_checksum = (skb->ip_summed == CHECKSUM_HW ? 
(NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); 1001 - 1002 - np->tx_skbuff[nr] = skb; 1003 - np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data,skb->len, 1004 - PCI_DMA_TODEVICE); 1005 - 1006 - if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1007 - np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1008 - else { 1009 - np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1010 - np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1011 - } 970 + struct fe_priv *np = netdev_priv(dev); 971 + u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET); 972 + unsigned int fragments = skb_shinfo(skb)->nr_frags; 973 + unsigned int nr = (np->next_tx + fragments) % TX_RING; 974 + unsigned int i; 1012 975 1013 976 spin_lock_irq(&np->lock); 1014 - wmb(); 1015 - if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) 1016 - np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags | tx_checksum); 977 + 978 + if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) { 979 + spin_unlock_irq(&np->lock); 980 + netif_stop_queue(dev); 981 + return NETDEV_TX_BUSY; 982 + } 983 + 984 + np->tx_skbuff[nr] = skb; 985 + 986 + if (fragments) { 987 + dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments); 988 + /* setup descriptors in reverse order */ 989 + for (i = fragments; i >= 1; i--) { 990 + skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1]; 991 + np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size, 992 + PCI_DMA_TODEVICE); 993 + 994 + if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 995 + np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 996 + np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra); 997 + } else { 998 + np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 999 + 
np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1000 + np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra); 1001 + } 1002 + 1003 + nr = (nr - 1) % TX_RING; 1004 + 1005 + if (np->desc_ver == DESC_VER_1) 1006 + tx_flags_extra &= ~NV_TX_LASTPACKET; 1007 + else 1008 + tx_flags_extra &= ~NV_TX2_LASTPACKET; 1009 + } 1010 + } 1011 + 1012 + #ifdef NETIF_F_TSO 1013 + if (skb_shinfo(skb)->tso_size) 1014 + tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT); 1017 1015 else 1018 - np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-1) | np->tx_flags | tx_checksum); 1019 - dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission\n", 1020 - dev->name, np->next_tx); 1016 + #endif 1017 + tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0); 1018 + 1019 + np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len, 1020 + PCI_DMA_TODEVICE); 1021 + 1022 + if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) { 1023 + np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]); 1024 + np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra); 1025 + } else { 1026 + np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32; 1027 + np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF; 1028 + np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra); 1029 + } 1030 + 1031 + dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission. 
tx_flags_extra: %x\n", 1032 + dev->name, np->next_tx, tx_flags_extra); 1021 1033 { 1022 1034 int j; 1023 1035 for (j=0; j<64; j++) { ··· 1068 1000 dprintk("\n"); 1069 1001 } 1070 1002 1071 - np->next_tx++; 1003 + np->next_tx += 1 + fragments; 1072 1004 1073 1005 dev->trans_start = jiffies; 1074 - if (np->next_tx - np->nic_tx >= TX_LIMIT_STOP) 1075 - netif_stop_queue(dev); 1076 1006 spin_unlock_irq(&np->lock); 1077 1007 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 1078 1008 pci_push(get_hwbase(dev)); 1079 - return 0; 1009 + return NETDEV_TX_OK; 1080 1010 } 1081 1011 1082 1012 /* ··· 1084 1018 */ 1085 1019 static void nv_tx_done(struct net_device *dev) 1086 1020 { 1087 - struct fe_priv *np = get_nvpriv(dev); 1021 + struct fe_priv *np = netdev_priv(dev); 1088 1022 u32 Flags; 1089 - int i; 1023 + unsigned int i; 1024 + struct sk_buff *skb; 1090 1025 1091 1026 while (np->nic_tx != np->next_tx) { 1092 1027 i = np->nic_tx % TX_RING; ··· 1102 1035 if (Flags & NV_TX_VALID) 1103 1036 break; 1104 1037 if (np->desc_ver == DESC_VER_1) { 1105 - if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| 1106 - NV_TX_UNDERFLOW|NV_TX_ERROR)) { 1107 - if (Flags & NV_TX_UNDERFLOW) 1108 - np->stats.tx_fifo_errors++; 1109 - if (Flags & NV_TX_CARRIERLOST) 1110 - np->stats.tx_carrier_errors++; 1111 - np->stats.tx_errors++; 1112 - } else { 1113 - np->stats.tx_packets++; 1114 - np->stats.tx_bytes += np->tx_skbuff[i]->len; 1038 + if (Flags & NV_TX_LASTPACKET) { 1039 + skb = np->tx_skbuff[i]; 1040 + if (Flags & (NV_TX_RETRYERROR|NV_TX_CARRIERLOST|NV_TX_LATECOLLISION| 1041 + NV_TX_UNDERFLOW|NV_TX_ERROR)) { 1042 + if (Flags & NV_TX_UNDERFLOW) 1043 + np->stats.tx_fifo_errors++; 1044 + if (Flags & NV_TX_CARRIERLOST) 1045 + np->stats.tx_carrier_errors++; 1046 + np->stats.tx_errors++; 1047 + } else { 1048 + np->stats.tx_packets++; 1049 + np->stats.tx_bytes += skb->len; 1050 + } 1051 + nv_release_txskb(dev, i); 1115 1052 } 1116 1053 } else { 1117 - if 
(Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| 1118 - NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { 1119 - if (Flags & NV_TX2_UNDERFLOW) 1120 - np->stats.tx_fifo_errors++; 1121 - if (Flags & NV_TX2_CARRIERLOST) 1122 - np->stats.tx_carrier_errors++; 1123 - np->stats.tx_errors++; 1124 - } else { 1125 - np->stats.tx_packets++; 1126 - np->stats.tx_bytes += np->tx_skbuff[i]->len; 1054 + if (Flags & NV_TX2_LASTPACKET) { 1055 + skb = np->tx_skbuff[i]; 1056 + if (Flags & (NV_TX2_RETRYERROR|NV_TX2_CARRIERLOST|NV_TX2_LATECOLLISION| 1057 + NV_TX2_UNDERFLOW|NV_TX2_ERROR)) { 1058 + if (Flags & NV_TX2_UNDERFLOW) 1059 + np->stats.tx_fifo_errors++; 1060 + if (Flags & NV_TX2_CARRIERLOST) 1061 + np->stats.tx_carrier_errors++; 1062 + np->stats.tx_errors++; 1063 + } else { 1064 + np->stats.tx_packets++; 1065 + np->stats.tx_bytes += skb->len; 1066 + } 1067 + nv_release_txskb(dev, i); 1127 1068 } 1128 1069 } 1129 - pci_unmap_single(np->pci_dev, np->tx_dma[i], 1130 - np->tx_skbuff[i]->len, 1131 - PCI_DMA_TODEVICE); 1132 - dev_kfree_skb_irq(np->tx_skbuff[i]); 1133 - np->tx_skbuff[i] = NULL; 1134 1070 np->nic_tx++; 1135 1071 } 1136 1072 if (np->next_tx - np->nic_tx < TX_LIMIT_START) ··· 1146 1076 */ 1147 1077 static void nv_tx_timeout(struct net_device *dev) 1148 1078 { 1149 - struct fe_priv *np = get_nvpriv(dev); 1079 + struct fe_priv *np = netdev_priv(dev); 1150 1080 u8 __iomem *base = get_hwbase(dev); 1151 1081 1152 1082 printk(KERN_INFO "%s: Got tx_timeout. 
irq: %08x\n", dev->name, ··· 1279 1209 1280 1210 static void nv_rx_process(struct net_device *dev) 1281 1211 { 1282 - struct fe_priv *np = get_nvpriv(dev); 1212 + struct fe_priv *np = netdev_priv(dev); 1283 1213 u32 Flags; 1284 1214 1285 1215 for (;;) { ··· 1434 1364 */ 1435 1365 static int nv_change_mtu(struct net_device *dev, int new_mtu) 1436 1366 { 1437 - struct fe_priv *np = get_nvpriv(dev); 1367 + struct fe_priv *np = netdev_priv(dev); 1438 1368 int old_mtu; 1439 1369 1440 1370 if (new_mtu < 64 || new_mtu > np->pkt_limit) ··· 1519 1449 */ 1520 1450 static int nv_set_mac_address(struct net_device *dev, void *addr) 1521 1451 { 1522 - struct fe_priv *np = get_nvpriv(dev); 1452 + struct fe_priv *np = netdev_priv(dev); 1523 1453 struct sockaddr *macaddr = (struct sockaddr*)addr; 1524 1454 1525 1455 if(!is_valid_ether_addr(macaddr->sa_data)) ··· 1554 1484 */ 1555 1485 static void nv_set_multicast(struct net_device *dev) 1556 1486 { 1557 - struct fe_priv *np = get_nvpriv(dev); 1487 + struct fe_priv *np = netdev_priv(dev); 1558 1488 u8 __iomem *base = get_hwbase(dev); 1559 1489 u32 addr[2]; 1560 1490 u32 mask[2]; ··· 1614 1544 1615 1545 static int nv_update_linkspeed(struct net_device *dev) 1616 1546 { 1617 - struct fe_priv *np = get_nvpriv(dev); 1547 + struct fe_priv *np = netdev_priv(dev); 1618 1548 u8 __iomem *base = get_hwbase(dev); 1619 1549 int adv, lpa; 1620 1550 int newls = np->linkspeed; ··· 1784 1714 static irqreturn_t nv_nic_irq(int foo, void *data, struct pt_regs *regs) 1785 1715 { 1786 1716 struct net_device *dev = (struct net_device *) data; 1787 - struct fe_priv *np = get_nvpriv(dev); 1717 + struct fe_priv *np = netdev_priv(dev); 1788 1718 u8 __iomem *base = get_hwbase(dev); 1789 1719 u32 events; 1790 1720 int i; ··· 1856 1786 static void nv_do_nic_poll(unsigned long data) 1857 1787 { 1858 1788 struct net_device *dev = (struct net_device *) data; 1859 - struct fe_priv *np = get_nvpriv(dev); 1789 + struct fe_priv *np = netdev_priv(dev); 1860 1790 u8 
__iomem *base = get_hwbase(dev); 1861 1791 1862 1792 disable_irq(dev->irq); ··· 1880 1810 1881 1811 static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 1882 1812 { 1883 - struct fe_priv *np = get_nvpriv(dev); 1813 + struct fe_priv *np = netdev_priv(dev); 1884 1814 strcpy(info->driver, "forcedeth"); 1885 1815 strcpy(info->version, FORCEDETH_VERSION); 1886 1816 strcpy(info->bus_info, pci_name(np->pci_dev)); ··· 1888 1818 1889 1819 static void nv_get_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 1890 1820 { 1891 - struct fe_priv *np = get_nvpriv(dev); 1821 + struct fe_priv *np = netdev_priv(dev); 1892 1822 wolinfo->supported = WAKE_MAGIC; 1893 1823 1894 1824 spin_lock_irq(&np->lock); ··· 1899 1829 1900 1830 static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo) 1901 1831 { 1902 - struct fe_priv *np = get_nvpriv(dev); 1832 + struct fe_priv *np = netdev_priv(dev); 1903 1833 u8 __iomem *base = get_hwbase(dev); 1904 1834 1905 1835 spin_lock_irq(&np->lock); ··· 2100 2030 2101 2031 static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf) 2102 2032 { 2103 - struct fe_priv *np = get_nvpriv(dev); 2033 + struct fe_priv *np = netdev_priv(dev); 2104 2034 u8 __iomem *base = get_hwbase(dev); 2105 2035 u32 *rbuf = buf; 2106 2036 int i; ··· 2114 2044 2115 2045 static int nv_nway_reset(struct net_device *dev) 2116 2046 { 2117 - struct fe_priv *np = get_nvpriv(dev); 2047 + struct fe_priv *np = netdev_priv(dev); 2118 2048 int ret; 2119 2049 2120 2050 spin_lock_irq(&np->lock); ··· 2149 2079 2150 2080 static int nv_open(struct net_device *dev) 2151 2081 { 2152 - struct fe_priv *np = get_nvpriv(dev); 2082 + struct fe_priv *np = netdev_priv(dev); 2153 2083 u8 __iomem *base = get_hwbase(dev); 2154 2084 int ret, oom, i; 2155 2085 ··· 2285 2215 2286 2216 static int nv_close(struct net_device *dev) 2287 2217 { 2288 - struct fe_priv *np = get_nvpriv(dev); 2218 + struct fe_priv *np = netdev_priv(dev); 
2289 2219 u8 __iomem *base; 2290 2220 2291 2221 spin_lock_irq(&np->lock); ··· 2341 2271 if (!dev) 2342 2272 goto out; 2343 2273 2344 - np = get_nvpriv(dev); 2274 + np = netdev_priv(dev); 2345 2275 np->pci_dev = pci_dev; 2346 2276 spin_lock_init(&np->lock); 2347 2277 SET_MODULE_OWNER(dev); ··· 2393 2323 if (pci_set_dma_mask(pci_dev, 0x0000007fffffffffULL)) { 2394 2324 printk(KERN_INFO "forcedeth: 64-bit DMA failed, using 32-bit addressing for device %s.\n", 2395 2325 pci_name(pci_dev)); 2326 + } else { 2327 + dev->features |= NETIF_F_HIGHDMA; 2396 2328 } 2397 2329 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 2398 2330 } else if (id->driver_data & DEV_HAS_LARGEDESC) { ··· 2413 2341 2414 2342 if (id->driver_data & DEV_HAS_CHECKSUM) { 2415 2343 np->txrxctl_bits |= NVREG_TXRXCTL_RXCHECK; 2416 - dev->features |= NETIF_F_HW_CSUM; 2417 - } 2344 + dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; 2345 + #ifdef NETIF_F_TSO 2346 + dev->features |= NETIF_F_TSO; 2347 + #endif 2348 + } 2418 2349 2419 2350 err = -ENOMEM; 2420 2351 np->base = ioremap(addr, NV_PCI_REGSZ); ··· 2497 2422 np->wolenabled = 0; 2498 2423 2499 2424 if (np->desc_ver == DESC_VER_1) { 2500 - np->tx_flags = NV_TX_LASTPACKET|NV_TX_VALID; 2425 + np->tx_flags = NV_TX_VALID; 2501 2426 } else { 2502 - np->tx_flags = NV_TX2_LASTPACKET|NV_TX2_VALID; 2427 + np->tx_flags = NV_TX2_VALID; 2503 2428 } 2504 2429 np->irqmask = NVREG_IRQMASK_WANTED; 2505 2430 if (id->driver_data & DEV_NEED_TIMERIRQ) ··· 2588 2513 static void __devexit nv_remove(struct pci_dev *pci_dev) 2589 2514 { 2590 2515 struct net_device *dev = pci_get_drvdata(pci_dev); 2591 - struct fe_priv *np = get_nvpriv(dev); 2516 + struct fe_priv *np = netdev_priv(dev); 2592 2517 2593 2518 unregister_netdev(dev); 2594 2519