Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'davem-next' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

+3419 -2373
+3 -1
MAINTAINERS
··· 750 750 M: syrjala@sci.fi 751 751 S: Maintained 752 752 753 - ATL1 ETHERNET DRIVER 753 + ATLX ETHERNET DRIVERS 754 754 P: Jay Cliburn 755 755 M: jcliburn@gmail.com 756 756 P: Chris Snook 757 757 M: csnook@redhat.com 758 + P: Jie Yang 759 + M: jie.yang@atheros.com 758 760 L: atl1-devel@lists.sourceforge.net 759 761 W: http://sourceforge.net/projects/atl1 760 762 W: http://atl1.sourceforge.net
+2 -2
drivers/net/3c505.c
··· 130 130 131 131 static const char timeout_msg[] = "*** timeout at %s:%s (line %d) ***\n"; 132 132 #define TIMEOUT_MSG(lineno) \ 133 - printk(timeout_msg, filename,__FUNCTION__,(lineno)) 133 + printk(timeout_msg, filename,__func__,(lineno)) 134 134 135 135 static const char invalid_pcb_msg[] = 136 136 "*** invalid pcb length %d at %s:%s (line %d) ***\n"; 137 137 #define INVALID_PCB_MSG(len) \ 138 - printk(invalid_pcb_msg, (len),filename,__FUNCTION__,__LINE__) 138 + printk(invalid_pcb_msg, (len),filename,__func__,__LINE__) 139 139 140 140 static char search_msg[] __initdata = KERN_INFO "%s: Looking for 3c505 adapter at address %#x..."; 141 141
+2 -1
drivers/net/8139too.c
··· 309 309 Cfg9346 = 0x50, 310 310 Config0 = 0x51, 311 311 Config1 = 0x52, 312 - FlashReg = 0x54, 312 + TimerInt = 0x54, 313 313 MediaStatus = 0x58, 314 314 Config3 = 0x59, 315 315 Config4 = 0x5A, /* absent on RTL-8139A */ ··· 325 325 FIFOTMS = 0x70, /* FIFO Control and test. */ 326 326 CSCR = 0x74, /* Chip Status and Configuration Register. */ 327 327 PARA78 = 0x78, 328 + FlashReg = 0xD4, /* Communication with Flash ROM, four bytes. */ 328 329 PARA7c = 0x7c, /* Magic transceiver parameter register. */ 329 330 Config5 = 0xD8, /* absent on RTL-8139A */ 330 331 };
+3
drivers/net/Kconfig
··· 2057 2057 tristate "Realtek 8169 gigabit ethernet support" 2058 2058 depends on PCI 2059 2059 select CRC32 2060 + select MII 2060 2061 ---help--- 2061 2062 Say Y here if you have a Realtek 8169 PCI Gigabit Ethernet adapter. 2062 2063 ··· 2412 2411 tristate "Intel(R) 10GbE PCI Express adapters support" 2413 2412 depends on PCI && INET 2414 2413 select INET_LRO 2414 + select INTEL_IOATDMA 2415 2415 ---help--- 2416 2416 This driver supports Intel(R) 10GbE PCI Express family of 2417 2417 adapters. For more information on how to identify your adapter, go ··· 2464 2462 select FW_LOADER 2465 2463 select CRC32 2466 2464 select INET_LRO 2465 + select INTEL_IOATDMA 2467 2466 ---help--- 2468 2467 This driver supports Myricom Myri-10G Dual Protocol interface in 2469 2468 Ethernet mode. If the eeprom on your board is not recent enough,
+9 -9
drivers/net/arcnet/arcnet.c
··· 442 442 BUGMSG(D_NORMAL, "WARNING! Station address FF may confuse " 443 443 "DOS networking programs!\n"); 444 444 445 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 445 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 446 446 if (ASTATUS() & RESETflag) { 447 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 447 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 448 448 ACOMMAND(CFLAGScmd | RESETclear); 449 449 } 450 450 451 451 452 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 452 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 453 453 /* make sure we're ready to receive IRQ's. */ 454 454 AINTMASK(0); 455 455 udelay(1); /* give it time to set the mask before 456 456 * we reset it again. (may not even be 457 457 * necessary) 458 458 */ 459 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 459 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 460 460 lp->intmask = NORXflag | RECONflag; 461 461 AINTMASK(lp->intmask); 462 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 462 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 463 463 464 464 netif_start_queue(dev); 465 465 ··· 670 670 freeskb = 0; 671 671 } 672 672 673 - BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS()); 673 + BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS()); 674 674 /* make sure we didn't ignore a TX IRQ while we were in here */ 675 675 AINTMASK(0); 676 676 677 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 677 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 678 678 lp->intmask |= TXFREEflag|EXCNAKflag; 679 679 AINTMASK(lp->intmask); 680 - BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__FUNCTION__,ASTATUS()); 680 + BUGMSG(D_DEBUG, "%s: %d: %s, status: %x\n",__FILE__,__LINE__,__func__,ASTATUS()); 681 681 682 682 
spin_unlock_irqrestore(&lp->lock, flags); 683 683 if (freeskb) { ··· 798 798 diagstatus = (status >> 8) & 0xFF; 799 799 800 800 BUGMSG(D_DEBUG, "%s: %d: %s: status=%x\n", 801 - __FILE__,__LINE__,__FUNCTION__,status); 801 + __FILE__,__LINE__,__func__,status); 802 802 didsomething = 0; 803 803 804 804 /*
+8 -8
drivers/net/arcnet/com20020.c
··· 238 238 u_char inbyte; 239 239 240 240 BUGMSG(D_DEBUG, "%s: %d: %s: dev: %p, lp: %p, dev->name: %s\n", 241 - __FILE__,__LINE__,__FUNCTION__,dev,lp,dev->name); 241 + __FILE__,__LINE__,__func__,dev,lp,dev->name); 242 242 BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", 243 243 dev->name, ASTATUS()); 244 244 245 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 245 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 246 246 lp->config = TXENcfg | (lp->timeout << 3) | (lp->backplane << 2); 247 247 /* power-up defaults */ 248 248 SETCONF; 249 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 249 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 250 250 251 251 if (really_reset) { 252 252 /* reset the card */ ··· 254 254 mdelay(RESETtime * 2); /* COM20020 seems to be slower sometimes */ 255 255 } 256 256 /* clear flags & end reset */ 257 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 257 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 258 258 ACOMMAND(CFLAGScmd | RESETclear | CONFIGclear); 259 259 260 260 /* verify that the ARCnet signature byte is present */ 261 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 261 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 262 262 263 263 com20020_copy_from_card(dev, 0, 0, &inbyte, 1); 264 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 264 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 265 265 if (inbyte != TESTvalue) { 266 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 266 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 267 267 BUGMSG(D_NORMAL, "reset failed: TESTvalue not present.\n"); 268 268 return 1; 269 269 } 270 270 /* enable extended (512-byte) packets */ 271 271 ACOMMAND(CONFIGcmd | EXTconf); 272 - BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__FUNCTION__); 272 + BUGMSG(D_DEBUG, "%s: %d: %s\n",__FILE__,__LINE__,__func__); 273 273 274 
274 /* done! return success. */ 275 275 return 0;
-2
drivers/net/atl1e/atl1e_main.c
··· 2390 2390 } 2391 2391 2392 2392 /* Init GPHY as early as possible due to power saving issue */ 2393 - spin_lock(&adapter->mdio_lock); 2394 2393 atl1e_phy_init(&adapter->hw); 2395 - spin_unlock(&adapter->mdio_lock); 2396 2394 /* reset the controller to 2397 2395 * put the device in a known good starting state */ 2398 2396 err = atl1e_reset_hw(&adapter->hw);
+2 -1
drivers/net/au1000_eth.c
··· 653 653 654 654 aup = dev->priv; 655 655 656 + spin_lock_init(&aup->lock); 657 + 656 658 /* Allocate the data buffers */ 657 659 /* Snooping works fine with eth on all au1xxx */ 658 660 aup->vaddr = (u32)dma_alloc_noncoherent(NULL, MAX_BUF_SIZE * ··· 755 753 aup->tx_db_inuse[i] = pDB; 756 754 } 757 755 758 - spin_lock_init(&aup->lock); 759 756 dev->base_addr = base; 760 757 dev->irq = irq; 761 758 dev->open = au1000_open;
+7 -7
drivers/net/ax88796.c
··· 153 153 while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { 154 154 if (jiffies - reset_start_time > 2*HZ/100) { 155 155 dev_warn(&ax->dev->dev, "%s: %s did not complete.\n", 156 - __FUNCTION__, dev->name); 156 + __func__, dev->name); 157 157 break; 158 158 } 159 159 } ··· 173 173 if (ei_status.dmaing) { 174 174 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s " 175 175 "[DMAstat:%d][irqlock:%d].\n", 176 - dev->name, __FUNCTION__, 176 + dev->name, __func__, 177 177 ei_status.dmaing, ei_status.irqlock); 178 178 return; 179 179 } ··· 215 215 dev_err(&ax->dev->dev, 216 216 "%s: DMAing conflict in %s " 217 217 "[DMAstat:%d][irqlock:%d].\n", 218 - dev->name, __FUNCTION__, 218 + dev->name, __func__, 219 219 ei_status.dmaing, ei_status.irqlock); 220 220 return; 221 221 } ··· 260 260 if (ei_status.dmaing) { 261 261 dev_err(&ax->dev->dev, "%s: DMAing conflict in %s." 262 262 "[DMAstat:%d][irqlock:%d]\n", 263 - dev->name, __FUNCTION__, 263 + dev->name, __func__, 264 264 ei_status.dmaing, ei_status.irqlock); 265 265 return; 266 266 } ··· 396 396 { 397 397 if (phy_debug) 398 398 pr_debug("%s: dev %p, %04x, %04x, %d\n", 399 - __FUNCTION__, dev, phy_addr, reg, opc); 399 + __func__, dev, phy_addr, reg, opc); 400 400 401 401 ax_mii_ei_outbits(dev, 0x3f, 6); /* pre-amble */ 402 402 ax_mii_ei_outbits(dev, 1, 2); /* frame-start */ ··· 422 422 spin_unlock_irqrestore(&ei_local->page_lock, flags); 423 423 424 424 if (phy_debug) 425 - pr_debug("%s: %04x.%04x => read %04x\n", __FUNCTION__, 425 + pr_debug("%s: %04x.%04x => read %04x\n", __func__, 426 426 phy_addr, reg, result); 427 427 428 428 return result; ··· 436 436 unsigned long flags; 437 437 438 438 dev_dbg(&ax->dev->dev, "%s: %p, %04x, %04x %04x\n", 439 - __FUNCTION__, dev, phy_addr, reg, value); 439 + __func__, dev, phy_addr, reg, value); 440 440 441 441 spin_lock_irqsave(&ei->page_lock, flags); 442 442
+4 -4
drivers/net/bfin_mac.c
··· 811 811 { 812 812 u32 opmode; 813 813 814 - pr_debug("%s: %s\n", DRV_NAME, __FUNCTION__); 814 + pr_debug("%s: %s\n", DRV_NAME, __func__); 815 815 816 816 /* Set RX DMA */ 817 817 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); ··· 847 847 /* Our watchdog timed out. Called by the networking layer */ 848 848 static void bfin_mac_timeout(struct net_device *dev) 849 849 { 850 - pr_debug("%s: %s\n", dev->name, __FUNCTION__); 850 + pr_debug("%s: %s\n", dev->name, __func__); 851 851 852 852 bfin_mac_disable(); 853 853 ··· 949 949 { 950 950 struct bfin_mac_local *lp = netdev_priv(dev); 951 951 int retval; 952 - pr_debug("%s: %s\n", dev->name, __FUNCTION__); 952 + pr_debug("%s: %s\n", dev->name, __func__); 953 953 954 954 /* 955 955 * Check that the address is valid. If its not, refuse ··· 989 989 static int bfin_mac_close(struct net_device *dev) 990 990 { 991 991 struct bfin_mac_local *lp = netdev_priv(dev); 992 - pr_debug("%s: %s\n", dev->name, __FUNCTION__); 992 + pr_debug("%s: %s\n", dev->name, __func__); 993 993 994 994 netif_stop_queue(dev); 995 995 netif_carrier_off(dev);
+24
drivers/net/bonding/bond_alb.c
··· 38 38 #include <linux/in.h> 39 39 #include <net/ipx.h> 40 40 #include <net/arp.h> 41 + #include <net/ipv6.h> 41 42 #include <asm/byteorder.h> 42 43 #include "bonding.h" 43 44 #include "bond_alb.h" ··· 82 81 #define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC 83 82 84 83 static const u8 mac_bcast[ETH_ALEN] = {0xff,0xff,0xff,0xff,0xff,0xff}; 84 + static const u8 mac_v6_allmcast[ETH_ALEN] = {0x33,0x33,0x00,0x00,0x00,0x01}; 85 85 static const int alb_delta_in_ticks = HZ / ALB_TIMER_TICKS_PER_SEC; 86 86 87 87 #pragma pack(1) ··· 1292 1290 u32 hash_index = 0; 1293 1291 const u8 *hash_start = NULL; 1294 1292 int res = 1; 1293 + struct ipv6hdr *ip6hdr; 1295 1294 1296 1295 skb_reset_mac_header(skb); 1297 1296 eth_data = eth_hdr(skb); ··· 1322 1319 } 1323 1320 break; 1324 1321 case ETH_P_IPV6: 1322 + /* IPv6 doesn't really use broadcast mac address, but leave 1323 + * that here just in case. 1324 + */ 1325 1325 if (memcmp(eth_data->h_dest, mac_bcast, ETH_ALEN) == 0) { 1326 + do_tx_balance = 0; 1327 + break; 1328 + } 1329 + 1330 + /* IPv6 uses all-nodes multicast as an equivalent to 1331 + * broadcasts in IPv4. 1332 + */ 1333 + if (memcmp(eth_data->h_dest, mac_v6_allmcast, ETH_ALEN) == 0) { 1334 + do_tx_balance = 0; 1335 + break; 1336 + } 1337 + 1338 + /* Additianally, DAD probes should not be tx-balanced as that 1339 + * will lead to false positives for duplicate addresses and 1340 + * prevent address configuration from working. 1341 + */ 1342 + ip6hdr = ipv6_hdr(skb); 1343 + if (ipv6_addr_any(&ip6hdr->saddr)) { 1326 1344 do_tx_balance = 0; 1327 1345 break; 1328 1346 }
+6
drivers/net/bonding/bond_main.c
··· 4493 4493 4494 4494 static const struct ethtool_ops bond_ethtool_ops = { 4495 4495 .get_drvinfo = bond_ethtool_get_drvinfo, 4496 + .get_link = ethtool_op_get_link, 4497 + .get_tx_csum = ethtool_op_get_tx_csum, 4498 + .get_sg = ethtool_op_get_sg, 4499 + .get_tso = ethtool_op_get_tso, 4500 + .get_ufo = ethtool_op_get_ufo, 4501 + .get_flags = ethtool_op_get_flags, 4496 4502 }; 4497 4503 4498 4504 /*
+1 -1
drivers/net/bonding/bonding.h
··· 32 32 #ifdef BONDING_DEBUG 33 33 #define dprintk(fmt, args...) \ 34 34 printk(KERN_DEBUG \ 35 - DRV_NAME ": %s() %d: " fmt, __FUNCTION__, __LINE__ , ## args ) 35 + DRV_NAME ": %s() %d: " fmt, __func__, __LINE__ , ## args ) 36 36 #else 37 37 #define dprintk(fmt, args...) 38 38 #endif /* BONDING_DEBUG */
-2
drivers/net/cs89x0.c
··· 1397 1397 release_dma: 1398 1398 #if ALLOW_DMA 1399 1399 free_dma(dev->dma); 1400 - #endif 1401 1400 release_irq: 1402 - #if ALLOW_DMA 1403 1401 release_dma_buff(lp); 1404 1402 #endif 1405 1403 writereg(dev, PP_LineCTL, readreg(dev, PP_LineCTL) & ~(SERIAL_TX_ON | SERIAL_RX_ON));
+4 -4
drivers/net/cxgb3/cxgb3_offload.c
··· 1018 1018 1019 1019 skb = alloc_skb(sizeof(*req), GFP_ATOMIC); 1020 1020 if (!skb) { 1021 - printk(KERN_ERR "%s: cannot allocate skb!\n", __FUNCTION__); 1021 + printk(KERN_ERR "%s: cannot allocate skb!\n", __func__); 1022 1022 return; 1023 1023 } 1024 1024 skb->priority = CPL_PRIORITY_CONTROL; ··· 1049 1049 return; 1050 1050 if (!is_offloading(newdev)) { 1051 1051 printk(KERN_WARNING "%s: Redirect to non-offload " 1052 - "device ignored.\n", __FUNCTION__); 1052 + "device ignored.\n", __func__); 1053 1053 return; 1054 1054 } 1055 1055 tdev = dev2t3cdev(olddev); 1056 1056 BUG_ON(!tdev); 1057 1057 if (tdev != dev2t3cdev(newdev)) { 1058 1058 printk(KERN_WARNING "%s: Redirect to different " 1059 - "offload device ignored.\n", __FUNCTION__); 1059 + "offload device ignored.\n", __func__); 1060 1060 return; 1061 1061 } 1062 1062 ··· 1064 1064 e = t3_l2t_get(tdev, new->neighbour, newdev); 1065 1065 if (!e) { 1066 1066 printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", 1067 - __FUNCTION__); 1067 + __func__); 1068 1068 return; 1069 1069 } 1070 1070
-35
drivers/net/cxgb3/sge.c
··· 1937 1937 eh->h_proto == htons(ETH_P_IP) && ih->ihl == (sizeof(*ih) >> 2); 1938 1938 } 1939 1939 1940 - #define TCP_FLAG_MASK (TCP_FLAG_CWR | TCP_FLAG_ECE | TCP_FLAG_URG |\ 1941 - TCP_FLAG_ACK | TCP_FLAG_PSH | TCP_FLAG_RST |\ 1942 - TCP_FLAG_SYN | TCP_FLAG_FIN) 1943 - #define TSTAMP_WORD ((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |\ 1944 - (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP) 1945 - 1946 - /** 1947 - * lro_segment_ok - check if a TCP segment is eligible for LRO 1948 - * @tcph: the TCP header of the packet 1949 - * 1950 - * Returns true if a TCP packet is eligible for LRO. This requires that 1951 - * the packet have only the ACK flag set and no TCP options besides 1952 - * time stamps. 1953 - */ 1954 - static inline int lro_segment_ok(const struct tcphdr *tcph) 1955 - { 1956 - int optlen; 1957 - 1958 - if (unlikely((tcp_flag_word(tcph) & TCP_FLAG_MASK) != TCP_FLAG_ACK)) 1959 - return 0; 1960 - 1961 - optlen = (tcph->doff << 2) - sizeof(*tcph); 1962 - if (optlen) { 1963 - const u32 *opt = (const u32 *)(tcph + 1); 1964 - 1965 - if (optlen != TCPOLEN_TSTAMP_ALIGNED || 1966 - *opt != htonl(TSTAMP_WORD) || !opt[2]) 1967 - return 0; 1968 - } 1969 - return 1; 1970 - } 1971 - 1972 1940 static int t3_get_lro_header(void **eh, void **iph, void **tcph, 1973 1941 u64 *hdr_flags, void *priv) 1974 1942 { ··· 1948 1980 *eh = (struct ethhdr *)(cpl + 1); 1949 1981 *iph = (struct iphdr *)((struct ethhdr *)*eh + 1); 1950 1982 *tcph = (struct tcphdr *)((struct iphdr *)*iph + 1); 1951 - 1952 - if (!lro_segment_ok(*tcph)) 1953 - return -1; 1954 1983 1955 1984 *hdr_flags = LRO_IPV4 | LRO_TCP; 1956 1985 return 0;
+1 -1
drivers/net/e100.c
··· 191 191 #define DPRINTK(nlevel, klevel, fmt, args...) \ 192 192 (void)((NETIF_MSG_##nlevel & nic->msg_enable) && \ 193 193 printk(KERN_##klevel PFX "%s: %s: " fmt, nic->netdev->name, \ 194 - __FUNCTION__ , ## args)) 194 + __func__ , ## args)) 195 195 196 196 #define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\ 197 197 PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
+2 -2
drivers/net/ehea/ehea.h
··· 40 40 #include <asm/io.h> 41 41 42 42 #define DRV_NAME "ehea" 43 - #define DRV_VERSION "EHEA_0092" 43 + #define DRV_VERSION "EHEA_0093" 44 44 45 45 /* eHEA capability flags */ 46 46 #define DLPAR_PORT_ADD_REM 1 47 47 #define DLPAR_MEM_ADD 2 48 48 #define DLPAR_MEM_REM 4 49 - #define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD) 49 + #define EHEA_CAPABILITIES (DLPAR_PORT_ADD_REM | DLPAR_MEM_ADD | DLPAR_MEM_REM) 50 50 51 51 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ 52 52 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+4 -22
drivers/net/ehea/ehea_main.c
··· 219 219 } 220 220 221 221 out_update: 222 + mutex_lock(&ehea_fw_handles.lock); 222 223 kfree(ehea_fw_handles.arr); 223 224 ehea_fw_handles.arr = arr; 224 225 ehea_fw_handles.num_entries = i; 226 + mutex_unlock(&ehea_fw_handles.lock); 225 227 } 226 228 227 229 static void ehea_update_bcmc_registrations(void) ··· 295 293 } 296 294 297 295 out_update: 296 + spin_lock(&ehea_bcmc_regs.lock); 298 297 kfree(ehea_bcmc_regs.arr); 299 298 ehea_bcmc_regs.arr = arr; 300 299 ehea_bcmc_regs.num_entries = i; 300 + spin_unlock(&ehea_bcmc_regs.lock); 301 301 } 302 302 303 303 static struct net_device_stats *ehea_get_stats(struct net_device *dev) ··· 1774 1770 1775 1771 memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len); 1776 1772 1777 - spin_lock(&ehea_bcmc_regs.lock); 1778 - 1779 1773 /* Deregister old MAC in pHYP */ 1780 1774 if (port->state == EHEA_PORT_UP) { 1781 1775 ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC); ··· 1794 1792 1795 1793 out_upregs: 1796 1794 ehea_update_bcmc_registrations(); 1797 - spin_unlock(&ehea_bcmc_regs.lock); 1798 1795 out_free: 1799 1796 kfree(cb0); 1800 1797 out: ··· 1955 1954 } 1956 1955 ehea_promiscuous(dev, 0); 1957 1956 1958 - spin_lock(&ehea_bcmc_regs.lock); 1959 - 1960 1957 if (dev->flags & IFF_ALLMULTI) { 1961 1958 ehea_allmulti(dev, 1); 1962 1959 goto out; ··· 1984 1985 } 1985 1986 out: 1986 1987 ehea_update_bcmc_registrations(); 1987 - spin_unlock(&ehea_bcmc_regs.lock); 1988 1988 return; 1989 1989 } 1990 1990 ··· 2464 2466 if (port->state == EHEA_PORT_UP) 2465 2467 return 0; 2466 2468 2467 - mutex_lock(&ehea_fw_handles.lock); 2468 - 2469 2469 ret = ehea_port_res_setup(port, port->num_def_qps, 2470 2470 port->num_add_tx_qps); 2471 2471 if (ret) { ··· 2500 2504 } 2501 2505 } 2502 2506 2503 - spin_lock(&ehea_bcmc_regs.lock); 2504 - 2505 2507 ret = ehea_broadcast_reg_helper(port, H_REG_BCMC); 2506 2508 if (ret) { 2507 2509 ret = -EIO; ··· 2521 2527 ehea_info("Failed starting %s. 
ret=%i", dev->name, ret); 2522 2528 2523 2529 ehea_update_bcmc_registrations(); 2524 - spin_unlock(&ehea_bcmc_regs.lock); 2525 2530 2526 2531 ehea_update_firmware_handles(); 2527 - mutex_unlock(&ehea_fw_handles.lock); 2528 2532 2529 2533 return ret; 2530 2534 } ··· 2572 2580 if (port->state == EHEA_PORT_DOWN) 2573 2581 return 0; 2574 2582 2575 - mutex_lock(&ehea_fw_handles.lock); 2576 - 2577 - spin_lock(&ehea_bcmc_regs.lock); 2578 2583 ehea_drop_multicast_list(dev); 2579 2584 ehea_broadcast_reg_helper(port, H_DEREG_BCMC); 2580 2585 ··· 2580 2591 port->state = EHEA_PORT_DOWN; 2581 2592 2582 2593 ehea_update_bcmc_registrations(); 2583 - spin_unlock(&ehea_bcmc_regs.lock); 2584 2594 2585 2595 ret = ehea_clean_all_portres(port); 2586 2596 if (ret) ··· 2587 2599 dev->name, ret); 2588 2600 2589 2601 ehea_update_firmware_handles(); 2590 - mutex_unlock(&ehea_fw_handles.lock); 2591 2602 2592 2603 return ret; 2593 2604 } ··· 3365 3378 ehea_error("Invalid ibmebus device probed"); 3366 3379 return -EINVAL; 3367 3380 } 3368 - mutex_lock(&ehea_fw_handles.lock); 3369 3381 3370 3382 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL); 3371 3383 if (!adapter) { ··· 3448 3462 3449 3463 out: 3450 3464 ehea_update_firmware_handles(); 3451 - mutex_unlock(&ehea_fw_handles.lock); 3452 3465 return ret; 3453 3466 } 3454 3467 ··· 3466 3481 3467 3482 flush_scheduled_work(); 3468 3483 3469 - mutex_lock(&ehea_fw_handles.lock); 3470 - 3471 3484 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3472 3485 tasklet_kill(&adapter->neq_tasklet); 3473 3486 ··· 3475 3492 kfree(adapter); 3476 3493 3477 3494 ehea_update_firmware_handles(); 3478 - mutex_unlock(&ehea_fw_handles.lock); 3479 3495 3480 3496 return 0; 3481 3497 }
+1 -1
drivers/net/ehea/ehea_phyp.c
··· 535 535 cb_logaddr, /* R5 */ 536 536 0, 0, 0, 0, 0); /* R6-R10 */ 537 537 #ifdef DEBUG 538 - ehea_dmp(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea"); 538 + ehea_dump(cb_addr, sizeof(struct hcp_query_ehea), "hcp_query_ehea"); 539 539 #endif 540 540 return hret; 541 541 }
+2 -1
drivers/net/ehea/ehea_qmr.c
··· 595 595 end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE); 596 596 mr_len = *(unsigned long *)arg; 597 597 598 - ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); 598 + if (!ehea_bmap) 599 + ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL); 599 600 if (!ehea_bmap) 600 601 return -ENOMEM; 601 602
+28 -28
drivers/net/enc28j60.c
··· 110 110 } 111 111 if (ret && netif_msg_drv(priv)) 112 112 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 113 - __FUNCTION__, ret); 113 + __func__, ret); 114 114 115 115 return ret; 116 116 } ··· 131 131 ret = spi_write(priv->spi, priv->spi_transfer_buf, len + 1); 132 132 if (ret && netif_msg_drv(priv)) 133 133 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 134 - __FUNCTION__, ret); 134 + __func__, ret); 135 135 } 136 136 return ret; 137 137 } ··· 156 156 ret = spi_write_then_read(priv->spi, tx_buf, 1, rx_buf, slen); 157 157 if (ret) 158 158 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 159 - __FUNCTION__, ret); 159 + __func__, ret); 160 160 else 161 161 val = rx_buf[slen - 1]; 162 162 ··· 176 176 ret = spi_write(priv->spi, priv->spi_transfer_buf, 2); 177 177 if (ret && netif_msg_drv(priv)) 178 178 printk(KERN_DEBUG DRV_NAME ": %s() failed: ret = %d\n", 179 - __FUNCTION__, ret); 179 + __func__, ret); 180 180 return ret; 181 181 } 182 182 183 183 static void enc28j60_soft_reset(struct enc28j60_net *priv) 184 184 { 185 185 if (netif_msg_hw(priv)) 186 - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 186 + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); 187 187 188 188 spi_write_op(priv, ENC28J60_SOFT_RESET, 0, ENC28J60_SOFT_RESET); 189 189 /* Errata workaround #1, CLKRDY check is unreliable, ··· 357 357 reg = nolock_regw_read(priv, ERDPTL); 358 358 if (reg != addr) 359 359 printk(KERN_DEBUG DRV_NAME ": %s() error writing ERDPT " 360 - "(0x%04x - 0x%04x)\n", __FUNCTION__, reg, addr); 360 + "(0x%04x - 0x%04x)\n", __func__, reg, addr); 361 361 } 362 362 #endif 363 363 spi_read_buf(priv, len, data); ··· 380 380 if (reg != TXSTART_INIT) 381 381 printk(KERN_DEBUG DRV_NAME 382 382 ": %s() ERWPT:0x%04x != 0x%04x\n", 383 - __FUNCTION__, reg, TXSTART_INIT); 383 + __func__, reg, TXSTART_INIT); 384 384 } 385 385 #endif 386 386 /* Set the TXND pointer to correspond to the packet size given */ ··· 390 390 if (netif_msg_hw(priv)) 
391 391 printk(KERN_DEBUG DRV_NAME 392 392 ": %s() after control byte ERWPT:0x%04x\n", 393 - __FUNCTION__, nolock_regw_read(priv, EWRPTL)); 393 + __func__, nolock_regw_read(priv, EWRPTL)); 394 394 /* copy the packet into the transmit buffer */ 395 395 spi_write_buf(priv, len, data); 396 396 if (netif_msg_hw(priv)) 397 397 printk(KERN_DEBUG DRV_NAME 398 398 ": %s() after write packet ERWPT:0x%04x, len=%d\n", 399 - __FUNCTION__, nolock_regw_read(priv, EWRPTL), len); 399 + __func__, nolock_regw_read(priv, EWRPTL), len); 400 400 mutex_unlock(&priv->lock); 401 401 } 402 402 ··· 495 495 if (netif_msg_drv(priv)) 496 496 printk(KERN_DEBUG DRV_NAME 497 497 ": %s() Hardware must be disabled to set " 498 - "Mac address\n", __FUNCTION__); 498 + "Mac address\n", __func__); 499 499 ret = -EBUSY; 500 500 } 501 501 mutex_unlock(&priv->lock); ··· 575 575 if (start > 0x1FFF || end > 0x1FFF || start > end) { 576 576 if (netif_msg_drv(priv)) 577 577 printk(KERN_ERR DRV_NAME ": %s(%d, %d) RXFIFO " 578 - "bad parameters!\n", __FUNCTION__, start, end); 578 + "bad parameters!\n", __func__, start, end); 579 579 return; 580 580 } 581 581 /* set receive buffer start + end */ ··· 591 591 if (start > 0x1FFF || end > 0x1FFF || start > end) { 592 592 if (netif_msg_drv(priv)) 593 593 printk(KERN_ERR DRV_NAME ": %s(%d, %d) TXFIFO " 594 - "bad parameters!\n", __FUNCTION__, start, end); 594 + "bad parameters!\n", __func__, start, end); 595 595 return; 596 596 } 597 597 /* set transmit buffer start + end */ ··· 630 630 u8 reg; 631 631 632 632 if (netif_msg_drv(priv)) 633 - printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __FUNCTION__, 633 + printk(KERN_DEBUG DRV_NAME ": %s() - %s\n", __func__, 634 634 priv->full_duplex ? 
"FullDuplex" : "HalfDuplex"); 635 635 636 636 mutex_lock(&priv->lock); ··· 661 661 if (reg == 0x00 || reg == 0xff) { 662 662 if (netif_msg_drv(priv)) 663 663 printk(KERN_DEBUG DRV_NAME ": %s() Invalid RevId %d\n", 664 - __FUNCTION__, reg); 664 + __func__, reg); 665 665 return 0; 666 666 } 667 667 ··· 724 724 /* enable interrupts */ 725 725 if (netif_msg_hw(priv)) 726 726 printk(KERN_DEBUG DRV_NAME ": %s() enabling interrupts.\n", 727 - __FUNCTION__); 727 + __func__); 728 728 729 729 enc28j60_phy_write(priv, PHIE, PHIE_PGEIE | PHIE_PLNKIE); 730 730 ··· 888 888 if (netif_msg_rx_err(priv)) 889 889 dev_err(&ndev->dev, 890 890 "%s() Invalid packet address!! 0x%04x\n", 891 - __FUNCTION__, priv->next_pk_ptr); 891 + __func__, priv->next_pk_ptr); 892 892 /* packet address corrupted: reset RX logic */ 893 893 mutex_lock(&priv->lock); 894 894 nolock_reg_bfclr(priv, ECON1, ECON1_RXEN); ··· 917 917 rxstat |= rsv[4]; 918 918 919 919 if (netif_msg_rx_status(priv)) 920 - enc28j60_dump_rsv(priv, __FUNCTION__, next_packet, len, rxstat); 920 + enc28j60_dump_rsv(priv, __func__, next_packet, len, rxstat); 921 921 922 922 if (!RSV_GETBIT(rxstat, RSV_RXOK)) { 923 923 if (netif_msg_rx_err(priv)) ··· 941 941 enc28j60_mem_read(priv, priv->next_pk_ptr + sizeof(rsv), 942 942 len, skb_put(skb, len)); 943 943 if (netif_msg_pktdata(priv)) 944 - dump_packet(__FUNCTION__, skb->len, skb->data); 944 + dump_packet(__func__, skb->len, skb->data); 945 945 skb->protocol = eth_type_trans(skb, ndev); 946 946 /* update statistics */ 947 947 ndev->stats.rx_packets++; ··· 958 958 erxrdpt = erxrdpt_workaround(next_packet, RXSTART_INIT, RXEND_INIT); 959 959 if (netif_msg_hw(priv)) 960 960 printk(KERN_DEBUG DRV_NAME ": %s() ERXRDPT:0x%04x\n", 961 - __FUNCTION__, erxrdpt); 961 + __func__, erxrdpt); 962 962 963 963 mutex_lock(&priv->lock); 964 964 nolock_regw_write(priv, ERXRDPTL, erxrdpt); ··· 968 968 reg = nolock_regw_read(priv, ERXRDPTL); 969 969 if (reg != erxrdpt) 970 970 printk(KERN_DEBUG DRV_NAME ": %s() 
ERXRDPT verify " 971 - "error (0x%04x - 0x%04x)\n", __FUNCTION__, 971 + "error (0x%04x - 0x%04x)\n", __func__, 972 972 reg, erxrdpt); 973 973 } 974 974 #endif ··· 1006 1006 mutex_unlock(&priv->lock); 1007 1007 if (netif_msg_rx_status(priv)) 1008 1008 printk(KERN_DEBUG DRV_NAME ": %s() free_space = %d\n", 1009 - __FUNCTION__, free_space); 1009 + __func__, free_space); 1010 1010 return free_space; 1011 1011 } 1012 1012 ··· 1022 1022 reg = enc28j60_phy_read(priv, PHSTAT2); 1023 1023 if (netif_msg_hw(priv)) 1024 1024 printk(KERN_DEBUG DRV_NAME ": %s() PHSTAT1: %04x, " 1025 - "PHSTAT2: %04x\n", __FUNCTION__, 1025 + "PHSTAT2: %04x\n", __func__, 1026 1026 enc28j60_phy_read(priv, PHSTAT1), reg); 1027 1027 duplex = reg & PHSTAT2_DPXSTAT; 1028 1028 ··· 1095 1095 int intflags, loop; 1096 1096 1097 1097 if (netif_msg_intr(priv)) 1098 - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1098 + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); 1099 1099 /* disable further interrupts */ 1100 1100 locked_reg_bfclr(priv, EIE, EIE_INTIE); 1101 1101 ··· 1198 1198 /* re-enable interrupts */ 1199 1199 locked_reg_bfset(priv, EIE, EIE_INTIE); 1200 1200 if (netif_msg_intr(priv)) 1201 - printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __FUNCTION__); 1201 + printk(KERN_DEBUG DRV_NAME ": %s() exit\n", __func__); 1202 1202 } 1203 1203 1204 1204 /* ··· 1213 1213 ": Tx Packet Len:%d\n", priv->tx_skb->len); 1214 1214 1215 1215 if (netif_msg_pktdata(priv)) 1216 - dump_packet(__FUNCTION__, 1216 + dump_packet(__func__, 1217 1217 priv->tx_skb->len, priv->tx_skb->data); 1218 1218 enc28j60_packet_write(priv, priv->tx_skb->len, priv->tx_skb->data); 1219 1219 ··· 1254 1254 struct enc28j60_net *priv = netdev_priv(dev); 1255 1255 1256 1256 if (netif_msg_tx_queued(priv)) 1257 - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1257 + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); 1258 1258 1259 1259 /* If some error occurs while trying to transmit this 1260 1260 * packet, you 
should return '1' from this function. ··· 1325 1325 struct enc28j60_net *priv = netdev_priv(dev); 1326 1326 1327 1327 if (netif_msg_drv(priv)) 1328 - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1328 + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); 1329 1329 1330 1330 if (!is_valid_ether_addr(dev->dev_addr)) { 1331 1331 if (netif_msg_ifup(priv)) { ··· 1363 1363 struct enc28j60_net *priv = netdev_priv(dev); 1364 1364 1365 1365 if (netif_msg_drv(priv)) 1366 - printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __FUNCTION__); 1366 + printk(KERN_DEBUG DRV_NAME ": %s() enter\n", __func__); 1367 1367 1368 1368 enc28j60_hw_disable(priv); 1369 1369 enc28j60_lowpower(priv, true);
+1 -1
drivers/net/ibm_newemac/phy.c
··· 321 321 322 322 static int m88e1111_init(struct mii_phy *phy) 323 323 { 324 - pr_debug("%s: Marvell 88E1111 Ethernet\n", __FUNCTION__); 324 + pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__); 325 325 phy_write(phy, 0x14, 0x0ce3); 326 326 phy_write(phy, 0x18, 0x4101); 327 327 phy_write(phy, 0x09, 0x0e00);
+1 -1
drivers/net/ixgb/ixgb.h
··· 85 85 #define DPRINTK(nlevel, klevel, fmt, args...) \ 86 86 (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ 87 87 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ 88 - __FUNCTION__ , ## args)) 88 + __func__ , ## args)) 89 89 90 90 91 91 /* TX/RX descriptor defines */
+25 -35
drivers/net/ixgbe/ixgbe.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 40 41 #include <linux/dca.h> 41 42 #endif 42 43 43 - #define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args) 44 - 45 44 #define PFX "ixgbe: " 46 45 #define DPRINTK(nlevel, klevel, fmt, args...) \ 47 46 ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ 48 47 printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ 49 - __FUNCTION__ , ## args))) 48 + __func__ , ## args))) 50 49 51 50 /* TX/RX descriptor defines */ 52 51 #define IXGBE_DEFAULT_TXD 1024 ··· 54 57 #define IXGBE_DEFAULT_RXD 1024 55 58 #define IXGBE_MAX_RXD 4096 56 59 #define IXGBE_MIN_RXD 64 57 - 58 - #define IXGBE_DEFAULT_RXQ 1 59 - #define IXGBE_MAX_RXQ 1 60 - #define IXGBE_MIN_RXQ 1 61 - 62 - #define IXGBE_DEFAULT_ITR_RX_USECS 125 /* 8k irqs/sec */ 63 - #define IXGBE_DEFAULT_ITR_TX_USECS 250 /* 4k irqs/sec */ 64 - #define IXGBE_MIN_ITR_USECS 100 /* 500k irqs/sec */ 65 - #define IXGBE_MAX_ITR_USECS 10000 /* 100 irqs/sec */ 66 60 67 61 /* flow control */ 68 62 #define IXGBE_DEFAULT_FCRTL 0x10000 ··· 75 87 #define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 76 88 77 89 #define MAXIMUM_ETHERNET_VLAN_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN) 78 - 79 - /* How many Tx Descriptors do we need to call netif_wake_queue? */ 80 - #define IXGBE_TX_QUEUE_WAKE 16 81 90 82 91 /* How many Rx Buffers do we bundle into one write to the hardware ? 
*/ 83 92 #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ ··· 104 119 dma_addr_t dma; 105 120 struct page *page; 106 121 dma_addr_t page_dma; 122 + unsigned int page_offset; 107 123 }; 108 124 109 125 struct ixgbe_queue_stats { ··· 143 157 struct net_lro_mgr lro_mgr; 144 158 bool lro_used; 145 159 struct ixgbe_queue_stats stats; 146 - u8 v_idx; /* maps directly to the index for this ring in the hardware 147 - * vector array, can also be used for finding the bit in EICR 148 - * and friends that represents the vector for this ring */ 160 + u16 v_idx; /* maps directly to the index for this ring in the hardware 161 + * vector array, can also be used for finding the bit in EICR 162 + * and friends that represents the vector for this ring */ 149 163 150 - u32 eims_value; 151 - u16 itr_register; 152 164 153 - char name[IFNAMSIZ + 5]; 154 165 u16 work_limit; /* max work per interrupt */ 155 166 u16 rx_buf_len; 156 167 }; ··· 174 191 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ 175 192 u8 rxr_count; /* Rx ring count assigned to this vector */ 176 193 u8 txr_count; /* Tx ring count assigned to this vector */ 177 - u8 tx_eitr; 178 - u8 rx_eitr; 194 + u8 tx_itr; 195 + u8 rx_itr; 179 196 u32 eitr; 180 197 }; 181 198 ··· 223 240 224 241 /* TX */ 225 242 struct ixgbe_ring *tx_ring; /* One per active queue */ 243 + int num_tx_queues; 226 244 u64 restart_queue; 245 + u64 hw_csum_tx_good; 227 246 u64 lsc_int; 228 247 u64 hw_tso_ctxt; 229 248 u64 hw_tso6_ctxt; ··· 234 249 235 250 /* RX */ 236 251 struct ixgbe_ring *rx_ring; /* One per active queue */ 237 - u64 hw_csum_tx_good; 252 + int num_rx_queues; 238 253 u64 hw_csum_rx_error; 239 254 u64 hw_csum_rx_good; 240 255 u64 non_eop_descs; 241 - int num_tx_queues; 242 - int num_rx_queues; 243 256 int num_msix_vectors; 244 257 struct ixgbe_ring_feature ring_feature[3]; 245 258 struct msix_entry *msix_entries; ··· 284 301 struct ixgbe_hw_stats stats; 285 302 286 303 /* Interrupt Throttle Rate */ 287 - u32 rx_eitr; 
288 - u32 tx_eitr; 304 + u32 eitr_param; 289 305 290 306 unsigned long state; 291 307 u64 tx_busy; 292 308 u64 lro_aggregated; 293 309 u64 lro_flushed; 294 310 u64 lro_no_desc; 311 + unsigned int tx_ring_count; 312 + unsigned int rx_ring_count; 313 + 314 + u32 link_speed; 315 + bool link_up; 316 + unsigned long link_check_timeout; 317 + 318 + struct work_struct watchdog_task; 295 319 }; 296 320 297 321 enum ixbge_state_t { ··· 320 330 extern void ixgbe_down(struct ixgbe_adapter *adapter); 321 331 extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 322 332 extern void ixgbe_reset(struct ixgbe_adapter *adapter); 323 - extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 324 333 extern void ixgbe_set_ethtool_ops(struct net_device *netdev); 325 - extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 326 - struct ixgbe_ring *rxdr); 327 - extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 328 - struct ixgbe_ring *txdr); 334 + extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 335 + extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 336 + extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 337 + extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 338 + extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 329 339 330 340 #endif /* _IXGBE_H_ */
+517 -107
drivers/net/ixgbe/ixgbe_82598.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 38 39 #define IXGBE_82598_MC_TBL_SIZE 128 39 40 #define IXGBE_82598_VFT_TBL_SIZE 128 40 41 41 - static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw); 42 - static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, 43 - bool *autoneg); 44 - static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, 45 - u32 *speed, bool *autoneg); 46 - static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); 47 - static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw); 48 - static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, 49 - bool *link_up); 50 - static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, u32 speed, 51 - bool autoneg, 52 - bool autoneg_wait_to_complete); 42 + static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, 43 + ixgbe_link_speed *speed, 44 + bool *autoneg); 53 45 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw); 54 - static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, 55 - bool autoneg, 56 - bool autoneg_wait_to_complete); 57 - static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); 46 + static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, 47 + ixgbe_link_speed speed, 48 + bool autoneg, 49 + bool autoneg_wait_to_complete); 58 50 59 - 51 + /** 52 + */ 60 53 static s32 
ixgbe_get_invariants_82598(struct ixgbe_hw *hw) 61 54 { 62 - hw->mac.num_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 63 - hw->mac.num_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 64 - hw->mac.mcft_size = IXGBE_82598_MC_TBL_SIZE; 65 - hw->mac.vft_size = IXGBE_82598_VFT_TBL_SIZE; 66 - hw->mac.num_rar_entries = IXGBE_82598_RAR_ENTRIES; 55 + struct ixgbe_mac_info *mac = &hw->mac; 56 + struct ixgbe_phy_info *phy = &hw->phy; 67 57 68 - /* PHY ops are filled in by default properly for Fiber only */ 69 - if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) { 70 - hw->mac.ops.setup_link = &ixgbe_setup_copper_link_82598; 71 - hw->mac.ops.setup_link_speed = &ixgbe_setup_copper_link_speed_82598; 72 - hw->mac.ops.get_link_settings = 73 - &ixgbe_get_copper_link_settings_82598; 58 + /* Call PHY identify routine to get the phy type */ 59 + ixgbe_identify_phy_generic(hw); 74 60 75 - /* Call PHY identify routine to get the phy type */ 76 - ixgbe_identify_phy(hw); 77 - 78 - switch (hw->phy.type) { 79 - case ixgbe_phy_tn: 80 - hw->phy.ops.setup_link = &ixgbe_setup_tnx_phy_link; 81 - hw->phy.ops.check_link = &ixgbe_check_tnx_phy_link; 82 - hw->phy.ops.setup_link_speed = 83 - &ixgbe_setup_tnx_phy_link_speed; 84 - break; 85 - default: 86 - break; 87 - } 61 + /* PHY Init */ 62 + switch (phy->type) { 63 + default: 64 + break; 88 65 } 66 + 67 + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 68 + mac->ops.setup_link = &ixgbe_setup_copper_link_82598; 69 + mac->ops.setup_link_speed = 70 + &ixgbe_setup_copper_link_speed_82598; 71 + mac->ops.get_link_capabilities = 72 + &ixgbe_get_copper_link_capabilities_82598; 73 + } 74 + 75 + mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; 76 + mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; 77 + mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; 78 + mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; 79 + mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; 89 80 90 81 return 0; 91 82 } 92 83 93 84 /** 94 - * ixgbe_get_link_settings_82598 - Determines default link 
settings 85 + * ixgbe_get_link_capabilities_82598 - Determines link capabilities 95 86 * @hw: pointer to hardware structure 96 87 * @speed: pointer to link speed 97 88 * @autoneg: boolean auto-negotiation value 98 89 * 99 - * Determines the default link settings by reading the AUTOC register. 90 + * Determines the link capabilities by reading the AUTOC register. 100 91 **/ 101 - static s32 ixgbe_get_link_settings_82598(struct ixgbe_hw *hw, u32 *speed, 102 - bool *autoneg) 92 + static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, 93 + ixgbe_link_speed *speed, 94 + bool *autoneg) 103 95 { 104 96 s32 status = 0; 105 97 s32 autoc_reg; ··· 139 149 } 140 150 141 151 /** 142 - * ixgbe_get_copper_link_settings_82598 - Determines default link settings 152 + * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities 143 153 * @hw: pointer to hardware structure 144 154 * @speed: pointer to link speed 145 155 * @autoneg: boolean auto-negotiation value 146 156 * 147 - * Determines the default link settings by reading the AUTOC register. 157 + * Determines the link capabilities by reading the AUTOC register. 
148 158 **/ 149 - static s32 ixgbe_get_copper_link_settings_82598(struct ixgbe_hw *hw, 150 - u32 *speed, bool *autoneg) 159 + s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw, 160 + ixgbe_link_speed *speed, 161 + bool *autoneg) 151 162 { 152 163 s32 status = IXGBE_ERR_LINK_SETUP; 153 164 u16 speed_ability; ··· 156 165 *speed = 0; 157 166 *autoneg = true; 158 167 159 - status = ixgbe_read_phy_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, 160 - IXGBE_MDIO_PMA_PMD_DEV_TYPE, 161 - &speed_ability); 168 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, 169 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, 170 + &speed_ability); 162 171 163 172 if (status == 0) { 164 173 if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) ··· 186 195 case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 187 196 case IXGBE_DEV_ID_82598EB_CX4: 188 197 case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 198 + case IXGBE_DEV_ID_82598EB_XF_LR: 189 199 media_type = ixgbe_media_type_fiber; 190 - break; 191 - case IXGBE_DEV_ID_82598AT_DUAL_PORT: 192 - media_type = ixgbe_media_type_copper; 193 200 break; 194 201 default: 195 202 media_type = ixgbe_media_type_unknown; ··· 195 206 } 196 207 197 208 return media_type; 209 + } 210 + 211 + /** 212 + * ixgbe_setup_fc_82598 - Configure flow control settings 213 + * @hw: pointer to hardware structure 214 + * @packetbuf_num: packet buffer number (0-7) 215 + * 216 + * Configures the flow control settings based on SW configuration. This 217 + * function is used for 802.3x flow control configuration only. 
218 + **/ 219 + s32 ixgbe_setup_fc_82598(struct ixgbe_hw *hw, s32 packetbuf_num) 220 + { 221 + u32 frctl_reg; 222 + u32 rmcs_reg; 223 + 224 + if (packetbuf_num < 0 || packetbuf_num > 7) { 225 + hw_dbg(hw, "Invalid packet buffer number [%d], expected range is" 226 + " 0-7\n", packetbuf_num); 227 + } 228 + 229 + frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 230 + frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); 231 + 232 + rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 233 + rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); 234 + 235 + /* 236 + * 10 gig parts do not have a word in the EEPROM to determine the 237 + * default flow control setting, so we explicitly set it to full. 238 + */ 239 + if (hw->fc.type == ixgbe_fc_default) 240 + hw->fc.type = ixgbe_fc_full; 241 + 242 + /* 243 + * We want to save off the original Flow Control configuration just in 244 + * case we get disconnected and then reconnected into a different hub 245 + * or switch with different Flow Control capabilities. 246 + */ 247 + hw->fc.original_type = hw->fc.type; 248 + 249 + /* 250 + * The possible values of the "flow_control" parameter are: 251 + * 0: Flow control is completely disabled 252 + * 1: Rx flow control is enabled (we can receive pause frames but not 253 + * send pause frames). 254 + * 2: Tx flow control is enabled (we can send pause frames but we do not 255 + * support receiving pause frames) 256 + * 3: Both Rx and Tx flow control (symmetric) are enabled. 257 + * other: Invalid. 258 + */ 259 + switch (hw->fc.type) { 260 + case ixgbe_fc_none: 261 + break; 262 + case ixgbe_fc_rx_pause: 263 + /* 264 + * Rx Flow control is enabled, 265 + * and Tx Flow control is disabled. 266 + */ 267 + frctl_reg |= IXGBE_FCTRL_RFCE; 268 + break; 269 + case ixgbe_fc_tx_pause: 270 + /* 271 + * Tx Flow control is enabled, and Rx Flow control is disabled, 272 + * by a software over-ride. 
273 + */ 274 + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 275 + break; 276 + case ixgbe_fc_full: 277 + /* 278 + * Flow control (both Rx and Tx) is enabled by a software 279 + * over-ride. 280 + */ 281 + frctl_reg |= IXGBE_FCTRL_RFCE; 282 + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 283 + break; 284 + default: 285 + /* We should never get here. The value should be 0-3. */ 286 + hw_dbg(hw, "Flow control param set incorrectly\n"); 287 + break; 288 + } 289 + 290 + /* Enable 802.3x based flow control settings. */ 291 + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); 292 + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 293 + 294 + /* 295 + * Check for invalid software configuration, zeros are completely 296 + * invalid for all parameters used past this point, and if we enable 297 + * flow control with zero water marks, we blast flow control packets. 298 + */ 299 + if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { 300 + hw_dbg(hw, "Flow control structure initialized incorrectly\n"); 301 + return IXGBE_ERR_INVALID_LINK_SETTINGS; 302 + } 303 + 304 + /* 305 + * We need to set up the Receive Threshold high and low water 306 + * marks as well as (optionally) enabling the transmission of 307 + * XON frames. 
308 + */ 309 + if (hw->fc.type & ixgbe_fc_tx_pause) { 310 + if (hw->fc.send_xon) { 311 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 312 + (hw->fc.low_water | IXGBE_FCRTL_XONE)); 313 + } else { 314 + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 315 + hw->fc.low_water); 316 + } 317 + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), 318 + (hw->fc.high_water)|IXGBE_FCRTH_FCEN); 319 + } 320 + 321 + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); 322 + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 323 + 324 + return 0; 198 325 } 199 326 200 327 /** ··· 357 252 } 358 253 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 359 254 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 360 - hw_dbg(hw, 361 - "Autonegotiation did not complete.\n"); 255 + hw_dbg(hw, "Autonegotiation did not complete.\n"); 362 256 } 363 257 } 364 258 } ··· 367 263 * case we get disconnected and then reconnected into a different hub 368 264 * or switch with different Flow Control capabilities. 369 265 */ 370 - hw->fc.type = hw->fc.original_type; 371 - ixgbe_setup_fc(hw, 0); 266 + hw->fc.original_type = hw->fc.type; 267 + ixgbe_setup_fc_82598(hw, 0); 372 268 373 269 /* Add delay to filter out noises during initial link setup */ 374 270 msleep(50); ··· 381 277 * @hw: pointer to hardware structure 382 278 * @speed: pointer to link speed 383 279 * @link_up: true is link is up, false otherwise 280 + * @link_up_wait_to_complete: bool used to wait for link up or not 384 281 * 385 282 * Reads the links register to determine if link is up and the current speed 386 283 **/ 387 - static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, u32 *speed, 388 - bool *link_up) 284 + static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, 285 + ixgbe_link_speed *speed, bool *link_up, 286 + bool link_up_wait_to_complete) 389 287 { 390 288 u32 links_reg; 289 + u32 i; 391 290 392 291 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 393 - 394 - if (links_reg & IXGBE_LINKS_UP) 395 - *link_up = true; 396 - else 
397 - *link_up = false; 292 + if (link_up_wait_to_complete) { 293 + for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { 294 + if (links_reg & IXGBE_LINKS_UP) { 295 + *link_up = true; 296 + break; 297 + } else { 298 + *link_up = false; 299 + } 300 + msleep(100); 301 + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 302 + } 303 + } else { 304 + if (links_reg & IXGBE_LINKS_UP) 305 + *link_up = true; 306 + else 307 + *link_up = false; 308 + } 398 309 399 310 if (links_reg & IXGBE_LINKS_SPEED) 400 311 *speed = IXGBE_LINK_SPEED_10GB_FULL; ··· 418 299 419 300 return 0; 420 301 } 302 + 421 303 422 304 /** 423 305 * ixgbe_setup_mac_link_speed_82598 - Set MAC link speed ··· 430 310 * Set the link speed in the AUTOC register and restarts link. 431 311 **/ 432 312 static s32 ixgbe_setup_mac_link_speed_82598(struct ixgbe_hw *hw, 433 - u32 speed, bool autoneg, 434 - bool autoneg_wait_to_complete) 313 + ixgbe_link_speed speed, bool autoneg, 314 + bool autoneg_wait_to_complete) 435 315 { 436 316 s32 status = 0; 437 317 438 318 /* If speed is 10G, then check for CX4 or XAUI. 
*/ 439 319 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 440 - (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) 320 + (!(hw->mac.link_attach_type & IXGBE_AUTOC_10G_KX4))) { 441 321 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 442 - else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) 322 + } else if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && (!autoneg)) { 443 323 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_LINK_NO_AN; 444 - else if (autoneg) { 324 + } else if (autoneg) { 445 325 /* BX mode - Autonegotiate 1G */ 446 326 if (!(hw->mac.link_attach_type & IXGBE_AUTOC_1G_PMA_PMD)) 447 327 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_1G_AN; ··· 460 340 * ixgbe_hw This will write the AUTOC register based on the new 461 341 * stored values 462 342 */ 463 - hw->mac.ops.setup_link(hw); 343 + ixgbe_setup_mac_link_82598(hw); 464 344 } 465 345 466 346 return status; ··· 478 358 **/ 479 359 static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw) 480 360 { 481 - s32 status = 0; 361 + s32 status; 482 362 483 363 /* Restart autonegotiation on PHY */ 484 - if (hw->phy.ops.setup_link) 485 - status = hw->phy.ops.setup_link(hw); 364 + status = hw->phy.ops.setup_link(hw); 486 365 487 - /* Set MAC to KX/KX4 autoneg, which defaultis to Parallel detection */ 366 + /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ 488 367 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); 489 368 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; 490 369 491 370 /* Set up MAC */ 492 - hw->mac.ops.setup_link(hw); 371 + ixgbe_setup_mac_link_82598(hw); 493 372 494 373 return status; 495 374 } ··· 502 383 * 503 384 * Sets the link speed in the AUTOC register in the MAC and restarts link. 
504 385 **/ 505 - static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, u32 speed, 506 - bool autoneg, 507 - bool autoneg_wait_to_complete) 386 + static s32 ixgbe_setup_copper_link_speed_82598(struct ixgbe_hw *hw, 387 + ixgbe_link_speed speed, 388 + bool autoneg, 389 + bool autoneg_wait_to_complete) 508 390 { 509 - s32 status = 0; 391 + s32 status; 510 392 511 393 /* Setup the PHY according to input speed */ 512 - if (hw->phy.ops.setup_link_speed) 513 - status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 514 - autoneg_wait_to_complete); 394 + status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 395 + autoneg_wait_to_complete); 515 396 516 397 /* Set MAC to KX/KX4 autoneg, which defaults to Parallel detection */ 517 398 hw->mac.link_attach_type = (IXGBE_AUTOC_10G_KX4 | IXGBE_AUTOC_1G_KX); 518 399 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_KX4_AN; 519 400 520 401 /* Set up MAC */ 521 - hw->mac.ops.setup_link(hw); 402 + ixgbe_setup_mac_link_82598(hw); 522 403 523 404 return status; 524 405 } ··· 527 408 * ixgbe_reset_hw_82598 - Performs hardware reset 528 409 * @hw: pointer to hardware structure 529 410 * 530 - * Resets the hardware by reseting the transmit and receive units, masks and 411 + * Resets the hardware by resetting the transmit and receive units, masks and 531 412 * clears all interrupts, performing a PHY reset, and performing a link (MAC) 532 413 * reset. 533 414 **/ ··· 541 422 u8 analog_val; 542 423 543 424 /* Call adapter stop to disable tx/rx and clear interrupts */ 544 - ixgbe_stop_adapter(hw); 425 + hw->mac.ops.stop_adapter(hw); 545 426 546 427 /* 547 - * Power up the Atlas TX lanes if they are currently powered down. 548 - * Atlas TX lanes are powered down for MAC loopback tests, but 428 + * Power up the Atlas Tx lanes if they are currently powered down. 429 + * Atlas Tx lanes are powered down for MAC loopback tests, but 549 430 * they are not automatically restored on reset. 
550 431 */ 551 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 432 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 552 433 if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) { 553 - /* Enable TX Atlas so packets can be transmitted again */ 554 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &analog_val); 434 + /* Enable Tx Atlas so packets can be transmitted again */ 435 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 436 + &analog_val); 555 437 analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN; 556 - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, analog_val); 438 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, 439 + analog_val); 557 440 558 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &analog_val); 441 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 442 + &analog_val); 559 443 analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL; 560 - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, analog_val); 444 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, 445 + analog_val); 561 446 562 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &analog_val); 447 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 448 + &analog_val); 563 449 analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL; 564 - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, analog_val); 450 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, 451 + analog_val); 565 452 566 - ixgbe_read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &analog_val); 453 + hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 454 + &analog_val); 567 455 analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL; 568 - ixgbe_write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, analog_val); 456 + hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, 457 + analog_val); 569 458 } 570 459 571 460 /* Reset PHY */ 572 - ixgbe_reset_phy(hw); 461 + if (hw->phy.reset_disable == false) 462 + hw->phy.ops.reset(hw); 573 463 574 464 /* 575 465 * Prevent the PCI-E bus from from hanging by disabling PCI-E master ··· 631 503 
IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 632 504 } else { 633 505 hw->mac.link_attach_type = 634 - (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); 506 + (autoc & IXGBE_AUTOC_LMS_ATTACH_TYPE); 635 507 hw->mac.link_mode_select = (autoc & IXGBE_AUTOC_LMS_MASK); 636 508 hw->mac.link_settings_loaded = true; 637 509 } 638 510 639 511 /* Store the permanent mac address */ 640 - ixgbe_get_mac_addr(hw, hw->mac.perm_addr); 512 + hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); 641 513 642 514 return status; 643 515 } 644 516 517 + /** 518 + * ixgbe_set_vmdq_82598 - Associate a VMDq set index with a rx address 519 + * @hw: pointer to hardware struct 520 + * @rar: receive address register index to associate with a VMDq index 521 + * @vmdq: VMDq set index 522 + **/ 523 + s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 524 + { 525 + u32 rar_high; 526 + 527 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 528 + rar_high &= ~IXGBE_RAH_VIND_MASK; 529 + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); 530 + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); 531 + return 0; 532 + } 533 + 534 + /** 535 + * ixgbe_clear_vmdq_82598 - Disassociate a VMDq set index from an rx address 536 + * @hw: pointer to hardware struct 537 + * @rar: receive address register index to associate with a VMDq index 538 + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) 539 + **/ 540 + static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) 541 + { 542 + u32 rar_high; 543 + u32 rar_entries = hw->mac.num_rar_entries; 544 + 545 + if (rar < rar_entries) { 546 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); 547 + if (rar_high & IXGBE_RAH_VIND_MASK) { 548 + rar_high &= ~IXGBE_RAH_VIND_MASK; 549 + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); 550 + } 551 + } else { 552 + hw_dbg(hw, "RAR index %d is out of range.\n", rar); 553 + } 554 + 555 + return 0; 556 + } 557 + 558 + /** 559 + * ixgbe_set_vfta_82598 - Set VLAN filter table 560 + * @hw: pointer to hardware 
structure 561 + * @vlan: VLAN id to write to VLAN filter 562 + * @vind: VMDq output index that maps queue to VLAN id in VFTA 563 + * @vlan_on: boolean flag to turn on/off VLAN in VFTA 564 + * 565 + * Turn on/off specified VLAN in the VLAN filter table. 566 + **/ 567 + s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, 568 + bool vlan_on) 569 + { 570 + u32 regindex; 571 + u32 bitindex; 572 + u32 bits; 573 + u32 vftabyte; 574 + 575 + if (vlan > 4095) 576 + return IXGBE_ERR_PARAM; 577 + 578 + /* Determine 32-bit word position in array */ 579 + regindex = (vlan >> 5) & 0x7F; /* upper seven bits */ 580 + 581 + /* Determine the location of the (VMD) queue index */ 582 + vftabyte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ 583 + bitindex = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ 584 + 585 + /* Set the nibble for VMD queue index */ 586 + bits = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex)); 587 + bits &= (~(0x0F << bitindex)); 588 + bits |= (vind << bitindex); 589 + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vftabyte, regindex), bits); 590 + 591 + /* Determine the location of the bit for this VLAN id */ 592 + bitindex = vlan & 0x1F; /* lower five bits */ 593 + 594 + bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); 595 + if (vlan_on) 596 + /* Turn on this VLAN id */ 597 + bits |= (1 << bitindex); 598 + else 599 + /* Turn off this VLAN id */ 600 + bits &= ~(1 << bitindex); 601 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); 602 + 603 + return 0; 604 + } 605 + 606 + /** 607 + * ixgbe_clear_vfta_82598 - Clear VLAN filter table 608 + * @hw: pointer to hardware structure 609 + * 610 + * Clears the VLAN filer table, and the VMDq index associated with the filter 611 + **/ 612 + static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) 613 + { 614 + u32 offset; 615 + u32 vlanbyte; 616 + 617 + for (offset = 0; offset < hw->mac.vft_size; offset++) 618 + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 619 + 620 + for (vlanbyte = 0; vlanbyte 
< 4; vlanbyte++) 621 + for (offset = 0; offset < hw->mac.vft_size; offset++) 622 + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 623 + 0); 624 + 625 + return 0; 626 + } 627 + 628 + /** 629 + * ixgbe_blink_led_start_82598 - Blink LED based on index. 630 + * @hw: pointer to hardware structure 631 + * @index: led number to blink 632 + **/ 633 + static s32 ixgbe_blink_led_start_82598(struct ixgbe_hw *hw, u32 index) 634 + { 635 + ixgbe_link_speed speed = 0; 636 + bool link_up = 0; 637 + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 638 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 639 + 640 + /* 641 + * Link must be up to auto-blink the LEDs on the 82598EB MAC; 642 + * force it if link is down. 643 + */ 644 + hw->mac.ops.check_link(hw, &speed, &link_up, false); 645 + 646 + if (!link_up) { 647 + autoc_reg |= IXGBE_AUTOC_FLU; 648 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 649 + msleep(10); 650 + } 651 + 652 + led_reg &= ~IXGBE_LED_MODE_MASK(index); 653 + led_reg |= IXGBE_LED_BLINK(index); 654 + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 655 + IXGBE_WRITE_FLUSH(hw); 656 + 657 + return 0; 658 + } 659 + 660 + /** 661 + * ixgbe_blink_led_stop_82598 - Stop blinking LED based on index. 
662 + * @hw: pointer to hardware structure 663 + * @index: led number to stop blinking 664 + **/ 665 + static s32 ixgbe_blink_led_stop_82598(struct ixgbe_hw *hw, u32 index) 666 + { 667 + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 668 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 669 + 670 + autoc_reg &= ~IXGBE_AUTOC_FLU; 671 + autoc_reg |= IXGBE_AUTOC_AN_RESTART; 672 + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 673 + 674 + led_reg &= ~IXGBE_LED_MODE_MASK(index); 675 + led_reg &= ~IXGBE_LED_BLINK(index); 676 + led_reg |= IXGBE_LED_LINK_ACTIVE << IXGBE_LED_MODE_SHIFT(index); 677 + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); 678 + IXGBE_WRITE_FLUSH(hw); 679 + 680 + return 0; 681 + } 682 + 683 + /** 684 + * ixgbe_read_analog_reg8_82598 - Reads 8 bit Atlas analog register 685 + * @hw: pointer to hardware structure 686 + * @reg: analog register to read 687 + * @val: read value 688 + * 689 + * Performs read operation to Atlas analog register specified. 690 + **/ 691 + s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) 692 + { 693 + u32 atlas_ctl; 694 + 695 + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, 696 + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); 697 + IXGBE_WRITE_FLUSH(hw); 698 + udelay(10); 699 + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 700 + *val = (u8)atlas_ctl; 701 + 702 + return 0; 703 + } 704 + 705 + /** 706 + * ixgbe_write_analog_reg8_82598 - Writes 8 bit Atlas analog register 707 + * @hw: pointer to hardware structure 708 + * @reg: atlas register to write 709 + * @val: value to write 710 + * 711 + * Performs write operation to Atlas analog register specified. 
712 + **/ 713 + s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) 714 + { 715 + u32 atlas_ctl; 716 + 717 + atlas_ctl = (reg << 8) | val; 718 + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); 719 + IXGBE_WRITE_FLUSH(hw); 720 + udelay(10); 721 + 722 + return 0; 723 + } 724 + 725 + /** 726 + * ixgbe_get_supported_physical_layer_82598 - Returns physical layer type 727 + * @hw: pointer to hardware structure 728 + * 729 + * Determines physical layer capabilities of the current configuration. 730 + **/ 731 + s32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) 732 + { 733 + s32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 734 + 735 + switch (hw->device_id) { 736 + case IXGBE_DEV_ID_82598EB_CX4: 737 + case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: 738 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4; 739 + break; 740 + case IXGBE_DEV_ID_82598AF_DUAL_PORT: 741 + case IXGBE_DEV_ID_82598AF_SINGLE_PORT: 742 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; 743 + break; 744 + case IXGBE_DEV_ID_82598EB_XF_LR: 745 + physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; 746 + break; 747 + 748 + default: 749 + physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; 750 + break; 751 + } 752 + 753 + return physical_layer; 754 + } 755 + 645 756 static struct ixgbe_mac_operations mac_ops_82598 = { 646 - .reset = &ixgbe_reset_hw_82598, 757 + .init_hw = &ixgbe_init_hw_generic, 758 + .reset_hw = &ixgbe_reset_hw_82598, 759 + .start_hw = &ixgbe_start_hw_generic, 760 + .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, 647 761 .get_media_type = &ixgbe_get_media_type_82598, 762 + .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, 763 + .get_mac_addr = &ixgbe_get_mac_addr_generic, 764 + .stop_adapter = &ixgbe_stop_adapter_generic, 765 + .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, 766 + .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, 648 767 .setup_link = &ixgbe_setup_mac_link_82598, 649 - .check_link = &ixgbe_check_mac_link_82598, 650 768 
.setup_link_speed = &ixgbe_setup_mac_link_speed_82598, 651 - .get_link_settings = &ixgbe_get_link_settings_82598, 769 + .check_link = &ixgbe_check_mac_link_82598, 770 + .get_link_capabilities = &ixgbe_get_link_capabilities_82598, 771 + .led_on = &ixgbe_led_on_generic, 772 + .led_off = &ixgbe_led_off_generic, 773 + .blink_led_start = &ixgbe_blink_led_start_82598, 774 + .blink_led_stop = &ixgbe_blink_led_stop_82598, 775 + .set_rar = &ixgbe_set_rar_generic, 776 + .clear_rar = &ixgbe_clear_rar_generic, 777 + .set_vmdq = &ixgbe_set_vmdq_82598, 778 + .clear_vmdq = &ixgbe_clear_vmdq_82598, 779 + .init_rx_addrs = &ixgbe_init_rx_addrs_generic, 780 + .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic, 781 + .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, 782 + .enable_mc = &ixgbe_enable_mc_generic, 783 + .disable_mc = &ixgbe_disable_mc_generic, 784 + .clear_vfta = &ixgbe_clear_vfta_82598, 785 + .set_vfta = &ixgbe_set_vfta_82598, 786 + .setup_fc = &ixgbe_setup_fc_82598, 787 + }; 788 + 789 + static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 790 + .init_params = &ixgbe_init_eeprom_params_generic, 791 + .read = &ixgbe_read_eeprom_generic, 792 + .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 793 + .update_checksum = &ixgbe_update_eeprom_checksum_generic, 794 + }; 795 + 796 + static struct ixgbe_phy_operations phy_ops_82598 = { 797 + .identify = &ixgbe_identify_phy_generic, 798 + /* .identify_sfp = &ixgbe_identify_sfp_module_generic, */ 799 + .reset = &ixgbe_reset_phy_generic, 800 + .read_reg = &ixgbe_read_phy_reg_generic, 801 + .write_reg = &ixgbe_write_phy_reg_generic, 802 + .setup_link = &ixgbe_setup_phy_link_generic, 803 + .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, 652 804 }; 653 805 654 806 struct ixgbe_info ixgbe_82598_info = { 655 807 .mac = ixgbe_mac_82598EB, 656 808 .get_invariants = &ixgbe_get_invariants_82598, 657 809 .mac_ops = &mac_ops_82598, 810 + .eeprom_ops = &eeprom_ops_82598, 811 + .phy_ops = &phy_ops_82598, 
658 812 }; 659 813
+637 -335
drivers/net/ixgbe/ixgbe_common.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 32 33 #include "ixgbe_common.h" 33 34 #include "ixgbe_phy.h" 34 35 35 - static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); 36 - 37 36 static s32 ixgbe_poll_eeprom_eerd_done(struct ixgbe_hw *hw); 37 + static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); 38 38 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); 39 39 static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); 40 + static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); 41 + static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); 42 + static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 43 + u16 count); 44 + static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); 45 + static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46 + static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 47 + static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 40 48 static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw); 41 49 42 - static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); 43 - static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); 50 + static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); 51 + static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); 44 52 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 45 53 static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr); 54 + 
static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 46 55 47 56 /** 48 - * ixgbe_start_hw - Prepare hardware for TX/RX 57 + * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 49 58 * @hw: pointer to hardware structure 50 59 * 51 60 * Starts the hardware by filling the bus info structure and media type, clears ··· 61 54 * table, VLAN filter table, calls routine to set up link and flow control 62 55 * settings, and leaves transmit and receive units disabled and uninitialized 63 56 **/ 64 - s32 ixgbe_start_hw(struct ixgbe_hw *hw) 57 + s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) 65 58 { 66 59 u32 ctrl_ext; 67 60 ··· 69 62 hw->phy.media_type = hw->mac.ops.get_media_type(hw); 70 63 71 64 /* Identify the PHY */ 72 - ixgbe_identify_phy(hw); 65 + hw->phy.ops.identify(hw); 73 66 74 67 /* 75 68 * Store MAC address from RAR0, clear receive address registers, and 76 69 * clear the multicast table 77 70 */ 78 - ixgbe_init_rx_addrs(hw); 71 + hw->mac.ops.init_rx_addrs(hw); 79 72 80 73 /* Clear the VLAN filter table */ 81 - ixgbe_clear_vfta(hw); 74 + hw->mac.ops.clear_vfta(hw); 82 75 83 76 /* Set up link */ 84 77 hw->mac.ops.setup_link(hw); 85 78 86 79 /* Clear statistics registers */ 87 - ixgbe_clear_hw_cntrs(hw); 80 + hw->mac.ops.clear_hw_cntrs(hw); 88 81 89 82 /* Set No Snoop Disable */ 90 83 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); ··· 99 92 } 100 93 101 94 /** 102 - * ixgbe_init_hw - Generic hardware initialization 95 + * ixgbe_init_hw_generic - Generic hardware initialization 103 96 * @hw: pointer to hardware structure 104 97 * 105 - * Initialize the hardware by reseting the hardware, filling the bus info 98 + * Initialize the hardware by resetting the hardware, filling the bus info 106 99 * structure and media type, clears all on chip counters, initializes receive 107 100 * address registers, multicast table, VLAN filter table, calls routine to set 108 101 * up link and flow control settings, and leaves transmit and receive units 109 102 * 
disabled and uninitialized 110 103 **/ 111 - s32 ixgbe_init_hw(struct ixgbe_hw *hw) 104 + s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) 112 105 { 113 106 /* Reset the hardware */ 114 - hw->mac.ops.reset(hw); 107 + hw->mac.ops.reset_hw(hw); 115 108 116 109 /* Start the HW */ 117 - ixgbe_start_hw(hw); 110 + hw->mac.ops.start_hw(hw); 118 111 119 112 return 0; 120 113 } 121 114 122 115 /** 123 - * ixgbe_clear_hw_cntrs - Generic clear hardware counters 116 + * ixgbe_clear_hw_cntrs_generic - Generic clear hardware counters 124 117 * @hw: pointer to hardware structure 125 118 * 126 119 * Clears all hardware statistics counters by reading them from the hardware 127 120 * Statistics counters are clear on read. 128 121 **/ 129 - static s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) 122 + s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) 130 123 { 131 124 u16 i = 0; 132 125 ··· 198 191 } 199 192 200 193 /** 201 - * ixgbe_get_mac_addr - Generic get MAC address 194 + * ixgbe_read_pba_num_generic - Reads part number from EEPROM 195 + * @hw: pointer to hardware structure 196 + * @pba_num: stores the part number from the EEPROM 197 + * 198 + * Reads the part number from the EEPROM. 
199 + **/ 200 + s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 201 + { 202 + s32 ret_val; 203 + u16 data; 204 + 205 + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 206 + if (ret_val) { 207 + hw_dbg(hw, "NVM Read Error\n"); 208 + return ret_val; 209 + } 210 + *pba_num = (u32)(data << 16); 211 + 212 + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 213 + if (ret_val) { 214 + hw_dbg(hw, "NVM Read Error\n"); 215 + return ret_val; 216 + } 217 + *pba_num |= data; 218 + 219 + return 0; 220 + } 221 + 222 + /** 223 + * ixgbe_get_mac_addr_generic - Generic get MAC address 202 224 * @hw: pointer to hardware structure 203 225 * @mac_addr: Adapter MAC address 204 226 * ··· 235 199 * A reset of the adapter must be performed prior to calling this function 236 200 * in order for the MAC address to have been loaded from the EEPROM into RAR0 237 201 **/ 238 - s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) 202 + s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) 239 203 { 240 204 u32 rar_high; 241 205 u32 rar_low; ··· 253 217 return 0; 254 218 } 255 219 256 - s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num) 257 - { 258 - s32 ret_val; 259 - u16 data; 260 - 261 - ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM0_PTR, &data); 262 - if (ret_val) { 263 - hw_dbg(hw, "NVM Read Error\n"); 264 - return ret_val; 265 - } 266 - *part_num = (u32)(data << 16); 267 - 268 - ret_val = ixgbe_read_eeprom(hw, IXGBE_PBANUM1_PTR, &data); 269 - if (ret_val) { 270 - hw_dbg(hw, "NVM Read Error\n"); 271 - return ret_val; 272 - } 273 - *part_num |= data; 274 - 275 - return 0; 276 - } 277 - 278 220 /** 279 - * ixgbe_stop_adapter - Generic stop TX/RX units 221 + * ixgbe_stop_adapter_generic - Generic stop Tx/Rx units 280 222 * @hw: pointer to hardware structure 281 223 * 282 224 * Sets the adapter_stopped flag within ixgbe_hw struct. 
Clears interrupts, ··· 262 248 * the shared code and drivers to determine if the adapter is in a stopped 263 249 * state and should not touch the hardware. 264 250 **/ 265 - s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) 251 + s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) 266 252 { 267 253 u32 number_of_queues; 268 254 u32 reg_val; ··· 278 264 reg_val = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 279 265 reg_val &= ~(IXGBE_RXCTRL_RXEN); 280 266 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_val); 267 + IXGBE_WRITE_FLUSH(hw); 281 268 msleep(2); 282 269 283 270 /* Clear interrupt mask to stop from interrupts being generated */ ··· 288 273 IXGBE_READ_REG(hw, IXGBE_EICR); 289 274 290 275 /* Disable the transmit unit. Each queue must be disabled. */ 291 - number_of_queues = hw->mac.num_tx_queues; 276 + number_of_queues = hw->mac.max_tx_queues; 292 277 for (i = 0; i < number_of_queues; i++) { 293 278 reg_val = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); 294 279 if (reg_val & IXGBE_TXDCTL_ENABLE) { ··· 297 282 } 298 283 } 299 284 285 + /* 286 + * Prevent the PCI-E bus from from hanging by disabling PCI-E master 287 + * access and verify no pending requests 288 + */ 289 + if (ixgbe_disable_pcie_master(hw) != 0) 290 + hw_dbg(hw, "PCI-E Master disable polling has failed.\n"); 291 + 300 292 return 0; 301 293 } 302 294 303 295 /** 304 - * ixgbe_led_on - Turns on the software controllable LEDs. 296 + * ixgbe_led_on_generic - Turns on the software controllable LEDs. 305 297 * @hw: pointer to hardware structure 306 298 * @index: led number to turn on 307 299 **/ 308 - s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) 300 + s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) 309 301 { 310 302 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 311 303 ··· 326 304 } 327 305 328 306 /** 329 - * ixgbe_led_off - Turns off the software controllable LEDs. 307 + * ixgbe_led_off_generic - Turns off the software controllable LEDs. 
330 308 * @hw: pointer to hardware structure 331 309 * @index: led number to turn off 332 310 **/ 333 - s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) 311 + s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) 334 312 { 335 313 u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 336 314 ··· 343 321 return 0; 344 322 } 345 323 346 - 347 324 /** 348 - * ixgbe_init_eeprom - Initialize EEPROM params 325 + * ixgbe_init_eeprom_params_generic - Initialize EEPROM params 349 326 * @hw: pointer to hardware structure 350 327 * 351 328 * Initializes the EEPROM parameters ixgbe_eeprom_info within the 352 329 * ixgbe_hw struct in order to set up EEPROM access. 353 330 **/ 354 - s32 ixgbe_init_eeprom(struct ixgbe_hw *hw) 331 + s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) 355 332 { 356 333 struct ixgbe_eeprom_info *eeprom = &hw->eeprom; 357 334 u32 eec; ··· 358 337 359 338 if (eeprom->type == ixgbe_eeprom_uninitialized) { 360 339 eeprom->type = ixgbe_eeprom_none; 340 + /* Set default semaphore delay to 10ms which is a well 341 + * tested value */ 342 + eeprom->semaphore_delay = 10; 361 343 362 344 /* 363 345 * Check for EEPROM present first. 
··· 393 369 } 394 370 395 371 /** 396 - * ixgbe_read_eeprom - Read EEPROM word using EERD 372 + * ixgbe_read_eeprom_bit_bang_generic - Read EEPROM word using bit-bang 373 + * @hw: pointer to hardware structure 374 + * @offset: offset within the EEPROM to be read 375 + * @data: read 16 bit value from EEPROM 376 + * 377 + * Reads 16 bit value from EEPROM through bit-bang method 378 + **/ 379 + s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 380 + u16 *data) 381 + { 382 + s32 status; 383 + u16 word_in; 384 + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; 385 + 386 + hw->eeprom.ops.init_params(hw); 387 + 388 + if (offset >= hw->eeprom.word_size) { 389 + status = IXGBE_ERR_EEPROM; 390 + goto out; 391 + } 392 + 393 + /* Prepare the EEPROM for reading */ 394 + status = ixgbe_acquire_eeprom(hw); 395 + 396 + if (status == 0) { 397 + if (ixgbe_ready_eeprom(hw) != 0) { 398 + ixgbe_release_eeprom(hw); 399 + status = IXGBE_ERR_EEPROM; 400 + } 401 + } 402 + 403 + if (status == 0) { 404 + ixgbe_standby_eeprom(hw); 405 + 406 + /* 407 + * Some SPI eeproms use the 8th address bit embedded in the 408 + * opcode 409 + */ 410 + if ((hw->eeprom.address_bits == 8) && (offset >= 128)) 411 + read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI; 412 + 413 + /* Send the READ command (opcode + addr) */ 414 + ixgbe_shift_out_eeprom_bits(hw, read_opcode, 415 + IXGBE_EEPROM_OPCODE_BITS); 416 + ixgbe_shift_out_eeprom_bits(hw, (u16)(offset*2), 417 + hw->eeprom.address_bits); 418 + 419 + /* Read the data. 
*/ 420 + word_in = ixgbe_shift_in_eeprom_bits(hw, 16); 421 + *data = (word_in >> 8) | (word_in << 8); 422 + 423 + /* End this read operation */ 424 + ixgbe_release_eeprom(hw); 425 + } 426 + 427 + out: 428 + return status; 429 + } 430 + 431 + /** 432 + * ixgbe_read_eeprom_generic - Read EEPROM word using EERD 397 433 * @hw: pointer to hardware structure 398 434 * @offset: offset of word in the EEPROM to read 399 435 * @data: word read from the EEPROM 400 436 * 401 437 * Reads a 16 bit word from the EEPROM using the EERD register. 402 438 **/ 403 - s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) 439 + s32 ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) 404 440 { 405 441 u32 eerd; 406 442 s32 status; 443 + 444 + hw->eeprom.ops.init_params(hw); 445 + 446 + if (offset >= hw->eeprom.word_size) { 447 + status = IXGBE_ERR_EEPROM; 448 + goto out; 449 + } 407 450 408 451 eerd = (offset << IXGBE_EEPROM_READ_ADDR_SHIFT) + 409 452 IXGBE_EEPROM_READ_REG_START; ··· 480 389 481 390 if (status == 0) 482 391 *data = (IXGBE_READ_REG(hw, IXGBE_EERD) >> 483 - IXGBE_EEPROM_READ_REG_DATA); 392 + IXGBE_EEPROM_READ_REG_DATA); 484 393 else 485 394 hw_dbg(hw, "Eeprom read timed out\n"); 486 395 396 + out: 487 397 return status; 488 398 } 489 399 ··· 507 415 break; 508 416 } 509 417 udelay(5); 418 + } 419 + return status; 420 + } 421 + 422 + /** 423 + * ixgbe_acquire_eeprom - Acquire EEPROM using bit-bang 424 + * @hw: pointer to hardware structure 425 + * 426 + * Prepares EEPROM for access using bit-bang method. This function should 427 + * be called before issuing a command to the EEPROM. 
428 + **/ 429 + static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) 430 + { 431 + s32 status = 0; 432 + u32 eec; 433 + u32 i; 434 + 435 + if (ixgbe_acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) 436 + status = IXGBE_ERR_SWFW_SYNC; 437 + 438 + if (status == 0) { 439 + eec = IXGBE_READ_REG(hw, IXGBE_EEC); 440 + 441 + /* Request EEPROM Access */ 442 + eec |= IXGBE_EEC_REQ; 443 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 444 + 445 + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { 446 + eec = IXGBE_READ_REG(hw, IXGBE_EEC); 447 + if (eec & IXGBE_EEC_GNT) 448 + break; 449 + udelay(5); 450 + } 451 + 452 + /* Release if grant not acquired */ 453 + if (!(eec & IXGBE_EEC_GNT)) { 454 + eec &= ~IXGBE_EEC_REQ; 455 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 456 + hw_dbg(hw, "Could not acquire EEPROM grant\n"); 457 + 458 + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 459 + status = IXGBE_ERR_EEPROM; 460 + } 461 + } 462 + 463 + /* Setup EEPROM for Read/Write */ 464 + if (status == 0) { 465 + /* Clear CS and SK */ 466 + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); 467 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 468 + IXGBE_WRITE_FLUSH(hw); 469 + udelay(1); 510 470 } 511 471 return status; 512 472 } ··· 619 475 */ 620 476 if (i >= timeout) { 621 477 hw_dbg(hw, "Driver can't access the Eeprom - Semaphore " 622 - "not granted.\n"); 478 + "not granted.\n"); 623 479 ixgbe_release_eeprom_semaphore(hw); 624 480 status = IXGBE_ERR_EEPROM; 625 481 } ··· 647 503 } 648 504 649 505 /** 506 + * ixgbe_ready_eeprom - Polls for EEPROM ready 507 + * @hw: pointer to hardware structure 508 + **/ 509 + static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) 510 + { 511 + s32 status = 0; 512 + u16 i; 513 + u8 spi_stat_reg; 514 + 515 + /* 516 + * Read "Status Register" repeatedly until the LSB is cleared. The 517 + * EEPROM will signal that the command has been completed by clearing 518 + * bit 0 of the internal status register. If it's not cleared within 519 + * 5 milliseconds, then error out. 
520 + */ 521 + for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) { 522 + ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI, 523 + IXGBE_EEPROM_OPCODE_BITS); 524 + spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8); 525 + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) 526 + break; 527 + 528 + udelay(5); 529 + ixgbe_standby_eeprom(hw); 530 + }; 531 + 532 + /* 533 + * On some parts, SPI write time could vary from 0-20mSec on 3.3V 534 + * devices (and only 0-5mSec on 5V devices) 535 + */ 536 + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { 537 + hw_dbg(hw, "SPI EEPROM Status error\n"); 538 + status = IXGBE_ERR_EEPROM; 539 + } 540 + 541 + return status; 542 + } 543 + 544 + /** 545 + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state 546 + * @hw: pointer to hardware structure 547 + **/ 548 + static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) 549 + { 550 + u32 eec; 551 + 552 + eec = IXGBE_READ_REG(hw, IXGBE_EEC); 553 + 554 + /* Toggle CS to flush commands */ 555 + eec |= IXGBE_EEC_CS; 556 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 557 + IXGBE_WRITE_FLUSH(hw); 558 + udelay(1); 559 + eec &= ~IXGBE_EEC_CS; 560 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 561 + IXGBE_WRITE_FLUSH(hw); 562 + udelay(1); 563 + } 564 + 565 + /** 566 + * ixgbe_shift_out_eeprom_bits - Shift data bits out to the EEPROM. 567 + * @hw: pointer to hardware structure 568 + * @data: data to send to the EEPROM 569 + * @count: number of bits to shift out 570 + **/ 571 + static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, 572 + u16 count) 573 + { 574 + u32 eec; 575 + u32 mask; 576 + u32 i; 577 + 578 + eec = IXGBE_READ_REG(hw, IXGBE_EEC); 579 + 580 + /* 581 + * Mask is used to shift "count" bits of "data" out to the EEPROM 582 + * one bit at a time. 
Determine the starting bit based on count 583 + */ 584 + mask = 0x01 << (count - 1); 585 + 586 + for (i = 0; i < count; i++) { 587 + /* 588 + * A "1" is shifted out to the EEPROM by setting bit "DI" to a 589 + * "1", and then raising and then lowering the clock (the SK 590 + * bit controls the clock input to the EEPROM). A "0" is 591 + * shifted out to the EEPROM by setting "DI" to "0" and then 592 + * raising and then lowering the clock. 593 + */ 594 + if (data & mask) 595 + eec |= IXGBE_EEC_DI; 596 + else 597 + eec &= ~IXGBE_EEC_DI; 598 + 599 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 600 + IXGBE_WRITE_FLUSH(hw); 601 + 602 + udelay(1); 603 + 604 + ixgbe_raise_eeprom_clk(hw, &eec); 605 + ixgbe_lower_eeprom_clk(hw, &eec); 606 + 607 + /* 608 + * Shift mask to signify next bit of data to shift in to the 609 + * EEPROM 610 + */ 611 + mask = mask >> 1; 612 + }; 613 + 614 + /* We leave the "DI" bit set to "0" when we leave this routine. */ 615 + eec &= ~IXGBE_EEC_DI; 616 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 617 + IXGBE_WRITE_FLUSH(hw); 618 + } 619 + 620 + /** 621 + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM 622 + * @hw: pointer to hardware structure 623 + **/ 624 + static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) 625 + { 626 + u32 eec; 627 + u32 i; 628 + u16 data = 0; 629 + 630 + /* 631 + * In order to read a register from the EEPROM, we need to shift 632 + * 'count' bits in from the EEPROM. Bits are "shifted in" by raising 633 + * the clock input to the EEPROM (setting the SK bit), and then reading 634 + * the value of the "DO" bit. During this "shifting in" process the 635 + * "DI" bit should always be clear. 
636 + */ 637 + eec = IXGBE_READ_REG(hw, IXGBE_EEC); 638 + 639 + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); 640 + 641 + for (i = 0; i < count; i++) { 642 + data = data << 1; 643 + ixgbe_raise_eeprom_clk(hw, &eec); 644 + 645 + eec = IXGBE_READ_REG(hw, IXGBE_EEC); 646 + 647 + eec &= ~(IXGBE_EEC_DI); 648 + if (eec & IXGBE_EEC_DO) 649 + data |= 1; 650 + 651 + ixgbe_lower_eeprom_clk(hw, &eec); 652 + } 653 + 654 + return data; 655 + } 656 + 657 + /** 658 + * ixgbe_raise_eeprom_clk - Raises the EEPROM's clock input. 659 + * @hw: pointer to hardware structure 660 + * @eec: EEC register's current value 661 + **/ 662 + static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 663 + { 664 + /* 665 + * Raise the clock input to the EEPROM 666 + * (setting the SK bit), then delay 667 + */ 668 + *eec = *eec | IXGBE_EEC_SK; 669 + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); 670 + IXGBE_WRITE_FLUSH(hw); 671 + udelay(1); 672 + } 673 + 674 + /** 675 + * ixgbe_lower_eeprom_clk - Lowers the EEPROM's clock input. 
676 + * @hw: pointer to hardware structure 677 + * @eecd: EECD's current value 678 + **/ 679 + static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) 680 + { 681 + /* 682 + * Lower the clock input to the EEPROM (clearing the SK bit), then 683 + * delay 684 + */ 685 + *eec = *eec & ~IXGBE_EEC_SK; 686 + IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); 687 + IXGBE_WRITE_FLUSH(hw); 688 + udelay(1); 689 + } 690 + 691 + /** 692 + * ixgbe_release_eeprom - Release EEPROM, release semaphores 693 + * @hw: pointer to hardware structure 694 + **/ 695 + static void ixgbe_release_eeprom(struct ixgbe_hw *hw) 696 + { 697 + u32 eec; 698 + 699 + eec = IXGBE_READ_REG(hw, IXGBE_EEC); 700 + 701 + eec |= IXGBE_EEC_CS; /* Pull CS high */ 702 + eec &= ~IXGBE_EEC_SK; /* Lower SCK */ 703 + 704 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 705 + IXGBE_WRITE_FLUSH(hw); 706 + 707 + udelay(1); 708 + 709 + /* Stop requesting EEPROM access */ 710 + eec &= ~IXGBE_EEC_REQ; 711 + IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); 712 + 713 + ixgbe_release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); 714 + } 715 + 716 + /** 650 717 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 651 718 * @hw: pointer to hardware structure 652 719 **/ ··· 872 517 873 518 /* Include 0x0-0x3F in the checksum */ 874 519 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { 875 - if (ixgbe_read_eeprom(hw, i, &word) != 0) { 520 + if (hw->eeprom.ops.read(hw, i, &word) != 0) { 876 521 hw_dbg(hw, "EEPROM read failed\n"); 877 522 break; 878 523 } ··· 881 526 882 527 /* Include all data from pointers except for the fw pointer */ 883 528 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { 884 - ixgbe_read_eeprom(hw, i, &pointer); 529 + hw->eeprom.ops.read(hw, i, &pointer); 885 530 886 531 /* Make sure the pointer seems valid */ 887 532 if (pointer != 0xFFFF && pointer != 0) { 888 - ixgbe_read_eeprom(hw, pointer, &length); 533 + hw->eeprom.ops.read(hw, pointer, &length); 889 534 890 535 if (length != 0xFFFF && length != 0) { 891 536 for (j = 
pointer+1; j <= pointer+length; j++) { 892 - ixgbe_read_eeprom(hw, j, &word); 537 + hw->eeprom.ops.read(hw, j, &word); 893 538 checksum += word; 894 539 } 895 540 } ··· 902 547 } 903 548 904 549 /** 905 - * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum 550 + * ixgbe_validate_eeprom_checksum_generic - Validate EEPROM checksum 906 551 * @hw: pointer to hardware structure 907 552 * @checksum_val: calculated checksum 908 553 * 909 554 * Performs checksum calculation and validates the EEPROM checksum. If the 910 555 * caller does not need checksum_val, the value can be NULL. 911 556 **/ 912 - s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) 557 + s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 558 + u16 *checksum_val) 913 559 { 914 560 s32 status; 915 561 u16 checksum; ··· 921 565 * not continue or we could be in for a very long wait while every 922 566 * EEPROM read fails 923 567 */ 924 - status = ixgbe_read_eeprom(hw, 0, &checksum); 568 + status = hw->eeprom.ops.read(hw, 0, &checksum); 925 569 926 570 if (status == 0) { 927 571 checksum = ixgbe_calc_eeprom_checksum(hw); 928 572 929 - ixgbe_read_eeprom(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 573 + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 930 574 931 575 /* 932 576 * Verify read checksum from EEPROM is the same as ··· 938 582 /* If the user cares, return the calculated checksum */ 939 583 if (checksum_val) 940 584 *checksum_val = checksum; 585 + } else { 586 + hw_dbg(hw, "EEPROM read failed\n"); 587 + } 588 + 589 + return status; 590 + } 591 + 592 + /** 593 + * ixgbe_update_eeprom_checksum_generic - Updates the EEPROM checksum 594 + * @hw: pointer to hardware structure 595 + **/ 596 + s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) 597 + { 598 + s32 status; 599 + u16 checksum; 600 + 601 + /* 602 + * Read the first word from the EEPROM. 
If this times out or fails, do 603 + * not continue or we could be in for a very long wait while every 604 + * EEPROM read fails 605 + */ 606 + status = hw->eeprom.ops.read(hw, 0, &checksum); 607 + 608 + if (status == 0) { 609 + checksum = ixgbe_calc_eeprom_checksum(hw); 610 + status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 611 + checksum); 941 612 } else { 942 613 hw_dbg(hw, "EEPROM read failed\n"); 943 614 } ··· 990 607 status = IXGBE_ERR_INVALID_MAC_ADDR; 991 608 /* Reject the zero address */ 992 609 else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && 993 - mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) 610 + mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) 994 611 status = IXGBE_ERR_INVALID_MAC_ADDR; 995 612 996 613 return status; 997 614 } 998 615 999 616 /** 1000 - * ixgbe_set_rar - Set RX address register 617 + * ixgbe_set_rar_generic - Set Rx address register 1001 618 * @hw: pointer to hardware structure 1002 - * @addr: Address to put into receive address register 1003 619 * @index: Receive address register to write 1004 - * @vind: Vind to set RAR to 620 + * @addr: Address to put into receive address register 621 + * @vmdq: VMDq "set" or "pool" index 1005 622 * @enable_addr: set flag that address is active 1006 623 * 1007 624 * Puts an ethernet address into a receive address register. 
1008 625 **/ 1009 - s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, 1010 - u32 enable_addr) 626 + s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 627 + u32 enable_addr) 1011 628 { 1012 629 u32 rar_low, rar_high; 630 + u32 rar_entries = hw->mac.num_rar_entries; 1013 631 1014 - /* 1015 - * HW expects these in little endian so we reverse the byte order from 1016 - * network order (big endian) to little endian 1017 - */ 1018 - rar_low = ((u32)addr[0] | 1019 - ((u32)addr[1] << 8) | 1020 - ((u32)addr[2] << 16) | 1021 - ((u32)addr[3] << 24)); 632 + /* setup VMDq pool selection before this RAR gets enabled */ 633 + hw->mac.ops.set_vmdq(hw, index, vmdq); 1022 634 1023 - rar_high = ((u32)addr[4] | 1024 - ((u32)addr[5] << 8) | 1025 - ((vind << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK)); 635 + /* Make sure we are using a valid rar index range */ 636 + if (index < rar_entries) { 637 + /* 638 + * HW expects these in little endian so we reverse the byte 639 + * order from network order (big endian) to little endian 640 + */ 641 + rar_low = ((u32)addr[0] | 642 + ((u32)addr[1] << 8) | 643 + ((u32)addr[2] << 16) | 644 + ((u32)addr[3] << 24)); 645 + /* 646 + * Some parts put the VMDq setting in the extra RAH bits, 647 + * so save everything except the lower 16 bits that hold part 648 + * of the address and the address valid bit. 
649 + */ 650 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 651 + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 652 + rar_high |= ((u32)addr[4] | ((u32)addr[5] << 8)); 1026 653 1027 - if (enable_addr != 0) 1028 - rar_high |= IXGBE_RAH_AV; 654 + if (enable_addr != 0) 655 + rar_high |= IXGBE_RAH_AV; 1029 656 1030 - IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 1031 - IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 657 + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); 658 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 659 + } else { 660 + hw_dbg(hw, "RAR index %d is out of range.\n", index); 661 + } 1032 662 1033 663 return 0; 1034 664 } 1035 665 1036 666 /** 1037 - * ixgbe_init_rx_addrs - Initializes receive address filters. 667 + * ixgbe_clear_rar_generic - Remove Rx address register 668 + * @hw: pointer to hardware structure 669 + * @index: Receive address register to write 670 + * 671 + * Clears an ethernet address from a receive address register. 672 + **/ 673 + s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) 674 + { 675 + u32 rar_high; 676 + u32 rar_entries = hw->mac.num_rar_entries; 677 + 678 + /* Make sure we are using a valid rar index range */ 679 + if (index < rar_entries) { 680 + /* 681 + * Some parts put the VMDq setting in the extra RAH bits, 682 + * so save everything except the lower 16 bits that hold part 683 + * of the address and the address valid bit. 
684 + */ 685 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 686 + rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV); 687 + 688 + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0); 689 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 690 + } else { 691 + hw_dbg(hw, "RAR index %d is out of range.\n", index); 692 + } 693 + 694 + /* clear VMDq pool/queue selection for this RAR */ 695 + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); 696 + 697 + return 0; 698 + } 699 + 700 + /** 701 + * ixgbe_enable_rar - Enable Rx address register 702 + * @hw: pointer to hardware structure 703 + * @index: index into the RAR table 704 + * 705 + * Enables the select receive address register. 706 + **/ 707 + static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index) 708 + { 709 + u32 rar_high; 710 + 711 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 712 + rar_high |= IXGBE_RAH_AV; 713 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 714 + } 715 + 716 + /** 717 + * ixgbe_disable_rar - Disable Rx address register 718 + * @hw: pointer to hardware structure 719 + * @index: index into the RAR table 720 + * 721 + * Disables the select receive address register. 722 + **/ 723 + static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index) 724 + { 725 + u32 rar_high; 726 + 727 + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index)); 728 + rar_high &= (~IXGBE_RAH_AV); 729 + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); 730 + } 731 + 732 + /** 733 + * ixgbe_init_rx_addrs_generic - Initializes receive address filters. 1038 734 * @hw: pointer to hardware structure 1039 735 * 1040 736 * Places the MAC address in receive address register 0 and clears the rest 1041 - * of the receive addresss registers. Clears the multicast table. Assumes 737 + * of the receive address registers. Clears the multicast table. Assumes 1042 738 * the receiver is in reset when the routine is called. 
1043 739 **/ 1044 - static s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) 740 + s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) 1045 741 { 1046 742 u32 i; 1047 743 u32 rar_entries = hw->mac.num_rar_entries; ··· 1133 671 if (ixgbe_validate_mac_addr(hw->mac.addr) == 1134 672 IXGBE_ERR_INVALID_MAC_ADDR) { 1135 673 /* Get the MAC address from the RAR0 for later reference */ 1136 - ixgbe_get_mac_addr(hw, hw->mac.addr); 674 + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); 1137 675 1138 676 hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ", 1139 - hw->mac.addr[0], hw->mac.addr[1], 1140 - hw->mac.addr[2]); 677 + hw->mac.addr[0], hw->mac.addr[1], 678 + hw->mac.addr[2]); 1141 679 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], 1142 - hw->mac.addr[4], hw->mac.addr[5]); 680 + hw->mac.addr[4], hw->mac.addr[5]); 1143 681 } else { 1144 682 /* Setup the receive address. */ 1145 683 hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); 1146 684 hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ", 1147 - hw->mac.addr[0], hw->mac.addr[1], 1148 - hw->mac.addr[2]); 685 + hw->mac.addr[0], hw->mac.addr[1], 686 + hw->mac.addr[2]); 1149 687 hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3], 1150 - hw->mac.addr[4], hw->mac.addr[5]); 688 + hw->mac.addr[4], hw->mac.addr[5]); 1151 689 1152 - ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 690 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 1153 691 } 692 + hw->addr_ctrl.overflow_promisc = 0; 1154 693 1155 694 hw->addr_ctrl.rar_used_count = 1; 1156 695 1157 696 /* Zero out the other receive addresses. 
*/ 1158 - hw_dbg(hw, "Clearing RAR[1-15]\n"); 697 + hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); 1159 698 for (i = 1; i < rar_entries; i++) { 1160 699 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1161 700 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); ··· 1171 708 for (i = 0; i < hw->mac.mcft_size; i++) 1172 709 IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); 1173 710 711 + if (hw->mac.ops.init_uta_tables) 712 + hw->mac.ops.init_uta_tables(hw); 713 + 1174 714 return 0; 1175 715 } 1176 716 ··· 1184 718 * 1185 719 * Adds it to unused receive address register or goes into promiscuous mode. 1186 720 **/ 1187 - void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr) 721 + static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) 1188 722 { 1189 723 u32 rar_entries = hw->mac.num_rar_entries; 1190 724 u32 rar; ··· 1199 733 if (hw->addr_ctrl.rar_used_count < rar_entries) { 1200 734 rar = hw->addr_ctrl.rar_used_count - 1201 735 hw->addr_ctrl.mc_addr_in_rar_count; 1202 - ixgbe_set_rar(hw, rar, addr, 0, IXGBE_RAH_AV); 736 + hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); 1203 737 hw_dbg(hw, "Added a secondary address to RAR[%d]\n", rar); 1204 738 hw->addr_ctrl.rar_used_count++; 1205 739 } else { ··· 1210 744 } 1211 745 1212 746 /** 1213 - * ixgbe_update_uc_addr_list - Updates MAC list of secondary addresses 747 + * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses 1214 748 * @hw: pointer to hardware structure 1215 749 * @addr_list: the list of new addresses 1216 750 * @addr_count: number of addresses ··· 1223 757 * Drivers using secondary unicast addresses must set user_set_promisc when 1224 758 * manually putting the device into promiscuous mode. 
1225 759 **/ 1226 - s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, 760 + s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, 1227 761 u32 addr_count, ixgbe_mc_addr_itr next) 1228 762 { 1229 763 u8 *addr; ··· 1253 787 for (i = 0; i < addr_count; i++) { 1254 788 hw_dbg(hw, " Adding the secondary addresses:\n"); 1255 789 addr = next(hw, &addr_list, &vmdq); 1256 - ixgbe_add_uc_addr(hw, addr); 790 + ixgbe_add_uc_addr(hw, addr, vmdq); 1257 791 } 1258 792 1259 793 if (hw->addr_ctrl.overflow_promisc) { ··· 1274 808 } 1275 809 } 1276 810 1277 - hw_dbg(hw, "ixgbe_update_uc_addr_list Complete\n"); 811 + hw_dbg(hw, "ixgbe_update_uc_addr_list_generic Complete\n"); 1278 812 return 0; 1279 813 } 1280 814 ··· 1287 821 * bit-vector to set in the multicast table. The hardware uses 12 bits, from 1288 822 * incoming rx multicast addresses, to determine the bit-vector to check in 1289 823 * the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set 1290 - * by the MO field of the MCSTCTRL. The MO field is set during initalization 824 + * by the MO field of the MCSTCTRL. The MO field is set during initialization 1291 825 * to mc_filter_type. 
1292 826 **/ 1293 827 static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) ··· 1295 829 u32 vector = 0; 1296 830 1297 831 switch (hw->mac.mc_filter_type) { 1298 - case 0: /* use bits [47:36] of the address */ 832 + case 0: /* use bits [47:36] of the address */ 1299 833 vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); 1300 834 break; 1301 - case 1: /* use bits [46:35] of the address */ 835 + case 1: /* use bits [46:35] of the address */ 1302 836 vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5)); 1303 837 break; 1304 - case 2: /* use bits [45:34] of the address */ 838 + case 2: /* use bits [45:34] of the address */ 1305 839 vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6)); 1306 840 break; 1307 - case 3: /* use bits [43:32] of the address */ 841 + case 3: /* use bits [43:32] of the address */ 1308 842 vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); 1309 843 break; 1310 - default: /* Invalid mc_filter_type */ 844 + default: /* Invalid mc_filter_type */ 1311 845 hw_dbg(hw, "MC filter type param set incorrectly\n"); 1312 846 break; 1313 847 } ··· 1362 896 static void ixgbe_add_mc_addr(struct ixgbe_hw *hw, u8 *mc_addr) 1363 897 { 1364 898 u32 rar_entries = hw->mac.num_rar_entries; 899 + u32 rar; 1365 900 1366 901 hw_dbg(hw, " MC Addr =%.2X %.2X %.2X %.2X %.2X %.2X\n", 1367 - mc_addr[0], mc_addr[1], mc_addr[2], 1368 - mc_addr[3], mc_addr[4], mc_addr[5]); 902 + mc_addr[0], mc_addr[1], mc_addr[2], 903 + mc_addr[3], mc_addr[4], mc_addr[5]); 1369 904 1370 905 /* 1371 906 * Place this multicast address in the RAR if there is room, 1372 907 * else put it in the MTA 1373 908 */ 1374 909 if (hw->addr_ctrl.rar_used_count < rar_entries) { 1375 - ixgbe_set_rar(hw, hw->addr_ctrl.rar_used_count, 1376 - mc_addr, 0, IXGBE_RAH_AV); 1377 - hw_dbg(hw, "Added a multicast address to RAR[%d]\n", 1378 - hw->addr_ctrl.rar_used_count); 910 + /* use RAR from the end up for multicast */ 911 + rar = rar_entries - hw->addr_ctrl.mc_addr_in_rar_count - 1; 912 + 
hw->mac.ops.set_rar(hw, rar, mc_addr, 0, IXGBE_RAH_AV); 913 + hw_dbg(hw, "Added a multicast address to RAR[%d]\n", rar); 1379 914 hw->addr_ctrl.rar_used_count++; 1380 915 hw->addr_ctrl.mc_addr_in_rar_count++; 1381 916 } else { ··· 1387 920 } 1388 921 1389 922 /** 1390 - * ixgbe_update_mc_addr_list - Updates MAC list of multicast addresses 923 + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses 1391 924 * @hw: pointer to hardware structure 1392 925 * @mc_addr_list: the list of new multicast addresses 1393 926 * @mc_addr_count: number of addresses 1394 927 * @next: iterator function to walk the multicast address list 1395 928 * 1396 929 * The given list replaces any existing list. Clears the MC addrs from receive 1397 - * address registers and the multicast table. Uses unsed receive address 930 + * address registers and the multicast table. Uses unused receive address 1398 931 * registers for the first multicast addresses, and hashes the rest into the 1399 932 * multicast table. 1400 933 **/ 1401 - s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, 1402 - u32 mc_addr_count, ixgbe_mc_addr_itr next) 934 + s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 935 + u32 mc_addr_count, ixgbe_mc_addr_itr next) 1403 936 { 1404 937 u32 i; 1405 938 u32 rar_entries = hw->mac.num_rar_entries; ··· 1415 948 hw->addr_ctrl.mta_in_use = 0; 1416 949 1417 950 /* Zero out the other receive addresses. 
*/ 1418 - hw_dbg(hw, "Clearing RAR[1-15]\n"); 951 + hw_dbg(hw, "Clearing RAR[%d-%d]\n", hw->addr_ctrl.rar_used_count, 952 + rar_entries - 1); 1419 953 for (i = hw->addr_ctrl.rar_used_count; i < rar_entries; i++) { 1420 954 IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); 1421 955 IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); ··· 1436 968 /* Enable mta */ 1437 969 if (hw->addr_ctrl.mta_in_use > 0) 1438 970 IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, 1439 - IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 971 + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); 1440 972 1441 - hw_dbg(hw, "ixgbe_update_mc_addr_list Complete\n"); 973 + hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); 1442 974 return 0; 1443 975 } 1444 976 1445 977 /** 1446 - * ixgbe_clear_vfta - Clear VLAN filter table 978 + * ixgbe_enable_mc_generic - Enable multicast address in RAR 1447 979 * @hw: pointer to hardware structure 1448 980 * 1449 - * Clears the VLAN filer table, and the VMDq index associated with the filter 981 + * Enables multicast address in RAR and the use of the multicast hash table. 
1450 982 **/ 1451 - static s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) 983 + s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) 1452 984 { 1453 - u32 offset; 1454 - u32 vlanbyte; 985 + u32 i; 986 + u32 rar_entries = hw->mac.num_rar_entries; 987 + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1455 988 1456 - for (offset = 0; offset < hw->mac.vft_size; offset++) 1457 - IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); 989 + if (a->mc_addr_in_rar_count > 0) 990 + for (i = (rar_entries - a->mc_addr_in_rar_count); 991 + i < rar_entries; i++) 992 + ixgbe_enable_rar(hw, i); 1458 993 1459 - for (vlanbyte = 0; vlanbyte < 4; vlanbyte++) 1460 - for (offset = 0; offset < hw->mac.vft_size; offset++) 1461 - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), 1462 - 0); 1463 - 1464 - return 0; 1465 - } 1466 - 1467 - /** 1468 - * ixgbe_set_vfta - Set VLAN filter table 1469 - * @hw: pointer to hardware structure 1470 - * @vlan: VLAN id to write to VLAN filter 1471 - * @vind: VMDq output index that maps queue to VLAN id in VFTA 1472 - * @vlan_on: boolean flag to turn on/off VLAN in VFTA 1473 - * 1474 - * Turn on/off specified VLAN in the VLAN filter table. 
1475 - **/ 1476 - s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, 1477 - bool vlan_on) 1478 - { 1479 - u32 VftaIndex; 1480 - u32 BitOffset; 1481 - u32 VftaReg; 1482 - u32 VftaByte; 1483 - 1484 - /* Determine 32-bit word position in array */ 1485 - VftaIndex = (vlan >> 5) & 0x7F; /* upper seven bits */ 1486 - 1487 - /* Determine the location of the (VMD) queue index */ 1488 - VftaByte = ((vlan >> 3) & 0x03); /* bits (4:3) indicating byte array */ 1489 - BitOffset = (vlan & 0x7) << 2; /* lower 3 bits indicate nibble */ 1490 - 1491 - /* Set the nibble for VMD queue index */ 1492 - VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex)); 1493 - VftaReg &= (~(0x0F << BitOffset)); 1494 - VftaReg |= (vind << BitOffset); 1495 - IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(VftaByte, VftaIndex), VftaReg); 1496 - 1497 - /* Determine the location of the bit for this VLAN id */ 1498 - BitOffset = vlan & 0x1F; /* lower five bits */ 1499 - 1500 - VftaReg = IXGBE_READ_REG(hw, IXGBE_VFTA(VftaIndex)); 1501 - if (vlan_on) 1502 - /* Turn on this VLAN id */ 1503 - VftaReg |= (1 << BitOffset); 1504 - else 1505 - /* Turn off this VLAN id */ 1506 - VftaReg &= ~(1 << BitOffset); 1507 - IXGBE_WRITE_REG(hw, IXGBE_VFTA(VftaIndex), VftaReg); 994 + if (a->mta_in_use > 0) 995 + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | 996 + hw->mac.mc_filter_type); 1508 997 1509 998 return 0; 1510 999 } 1511 1000 1512 1001 /** 1513 - * ixgbe_setup_fc - Configure flow control settings 1002 + * ixgbe_disable_mc_generic - Disable multicast address in RAR 1514 1003 * @hw: pointer to hardware structure 1515 - * @packetbuf_num: packet buffer number (0-7) 1516 1004 * 1517 - * Configures the flow control settings based on SW configuration. 1518 - * This function is used for 802.3x flow control configuration only. 1005 + * Disables multicast address in RAR and the use of the multicast hash table. 
1519 1006 **/ 1520 - s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num) 1007 + s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) 1521 1008 { 1522 - u32 frctl_reg; 1523 - u32 rmcs_reg; 1009 + u32 i; 1010 + u32 rar_entries = hw->mac.num_rar_entries; 1011 + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; 1524 1012 1525 - if (packetbuf_num < 0 || packetbuf_num > 7) 1526 - hw_dbg(hw, "Invalid packet buffer number [%d], expected range " 1527 - "is 0-7\n", packetbuf_num); 1013 + if (a->mc_addr_in_rar_count > 0) 1014 + for (i = (rar_entries - a->mc_addr_in_rar_count); 1015 + i < rar_entries; i++) 1016 + ixgbe_disable_rar(hw, i); 1528 1017 1529 - frctl_reg = IXGBE_READ_REG(hw, IXGBE_FCTRL); 1530 - frctl_reg &= ~(IXGBE_FCTRL_RFCE | IXGBE_FCTRL_RPFCE); 1531 - 1532 - rmcs_reg = IXGBE_READ_REG(hw, IXGBE_RMCS); 1533 - rmcs_reg &= ~(IXGBE_RMCS_TFCE_PRIORITY | IXGBE_RMCS_TFCE_802_3X); 1534 - 1535 - /* 1536 - * 10 gig parts do not have a word in the EEPROM to determine the 1537 - * default flow control setting, so we explicitly set it to full. 1538 - */ 1539 - if (hw->fc.type == ixgbe_fc_default) 1540 - hw->fc.type = ixgbe_fc_full; 1541 - 1542 - /* 1543 - * We want to save off the original Flow Control configuration just in 1544 - * case we get disconnected and then reconnected into a different hub 1545 - * or switch with different Flow Control capabilities. 1546 - */ 1547 - hw->fc.type = hw->fc.original_type; 1548 - 1549 - /* 1550 - * The possible values of the "flow_control" parameter are: 1551 - * 0: Flow control is completely disabled 1552 - * 1: Rx flow control is enabled (we can receive pause frames but not 1553 - * send pause frames). 1554 - * 2: Tx flow control is enabled (we can send pause frames but we do not 1555 - * support receiving pause frames) 1556 - * 3: Both Rx and TX flow control (symmetric) are enabled. 1557 - * other: Invalid. 
1558 - */ 1559 - switch (hw->fc.type) { 1560 - case ixgbe_fc_none: 1561 - break; 1562 - case ixgbe_fc_rx_pause: 1563 - /* 1564 - * RX Flow control is enabled, 1565 - * and TX Flow control is disabled. 1566 - */ 1567 - frctl_reg |= IXGBE_FCTRL_RFCE; 1568 - break; 1569 - case ixgbe_fc_tx_pause: 1570 - /* 1571 - * TX Flow control is enabled, and RX Flow control is disabled, 1572 - * by a software over-ride. 1573 - */ 1574 - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 1575 - break; 1576 - case ixgbe_fc_full: 1577 - /* 1578 - * Flow control (both RX and TX) is enabled by a software 1579 - * over-ride. 1580 - */ 1581 - frctl_reg |= IXGBE_FCTRL_RFCE; 1582 - rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; 1583 - break; 1584 - default: 1585 - /* We should never get here. The value should be 0-3. */ 1586 - hw_dbg(hw, "Flow control param set incorrectly\n"); 1587 - break; 1588 - } 1589 - 1590 - /* Enable 802.3x based flow control settings. */ 1591 - IXGBE_WRITE_REG(hw, IXGBE_FCTRL, frctl_reg); 1592 - IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); 1593 - 1594 - /* 1595 - * Check for invalid software configuration, zeros are completely 1596 - * invalid for all parameters used past this point, and if we enable 1597 - * flow control with zero water marks, we blast flow control packets. 1598 - */ 1599 - if (!hw->fc.low_water || !hw->fc.high_water || !hw->fc.pause_time) { 1600 - hw_dbg(hw, "Flow control structure initialized incorrectly\n"); 1601 - return IXGBE_ERR_INVALID_LINK_SETTINGS; 1602 - } 1603 - 1604 - /* 1605 - * We need to set up the Receive Threshold high and low water 1606 - * marks as well as (optionally) enabling the transmission of 1607 - * XON frames. 
1608 - */ 1609 - if (hw->fc.type & ixgbe_fc_tx_pause) { 1610 - if (hw->fc.send_xon) { 1611 - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 1612 - (hw->fc.low_water | IXGBE_FCRTL_XONE)); 1613 - } else { 1614 - IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 1615 - hw->fc.low_water); 1616 - } 1617 - IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), 1618 - (hw->fc.high_water)|IXGBE_FCRTH_FCEN); 1619 - } 1620 - 1621 - IXGBE_WRITE_REG(hw, IXGBE_FCTTV(0), hw->fc.pause_time); 1622 - IXGBE_WRITE_REG(hw, IXGBE_FCRTV, (hw->fc.pause_time >> 1)); 1018 + if (a->mta_in_use > 0) 1019 + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); 1623 1020 1624 1021 return 0; 1625 1022 } ··· 1500 1167 **/ 1501 1168 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) 1502 1169 { 1503 - u32 ctrl; 1504 - s32 i; 1170 + u32 i; 1171 + u32 reg_val; 1172 + u32 number_of_queues; 1505 1173 s32 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 1506 1174 1507 - ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 1508 - ctrl |= IXGBE_CTRL_GIO_DIS; 1509 - IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); 1175 + /* Disable the receive unit by stopping each queue */ 1176 + number_of_queues = hw->mac.max_rx_queues; 1177 + for (i = 0; i < number_of_queues; i++) { 1178 + reg_val = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); 1179 + if (reg_val & IXGBE_RXDCTL_ENABLE) { 1180 + reg_val &= ~IXGBE_RXDCTL_ENABLE; 1181 + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), reg_val); 1182 + } 1183 + } 1184 + 1185 + reg_val = IXGBE_READ_REG(hw, IXGBE_CTRL); 1186 + reg_val |= IXGBE_CTRL_GIO_DIS; 1187 + IXGBE_WRITE_REG(hw, IXGBE_CTRL, reg_val); 1510 1188 1511 1189 for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { 1512 1190 if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) { ··· 1532 1188 1533 1189 1534 1190 /** 1535 - * ixgbe_acquire_swfw_sync - Aquire SWFW semaphore 1191 + * ixgbe_acquire_swfw_sync - Acquire SWFW semaphore 1536 1192 * @hw: pointer to hardware structure 1537 - * @mask: Mask to specify wich semaphore to acquire 1193 + * @mask: Mask 
to specify which semaphore to acquire 1538 1194 * 1539 - * Aquires the SWFW semaphore throught the GSSR register for the specified 1195 + * Acquires the SWFW semaphore thought the GSSR register for the specified 1540 1196 * function (CSR, PHY0, PHY1, EEPROM, Flash) 1541 1197 **/ 1542 1198 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) ··· 1578 1234 /** 1579 1235 * ixgbe_release_swfw_sync - Release SWFW semaphore 1580 1236 * @hw: pointer to hardware structure 1581 - * @mask: Mask to specify wich semaphore to release 1237 + * @mask: Mask to specify which semaphore to release 1582 1238 * 1583 - * Releases the SWFW semaphore throught the GSSR register for the specified 1239 + * Releases the SWFW semaphore thought the GSSR register for the specified 1584 1240 * function (CSR, PHY0, PHY1, EEPROM, Flash) 1585 1241 **/ 1586 1242 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) ··· 1595 1251 IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); 1596 1252 1597 1253 ixgbe_release_eeprom_semaphore(hw); 1598 - } 1599 - 1600 - /** 1601 - * ixgbe_read_analog_reg8 - Reads 8 bit Atlas analog register 1602 - * @hw: pointer to hardware structure 1603 - * @reg: analog register to read 1604 - * @val: read value 1605 - * 1606 - * Performs write operation to analog register specified. 1607 - **/ 1608 - s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) 1609 - { 1610 - u32 atlas_ctl; 1611 - 1612 - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, 1613 - IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); 1614 - IXGBE_WRITE_FLUSH(hw); 1615 - udelay(10); 1616 - atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); 1617 - *val = (u8)atlas_ctl; 1618 - 1619 - return 0; 1620 - } 1621 - 1622 - /** 1623 - * ixgbe_write_analog_reg8 - Writes 8 bit Atlas analog register 1624 - * @hw: pointer to hardware structure 1625 - * @reg: atlas register to write 1626 - * @val: value to write 1627 - * 1628 - * Performs write operation to Atlas analog register specified. 
1629 - **/ 1630 - s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) 1631 - { 1632 - u32 atlas_ctl; 1633 - 1634 - atlas_ctl = (reg << 8) | val; 1635 - IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); 1636 - IXGBE_WRITE_FLUSH(hw); 1637 - udelay(10); 1638 - 1639 - return 0; 1640 1254 } 1641 1255
+32 -24
drivers/net/ixgbe/ixgbe_common.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 30 31 31 32 #include "ixgbe_type.h" 32 33 33 - s32 ixgbe_init_hw(struct ixgbe_hw *hw); 34 - s32 ixgbe_start_hw(struct ixgbe_hw *hw); 35 - s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); 36 - s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); 37 - s32 ixgbe_read_part_num(struct ixgbe_hw *hw, u32 *part_num); 34 + s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); 35 + s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 36 + s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 37 + s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); 38 + s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); 39 + s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); 40 + s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); 41 + s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); 38 42 39 - s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); 40 - s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); 43 + s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); 44 + s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); 41 45 42 - s32 ixgbe_init_eeprom(struct ixgbe_hw *hw); 43 - s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); 44 - s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); 46 + s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); 47 + s32 
ixgbe_read_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); 48 + s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 49 + u16 *data); 50 + s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 51 + u16 *checksum_val); 52 + s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 45 53 46 - s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vind, 47 - u32 enable_addr); 48 - s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, 49 - u32 mc_addr_count, ixgbe_mc_addr_itr next); 50 - s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *uc_addr_list, 51 - u32 mc_addr_count, ixgbe_mc_addr_itr next); 52 - s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); 54 + s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 55 + u32 enable_addr); 56 + s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); 57 + s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); 58 + s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, 59 + u32 mc_addr_count, 60 + ixgbe_mc_addr_itr func); 61 + s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, 62 + u32 addr_count, ixgbe_mc_addr_itr func); 63 + s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); 64 + s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); 65 + 53 66 s32 ixgbe_validate_mac_addr(u8 *mac_addr); 54 - 55 - s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packtetbuf_num); 56 - 57 67 s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); 58 68 void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); 59 69 s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); 60 70 61 - s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); 62 - s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); 71 + s32 ixgbe_read_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 *val); 72 + s32 ixgbe_write_analog_reg8_generic(struct ixgbe_hw *hw, u32 reg, u8 val); 63 73 64 
74 #define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 65 75
+154 -144
drivers/net/ixgbe/ixgbe_ethtool.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 47 48 }; 48 49 49 50 #define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ 50 - offsetof(struct ixgbe_adapter, m) 51 + offsetof(struct ixgbe_adapter, m) 51 52 static struct ixgbe_stats ixgbe_gstrings_stats[] = { 52 53 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, 53 54 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, ··· 94 95 }; 95 96 96 97 #define IXGBE_QUEUE_STATS_LEN \ 97 - ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ 98 - ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ 99 - (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 100 - #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 98 + ((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \ 99 + ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \ 100 + (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) 101 + #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) 102 + #define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) 101 103 #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN) 102 104 103 105 static int ixgbe_get_settings(struct net_device *netdev, 104 - struct ethtool_cmd *ecmd) 106 + struct ethtool_cmd *ecmd) 105 107 { 106 108 struct ixgbe_adapter *adapter = netdev_priv(netdev); 107 109 struct ixgbe_hw *hw = &adapter->hw; ··· 114 114 ecmd->transceiver = 
XCVR_EXTERNAL; 115 115 if (hw->phy.media_type == ixgbe_media_type_copper) { 116 116 ecmd->supported |= (SUPPORTED_1000baseT_Full | 117 - SUPPORTED_TP | SUPPORTED_Autoneg); 117 + SUPPORTED_TP | SUPPORTED_Autoneg); 118 118 119 119 ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg); 120 120 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ··· 126 126 } else { 127 127 ecmd->supported |= SUPPORTED_FIBRE; 128 128 ecmd->advertising = (ADVERTISED_10000baseT_Full | 129 - ADVERTISED_FIBRE); 129 + ADVERTISED_FIBRE); 130 130 ecmd->port = PORT_FIBRE; 131 + ecmd->autoneg = AUTONEG_DISABLE; 131 132 } 132 133 133 - adapter->hw.mac.ops.check_link(hw, &(link_speed), &link_up); 134 + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 134 135 if (link_up) { 135 136 ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ? 136 - SPEED_10000 : SPEED_1000; 137 + SPEED_10000 : SPEED_1000; 137 138 ecmd->duplex = DUPLEX_FULL; 138 139 } else { 139 140 ecmd->speed = -1; ··· 145 144 } 146 145 147 146 static int ixgbe_set_settings(struct net_device *netdev, 148 - struct ethtool_cmd *ecmd) 147 + struct ethtool_cmd *ecmd) 149 148 { 150 149 struct ixgbe_adapter *adapter = netdev_priv(netdev); 151 150 struct ixgbe_hw *hw = &adapter->hw; ··· 165 164 } 166 165 167 166 static void ixgbe_get_pauseparam(struct net_device *netdev, 168 - struct ethtool_pauseparam *pause) 167 + struct ethtool_pauseparam *pause) 169 168 { 170 169 struct ixgbe_adapter *adapter = netdev_priv(netdev); 171 170 struct ixgbe_hw *hw = &adapter->hw; ··· 183 182 } 184 183 185 184 static int ixgbe_set_pauseparam(struct net_device *netdev, 186 - struct ethtool_pauseparam *pause) 185 + struct ethtool_pauseparam *pause) 187 186 { 188 187 struct ixgbe_adapter *adapter = netdev_priv(netdev); 189 188 struct ixgbe_hw *hw = &adapter->hw; ··· 242 241 if (data) 243 242 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 244 243 else 245 - netdev->features &= ~NETIF_F_IP_CSUM; 244 + netdev->features &= 
~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 246 245 247 246 return 0; 248 247 } ··· 282 281 #define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ 283 282 284 283 static void ixgbe_get_regs(struct net_device *netdev, 285 - struct ethtool_regs *regs, void *p) 284 + struct ethtool_regs *regs, void *p) 286 285 { 287 286 struct ixgbe_adapter *adapter = netdev_priv(netdev); 288 287 struct ixgbe_hw *hw = &adapter->hw; ··· 316 315 regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); 317 316 318 317 /* Interrupt */ 319 - regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR); 318 + /* don't read EICR because it can clear interrupt causes, instead 319 + * read EICS which is a shadow but doesn't clear EICR */ 320 + regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); 320 321 regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); 321 322 regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); 322 323 regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); ··· 328 325 regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); 329 326 regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); 330 327 regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); 331 - regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL); 328 + regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); 332 329 regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); 333 330 334 331 /* Flow Control */ ··· 374 371 regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); 375 372 for (i = 0; i < 16; i++) 376 373 regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); 377 - regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE); 374 + regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); 378 375 regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); 379 376 regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); 380 377 regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); ··· 422 419 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 423 420 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT); 424 421 425 - /* DCE */ 426 422 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 427 423 regs_buff[830] = IXGBE_READ_REG(hw, 
IXGBE_DPMCS); 428 424 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); ··· 541 539 /* Diagnostic */ 542 540 regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); 543 541 for (i = 0; i < 8; i++) 544 - regs_buff[1072] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 542 + regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); 545 543 regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); 546 - regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0); 547 - regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1); 548 - regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2); 549 - regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3); 544 + for (i = 0; i < 4; i++) 545 + regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); 550 546 regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); 551 547 regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); 552 548 for (i = 0; i < 8; i++) 553 - regs_buff[1087] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 549 + regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); 554 550 regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); 555 - regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0); 556 - regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1); 557 - regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2); 558 - regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3); 551 + for (i = 0; i < 4; i++) 552 + regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); 559 553 regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); 560 554 regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); 561 555 regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); ··· 564 566 regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); 565 567 regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); 566 568 for (i = 0; i < 8; i++) 567 - regs_buff[1111] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 569 + regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); 568 570 regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); 569 571 regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); 570 572 
regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); ··· 583 585 } 584 586 585 587 static int ixgbe_get_eeprom(struct net_device *netdev, 586 - struct ethtool_eeprom *eeprom, u8 *bytes) 588 + struct ethtool_eeprom *eeprom, u8 *bytes) 587 589 { 588 590 struct ixgbe_adapter *adapter = netdev_priv(netdev); 589 591 struct ixgbe_hw *hw = &adapter->hw; ··· 606 608 return -ENOMEM; 607 609 608 610 for (i = 0; i < eeprom_len; i++) { 609 - if ((ret_val = ixgbe_read_eeprom(hw, first_word + i, 610 - &eeprom_buff[i]))) 611 + if ((ret_val = hw->eeprom.ops.read(hw, first_word + i, 612 + &eeprom_buff[i]))) 611 613 break; 612 614 } 613 615 ··· 622 624 } 623 625 624 626 static void ixgbe_get_drvinfo(struct net_device *netdev, 625 - struct ethtool_drvinfo *drvinfo) 627 + struct ethtool_drvinfo *drvinfo) 626 628 { 627 629 struct ixgbe_adapter *adapter = netdev_priv(netdev); 628 630 ··· 635 637 } 636 638 637 639 static void ixgbe_get_ringparam(struct net_device *netdev, 638 - struct ethtool_ringparam *ring) 640 + struct ethtool_ringparam *ring) 639 641 { 640 642 struct ixgbe_adapter *adapter = netdev_priv(netdev); 641 643 struct ixgbe_ring *tx_ring = adapter->tx_ring; ··· 652 654 } 653 655 654 656 static int ixgbe_set_ringparam(struct net_device *netdev, 655 - struct ethtool_ringparam *ring) 657 + struct ethtool_ringparam *ring) 656 658 { 657 659 struct ixgbe_adapter *adapter = netdev_priv(netdev); 658 - struct ixgbe_tx_buffer *old_buf; 659 - struct ixgbe_rx_buffer *old_rx_buf; 660 - void *old_desc; 660 + struct ixgbe_ring *temp_ring; 661 661 int i, err; 662 - u32 new_rx_count, new_tx_count, old_size; 663 - dma_addr_t old_dma; 662 + u32 new_rx_count, new_tx_count; 664 663 665 664 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 666 665 return -EINVAL; ··· 676 681 return 0; 677 682 } 678 683 684 + if (adapter->num_tx_queues > adapter->num_rx_queues) 685 + temp_ring = vmalloc(adapter->num_tx_queues * 686 + sizeof(struct ixgbe_ring)); 687 + else 688 + temp_ring = 
vmalloc(adapter->num_rx_queues * 689 + sizeof(struct ixgbe_ring)); 690 + if (!temp_ring) 691 + return -ENOMEM; 692 + 679 693 while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) 680 694 msleep(1); 681 695 ··· 697 693 * to the tx and rx ring structs. 698 694 */ 699 695 if (new_tx_count != adapter->tx_ring->count) { 696 + memcpy(temp_ring, adapter->tx_ring, 697 + adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 698 + 700 699 for (i = 0; i < adapter->num_tx_queues; i++) { 701 - /* Save existing descriptor ring */ 702 - old_buf = adapter->tx_ring[i].tx_buffer_info; 703 - old_desc = adapter->tx_ring[i].desc; 704 - old_size = adapter->tx_ring[i].size; 705 - old_dma = adapter->tx_ring[i].dma; 706 - /* Try to allocate a new one */ 707 - adapter->tx_ring[i].tx_buffer_info = NULL; 708 - adapter->tx_ring[i].desc = NULL; 709 - adapter->tx_ring[i].count = new_tx_count; 710 - err = ixgbe_setup_tx_resources(adapter, 711 - &adapter->tx_ring[i]); 700 + temp_ring[i].count = new_tx_count; 701 + err = ixgbe_setup_tx_resources(adapter, &temp_ring[i]); 712 702 if (err) { 713 - /* Restore the old one so at least 714 - the adapter still works, even if 715 - we failed the request */ 716 - adapter->tx_ring[i].tx_buffer_info = old_buf; 717 - adapter->tx_ring[i].desc = old_desc; 718 - adapter->tx_ring[i].size = old_size; 719 - adapter->tx_ring[i].dma = old_dma; 703 + while (i) { 704 + i--; 705 + ixgbe_free_tx_resources(adapter, 706 + &temp_ring[i]); 707 + } 720 708 goto err_setup; 721 709 } 722 - /* Free the old buffer manually */ 723 - vfree(old_buf); 724 - pci_free_consistent(adapter->pdev, old_size, 725 - old_desc, old_dma); 726 710 } 711 + 712 + for (i = 0; i < adapter->num_tx_queues; i++) 713 + ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]); 714 + 715 + memcpy(adapter->tx_ring, temp_ring, 716 + adapter->num_tx_queues * sizeof(struct ixgbe_ring)); 717 + 718 + adapter->tx_ring_count = new_tx_count; 727 719 } 728 720 729 721 if (new_rx_count != adapter->rx_ring->count) { 
722 + memcpy(temp_ring, adapter->rx_ring, 723 + adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 724 + 730 725 for (i = 0; i < adapter->num_rx_queues; i++) { 731 - 732 - old_rx_buf = adapter->rx_ring[i].rx_buffer_info; 733 - old_desc = adapter->rx_ring[i].desc; 734 - old_size = adapter->rx_ring[i].size; 735 - old_dma = adapter->rx_ring[i].dma; 736 - 737 - adapter->rx_ring[i].rx_buffer_info = NULL; 738 - adapter->rx_ring[i].desc = NULL; 739 - adapter->rx_ring[i].dma = 0; 740 - adapter->rx_ring[i].count = new_rx_count; 741 - err = ixgbe_setup_rx_resources(adapter, 742 - &adapter->rx_ring[i]); 726 + temp_ring[i].count = new_rx_count; 727 + err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]); 743 728 if (err) { 744 - adapter->rx_ring[i].rx_buffer_info = old_rx_buf; 745 - adapter->rx_ring[i].desc = old_desc; 746 - adapter->rx_ring[i].size = old_size; 747 - adapter->rx_ring[i].dma = old_dma; 729 + while (i) { 730 + i--; 731 + ixgbe_free_rx_resources(adapter, 732 + &temp_ring[i]); 733 + } 748 734 goto err_setup; 749 735 } 750 - 751 - vfree(old_rx_buf); 752 - pci_free_consistent(adapter->pdev, old_size, old_desc, 753 - old_dma); 754 736 } 737 + 738 + for (i = 0; i < adapter->num_rx_queues; i++) 739 + ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]); 740 + 741 + memcpy(adapter->rx_ring, temp_ring, 742 + adapter->num_rx_queues * sizeof(struct ixgbe_ring)); 743 + 744 + adapter->rx_ring_count = new_rx_count; 755 745 } 756 746 747 + /* success! 
*/ 757 748 err = 0; 758 749 err_setup: 759 - if (netif_running(adapter->netdev)) 750 + if (netif_running(netdev)) 760 751 ixgbe_up(adapter); 761 752 762 753 clear_bit(__IXGBE_RESETTING, &adapter->state); ··· 769 770 } 770 771 771 772 static void ixgbe_get_ethtool_stats(struct net_device *netdev, 772 - struct ethtool_stats *stats, u64 *data) 773 + struct ethtool_stats *stats, u64 *data) 773 774 { 774 775 struct ixgbe_adapter *adapter = netdev_priv(netdev); 775 776 u64 *queue_stat; ··· 777 778 int j, k; 778 779 int i; 779 780 u64 aggregated = 0, flushed = 0, no_desc = 0; 781 + for (i = 0; i < adapter->num_rx_queues; i++) { 782 + aggregated += adapter->rx_ring[i].lro_mgr.stats.aggregated; 783 + flushed += adapter->rx_ring[i].lro_mgr.stats.flushed; 784 + no_desc += adapter->rx_ring[i].lro_mgr.stats.no_desc; 785 + } 786 + adapter->lro_aggregated = aggregated; 787 + adapter->lro_flushed = flushed; 788 + adapter->lro_no_desc = no_desc; 780 789 781 790 ixgbe_update_stats(adapter); 782 791 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 783 792 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; 784 793 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 785 - sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 794 + sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; 786 795 } 787 796 for (j = 0; j < adapter->num_tx_queues; j++) { 788 797 queue_stat = (u64 *)&adapter->tx_ring[j].stats; ··· 799 792 i += k; 800 793 } 801 794 for (j = 0; j < adapter->num_rx_queues; j++) { 802 - aggregated += adapter->rx_ring[j].lro_mgr.stats.aggregated; 803 - flushed += adapter->rx_ring[j].lro_mgr.stats.flushed; 804 - no_desc += adapter->rx_ring[j].lro_mgr.stats.no_desc; 805 795 queue_stat = (u64 *)&adapter->rx_ring[j].stats; 806 796 for (k = 0; k < stat_count; k++) 807 797 data[i + k] = queue_stat[k]; 808 798 i += k; 809 799 } 810 - adapter->lro_aggregated = aggregated; 811 - adapter->lro_flushed = flushed; 812 - adapter->lro_no_desc = no_desc; 813 800 } 814 801 815 802 static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, 816 - u8 *data) 803 + u8 *data) 817 804 { 818 805 struct ixgbe_adapter *adapter = netdev_priv(netdev); 819 - u8 *p = data; 806 + char *p = (char *)data; 820 807 int i; 821 808 822 809 switch (stringset) { ··· 832 831 sprintf(p, "rx_queue_%u_bytes", i); 833 832 p += ETH_GSTRING_LEN; 834 833 } 835 - /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 834 + /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ 836 835 break; 837 836 } 838 837 } 839 838 840 839 841 840 static void ixgbe_get_wol(struct net_device *netdev, 842 - struct ethtool_wolinfo *wol) 841 + struct ethtool_wolinfo *wol) 843 842 { 844 843 wol->supported = 0; 845 844 wol->wolopts = 0; ··· 860 859 static int ixgbe_phys_id(struct net_device *netdev, u32 data) 861 860 { 862 861 struct ixgbe_adapter *adapter = netdev_priv(netdev); 863 - u32 led_reg = IXGBE_READ_REG(&adapter->hw, IXGBE_LEDCTL); 862 + struct ixgbe_hw *hw = &adapter->hw; 863 + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); 864 864 u32 i; 865 865 866 866 if (!data || data > 300) 867 867 data = 300; 868 868 869 869 for (i = 0; i < (data * 1000); i += 400) { 870 - ixgbe_led_on(&adapter->hw, IXGBE_LED_ON); 870 + hw->mac.ops.led_on(hw, IXGBE_LED_ON); 
871 871 msleep_interruptible(200); 872 - ixgbe_led_off(&adapter->hw, IXGBE_LED_ON); 872 + hw->mac.ops.led_off(hw, IXGBE_LED_ON); 873 873 msleep_interruptible(200); 874 874 } 875 875 ··· 881 879 } 882 880 883 881 static int ixgbe_get_coalesce(struct net_device *netdev, 884 - struct ethtool_coalesce *ec) 882 + struct ethtool_coalesce *ec) 885 883 { 886 884 struct ixgbe_adapter *adapter = netdev_priv(netdev); 887 885 888 - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 889 - ec->rx_coalesce_usecs = adapter->rx_eitr; 890 - else 891 - ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr; 892 - 893 - if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS) 894 - ec->tx_coalesce_usecs = adapter->tx_eitr; 895 - else 896 - ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr; 897 - 898 886 ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit; 887 + 888 + /* only valid if in constant ITR mode */ 889 + switch (adapter->itr_setting) { 890 + case 0: 891 + /* throttling disabled */ 892 + ec->rx_coalesce_usecs = 0; 893 + break; 894 + case 1: 895 + /* dynamic ITR mode */ 896 + ec->rx_coalesce_usecs = 1; 897 + break; 898 + default: 899 + /* fixed interrupt rate mode */ 900 + ec->rx_coalesce_usecs = 1000000/adapter->eitr_param; 901 + break; 902 + } 899 903 return 0; 900 904 } 901 905 902 906 static int ixgbe_set_coalesce(struct net_device *netdev, 903 - struct ethtool_coalesce *ec) 907 + struct ethtool_coalesce *ec) 904 908 { 905 909 struct ixgbe_adapter *adapter = netdev_priv(netdev); 906 - 907 - if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || 908 - ((ec->rx_coalesce_usecs != 0) && 909 - (ec->rx_coalesce_usecs != 1) && 910 - (ec->rx_coalesce_usecs != 3) && 911 - (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) 912 - return -EINVAL; 913 - if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) || 914 - ((ec->tx_coalesce_usecs != 0) && 915 - (ec->tx_coalesce_usecs != 1) && 916 - (ec->tx_coalesce_usecs != 3) && 917 - (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS))) 918 - return -EINVAL; 919 - 
920 - /* convert to rate of irq's per second */ 921 - if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS) 922 - adapter->rx_eitr = ec->rx_coalesce_usecs; 923 - else 924 - adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs); 925 - 926 - if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS) 927 - adapter->tx_eitr = ec->rx_coalesce_usecs; 928 - else 929 - adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs); 910 + struct ixgbe_hw *hw = &adapter->hw; 911 + int i; 930 912 931 913 if (ec->tx_max_coalesced_frames_irq) 932 - adapter->tx_ring[0].work_limit = 933 - ec->tx_max_coalesced_frames_irq; 914 + adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq; 934 915 935 - if (netif_running(netdev)) { 936 - ixgbe_down(adapter); 937 - ixgbe_up(adapter); 916 + if (ec->rx_coalesce_usecs > 1) { 917 + /* store the value in ints/second */ 918 + adapter->eitr_param = 1000000/ec->rx_coalesce_usecs; 919 + 920 + /* static value of interrupt rate */ 921 + adapter->itr_setting = adapter->eitr_param; 922 + /* clear the lower bit */ 923 + adapter->itr_setting &= ~1; 924 + } else if (ec->rx_coalesce_usecs == 1) { 925 + /* 1 means dynamic mode */ 926 + adapter->eitr_param = 20000; 927 + adapter->itr_setting = 1; 928 + } else { 929 + /* any other value means disable eitr, which is best 930 + * served by setting the interrupt rate very high */ 931 + adapter->eitr_param = 3000000; 932 + adapter->itr_setting = 0; 933 + } 934 + 935 + for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { 936 + struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; 937 + if (q_vector->txr_count && !q_vector->rxr_count) 938 + q_vector->eitr = (adapter->eitr_param >> 1); 939 + else 940 + /* rx only or mixed */ 941 + q_vector->eitr = adapter->eitr_param; 942 + IXGBE_WRITE_REG(hw, IXGBE_EITR(i), 943 + EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 938 944 } 939 945 940 946 return 0; 941 947 } 942 948 943 949 944 - static struct ethtool_ops ixgbe_ethtool_ops = { 950 + static const struct ethtool_ops 
ixgbe_ethtool_ops = { 945 951 .get_settings = ixgbe_get_settings, 946 952 .set_settings = ixgbe_set_settings, 947 953 .get_drvinfo = ixgbe_get_drvinfo, ··· 976 966 .set_tso = ixgbe_set_tso, 977 967 .get_strings = ixgbe_get_strings, 978 968 .phys_id = ixgbe_phys_id, 979 - .get_sset_count = ixgbe_get_sset_count, 969 + .get_sset_count = ixgbe_get_sset_count, 980 970 .get_ethtool_stats = ixgbe_get_ethtool_stats, 981 971 .get_coalesce = ixgbe_get_coalesce, 982 972 .set_coalesce = ixgbe_set_coalesce,
+739 -570
drivers/net/ixgbe/ixgbe_main.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 45 46 46 47 char ixgbe_driver_name[] = "ixgbe"; 47 48 static const char ixgbe_driver_string[] = 48 - "Intel(R) 10 Gigabit PCI Express Network Driver"; 49 + "Intel(R) 10 Gigabit PCI Express Network Driver"; 49 50 50 - #define DRV_VERSION "1.3.18-k4" 51 + #define DRV_VERSION "1.3.30-k2" 51 52 const char ixgbe_driver_version[] = DRV_VERSION; 52 - static const char ixgbe_copyright[] = 53 - "Copyright (c) 1999-2007 Intel Corporation."; 53 + static char ixgbe_copyright[] = "Copyright (c) 1999-2007 Intel Corporation."; 54 54 55 55 static const struct ixgbe_info *ixgbe_info_tbl[] = { 56 - [board_82598] = &ixgbe_82598_info, 56 + [board_82598] = &ixgbe_82598_info, 57 57 }; 58 58 59 59 /* ixgbe_pci_tbl - PCI Device ID Table ··· 72 74 board_82598 }, 73 75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 74 76 board_82598 }, 77 + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), 78 + board_82598 }, 75 79 76 80 /* required last entry */ 77 81 {0, } 78 82 }; 79 83 MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); 80 84 81 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 85 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 82 86 static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, 83 - void *p); 87 + void *p); 84 88 static struct notifier_block dca_notifier = { 85 89 .notifier_call = ixgbe_notify_dca, 86 90 
.next = NULL, ··· 104 104 /* Let firmware take over control of h/w */ 105 105 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 106 106 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 107 - ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); 107 + ctrl_ext & ~IXGBE_CTRL_EXT_DRV_LOAD); 108 108 } 109 109 110 110 static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) ··· 114 114 /* Let firmware know the driver has taken over */ 115 115 ctrl_ext = IXGBE_READ_REG(&adapter->hw, IXGBE_CTRL_EXT); 116 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_CTRL_EXT, 117 - ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 117 + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); 118 118 } 119 - 120 - #ifdef DEBUG 121 - /** 122 - * ixgbe_get_hw_dev_name - return device name string 123 - * used by hardware layer to print debugging information 124 - **/ 125 - char *ixgbe_get_hw_dev_name(struct ixgbe_hw *hw) 126 - { 127 - struct ixgbe_adapter *adapter = hw->back; 128 - struct net_device *netdev = adapter->netdev; 129 - return netdev->name; 130 - } 131 - #endif 132 119 133 120 static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, u16 int_alloc_entry, 134 - u8 msix_vector) 121 + u8 msix_vector) 135 122 { 136 123 u32 ivar, index; 137 124 ··· 131 144 } 132 145 133 146 static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 134 - struct ixgbe_tx_buffer 135 - *tx_buffer_info) 147 + struct ixgbe_tx_buffer 148 + *tx_buffer_info) 136 149 { 137 150 if (tx_buffer_info->dma) { 138 151 pci_unmap_page(adapter->pdev, tx_buffer_info->dma, 139 - tx_buffer_info->length, PCI_DMA_TODEVICE); 152 + tx_buffer_info->length, PCI_DMA_TODEVICE); 140 153 tx_buffer_info->dma = 0; 141 154 } 142 155 if (tx_buffer_info->skb) { ··· 147 160 } 148 161 149 162 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter, 150 - struct ixgbe_ring *tx_ring, 151 - unsigned int eop) 163 + struct ixgbe_ring *tx_ring, 164 + unsigned int eop) 152 165 { 153 166 struct ixgbe_hw *hw = &adapter->hw; 154 167 u32 head, tail; ··· 183 196 
return false; 184 197 } 185 198 186 - #define IXGBE_MAX_TXD_PWR 14 187 - #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 199 + #define IXGBE_MAX_TXD_PWR 14 200 + #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) 188 201 189 202 /* Tx Descriptors needed, worst case */ 190 203 #define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \ 191 204 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0)) 192 205 #define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \ 193 - MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 206 + MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */ 194 207 195 208 #define GET_TX_HEAD_FROM_RING(ring) (\ 196 209 *(volatile u32 *) \ ··· 296 309 return (total_packets ? true : false); 297 310 } 298 311 299 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 312 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 300 313 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 301 - struct ixgbe_ring *rx_ring) 314 + struct ixgbe_ring *rx_ring) 302 315 { 303 316 u32 rxctrl; 304 317 int cpu = get_cpu(); ··· 317 330 } 318 331 319 332 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 320 - struct ixgbe_ring *tx_ring) 333 + struct ixgbe_ring *tx_ring) 321 334 { 322 335 u32 txctrl; 323 336 int cpu = get_cpu(); ··· 393 406 * @rx_desc: rx descriptor 394 407 **/ 395 408 static void ixgbe_receive_skb(struct ixgbe_adapter *adapter, 396 - struct sk_buff *skb, u8 status, 397 - struct ixgbe_ring *ring, 409 + struct sk_buff *skb, u8 status, 410 + struct ixgbe_ring *ring, 398 411 union ixgbe_adv_rx_desc *rx_desc) 399 412 { 400 413 bool is_vlan = (status & IXGBE_RXD_STAT_VP); ··· 467 480 struct ixgbe_ring *rx_ring, 468 481 int cleaned_count) 469 482 { 470 - struct net_device *netdev = adapter->netdev; 471 483 struct pci_dev *pdev = adapter->pdev; 472 484 union ixgbe_adv_rx_desc *rx_desc; 473 485 struct ixgbe_rx_buffer *bi; ··· 479 493 while (cleaned_count--) { 480 494 rx_desc = 
IXGBE_RX_DESC_ADV(*rx_ring, i); 481 495 482 - if (!bi->page && 496 + if (!bi->page_dma && 483 497 (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) { 484 - bi->page = alloc_page(GFP_ATOMIC); 485 498 if (!bi->page) { 486 - adapter->alloc_rx_page_failed++; 487 - goto no_buffers; 499 + bi->page = alloc_page(GFP_ATOMIC); 500 + if (!bi->page) { 501 + adapter->alloc_rx_page_failed++; 502 + goto no_buffers; 503 + } 504 + bi->page_offset = 0; 505 + } else { 506 + /* use a half page if we're re-using */ 507 + bi->page_offset ^= (PAGE_SIZE / 2); 488 508 } 489 - bi->page_dma = pci_map_page(pdev, bi->page, 0, 490 - PAGE_SIZE, 491 - PCI_DMA_FROMDEVICE); 509 + 510 + bi->page_dma = pci_map_page(pdev, bi->page, 511 + bi->page_offset, 512 + (PAGE_SIZE / 2), 513 + PCI_DMA_FROMDEVICE); 492 514 } 493 515 494 516 if (!bi->skb) { 495 - struct sk_buff *skb = netdev_alloc_skb(netdev, bufsz); 517 + struct sk_buff *skb = netdev_alloc_skb(adapter->netdev, 518 + bufsz); 496 519 497 520 if (!skb) { 498 521 adapter->alloc_rx_buff_failed++; ··· 562 567 } 563 568 564 569 static bool ixgbe_clean_rx_irq(struct ixgbe_adapter *adapter, 565 - struct ixgbe_ring *rx_ring, 566 - int *work_done, int work_to_do) 570 + struct ixgbe_ring *rx_ring, 571 + int *work_done, int work_to_do) 567 572 { 568 - struct net_device *netdev = adapter->netdev; 569 573 struct pci_dev *pdev = adapter->pdev; 570 574 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 571 575 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; ··· 590 596 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { 591 597 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc)); 592 598 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >> 593 - IXGBE_RXDADV_HDRBUFLEN_SHIFT; 599 + IXGBE_RXDADV_HDRBUFLEN_SHIFT; 594 600 if (hdr_info & IXGBE_RXDADV_SPH) 595 601 adapter->rx_hdr_split++; 596 602 if (len > IXGBE_RX_HDR_SIZE) ··· 607 613 608 614 if (len && !skb_shinfo(skb)->nr_frags) { 609 615 pci_unmap_single(pdev, rx_buffer_info->dma, 610 - rx_ring->rx_buf_len + NET_IP_ALIGN, 
611 - PCI_DMA_FROMDEVICE); 616 + rx_ring->rx_buf_len + NET_IP_ALIGN, 617 + PCI_DMA_FROMDEVICE); 612 618 skb_put(skb, len); 613 619 } 614 620 615 621 if (upper_len) { 616 622 pci_unmap_page(pdev, rx_buffer_info->page_dma, 617 - PAGE_SIZE, PCI_DMA_FROMDEVICE); 623 + PAGE_SIZE / 2, PCI_DMA_FROMDEVICE); 618 624 rx_buffer_info->page_dma = 0; 619 625 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 620 - rx_buffer_info->page, 0, upper_len); 621 - rx_buffer_info->page = NULL; 626 + rx_buffer_info->page, 627 + rx_buffer_info->page_offset, 628 + upper_len); 629 + 630 + if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || 631 + (page_count(rx_buffer_info->page) != 1)) 632 + rx_buffer_info->page = NULL; 633 + else 634 + get_page(rx_buffer_info->page); 622 635 623 636 skb->len += upper_len; 624 637 skb->data_len += upper_len; ··· 648 647 rx_buffer_info->skb = next_buffer->skb; 649 648 rx_buffer_info->dma = next_buffer->dma; 650 649 next_buffer->skb = skb; 650 + next_buffer->dma = 0; 651 651 adapter->non_eop_descs++; 652 652 goto next_desc; 653 653 } ··· 664 662 total_rx_bytes += skb->len; 665 663 total_rx_packets++; 666 664 667 - skb->protocol = eth_type_trans(skb, netdev); 665 + skb->protocol = eth_type_trans(skb, adapter->netdev); 668 666 ixgbe_receive_skb(adapter, skb, staterr, rx_ring, rx_desc); 669 - netdev->last_rx = jiffies; 667 + adapter->netdev->last_rx = jiffies; 670 668 671 669 next_desc: 672 670 rx_desc->wb.upper.status_error = 0; ··· 726 724 q_vector = &adapter->q_vector[v_idx]; 727 725 /* XXX for_each_bit(...) 
*/ 728 726 r_idx = find_first_bit(q_vector->rxr_idx, 729 - adapter->num_rx_queues); 727 + adapter->num_rx_queues); 730 728 731 729 for (i = 0; i < q_vector->rxr_count; i++) { 732 730 j = adapter->rx_ring[r_idx].reg_idx; 733 731 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(j), v_idx); 734 732 r_idx = find_next_bit(q_vector->rxr_idx, 735 - adapter->num_rx_queues, 736 - r_idx + 1); 733 + adapter->num_rx_queues, 734 + r_idx + 1); 737 735 } 738 736 r_idx = find_first_bit(q_vector->txr_idx, 739 - adapter->num_tx_queues); 737 + adapter->num_tx_queues); 740 738 741 739 for (i = 0; i < q_vector->txr_count; i++) { 742 740 j = adapter->tx_ring[r_idx].reg_idx; 743 741 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(j), v_idx); 744 742 r_idx = find_next_bit(q_vector->txr_idx, 745 - adapter->num_tx_queues, 746 - r_idx + 1); 743 + adapter->num_tx_queues, 744 + r_idx + 1); 747 745 } 748 746 749 - /* if this is a tx only vector use half the irq (tx) rate */ 747 + /* if this is a tx only vector halve the interrupt rate */ 750 748 if (q_vector->txr_count && !q_vector->rxr_count) 751 - q_vector->eitr = adapter->tx_eitr; 749 + q_vector->eitr = (adapter->eitr_param >> 1); 752 750 else 753 - /* rx only or mixed */ 754 - q_vector->eitr = adapter->rx_eitr; 751 + /* rx only */ 752 + q_vector->eitr = adapter->eitr_param; 755 753 756 754 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 757 - EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 755 + EITR_INTS_PER_SEC_TO_REG(q_vector->eitr)); 758 756 } 759 757 760 758 ixgbe_set_ivar(adapter, IXGBE_IVAR_OTHER_CAUSES_INDEX, v_idx); 761 759 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 762 760 763 - /* set up to autoclear timer, lsc, and the vectors */ 761 + /* set up to autoclear timer, and the vectors */ 764 762 mask = IXGBE_EIMS_ENABLE_MASK; 765 - mask &= ~IXGBE_EIMS_OTHER; 763 + mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC); 766 764 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask); 767 765 } 768 766 ··· 792 790 * parameter (see ixgbe_param.c) 793 
791 **/ 794 792 static u8 ixgbe_update_itr(struct ixgbe_adapter *adapter, 795 - u32 eitr, u8 itr_setting, 796 - int packets, int bytes) 793 + u32 eitr, u8 itr_setting, 794 + int packets, int bytes) 797 795 { 798 796 unsigned int retval = itr_setting; 799 797 u32 timepassed_us; ··· 840 838 u32 new_itr; 841 839 u8 current_itr, ret_itr; 842 840 int i, r_idx, v_idx = ((void *)q_vector - (void *)(adapter->q_vector)) / 843 - sizeof(struct ixgbe_q_vector); 841 + sizeof(struct ixgbe_q_vector); 844 842 struct ixgbe_ring *rx_ring, *tx_ring; 845 843 846 844 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 847 845 for (i = 0; i < q_vector->txr_count; i++) { 848 846 tx_ring = &(adapter->tx_ring[r_idx]); 849 847 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 850 - q_vector->tx_eitr, 851 - tx_ring->total_packets, 852 - tx_ring->total_bytes); 848 + q_vector->tx_itr, 849 + tx_ring->total_packets, 850 + tx_ring->total_bytes); 853 851 /* if the result for this queue would decrease interrupt 854 852 * rate for this vector then use that result */ 855 - q_vector->tx_eitr = ((q_vector->tx_eitr > ret_itr) ? 856 - q_vector->tx_eitr - 1 : ret_itr); 853 + q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ? 854 + q_vector->tx_itr - 1 : ret_itr); 857 855 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 858 - r_idx + 1); 856 + r_idx + 1); 859 857 } 860 858 861 859 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 862 860 for (i = 0; i < q_vector->rxr_count; i++) { 863 861 rx_ring = &(adapter->rx_ring[r_idx]); 864 862 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 865 - q_vector->rx_eitr, 866 - rx_ring->total_packets, 867 - rx_ring->total_bytes); 863 + q_vector->rx_itr, 864 + rx_ring->total_packets, 865 + rx_ring->total_bytes); 868 866 /* if the result for this queue would decrease interrupt 869 867 * rate for this vector then use that result */ 870 - q_vector->rx_eitr = ((q_vector->rx_eitr > ret_itr) ? 
871 - q_vector->rx_eitr - 1 : ret_itr); 868 + q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ? 869 + q_vector->rx_itr - 1 : ret_itr); 872 870 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 873 - r_idx + 1); 871 + r_idx + 1); 874 872 } 875 873 876 - current_itr = max(q_vector->rx_eitr, q_vector->tx_eitr); 874 + current_itr = max(q_vector->rx_itr, q_vector->tx_itr); 877 875 878 876 switch (current_itr) { 879 877 /* counts and packets in update_itr are dependent on these numbers */ ··· 897 895 itr_reg = EITR_INTS_PER_SEC_TO_REG(new_itr); 898 896 /* must write high and low 16 bits to reset counter */ 899 897 DPRINTK(TX_ERR, DEBUG, "writing eitr(%d): %08X\n", v_idx, 900 - itr_reg); 898 + itr_reg); 901 899 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg | (itr_reg)<<16); 902 900 } 903 901 904 902 return; 903 + } 904 + 905 + 906 + static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) 907 + { 908 + struct ixgbe_hw *hw = &adapter->hw; 909 + 910 + adapter->lsc_int++; 911 + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 912 + adapter->link_check_timeout = jiffies; 913 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 914 + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 915 + schedule_work(&adapter->watchdog_task); 916 + } 905 917 } 906 918 907 919 static irqreturn_t ixgbe_msix_lsc(int irq, void *data) ··· 925 909 struct ixgbe_hw *hw = &adapter->hw; 926 910 u32 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 927 911 928 - if (eicr & IXGBE_EICR_LSC) { 929 - adapter->lsc_int++; 930 - if (!test_bit(__IXGBE_DOWN, &adapter->state)) 931 - mod_timer(&adapter->watchdog_timer, jiffies); 932 - } 912 + if (eicr & IXGBE_EICR_LSC) 913 + ixgbe_check_lsc(adapter); 933 914 934 915 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 935 916 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); ··· 947 934 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 948 935 for (i = 0; i < q_vector->txr_count; i++) { 949 936 tx_ring = &(adapter->tx_ring[r_idx]); 950 - #if 
defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 937 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 951 938 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 952 939 ixgbe_update_tx_dca(adapter, tx_ring); 953 940 #endif ··· 955 942 tx_ring->total_packets = 0; 956 943 ixgbe_clean_tx_irq(adapter, tx_ring); 957 944 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 958 - r_idx + 1); 945 + r_idx + 1); 959 946 } 960 947 961 948 return IRQ_HANDLED; ··· 972 959 struct ixgbe_adapter *adapter = q_vector->adapter; 973 960 struct ixgbe_ring *rx_ring; 974 961 int r_idx; 962 + int i; 975 963 976 964 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 965 + for (i = 0; i < q_vector->rxr_count; i++) { 966 + rx_ring = &(adapter->rx_ring[r_idx]); 967 + rx_ring->total_bytes = 0; 968 + rx_ring->total_packets = 0; 969 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 970 + r_idx + 1); 971 + } 972 + 977 973 if (!q_vector->rxr_count) 978 974 return IRQ_HANDLED; 979 975 976 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 980 977 rx_ring = &(adapter->rx_ring[r_idx]); 981 978 /* disable interrupts on this vector only */ 982 979 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, rx_ring->v_idx); 983 - rx_ring->total_bytes = 0; 984 - rx_ring->total_packets = 0; 985 980 netif_rx_schedule(adapter->netdev, &q_vector->napi); 986 981 987 982 return IRQ_HANDLED; ··· 1008 987 * @napi: napi struct with our devices info in it 1009 988 * @budget: amount of work driver is allowed to do this pass, in packets 1010 989 * 990 + * This function is optimized for cleaning one queue only on a single 991 + * q_vector!!! 
1011 992 **/ 1012 993 static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget) 1013 994 { 1014 995 struct ixgbe_q_vector *q_vector = 1015 - container_of(napi, struct ixgbe_q_vector, napi); 996 + container_of(napi, struct ixgbe_q_vector, napi); 1016 997 struct ixgbe_adapter *adapter = q_vector->adapter; 1017 - struct ixgbe_ring *rx_ring; 998 + struct ixgbe_ring *rx_ring = NULL; 1018 999 int work_done = 0; 1019 1000 long r_idx; 1020 1001 1021 1002 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1022 1003 rx_ring = &(adapter->rx_ring[r_idx]); 1023 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 1004 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 1024 1005 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1025 1006 ixgbe_update_rx_dca(adapter, rx_ring); 1026 1007 #endif ··· 1032 1009 /* If all Rx work done, exit the polling mode */ 1033 1010 if (work_done < budget) { 1034 1011 netif_rx_complete(adapter->netdev, napi); 1035 - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 1012 + if (adapter->itr_setting & 3) 1036 1013 ixgbe_set_itr_msix(q_vector); 1037 1014 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1038 1015 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, rx_ring->v_idx); ··· 1041 1018 return work_done; 1042 1019 } 1043 1020 1021 + /** 1022 + * ixgbe_clean_rxonly_many - msix (aka one shot) rx clean routine 1023 + * @napi: napi struct with our devices info in it 1024 + * @budget: amount of work driver is allowed to do this pass, in packets 1025 + * 1026 + * This function will clean more than one rx queue associated with a 1027 + * q_vector. 
1028 + **/ 1029 + static int ixgbe_clean_rxonly_many(struct napi_struct *napi, int budget) 1030 + { 1031 + struct ixgbe_q_vector *q_vector = 1032 + container_of(napi, struct ixgbe_q_vector, napi); 1033 + struct ixgbe_adapter *adapter = q_vector->adapter; 1034 + struct ixgbe_ring *rx_ring = NULL; 1035 + int work_done = 0, i; 1036 + long r_idx; 1037 + u16 enable_mask = 0; 1038 + 1039 + /* attempt to distribute budget to each queue fairly, but don't allow 1040 + * the budget to go below 1 because we'll exit polling */ 1041 + budget /= (q_vector->rxr_count ?: 1); 1042 + budget = max(budget, 1); 1043 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1044 + for (i = 0; i < q_vector->rxr_count; i++) { 1045 + rx_ring = &(adapter->rx_ring[r_idx]); 1046 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 1047 + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 1048 + ixgbe_update_rx_dca(adapter, rx_ring); 1049 + #endif 1050 + ixgbe_clean_rx_irq(adapter, rx_ring, &work_done, budget); 1051 + enable_mask |= rx_ring->v_idx; 1052 + r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 1053 + r_idx + 1); 1054 + } 1055 + 1056 + r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1057 + rx_ring = &(adapter->rx_ring[r_idx]); 1058 + /* If all Rx work done, exit the polling mode */ 1059 + if (work_done < budget) { 1060 + netif_rx_complete(adapter->netdev, napi); 1061 + if (adapter->itr_setting & 3) 1062 + ixgbe_set_itr_msix(q_vector); 1063 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1064 + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, enable_mask); 1065 + return 0; 1066 + } 1067 + 1068 + return work_done; 1069 + } 1044 1070 static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx, 1045 - int r_idx) 1071 + int r_idx) 1046 1072 { 1047 1073 a->q_vector[v_idx].adapter = a; 1048 1074 set_bit(r_idx, a->q_vector[v_idx].rxr_idx); ··· 1100 1028 } 1101 1029 1102 1030 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int 
v_idx, 1103 - int r_idx) 1031 + int r_idx) 1104 1032 { 1105 1033 a->q_vector[v_idx].adapter = a; 1106 1034 set_bit(r_idx, a->q_vector[v_idx].txr_idx); ··· 1120 1048 * mapping configurations in here. 1121 1049 **/ 1122 1050 static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 1123 - int vectors) 1051 + int vectors) 1124 1052 { 1125 1053 int v_start = 0; 1126 1054 int rxr_idx = 0, txr_idx = 0; ··· 1197 1125 goto out; 1198 1126 1199 1127 #define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 1200 - (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 1201 - &ixgbe_msix_clean_many) 1128 + (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 1129 + &ixgbe_msix_clean_many) 1202 1130 for (vector = 0; vector < q_vectors; vector++) { 1203 1131 handler = SET_HANDLER(&adapter->q_vector[vector]); 1204 1132 sprintf(adapter->name[vector], "%s:v%d-%s", 1205 - netdev->name, vector, 1206 - (handler == &ixgbe_msix_clean_rx) ? "Rx" : 1207 - ((handler == &ixgbe_msix_clean_tx) ? "Tx" : "TxRx")); 1133 + netdev->name, vector, 1134 + (handler == &ixgbe_msix_clean_rx) ? "Rx" : 1135 + ((handler == &ixgbe_msix_clean_tx) ? 
"Tx" : "TxRx")); 1208 1136 err = request_irq(adapter->msix_entries[vector].vector, 1209 - handler, 0, adapter->name[vector], 1210 - &(adapter->q_vector[vector])); 1137 + handler, 0, adapter->name[vector], 1138 + &(adapter->q_vector[vector])); 1211 1139 if (err) { 1212 1140 DPRINTK(PROBE, ERR, 1213 - "request_irq failed for MSIX interrupt " 1214 - "Error: %d\n", err); 1141 + "request_irq failed for MSIX interrupt " 1142 + "Error: %d\n", err); 1215 1143 goto free_queue_irqs; 1216 1144 } 1217 1145 } 1218 1146 1219 1147 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 1220 1148 err = request_irq(adapter->msix_entries[vector].vector, 1221 - &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 1149 + &ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 1222 1150 if (err) { 1223 1151 DPRINTK(PROBE, ERR, 1224 1152 "request_irq for msix_lsc failed: %d\n", err); ··· 1230 1158 free_queue_irqs: 1231 1159 for (i = vector - 1; i >= 0; i--) 1232 1160 free_irq(adapter->msix_entries[--vector].vector, 1233 - &(adapter->q_vector[i])); 1161 + &(adapter->q_vector[i])); 1234 1162 adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 1235 1163 pci_disable_msix(adapter->pdev); 1236 1164 kfree(adapter->msix_entries); ··· 1248 1176 struct ixgbe_ring *rx_ring = &adapter->rx_ring[0]; 1249 1177 struct ixgbe_ring *tx_ring = &adapter->tx_ring[0]; 1250 1178 1251 - q_vector->tx_eitr = ixgbe_update_itr(adapter, new_itr, 1252 - q_vector->tx_eitr, 1253 - tx_ring->total_packets, 1254 - tx_ring->total_bytes); 1255 - q_vector->rx_eitr = ixgbe_update_itr(adapter, new_itr, 1256 - q_vector->rx_eitr, 1257 - rx_ring->total_packets, 1258 - rx_ring->total_bytes); 1179 + q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, 1180 + q_vector->tx_itr, 1181 + tx_ring->total_packets, 1182 + tx_ring->total_bytes); 1183 + q_vector->rx_itr = ixgbe_update_itr(adapter, new_itr, 1184 + q_vector->rx_itr, 1185 + rx_ring->total_packets, 1186 + rx_ring->total_bytes); 1259 1187 1260 - current_itr = max(q_vector->rx_eitr, 
q_vector->tx_eitr); 1188 + current_itr = max(q_vector->rx_itr, q_vector->tx_itr); 1261 1189 1262 1190 switch (current_itr) { 1263 1191 /* counts and packets in update_itr are dependent on these numbers */ ··· 1302 1230 struct ixgbe_hw *hw = &adapter->hw; 1303 1231 u32 eicr; 1304 1232 1305 - 1306 1233 /* for NAPI, using EIAM to auto-mask tx/rx interrupt bits on read 1307 1234 * therefore no explict interrupt disable is necessary */ 1308 1235 eicr = IXGBE_READ_REG(hw, IXGBE_EICR); 1309 - if (!eicr) 1236 + if (!eicr) { 1237 + /* shared interrupt alert! 1238 + * make sure interrupts are enabled because the read will 1239 + * have disabled interrupts due to EIAM */ 1240 + ixgbe_irq_enable(adapter); 1310 1241 return IRQ_NONE; /* Not our interrupt */ 1311 - 1312 - if (eicr & IXGBE_EICR_LSC) { 1313 - adapter->lsc_int++; 1314 - if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1315 - mod_timer(&adapter->watchdog_timer, jiffies); 1316 1242 } 1317 1243 1244 + if (eicr & IXGBE_EICR_LSC) 1245 + ixgbe_check_lsc(adapter); 1318 1246 1319 1247 if (netif_rx_schedule_prep(netdev, &adapter->q_vector[0].napi)) { 1320 1248 adapter->tx_ring[0].total_packets = 0; ··· 1357 1285 err = ixgbe_request_msix_irqs(adapter); 1358 1286 } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { 1359 1287 err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, 1360 - netdev->name, netdev); 1288 + netdev->name, netdev); 1361 1289 } else { 1362 1290 err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, 1363 - netdev->name, netdev); 1291 + netdev->name, netdev); 1364 1292 } 1365 1293 1366 1294 if (err) ··· 1384 1312 i--; 1385 1313 for (; i >= 0; i--) { 1386 1314 free_irq(adapter->msix_entries[i].vector, 1387 - &(adapter->q_vector[i])); 1315 + &(adapter->q_vector[i])); 1388 1316 } 1389 1317 1390 1318 ixgbe_reset_q_vectors(adapter); ··· 1431 1359 struct ixgbe_hw *hw = &adapter->hw; 1432 1360 1433 1361 IXGBE_WRITE_REG(hw, IXGBE_EITR(0), 1434 - EITR_INTS_PER_SEC_TO_REG(adapter->rx_eitr)); 1362 + 
EITR_INTS_PER_SEC_TO_REG(adapter->eitr_param)); 1435 1363 1436 1364 ixgbe_set_ivar(adapter, IXGBE_IVAR_RX_QUEUE(0), 0); 1437 1365 ixgbe_set_ivar(adapter, IXGBE_IVAR_TX_QUEUE(0), 0); ··· 1517 1445 srrctl |= IXGBE_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 1518 1446 srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS; 1519 1447 srrctl |= ((IXGBE_RX_HDR_SIZE << 1520 - IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 1521 - IXGBE_SRRCTL_BSIZEHDR_MASK); 1448 + IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 1449 + IXGBE_SRRCTL_BSIZEHDR_MASK); 1522 1450 } else { 1523 1451 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 1524 1452 ··· 1535 1463 /** 1536 1464 * ixgbe_get_skb_hdr - helper function for LRO header processing 1537 1465 * @skb: pointer to sk_buff to be added to LRO packet 1538 - * @iphdr: pointer to tcp header structure 1466 + * @iphdr: pointer to ip header structure 1539 1467 * @tcph: pointer to tcp header structure 1540 1468 * @hdr_flags: pointer to header flags 1541 1469 * @priv: private data ··· 1560 1488 } 1561 1489 1562 1490 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \ 1563 - (((S) & (PAGE_SIZE - 1)) ? 1 : 0)) 1491 + (((S) & (PAGE_SIZE - 1)) ? 
1 : 0)) 1564 1492 1565 1493 /** 1566 1494 * ixgbe_configure_rx - Configure 8259x Receive Unit after Reset ··· 1586 1514 int rx_buf_len; 1587 1515 1588 1516 /* Decide whether to use packet split mode or not */ 1589 - if (netdev->mtu > ETH_DATA_LEN) 1590 - adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 1591 - else 1592 - adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED; 1517 + adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED; 1593 1518 1594 1519 /* Set the RX buffer length according to the mode */ 1595 1520 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) { ··· 1707 1638 } 1708 1639 1709 1640 static void ixgbe_vlan_rx_register(struct net_device *netdev, 1710 - struct vlan_group *grp) 1641 + struct vlan_group *grp) 1711 1642 { 1712 1643 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1713 1644 u32 ctrl; ··· 1731 1662 static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) 1732 1663 { 1733 1664 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1665 + struct ixgbe_hw *hw = &adapter->hw; 1734 1666 1735 1667 /* add VID to filter table */ 1736 - ixgbe_set_vfta(&adapter->hw, vid, 0, true); 1668 + hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true); 1737 1669 } 1738 1670 1739 1671 static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 1740 1672 { 1741 1673 struct ixgbe_adapter *adapter = netdev_priv(netdev); 1674 + struct ixgbe_hw *hw = &adapter->hw; 1742 1675 1743 1676 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1744 1677 ixgbe_irq_disable(adapter); ··· 1751 1680 ixgbe_irq_enable(adapter); 1752 1681 1753 1682 /* remove VID from filter table */ 1754 - ixgbe_set_vfta(&adapter->hw, vid, 0, false); 1683 + hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false); 1755 1684 } 1756 1685 1757 1686 static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) ··· 1827 1756 addr_count = netdev->uc_count; 1828 1757 if (addr_count) 1829 1758 addr_list = netdev->uc_list->dmi_addr; 1830 - ixgbe_update_uc_addr_list(hw, addr_list, addr_count, 1831 - ixgbe_addr_list_itr); 1759 
+ hw->mac.ops.update_uc_addr_list(hw, addr_list, addr_count, 1760 + ixgbe_addr_list_itr); 1832 1761 1833 1762 /* reprogram multicast list */ 1834 1763 addr_count = netdev->mc_count; 1835 1764 if (addr_count) 1836 1765 addr_list = netdev->mc_list->dmi_addr; 1837 - ixgbe_update_mc_addr_list(hw, addr_list, addr_count, 1838 - ixgbe_addr_list_itr); 1766 + hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, 1767 + ixgbe_addr_list_itr); 1839 1768 } 1840 1769 1841 1770 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) ··· 1849 1778 q_vectors = 1; 1850 1779 1851 1780 for (q_idx = 0; q_idx < q_vectors; q_idx++) { 1781 + struct napi_struct *napi; 1852 1782 q_vector = &adapter->q_vector[q_idx]; 1853 1783 if (!q_vector->rxr_count) 1854 1784 continue; 1855 - napi_enable(&q_vector->napi); 1785 + napi = &q_vector->napi; 1786 + if ((adapter->flags & IXGBE_FLAG_MSIX_ENABLED) && 1787 + (q_vector->rxr_count > 1)) 1788 + napi->poll = &ixgbe_clean_rxonly_many; 1789 + 1790 + napi_enable(napi); 1856 1791 } 1857 1792 } 1858 1793 ··· 1893 1816 ixgbe_configure_rx(adapter); 1894 1817 for (i = 0; i < adapter->num_rx_queues; i++) 1895 1818 ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i], 1896 - (adapter->rx_ring[i].count - 1)); 1819 + (adapter->rx_ring[i].count - 1)); 1897 1820 } 1898 1821 1899 1822 static int ixgbe_up_complete(struct ixgbe_adapter *adapter) ··· 1911 1834 (adapter->flags & IXGBE_FLAG_MSI_ENABLED)) { 1912 1835 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 1913 1836 gpie = (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_EIAME | 1914 - IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); 1837 + IXGBE_GPIE_PBA_SUPPORT | IXGBE_GPIE_OCD); 1915 1838 } else { 1916 1839 /* MSI only */ 1917 1840 gpie = 0; ··· 1974 1897 1975 1898 /* bring the link up in the watchdog, this could race with our first 1976 1899 * link up interrupt but shouldn't be a problem */ 1900 + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; 1901 + adapter->link_check_timeout = jiffies; 1977 1902 
mod_timer(&adapter->watchdog_timer, jiffies); 1978 1903 return 0; 1979 1904 } ··· 2000 1921 2001 1922 void ixgbe_reset(struct ixgbe_adapter *adapter) 2002 1923 { 2003 - if (ixgbe_init_hw(&adapter->hw)) 2004 - DPRINTK(PROBE, ERR, "Hardware Error\n"); 1924 + struct ixgbe_hw *hw = &adapter->hw; 1925 + if (hw->mac.ops.init_hw(hw)) 1926 + dev_err(&adapter->pdev->dev, "Hardware Error\n"); 2005 1927 2006 1928 /* reprogram the RAR[0] in case user changed it. */ 2007 - ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 1929 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 2008 1930 2009 1931 } 2010 - 2011 - #ifdef CONFIG_PM 2012 - static int ixgbe_resume(struct pci_dev *pdev) 2013 - { 2014 - struct net_device *netdev = pci_get_drvdata(pdev); 2015 - struct ixgbe_adapter *adapter = netdev_priv(netdev); 2016 - u32 err; 2017 - 2018 - pci_set_power_state(pdev, PCI_D0); 2019 - pci_restore_state(pdev); 2020 - err = pci_enable_device(pdev); 2021 - if (err) { 2022 - printk(KERN_ERR "ixgbe: Cannot enable PCI device from " \ 2023 - "suspend\n"); 2024 - return err; 2025 - } 2026 - pci_set_master(pdev); 2027 - 2028 - pci_enable_wake(pdev, PCI_D3hot, 0); 2029 - pci_enable_wake(pdev, PCI_D3cold, 0); 2030 - 2031 - if (netif_running(netdev)) { 2032 - err = ixgbe_request_irq(adapter); 2033 - if (err) 2034 - return err; 2035 - } 2036 - 2037 - ixgbe_reset(adapter); 2038 - 2039 - if (netif_running(netdev)) 2040 - ixgbe_up(adapter); 2041 - 2042 - netif_device_attach(netdev); 2043 - 2044 - return 0; 2045 - } 2046 - #endif 2047 1932 2048 1933 /** 2049 1934 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue ··· 2015 1972 * @rx_ring: ring to free buffers from 2016 1973 **/ 2017 1974 static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 2018 - struct ixgbe_ring *rx_ring) 1975 + struct ixgbe_ring *rx_ring) 2019 1976 { 2020 1977 struct pci_dev *pdev = adapter->pdev; 2021 1978 unsigned long size; ··· 2029 1986 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 2030 
1987 if (rx_buffer_info->dma) { 2031 1988 pci_unmap_single(pdev, rx_buffer_info->dma, 2032 - rx_ring->rx_buf_len, 2033 - PCI_DMA_FROMDEVICE); 1989 + rx_ring->rx_buf_len, 1990 + PCI_DMA_FROMDEVICE); 2034 1991 rx_buffer_info->dma = 0; 2035 1992 } 2036 1993 if (rx_buffer_info->skb) { ··· 2039 1996 } 2040 1997 if (!rx_buffer_info->page) 2041 1998 continue; 2042 - pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE, 2043 - PCI_DMA_FROMDEVICE); 1999 + pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2, 2000 + PCI_DMA_FROMDEVICE); 2044 2001 rx_buffer_info->page_dma = 0; 2045 - 2046 2002 put_page(rx_buffer_info->page); 2047 2003 rx_buffer_info->page = NULL; 2004 + rx_buffer_info->page_offset = 0; 2048 2005 } 2049 2006 2050 2007 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; ··· 2066 2023 * @tx_ring: ring to be cleaned 2067 2024 **/ 2068 2025 static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 2069 - struct ixgbe_ring *tx_ring) 2026 + struct ixgbe_ring *tx_ring) 2070 2027 { 2071 2028 struct ixgbe_tx_buffer *tx_buffer_info; 2072 2029 unsigned long size; ··· 2119 2076 void ixgbe_down(struct ixgbe_adapter *adapter) 2120 2077 { 2121 2078 struct net_device *netdev = adapter->netdev; 2079 + struct ixgbe_hw *hw = &adapter->hw; 2122 2080 u32 rxctrl; 2081 + u32 txdctl; 2082 + int i, j; 2123 2083 2124 2084 /* signal that we are down to the interrupt handler */ 2125 2085 set_bit(__IXGBE_DOWN, &adapter->state); 2126 2086 2127 2087 /* disable receives */ 2128 - rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); 2129 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, 2130 - rxctrl & ~IXGBE_RXCTRL_RXEN); 2088 + rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 2089 + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 2131 2090 2132 2091 netif_tx_disable(netdev); 2133 2092 2134 - /* disable transmits in the hardware */ 2135 - 2136 - /* flush both disables */ 2137 - IXGBE_WRITE_FLUSH(&adapter->hw); 2093 + IXGBE_WRITE_FLUSH(hw); 2138 2094 msleep(10); 
2095 + 2096 + netif_tx_stop_all_queues(netdev); 2139 2097 2140 2098 ixgbe_irq_disable(adapter); 2141 2099 2142 2100 ixgbe_napi_disable_all(adapter); 2101 + 2143 2102 del_timer_sync(&adapter->watchdog_timer); 2103 + cancel_work_sync(&adapter->watchdog_task); 2104 + 2105 + /* disable transmits in the hardware now that interrupts are off */ 2106 + for (i = 0; i < adapter->num_tx_queues; i++) { 2107 + j = adapter->tx_ring[i].reg_idx; 2108 + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 2109 + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), 2110 + (txdctl & ~IXGBE_TXDCTL_ENABLE)); 2111 + } 2144 2112 2145 2113 netif_carrier_off(netdev); 2146 - netif_tx_stop_all_queues(netdev); 2147 2114 2148 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 2115 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 2149 2116 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 2150 2117 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 2151 2118 dca_remove_requester(&adapter->pdev->dev); ··· 2167 2114 ixgbe_clean_all_tx_rings(adapter); 2168 2115 ixgbe_clean_all_rx_rings(adapter); 2169 2116 2170 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 2117 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 2171 2118 /* since we reset the hardware DCA settings were cleared */ 2172 2119 if (dca_add_requester(&adapter->pdev->dev) == 0) { 2173 2120 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 2174 2121 /* always use CB2 mode, difference is masked 2175 2122 * in the CB driver */ 2176 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 2123 + IXGBE_WRITE_REG(hw, IXGBE_DCA_CTRL, 2); 2177 2124 ixgbe_setup_dca(adapter); 2178 2125 } 2179 2126 #endif 2180 - } 2181 - 2182 - static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) 2183 - { 2184 - struct net_device *netdev = pci_get_drvdata(pdev); 2185 - struct ixgbe_adapter *adapter = netdev_priv(netdev); 2186 - #ifdef CONFIG_PM 2187 - int retval = 0; 2188 - #endif 2189 - 2190 - netif_device_detach(netdev); 2191 - 2192 - if (netif_running(netdev)) { 2193 
- ixgbe_down(adapter); 2194 - ixgbe_free_irq(adapter); 2195 - } 2196 - 2197 - #ifdef CONFIG_PM 2198 - retval = pci_save_state(pdev); 2199 - if (retval) 2200 - return retval; 2201 - #endif 2202 - 2203 - pci_enable_wake(pdev, PCI_D3hot, 0); 2204 - pci_enable_wake(pdev, PCI_D3cold, 0); 2205 - 2206 - ixgbe_release_hw_control(adapter); 2207 - 2208 - pci_disable_device(pdev); 2209 - 2210 - pci_set_power_state(pdev, pci_choose_state(pdev, state)); 2211 - 2212 - return 0; 2213 - } 2214 - 2215 - static void ixgbe_shutdown(struct pci_dev *pdev) 2216 - { 2217 - ixgbe_suspend(pdev, PMSG_SUSPEND); 2218 2127 } 2219 2128 2220 2129 /** ··· 2189 2174 static int ixgbe_poll(struct napi_struct *napi, int budget) 2190 2175 { 2191 2176 struct ixgbe_q_vector *q_vector = container_of(napi, 2192 - struct ixgbe_q_vector, napi); 2177 + struct ixgbe_q_vector, napi); 2193 2178 struct ixgbe_adapter *adapter = q_vector->adapter; 2194 - int tx_cleaned = 0, work_done = 0; 2179 + int tx_cleaned, work_done = 0; 2195 2180 2196 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 2181 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 2197 2182 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 2198 2183 ixgbe_update_tx_dca(adapter, adapter->tx_ring); 2199 2184 ixgbe_update_rx_dca(adapter, adapter->rx_ring); ··· 2209 2194 /* If budget not fully consumed, exit the polling mode */ 2210 2195 if (work_done < budget) { 2211 2196 netif_rx_complete(adapter->netdev, napi); 2212 - if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS) 2197 + if (adapter->itr_setting & 3) 2213 2198 ixgbe_set_itr(adapter); 2214 2199 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 2215 2200 ixgbe_irq_enable(adapter); 2216 2201 } 2217 - 2218 2202 return work_done; 2219 2203 } 2220 2204 ··· 2239 2225 ixgbe_reinit_locked(adapter); 2240 2226 } 2241 2227 2242 - static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 2243 - int vectors) 2228 + static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 2244 2229 { 2245 - int 
err, vector_threshold; 2246 - 2247 - /* We'll want at least 3 (vector_threshold): 2248 - * 1) TxQ[0] Cleanup 2249 - * 2) RxQ[0] Cleanup 2250 - * 3) Other (Link Status Change, etc.) 2251 - * 4) TCP Timer (optional) 2252 - */ 2253 - vector_threshold = MIN_MSIX_COUNT; 2254 - 2255 - /* The more we get, the more we will assign to Tx/Rx Cleanup 2256 - * for the separate queues...where Rx Cleanup >= Tx Cleanup. 2257 - * Right now, we simply care about how many we'll get; we'll 2258 - * set them up later while requesting irq's. 2259 - */ 2260 - while (vectors >= vector_threshold) { 2261 - err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 2262 - vectors); 2263 - if (!err) /* Success in acquiring all requested vectors. */ 2264 - break; 2265 - else if (err < 0) 2266 - vectors = 0; /* Nasty failure, quit now */ 2267 - else /* err == number of vectors we should try again with */ 2268 - vectors = err; 2269 - } 2270 - 2271 - if (vectors < vector_threshold) { 2272 - /* Can't allocate enough MSI-X interrupts? Oh well. 2273 - * This just means we'll go with either a single MSI 2274 - * vector or fall back to legacy interrupts. 2275 - */ 2276 - DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); 2277 - adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2278 - kfree(adapter->msix_entries); 2279 - adapter->msix_entries = NULL; 2280 - adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2281 - adapter->num_tx_queues = 1; 2282 - adapter->num_rx_queues = 1; 2283 - } else { 2284 - adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! 
*/ 2285 - adapter->num_msix_vectors = vectors; 2286 - } 2287 - } 2288 - 2289 - static void __devinit ixgbe_set_num_queues(struct ixgbe_adapter *adapter) 2290 - { 2291 - int nrq, ntq; 2230 + int nrq = 1, ntq = 1; 2292 2231 int feature_mask = 0, rss_i, rss_m; 2293 2232 2294 2233 /* Number of supported queues */ ··· 2279 2312 adapter->num_tx_queues = ntq; 2280 2313 } 2281 2314 2315 + static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, 2316 + int vectors) 2317 + { 2318 + int err, vector_threshold; 2319 + 2320 + /* We'll want at least 3 (vector_threshold): 2321 + * 1) TxQ[0] Cleanup 2322 + * 2) RxQ[0] Cleanup 2323 + * 3) Other (Link Status Change, etc.) 2324 + * 4) TCP Timer (optional) 2325 + */ 2326 + vector_threshold = MIN_MSIX_COUNT; 2327 + 2328 + /* The more we get, the more we will assign to Tx/Rx Cleanup 2329 + * for the separate queues...where Rx Cleanup >= Tx Cleanup. 2330 + * Right now, we simply care about how many we'll get; we'll 2331 + * set them up later while requesting irq's. 2332 + */ 2333 + while (vectors >= vector_threshold) { 2334 + err = pci_enable_msix(adapter->pdev, adapter->msix_entries, 2335 + vectors); 2336 + if (!err) /* Success in acquiring all requested vectors. */ 2337 + break; 2338 + else if (err < 0) 2339 + vectors = 0; /* Nasty failure, quit now */ 2340 + else /* err == number of vectors we should try again with */ 2341 + vectors = err; 2342 + } 2343 + 2344 + if (vectors < vector_threshold) { 2345 + /* Can't allocate enough MSI-X interrupts? Oh well. 2346 + * This just means we'll go with either a single MSI 2347 + * vector or fall back to legacy interrupts. 2348 + */ 2349 + DPRINTK(HW, DEBUG, "Unable to allocate MSI-X interrupts\n"); 2350 + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; 2351 + kfree(adapter->msix_entries); 2352 + adapter->msix_entries = NULL; 2353 + adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2354 + ixgbe_set_num_queues(adapter); 2355 + } else { 2356 + adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! 
*/ 2357 + adapter->num_msix_vectors = vectors; 2358 + } 2359 + } 2360 + 2282 2361 /** 2283 2362 * ixgbe_cache_ring_register - Descriptor ring to register mapping 2284 2363 * @adapter: board private structure to initialize ··· 2334 2321 **/ 2335 2322 static void __devinit ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) 2336 2323 { 2337 - /* TODO: Remove all uses of the indices in the cases where multiple 2338 - * features are OR'd together, if the feature set makes sense. 2339 - */ 2340 2324 int feature_mask = 0, rss_i; 2341 2325 int i, txr_idx, rxr_idx; 2342 2326 ··· 2374 2364 int i; 2375 2365 2376 2366 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 2377 - sizeof(struct ixgbe_ring), GFP_KERNEL); 2367 + sizeof(struct ixgbe_ring), GFP_KERNEL); 2378 2368 if (!adapter->tx_ring) 2379 2369 goto err_tx_ring_allocation; 2380 2370 2381 2371 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 2382 - sizeof(struct ixgbe_ring), GFP_KERNEL); 2372 + sizeof(struct ixgbe_ring), GFP_KERNEL); 2383 2373 if (!adapter->rx_ring) 2384 2374 goto err_rx_ring_allocation; 2385 2375 2386 2376 for (i = 0; i < adapter->num_tx_queues; i++) { 2387 - adapter->tx_ring[i].count = IXGBE_DEFAULT_TXD; 2377 + adapter->tx_ring[i].count = adapter->tx_ring_count; 2388 2378 adapter->tx_ring[i].queue_index = i; 2389 2379 } 2380 + 2390 2381 for (i = 0; i < adapter->num_rx_queues; i++) { 2391 - adapter->rx_ring[i].count = IXGBE_DEFAULT_RXD; 2382 + adapter->rx_ring[i].count = adapter->rx_ring_count; 2392 2383 adapter->rx_ring[i].queue_index = i; 2393 2384 } 2394 2385 ··· 2411 2400 * capabilities of the hardware and the kernel. 2412 2401 **/ 2413 2402 static int __devinit ixgbe_set_interrupt_capability(struct ixgbe_adapter 2414 - *adapter) 2403 + *adapter) 2415 2404 { 2416 2405 int err = 0; 2417 2406 int vector, v_budget; 2418 - 2419 - /* 2420 - * Set the default interrupt throttle rate. 
2421 - */ 2422 - adapter->rx_eitr = (1000000 / IXGBE_DEFAULT_ITR_RX_USECS); 2423 - adapter->tx_eitr = (1000000 / IXGBE_DEFAULT_ITR_TX_USECS); 2424 2407 2425 2408 /* 2426 2409 * It's easy to be greedy for MSI-X vectors, but it really ··· 2423 2418 * (roughly) twice the number of vectors as there are CPU's. 2424 2419 */ 2425 2420 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 2426 - (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 2421 + (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 2427 2422 2428 2423 /* 2429 2424 * At the same time, hardware can only support a maximum of ··· 2437 2432 /* A failure in MSI-X entry allocation isn't fatal, but it does 2438 2433 * mean we disable MSI-X capabilities of the adapter. */ 2439 2434 adapter->msix_entries = kcalloc(v_budget, 2440 - sizeof(struct msix_entry), GFP_KERNEL); 2435 + sizeof(struct msix_entry), GFP_KERNEL); 2441 2436 if (!adapter->msix_entries) { 2442 2437 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 2443 2438 ixgbe_set_num_queues(adapter); ··· 2446 2441 err = ixgbe_alloc_queues(adapter); 2447 2442 if (err) { 2448 2443 DPRINTK(PROBE, ERR, "Unable to allocate memory " 2449 - "for queues\n"); 2444 + "for queues\n"); 2450 2445 goto out; 2451 2446 } 2452 2447 ··· 2467 2462 adapter->flags |= IXGBE_FLAG_MSI_ENABLED; 2468 2463 } else { 2469 2464 DPRINTK(HW, DEBUG, "Unable to allocate MSI interrupt, " 2470 - "falling back to legacy. Error: %d\n", err); 2465 + "falling back to legacy. Error: %d\n", err); 2471 2466 /* reset err */ 2472 2467 err = 0; 2473 2468 } ··· 2523 2518 } 2524 2519 2525 2520 DPRINTK(DRV, INFO, "Multiqueue %s: Rx Queue count = %u, " 2526 - "Tx Queue count = %u\n", 2527 - (adapter->num_rx_queues > 1) ? "Enabled" : 2528 - "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2521 + "Tx Queue count = %u\n", 2522 + (adapter->num_rx_queues > 1) ? 
"Enabled" : 2523 + "Disabled", adapter->num_rx_queues, adapter->num_tx_queues); 2529 2524 2530 2525 set_bit(__IXGBE_DOWN, &adapter->state); 2531 2526 ··· 2552 2547 struct pci_dev *pdev = adapter->pdev; 2553 2548 unsigned int rss; 2554 2549 2550 + /* PCI config space info */ 2551 + 2552 + hw->vendor_id = pdev->vendor; 2553 + hw->device_id = pdev->device; 2554 + hw->revision_id = pdev->revision; 2555 + hw->subsystem_vendor_id = pdev->subsystem_vendor; 2556 + hw->subsystem_device_id = pdev->subsystem_device; 2557 + 2555 2558 /* Set capability flags */ 2556 2559 rss = min(IXGBE_MAX_RSS_INDICES, (int)num_online_cpus()); 2557 2560 adapter->ring_feature[RING_F_RSS].indices = rss; 2558 2561 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 2559 - 2560 - /* Enable Dynamic interrupt throttling by default */ 2561 - adapter->rx_eitr = 1; 2562 - adapter->tx_eitr = 1; 2563 2562 2564 2563 /* default flow control settings */ 2565 2564 hw->fc.original_type = ixgbe_fc_none; ··· 2575 2566 2576 2567 /* select 10G link by default */ 2577 2568 hw->mac.link_mode_select = IXGBE_AUTOC_LMS_10G_LINK_NO_AN; 2578 - if (hw->mac.ops.reset(hw)) { 2579 - dev_err(&pdev->dev, "HW Init failed\n"); 2580 - return -EIO; 2581 - } 2582 - if (hw->mac.ops.setup_link_speed(hw, IXGBE_LINK_SPEED_10GB_FULL, true, 2583 - false)) { 2584 - dev_err(&pdev->dev, "Link Speed setup failed\n"); 2585 - return -EIO; 2586 - } 2569 + 2570 + /* enable itr by default in dynamic mode */ 2571 + adapter->itr_setting = 1; 2572 + adapter->eitr_param = 20000; 2573 + 2574 + /* set defaults for eitr in MegaBytes */ 2575 + adapter->eitr_low = 10; 2576 + adapter->eitr_high = 20; 2577 + 2578 + /* set default ring sizes */ 2579 + adapter->tx_ring_count = IXGBE_DEFAULT_TXD; 2580 + adapter->rx_ring_count = IXGBE_DEFAULT_RXD; 2587 2581 2588 2582 /* initialize eeprom parameters */ 2589 - if (ixgbe_init_eeprom(hw)) { 2583 + if (ixgbe_init_eeprom_params_generic(hw)) { 2590 2584 dev_err(&pdev->dev, "EEPROM initialization failed\n"); 2591 2585 return 
-EIO; 2592 2586 } ··· 2645 2633 } 2646 2634 2647 2635 /** 2636 + * ixgbe_setup_all_tx_resources - allocate all queues Tx resources 2637 + * @adapter: board private structure 2638 + * 2639 + * If this function returns with an error, then it's possible one or 2640 + * more of the rings is populated (while the rest are not). It is the 2641 + * callers duty to clean those orphaned rings. 2642 + * 2643 + * Return 0 on success, negative on failure 2644 + **/ 2645 + static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) 2646 + { 2647 + int i, err = 0; 2648 + 2649 + for (i = 0; i < adapter->num_tx_queues; i++) { 2650 + err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2651 + if (!err) 2652 + continue; 2653 + DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i); 2654 + break; 2655 + } 2656 + 2657 + return err; 2658 + } 2659 + 2660 + /** 2648 2661 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 2649 2662 * @adapter: board private structure 2650 2663 * @rx_ring: rx descriptor ring (for a specific queue) to setup ··· 2677 2640 * Returns 0 on success, negative on failure 2678 2641 **/ 2679 2642 int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 2680 - struct ixgbe_ring *rx_ring) 2643 + struct ixgbe_ring *rx_ring) 2681 2644 { 2682 2645 struct pci_dev *pdev = adapter->pdev; 2683 2646 int size; ··· 2692 2655 rx_ring->rx_buffer_info = vmalloc(size); 2693 2656 if (!rx_ring->rx_buffer_info) { 2694 2657 DPRINTK(PROBE, ERR, 2695 - "vmalloc allocation failed for the rx desc ring\n"); 2658 + "vmalloc allocation failed for the rx desc ring\n"); 2696 2659 goto alloc_failed; 2697 2660 } 2698 2661 memset(rx_ring->rx_buffer_info, 0, size); ··· 2705 2668 2706 2669 if (!rx_ring->desc) { 2707 2670 DPRINTK(PROBE, ERR, 2708 - "Memory allocation failed for the rx desc ring\n"); 2671 + "Memory allocation failed for the rx desc ring\n"); 2709 2672 vfree(rx_ring->rx_buffer_info); 2710 2673 goto alloc_failed; 2711 2674 } ··· 2722 2685 } 
2723 2686 2724 2687 /** 2688 + * ixgbe_setup_all_rx_resources - allocate all queues Rx resources 2689 + * @adapter: board private structure 2690 + * 2691 + * If this function returns with an error, then it's possible one or 2692 + * more of the rings is populated (while the rest are not). It is the 2693 + * callers duty to clean those orphaned rings. 2694 + * 2695 + * Return 0 on success, negative on failure 2696 + **/ 2697 + 2698 + static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 2699 + { 2700 + int i, err = 0; 2701 + 2702 + for (i = 0; i < adapter->num_rx_queues; i++) { 2703 + err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2704 + if (!err) 2705 + continue; 2706 + DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i); 2707 + break; 2708 + } 2709 + 2710 + return err; 2711 + } 2712 + 2713 + /** 2725 2714 * ixgbe_free_tx_resources - Free Tx Resources per Queue 2726 2715 * @adapter: board private structure 2727 2716 * @tx_ring: Tx descriptor ring for a specific queue 2728 2717 * 2729 2718 * Free all transmit software resources 2730 2719 **/ 2731 - static void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 2732 - struct ixgbe_ring *tx_ring) 2720 + void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 2721 + struct ixgbe_ring *tx_ring) 2733 2722 { 2734 2723 struct pci_dev *pdev = adapter->pdev; 2735 2724 ··· 2790 2727 * 2791 2728 * Free all receive software resources 2792 2729 **/ 2793 - static void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 2794 - struct ixgbe_ring *rx_ring) 2730 + void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 2731 + struct ixgbe_ring *rx_ring) 2795 2732 { 2796 2733 struct pci_dev *pdev = adapter->pdev; 2797 2734 ··· 2823 2760 } 2824 2761 2825 2762 /** 2826 - * ixgbe_setup_all_tx_resources - allocate all queues Tx resources 2827 - * @adapter: board private structure 2828 - * 2829 - * If this function returns with an error, then it's possible one or 2830 - * more of 
the rings is populated (while the rest are not). It is the 2831 - * callers duty to clean those orphaned rings. 2832 - * 2833 - * Return 0 on success, negative on failure 2834 - **/ 2835 - static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) 2836 - { 2837 - int i, err = 0; 2838 - 2839 - for (i = 0; i < adapter->num_tx_queues; i++) { 2840 - err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2841 - if (err) { 2842 - DPRINTK(PROBE, ERR, 2843 - "Allocation for Tx Queue %u failed\n", i); 2844 - break; 2845 - } 2846 - } 2847 - 2848 - return err; 2849 - } 2850 - 2851 - /** 2852 - * ixgbe_setup_all_rx_resources - allocate all queues Rx resources 2853 - * @adapter: board private structure 2854 - * 2855 - * If this function returns with an error, then it's possible one or 2856 - * more of the rings is populated (while the rest are not). It is the 2857 - * callers duty to clean those orphaned rings. 2858 - * 2859 - * Return 0 on success, negative on failure 2860 - **/ 2861 - 2862 - static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 2863 - { 2864 - int i, err = 0; 2865 - 2866 - for (i = 0; i < adapter->num_rx_queues; i++) { 2867 - err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2868 - if (err) { 2869 - DPRINTK(PROBE, ERR, 2870 - "Allocation for Rx Queue %u failed\n", i); 2871 - break; 2872 - } 2873 - } 2874 - 2875 - return err; 2876 - } 2877 - 2878 - /** 2879 2763 * ixgbe_change_mtu - Change the Maximum Transfer Unit 2880 2764 * @netdev: network interface device structure 2881 2765 * @new_mtu: new value for maximum frame size ··· 2834 2824 struct ixgbe_adapter *adapter = netdev_priv(netdev); 2835 2825 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 2836 2826 2837 - if ((max_frame < (ETH_ZLEN + ETH_FCS_LEN)) || 2838 - (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 2827 + /* MTU < 68 is an error and causes problems on some kernels */ 2828 + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) 2839 2829 
return -EINVAL; 2840 2830 2841 2831 DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", 2842 - netdev->mtu, new_mtu); 2832 + netdev->mtu, new_mtu); 2843 2833 /* must set new MTU before calling down or up */ 2844 2834 netdev->mtu = new_mtu; 2845 2835 ··· 2934 2924 } 2935 2925 2936 2926 /** 2927 + * ixgbe_napi_add_all - prep napi structs for use 2928 + * @adapter: private struct 2929 + * helper function to napi_add each possible q_vector->napi 2930 + */ 2931 + static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 2932 + { 2933 + int q_idx, q_vectors; 2934 + int (*poll)(struct napi_struct *, int); 2935 + 2936 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2937 + poll = &ixgbe_clean_rxonly; 2938 + /* Only enable as many vectors as we have rx queues. */ 2939 + q_vectors = adapter->num_rx_queues; 2940 + } else { 2941 + poll = &ixgbe_poll; 2942 + /* only one q_vector for legacy modes */ 2943 + q_vectors = 1; 2944 + } 2945 + 2946 + for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2947 + struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; 2948 + netif_napi_add(adapter->netdev, &q_vector->napi, (*poll), 64); 2949 + } 2950 + } 2951 + 2952 + static void ixgbe_napi_del_all(struct ixgbe_adapter *adapter) 2953 + { 2954 + int q_idx; 2955 + int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2956 + 2957 + /* legacy and MSI only use one vector */ 2958 + if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 2959 + q_vectors = 1; 2960 + 2961 + for (q_idx = 0; q_idx < q_vectors; q_idx++) { 2962 + struct ixgbe_q_vector *q_vector = &adapter->q_vector[q_idx]; 2963 + if (!q_vector->rxr_count) 2964 + continue; 2965 + netif_napi_del(&q_vector->napi); 2966 + } 2967 + } 2968 + 2969 + #ifdef CONFIG_PM 2970 + static int ixgbe_resume(struct pci_dev *pdev) 2971 + { 2972 + struct net_device *netdev = pci_get_drvdata(pdev); 2973 + struct ixgbe_adapter *adapter = netdev_priv(netdev); 2974 + u32 err; 2975 + 2976 + pci_set_power_state(pdev, PCI_D0); 2977 + pci_restore_state(pdev); 
2978 + err = pci_enable_device(pdev); 2979 + if (err) { 2980 + printk(KERN_ERR "ixgbe: Cannot enable PCI device from " 2981 + "suspend\n"); 2982 + return err; 2983 + } 2984 + pci_set_master(pdev); 2985 + 2986 + pci_enable_wake(pdev, PCI_D3hot, 0); 2987 + pci_enable_wake(pdev, PCI_D3cold, 0); 2988 + 2989 + err = ixgbe_init_interrupt_scheme(adapter); 2990 + if (err) { 2991 + printk(KERN_ERR "ixgbe: Cannot initialize interrupts for " 2992 + "device\n"); 2993 + return err; 2994 + } 2995 + 2996 + ixgbe_napi_add_all(adapter); 2997 + ixgbe_reset(adapter); 2998 + 2999 + if (netif_running(netdev)) { 3000 + err = ixgbe_open(adapter->netdev); 3001 + if (err) 3002 + return err; 3003 + } 3004 + 3005 + netif_device_attach(netdev); 3006 + 3007 + return 0; 3008 + } 3009 + 3010 + #endif /* CONFIG_PM */ 3011 + static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) 3012 + { 3013 + struct net_device *netdev = pci_get_drvdata(pdev); 3014 + struct ixgbe_adapter *adapter = netdev_priv(netdev); 3015 + #ifdef CONFIG_PM 3016 + int retval = 0; 3017 + #endif 3018 + 3019 + netif_device_detach(netdev); 3020 + 3021 + if (netif_running(netdev)) { 3022 + ixgbe_down(adapter); 3023 + ixgbe_free_irq(adapter); 3024 + ixgbe_free_all_tx_resources(adapter); 3025 + ixgbe_free_all_rx_resources(adapter); 3026 + } 3027 + ixgbe_reset_interrupt_capability(adapter); 3028 + ixgbe_napi_del_all(adapter); 3029 + kfree(adapter->tx_ring); 3030 + kfree(adapter->rx_ring); 3031 + 3032 + #ifdef CONFIG_PM 3033 + retval = pci_save_state(pdev); 3034 + if (retval) 3035 + return retval; 3036 + #endif 3037 + 3038 + pci_enable_wake(pdev, PCI_D3hot, 0); 3039 + pci_enable_wake(pdev, PCI_D3cold, 0); 3040 + 3041 + ixgbe_release_hw_control(adapter); 3042 + 3043 + pci_disable_device(pdev); 3044 + 3045 + pci_set_power_state(pdev, pci_choose_state(pdev, state)); 3046 + 3047 + return 0; 3048 + } 3049 + 3050 + static void ixgbe_shutdown(struct pci_dev *pdev) 3051 + { 3052 + ixgbe_suspend(pdev, PMSG_SUSPEND); 3053 + } 3054 + 
3055 + /** 2937 3056 * ixgbe_update_stats - Update the board statistics counters. 2938 3057 * @adapter: board private structure 2939 3058 **/ ··· 3135 2996 3136 2997 /* Rx Errors */ 3137 2998 adapter->net_stats.rx_errors = adapter->stats.crcerrs + 3138 - adapter->stats.rlec; 2999 + adapter->stats.rlec; 3139 3000 adapter->net_stats.rx_dropped = 0; 3140 3001 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 3141 3002 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; ··· 3149 3010 static void ixgbe_watchdog(unsigned long data) 3150 3011 { 3151 3012 struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; 3152 - struct net_device *netdev = adapter->netdev; 3153 - bool link_up; 3154 - u32 link_speed = 0; 3013 + struct ixgbe_hw *hw = &adapter->hw; 3155 3014 3156 - adapter->hw.mac.ops.check_link(&adapter->hw, &(link_speed), &link_up); 3015 + /* Do the watchdog outside of interrupt context due to the lovely 3016 + * delays that some of the newer hardware requires */ 3017 + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 3018 + /* Cause software interrupt to ensure rx rings are cleaned */ 3019 + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3020 + u32 eics = 3021 + (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1; 3022 + IXGBE_WRITE_REG(hw, IXGBE_EICS, eics); 3023 + } else { 3024 + /* For legacy and MSI interrupts don't set any bits that 3025 + * are enabled for EIAM, because this operation would 3026 + * set *both* EIMS and EICS for any bit in EIAM */ 3027 + IXGBE_WRITE_REG(hw, IXGBE_EICS, 3028 + (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 3029 + } 3030 + /* Reset the timer */ 3031 + mod_timer(&adapter->watchdog_timer, 3032 + round_jiffies(jiffies + 2 * HZ)); 3033 + } 3034 + 3035 + schedule_work(&adapter->watchdog_task); 3036 + } 3037 + 3038 + /** 3039 + * ixgbe_watchdog_task - worker thread to bring link up 3040 + * @work: pointer to work_struct containing our data 3041 + **/ 3042 + static void ixgbe_watchdog_task(struct work_struct *work) 
3043 + { 3044 + struct ixgbe_adapter *adapter = container_of(work, 3045 + struct ixgbe_adapter, 3046 + watchdog_task); 3047 + struct net_device *netdev = adapter->netdev; 3048 + struct ixgbe_hw *hw = &adapter->hw; 3049 + u32 link_speed = adapter->link_speed; 3050 + bool link_up = adapter->link_up; 3051 + 3052 + adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK; 3053 + 3054 + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) { 3055 + hw->mac.ops.check_link(hw, &link_speed, &link_up, false); 3056 + if (link_up || 3057 + time_after(jiffies, (adapter->link_check_timeout + 3058 + IXGBE_TRY_LINK_TIMEOUT))) { 3059 + IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMC_LSC); 3060 + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; 3061 + } 3062 + adapter->link_up = link_up; 3063 + adapter->link_speed = link_speed; 3064 + } 3157 3065 3158 3066 if (link_up) { 3159 3067 if (!netif_carrier_ok(netdev)) { 3160 - u32 frctl = IXGBE_READ_REG(&adapter->hw, IXGBE_FCTRL); 3161 - u32 rmcs = IXGBE_READ_REG(&adapter->hw, IXGBE_RMCS); 3068 + u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 3069 + u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 3162 3070 #define FLOW_RX (frctl & IXGBE_FCTRL_RFCE) 3163 3071 #define FLOW_TX (rmcs & IXGBE_RMCS_TFCE_802_3X) 3164 3072 DPRINTK(LINK, INFO, "NIC Link is Up %s, " 3165 - "Flow Control: %s\n", 3166 - (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 3167 - "10 Gbps" : 3168 - (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 3169 - "1 Gbps" : "unknown speed")), 3170 - ((FLOW_RX && FLOW_TX) ? "RX/TX" : 3171 - (FLOW_RX ? "RX" : 3172 - (FLOW_TX ? "TX" : "None")))); 3073 + "Flow Control: %s\n", 3074 + (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 3075 + "10 Gbps" : 3076 + (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? 3077 + "1 Gbps" : "unknown speed")), 3078 + ((FLOW_RX && FLOW_TX) ? "RX/TX" : 3079 + (FLOW_RX ? "RX" : 3080 + (FLOW_TX ? 
"TX" : "None")))); 3173 3081 3174 3082 netif_carrier_on(netdev); 3175 3083 netif_tx_wake_all_queues(netdev); ··· 3225 3039 adapter->detect_tx_hung = true; 3226 3040 } 3227 3041 } else { 3042 + adapter->link_up = false; 3043 + adapter->link_speed = 0; 3228 3044 if (netif_carrier_ok(netdev)) { 3229 3045 DPRINTK(LINK, INFO, "NIC Link is Down\n"); 3230 3046 netif_carrier_off(netdev); ··· 3235 3047 } 3236 3048 3237 3049 ixgbe_update_stats(adapter); 3238 - 3239 - if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 3240 - /* Cause software interrupt to ensure rx rings are cleaned */ 3241 - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3242 - u32 eics = 3243 - (1 << (adapter->num_msix_vectors - NON_Q_VECTORS)) - 1; 3244 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, eics); 3245 - } else { 3246 - /* for legacy and MSI interrupts don't set any bits that 3247 - * are enabled for EIAM, because this operation would 3248 - * set *both* EIMS and EICS for any bit in EIAM */ 3249 - IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, 3250 - (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER)); 3251 - } 3252 - /* Reset the timer */ 3253 - mod_timer(&adapter->watchdog_timer, 3254 - round_jiffies(jiffies + 2 * HZ)); 3255 - } 3050 + adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK; 3256 3051 } 3257 3052 3258 3053 static int ixgbe_tso(struct ixgbe_adapter *adapter, 3259 - struct ixgbe_ring *tx_ring, struct sk_buff *skb, 3260 - u32 tx_flags, u8 *hdr_len) 3054 + struct ixgbe_ring *tx_ring, struct sk_buff *skb, 3055 + u32 tx_flags, u8 *hdr_len) 3261 3056 { 3262 3057 struct ixgbe_adv_tx_context_desc *context_desc; 3263 3058 unsigned int i; 3264 3059 int err; 3265 3060 struct ixgbe_tx_buffer *tx_buffer_info; 3266 - u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0; 3267 - u32 mss_l4len_idx = 0, l4len; 3061 + u32 vlan_macip_lens = 0, type_tucmd_mlhl; 3062 + u32 mss_l4len_idx, l4len; 3268 3063 3269 3064 if (skb_is_gso(skb)) { 3270 3065 if (skb_header_cloned(skb)) { ··· 3263 3092 iph->tot_len = 0; 3264 3093 iph->check = 0; 
3265 3094 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, 3266 - iph->daddr, 0, 3267 - IPPROTO_TCP, 3268 - 0); 3095 + iph->daddr, 0, 3096 + IPPROTO_TCP, 3097 + 0); 3269 3098 adapter->hw_tso_ctxt++; 3270 3099 } else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) { 3271 3100 ipv6_hdr(skb)->payload_len = 0; 3272 3101 tcp_hdr(skb)->check = 3273 3102 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 3274 - &ipv6_hdr(skb)->daddr, 3275 - 0, IPPROTO_TCP, 0); 3103 + &ipv6_hdr(skb)->daddr, 3104 + 0, IPPROTO_TCP, 0); 3276 3105 adapter->hw_tso6_ctxt++; 3277 3106 } 3278 3107 ··· 3286 3115 vlan_macip_lens |= 3287 3116 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 3288 3117 vlan_macip_lens |= ((skb_network_offset(skb)) << 3289 - IXGBE_ADVTXD_MACLEN_SHIFT); 3118 + IXGBE_ADVTXD_MACLEN_SHIFT); 3290 3119 *hdr_len += skb_network_offset(skb); 3291 3120 vlan_macip_lens |= 3292 3121 (skb_transport_header(skb) - skb_network_header(skb)); ··· 3296 3125 context_desc->seqnum_seed = 0; 3297 3126 3298 3127 /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ 3299 - type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 3300 - IXGBE_ADVTXD_DTYP_CTXT); 3128 + type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT | 3129 + IXGBE_ADVTXD_DTYP_CTXT); 3301 3130 3302 3131 if (skb->protocol == htons(ETH_P_IP)) 3303 3132 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; ··· 3305 3134 context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl); 3306 3135 3307 3136 /* MSS L4LEN IDX */ 3308 - mss_l4len_idx |= 3137 + mss_l4len_idx = 3309 3138 (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT); 3310 3139 mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT); 3311 3140 /* use index 1 for TSO */ ··· 3326 3155 } 3327 3156 3328 3157 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter, 3329 - struct ixgbe_ring *tx_ring, 3330 - struct sk_buff *skb, u32 tx_flags) 3158 + struct ixgbe_ring *tx_ring, 3159 + struct sk_buff *skb, u32 tx_flags) 3331 3160 { 3332 3161 struct ixgbe_adv_tx_context_desc *context_desc; 3333 3162 unsigned int i; ··· 3344 3173 vlan_macip_lens |= 
3345 3174 (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK); 3346 3175 vlan_macip_lens |= (skb_network_offset(skb) << 3347 - IXGBE_ADVTXD_MACLEN_SHIFT); 3176 + IXGBE_ADVTXD_MACLEN_SHIFT); 3348 3177 if (skb->ip_summed == CHECKSUM_PARTIAL) 3349 3178 vlan_macip_lens |= (skb_transport_header(skb) - 3350 - skb_network_header(skb)); 3179 + skb_network_header(skb)); 3351 3180 3352 3181 context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens); 3353 3182 context_desc->seqnum_seed = 0; 3354 3183 3355 3184 type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT | 3356 - IXGBE_ADVTXD_DTYP_CTXT); 3185 + IXGBE_ADVTXD_DTYP_CTXT); 3357 3186 3358 3187 if (skb->ip_summed == CHECKSUM_PARTIAL) { 3359 3188 switch (skb->protocol) { ··· 3361 3190 type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4; 3362 3191 if (ip_hdr(skb)->protocol == IPPROTO_TCP) 3363 3192 type_tucmd_mlhl |= 3364 - IXGBE_ADVTXD_TUCMD_L4T_TCP; 3193 + IXGBE_ADVTXD_TUCMD_L4T_TCP; 3365 3194 break; 3366 - 3367 3195 case __constant_htons(ETH_P_IPV6): 3368 3196 /* XXX what about other V6 headers?? 
*/ 3369 3197 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) 3370 3198 type_tucmd_mlhl |= 3371 - IXGBE_ADVTXD_TUCMD_L4T_TCP; 3199 + IXGBE_ADVTXD_TUCMD_L4T_TCP; 3372 3200 break; 3373 - 3374 3201 default: 3375 3202 if (unlikely(net_ratelimit())) { 3376 3203 DPRINTK(PROBE, WARNING, ··· 3385 3216 3386 3217 tx_buffer_info->time_stamp = jiffies; 3387 3218 tx_buffer_info->next_to_watch = i; 3219 + 3388 3220 adapter->hw_csum_tx_good++; 3389 3221 i++; 3390 3222 if (i == tx_ring->count) ··· 3394 3224 3395 3225 return true; 3396 3226 } 3227 + 3397 3228 return false; 3398 3229 } 3399 3230 3400 3231 static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 3401 - struct ixgbe_ring *tx_ring, 3402 - struct sk_buff *skb, unsigned int first) 3232 + struct ixgbe_ring *tx_ring, 3233 + struct sk_buff *skb, unsigned int first) 3403 3234 { 3404 3235 struct ixgbe_tx_buffer *tx_buffer_info; 3405 3236 unsigned int len = skb->len; ··· 3418 3247 3419 3248 tx_buffer_info->length = size; 3420 3249 tx_buffer_info->dma = pci_map_single(adapter->pdev, 3421 - skb->data + offset, 3422 - size, PCI_DMA_TODEVICE); 3250 + skb->data + offset, 3251 + size, PCI_DMA_TODEVICE); 3423 3252 tx_buffer_info->time_stamp = jiffies; 3424 3253 tx_buffer_info->next_to_watch = i; 3425 3254 ··· 3444 3273 3445 3274 tx_buffer_info->length = size; 3446 3275 tx_buffer_info->dma = pci_map_page(adapter->pdev, 3447 - frag->page, 3448 - offset, 3449 - size, PCI_DMA_TODEVICE); 3276 + frag->page, 3277 + offset, 3278 + size, 3279 + PCI_DMA_TODEVICE); 3450 3280 tx_buffer_info->time_stamp = jiffies; 3451 3281 tx_buffer_info->next_to_watch = i; 3452 3282 ··· 3470 3298 } 3471 3299 3472 3300 static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 3473 - struct ixgbe_ring *tx_ring, 3474 - int tx_flags, int count, u32 paylen, u8 hdr_len) 3301 + struct ixgbe_ring *tx_ring, 3302 + int tx_flags, int count, u32 paylen, u8 hdr_len) 3475 3303 { 3476 3304 union ixgbe_adv_tx_desc *tx_desc = NULL; 3477 3305 struct ixgbe_tx_buffer *tx_buffer_info; ··· 
3490 3318 cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE; 3491 3319 3492 3320 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3493 - IXGBE_ADVTXD_POPTS_SHIFT; 3321 + IXGBE_ADVTXD_POPTS_SHIFT; 3494 3322 3495 3323 /* use index 1 context for tso */ 3496 3324 olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT); 3497 3325 if (tx_flags & IXGBE_TX_FLAGS_IPV4) 3498 3326 olinfo_status |= IXGBE_TXD_POPTS_IXSM << 3499 - IXGBE_ADVTXD_POPTS_SHIFT; 3327 + IXGBE_ADVTXD_POPTS_SHIFT; 3500 3328 3501 3329 } else if (tx_flags & IXGBE_TX_FLAGS_CSUM) 3502 3330 olinfo_status |= IXGBE_TXD_POPTS_TXSM << 3503 - IXGBE_ADVTXD_POPTS_SHIFT; 3331 + IXGBE_ADVTXD_POPTS_SHIFT; 3504 3332 3505 3333 olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT); 3506 3334 ··· 3510 3338 tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i); 3511 3339 tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma); 3512 3340 tx_desc->read.cmd_type_len = 3513 - cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3341 + cpu_to_le32(cmd_type_len | tx_buffer_info->length); 3514 3342 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3515 - 3516 3343 i++; 3517 3344 if (i == tx_ring->count) 3518 3345 i = 0; ··· 3532 3361 } 3533 3362 3534 3363 static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 3535 - struct ixgbe_ring *tx_ring, int size) 3364 + struct ixgbe_ring *tx_ring, int size) 3536 3365 { 3537 3366 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3538 3367 ··· 3548 3377 return -EBUSY; 3549 3378 3550 3379 /* A reprieve! 
- use start_queue because it doesn't call schedule */ 3551 - netif_wake_subqueue(netdev, tx_ring->queue_index); 3380 + netif_start_subqueue(netdev, tx_ring->queue_index); 3552 3381 ++adapter->restart_queue; 3553 3382 return 0; 3554 3383 } 3555 3384 3556 3385 static int ixgbe_maybe_stop_tx(struct net_device *netdev, 3557 - struct ixgbe_ring *tx_ring, int size) 3386 + struct ixgbe_ring *tx_ring, int size) 3558 3387 { 3559 3388 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 3560 3389 return 0; 3561 3390 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 3562 3391 } 3563 3392 3564 - 3565 3393 static int ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netdev) 3566 3394 { 3567 3395 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3568 3396 struct ixgbe_ring *tx_ring; 3569 - unsigned int len = skb->len; 3570 3397 unsigned int first; 3571 3398 unsigned int tx_flags = 0; 3572 3399 u8 hdr_len = 0; 3573 3400 int r_idx = 0, tso; 3574 - unsigned int mss = 0; 3575 3401 int count = 0; 3576 3402 unsigned int f; 3577 - unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 3578 - len -= skb->data_len; 3403 + 3579 3404 r_idx = (adapter->num_tx_queues - 1) & skb->queue_mapping; 3580 3405 tx_ring = &adapter->tx_ring[r_idx]; 3581 3406 3582 - 3583 - if (skb->len <= 0) { 3584 - dev_kfree_skb(skb); 3585 - return NETDEV_TX_OK; 3407 + if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3408 + tx_flags |= vlan_tx_tag_get(skb); 3409 + tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT; 3410 + tx_flags |= IXGBE_TX_FLAGS_VLAN; 3586 3411 } 3587 - mss = skb_shinfo(skb)->gso_size; 3588 - 3589 - if (mss) 3412 + /* three things can cause us to need a context descriptor */ 3413 + if (skb_is_gso(skb) || 3414 + (skb->ip_summed == CHECKSUM_PARTIAL) || 3415 + (tx_flags & IXGBE_TX_FLAGS_VLAN)) 3590 3416 count++; 3591 - else if (skb->ip_summed == CHECKSUM_PARTIAL) 3592 - count++; 3593 3417 3594 - count += TXD_USE_COUNT(len); 3595 - for (f = 0; f < nr_frags; f++) 3418 + count += 
TXD_USE_COUNT(skb_headlen(skb)); 3419 + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 3596 3420 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 3597 3421 3598 3422 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 3599 3423 adapter->tx_busy++; 3600 3424 return NETDEV_TX_BUSY; 3601 - } 3602 - if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3603 - tx_flags |= IXGBE_TX_FLAGS_VLAN; 3604 - tx_flags |= (vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT); 3605 3425 } 3606 3426 3607 3427 if (skb->protocol == htons(ETH_P_IP)) ··· 3607 3445 if (tso) 3608 3446 tx_flags |= IXGBE_TX_FLAGS_TSO; 3609 3447 else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) && 3610 - (skb->ip_summed == CHECKSUM_PARTIAL)) 3448 + (skb->ip_summed == CHECKSUM_PARTIAL)) 3611 3449 tx_flags |= IXGBE_TX_FLAGS_CSUM; 3612 3450 3613 3451 ixgbe_tx_queue(adapter, tx_ring, tx_flags, 3614 - ixgbe_tx_map(adapter, tx_ring, skb, first), 3615 - skb->len, hdr_len); 3452 + ixgbe_tx_map(adapter, tx_ring, skb, first), 3453 + skb->len, hdr_len); 3616 3454 3617 3455 netdev->trans_start = jiffies; 3618 3456 ··· 3646 3484 static int ixgbe_set_mac(struct net_device *netdev, void *p) 3647 3485 { 3648 3486 struct ixgbe_adapter *adapter = netdev_priv(netdev); 3487 + struct ixgbe_hw *hw = &adapter->hw; 3649 3488 struct sockaddr *addr = p; 3650 3489 3651 3490 if (!is_valid_ether_addr(addr->sa_data)) 3652 3491 return -EADDRNOTAVAIL; 3653 3492 3654 3493 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 3655 - memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len); 3494 + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 3656 3495 3657 - ixgbe_set_rar(&adapter->hw, 0, adapter->hw.mac.addr, 0, IXGBE_RAH_AV); 3496 + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); 3658 3497 3659 3498 return 0; 3660 3499 } ··· 3679 3516 #endif 3680 3517 3681 3518 /** 3682 - * ixgbe_napi_add_all - prep napi structs for use 3683 - * @adapter: private struct 3684 - * helper function to napi_add each possible 
q_vector->napi 3685 - */ 3686 - static void ixgbe_napi_add_all(struct ixgbe_adapter *adapter) 3519 + * ixgbe_link_config - set up initial link with default speed and duplex 3520 + * @hw: pointer to private hardware struct 3521 + * 3522 + * Returns 0 on success, negative on failure 3523 + **/ 3524 + static int ixgbe_link_config(struct ixgbe_hw *hw) 3687 3525 { 3688 - int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 3689 - int (*poll)(struct napi_struct *, int); 3526 + u32 autoneg = IXGBE_LINK_SPEED_10GB_FULL; 3690 3527 3691 - if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 3692 - poll = &ixgbe_clean_rxonly; 3693 - } else { 3694 - poll = &ixgbe_poll; 3695 - /* only one q_vector for legacy modes */ 3696 - q_vectors = 1; 3697 - } 3528 + /* must always autoneg for both 1G and 10G link */ 3529 + hw->mac.autoneg = true; 3698 3530 3699 - for (i = 0; i < q_vectors; i++) { 3700 - struct ixgbe_q_vector *q_vector = &adapter->q_vector[i]; 3701 - netif_napi_add(adapter->netdev, &q_vector->napi, 3702 - (*poll), 64); 3703 - } 3531 + return hw->mac.ops.setup_link_speed(hw, autoneg, true, true); 3704 3532 } 3705 3533 3706 3534 /** ··· 3706 3552 * and a hardware reset occur. 
3707 3553 **/ 3708 3554 static int __devinit ixgbe_probe(struct pci_dev *pdev, 3709 - const struct pci_device_id *ent) 3555 + const struct pci_device_id *ent) 3710 3556 { 3711 3557 struct net_device *netdev; 3712 3558 struct ixgbe_adapter *adapter = NULL; 3713 3559 struct ixgbe_hw *hw; 3714 3560 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 3715 - unsigned long mmio_start, mmio_len; 3716 3561 static int cards_found; 3717 3562 int i, err, pci_using_dac; 3718 3563 u16 link_status, link_speed, link_width; 3719 - u32 part_num; 3564 + u32 part_num, eec; 3720 3565 3721 3566 err = pci_enable_device(pdev); 3722 3567 if (err) ··· 3730 3577 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK); 3731 3578 if (err) { 3732 3579 dev_err(&pdev->dev, "No usable DMA " 3733 - "configuration, aborting\n"); 3580 + "configuration, aborting\n"); 3734 3581 goto err_dma; 3735 3582 } 3736 3583 } ··· 3763 3610 hw->back = adapter; 3764 3611 adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 3765 3612 3766 - mmio_start = pci_resource_start(pdev, 0); 3767 - mmio_len = pci_resource_len(pdev, 0); 3768 - 3769 - hw->hw_addr = ioremap(mmio_start, mmio_len); 3613 + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), 3614 + pci_resource_len(pdev, 0)); 3770 3615 if (!hw->hw_addr) { 3771 3616 err = -EIO; 3772 3617 goto err_ioremap; ··· 3794 3643 #endif 3795 3644 strcpy(netdev->name, pci_name(pdev)); 3796 3645 3797 - netdev->mem_start = mmio_start; 3798 - netdev->mem_end = mmio_start + mmio_len; 3799 - 3800 3646 adapter->bd_number = cards_found; 3801 - 3802 - /* PCI config space info */ 3803 - hw->vendor_id = pdev->vendor; 3804 - hw->device_id = pdev->device; 3805 - hw->revision_id = pdev->revision; 3806 - hw->subsystem_vendor_id = pdev->subsystem_vendor; 3807 - hw->subsystem_device_id = pdev->subsystem_device; 3808 3647 3809 3648 /* Setup hw api */ 3810 3649 memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); 3811 3650 hw->mac.type = ii->mac; 3651 + 3652 + /* EEPROM */ 
3653 + memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); 3654 + eec = IXGBE_READ_REG(hw, IXGBE_EEC); 3655 + /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ 3656 + if (!(eec & (1 << 8))) 3657 + hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; 3658 + 3659 + /* PHY */ 3660 + memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); 3661 + /* phy->sfp_type = ixgbe_sfp_type_unknown; */ 3812 3662 3813 3663 err = ii->get_invariants(hw); 3814 3664 if (err) ··· 3820 3668 if (err) 3821 3669 goto err_sw_init; 3822 3670 3671 + /* reset_hw fills in the perm_addr as well */ 3672 + err = hw->mac.ops.reset_hw(hw); 3673 + if (err) { 3674 + dev_err(&adapter->pdev->dev, "HW Init failed: %d\n", err); 3675 + goto err_sw_init; 3676 + } 3677 + 3823 3678 netdev->features = NETIF_F_SG | 3824 - NETIF_F_IP_CSUM | 3825 - NETIF_F_HW_VLAN_TX | 3826 - NETIF_F_HW_VLAN_RX | 3827 - NETIF_F_HW_VLAN_FILTER; 3679 + NETIF_F_IP_CSUM | 3680 + NETIF_F_HW_VLAN_TX | 3681 + NETIF_F_HW_VLAN_RX | 3682 + NETIF_F_HW_VLAN_FILTER; 3828 3683 3829 3684 netdev->features |= NETIF_F_IPV6_CSUM; 3830 3685 netdev->features |= NETIF_F_TSO; ··· 3847 3688 netdev->features |= NETIF_F_HIGHDMA; 3848 3689 3849 3690 /* make sure the EEPROM is good */ 3850 - if (ixgbe_validate_eeprom_checksum(hw, NULL) < 0) { 3691 + if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { 3851 3692 dev_err(&pdev->dev, "The EEPROM Checksum Is Not Valid\n"); 3852 3693 err = -EIO; 3853 3694 goto err_eeprom; ··· 3856 3697 memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); 3857 3698 memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); 3858 3699 3859 - if (ixgbe_validate_mac_addr(netdev->dev_addr)) { 3700 + if (ixgbe_validate_mac_addr(netdev->perm_addr)) { 3701 + dev_err(&pdev->dev, "invalid MAC address\n"); 3860 3702 err = -EIO; 3861 3703 goto err_eeprom; 3862 3704 } ··· 3867 3707 adapter->watchdog_timer.data = (unsigned long)adapter; 3868 3708 3869 3709 INIT_WORK(&adapter->reset_task, 
ixgbe_reset_task); 3710 + INIT_WORK(&adapter->watchdog_task, ixgbe_watchdog_task); 3870 3711 3871 3712 err = ixgbe_init_interrupt_scheme(adapter); 3872 3713 if (err) ··· 3878 3717 link_speed = link_status & IXGBE_PCI_LINK_SPEED; 3879 3718 link_width = link_status & IXGBE_PCI_LINK_WIDTH; 3880 3719 dev_info(&pdev->dev, "(PCI Express:%s:%s) " 3881 - "%02x:%02x:%02x:%02x:%02x:%02x\n", 3882 - ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 3883 - (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 3884 - "Unknown"), 3885 - ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : 3886 - (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : 3887 - (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : 3888 - (link_width == IXGBE_PCI_LINK_WIDTH_1) ? "Width x1" : 3889 - "Unknown"), 3890 - netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 3891 - netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); 3892 - ixgbe_read_part_num(hw, &part_num); 3720 + "%02x:%02x:%02x:%02x:%02x:%02x\n", 3721 + ((link_speed == IXGBE_PCI_LINK_SPEED_5000) ? "5.0Gb/s" : 3722 + (link_speed == IXGBE_PCI_LINK_SPEED_2500) ? "2.5Gb/s" : 3723 + "Unknown"), 3724 + ((link_width == IXGBE_PCI_LINK_WIDTH_8) ? "Width x8" : 3725 + (link_width == IXGBE_PCI_LINK_WIDTH_4) ? "Width x4" : 3726 + (link_width == IXGBE_PCI_LINK_WIDTH_2) ? "Width x2" : 3727 + (link_width == IXGBE_PCI_LINK_WIDTH_1) ? 
"Width x1" : 3728 + "Unknown"), 3729 + netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2], 3730 + netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5]); 3731 + ixgbe_read_pba_num_generic(hw, &part_num); 3893 3732 dev_info(&pdev->dev, "MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 3894 - hw->mac.type, hw->phy.type, 3895 - (part_num >> 8), (part_num & 0xff)); 3733 + hw->mac.type, hw->phy.type, 3734 + (part_num >> 8), (part_num & 0xff)); 3896 3735 3897 3736 if (link_width <= IXGBE_PCI_LINK_WIDTH_4) { 3898 3737 dev_warn(&pdev->dev, "PCI-Express bandwidth available for " 3899 - "this card is not sufficient for optimal " 3900 - "performance.\n"); 3738 + "this card is not sufficient for optimal " 3739 + "performance.\n"); 3901 3740 dev_warn(&pdev->dev, "For optimal performance a x8 " 3902 - "PCI-Express slot is required.\n"); 3741 + "PCI-Express slot is required.\n"); 3903 3742 } 3904 3743 3905 3744 /* reset the hardware with the new settings */ 3906 - ixgbe_start_hw(hw); 3745 + hw->mac.ops.start_hw(hw); 3746 + 3747 + /* link_config depends on start_hw being called at least once */ 3748 + err = ixgbe_link_config(hw); 3749 + if (err) { 3750 + dev_err(&pdev->dev, "setup_link_speed FAILED %d\n", err); 3751 + goto err_register; 3752 + } 3907 3753 3908 3754 netif_carrier_off(netdev); 3909 3755 netif_tx_stop_all_queues(netdev); ··· 3922 3754 if (err) 3923 3755 goto err_register; 3924 3756 3925 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 3757 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 3926 3758 if (dca_add_requester(&pdev->dev) == 0) { 3927 3759 adapter->flags |= IXGBE_FLAG_DCA_ENABLED; 3928 3760 /* always use CB2 mode, difference is masked ··· 3972 3804 3973 3805 flush_scheduled_work(); 3974 3806 3975 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 3807 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 3976 3808 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 3977 3809 adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; 3978 3810 
dca_remove_requester(&pdev->dev); ··· 3990 3822 pci_release_regions(pdev); 3991 3823 3992 3824 DPRINTK(PROBE, INFO, "complete\n"); 3825 + ixgbe_napi_del_all(adapter); 3993 3826 kfree(adapter->tx_ring); 3994 3827 kfree(adapter->rx_ring); 3995 3828 ··· 4008 3839 * this device has been detected. 4009 3840 */ 4010 3841 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 4011 - pci_channel_state_t state) 3842 + pci_channel_state_t state) 4012 3843 { 4013 3844 struct net_device *netdev = pci_get_drvdata(pdev); 4014 3845 struct ixgbe_adapter *adapter = netdev->priv; ··· 4019 3850 ixgbe_down(adapter); 4020 3851 pci_disable_device(pdev); 4021 3852 4022 - /* Request a slot slot reset. */ 3853 + /* Request a slot reset. */ 4023 3854 return PCI_ERS_RESULT_NEED_RESET; 4024 3855 } 4025 3856 ··· 4036 3867 4037 3868 if (pci_enable_device(pdev)) { 4038 3869 DPRINTK(PROBE, ERR, 4039 - "Cannot re-enable PCI device after reset.\n"); 3870 + "Cannot re-enable PCI device after reset.\n"); 4040 3871 return PCI_ERS_RESULT_DISCONNECT; 4041 3872 } 4042 3873 pci_set_master(pdev); ··· 4070 3901 } 4071 3902 4072 3903 netif_device_attach(netdev); 4073 - 4074 3904 } 4075 3905 4076 3906 static struct pci_error_handlers ixgbe_err_handler = { ··· 4105 3937 4106 3938 printk(KERN_INFO "%s: %s\n", ixgbe_driver_name, ixgbe_copyright); 4107 3939 4108 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 3940 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 4109 3941 dca_register_notify(&dca_notifier); 4110 3942 4111 3943 #endif 4112 3944 ret = pci_register_driver(&ixgbe_driver); 4113 3945 return ret; 4114 3946 } 3947 + 4115 3948 module_init(ixgbe_init_module); 4116 3949 4117 3950 /** ··· 4123 3954 **/ 4124 3955 static void __exit ixgbe_exit_module(void) 4125 3956 { 4126 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 3957 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 4127 3958 dca_unregister_notify(&dca_notifier); 4128 3959 #endif 4129 3960 
pci_unregister_driver(&ixgbe_driver); 4130 3961 } 4131 3962 4132 - #if defined(CONFIG_DCA) || defined (CONFIG_DCA_MODULE) 3963 + #if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) 4133 3964 static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, 4134 - void *p) 3965 + void *p) 4135 3966 { 4136 3967 int ret_val; 4137 3968 4138 3969 ret_val = driver_for_each_device(&ixgbe_driver.driver, NULL, &event, 4139 - __ixgbe_notify_dca); 3970 + __ixgbe_notify_dca); 4140 3971 4141 3972 return ret_val ? NOTIFY_BAD : NOTIFY_DONE; 4142 3973 }
+89 -155
drivers/net/ixgbe/ixgbe_phy.c
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 32 33 #include "ixgbe_common.h" 33 34 #include "ixgbe_phy.h" 34 35 36 + static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); 35 37 static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); 36 38 static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); 37 - static bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); 38 - static s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 39 - u32 device_type, u16 phy_data); 40 39 41 40 /** 42 - * ixgbe_identify_phy - Get physical layer module 41 + * ixgbe_identify_phy_generic - Get physical layer module 43 42 * @hw: pointer to hardware structure 44 43 * 45 44 * Determines the physical layer module found on the current adapter. 
46 45 **/ 47 - s32 ixgbe_identify_phy(struct ixgbe_hw *hw) 46 + s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) 48 47 { 49 48 s32 status = IXGBE_ERR_PHY_ADDR_INVALID; 50 49 u32 phy_addr; 51 50 52 - for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 53 - if (ixgbe_validate_phy_addr(hw, phy_addr)) { 54 - hw->phy.addr = phy_addr; 55 - ixgbe_get_phy_id(hw); 56 - hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); 57 - status = 0; 58 - break; 51 + if (hw->phy.type == ixgbe_phy_unknown) { 52 + for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { 53 + if (ixgbe_validate_phy_addr(hw, phy_addr)) { 54 + hw->phy.addr = phy_addr; 55 + ixgbe_get_phy_id(hw); 56 + hw->phy.type = 57 + ixgbe_get_phy_type_from_id(hw->phy.id); 58 + status = 0; 59 + break; 60 + } 59 61 } 62 + } else { 63 + status = 0; 60 64 } 65 + 61 66 return status; 62 67 } 63 68 ··· 76 73 bool valid = false; 77 74 78 75 hw->phy.addr = phy_addr; 79 - ixgbe_read_phy_reg(hw, 80 - IXGBE_MDIO_PHY_ID_HIGH, 81 - IXGBE_MDIO_PMA_PMD_DEV_TYPE, 82 - &phy_id); 76 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, 77 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); 83 78 84 79 if (phy_id != 0xFFFF && phy_id != 0x0) 85 80 valid = true; ··· 96 95 u16 phy_id_high = 0; 97 96 u16 phy_id_low = 0; 98 97 99 - status = ixgbe_read_phy_reg(hw, 100 - IXGBE_MDIO_PHY_ID_HIGH, 101 - IXGBE_MDIO_PMA_PMD_DEV_TYPE, 102 - &phy_id_high); 98 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, 99 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, 100 + &phy_id_high); 103 101 104 102 if (status == 0) { 105 103 hw->phy.id = (u32)(phy_id_high << 16); 106 - status = ixgbe_read_phy_reg(hw, 107 - IXGBE_MDIO_PHY_ID_LOW, 108 - IXGBE_MDIO_PMA_PMD_DEV_TYPE, 109 - &phy_id_low); 104 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, 105 + IXGBE_MDIO_PMA_PMD_DEV_TYPE, 106 + &phy_id_low); 110 107 hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); 111 108 hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); 112 109 } 113 - 114 
110 return status; 115 111 } 116 112 ··· 121 123 enum ixgbe_phy_type phy_type; 122 124 123 125 switch (phy_id) { 124 - case TN1010_PHY_ID: 125 - phy_type = ixgbe_phy_tn; 126 - break; 127 126 case QT2022_PHY_ID: 128 127 phy_type = ixgbe_phy_qt; 129 128 break; ··· 133 138 } 134 139 135 140 /** 136 - * ixgbe_reset_phy - Performs a PHY reset 141 + * ixgbe_reset_phy_generic - Performs a PHY reset 137 142 * @hw: pointer to hardware structure 138 143 **/ 139 - s32 ixgbe_reset_phy(struct ixgbe_hw *hw) 144 + s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) 140 145 { 141 146 /* 142 147 * Perform soft PHY reset to the PHY_XS. 143 148 * This will cause a soft reset to the PHY 144 149 */ 145 - return ixgbe_write_phy_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, 146 - IXGBE_MDIO_PHY_XS_DEV_TYPE, 147 - IXGBE_MDIO_PHY_XS_RESET); 150 + return hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, 151 + IXGBE_MDIO_PHY_XS_DEV_TYPE, 152 + IXGBE_MDIO_PHY_XS_RESET); 148 153 } 149 154 150 155 /** 151 - * ixgbe_read_phy_reg - Reads a value from a specified PHY register 156 + * ixgbe_read_phy_reg_generic - Reads a value from a specified PHY register 152 157 * @hw: pointer to hardware structure 153 158 * @reg_addr: 32 bit address of PHY register to read 154 159 * @phy_data: Pointer to read data from PHY register 155 160 **/ 156 - s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 157 - u32 device_type, u16 *phy_data) 161 + s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 162 + u32 device_type, u16 *phy_data) 158 163 { 159 164 u32 command; 160 165 u32 i; 161 - u32 timeout = 10; 162 166 u32 data; 163 167 s32 status = 0; 164 168 u16 gssr; ··· 173 179 if (status == 0) { 174 180 /* Setup and write the address cycle command */ 175 181 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 176 - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 177 - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 178 - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 182 + (device_type << 
IXGBE_MSCA_DEV_TYPE_SHIFT) | 183 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 184 + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 179 185 180 186 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 181 187 ··· 184 190 * The MDI Command bit will clear when the operation is 185 191 * complete 186 192 */ 187 - for (i = 0; i < timeout; i++) { 193 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { 188 194 udelay(10); 189 195 190 196 command = IXGBE_READ_REG(hw, IXGBE_MSCA); ··· 204 210 * command 205 211 */ 206 212 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 207 - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 208 - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 209 - (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); 213 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 214 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 215 + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); 210 216 211 217 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 212 218 ··· 215 221 * completed. The MDI Command bit will clear when the 216 222 * operation is complete 217 223 */ 218 - for (i = 0; i < timeout; i++) { 224 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { 219 225 udelay(10); 220 226 221 227 command = IXGBE_READ_REG(hw, IXGBE_MSCA); ··· 225 231 } 226 232 227 233 if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 228 - hw_dbg(hw, 229 - "PHY read command didn't complete\n"); 234 + hw_dbg(hw, "PHY read command didn't complete\n"); 230 235 status = IXGBE_ERR_PHY; 231 236 } else { 232 237 /* ··· 240 247 241 248 ixgbe_release_swfw_sync(hw, gssr); 242 249 } 250 + 243 251 return status; 244 252 } 245 253 246 254 /** 247 - * ixgbe_write_phy_reg - Writes a value to specified PHY register 255 + * ixgbe_write_phy_reg_generic - Writes a value to specified PHY register 248 256 * @hw: pointer to hardware structure 249 257 * @reg_addr: 32 bit PHY register to write 250 258 * @device_type: 5 bit device type 251 259 * @phy_data: Data to write to the PHY register 252 260 **/ 253 - static s32 ixgbe_write_phy_reg(struct 
ixgbe_hw *hw, u32 reg_addr, 254 - u32 device_type, u16 phy_data) 261 + s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 262 + u32 device_type, u16 phy_data) 255 263 { 256 264 u32 command; 257 265 u32 i; 258 - u32 timeout = 10; 259 266 s32 status = 0; 260 267 u16 gssr; 261 268 ··· 273 280 274 281 /* Setup and write the address cycle command */ 275 282 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 276 - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 277 - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 278 - (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 283 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 284 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 285 + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); 279 286 280 287 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 281 288 ··· 284 291 * The MDI Command bit will clear when the operation is 285 292 * complete 286 293 */ 287 - for (i = 0; i < timeout; i++) { 294 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { 288 295 udelay(10); 289 296 290 297 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 291 298 292 - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { 293 - hw_dbg(hw, "PHY address cmd didn't complete\n"); 299 + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) 294 300 break; 295 - } 296 301 } 297 302 298 - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) 303 + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 304 + hw_dbg(hw, "PHY address cmd didn't complete\n"); 299 305 status = IXGBE_ERR_PHY; 306 + } 300 307 301 308 if (status == 0) { 302 309 /* ··· 304 311 * command 305 312 */ 306 313 command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | 307 - (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 308 - (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 309 - (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); 314 + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | 315 + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | 316 + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); 310 317 311 318 IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); 312 
319 ··· 315 322 * completed. The MDI Command bit will clear when the 316 323 * operation is complete 317 324 */ 318 - for (i = 0; i < timeout; i++) { 325 + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { 319 326 udelay(10); 320 327 321 328 command = IXGBE_READ_REG(hw, IXGBE_MSCA); 322 329 323 - if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) { 324 - hw_dbg(hw, "PHY write command did not " 325 - "complete.\n"); 330 + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) 326 331 break; 327 - } 328 332 } 329 333 330 - if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) 334 + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { 335 + hw_dbg(hw, "PHY address cmd didn't complete\n"); 331 336 status = IXGBE_ERR_PHY; 337 + } 332 338 } 333 339 334 340 ixgbe_release_swfw_sync(hw, gssr); ··· 337 345 } 338 346 339 347 /** 340 - * ixgbe_setup_tnx_phy_link - Set and restart autoneg 348 + * ixgbe_setup_phy_link_generic - Set and restart autoneg 341 349 * @hw: pointer to hardware structure 342 350 * 343 351 * Restart autonegotiation and PHY and waits for completion. 344 352 **/ 345 - s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw) 353 + s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) 346 354 { 347 355 s32 status = IXGBE_NOT_IMPLEMENTED; 348 356 u32 time_out; 349 357 u32 max_time_out = 10; 350 - u16 autoneg_speed_selection_register = 0x10; 351 - u16 autoneg_restart_mask = 0x0200; 352 - u16 autoneg_complete_mask = 0x0020; 353 - u16 autoneg_reg = 0; 358 + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; 354 359 355 360 /* 356 361 * Set advertisement settings in PHY based on autoneg_advertised 357 362 * settings. If autoneg_advertised = 0, then advertise default values 358 - * txn devices cannot be "forced" to a autoneg 10G and fail. But can 363 + * tnx devices cannot be "forced" to a autoneg 10G and fail. But can 359 364 * for a 1G. 
360 365 */ 361 - ixgbe_read_phy_reg(hw, 362 - autoneg_speed_selection_register, 363 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 364 - &autoneg_reg); 366 + hw->phy.ops.read_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, 367 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); 365 368 366 369 if (hw->phy.autoneg_advertised == IXGBE_LINK_SPEED_1GB_FULL) 367 370 autoneg_reg &= 0xEFFF; /* 0 in bit 12 is 1G operation */ 368 371 else 369 372 autoneg_reg |= 0x1000; /* 1 in bit 12 is 10G/1G operation */ 370 373 371 - ixgbe_write_phy_reg(hw, 372 - autoneg_speed_selection_register, 373 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 374 - autoneg_reg); 375 - 374 + hw->phy.ops.write_reg(hw, IXGBE_MII_SPEED_SELECTION_REG, 375 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); 376 376 377 377 /* Restart PHY autonegotiation and wait for completion */ 378 - ixgbe_read_phy_reg(hw, 379 - IXGBE_MDIO_AUTO_NEG_CONTROL, 380 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 381 - &autoneg_reg); 378 + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, 379 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); 382 380 383 - autoneg_reg |= autoneg_restart_mask; 381 + autoneg_reg |= IXGBE_MII_RESTART; 384 382 385 - ixgbe_write_phy_reg(hw, 386 - IXGBE_MDIO_AUTO_NEG_CONTROL, 387 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 388 - autoneg_reg); 383 + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, 384 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); 389 385 390 386 /* Wait for autonegotiation to finish */ 391 387 for (time_out = 0; time_out < max_time_out; time_out++) { 392 388 udelay(10); 393 389 /* Restart PHY autonegotiation and wait for completion */ 394 - status = ixgbe_read_phy_reg(hw, 395 - IXGBE_MDIO_AUTO_NEG_STATUS, 396 - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 397 - &autoneg_reg); 390 + status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, 391 + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, 392 + &autoneg_reg); 398 393 399 - autoneg_reg &= autoneg_complete_mask; 400 - if (autoneg_reg == autoneg_complete_mask) { 394 + autoneg_reg &= IXGBE_MII_AUTONEG_COMPLETE; 395 + if (autoneg_reg 
== IXGBE_MII_AUTONEG_COMPLETE) { 401 396 status = 0; 402 397 break; 403 398 } ··· 397 418 } 398 419 399 420 /** 400 - * ixgbe_check_tnx_phy_link - Determine link and speed status 401 - * @hw: pointer to hardware structure 402 - * 403 - * Reads the VS1 register to determine if link is up and the current speed for 404 - * the PHY. 405 - **/ 406 - s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, 407 - bool *link_up) 408 - { 409 - s32 status = 0; 410 - u32 time_out; 411 - u32 max_time_out = 10; 412 - u16 phy_link = 0; 413 - u16 phy_speed = 0; 414 - u16 phy_data = 0; 415 - 416 - /* Initialize speed and link to default case */ 417 - *link_up = false; 418 - *speed = IXGBE_LINK_SPEED_10GB_FULL; 419 - 420 - /* 421 - * Check current speed and link status of the PHY register. 422 - * This is a vendor specific register and may have to 423 - * be changed for other copper PHYs. 424 - */ 425 - for (time_out = 0; time_out < max_time_out; time_out++) { 426 - udelay(10); 427 - if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { 428 - *link_up = true; 429 - if (phy_speed == 430 - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS) 431 - *speed = IXGBE_LINK_SPEED_1GB_FULL; 432 - break; 433 - } else { 434 - status = ixgbe_read_phy_reg(hw, 435 - IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, 436 - IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, 437 - &phy_data); 438 - phy_link = phy_data & 439 - IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; 440 - phy_speed = phy_data & 441 - IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; 442 - } 443 - } 444 - 445 - return status; 446 - } 447 - 448 - /** 449 - * ixgbe_setup_tnx_phy_link_speed - Sets the auto advertised capabilities 421 + * ixgbe_setup_phy_link_speed_generic - Sets the auto advertised capabilities 450 422 * @hw: pointer to hardware structure 451 423 * @speed: new link speed 452 424 * @autoneg: true if autonegotiation enabled 453 425 **/ 454 - s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, 455 - bool autoneg, 456 - bool 
autoneg_wait_to_complete) 426 + s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 427 + ixgbe_link_speed speed, 428 + bool autoneg, 429 + bool autoneg_wait_to_complete) 457 430 { 431 + 458 432 /* 459 433 * Clear autoneg_advertised and set new values based on input link 460 434 * speed. ··· 416 484 417 485 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 418 486 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; 487 + 419 488 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 420 489 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; 421 490 422 491 /* Setup link based on the new speed settings */ 423 - ixgbe_setup_tnx_phy_link(hw); 492 + hw->phy.ops.setup_link(hw); 424 493 425 494 return 0; 426 495 } 496 +
+46 -15
drivers/net/ixgbe/ixgbe_phy.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 29 30 #define _IXGBE_PHY_H_ 30 31 31 32 #include "ixgbe_type.h" 33 + #define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 32 34 33 - s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); 34 - s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); 35 - s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, 36 - bool autoneg_wait_to_complete); 37 - s32 ixgbe_identify_phy(struct ixgbe_hw *hw); 38 - s32 ixgbe_reset_phy(struct ixgbe_hw *hw); 39 - s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, 40 - u32 device_type, u16 *phy_data); 35 + /* EEPROM byte offsets */ 36 + #define IXGBE_SFF_IDENTIFIER 0x0 37 + #define IXGBE_SFF_IDENTIFIER_SFP 0x3 38 + #define IXGBE_SFF_VENDOR_OUI_BYTE0 0x25 39 + #define IXGBE_SFF_VENDOR_OUI_BYTE1 0x26 40 + #define IXGBE_SFF_VENDOR_OUI_BYTE2 0x27 41 + #define IXGBE_SFF_1GBE_COMP_CODES 0x6 42 + #define IXGBE_SFF_10GBE_COMP_CODES 0x3 43 + #define IXGBE_SFF_TRANSMISSION_MEDIA 0x9 41 44 42 - /* PHY specific */ 43 - s32 ixgbe_setup_tnx_phy_link(struct ixgbe_hw *hw); 44 - s32 ixgbe_check_tnx_phy_link(struct ixgbe_hw *hw, u32 *speed, bool *link_up); 45 - s32 ixgbe_setup_tnx_phy_link_speed(struct ixgbe_hw *hw, u32 speed, bool autoneg, 46 - bool autoneg_wait_to_complete); 45 + /* Bitmasks */ 46 + #define IXGBE_SFF_TWIN_AX_CAPABLE 0x80 47 + #define IXGBE_SFF_1GBASESX_CAPABLE 0x1 48 + 
#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 49 + #define IXGBE_SFF_10GBASELR_CAPABLE 0x20 50 + #define IXGBE_I2C_EEPROM_READ_MASK 0x100 51 + #define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 52 + #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 53 + #define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 54 + #define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 55 + #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 56 + 57 + /* Bit-shift macros */ 58 + #define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 12 59 + #define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 8 60 + #define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 4 61 + 62 + /* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ 63 + #define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 64 + #define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 65 + #define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 66 + 67 + 68 + s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); 69 + s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); 70 + s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); 71 + s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 72 + u32 device_type, u16 *phy_data); 73 + s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, 74 + u32 device_type, u16 phy_data); 75 + s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); 76 + s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, 77 + ixgbe_link_speed speed, 78 + bool autoneg, 79 + bool autoneg_wait_to_complete); 47 80 48 81 #endif /* _IXGBE_PHY_H_ */
+338 -186
drivers/net/ixgbe/ixgbe_type.h
··· 1 1 /******************************************************************************* 2 2 3 3 Intel 10 Gigabit PCI Express Linux driver 4 - Copyright(c) 1999 - 2007 Intel Corporation. 4 + Copyright(c) 1999 - 2008 Intel Corporation. 5 5 6 6 This program is free software; you can redistribute it and/or modify it 7 7 under the terms and conditions of the GNU General Public License, ··· 20 20 the file called "COPYING". 21 21 22 22 Contact Information: 23 - Linux NICS <linux.nics@intel.com> 24 23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> 25 24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 26 25 ··· 36 37 /* Device IDs */ 37 38 #define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 38 39 #define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 39 - #define IXGBE_DEV_ID_82598AT_DUAL_PORT 0x10C8 40 40 #define IXGBE_DEV_ID_82598EB_CX4 0x10DD 41 41 #define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC 42 + #define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 42 43 43 44 /* General Registers */ 44 45 #define IXGBE_CTRL 0x00000 ··· 69 70 #define IXGBE_EIMC 0x00888 70 71 #define IXGBE_EIAC 0x00810 71 72 #define IXGBE_EIAM 0x00890 72 - #define IXGBE_EITR(_i) (0x00820 + ((_i) * 4)) /* 0x820-0x86c */ 73 - #define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ 73 + #define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : (0x012300 + ((_i) * 4))) 74 + #define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ 74 75 #define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ 75 76 #define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ 76 - #define IXGBE_PBACL 0x11068 77 + #define IXGBE_PBACL(_i) (((_i) == 0) ? 
(0x11068) : (0x110C0 + ((_i) * 4))) 77 78 #define IXGBE_GPIE 0x00898 78 79 79 80 /* Flow Control Registers */ ··· 85 86 #define IXGBE_TFCS 0x0CE00 86 87 87 88 /* Receive DMA Registers */ 88 - #define IXGBE_RDBAL(_i) (0x01000 + ((_i) * 0x40)) /* 64 of each (0-63)*/ 89 - #define IXGBE_RDBAH(_i) (0x01004 + ((_i) * 0x40)) 90 - #define IXGBE_RDLEN(_i) (0x01008 + ((_i) * 0x40)) 91 - #define IXGBE_RDH(_i) (0x01010 + ((_i) * 0x40)) 92 - #define IXGBE_RDT(_i) (0x01018 + ((_i) * 0x40)) 93 - #define IXGBE_RXDCTL(_i) (0x01028 + ((_i) * 0x40)) 94 - #define IXGBE_RSCCTL(_i) (0x0102C + ((_i) * 0x40)) 95 - #define IXGBE_SRRCTL(_i) (0x02100 + ((_i) * 4)) 96 - /* array of 16 (0x02100-0x0213C) */ 97 - #define IXGBE_DCA_RXCTRL(_i) (0x02200 + ((_i) * 4)) 98 - /* array of 16 (0x02200-0x0223C) */ 99 - #define IXGBE_RDRXCTL 0x02F00 89 + #define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : (0x0D000 + ((_i - 64) * 0x40))) 90 + #define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : (0x0D004 + ((_i - 64) * 0x40))) 91 + #define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : (0x0D008 + ((_i - 64) * 0x40))) 92 + #define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : (0x0D010 + ((_i - 64) * 0x40))) 93 + #define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : (0x0D018 + ((_i - 64) * 0x40))) 94 + #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : (0x0D028 + ((_i - 64) * 0x40))) 95 + /* 96 + * Split and Replication Receive Control Registers 97 + * 00-15 : 0x02100 + n*4 98 + * 16-64 : 0x01014 + n*0x40 99 + * 64-127: 0x0D014 + (n-64)*0x40 100 + */ 101 + #define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ 102 + (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ 103 + (0x0D014 + ((_i - 64) * 0x40)))) 104 + /* 105 + * Rx DCA Control Register: 106 + * 00-15 : 0x02200 + n*4 107 + * 16-64 : 0x0100C + n*0x40 108 + * 64-127: 0x0D00C + (n-64)*0x40 109 + */ 110 + #define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? 
(0x02200 + ((_i) * 4)) : \ 111 + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ 112 + (0x0D00C + ((_i - 64) * 0x40)))) 113 + #define IXGBE_RDRXCTL 0x02F00 100 114 #define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) 101 - /* 8 of these 0x03C00 - 0x03C1C */ 115 + /* 8 of these 0x03C00 - 0x03C1C */ 102 116 #define IXGBE_RXCTRL 0x03000 103 117 #define IXGBE_DROPEN 0x03D04 104 118 #define IXGBE_RXPBSIZE_SHIFT 10 ··· 119 107 /* Receive Registers */ 120 108 #define IXGBE_RXCSUM 0x05000 121 109 #define IXGBE_RFCTL 0x05008 110 + #define IXGBE_DRECCCTL 0x02F08 111 + #define IXGBE_DRECCCTL_DISABLE 0 112 + /* Multicast Table Array - 128 entries */ 122 113 #define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) 123 - /* Multicast Table Array - 128 entries */ 124 - #define IXGBE_RAL(_i) (0x05400 + ((_i) * 8)) /* 16 of these (0-15) */ 125 - #define IXGBE_RAH(_i) (0x05404 + ((_i) * 8)) /* 16 of these (0-15) */ 126 - #define IXGBE_PSRTYPE 0x05480 127 - /* 0x5480-0x54BC Packet split receive type */ 114 + #define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : (0x0A200 + ((_i) * 8))) 115 + #define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : (0x0A204 + ((_i) * 8))) 116 + /* Packet split receive type */ 117 + #define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? 
(0x05480 + ((_i) * 4)) : (0x0EA00 + ((_i) * 4))) 118 + /* array of 4096 1-bit vlan filters */ 128 119 #define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) 129 - /* array of 4096 1-bit vlan filters */ 120 + /*array of 4096 4-bit vlan vmdq indices */ 130 121 #define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) 131 - /*array of 4096 4-bit vlan vmdq indicies */ 132 122 #define IXGBE_FCTRL 0x05080 133 123 #define IXGBE_VLNCTRL 0x05088 134 124 #define IXGBE_MCSTCTRL 0x05090 135 125 #define IXGBE_MRQC 0x05818 136 - #define IXGBE_VMD_CTL 0x0581C 137 126 #define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ 138 127 #define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ 139 128 #define IXGBE_IMIRVP 0x05AC0 129 + #define IXGBE_VMD_CTL 0x0581C 140 130 #define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ 141 131 #define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ 142 132 133 + 143 134 /* Transmit DMA registers */ 144 - #define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40))/* 32 of these (0-31)*/ 135 + #define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ 145 136 #define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) 146 137 #define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) 147 138 #define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) ··· 153 138 #define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) 154 139 #define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) 155 140 #define IXGBE_DTXCTL 0x07E00 156 - #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) 157 - /* there are 16 of these (0-15) */ 141 + 142 + #define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ 158 143 #define IXGBE_TIPG 0x0CB00 159 - #define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) *0x04)) 160 - /* there are 8 of these */ 144 + #define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ 161 145 #define IXGBE_MNGTXMAP 0x0CD10 162 146 #define IXGBE_TIPG_FIBER_DEFAULT 3 163 147 #define IXGBE_TXPBSIZE_SHIFT 10 ··· 
168 154 #define IXGBE_IPAV 0x05838 169 155 #define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ 170 156 #define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ 157 + 171 158 #define IXGBE_WUPL 0x05900 172 159 #define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ 173 160 #define IXGBE_FHFT 0x09000 /* Flex host filter table 9000-93FC */ ··· 184 169 #define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ 185 170 #define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ 186 171 #define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ 172 + 173 + 187 174 188 175 /* Stats registers */ 189 176 #define IXGBE_CRCERRS 0x04000 ··· 241 224 #define IXGBE_XEC 0x04120 242 225 243 226 #define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) /* 16 of these */ 244 - #define IXGBE_TQSMR(_i) (0x07300 + ((_i) * 4)) /* 8 of these */ 227 + #define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : (0x08600 + ((_i) * 4))) 245 228 246 229 #define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ 247 230 #define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ ··· 292 275 #define IXGBE_DCA_CTRL 0x11074 293 276 294 277 /* Diagnostic Registers */ 295 - #define IXGBE_RDSTATCTL 0x02C20 296 - #define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ 297 - #define IXGBE_RDHMPN 0x02F08 298 - #define IXGBE_RIC_DW0 0x02F10 299 - #define IXGBE_RIC_DW1 0x02F14 300 - #define IXGBE_RIC_DW2 0x02F18 301 - #define IXGBE_RIC_DW3 0x02F1C 302 - #define IXGBE_RDPROBE 0x02F20 303 - #define IXGBE_TDSTATCTL 0x07C20 304 - #define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ 305 - #define IXGBE_TDHMPN 0x07F08 306 - #define IXGBE_TIC_DW0 0x07F10 307 - #define IXGBE_TIC_DW1 0x07F14 308 - #define IXGBE_TIC_DW2 0x07F18 309 - #define IXGBE_TIC_DW3 0x07F1C 310 - #define IXGBE_TDPROBE 0x07F20 311 - #define IXGBE_TXBUFCTRL 0x0C600 278 + #define IXGBE_RDSTATCTL 0x02C20 279 + #define IXGBE_RDSTAT(_i) 
(0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ 280 + #define IXGBE_RDHMPN 0x02F08 281 + #define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) 282 + #define IXGBE_RDPROBE 0x02F20 283 + #define IXGBE_TDSTATCTL 0x07C20 284 + #define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ 285 + #define IXGBE_TDHMPN 0x07F08 286 + #define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) 287 + #define IXGBE_TDPROBE 0x07F20 288 + #define IXGBE_TXBUFCTRL 0x0C600 312 289 #define IXGBE_TXBUFDATA0 0x0C610 313 290 #define IXGBE_TXBUFDATA1 0x0C614 314 291 #define IXGBE_TXBUFDATA2 0x0C618 ··· 403 392 404 393 #define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ 405 394 #define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ 406 - #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* TX Desc writeback RO bit */ 395 + #define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */ 407 396 #define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ 408 397 409 398 /* MSCA Bit Masks */ ··· 427 416 #define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ 428 417 429 418 /* MSRWD bit masks */ 430 - #define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF 431 - #define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 432 - #define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 433 - #define IXGBE_MSRWD_READ_DATA_SHIFT 16 419 + #define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF 420 + #define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 421 + #define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 422 + #define IXGBE_MSRWD_READ_DATA_SHIFT 16 434 423 435 424 /* Atlas registers */ 436 425 #define IXGBE_ATLAS_PDN_LPBK 0x24 ··· 445 434 #define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 446 435 #define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 447 436 437 + 448 438 /* Device Type definitions for new protocol MDIO commands */ 449 439 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 450 440 #define IXGBE_MDIO_PCS_DEV_TYPE 0x3 451 441 #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 452 442 #define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 453 443 
#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ 444 + 445 + #define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ 454 446 455 447 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ 456 448 #define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ ··· 468 454 #define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ 469 455 #define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ 470 456 #define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ 471 - #define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Abilty Reg */ 457 + #define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ 472 458 #define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ 473 459 #define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ 460 + 461 + #define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Address Reg */ 462 + #define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ 463 + #define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ 464 + 465 + /* MII clause 22/28 definitions */ 466 + #define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 467 + 468 + #define IXGBE_MII_SPEED_SELECTION_REG 0x10 469 + #define IXGBE_MII_RESTART 0x200 470 + #define IXGBE_MII_AUTONEG_COMPLETE 0x20 471 + #define IXGBE_MII_AUTONEG_REG 0x0 474 472 475 473 #define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 476 474 #define IXGBE_MAX_PHY_ADDR 32 477 475 478 476 /* PHY IDs*/ 479 - #define TN1010_PHY_ID 0x00A19410 480 477 #define QT2022_PHY_ID 0x0043A400 481 478 479 + /* PHY Types */ 480 + #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 481 + 482 482 /* General purpose Interrupt Enable */ 483 - #define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ 484 - #define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ 485 - #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ 486 - #define IXGBE_GPIE_EIAME 0x40000000 487 - #define IXGBE_GPIE_PBA_SUPPORT 0x80000000 483 + #define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 
*/ 484 + #define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ 485 + #define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ 486 + #define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ 487 + #define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ 488 + #define IXGBE_GPIE_EIAME 0x40000000 489 + #define IXGBE_GPIE_PBA_SUPPORT 0x80000000 488 490 489 491 /* Transmit Flow Control status */ 490 492 #define IXGBE_TFCS_TXOFF 0x00000001 ··· 561 531 #define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ 562 532 563 533 /* RMCS Bit Masks */ 564 - #define IXGBE_RMCS_RRM 0x00000002 /* Receive Recylce Mode enable */ 534 + #define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ 565 535 /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ 566 536 #define IXGBE_RMCS_RAC 0x00000004 567 537 #define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ ··· 569 539 #define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority flow control ena */ 570 540 #define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ 571 541 542 + 572 543 /* Interrupt register bitmasks */ 573 544 574 545 /* Extended Interrupt Cause Read */ 575 546 #define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ 576 547 #define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ 577 - #define IXGBE_EICR_MNG 0x00400000 /* Managability Event Interrupt */ 548 + #define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ 549 + #define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ 550 + #define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ 578 551 #define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ 579 552 #define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ 580 553 #define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ ··· 585 552 586 553 /* Extended Interrupt Cause Set */ 587 554 #define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 588 - #define 
IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ 589 - #define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ 590 - #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 591 - #define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ 592 - #define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ 555 + #define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ 556 + #define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 557 + #define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ 558 + #define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ 559 + #define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ 560 + #define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ 593 561 #define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 594 562 #define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 595 563 ··· 598 564 #define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 599 565 #define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ 600 566 #define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 601 - #define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Error */ 567 + #define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ 568 + #define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ 569 + #define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ 602 570 #define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ 603 571 #define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 604 572 #define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ ··· 609 573 #define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ 610 574 #define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ 611 575 #define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ 612 - #define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf 
Handler Error */ 613 - #define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Error */ 576 + #define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ 577 + #define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ 578 + #define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ 579 + #define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ 614 580 #define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ 615 581 #define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ 616 582 617 - #define IXGBE_EIMS_ENABLE_MASK (\ 618 - IXGBE_EIMS_RTX_QUEUE | \ 619 - IXGBE_EIMS_LSC | \ 620 - IXGBE_EIMS_TCP_TIMER | \ 621 - IXGBE_EIMS_OTHER) 583 + #define IXGBE_EIMS_ENABLE_MASK ( \ 584 + IXGBE_EIMS_RTX_QUEUE | \ 585 + IXGBE_EIMS_LSC | \ 586 + IXGBE_EIMS_TCP_TIMER | \ 587 + IXGBE_EIMS_OTHER) 622 588 623 - /* Immediate Interrupt RX (A.K.A. Low Latency Interrupt) */ 589 + /* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 624 590 #define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ 625 591 #define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ 626 592 #define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ ··· 658 620 #define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ 659 621 #define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ 660 622 #define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ 623 + 661 624 662 625 #define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ 663 626 ··· 707 668 #define IXGBE_AUTOC_AN_RESTART 0x00001000 708 669 #define IXGBE_AUTOC_FLU 0x00000001 709 670 #define IXGBE_AUTOC_LMS_SHIFT 13 710 - #define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) 711 - #define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) 712 - #define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) 713 - #define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) 714 - #define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) 715 - #define 
IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) 716 - #define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) 671 + #define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) 672 + #define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) 673 + #define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) 674 + #define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) 675 + #define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) 676 + #define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) 677 + #define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) 717 678 718 - #define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 719 - #define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 679 + #define IXGBE_AUTOC_1G_PMA_PMD 0x00000200 680 + #define IXGBE_AUTOC_10G_PMA_PMD 0x00000180 720 681 #define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 721 682 #define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 722 683 #define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ··· 742 703 #define IXGBE_LINKS_TL_FAULT 0x00001000 743 704 #define IXGBE_LINKS_SIGNAL 0x00000F00 744 705 706 + #define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ 745 707 #define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ 746 708 747 709 /* SW Semaphore Register bitmasks */ ··· 797 757 #define IXGBE_PBANUM0_PTR 0x15 798 758 #define IXGBE_PBANUM1_PTR 0x16 799 759 760 + /* Legacy EEPROM word offsets */ 761 + #define IXGBE_ISCSI_BOOT_CAPS 0x0033 762 + #define IXGBE_ISCSI_SETUP_PORT_0 0x0030 763 + #define IXGBE_ISCSI_SETUP_PORT_1 0x0034 764 + 800 765 /* EEPROM Commands - SPI */ 801 766 #define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ 802 767 #define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 ··· 809 764 #define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ 810 765 #define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ 811 766 #define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ 812 - /* EEPROM reset Write 
Enbale latch */ 767 + /* EEPROM reset Write Enable latch */ 813 768 #define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 814 769 #define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ 815 770 #define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ ··· 848 803 /* Number of 100 microseconds we wait for PCI Express master disable */ 849 804 #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 850 805 851 - /* PHY Types */ 852 - #define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 853 - 854 806 /* Check whether address is multicast. This is little-endian specific check.*/ 855 807 #define IXGBE_IS_MULTICAST(Address) \ 856 - (bool)(((u8 *)(Address))[0] & ((u8)0x01)) 808 + (bool)(((u8 *)(Address))[0] & ((u8)0x01)) 857 809 858 810 /* Check whether an address is broadcast. */ 859 811 #define IXGBE_IS_BROADCAST(Address) \ 860 - ((((u8 *)(Address))[0] == ((u8)0xff)) && \ 861 - (((u8 *)(Address))[1] == ((u8)0xff))) 812 + ((((u8 *)(Address))[0] == ((u8)0xff)) && \ 813 + (((u8 *)(Address))[1] == ((u8)0xff))) 862 814 863 815 /* RAH */ 864 816 #define IXGBE_RAH_VIND_MASK 0x003C0000 865 817 #define IXGBE_RAH_VIND_SHIFT 18 866 818 #define IXGBE_RAH_AV 0x80000000 819 + #define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF 867 820 868 821 /* Header split receive */ 869 822 #define IXGBE_RFCTL_ISCSI_DIS 0x00000001 ··· 890 847 #define IXGBE_MAX_FRAME_SZ 0x40040000 891 848 892 849 #define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ 893 - #define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq. 
# write-back enable */ 850 + #define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ 894 851 895 852 /* Receive Config masks */ 896 853 #define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ ··· 903 860 #define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ 904 861 #define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ 905 862 #define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ 906 - /* Receive Priority Flow Control Enbale */ 863 + /* Receive Priority Flow Control Enable */ 907 864 #define IXGBE_FCTRL_RPFCE 0x00004000 908 865 #define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ 909 866 ··· 933 890 /* Receive Descriptor bit definitions */ 934 891 #define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ 935 892 #define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ 936 - #define IXGBE_RXD_STAT_IXSM 0x04 /* Ignore checksum */ 937 893 #define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ 938 - #define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum caculated */ 894 + #define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ 939 895 #define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ 940 896 #define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ 941 897 #define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ ··· 950 908 #define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ 951 909 #define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ 952 910 #define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ 953 - #define IXGBE_RXDADV_HBO 0x00800000 911 + #define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ 954 912 #define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ 955 913 #define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ 956 914 #define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ ··· 964 922 #define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ 965 923 #define IXGBE_RXD_CFI_SHIFT 12 966 924 925 + 967 926 /* SRRCTL bit definitions */ 968 - #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* 
so many KBs */ 969 - #define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F 970 - #define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 971 - #define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 927 + #define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ 928 + #define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F 929 + #define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 930 + #define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 972 931 #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 973 932 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 974 933 #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 975 934 #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 935 + #define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 976 936 977 937 #define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 978 938 #define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF ··· 1008 964 #define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ 1009 965 #define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ 1010 966 #define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ 1011 - 1012 967 /* Masks to determine if packets should be dropped due to frame errors */ 1013 - #define IXGBE_RXD_ERR_FRAME_ERR_MASK (\ 1014 - IXGBE_RXD_ERR_CE | \ 1015 - IXGBE_RXD_ERR_LE | \ 1016 - IXGBE_RXD_ERR_PE | \ 1017 - IXGBE_RXD_ERR_OSE | \ 1018 - IXGBE_RXD_ERR_USE) 968 + #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ 969 + IXGBE_RXD_ERR_CE | \ 970 + IXGBE_RXD_ERR_LE | \ 971 + IXGBE_RXD_ERR_PE | \ 972 + IXGBE_RXD_ERR_OSE | \ 973 + IXGBE_RXD_ERR_USE) 1019 974 1020 - #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK (\ 1021 - IXGBE_RXDADV_ERR_CE | \ 1022 - IXGBE_RXDADV_ERR_LE | \ 1023 - IXGBE_RXDADV_ERR_PE | \ 1024 - IXGBE_RXDADV_ERR_OSE | \ 1025 - IXGBE_RXDADV_ERR_USE) 975 + #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ 976 + IXGBE_RXDADV_ERR_CE | \ 977 + IXGBE_RXDADV_ERR_LE | \ 978 + IXGBE_RXDADV_ERR_PE | \ 979 + IXGBE_RXDADV_ERR_OSE | \ 980 + IXGBE_RXDADV_ERR_USE) 1026 981 1027 982 /* Multicast bit mask */ 1028 983 #define 
IXGBE_MCSTCTRL_MFE 0x4 ··· 1037 994 #define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ 1038 995 #define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 1039 996 997 + 1040 998 /* Transmit Descriptor - Legacy */ 1041 999 struct ixgbe_legacy_tx_desc { 1042 1000 u64 buffer_addr; /* Address of the descriptor's data buffer */ ··· 1052 1008 union { 1053 1009 __le32 data; 1054 1010 struct { 1055 - u8 status; /* Descriptor status */ 1056 - u8 css; /* Checksum start */ 1011 + u8 status; /* Descriptor status */ 1012 + u8 css; /* Checksum start */ 1057 1013 __le16 vlan; 1058 1014 } fields; 1059 1015 } upper; ··· 1062 1018 /* Transmit Descriptor - Advanced */ 1063 1019 union ixgbe_adv_tx_desc { 1064 1020 struct { 1065 - __le64 buffer_addr; /* Address of descriptor's data buf */ 1021 + __le64 buffer_addr; /* Address of descriptor's data buf */ 1066 1022 __le32 cmd_type_len; 1067 1023 __le32 olinfo_status; 1068 1024 } read; ··· 1094 1050 union { 1095 1051 __le32 data; 1096 1052 struct { 1097 - __le16 pkt_info; /* RSS type, Packet type */ 1098 - __le16 hdr_info; /* Split Header, header len */ 1053 + __le16 pkt_info; /* RSS, Pkt type */ 1054 + __le16 hdr_info; /* Splithdr, hdrlen */ 1099 1055 } hs_rss; 1100 1056 } lo_dword; 1101 1057 union { ··· 1123 1079 }; 1124 1080 1125 1081 /* Adv Transmit Descriptor Config Masks */ 1126 - #define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buffer length(bytes) */ 1082 + #define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ 1127 1083 #define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ 1128 1084 #define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ 1129 1085 #define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ 1130 1086 #define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ 1131 1087 #define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ 1132 - #define IXGBE_ADVTXD_DCMD_RDMA 0x04000000 /* RDMA */ 1133 1088 #define 
IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ 1134 - #define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ 1089 + #define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ 1135 1090 #define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ 1136 1091 #define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ 1137 1092 #define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ 1138 1093 #define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ 1139 - #define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED present in WB */ 1094 + #define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ 1140 1095 #define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ 1141 1096 #define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ 1097 + #define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ 1142 1098 #define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ 1143 1099 #define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ 1144 - IXGBE_ADVTXD_POPTS_SHIFT) 1100 + IXGBE_ADVTXD_POPTS_SHIFT) 1145 1101 #define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ 1146 - IXGBE_ADVTXD_POPTS_SHIFT) 1147 - #define IXGBE_ADVTXD_POPTS_EOM 0x00000400 /* Enable L bit-RDMA DDP hdr */ 1148 - #define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 1149 - #define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 1150 - #define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ 1151 - #define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU*/ 1152 - #define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ 1153 - #define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ 1154 - #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ 1155 - #define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ 1156 - #define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 
*/ 1157 - #define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ 1158 - #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ 1159 - #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ 1160 - #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* Req requires Markers and CRC */ 1161 - #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ 1162 - #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ 1102 + IXGBE_ADVTXD_POPTS_SHIFT) 1103 + #define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ 1104 + #define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ 1105 + #define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ 1106 + #define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ 1107 + #define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ 1108 + #define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ 1109 + #define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ 1110 + #define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ 1111 + #define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ 1112 + #define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ 1113 + #define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ 1114 + #define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ 1115 + #define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ 1116 + #define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ 1117 + #define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ 1118 + #define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ 1163 1119 1120 + /* Autonegotiation advertised speeds */ 1121 + typedef u32 ixgbe_autoneg_advertised; 1164 1122 /* Link speed */ 1123 + typedef u32 ixgbe_link_speed; 1165 1124 #define IXGBE_LINK_SPEED_UNKNOWN 0 1166 1125 #define 
IXGBE_LINK_SPEED_100_FULL 0x0008 1167 1126 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 1168 1127 #define IXGBE_LINK_SPEED_10GB_FULL 0x0080 1128 + #define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ 1129 + IXGBE_LINK_SPEED_10GB_FULL) 1130 + 1131 + /* Physical layer type */ 1132 + typedef u32 ixgbe_physical_layer; 1133 + #define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 1134 + #define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 1135 + #define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 1136 + #define IXGBE_PHYSICAL_LAYER_100BASE_T 0x0004 1137 + #define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 1138 + #define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 1139 + #define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 1140 + #define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 1141 + #define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 1142 + #define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 1143 + #define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 1144 + #define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 1169 1145 1170 1146 1171 1147 enum ixgbe_eeprom_type { ··· 1202 1138 1203 1139 enum ixgbe_phy_type { 1204 1140 ixgbe_phy_unknown = 0, 1205 - ixgbe_phy_tn, 1206 1141 ixgbe_phy_qt, 1207 - ixgbe_phy_xaui 1142 + ixgbe_phy_xaui, 1143 + ixgbe_phy_tw_tyco, 1144 + ixgbe_phy_tw_unknown, 1145 + ixgbe_phy_sfp_avago, 1146 + ixgbe_phy_sfp_ftl, 1147 + ixgbe_phy_sfp_unknown, 1148 + ixgbe_phy_generic 1149 + }; 1150 + 1151 + /* 1152 + * SFP+ module type IDs: 1153 + * 1154 + * ID Module Type 1155 + * ============= 1156 + * 0 SFP_DA_CU 1157 + * 1 SFP_SR 1158 + * 2 SFP_LR 1159 + */ 1160 + enum ixgbe_sfp_type { 1161 + ixgbe_sfp_type_da_cu = 0, 1162 + ixgbe_sfp_type_sr = 1, 1163 + ixgbe_sfp_type_lr = 2, 1164 + ixgbe_sfp_type_unknown = 0xFFFF 1208 1165 }; 1209 1166 1210 1167 enum ixgbe_media_type { 1211 1168 ixgbe_media_type_unknown = 0, 1212 1169 ixgbe_media_type_fiber, 1213 1170 ixgbe_media_type_copper, 1214 - ixgbe_media_type_backplane 1171 + ixgbe_media_type_backplane, 1172 + ixgbe_media_type_virtual 1215 1173 }; 1216 1174 1217 
1175 /* Flow Control Settings */ ··· 1331 1245 typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr, 1332 1246 u32 *vmdq); 1333 1247 1248 + /* Function pointer table */ 1249 + struct ixgbe_eeprom_operations { 1250 + s32 (*init_params)(struct ixgbe_hw *); 1251 + s32 (*read)(struct ixgbe_hw *, u16, u16 *); 1252 + s32 (*write)(struct ixgbe_hw *, u16, u16); 1253 + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); 1254 + s32 (*update_checksum)(struct ixgbe_hw *); 1255 + }; 1256 + 1334 1257 struct ixgbe_mac_operations { 1335 - s32 (*reset)(struct ixgbe_hw *); 1258 + s32 (*init_hw)(struct ixgbe_hw *); 1259 + s32 (*reset_hw)(struct ixgbe_hw *); 1260 + s32 (*start_hw)(struct ixgbe_hw *); 1261 + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); 1336 1262 enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); 1263 + s32 (*get_supported_physical_layer)(struct ixgbe_hw *); 1264 + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); 1265 + s32 (*stop_adapter)(struct ixgbe_hw *); 1266 + s32 (*get_bus_info)(struct ixgbe_hw *); 1267 + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); 1268 + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); 1269 + 1270 + /* Link */ 1337 1271 s32 (*setup_link)(struct ixgbe_hw *); 1338 - s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); 1339 - s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); 1340 - s32 (*get_link_settings)(struct ixgbe_hw *, u32 *, bool *); 1272 + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, 1273 + bool); 1274 + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); 1275 + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, 1276 + bool *); 1277 + 1278 + /* LED */ 1279 + s32 (*led_on)(struct ixgbe_hw *, u32); 1280 + s32 (*led_off)(struct ixgbe_hw *, u32); 1281 + s32 (*blink_led_start)(struct ixgbe_hw *, u32); 1282 + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); 1283 + 1284 + /* RAR, Multicast, VLAN */ 1285 + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 
*, u32, u32); 1286 + s32 (*clear_rar)(struct ixgbe_hw *, u32); 1287 + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); 1288 + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); 1289 + s32 (*init_rx_addrs)(struct ixgbe_hw *); 1290 + s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, 1291 + ixgbe_mc_addr_itr); 1292 + s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, 1293 + ixgbe_mc_addr_itr); 1294 + s32 (*enable_mc)(struct ixgbe_hw *); 1295 + s32 (*disable_mc)(struct ixgbe_hw *); 1296 + s32 (*clear_vfta)(struct ixgbe_hw *); 1297 + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); 1298 + s32 (*init_uta_tables)(struct ixgbe_hw *); 1299 + 1300 + /* Flow Control */ 1301 + s32 (*setup_fc)(struct ixgbe_hw *, s32); 1341 1302 }; 1342 1303 1343 1304 struct ixgbe_phy_operations { 1305 + s32 (*identify)(struct ixgbe_hw *); 1306 + s32 (*identify_sfp)(struct ixgbe_hw *); 1307 + s32 (*reset)(struct ixgbe_hw *); 1308 + s32 (*read_reg)(struct ixgbe_hw *, u32, u32, u16 *); 1309 + s32 (*write_reg)(struct ixgbe_hw *, u32, u32, u16); 1344 1310 s32 (*setup_link)(struct ixgbe_hw *); 1345 - s32 (*check_link)(struct ixgbe_hw *, u32 *, bool *); 1346 - s32 (*setup_link_speed)(struct ixgbe_hw *, u32, bool, bool); 1347 - }; 1348 - 1349 - struct ixgbe_mac_info { 1350 - struct ixgbe_mac_operations ops; 1351 - enum ixgbe_mac_type type; 1352 - u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 1353 - u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 1354 - s32 mc_filter_type; 1355 - u32 mcft_size; 1356 - u32 vft_size; 1357 - u32 num_rar_entries; 1358 - u32 num_rx_queues; 1359 - u32 num_tx_queues; 1360 - u32 link_attach_type; 1361 - u32 link_mode_select; 1362 - bool link_settings_loaded; 1311 + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool, 1312 + bool); 1313 + s32 (*read_i2c_byte)(struct ixgbe_hw *, u8, u8, u8 *); 1314 + s32 (*write_i2c_byte)(struct ixgbe_hw *, u8, u8, u8); 1315 + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); 1316 + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, 
u8); 1363 1317 }; 1364 1318 1365 1319 struct ixgbe_eeprom_info { 1366 - enum ixgbe_eeprom_type type; 1367 - u16 word_size; 1368 - u16 address_bits; 1320 + struct ixgbe_eeprom_operations ops; 1321 + enum ixgbe_eeprom_type type; 1322 + u32 semaphore_delay; 1323 + u16 word_size; 1324 + u16 address_bits; 1325 + }; 1326 + 1327 + struct ixgbe_mac_info { 1328 + struct ixgbe_mac_operations ops; 1329 + enum ixgbe_mac_type type; 1330 + u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 1331 + u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 1332 + s32 mc_filter_type; 1333 + u32 mcft_size; 1334 + u32 vft_size; 1335 + u32 num_rar_entries; 1336 + u32 max_tx_queues; 1337 + u32 max_rx_queues; 1338 + u32 link_attach_type; 1339 + u32 link_mode_select; 1340 + bool link_settings_loaded; 1341 + bool autoneg; 1342 + bool autoneg_failed; 1369 1343 }; 1370 1344 1371 1345 struct ixgbe_phy_info { 1372 - struct ixgbe_phy_operations ops; 1373 - 1374 - enum ixgbe_phy_type type; 1375 - u32 addr; 1376 - u32 id; 1377 - u32 revision; 1378 - enum ixgbe_media_type media_type; 1379 - u32 autoneg_advertised; 1380 - bool autoneg_wait_to_complete; 1381 - }; 1382 - 1383 - struct ixgbe_info { 1384 - enum ixgbe_mac_type mac; 1385 - s32 (*get_invariants)(struct ixgbe_hw *); 1386 - struct ixgbe_mac_operations *mac_ops; 1346 + struct ixgbe_phy_operations ops; 1347 + enum ixgbe_phy_type type; 1348 + u32 addr; 1349 + u32 id; 1350 + enum ixgbe_sfp_type sfp_type; 1351 + u32 revision; 1352 + enum ixgbe_media_type media_type; 1353 + bool reset_disable; 1354 + ixgbe_autoneg_advertised autoneg_advertised; 1355 + bool autoneg_wait_to_complete; 1387 1356 }; 1388 1357 1389 1358 struct ixgbe_hw { ··· 1456 1315 u8 revision_id; 1457 1316 bool adapter_stopped; 1458 1317 }; 1318 + 1319 + struct ixgbe_info { 1320 + enum ixgbe_mac_type mac; 1321 + s32 (*get_invariants)(struct ixgbe_hw *); 1322 + struct ixgbe_mac_operations *mac_ops; 1323 + struct ixgbe_eeprom_operations *eeprom_ops; 1324 + struct ixgbe_phy_operations *phy_ops; 1325 + }; 1326 + 
1459 1327 1460 1328 /* Error Codes */ 1461 1329 #define IXGBE_ERR_EEPROM -1 ··· 1484 1334 #define IXGBE_ERR_RESET_FAILED -15 1485 1335 #define IXGBE_ERR_SWFW_SYNC -16 1486 1336 #define IXGBE_ERR_PHY_ADDR_INVALID -17 1337 + #define IXGBE_ERR_I2C -18 1338 + #define IXGBE_ERR_SFP_NOT_SUPPORTED -19 1487 1339 #define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 1488 1340 1489 1341 #endif /* _IXGBE_TYPE_H_ */
+1 -1
drivers/net/meth.c
··· 41 41 #endif 42 42 43 43 #if MFE_DEBUG>=1 44 - #define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __FUNCTION__ , ## args) 44 + #define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args) 45 45 #define MFE_RX_DEBUG 2 46 46 #else 47 47 #define DPRINTK(str,args...)
+1 -1
drivers/net/mipsnet.c
··· 203 203 204 204 out_badirq: 205 205 printk(KERN_INFO "%s: %s(): irq %d for unknown device\n", 206 - dev->name, __FUNCTION__, irq); 206 + dev->name, __func__, irq); 207 207 return ret; 208 208 } 209 209
+1
drivers/net/mlx4/alloc.c
··· 33 33 34 34 #include <linux/errno.h> 35 35 #include <linux/slab.h> 36 + #include <linux/mm.h> 36 37 #include <linux/bitmap.h> 37 38 #include <linux/dma-mapping.h> 38 39 #include <linux/vmalloc.h>
+17 -16
drivers/net/myri10ge/myri10ge.c
··· 183 183 dma_addr_t fw_stats_bus; 184 184 int watchdog_tx_done; 185 185 int watchdog_tx_req; 186 - #ifdef CONFIG_DCA 186 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 187 187 int cached_dca_tag; 188 188 int cpu; 189 189 __be32 __iomem *dca_tag; ··· 215 215 int msi_enabled; 216 216 int msix_enabled; 217 217 struct msix_entry *msix_vectors; 218 - #ifdef CONFIG_DCA 218 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 219 219 int dca_enabled; 220 220 #endif 221 221 u32 link_state; ··· 891 891 struct myri10ge_slice_state *ss; 892 892 int i, status; 893 893 size_t bytes; 894 - #ifdef CONFIG_DCA 894 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 895 895 unsigned long dca_tag_off; 896 896 #endif 897 897 ··· 986 986 } 987 987 put_be32(htonl(mgp->intr_coal_delay), mgp->intr_coal_delay_ptr); 988 988 989 - #ifdef CONFIG_DCA 989 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 990 990 status = myri10ge_send_cmd(mgp, MXGEFW_CMD_GET_DCA_OFFSET, &cmd, 0); 991 991 dca_tag_off = cmd.data0; 992 992 for (i = 0; i < mgp->num_slices; i++) { ··· 1025 1025 return status; 1026 1026 } 1027 1027 1028 - #ifdef CONFIG_DCA 1028 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 1029 1029 static void 1030 1030 myri10ge_write_dca(struct myri10ge_slice_state *ss, int cpu, int tag) 1031 1031 { ··· 1060 1060 } 1061 1061 err = dca_add_requester(&pdev->dev); 1062 1062 if (err) { 1063 - dev_err(&pdev->dev, 1064 - "dca_add_requester() failed, err=%d\n", err); 1063 + if (err != -ENODEV) 1064 + dev_err(&pdev->dev, 1065 + "dca_add_requester() failed, err=%d\n", err); 1065 1066 return; 1066 1067 } 1067 1068 mgp->dca_enabled = 1; ··· 1458 1457 struct net_device *netdev = ss->mgp->dev; 1459 1458 int work_done; 1460 1459 1461 - #ifdef CONFIG_DCA 1460 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 1462 1461 if (ss->mgp->dca_enabled) 1463 1462 myri10ge_update_dca(ss); 1464 1463 #endif ··· 1687 1686 "tx_boundary", "WC", "irq", "MSI", "MSIX", 1688 1687 
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs", 1689 1688 "serial_number", "watchdog_resets", 1690 - #ifdef CONFIG_DCA 1691 - "dca_capable", "dca_enabled", 1689 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 1690 + "dca_capable_firmware", "dca_device_present", 1692 1691 #endif 1693 1692 "link_changes", "link_up", "dropped_link_overflow", 1694 1693 "dropped_link_error_or_filtered", ··· 1766 1765 data[i++] = (unsigned int)mgp->read_write_dma; 1767 1766 data[i++] = (unsigned int)mgp->serial_number; 1768 1767 data[i++] = (unsigned int)mgp->watchdog_resets; 1769 - #ifdef CONFIG_DCA 1768 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 1770 1769 data[i++] = (unsigned int)(mgp->ss[0].dca_tag != NULL); 1771 1770 data[i++] = (unsigned int)(mgp->dca_enabled); 1772 1771 #endif ··· 3764 3763 dev_err(&pdev->dev, "failed reset\n"); 3765 3764 goto abort_with_slices; 3766 3765 } 3767 - #ifdef CONFIG_DCA 3766 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 3768 3767 myri10ge_setup_dca(mgp); 3769 3768 #endif 3770 3769 pci_set_drvdata(pdev, mgp); ··· 3867 3866 netdev = mgp->dev; 3868 3867 unregister_netdev(netdev); 3869 3868 3870 - #ifdef CONFIG_DCA 3869 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 3871 3870 myri10ge_teardown_dca(mgp); 3872 3871 #endif 3873 3872 myri10ge_dummy_rdma(mgp, 0); ··· 3912 3911 #endif 3913 3912 }; 3914 3913 3915 - #ifdef CONFIG_DCA 3914 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 3916 3915 static int 3917 3916 myri10ge_notify_dca(struct notifier_block *nb, unsigned long event, void *p) 3918 3917 { ··· 3944 3943 myri10ge_driver.name, myri10ge_rss_hash); 3945 3944 myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT; 3946 3945 } 3947 - #ifdef CONFIG_DCA 3946 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 3948 3947 dca_register_notify(&myri10ge_dca_notifier); 3949 3948 #endif 3950 3949 ··· 3955 3954 3956 3955 static __exit void myri10ge_cleanup_module(void) 3957 3956 { 3958 - 
#ifdef CONFIG_DCA 3957 + #if (defined CONFIG_DCA) || (defined CONFIG_DCA_MODULE) 3959 3958 dca_unregister_notify(&myri10ge_dca_notifier); 3960 3959 #endif 3961 3960 pci_unregister_driver(&myri10ge_driver);
+8 -1
drivers/net/ne.c
··· 844 844 { 845 845 struct net_device *dev = platform_get_drvdata(pdev); 846 846 847 - if (netif_running(dev)) 847 + if (netif_running(dev)) { 848 + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; 848 849 netif_device_detach(dev); 850 + if (idev) 851 + pnp_stop_dev(idev); 852 + } 849 853 return 0; 850 854 } 851 855 ··· 858 854 struct net_device *dev = platform_get_drvdata(pdev); 859 855 860 856 if (netif_running(dev)) { 857 + struct pnp_dev *idev = (struct pnp_dev *)ei_status.priv; 858 + if (idev) 859 + pnp_start_dev(idev); 861 860 ne_reset_8390(dev); 862 861 NS8390p_init(dev, 1); 863 862 netif_device_attach(dev);
+1 -1
drivers/net/netx-eth.c
··· 189 189 190 190 if ((status & ISR_CON_HI) || (status & ISR_IND_HI)) 191 191 printk("%s: unexpected status: 0x%08x\n", 192 - __FUNCTION__, status); 192 + __func__, status); 193 193 194 194 fill_level = 195 195 readl(NETX_PFIFO_FILL_LEVEL(IND_FIFO_PORT_LO(priv->id)));
+1 -1
drivers/net/netxen/netxen_nic.h
··· 742 742 } while (0) 743 743 #else 744 744 #define DPRINTK(klevel, fmt, args...) do { \ 745 - printk(KERN_##klevel PFX "%s: %s: " fmt, __FUNCTION__,\ 745 + printk(KERN_##klevel PFX "%s: %s: " fmt, __func__,\ 746 746 (adapter != NULL && adapter->netdev != NULL) ? \ 747 747 adapter->netdev->name : NULL, \ 748 748 ## args); } while(0)
+10 -10
drivers/net/netxen/netxen_nic_main.c
··· 77 77 78 78 /* PCI Device ID Table */ 79 79 #define ENTRY(device) \ 80 - {PCI_DEVICE(0x4040, (device)), \ 80 + {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ 81 81 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 82 82 83 83 static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 84 - ENTRY(0x0001), 85 - ENTRY(0x0002), 86 - ENTRY(0x0003), 87 - ENTRY(0x0004), 88 - ENTRY(0x0005), 89 - ENTRY(0x0024), 90 - ENTRY(0x0025), 91 - ENTRY(0x0100), 84 + ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), 85 + ENTRY(PCI_DEVICE_ID_NX2031_10GCX4), 86 + ENTRY(PCI_DEVICE_ID_NX2031_4GCU), 87 + ENTRY(PCI_DEVICE_ID_NX2031_IMEZ), 88 + ENTRY(PCI_DEVICE_ID_NX2031_HMEZ), 89 + ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), 90 + ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), 91 + ENTRY(PCI_DEVICE_ID_NX3031), 92 92 {0,} 93 93 }; 94 94 ··· 241 241 case NETXEN_BRDTYPE_P3_REF_QG: 242 242 case NETXEN_BRDTYPE_P3_4_GB: 243 243 case NETXEN_BRDTYPE_P3_4_GB_MM: 244 - adapter->msix_supported = 0; 244 + adapter->msix_supported = !!use_msi_x; 245 245 adapter->max_rx_desc_count = MAX_RCV_DESCRIPTORS_10G; 246 246 break; 247 247
+2 -2
drivers/net/pci-skeleton.c
··· 119 119 120 120 #ifdef NETDRV_DEBUG 121 121 /* note: prints function name for you */ 122 - # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __FUNCTION__ , ## args) 122 + # define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args) 123 123 #else 124 124 # define DPRINTK(fmt, args...) 125 125 #endif ··· 130 130 # define assert(expr) \ 131 131 if(!(expr)) { \ 132 132 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 133 - #expr,__FILE__,__FUNCTION__,__LINE__); \ 133 + #expr,__FILE__,__func__,__LINE__); \ 134 134 } 135 135 #endif 136 136
+2 -2
drivers/net/r6040.c
··· 370 370 /* Reset internal state machine */ 371 371 iowrite16(2, ioaddr + MAC_SM); 372 372 iowrite16(0, ioaddr + MAC_SM); 373 - udelay(5000); 373 + mdelay(5); 374 374 375 375 /* MAC Bus Control Register */ 376 376 iowrite16(MBCR_DEFAULT, ioaddr + MBCR); ··· 806 806 iowrite16(0x01, ioaddr + MCR1); /* Reset MAC */ 807 807 iowrite16(2, ioaddr + MAC_SM); /* Reset internal state machine */ 808 808 iowrite16(0, ioaddr + MAC_SM); 809 - udelay(5000); 809 + mdelay(5); 810 810 811 811 /* Restore MAC Address */ 812 812 adrp = (u16 *) dev->dev_addr;
+15 -12
drivers/net/r8169.c
··· 36 36 #define assert(expr) \ 37 37 if (!(expr)) { \ 38 38 printk( "Assertion failed! %s,%s,%s,line=%d\n", \ 39 - #expr,__FILE__,__FUNCTION__,__LINE__); \ 39 + #expr,__FILE__,__func__,__LINE__); \ 40 40 } 41 41 #define dprintk(fmt, args...) \ 42 42 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0) ··· 2286 2286 2287 2287 RTL_R8(IntrMask); 2288 2288 2289 - RTL_W32(RxMissed, 0); 2290 - 2291 2289 rtl_set_rx_mode(dev); 2292 2290 2293 2291 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); ··· 2409 2411 RTL_W8(Cfg9346, Cfg9346_Lock); 2410 2412 2411 2413 RTL_R8(IntrMask); 2412 - 2413 - RTL_W32(RxMissed, 0); 2414 2414 2415 2415 rtl_set_rx_mode(dev); 2416 2416 ··· 3187 3191 return work_done; 3188 3192 } 3189 3193 3194 + static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr) 3195 + { 3196 + struct rtl8169_private *tp = netdev_priv(dev); 3197 + 3198 + if (tp->mac_version > RTL_GIGA_MAC_VER_06) 3199 + return; 3200 + 3201 + dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff); 3202 + RTL_W32(RxMissed, 0); 3203 + } 3204 + 3190 3205 static void rtl8169_down(struct net_device *dev) 3191 3206 { 3192 3207 struct rtl8169_private *tp = netdev_priv(dev); ··· 3215 3208 3216 3209 rtl8169_asic_down(ioaddr); 3217 3210 3218 - /* Update the error counts. */ 3219 - dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3220 - RTL_W32(RxMissed, 0); 3211 + rtl8169_rx_missed(dev, ioaddr); 3221 3212 3222 3213 spin_unlock_irq(&tp->lock); 3223 3214 ··· 3337 3332 3338 3333 if (netif_running(dev)) { 3339 3334 spin_lock_irqsave(&tp->lock, flags); 3340 - dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3341 - RTL_W32(RxMissed, 0); 3335 + rtl8169_rx_missed(dev, ioaddr); 3342 3336 spin_unlock_irqrestore(&tp->lock, flags); 3343 3337 } 3344 3338 ··· 3362 3358 3363 3359 rtl8169_asic_down(ioaddr); 3364 3360 3365 - dev->stats.rx_missed_errors += RTL_R32(RxMissed); 3366 - RTL_W32(RxMissed, 0); 3361 + rtl8169_rx_missed(dev, ioaddr); 3367 3362 3368 3363 spin_unlock_irq(&tp->lock); 3369 3364
+30 -28
drivers/net/s2io.c
··· 371 371 flags[i]); 372 372 } 373 373 374 - /* A flag indicating whether 'RX_PA_CFG_STRIP_VLAN_TAG' bit is set or not */ 375 - static int vlan_strip_flag; 376 - 377 374 /* Unregister the vlan */ 378 375 static void s2io_vlan_rx_kill_vid(struct net_device *dev, unsigned long vid) 379 376 { ··· 2300 2303 val64 = readq(&bar0->rx_pa_cfg); 2301 2304 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 2302 2305 writeq(val64, &bar0->rx_pa_cfg); 2303 - vlan_strip_flag = 0; 2306 + nic->vlan_strip_flag = 0; 2304 2307 } 2305 2308 2306 2309 /* ··· 3133 3136 if (skb == NULL) { 3134 3137 spin_unlock_irqrestore(&fifo_data->tx_lock, flags); 3135 3138 DBG_PRINT(ERR_DBG, "%s: Null skb ", 3136 - __FUNCTION__); 3139 + __func__); 3137 3140 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n"); 3138 3141 return; 3139 3142 } ··· 3493 3496 unsigned long long mem_alloc_cnt, mem_free_cnt, watchdog_cnt; 3494 3497 3495 3498 DBG_PRINT(INIT_DBG,"%s - Resetting XFrame card %s\n", 3496 - __FUNCTION__, sp->dev->name); 3499 + __func__, sp->dev->name); 3497 3500 3498 3501 /* Back up the PCI-X CMD reg, dont want to lose MMRBC, OST settings */ 3499 3502 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER, &(pci_cmd)); ··· 3515 3518 } 3516 3519 3517 3520 if (check_pci_device_id(val16) == (u16)PCI_ANY_ID) { 3518 - DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __FUNCTION__); 3521 + DBG_PRINT(ERR_DBG,"%s SW_Reset failed!\n", __func__); 3519 3522 } 3520 3523 3521 3524 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, pci_cmd); ··· 3765 3768 val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); 3766 3769 writeq(val64, &bar0->xmsi_access); 3767 3770 if (wait_for_msix_trans(nic, msix_index)) { 3768 - DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3771 + DBG_PRINT(ERR_DBG, "failed in %s\n", __func__); 3769 3772 continue; 3770 3773 } 3771 3774 } ··· 3786 3789 val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); 3787 3790 writeq(val64, &bar0->xmsi_access); 3788 3791 if (wait_for_msix_trans(nic, msix_index)) { 3789 - 
DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3792 + DBG_PRINT(ERR_DBG, "failed in %s\n", __func__); 3790 3793 continue; 3791 3794 } 3792 3795 addr = readq(&bar0->xmsi_address); ··· 3809 3812 GFP_KERNEL); 3810 3813 if (!nic->entries) { 3811 3814 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ 3812 - __FUNCTION__); 3815 + __func__); 3813 3816 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3814 3817 return -ENOMEM; 3815 3818 } ··· 3823 3826 GFP_KERNEL); 3824 3827 if (!nic->s2io_entries) { 3825 3828 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", 3826 - __FUNCTION__); 3829 + __func__); 3827 3830 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3828 3831 kfree(nic->entries); 3829 3832 nic->mac_control.stats_info->sw_stat.mem_freed ··· 5007 5010 val64 = readq(&bar0->rx_pa_cfg); 5008 5011 val64 &= ~RX_PA_CFG_STRIP_VLAN_TAG; 5009 5012 writeq(val64, &bar0->rx_pa_cfg); 5010 - vlan_strip_flag = 0; 5013 + sp->vlan_strip_flag = 0; 5011 5014 } 5012 5015 5013 5016 val64 = readq(&bar0->mac_cfg); ··· 5029 5032 val64 = readq(&bar0->rx_pa_cfg); 5030 5033 val64 |= RX_PA_CFG_STRIP_VLAN_TAG; 5031 5034 writeq(val64, &bar0->rx_pa_cfg); 5032 - vlan_strip_flag = 1; 5035 + sp->vlan_strip_flag = 1; 5033 5036 } 5034 5037 5035 5038 val64 = readq(&bar0->mac_cfg); ··· 6743 6746 ret = s2io_card_up(sp); 6744 6747 if (ret) { 6745 6748 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n", 6746 - __FUNCTION__); 6749 + __func__); 6747 6750 return ret; 6748 6751 } 6749 6752 s2io_wake_all_tx_queue(sp); ··· 7527 7530 default: 7528 7531 DBG_PRINT(ERR_DBG, 7529 7532 "%s: Samadhana!!\n", 7530 - __FUNCTION__); 7533 + __func__); 7531 7534 BUG(); 7532 7535 } 7533 7536 } ··· 7778 7781 return -ENOMEM; 7779 7782 } 7780 7783 if ((ret = pci_request_regions(pdev, s2io_driver_name))) { 7781 - DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __FUNCTION__, ret); 7784 + DBG_PRINT(ERR_DBG, "%s: Request Regions failed - %x \n", __func__, ret); 7782 7785 
pci_disable_device(pdev); 7783 7786 return -ENODEV; 7784 7787 } ··· 7995 7998 if (sp->device_type & XFRAME_II_DEVICE) { 7996 7999 mode = s2io_verify_pci_mode(sp); 7997 8000 if (mode < 0) { 7998 - DBG_PRINT(ERR_DBG, "%s: ", __FUNCTION__); 8001 + DBG_PRINT(ERR_DBG, "%s: ", __func__); 7999 8002 DBG_PRINT(ERR_DBG, " Unsupported PCI bus mode\n"); 8000 8003 ret = -EBADSLT; 8001 8004 goto set_swap_failed; ··· 8203 8206 /* Initialize device name */ 8204 8207 sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); 8205 8208 8209 + if (vlan_tag_strip) 8210 + sp->vlan_strip_flag = 1; 8211 + else 8212 + sp->vlan_strip_flag = 0; 8213 + 8206 8214 /* 8207 8215 * Make Link state as off at this point, when the Link change 8208 8216 * interrupt comes the state will be automatically changed to ··· 8301 8299 8302 8300 if (!(rxdp->Control_1 & RXD_FRAME_PROTO_TCP)) { 8303 8301 DBG_PRINT(INIT_DBG,"%s: Non-TCP frames not supported for LRO\n", 8304 - __FUNCTION__); 8302 + __func__); 8305 8303 return -1; 8306 8304 } 8307 8305 ··· 8313 8311 * If vlan stripping is disabled and the frame is VLAN tagged, 8314 8312 * shift the offset by the VLAN header size bytes. 
8315 8313 */ 8316 - if ((!vlan_strip_flag) && 8314 + if ((!sp->vlan_strip_flag) && 8317 8315 (rxdp->Control_1 & RXD_FRAME_VLAN_TAG)) 8318 8316 ip_off += HEADER_VLAN_SIZE; 8319 8317 } else { ··· 8332 8330 static int check_for_socket_match(struct lro *lro, struct iphdr *ip, 8333 8331 struct tcphdr *tcp) 8334 8332 { 8335 - DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8333 + DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); 8336 8334 if ((lro->iph->saddr != ip->saddr) || (lro->iph->daddr != ip->daddr) || 8337 8335 (lro->tcph->source != tcp->source) || (lro->tcph->dest != tcp->dest)) 8338 8336 return -1; ··· 8347 8345 static void initiate_new_session(struct lro *lro, u8 *l2h, 8348 8346 struct iphdr *ip, struct tcphdr *tcp, u32 tcp_pyld_len, u16 vlan_tag) 8349 8347 { 8350 - DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8348 + DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); 8351 8349 lro->l2h = l2h; 8352 8350 lro->iph = ip; 8353 8351 lro->tcph = tcp; ··· 8377 8375 struct tcphdr *tcp = lro->tcph; 8378 8376 __sum16 nchk; 8379 8377 struct stat_block *statinfo = sp->mac_control.stats_info; 8380 - DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8378 + DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); 8381 8379 8382 8380 /* Update L3 header */ 8383 8381 ip->tot_len = htons(lro->total_len); ··· 8405 8403 static void aggregate_new_rx(struct lro *lro, struct iphdr *ip, 8406 8404 struct tcphdr *tcp, u32 l4_pyld) 8407 8405 { 8408 - DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8406 + DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); 8409 8407 lro->total_len += l4_pyld; 8410 8408 lro->frags_len += l4_pyld; 8411 8409 lro->tcp_next_seq += l4_pyld; ··· 8429 8427 { 8430 8428 u8 *ptr; 8431 8429 8432 - DBG_PRINT(INFO_DBG,"%s: Been here...\n", __FUNCTION__); 8430 + DBG_PRINT(INFO_DBG,"%s: Been here...\n", __func__); 8433 8431 8434 8432 if (!tcp_pyld_len) { 8435 8433 /* Runt frame or a pure ack */ ··· 8511 8509 8512 8510 if 
((*lro)->tcp_next_seq != ntohl(tcph->seq)) { 8513 8511 DBG_PRINT(INFO_DBG, "%s:Out of order. expected " 8514 - "0x%x, actual 0x%x\n", __FUNCTION__, 8512 + "0x%x, actual 0x%x\n", __func__, 8515 8513 (*lro)->tcp_next_seq, 8516 8514 ntohl(tcph->seq)); 8517 8515 ··· 8551 8549 8552 8550 if (ret == 0) { /* sessions exceeded */ 8553 8551 DBG_PRINT(INFO_DBG,"%s:All LRO sessions already in use\n", 8554 - __FUNCTION__); 8552 + __func__); 8555 8553 *lro = NULL; 8556 8554 return ret; 8557 8555 } ··· 8573 8571 break; 8574 8572 default: 8575 8573 DBG_PRINT(ERR_DBG,"%s:Dont know, can't say!!\n", 8576 - __FUNCTION__); 8574 + __func__); 8577 8575 break; 8578 8576 } 8579 8577 ··· 8594 8592 8595 8593 skb->protocol = eth_type_trans(skb, dev); 8596 8594 if (sp->vlgrp && vlan_tag 8597 - && (vlan_strip_flag)) { 8595 + && (sp->vlan_strip_flag)) { 8598 8596 /* Queueing the vlan frame to the upper layer */ 8599 8597 if (sp->config.napi) 8600 8598 vlan_hwaccel_receive_skb(skb, sp->vlgrp, vlan_tag);
+1
drivers/net/s2io.h
··· 962 962 int task_flag; 963 963 unsigned long long start_time; 964 964 struct vlan_group *vlgrp; 965 + int vlan_strip_flag; 965 966 #define MSIX_FLG 0xA5 966 967 int num_entries; 967 968 struct msix_entry *entries;
+8 -10
drivers/net/sfc/efx.c
··· 445 445 struct efx_channel *channel; 446 446 struct efx_tx_queue *tx_queue; 447 447 struct efx_rx_queue *rx_queue; 448 + int rc; 448 449 449 450 EFX_ASSERT_RESET_SERIALISED(efx); 450 451 BUG_ON(efx->port_enabled); 452 + 453 + rc = falcon_flush_queues(efx); 454 + if (rc) 455 + EFX_ERR(efx, "failed to flush queues\n"); 456 + else 457 + EFX_LOG(efx, "successfully flushed all queues\n"); 451 458 452 459 efx_for_each_channel(channel, efx) { 453 460 EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); ··· 463 456 efx_fini_rx_queue(rx_queue); 464 457 efx_for_each_channel_tx_queue(tx_queue, channel) 465 458 efx_fini_tx_queue(tx_queue); 466 - } 467 - 468 - /* Do the event queues last so that we can handle flush events 469 - * for all DMA queues. */ 470 - efx_for_each_channel(channel, efx) { 471 - EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel); 472 - 473 459 efx_fini_eventq(channel); 474 460 } 475 461 } ··· 780 780 return 0; 781 781 782 782 fail4: 783 - release_mem_region(efx->membase_phys, efx->type->mem_map_size); 783 + pci_release_region(efx->pci_dev, efx->type->mem_bar); 784 784 fail3: 785 785 efx->membase_phys = 0; 786 786 fail2: ··· 1092 1092 1093 1093 /* Isolate the MAC from the TX and RX engines, so that queue 1094 1094 * flushes will complete in a timely fashion. */ 1095 - falcon_deconfigure_mac_wrapper(efx); 1096 1095 falcon_drain_tx_fifo(efx); 1097 1096 1098 1097 /* Stop the kernel transmit interface late, so the watchdog ··· 1749 1750 .check_hw = efx_port_dummy_op_int, 1750 1751 .fini = efx_port_dummy_op_void, 1751 1752 .clear_interrupt = efx_port_dummy_op_void, 1752 - .reset_xaui = efx_port_dummy_op_void, 1753 1753 }; 1754 1754 1755 1755 static struct efx_board efx_dummy_board_info = {
+138 -122
drivers/net/sfc/falcon.c
··· 108 108 /* Max number of internal errors. After this resets will not be performed */ 109 109 #define FALCON_MAX_INT_ERRORS 4 110 110 111 - /* Maximum period that we wait for flush events. If the flush event 112 - * doesn't arrive in this period of time then we check if the queue 113 - * was disabled anyway. */ 114 - #define FALCON_FLUSH_TIMEOUT 10 /* 10ms */ 111 + /* We poll for events every FLUSH_INTERVAL ms, and check FLUSH_POLL_COUNT times 112 + */ 113 + #define FALCON_FLUSH_INTERVAL 10 114 + #define FALCON_FLUSH_POLL_COUNT 100 115 115 116 116 /************************************************************************** 117 117 * ··· 452 452 efx_oword_t tx_desc_ptr; 453 453 struct efx_nic *efx = tx_queue->efx; 454 454 455 + tx_queue->flushed = false; 456 + 455 457 /* Pin TX descriptor ring */ 456 458 falcon_init_special_buffer(efx, &tx_queue->txd); 457 459 ··· 494 492 } 495 493 } 496 494 497 - static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) 495 + static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) 498 496 { 499 497 struct efx_nic *efx = tx_queue->efx; 500 - struct efx_channel *channel = &efx->channel[0]; 501 498 efx_oword_t tx_flush_descq; 502 - unsigned int read_ptr, i; 503 499 504 500 /* Post a flush command */ 505 501 EFX_POPULATE_OWORD_2(tx_flush_descq, 506 502 TX_FLUSH_DESCQ_CMD, 1, 507 503 TX_FLUSH_DESCQ, tx_queue->queue); 508 504 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); 509 - msleep(FALCON_FLUSH_TIMEOUT); 510 - 511 - if (EFX_WORKAROUND_7803(efx)) 512 - return 0; 513 - 514 - /* Look for a flush completed event */ 515 - read_ptr = channel->eventq_read_ptr; 516 - for (i = 0; i < FALCON_EVQ_SIZE; ++i) { 517 - efx_qword_t *event = falcon_event(channel, read_ptr); 518 - int ev_code, ev_sub_code, ev_queue; 519 - if (!falcon_event_present(event)) 520 - break; 521 - 522 - ev_code = EFX_QWORD_FIELD(*event, EV_CODE); 523 - ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 524 - ev_queue = 
EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID); 525 - if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) && 526 - (ev_queue == tx_queue->queue)) { 527 - EFX_LOG(efx, "tx queue %d flush command succesful\n", 528 - tx_queue->queue); 529 - return 0; 530 - } 531 - 532 - read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 533 - } 534 - 535 - if (EFX_WORKAROUND_11557(efx)) { 536 - efx_oword_t reg; 537 - bool enabled; 538 - 539 - falcon_read_table(efx, &reg, efx->type->txd_ptr_tbl_base, 540 - tx_queue->queue); 541 - enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN); 542 - if (!enabled) { 543 - EFX_LOG(efx, "tx queue %d disabled without a " 544 - "flush event seen\n", tx_queue->queue); 545 - return 0; 546 - } 547 - } 548 - 549 - EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue); 550 - return -ETIMEDOUT; 551 505 } 552 506 553 507 void falcon_fini_tx(struct efx_tx_queue *tx_queue) ··· 511 553 struct efx_nic *efx = tx_queue->efx; 512 554 efx_oword_t tx_desc_ptr; 513 555 514 - /* Stop the hardware using the queue */ 515 - if (falcon_flush_tx_queue(tx_queue)) 516 - EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue); 556 + /* The queue should have been flushed */ 557 + WARN_ON(!tx_queue->flushed); 517 558 518 559 /* Remove TX descriptor ring from card */ 519 560 EFX_ZERO_OWORD(tx_desc_ptr); ··· 600 643 rx_queue->queue, rx_queue->rxd.index, 601 644 rx_queue->rxd.index + rx_queue->rxd.entries - 1); 602 645 646 + rx_queue->flushed = false; 647 + 603 648 /* Pin RX descriptor ring */ 604 649 falcon_init_special_buffer(efx, &rx_queue->rxd); 605 650 ··· 622 663 rx_queue->queue); 623 664 } 624 665 625 - static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) 666 + static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) 626 667 { 627 668 struct efx_nic *efx = rx_queue->efx; 628 - struct efx_channel *channel = &efx->channel[0]; 629 - unsigned int read_ptr, i; 630 669 efx_oword_t rx_flush_descq; 631 670 632 671 /* Post a flush command */ ··· 632 675 
RX_FLUSH_DESCQ_CMD, 1, 633 676 RX_FLUSH_DESCQ, rx_queue->queue); 634 677 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); 635 - msleep(FALCON_FLUSH_TIMEOUT); 636 - 637 - if (EFX_WORKAROUND_7803(efx)) 638 - return 0; 639 - 640 - /* Look for a flush completed event */ 641 - read_ptr = channel->eventq_read_ptr; 642 - for (i = 0; i < FALCON_EVQ_SIZE; ++i) { 643 - efx_qword_t *event = falcon_event(channel, read_ptr); 644 - int ev_code, ev_sub_code, ev_queue; 645 - bool ev_failed; 646 - if (!falcon_event_present(event)) 647 - break; 648 - 649 - ev_code = EFX_QWORD_FIELD(*event, EV_CODE); 650 - ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 651 - ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID); 652 - ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL); 653 - 654 - if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) && 655 - (ev_queue == rx_queue->queue)) { 656 - if (ev_failed) { 657 - EFX_INFO(efx, "rx queue %d flush command " 658 - "failed\n", rx_queue->queue); 659 - return -EAGAIN; 660 - } else { 661 - EFX_LOG(efx, "rx queue %d flush command " 662 - "succesful\n", rx_queue->queue); 663 - return 0; 664 - } 665 - } 666 - 667 - read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 668 - } 669 - 670 - if (EFX_WORKAROUND_11557(efx)) { 671 - efx_oword_t reg; 672 - bool enabled; 673 - 674 - falcon_read_table(efx, &reg, efx->type->rxd_ptr_tbl_base, 675 - rx_queue->queue); 676 - enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN); 677 - if (!enabled) { 678 - EFX_LOG(efx, "rx queue %d disabled without a " 679 - "flush event seen\n", rx_queue->queue); 680 - return 0; 681 - } 682 - } 683 - 684 - EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue); 685 - return -ETIMEDOUT; 686 678 } 687 679 688 680 void falcon_fini_rx(struct efx_rx_queue *rx_queue) 689 681 { 690 682 efx_oword_t rx_desc_ptr; 691 683 struct efx_nic *efx = rx_queue->efx; 692 - int i, rc; 693 684 694 - /* Try and flush the rx queue. 
This may need to be repeated */ 695 - for (i = 0; i < 5; i++) { 696 - rc = falcon_flush_rx_queue(rx_queue); 697 - if (rc == -EAGAIN) 698 - continue; 699 - break; 700 - } 701 - if (rc) { 702 - EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue); 703 - efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); 704 - } 685 + /* The queue should already have been flushed */ 686 + WARN_ON(!rx_queue->flushed); 705 687 706 688 /* Remove RX descriptor ring from card */ 707 689 EFX_ZERO_OWORD(rx_desc_ptr); ··· 903 1007 is_phy_event = true; 904 1008 905 1009 if ((falcon_rev(efx) >= FALCON_REV_B0) && 906 - EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 1010 + EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) 907 1011 is_phy_event = true; 908 1012 909 1013 if (is_phy_event) { ··· 1151 1255 falcon_generate_event(channel, &test_event); 1152 1256 } 1153 1257 1258 + /************************************************************************** 1259 + * 1260 + * Flush handling 1261 + * 1262 + **************************************************************************/ 1263 + 1264 + 1265 + static void falcon_poll_flush_events(struct efx_nic *efx) 1266 + { 1267 + struct efx_channel *channel = &efx->channel[0]; 1268 + struct efx_tx_queue *tx_queue; 1269 + struct efx_rx_queue *rx_queue; 1270 + unsigned int read_ptr, i; 1271 + 1272 + read_ptr = channel->eventq_read_ptr; 1273 + for (i = 0; i < FALCON_EVQ_SIZE; ++i) { 1274 + efx_qword_t *event = falcon_event(channel, read_ptr); 1275 + int ev_code, ev_sub_code, ev_queue; 1276 + bool ev_failed; 1277 + if (!falcon_event_present(event)) 1278 + break; 1279 + 1280 + ev_code = EFX_QWORD_FIELD(*event, EV_CODE); 1281 + if (ev_code != DRIVER_EV_DECODE) 1282 + continue; 1283 + 1284 + ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 1285 + switch (ev_sub_code) { 1286 + case TX_DESCQ_FLS_DONE_EV_DECODE: 1287 + ev_queue = EFX_QWORD_FIELD(*event, 1288 + DRIVER_EV_TX_DESCQ_ID); 1289 + if (ev_queue < EFX_TX_QUEUE_COUNT) { 1290 + tx_queue = efx->tx_queue + ev_queue; 
1291 + tx_queue->flushed = true; 1292 + } 1293 + break; 1294 + case RX_DESCQ_FLS_DONE_EV_DECODE: 1295 + ev_queue = EFX_QWORD_FIELD(*event, 1296 + DRIVER_EV_RX_DESCQ_ID); 1297 + ev_failed = EFX_QWORD_FIELD(*event, 1298 + DRIVER_EV_RX_FLUSH_FAIL); 1299 + if (ev_queue < efx->n_rx_queues) { 1300 + rx_queue = efx->rx_queue + ev_queue; 1301 + 1302 + /* retry the rx flush */ 1303 + if (ev_failed) 1304 + falcon_flush_rx_queue(rx_queue); 1305 + else 1306 + rx_queue->flushed = true; 1307 + } 1308 + break; 1309 + } 1310 + 1311 + read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1312 + } 1313 + } 1314 + 1315 + /* Handle tx and rx flushes at the same time, since they run in 1316 + * parallel in the hardware and there's no reason for us to 1317 + * serialise them */ 1318 + int falcon_flush_queues(struct efx_nic *efx) 1319 + { 1320 + struct efx_rx_queue *rx_queue; 1321 + struct efx_tx_queue *tx_queue; 1322 + int i; 1323 + bool outstanding; 1324 + 1325 + /* Issue flush requests */ 1326 + efx_for_each_tx_queue(tx_queue, efx) { 1327 + tx_queue->flushed = false; 1328 + falcon_flush_tx_queue(tx_queue); 1329 + } 1330 + efx_for_each_rx_queue(rx_queue, efx) { 1331 + rx_queue->flushed = false; 1332 + falcon_flush_rx_queue(rx_queue); 1333 + } 1334 + 1335 + /* Poll the evq looking for flush completions. Since we're not pushing 1336 + * any more rx or tx descriptors at this point, we're in no danger of 1337 + * overflowing the evq whilst we wait */ 1338 + for (i = 0; i < FALCON_FLUSH_POLL_COUNT; ++i) { 1339 + msleep(FALCON_FLUSH_INTERVAL); 1340 + falcon_poll_flush_events(efx); 1341 + 1342 + /* Check if every queue has been succesfully flushed */ 1343 + outstanding = false; 1344 + efx_for_each_tx_queue(tx_queue, efx) 1345 + outstanding |= !tx_queue->flushed; 1346 + efx_for_each_rx_queue(rx_queue, efx) 1347 + outstanding |= !rx_queue->flushed; 1348 + if (!outstanding) 1349 + return 0; 1350 + } 1351 + 1352 + /* Mark the queues as all flushed. 
We're going to return failure 1353 + * leading to a reset, or fake up success anyway. "flushed" now 1354 + * indicates that we tried to flush. */ 1355 + efx_for_each_tx_queue(tx_queue, efx) { 1356 + if (!tx_queue->flushed) 1357 + EFX_ERR(efx, "tx queue %d flush command timed out\n", 1358 + tx_queue->queue); 1359 + tx_queue->flushed = true; 1360 + } 1361 + efx_for_each_rx_queue(rx_queue, efx) { 1362 + if (!rx_queue->flushed) 1363 + EFX_ERR(efx, "rx queue %d flush command timed out\n", 1364 + rx_queue->queue); 1365 + rx_queue->flushed = true; 1366 + } 1367 + 1368 + if (EFX_WORKAROUND_7803(efx)) 1369 + return 0; 1370 + 1371 + return -ETIMEDOUT; 1372 + } 1154 1373 1155 1374 /************************************************************************** 1156 1375 * ··· 1374 1363 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); 1375 1364 } 1376 1365 1377 - /* Disable DMA bus mastering on both devices */ 1366 + /* Disable both devices */ 1378 1367 pci_disable_device(efx->pci_dev); 1379 1368 if (FALCON_IS_DUAL_FUNC(efx)) 1380 1369 pci_disable_device(nic_data->pci_dev2); 1370 + falcon_disable_interrupts(efx); 1381 1371 1382 1372 if (++n_int_errors < FALCON_MAX_INT_ERRORS) { 1383 1373 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); ··· 1605 1593 ************************************************************************** 1606 1594 */ 1607 1595 1608 - #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) 1596 + #define FALCON_SPI_MAX_LEN ((unsigned) sizeof(efx_oword_t)) 1609 1597 1610 1598 /* Wait for SPI command completion */ 1611 1599 static int falcon_spi_wait(struct efx_nic *efx) ··· 1954 1942 1955 1943 /* Wait for transfer to complete */ 1956 1944 for (i = 0; i < 400; i++) { 1957 - if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) 1945 + if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) { 1946 + rmb(); /* Ensure the stats are valid. 
*/ 1958 1947 return 0; 1948 + } 1959 1949 udelay(10); 1960 1950 } 1961 1951 ··· 2772 2758 2773 2759 /* Allocate storage for hardware specific data */ 2774 2760 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2761 + if (!nic_data) 2762 + return -ENOMEM; 2775 2763 efx->nic_data = nic_data; 2776 2764 2777 2765 /* Determine number of ports etc. */
+1
drivers/net/sfc/falcon.h
··· 86 86 extern int falcon_probe_nic(struct efx_nic *efx); 87 87 extern int falcon_probe_resources(struct efx_nic *efx); 88 88 extern int falcon_init_nic(struct efx_nic *efx); 89 + extern int falcon_flush_queues(struct efx_nic *efx); 89 90 extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); 90 91 extern void falcon_remove_resources(struct efx_nic *efx); 91 92 extern void falcon_remove_nic(struct efx_nic *efx);
-1
drivers/net/sfc/falcon_hwdefs.h
··· 117 117 #define SF_PRST_WIDTH 1 118 118 #define EE_PRST_LBN 8 119 119 #define EE_PRST_WIDTH 1 120 - /* See pic_mode_t for decoding of this field */ 121 120 /* These bit definitions are extrapolated from the list of numerical 122 121 * values for STRAP_PINS. 123 122 */
-1
drivers/net/sfc/falcon_io.h
··· 13 13 14 14 #include <linux/io.h> 15 15 #include <linux/spinlock.h> 16 - #include "net_driver.h" 17 16 18 17 /************************************************************************** 19 18 *
+1 -87
drivers/net/sfc/falcon_xmac.c
··· 78 78 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG); 79 79 } 80 80 81 - static void falcon_hold_xaui_in_rst(struct efx_nic *efx) 82 - { 83 - efx_oword_t reg; 84 - 85 - EFX_ZERO_OWORD(reg); 86 - EFX_SET_OWORD_FIELD(reg, XX_PWRDNA_EN, 1); 87 - EFX_SET_OWORD_FIELD(reg, XX_PWRDNB_EN, 1); 88 - EFX_SET_OWORD_FIELD(reg, XX_PWRDNC_EN, 1); 89 - EFX_SET_OWORD_FIELD(reg, XX_PWRDND_EN, 1); 90 - EFX_SET_OWORD_FIELD(reg, XX_RSTPLLAB_EN, 1); 91 - EFX_SET_OWORD_FIELD(reg, XX_RSTPLLCD_EN, 1); 92 - EFX_SET_OWORD_FIELD(reg, XX_RESETA_EN, 1); 93 - EFX_SET_OWORD_FIELD(reg, XX_RESETB_EN, 1); 94 - EFX_SET_OWORD_FIELD(reg, XX_RESETC_EN, 1); 95 - EFX_SET_OWORD_FIELD(reg, XX_RESETD_EN, 1); 96 - EFX_SET_OWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1); 97 - EFX_SET_OWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); 98 - falcon_write(efx, &reg, XX_PWR_RST_REG); 99 - udelay(10); 100 - } 101 - 102 - static int _falcon_reset_xaui_a(struct efx_nic *efx) 103 - { 104 - efx_oword_t reg; 105 - 106 - falcon_hold_xaui_in_rst(efx); 107 - falcon_read(efx, &reg, XX_PWR_RST_REG); 108 - 109 - /* Follow the RAMBUS XAUI data reset sequencing 110 - * Channels A and B first: power down, reset PLL, reset, clear 111 - */ 112 - EFX_SET_OWORD_FIELD(reg, XX_PWRDNA_EN, 0); 113 - EFX_SET_OWORD_FIELD(reg, XX_PWRDNB_EN, 0); 114 - falcon_write(efx, &reg, XX_PWR_RST_REG); 115 - udelay(10); 116 - 117 - EFX_SET_OWORD_FIELD(reg, XX_RSTPLLAB_EN, 0); 118 - falcon_write(efx, &reg, XX_PWR_RST_REG); 119 - udelay(10); 120 - 121 - EFX_SET_OWORD_FIELD(reg, XX_RESETA_EN, 0); 122 - EFX_SET_OWORD_FIELD(reg, XX_RESETB_EN, 0); 123 - falcon_write(efx, &reg, XX_PWR_RST_REG); 124 - udelay(10); 125 - 126 - /* Channels C and D: power down, reset PLL, reset, clear */ 127 - EFX_SET_OWORD_FIELD(reg, XX_PWRDNC_EN, 0); 128 - EFX_SET_OWORD_FIELD(reg, XX_PWRDND_EN, 0); 129 - falcon_write(efx, &reg, XX_PWR_RST_REG); 130 - udelay(10); 131 - 132 - EFX_SET_OWORD_FIELD(reg, XX_RSTPLLCD_EN, 0); 133 - falcon_write(efx, &reg, XX_PWR_RST_REG); 134 - udelay(10); 135 - 136 - 
EFX_SET_OWORD_FIELD(reg, XX_RESETC_EN, 0); 137 - EFX_SET_OWORD_FIELD(reg, XX_RESETD_EN, 0); 138 - falcon_write(efx, &reg, XX_PWR_RST_REG); 139 - udelay(10); 140 - 141 - /* Setup XAUI */ 142 - falcon_setup_xaui(efx); 143 - udelay(10); 144 - 145 - /* Take XGXS out of reset */ 146 - EFX_ZERO_OWORD(reg); 147 - falcon_write(efx, &reg, XX_PWR_RST_REG); 148 - udelay(10); 149 - 150 - return 0; 151 - } 152 - 153 - static int _falcon_reset_xaui_b(struct efx_nic *efx) 81 + int falcon_reset_xaui(struct efx_nic *efx) 154 82 { 155 83 efx_oword_t reg; 156 84 int count; ··· 97 169 } 98 170 EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n"); 99 171 return -ETIMEDOUT; 100 - } 101 - 102 - int falcon_reset_xaui(struct efx_nic *efx) 103 - { 104 - int rc; 105 - 106 - if (EFX_WORKAROUND_9388(efx)) { 107 - falcon_hold_xaui_in_rst(efx); 108 - efx->phy_op->reset_xaui(efx); 109 - rc = _falcon_reset_xaui_a(efx); 110 - } else { 111 - rc = _falcon_reset_xaui_b(efx); 112 - } 113 - return rc; 114 172 } 115 173 116 174 static bool falcon_xgmii_status(struct efx_nic *efx)
+5 -3
drivers/net/sfc/net_driver.h
··· 160 160 * @channel: The associated channel 161 161 * @buffer: The software buffer ring 162 162 * @txd: The hardware descriptor ring 163 + * @flushed: Used when handling queue flushing 163 164 * @read_count: Current read pointer. 164 165 * This is the number of buffers that have been removed from both rings. 165 166 * @stopped: Stopped count. ··· 193 192 struct efx_nic *nic; 194 193 struct efx_tx_buffer *buffer; 195 194 struct efx_special_buffer txd; 195 + bool flushed; 196 196 197 197 /* Members used mainly on the completion path */ 198 198 unsigned int read_count ____cacheline_aligned_in_smp; ··· 262 260 * the remaining space in the allocation. 263 261 * @buf_dma_addr: Page's DMA address. 264 262 * @buf_data: Page's host address. 263 + * @flushed: Use when handling queue flushing 265 264 */ 266 265 struct efx_rx_queue { 267 266 struct efx_nic *efx; ··· 288 285 struct page *buf_page; 289 286 dma_addr_t buf_dma_addr; 290 287 char *buf_data; 288 + bool flushed; 291 289 }; 292 290 293 291 /** ··· 474 470 * This is the equivalent of NET_IP_ALIGN [which controls the alignment 475 471 * of the skb->head for hardware DMA]. 476 472 */ 477 - #if defined(__i386__) || defined(__x86_64__) 473 + #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 478 474 #define EFX_PAGE_IP_ALIGN 0 479 475 #else 480 476 #define EFX_PAGE_IP_ALIGN NET_IP_ALIGN ··· 507 503 * @clear_interrupt: Clear down interrupt 508 504 * @blink: Blink LEDs 509 505 * @check_hw: Check hardware 510 - * @reset_xaui: Reset XAUI side of PHY for (software sequenced reset) 511 506 * @mmds: MMD presence mask 512 507 * @loopbacks: Supported loopback modes mask 513 508 */ ··· 516 513 void (*reconfigure) (struct efx_nic *efx); 517 514 void (*clear_interrupt) (struct efx_nic *efx); 518 515 int (*check_hw) (struct efx_nic *efx); 519 - void (*reset_xaui) (struct efx_nic *efx); 520 516 int (*test) (struct efx_nic *efx); 521 517 int mmds; 522 518 unsigned loopbacks;
-12
drivers/net/sfc/sfe4001.c
··· 129 129 unsigned int i, j; 130 130 int rc; 131 131 u8 out; 132 - efx_oword_t reg; 133 - 134 - /* Ensure that XGXS and XAUI SerDes are held in reset */ 135 - EFX_POPULATE_OWORD_7(reg, XX_PWRDNA_EN, 1, 136 - XX_PWRDNB_EN, 1, 137 - XX_RSTPLLAB_EN, 1, 138 - XX_RESETA_EN, 1, 139 - XX_RESETB_EN, 1, 140 - XX_RSTXGXSRX_EN, 1, 141 - XX_RSTXGXSTX_EN, 1); 142 - falcon_write(efx, &reg, XX_PWR_RST_REG); 143 - udelay(10); 144 132 145 133 /* Clear any previous over-temperature alert */ 146 134 rc = i2c_smbus_read_byte_data(hwmon_client, RSL);
+11 -58
drivers/net/sfc/tenxpress.c
··· 146 146 return 0; 147 147 } 148 148 149 - static void tenxpress_reset_xaui(struct efx_nic *efx); 150 - 151 149 static int tenxpress_init(struct efx_nic *efx) 152 150 { 153 151 int rc, reg; ··· 214 216 { 215 217 int rc, reg; 216 218 217 - EFX_TRACE(efx, "%s\n", __func__); 219 + /* The XGMAC clock is driven from the SFC7101/SFT9001 312MHz clock, so 220 + * a special software reset can glitch the XGMAC sufficiently for stats 221 + * requests to fail. Since we don't ofen special_reset, just lock. */ 222 + spin_lock(&efx->stats_lock); 218 223 219 224 /* Initiate reset */ 220 225 reg = mdio_clause45_read(efx, efx->mii.phy_id, ··· 226 225 mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, 227 226 PMA_PMD_EXT_CTRL_REG, reg); 228 227 229 - msleep(200); 228 + mdelay(200); 230 229 231 230 /* Wait for the blocks to come out of reset */ 232 231 rc = mdio_clause45_wait_reset_mmds(efx, 233 232 TENXPRESS_REQUIRED_DEVS); 234 233 if (rc < 0) 235 - return rc; 234 + goto unlock; 236 235 237 236 /* Try and reconfigure the device */ 238 237 rc = tenxpress_init(efx); 239 238 if (rc < 0) 240 - return rc; 239 + goto unlock; 241 240 242 - return 0; 241 + unlock: 242 + spin_unlock(&efx->stats_lock); 243 + return rc; 243 244 } 244 245 245 246 static void tenxpress_set_bad_lp(struct efx_nic *efx, bool bad_lp) ··· 377 374 struct tenxpress_phy_data *phy_data = efx->phy_data; 378 375 bool link_ok; 379 376 380 - link_ok = (phy_data->phy_mode == PHY_MODE_NORMAL && 381 - tenxpress_link_ok(efx, true)); 377 + link_ok = tenxpress_link_ok(efx, true); 382 378 383 379 if (link_ok != efx->link_up) 384 380 falcon_xmac_sim_phy_event(efx); ··· 430 428 PMA_PMD_LED_OVERR_REG, reg); 431 429 } 432 430 433 - static void tenxpress_reset_xaui(struct efx_nic *efx) 434 - { 435 - int phy = efx->mii.phy_id; 436 - int clk_ctrl, test_select, soft_rst2; 437 - 438 - /* Real work is done on clock_ctrl other resets are thought to be 439 - * optional but make the reset more reliable 440 - */ 441 - 442 - /* Read */ 
443 - clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS, 444 - PCS_CLOCK_CTRL_REG); 445 - test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS, 446 - PCS_TEST_SELECT_REG); 447 - soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS, 448 - PCS_SOFT_RST2_REG); 449 - 450 - /* Put in reset */ 451 - test_select &= ~(1 << CLK312_EN_LBN); 452 - mdio_clause45_write(efx, phy, MDIO_MMD_PCS, 453 - PCS_TEST_SELECT_REG, test_select); 454 - 455 - soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN)); 456 - mdio_clause45_write(efx, phy, MDIO_MMD_PCS, 457 - PCS_SOFT_RST2_REG, soft_rst2); 458 - 459 - clk_ctrl &= ~(1 << PLL312_RST_N_LBN); 460 - mdio_clause45_write(efx, phy, MDIO_MMD_PCS, 461 - PCS_CLOCK_CTRL_REG, clk_ctrl); 462 - udelay(10); 463 - 464 - /* Remove reset */ 465 - clk_ctrl |= (1 << PLL312_RST_N_LBN); 466 - mdio_clause45_write(efx, phy, MDIO_MMD_PCS, 467 - PCS_CLOCK_CTRL_REG, clk_ctrl); 468 - udelay(10); 469 - 470 - soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN)); 471 - mdio_clause45_write(efx, phy, MDIO_MMD_PCS, 472 - PCS_SOFT_RST2_REG, soft_rst2); 473 - udelay(10); 474 - 475 - test_select |= (1 << CLK312_EN_LBN); 476 - mdio_clause45_write(efx, phy, MDIO_MMD_PCS, 477 - PCS_TEST_SELECT_REG, test_select); 478 - udelay(10); 479 - } 480 - 481 431 static int tenxpress_phy_test(struct efx_nic *efx) 482 432 { 483 433 /* BIST is automatically run after a special software reset */ ··· 442 488 .check_hw = tenxpress_phy_check_hw, 443 489 .fini = tenxpress_phy_fini, 444 490 .clear_interrupt = tenxpress_phy_clear_interrupt, 445 - .reset_xaui = tenxpress_reset_xaui, 446 491 .test = tenxpress_phy_test, 447 492 .mmds = TENXPRESS_REQUIRED_DEVS, 448 493 .loopbacks = TENXPRESS_LOOPBACKS,
+1 -1
drivers/net/sfc/tx.c
··· 516 516 /* Number of bytes inserted at the start of a TSO header buffer, 517 517 * similar to NET_IP_ALIGN. 518 518 */ 519 - #if defined(__i386__) || defined(__x86_64__) 519 + #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 520 520 #define TSOH_OFFSET 0 521 521 #else 522 522 #define TSOH_OFFSET NET_IP_ALIGN
-2
drivers/net/sfc/workarounds.h
··· 24 24 #define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS 25 25 /* TX pkt parser problem with <= 16 byte TXes */ 26 26 #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS 27 - /* XGXS and XAUI reset sequencing in SW */ 28 - #define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS 29 27 /* Low rate CRC errors require XAUI reset */ 30 28 #define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS 31 29 /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor
-1
drivers/net/sfc/xfp_phy.c
··· 165 165 .check_hw = xfp_phy_check_hw, 166 166 .fini = xfp_phy_fini, 167 167 .clear_interrupt = xfp_phy_clear_interrupt, 168 - .reset_xaui = efx_port_dummy_op_void, 169 168 .mmds = XFP_REQUIRED_DEVS, 170 169 .loopbacks = XFP_LOOPBACKS, 171 170 };
+11 -18
drivers/net/skfp/pmf.c
··· 44 44 int set, int local); 45 45 static int port_to_mib(struct s_smc *smc, int p); 46 46 47 - #define MOFFSS(e) ((int)&(((struct fddi_mib *)0)->e)) 48 - #define MOFFSA(e) ((int) (((struct fddi_mib *)0)->e)) 49 - 50 - #define MOFFMS(e) ((int)&(((struct fddi_mib_m *)0)->e)) 51 - #define MOFFMA(e) ((int) (((struct fddi_mib_m *)0)->e)) 52 - 53 - #define MOFFAS(e) ((int)&(((struct fddi_mib_a *)0)->e)) 54 - #define MOFFAA(e) ((int) (((struct fddi_mib_a *)0)->e)) 55 - 56 - #define MOFFPS(e) ((int)&(((struct fddi_mib_p *)0)->e)) 57 - #define MOFFPA(e) ((int) (((struct fddi_mib_p *)0)->e)) 47 + #define MOFFSS(e) offsetof(struct fddi_mib, e) 48 + #define MOFFMS(e) offsetof(struct fddi_mib_m, e) 49 + #define MOFFAS(e) offsetof(struct fddi_mib_a, e) 50 + #define MOFFPS(e) offsetof(struct fddi_mib_p, e) 58 51 59 52 60 53 #define AC_G 0x01 /* Get */ ··· 80 87 { SMT_P100D,AC_G, MOFFSS(fddiSMTOpVersionId), "S" } , 81 88 { SMT_P100E,AC_G, MOFFSS(fddiSMTHiVersionId), "S" } , 82 89 { SMT_P100F,AC_G, MOFFSS(fddiSMTLoVersionId), "S" } , 83 - { SMT_P1010,AC_G, MOFFSA(fddiSMTManufacturerData), "D" } , 84 - { SMT_P1011,AC_GR, MOFFSA(fddiSMTUserData), "D" } , 90 + { SMT_P1010,AC_G, MOFFSS(fddiSMTManufacturerData), "D" } , 91 + { SMT_P1011,AC_GR, MOFFSS(fddiSMTUserData), "D" } , 85 92 { SMT_P1012,AC_G, MOFFSS(fddiSMTMIBVersionId), "S" } , 86 93 87 94 /* StationConfigGrp */ ··· 96 103 { SMT_P101D,AC_GR, MOFFSS(fddiSMTTT_Notify), "wS" } , 97 104 { SMT_P101E,AC_GR, MOFFSS(fddiSMTStatRptPolicy), "bB" } , 98 105 { SMT_P101F,AC_GR, MOFFSS(fddiSMTTrace_MaxExpiration),"lL" } , 99 - { SMT_P1020,AC_G, MOFFSA(fddiSMTPORTIndexes), "II" } , 106 + { SMT_P1020,AC_G, MOFFSS(fddiSMTPORTIndexes), "II" } , 100 107 { SMT_P1021,AC_G, MOFFSS(fddiSMTMACIndexes), "I" } , 101 108 { SMT_P1022,AC_G, MOFFSS(fddiSMTBypassPresent), "F" } , 102 109 ··· 110 117 111 118 /* MIBOperationGrp */ 112 119 { SMT_P1032,AC_GROUP } , 113 - { SMT_P1033,AC_G, MOFFSA(fddiSMTTimeStamp),"P" } , 114 - { SMT_P1034,AC_G, 
MOFFSA(fddiSMTTransitionTimeStamp),"P" } , 120 + { SMT_P1033,AC_G, MOFFSS(fddiSMTTimeStamp),"P" } , 121 + { SMT_P1034,AC_G, MOFFSS(fddiSMTTransitionTimeStamp),"P" } , 115 122 /* NOTE : SMT_P1035 is already swapped ! SMT_P_SETCOUNT */ 116 123 { SMT_P1035,AC_G, MOFFSS(fddiSMTSetCount),"4P" } , 117 124 { SMT_P1036,AC_G, MOFFSS(fddiSMTLastSetStationId),"8" } , ··· 122 129 * PRIVATE EXTENSIONS 123 130 * only accessible locally to get/set passwd 124 131 */ 125 - { SMT_P10F0,AC_GR, MOFFSA(fddiPRPMFPasswd), "8" } , 132 + { SMT_P10F0,AC_GR, MOFFSS(fddiPRPMFPasswd), "8" } , 126 133 { SMT_P10F1,AC_GR, MOFFSS(fddiPRPMFStation), "8" } , 127 134 #ifdef ESS 128 135 { SMT_P10F2,AC_GR, MOFFSS(fddiESSPayload), "lL" } , ··· 238 245 { SMT_P400E,AC_GR, MOFFPS(fddiPORTConnectionPolicies),"bB" } , 239 246 { SMT_P400F,AC_G, MOFFPS(fddiPORTMacIndicated), "2" } , 240 247 { SMT_P4010,AC_G, MOFFPS(fddiPORTCurrentPath), "E" } , 241 - { SMT_P4011,AC_GR, MOFFPA(fddiPORTRequestedPaths), "l4" } , 248 + { SMT_P4011,AC_GR, MOFFPS(fddiPORTRequestedPaths), "l4" } , 242 249 { SMT_P4012,AC_G, MOFFPS(fddiPORTMACPlacement), "S" } , 243 250 { SMT_P4013,AC_G, MOFFPS(fddiPORTAvailablePaths), "B" } , 244 251 { SMT_P4016,AC_G, MOFFPS(fddiPORTPMDClass), "E" } ,
+33 -35
drivers/net/smc911x.c
··· 183 183 unsigned int reg, timeout=0, resets=1; 184 184 unsigned long flags; 185 185 186 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 186 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 187 187 188 188 /* Take out of PM setting first */ 189 189 if ((SMC_GET_PMT_CTRL(lp) & PMT_CTRL_READY_) == 0) { ··· 272 272 unsigned mask, cfg, cr; 273 273 unsigned long flags; 274 274 275 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 275 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 276 276 277 277 SMC_SET_MAC_ADDR(lp, dev->dev_addr); 278 278 ··· 329 329 unsigned cr; 330 330 unsigned long flags; 331 331 332 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __FUNCTION__); 332 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", CARDNAME, __func__); 333 333 334 334 /* Disable IRQ's */ 335 335 SMC_SET_INT_EN(lp, 0); ··· 348 348 struct smc911x_local *lp = netdev_priv(dev); 349 349 unsigned int fifo_count, timeout, reg; 350 350 351 - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __FUNCTION__); 351 + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", CARDNAME, __func__); 352 352 fifo_count = SMC_GET_RX_FIFO_INF(lp) & 0xFFFF; 353 353 if (fifo_count <= 4) { 354 354 /* Manually dump the packet data */ ··· 382 382 unsigned char *data; 383 383 384 384 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_RX, "%s: --> %s\n", 385 - dev->name, __FUNCTION__); 385 + dev->name, __func__); 386 386 status = SMC_GET_RX_STS_FIFO(lp); 387 387 DBG(SMC_DEBUG_RX, "%s: Rx pkt len %d status 0x%08x \n", 388 388 dev->name, (status & 0x3fff0000) >> 16, status & 0xc000ffff); ··· 460 460 unsigned char *buf; 461 461 unsigned long flags; 462 462 463 - DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __FUNCTION__); 463 + DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", dev->name, __func__); 464 464 BUG_ON(lp->pending_tx_skb == NULL); 465 465 466 466 skb = lp->pending_tx_skb; ··· 524 524 unsigned long flags; 525 525 526 526 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, 
"%s: --> %s\n", 527 - dev->name, __FUNCTION__); 527 + dev->name, __func__); 528 528 529 529 BUG_ON(lp->pending_tx_skb != NULL); 530 530 ··· 596 596 unsigned int tx_status; 597 597 598 598 DBG(SMC_DEBUG_FUNC | SMC_DEBUG_TX, "%s: --> %s\n", 599 - dev->name, __FUNCTION__); 599 + dev->name, __func__); 600 600 601 601 /* Collect the TX status */ 602 602 while (((SMC_GET_TX_FIFO_INF(lp) & TX_FIFO_INF_TSUSED_) >> 16) != 0) { ··· 647 647 SMC_GET_MII(lp, phyreg, phyaddr, phydata); 648 648 649 649 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%02x, phydata=0x%04x\n", 650 - __FUNCTION__, phyaddr, phyreg, phydata); 650 + __func__, phyaddr, phyreg, phydata); 651 651 return phydata; 652 652 } 653 653 ··· 661 661 struct smc911x_local *lp = netdev_priv(dev); 662 662 663 663 DBG(SMC_DEBUG_MISC, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 664 - __FUNCTION__, phyaddr, phyreg, phydata); 664 + __func__, phyaddr, phyreg, phydata); 665 665 666 666 SMC_SET_MII(lp, phyreg, phyaddr, phydata); 667 667 } ··· 676 676 int phyaddr; 677 677 unsigned int cfg, id1, id2; 678 678 679 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 679 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 680 680 681 681 lp->phy_type = 0; 682 682 ··· 746 746 int phyaddr = lp->mii.phy_id; 747 747 int bmcr; 748 748 749 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 749 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 750 750 751 751 /* Enter Link Disable state */ 752 752 SMC_GET_PHY_BMCR(lp, phyaddr, bmcr); ··· 793 793 unsigned long flags; 794 794 unsigned int reg; 795 795 796 - DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 796 + DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__); 797 797 798 798 spin_lock_irqsave(&lp->lock, flags); 799 799 reg = SMC_GET_PMT_CTRL(lp); ··· 852 852 int phyaddr = lp->mii.phy_id; 853 853 unsigned int bmcr, cr; 854 854 855 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 855 + 
DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 856 856 857 857 if (mii_check_media(&lp->mii, netif_msg_link(lp), init)) { 858 858 /* duplex state has changed */ ··· 892 892 int status; 893 893 unsigned long flags; 894 894 895 - DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __FUNCTION__); 895 + DBG(SMC_DEBUG_FUNC, "%s: --> %s()\n", dev->name, __func__); 896 896 897 897 /* 898 898 * We should not be called if phy_type is zero. ··· 985 985 int phyaddr = lp->mii.phy_id; 986 986 int status; 987 987 988 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 988 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 989 989 990 990 if (lp->phy_type == 0) 991 991 return; ··· 1013 1013 unsigned int rx_overrun=0, cr, pkts; 1014 1014 unsigned long flags; 1015 1015 1016 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1016 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1017 1017 1018 1018 spin_lock_irqsave(&lp->lock, flags); 1019 1019 ··· 1174 1174 1175 1175 spin_unlock_irqrestore(&lp->lock, flags); 1176 1176 1177 - DBG(3, "%s: Interrupt done (%d loops)\n", dev->name, 8-timeout); 1178 - 1179 1177 return IRQ_HANDLED; 1180 1178 } 1181 1179 ··· 1186 1188 struct sk_buff *skb = lp->current_tx_skb; 1187 1189 unsigned long flags; 1188 1190 1189 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1191 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1190 1192 1191 1193 DBG(SMC_DEBUG_TX | SMC_DEBUG_DMA, "%s: TX DMA irq handler\n", dev->name); 1192 1194 /* Clear the DMA interrupt sources */ ··· 1222 1224 unsigned long flags; 1223 1225 unsigned int pkts; 1224 1226 1225 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1227 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1226 1228 DBG(SMC_DEBUG_RX | SMC_DEBUG_DMA, "%s: RX DMA irq handler\n", dev->name); 1227 1229 /* Clear the DMA interrupt sources */ 1228 1230 SMC_DMA_ACK_IRQ(dev, dma); ··· 1270 1272 int status, mask; 1271 1273 
unsigned long flags; 1272 1274 1273 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1275 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1274 1276 1275 1277 spin_lock_irqsave(&lp->lock, flags); 1276 1278 status = SMC_GET_INT(lp); ··· 1308 1310 unsigned int mcr, update_multicast = 0; 1309 1311 unsigned long flags; 1310 1312 1311 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1313 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1312 1314 1313 1315 spin_lock_irqsave(&lp->lock, flags); 1314 1316 SMC_GET_MAC_CR(lp, mcr); ··· 1410 1412 { 1411 1413 struct smc911x_local *lp = netdev_priv(dev); 1412 1414 1413 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1415 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1414 1416 1415 1417 /* 1416 1418 * Check that the address is valid. If its not, refuse ··· 1418 1420 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1419 1421 */ 1420 1422 if (!is_valid_ether_addr(dev->dev_addr)) { 1421 - PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); 1423 + PRINTK("%s: no valid ethernet hw addr\n", __func__); 1422 1424 return -EINVAL; 1423 1425 } 1424 1426 ··· 1447 1449 { 1448 1450 struct smc911x_local *lp = netdev_priv(dev); 1449 1451 1450 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1452 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1451 1453 1452 1454 netif_stop_queue(dev); 1453 1455 netif_carrier_off(dev); ··· 1481 1483 int ret, status; 1482 1484 unsigned long flags; 1483 1485 1484 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1486 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1485 1487 cmd->maxtxpkt = 1; 1486 1488 cmd->maxrxpkt = 1; 1487 1489 ··· 1619 1621 for(timeout=10;(e2p_cmd & E2P_CMD_EPC_BUSY_) && timeout; timeout--) { 1620 1622 if (e2p_cmd & E2P_CMD_EPC_TIMEOUT_) { 1621 1623 PRINTK("%s: %s timeout waiting for EEPROM to respond\n", 1622 - dev->name, __FUNCTION__); 1624 + 
dev->name, __func__); 1623 1625 return -EFAULT; 1624 1626 } 1625 1627 mdelay(1); ··· 1627 1629 } 1628 1630 if (timeout == 0) { 1629 1631 PRINTK("%s: %s timeout waiting for EEPROM CMD not busy\n", 1630 - dev->name, __FUNCTION__); 1632 + dev->name, __func__); 1631 1633 return -ETIMEDOUT; 1632 1634 } 1633 1635 return 0; ··· 1740 1742 int timeout = 20; 1741 1743 unsigned long cookie; 1742 1744 1743 - DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 1745 + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); 1744 1746 1745 1747 cookie = probe_irq_on(); 1746 1748 ··· 1806 1808 const char *version_string; 1807 1809 unsigned long irq_flags; 1808 1810 1809 - DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __FUNCTION__); 1811 + DBG(SMC_DEBUG_FUNC, "%s: --> %s\n", dev->name, __func__); 1810 1812 1811 1813 /* First, see if the endian word is recognized */ 1812 1814 val = SMC_GET_BYTE_TEST(lp); ··· 2056 2058 unsigned int *addr; 2057 2059 int ret; 2058 2060 2059 - DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2061 + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); 2060 2062 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2061 2063 if (!res) { 2062 2064 ret = -ENODEV; ··· 2127 2129 struct smc911x_local *lp = netdev_priv(ndev); 2128 2130 struct resource *res; 2129 2131 2130 - DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2132 + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); 2131 2133 platform_set_drvdata(pdev, NULL); 2132 2134 2133 2135 unregister_netdev(ndev); ··· 2157 2159 struct net_device *ndev = platform_get_drvdata(dev); 2158 2160 struct smc911x_local *lp = netdev_priv(ndev); 2159 2161 2160 - DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2162 + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); 2161 2163 if (ndev) { 2162 2164 if (netif_running(ndev)) { 2163 2165 netif_device_detach(ndev); ··· 2175 2177 { 2176 2178 struct net_device *ndev = platform_get_drvdata(dev); 2177 2179 2178 - DBG(SMC_DEBUG_FUNC, "--> %s\n", __FUNCTION__); 2180 + DBG(SMC_DEBUG_FUNC, "--> %s\n", __func__); 2179 2181 if 
(ndev) { 2180 2182 struct smc911x_local *lp = netdev_priv(ndev); 2181 2183
+22 -21
drivers/net/smc91x.c
··· 270 270 unsigned int ctl, cfg; 271 271 struct sk_buff *pending_skb; 272 272 273 - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 273 + DBG(2, "%s: %s\n", dev->name, __func__); 274 274 275 275 /* Disable all interrupts, block TX tasklet */ 276 276 spin_lock_irq(&lp->lock); ··· 363 363 void __iomem *ioaddr = lp->base; 364 364 int mask; 365 365 366 - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 366 + DBG(2, "%s: %s\n", dev->name, __func__); 367 367 368 368 /* see the header file for options in TCR/RCR DEFAULT */ 369 369 SMC_SELECT_BANK(lp, 0); ··· 397 397 void __iomem *ioaddr = lp->base; 398 398 struct sk_buff *pending_skb; 399 399 400 - DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 400 + DBG(2, "%s: %s\n", CARDNAME, __func__); 401 401 402 402 /* no more interrupts for me */ 403 403 spin_lock_irq(&lp->lock); ··· 430 430 void __iomem *ioaddr = lp->base; 431 431 unsigned int packet_number, status, packet_len; 432 432 433 - DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 433 + DBG(3, "%s: %s\n", dev->name, __func__); 434 434 435 435 packet_number = SMC_GET_RXFIFO(lp); 436 436 if (unlikely(packet_number & RXFIFO_REMPTY)) { ··· 577 577 unsigned int packet_no, len; 578 578 unsigned char *buf; 579 579 580 - DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 580 + DBG(3, "%s: %s\n", dev->name, __func__); 581 581 582 582 if (!smc_special_trylock(&lp->lock)) { 583 583 netif_stop_queue(dev); ··· 662 662 void __iomem *ioaddr = lp->base; 663 663 unsigned int numPages, poll_count, status; 664 664 665 - DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 665 + DBG(3, "%s: %s\n", dev->name, __func__); 666 666 667 667 BUG_ON(lp->pending_tx_skb != NULL); 668 668 ··· 734 734 void __iomem *ioaddr = lp->base; 735 735 unsigned int saved_packet, packet_no, tx_status, pkt_len; 736 736 737 - DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 737 + DBG(3, "%s: %s\n", dev->name, __func__); 738 738 739 739 /* If the TX FIFO is empty then nothing to do */ 740 740 packet_no = SMC_GET_TXFIFO(lp); ··· 856 856 
SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); 857 857 858 858 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 859 - __FUNCTION__, phyaddr, phyreg, phydata); 859 + __func__, phyaddr, phyreg, phydata); 860 860 861 861 SMC_SELECT_BANK(lp, 2); 862 862 return phydata; ··· 883 883 SMC_SET_MII(lp, SMC_GET_MII(lp) & ~(MII_MCLK|MII_MDOE|MII_MDO)); 884 884 885 885 DBG(3, "%s: phyaddr=0x%x, phyreg=0x%x, phydata=0x%x\n", 886 - __FUNCTION__, phyaddr, phyreg, phydata); 886 + __func__, phyaddr, phyreg, phydata); 887 887 888 888 SMC_SELECT_BANK(lp, 2); 889 889 } ··· 896 896 struct smc_local *lp = netdev_priv(dev); 897 897 int phyaddr; 898 898 899 - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 899 + DBG(2, "%s: %s\n", dev->name, __func__); 900 900 901 901 lp->phy_type = 0; 902 902 ··· 935 935 int phyaddr = lp->mii.phy_id; 936 936 int bmcr, cfg1; 937 937 938 - DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 938 + DBG(3, "%s: %s\n", dev->name, __func__); 939 939 940 940 /* Enter Link Disable state */ 941 941 cfg1 = smc_phy_read(dev, phyaddr, PHY_CFG1_REG); ··· 1168 1168 int phyaddr = lp->mii.phy_id; 1169 1169 int phy18; 1170 1170 1171 - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1171 + DBG(2, "%s: %s\n", dev->name, __func__); 1172 1172 1173 1173 if (lp->phy_type == 0) 1174 1174 return; ··· 1236 1236 int status, mask, timeout, card_stats; 1237 1237 int saved_pointer; 1238 1238 1239 - DBG(3, "%s: %s\n", dev->name, __FUNCTION__); 1239 + DBG(3, "%s: %s\n", dev->name, __func__); 1240 1240 1241 1241 spin_lock(&lp->lock); 1242 1242 ··· 1358 1358 void __iomem *ioaddr = lp->base; 1359 1359 int status, mask, eph_st, meminfo, fifo; 1360 1360 1361 - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1361 + DBG(2, "%s: %s\n", dev->name, __func__); 1362 1362 1363 1363 spin_lock_irq(&lp->lock); 1364 1364 status = SMC_GET_INT(lp); ··· 1402 1402 unsigned char multicast_table[8]; 1403 1403 int update_multicast = 0; 1404 1404 1405 - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1405 
+ DBG(2, "%s: %s\n", dev->name, __func__); 1406 1406 1407 1407 if (dev->flags & IFF_PROMISC) { 1408 1408 DBG(2, "%s: RCR_PRMS\n", dev->name); ··· 1505 1505 { 1506 1506 struct smc_local *lp = netdev_priv(dev); 1507 1507 1508 - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1508 + DBG(2, "%s: %s\n", dev->name, __func__); 1509 1509 1510 1510 /* 1511 1511 * Check that the address is valid. If its not, refuse ··· 1513 1513 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1514 1514 */ 1515 1515 if (!is_valid_ether_addr(dev->dev_addr)) { 1516 - PRINTK("%s: no valid ethernet hw addr\n", __FUNCTION__); 1516 + PRINTK("%s: no valid ethernet hw addr\n", __func__); 1517 1517 return -EINVAL; 1518 1518 } 1519 1519 ··· 1557 1557 { 1558 1558 struct smc_local *lp = netdev_priv(dev); 1559 1559 1560 - DBG(2, "%s: %s\n", dev->name, __FUNCTION__); 1560 + DBG(2, "%s: %s\n", dev->name, __func__); 1561 1561 1562 1562 netif_stop_queue(dev); 1563 1563 netif_carrier_off(dev); ··· 1700 1700 int timeout = 20; 1701 1701 unsigned long cookie; 1702 1702 1703 - DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1703 + DBG(2, "%s: %s\n", CARDNAME, __func__); 1704 1704 1705 1705 cookie = probe_irq_on(); 1706 1706 ··· 1778 1778 const char *version_string; 1779 1779 DECLARE_MAC_BUF(mac); 1780 1780 1781 - DBG(2, "%s: %s\n", CARDNAME, __FUNCTION__); 1781 + DBG(2, "%s: %s\n", CARDNAME, __func__); 1782 1782 1783 1783 /* First, see if the high byte is 0x33 */ 1784 1784 val = SMC_CURRENT_BANK(lp); ··· 1961 1961 if (dev->dma != (unsigned char)-1) 1962 1962 printk(" DMA %d", dev->dma); 1963 1963 1964 - printk("%s%s\n", nowait ? " [nowait]" : "", 1964 + printk("%s%s\n", 1965 + lp->cfg.flags & SMC91X_NOWAIT ? " [nowait]" : "", 1965 1966 THROTTLE_TX_PKTS ? " [throttle_tx]" : ""); 1966 1967 1967 1968 if (!is_valid_ether_addr(dev->dev_addr)) {
+2
drivers/net/smc91x.h
··· 446 446 #define SMC_CAN_USE_32BIT 1 447 447 #define SMC_NOWAIT 1 448 448 449 + #define SMC_IO_SHIFT (lp->io_shift) 450 + 449 451 #define SMC_inb(a, r) readb((a) + (r)) 450 452 #define SMC_inw(a, r) readw((a) + (r)) 451 453 #define SMC_inl(a, r) readl((a) + (r))
+62 -33
drivers/net/sundance.c
··· 409 409 static int eeprom_read(void __iomem *ioaddr, int location); 410 410 static int mdio_read(struct net_device *dev, int phy_id, int location); 411 411 static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 412 + static int mdio_wait_link(struct net_device *dev, int wait); 412 413 static int netdev_open(struct net_device *dev); 413 414 static void check_duplex(struct net_device *dev); 414 415 static void netdev_timer(unsigned long data); ··· 784 783 mdio_delay(); 785 784 } 786 785 return; 786 + } 787 + 788 + static int mdio_wait_link(struct net_device *dev, int wait) 789 + { 790 + int bmsr; 791 + int phy_id; 792 + struct netdev_private *np; 793 + 794 + np = netdev_priv(dev); 795 + phy_id = np->phys[0]; 796 + 797 + do { 798 + bmsr = mdio_read(dev, phy_id, MII_BMSR); 799 + if (bmsr & 0x0004) 800 + return 0; 801 + mdelay(1); 802 + } while (--wait > 0); 803 + return -1; 787 804 } 788 805 789 806 static int netdev_open(struct net_device *dev) ··· 1412 1393 int speed; 1413 1394 1414 1395 if (intr_status & LinkChange) { 1415 - if (np->an_enable) { 1416 - mii_advertise = mdio_read (dev, np->phys[0], MII_ADVERTISE); 1417 - mii_lpa= mdio_read (dev, np->phys[0], MII_LPA); 1418 - mii_advertise &= mii_lpa; 1419 - printk (KERN_INFO "%s: Link changed: ", dev->name); 1420 - if (mii_advertise & ADVERTISE_100FULL) { 1421 - np->speed = 100; 1422 - printk ("100Mbps, full duplex\n"); 1423 - } else if (mii_advertise & ADVERTISE_100HALF) { 1424 - np->speed = 100; 1425 - printk ("100Mbps, half duplex\n"); 1426 - } else if (mii_advertise & ADVERTISE_10FULL) { 1427 - np->speed = 10; 1428 - printk ("10Mbps, full duplex\n"); 1429 - } else if (mii_advertise & ADVERTISE_10HALF) { 1430 - np->speed = 10; 1431 - printk ("10Mbps, half duplex\n"); 1432 - } else 1433 - printk ("\n"); 1396 + if (mdio_wait_link(dev, 10) == 0) { 1397 + printk(KERN_INFO "%s: Link up\n", dev->name); 1398 + if (np->an_enable) { 1399 + mii_advertise = mdio_read(dev, np->phys[0], 1400 + 
MII_ADVERTISE); 1401 + mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); 1402 + mii_advertise &= mii_lpa; 1403 + printk(KERN_INFO "%s: Link changed: ", 1404 + dev->name); 1405 + if (mii_advertise & ADVERTISE_100FULL) { 1406 + np->speed = 100; 1407 + printk("100Mbps, full duplex\n"); 1408 + } else if (mii_advertise & ADVERTISE_100HALF) { 1409 + np->speed = 100; 1410 + printk("100Mbps, half duplex\n"); 1411 + } else if (mii_advertise & ADVERTISE_10FULL) { 1412 + np->speed = 10; 1413 + printk("10Mbps, full duplex\n"); 1414 + } else if (mii_advertise & ADVERTISE_10HALF) { 1415 + np->speed = 10; 1416 + printk("10Mbps, half duplex\n"); 1417 + } else 1418 + printk("\n"); 1434 1419 1420 + } else { 1421 + mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); 1422 + speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; 1423 + np->speed = speed; 1424 + printk(KERN_INFO "%s: Link changed: %dMbps ,", 1425 + dev->name, speed); 1426 + printk("%s duplex.\n", 1427 + (mii_ctl & BMCR_FULLDPLX) ? 1428 + "full" : "half"); 1429 + } 1430 + check_duplex(dev); 1431 + if (np->flowctrl && np->mii_if.full_duplex) { 1432 + iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200, 1433 + ioaddr + MulticastFilter1+2); 1434 + iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl, 1435 + ioaddr + MACCtrl0); 1436 + } 1437 + netif_carrier_on(dev); 1435 1438 } else { 1436 - mii_ctl = mdio_read (dev, np->phys[0], MII_BMCR); 1437 - speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; 1438 - np->speed = speed; 1439 - printk (KERN_INFO "%s: Link changed: %dMbps ,", 1440 - dev->name, speed); 1441 - printk ("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ? 
1442 - "full" : "half"); 1443 - } 1444 - check_duplex (dev); 1445 - if (np->flowctrl && np->mii_if.full_duplex) { 1446 - iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200, 1447 - ioaddr + MulticastFilter1+2); 1448 - iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl, 1449 - ioaddr + MACCtrl0); 1439 + printk(KERN_INFO "%s: Link down\n", dev->name); 1440 + netif_carrier_off(dev); 1450 1441 } 1451 1442 } 1452 1443 if (intr_status & StatsMax) {
+4 -4
drivers/net/tehuti.h
··· 539 539 540 540 #define ERR(fmt, args...) printk(KERN_ERR fmt, ## args) 541 541 #define DBG2(fmt, args...) \ 542 - printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) 542 + printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args) 543 543 544 544 #define BDX_ASSERT(x) BUG_ON(x) 545 545 546 546 #ifdef DEBUG 547 547 548 548 #define ENTER do { \ 549 - printk(KERN_ERR "%s:%-5d: ENTER\n", __FUNCTION__, __LINE__); \ 549 + printk(KERN_ERR "%s:%-5d: ENTER\n", __func__, __LINE__); \ 550 550 } while (0) 551 551 552 552 #define RET(args...) do { \ 553 - printk(KERN_ERR "%s:%-5d: RETURN\n", __FUNCTION__, __LINE__); \ 553 + printk(KERN_ERR "%s:%-5d: RETURN\n", __func__, __LINE__); \ 554 554 return args; } while (0) 555 555 556 556 #define DBG(fmt, args...) \ 557 - printk(KERN_ERR "%s:%-5d: " fmt, __FUNCTION__, __LINE__, ## args) 557 + printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args) 558 558 #else 559 559 #define ENTER do { } while (0) 560 560 #define RET(args...) return args
+3 -3
drivers/net/tsi108_eth.c
··· 263 263 return; 264 264 udelay(10); 265 265 } 266 - printk(KERN_ERR "%s function time out \n", __FUNCTION__); 266 + printk(KERN_ERR "%s function time out \n", __func__); 267 267 } 268 268 269 269 static int mii_speed(struct mii_if_info *mii) ··· 1059 1059 return; 1060 1060 udelay(10); 1061 1061 } 1062 - printk(KERN_ERR "%s function time out \n", __FUNCTION__); 1062 + printk(KERN_ERR "%s function time out \n", __func__); 1063 1063 } 1064 1064 1065 1065 static void tsi108_reset_ether(struct tsi108_prv_data * data) ··· 1244 1244 udelay(10); 1245 1245 } 1246 1246 if (i == 0) 1247 - printk(KERN_ERR "%s function time out \n", __FUNCTION__); 1247 + printk(KERN_ERR "%s function time out \n", __func__); 1248 1248 1249 1249 if (data->phy_type == TSI108_PHY_BCM54XX) { 1250 1250 tsi108_write_mii(data, 0x09, 0x0300);
-1
drivers/net/tulip/de2104x.c
··· 1418 1418 1419 1419 de_free_rings(de); 1420 1420 de_adapter_sleep(de); 1421 - pci_disable_device(de->pdev); 1422 1421 return 0; 1423 1422 } 1424 1423
+58 -58
drivers/net/ucc_geth.c
··· 400 400 enet_addr_cont = kmalloc(sizeof(struct enet_addr_container), GFP_KERNEL); 401 401 if (!enet_addr_cont) { 402 402 ugeth_err("%s: No memory for enet_addr_container object.", 403 - __FUNCTION__); 403 + __func__); 404 404 return NULL; 405 405 } 406 406 ··· 427 427 struct ucc_geth_82xx_address_filtering_pram *p_82xx_addr_filt; 428 428 429 429 if (!(paddr_num < NUM_OF_PADDRS)) { 430 - ugeth_warn("%s: Illegal paddr_num.", __FUNCTION__); 430 + ugeth_warn("%s: Illegal paddr_num.", __func__); 431 431 return -EINVAL; 432 432 } 433 433 ··· 447 447 struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt; 448 448 449 449 if (!(paddr_num < NUM_OF_PADDRS)) { 450 - ugeth_warn("%s: Illagel paddr_num.", __FUNCTION__); 450 + ugeth_warn("%s: Illagel paddr_num.", __func__); 451 451 return -EINVAL; 452 452 } 453 453 ··· 1441 1441 u32 upsmr, maccfg2, tbiBaseAddress; 1442 1442 u16 value; 1443 1443 1444 - ugeth_vdbg("%s: IN", __FUNCTION__); 1444 + ugeth_vdbg("%s: IN", __func__); 1445 1445 1446 1446 ug_info = ugeth->ug_info; 1447 1447 ug_regs = ugeth->ug_regs; ··· 1504 1504 if (ret_val != 0) { 1505 1505 if (netif_msg_probe(ugeth)) 1506 1506 ugeth_err("%s: Preamble length must be between 3 and 7 inclusive.", 1507 - __FUNCTION__); 1507 + __func__); 1508 1508 return ret_val; 1509 1509 } 1510 1510 ··· 1744 1744 /* check if the UCC number is in range. */ 1745 1745 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1746 1746 if (netif_msg_probe(ugeth)) 1747 - ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1747 + ugeth_err("%s: ucc_num out of range.", __func__); 1748 1748 return -EINVAL; 1749 1749 } 1750 1750 ··· 1773 1773 /* check if the UCC number is in range. 
*/ 1774 1774 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) { 1775 1775 if (netif_msg_probe(ugeth)) 1776 - ugeth_err("%s: ucc_num out of range.", __FUNCTION__); 1776 + ugeth_err("%s: ucc_num out of range.", __func__); 1777 1777 return -EINVAL; 1778 1778 } 1779 1779 ··· 2062 2062 ugeth_warn 2063 2063 ("%s: multicast address added to paddr will have no " 2064 2064 "effect - is this what you wanted?", 2065 - __FUNCTION__); 2065 + __func__); 2066 2066 2067 2067 ugeth->indAddrRegUsed[paddr_num] = 1; /* mark this paddr as used */ 2068 2068 /* store address in our database */ ··· 2278 2278 struct phy_device *phydev = ugeth->phydev; 2279 2279 u32 tempval; 2280 2280 2281 - ugeth_vdbg("%s: IN", __FUNCTION__); 2281 + ugeth_vdbg("%s: IN", __func__); 2282 2282 2283 2283 /* Disable the controller */ 2284 2284 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); ··· 2315 2315 (uf_info->bd_mem_part == MEM_PART_MURAM))) { 2316 2316 if (netif_msg_probe(ugeth)) 2317 2317 ugeth_err("%s: Bad memory partition value.", 2318 - __FUNCTION__); 2318 + __func__); 2319 2319 return -EINVAL; 2320 2320 } 2321 2321 ··· 2327 2327 if (netif_msg_probe(ugeth)) 2328 2328 ugeth_err 2329 2329 ("%s: Rx BD ring length must be multiple of 4, no smaller than 8.", 2330 - __FUNCTION__); 2330 + __func__); 2331 2331 return -EINVAL; 2332 2332 } 2333 2333 } ··· 2338 2338 if (netif_msg_probe(ugeth)) 2339 2339 ugeth_err 2340 2340 ("%s: Tx BD ring length must be no smaller than 2.", 2341 - __FUNCTION__); 2341 + __func__); 2342 2342 return -EINVAL; 2343 2343 } 2344 2344 } ··· 2349 2349 if (netif_msg_probe(ugeth)) 2350 2350 ugeth_err 2351 2351 ("%s: max_rx_buf_length must be non-zero multiple of 128.", 2352 - __FUNCTION__); 2352 + __func__); 2353 2353 return -EINVAL; 2354 2354 } 2355 2355 2356 2356 /* num Tx queues */ 2357 2357 if (ug_info->numQueuesTx > NUM_TX_QUEUES) { 2358 2358 if (netif_msg_probe(ugeth)) 2359 - ugeth_err("%s: number of tx queues too large.", __FUNCTION__); 2359 + ugeth_err("%s: number of tx queues too 
large.", __func__); 2360 2360 return -EINVAL; 2361 2361 } 2362 2362 2363 2363 /* num Rx queues */ 2364 2364 if (ug_info->numQueuesRx > NUM_RX_QUEUES) { 2365 2365 if (netif_msg_probe(ugeth)) 2366 - ugeth_err("%s: number of rx queues too large.", __FUNCTION__); 2366 + ugeth_err("%s: number of rx queues too large.", __func__); 2367 2367 return -EINVAL; 2368 2368 } 2369 2369 ··· 2374 2374 ugeth_err 2375 2375 ("%s: VLAN priority table entry must not be" 2376 2376 " larger than number of Rx queues.", 2377 - __FUNCTION__); 2377 + __func__); 2378 2378 return -EINVAL; 2379 2379 } 2380 2380 } ··· 2386 2386 ugeth_err 2387 2387 ("%s: IP priority table entry must not be" 2388 2388 " larger than number of Rx queues.", 2389 - __FUNCTION__); 2389 + __func__); 2390 2390 return -EINVAL; 2391 2391 } 2392 2392 } ··· 2394 2394 if (ug_info->cam && !ug_info->ecamptr) { 2395 2395 if (netif_msg_probe(ugeth)) 2396 2396 ugeth_err("%s: If cam mode is chosen, must supply cam ptr.", 2397 - __FUNCTION__); 2397 + __func__); 2398 2398 return -EINVAL; 2399 2399 } 2400 2400 ··· 2404 2404 if (netif_msg_probe(ugeth)) 2405 2405 ugeth_err("%s: Number of station addresses greater than 1 " 2406 2406 "not allowed in extended parsing mode.", 2407 - __FUNCTION__); 2407 + __func__); 2408 2408 return -EINVAL; 2409 2409 } 2410 2410 ··· 2418 2418 /* Initialize the general fast UCC block. 
*/ 2419 2419 if (ucc_fast_init(uf_info, &ugeth->uccf)) { 2420 2420 if (netif_msg_probe(ugeth)) 2421 - ugeth_err("%s: Failed to init uccf.", __FUNCTION__); 2421 + ugeth_err("%s: Failed to init uccf.", __func__); 2422 2422 ucc_geth_memclean(ugeth); 2423 2423 return -ENOMEM; 2424 2424 } ··· 2448 2448 u8 __iomem *endOfRing; 2449 2449 u8 numThreadsRxNumerical, numThreadsTxNumerical; 2450 2450 2451 - ugeth_vdbg("%s: IN", __FUNCTION__); 2451 + ugeth_vdbg("%s: IN", __func__); 2452 2452 uccf = ugeth->uccf; 2453 2453 ug_info = ugeth->ug_info; 2454 2454 uf_info = &ug_info->uf_info; ··· 2474 2474 default: 2475 2475 if (netif_msg_ifup(ugeth)) 2476 2476 ugeth_err("%s: Bad number of Rx threads value.", 2477 - __FUNCTION__); 2477 + __func__); 2478 2478 ucc_geth_memclean(ugeth); 2479 2479 return -EINVAL; 2480 2480 break; ··· 2499 2499 default: 2500 2500 if (netif_msg_ifup(ugeth)) 2501 2501 ugeth_err("%s: Bad number of Tx threads value.", 2502 - __FUNCTION__); 2502 + __func__); 2503 2503 ucc_geth_memclean(ugeth); 2504 2504 return -EINVAL; 2505 2505 break; ··· 2553 2553 if (ret_val != 0) { 2554 2554 if (netif_msg_ifup(ugeth)) 2555 2555 ugeth_err("%s: IPGIFG initialization parameter too large.", 2556 - __FUNCTION__); 2556 + __func__); 2557 2557 ucc_geth_memclean(ugeth); 2558 2558 return ret_val; 2559 2559 } ··· 2571 2571 if (ret_val != 0) { 2572 2572 if (netif_msg_ifup(ugeth)) 2573 2573 ugeth_err("%s: Half Duplex initialization parameter too large.", 2574 - __FUNCTION__); 2574 + __func__); 2575 2575 ucc_geth_memclean(ugeth); 2576 2576 return ret_val; 2577 2577 } ··· 2626 2626 if (netif_msg_ifup(ugeth)) 2627 2627 ugeth_err 2628 2628 ("%s: Can not allocate memory for Tx bd rings.", 2629 - __FUNCTION__); 2629 + __func__); 2630 2630 ucc_geth_memclean(ugeth); 2631 2631 return -ENOMEM; 2632 2632 } ··· 2662 2662 if (netif_msg_ifup(ugeth)) 2663 2663 ugeth_err 2664 2664 ("%s: Can not allocate memory for Rx bd rings.", 2665 - __FUNCTION__); 2665 + __func__); 2666 2666 ucc_geth_memclean(ugeth); 
2667 2667 return -ENOMEM; 2668 2668 } ··· 2678 2678 if (ugeth->tx_skbuff[j] == NULL) { 2679 2679 if (netif_msg_ifup(ugeth)) 2680 2680 ugeth_err("%s: Could not allocate tx_skbuff", 2681 - __FUNCTION__); 2681 + __func__); 2682 2682 ucc_geth_memclean(ugeth); 2683 2683 return -ENOMEM; 2684 2684 } ··· 2710 2710 if (ugeth->rx_skbuff[j] == NULL) { 2711 2711 if (netif_msg_ifup(ugeth)) 2712 2712 ugeth_err("%s: Could not allocate rx_skbuff", 2713 - __FUNCTION__); 2713 + __func__); 2714 2714 ucc_geth_memclean(ugeth); 2715 2715 return -ENOMEM; 2716 2716 } ··· 2744 2744 if (netif_msg_ifup(ugeth)) 2745 2745 ugeth_err 2746 2746 ("%s: Can not allocate DPRAM memory for p_tx_glbl_pram.", 2747 - __FUNCTION__); 2747 + __func__); 2748 2748 ucc_geth_memclean(ugeth); 2749 2749 return -ENOMEM; 2750 2750 } ··· 2767 2767 if (netif_msg_ifup(ugeth)) 2768 2768 ugeth_err 2769 2769 ("%s: Can not allocate DPRAM memory for p_thread_data_tx.", 2770 - __FUNCTION__); 2770 + __func__); 2771 2771 ucc_geth_memclean(ugeth); 2772 2772 return -ENOMEM; 2773 2773 } ··· 2797 2797 if (netif_msg_ifup(ugeth)) 2798 2798 ugeth_err 2799 2799 ("%s: Can not allocate DPRAM memory for p_send_q_mem_reg.", 2800 - __FUNCTION__); 2800 + __func__); 2801 2801 ucc_geth_memclean(ugeth); 2802 2802 return -ENOMEM; 2803 2803 } ··· 2841 2841 if (netif_msg_ifup(ugeth)) 2842 2842 ugeth_err 2843 2843 ("%s: Can not allocate DPRAM memory for p_scheduler.", 2844 - __FUNCTION__); 2844 + __func__); 2845 2845 ucc_geth_memclean(ugeth); 2846 2846 return -ENOMEM; 2847 2847 } ··· 2892 2892 ugeth_err 2893 2893 ("%s: Can not allocate DPRAM memory for" 2894 2894 " p_tx_fw_statistics_pram.", 2895 - __FUNCTION__); 2895 + __func__); 2896 2896 ucc_geth_memclean(ugeth); 2897 2897 return -ENOMEM; 2898 2898 } ··· 2932 2932 if (netif_msg_ifup(ugeth)) 2933 2933 ugeth_err 2934 2934 ("%s: Can not allocate DPRAM memory for p_rx_glbl_pram.", 2935 - __FUNCTION__); 2935 + __func__); 2936 2936 ucc_geth_memclean(ugeth); 2937 2937 return -ENOMEM; 2938 2938 } ··· 
2954 2954 if (netif_msg_ifup(ugeth)) 2955 2955 ugeth_err 2956 2956 ("%s: Can not allocate DPRAM memory for p_thread_data_rx.", 2957 - __FUNCTION__); 2957 + __func__); 2958 2958 ucc_geth_memclean(ugeth); 2959 2959 return -ENOMEM; 2960 2960 } ··· 2978 2978 if (netif_msg_ifup(ugeth)) 2979 2979 ugeth_err 2980 2980 ("%s: Can not allocate DPRAM memory for" 2981 - " p_rx_fw_statistics_pram.", __FUNCTION__); 2981 + " p_rx_fw_statistics_pram.", __func__); 2982 2982 ucc_geth_memclean(ugeth); 2983 2983 return -ENOMEM; 2984 2984 } ··· 3001 3001 if (netif_msg_ifup(ugeth)) 3002 3002 ugeth_err 3003 3003 ("%s: Can not allocate DPRAM memory for" 3004 - " p_rx_irq_coalescing_tbl.", __FUNCTION__); 3004 + " p_rx_irq_coalescing_tbl.", __func__); 3005 3005 ucc_geth_memclean(ugeth); 3006 3006 return -ENOMEM; 3007 3007 } ··· 3070 3070 if (netif_msg_ifup(ugeth)) 3071 3071 ugeth_err 3072 3072 ("%s: Can not allocate DPRAM memory for p_rx_bd_qs_tbl.", 3073 - __FUNCTION__); 3073 + __func__); 3074 3074 ucc_geth_memclean(ugeth); 3075 3075 return -ENOMEM; 3076 3076 } ··· 3147 3147 if (!ug_info->extendedFilteringChainPointer) { 3148 3148 if (netif_msg_ifup(ugeth)) 3149 3149 ugeth_err("%s: Null Extended Filtering Chain Pointer.", 3150 - __FUNCTION__); 3150 + __func__); 3151 3151 ucc_geth_memclean(ugeth); 3152 3152 return -EINVAL; 3153 3153 } ··· 3161 3161 if (netif_msg_ifup(ugeth)) 3162 3162 ugeth_err 3163 3163 ("%s: Can not allocate DPRAM memory for" 3164 - " p_exf_glbl_param.", __FUNCTION__); 3164 + " p_exf_glbl_param.", __func__); 3165 3165 ucc_geth_memclean(ugeth); 3166 3166 return -ENOMEM; 3167 3167 } ··· 3209 3209 if (netif_msg_ifup(ugeth)) 3210 3210 ugeth_err 3211 3211 ("%s: Can not allocate memory for" 3212 - " p_UccInitEnetParamShadows.", __FUNCTION__); 3212 + " p_UccInitEnetParamShadows.", __func__); 3213 3213 ucc_geth_memclean(ugeth); 3214 3214 return -ENOMEM; 3215 3215 } ··· 3244 3244 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)) { 3245 3245 if (netif_msg_ifup(ugeth)) 3246 
3246 ugeth_err("%s: Invalid largest External Lookup Key Size.", 3247 - __FUNCTION__); 3247 + __func__); 3248 3248 ucc_geth_memclean(ugeth); 3249 3249 return -EINVAL; 3250 3250 } ··· 3271 3271 ug_info->riscRx, 1)) != 0) { 3272 3272 if (netif_msg_ifup(ugeth)) 3273 3273 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3274 - __FUNCTION__); 3274 + __func__); 3275 3275 ucc_geth_memclean(ugeth); 3276 3276 return ret_val; 3277 3277 } ··· 3287 3287 ug_info->riscTx, 0)) != 0) { 3288 3288 if (netif_msg_ifup(ugeth)) 3289 3289 ugeth_err("%s: Can not fill p_init_enet_param_shadow.", 3290 - __FUNCTION__); 3290 + __func__); 3291 3291 ucc_geth_memclean(ugeth); 3292 3292 return ret_val; 3293 3293 } ··· 3297 3297 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) { 3298 3298 if (netif_msg_ifup(ugeth)) 3299 3299 ugeth_err("%s: Can not fill Rx bds with buffers.", 3300 - __FUNCTION__); 3300 + __func__); 3301 3301 ucc_geth_memclean(ugeth); 3302 3302 return ret_val; 3303 3303 } ··· 3309 3309 if (netif_msg_ifup(ugeth)) 3310 3310 ugeth_err 3311 3311 ("%s: Can not allocate DPRAM memory for p_init_enet_pram.", 3312 - __FUNCTION__); 3312 + __func__); 3313 3313 ucc_geth_memclean(ugeth); 3314 3314 return -ENOMEM; 3315 3315 } ··· 3360 3360 { 3361 3361 struct ucc_geth_private *ugeth = netdev_priv(dev); 3362 3362 3363 - ugeth_vdbg("%s: IN", __FUNCTION__); 3363 + ugeth_vdbg("%s: IN", __func__); 3364 3364 3365 3365 dev->stats.tx_errors++; 3366 3366 ··· 3386 3386 u32 bd_status; 3387 3387 u8 txQ = 0; 3388 3388 3389 - ugeth_vdbg("%s: IN", __FUNCTION__); 3389 + ugeth_vdbg("%s: IN", __func__); 3390 3390 3391 3391 spin_lock_irq(&ugeth->lock); 3392 3392 ··· 3459 3459 u8 *bdBuffer; 3460 3460 struct net_device *dev; 3461 3461 3462 - ugeth_vdbg("%s: IN", __FUNCTION__); 3462 + ugeth_vdbg("%s: IN", __func__); 3463 3463 3464 3464 dev = ugeth->dev; 3465 3465 ··· 3481 3481 (bd_status & R_ERRORS_FATAL)) { 3482 3482 if (netif_msg_rx_err(ugeth)) 3483 3483 ugeth_err("%s, %d: ERROR!!! 
skb - 0x%08x", 3484 - __FUNCTION__, __LINE__, (u32) skb); 3484 + __func__, __LINE__, (u32) skb); 3485 3485 if (skb) 3486 3486 dev_kfree_skb_any(skb); 3487 3487 ··· 3507 3507 skb = get_new_skb(ugeth, bd); 3508 3508 if (!skb) { 3509 3509 if (netif_msg_rx_err(ugeth)) 3510 - ugeth_warn("%s: No Rx Data Buffer", __FUNCTION__); 3510 + ugeth_warn("%s: No Rx Data Buffer", __func__); 3511 3511 dev->stats.rx_dropped++; 3512 3512 break; 3513 3513 } ··· 3613 3613 register u32 tx_mask; 3614 3614 u8 i; 3615 3615 3616 - ugeth_vdbg("%s: IN", __FUNCTION__); 3616 + ugeth_vdbg("%s: IN", __func__); 3617 3617 3618 3618 uccf = ugeth->uccf; 3619 3619 ug_info = ugeth->ug_info; ··· 3683 3683 struct ucc_geth_private *ugeth = netdev_priv(dev); 3684 3684 int err; 3685 3685 3686 - ugeth_vdbg("%s: IN", __FUNCTION__); 3686 + ugeth_vdbg("%s: IN", __func__); 3687 3687 3688 3688 /* Test station address */ 3689 3689 if (dev->dev_addr[0] & ENET_GROUP_ADDR) { 3690 3690 if (netif_msg_ifup(ugeth)) 3691 3691 ugeth_err("%s: Multicast address used for station address" 3692 - " - is this what you wanted?", __FUNCTION__); 3692 + " - is this what you wanted?", __func__); 3693 3693 return -EINVAL; 3694 3694 } 3695 3695 ··· 3772 3772 { 3773 3773 struct ucc_geth_private *ugeth = netdev_priv(dev); 3774 3774 3775 - ugeth_vdbg("%s: IN", __FUNCTION__); 3775 + ugeth_vdbg("%s: IN", __func__); 3776 3776 3777 3777 napi_disable(&ugeth->napi); 3778 3778 ··· 3840 3840 PHY_INTERFACE_MODE_TBI, PHY_INTERFACE_MODE_RTBI, 3841 3841 }; 3842 3842 3843 - ugeth_vdbg("%s: IN", __FUNCTION__); 3843 + ugeth_vdbg("%s: IN", __func__); 3844 3844 3845 3845 prop = of_get_property(np, "cell-index", NULL); 3846 3846 if (!prop) { ··· 3857 3857 if (ug_info == NULL) { 3858 3858 if (netif_msg_probe(&debug)) 3859 3859 ugeth_err("%s: [%d] Missing additional data!", 3860 - __FUNCTION__, ucc_num); 3860 + __func__, ucc_num); 3861 3861 return -ENODEV; 3862 3862 } 3863 3863
+239 -96
drivers/net/usb/hso.c
··· 92 92 93 93 #define HSO_NET_TX_TIMEOUT (HZ*10) 94 94 95 - /* Serial port defines and structs. */ 96 - #define HSO_SERIAL_FLAG_RX_SENT 0 97 - 98 95 #define HSO_SERIAL_MAGIC 0x48534f31 99 96 100 97 /* Number of ttys to handle */ ··· 176 179 unsigned long flags; 177 180 }; 178 181 182 + enum rx_ctrl_state{ 183 + RX_IDLE, 184 + RX_SENT, 185 + RX_PENDING 186 + }; 187 + 179 188 struct hso_serial { 180 189 struct hso_device *parent; 181 190 int magic; ··· 208 205 struct usb_endpoint_descriptor *in_endp; 209 206 struct usb_endpoint_descriptor *out_endp; 210 207 211 - unsigned long flags; 208 + enum rx_ctrl_state rx_state; 212 209 u8 rts_state; 213 210 u8 dtr_state; 214 211 unsigned tx_urb_used:1; ··· 219 216 spinlock_t serial_lock; 220 217 221 218 int (*write_data) (struct hso_serial *serial); 219 + /* Hacks required to get flow control 220 + * working on the serial receive buffers 221 + * so as not to drop characters on the floor. 222 + */ 223 + int curr_rx_urb_idx; 224 + u16 curr_rx_urb_offset; 225 + u8 rx_urb_filled[MAX_RX_URBS]; 226 + struct tasklet_struct unthrottle_tasklet; 227 + struct work_struct retry_unthrottle_workqueue; 222 228 }; 223 229 224 230 struct hso_device { ··· 283 271 static int hso_serial_tiocmset(struct tty_struct *tty, struct file *file, 284 272 unsigned int set, unsigned int clear); 285 273 static void ctrl_callback(struct urb *urb); 286 - static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial); 274 + static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial); 287 275 static void hso_kick_transmit(struct hso_serial *serial); 288 276 /* Helper functions */ 289 277 static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, ··· 299 287 static void hso_free_shared_int(struct hso_shared_int *shared_int); 300 288 static int hso_stop_net_device(struct hso_device *hso_dev); 301 289 static void hso_serial_ref_free(struct kref *ref); 290 + static void hso_std_serial_read_bulk_callback(struct urb *urb); 291 + static int 
hso_mux_serial_read(struct hso_serial *serial); 302 292 static void async_get_intf(struct work_struct *data); 303 293 static void async_put_intf(struct work_struct *data); 304 294 static int hso_put_activity(struct hso_device *hso_dev); ··· 471 457 return sprintf(buf, "%s\n", port_name); 472 458 } 473 459 static DEVICE_ATTR(hsotype, S_IRUGO, hso_sysfs_show_porttype, NULL); 460 + 461 + static int hso_urb_to_index(struct hso_serial *serial, struct urb *urb) 462 + { 463 + int idx; 464 + 465 + for (idx = 0; idx < serial->num_rx_urbs; idx++) 466 + if (serial->rx_urb[idx] == urb) 467 + return idx; 468 + dev_err(serial->parent->dev, "hso_urb_to_index failed\n"); 469 + return -1; 470 + } 474 471 475 472 /* converts mux value to a port spec value */ 476 473 static u32 hso_mux_to_port(int mux) ··· 1064 1039 return; 1065 1040 } 1066 1041 1042 + static void hso_resubmit_rx_bulk_urb(struct hso_serial *serial, struct urb *urb) 1043 + { 1044 + int result; 1045 + #ifdef CONFIG_HSO_AUTOPM 1046 + usb_mark_last_busy(urb->dev); 1047 + #endif 1048 + /* We are done with this URB, resubmit it. Prep the USB to wait for 1049 + * another frame */ 1050 + usb_fill_bulk_urb(urb, serial->parent->usb, 1051 + usb_rcvbulkpipe(serial->parent->usb, 1052 + serial->in_endp-> 1053 + bEndpointAddress & 0x7F), 1054 + urb->transfer_buffer, serial->rx_data_length, 1055 + hso_std_serial_read_bulk_callback, serial); 1056 + /* Give this to the USB subsystem so it can tell us when more data 1057 + * arrives. 
*/ 1058 + result = usb_submit_urb(urb, GFP_ATOMIC); 1059 + if (result) { 1060 + dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d\n", 1061 + __func__, result); 1062 + } 1063 + } 1064 + 1065 + 1066 + 1067 + 1068 + static void put_rxbuf_data_and_resubmit_bulk_urb(struct hso_serial *serial) 1069 + { 1070 + int count; 1071 + struct urb *curr_urb; 1072 + 1073 + while (serial->rx_urb_filled[serial->curr_rx_urb_idx]) { 1074 + curr_urb = serial->rx_urb[serial->curr_rx_urb_idx]; 1075 + count = put_rxbuf_data(curr_urb, serial); 1076 + if (count == -1) 1077 + return; 1078 + if (count == 0) { 1079 + serial->curr_rx_urb_idx++; 1080 + if (serial->curr_rx_urb_idx >= serial->num_rx_urbs) 1081 + serial->curr_rx_urb_idx = 0; 1082 + hso_resubmit_rx_bulk_urb(serial, curr_urb); 1083 + } 1084 + } 1085 + } 1086 + 1087 + static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial) 1088 + { 1089 + int count = 0; 1090 + struct urb *urb; 1091 + 1092 + urb = serial->rx_urb[0]; 1093 + if (serial->open_count > 0) { 1094 + count = put_rxbuf_data(urb, serial); 1095 + if (count == -1) 1096 + return; 1097 + } 1098 + /* Re issue a read as long as we receive data. 
*/ 1099 + 1100 + if (count == 0 && ((urb->actual_length != 0) || 1101 + (serial->rx_state == RX_PENDING))) { 1102 + serial->rx_state = RX_SENT; 1103 + hso_mux_serial_read(serial); 1104 + } else 1105 + serial->rx_state = RX_IDLE; 1106 + } 1107 + 1108 + 1109 + /* read callback for Diag and CS port */ 1110 + static void hso_std_serial_read_bulk_callback(struct urb *urb) 1111 + { 1112 + struct hso_serial *serial = urb->context; 1113 + int status = urb->status; 1114 + 1115 + /* sanity check */ 1116 + if (!serial) { 1117 + D1("serial == NULL"); 1118 + return; 1119 + } else if (status) { 1120 + log_usb_status(status, __func__); 1121 + return; 1122 + } 1123 + 1124 + D4("\n--- Got serial_read_bulk callback %02x ---", status); 1125 + D1("Actual length = %d\n", urb->actual_length); 1126 + DUMP1(urb->transfer_buffer, urb->actual_length); 1127 + 1128 + /* Anyone listening? */ 1129 + if (serial->open_count == 0) 1130 + return; 1131 + 1132 + if (status == 0) { 1133 + if (serial->parent->port_spec & HSO_INFO_CRC_BUG) { 1134 + u32 rest; 1135 + u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; 1136 + rest = 1137 + urb->actual_length % 1138 + serial->in_endp->wMaxPacketSize; 1139 + if (((rest == 5) || (rest == 6)) 1140 + && !memcmp(((u8 *) urb->transfer_buffer) + 1141 + urb->actual_length - 4, crc_check, 4)) { 1142 + urb->actual_length -= 4; 1143 + } 1144 + } 1145 + /* Valid data, handle RX data */ 1146 + spin_lock(&serial->serial_lock); 1147 + serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; 1148 + put_rxbuf_data_and_resubmit_bulk_urb(serial); 1149 + spin_unlock(&serial->serial_lock); 1150 + } else if (status == -ENOENT || status == -ECONNRESET) { 1151 + /* Unlinked - check for throttled port. 
*/ 1152 + D2("Port %d, successfully unlinked urb", serial->minor); 1153 + spin_lock(&serial->serial_lock); 1154 + serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; 1155 + hso_resubmit_rx_bulk_urb(serial, urb); 1156 + spin_unlock(&serial->serial_lock); 1157 + } else { 1158 + D2("Port %d, status = %d for read urb", serial->minor, status); 1159 + return; 1160 + } 1161 + } 1162 + 1163 + /* 1164 + * This needs to be a tasklet otherwise we will 1165 + * end up recursively calling this function. 1166 + */ 1167 + void hso_unthrottle_tasklet(struct hso_serial *serial) 1168 + { 1169 + unsigned long flags; 1170 + 1171 + spin_lock_irqsave(&serial->serial_lock, flags); 1172 + if ((serial->parent->port_spec & HSO_INTF_MUX)) 1173 + put_rxbuf_data_and_resubmit_ctrl_urb(serial); 1174 + else 1175 + put_rxbuf_data_and_resubmit_bulk_urb(serial); 1176 + spin_unlock_irqrestore(&serial->serial_lock, flags); 1177 + } 1178 + 1179 + static void hso_unthrottle(struct tty_struct *tty) 1180 + { 1181 + struct hso_serial *serial = get_serial_by_tty(tty); 1182 + 1183 + tasklet_hi_schedule(&serial->unthrottle_tasklet); 1184 + } 1185 + 1186 + void hso_unthrottle_workfunc(struct work_struct *work) 1187 + { 1188 + struct hso_serial *serial = 1189 + container_of(work, struct hso_serial, 1190 + retry_unthrottle_workqueue); 1191 + hso_unthrottle_tasklet(serial); 1192 + } 1193 + 1067 1194 /* open the requested serial port */ 1068 1195 static int hso_serial_open(struct tty_struct *tty, struct file *filp) 1069 1196 { ··· 1241 1064 tty->driver_data = serial; 1242 1065 serial->tty = tty; 1243 1066 1244 - /* check for port allready opened, if not set the termios */ 1067 + /* check for port already opened, if not set the termios */ 1245 1068 serial->open_count++; 1246 1069 if (serial->open_count == 1) { 1247 1070 tty->low_latency = 1; 1248 - serial->flags = 0; 1071 + serial->rx_state = RX_IDLE; 1249 1072 /* Force default termio settings */ 1250 1073 _hso_serial_set_termios(tty, NULL); 1074 + 
tasklet_init(&serial->unthrottle_tasklet, 1075 + (void (*)(unsigned long))hso_unthrottle_tasklet, 1076 + (unsigned long)serial); 1077 + INIT_WORK(&serial->retry_unthrottle_workqueue, 1078 + hso_unthrottle_workfunc); 1251 1079 result = hso_start_serial_device(serial->parent, GFP_KERNEL); 1252 1080 if (result) { 1253 1081 hso_stop_serial_device(serial->parent); ··· 1299 1117 } 1300 1118 if (!usb_gone) 1301 1119 hso_stop_serial_device(serial->parent); 1120 + tasklet_kill(&serial->unthrottle_tasklet); 1121 + cancel_work_sync(&serial->retry_unthrottle_workqueue); 1302 1122 } 1123 + 1303 1124 if (!usb_gone) 1304 1125 usb_autopm_put_interface(serial->parent->interface); 1126 + 1305 1127 mutex_unlock(&serial->parent->mutex); 1306 1128 } 1307 1129 ··· 1608 1422 (1 << i)); 1609 1423 if (serial != NULL) { 1610 1424 D1("Pending read interrupt on port %d\n", i); 1611 - if (!test_and_set_bit(HSO_SERIAL_FLAG_RX_SENT, 1612 - &serial->flags)) { 1425 + spin_lock(&serial->serial_lock); 1426 + if (serial->rx_state == RX_IDLE) { 1613 1427 /* Setup and send a ctrl req read on 1614 1428 * port i */ 1615 - hso_mux_serial_read(serial); 1429 + if (!serial->rx_urb_filled[0]) { 1430 + serial->rx_state = RX_SENT; 1431 + hso_mux_serial_read(serial); 1432 + } else 1433 + serial->rx_state = RX_PENDING; 1434 + 1616 1435 } else { 1617 1436 D1("Already pending a read on " 1618 1437 "port %d\n", i); 1619 1438 } 1439 + spin_unlock(&serial->serial_lock); 1620 1440 } 1621 1441 } 1622 1442 } ··· 1724 1532 if (req->bRequestType == 1725 1533 (USB_DIR_IN | USB_TYPE_OPTION_VENDOR | USB_RECIP_INTERFACE)) { 1726 1534 /* response to a read command */ 1727 - if (serial->open_count > 0) { 1728 - /* handle RX data the normal way */ 1729 - put_rxbuf_data(urb, serial); 1730 - } 1731 - 1732 - /* Re issue a read as long as we receive data. 
*/ 1733 - if (urb->actual_length != 0) 1734 - hso_mux_serial_read(serial); 1735 - else 1736 - clear_bit(HSO_SERIAL_FLAG_RX_SENT, &serial->flags); 1535 + serial->rx_urb_filled[0] = 1; 1536 + spin_lock(&serial->serial_lock); 1537 + put_rxbuf_data_and_resubmit_ctrl_urb(serial); 1538 + spin_unlock(&serial->serial_lock); 1737 1539 } else { 1738 1540 hso_put_activity(serial->parent); 1739 1541 if (serial->tty) ··· 1738 1552 } 1739 1553 1740 1554 /* handle RX data for serial port */ 1741 - static void put_rxbuf_data(struct urb *urb, struct hso_serial *serial) 1555 + static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) 1742 1556 { 1743 1557 struct tty_struct *tty = serial->tty; 1744 - 1558 + int write_length_remaining = 0; 1559 + int curr_write_len; 1745 1560 /* Sanity check */ 1746 1561 if (urb == NULL || serial == NULL) { 1747 1562 D1("serial = NULL"); 1748 - return; 1563 + return -2; 1749 1564 } 1750 1565 1751 1566 /* Push data to tty */ 1752 - if (tty && urb->actual_length) { 1567 + if (tty) { 1568 + write_length_remaining = urb->actual_length - 1569 + serial->curr_rx_urb_offset; 1753 1570 D1("data to push to tty"); 1754 - tty_insert_flip_string(tty, urb->transfer_buffer, 1755 - urb->actual_length); 1756 - tty_flip_buffer_push(tty); 1757 - } 1758 - } 1759 - 1760 - /* read callback for Diag and CS port */ 1761 - static void hso_std_serial_read_bulk_callback(struct urb *urb) 1762 - { 1763 - struct hso_serial *serial = urb->context; 1764 - int result; 1765 - int status = urb->status; 1766 - 1767 - /* sanity check */ 1768 - if (!serial) { 1769 - D1("serial == NULL"); 1770 - return; 1771 - } else if (status) { 1772 - log_usb_status(status, __func__); 1773 - return; 1774 - } 1775 - 1776 - D4("\n--- Got serial_read_bulk callback %02x ---", status); 1777 - D1("Actual length = %d\n", urb->actual_length); 1778 - DUMP1(urb->transfer_buffer, urb->actual_length); 1779 - 1780 - /* Anyone listening? 
*/ 1781 - if (serial->open_count == 0) 1782 - return; 1783 - 1784 - if (status == 0) { 1785 - if (serial->parent->port_spec & HSO_INFO_CRC_BUG) { 1786 - u32 rest; 1787 - u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; 1788 - rest = 1789 - urb->actual_length % 1790 - serial->in_endp->wMaxPacketSize; 1791 - if (((rest == 5) || (rest == 6)) 1792 - && !memcmp(((u8 *) urb->transfer_buffer) + 1793 - urb->actual_length - 4, crc_check, 4)) { 1794 - urb->actual_length -= 4; 1795 - } 1571 + while (write_length_remaining) { 1572 + if (test_bit(TTY_THROTTLED, &tty->flags)) 1573 + return -1; 1574 + curr_write_len = tty_insert_flip_string 1575 + (tty, urb->transfer_buffer + 1576 + serial->curr_rx_urb_offset, 1577 + write_length_remaining); 1578 + serial->curr_rx_urb_offset += curr_write_len; 1579 + write_length_remaining -= curr_write_len; 1580 + tty_flip_buffer_push(tty); 1796 1581 } 1797 - /* Valid data, handle RX data */ 1798 - put_rxbuf_data(urb, serial); 1799 - } else if (status == -ENOENT || status == -ECONNRESET) { 1800 - /* Unlinked - check for throttled port. */ 1801 - D2("Port %d, successfully unlinked urb", serial->minor); 1802 - } else { 1803 - D2("Port %d, status = %d for read urb", serial->minor, status); 1804 - return; 1805 1582 } 1806 - 1807 - usb_mark_last_busy(urb->dev); 1808 - 1809 - /* We are done with this URB, resubmit it. Prep the USB to wait for 1810 - * another frame */ 1811 - usb_fill_bulk_urb(urb, serial->parent->usb, 1812 - usb_rcvbulkpipe(serial->parent->usb, 1813 - serial->in_endp-> 1814 - bEndpointAddress & 0x7F), 1815 - urb->transfer_buffer, serial->rx_data_length, 1816 - hso_std_serial_read_bulk_callback, serial); 1817 - /* Give this to the USB subsystem so it can tell us when more data 1818 - * arrives. 
*/ 1819 - result = usb_submit_urb(urb, GFP_ATOMIC); 1820 - if (result) { 1821 - dev_err(&urb->dev->dev, "%s failed submit serial rx_urb %d", 1822 - __func__, result); 1583 + if (write_length_remaining == 0) { 1584 + serial->curr_rx_urb_offset = 0; 1585 + serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; 1823 1586 } 1587 + return write_length_remaining; 1824 1588 } 1589 + 1825 1590 1826 1591 /* Base driver functions */ 1827 1592 ··· 1931 1794 return -ENODEV; 1932 1795 1933 1796 for (i = 0; i < serial->num_rx_urbs; i++) { 1934 - if (serial->rx_urb[i]) 1797 + if (serial->rx_urb[i]) { 1935 1798 usb_kill_urb(serial->rx_urb[i]); 1799 + serial->rx_urb_filled[i] = 0; 1800 + } 1936 1801 } 1802 + serial->curr_rx_urb_idx = 0; 1803 + serial->curr_rx_urb_offset = 0; 1937 1804 1938 1805 if (serial->tx_urb) 1939 1806 usb_kill_urb(serial->tx_urb); ··· 2352 2211 USB_DIR_IN); 2353 2212 if (!serial->in_endp) { 2354 2213 dev_err(&interface->dev, "Failed to find BULK IN ep\n"); 2355 - goto exit; 2214 + goto exit2; 2356 2215 } 2357 2216 2358 2217 if (! 2359 2218 (serial->out_endp = 2360 2219 hso_get_ep(interface, USB_ENDPOINT_XFER_BULK, USB_DIR_OUT))) { 2361 2220 dev_err(&interface->dev, "Failed to find BULK IN ep\n"); 2362 - goto exit; 2221 + goto exit2; 2363 2222 } 2364 2223 2365 2224 serial->write_data = hso_std_serial_write_data; ··· 2372 2231 2373 2232 /* done, return it */ 2374 2233 return hso_dev; 2234 + 2235 + exit2: 2236 + hso_serial_common_free(serial); 2375 2237 exit: 2376 - if (hso_dev && serial) 2377 - hso_serial_common_free(serial); 2378 2238 kfree(serial); 2379 2239 hso_free_device(hso_dev); 2380 2240 return NULL; ··· 2882 2740 .chars_in_buffer = hso_serial_chars_in_buffer, 2883 2741 .tiocmget = hso_serial_tiocmget, 2884 2742 .tiocmset = hso_serial_tiocmset, 2743 + .unthrottle = hso_unthrottle 2885 2744 }; 2886 2745 2887 2746 static struct usb_driver hso_driver = {
+1 -1
drivers/net/usb/mcs7830.c
··· 118 118 119 119 if (urb->status < 0) 120 120 printk(KERN_DEBUG "%s() failed with %d\n", 121 - __FUNCTION__, urb->status); 121 + __func__, urb->status); 122 122 123 123 kfree(req); 124 124 usb_free_urb(urb);
+10 -10
drivers/net/usb/pegasus.c
··· 119 119 default: 
 120 120 if (netif_msg_drv(pegasus) && printk_ratelimit()) 
 121 121 dev_dbg(&pegasus->intf->dev, "%s, status %d\n", 
 122 - __FUNCTION__, urb->status); 122 + __func__, urb->status); 
 123 123 } 
 124 124 pegasus->flags &= ~ETH_REGS_CHANGED; 
 125 125 wake_up(&pegasus->ctrl_wait); 
 ··· 136 136 if (!buffer) { 
 137 137 if (netif_msg_drv(pegasus)) 
 138 138 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 
 139 - __FUNCTION__); 139 + __func__); 
 140 140 return -ENOMEM; 
 141 141 } 
 142 142 add_wait_queue(&pegasus->ctrl_wait, &wait); 
 ··· 224 224 netif_device_detach(pegasus->net); 
 225 225 if (netif_msg_drv(pegasus)) 
 226 226 dev_err(&pegasus->intf->dev, "%s, status %d\n", 
 227 - __FUNCTION__, ret); 227 + __func__, ret); 
 228 228 goto out; 
 229 229 } 
 230 230 
 ··· 246 246 if (!tmp) { 
 247 247 if (netif_msg_drv(pegasus)) 
 248 248 dev_warn(&pegasus->intf->dev, "out of memory in %s\n", 
 249 - __FUNCTION__); 249 + __func__); 
 250 250 return -ENOMEM; 
 251 251 } 
 252 252 memcpy(tmp, &data, 1); 
 ··· 277 277 netif_device_detach(pegasus->net); 
 278 278 if (netif_msg_drv(pegasus) && printk_ratelimit()) 
 279 279 dev_err(&pegasus->intf->dev, "%s, status %d\n", 
 280 - __FUNCTION__, ret); 280 + __func__, ret); 
 281 281 goto out; 
 282 282 } 
 283 283 
 ··· 310 310 netif_device_detach(pegasus->net); 
 311 311 if (netif_msg_drv(pegasus)) 
 312 312 dev_err(&pegasus->intf->dev, "%s, status %d\n", 
 313 - __FUNCTION__, ret); 313 + __func__, ret); 
 314 314 } 
 315 315 
 316 316 return ret; 
 ··· 341 341 } 
 342 342 fail: 
 343 343 if (netif_msg_drv(pegasus)) 
 344 - dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 344 + dev_warn(&pegasus->intf->dev, "%s failed\n", __func__); 
 345 345 
 346 346 return ret; 
 347 347 } 
 ··· 378 378 
 379 379 fail: 
 380 380 if (netif_msg_drv(pegasus)) 
 381 - dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 381 + dev_warn(&pegasus->intf->dev, "%s failed\n", __func__); 
 382 382 return -ETIMEDOUT; 
 383 383 } 
 ··· 415 415 
 416 416 fail: 
 417 417 if (netif_msg_drv(pegasus)) 
 418 - dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 418 + dev_warn(&pegasus->intf->dev, "%s failed\n", __func__); 
 419 419 return -ETIMEDOUT; 
 420 420 } 
 ··· 463 463 return ret; 
 464 464 fail: 
 465 465 if (netif_msg_drv(pegasus)) 
 466 - dev_warn(&pegasus->intf->dev, "%s failed\n", __FUNCTION__); 466 + dev_warn(&pegasus->intf->dev, "%s failed\n", __func__); 
 467 467 return -ETIMEDOUT; 
 468 468 } 469 469 #endif /* PEGASUS_WRITE_EEPROM */
+1 -1
drivers/net/via-velocity.h
··· 1381 1381 #define ASSERT(x) { \ 1382 1382 if (!(x)) { \ 1383 1383 printk(KERN_ERR "assertion %s failed: file %s line %d\n", #x,\ 1384 - __FUNCTION__, __LINE__);\ 1384 + __func__, __LINE__);\ 1385 1385 BUG(); \ 1386 1386 }\ 1387 1387 }
+3 -3
drivers/net/wan/cycx_drv.c
··· 407 407 if (cfm->version != CFM_VERSION) { 408 408 printk(KERN_ERR "%s:%s: firmware format %u rejected! " 409 409 "Expecting %u.\n", 410 - modname, __FUNCTION__, cfm->version, CFM_VERSION); 410 + modname, __func__, cfm->version, CFM_VERSION); 411 411 return -EINVAL; 412 412 } 413 413 ··· 420 420 */ 421 421 if (cksum != cfm->checksum) { 422 422 printk(KERN_ERR "%s:%s: firmware corrupted!\n", 423 - modname, __FUNCTION__); 423 + modname, __func__); 424 424 printk(KERN_ERR " cdsize = 0x%x (expected 0x%lx)\n", 425 425 len - (int)sizeof(struct cycx_firmware) - 1, 426 426 cfm->info.codesize); ··· 432 432 /* If everything is ok, set reset, data and code pointers */ 433 433 img_hdr = (struct cycx_fw_header *)&cfm->image; 434 434 #ifdef FIRMWARE_DEBUG 435 - printk(KERN_INFO "%s:%s: image sizes\n", __FUNCTION__, modname); 435 + printk(KERN_INFO "%s:%s: image sizes\n", __func__, modname); 436 436 printk(KERN_INFO " reset=%lu\n", img_hdr->reset_size); 437 437 printk(KERN_INFO " data=%lu\n", img_hdr->data_size); 438 438 printk(KERN_INFO " code=%lu\n", img_hdr->code_size);
+6 -6
drivers/net/wan/cycx_x25.c
··· 874 874 nibble_to_byte(d + (sizeloc >> 1), rem, sizerem, sizeloc & 1); 875 875 876 876 dprintk(1, KERN_INFO "%s:lcn=%d, local=%s, remote=%s\n", 877 - __FUNCTION__, lcn, loc, rem); 877 + __func__, lcn, loc, rem); 878 878 879 879 dev = cycx_x25_get_dev_by_dte_addr(wandev, rem); 880 880 if (!dev) { ··· 902 902 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 903 903 cycx_peek(&card->hw, cmd->buf + 1, &key, sizeof(key)); 904 904 dprintk(1, KERN_INFO "%s: %s:lcn=%d, key=%d\n", 905 - card->devname, __FUNCTION__, lcn, key); 905 + card->devname, __func__, lcn, key); 906 906 907 907 dev = cycx_x25_get_dev_by_lcn(wandev, -key); 908 908 if (!dev) { ··· 929 929 930 930 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 931 931 dprintk(1, KERN_INFO "%s: %s:lcn=%d\n", 932 - card->devname, __FUNCTION__, lcn); 932 + card->devname, __func__, lcn); 933 933 dev = cycx_x25_get_dev_by_lcn(wandev, lcn); 934 934 if (!dev) { 935 935 /* Invalid channel, discard packet */ ··· 950 950 u8 lcn; 951 951 952 952 cycx_peek(&card->hw, cmd->buf, &lcn, sizeof(lcn)); 953 - dprintk(1, KERN_INFO "%s:lcn=%d\n", __FUNCTION__, lcn); 953 + dprintk(1, KERN_INFO "%s:lcn=%d\n", __func__, lcn); 954 954 955 955 dev = cycx_x25_get_dev_by_lcn(wandev, lcn); 956 956 if (dev) { ··· 1381 1381 cycx_x25_chan_disconnect(dev); 1382 1382 else 1383 1383 printk(KERN_ERR "%s: %s for svc (%s) not connected!\n", 1384 - chan->card->devname, __FUNCTION__, dev->name); 1384 + chan->card->devname, __func__, dev->name); 1385 1385 } 1386 1386 1387 1387 /* Set logical channel state. */ ··· 1485 1485 unsigned char *ptr; 1486 1486 1487 1487 if ((skb = dev_alloc_skb(1)) == NULL) { 1488 - printk(KERN_ERR "%s: out of memory\n", __FUNCTION__); 1488 + printk(KERN_ERR "%s: out of memory\n", __func__); 1489 1489 return; 1490 1490 } 1491 1491
+1 -1
drivers/net/wan/dscc4.c
··· 647 647 648 648 skb = dpriv->rx_skbuff[dpriv->rx_current++%RX_RING_SIZE]; 649 649 if (!skb) { 650 - printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __FUNCTION__); 650 + printk(KERN_DEBUG "%s: skb=0 (%s)\n", dev->name, __func__); 651 651 goto refill; 652 652 } 653 653 pkt_len = TO_SIZE(le32_to_cpu(rx_fd->state2));
+5 -3
drivers/net/wan/hdlc_x25.c
··· 163 163 164 164 static int x25_rx(struct sk_buff *skb) 165 165 { 166 + struct net_device *dev = skb->dev; 167 + 166 168 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) { 167 - skb->dev->stats.rx_dropped++; 169 + dev->stats.rx_dropped++; 168 170 return NET_RX_DROP; 169 171 } 170 172 171 - if (lapb_data_received(skb->dev, skb) == LAPB_OK) 173 + if (lapb_data_received(dev, skb) == LAPB_OK) 172 174 return NET_RX_SUCCESS; 173 175 174 - skb->dev->stats.rx_errors++; 176 + dev->stats.rx_errors++; 175 177 dev_kfree_skb_any(skb); 176 178 return NET_RX_DROP; 177 179 }
+1 -1
drivers/net/wan/pc300_tty.c
··· 548 548 { 549 549 st_cpc_tty_area *cpc_tty; 550 550 551 - CPC_TTY_DBG("%s: set:%x clear:%x\n", __FUNCTION__, set, clear); 551 + CPC_TTY_DBG("%s: set:%x clear:%x\n", __func__, set, clear); 552 552 553 553 if (!tty || !tty->driver_data ) { 554 554 CPC_TTY_DBG("hdlcX-tty: no TTY to chars in buffer\n");
+10
include/linux/pci_ids.h
··· 2247 2247 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2 0x0007 2248 2248 #define PCI_DEVICE_ID_3DLABS_PERMEDIA2V 0x0009 2249 2249 2250 + #define PCI_VENDOR_ID_NETXEN 0x4040 2251 + #define PCI_DEVICE_ID_NX2031_10GXSR 0x0001 2252 + #define PCI_DEVICE_ID_NX2031_10GCX4 0x0002 2253 + #define PCI_DEVICE_ID_NX2031_4GCU 0x0003 2254 + #define PCI_DEVICE_ID_NX2031_IMEZ 0x0004 2255 + #define PCI_DEVICE_ID_NX2031_HMEZ 0x0005 2256 + #define PCI_DEVICE_ID_NX2031_XG_MGMT 0x0024 2257 + #define PCI_DEVICE_ID_NX2031_XG_MGMT2 0x0025 2258 + #define PCI_DEVICE_ID_NX3031 0x0100 2259 + 2250 2260 #define PCI_VENDOR_ID_AKS 0x416c 2251 2261 #define PCI_DEVICE_ID_AKS_ALADDINCARD 0x0100 2252 2262