Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

+852 -575
+1 -1
drivers/net/3c509.c
··· 1063 struct sk_buff *skb; 1064 1065 skb = dev_alloc_skb(pkt_len+5); 1066 - dev->stats.rx_bytes += pkt_len; 1067 if (el3_debug > 4) 1068 printk("Receiving packet size %d status %4.4x.\n", 1069 pkt_len, rx_status); ··· 1077 skb->protocol = eth_type_trans(skb,dev); 1078 netif_rx(skb); 1079 dev->last_rx = jiffies; 1080 dev->stats.rx_packets++; 1081 continue; 1082 }
··· 1063 struct sk_buff *skb; 1064 1065 skb = dev_alloc_skb(pkt_len+5); 1066 if (el3_debug > 4) 1067 printk("Receiving packet size %d status %4.4x.\n", 1068 pkt_len, rx_status); ··· 1078 skb->protocol = eth_type_trans(skb,dev); 1079 netif_rx(skb); 1080 dev->last_rx = jiffies; 1081 + dev->stats.rx_bytes += pkt_len; 1082 dev->stats.rx_packets++; 1083 continue; 1084 }
+1 -6
drivers/net/au1000_eth.c
··· 1239 */ 1240 static irqreturn_t au1000_interrupt(int irq, void *dev_id) 1241 { 1242 - struct net_device *dev = (struct net_device *) dev_id; 1243 - 1244 - if (dev == NULL) { 1245 - printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); 1246 - return IRQ_RETVAL(1); 1247 - } 1248 1249 /* Handle RX interrupts first to minimize chance of overrun */ 1250
··· 1239 */ 1240 static irqreturn_t au1000_interrupt(int irq, void *dev_id) 1241 { 1242 + struct net_device *dev = dev_id; 1243 1244 /* Handle RX interrupts first to minimize chance of overrun */ 1245
-1
drivers/net/bfin_mac.c
··· 22 #include <linux/crc32.h> 23 #include <linux/device.h> 24 #include <linux/spinlock.h> 25 - #include <linux/ethtool.h> 26 #include <linux/mii.h> 27 #include <linux/phy.h> 28 #include <linux/netdevice.h>
··· 22 #include <linux/crc32.h> 23 #include <linux/device.h> 24 #include <linux/spinlock.h> 25 #include <linux/mii.h> 26 #include <linux/phy.h> 27 #include <linux/netdevice.h>
+179 -55
drivers/net/cpmac.c
··· 38 #include <linux/platform_device.h> 39 #include <linux/dma-mapping.h> 40 #include <asm/gpio.h> 41 42 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 43 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); ··· 188 #define CPMAC_EOQ 0x1000 189 struct sk_buff *skb; 190 struct cpmac_desc *next; 191 dma_addr_t mapping; 192 dma_addr_t data_mapping; 193 }; ··· 210 struct work_struct reset_work; 211 struct platform_device *pdev; 212 struct napi_struct napi; 213 }; 214 215 static irqreturn_t cpmac_irq(int, void *); ··· 242 for (i = 0; i < sizeof(*desc) / 4; i++) 243 printk(" %08x", ((u32 *)desc)[i]); 244 printk("\n"); 245 } 246 247 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) ··· 425 static int cpmac_poll(struct napi_struct *napi, int budget) 426 { 427 struct sk_buff *skb; 428 - struct cpmac_desc *desc; 429 - int received = 0; 430 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); 431 432 spin_lock(&priv->rx_lock); 433 if (unlikely(!priv->rx_head)) { 434 if (netif_msg_rx_err(priv) && net_ratelimit()) 435 printk(KERN_WARNING "%s: rx: polling, but no queue\n", 436 priv->dev->name); 437 netif_rx_complete(priv->dev, napi); 438 return 0; 439 } 440 441 desc = priv->rx_head; 442 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { 443 skb = cpmac_rx_one(priv, desc); 444 if (likely(skb)) { 445 netif_receive_skb(skb); ··· 469 desc = desc->next; 470 } 471 472 priv->rx_head = desc; 473 spin_unlock(&priv->rx_lock); 474 if (unlikely(netif_msg_rx_status(priv))) 475 printk(KERN_DEBUG "%s: poll processed %d packets\n", 476 priv->dev->name, received); 477 - if (desc->dataflags & CPMAC_OWN) { 478 netif_rx_complete(priv->dev, napi); 479 - cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); 480 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 481 return 0; 482 } 483 484 return 1; 485 } 486 487 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) ··· 560 int queue, len; 561 struct cpmac_desc *desc; 562 struct cpmac_priv *priv = netdev_priv(dev); 563 564 if (unlikely(skb_padto(skb, ETH_ZLEN))) 565 return NETDEV_TX_OK; ··· 729 desc->dataflags = CPMAC_OWN; 730 dev->stats.rx_dropped++; 731 } 732 desc = desc->next; 733 } 734 } 735 736 static void cpmac_clear_tx(struct net_device *dev) ··· 745 priv->desc_ring[i].dataflags = 0; 746 if (priv->desc_ring[i].skb) { 747 dev_kfree_skb_any(priv->desc_ring[i].skb); 748 - if (netif_subqueue_stopped(dev, i)) 749 - netif_wake_subqueue(dev, i); 750 } 751 } 752 } 753 754 static void cpmac_hw_error(struct work_struct *work) 755 { 756 struct cpmac_priv *priv = 757 container_of(work, struct cpmac_priv, reset_work); 758 ··· 761 spin_unlock(&priv->rx_lock); 762 cpmac_clear_tx(priv->dev); 763 cpmac_hw_start(priv->dev); 764 - napi_enable(&priv->napi); 765 - netif_start_queue(priv->dev); 766 } 767 768 static irqreturn_t cpmac_irq(int irq, void *dev_id) ··· 833 834 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); 835 836 - if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { 837 - if (netif_msg_drv(priv) && net_ratelimit()) 838 - printk(KERN_ERR "%s: hw error, resetting...\n", 839 - dev->name); 840 - netif_stop_queue(dev); 841 - napi_disable(&priv->napi); 842 - cpmac_hw_stop(dev); 843 - schedule_work(&priv->reset_work); 844 - if (unlikely(netif_msg_hw(priv))) 845 - cpmac_dump_regs(dev); 846 - } 847 848 return IRQ_HANDLED; 849 } 850 851 static void cpmac_tx_timeout(struct net_device *dev) 852 { 853 - struct cpmac_priv *priv = netdev_priv(dev); 854 int i; 855 856 spin_lock(&priv->lock); 857 
dev->stats.tx_errors++; 858 spin_unlock(&priv->lock); 859 if (netif_msg_tx_err(priv) && net_ratelimit()) 860 printk(KERN_WARNING "%s: transmit timeout\n", dev->name); 861 - /* 862 - * FIXME: waking up random queue is not the best thing to 863 - * do... on the other hand why we got here at all? 864 - */ 865 - #ifdef CONFIG_NETDEVICES_MULTIQUEUE 866 for (i = 0; i < CPMAC_QUEUES; i++) 867 - if (priv->desc_ring[i].skb) { 868 - priv->desc_ring[i].dataflags = 0; 869 - dev_kfree_skb_any(priv->desc_ring[i].skb); 870 - netif_wake_subqueue(dev, i); 871 - break; 872 - } 873 - #else 874 - priv->desc_ring[0].dataflags = 0; 875 - if (priv->desc_ring[0].skb) 876 - dev_kfree_skb_any(priv->desc_ring[0].skb); 877 - netif_wake_queue(dev); 878 - #endif 879 } 880 881 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ··· 1034 desc->buflen = CPMAC_SKB_SIZE; 1035 desc->dataflags = CPMAC_OWN; 1036 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; 1037 desc->hw_next = (u32)desc->next->mapping; 1038 } 1039 1040 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, 1041 dev->name, dev))) { ··· 1048 goto fail_irq; 1049 } 1050 1051 INIT_WORK(&priv->reset_work, cpmac_hw_error); 1052 cpmac_hw_start(dev); 1053 ··· 1144 1145 if (phy_id == PHY_MAX_ADDR) { 1146 if (external_switch || dumb_switch) { 1147 - struct fixed_phy_status status = {}; 1148 - 1149 - /* 1150 - * FIXME: this should be in the platform code! 1151 - * Since there is not platform code at all (that is, 1152 - * no mainline users of that driver), place it here 1153 - * for now. 1154 - */ 1155 - phy_id = 0; 1156 - status.link = 1; 1157 - status.duplex = 1; 1158 - status.speed = 100; 1159 - fixed_phy_add(PHY_POLL, phy_id, &status); 1160 } else { 1161 - printk(KERN_ERR "cpmac: no PHY present\n"); 1162 return -ENODEV; 1163 } 1164 } ··· 1190 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1191 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); 1192 1193 - snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 1194 - 1195 - priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, 1196 - PHY_INTERFACE_MODE_MII); 1197 if (IS_ERR(priv->phy)) { 1198 if (netif_msg_drv(priv)) 1199 printk(KERN_ERR "%s: Could not attach to PHY\n",
··· 38 #include <linux/platform_device.h> 39 #include <linux/dma-mapping.h> 40 #include <asm/gpio.h> 41 + #include <asm/atomic.h> 42 43 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 44 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); ··· 187 #define CPMAC_EOQ 0x1000 188 struct sk_buff *skb; 189 struct cpmac_desc *next; 190 + struct cpmac_desc *prev; 191 dma_addr_t mapping; 192 dma_addr_t data_mapping; 193 }; ··· 208 struct work_struct reset_work; 209 struct platform_device *pdev; 210 struct napi_struct napi; 211 + atomic_t reset_pending; 212 }; 213 214 static irqreturn_t cpmac_irq(int, void *); ··· 239 for (i = 0; i < sizeof(*desc) / 4; i++) 240 printk(" %08x", ((u32 *)desc)[i]); 241 printk("\n"); 242 + } 243 + 244 + static void cpmac_dump_all_desc(struct net_device *dev) 245 + { 246 + struct cpmac_priv *priv = netdev_priv(dev); 247 + struct cpmac_desc *dump = priv->rx_head; 248 + do { 249 + cpmac_dump_desc(dev, dump); 250 + dump = dump->next; 251 + } while (dump != priv->rx_head); 252 } 253 254 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) ··· 412 static int cpmac_poll(struct napi_struct *napi, int budget) 413 { 414 struct sk_buff *skb; 415 + struct cpmac_desc *desc, *restart; 416 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); 417 + int received = 0, processed = 0; 418 419 spin_lock(&priv->rx_lock); 420 if (unlikely(!priv->rx_head)) { 421 if (netif_msg_rx_err(priv) && net_ratelimit()) 422 printk(KERN_WARNING "%s: rx: polling, but no queue\n", 423 priv->dev->name); 424 + spin_unlock(&priv->rx_lock); 425 netif_rx_complete(priv->dev, napi); 426 return 0; 427 } 428 429 desc = priv->rx_head; 430 + restart = NULL; 431 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { 432 + processed++; 433 + 434 + if ((desc->dataflags & CPMAC_EOQ) != 0) { 435 + /* The last update to eoq->hw_next didn't happen 436 + * soon enough, and the receiver stopped here. 437 + *Remember this descriptor so we can restart 438 + * the receiver after freeing some space. 439 + */ 440 + if (unlikely(restart)) { 441 + if (netif_msg_rx_err(priv)) 442 + printk(KERN_ERR "%s: poll found a" 443 + " duplicate EOQ: %p and %p\n", 444 + priv->dev->name, restart, desc); 445 + goto fatal_error; 446 + } 447 + 448 + restart = desc->next; 449 + } 450 + 451 skb = cpmac_rx_one(priv, desc); 452 if (likely(skb)) { 453 netif_receive_skb(skb); ··· 435 desc = desc->next; 436 } 437 438 + if (desc != priv->rx_head) { 439 + /* We freed some buffers, but not the whole ring, 440 + * add what we did free to the rx list */ 441 + desc->prev->hw_next = (u32)0; 442 + priv->rx_head->prev->hw_next = priv->rx_head->mapping; 443 + } 444 + 445 + /* Optimization: If we did not actually process an EOQ (perhaps because 446 + * of quota limits), check to see if the tail of the queue has EOQ set. 447 + * We should immediately restart in that case so that the receiver can 448 + * restart and run in parallel with more packet processing. 449 + * This lets us handle slightly larger bursts before running 450 + * out of ring space (assuming dev->weight < ring_size) */ 451 + 452 + if (!restart && 453 + (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) 454 + == CPMAC_EOQ && 455 + (priv->rx_head->dataflags & CPMAC_OWN) != 0) { 456 + /* reset EOQ so the poll loop (above) doesn't try to 457 + * restart this when it eventually gets to this descriptor. 
458 + */ 459 + priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; 460 + restart = priv->rx_head; 461 + } 462 + 463 + if (restart) { 464 + priv->dev->stats.rx_errors++; 465 + priv->dev->stats.rx_fifo_errors++; 466 + if (netif_msg_rx_err(priv) && net_ratelimit()) 467 + printk(KERN_WARNING "%s: rx dma ring overrun\n", 468 + priv->dev->name); 469 + 470 + if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { 471 + if (netif_msg_drv(priv)) 472 + printk(KERN_ERR "%s: cpmac_poll is trying to " 473 + "restart rx from a descriptor that's " 474 + "not free: %p\n", 475 + priv->dev->name, restart); 476 + goto fatal_error; 477 + } 478 + 479 + cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); 480 + } 481 + 482 priv->rx_head = desc; 483 spin_unlock(&priv->rx_lock); 484 if (unlikely(netif_msg_rx_status(priv))) 485 printk(KERN_DEBUG "%s: poll processed %d packets\n", 486 priv->dev->name, received); 487 + if (processed == 0) { 488 + /* we ran out of packets to read, 489 + * revert to interrupt-driven mode */ 490 netif_rx_complete(priv->dev, napi); 491 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 492 return 0; 493 } 494 495 return 1; 496 + 497 + fatal_error: 498 + /* Something went horribly wrong. 499 + * Reset hardware to try to recover rather than wedging. */ 500 + 501 + if (netif_msg_drv(priv)) { 502 + printk(KERN_ERR "%s: cpmac_poll is confused. " 503 + "Resetting hardware\n", priv->dev->name); 504 + cpmac_dump_all_desc(priv->dev); 505 + printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", 506 + priv->dev->name, 507 + cpmac_read(priv->regs, CPMAC_RX_PTR(0)), 508 + cpmac_read(priv->regs, CPMAC_RX_ACK(0))); 509 + } 510 + 511 + spin_unlock(&priv->rx_lock); 512 + netif_rx_complete(priv->dev, napi); 513 + netif_stop_queue(priv->dev); 514 + napi_disable(&priv->napi); 515 + 516 + atomic_inc(&priv->reset_pending); 517 + cpmac_hw_stop(priv->dev); 518 + if (!schedule_work(&priv->reset_work)) 519 + atomic_dec(&priv->reset_pending); 520 + return 0; 521 + 522 } 523 524 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) ··· 455 int queue, len; 456 struct cpmac_desc *desc; 457 struct cpmac_priv *priv = netdev_priv(dev); 458 + 459 + if (unlikely(atomic_read(&priv->reset_pending))) 460 + return NETDEV_TX_BUSY; 461 462 if (unlikely(skb_padto(skb, ETH_ZLEN))) 463 return NETDEV_TX_OK; ··· 621 desc->dataflags = CPMAC_OWN; 622 dev->stats.rx_dropped++; 623 } 624 + desc->hw_next = desc->next->mapping; 625 desc = desc->next; 626 } 627 + priv->rx_head->prev->hw_next = 0; 628 } 629 630 static void cpmac_clear_tx(struct net_device *dev) ··· 635 priv->desc_ring[i].dataflags = 0; 636 if (priv->desc_ring[i].skb) { 637 dev_kfree_skb_any(priv->desc_ring[i].skb); 638 + priv->desc_ring[i].skb = NULL; 639 } 640 } 641 } 642 643 static void cpmac_hw_error(struct work_struct *work) 644 { 645 + int i; 646 struct cpmac_priv *priv = 647 container_of(work, struct cpmac_priv, reset_work); 648 ··· 651 spin_unlock(&priv->rx_lock); 652 cpmac_clear_tx(priv->dev); 653 cpmac_hw_start(priv->dev); 654 + barrier(); 655 + atomic_dec(&priv->reset_pending); 656 + 657 + for (i = 0; i < CPMAC_QUEUES; i++) 658 + netif_wake_subqueue(priv->dev, i); 659 + netif_wake_queue(priv->dev); 660 + cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); 661 + } 662 + 663 + static void cpmac_check_status(struct net_device *dev) 664 + { 665 + struct cpmac_priv *priv = netdev_priv(dev); 666 + 667 + u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); 668 + int rx_channel = (macstatus >> 8) & 7; 669 + int rx_code = (macstatus >> 12) & 15; 
670 + int tx_channel = (macstatus >> 16) & 7; 671 + int tx_code = (macstatus >> 20) & 15; 672 + 673 + if (rx_code || tx_code) { 674 + if (netif_msg_drv(priv) && net_ratelimit()) { 675 + /* Can't find any documentation on what these 676 + *error codes actually are. So just log them and hope.. 677 + */ 678 + if (rx_code) 679 + printk(KERN_WARNING "%s: host error %d on rx " 680 + "channel %d (macstatus %08x), resetting\n", 681 + dev->name, rx_code, rx_channel, macstatus); 682 + if (tx_code) 683 + printk(KERN_WARNING "%s: host error %d on tx " 684 + "channel %d (macstatus %08x), resetting\n", 685 + dev->name, tx_code, tx_channel, macstatus); 686 + } 687 + 688 + netif_stop_queue(dev); 689 + cpmac_hw_stop(dev); 690 + if (schedule_work(&priv->reset_work)) 691 + atomic_inc(&priv->reset_pending); 692 + if (unlikely(netif_msg_hw(priv))) 693 + cpmac_dump_regs(dev); 694 + } 695 + cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); 696 } 697 698 static irqreturn_t cpmac_irq(int irq, void *dev_id) ··· 683 684 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); 685 686 + if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) 687 + cpmac_check_status(dev); 688 689 return IRQ_HANDLED; 690 } 691 692 static void cpmac_tx_timeout(struct net_device *dev) 693 { 694 int i; 695 + struct cpmac_priv *priv = netdev_priv(dev); 696 697 spin_lock(&priv->lock); 698 dev->stats.tx_errors++; 699 spin_unlock(&priv->lock); 700 if (netif_msg_tx_err(priv) && net_ratelimit()) 701 printk(KERN_WARNING "%s: transmit timeout\n", dev->name); 702 + 703 + atomic_inc(&priv->reset_pending); 704 + barrier(); 705 + cpmac_clear_tx(dev); 706 + barrier(); 707 + atomic_dec(&priv->reset_pending); 708 + 709 + netif_wake_queue(priv->dev); 710 for (i = 0; i < CPMAC_QUEUES; i++) 711 + netif_wake_subqueue(dev, i); 712 } 713 714 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ··· 901 desc->buflen = CPMAC_SKB_SIZE; 902 desc->dataflags = CPMAC_OWN; 903 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; 904 + desc->next->prev = desc; 905 desc->hw_next = (u32)desc->next->mapping; 906 } 907 + 908 + priv->rx_head->prev->hw_next = (u32)0; 909 910 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, 911 dev->name, dev))) { ··· 912 goto fail_irq; 913 } 914 915 + atomic_set(&priv->reset_pending, 0); 916 INIT_WORK(&priv->reset_work, cpmac_hw_error); 917 cpmac_hw_start(dev); 918 ··· 1007 1008 if (phy_id == PHY_MAX_ADDR) { 1009 if (external_switch || dumb_switch) { 1010 + mdio_bus_id = 0; /* fixed phys bus */ 1011 + phy_id = pdev->id; 1012 } else { 1013 + dev_err(&pdev->dev, "no PHY present\n"); 1014 return -ENODEV; 1015 } 1016 } ··· 1064 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1065 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); 1066 1067 + priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id, 1068 + &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1069 if (IS_ERR(priv->phy)) { 1070 if (netif_msg_drv(priv)) 1071 printk(KERN_ERR "%s: Could not attach to PHY\n",
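The cpmac rework above gates the transmit path and the restart logic behind an atomic reset_pending counter so a scheduled reset work can run without racing new traffic. Below is a minimal kernel-module sketch of that gating pattern only; the demo_* names are illustrative, not cpmac's real structures.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>

struct demo_priv {
	atomic_t reset_pending;
	struct work_struct reset_work;
};

static struct demo_priv demo;

/* Runs from the shared workqueue; clears the gate once recovery is done. */
static void demo_reset_work(struct work_struct *work)
{
	struct demo_priv *priv = container_of(work, struct demo_priv, reset_work);

	/* ... stop hardware, clear rings, restart hardware ... */
	barrier();
	atomic_dec(&priv->reset_pending);
}

/* Hot path: refuse new work while a reset is queued or running. */
static int demo_xmit(struct demo_priv *priv)
{
	if (unlikely(atomic_read(&priv->reset_pending)))
		return -EBUSY;
	/* ... queue the packet ... */
	return 0;
}

/* Error path: raise the gate first, then schedule recovery. */
static void demo_trigger_reset(struct demo_priv *priv)
{
	atomic_inc(&priv->reset_pending);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);	/* work was already queued */
}

static int __init demo_init(void)
{
	atomic_set(&demo.reset_pending, 0);
	INIT_WORK(&demo.reset_work, demo_reset_work);
	demo_xmit(&demo);
	demo_trigger_reset(&demo);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_scheduled_work();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");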
+1 -1
drivers/net/dm9000.c
··· 903 if (netif_msg_ifdown(db)) 904 dev_dbg(db->dev, "shutting down %s\n", ndev->name); 905 906 - cancel_delayed_work(&db->phy_poll); 907 908 netif_stop_queue(ndev); 909 netif_carrier_off(ndev);
··· 903 if (netif_msg_ifdown(db)) 904 dev_dbg(db->dev, "shutting down %s\n", ndev->name); 905 906 + cancel_delayed_work_sync(&db->phy_poll); 907 908 netif_stop_queue(ndev); 909 netif_carrier_off(ndev);
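The dm9000 hunk switches close() from cancel_delayed_work() to cancel_delayed_work_sync(), so a PHY-poll handler that is already running (and may rearm itself) is waited for before teardown continues. A minimal module sketch of that shutdown ordering, with names that are illustrative rather than dm9000's:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work poll_work;
static int stopping;

/* Self-rearming poller, like the dm9000 PHY poll. */
static void poll_fn(struct work_struct *work)
{
	/* ... poll the PHY ... */
	if (!stopping)
		schedule_delayed_work(&poll_work, HZ);
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&poll_work, poll_fn);
	schedule_delayed_work(&poll_work, HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	stopping = 1;
	/* cancel_delayed_work() alone can return while poll_fn is still
	 * executing or just after it rearmed itself; the _sync variant
	 * waits for a running handler, closing that race before the
	 * device state it touches goes away. */
	cancel_delayed_work_sync(&poll_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");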
+2 -2
drivers/net/e1000e/netdev.c
··· 4201 struct e1000_adapter *adapter; 4202 struct e1000_hw *hw; 4203 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 4204 - unsigned long mmio_start, mmio_len; 4205 - unsigned long flash_start, flash_len; 4206 4207 static int cards_found; 4208 int i, err, pci_using_dac;
··· 4201 struct e1000_adapter *adapter; 4202 struct e1000_hw *hw; 4203 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 4204 + resource_size_t mmio_start, mmio_len; 4205 + resource_size_t flash_start, flash_len; 4206 4207 static int cards_found; 4208 int i, err, pci_using_dac;
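The e1000e change swaps unsigned long for resource_size_t, which stays 64-bit on 32-bit kernels built with large-resource support, so BAR addresses above 4 GB are not truncated. A minimal PCI probe sketch using that type (a hypothetical driver with placeholder IDs, not e1000e's real probe):

#include <linux/module.h>
#include <linux/pci.h>

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	resource_size_t mmio_start, mmio_len;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* pci_resource_start()/len() return resource_size_t; keeping the
	 * values in that type avoids truncation when the BAR sits above
	 * 4 GB on a 32-bit kernel with 64-bit resources. */
	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	dev_info(&pdev->dev, "BAR0 at 0x%llx, length 0x%llx\n",
		 (unsigned long long)mmio_start,
		 (unsigned long long)mmio_len);
	return 0;
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_disable_device(pdev);
}

static struct pci_device_id demo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder IDs for the sketch */
	{ 0, }
};

static struct pci_driver demo_driver = {
	.name = "resource-size-demo",
	.id_table = demo_ids,
	.probe = demo_probe,
	.remove = demo_remove,
};

static int __init demo_init(void)
{
	return pci_register_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
	pci_unregister_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");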
+2 -3
drivers/net/ehea/ehea_main.c
··· 2213 goto out; 2214 } 2215 2216 - memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter)); 2217 - 2218 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2219 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2220 if (hret != H_SUCCESS) ··· 3176 3177 static void ehea_shutdown_single_port(struct ehea_port *port) 3178 { 3179 unregister_netdev(port->netdev); 3180 ehea_unregister_port(port); 3181 kfree(port->mc_list); 3182 free_netdev(port->netdev); 3183 - port->adapter->active_ports--; 3184 } 3185 3186 static int ehea_setup_ports(struct ehea_adapter *adapter)
··· 2213 goto out; 2214 } 2215 2216 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2217 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2218 if (hret != H_SUCCESS) ··· 3178 3179 static void ehea_shutdown_single_port(struct ehea_port *port) 3180 { 3181 + struct ehea_adapter *adapter = port->adapter; 3182 unregister_netdev(port->netdev); 3183 ehea_unregister_port(port); 3184 kfree(port->mc_list); 3185 free_netdev(port->netdev); 3186 + adapter->active_ports--; 3187 } 3188 3189 static int ehea_setup_ports(struct ehea_adapter *adapter)
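The ehea hunk fixes a use-after-free: port->adapter was read after free_netdev(), and the port structure lives inside the netdev's private area, so it is gone by then. Caching the adapter pointer first is the whole fix. A standalone C sketch of the same ordering rule, with hypothetical adapter/port structures standing in for the kernel ones:

#include <stdio.h>
#include <stdlib.h>

struct adapter {
	int active_ports;
};

struct port {
	struct adapter *adapter;
	/* ... rest of the per-port state, owned by the netdev ... */
};

static void shutdown_port(struct port *port)
{
	/* Grab the parent pointer before the port (embedded in the
	 * netdev private area) is freed. */
	struct adapter *adapter = port->adapter;

	free(port);			/* models free_netdev(): port is gone */
	adapter->active_ports--;	/* safe: read via the saved pointer */
}

int main(void)
{
	struct adapter ad = { .active_ports = 1 };
	struct port *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->adapter = &ad;
	shutdown_port(p);
	printf("active ports: %d\n", ad.active_ports);
	return 0;
}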
+1
drivers/net/forcedeth.c
··· 5823 writel(txreg, base + NvRegTransmitPoll); 5824 5825 rc = nv_open(dev); 5826 out: 5827 return rc; 5828 }
··· 5823 writel(txreg, base + NvRegTransmitPoll); 5824 5825 rc = nv_open(dev); 5826 + nv_set_multicast(dev); 5827 out: 5828 return rc; 5829 }
+1 -1
drivers/net/fs_enet/fs_enet-main.c
··· 1093 if (registered) 1094 unregister_netdev(ndev); 1095 1096 - if (fep != NULL) { 1097 (*fep->ops->free_bd)(ndev); 1098 (*fep->ops->cleanup_data)(ndev); 1099 }
··· 1093 if (registered) 1094 unregister_netdev(ndev); 1095 1096 + if (fep && fep->ops) { 1097 (*fep->ops->free_bd)(ndev); 1098 (*fep->ops->cleanup_data)(ndev); 1099 }
+2 -1
drivers/net/hamradio/scc.c
··· 1340 case PARAM_RTS: 1341 if ( !(scc->wreg[R5] & RTS) ) 1342 { 1343 - if (arg != TX_OFF) 1344 scc_key_trx(scc, TX_ON); 1345 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); 1346 } else { 1347 if (arg == TX_OFF) 1348 {
··· 1340 case PARAM_RTS: 1341 if ( !(scc->wreg[R5] & RTS) ) 1342 { 1343 + if (arg != TX_OFF) { 1344 scc_key_trx(scc, TX_ON); 1345 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); 1346 + } 1347 } else { 1348 if (arg == TX_OFF) 1349 {
+1 -1
drivers/net/myri10ge/myri10ge.c
··· 631 return status; 632 } 633 634 - int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) 635 { 636 struct myri10ge_cmd cmd; 637 int status;
··· 631 return status; 632 } 633 634 + static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) 635 { 636 struct myri10ge_cmd cmd; 637 int status;
+3 -1
drivers/net/pcmcia/fmvj18x_cs.c
··· 391 cardtype = CONTEC; 392 break; 393 case MANFID_FUJITSU: 394 - if (link->card_id == PRODID_FUJITSU_MBH10302) 395 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), 396 but these are MBH10304 based card. */ 397 cardtype = MBH10304;
··· 391 cardtype = CONTEC; 392 break; 393 case MANFID_FUJITSU: 394 + if (link->conf.ConfigBase == 0x0fe0) 395 + cardtype = MBH10302; 396 + else if (link->card_id == PRODID_FUJITSU_MBH10302) 397 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), 398 but these are MBH10304 based card. */ 399 cardtype = MBH10304;
+8 -4
drivers/net/pcmcia/xirc2ps_cs.c
··· 1461 set_multicast_list(struct net_device *dev) 1462 { 1463 unsigned int ioaddr = dev->base_addr; 1464 1465 SelectPage(0x42); 1466 if (dev->flags & IFF_PROMISC) { /* snoop */ 1467 - PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */ 1468 } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { 1469 - PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */ 1470 } else if (dev->mc_count) { 1471 /* the chip can filter 9 addresses perfectly */ 1472 - PutByte(XIRCREG42_SWC1, 0x01); 1473 SelectPage(0x40); 1474 PutByte(XIRCREG40_CMD0, Offline); 1475 set_addresses(dev); 1476 SelectPage(0x40); 1477 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1478 } else { /* standard usage */ 1479 - PutByte(XIRCREG42_SWC1, 0x00); 1480 } 1481 SelectPage(0); 1482 } ··· 1725 1726 /* enable receiver and put the mac online */ 1727 if (full) { 1728 SelectPage(0x40); 1729 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1730 }
··· 1461 set_multicast_list(struct net_device *dev) 1462 { 1463 unsigned int ioaddr = dev->base_addr; 1464 + unsigned value; 1465 1466 SelectPage(0x42); 1467 + value = GetByte(XIRCREG42_SWC1) & 0xC0; 1468 + 1469 if (dev->flags & IFF_PROMISC) { /* snoop */ 1470 + PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ 1471 } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { 1472 + PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ 1473 } else if (dev->mc_count) { 1474 /* the chip can filter 9 addresses perfectly */ 1475 + PutByte(XIRCREG42_SWC1, value | 0x01); 1476 SelectPage(0x40); 1477 PutByte(XIRCREG40_CMD0, Offline); 1478 set_addresses(dev); 1479 SelectPage(0x40); 1480 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1481 } else { /* standard usage */ 1482 + PutByte(XIRCREG42_SWC1, value | 0x00); 1483 } 1484 SelectPage(0); 1485 } ··· 1722 1723 /* enable receiver and put the mac online */ 1724 if (full) { 1725 + set_multicast_list(dev); 1726 SelectPage(0x40); 1727 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1728 }
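The xirc2ps change turns the multicast update into a read-modify-write: SWC1 is read first and its top two bits are preserved instead of being cleared on every update. A standalone C sketch of that pattern against a simulated 8-bit register (the register model here is hypothetical, not the Xircom hardware):

#include <stdio.h>

/* Simulated 8-bit register: bits 7:6 hold unrelated configuration that
 * must survive multicast updates; bits 2:0 are the filter-mode bits. */
static unsigned char swc1 = 0x80;	/* upper bits already programmed */

static unsigned char get_byte(void)   { return swc1; }
static void put_byte(unsigned char v) { swc1 = v; }

static void set_filter_mode(unsigned char mode)
{
	/* Read-modify-write: keep bits 7:6, replace only the low bits. */
	unsigned char value = get_byte() & 0xC0;

	put_byte(value | (mode & 0x07));
}

int main(void)
{
	set_filter_mode(0x02);			/* e.g. "all multicast" */
	printf("SWC1 = 0x%02x\n", swc1);	/* prints 0x82: upper bits kept */
	return 0;
}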
+2 -2
drivers/net/pcnet32.c
··· 325 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 326 void *ptr); 327 static void pcnet32_purge_tx_ring(struct net_device *dev); 328 - static int pcnet32_alloc_ring(struct net_device *dev, char *name); 329 static void pcnet32_free_ring(struct net_device *dev); 330 static void pcnet32_check_media(struct net_device *dev, int verbose); 331 ··· 1983 } 1984 1985 /* if any allocation fails, caller must also call pcnet32_free_ring */ 1986 - static int pcnet32_alloc_ring(struct net_device *dev, char *name) 1987 { 1988 struct pcnet32_private *lp = netdev_priv(dev); 1989
··· 325 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 326 void *ptr); 327 static void pcnet32_purge_tx_ring(struct net_device *dev); 328 + static int pcnet32_alloc_ring(struct net_device *dev, const char *name); 329 static void pcnet32_free_ring(struct net_device *dev); 330 static void pcnet32_check_media(struct net_device *dev, int verbose); 331 ··· 1983 } 1984 1985 /* if any allocation fails, caller must also call pcnet32_free_ring */ 1986 + static int pcnet32_alloc_ring(struct net_device *dev, const char *name) 1987 { 1988 struct pcnet32_private *lp = netdev_priv(dev); 1989
+1 -1
drivers/net/phy/Kconfig
··· 5 menuconfig PHYLIB 6 tristate "PHY Device support and infrastructure" 7 depends on !S390 8 - depends on NET_ETHERNET && (BROKEN || !S390) 9 help 10 Ethernet controllers are usually attached to PHY 11 devices. This option provides infrastructure for
··· 5 menuconfig PHYLIB 6 tristate "PHY Device support and infrastructure" 7 depends on !S390 8 + depends on NET_ETHERNET 9 help 10 Ethernet controllers are usually attached to PHY 11 devices. This option provides infrastructure for
+1
drivers/net/phy/phy_device.c
··· 207 208 return 0; 209 } 210 211 /** 212 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
··· 207 208 return 0; 209 } 210 + EXPORT_SYMBOL(get_phy_id); 211 212 /** 213 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
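The phy_device.c hunk adds EXPORT_SYMBOL(get_phy_id) so code built as separate modules can call it. A minimal sketch of exporting a function from one module and calling it from another; the demo_* names are hypothetical and the two source files are shown in one listing for brevity:

/* --- provider.c: exports a helper for other modules --- */
#include <linux/module.h>

int demo_get_id(int bus, int addr)
{
	/* ... read the ID registers ... */
	return (bus << 8) | addr;
}
EXPORT_SYMBOL(demo_get_id);

MODULE_LICENSE("GPL");

/* --- consumer.c: a separate module using the exported symbol --- */
#include <linux/module.h>

extern int demo_get_id(int bus, int addr);

static int __init consumer_init(void)
{
	printk(KERN_INFO "id = %d\n", demo_get_id(0, 1));
	return 0;
}

static void __exit consumer_exit(void)
{
}

module_init(consumer_init);
module_exit(consumer_exit);
MODULE_LICENSE("GPL");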
+1 -1
drivers/net/s2io-regs.h
··· 250 u64 tx_mat0_n[0x8]; 251 #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) 252 253 - u8 unused_1[0x8]; 254 u64 stat_byte_cnt; 255 #define STAT_BC(n) vBIT(n,4,12) 256
··· 250 u64 tx_mat0_n[0x8]; 251 #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) 252 253 + u64 xmsi_mask_reg; 254 u64 stat_byte_cnt; 255 #define STAT_BC(n) vBIT(n,4,12) 256
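The s2io-regs.h hunk names an 8-byte pad as the real xmsi_mask_reg register; that is only safe because the new u64 occupies exactly the bytes the old u8 unused_1[0x8] did, so nothing after it moves. A standalone C sketch of guarding such a layout change with offsetof checks (in-kernel code would typically use BUILD_BUG_ON for the same assertions); the structures are simplified stand-ins, not the full s2io register block:

#include <stdio.h>
#include <stddef.h>

/* Old layout: an anonymous 8-byte pad where a register really lives. */
struct regs_old {
	unsigned long long tx_mat0_n[0x8];
	unsigned char unused_1[0x8];
	unsigned long long stat_byte_cnt;
};

/* New layout: the pad is named as the xmsi mask register. */
struct regs_new {
	unsigned long long tx_mat0_n[0x8];
	unsigned long long xmsi_mask_reg;
	unsigned long long stat_byte_cnt;
};

/* The swap is only safe if nothing in the block moves. */
#define check(expr) do { if (!(expr)) { puts("layout mismatch: " #expr); return 1; } } while (0)

int main(void)
{
	check(offsetof(struct regs_new, xmsi_mask_reg) ==
	      offsetof(struct regs_old, unused_1));
	check(offsetof(struct regs_new, stat_byte_cnt) ==
	      offsetof(struct regs_old, stat_byte_cnt));
	check(sizeof(struct regs_new) == sizeof(struct regs_old));
	puts("register block layout preserved");
	return 0;
}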
+293 -201
drivers/net/s2io.c
··· 86 #include "s2io.h" 87 #include "s2io-regs.h" 88 89 - #define DRV_VERSION "2.0.26.23" 90 91 /* S2io Driver name & version. */ 92 static char s2io_driver_name[] = "Neterion"; ··· 1113 struct pci_dev *tdev = NULL; 1114 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { 1115 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { 1116 - if (tdev->bus == s2io_pdev->bus->parent) 1117 pci_dev_put(tdev); 1118 return 1; 1119 } 1120 } 1121 return 0; ··· 1220 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1221 TTI_DATA1_MEM_TX_URNG_C(0x30) | 1222 TTI_DATA1_MEM_TX_TIMER_AC_EN; 1223 - 1224 - if (use_continuous_tx_intrs && (link == LINK_UP)) 1225 - val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; 1226 writeq(val64, &bar0->tti_data1_mem); 1227 1228 - val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1229 - TTI_DATA2_MEM_TX_UFC_B(0x20) | 1230 - TTI_DATA2_MEM_TX_UFC_C(0x40) | 1231 - TTI_DATA2_MEM_TX_UFC_D(0x80); 1232 1233 writeq(val64, &bar0->tti_data2_mem); 1234 ··· 2832 } 2833 } 2834 2835 /** 2836 * s2io_poll - Rx interrupt handler for NAPI support 2837 * @napi : pointer to the napi structure. ··· 2854 * 0 on success and 1 if there are No Rx packets to be processed. 2855 */ 2856 2857 - static int s2io_poll(struct napi_struct *napi, int budget) 2858 { 2859 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); 2860 struct net_device *dev = nic->dev; 2861 - int pkt_cnt = 0, org_pkts_to_process; 2862 - struct mac_info *mac_control; 2863 struct config_param *config; 2864 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2865 - int i; 2866 2867 - mac_control = &nic->mac_control; 2868 config = &nic->config; 2869 2870 - nic->pkts_to_process = budget; 2871 - org_pkts_to_process = nic->pkts_to_process; 2872 - 2873 - writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 2874 - readl(&bar0->rx_traffic_int); 2875 2876 for (i = 0; i < config->rx_ring_num; i++) { 2877 - rx_intr_handler(&mac_control->rings[i]); 2878 - pkt_cnt = org_pkts_to_process - nic->pkts_to_process; 2879 - if (!nic->pkts_to_process) { 2880 - /* Quota for the current iteration has been met */ 2881 - goto no_rx; 2882 - } 2883 - } 2884 - 2885 - netif_rx_complete(dev, napi); 2886 - 2887 - for (i = 0; i < config->rx_ring_num; i++) { 2888 - if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2889 - DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2890 - DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2891 break; 2892 - } 2893 } 2894 - /* Re enable the Rx interrupts. */ 2895 - writeq(0x0, &bar0->rx_traffic_mask); 2896 - readl(&bar0->rx_traffic_mask); 2897 - return pkt_cnt; 2898 - 2899 - no_rx: 2900 - for (i = 0; i < config->rx_ring_num; i++) { 2901 - if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2902 - DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2903 - DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2904 - break; 2905 - } 2906 } 2907 - return pkt_cnt; 2908 } 2909 2910 #ifdef CONFIG_NET_POLL_CONTROLLER ··· 2961 2962 /* check for received packet and indicate up to network */ 2963 for (i = 0; i < config->rx_ring_num; i++) 2964 - rx_intr_handler(&mac_control->rings[i]); 2965 2966 for (i = 0; i < config->rx_ring_num; i++) { 2967 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { ··· 2977 2978 /** 2979 * rx_intr_handler - Rx interrupt handler 2980 - * @nic: device private variable. 
2981 * Description: 2982 * If the interrupt is because of a received frame or if the 2983 * receive ring contains fresh as yet un-processed frames,this function is ··· 2986 * stopped and sends the skb to the OSM's Rx handler and then increments 2987 * the offset. 2988 * Return Value: 2989 - * NONE. 2990 */ 2991 - static void rx_intr_handler(struct ring_info *ring_data) 2992 { 2993 int get_block, put_block; 2994 struct rx_curr_get_info get_info, put_info; 2995 struct RxD_t *rxdp; 2996 struct sk_buff *skb; 2997 - int pkt_cnt = 0; 2998 int i; 2999 struct RxD1* rxdp1; 3000 struct RxD3* rxdp3; ··· 3021 DBG_PRINT(ERR_DBG, "%s: The skb is ", 3022 ring_data->dev->name); 3023 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 3024 - return; 3025 } 3026 if (ring_data->rxd_mode == RXD_MODE_1) { 3027 rxdp1 = (struct RxD1*)rxdp; ··· 3058 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 3059 } 3060 3061 - if(ring_data->nic->config.napi){ 3062 - ring_data->nic->pkts_to_process -= 1; 3063 - if (!ring_data->nic->pkts_to_process) 3064 break; 3065 } 3066 pkt_cnt++; ··· 3079 } 3080 } 3081 } 3082 } 3083 3084 /** ··· 3776 { 3777 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3778 u64 val64; 3779 - int i; 3780 3781 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3782 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3783 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3784 - val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6)); 3785 writeq(val64, &bar0->xmsi_access); 3786 - if (wait_for_msix_trans(nic, i)) { 3787 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3788 continue; 3789 } ··· 3799 { 3800 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3801 u64 val64, addr, data; 3802 - int i; 3803 3804 /* Store and display */ 3805 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3806 - val64 = (s2BIT(15) | vBIT(i, 26, 6)); 3807 writeq(val64, &bar0->xmsi_access); 3808 - if (wait_for_msix_trans(nic, i)) { 3809 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3810 continue; 3811 } ··· 3825 static int s2io_enable_msi_x(struct s2io_nic *nic) 3826 { 3827 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3828 - u64 tx_mat, rx_mat; 3829 u16 msi_control; /* Temp variable */ 3830 int ret, i, j, msix_indx = 1; 3831 3832 - nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), 3833 GFP_KERNEL); 3834 if (!nic->entries) { 3835 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ ··· 3838 return -ENOMEM; 3839 } 3840 nic->mac_control.stats_info->sw_stat.mem_allocated 3841 - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3842 3843 nic->s2io_entries = 3844 - kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), 3845 GFP_KERNEL); 3846 if (!nic->s2io_entries) { 3847 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", ··· 3851 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3852 kfree(nic->entries); 3853 nic->mac_control.stats_info->sw_stat.mem_freed 3854 - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3855 return -ENOMEM; 3856 } 3857 nic->mac_control.stats_info->sw_stat.mem_allocated 3858 - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 3859 3860 - for (i=0; i< MAX_REQUESTED_MSI_X; i++) { 3861 - nic->entries[i].entry = i; 3862 - nic->s2io_entries[i].entry = i; 3863 nic->s2io_entries[i].arg = NULL; 3864 nic->s2io_entries[i].in_use = 0; 3865 } 3866 3867 - tx_mat = readq(&bar0->tx_mat0_n[0]); 3868 - for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) { 3869 - tx_mat |= TX_MAT_SET(i, msix_indx); 3870 - nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i]; 3871 - 
nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE; 3872 - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; 3873 - } 3874 - writeq(tx_mat, &bar0->tx_mat0_n[0]); 3875 - 3876 rx_mat = readq(&bar0->rx_mat); 3877 - for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { 3878 rx_mat |= RX_MAT_SET(j, msix_indx); 3879 - nic->s2io_entries[msix_indx].arg 3880 - = &nic->mac_control.rings[j]; 3881 - nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; 3882 - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; 3883 } 3884 writeq(rx_mat, &bar0->rx_mat); 3885 3886 - nic->avail_msix_vectors = 0; 3887 - ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); 3888 /* We fail init if error or we get less vectors than min required */ 3889 - if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) { 3890 - nic->avail_msix_vectors = ret; 3891 - ret = pci_enable_msix(nic->pdev, nic->entries, ret); 3892 - } 3893 if (ret) { 3894 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); 3895 kfree(nic->entries); 3896 nic->mac_control.stats_info->sw_stat.mem_freed 3897 - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3898 kfree(nic->s2io_entries); 3899 nic->mac_control.stats_info->sw_stat.mem_freed 3900 - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 3901 nic->entries = NULL; 3902 nic->s2io_entries = NULL; 3903 - nic->avail_msix_vectors = 0; 3904 return -ENOMEM; 3905 } 3906 - if (!nic->avail_msix_vectors) 3907 - nic->avail_msix_vectors = MAX_REQUESTED_MSI_X; 3908 3909 /* 3910 * To enable MSI-X, MSI also needs to be enabled, due to a bug ··· 3968 int i; 3969 u16 msi_control; 3970 3971 - for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { 3972 if (sp->s2io_entries[i].in_use == 3973 MSIX_REGISTERED_SUCCESS) { 3974 int vector = sp->entries[i].vector; ··· 4024 netif_carrier_off(dev); 4025 sp->last_link_state = 0; 4026 4027 - if (sp->config.intr_type == MSI_X) { 4028 - int ret = s2io_enable_msi_x(sp); 4029 - 4030 - if (!ret) { 4031 - ret = s2io_test_msi(sp); 4032 - /* rollback MSI-X, will re-enable during add_isr() */ 4033 - remove_msix_isr(sp); 4034 - } 4035 - if (ret) { 4036 - 4037 - DBG_PRINT(ERR_DBG, 4038 - "%s: MSI-X requested but failed to enable\n", 4039 - dev->name); 4040 - sp->config.intr_type = INTA; 4041 - } 4042 - } 4043 - 4044 - /* NAPI doesn't work well with MSI(X) */ 4045 - if (sp->config.intr_type != INTA) { 4046 - if(sp->config.napi) 4047 - sp->config.napi = 0; 4048 - } 4049 - 4050 /* Initialize H/W and enable interrupts */ 4051 err = s2io_card_up(sp); 4052 if (err) { ··· 4046 if (sp->entries) { 4047 kfree(sp->entries); 4048 sp->mac_control.stats_info->sw_stat.mem_freed 4049 - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 4050 } 4051 if (sp->s2io_entries) { 4052 kfree(sp->s2io_entries); 4053 sp->mac_control.stats_info->sw_stat.mem_freed 4054 - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 4055 } 4056 } 4057 return err; ··· 4353 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4354 } 4355 4356 - static int s2io_chk_rx_buffers(struct ring_info *ring) 4357 - { 4358 - if (fill_rx_buffers(ring) == -ENOMEM) { 4359 - DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); 4360 - DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); 4361 - } 4362 - return 0; 4363 - } 4364 - 4365 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4366 { 4367 struct ring_info *ring = (struct ring_info *)dev_id; 4368 struct s2io_nic *sp = ring->nic; 4369 4370 - if (!is_s2io_card_up(sp)) 4371 return IRQ_HANDLED; 4372 4373 - rx_intr_handler(ring); 4374 - 
s2io_chk_rx_buffers(ring); 4375 4376 return IRQ_HANDLED; 4377 } 4378 4379 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4380 { 4381 - struct fifo_info *fifo = (struct fifo_info *)dev_id; 4382 - struct s2io_nic *sp = fifo->nic; 4383 4384 - if (!is_s2io_card_up(sp)) 4385 return IRQ_HANDLED; 4386 4387 - tx_intr_handler(fifo); 4388 return IRQ_HANDLED; 4389 } 4390 static void s2io_txpic_intr_handle(struct s2io_nic *sp) 4391 { 4392 struct XENA_dev_config __iomem *bar0 = sp->bar0; ··· 4812 4813 if (config->napi) { 4814 if (reason & GEN_INTR_RXTRAFFIC) { 4815 - if (likely(netif_rx_schedule_prep(dev, 4816 - &sp->napi))) { 4817 - __netif_rx_schedule(dev, &sp->napi); 4818 - writeq(S2IO_MINUS_ONE, 4819 - &bar0->rx_traffic_mask); 4820 - } else 4821 - writeq(S2IO_MINUS_ONE, 4822 - &bar0->rx_traffic_int); 4823 } 4824 } else { 4825 /* ··· 4827 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4828 4829 for (i = 0; i < config->rx_ring_num; i++) 4830 - rx_intr_handler(&mac_control->rings[i]); 4831 } 4832 4833 /* ··· 7030 7031 /* After proper initialization of H/W, register ISR */ 7032 if (sp->config.intr_type == MSI_X) { 7033 - int i, msix_tx_cnt=0,msix_rx_cnt=0; 7034 7035 - for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { 7036 - if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { 7037 - sprintf(sp->desc[i], "%s:MSI-X-%d-TX", 7038 dev->name, i); 7039 - err = request_irq(sp->entries[i].vector, 7040 - s2io_msix_fifo_handle, 0, sp->desc[i], 7041 - sp->s2io_entries[i].arg); 7042 - /* If either data or addr is zero print it */ 7043 - if(!(sp->msix_info[i].addr && 7044 - sp->msix_info[i].data)) { 7045 - DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7046 - "Data:0x%llx\n",sp->desc[i], 7047 - (unsigned long long) 7048 - sp->msix_info[i].addr, 7049 - (unsigned long long) 7050 - sp->msix_info[i].data); 7051 - } else { 7052 - msix_tx_cnt++; 7053 } 7054 - } else { 7055 - sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 7056 - dev->name, i); 7057 - err = request_irq(sp->entries[i].vector, 7058 - s2io_msix_ring_handle, 0, sp->desc[i], 7059 - sp->s2io_entries[i].arg); 7060 - /* If either data or addr is zero print it */ 7061 - if(!(sp->msix_info[i].addr && 7062 sp->msix_info[i].data)) { 7063 - DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7064 - "Data:0x%llx\n",sp->desc[i], 7065 (unsigned long long) 7066 sp->msix_info[i].addr, 7067 (unsigned long long) 7068 - sp->msix_info[i].data); 7069 - } else { 7070 msix_rx_cnt++; 7071 } 7072 } 7073 - if (err) { 7074 - remove_msix_isr(sp); 7075 - DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " 7076 - "failed\n", dev->name, i); 7077 - DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n", 7078 - dev->name); 7079 - sp->config.intr_type = INTA; 7080 - break; 7081 - } 7082 - sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; 7083 } 7084 if (!err) { 7085 - printk(KERN_INFO "MSI-X-TX %d entries enabled\n", 7086 - msix_tx_cnt); 7087 printk(KERN_INFO "MSI-X-RX %d entries enabled\n", 7088 - msix_rx_cnt); 7089 } 7090 } 7091 if (sp->config.intr_type == INTA) { ··· 7126 clear_bit(__S2IO_STATE_CARD_UP, &sp->state); 7127 7128 /* Disable napi */ 7129 - if (config->napi) 7130 - napi_disable(&sp->napi); 7131 7132 /* disable Tx and Rx traffic on the NIC */ 7133 if (do_io) ··· 7226 } 7227 7228 /* Initialise napi */ 7229 - if (config->napi) 7230 - napi_enable(&sp->napi); 7231 7232 /* Maintain the state prior to the open */ 7233 if (sp->promisc_flg) ··· 7277 /* Enable select interrupts */ 7278 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); 7279 if (sp->config.intr_type != INTA) 7280 - 
en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); 7281 else { 7282 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 7283 interruptible |= TX_PIC_INTR; ··· 7675 rx_ring_num = MAX_RX_RINGS; 7676 } 7677 7678 - if (*dev_intr_type != INTA) 7679 - napi = 0; 7680 - 7681 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { 7682 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. " 7683 "Defaulting to INTA\n"); ··· 7975 * will use eth_mac_addr() for dev->set_mac_address 7976 * mac address will be set every time dev->open() is called 7977 */ 7978 - netif_napi_add(dev, &sp->napi, s2io_poll, 32); 7979 - 7980 #ifdef CONFIG_NET_POLL_CONTROLLER 7981 dev->poll_controller = s2io_netpoll; 7982 #endif ··· 8016 ret = -EBADSLT; 8017 goto set_swap_failed; 8018 } 8019 } 8020 8021 /* Not needed for Herc */ ··· 8093 8094 /* store mac addresses from CAM to s2io_nic structure */ 8095 do_s2io_store_unicast_mc(sp); 8096 8097 /* Store the values of the MSIX table in the s2io_nic structure */ 8098 store_xmsi_data(sp); ··· 8164 break; 8165 } 8166 8167 - if (napi) 8168 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 8169 8170 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8171 sp->config.tx_fifo_num);
··· 86 #include "s2io.h" 87 #include "s2io-regs.h" 88 89 + #define DRV_VERSION "2.0.26.24" 90 91 /* S2io Driver name & version. */ 92 static char s2io_driver_name[] = "Neterion"; ··· 1113 struct pci_dev *tdev = NULL; 1114 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { 1115 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { 1116 + if (tdev->bus == s2io_pdev->bus->parent) { 1117 pci_dev_put(tdev); 1118 return 1; 1119 + } 1120 } 1121 } 1122 return 0; ··· 1219 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1220 TTI_DATA1_MEM_TX_URNG_C(0x30) | 1221 TTI_DATA1_MEM_TX_TIMER_AC_EN; 1222 + if (i == 0) 1223 + if (use_continuous_tx_intrs && (link == LINK_UP)) 1224 + val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; 1225 writeq(val64, &bar0->tti_data1_mem); 1226 1227 + if (nic->config.intr_type == MSI_X) { 1228 + val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1229 + TTI_DATA2_MEM_TX_UFC_B(0x100) | 1230 + TTI_DATA2_MEM_TX_UFC_C(0x200) | 1231 + TTI_DATA2_MEM_TX_UFC_D(0x300); 1232 + } else { 1233 + if ((nic->config.tx_steering_type == 1234 + TX_DEFAULT_STEERING) && 1235 + (config->tx_fifo_num > 1) && 1236 + (i >= nic->udp_fifo_idx) && 1237 + (i < (nic->udp_fifo_idx + 1238 + nic->total_udp_fifos))) 1239 + val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | 1240 + TTI_DATA2_MEM_TX_UFC_B(0x80) | 1241 + TTI_DATA2_MEM_TX_UFC_C(0x100) | 1242 + TTI_DATA2_MEM_TX_UFC_D(0x120); 1243 + else 1244 + val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1245 + TTI_DATA2_MEM_TX_UFC_B(0x20) | 1246 + TTI_DATA2_MEM_TX_UFC_C(0x40) | 1247 + TTI_DATA2_MEM_TX_UFC_D(0x80); 1248 + } 1249 1250 writeq(val64, &bar0->tti_data2_mem); 1251 ··· 2813 } 2814 } 2815 2816 + static int s2io_chk_rx_buffers(struct ring_info *ring) 2817 + { 2818 + if (fill_rx_buffers(ring) == -ENOMEM) { 2819 + DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); 2820 + DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); 2821 + } 2822 + return 0; 2823 + } 2824 + 2825 /** 2826 * s2io_poll - Rx interrupt handler for NAPI support 2827 * @napi : pointer to the napi structure. ··· 2826 * 0 on success and 1 if there are No Rx packets to be processed. 2827 */ 2828 2829 + static int s2io_poll_msix(struct napi_struct *napi, int budget) 2830 + { 2831 + struct ring_info *ring = container_of(napi, struct ring_info, napi); 2832 + struct net_device *dev = ring->dev; 2833 + struct config_param *config; 2834 + struct mac_info *mac_control; 2835 + int pkts_processed = 0; 2836 + u8 *addr = NULL, val8 = 0; 2837 + struct s2io_nic *nic = dev->priv; 2838 + struct XENA_dev_config __iomem *bar0 = nic->bar0; 2839 + int budget_org = budget; 2840 + 2841 + config = &nic->config; 2842 + mac_control = &nic->mac_control; 2843 + 2844 + if (unlikely(!is_s2io_card_up(nic))) 2845 + return 0; 2846 + 2847 + pkts_processed = rx_intr_handler(ring, budget); 2848 + s2io_chk_rx_buffers(ring); 2849 + 2850 + if (pkts_processed < budget_org) { 2851 + netif_rx_complete(dev, napi); 2852 + /*Re Enable MSI-Rx Vector*/ 2853 + addr = (u8 *)&bar0->xmsi_mask_reg; 2854 + addr += 7 - ring->ring_no; 2855 + val8 = (ring->ring_no == 0) ? 
0x3f : 0xbf; 2856 + writeb(val8, addr); 2857 + val8 = readb(addr); 2858 + } 2859 + return pkts_processed; 2860 + } 2861 + static int s2io_poll_inta(struct napi_struct *napi, int budget) 2862 { 2863 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); 2864 + struct ring_info *ring; 2865 struct net_device *dev = nic->dev; 2866 struct config_param *config; 2867 + struct mac_info *mac_control; 2868 + int pkts_processed = 0; 2869 + int ring_pkts_processed, i; 2870 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2871 + int budget_org = budget; 2872 2873 config = &nic->config; 2874 + mac_control = &nic->mac_control; 2875 2876 + if (unlikely(!is_s2io_card_up(nic))) 2877 + return 0; 2878 2879 for (i = 0; i < config->rx_ring_num; i++) { 2880 + ring = &mac_control->rings[i]; 2881 + ring_pkts_processed = rx_intr_handler(ring, budget); 2882 + s2io_chk_rx_buffers(ring); 2883 + pkts_processed += ring_pkts_processed; 2884 + budget -= ring_pkts_processed; 2885 + if (budget <= 0) 2886 break; 2887 } 2888 + if (pkts_processed < budget_org) { 2889 + netif_rx_complete(dev, napi); 2890 + /* Re enable the Rx interrupts for the ring */ 2891 + writeq(0, &bar0->rx_traffic_mask); 2892 + readl(&bar0->rx_traffic_mask); 2893 } 2894 + return pkts_processed; 2895 } 2896 2897 #ifdef CONFIG_NET_POLL_CONTROLLER ··· 2918 2919 /* check for received packet and indicate up to network */ 2920 for (i = 0; i < config->rx_ring_num; i++) 2921 + rx_intr_handler(&mac_control->rings[i], 0); 2922 2923 for (i = 0; i < config->rx_ring_num; i++) { 2924 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { ··· 2934 2935 /** 2936 * rx_intr_handler - Rx interrupt handler 2937 + * @ring_info: per ring structure. 2938 + * @budget: budget for napi processing. 2939 * Description: 2940 * If the interrupt is because of a received frame or if the 2941 * receive ring contains fresh as yet un-processed frames,this function is ··· 2942 * stopped and sends the skb to the OSM's Rx handler and then increments 2943 * the offset. 2944 * Return Value: 2945 + * No. of napi packets processed. 2946 */ 2947 + static int rx_intr_handler(struct ring_info *ring_data, int budget) 2948 { 2949 int get_block, put_block; 2950 struct rx_curr_get_info get_info, put_info; 2951 struct RxD_t *rxdp; 2952 struct sk_buff *skb; 2953 + int pkt_cnt = 0, napi_pkts = 0; 2954 int i; 2955 struct RxD1* rxdp1; 2956 struct RxD3* rxdp3; ··· 2977 DBG_PRINT(ERR_DBG, "%s: The skb is ", 2978 ring_data->dev->name); 2979 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 2980 + return 0; 2981 } 2982 if (ring_data->rxd_mode == RXD_MODE_1) { 2983 rxdp1 = (struct RxD1*)rxdp; ··· 3014 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 3015 } 3016 3017 + if (ring_data->nic->config.napi) { 3018 + budget--; 3019 + napi_pkts++; 3020 + if (!budget) 3021 break; 3022 } 3023 pkt_cnt++; ··· 3034 } 3035 } 3036 } 3037 + return(napi_pkts); 3038 } 3039 3040 /** ··· 3730 { 3731 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3732 u64 val64; 3733 + int i, msix_index; 3734 + 3735 + 3736 + if (nic->device_type == XFRAME_I_DEVICE) 3737 + return; 3738 3739 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3740 + msix_index = (i) ? 
((i-1) * 8 + 1): 0; 3741 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3742 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3743 + val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); 3744 writeq(val64, &bar0->xmsi_access); 3745 + if (wait_for_msix_trans(nic, msix_index)) { 3746 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3747 continue; 3748 } ··· 3748 { 3749 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3750 u64 val64, addr, data; 3751 + int i, msix_index; 3752 + 3753 + if (nic->device_type == XFRAME_I_DEVICE) 3754 + return; 3755 3756 /* Store and display */ 3757 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3758 + msix_index = (i) ? ((i-1) * 8 + 1): 0; 3759 + val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); 3760 writeq(val64, &bar0->xmsi_access); 3761 + if (wait_for_msix_trans(nic, msix_index)) { 3762 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3763 continue; 3764 } ··· 3770 static int s2io_enable_msi_x(struct s2io_nic *nic) 3771 { 3772 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3773 + u64 rx_mat; 3774 u16 msi_control; /* Temp variable */ 3775 int ret, i, j, msix_indx = 1; 3776 3777 + nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry), 3778 GFP_KERNEL); 3779 if (!nic->entries) { 3780 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ ··· 3783 return -ENOMEM; 3784 } 3785 nic->mac_control.stats_info->sw_stat.mem_allocated 3786 + += (nic->num_entries * sizeof(struct msix_entry)); 3787 + 3788 + memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry)); 3789 3790 nic->s2io_entries = 3791 + kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry), 3792 GFP_KERNEL); 3793 if (!nic->s2io_entries) { 3794 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", ··· 3794 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3795 kfree(nic->entries); 3796 nic->mac_control.stats_info->sw_stat.mem_freed 3797 + += (nic->num_entries * sizeof(struct msix_entry)); 3798 return -ENOMEM; 3799 } 3800 nic->mac_control.stats_info->sw_stat.mem_allocated 3801 + += (nic->num_entries * sizeof(struct s2io_msix_entry)); 3802 + memset(nic->s2io_entries, 0, 3803 + nic->num_entries * sizeof(struct s2io_msix_entry)); 3804 3805 + nic->entries[0].entry = 0; 3806 + nic->s2io_entries[0].entry = 0; 3807 + nic->s2io_entries[0].in_use = MSIX_FLG; 3808 + nic->s2io_entries[0].type = MSIX_ALARM_TYPE; 3809 + nic->s2io_entries[0].arg = &nic->mac_control.fifos; 3810 + 3811 + for (i = 1; i < nic->num_entries; i++) { 3812 + nic->entries[i].entry = ((i - 1) * 8) + 1; 3813 + nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; 3814 nic->s2io_entries[i].arg = NULL; 3815 nic->s2io_entries[i].in_use = 0; 3816 } 3817 3818 rx_mat = readq(&bar0->rx_mat); 3819 + for (j = 0; j < nic->config.rx_ring_num; j++) { 3820 rx_mat |= RX_MAT_SET(j, msix_indx); 3821 + nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; 3822 + nic->s2io_entries[j+1].type = MSIX_RING_TYPE; 3823 + nic->s2io_entries[j+1].in_use = MSIX_FLG; 3824 + msix_indx += 8; 3825 } 3826 writeq(rx_mat, &bar0->rx_mat); 3827 + readq(&bar0->rx_mat); 3828 3829 + ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); 3830 /* We fail init if error or we get less vectors than min required */ 3831 if (ret) { 3832 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); 3833 kfree(nic->entries); 3834 nic->mac_control.stats_info->sw_stat.mem_freed 3835 + += (nic->num_entries * sizeof(struct msix_entry)); 3836 kfree(nic->s2io_entries); 3837 nic->mac_control.stats_info->sw_stat.mem_freed 3838 + += 
(nic->num_entries * sizeof(struct s2io_msix_entry)); 3839 nic->entries = NULL; 3840 nic->s2io_entries = NULL; 3841 return -ENOMEM; 3842 } 3843 3844 /* 3845 * To enable MSI-X, MSI also needs to be enabled, due to a bug ··· 3919 int i; 3920 u16 msi_control; 3921 3922 + for (i = 0; i < sp->num_entries; i++) { 3923 if (sp->s2io_entries[i].in_use == 3924 MSIX_REGISTERED_SUCCESS) { 3925 int vector = sp->entries[i].vector; ··· 3975 netif_carrier_off(dev); 3976 sp->last_link_state = 0; 3977 3978 /* Initialize H/W and enable interrupts */ 3979 err = s2io_card_up(sp); 3980 if (err) { ··· 4020 if (sp->entries) { 4021 kfree(sp->entries); 4022 sp->mac_control.stats_info->sw_stat.mem_freed 4023 + += (sp->num_entries * sizeof(struct msix_entry)); 4024 } 4025 if (sp->s2io_entries) { 4026 kfree(sp->s2io_entries); 4027 sp->mac_control.stats_info->sw_stat.mem_freed 4028 + += (sp->num_entries * sizeof(struct s2io_msix_entry)); 4029 } 4030 } 4031 return err; ··· 4327 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4328 } 4329 4330 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4331 { 4332 struct ring_info *ring = (struct ring_info *)dev_id; 4333 struct s2io_nic *sp = ring->nic; 4334 + struct XENA_dev_config __iomem *bar0 = sp->bar0; 4335 + struct net_device *dev = sp->dev; 4336 4337 + if (unlikely(!is_s2io_card_up(sp))) 4338 return IRQ_HANDLED; 4339 4340 + if (sp->config.napi) { 4341 + u8 *addr = NULL, val8 = 0; 4342 + 4343 + addr = (u8 *)&bar0->xmsi_mask_reg; 4344 + addr += (7 - ring->ring_no); 4345 + val8 = (ring->ring_no == 0) ? 0x7f : 0xff; 4346 + writeb(val8, addr); 4347 + val8 = readb(addr); 4348 + netif_rx_schedule(dev, &ring->napi); 4349 + } else { 4350 + rx_intr_handler(ring, 0); 4351 + s2io_chk_rx_buffers(ring); 4352 + } 4353 4354 return IRQ_HANDLED; 4355 } 4356 4357 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4358 { 4359 + int i; 4360 + struct fifo_info *fifos = (struct fifo_info *)dev_id; 4361 + struct s2io_nic *sp = fifos->nic; 4362 + struct XENA_dev_config __iomem *bar0 = sp->bar0; 4363 + struct config_param *config = &sp->config; 4364 + u64 reason; 4365 4366 + if (unlikely(!is_s2io_card_up(sp))) 4367 + return IRQ_NONE; 4368 + 4369 + reason = readq(&bar0->general_int_status); 4370 + if (unlikely(reason == S2IO_MINUS_ONE)) 4371 + /* Nothing much can be done. 
Get out */ 4372 return IRQ_HANDLED; 4373 4374 + writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); 4375 + 4376 + if (reason & GEN_INTR_TXTRAFFIC) 4377 + writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); 4378 + 4379 + for (i = 0; i < config->tx_fifo_num; i++) 4380 + tx_intr_handler(&fifos[i]); 4381 + 4382 + writeq(sp->general_int_mask, &bar0->general_int_mask); 4383 + readl(&bar0->general_int_status); 4384 + 4385 return IRQ_HANDLED; 4386 } 4387 + 4388 static void s2io_txpic_intr_handle(struct s2io_nic *sp) 4389 { 4390 struct XENA_dev_config __iomem *bar0 = sp->bar0; ··· 4762 4763 if (config->napi) { 4764 if (reason & GEN_INTR_RXTRAFFIC) { 4765 + netif_rx_schedule(dev, &sp->napi); 4766 + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); 4767 + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4768 + readl(&bar0->rx_traffic_int); 4769 } 4770 } else { 4771 /* ··· 4781 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4782 4783 for (i = 0; i < config->rx_ring_num; i++) 4784 + rx_intr_handler(&mac_control->rings[i], 0); 4785 } 4786 4787 /* ··· 6984 6985 /* After proper initialization of H/W, register ISR */ 6986 if (sp->config.intr_type == MSI_X) { 6987 + int i, msix_rx_cnt = 0; 6988 6989 + for (i = 0; i < sp->num_entries; i++) { 6990 + if (sp->s2io_entries[i].in_use == MSIX_FLG) { 6991 + if (sp->s2io_entries[i].type == 6992 + MSIX_RING_TYPE) { 6993 + sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 6994 + dev->name, i); 6995 + err = request_irq(sp->entries[i].vector, 6996 + s2io_msix_ring_handle, 0, 6997 + sp->desc[i], 6998 + sp->s2io_entries[i].arg); 6999 + } else if (sp->s2io_entries[i].type == 7000 + MSIX_ALARM_TYPE) { 7001 + sprintf(sp->desc[i], "%s:MSI-X-%d-TX", 7002 dev->name, i); 7003 + err = request_irq(sp->entries[i].vector, 7004 + s2io_msix_fifo_handle, 0, 7005 + sp->desc[i], 7006 + sp->s2io_entries[i].arg); 7007 + 7008 } 7009 + /* if either data or addr is zero print it. 
*/ 7010 + if (!(sp->msix_info[i].addr && 7011 sp->msix_info[i].data)) { 7012 + DBG_PRINT(ERR_DBG, 7013 + "%s @Addr:0x%llx Data:0x%llx\n", 7014 + sp->desc[i], 7015 (unsigned long long) 7016 sp->msix_info[i].addr, 7017 (unsigned long long) 7018 + ntohl(sp->msix_info[i].data)); 7019 + } else 7020 msix_rx_cnt++; 7021 + if (err) { 7022 + remove_msix_isr(sp); 7023 + 7024 + DBG_PRINT(ERR_DBG, 7025 + "%s:MSI-X-%d registration " 7026 + "failed\n", dev->name, i); 7027 + 7028 + DBG_PRINT(ERR_DBG, 7029 + "%s: Defaulting to INTA\n", 7030 + dev->name); 7031 + sp->config.intr_type = INTA; 7032 + break; 7033 } 7034 + sp->s2io_entries[i].in_use = 7035 + MSIX_REGISTERED_SUCCESS; 7036 } 7037 } 7038 if (!err) { 7039 printk(KERN_INFO "MSI-X-RX %d entries enabled\n", 7040 + --msix_rx_cnt); 7041 + DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled" 7042 + " through alarm vector\n"); 7043 } 7044 } 7045 if (sp->config.intr_type == INTA) { ··· 7080 clear_bit(__S2IO_STATE_CARD_UP, &sp->state); 7081 7082 /* Disable napi */ 7083 + if (sp->config.napi) { 7084 + int off = 0; 7085 + if (config->intr_type == MSI_X) { 7086 + for (; off < sp->config.rx_ring_num; off++) 7087 + napi_disable(&sp->mac_control.rings[off].napi); 7088 + } 7089 + else 7090 + napi_disable(&sp->napi); 7091 + } 7092 7093 /* disable Tx and Rx traffic on the NIC */ 7094 if (do_io) ··· 7173 } 7174 7175 /* Initialise napi */ 7176 + if (config->napi) { 7177 + int i; 7178 + if (config->intr_type == MSI_X) { 7179 + for (i = 0; i < sp->config.rx_ring_num; i++) 7180 + napi_enable(&sp->mac_control.rings[i].napi); 7181 + } else { 7182 + napi_enable(&sp->napi); 7183 + } 7184 + } 7185 7186 /* Maintain the state prior to the open */ 7187 if (sp->promisc_flg) ··· 7217 /* Enable select interrupts */ 7218 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); 7219 if (sp->config.intr_type != INTA) 7220 + en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); 7221 else { 7222 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 7223 interruptible |= TX_PIC_INTR; ··· 7615 rx_ring_num = MAX_RX_RINGS; 7616 } 7617 7618 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { 7619 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. 
" 7620 "Defaulting to INTA\n"); ··· 7918 * will use eth_mac_addr() for dev->set_mac_address 7919 * mac address will be set every time dev->open() is called 7920 */ 7921 #ifdef CONFIG_NET_POLL_CONTROLLER 7922 dev->poll_controller = s2io_netpoll; 7923 #endif ··· 7961 ret = -EBADSLT; 7962 goto set_swap_failed; 7963 } 7964 + } 7965 + 7966 + if (sp->config.intr_type == MSI_X) { 7967 + sp->num_entries = config->rx_ring_num + 1; 7968 + ret = s2io_enable_msi_x(sp); 7969 + 7970 + if (!ret) { 7971 + ret = s2io_test_msi(sp); 7972 + /* rollback MSI-X, will re-enable during add_isr() */ 7973 + remove_msix_isr(sp); 7974 + } 7975 + if (ret) { 7976 + 7977 + DBG_PRINT(ERR_DBG, 7978 + "%s: MSI-X requested but failed to enable\n", 7979 + dev->name); 7980 + sp->config.intr_type = INTA; 7981 + } 7982 + } 7983 + 7984 + if (config->intr_type == MSI_X) { 7985 + for (i = 0; i < config->rx_ring_num ; i++) 7986 + netif_napi_add(dev, &mac_control->rings[i].napi, 7987 + s2io_poll_msix, 64); 7988 + } else { 7989 + netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64); 7990 } 7991 7992 /* Not needed for Herc */ ··· 8012 8013 /* store mac addresses from CAM to s2io_nic structure */ 8014 do_s2io_store_unicast_mc(sp); 8015 + 8016 + /* Configure MSIX vector for number of rings configured plus one */ 8017 + if ((sp->device_type == XFRAME_II_DEVICE) && 8018 + (config->intr_type == MSI_X)) 8019 + sp->num_entries = config->rx_ring_num + 1; 8020 8021 /* Store the values of the MSIX table in the s2io_nic structure */ 8022 store_xmsi_data(sp); ··· 8078 break; 8079 } 8080 8081 + switch (sp->config.napi) { 8082 + case 0: 8083 + DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); 8084 + break; 8085 + case 1: 8086 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 8087 + break; 8088 + } 8089 8090 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8091 sp->config.tx_fifo_num);
+15 -7
drivers/net/s2io.h
··· 706 /* per-ring buffer counter */ 707 u32 rx_bufs_left; 708 709 - #define MAX_LRO_SESSIONS 32 710 struct lro lro0_n[MAX_LRO_SESSIONS]; 711 u8 lro; 712 ··· 724 725 /* copy of sp->pdev pointer */ 726 struct pci_dev *pdev; 727 728 /* 729 * Place holders for the virtual and physical addresses of ··· 846 * Structure to keep track of the MSI-X vectors and the corresponding 847 * argument registered against each vector 848 */ 849 - #define MAX_REQUESTED_MSI_X 17 850 struct s2io_msix_entry 851 { 852 u16 vector; ··· 854 void *arg; 855 856 u8 type; 857 - #define MSIX_FIFO_TYPE 1 858 - #define MSIX_RING_TYPE 2 859 860 u8 in_use; 861 #define MSIX_REGISTERED_SUCCESS 0xAA ··· 882 */ 883 int pkts_to_process; 884 struct net_device *dev; 885 - struct napi_struct napi; 886 struct mac_info mac_control; 887 struct config_param config; 888 struct pci_dev *pdev; ··· 952 */ 953 u8 other_fifo_idx; 954 955 /* after blink, the adapter must be restored with original 956 * values. 957 */ ··· 967 unsigned long long start_time; 968 struct vlan_group *vlgrp; 969 #define MSIX_FLG 0xA5 970 struct msix_entry *entries; 971 int msi_detected; 972 wait_queue_head_t msi_wait; ··· 988 u16 lro_max_aggr_per_sess; 989 volatile unsigned long state; 990 u64 general_int_mask; 991 #define VPD_STRING_LEN 80 992 u8 product_name[VPD_STRING_LEN]; 993 u8 serial_num[VPD_STRING_LEN]; ··· 1110 static int init_shared_mem(struct s2io_nic *sp); 1111 static void free_shared_mem(struct s2io_nic *sp); 1112 static int init_nic(struct s2io_nic *nic); 1113 - static void rx_intr_handler(struct ring_info *ring_data); 1114 static void tx_intr_handler(struct fifo_info *fifo_data); 1115 static void s2io_handle_errors(void * dev_id); 1116 ··· 1121 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); 1122 static void s2io_link(struct s2io_nic * sp, int link); 1123 static void s2io_reset(struct s2io_nic * sp); 1124 - static int s2io_poll(struct napi_struct *napi, int budget); 1125 static void s2io_init_pci(struct s2io_nic * sp); 1126 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); 1127 static void s2io_alarm_handle(unsigned long data);
··· 706 /* per-ring buffer counter */ 707 u32 rx_bufs_left; 708 709 + #define MAX_LRO_SESSIONS 32 710 struct lro lro0_n[MAX_LRO_SESSIONS]; 711 u8 lro; 712 ··· 724 725 /* copy of sp->pdev pointer */ 726 struct pci_dev *pdev; 727 + 728 + /* Per ring napi struct */ 729 + struct napi_struct napi; 730 + 731 + unsigned long interrupt_count; 732 733 /* 734 * Place holders for the virtual and physical addresses of ··· 841 * Structure to keep track of the MSI-X vectors and the corresponding 842 * argument registered against each vector 843 */ 844 + #define MAX_REQUESTED_MSI_X 9 845 struct s2io_msix_entry 846 { 847 u16 vector; ··· 849 void *arg; 850 851 u8 type; 852 + #define MSIX_ALARM_TYPE 1 853 + #define MSIX_RING_TYPE 2 854 855 u8 in_use; 856 #define MSIX_REGISTERED_SUCCESS 0xAA ··· 877 */ 878 int pkts_to_process; 879 struct net_device *dev; 880 struct mac_info mac_control; 881 struct config_param config; 882 struct pci_dev *pdev; ··· 948 */ 949 u8 other_fifo_idx; 950 951 + struct napi_struct napi; 952 /* after blink, the adapter must be restored with original 953 * values. 954 */ ··· 962 unsigned long long start_time; 963 struct vlan_group *vlgrp; 964 #define MSIX_FLG 0xA5 965 + int num_entries; 966 struct msix_entry *entries; 967 int msi_detected; 968 wait_queue_head_t msi_wait; ··· 982 u16 lro_max_aggr_per_sess; 983 volatile unsigned long state; 984 u64 general_int_mask; 985 + 986 #define VPD_STRING_LEN 80 987 u8 product_name[VPD_STRING_LEN]; 988 u8 serial_num[VPD_STRING_LEN]; ··· 1103 static int init_shared_mem(struct s2io_nic *sp); 1104 static void free_shared_mem(struct s2io_nic *sp); 1105 static int init_nic(struct s2io_nic *nic); 1106 + static int rx_intr_handler(struct ring_info *ring_data, int budget); 1107 static void tx_intr_handler(struct fifo_info *fifo_data); 1108 static void s2io_handle_errors(void * dev_id); 1109 ··· 1114 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); 1115 static void s2io_link(struct s2io_nic * sp, int link); 1116 static void s2io_reset(struct s2io_nic * sp); 1117 + static int s2io_poll_msix(struct napi_struct *napi, int budget); 1118 + static int s2io_poll_inta(struct napi_struct *napi, int budget); 1119 static void s2io_init_pci(struct s2io_nic * sp); 1120 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); 1121 static void s2io_alarm_handle(unsigned long data);
+31 -36
drivers/net/sb1250-mac.c
··· 179 #define SBMAC_MAX_TXDESCR 256 180 #define SBMAC_MAX_RXDESCR 256 181 182 - #define ETHER_ALIGN 2 183 - #define ETHER_ADDR_LEN 6 184 #define ENET_PACKET_SIZE 1518 185 /*#define ENET_PACKET_SIZE 9216 */ 186 ··· 261 spinlock_t sbm_lock; /* spin lock */ 262 int sbm_devflags; /* current device flags */ 263 264 - int sbm_buffersize; 265 - 266 /* 267 * Controller-specific things 268 */ ··· 302 static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, 303 int txrx, int maxdescr); 304 static void sbdma_channel_start(struct sbmacdma *d, int rxtx); 305 - static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); 306 static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); 307 static void sbdma_emptyring(struct sbmacdma *d); 308 - static void sbdma_fillring(struct sbmacdma *d); 309 static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, 310 int work_to_do, int poll); 311 static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, ··· 775 d->sbdma_remptr = NULL; 776 } 777 778 - static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) 779 { 780 - unsigned long addr; 781 - unsigned long newaddr; 782 783 - addr = (unsigned long) skb->data; 784 - 785 - newaddr = (addr + power2 - 1) & ~(power2 - 1); 786 - 787 - skb_reserve(skb,newaddr-addr+offset); 788 } 789 790 ··· 792 * this queues a buffer for inbound packets. 793 * 794 * Input parameters: 795 - * d - DMA channel descriptor 796 * sb - sk_buff to add, or NULL if we should allocate one 797 * 798 * Return value: ··· 802 ********************************************************************* */ 803 804 805 - static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) 806 { 807 struct sbdmadscr *dsc; 808 struct sbdmadscr *nextdsc; 809 struct sk_buff *sb_new = NULL; ··· 846 */ 847 848 if (sb == NULL) { 849 - sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); 850 if (sb_new == NULL) { 851 pr_info("%s: sk_buff allocation failed\n", 852 d->sbdma_eth->sbm_dev->name); 853 return -ENOBUFS; 854 } 855 856 - sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); 857 } 858 else { 859 sb_new = sb; ··· 874 * Do not interrupt per DMA transfer. 875 */ 876 dsc->dscr_a = virt_to_phys(sb_new->data) | 877 - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; 878 #else 879 dsc->dscr_a = virt_to_phys(sb_new->data) | 880 - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 881 M_DMA_DSCRA_INTERRUPT; 882 #endif 883 ··· 1032 * with sk_buffs 1033 * 1034 * Input parameters: 1035 - * d - DMA channel 1036 * 1037 * Return value: 1038 * nothing 1039 ********************************************************************* */ 1040 1041 - static void sbdma_fillring(struct sbmacdma *d) 1042 { 1043 int idx; 1044 1045 - for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { 1046 - if (sbdma_add_rcvbuffer(d,NULL) != 0) 1047 break; 1048 } 1049 } ··· 1160 * packet and put it right back on the receive ring. 1161 */ 1162 1163 - if (unlikely (sbdma_add_rcvbuffer(d,NULL) == 1164 - -ENOBUFS)) { 1165 dev->stats.rx_dropped++; 1166 - sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ 1167 /* No point in continuing at the moment */ 1168 printk(KERN_ERR "dropped packet (1)\n"); 1169 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); ··· 1214 * put it back on the receive ring. 
1215 */ 1216 dev->stats.rx_errors++; 1217 - sbdma_add_rcvbuffer(d,sb); 1218 } 1219 1220 ··· 1572 * Fill the receive ring 1573 */ 1574 1575 - sbdma_fillring(&(s->sbm_rxdma)); 1576 1577 /* 1578 * Turn on the rest of the bits in the enable register ··· 2313 for (i = 0; i < 6; i++) { 2314 dev->dev_addr[i] = eaddr[i]; 2315 } 2316 - 2317 - 2318 - /* 2319 - * Init packet size 2320 - */ 2321 - 2322 - sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN; 2323 2324 /* 2325 * Initialize context (get pointers to registers and stuff), then
··· 179 #define SBMAC_MAX_TXDESCR 256 180 #define SBMAC_MAX_RXDESCR 256 181 182 + #define ETHER_ADDR_LEN 6 183 #define ENET_PACKET_SIZE 1518 184 /*#define ENET_PACKET_SIZE 9216 */ 185 ··· 262 spinlock_t sbm_lock; /* spin lock */ 263 int sbm_devflags; /* current device flags */ 264 265 /* 266 * Controller-specific things 267 */ ··· 305 static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, 306 int txrx, int maxdescr); 307 static void sbdma_channel_start(struct sbmacdma *d, int rxtx); 308 + static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, 309 + struct sk_buff *m); 310 static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); 311 static void sbdma_emptyring(struct sbmacdma *d); 312 + static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); 313 static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, 314 int work_to_do, int poll); 315 static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, ··· 777 d->sbdma_remptr = NULL; 778 } 779 780 + static inline void sbdma_align_skb(struct sk_buff *skb, 781 + unsigned int power2, unsigned int offset) 782 { 783 + unsigned char *addr = skb->data; 784 + unsigned char *newaddr = PTR_ALIGN(addr, power2); 785 786 + skb_reserve(skb, newaddr - addr + offset); 787 } 788 789 ··· 797 * this queues a buffer for inbound packets. 798 * 799 * Input parameters: 800 + * sc - softc structure 801 + * d - DMA channel descriptor 802 * sb - sk_buff to add, or NULL if we should allocate one 803 * 804 * Return value: ··· 806 ********************************************************************* */ 807 808 809 + static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, 810 + struct sk_buff *sb) 811 { 812 + struct net_device *dev = sc->sbm_dev; 813 struct sbdmadscr *dsc; 814 struct sbdmadscr *nextdsc; 815 struct sk_buff *sb_new = NULL; ··· 848 */ 849 850 if (sb == NULL) { 851 + sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + 852 + SMP_CACHE_BYTES * 2 + 853 + NET_IP_ALIGN); 854 if (sb_new == NULL) { 855 pr_info("%s: sk_buff allocation failed\n", 856 d->sbdma_eth->sbm_dev->name); 857 return -ENOBUFS; 858 } 859 860 + sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); 861 } 862 else { 863 sb_new = sb; ··· 874 * Do not interrupt per DMA transfer. 875 */ 876 dsc->dscr_a = virt_to_phys(sb_new->data) | 877 + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; 878 #else 879 dsc->dscr_a = virt_to_phys(sb_new->data) | 880 + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 881 M_DMA_DSCRA_INTERRUPT; 882 #endif 883 ··· 1032 * with sk_buffs 1033 * 1034 * Input parameters: 1035 + * sc - softc structure 1036 + * d - DMA channel 1037 * 1038 * Return value: 1039 * nothing 1040 ********************************************************************* */ 1041 1042 + static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) 1043 { 1044 int idx; 1045 1046 + for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { 1047 + if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) 1048 break; 1049 } 1050 } ··· 1159 * packet and put it right back on the receive ring. 1160 */ 1161 1162 + if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == 1163 + -ENOBUFS)) { 1164 dev->stats.rx_dropped++; 1165 + /* Re-add old buffer */ 1166 + sbdma_add_rcvbuffer(sc, d, sb); 1167 /* No point in continuing at the moment */ 1168 printk(KERN_ERR "dropped packet (1)\n"); 1169 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); ··· 1212 * put it back on the receive ring. 
1213 */ 1214 dev->stats.rx_errors++; 1215 + sbdma_add_rcvbuffer(sc, d, sb); 1216 } 1217 1218 ··· 1570 * Fill the receive ring 1571 */ 1572 1573 + sbdma_fillring(s, &(s->sbm_rxdma)); 1574 1575 /* 1576 * Turn on the rest of the bits in the enable register ··· 2311 for (i = 0; i < 6; i++) { 2312 dev->dev_addr[i] = eaddr[i]; 2313 } 2314 2315 /* 2316 * Initialize context (get pointers to registers and stuff), then
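The rewritten sbdma_align_skb() above leans on PTR_ALIGN() plus skb_reserve() instead of open-coded mask arithmetic. As a rough standalone illustration of the same round-up-and-reserve arithmetic (align_up() and the sample addresses below are hypothetical, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Round p up to the next multiple of a (a must be a power of two),
 * mirroring what PTR_ALIGN() does for pointers. */
static uintptr_t align_up(uintptr_t p, uintptr_t a)
{
	return (p + a - 1) & ~(a - 1);
}

int main(void)
{
	uintptr_t addr = 0x1003;	/* hypothetical skb->data address */
	uintptr_t cacheline = 32;	/* stand-in for SMP_CACHE_BYTES */
	uintptr_t aligned = align_up(addr, cacheline);

	/* The driver then does skb_reserve(skb, aligned - addr + offset),
	 * where offset is NET_IP_ALIGN so the IP header ends up aligned. */
	printf("reserve %lu + NET_IP_ALIGN bytes\n",
	       (unsigned long)(aligned - addr));
	return 0;
}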
+5 -3
drivers/net/sc92031.c
··· 953 unsigned entry; 954 u32 tx_status; 955 956 - if (skb_padto(skb, ETH_ZLEN)) 957 - return NETDEV_TX_OK; 958 - 959 if (unlikely(skb->len > TX_BUF_SIZE)) { 960 dev->stats.tx_dropped++; 961 goto out; ··· 972 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 973 974 len = skb->len; 975 976 wmb(); 977
··· 953 unsigned entry; 954 u32 tx_status; 955 956 if (unlikely(skb->len > TX_BUF_SIZE)) { 957 dev->stats.tx_dropped++; 958 goto out; ··· 975 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 976 977 len = skb->len; 978 + if (unlikely(len < ETH_ZLEN)) { 979 + memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, 980 + 0, ETH_ZLEN - len); 981 + len = ETH_ZLEN; 982 + } 983 984 wmb(); 985
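Instead of calling skb_padto() before copying, the sc92031 transmit path above now zero-fills the tail of the copied frame up to ETH_ZLEN, so short packets never carry stale TX buffer contents. A minimal standalone sketch of that pad-on-copy idea (copy_and_pad() and its arguments are illustrative names only):

#include <stddef.h>
#include <string.h>

#define ETH_ZLEN 60	/* minimum Ethernet frame length, without FCS */

/* Copy a frame into a fixed-size TX slot and zero-pad short frames so
 * the hardware never transmits leftover bytes from the buffer. */
static size_t copy_and_pad(unsigned char *slot, const unsigned char *frame,
			   size_t len)
{
	memcpy(slot, frame, len);
	if (len < ETH_ZLEN) {
		memset(slot + len, 0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}
	return len;
}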
+2 -5
drivers/net/sfc/bitfield.h
··· 483 #endif 484 485 #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ 486 - if (FALCON_REV(efx) >= FALCON_REV_B0) { \ 487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ 488 } else { \ 489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ ··· 491 } while (0) 492 493 #define EFX_QWORD_FIELD_VER(efx, qword, field) \ 494 - (FALCON_REV(efx) >= FALCON_REV_B0 ? \ 495 EFX_QWORD_FIELD((qword), field##_B0) : \ 496 EFX_QWORD_FIELD((qword), field##_A1)) 497 ··· 501 #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) 502 #define EFX_DMA_TYPE_WIDTH(width) \ 503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) 504 - #define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \ 505 - ~((u64) 0) : ~((u32) 0)) 506 - #define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK) 507 508 #endif /* EFX_BITFIELD_H */
··· 483 #endif 484 485 #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ 486 + if (falcon_rev(efx) >= FALCON_REV_B0) { \ 487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ 488 } else { \ 489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ ··· 491 } while (0) 492 493 #define EFX_QWORD_FIELD_VER(efx, qword, field) \ 494 + (falcon_rev(efx) >= FALCON_REV_B0 ? \ 495 EFX_QWORD_FIELD((qword), field##_B0) : \ 496 EFX_QWORD_FIELD((qword), field##_A1)) 497 ··· 501 #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) 502 #define EFX_DMA_TYPE_WIDTH(width) \ 503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) 504 505 #endif /* EFX_BITFIELD_H */
+3 -6
drivers/net/sfc/boards.c
··· 27 struct efx_blinker *bl = &efx->board_info.blinker; 28 efx->board_info.set_fault_led(efx, bl->state); 29 bl->state = !bl->state; 30 - if (bl->resubmit) { 31 - bl->timer.expires = jiffies + BLINK_INTERVAL; 32 - add_timer(&bl->timer); 33 - } 34 } 35 36 static void board_blink(struct efx_nic *efx, int blink) ··· 42 blinker->state = 0; 43 setup_timer(&blinker->timer, blink_led_timer, 44 (unsigned long)efx); 45 - blinker->timer.expires = jiffies + BLINK_INTERVAL; 46 - add_timer(&blinker->timer); 47 } else { 48 blinker->resubmit = 0; 49 if (blinker->timer.function)
··· 27 struct efx_blinker *bl = &efx->board_info.blinker; 28 efx->board_info.set_fault_led(efx, bl->state); 29 bl->state = !bl->state; 30 + if (bl->resubmit) 31 + mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); 32 } 33 34 static void board_blink(struct efx_nic *efx, int blink) ··· 44 blinker->state = 0; 45 setup_timer(&blinker->timer, blink_led_timer, 46 (unsigned long)efx); 47 + mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); 48 } else { 49 blinker->resubmit = 0; 50 if (blinker->timer.function)
+40 -44
drivers/net/sfc/efx.c
··· 199 */ 200 static inline void efx_channel_processed(struct efx_channel *channel) 201 { 202 - /* Write to EVQ_RPTR_REG. If a new event arrived in a race 203 - * with finishing processing, a new interrupt will be raised. 204 - */ 205 channel->work_pending = 0; 206 - smp_wmb(); /* Ensure channel updated before any new interrupt. */ 207 falcon_eventq_read_ack(channel); 208 } 209 ··· 266 napi_disable(&channel->napi_str); 267 268 /* Poll the channel */ 269 - (void) efx_process_channel(channel, efx->type->evq_size); 270 271 /* Ack the eventq. This may cause an interrupt to be generated 272 * when they are reenabled */ ··· 318 * 319 *************************************************************************/ 320 321 - /* Setup per-NIC RX buffer parameters. 322 - * Calculate the rx buffer allocation parameters required to support 323 - * the current MTU, including padding for header alignment and overruns. 324 - */ 325 - static void efx_calc_rx_buffer_params(struct efx_nic *efx) 326 - { 327 - unsigned int order, len; 328 - 329 - len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 330 - EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 331 - efx->type->rx_buffer_padding); 332 - 333 - /* Calculate page-order */ 334 - for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order) 335 - ; 336 - 337 - efx->rx_buffer_len = len; 338 - efx->rx_buffer_order = order; 339 - } 340 - 341 static int efx_probe_channel(struct efx_channel *channel) 342 { 343 struct efx_tx_queue *tx_queue; ··· 368 struct efx_channel *channel; 369 int rc = 0; 370 371 - efx_calc_rx_buffer_params(efx); 372 373 /* Initialise the channels */ 374 efx_for_each_channel(channel, efx) { ··· 428 netif_napi_add(channel->napi_dev, &channel->napi_str, 429 efx_poll, napi_weight); 430 431 channel->work_pending = 0; 432 channel->enabled = 1; 433 - smp_wmb(); /* ensure channel updated before first interrupt */ 434 435 napi_enable(&channel->napi_str); 436 ··· 695 mutex_unlock(&efx->mac_lock); 696 697 /* Serialise against efx_set_multicast_list() */ 698 - if (NET_DEV_REGISTERED(efx)) { 699 netif_tx_lock_bh(efx->net_dev); 700 netif_tx_unlock_bh(efx->net_dev); 701 } ··· 782 efx->membase = ioremap_nocache(efx->membase_phys, 783 efx->type->mem_map_size); 784 if (!efx->membase) { 785 - EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", 786 - efx->type->mem_bar, efx->membase_phys, 787 efx->type->mem_map_size); 788 rc = -ENOMEM; 789 goto fail4; 790 } 791 - EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", 792 - efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, 793 - efx->membase); 794 795 return 0; 796 797 fail4: 798 release_mem_region(efx->membase_phys, efx->type->mem_map_size); 799 fail3: 800 - efx->membase_phys = 0UL; 801 fail2: 802 pci_disable_device(efx->pci_dev); 803 fail1: ··· 816 817 if (efx->membase_phys) { 818 pci_release_region(efx->pci_dev, efx->type->mem_bar); 819 - efx->membase_phys = 0UL; 820 } 821 822 pci_disable_device(efx->pci_dev); ··· 1035 return; 1036 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) 1037 return; 1038 - if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) 1039 return; 1040 1041 /* Mark the port as enabled so port reconfigurations can start, then ··· 1065 cancel_delayed_work_sync(&efx->monitor_work); 1066 1067 /* Ensure that all RX slow refills are complete. 
*/ 1068 - efx_for_each_rx_queue(rx_queue, efx) { 1069 cancel_delayed_work_sync(&rx_queue->work); 1070 - } 1071 1072 /* Stop scheduled port reconfigurations */ 1073 cancel_work_sync(&efx->reconfigure_work); ··· 1092 falcon_disable_interrupts(efx); 1093 if (efx->legacy_irq) 1094 synchronize_irq(efx->legacy_irq); 1095 - efx_for_each_channel_with_interrupt(channel, efx) 1096 if (channel->irq) 1097 synchronize_irq(channel->irq); 1098 1099 /* Stop all NAPI processing and synchronous rx refills */ 1100 efx_for_each_channel(channel, efx) ··· 1117 /* Stop the kernel transmit interface late, so the watchdog 1118 * timer isn't ticking over the flush */ 1119 efx_stop_queue(efx); 1120 - if (NET_DEV_REGISTERED(efx)) { 1121 netif_tx_lock_bh(efx->net_dev); 1122 netif_tx_unlock_bh(efx->net_dev); 1123 } ··· 1336 return 0; 1337 } 1338 1339 - /* Context: process, dev_base_lock held, non-blocking. */ 1340 static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1341 { 1342 struct efx_nic *efx = net_dev->priv; 1343 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1344 struct net_device_stats *stats = &net_dev->stats; 1345 1346 if (!spin_trylock(&efx->stats_lock)) 1347 return stats; 1348 if (efx->state == STATE_RUNNING) { ··· 1490 static int efx_netdev_event(struct notifier_block *this, 1491 unsigned long event, void *ptr) 1492 { 1493 - struct net_device *net_dev = (struct net_device *)ptr; 1494 1495 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { 1496 struct efx_nic *efx = net_dev->priv; ··· 1559 efx_for_each_tx_queue(tx_queue, efx) 1560 efx_release_tx_buffers(tx_queue); 1561 1562 - if (NET_DEV_REGISTERED(efx)) { 1563 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1564 unregister_netdev(efx->net_dev); 1565 } ··· 1684 if (method == RESET_TYPE_DISABLE) { 1685 /* Reinitialise the device anyway so the driver unload sequence 1686 * can talk to the external SRAM */ 1687 - (void) falcon_init_nic(efx); 1688 rc = -EIO; 1689 goto fail4; 1690 }
··· 199 */ 200 static inline void efx_channel_processed(struct efx_channel *channel) 201 { 202 + /* The interrupt handler for this channel may set work_pending 203 + * as soon as we acknowledge the events we've seen. Make sure 204 + * it's cleared before then. */ 205 channel->work_pending = 0; 206 + smp_wmb(); 207 + 208 falcon_eventq_read_ack(channel); 209 } 210 ··· 265 napi_disable(&channel->napi_str); 266 267 /* Poll the channel */ 268 + efx_process_channel(channel, efx->type->evq_size); 269 270 /* Ack the eventq. This may cause an interrupt to be generated 271 * when they are reenabled */ ··· 317 * 318 *************************************************************************/ 319 320 static int efx_probe_channel(struct efx_channel *channel) 321 { 322 struct efx_tx_queue *tx_queue; ··· 387 struct efx_channel *channel; 388 int rc = 0; 389 390 + /* Calculate the rx buffer allocation parameters required to 391 + * support the current MTU, including padding for header 392 + * alignment and overruns. 393 + */ 394 + efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 395 + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 396 + efx->type->rx_buffer_padding); 397 + efx->rx_buffer_order = get_order(efx->rx_buffer_len); 398 399 /* Initialise the channels */ 400 efx_for_each_channel(channel, efx) { ··· 440 netif_napi_add(channel->napi_dev, &channel->napi_str, 441 efx_poll, napi_weight); 442 443 + /* The interrupt handler for this channel may set work_pending 444 + * as soon as we enable it. Make sure it's cleared before 445 + * then. Similarly, make sure it sees the enabled flag set. */ 446 channel->work_pending = 0; 447 channel->enabled = 1; 448 + smp_wmb(); 449 450 napi_enable(&channel->napi_str); 451 ··· 704 mutex_unlock(&efx->mac_lock); 705 706 /* Serialise against efx_set_multicast_list() */ 707 + if (efx_dev_registered(efx)) { 708 netif_tx_lock_bh(efx->net_dev); 709 netif_tx_unlock_bh(efx->net_dev); 710 } ··· 791 efx->membase = ioremap_nocache(efx->membase_phys, 792 efx->type->mem_map_size); 793 if (!efx->membase) { 794 + EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", 795 + efx->type->mem_bar, 796 + (unsigned long long)efx->membase_phys, 797 efx->type->mem_map_size); 798 rc = -ENOMEM; 799 goto fail4; 800 } 801 + EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", 802 + efx->type->mem_bar, (unsigned long long)efx->membase_phys, 803 + efx->type->mem_map_size, efx->membase); 804 805 return 0; 806 807 fail4: 808 release_mem_region(efx->membase_phys, efx->type->mem_map_size); 809 fail3: 810 + efx->membase_phys = 0; 811 fail2: 812 pci_disable_device(efx->pci_dev); 813 fail1: ··· 824 825 if (efx->membase_phys) { 826 pci_release_region(efx->pci_dev, efx->type->mem_bar); 827 + efx->membase_phys = 0; 828 } 829 830 pci_disable_device(efx->pci_dev); ··· 1043 return; 1044 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) 1045 return; 1046 + if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) 1047 return; 1048 1049 /* Mark the port as enabled so port reconfigurations can start, then ··· 1073 cancel_delayed_work_sync(&efx->monitor_work); 1074 1075 /* Ensure that all RX slow refills are complete. 
*/ 1076 + efx_for_each_rx_queue(rx_queue, efx) 1077 cancel_delayed_work_sync(&rx_queue->work); 1078 1079 /* Stop scheduled port reconfigurations */ 1080 cancel_work_sync(&efx->reconfigure_work); ··· 1101 falcon_disable_interrupts(efx); 1102 if (efx->legacy_irq) 1103 synchronize_irq(efx->legacy_irq); 1104 + efx_for_each_channel_with_interrupt(channel, efx) { 1105 if (channel->irq) 1106 synchronize_irq(channel->irq); 1107 + } 1108 1109 /* Stop all NAPI processing and synchronous rx refills */ 1110 efx_for_each_channel(channel, efx) ··· 1125 /* Stop the kernel transmit interface late, so the watchdog 1126 * timer isn't ticking over the flush */ 1127 efx_stop_queue(efx); 1128 + if (efx_dev_registered(efx)) { 1129 netif_tx_lock_bh(efx->net_dev); 1130 netif_tx_unlock_bh(efx->net_dev); 1131 } ··· 1344 return 0; 1345 } 1346 1347 + /* Context: process, dev_base_lock or RTNL held, non-blocking. */ 1348 static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1349 { 1350 struct efx_nic *efx = net_dev->priv; 1351 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1352 struct net_device_stats *stats = &net_dev->stats; 1353 1354 + /* Update stats if possible, but do not wait if another thread 1355 + * is updating them (or resetting the NIC); slightly stale 1356 + * stats are acceptable. 1357 + */ 1358 if (!spin_trylock(&efx->stats_lock)) 1359 return stats; 1360 if (efx->state == STATE_RUNNING) { ··· 1494 static int efx_netdev_event(struct notifier_block *this, 1495 unsigned long event, void *ptr) 1496 { 1497 + struct net_device *net_dev = ptr; 1498 1499 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { 1500 struct efx_nic *efx = net_dev->priv; ··· 1563 efx_for_each_tx_queue(tx_queue, efx) 1564 efx_release_tx_buffers(tx_queue); 1565 1566 + if (efx_dev_registered(efx)) { 1567 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1568 unregister_netdev(efx->net_dev); 1569 } ··· 1688 if (method == RESET_TYPE_DISABLE) { 1689 /* Reinitialise the device anyway so the driver unload sequence 1690 * can talk to the external SRAM */ 1691 + falcon_init_nic(efx); 1692 rc = -EIO; 1693 goto fail4; 1694 }
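With efx_calc_rx_buffer_params() folded into efx_probe_channels() above, the open-coded page-order loop is gone and the RX buffer order comes straight from get_order(). For reference, get_order(size) is just the smallest order with PAGE_SIZE << order >= size (for nonzero sizes); a standalone sketch of that relationship, with made-up sizes:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order with (PAGE_SIZE << order) >= size, for size > 0;
 * this is the relationship get_order() provides. */
static int order_for(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	printf("order for 1700 bytes: %d\n", order_for(1700));	/* 0 */
	printf("order for 9000 bytes: %d\n", order_for(9000));	/* 2 */
	return 0;
}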
+38 -49
drivers/net/sfc/falcon.c
··· 116 ************************************************************************** 117 */ 118 119 - /* DMA address mask (up to 46-bit, avoiding compiler warnings) 120 - * 121 - * Note that it is possible to have a platform with 64-bit longs and 122 - * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the 123 - * platform DMA mask. 124 - */ 125 - #if BITS_PER_LONG == 64 126 - #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL) 127 - #else 128 - #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL) 129 - #endif 130 131 /* TX DMA length mask (13-bit) */ 132 #define FALCON_TX_DMA_MASK (4096 - 1) ··· 136 #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 137 138 #define FALCON_IS_DUAL_FUNC(efx) \ 139 - (FALCON_REV(efx) < FALCON_REV_B0) 140 141 /************************************************************************** 142 * ··· 456 TX_DESCQ_TYPE, 0, 457 TX_NON_IP_DROP_DIS_B0, 1); 458 459 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 460 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); 461 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); 462 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); ··· 465 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 466 tx_queue->queue); 467 468 - if (FALCON_REV(efx) < FALCON_REV_B0) { 469 efx_oword_t reg; 470 471 BUG_ON(tx_queue->queue >= 128); /* HW limit */ ··· 626 efx_oword_t rx_desc_ptr; 627 struct efx_nic *efx = rx_queue->efx; 628 int rc; 629 - int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; 630 int iscsi_digest_en = is_b0; 631 632 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", ··· 813 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 814 tx_queue = &efx->tx_queue[tx_ev_q_label]; 815 816 - if (NET_DEV_REGISTERED(efx)) 817 netif_tx_lock(efx->net_dev); 818 falcon_notify_tx_desc(tx_queue); 819 - if (NET_DEV_REGISTERED(efx)) 820 netif_tx_unlock(efx->net_dev); 821 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && 822 EFX_WORKAROUND_10727(efx)) { ··· 875 RX_EV_TCP_UDP_CHKSUM_ERR); 876 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); 877 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); 878 - rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? 
879 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 880 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); 881 ··· 1056 EFX_QWORD_FIELD(*event, XG_PHY_INTR)) 1057 is_phy_event = 1; 1058 1059 - if ((FALCON_REV(efx) >= FALCON_REV_B0) && 1060 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 1061 is_phy_event = 1; 1062 ··· 1396 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) 1397 { 1398 struct falcon_nic_data *nic_data = efx->nic_data; 1399 - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1400 efx_oword_t fatal_intr; 1401 int error, mem_perr; 1402 static int n_int_errors; ··· 1442 */ 1443 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) 1444 { 1445 - struct efx_nic *efx = (struct efx_nic *)dev_id; 1446 - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1447 struct efx_channel *channel; 1448 efx_dword_t reg; 1449 u32 queues; ··· 1480 1481 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 1482 { 1483 - struct efx_nic *efx = (struct efx_nic *)dev_id; 1484 - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1485 struct efx_channel *channel; 1486 int syserr; 1487 int queues; ··· 1533 */ 1534 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) 1535 { 1536 - struct efx_channel *channel = (struct efx_channel *)dev_id; 1537 struct efx_nic *efx = channel->efx; 1538 - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1539 int syserr; 1540 1541 efx->last_irq_cpu = raw_smp_processor_id(); ··· 1563 unsigned long offset; 1564 efx_dword_t dword; 1565 1566 - if (FALCON_REV(efx) < FALCON_REV_B0) 1567 return; 1568 1569 for (offset = RX_RSS_INDIR_TBL_B0; ··· 1586 1587 if (!EFX_INT_MODE_USE_MSI(efx)) { 1588 irq_handler_t handler; 1589 - if (FALCON_REV(efx) >= FALCON_REV_B0) 1590 handler = falcon_legacy_interrupt_b0; 1591 else 1592 handler = falcon_legacy_interrupt_a1; ··· 1627 efx_oword_t reg; 1628 1629 /* Disable MSI/MSI-X interrupts */ 1630 - efx_for_each_channel_with_interrupt(channel, efx) 1631 if (channel->irq) 1632 free_irq(channel->irq, channel); 1633 1634 /* ACK legacy interrupt */ 1635 - if (FALCON_REV(efx) >= FALCON_REV_B0) 1636 falcon_read(efx, &reg, INT_ISR0_B0); 1637 else 1638 falcon_irq_ack_a1(efx); ··· 1724 efx_oword_t temp; 1725 int count; 1726 1727 - if ((FALCON_REV(efx) < FALCON_REV_B0) || 1728 (efx->loopback_mode != LOOPBACK_NONE)) 1729 return; 1730 ··· 1777 { 1778 efx_oword_t temp; 1779 1780 - if (FALCON_REV(efx) < FALCON_REV_B0) 1781 return; 1782 1783 /* Isolate the MAC -> RX */ ··· 1815 MAC_SPEED, link_speed); 1816 /* On B0, MAC backpressure can be disabled and packets get 1817 * discarded. 
*/ 1818 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 1819 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1820 !efx->link_up); 1821 } ··· 1833 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1834 1835 /* Unisolate the MAC -> RX */ 1836 - if (FALCON_REV(efx) >= FALCON_REV_B0) 1837 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 1838 falcon_write(efx, &reg, RX_CFG_REG_KER); 1839 } ··· 1848 return 0; 1849 1850 /* Statistics fetch will fail if the MAC is in TX drain */ 1851 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 1852 efx_oword_t temp; 1853 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 1854 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) ··· 1932 static void falcon_mdio_write(struct net_device *net_dev, int phy_id, 1933 int addr, int value) 1934 { 1935 - struct efx_nic *efx = (struct efx_nic *)net_dev->priv; 1936 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; 1937 efx_oword_t reg; 1938 ··· 2000 * could be read, -1 will be returned. */ 2001 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) 2002 { 2003 - struct efx_nic *efx = (struct efx_nic *)net_dev->priv; 2004 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; 2005 efx_oword_t reg; 2006 int value = -1; ··· 2105 falcon_init_mdio(&efx->mii); 2106 2107 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2108 - if (FALCON_REV(efx) >= FALCON_REV_B0) 2109 efx->flow_control = EFX_FC_RX | EFX_FC_TX; 2110 else 2111 efx->flow_control = EFX_FC_RX; ··· 2365 return -ENODEV; 2366 } 2367 2368 - switch (FALCON_REV(efx)) { 2369 case FALCON_REV_A0: 2370 case 0xff: 2371 EFX_ERR(efx, "Falcon rev A0 not supported\n"); ··· 2391 break; 2392 2393 default: 2394 - EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); 2395 return -ENODEV; 2396 } 2397 ··· 2411 2412 /* Allocate storage for hardware specific data */ 2413 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2414 - efx->nic_data = (void *) nic_data; 2415 2416 /* Determine number of ports etc. */ 2417 rc = falcon_probe_nic_variant(efx); ··· 2481 */ 2482 int falcon_init_nic(struct efx_nic *efx) 2483 { 2484 - struct falcon_nic_data *data; 2485 efx_oword_t temp; 2486 unsigned thresh; 2487 int rc; 2488 - 2489 - data = (struct falcon_nic_data *)efx->nic_data; 2490 2491 /* Set up the address region register. This is only needed 2492 * for the B0 FPGA, but since we are just pushing in the ··· 2551 2552 /* Set number of RSS queues for receive path. 
*/ 2553 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 2554 - if (FALCON_REV(efx) >= FALCON_REV_B0) 2555 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); 2556 else 2557 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); ··· 2589 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 2590 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); 2591 /* Squash TX of packets of 16 bytes or less */ 2592 - if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 2593 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); 2594 falcon_write(efx, &temp, TX_CFG2_REG_KER); 2595 ··· 2606 if (EFX_WORKAROUND_7575(efx)) 2607 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, 2608 (3 * 4096) / 32); 2609 - if (FALCON_REV(efx) >= FALCON_REV_B0) 2610 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); 2611 2612 /* RX FIFO flow control thresholds */ ··· 2622 falcon_write(efx, &temp, RX_CFG_REG_KER); 2623 2624 /* Set destination of both TX and RX Flush events */ 2625 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 2626 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 2627 falcon_write(efx, &temp, DP_CTRL_REG); 2628 } ··· 2636 2637 falcon_free_buffer(efx, &efx->irq_status); 2638 2639 - (void) falcon_reset_hw(efx, RESET_TYPE_ALL); 2640 2641 /* Release the second function after the reset */ 2642 if (nic_data->pci_dev2) {
··· 116 ************************************************************************** 117 */ 118 119 + /* DMA address mask */ 120 + #define FALCON_DMA_MASK DMA_BIT_MASK(46) 121 122 /* TX DMA length mask (13-bit) */ 123 #define FALCON_TX_DMA_MASK (4096 - 1) ··· 145 #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 146 147 #define FALCON_IS_DUAL_FUNC(efx) \ 148 + (falcon_rev(efx) < FALCON_REV_B0) 149 150 /************************************************************************** 151 * ··· 465 TX_DESCQ_TYPE, 0, 466 TX_NON_IP_DROP_DIS_B0, 1); 467 468 + if (falcon_rev(efx) >= FALCON_REV_B0) { 469 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); 470 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); 471 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); ··· 474 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 475 tx_queue->queue); 476 477 + if (falcon_rev(efx) < FALCON_REV_B0) { 478 efx_oword_t reg; 479 480 BUG_ON(tx_queue->queue >= 128); /* HW limit */ ··· 635 efx_oword_t rx_desc_ptr; 636 struct efx_nic *efx = rx_queue->efx; 637 int rc; 638 + int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; 639 int iscsi_digest_en = is_b0; 640 641 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", ··· 822 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 823 tx_queue = &efx->tx_queue[tx_ev_q_label]; 824 825 + if (efx_dev_registered(efx)) 826 netif_tx_lock(efx->net_dev); 827 falcon_notify_tx_desc(tx_queue); 828 + if (efx_dev_registered(efx)) 829 netif_tx_unlock(efx->net_dev); 830 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && 831 EFX_WORKAROUND_10727(efx)) { ··· 884 RX_EV_TCP_UDP_CHKSUM_ERR); 885 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); 886 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); 887 + rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? 
888 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 889 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); 890 ··· 1065 EFX_QWORD_FIELD(*event, XG_PHY_INTR)) 1066 is_phy_event = 1; 1067 1068 + if ((falcon_rev(efx) >= FALCON_REV_B0) && 1069 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 1070 is_phy_event = 1; 1071 ··· 1405 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) 1406 { 1407 struct falcon_nic_data *nic_data = efx->nic_data; 1408 + efx_oword_t *int_ker = efx->irq_status.addr; 1409 efx_oword_t fatal_intr; 1410 int error, mem_perr; 1411 static int n_int_errors; ··· 1451 */ 1452 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) 1453 { 1454 + struct efx_nic *efx = dev_id; 1455 + efx_oword_t *int_ker = efx->irq_status.addr; 1456 struct efx_channel *channel; 1457 efx_dword_t reg; 1458 u32 queues; ··· 1489 1490 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 1491 { 1492 + struct efx_nic *efx = dev_id; 1493 + efx_oword_t *int_ker = efx->irq_status.addr; 1494 struct efx_channel *channel; 1495 int syserr; 1496 int queues; ··· 1542 */ 1543 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) 1544 { 1545 + struct efx_channel *channel = dev_id; 1546 struct efx_nic *efx = channel->efx; 1547 + efx_oword_t *int_ker = efx->irq_status.addr; 1548 int syserr; 1549 1550 efx->last_irq_cpu = raw_smp_processor_id(); ··· 1572 unsigned long offset; 1573 efx_dword_t dword; 1574 1575 + if (falcon_rev(efx) < FALCON_REV_B0) 1576 return; 1577 1578 for (offset = RX_RSS_INDIR_TBL_B0; ··· 1595 1596 if (!EFX_INT_MODE_USE_MSI(efx)) { 1597 irq_handler_t handler; 1598 + if (falcon_rev(efx) >= FALCON_REV_B0) 1599 handler = falcon_legacy_interrupt_b0; 1600 else 1601 handler = falcon_legacy_interrupt_a1; ··· 1636 efx_oword_t reg; 1637 1638 /* Disable MSI/MSI-X interrupts */ 1639 + efx_for_each_channel_with_interrupt(channel, efx) { 1640 if (channel->irq) 1641 free_irq(channel->irq, channel); 1642 + } 1643 1644 /* ACK legacy interrupt */ 1645 + if (falcon_rev(efx) >= FALCON_REV_B0) 1646 falcon_read(efx, &reg, INT_ISR0_B0); 1647 else 1648 falcon_irq_ack_a1(efx); ··· 1732 efx_oword_t temp; 1733 int count; 1734 1735 + if ((falcon_rev(efx) < FALCON_REV_B0) || 1736 (efx->loopback_mode != LOOPBACK_NONE)) 1737 return; 1738 ··· 1785 { 1786 efx_oword_t temp; 1787 1788 + if (falcon_rev(efx) < FALCON_REV_B0) 1789 return; 1790 1791 /* Isolate the MAC -> RX */ ··· 1823 MAC_SPEED, link_speed); 1824 /* On B0, MAC backpressure can be disabled and packets get 1825 * discarded. */ 1826 + if (falcon_rev(efx) >= FALCON_REV_B0) { 1827 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1828 !efx->link_up); 1829 } ··· 1841 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1842 1843 /* Unisolate the MAC -> RX */ 1844 + if (falcon_rev(efx) >= FALCON_REV_B0) 1845 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 1846 falcon_write(efx, &reg, RX_CFG_REG_KER); 1847 } ··· 1856 return 0; 1857 1858 /* Statistics fetch will fail if the MAC is in TX drain */ 1859 + if (falcon_rev(efx) >= FALCON_REV_B0) { 1860 efx_oword_t temp; 1861 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 1862 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) ··· 1940 static void falcon_mdio_write(struct net_device *net_dev, int phy_id, 1941 int addr, int value) 1942 { 1943 + struct efx_nic *efx = net_dev->priv; 1944 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; 1945 efx_oword_t reg; 1946 ··· 2008 * could be read, -1 will be returned. 
*/ 2009 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) 2010 { 2011 + struct efx_nic *efx = net_dev->priv; 2012 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; 2013 efx_oword_t reg; 2014 int value = -1; ··· 2113 falcon_init_mdio(&efx->mii); 2114 2115 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2116 + if (falcon_rev(efx) >= FALCON_REV_B0) 2117 efx->flow_control = EFX_FC_RX | EFX_FC_TX; 2118 else 2119 efx->flow_control = EFX_FC_RX; ··· 2373 return -ENODEV; 2374 } 2375 2376 + switch (falcon_rev(efx)) { 2377 case FALCON_REV_A0: 2378 case 0xff: 2379 EFX_ERR(efx, "Falcon rev A0 not supported\n"); ··· 2399 break; 2400 2401 default: 2402 + EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx)); 2403 return -ENODEV; 2404 } 2405 ··· 2419 2420 /* Allocate storage for hardware specific data */ 2421 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2422 + efx->nic_data = nic_data; 2423 2424 /* Determine number of ports etc. */ 2425 rc = falcon_probe_nic_variant(efx); ··· 2489 */ 2490 int falcon_init_nic(struct efx_nic *efx) 2491 { 2492 efx_oword_t temp; 2493 unsigned thresh; 2494 int rc; 2495 2496 /* Set up the address region register. This is only needed 2497 * for the B0 FPGA, but since we are just pushing in the ··· 2562 2563 /* Set number of RSS queues for receive path. */ 2564 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 2565 + if (falcon_rev(efx) >= FALCON_REV_B0) 2566 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); 2567 else 2568 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); ··· 2600 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 2601 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); 2602 /* Squash TX of packets of 16 bytes or less */ 2603 + if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 2604 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); 2605 falcon_write(efx, &temp, TX_CFG2_REG_KER); 2606 ··· 2617 if (EFX_WORKAROUND_7575(efx)) 2618 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, 2619 (3 * 4096) / 32); 2620 + if (falcon_rev(efx) >= FALCON_REV_B0) 2621 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); 2622 2623 /* RX FIFO flow control thresholds */ ··· 2633 falcon_write(efx, &temp, RX_CFG_REG_KER); 2634 2635 /* Set destination of both TX and RX Flush events */ 2636 + if (falcon_rev(efx) >= FALCON_REV_B0) { 2637 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 2638 falcon_write(efx, &temp, DP_CTRL_REG); 2639 } ··· 2647 2648 falcon_free_buffer(efx, &efx->irq_status); 2649 2650 + falcon_reset_hw(efx, RESET_TYPE_ALL); 2651 2652 /* Release the second function after the reset */ 2653 if (nic_data->pci_dev2) {
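The BITS_PER_LONG-conditional FALCON_DMA_MASK definition is replaced above by DMA_BIT_MASK(46), which expands to a 64-bit value with the low 46 bits set -- the same 0x00003fffffffffff constant the old code spelled out. A quick standalone check of that expansion (illustrative only; the macro below mirrors the kernel definition):

#include <stdio.h>

/* Same shape as the kernel's DMA_BIT_MASK(): the low n bits set. */
#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	/* The 46-bit Falcon mask: prints 0x3fffffffffff */
	printf("%#llx\n", (unsigned long long)DMA_BIT_MASK(46));
	return 0;
}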
+4 -1
drivers/net/sfc/falcon.h
··· 23 FALCON_REV_B0 = 2, 24 }; 25 26 - #define FALCON_REV(efx) ((efx)->pci_dev->revision) 27 28 extern struct efx_nic_type falcon_a_nic_type; 29 extern struct efx_nic_type falcon_b_nic_type;
··· 23 FALCON_REV_B0 = 2, 24 }; 25 26 + static inline int falcon_rev(struct efx_nic *efx) 27 + { 28 + return efx->pci_dev->revision; 29 + } 30 31 extern struct efx_nic_type falcon_a_nic_type; 32 extern struct efx_nic_type falcon_b_nic_type;
+2 -2
drivers/net/sfc/falcon_hwdefs.h
··· 1125 u8 port1_phy_type; 1126 __le16 asic_sub_revision; 1127 __le16 board_revision; 1128 - } __attribute__ ((packed)); 1129 1130 #define NVCONFIG_BASE 0x300 1131 #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C ··· 1144 __le16 board_struct_ver; 1145 __le16 board_checksum; 1146 struct falcon_nvconfig_board_v2 board_v2; 1147 - } __attribute__ ((packed)); 1148 1149 #endif /* EFX_FALCON_HWDEFS_H */
··· 1125 u8 port1_phy_type; 1126 __le16 asic_sub_revision; 1127 __le16 board_revision; 1128 + } __packed; 1129 1130 #define NVCONFIG_BASE 0x300 1131 #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C ··· 1144 __le16 board_struct_ver; 1145 __le16 board_checksum; 1146 struct falcon_nvconfig_board_v2 board_v2; 1147 + } __packed; 1148 1149 #endif /* EFX_FALCON_HWDEFS_H */
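__packed here is simply the kernel's shorthand for __attribute__((packed)); the NVRAM structures keep their byte-exact layout either way. A small userspace example of what packing changes (the struct names are made up):

#include <stdio.h>

struct with_padding { char c; short s; };
struct no_padding   { char c; short s; } __attribute__((packed));

int main(void)
{
	/* Typically prints "4 vs 3": packing removes the alignment hole. */
	printf("%zu vs %zu\n",
	       sizeof(struct with_padding), sizeof(struct no_padding));
	return 0;
}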
+21 -8
drivers/net/sfc/falcon_io.h
··· 56 #define FALCON_USE_QWORD_IO 1 57 #endif 58 59 - #define _falcon_writeq(efx, value, reg) \ 60 - __raw_writeq((__force u64) (value), (efx)->membase + (reg)) 61 - #define _falcon_writel(efx, value, reg) \ 62 - __raw_writel((__force u32) (value), (efx)->membase + (reg)) 63 - #define _falcon_readq(efx, reg) \ 64 - ((__force __le64) __raw_readq((efx)->membase + (reg))) 65 - #define _falcon_readl(efx, reg) \ 66 - ((__force __le32) __raw_readl((efx)->membase + (reg))) 67 68 /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ 69 static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
··· 56 #define FALCON_USE_QWORD_IO 1 57 #endif 58 59 + #ifdef FALCON_USE_QWORD_IO 60 + static inline void _falcon_writeq(struct efx_nic *efx, __le64 value, 61 + unsigned int reg) 62 + { 63 + __raw_writeq((__force u64)value, efx->membase + reg); 64 + } 65 + static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg) 66 + { 67 + return (__force __le64)__raw_readq(efx->membase + reg); 68 + } 69 + #endif 70 + 71 + static inline void _falcon_writel(struct efx_nic *efx, __le32 value, 72 + unsigned int reg) 73 + { 74 + __raw_writel((__force u32)value, efx->membase + reg); 75 + } 76 + static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg) 77 + { 78 + return (__force __le32)__raw_readl(efx->membase + reg); 79 + } 80 81 /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ 82 static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
+5 -5
drivers/net/sfc/falcon_xmac.c
··· 221 { 222 efx_dword_t reg; 223 224 - if (FALCON_REV(efx) < FALCON_REV_B0) 225 return 1; 226 227 /* The ISR latches, so clear it and re-read */ ··· 241 { 242 efx_dword_t reg; 243 244 - if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 245 return; 246 247 /* Flush the ISR */ ··· 454 455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", 456 __func__, tries); 457 - (void) falcon_reset_xaui(efx); 458 udelay(200); 459 tries--; 460 } ··· 572 xaui_link_ok = falcon_xaui_link_ok(efx); 573 574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) 575 - (void) falcon_reset_xaui(efx); 576 577 /* Call the PHY check_hw routine */ 578 rc = efx->phy_op->check_hw(efx); ··· 639 reset = ((flow_control & EFX_FC_TX) && 640 !(efx->flow_control & EFX_FC_TX)); 641 if (EFX_WORKAROUND_11482(efx) && reset) { 642 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 643 /* Recover by resetting the EM block */ 644 if (efx->link_up) 645 falcon_drain_tx_fifo(efx);
··· 221 { 222 efx_dword_t reg; 223 224 + if (falcon_rev(efx) < FALCON_REV_B0) 225 return 1; 226 227 /* The ISR latches, so clear it and re-read */ ··· 241 { 242 efx_dword_t reg; 243 244 + if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 245 return; 246 247 /* Flush the ISR */ ··· 454 455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", 456 __func__, tries); 457 + falcon_reset_xaui(efx); 458 udelay(200); 459 tries--; 460 } ··· 572 xaui_link_ok = falcon_xaui_link_ok(efx); 573 574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) 575 + falcon_reset_xaui(efx); 576 577 /* Call the PHY check_hw routine */ 578 rc = efx->phy_op->check_hw(efx); ··· 639 reset = ((flow_control & EFX_FC_TX) && 640 !(efx->flow_control & EFX_FC_TX)); 641 if (EFX_WORKAROUND_11482(efx) && reset) { 642 + if (falcon_rev(efx) >= FALCON_REV_B0) { 643 /* Recover by resetting the EM block */ 644 if (efx->link_up) 645 falcon_drain_tx_fifo(efx);
+22 -22
drivers/net/sfc/net_driver.h
··· 42 #ifndef EFX_DRIVER_NAME 43 #define EFX_DRIVER_NAME "sfc" 44 #endif 45 - #define EFX_DRIVER_VERSION "2.2.0136" 46 47 #ifdef EFX_ENABLE_DEBUG 48 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) ··· 52 #define EFX_WARN_ON_PARANOID(x) do {} while (0) 53 #endif 54 55 - #define NET_DEV_REGISTERED(efx) \ 56 - ((efx)->net_dev->reg_state == NETREG_REGISTERED) 57 - 58 - /* Include net device name in log messages if it has been registered. 59 - * Use efx->name not efx->net_dev->name so that races with (un)registration 60 - * are harmless. 61 - */ 62 - #define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "") 63 - 64 /* Un-rate-limited logging */ 65 #define EFX_ERR(efx, fmt, args...) \ 66 - dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) 67 68 #define EFX_INFO(efx, fmt, args...) \ 69 - dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) 70 71 #ifdef EFX_ENABLE_DEBUG 72 #define EFX_LOG(efx, fmt, args...) \ 73 - dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) 74 #else 75 #define EFX_LOG(efx, fmt, args...) \ 76 - dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) 77 #endif 78 79 #define EFX_TRACE(efx, fmt, args...) do {} while (0) ··· 80 81 #define EFX_LOG_RL(efx, fmt, args...) \ 82 do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) 83 - 84 - /* Kernel headers may redefine inline anyway */ 85 - #ifndef inline 86 - #define inline inline __attribute__ ((always_inline)) 87 - #endif 88 89 /************************************************************************** 90 * ··· 681 struct workqueue_struct *workqueue; 682 struct work_struct reset_work; 683 struct delayed_work monitor_work; 684 - unsigned long membase_phys; 685 void __iomem *membase; 686 spinlock_t biu_lock; 687 enum efx_int_mode interrupt_mode; ··· 705 706 unsigned n_rx_nodesc_drop_cnt; 707 708 - void *nic_data; 709 710 struct mutex mac_lock; 711 int port_enabled; ··· 746 void *loopback_selftest; 747 }; 748 749 /** 750 * struct efx_nic_type - Efx device type definition 751 * @mem_bar: Memory BAR number ··· 795 unsigned int txd_ring_mask; 796 unsigned int rxd_ring_mask; 797 unsigned int evq_size; 798 - dma_addr_t max_dma_mask; 799 unsigned int tx_dma_mask; 800 unsigned bug5391_mask; 801
··· 42 #ifndef EFX_DRIVER_NAME 43 #define EFX_DRIVER_NAME "sfc" 44 #endif 45 + #define EFX_DRIVER_VERSION "2.2" 46 47 #ifdef EFX_ENABLE_DEBUG 48 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) ··· 52 #define EFX_WARN_ON_PARANOID(x) do {} while (0) 53 #endif 54 55 /* Un-rate-limited logging */ 56 #define EFX_ERR(efx, fmt, args...) \ 57 + dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) 58 59 #define EFX_INFO(efx, fmt, args...) \ 60 + dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) 61 62 #ifdef EFX_ENABLE_DEBUG 63 #define EFX_LOG(efx, fmt, args...) \ 64 + dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) 65 #else 66 #define EFX_LOG(efx, fmt, args...) \ 67 + dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) 68 #endif 69 70 #define EFX_TRACE(efx, fmt, args...) do {} while (0) ··· 89 90 #define EFX_LOG_RL(efx, fmt, args...) \ 91 do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) 92 93 /************************************************************************** 94 * ··· 695 struct workqueue_struct *workqueue; 696 struct work_struct reset_work; 697 struct delayed_work monitor_work; 698 + resource_size_t membase_phys; 699 void __iomem *membase; 700 spinlock_t biu_lock; 701 enum efx_int_mode interrupt_mode; ··· 719 720 unsigned n_rx_nodesc_drop_cnt; 721 722 + struct falcon_nic_data *nic_data; 723 724 struct mutex mac_lock; 725 int port_enabled; ··· 760 void *loopback_selftest; 761 }; 762 763 + static inline int efx_dev_registered(struct efx_nic *efx) 764 + { 765 + return efx->net_dev->reg_state == NETREG_REGISTERED; 766 + } 767 + 768 + /* Net device name, for inclusion in log messages if it has been registered. 769 + * Use efx->name not efx->net_dev->name so that races with (un)registration 770 + * are harmless. 771 + */ 772 + static inline const char *efx_dev_name(struct efx_nic *efx) 773 + { 774 + return efx_dev_registered(efx) ? efx->name : ""; 775 + } 776 + 777 /** 778 * struct efx_nic_type - Efx device type definition 779 * @mem_bar: Memory BAR number ··· 795 unsigned int txd_ring_mask; 796 unsigned int rxd_ring_mask; 797 unsigned int evq_size; 798 + u64 max_dma_mask; 799 unsigned int tx_dma_mask; 800 unsigned bug5391_mask; 801
+27 -21
drivers/net/sfc/rx.c
··· 86 */ 87 #define EFX_RXD_HEAD_ROOM 2 88 89 - /* Macros for zero-order pages (potentially) containing multiple RX buffers */ 90 - #define RX_DATA_OFFSET(_data) \ 91 - (((unsigned long) (_data)) & (PAGE_SIZE-1)) 92 - #define RX_BUF_OFFSET(_rx_buf) \ 93 - RX_DATA_OFFSET((_rx_buf)->data) 94 - 95 - #define RX_PAGE_SIZE(_efx) \ 96 - (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) 97 98 99 /************************************************************************** ··· 109 static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, 110 void **tcpudp_hdr, u64 *hdr_flags, void *priv) 111 { 112 - struct efx_channel *channel = (struct efx_channel *)priv; 113 struct iphdr *iph; 114 struct tcphdr *th; 115 ··· 134 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, 135 void *priv) 136 { 137 - struct efx_channel *channel = (struct efx_channel *)priv; 138 struct ethhdr *eh; 139 struct iphdr *iph; 140 141 /* We support EtherII and VLAN encapsulated IPv4 */ 142 - eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); 143 *mac_hdr = eh; 144 145 if (eh->h_proto == htons(ETH_P_IP)) { ··· 272 return -ENOMEM; 273 274 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 275 - 0, RX_PAGE_SIZE(efx), 276 PCI_DMA_FROMDEVICE); 277 278 if (unlikely(pci_dma_mapping_error(dma_addr))) { ··· 283 284 rx_queue->buf_page = rx_buf->page; 285 rx_queue->buf_dma_addr = dma_addr; 286 - rx_queue->buf_data = ((char *) page_address(rx_buf->page) + 287 EFX_PAGE_IP_ALIGN); 288 } 289 290 - offset = RX_DATA_OFFSET(rx_queue->buf_data); 291 rx_buf->len = bytes; 292 - rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; 293 rx_buf->data = rx_queue->buf_data; 294 295 /* Try to pack multiple buffers per page */ 296 if (efx->rx_buffer_order == 0) { ··· 298 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); 299 offset += ((bytes + 0x1ff) & ~0x1ff); 300 301 - space = RX_PAGE_SIZE(efx) - offset; 302 if (space >= bytes) { 303 /* Refs dropped on kernel releasing each skb */ 304 get_page(rx_queue->buf_page); ··· 347 EFX_BUG_ON_PARANOID(rx_buf->skb); 348 if (rx_buf->unmap_addr) { 349 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, 350 - RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); 351 rx_buf->unmap_addr = 0; 352 } 353 } else if (likely(rx_buf->skb)) { ··· 404 return 0; 405 406 /* Record minimum fill level */ 407 - if (unlikely(fill_level < rx_queue->min_fill)) 408 if (fill_level) 409 rx_queue->min_fill = fill_level; 410 411 /* Acquire RX add lock. If this lock is contended, then a fast 412 * fill must already be in progress (e.g. in the refill ··· 557 struct skb_frag_struct frags; 558 559 frags.page = rx_buf->page; 560 - frags.page_offset = RX_BUF_OFFSET(rx_buf); 561 frags.size = rx_buf->len; 562 563 lro_receive_frags(lro_mgr, &frags, rx_buf->len, ··· 602 if (unlikely(rx_buf->len > hdr_len)) { 603 struct skb_frag_struct *frag = skb_shinfo(skb)->frags; 604 frag->page = rx_buf->page; 605 - frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; 606 frag->size = skb->len - hdr_len; 607 skb_shinfo(skb)->nr_frags = 1; 608 skb->data_len = frag->size; ··· 856 /* For a page that is part-way through splitting into RX buffers */ 857 if (rx_queue->buf_page != NULL) { 858 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, 859 - RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); 860 __free_pages(rx_queue->buf_page, 861 rx_queue->efx->rx_buffer_order); 862 rx_queue->buf_page = NULL;
··· 86 */ 87 #define EFX_RXD_HEAD_ROOM 2 88 89 + static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) 90 + { 91 + /* Offset is always within one page, so we don't need to consider 92 + * the page order. 93 + */ 94 + return (__force unsigned long) buf->data & (PAGE_SIZE - 1); 95 + } 96 + static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 97 + { 98 + return PAGE_SIZE << efx->rx_buffer_order; 99 + } 100 101 102 /************************************************************************** ··· 106 static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, 107 void **tcpudp_hdr, u64 *hdr_flags, void *priv) 108 { 109 + struct efx_channel *channel = priv; 110 struct iphdr *iph; 111 struct tcphdr *th; 112 ··· 131 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, 132 void *priv) 133 { 134 + struct efx_channel *channel = priv; 135 struct ethhdr *eh; 136 struct iphdr *iph; 137 138 /* We support EtherII and VLAN encapsulated IPv4 */ 139 + eh = page_address(frag->page) + frag->page_offset; 140 *mac_hdr = eh; 141 142 if (eh->h_proto == htons(ETH_P_IP)) { ··· 269 return -ENOMEM; 270 271 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 272 + 0, efx_rx_buf_size(efx), 273 PCI_DMA_FROMDEVICE); 274 275 if (unlikely(pci_dma_mapping_error(dma_addr))) { ··· 280 281 rx_queue->buf_page = rx_buf->page; 282 rx_queue->buf_dma_addr = dma_addr; 283 + rx_queue->buf_data = (page_address(rx_buf->page) + 284 EFX_PAGE_IP_ALIGN); 285 } 286 287 rx_buf->len = bytes; 288 rx_buf->data = rx_queue->buf_data; 289 + offset = efx_rx_buf_offset(rx_buf); 290 + rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; 291 292 /* Try to pack multiple buffers per page */ 293 if (efx->rx_buffer_order == 0) { ··· 295 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); 296 offset += ((bytes + 0x1ff) & ~0x1ff); 297 298 + space = efx_rx_buf_size(efx) - offset; 299 if (space >= bytes) { 300 /* Refs dropped on kernel releasing each skb */ 301 get_page(rx_queue->buf_page); ··· 344 EFX_BUG_ON_PARANOID(rx_buf->skb); 345 if (rx_buf->unmap_addr) { 346 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, 347 + efx_rx_buf_size(efx), 348 + PCI_DMA_FROMDEVICE); 349 rx_buf->unmap_addr = 0; 350 } 351 } else if (likely(rx_buf->skb)) { ··· 400 return 0; 401 402 /* Record minimum fill level */ 403 + if (unlikely(fill_level < rx_queue->min_fill)) { 404 if (fill_level) 405 rx_queue->min_fill = fill_level; 406 + } 407 408 /* Acquire RX add lock. If this lock is contended, then a fast 409 * fill must already be in progress (e.g. in the refill ··· 552 struct skb_frag_struct frags; 553 554 frags.page = rx_buf->page; 555 + frags.page_offset = efx_rx_buf_offset(rx_buf); 556 frags.size = rx_buf->len; 557 558 lro_receive_frags(lro_mgr, &frags, rx_buf->len, ··· 597 if (unlikely(rx_buf->len > hdr_len)) { 598 struct skb_frag_struct *frag = skb_shinfo(skb)->frags; 599 frag->page = rx_buf->page; 600 + frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; 601 frag->size = skb->len - hdr_len; 602 skb_shinfo(skb)->nr_frags = 1; 603 skb->data_len = frag->size; ··· 851 /* For a page that is part-way through splitting into RX buffers */ 852 if (rx_queue->buf_page != NULL) { 853 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, 854 + efx_rx_buf_size(rx_queue->efx), 855 + PCI_DMA_FROMDEVICE); 856 __free_pages(rx_queue->buf_page, 857 rx_queue->efx->rx_buffer_order); 858 rx_queue->buf_page = NULL;
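The fast-fill path above packs several receive buffers into one page by rounding each buffer length up to the next 512-byte boundary with (bytes + 0x1ff) & ~0x1ff before advancing buf_data. A small worked sketch of that arithmetic (the helper name is illustrative, not part of the driver):

/* Round a buffer length up to a 512-byte boundary, as the RX
 * page-packing code above does before placing the next buffer. */
static inline unsigned int rx_pack_align(unsigned int bytes)
{
        return (bytes + 0x1ff) & ~0x1ffU;
}
/* rx_pack_align(1536) == 1536, rx_pack_align(1600) == 2048,
 * rx_pack_align(1)    == 512 */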
+8 -6
drivers/net/sfc/selftest.c
··· 290 291 payload = &state->payload; 292 293 - received = (struct efx_loopback_payload *)(char *) buf_ptr; 294 received->ip.saddr = payload->ip.saddr; 295 received->ip.check = payload->ip.check; 296 ··· 424 * interrupt handler. */ 425 smp_wmb(); 426 427 - if (NET_DEV_REGISTERED(efx)) 428 netif_tx_lock_bh(efx->net_dev); 429 rc = efx_xmit(efx, tx_queue, skb); 430 - if (NET_DEV_REGISTERED(efx)) 431 netif_tx_unlock_bh(efx->net_dev); 432 433 if (rc != NETDEV_TX_OK) { ··· 453 int tx_done = 0, rx_good, rx_bad; 454 int i, rc = 0; 455 456 - if (NET_DEV_REGISTERED(efx)) 457 netif_tx_lock_bh(efx->net_dev); 458 459 /* Count the number of tx completions, and decrement the refcnt. Any ··· 465 dev_kfree_skb_any(skb); 466 } 467 468 - if (NET_DEV_REGISTERED(efx)) 469 netif_tx_unlock_bh(efx->net_dev); 470 471 /* Check TX completion and received packet counts */ ··· 517 state->packet_count = min(1 << (i << 2), state->packet_count); 518 state->skbs = kzalloc(sizeof(state->skbs[0]) * 519 state->packet_count, GFP_KERNEL); 520 state->flush = 0; 521 522 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " ··· 702 * "flushing" so all inflight packets are dropped */ 703 BUG_ON(efx->loopback_selftest); 704 state->flush = 1; 705 - efx->loopback_selftest = (void *)state; 706 707 rc = efx_test_loopbacks(efx, tests, loopback_modes); 708
··· 290 291 payload = &state->payload; 292 293 + received = (struct efx_loopback_payload *) buf_ptr; 294 received->ip.saddr = payload->ip.saddr; 295 received->ip.check = payload->ip.check; 296 ··· 424 * interrupt handler. */ 425 smp_wmb(); 426 427 + if (efx_dev_registered(efx)) 428 netif_tx_lock_bh(efx->net_dev); 429 rc = efx_xmit(efx, tx_queue, skb); 430 + if (efx_dev_registered(efx)) 431 netif_tx_unlock_bh(efx->net_dev); 432 433 if (rc != NETDEV_TX_OK) { ··· 453 int tx_done = 0, rx_good, rx_bad; 454 int i, rc = 0; 455 456 + if (efx_dev_registered(efx)) 457 netif_tx_lock_bh(efx->net_dev); 458 459 /* Count the number of tx completions, and decrement the refcnt. Any ··· 465 dev_kfree_skb_any(skb); 466 } 467 468 + if (efx_dev_registered(efx)) 469 netif_tx_unlock_bh(efx->net_dev); 470 471 /* Check TX completion and received packet counts */ ··· 517 state->packet_count = min(1 << (i << 2), state->packet_count); 518 state->skbs = kzalloc(sizeof(state->skbs[0]) * 519 state->packet_count, GFP_KERNEL); 520 + if (!state->skbs) 521 + return -ENOMEM; 522 state->flush = 0; 523 524 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " ··· 700 * "flushing" so all inflight packets are dropped */ 701 BUG_ON(efx->loopback_selftest); 702 state->flush = 1; 703 + efx->loopback_selftest = state; 704 705 rc = efx_test_loopbacks(efx, tests, loopback_modes); 706
+7 -7
drivers/net/sfc/sfe4001.c
··· 116 117 /* Turn off all power rails */ 118 out = 0xff; 119 - (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 120 121 /* Disable port 1 outputs on IO expander */ 122 cfg = 0xff; 123 - (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); 124 125 /* Disable port 0 outputs on IO expander */ 126 cfg = 0xff; 127 - (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); 128 129 /* Clear any over-temperature alert */ 130 - (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 131 } 132 133 /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected ··· 253 fail3: 254 /* Turn off all power rails */ 255 out = 0xff; 256 - (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 257 /* Disable port 1 outputs on IO expander */ 258 out = 0xff; 259 - (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); 260 fail2: 261 /* Disable port 0 outputs on IO expander */ 262 out = 0xff; 263 - (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); 264 fail1: 265 return rc; 266 }
··· 116 117 /* Turn off all power rails */ 118 out = 0xff; 119 + efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 120 121 /* Disable port 1 outputs on IO expander */ 122 cfg = 0xff; 123 + efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); 124 125 /* Disable port 0 outputs on IO expander */ 126 cfg = 0xff; 127 + efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); 128 129 /* Clear any over-temperature alert */ 130 + efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 131 } 132 133 /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected ··· 253 fail3: 254 /* Turn off all power rails */ 255 out = 0xff; 256 + efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 257 /* Disable port 1 outputs on IO expander */ 258 out = 0xff; 259 + efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); 260 fail2: 261 /* Disable port 0 outputs on IO expander */ 262 out = 0xff; 263 + efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); 264 fail1: 265 return rc; 266 }
+3 -1
drivers/net/sfc/tenxpress.c
··· 211 int rc = 0; 212 213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 214 efx->phy_data = phy_data; 215 216 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); ··· 378 * perform a special software reset */ 379 if ((phy_data->tx_disabled && !efx->tx_disabled) || 380 loop_change) { 381 - (void) tenxpress_special_reset(efx); 382 falcon_reset_xaui(efx); 383 } 384
··· 211 int rc = 0; 212 213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 214 + if (!phy_data) 215 + return -ENOMEM; 216 efx->phy_data = phy_data; 217 218 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); ··· 376 * perform a special software reset */ 377 if ((phy_data->tx_disabled && !efx->tx_disabled) || 378 loop_change) { 379 + tenxpress_special_reset(efx); 380 falcon_reset_xaui(efx); 381 } 382
+7 -4
drivers/net/sfc/tx.c
··· 387 if (unlikely(tx_queue->stopped)) { 388 fill_level = tx_queue->insert_count - tx_queue->read_count; 389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 390 - EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); 391 392 /* Do this under netif_tx_lock(), to avoid racing 393 * with efx_xmit(). */ ··· 639 base_dma = tsoh->dma_addr & PAGE_MASK; 640 641 p = &tx_queue->tso_headers_free; 642 - while (*p != NULL) 643 if (((unsigned long)*p & PAGE_MASK) == base_kva) 644 *p = (*p)->next; 645 else 646 p = &(*p)->next; 647 648 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 649 } ··· 940 941 /* Allocate a DMA-mapped header buffer. */ 942 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 943 - if (tx_queue->tso_headers_free == NULL) 944 if (efx_tsoh_block_alloc(tx_queue)) 945 return -1; 946 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); 947 tsoh = tx_queue->tso_headers_free; 948 tx_queue->tso_headers_free = tsoh->next; ··· 1108 { 1109 unsigned i; 1110 1111 - if (tx_queue->buffer) 1112 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1113 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1114 1115 while (tx_queue->tso_headers_free != NULL) 1116 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
··· 387 if (unlikely(tx_queue->stopped)) { 388 fill_level = tx_queue->insert_count - tx_queue->read_count; 389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 390 + EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 391 392 /* Do this under netif_tx_lock(), to avoid racing 393 * with efx_xmit(). */ ··· 639 base_dma = tsoh->dma_addr & PAGE_MASK; 640 641 p = &tx_queue->tso_headers_free; 642 + while (*p != NULL) { 643 if (((unsigned long)*p & PAGE_MASK) == base_kva) 644 *p = (*p)->next; 645 else 646 p = &(*p)->next; 647 + } 648 649 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 650 } ··· 939 940 /* Allocate a DMA-mapped header buffer. */ 941 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 942 + if (tx_queue->tso_headers_free == NULL) { 943 if (efx_tsoh_block_alloc(tx_queue)) 944 return -1; 945 + } 946 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); 947 tsoh = tx_queue->tso_headers_free; 948 tx_queue->tso_headers_free = tsoh->next; ··· 1106 { 1107 unsigned i; 1108 1109 + if (tx_queue->buffer) { 1110 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1111 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1112 + } 1113 1114 while (tx_queue->tso_headers_free != NULL) 1115 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
+1 -1
drivers/net/sfc/workarounds.h
··· 16 */ 17 18 #define EFX_WORKAROUND_ALWAYS(efx) 1 19 - #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) 20 21 /* XAUI resets if link not detected */ 22 #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
··· 16 */ 17 18 #define EFX_WORKAROUND_ALWAYS(efx) 1 19 + #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) 20 21 /* XAUI resets if link not detected */ 22 #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
+3 -1
drivers/net/sfc/xfp_phy.c
··· 85 int rc; 86 87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 88 - efx->phy_data = (void *) phy_data; 89 90 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 91 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
··· 85 int rc; 86 87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 88 + if (!phy_data) 89 + return -ENOMEM; 90 + efx->phy_data = phy_data; 91 92 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
+19 -10
drivers/net/sky2.c
··· 1159 } 1160 1161 #ifdef SKY2_VLAN_TAG_USED 1162 - static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 1163 { 1164 - struct sky2_port *sky2 = netdev_priv(dev); 1165 - struct sky2_hw *hw = sky2->hw; 1166 - u16 port = sky2->port; 1167 - 1168 - netif_tx_lock_bh(dev); 1169 - napi_disable(&hw->napi); 1170 - 1171 - sky2->vlgrp = grp; 1172 - if (grp) { 1173 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1174 RX_VLAN_STRIP_ON); 1175 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), ··· 1172 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1173 TX_VLAN_TAG_OFF); 1174 } 1175 1176 sky2_read32(hw, B0_Y2_SP_LISR); 1177 napi_enable(&hw->napi); ··· 1422 1423 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1424 TX_RING_SIZE - 1); 1425 1426 err = sky2_rx_start(sky2); 1427 if (err)
··· 1159 } 1160 1161 #ifdef SKY2_VLAN_TAG_USED 1162 + static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) 1163 { 1164 + if (onoff) { 1165 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1166 RX_VLAN_STRIP_ON); 1167 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), ··· 1180 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1181 TX_VLAN_TAG_OFF); 1182 } 1183 + } 1184 + 1185 + static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 1186 + { 1187 + struct sky2_port *sky2 = netdev_priv(dev); 1188 + struct sky2_hw *hw = sky2->hw; 1189 + u16 port = sky2->port; 1190 + 1191 + netif_tx_lock_bh(dev); 1192 + napi_disable(&hw->napi); 1193 + 1194 + sky2->vlgrp = grp; 1195 + sky2_set_vlan_mode(hw, port, grp != NULL); 1196 1197 sky2_read32(hw, B0_Y2_SP_LISR); 1198 napi_enable(&hw->napi); ··· 1417 1418 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1419 TX_RING_SIZE - 1); 1420 + 1421 + #ifdef SKY2_VLAN_TAG_USED 1422 + sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); 1423 + #endif 1424 1425 err = sky2_rx_start(sky2); 1426 if (err)
+1 -1
drivers/net/tokenring/3c359.h
··· 264 u16 asb; 265 266 u8 __iomem *xl_mmio; 267 - char *xl_card_name; 268 struct pci_dev *pdev ; 269 270 spinlock_t xl_lock ;
··· 264 u16 asb; 265 266 u8 __iomem *xl_mmio; 267 + const char *xl_card_name; 268 struct pci_dev *pdev ; 269 270 spinlock_t xl_lock ;
+1 -1
drivers/net/tokenring/olympic.h
··· 254 u8 __iomem *olympic_mmio; 255 u8 __iomem *olympic_lap; 256 struct pci_dev *pdev ; 257 - char *olympic_card_name ; 258 259 spinlock_t olympic_lock ; 260
··· 254 u8 __iomem *olympic_mmio; 255 u8 __iomem *olympic_lap; 256 struct pci_dev *pdev ; 257 + const char *olympic_card_name; 258 259 spinlock_t olympic_lock ; 260
+15 -1
drivers/net/tulip/uli526x.c
··· 225 static const struct ethtool_ops netdev_ethtool_ops; 226 static u16 read_srom_word(long, int); 227 static irqreturn_t uli526x_interrupt(int, void *); 228 static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); 229 static void allocate_rx_buffer(struct uli526x_board_info *); 230 static void update_cr6(u32, unsigned long); ··· 342 dev->get_stats = &uli526x_get_stats; 343 dev->set_multicast_list = &uli526x_set_filter_mode; 344 dev->ethtool_ops = &netdev_ethtool_ops; 345 spin_lock_init(&db->lock); 346 347 ··· 687 db->cr5_data = inl(ioaddr + DCR5); 688 outl(db->cr5_data, ioaddr + DCR5); 689 if ( !(db->cr5_data & 0x180c1) ) { 690 - spin_unlock_irqrestore(&db->lock, flags); 691 outl(db->cr7_data, ioaddr + DCR7); 692 return IRQ_HANDLED; 693 } 694 ··· 722 return IRQ_HANDLED; 723 } 724 725 726 /* 727 * Free TX resource after TX complete
··· 225 static const struct ethtool_ops netdev_ethtool_ops; 226 static u16 read_srom_word(long, int); 227 static irqreturn_t uli526x_interrupt(int, void *); 228 + #ifdef CONFIG_NET_POLL_CONTROLLER 229 + static void uli526x_poll(struct net_device *dev); 230 + #endif 231 static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); 232 static void allocate_rx_buffer(struct uli526x_board_info *); 233 static void update_cr6(u32, unsigned long); ··· 339 dev->get_stats = &uli526x_get_stats; 340 dev->set_multicast_list = &uli526x_set_filter_mode; 341 dev->ethtool_ops = &netdev_ethtool_ops; 342 + #ifdef CONFIG_NET_POLL_CONTROLLER 343 + dev->poll_controller = &uli526x_poll; 344 + #endif 345 spin_lock_init(&db->lock); 346 347 ··· 681 db->cr5_data = inl(ioaddr + DCR5); 682 outl(db->cr5_data, ioaddr + DCR5); 683 if ( !(db->cr5_data & 0x180c1) ) { 684 + /* Restore CR7 to enable interrupt mask */ 685 outl(db->cr7_data, ioaddr + DCR7); 686 + spin_unlock_irqrestore(&db->lock, flags); 687 return IRQ_HANDLED; 688 } 689 ··· 715 return IRQ_HANDLED; 716 } 717 718 + #ifdef CONFIG_NET_POLL_CONTROLLER 719 + static void uli526x_poll(struct net_device *dev) 720 + { 721 + /* ISR grabs the irqsave lock, so this should be safe */ 722 + uli526x_interrupt(dev->irq, dev); 723 + } 724 + #endif 725 726 /* 727 * Free TX resource after TX complete
+5 -4
drivers/net/ucc_geth.c
··· 237 skb->dev = ugeth->dev; 238 239 out_be32(&((struct qe_bd __iomem *)bd)->buf, 240 - dma_map_single(NULL, 241 skb->data, 242 ugeth->ug_info->uf_info.max_rx_buf_length + 243 UCC_GETH_RX_DATA_BUF_ALIGNMENT, ··· 2158 continue; 2159 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2160 if (ugeth->tx_skbuff[i][j]) { 2161 - dma_unmap_single(NULL, 2162 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2163 (in_be32((u32 __iomem *)bd) & 2164 BD_LENGTH_MASK), ··· 2186 bd = ugeth->p_rx_bd_ring[i]; 2187 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2188 if (ugeth->rx_skbuff[i][j]) { 2189 - dma_unmap_single(NULL, 2190 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2191 ugeth->ug_info-> 2192 uf_info.max_rx_buf_length + ··· 3406 3407 /* set up the buffer descriptor */ 3408 out_be32(&((struct qe_bd __iomem *)bd)->buf, 3409 - dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); 3410 3411 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3412
··· 237 skb->dev = ugeth->dev; 238 239 out_be32(&((struct qe_bd __iomem *)bd)->buf, 240 + dma_map_single(&ugeth->dev->dev, 241 skb->data, 242 ugeth->ug_info->uf_info.max_rx_buf_length + 243 UCC_GETH_RX_DATA_BUF_ALIGNMENT, ··· 2158 continue; 2159 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2160 if (ugeth->tx_skbuff[i][j]) { 2161 + dma_unmap_single(&ugeth->dev->dev, 2162 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2163 (in_be32((u32 __iomem *)bd) & 2164 BD_LENGTH_MASK), ··· 2186 bd = ugeth->p_rx_bd_ring[i]; 2187 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2188 if (ugeth->rx_skbuff[i][j]) { 2189 + dma_unmap_single(&ugeth->dev->dev, 2190 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2191 ugeth->ug_info-> 2192 uf_info.max_rx_buf_length + ··· 3406 3407 /* set up the buffer descriptor */ 3408 out_be32(&((struct qe_bd __iomem *)bd)->buf, 3409 + dma_map_single(&ugeth->dev->dev, skb->data, 3410 + skb->len, DMA_TO_DEVICE)); 3411 3412 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3413
+4
drivers/net/usb/asix.c
··· 1440 // Belkin F5D5055 1441 USB_DEVICE(0x050d, 0x5055), 1442 .driver_info = (unsigned long) &ax88178_info, 1443 }, 1444 { }, // END 1445 };
··· 1440 // Belkin F5D5055 1441 USB_DEVICE(0x050d, 0x5055), 1442 .driver_info = (unsigned long) &ax88178_info, 1443 + }, { 1444 + // Apple USB Ethernet Adapter 1445 + USB_DEVICE(0x05ac, 0x1402), 1446 + .driver_info = (unsigned long) &ax88772_info, 1447 }, 1448 { }, // END 1449 };
+1 -1
drivers/net/usb/rndis_host.c
··· 194 dev_dbg(&info->control->dev, 195 "rndis response error, code %d\n", retval); 196 } 197 - msleep(2); 198 } 199 dev_dbg(&info->control->dev, "rndis response timeout\n"); 200 return -ETIMEDOUT;
··· 194 dev_dbg(&info->control->dev, 195 "rndis response error, code %d\n", retval); 196 } 197 + msleep(20); 198 } 199 dev_dbg(&info->control->dev, "rndis response timeout\n"); 200 return -ETIMEDOUT;
+1 -2
drivers/net/virtio_net.c
··· 470 kfree_skb(skb); 471 vi->num--; 472 } 473 - while ((skb = __skb_dequeue(&vi->send)) != NULL) 474 - kfree_skb(skb); 475 476 BUG_ON(vi->num != 0); 477
··· 470 kfree_skb(skb); 471 vi->num--; 472 } 473 + __skb_queue_purge(&vi->send); 474 475 BUG_ON(vi->num != 0); 476
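Both this hunk and the xen-netfront hunk below replace an open-coded dequeue-and-free loop with __skb_queue_purge(). The helper in <linux/skbuff.h> does essentially the same thing, so this is a simplification rather than a behaviour change; roughly:

/* Roughly what __skb_queue_purge() does for an unlocked queue. */
static inline void skb_queue_purge_sketch(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = __skb_dequeue(list)) != NULL)
                kfree_skb(skb);
}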
+11 -8
drivers/net/wan/hdlc.c
··· 43 44 #undef DEBUG_LINK 45 46 - static struct hdlc_proto *first_proto = NULL; 47 - 48 49 static int hdlc_change_mtu(struct net_device *dev, int new_mtu) 50 { ··· 313 314 void register_hdlc_protocol(struct hdlc_proto *proto) 315 { 316 proto->next = first_proto; 317 first_proto = proto; 318 } 319 320 321 void unregister_hdlc_protocol(struct hdlc_proto *proto) 322 { 323 - struct hdlc_proto **p = &first_proto; 324 - while (*p) { 325 - if (*p == proto) { 326 - *p = proto->next; 327 - return; 328 - } 329 p = &((*p)->next); 330 } 331 } 332 333
··· 43 44 #undef DEBUG_LINK 45 46 + static struct hdlc_proto *first_proto; 47 48 static int hdlc_change_mtu(struct net_device *dev, int new_mtu) 49 { ··· 314 315 void register_hdlc_protocol(struct hdlc_proto *proto) 316 { 317 + rtnl_lock(); 318 proto->next = first_proto; 319 first_proto = proto; 320 + rtnl_unlock(); 321 } 322 323 324 void unregister_hdlc_protocol(struct hdlc_proto *proto) 325 { 326 + struct hdlc_proto **p; 327 + 328 + rtnl_lock(); 329 + p = &first_proto; 330 + while (*p != proto) { 331 + BUG_ON(!*p); 332 p = &((*p)->next); 333 } 334 + *p = proto->next; 335 + rtnl_unlock(); 336 } 337 338
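The rewritten unregister_hdlc_protocol() walks the protocol list through a pointer-to-pointer, which removes the need to special-case removal of the head element, and the BUG_ON fires if the protocol was never registered. A generic sketch of the same unlink pattern (the node type and function here are made up for illustration):

struct node {
        struct node *next;
};

/* Unlink 'victim' from a singly linked list; no head special case. */
static void unlink_node(struct node **head, struct node *victim)
{
        struct node **p = head;

        while (*p != victim)            /* assumes victim is on the list */
                p = &(*p)->next;
        *p = victim->next;
}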
+48 -32
drivers/net/wan/hdlc_cisco.c
··· 56 cisco_proto settings; 57 58 struct timer_list timer; 59 unsigned long last_poll; 60 int up; 61 int request_sent; ··· 159 { 160 struct net_device *dev = skb->dev; 161 hdlc_device *hdlc = dev_to_hdlc(dev); 162 struct hdlc_header *data = (struct hdlc_header*)skb->data; 163 struct cisco_packet *cisco_data; 164 struct in_device *in_dev; ··· 222 goto rx_error; 223 224 case CISCO_KEEPALIVE_REQ: 225 - state(hdlc)->rxseq = ntohl(cisco_data->par1); 226 - if (state(hdlc)->request_sent && 227 - ntohl(cisco_data->par2) == state(hdlc)->txseq) { 228 - state(hdlc)->last_poll = jiffies; 229 - if (!state(hdlc)->up) { 230 u32 sec, min, hrs, days; 231 sec = ntohl(cisco_data->time) / 1000; 232 min = sec / 60; sec -= min * 60; ··· 235 days = hrs / 24; hrs -= days * 24; 236 printk(KERN_INFO "%s: Link up (peer " 237 "uptime %ud%uh%um%us)\n", 238 - dev->name, days, hrs, 239 - min, sec); 240 netif_dormant_off(dev); 241 - state(hdlc)->up = 1; 242 } 243 } 244 245 dev_kfree_skb_any(skb); 246 return NET_RX_SUCCESS; ··· 264 { 265 struct net_device *dev = (struct net_device *)arg; 266 hdlc_device *hdlc = dev_to_hdlc(dev); 267 268 - if (state(hdlc)->up && 269 - time_after(jiffies, state(hdlc)->last_poll + 270 - state(hdlc)->settings.timeout * HZ)) { 271 - state(hdlc)->up = 0; 272 printk(KERN_INFO "%s: Link down\n", dev->name); 273 netif_dormant_on(dev); 274 } 275 276 - cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, 277 - htonl(++state(hdlc)->txseq), 278 - htonl(state(hdlc)->rxseq)); 279 - state(hdlc)->request_sent = 1; 280 - state(hdlc)->timer.expires = jiffies + 281 - state(hdlc)->settings.interval * HZ; 282 - state(hdlc)->timer.function = cisco_timer; 283 - state(hdlc)->timer.data = arg; 284 - add_timer(&state(hdlc)->timer); 285 } 286 287 ··· 290 static void cisco_start(struct net_device *dev) 291 { 292 hdlc_device *hdlc = dev_to_hdlc(dev); 293 - state(hdlc)->up = 0; 294 - state(hdlc)->request_sent = 0; 295 - state(hdlc)->txseq = state(hdlc)->rxseq = 0; 296 297 - init_timer(&state(hdlc)->timer); 298 - state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/ 299 - state(hdlc)->timer.function = cisco_timer; 300 - state(hdlc)->timer.data = (unsigned long)dev; 301 - add_timer(&state(hdlc)->timer); 302 } 303 304 ··· 311 static void cisco_stop(struct net_device *dev) 312 { 313 hdlc_device *hdlc = dev_to_hdlc(dev); 314 - del_timer_sync(&state(hdlc)->timer); 315 netif_dormant_on(dev); 316 - state(hdlc)->up = 0; 317 - state(hdlc)->request_sent = 0; 318 } 319 320 ··· 382 return result; 383 384 memcpy(&state(hdlc)->settings, &new_settings, size); 385 dev->hard_start_xmit = hdlc->xmit; 386 dev->header_ops = &cisco_header_ops; 387 dev->type = ARPHRD_CISCO;
··· 56 cisco_proto settings; 57 58 struct timer_list timer; 59 + spinlock_t lock; 60 unsigned long last_poll; 61 int up; 62 int request_sent; ··· 158 { 159 struct net_device *dev = skb->dev; 160 hdlc_device *hdlc = dev_to_hdlc(dev); 161 + struct cisco_state *st = state(hdlc); 162 struct hdlc_header *data = (struct hdlc_header*)skb->data; 163 struct cisco_packet *cisco_data; 164 struct in_device *in_dev; ··· 220 goto rx_error; 221 222 case CISCO_KEEPALIVE_REQ: 223 + spin_lock(&st->lock); 224 + st->rxseq = ntohl(cisco_data->par1); 225 + if (st->request_sent && 226 + ntohl(cisco_data->par2) == st->txseq) { 227 + st->last_poll = jiffies; 228 + if (!st->up) { 229 u32 sec, min, hrs, days; 230 sec = ntohl(cisco_data->time) / 1000; 231 min = sec / 60; sec -= min * 60; ··· 232 days = hrs / 24; hrs -= days * 24; 233 printk(KERN_INFO "%s: Link up (peer " 234 "uptime %ud%uh%um%us)\n", 235 + dev->name, days, hrs, min, sec); 236 netif_dormant_off(dev); 237 + st->up = 1; 238 } 239 } 240 + spin_unlock(&st->lock); 241 242 dev_kfree_skb_any(skb); 243 return NET_RX_SUCCESS; ··· 261 { 262 struct net_device *dev = (struct net_device *)arg; 263 hdlc_device *hdlc = dev_to_hdlc(dev); 264 + struct cisco_state *st = state(hdlc); 265 266 + spin_lock(&st->lock); 267 + if (st->up && 268 + time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) { 269 + st->up = 0; 270 printk(KERN_INFO "%s: Link down\n", dev->name); 271 netif_dormant_on(dev); 272 } 273 274 + cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), 275 + htonl(st->rxseq)); 276 + st->request_sent = 1; 277 + spin_unlock(&st->lock); 278 + 279 + st->timer.expires = jiffies + st->settings.interval * HZ; 280 + st->timer.function = cisco_timer; 281 + st->timer.data = arg; 282 + add_timer(&st->timer); 283 } 284 285 ··· 286 static void cisco_start(struct net_device *dev) 287 { 288 hdlc_device *hdlc = dev_to_hdlc(dev); 289 + struct cisco_state *st = state(hdlc); 290 + unsigned long flags; 291 292 + spin_lock_irqsave(&st->lock, flags); 293 + st->up = 0; 294 + st->request_sent = 0; 295 + st->txseq = st->rxseq = 0; 296 + spin_unlock_irqrestore(&st->lock, flags); 297 + 298 + init_timer(&st->timer); 299 + st->timer.expires = jiffies + HZ; /* First poll after 1 s */ 300 + st->timer.function = cisco_timer; 301 + st->timer.data = (unsigned long)dev; 302 + add_timer(&st->timer); 303 } 304 305 ··· 302 static void cisco_stop(struct net_device *dev) 303 { 304 hdlc_device *hdlc = dev_to_hdlc(dev); 305 + struct cisco_state *st = state(hdlc); 306 + unsigned long flags; 307 + 308 + del_timer_sync(&st->timer); 309 + 310 + spin_lock_irqsave(&st->lock, flags); 311 netif_dormant_on(dev); 312 + st->up = 0; 313 + st->request_sent = 0; 314 + spin_unlock_irqrestore(&st->lock, flags); 315 } 316 317 ··· 367 return result; 368 369 memcpy(&state(hdlc)->settings, &new_settings, size); 370 + spin_lock_init(&state(hdlc)->lock); 371 dev->hard_start_xmit = hdlc->xmit; 372 dev->header_ops = &cisco_header_ops; 373 dev->type = ARPHRD_CISCO;
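The new per-device lock in cisco_state is taken with plain spin_lock() in the receive and timer paths, which run in softirq context, and with spin_lock_irqsave() in cisco_start()/cisco_stop(), which run in process context, so the keepalive counters cannot be updated concurrently from the two sides. A minimal sketch of that locking split (function names are illustrative, not driver code):

/* Softirq-context user (timer / rx): plain spin_lock is sufficient. */
static void keepalive_softirq_path(struct cisco_state *st)
{
        spin_lock(&st->lock);
        st->rxseq++;                    /* touch shared keepalive state */
        spin_unlock(&st->lock);
}

/* Process-context user (start/stop): disable local interrupts so the
 * softirq users cannot preempt the holder on the same CPU. */
static void keepalive_process_path(struct cisco_state *st)
{
        unsigned long flags;

        spin_lock_irqsave(&st->lock, flags);
        st->up = 0;
        st->request_sent = 0;
        spin_unlock_irqrestore(&st->lock, flags);
}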
+2 -4
drivers/net/xen-netfront.c
··· 946 work_done++; 947 } 948 949 - while ((skb = __skb_dequeue(&errq))) 950 - kfree_skb(skb); 951 952 work_done -= handle_incoming_queue(dev, &rxq); 953 ··· 1078 } 1079 } 1080 1081 - while ((skb = __skb_dequeue(&free_list)) != NULL) 1082 - dev_kfree_skb(skb); 1083 1084 spin_unlock_bh(&np->rx_lock); 1085 }
··· 946 work_done++; 947 } 948 949 + __skb_queue_purge(&errq); 950 951 work_done -= handle_incoming_queue(dev, &rxq); 952 ··· 1079 } 1080 } 1081 1082 + __skb_queue_purge(&free_list); 1083 1084 spin_unlock_bh(&np->rx_lock); 1085 }