Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sungem: Spring cleaning and GRO support

This patch simplifies the logic and locking in sungem significantly:

- LLTX is gone, all private locks are gone, mutex is gone
- We don't poll the PHY while the interface is down
- The above allowed me to get rid of a pile of state flags,
using the proper interface state provided by the networking
stack when needed, and overall simplify the driver a lot
- Allocate the bulk of RX skbs at init time using GFP_KERNEL
- Fix a bug where dev->features was set after register_netdev()
- Added GRO while at it

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Benjamin Herrenschmidt and committed by
David S. Miller
fe09bb61 6f92c66f

+379 -547
+379 -522
drivers/net/sungem.c
··· 10 10 * NAPI and NETPOLL support 11 11 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com) 12 12 * 13 - * TODO: 14 - * - Now that the driver was significantly simplified, I need to rework 15 - * the locking. I'm sure we don't need _2_ spinlocks, and we probably 16 - * can avoid taking most of them for so long period of time (and schedule 17 - * instead). The main issues at this point are caused by the netdev layer 18 - * though: 19 - * 20 - * gem_change_mtu() and gem_set_multicast() are called with a read_lock() 21 - * help by net/core/dev.c, thus they can't schedule. That means they can't 22 - * call napi_disable() neither, thus force gem_poll() to keep a spinlock 23 - * where it could have been dropped. change_mtu especially would love also to 24 - * be able to msleep instead of horrid locked delays when resetting the HW, 25 - * but that read_lock() makes it impossible, unless I defer it's action to 26 - * the reset task, which means it'll be asynchronous (won't take effect until 27 - * the system schedules a bit). 28 - * 29 - * Also, it would probably be possible to also remove most of the long-life 30 - * locking in open/resume code path (gem_reinit_chip) by beeing more careful 31 - * about when we can start taking interrupts or get xmit() called... 32 13 */ 33 14 34 15 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ··· 38 57 #include <linux/workqueue.h> 39 58 #include <linux/if_vlan.h> 40 59 #include <linux/bitops.h> 41 - #include <linux/mutex.h> 42 60 #include <linux/mm.h> 43 61 #include <linux/gfp.h> 44 62 ··· 75 95 SUPPORTED_Pause | SUPPORTED_Autoneg) 76 96 77 97 #define DRV_NAME "sungem" 78 - #define DRV_VERSION "0.98" 79 - #define DRV_RELDATE "8/24/03" 80 - #define DRV_AUTHOR "David S. Miller (davem@redhat.com)" 98 + #define DRV_VERSION "1.0" 99 + #define DRV_AUTHOR "David S. 
Miller <davem@redhat.com>" 81 100 82 101 static char version[] __devinitdata = 83 - DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; 102 + DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; 84 103 85 104 MODULE_AUTHOR(DRV_AUTHOR); 86 105 MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); ··· 197 218 { 198 219 /* Disable all interrupts, including TXDONE */ 199 220 writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); 221 + (void)readl(gp->regs + GREG_IMASK); /* write posting */ 200 222 } 201 223 202 224 static void gem_get_cell(struct gem *gp) ··· 225 245 udelay(10); 226 246 } 227 247 #endif /* CONFIG_PPC_PMAC */ 248 + } 249 + 250 + static inline void gem_netif_stop(struct gem *gp) 251 + { 252 + gp->dev->trans_start = jiffies; /* prevent tx timeout */ 253 + napi_disable(&gp->napi); 254 + netif_tx_disable(gp->dev); 255 + } 256 + 257 + static inline void gem_netif_start(struct gem *gp) 258 + { 259 + /* NOTE: unconditional netif_wake_queue is only 260 + * appropriate so long as all callers are assured to 261 + * have free tx slots. 
262 + */ 263 + netif_wake_queue(gp->dev); 264 + napi_enable(&gp->napi); 265 + } 266 + 267 + static void gem_schedule_reset(struct gem *gp) 268 + { 269 + gp->reset_task_pending = 1; 270 + schedule_work(&gp->reset_task); 228 271 } 229 272 230 273 static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) ··· 607 604 gp->dev->name); 608 605 dev->stats.rx_errors++; 609 606 610 - goto do_reset; 607 + return 1; 611 608 } 612 609 613 610 if (gem_status & GREG_STAT_PCS) { 614 611 if (gem_pcs_interrupt(dev, gp, gem_status)) 615 - goto do_reset; 612 + return 1; 616 613 } 617 614 618 615 if (gem_status & GREG_STAT_TXMAC) { 619 616 if (gem_txmac_interrupt(dev, gp, gem_status)) 620 - goto do_reset; 617 + return 1; 621 618 } 622 619 623 620 if (gem_status & GREG_STAT_RXMAC) { 624 621 if (gem_rxmac_interrupt(dev, gp, gem_status)) 625 - goto do_reset; 622 + return 1; 626 623 } 627 624 628 625 if (gem_status & GREG_STAT_MAC) { 629 626 if (gem_mac_interrupt(dev, gp, gem_status)) 630 - goto do_reset; 627 + return 1; 631 628 } 632 629 633 630 if (gem_status & GREG_STAT_MIF) { 634 631 if (gem_mif_interrupt(dev, gp, gem_status)) 635 - goto do_reset; 632 + return 1; 636 633 } 637 634 638 635 if (gem_status & GREG_STAT_PCIERR) { 639 636 if (gem_pci_interrupt(dev, gp, gem_status)) 640 - goto do_reset; 637 + return 1; 641 638 } 642 639 643 640 return 0; 644 - 645 - do_reset: 646 - gp->reset_task_pending = 1; 647 - schedule_work(&gp->reset_task); 648 - 649 - return 1; 650 641 } 651 642 652 643 static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) 653 644 { 654 645 int entry, limit; 655 - 656 - if (netif_msg_intr(gp)) 657 - printk(KERN_DEBUG "%s: tx interrupt, gem_status: 0x%x\n", 658 - gp->dev->name, gem_status); 659 646 660 647 entry = gp->tx_old; 661 648 limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); ··· 690 697 } 691 698 692 699 dev->stats.tx_packets++; 693 - dev_kfree_skb_irq(skb); 700 + dev_kfree_skb(skb); 694 701 } 
695 702 gp->tx_old = entry; 696 703 697 - if (netif_queue_stopped(dev) && 698 - TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 699 - netif_wake_queue(dev); 704 + /* Need to make the tx_old update visible to gem_start_xmit() 705 + * before checking for netif_queue_stopped(). Without the 706 + * memory barrier, there is a small possibility that gem_start_xmit() 707 + * will miss it and cause the queue to be stopped forever. 708 + */ 709 + smp_mb(); 710 + 711 + if (unlikely(netif_queue_stopped(dev) && 712 + TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { 713 + struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 714 + 715 + __netif_tx_lock(txq, smp_processor_id()); 716 + if (netif_queue_stopped(dev) && 717 + TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 718 + netif_wake_queue(dev); 719 + __netif_tx_unlock(txq); 720 + } 700 721 } 701 722 702 723 static __inline__ void gem_post_rxds(struct gem *gp, int limit) ··· 741 734 mb(); 742 735 writel(kick, gp->regs + RXDMA_KICK); 743 736 } 737 + } 738 + 739 + #define ALIGNED_RX_SKB_ADDR(addr) \ 740 + ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) 741 + static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size, 742 + gfp_t gfp_flags) 743 + { 744 + struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); 745 + 746 + if (likely(skb)) { 747 + unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); 748 + skb_reserve(skb, offset); 749 + skb->dev = dev; 750 + } 751 + return skb; 744 752 } 745 753 746 754 static int gem_rx(struct gem *gp, int work_to_do) ··· 821 799 if (len > RX_COPY_THRESHOLD) { 822 800 struct sk_buff *new_skb; 823 801 824 - new_skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 802 + new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 825 803 if (new_skb == NULL) { 826 804 drops++; 827 805 goto drop_it; ··· 830 808 RX_BUF_ALLOC_SIZE(gp), 831 809 PCI_DMA_FROMDEVICE); 832 810 gp->rx_skbs[entry] = new_skb; 833 - new_skb->dev = gp->dev; 834 811 
skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); 835 812 rxd->buffer = cpu_to_le64(pci_map_page(gp->pdev, 836 813 virt_to_page(new_skb->data), ··· 841 820 /* Trim the original skb for the netif. */ 842 821 skb_trim(skb, len); 843 822 } else { 844 - struct sk_buff *copy_skb = dev_alloc_skb(len + 2); 823 + struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); 845 824 846 825 if (copy_skb == NULL) { 847 826 drops++; ··· 863 842 skb->ip_summed = CHECKSUM_COMPLETE; 864 843 skb->protocol = eth_type_trans(skb, gp->dev); 865 844 866 - netif_receive_skb(skb); 845 + napi_gro_receive(&gp->napi, skb); 867 846 868 847 dev->stats.rx_packets++; 869 848 dev->stats.rx_bytes += len; ··· 886 865 { 887 866 struct gem *gp = container_of(napi, struct gem, napi); 888 867 struct net_device *dev = gp->dev; 889 - unsigned long flags; 890 868 int work_done; 891 - 892 - /* 893 - * NAPI locking nightmare: See comment at head of driver 894 - */ 895 - spin_lock_irqsave(&gp->lock, flags); 896 869 897 870 work_done = 0; 898 871 do { 899 872 /* Handle anomalies */ 900 - if (gp->status & GREG_STAT_ABNORMAL) { 901 - if (gem_abnormal_irq(dev, gp, gp->status)) 902 - break; 873 + if (unlikely(gp->status & GREG_STAT_ABNORMAL)) { 874 + struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); 875 + int reset; 876 + 877 + /* We run the abnormal interrupt handling code with 878 + * the Tx lock. 
It only resets the Rx portion of the 879 + * chip, but we need to guard it against DMA being 880 + * restarted by the link poll timer 881 + */ 882 + __netif_tx_lock(txq, smp_processor_id()); 883 + reset = gem_abnormal_irq(dev, gp, gp->status); 884 + __netif_tx_unlock(txq); 885 + if (reset) { 886 + gem_schedule_reset(gp); 887 + napi_complete(napi); 888 + return work_done; 889 + } 903 890 } 904 891 905 892 /* Run TX completion thread */ 906 - spin_lock(&gp->tx_lock); 907 893 gem_tx(dev, gp, gp->status); 908 - spin_unlock(&gp->tx_lock); 909 - 910 - spin_unlock_irqrestore(&gp->lock, flags); 911 894 912 895 /* Run RX thread. We don't use any locking here, 913 896 * code willing to do bad things - like cleaning the ··· 923 898 if (work_done >= budget) 924 899 return work_done; 925 900 926 - spin_lock_irqsave(&gp->lock, flags); 927 - 928 901 gp->status = readl(gp->regs + GREG_STAT); 929 902 } while (gp->status & GREG_STAT_NAPI); 930 903 931 - __napi_complete(napi); 904 + napi_complete(napi); 932 905 gem_enable_ints(gp); 933 - 934 - spin_unlock_irqrestore(&gp->lock, flags); 935 906 936 907 return work_done; 937 908 } ··· 936 915 { 937 916 struct net_device *dev = dev_id; 938 917 struct gem *gp = netdev_priv(dev); 939 - unsigned long flags; 940 - 941 - /* Swallow interrupts when shutting the chip down, though 942 - * that shouldn't happen, we should have done free_irq() at 943 - * this point... 
944 - */ 945 - if (!gp->running) 946 - return IRQ_HANDLED; 947 - 948 - spin_lock_irqsave(&gp->lock, flags); 949 918 950 919 if (napi_schedule_prep(&gp->napi)) { 951 920 u32 gem_status = readl(gp->regs + GREG_STAT); 952 921 953 - if (gem_status == 0) { 922 + if (unlikely(gem_status == 0)) { 954 923 napi_enable(&gp->napi); 955 - spin_unlock_irqrestore(&gp->lock, flags); 956 924 return IRQ_NONE; 957 925 } 926 + if (netif_msg_intr(gp)) 927 + printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n", 928 + gp->dev->name, gem_status); 929 + 958 930 gp->status = gem_status; 959 931 gem_disable_ints(gp); 960 932 __napi_schedule(&gp->napi); 961 933 } 962 - 963 - spin_unlock_irqrestore(&gp->lock, flags); 964 934 965 935 /* If polling was disabled at the time we received that 966 936 * interrupt, we may return IRQ_HANDLED here while we ··· 963 951 #ifdef CONFIG_NET_POLL_CONTROLLER 964 952 static void gem_poll_controller(struct net_device *dev) 965 953 { 966 - /* gem_interrupt is safe to reentrance so no need 967 - * to disable_irq here. 968 - */ 969 - gem_interrupt(dev->irq, dev); 954 + struct gem *gp = netdev_priv(dev); 955 + 956 + disable_irq(gp->pdev->irq); 957 + gem_interrupt(gp->pdev->irq, dev); 958 + enable_irq(gp->pdev->irq); 970 959 } 971 960 #endif 972 961 ··· 976 963 struct gem *gp = netdev_priv(dev); 977 964 978 965 netdev_err(dev, "transmit timed out, resetting\n"); 979 - if (!gp->running) { 980 - netdev_err(dev, "hrm.. 
hw not running !\n"); 981 - return; 982 - } 966 + 983 967 netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", 984 968 readl(gp->regs + TXDMA_CFG), 985 969 readl(gp->regs + MAC_TXSTAT), ··· 986 976 readl(gp->regs + MAC_RXSTAT), 987 977 readl(gp->regs + MAC_RXCFG)); 988 978 989 - spin_lock_irq(&gp->lock); 990 - spin_lock(&gp->tx_lock); 991 - 992 - gp->reset_task_pending = 1; 993 - schedule_work(&gp->reset_task); 994 - 995 - spin_unlock(&gp->tx_lock); 996 - spin_unlock_irq(&gp->lock); 979 + gem_schedule_reset(gp); 997 980 } 998 981 999 982 static __inline__ int gem_intme(int entry) ··· 1004 1001 struct gem *gp = netdev_priv(dev); 1005 1002 int entry; 1006 1003 u64 ctrl; 1007 - unsigned long flags; 1008 1004 1009 1005 ctrl = 0; 1010 1006 if (skb->ip_summed == CHECKSUM_PARTIAL) { ··· 1015 1013 (csum_stuff_off << 21)); 1016 1014 } 1017 1015 1018 - if (!spin_trylock_irqsave(&gp->tx_lock, flags)) { 1019 - /* Tell upper layer to requeue */ 1020 - return NETDEV_TX_LOCKED; 1021 - } 1022 - /* We raced with gem_do_stop() */ 1023 - if (!gp->running) { 1024 - spin_unlock_irqrestore(&gp->tx_lock, flags); 1025 - return NETDEV_TX_BUSY; 1026 - } 1027 - 1028 - /* This is a hard error, log it. */ 1029 - if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) { 1030 - netif_stop_queue(dev); 1031 - spin_unlock_irqrestore(&gp->tx_lock, flags); 1032 - netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); 1016 + if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) { 1017 + /* This is a hard error, log it. */ 1018 + if (!netif_queue_stopped(dev)) { 1019 + netif_stop_queue(dev); 1020 + netdev_err(dev, "BUG! 
Tx Ring full when queue awake!\n"); 1021 + } 1033 1022 return NETDEV_TX_BUSY; 1034 1023 } 1035 1024 ··· 1097 1104 } 1098 1105 1099 1106 gp->tx_new = entry; 1100 - if (TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1)) 1107 + if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { 1101 1108 netif_stop_queue(dev); 1102 1109 1110 + /* netif_stop_queue() must be done before checking 1111 + * checking tx index in TX_BUFFS_AVAIL() below, because 1112 + * in gem_tx(), we update tx_old before checking for 1113 + * netif_queue_stopped(). 1114 + */ 1115 + smp_mb(); 1116 + if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) 1117 + netif_wake_queue(dev); 1118 + } 1103 1119 if (netif_msg_tx_queued(gp)) 1104 1120 printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", 1105 1121 dev->name, entry, skb->len); 1106 1122 mb(); 1107 1123 writel(gp->tx_new, gp->regs + TXDMA_KICK); 1108 - spin_unlock_irqrestore(&gp->tx_lock, flags); 1109 - 1110 - dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */ 1111 1124 1112 1125 return NETDEV_TX_OK; 1113 1126 } ··· 1183 1184 1184 1185 #define STOP_TRIES 32 1185 1186 1186 - /* Must be invoked under gp->lock and gp->tx_lock. */ 1187 1187 static void gem_reset(struct gem *gp) 1188 1188 { 1189 1189 int limit; ··· 1211 1213 gem_pcs_reinit_adv(gp); 1212 1214 } 1213 1215 1214 - /* Must be invoked under gp->lock and gp->tx_lock. */ 1215 1216 static void gem_start_dma(struct gem *gp) 1216 1217 { 1217 1218 u32 val; ··· 1233 1236 writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); 1234 1237 } 1235 1238 1236 - /* Must be invoked under gp->lock and gp->tx_lock. DMA won't be 1237 - * actually stopped before about 4ms tho ... 1239 + /* DMA won't be actually stopped before about 4ms tho ... 1238 1240 */ 1239 1241 static void gem_stop_dma(struct gem *gp) 1240 1242 { ··· 1255 1259 } 1256 1260 1257 1261 1258 - /* Must be invoked under gp->lock and gp->tx_lock. 
*/ 1259 1262 // XXX dbl check what that function should do when called on PCS PHY 1260 1263 static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep) 1261 1264 { ··· 1314 1319 /* If we are asleep, we don't try to actually setup the PHY, we 1315 1320 * just store the settings 1316 1321 */ 1317 - if (gp->asleep) { 1322 + if (!netif_device_present(gp->dev)) { 1318 1323 gp->phy_mii.autoneg = gp->want_autoneg = autoneg; 1319 1324 gp->phy_mii.speed = speed; 1320 1325 gp->phy_mii.duplex = duplex; ··· 1340 1345 1341 1346 /* A link-up condition has occurred, initialize and enable the 1342 1347 * rest of the chip. 1343 - * 1344 - * Must be invoked under gp->lock and gp->tx_lock. 1345 1348 */ 1346 1349 static int gem_set_link_modes(struct gem *gp) 1347 1350 { 1348 - u32 val; 1351 + struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); 1349 1352 int full_duplex, speed, pause; 1353 + u32 val; 1350 1354 1351 1355 full_duplex = 0; 1352 1356 speed = SPEED_10; ··· 1369 1375 netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", 1370 1376 speed, (full_duplex ? 
"full" : "half")); 1371 1377 1372 - if (!gp->running) 1373 - return 0; 1378 + 1379 + /* We take the tx queue lock to avoid collisions between 1380 + * this code, the tx path and the NAPI-driven error path 1381 + */ 1382 + __netif_tx_lock(txq, smp_processor_id()); 1374 1383 1375 1384 val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); 1376 1385 if (full_duplex) { ··· 1422 1425 pause = 1; 1423 1426 } 1424 1427 1425 - if (netif_msg_link(gp)) { 1426 - if (pause) { 1427 - netdev_info(gp->dev, 1428 - "Pause is enabled (rxfifo: %d off: %d on: %d)\n", 1429 - gp->rx_fifo_sz, 1430 - gp->rx_pause_off, 1431 - gp->rx_pause_on); 1432 - } else { 1433 - netdev_info(gp->dev, "Pause is disabled\n"); 1434 - } 1435 - } 1436 - 1437 1428 if (!full_duplex) 1438 1429 writel(512, gp->regs + MAC_STIME); 1439 1430 else ··· 1435 1450 1436 1451 gem_start_dma(gp); 1437 1452 1453 + __netif_tx_unlock(txq); 1454 + 1455 + if (netif_msg_link(gp)) { 1456 + if (pause) { 1457 + netdev_info(gp->dev, 1458 + "Pause is enabled (rxfifo: %d off: %d on: %d)\n", 1459 + gp->rx_fifo_sz, 1460 + gp->rx_pause_off, 1461 + gp->rx_pause_on); 1462 + } else { 1463 + netdev_info(gp->dev, "Pause is disabled\n"); 1464 + } 1465 + } 1466 + 1438 1467 return 0; 1439 1468 } 1440 1469 1441 - /* Must be invoked under gp->lock and gp->tx_lock. 
*/ 1442 1470 static int gem_mdio_link_not_up(struct gem *gp) 1443 1471 { 1444 1472 switch (gp->lstate) { ··· 1499 1501 static void gem_link_timer(unsigned long data) 1500 1502 { 1501 1503 struct gem *gp = (struct gem *) data; 1504 + struct net_device *dev = gp->dev; 1502 1505 int restart_aneg = 0; 1503 1506 1504 - if (gp->asleep) 1505 - return; 1506 - 1507 - spin_lock_irq(&gp->lock); 1508 - spin_lock(&gp->tx_lock); 1509 - gem_get_cell(gp); 1510 - 1511 - /* If the reset task is still pending, we just 1512 - * reschedule the link timer 1513 - */ 1507 + /* There's no point doing anything if we're going to be reset */ 1514 1508 if (gp->reset_task_pending) 1515 - goto restart; 1509 + return; 1516 1510 1517 1511 if (gp->phy_type == phy_serialink || 1518 1512 gp->phy_type == phy_serdes) { ··· 1518 1528 goto restart; 1519 1529 1520 1530 gp->lstate = link_up; 1521 - netif_carrier_on(gp->dev); 1531 + netif_carrier_on(dev); 1522 1532 (void)gem_set_link_modes(gp); 1523 1533 } 1524 1534 goto restart; ··· 1534 1544 gp->last_forced_speed = gp->phy_mii.speed; 1535 1545 gp->timer_ticks = 5; 1536 1546 if (netif_msg_link(gp)) 1537 - netdev_info(gp->dev, 1547 + netdev_info(dev, 1538 1548 "Got link after fallback, retrying autoneg once...\n"); 1539 1549 gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); 1540 1550 } else if (gp->lstate != link_up) { 1541 1551 gp->lstate = link_up; 1542 - netif_carrier_on(gp->dev); 1552 + netif_carrier_on(dev); 1543 1553 if (gem_set_link_modes(gp)) 1544 1554 restart_aneg = 1; 1545 1555 } ··· 1549 1559 */ 1550 1560 if (gp->lstate == link_up) { 1551 1561 gp->lstate = link_down; 1552 - netif_info(gp, link, gp->dev, "Link down\n"); 1553 - netif_carrier_off(gp->dev); 1554 - gp->reset_task_pending = 1; 1555 - schedule_work(&gp->reset_task); 1556 - restart_aneg = 1; 1562 + netif_info(gp, link, dev, "Link down\n"); 1563 + netif_carrier_off(dev); 1564 + gem_schedule_reset(gp); 1565 + /* The reset task will restart the timer */ 1566 + return; 
1557 1567 } else if (++gp->timer_ticks > 10) { 1558 1568 if (found_mii_phy(gp)) 1559 1569 restart_aneg = gem_mdio_link_not_up(gp); ··· 1563 1573 } 1564 1574 if (restart_aneg) { 1565 1575 gem_begin_auto_negotiation(gp, NULL); 1566 - goto out_unlock; 1576 + return; 1567 1577 } 1568 1578 restart: 1569 1579 mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 1570 - out_unlock: 1571 - gem_put_cell(gp); 1572 - spin_unlock(&gp->tx_lock); 1573 - spin_unlock_irq(&gp->lock); 1574 1580 } 1575 1581 1576 - /* Must be invoked under gp->lock and gp->tx_lock. */ 1577 1582 static void gem_clean_rings(struct gem *gp) 1578 1583 { 1579 1584 struct gem_init_block *gb = gp->init_block; ··· 1619 1634 } 1620 1635 } 1621 1636 1622 - /* Must be invoked under gp->lock and gp->tx_lock. */ 1623 1637 static void gem_init_rings(struct gem *gp) 1624 1638 { 1625 1639 struct gem_init_block *gb = gp->init_block; ··· 1637 1653 struct sk_buff *skb; 1638 1654 struct gem_rxd *rxd = &gb->rxd[i]; 1639 1655 1640 - skb = gem_alloc_skb(RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); 1656 + skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL); 1641 1657 if (!skb) { 1642 1658 rxd->buffer = 0; 1643 1659 rxd->status_word = 0; ··· 1645 1661 } 1646 1662 1647 1663 gp->rx_skbs[i] = skb; 1648 - skb->dev = dev; 1649 1664 skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); 1650 1665 dma_addr = pci_map_page(gp->pdev, 1651 1666 virt_to_page(skb->data), ··· 1720 1737 1721 1738 if (gp->phy_type == phy_mii_mdio0 || 1722 1739 gp->phy_type == phy_mii_mdio1) { 1723 - // XXX check for errors 1740 + /* Reset and detect MII PHY */ 1724 1741 mii_phy_probe(&gp->phy_mii, gp->mii_phy_addr); 1725 1742 1726 1743 /* Init PHY */ ··· 1736 1753 gp->lstate = link_down; 1737 1754 netif_carrier_off(gp->dev); 1738 1755 1739 - /* Can I advertise gigabit here ? I'd need BCM PHY docs... 
*/ 1740 - spin_lock_irq(&gp->lock); 1756 + /* Print things out */ 1757 + if (gp->phy_type == phy_mii_mdio0 || 1758 + gp->phy_type == phy_mii_mdio1) 1759 + netdev_info(gp->dev, "Found %s PHY\n", 1760 + gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 1761 + 1741 1762 gem_begin_auto_negotiation(gp, NULL); 1742 - spin_unlock_irq(&gp->lock); 1743 1763 } 1744 1764 1745 - /* Must be invoked under gp->lock and gp->tx_lock. */ 1746 1765 static void gem_init_dma(struct gem *gp) 1747 1766 { 1748 1767 u64 desc_dma = (u64) gp->gblock_dvma; ··· 1782 1797 gp->regs + RXDMA_BLANK); 1783 1798 } 1784 1799 1785 - /* Must be invoked under gp->lock and gp->tx_lock. */ 1786 1800 static u32 gem_setup_multicast(struct gem *gp) 1787 1801 { 1788 1802 u32 rxcfg = 0; ··· 1819 1835 return rxcfg; 1820 1836 } 1821 1837 1822 - /* Must be invoked under gp->lock and gp->tx_lock. */ 1823 1838 static void gem_init_mac(struct gem *gp) 1824 1839 { 1825 1840 unsigned char *e = &gp->dev->dev_addr[0]; ··· 1901 1918 writel(0, gp->regs + WOL_WAKECSR); 1902 1919 } 1903 1920 1904 - /* Must be invoked under gp->lock and gp->tx_lock. */ 1905 1921 static void gem_init_pause_thresholds(struct gem *gp) 1906 1922 { 1907 1923 u32 cfg; ··· 2061 2079 return 0; 2062 2080 } 2063 2081 2064 - /* Must be invoked under gp->lock and gp->tx_lock. */ 2065 2082 static void gem_reinit_chip(struct gem *gp) 2066 2083 { 2067 2084 /* Reset the chip */ ··· 2081 2100 } 2082 2101 2083 2102 2084 - /* Must be invoked with no lock held. 
*/ 2085 2103 static void gem_stop_phy(struct gem *gp, int wol) 2086 2104 { 2087 2105 u32 mifcfg; 2088 - unsigned long flags; 2089 2106 2090 2107 /* Let the chip settle down a bit, it seems that helps 2091 2108 * for sleep mode on some models ··· 2129 2150 writel(0, gp->regs + RXDMA_CFG); 2130 2151 2131 2152 if (!wol) { 2132 - spin_lock_irqsave(&gp->lock, flags); 2133 - spin_lock(&gp->tx_lock); 2134 2153 gem_reset(gp); 2135 2154 writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); 2136 2155 writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); 2137 - spin_unlock(&gp->tx_lock); 2138 - spin_unlock_irqrestore(&gp->lock, flags); 2139 - 2140 - /* No need to take the lock here */ 2141 2156 2142 2157 if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) 2143 2158 gp->phy_mii.def->ops->suspend(&gp->phy_mii); ··· 2148 2175 } 2149 2176 } 2150 2177 2151 - 2152 2178 static int gem_do_start(struct net_device *dev) 2153 2179 { 2154 2180 struct gem *gp = netdev_priv(dev); 2155 - unsigned long flags; 2156 - 2157 - spin_lock_irqsave(&gp->lock, flags); 2158 - spin_lock(&gp->tx_lock); 2181 + int rc; 2159 2182 2160 2183 /* Enable the cell */ 2161 2184 gem_get_cell(gp); 2162 2185 2186 + /* Make sure PCI access and bus master are enabled */ 2187 + rc = pci_enable_device(gp->pdev); 2188 + if (rc) { 2189 + netdev_err(dev, "Failed to enable chip on PCI bus !\n"); 2190 + 2191 + /* Put cell and forget it for now, it will be considered as 2192 + * still asleep, a new sleep cycle may bring it back 2193 + */ 2194 + gem_put_cell(gp); 2195 + return -ENXIO; 2196 + } 2197 + pci_set_master(gp->pdev); 2198 + 2163 2199 /* Init & setup chip hardware */ 2164 2200 gem_reinit_chip(gp); 2165 2201 2166 - gp->running = 1; 2167 - 2168 - napi_enable(&gp->napi); 2169 - 2170 - if (gp->lstate == link_up) { 2171 - netif_carrier_on(gp->dev); 2172 - gem_set_link_modes(gp); 2173 - } 2174 - 2175 - netif_wake_queue(gp->dev); 2176 - 2177 - spin_unlock(&gp->tx_lock); 2178 - spin_unlock_irqrestore(&gp->lock, flags); 2179 - 2180 - if 
(request_irq(gp->pdev->irq, gem_interrupt, 2181 - IRQF_SHARED, dev->name, (void *)dev)) { 2202 + /* An interrupt might come in handy */ 2203 + rc = request_irq(gp->pdev->irq, gem_interrupt, 2204 + IRQF_SHARED, dev->name, (void *)dev); 2205 + if (rc) { 2182 2206 netdev_err(dev, "failed to request irq !\n"); 2183 2207 2184 - spin_lock_irqsave(&gp->lock, flags); 2185 - spin_lock(&gp->tx_lock); 2186 - 2187 - napi_disable(&gp->napi); 2188 - 2189 - gp->running = 0; 2190 2208 gem_reset(gp); 2191 2209 gem_clean_rings(gp); 2192 2210 gem_put_cell(gp); 2193 - 2194 - spin_unlock(&gp->tx_lock); 2195 - spin_unlock_irqrestore(&gp->lock, flags); 2196 - 2197 - return -EAGAIN; 2211 + return rc; 2198 2212 } 2213 + 2214 + /* Mark us as attached again if we come from resume(), this has 2215 + * no effect if we weren't detatched and needs to be done now. 2216 + */ 2217 + netif_device_attach(dev); 2218 + 2219 + /* Restart NAPI & queues */ 2220 + gem_netif_start(gp); 2221 + 2222 + /* Detect & init PHY, start autoneg etc... this will 2223 + * eventually result in starting DMA operations when 2224 + * the link is up 2225 + */ 2226 + gem_init_phy(gp); 2199 2227 2200 2228 return 0; 2201 2229 } ··· 2204 2230 static void gem_do_stop(struct net_device *dev, int wol) 2205 2231 { 2206 2232 struct gem *gp = netdev_priv(dev); 2207 - unsigned long flags; 2208 2233 2209 - spin_lock_irqsave(&gp->lock, flags); 2210 - spin_lock(&gp->tx_lock); 2234 + /* Stop NAPI and stop tx queue */ 2235 + gem_netif_stop(gp); 2211 2236 2212 - gp->running = 0; 2213 - 2214 - /* Stop netif queue */ 2215 - netif_stop_queue(dev); 2216 - 2217 - /* Make sure ints are disabled */ 2237 + /* Make sure ints are disabled. 
We don't care about 2238 + * synchronizing as NAPI is disabled, thus a stray 2239 + * interrupt will do nothing bad (our irq handler 2240 + * just schedules NAPI) 2241 + */ 2218 2242 gem_disable_ints(gp); 2219 2243 2220 - /* We can drop the lock now */ 2221 - spin_unlock(&gp->tx_lock); 2222 - spin_unlock_irqrestore(&gp->lock, flags); 2244 + /* Stop the link timer */ 2245 + del_timer_sync(&gp->link_timer); 2246 + 2247 + /* We cannot cancel the reset task while holding the 2248 + * rtnl lock, we'd get an A->B / B->A deadlock stituation 2249 + * if we did. This is not an issue however as the reset 2250 + * task is synchronized vs. us (rtnl_lock) and will do 2251 + * nothing if the device is down or suspended. We do 2252 + * still clear reset_task_pending to avoid a spurrious 2253 + * reset later on in case we do resume before it gets 2254 + * scheduled. 2255 + */ 2256 + gp->reset_task_pending = 0; 2223 2257 2224 2258 /* If we are going to sleep with WOL */ 2225 2259 gem_stop_dma(gp); ··· 2242 2260 /* No irq needed anymore */ 2243 2261 free_irq(gp->pdev->irq, (void *) dev); 2244 2262 2263 + /* Shut the PHY down eventually and setup WOL */ 2264 + gem_stop_phy(gp, wol); 2265 + 2266 + /* Make sure bus master is disabled */ 2267 + pci_disable_device(gp->pdev); 2268 + 2245 2269 /* Cell not needed neither if no WOL */ 2246 - if (!wol) { 2247 - spin_lock_irqsave(&gp->lock, flags); 2270 + if (!wol) 2248 2271 gem_put_cell(gp); 2249 - spin_unlock_irqrestore(&gp->lock, flags); 2250 - } 2251 2272 } 2252 2273 2253 2274 static void gem_reset_task(struct work_struct *work) 2254 2275 { 2255 2276 struct gem *gp = container_of(work, struct gem, reset_task); 2256 2277 2257 - mutex_lock(&gp->pm_mutex); 2278 + /* Lock out the network stack (essentially shield ourselves 2279 + * against a racing open, close, control call, or suspend 2280 + */ 2281 + rtnl_lock(); 2258 2282 2259 - if (gp->opened) 2260 - napi_disable(&gp->napi); 2261 - 2262 - spin_lock_irq(&gp->lock); 2263 - 
spin_lock(&gp->tx_lock); 2264 - 2265 - if (gp->running) { 2266 - netif_stop_queue(gp->dev); 2267 - 2268 - /* Reset the chip & rings */ 2269 - gem_reinit_chip(gp); 2270 - if (gp->lstate == link_up) 2271 - gem_set_link_modes(gp); 2272 - netif_wake_queue(gp->dev); 2283 + /* Skip the reset task if suspended or closed, or if it's 2284 + * been cancelled by gem_do_stop (see comment there) 2285 + */ 2286 + if (!netif_device_present(gp->dev) || 2287 + !netif_running(gp->dev) || 2288 + !gp->reset_task_pending) { 2289 + rtnl_unlock(); 2290 + return; 2273 2291 } 2274 2292 2293 + /* Stop the link timer */ 2294 + del_timer_sync(&gp->link_timer); 2295 + 2296 + /* Stop NAPI and tx */ 2297 + gem_netif_stop(gp); 2298 + 2299 + /* Reset the chip & rings */ 2300 + gem_reinit_chip(gp); 2301 + if (gp->lstate == link_up) 2302 + gem_set_link_modes(gp); 2303 + 2304 + /* Restart NAPI and Tx */ 2305 + gem_netif_start(gp); 2306 + 2307 + /* We are back ! */ 2275 2308 gp->reset_task_pending = 0; 2276 2309 2277 - spin_unlock(&gp->tx_lock); 2278 - spin_unlock_irq(&gp->lock); 2310 + /* If the link is not up, restart autoneg, else restart the 2311 + * polling timer 2312 + */ 2313 + if (gp->lstate != link_up) 2314 + gem_begin_auto_negotiation(gp, NULL); 2315 + else 2316 + mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); 2279 2317 2280 - if (gp->opened) 2281 - napi_enable(&gp->napi); 2282 - 2283 - mutex_unlock(&gp->pm_mutex); 2318 + rtnl_unlock(); 2284 2319 } 2285 - 2286 2320 2287 2321 static int gem_open(struct net_device *dev) 2288 2322 { 2289 - struct gem *gp = netdev_priv(dev); 2290 - int rc = 0; 2291 - 2292 - mutex_lock(&gp->pm_mutex); 2293 - 2294 - /* We need the cell enabled */ 2295 - if (!gp->asleep) 2296 - rc = gem_do_start(dev); 2297 - gp->opened = (rc == 0); 2298 - 2299 - mutex_unlock(&gp->pm_mutex); 2300 - 2301 - return rc; 2323 + /* We allow open while suspended, we just do nothing, 2324 + * the chip will be initialized in resume() 2325 + */ 2326 + if (netif_device_present(dev)) 
2327 + return gem_do_start(dev); 2328 + return 0; 2302 2329 } 2303 2330 2304 2331 static int gem_close(struct net_device *dev) 2305 2332 { 2306 - struct gem *gp = netdev_priv(dev); 2307 - 2308 - mutex_lock(&gp->pm_mutex); 2309 - 2310 - napi_disable(&gp->napi); 2311 - 2312 - gp->opened = 0; 2313 - if (!gp->asleep) 2333 + if (netif_device_present(dev)) 2314 2334 gem_do_stop(dev, 0); 2315 - 2316 - mutex_unlock(&gp->pm_mutex); 2317 2335 2318 2336 return 0; 2319 2337 } ··· 2323 2341 { 2324 2342 struct net_device *dev = pci_get_drvdata(pdev); 2325 2343 struct gem *gp = netdev_priv(dev); 2326 - unsigned long flags; 2327 2344 2328 - mutex_lock(&gp->pm_mutex); 2345 + /* Lock the network stack first to avoid racing with open/close, 2346 + * reset task and setting calls 2347 + */ 2348 + rtnl_lock(); 2329 2349 2330 - netdev_info(dev, "suspending, WakeOnLan %s\n", 2331 - (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled"); 2332 - 2333 - /* Keep the cell enabled during the entire operation */ 2334 - spin_lock_irqsave(&gp->lock, flags); 2335 - spin_lock(&gp->tx_lock); 2336 - gem_get_cell(gp); 2337 - spin_unlock(&gp->tx_lock); 2338 - spin_unlock_irqrestore(&gp->lock, flags); 2339 - 2340 - /* If the driver is opened, we stop the MAC */ 2341 - if (gp->opened) { 2342 - napi_disable(&gp->napi); 2343 - 2344 - /* Stop traffic, mark us closed */ 2350 + /* Not running, mark ourselves non-present, no need for 2351 + * a lock here 2352 + */ 2353 + if (!netif_running(dev)) { 2345 2354 netif_device_detach(dev); 2355 + rtnl_unlock(); 2356 + return 0; 2357 + } 2358 + netdev_info(dev, "suspending, WakeOnLan %s\n", 2359 + (gp->wake_on_lan && netif_running(dev)) ? 
2360 + "enabled" : "disabled"); 2346 2361 2347 - /* Switch off MAC, remember WOL setting */ 2348 - gp->asleep_wol = gp->wake_on_lan; 2349 - gem_do_stop(dev, gp->asleep_wol); 2350 - } else 2351 - gp->asleep_wol = 0; 2352 - 2353 - /* Mark us asleep */ 2354 - gp->asleep = 1; 2355 - wmb(); 2356 - 2357 - /* Stop the link timer */ 2358 - del_timer_sync(&gp->link_timer); 2359 - 2360 - /* Now we release the mutex to not block the reset task who 2361 - * can take it too. We are marked asleep, so there will be no 2362 - * conflict here 2362 + /* Tell the network stack we're gone. gem_do_stop() below will 2363 + * synchronize with TX, stop NAPI etc... 2363 2364 */ 2364 - mutex_unlock(&gp->pm_mutex); 2365 + netif_device_detach(dev); 2365 2366 2366 - /* Wait for the pending reset task to complete */ 2367 - flush_work_sync(&gp->reset_task); 2367 + /* Switch off chip, remember WOL setting */ 2368 + gp->asleep_wol = gp->wake_on_lan; 2369 + gem_do_stop(dev, gp->asleep_wol); 2368 2370 2369 - /* Shut the PHY down eventually and setup WOL */ 2370 - gem_stop_phy(gp, gp->asleep_wol); 2371 - 2372 - /* Make sure bus master is disabled */ 2373 - pci_disable_device(gp->pdev); 2374 - 2375 - /* Release the cell, no need to take a lock at this point since 2376 - * nothing else can happen now 2377 - */ 2378 - gem_put_cell(gp); 2371 + /* Unlock the network stack */ 2372 + rtnl_unlock(); 2379 2373 2380 2374 return 0; 2381 2375 } ··· 2360 2402 { 2361 2403 struct net_device *dev = pci_get_drvdata(pdev); 2362 2404 struct gem *gp = netdev_priv(dev); 2363 - unsigned long flags; 2364 2405 2365 - netdev_info(dev, "resuming\n"); 2406 + /* See locking comment in gem_suspend */ 2407 + rtnl_lock(); 2366 2408 2367 - mutex_lock(&gp->pm_mutex); 2368 - 2369 - /* Keep the cell enabled during the entire operation, no need to 2370 - * take a lock here tho since nothing else can happen while we are 2371 - * marked asleep 2409 + /* Not running, mark ourselves present, no need for 2410 + * a lock here 2372 2411 */ 
2373 - gem_get_cell(gp); 2374 - 2375 - /* Make sure PCI access and bus master are enabled */ 2376 - if (pci_enable_device(gp->pdev)) { 2377 - netdev_err(dev, "Can't re-enable chip !\n"); 2378 - /* Put cell and forget it for now, it will be considered as 2379 - * still asleep, a new sleep cycle may bring it back 2380 - */ 2381 - gem_put_cell(gp); 2382 - mutex_unlock(&gp->pm_mutex); 2412 + if (!netif_running(dev)) { 2413 + netif_device_attach(dev); 2414 + rtnl_unlock(); 2383 2415 return 0; 2384 2416 } 2385 - pci_set_master(gp->pdev); 2386 2417 2387 - /* Reset everything */ 2388 - gem_reset(gp); 2389 - 2390 - /* Mark us woken up */ 2391 - gp->asleep = 0; 2392 - wmb(); 2393 - 2394 - /* Bring the PHY back. Again, lock is useless at this point as 2395 - * nothing can be happening until we restart the whole thing 2418 + /* Restart chip. If that fails there isn't much we can do, we 2419 + * leave things stopped. 2396 2420 */ 2397 - gem_init_phy(gp); 2398 - 2399 - /* If we were opened, bring everything back */ 2400 - if (gp->opened) { 2401 - /* Restart MAC */ 2402 - gem_do_start(dev); 2403 - 2404 - /* Re-attach net device */ 2405 - netif_device_attach(dev); 2406 - } 2407 - 2408 - spin_lock_irqsave(&gp->lock, flags); 2409 - spin_lock(&gp->tx_lock); 2421 + gem_do_start(dev); 2410 2422 2411 2423 /* If we had WOL enabled, the cell clock was never turned off during 2412 2424 * sleep, so we end up beeing unbalanced. Fix that here ··· 2384 2456 if (gp->asleep_wol) 2385 2457 gem_put_cell(gp); 2386 2458 2387 - /* This function doesn't need to hold the cell, it will be held if the 2388 - * driver is open by gem_do_start(). 
2389 - */ 2390 - gem_put_cell(gp); 2391 - 2392 - spin_unlock(&gp->tx_lock); 2393 - spin_unlock_irqrestore(&gp->lock, flags); 2394 - 2395 - mutex_unlock(&gp->pm_mutex); 2459 + /* Unlock the network stack */ 2460 + rtnl_unlock(); 2396 2461 2397 2462 return 0; 2398 2463 } ··· 2395 2474 { 2396 2475 struct gem *gp = netdev_priv(dev); 2397 2476 2398 - spin_lock_irq(&gp->lock); 2399 - spin_lock(&gp->tx_lock); 2400 - 2401 2477 /* I have seen this being called while the PM was in progress, 2402 - * so we shield against this 2478 + * so we shield against this. Let's also not poke at registers 2479 + * while the reset task is going on. 2480 + * 2481 + * TODO: Move stats collection elsewhere (link timer ?) and 2482 + * make this a nop to avoid all those synchro issues 2403 2483 */ 2404 - if (gp->running) { 2405 - dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2406 - writel(0, gp->regs + MAC_FCSERR); 2484 + if (!netif_device_present(dev) || !netif_running(dev)) 2485 + goto bail; 2407 2486 2408 - dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); 2409 - writel(0, gp->regs + MAC_AERR); 2487 + /* Better safe than sorry... 
*/ 2488 + if (WARN_ON(!gp->cell_enabled)) 2489 + goto bail; 2410 2490 2411 - dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); 2412 - writel(0, gp->regs + MAC_LERR); 2491 + dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); 2492 + writel(0, gp->regs + MAC_FCSERR); 2413 2493 2414 - dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2415 - dev->stats.collisions += 2416 - (readl(gp->regs + MAC_ECOLL) + 2417 - readl(gp->regs + MAC_LCOLL)); 2418 - writel(0, gp->regs + MAC_ECOLL); 2419 - writel(0, gp->regs + MAC_LCOLL); 2420 - } 2494 + dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); 2495 + writel(0, gp->regs + MAC_AERR); 2421 2496 2422 - spin_unlock(&gp->tx_lock); 2423 - spin_unlock_irq(&gp->lock); 2497 + dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); 2498 + writel(0, gp->regs + MAC_LERR); 2424 2499 2500 + dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); 2501 + dev->stats.collisions += 2502 + (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); 2503 + writel(0, gp->regs + MAC_ECOLL); 2504 + writel(0, gp->regs + MAC_LCOLL); 2505 + bail: 2425 2506 return &dev->stats; 2426 2507 } 2427 2508 ··· 2436 2513 if (!is_valid_ether_addr(macaddr->sa_data)) 2437 2514 return -EADDRNOTAVAIL; 2438 2515 2439 - if (!netif_running(dev) || !netif_device_present(dev)) { 2440 - /* We'll just catch it later when the 2441 - * device is up'd or resumed. 
2442 - */ 2443 - memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); 2444 - return 0; 2445 - } 2446 - 2447 - mutex_lock(&gp->pm_mutex); 2448 2516 memcpy(dev->dev_addr, macaddr->sa_data, dev->addr_len); 2449 - if (gp->running) { 2450 - writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 2451 - writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 2452 - writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 2453 - } 2454 - mutex_unlock(&gp->pm_mutex); 2517 + 2518 + /* We'll just catch it later when the device is up'd or resumed */ 2519 + if (!netif_running(dev) || !netif_device_present(dev)) 2520 + return 0; 2521 + 2522 + /* Better safe than sorry... */ 2523 + if (WARN_ON(!gp->cell_enabled)) 2524 + return 0; 2525 + 2526 + writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); 2527 + writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); 2528 + writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); 2455 2529 2456 2530 return 0; 2457 2531 } ··· 2459 2539 u32 rxcfg, rxcfg_new; 2460 2540 int limit = 10000; 2461 2541 2542 + if (!netif_running(dev) || !netif_device_present(dev)) 2543 + return; 2462 2544 2463 - spin_lock_irq(&gp->lock); 2464 - spin_lock(&gp->tx_lock); 2465 - 2466 - if (!gp->running) 2467 - goto bail; 2468 - 2469 - netif_stop_queue(dev); 2545 + /* Better safe than sorry... 
*/ 2546 + if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled)) 2547 + return; 2470 2548 2471 2549 rxcfg = readl(gp->regs + MAC_RXCFG); 2472 2550 rxcfg_new = gem_setup_multicast(gp); ··· 2484 2566 rxcfg |= rxcfg_new; 2485 2567 2486 2568 writel(rxcfg, gp->regs + MAC_RXCFG); 2487 - 2488 - netif_wake_queue(dev); 2489 - 2490 - bail: 2491 - spin_unlock(&gp->tx_lock); 2492 - spin_unlock_irq(&gp->lock); 2493 2569 } 2494 2570 2495 2571 /* Jumbo-grams don't seem to work :-( */ ··· 2501 2589 if (new_mtu < GEM_MIN_MTU || new_mtu > GEM_MAX_MTU) 2502 2590 return -EINVAL; 2503 2591 2504 - if (!netif_running(dev) || !netif_device_present(dev)) { 2505 - /* We'll just catch it later when the 2506 - * device is up'd or resumed. 2507 - */ 2508 - dev->mtu = new_mtu; 2509 - return 0; 2510 - } 2511 - 2512 - mutex_lock(&gp->pm_mutex); 2513 - spin_lock_irq(&gp->lock); 2514 - spin_lock(&gp->tx_lock); 2515 2592 dev->mtu = new_mtu; 2516 - if (gp->running) { 2517 - gem_reinit_chip(gp); 2518 - if (gp->lstate == link_up) 2519 - gem_set_link_modes(gp); 2520 - } 2521 - spin_unlock(&gp->tx_lock); 2522 - spin_unlock_irq(&gp->lock); 2523 - mutex_unlock(&gp->pm_mutex); 2593 + 2594 + /* We'll just catch it later when the device is up'd or resumed */ 2595 + if (!netif_running(dev) || !netif_device_present(dev)) 2596 + return 0; 2597 + 2598 + /* Better safe than sorry... 
*/ 2599 + if (WARN_ON(!gp->cell_enabled)) 2600 + return 0; 2601 + 2602 + gem_netif_stop(gp); 2603 + gem_reinit_chip(gp); 2604 + if (gp->lstate == link_up) 2605 + gem_set_link_modes(gp); 2606 + gem_netif_start(gp); 2524 2607 2525 2608 return 0; 2526 2609 } ··· 2547 2640 cmd->phy_address = 0; /* XXX fixed PHYAD */ 2548 2641 2549 2642 /* Return current PHY settings */ 2550 - spin_lock_irq(&gp->lock); 2551 2643 cmd->autoneg = gp->want_autoneg; 2552 2644 ethtool_cmd_speed_set(cmd, gp->phy_mii.speed); 2553 2645 cmd->duplex = gp->phy_mii.duplex; ··· 2558 2652 */ 2559 2653 if (cmd->advertising == 0) 2560 2654 cmd->advertising = cmd->supported; 2561 - spin_unlock_irq(&gp->lock); 2562 2655 } else { // XXX PCS ? 2563 2656 cmd->supported = 2564 2657 (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | ··· 2611 2706 return -EINVAL; 2612 2707 2613 2708 /* Apply settings and restart link process. */ 2614 - spin_lock_irq(&gp->lock); 2615 - gem_get_cell(gp); 2616 - gem_begin_auto_negotiation(gp, cmd); 2617 - gem_put_cell(gp); 2618 - spin_unlock_irq(&gp->lock); 2709 + if (netif_device_present(gp->dev)) { 2710 + del_timer_sync(&gp->link_timer); 2711 + gem_begin_auto_negotiation(gp, cmd); 2712 + } 2619 2713 2620 2714 return 0; 2621 2715 } ··· 2626 2722 if (!gp->want_autoneg) 2627 2723 return -EINVAL; 2628 2724 2629 - /* Restart link process. */ 2630 - spin_lock_irq(&gp->lock); 2631 - gem_get_cell(gp); 2632 - gem_begin_auto_negotiation(gp, NULL); 2633 - gem_put_cell(gp); 2634 - spin_unlock_irq(&gp->lock); 2725 + /* Restart link process */ 2726 + if (netif_device_present(gp->dev)) { 2727 + del_timer_sync(&gp->link_timer); 2728 + gem_begin_auto_negotiation(gp, NULL); 2729 + } 2635 2730 2636 2731 return 0; 2637 2732 } ··· 2694 2791 struct gem *gp = netdev_priv(dev); 2695 2792 struct mii_ioctl_data *data = if_mii(ifr); 2696 2793 int rc = -EOPNOTSUPP; 2697 - unsigned long flags; 2698 2794 2699 - /* Hold the PM mutex while doing ioctl's or we may collide 2700 - * with power management. 
2795 + /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that 2796 + * netif_device_present() is true and holds rtnl_lock for us 2797 + * so we have nothing to worry about 2701 2798 */ 2702 - mutex_lock(&gp->pm_mutex); 2703 - 2704 - spin_lock_irqsave(&gp->lock, flags); 2705 - gem_get_cell(gp); 2706 - spin_unlock_irqrestore(&gp->lock, flags); 2707 2799 2708 2800 switch (cmd) { 2709 2801 case SIOCGMIIPHY: /* Get address of MII PHY in use. */ ··· 2706 2808 /* Fallthrough... */ 2707 2809 2708 2810 case SIOCGMIIREG: /* Read MII PHY register. */ 2709 - if (!gp->running) 2710 - rc = -EAGAIN; 2711 - else { 2712 - data->val_out = __phy_read(gp, data->phy_id & 0x1f, 2713 - data->reg_num & 0x1f); 2714 - rc = 0; 2715 - } 2811 + data->val_out = __phy_read(gp, data->phy_id & 0x1f, 2812 + data->reg_num & 0x1f); 2813 + rc = 0; 2716 2814 break; 2717 2815 2718 2816 case SIOCSMIIREG: /* Write MII PHY register. */ 2719 - if (!gp->running) 2720 - rc = -EAGAIN; 2721 - else { 2722 - __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, 2723 - data->val_in); 2724 - rc = 0; 2725 - } 2817 + __phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, 2818 + data->val_in); 2819 + rc = 0; 2726 2820 break; 2727 2821 }; 2728 - 2729 - spin_lock_irqsave(&gp->lock, flags); 2730 - gem_put_cell(gp); 2731 - spin_unlock_irqrestore(&gp->lock, flags); 2732 - 2733 - mutex_unlock(&gp->pm_mutex); 2734 - 2735 2822 return rc; 2736 2823 } 2737 2824 ··· 2804 2921 2805 2922 unregister_netdev(dev); 2806 2923 2807 - /* Stop the link timer */ 2808 - del_timer_sync(&gp->link_timer); 2809 - 2810 - /* We shouldn't need any locking here */ 2811 - gem_get_cell(gp); 2812 - 2813 - /* Cancel reset task */ 2924 + /* Ensure reset task is truely gone */ 2814 2925 cancel_work_sync(&gp->reset_task); 2815 - 2816 - /* Shut the PHY down */ 2817 - gem_stop_phy(gp, 0); 2818 - 2819 - gem_put_cell(gp); 2820 - 2821 - /* Make sure bus master is disabled */ 2822 - pci_disable_device(gp->pdev); 2823 2926 2824 2927 /* Free 
resources */ 2825 2928 pci_free_consistent(pdev, ··· 2912 3043 2913 3044 gp->msg_enable = DEFAULT_MSG; 2914 3045 2915 - spin_lock_init(&gp->lock); 2916 - spin_lock_init(&gp->tx_lock); 2917 - mutex_init(&gp->pm_mutex); 2918 - 2919 3046 init_timer(&gp->link_timer); 2920 3047 gp->link_timer.function = gem_link_timer; 2921 3048 gp->link_timer.data = (unsigned long) gp; ··· 2987 3122 /* Set that now, in case PM kicks in now */ 2988 3123 pci_set_drvdata(pdev, dev); 2989 3124 2990 - /* Detect & init PHY, start autoneg, we release the cell now 2991 - * too, it will be managed by whoever needs it 2992 - */ 2993 - gem_init_phy(gp); 2994 - 2995 - spin_lock_irq(&gp->lock); 2996 - gem_put_cell(gp); 2997 - spin_unlock_irq(&gp->lock); 3125 + /* We can do scatter/gather and HW checksum */ 3126 + dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 3127 + dev->features |= dev->hw_features | NETIF_F_RXCSUM; 3128 + if (pci_using_dac) 3129 + dev->features |= NETIF_F_HIGHDMA; 2998 3130 2999 3131 /* Register with kernel */ 3000 3132 if (register_netdev(dev)) { ··· 3000 3138 goto err_out_free_consistent; 3001 3139 } 3002 3140 3141 + /* Undo the get_cell with appropriate locking (we could use 3142 + * ndo_init/uninit but that would be even more clumsy imho) 3143 + */ 3144 + rtnl_lock(); 3145 + gem_put_cell(gp); 3146 + rtnl_unlock(); 3147 + 3003 3148 netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", 3004 3149 dev->dev_addr); 3005 - 3006 - if (gp->phy_type == phy_mii_mdio0 || 3007 - gp->phy_type == phy_mii_mdio1) 3008 - netdev_info(dev, "Found %s PHY\n", 3009 - gp->phy_mii.def ? gp->phy_mii.def->name : "no"); 3010 - 3011 - /* GEM can do it all... */ 3012 - dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; 3013 - dev->features |= dev->hw_features | NETIF_F_RXCSUM | NETIF_F_LLTX; 3014 - if (pci_using_dac) 3015 - dev->features |= NETIF_F_HIGHDMA; 3016 - 3017 3150 return 0; 3018 3151 3019 3152 err_out_free_consistent:
-25
drivers/net/sungem.h
··· 973 973 }; 974 974 975 975 struct gem { 976 - spinlock_t lock; 977 - spinlock_t tx_lock; 978 976 void __iomem *regs; 979 977 int rx_new, rx_old; 980 978 int tx_new, tx_old; 981 979 982 980 unsigned int has_wol : 1; /* chip supports wake-on-lan */ 983 - unsigned int asleep : 1; /* chip asleep, protected by pm_mutex */ 984 981 unsigned int asleep_wol : 1; /* was asleep with WOL enabled */ 985 - unsigned int opened : 1; /* driver opened, protected by pm_mutex */ 986 - unsigned int running : 1; /* chip running, protected by lock */ 987 982 988 - /* cell enable count, protected by lock */ 989 983 int cell_enabled; 990 - 991 - struct mutex pm_mutex; 992 - 993 984 u32 msg_enable; 994 985 u32 status; 995 986 ··· 1023 1032 1024 1033 #define found_mii_phy(gp) ((gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) && \ 1025 1034 gp->phy_mii.def && gp->phy_mii.def->ops) 1026 - 1027 - #define ALIGNED_RX_SKB_ADDR(addr) \ 1028 - ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) 1029 - static __inline__ struct sk_buff *gem_alloc_skb(int size, 1030 - gfp_t gfp_flags) 1031 - { 1032 - struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); 1033 - 1034 - if (skb) { 1035 - int offset = (int) ALIGNED_RX_SKB_ADDR(skb->data); 1036 - if (offset) 1037 - skb_reserve(skb, offset); 1038 - } 1039 - 1040 - return skb; 1041 - } 1042 1035 1043 1036 #endif /* _SUNGEM_H */