Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

via-rhine: per device debug level.

Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>

+79 -90
drivers/net/ethernet/via/via-rhine.c
··· 39 39 40 40 /* A few user-configurable values. 41 41 These may be modified when a driver module is loaded. */ 42 - 43 - #define DEBUG 44 - static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ 42 + static int debug = 0; 43 + #define RHINE_MSG_DEFAULT \ 44 + (0x0000) 45 45 46 46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme. 47 47 Setting to > 1518 effectively disables this feature. */ ··· 130 130 module_param(debug, int, 0); 131 131 module_param(rx_copybreak, int, 0); 132 132 module_param(avoid_D3, bool, 0); 133 - MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)"); 133 + MODULE_PARM_DESC(debug, "VIA Rhine debug message flags"); 134 134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); 135 135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); 136 136 ··· 450 450 struct work_struct slow_event_task; 451 451 struct work_struct reset_task; 452 452 453 + u32 msg_enable; 454 + 453 455 /* Frequently used values: keep some adjacent for cache effect. */ 454 456 u32 quirks; 455 457 struct rx_desc *rx_head_desc; ··· 514 512 udelay(10); 515 513 } 516 514 if (i > 64) { 517 - netdev_dbg(rp->dev, "%s bit wait (%02x/%02x) cycle " 518 - "count: %04d\n", high ? "high" : "low", reg, mask, i); 515 + netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle " 516 + "count: %04d\n", high ? "high" : "low", reg, mask, i); 519 517 } 520 518 } 521 519 ··· 615 613 { 616 614 struct rhine_private *rp = netdev_priv(dev); 617 615 void __iomem *ioaddr = rp->base; 616 + u8 cmd1; 618 617 619 618 iowrite8(Cmd1Reset, ioaddr + ChipCmd1); 620 619 IOSYNC; ··· 631 628 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset); 632 629 } 633 630 634 - if (debug > 1) 635 - netdev_info(dev, "Reset %s\n", 636 - (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ? 637 - "failed" : "succeeded"); 631 + cmd1 = ioread8(ioaddr + ChipCmd1); 632 + netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ? 
633 + "failed" : "succeeded"); 638 634 } 639 635 640 636 #ifdef USE_MMIO ··· 708 706 struct net_device *dev = rp->dev; 709 707 710 708 if (status & IntrTxAborted) { 711 - if (debug > 1) 712 - netdev_info(dev, "Abort %08x, frame dropped\n", status); 709 + netif_info(rp, tx_err, dev, 710 + "Abort %08x, frame dropped\n", status); 713 711 } 714 712 715 713 if (status & IntrTxUnderrun) { 716 714 rhine_kick_tx_threshold(rp); 717 - if (debug > 1) 718 - netdev_info(dev, "Transmitter underrun, Tx threshold now %02x\n", 719 - rp->tx_thresh); 715 + netif_info(rp, tx_err ,dev, "Transmitter underrun, " 716 + "Tx threshold now %02x\n", rp->tx_thresh); 720 717 } 721 718 722 - if (status & IntrTxDescRace) { 723 - if (debug > 2) 724 - netdev_info(dev, "Tx descriptor write-back race\n"); 725 - } 719 + if (status & IntrTxDescRace) 720 + netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n"); 726 721 727 722 if ((status & IntrTxError) && 728 723 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) { 729 724 rhine_kick_tx_threshold(rp); 730 - if (debug > 1) 731 - netdev_info(dev, "Unspecified error. Tx threshold now %02x\n", 732 - rp->tx_thresh); 725 + netif_info(rp, tx_err, dev, "Unspecified error. 
" 726 + "Tx threshold now %02x\n", rp->tx_thresh); 733 727 } 734 728 735 729 rhine_restart_tx(dev); ··· 787 789 788 790 if (status & RHINE_EVENT_NAPI_TX) { 789 791 if (status & RHINE_EVENT_NAPI_TX_ERR) { 790 - u8 cmd; 791 - 792 792 /* Avoid scavenging before Tx engine turned off */ 793 793 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn); 794 - cmd = ioread8(ioaddr + ChipCmd); 795 - if ((cmd & CmdTxOn) && (debug > 2)) { 796 - netdev_warn(dev, "%s: Tx engine still on\n", 797 - __func__); 798 - } 794 + if (ioread8(ioaddr + ChipCmd) & CmdTxOn) 795 + netif_warn(rp, tx_err, dev, "Tx still on\n"); 799 796 } 797 + 800 798 rhine_tx(dev); 801 799 802 800 if (status & RHINE_EVENT_NAPI_TX_ERR) ··· 937 943 rp->quirks = quirks; 938 944 rp->pioaddr = pioaddr; 939 945 rp->pdev = pdev; 946 + rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); 940 947 941 948 rc = pci_request_regions(pdev, DRV_NAME); 942 949 if (rc) ··· 1059 1064 } 1060 1065 } 1061 1066 rp->mii_if.phy_id = phy_id; 1062 - if (debug > 1 && avoid_D3) 1063 - netdev_info(dev, "No D3 power state at shutdown\n"); 1067 + if (avoid_D3) 1068 + netif_info(rp, probe, dev, "No D3 power state at shutdown\n"); 1064 1069 1065 1070 return 0; 1066 1071 ··· 1236 1241 struct rhine_private *rp = netdev_priv(dev); 1237 1242 void __iomem *ioaddr = rp->base; 1238 1243 1239 - mii_check_media(&rp->mii_if, debug, init_media); 1244 + mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media); 1240 1245 1241 1246 if (rp->mii_if.full_duplex) 1242 1247 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex, ··· 1244 1249 else 1245 1250 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex, 1246 1251 ioaddr + ChipCmd1); 1247 - if (debug > 1) 1248 - netdev_info(dev, "force_media %d, carrier %d\n", 1249 - rp->mii_if.force_media, netif_carrier_ok(dev)); 1252 + 1253 + netif_info(rp, link, dev, "force_media %d, carrier %d\n", 1254 + rp->mii_if.force_media, netif_carrier_ok(dev)); 1250 1255 } 1251 1256 1252 1257 /* Called after status of force_media possibly 
changed */ 1253 1258 static void rhine_set_carrier(struct mii_if_info *mii) 1254 1259 { 1260 + struct net_device *dev = mii->dev; 1261 + struct rhine_private *rp = netdev_priv(dev); 1262 + 1255 1263 if (mii->force_media) { 1256 1264 /* autoneg is off: Link is always assumed to be up */ 1257 - if (!netif_carrier_ok(mii->dev)) 1258 - netif_carrier_on(mii->dev); 1259 - } 1260 - else /* Let MMI library update carrier status */ 1261 - rhine_check_media(mii->dev, 0); 1262 - if (debug > 1) 1263 - netdev_info(mii->dev, "force_media %d, carrier %d\n", 1264 - mii->force_media, netif_carrier_ok(mii->dev)); 1265 + if (!netif_carrier_ok(dev)) 1266 + netif_carrier_on(dev); 1267 + } else /* Let MMI library update carrier status */ 1268 + rhine_check_media(dev, 0); 1269 + 1270 + netif_info(rp, link, dev, "force_media %d, carrier %d\n", 1271 + mii->force_media, netif_carrier_ok(dev)); 1265 1272 } 1266 1273 1267 1274 /** ··· 1567 1570 if (rc) 1568 1571 return rc; 1569 1572 1570 - if (debug > 1) 1571 - netdev_dbg(dev, "%s() irq %d\n", __func__, rp->pdev->irq); 1573 + netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq); 1572 1574 1573 1575 rc = alloc_ring(dev); 1574 1576 if (rc) { ··· 1579 1583 rhine_chip_reset(dev); 1580 1584 rhine_task_enable(rp); 1581 1585 init_registers(dev); 1582 - if (debug > 2) 1583 - netdev_dbg(dev, "%s() Done - status %04x MII status: %04x\n", 1584 - __func__, ioread16(ioaddr + ChipCmd), 1585 - mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1586 + 1587 + netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n", 1588 + __func__, ioread16(ioaddr + ChipCmd), 1589 + mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1586 1590 1587 1591 netif_start_queue(dev); 1588 1592 ··· 1712 1716 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN) 1713 1717 netif_stop_queue(dev); 1714 1718 1715 - if (debug > 4) { 1716 - netdev_dbg(dev, "Transmit frame #%d queued in slot %d\n", 1717 - rp->cur_tx-1, entry); 1718 - } 1719 + netif_dbg(rp, tx_queued, dev, "Transmit 
frame #%d queued in slot %d\n", 1720 + rp->cur_tx - 1, entry); 1721 + 1719 1722 return NETDEV_TX_OK; 1720 1723 } 1721 1724 ··· 1735 1740 1736 1741 status = rhine_get_events(rp); 1737 1742 1738 - if (debug > 4) 1739 - netdev_dbg(dev, "Interrupt, status %08x\n", status); 1743 + netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status); 1740 1744 1741 1745 if (status & RHINE_EVENT) { 1742 1746 handled = 1; ··· 1745 1751 } 1746 1752 1747 1753 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) { 1748 - if (debug > 1) 1749 - netdev_err(dev, "Something Wicked happened! %08x\n", 1750 - status); 1754 + netif_err(rp, intr, dev, "Something Wicked happened! %08x\n", 1755 + status); 1751 1756 } 1752 1757 1753 1758 return IRQ_RETVAL(handled); ··· 1762 1769 /* find and cleanup dirty tx descriptors */ 1763 1770 while (rp->dirty_tx != rp->cur_tx) { 1764 1771 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status); 1765 - if (debug > 6) 1766 - netdev_dbg(dev, "Tx scavenge %d status %08x\n", 1767 - entry, txstatus); 1772 + netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n", 1773 + entry, txstatus); 1768 1774 if (txstatus & DescOwn) 1769 1775 break; 1770 1776 if (txstatus & 0x8000) { 1771 - if (debug > 1) 1772 - netdev_dbg(dev, "Transmit error, Tx status %08x\n", 1773 - txstatus); 1777 + netif_dbg(rp, tx_done, dev, 1778 + "Transmit error, Tx status %08x\n", txstatus); 1774 1779 dev->stats.tx_errors++; 1775 1780 if (txstatus & 0x0400) 1776 1781 dev->stats.tx_carrier_errors++; ··· 1790 1799 dev->stats.collisions += (txstatus >> 3) & 0x0F; 1791 1800 else 1792 1801 dev->stats.collisions += txstatus & 0x0F; 1793 - if (debug > 6) 1794 - netdev_dbg(dev, "collisions: %1.1x:%1.1x\n", 1795 - (txstatus >> 3) & 0xF, 1796 - txstatus & 0xF); 1802 + netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n", 1803 + (txstatus >> 3) & 0xF, txstatus & 0xF); 1797 1804 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len; 1798 1805 dev->stats.tx_packets++; 1799 1806 } ··· 1832 1843 int 
count; 1833 1844 int entry = rp->cur_rx % RX_RING_SIZE; 1834 1845 1835 - if (debug > 4) { 1836 - netdev_dbg(dev, "%s(), entry %d status %08x\n", 1837 - __func__, entry, 1838 - le32_to_cpu(rp->rx_head_desc->rx_status)); 1839 - } 1846 + netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__, 1847 + entry, le32_to_cpu(rp->rx_head_desc->rx_status)); 1840 1848 1841 1849 /* If EOP is set on the next entry, it's a new packet. Send it up. */ 1842 1850 for (count = 0; count < limit; ++count) { ··· 1845 1859 if (desc_status & DescOwn) 1846 1860 break; 1847 1861 1848 - if (debug > 4) 1849 - netdev_dbg(dev, "%s() status is %08x\n", 1850 - __func__, desc_status); 1862 + netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__, 1863 + desc_status); 1851 1864 1852 1865 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) { 1853 1866 if ((desc_status & RxWholePkt) != RxWholePkt) { ··· 1862 1877 dev->stats.rx_length_errors++; 1863 1878 } else if (desc_status & RxErr) { 1864 1879 /* There was a error. 
*/ 1865 - if (debug > 2) 1866 - netdev_dbg(dev, "%s() Rx error was %08x\n", 1867 - __func__, desc_status); 1880 + netif_dbg(rp, rx_err, dev, 1881 + "%s() Rx error %08x\n", __func__, 1882 + desc_status); 1868 1883 dev->stats.rx_errors++; 1869 1884 if (desc_status & 0x0030) 1870 1885 dev->stats.rx_length_errors++; ··· 1985 2000 } 1986 2001 else { 1987 2002 /* This should never happen */ 1988 - if (debug > 1) 1989 - netdev_warn(dev, "%s() Another error occurred %08x\n", 1990 - __func__, intr_status); 2003 + netif_warn(rp, tx_err, dev, "another error occurred %08x\n", 2004 + intr_status); 1991 2005 } 1992 2006 1993 2007 } ··· 2008 2024 2009 2025 if (intr_status & IntrLinkChange) 2010 2026 rhine_check_media(dev, 0); 2027 + 2028 + if (intr_status & IntrPCIErr) 2029 + netif_warn(rp, hw, dev, "PCI error\n"); 2011 2030 2012 2031 napi_disable(&rp->napi); 2013 2032 rhine_irq_disable(rp); ··· 2131 2144 2132 2145 static u32 netdev_get_msglevel(struct net_device *dev) 2133 2146 { 2134 - return debug; 2147 + struct rhine_private *rp = netdev_priv(dev); 2148 + 2149 + return rp->msg_enable; 2135 2150 } 2136 2151 2137 2152 static void netdev_set_msglevel(struct net_device *dev, u32 value) 2138 2153 { 2139 - debug = value; 2154 + struct rhine_private *rp = netdev_priv(dev); 2155 + 2156 + rp->msg_enable = value; 2140 2157 } 2141 2158 2142 2159 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) ··· 2213 2222 napi_disable(&rp->napi); 2214 2223 netif_stop_queue(dev); 2215 2224 2216 - if (debug > 1) 2217 - netdev_dbg(dev, "Shutting down ethercard, status was %04x\n", 2218 - ioread16(ioaddr + ChipCmd)); 2225 + netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n", 2226 + ioread16(ioaddr + ChipCmd)); 2219 2227 2220 2228 /* Switch to loopback mode to avoid hardware races. 
*/ 2221 2229 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig); ··· 2330 2340 return 0; 2331 2341 2332 2342 ret = pci_set_power_state(pdev, PCI_D0); 2333 - if (debug > 1) 2334 - netdev_info(dev, "Entering power state D0 %s (%d)\n", 2335 - ret ? "failed" : "succeeded", ret); 2343 + netif_info(rp, drv, dev, "Entering power state D0 %s (%d)\n", 2344 + ret ? "failed" : "succeeded", ret); 2336 2345 2337 2346 pci_restore_state(pdev); 2338 2347