Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sh: sh_eth: Add support for ethtool

This commit adds support for the following ethtool operations:
- get_settings
- set_settings
- nway_reset
- get_msglevel
- set_msglevel
- get_link
- get_strings
- get_ethtool_stats
- get_sset_count

The remaining ethtool operations are not implemented because the device does not support them.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Nobuhiro Iwamatsu and committed by
David S. Miller
dc19e4e5 59ed5aba

+189 -19
+189 -19
drivers/net/sh_eth.c
··· 32 32 #include <linux/io.h> 33 33 #include <linux/pm_runtime.h> 34 34 #include <linux/slab.h> 35 + #include <linux/ethtool.h> 35 36 #include <asm/cacheflush.h> 36 37 37 38 #include "sh_eth.h" 39 + 40 + #define SH_ETH_DEF_MSG_ENABLE \ 41 + (NETIF_MSG_LINK | \ 42 + NETIF_MSG_TIMER | \ 43 + NETIF_MSG_RX_ERR| \ 44 + NETIF_MSG_TX_ERR) 38 45 39 46 /* There is CPU dependent code */ 40 47 #if defined(CONFIG_CPU_SUBTYPE_SH7724) ··· 824 817 return 0; 825 818 } 826 819 820 + static void sh_eth_rcv_snd_disable(u32 ioaddr) 821 + { 822 + /* disable tx and rx */ 823 + writel(readl(ioaddr + ECMR) & 824 + ~(ECMR_RE | ECMR_TE), ioaddr + ECMR); 825 + } 826 + 827 + static void sh_eth_rcv_snd_enable(u32 ioaddr) 828 + { 829 + /* enable tx and rx */ 830 + writel(readl(ioaddr + ECMR) | 831 + (ECMR_RE | ECMR_TE), ioaddr + ECMR); 832 + } 833 + 827 834 /* error control function */ 828 835 static void sh_eth_error(struct net_device *ndev, int intr_status) 829 836 { ··· 864 843 if (mdp->ether_link_active_low) 865 844 link_stat = ~link_stat; 866 845 } 867 - if (!(link_stat & PHY_ST_LINK)) { 868 - /* Link Down : disable tx and rx */ 869 - writel(readl(ioaddr + ECMR) & 870 - ~(ECMR_RE | ECMR_TE), ioaddr + ECMR); 871 - } else { 846 + if (!(link_stat & PHY_ST_LINK)) 847 + sh_eth_rcv_snd_disable(ioaddr); 848 + else { 872 849 /* Link Up */ 873 850 writel(readl(ioaddr + EESIPR) & 874 851 ~DMAC_M_ECI, ioaddr + EESIPR); ··· 876 857 writel(readl(ioaddr + EESIPR) | 877 858 DMAC_M_ECI, ioaddr + EESIPR); 878 859 /* enable tx and rx */ 879 - writel(readl(ioaddr + ECMR) | 880 - (ECMR_RE | ECMR_TE), ioaddr + ECMR); 860 + sh_eth_rcv_snd_enable(ioaddr); 881 861 } 882 862 } 883 863 } ··· 885 867 /* Write buck end. 
unused write back interrupt */ 886 868 if (intr_status & EESR_TABT) /* Transmit Abort int */ 887 869 mdp->stats.tx_aborted_errors++; 870 + if (netif_msg_tx_err(mdp)) 871 + dev_err(&ndev->dev, "Transmit Abort\n"); 888 872 } 889 873 890 874 if (intr_status & EESR_RABT) { ··· 894 874 if (intr_status & EESR_RFRMER) { 895 875 /* Receive Frame Overflow int */ 896 876 mdp->stats.rx_frame_errors++; 897 - dev_err(&ndev->dev, "Receive Frame Overflow\n"); 877 + if (netif_msg_rx_err(mdp)) 878 + dev_err(&ndev->dev, "Receive Abort\n"); 898 879 } 899 880 } 900 881 901 - if (!mdp->cd->no_ade) { 902 - if (intr_status & EESR_ADE && intr_status & EESR_TDE && 903 - intr_status & EESR_TFE) 904 - mdp->stats.tx_fifo_errors++; 882 + if (intr_status & EESR_TDE) { 883 + /* Transmit Descriptor Empty int */ 884 + mdp->stats.tx_fifo_errors++; 885 + if (netif_msg_tx_err(mdp)) 886 + dev_err(&ndev->dev, "Transmit Descriptor Empty\n"); 887 + } 888 + 889 + if (intr_status & EESR_TFE) { 890 + /* FIFO under flow */ 891 + mdp->stats.tx_fifo_errors++; 892 + if (netif_msg_tx_err(mdp)) 893 + dev_err(&ndev->dev, "Transmit FIFO Under flow\n"); 905 894 } 906 895 907 896 if (intr_status & EESR_RDE) { ··· 919 890 920 891 if (readl(ioaddr + EDRRR) ^ EDRRR_R) 921 892 writel(EDRRR_R, ioaddr + EDRRR); 922 - dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 893 + if (netif_msg_rx_err(mdp)) 894 + dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 923 895 } 896 + 924 897 if (intr_status & EESR_RFE) { 925 898 /* Receive FIFO Overflow int */ 926 899 mdp->stats.rx_fifo_errors++; 927 - dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 900 + if (netif_msg_rx_err(mdp)) 901 + dev_err(&ndev->dev, "Receive FIFO Overflow\n"); 902 + } 903 + 904 + if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) { 905 + /* Address Error */ 906 + mdp->stats.tx_fifo_errors++; 907 + if (netif_msg_tx_err(mdp)) 908 + dev_err(&ndev->dev, "Address Error\n"); 928 909 } 929 910 930 911 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE; ··· 
1051 1012 mdp->duplex = -1; 1052 1013 } 1053 1014 1054 - if (new_state) 1015 + if (new_state && netif_msg_link(mdp)) 1055 1016 phy_print_status(phydev); 1056 1017 } 1057 1018 ··· 1102 1063 return 0; 1103 1064 } 1104 1065 1066 + static int sh_eth_get_settings(struct net_device *ndev, 1067 + struct ethtool_cmd *ecmd) 1068 + { 1069 + struct sh_eth_private *mdp = netdev_priv(ndev); 1070 + unsigned long flags; 1071 + int ret; 1072 + 1073 + spin_lock_irqsave(&mdp->lock, flags); 1074 + ret = phy_ethtool_gset(mdp->phydev, ecmd); 1075 + spin_unlock_irqrestore(&mdp->lock, flags); 1076 + 1077 + return ret; 1078 + } 1079 + 1080 + static int sh_eth_set_settings(struct net_device *ndev, 1081 + struct ethtool_cmd *ecmd) 1082 + { 1083 + struct sh_eth_private *mdp = netdev_priv(ndev); 1084 + unsigned long flags; 1085 + int ret; 1086 + u32 ioaddr = ndev->base_addr; 1087 + 1088 + spin_lock_irqsave(&mdp->lock, flags); 1089 + 1090 + /* disable tx and rx */ 1091 + sh_eth_rcv_snd_disable(ioaddr); 1092 + 1093 + ret = phy_ethtool_sset(mdp->phydev, ecmd); 1094 + if (ret) 1095 + goto error_exit; 1096 + 1097 + if (ecmd->duplex == DUPLEX_FULL) 1098 + mdp->duplex = 1; 1099 + else 1100 + mdp->duplex = 0; 1101 + 1102 + if (mdp->cd->set_duplex) 1103 + mdp->cd->set_duplex(ndev); 1104 + 1105 + error_exit: 1106 + mdelay(1); 1107 + 1108 + /* enable tx and rx */ 1109 + sh_eth_rcv_snd_enable(ioaddr); 1110 + 1111 + spin_unlock_irqrestore(&mdp->lock, flags); 1112 + 1113 + return ret; 1114 + } 1115 + 1116 + static int sh_eth_nway_reset(struct net_device *ndev) 1117 + { 1118 + struct sh_eth_private *mdp = netdev_priv(ndev); 1119 + unsigned long flags; 1120 + int ret; 1121 + 1122 + spin_lock_irqsave(&mdp->lock, flags); 1123 + ret = phy_start_aneg(mdp->phydev); 1124 + spin_unlock_irqrestore(&mdp->lock, flags); 1125 + 1126 + return ret; 1127 + } 1128 + 1129 + static u32 sh_eth_get_msglevel(struct net_device *ndev) 1130 + { 1131 + struct sh_eth_private *mdp = netdev_priv(ndev); 1132 + return mdp->msg_enable; 
1133 + } 1134 + 1135 + static void sh_eth_set_msglevel(struct net_device *ndev, u32 value) 1136 + { 1137 + struct sh_eth_private *mdp = netdev_priv(ndev); 1138 + mdp->msg_enable = value; 1139 + } 1140 + 1141 + static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = { 1142 + "rx_current", "tx_current", 1143 + "rx_dirty", "tx_dirty", 1144 + }; 1145 + #define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats) 1146 + 1147 + static int sh_eth_get_sset_count(struct net_device *netdev, int sset) 1148 + { 1149 + switch (sset) { 1150 + case ETH_SS_STATS: 1151 + return SH_ETH_STATS_LEN; 1152 + default: 1153 + return -EOPNOTSUPP; 1154 + } 1155 + } 1156 + 1157 + static void sh_eth_get_ethtool_stats(struct net_device *ndev, 1158 + struct ethtool_stats *stats, u64 *data) 1159 + { 1160 + struct sh_eth_private *mdp = netdev_priv(ndev); 1161 + int i = 0; 1162 + 1163 + /* device-specific stats */ 1164 + data[i++] = mdp->cur_rx; 1165 + data[i++] = mdp->cur_tx; 1166 + data[i++] = mdp->dirty_rx; 1167 + data[i++] = mdp->dirty_tx; 1168 + } 1169 + 1170 + static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data) 1171 + { 1172 + switch (stringset) { 1173 + case ETH_SS_STATS: 1174 + memcpy(data, *sh_eth_gstrings_stats, 1175 + sizeof(sh_eth_gstrings_stats)); 1176 + break; 1177 + } 1178 + } 1179 + 1180 + static struct ethtool_ops sh_eth_ethtool_ops = { 1181 + .get_settings = sh_eth_get_settings, 1182 + .set_settings = sh_eth_set_settings, 1183 + .nway_reset = sh_eth_nway_reset, 1184 + .get_msglevel = sh_eth_get_msglevel, 1185 + .set_msglevel = sh_eth_set_msglevel, 1186 + .get_link = ethtool_op_get_link, 1187 + .get_strings = sh_eth_get_strings, 1188 + .get_ethtool_stats = sh_eth_get_ethtool_stats, 1189 + .get_sset_count = sh_eth_get_sset_count, 1190 + }; 1191 + 1105 1192 /* network device open function */ 1106 1193 static int sh_eth_open(struct net_device *ndev) 1107 1194 { ··· 1238 1073 1239 1074 ret = request_irq(ndev->irq, sh_eth_interrupt, 1240 1075 #if 
defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 1241 - defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1242 - defined(CONFIG_CPU_SUBTYPE_SH7757) 1076 + defined(CONFIG_CPU_SUBTYPE_SH7764) || \ 1077 + defined(CONFIG_CPU_SUBTYPE_SH7757) 1243 1078 IRQF_SHARED, 1244 1079 #else 1245 1080 0, ··· 1288 1123 1289 1124 netif_stop_queue(ndev); 1290 1125 1291 - /* worning message out. */ 1292 - printk(KERN_WARNING "%s: transmit timed out, status %8.8x," 1126 + if (netif_msg_timer(mdp)) 1127 + dev_err(&ndev->dev, "%s: transmit timed out, status %8.8x," 1293 1128 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR)); 1294 1129 1295 1130 /* tx_errors count up */ ··· 1332 1167 spin_lock_irqsave(&mdp->lock, flags); 1333 1168 if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) { 1334 1169 if (!sh_eth_txfree(ndev)) { 1170 + if (netif_msg_tx_queued(mdp)) 1171 + dev_warn(&ndev->dev, "TxFD exhausted.\n"); 1335 1172 netif_stop_queue(ndev); 1336 1173 spin_unlock_irqrestore(&mdp->lock, flags); 1337 1174 return NETDEV_TX_BUSY; ··· 1664 1497 1665 1498 /* set function */ 1666 1499 ndev->netdev_ops = &sh_eth_netdev_ops; 1500 + SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops); 1667 1501 ndev->watchdog_timeo = TX_TIMEOUT; 1668 1502 1503 + /* debug message level */ 1504 + mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE; 1669 1505 mdp->post_rx = POST_RX >> (devno << 1); 1670 1506 mdp->post_fw = POST_FW >> (devno << 1); 1671 1507