Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'amd-xgbe-next'

Tom Lendacky says:

====================
amd-xgbe: AMD XGBE driver updates 2018-05-21

The following updates are included in this driver update series:

- Fix the debug output for the max channels count
- Read (once) and save the port property registers during probe
- Remove the use of the comm_owned field
- Remove unused SFP diagnostic support indicator field
- Add ethtool --module-info support
- Add ethtool --show-ring/--set-ring support
- Update the driver in preparation for ethtool --set-channels support
- Add ethtool --show-channels/--set-channels support
- Update the driver to always perform link training in KR mode
- Advertise FEC support when using a KR re-driver
- Update the BelFuse quirk to now support SGMII
- Improve 100Mbps auto-negotiation for BelFuse parts

This patch series is based on net-next.

---

Changes since v1:
- Update the --set-channels support to use the combined, rx and
tx options as specified in the ethtool man page (in other words, don't
create combined channels based on the min of the tx and rx channels
specified).
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+699 -258
+92 -45
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 1312 1312 return 0; 1313 1313 } 1314 1314 1315 + static void xgbe_free_memory(struct xgbe_prv_data *pdata) 1316 + { 1317 + struct xgbe_desc_if *desc_if = &pdata->desc_if; 1318 + 1319 + /* Free the ring descriptors and buffers */ 1320 + desc_if->free_ring_resources(pdata); 1321 + 1322 + /* Free the channel and ring structures */ 1323 + xgbe_free_channels(pdata); 1324 + } 1325 + 1326 + static int xgbe_alloc_memory(struct xgbe_prv_data *pdata) 1327 + { 1328 + struct xgbe_desc_if *desc_if = &pdata->desc_if; 1329 + struct net_device *netdev = pdata->netdev; 1330 + int ret; 1331 + 1332 + if (pdata->new_tx_ring_count) { 1333 + pdata->tx_ring_count = pdata->new_tx_ring_count; 1334 + pdata->tx_q_count = pdata->tx_ring_count; 1335 + pdata->new_tx_ring_count = 0; 1336 + } 1337 + 1338 + if (pdata->new_rx_ring_count) { 1339 + pdata->rx_ring_count = pdata->new_rx_ring_count; 1340 + pdata->new_rx_ring_count = 0; 1341 + } 1342 + 1343 + /* Calculate the Rx buffer size before allocating rings */ 1344 + pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu); 1345 + 1346 + /* Allocate the channel and ring structures */ 1347 + ret = xgbe_alloc_channels(pdata); 1348 + if (ret) 1349 + return ret; 1350 + 1351 + /* Allocate the ring descriptors and buffers */ 1352 + ret = desc_if->alloc_ring_resources(pdata); 1353 + if (ret) 1354 + goto err_channels; 1355 + 1356 + /* Initialize the service and Tx timers */ 1357 + xgbe_init_timers(pdata); 1358 + 1359 + return 0; 1360 + 1361 + err_channels: 1362 + xgbe_free_memory(pdata); 1363 + 1364 + return ret; 1365 + } 1366 + 1315 1367 static int xgbe_start(struct xgbe_prv_data *pdata) 1316 1368 { 1317 1369 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1318 1370 struct xgbe_phy_if *phy_if = &pdata->phy_if; 1319 1371 struct net_device *netdev = pdata->netdev; 1372 + unsigned int i; 1320 1373 int ret; 1321 1374 1322 - DBGPR("-->xgbe_start\n"); 1375 + /* Set the number of queues */ 1376 + ret = netif_set_real_num_tx_queues(netdev, 
pdata->tx_ring_count); 1377 + if (ret) { 1378 + netdev_err(netdev, "error setting real tx queue count\n"); 1379 + return ret; 1380 + } 1381 + 1382 + ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); 1383 + if (ret) { 1384 + netdev_err(netdev, "error setting real rx queue count\n"); 1385 + return ret; 1386 + } 1387 + 1388 + /* Set RSS lookup table data for programming */ 1389 + for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) 1390 + XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, 1391 + i % pdata->rx_ring_count); 1323 1392 1324 1393 ret = hw_if->init(pdata); 1325 1394 if (ret) ··· 1415 1346 queue_work(pdata->dev_workqueue, &pdata->service_work); 1416 1347 1417 1348 clear_bit(XGBE_STOPPED, &pdata->dev_state); 1418 - 1419 - DBGPR("<--xgbe_start\n"); 1420 1349 1421 1350 return 0; 1422 1351 ··· 1493 1426 netdev_alert(pdata->netdev, "device stopped\n"); 1494 1427 } 1495 1428 1496 - static void xgbe_restart_dev(struct xgbe_prv_data *pdata) 1429 + void xgbe_full_restart_dev(struct xgbe_prv_data *pdata) 1497 1430 { 1498 - DBGPR("-->xgbe_restart_dev\n"); 1431 + /* If not running, "restart" will happen on open */ 1432 + if (!netif_running(pdata->netdev)) 1433 + return; 1499 1434 1435 + xgbe_stop(pdata); 1436 + 1437 + xgbe_free_memory(pdata); 1438 + xgbe_alloc_memory(pdata); 1439 + 1440 + xgbe_start(pdata); 1441 + } 1442 + 1443 + void xgbe_restart_dev(struct xgbe_prv_data *pdata) 1444 + { 1500 1445 /* If not running, "restart" will happen on open */ 1501 1446 if (!netif_running(pdata->netdev)) 1502 1447 return; ··· 1519 1440 xgbe_free_rx_data(pdata); 1520 1441 1521 1442 xgbe_start(pdata); 1522 - 1523 - DBGPR("<--xgbe_restart_dev\n"); 1524 1443 } 1525 1444 1526 1445 static void xgbe_restart(struct work_struct *work) ··· 1904 1827 static int xgbe_open(struct net_device *netdev) 1905 1828 { 1906 1829 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1907 - struct xgbe_desc_if *desc_if = &pdata->desc_if; 1908 1830 int ret; 1909 - 1910 - DBGPR("-->xgbe_open\n"); 
1911 1831 1912 1832 /* Create the various names based on netdev name */ 1913 1833 snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs", ··· 1950 1876 goto err_sysclk; 1951 1877 } 1952 1878 1953 - /* Calculate the Rx buffer size before allocating rings */ 1954 - ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu); 1955 - if (ret < 0) 1956 - goto err_ptpclk; 1957 - pdata->rx_buf_size = ret; 1958 - 1959 - /* Allocate the channel and ring structures */ 1960 - ret = xgbe_alloc_channels(pdata); 1961 - if (ret) 1962 - goto err_ptpclk; 1963 - 1964 - /* Allocate the ring descriptors and buffers */ 1965 - ret = desc_if->alloc_ring_resources(pdata); 1966 - if (ret) 1967 - goto err_channels; 1968 - 1969 1879 INIT_WORK(&pdata->service_work, xgbe_service); 1970 1880 INIT_WORK(&pdata->restart_work, xgbe_restart); 1971 1881 INIT_WORK(&pdata->stopdev_work, xgbe_stopdev); 1972 1882 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp); 1973 - xgbe_init_timers(pdata); 1883 + 1884 + ret = xgbe_alloc_memory(pdata); 1885 + if (ret) 1886 + goto err_ptpclk; 1974 1887 1975 1888 ret = xgbe_start(pdata); 1976 1889 if (ret) 1977 - goto err_rings; 1890 + goto err_mem; 1978 1891 1979 1892 clear_bit(XGBE_DOWN, &pdata->dev_state); 1980 1893 1981 - DBGPR("<--xgbe_open\n"); 1982 - 1983 1894 return 0; 1984 1895 1985 - err_rings: 1986 - desc_if->free_ring_resources(pdata); 1987 - 1988 - err_channels: 1989 - xgbe_free_channels(pdata); 1896 + err_mem: 1897 + xgbe_free_memory(pdata); 1990 1898 1991 1899 err_ptpclk: 1992 1900 clk_disable_unprepare(pdata->ptpclk); ··· 1988 1932 static int xgbe_close(struct net_device *netdev) 1989 1933 { 1990 1934 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1991 - struct xgbe_desc_if *desc_if = &pdata->desc_if; 1992 - 1993 - DBGPR("-->xgbe_close\n"); 1994 1935 1995 1936 /* Stop the device */ 1996 1937 xgbe_stop(pdata); 1997 1938 1998 - /* Free the ring descriptors and buffers */ 1999 - desc_if->free_ring_resources(pdata); 2000 - 2001 - /* Free the channel and 
ring structures */ 2002 - xgbe_free_channels(pdata); 1939 + xgbe_free_memory(pdata); 2003 1940 2004 1941 /* Disable the clocks */ 2005 1942 clk_disable_unprepare(pdata->ptpclk); ··· 2005 1956 destroy_workqueue(pdata->dev_workqueue); 2006 1957 2007 1958 set_bit(XGBE_DOWN, &pdata->dev_state); 2008 - 2009 - DBGPR("<--xgbe_close\n"); 2010 1959 2011 1960 return 0; 2012 1961 }
+217
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
··· 626 626 return 0; 627 627 } 628 628 629 + static int xgbe_get_module_info(struct net_device *netdev, 630 + struct ethtool_modinfo *modinfo) 631 + { 632 + struct xgbe_prv_data *pdata = netdev_priv(netdev); 633 + 634 + return pdata->phy_if.module_info(pdata, modinfo); 635 + } 636 + 637 + static int xgbe_get_module_eeprom(struct net_device *netdev, 638 + struct ethtool_eeprom *eeprom, u8 *data) 639 + { 640 + struct xgbe_prv_data *pdata = netdev_priv(netdev); 641 + 642 + return pdata->phy_if.module_eeprom(pdata, eeprom, data); 643 + } 644 + 645 + static void xgbe_get_ringparam(struct net_device *netdev, 646 + struct ethtool_ringparam *ringparam) 647 + { 648 + struct xgbe_prv_data *pdata = netdev_priv(netdev); 649 + 650 + ringparam->rx_max_pending = XGBE_RX_DESC_CNT_MAX; 651 + ringparam->tx_max_pending = XGBE_TX_DESC_CNT_MAX; 652 + ringparam->rx_pending = pdata->rx_desc_count; 653 + ringparam->tx_pending = pdata->tx_desc_count; 654 + } 655 + 656 + static int xgbe_set_ringparam(struct net_device *netdev, 657 + struct ethtool_ringparam *ringparam) 658 + { 659 + struct xgbe_prv_data *pdata = netdev_priv(netdev); 660 + unsigned int rx, tx; 661 + 662 + if (ringparam->rx_mini_pending || ringparam->rx_jumbo_pending) { 663 + netdev_err(netdev, "unsupported ring parameter\n"); 664 + return -EINVAL; 665 + } 666 + 667 + if ((ringparam->rx_pending < XGBE_RX_DESC_CNT_MIN) || 668 + (ringparam->rx_pending > XGBE_RX_DESC_CNT_MAX)) { 669 + netdev_err(netdev, 670 + "rx ring parameter must be between %u and %u\n", 671 + XGBE_RX_DESC_CNT_MIN, XGBE_RX_DESC_CNT_MAX); 672 + return -EINVAL; 673 + } 674 + 675 + if ((ringparam->tx_pending < XGBE_TX_DESC_CNT_MIN) || 676 + (ringparam->tx_pending > XGBE_TX_DESC_CNT_MAX)) { 677 + netdev_err(netdev, 678 + "tx ring parameter must be between %u and %u\n", 679 + XGBE_TX_DESC_CNT_MIN, XGBE_TX_DESC_CNT_MAX); 680 + return -EINVAL; 681 + } 682 + 683 + rx = __rounddown_pow_of_two(ringparam->rx_pending); 684 + if (rx != ringparam->rx_pending) 685 + 
netdev_notice(netdev, 686 + "rx ring parameter rounded to power of two: %u\n", 687 + rx); 688 + 689 + tx = __rounddown_pow_of_two(ringparam->tx_pending); 690 + if (tx != ringparam->tx_pending) 691 + netdev_notice(netdev, 692 + "tx ring parameter rounded to power of two: %u\n", 693 + tx); 694 + 695 + if ((rx == pdata->rx_desc_count) && 696 + (tx == pdata->tx_desc_count)) 697 + goto out; 698 + 699 + pdata->rx_desc_count = rx; 700 + pdata->tx_desc_count = tx; 701 + 702 + xgbe_restart_dev(pdata); 703 + 704 + out: 705 + return 0; 706 + } 707 + 708 + static void xgbe_get_channels(struct net_device *netdev, 709 + struct ethtool_channels *channels) 710 + { 711 + struct xgbe_prv_data *pdata = netdev_priv(netdev); 712 + unsigned int rx, tx, combined; 713 + 714 + /* Calculate maximums allowed: 715 + * - Take into account the number of available IRQs 716 + * - Do not take into account the number of online CPUs so that 717 + * the user can over-subscribe if desired 718 + * - Tx is additionally limited by the number of hardware queues 719 + */ 720 + rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count); 721 + rx = min(rx, pdata->channel_irq_count); 722 + tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count); 723 + tx = min(tx, pdata->channel_irq_count); 724 + tx = min(tx, pdata->tx_max_q_count); 725 + 726 + combined = min(rx, tx); 727 + 728 + channels->max_combined = combined; 729 + channels->max_rx = rx ? rx - 1 : 0; 730 + channels->max_tx = tx ? tx - 1 : 0; 731 + 732 + /* Get current settings based on device state */ 733 + rx = pdata->new_rx_ring_count ? : pdata->rx_ring_count; 734 + tx = pdata->new_tx_ring_count ? 
: pdata->tx_ring_count; 735 + 736 + combined = min(rx, tx); 737 + rx -= combined; 738 + tx -= combined; 739 + 740 + channels->combined_count = combined; 741 + channels->rx_count = rx; 742 + channels->tx_count = tx; 743 + } 744 + 745 + static void xgbe_print_set_channels_input(struct net_device *netdev, 746 + struct ethtool_channels *channels) 747 + { 748 + netdev_err(netdev, "channel inputs: combined=%u, rx-only=%u, tx-only=%u\n", 749 + channels->combined_count, channels->rx_count, 750 + channels->tx_count); 751 + } 752 + 753 + static int xgbe_set_channels(struct net_device *netdev, 754 + struct ethtool_channels *channels) 755 + { 756 + struct xgbe_prv_data *pdata = netdev_priv(netdev); 757 + unsigned int rx, rx_curr, tx, tx_curr, combined; 758 + 759 + /* Calculate maximums allowed: 760 + * - Take into account the number of available IRQs 761 + * - Do not take into account the number of online CPUs so that 762 + * the user can over-subscribe if desired 763 + * - Tx is additionally limited by the number of hardware queues 764 + */ 765 + rx = min(pdata->hw_feat.rx_ch_cnt, pdata->rx_max_channel_count); 766 + rx = min(rx, pdata->channel_irq_count); 767 + tx = min(pdata->hw_feat.tx_ch_cnt, pdata->tx_max_channel_count); 768 + tx = min(tx, pdata->tx_max_q_count); 769 + tx = min(tx, pdata->channel_irq_count); 770 + 771 + combined = min(rx, tx); 772 + 773 + /* Should not be setting other count */ 774 + if (channels->other_count) { 775 + netdev_err(netdev, 776 + "other channel count must be zero\n"); 777 + return -EINVAL; 778 + } 779 + 780 + /* Require at least one Combined (Rx and Tx) channel */ 781 + if (!channels->combined_count) { 782 + netdev_err(netdev, 783 + "at least one combined Rx/Tx channel is required\n"); 784 + xgbe_print_set_channels_input(netdev, channels); 785 + return -EINVAL; 786 + } 787 + 788 + /* Check combined channels */ 789 + if (channels->combined_count > combined) { 790 + netdev_err(netdev, 791 + "combined channel count cannot exceed %u\n", 792 + 
combined); 793 + xgbe_print_set_channels_input(netdev, channels); 794 + return -EINVAL; 795 + } 796 + 797 + /* Can have some Rx-only or Tx-only channels, but not both */ 798 + if (channels->rx_count && channels->tx_count) { 799 + netdev_err(netdev, 800 + "cannot specify both Rx-only and Tx-only channels\n"); 801 + xgbe_print_set_channels_input(netdev, channels); 802 + return -EINVAL; 803 + } 804 + 805 + /* Check that we don't exceed the maximum number of channels */ 806 + if ((channels->combined_count + channels->rx_count) > rx) { 807 + netdev_err(netdev, 808 + "total Rx channels (%u) requested exceeds maximum available (%u)\n", 809 + channels->combined_count + channels->rx_count, rx); 810 + xgbe_print_set_channels_input(netdev, channels); 811 + return -EINVAL; 812 + } 813 + 814 + if ((channels->combined_count + channels->tx_count) > tx) { 815 + netdev_err(netdev, 816 + "total Tx channels (%u) requested exceeds maximum available (%u)\n", 817 + channels->combined_count + channels->tx_count, tx); 818 + xgbe_print_set_channels_input(netdev, channels); 819 + return -EINVAL; 820 + } 821 + 822 + rx = channels->combined_count + channels->rx_count; 823 + tx = channels->combined_count + channels->tx_count; 824 + 825 + rx_curr = pdata->new_rx_ring_count ? : pdata->rx_ring_count; 826 + tx_curr = pdata->new_tx_ring_count ? 
: pdata->tx_ring_count; 827 + 828 + if ((rx == rx_curr) && (tx == tx_curr)) 829 + goto out; 830 + 831 + pdata->new_rx_ring_count = rx; 832 + pdata->new_tx_ring_count = tx; 833 + 834 + xgbe_full_restart_dev(pdata); 835 + 836 + out: 837 + return 0; 838 + } 839 + 629 840 static const struct ethtool_ops xgbe_ethtool_ops = { 630 841 .get_drvinfo = xgbe_get_drvinfo, 631 842 .get_msglevel = xgbe_get_msglevel, ··· 857 646 .get_ts_info = xgbe_get_ts_info, 858 647 .get_link_ksettings = xgbe_get_link_ksettings, 859 648 .set_link_ksettings = xgbe_set_link_ksettings, 649 + .get_module_info = xgbe_get_module_info, 650 + .get_module_eeprom = xgbe_get_module_eeprom, 651 + .get_ringparam = xgbe_get_ringparam, 652 + .set_ringparam = xgbe_set_ringparam, 653 + .get_channels = xgbe_get_channels, 654 + .set_channels = xgbe_set_channels, 860 655 }; 861 656 862 657 const struct ethtool_ops *xgbe_get_ethtool_ops(void)
+1 -19
drivers/net/ethernet/amd/xgbe/xgbe-main.c
··· 265 265 { 266 266 struct net_device *netdev = pdata->netdev; 267 267 struct device *dev = pdata->dev; 268 - unsigned int i; 269 268 int ret; 270 269 271 270 netdev->irq = pdata->dev_irq; ··· 323 324 pdata->tx_ring_count, pdata->rx_ring_count); 324 325 } 325 326 326 - /* Set the number of queues */ 327 - ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count); 328 - if (ret) { 329 - dev_err(dev, "error setting real tx queue count\n"); 330 - return ret; 331 - } 332 - 333 - ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count); 334 - if (ret) { 335 - dev_err(dev, "error setting real rx queue count\n"); 336 - return ret; 337 - } 338 - 339 - /* Initialize RSS hash key and lookup table */ 327 + /* Initialize RSS hash key */ 340 328 netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key)); 341 - 342 - for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++) 343 - XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, 344 - i % pdata->rx_ring_count); 345 329 346 330 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); 347 331 XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+80 -87
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
··· 126 126 #include "xgbe.h" 127 127 #include "xgbe-common.h" 128 128 129 + static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata, 130 + struct ethtool_eeprom *eeprom, u8 *data) 131 + { 132 + if (!pdata->phy_if.phy_impl.module_eeprom) 133 + return -ENXIO; 134 + 135 + return pdata->phy_if.phy_impl.module_eeprom(pdata, eeprom, data); 136 + } 137 + 138 + static int xgbe_phy_module_info(struct xgbe_prv_data *pdata, 139 + struct ethtool_modinfo *modinfo) 140 + { 141 + if (!pdata->phy_if.phy_impl.module_info) 142 + return -ENXIO; 143 + 144 + return pdata->phy_if.phy_impl.module_info(pdata, modinfo); 145 + } 146 + 129 147 static void xgbe_an37_clear_interrupts(struct xgbe_prv_data *pdata) 130 148 { 131 149 int reg; ··· 216 198 xgbe_an37_clear_interrupts(pdata); 217 199 } 218 200 219 - static void xgbe_an73_enable_kr_training(struct xgbe_prv_data *pdata) 220 - { 221 - unsigned int reg; 222 - 223 - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); 224 - 225 - reg |= XGBE_KR_TRAINING_ENABLE; 226 - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); 227 - } 228 - 229 - static void xgbe_an73_disable_kr_training(struct xgbe_prv_data *pdata) 230 - { 231 - unsigned int reg; 232 - 233 - reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); 234 - 235 - reg &= ~XGBE_KR_TRAINING_ENABLE; 236 - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); 237 - } 238 - 239 201 static void xgbe_kr_mode(struct xgbe_prv_data *pdata) 240 202 { 241 - /* Enable KR training */ 242 - xgbe_an73_enable_kr_training(pdata); 243 - 244 203 /* Set MAC to 10G speed */ 245 204 pdata->hw_if.set_speed(pdata, SPEED_10000); 246 205 ··· 227 232 228 233 static void xgbe_kx_2500_mode(struct xgbe_prv_data *pdata) 229 234 { 230 - /* Disable KR training */ 231 - xgbe_an73_disable_kr_training(pdata); 232 - 233 235 /* Set MAC to 2.5G speed */ 234 236 pdata->hw_if.set_speed(pdata, SPEED_2500); 235 237 ··· 236 244 237 245 static void xgbe_kx_1000_mode(struct 
xgbe_prv_data *pdata) 238 246 { 239 - /* Disable KR training */ 240 - xgbe_an73_disable_kr_training(pdata); 241 - 242 247 /* Set MAC to 1G speed */ 243 248 pdata->hw_if.set_speed(pdata, SPEED_1000); 244 249 ··· 249 260 if (pdata->kr_redrv) 250 261 return xgbe_kr_mode(pdata); 251 262 252 - /* Disable KR training */ 253 - xgbe_an73_disable_kr_training(pdata); 254 - 255 263 /* Set MAC to 10G speed */ 256 264 pdata->hw_if.set_speed(pdata, SPEED_10000); 257 265 ··· 258 272 259 273 static void xgbe_x_mode(struct xgbe_prv_data *pdata) 260 274 { 261 - /* Disable KR training */ 262 - xgbe_an73_disable_kr_training(pdata); 263 - 264 275 /* Set MAC to 1G speed */ 265 276 pdata->hw_if.set_speed(pdata, SPEED_1000); 266 277 ··· 267 284 268 285 static void xgbe_sgmii_1000_mode(struct xgbe_prv_data *pdata) 269 286 { 270 - /* Disable KR training */ 271 - xgbe_an73_disable_kr_training(pdata); 272 - 273 287 /* Set MAC to 1G speed */ 274 288 pdata->hw_if.set_speed(pdata, SPEED_1000); 275 289 ··· 276 296 277 297 static void xgbe_sgmii_100_mode(struct xgbe_prv_data *pdata) 278 298 { 279 - /* Disable KR training */ 280 - xgbe_an73_disable_kr_training(pdata); 281 - 282 299 /* Set MAC to 1G speed */ 283 300 pdata->hw_if.set_speed(pdata, SPEED_1000); 284 301 ··· 331 354 xgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata)); 332 355 } 333 356 334 - static void xgbe_set_mode(struct xgbe_prv_data *pdata, 357 + static bool xgbe_set_mode(struct xgbe_prv_data *pdata, 335 358 enum xgbe_mode mode) 336 359 { 337 360 if (mode == xgbe_cur_mode(pdata)) 338 - return; 361 + return false; 339 362 340 363 xgbe_change_mode(pdata, mode); 364 + 365 + return true; 341 366 } 342 367 343 368 static bool xgbe_use_mode(struct xgbe_prv_data *pdata, ··· 386 407 { 387 408 unsigned int reg; 388 409 410 + /* Disable KR training for now */ 411 + reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); 412 + reg &= ~XGBE_KR_TRAINING_ENABLE; 413 + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, 
MDIO_PMA_10GBR_PMD_CTRL, reg); 414 + 415 + /* Update AN settings */ 389 416 reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); 390 417 reg &= ~MDIO_AN_CTRL1_ENABLE; 391 418 ··· 489 504 XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg); 490 505 491 506 /* Start KR training */ 507 + if (pdata->phy_if.phy_impl.kr_training_pre) 508 + pdata->phy_if.phy_impl.kr_training_pre(pdata); 509 + 492 510 reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL); 493 - if (reg & XGBE_KR_TRAINING_ENABLE) { 494 - if (pdata->phy_if.phy_impl.kr_training_pre) 495 - pdata->phy_if.phy_impl.kr_training_pre(pdata); 511 + reg |= XGBE_KR_TRAINING_ENABLE; 512 + reg |= XGBE_KR_TRAINING_START; 513 + XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); 496 514 497 - reg |= XGBE_KR_TRAINING_START; 498 - XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, 499 - reg); 515 + netif_dbg(pdata, link, pdata->netdev, 516 + "KR training initiated\n"); 500 517 501 - netif_dbg(pdata, link, pdata->netdev, 502 - "KR training initiated\n"); 503 - 504 - if (pdata->phy_if.phy_impl.kr_training_post) 505 - pdata->phy_if.phy_impl.kr_training_post(pdata); 506 - } 518 + if (pdata->phy_if.phy_impl.kr_training_post) 519 + pdata->phy_if.phy_impl.kr_training_post(pdata); 507 520 508 521 return XGBE_AN_PAGE_RECEIVED; 509 522 } ··· 1180 1197 return 0; 1181 1198 } 1182 1199 1183 - static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) 1200 + static int __xgbe_phy_config_aneg(struct xgbe_prv_data *pdata, bool set_mode) 1184 1201 { 1185 1202 int ret; 1203 + 1204 + mutex_lock(&pdata->an_mutex); 1186 1205 1187 1206 set_bit(XGBE_LINK_INIT, &pdata->dev_state); 1188 1207 pdata->link_check = jiffies; 1189 1208 1190 1209 ret = pdata->phy_if.phy_impl.an_config(pdata); 1191 1210 if (ret) 1192 - return ret; 1211 + goto out; 1193 1212 1194 1213 if (pdata->phy.autoneg != AUTONEG_ENABLE) { 1195 1214 ret = xgbe_phy_config_fixed(pdata); 1196 1215 if (ret || !pdata->kr_redrv) 1197 - return 
ret; 1216 + goto out; 1198 1217 1199 1218 netif_dbg(pdata, link, pdata->netdev, "AN redriver support\n"); 1200 1219 } else { ··· 1206 1221 /* Disable auto-negotiation interrupt */ 1207 1222 disable_irq(pdata->an_irq); 1208 1223 1209 - /* Start auto-negotiation in a supported mode */ 1210 - if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { 1211 - xgbe_set_mode(pdata, XGBE_MODE_KR); 1212 - } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { 1213 - xgbe_set_mode(pdata, XGBE_MODE_KX_2500); 1214 - } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { 1215 - xgbe_set_mode(pdata, XGBE_MODE_KX_1000); 1216 - } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { 1217 - xgbe_set_mode(pdata, XGBE_MODE_SFI); 1218 - } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { 1219 - xgbe_set_mode(pdata, XGBE_MODE_X); 1220 - } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { 1221 - xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); 1222 - } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { 1223 - xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); 1224 - } else { 1225 - enable_irq(pdata->an_irq); 1226 - return -EINVAL; 1224 + if (set_mode) { 1225 + /* Start auto-negotiation in a supported mode */ 1226 + if (xgbe_use_mode(pdata, XGBE_MODE_KR)) { 1227 + xgbe_set_mode(pdata, XGBE_MODE_KR); 1228 + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_2500)) { 1229 + xgbe_set_mode(pdata, XGBE_MODE_KX_2500); 1230 + } else if (xgbe_use_mode(pdata, XGBE_MODE_KX_1000)) { 1231 + xgbe_set_mode(pdata, XGBE_MODE_KX_1000); 1232 + } else if (xgbe_use_mode(pdata, XGBE_MODE_SFI)) { 1233 + xgbe_set_mode(pdata, XGBE_MODE_SFI); 1234 + } else if (xgbe_use_mode(pdata, XGBE_MODE_X)) { 1235 + xgbe_set_mode(pdata, XGBE_MODE_X); 1236 + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_1000)) { 1237 + xgbe_set_mode(pdata, XGBE_MODE_SGMII_1000); 1238 + } else if (xgbe_use_mode(pdata, XGBE_MODE_SGMII_100)) { 1239 + xgbe_set_mode(pdata, XGBE_MODE_SGMII_100); 1240 + } else { 1241 + enable_irq(pdata->an_irq); 1242 + ret = -EINVAL; 1243 + goto out; 
1244 + } 1227 1245 } 1228 1246 1229 1247 /* Disable and stop any in progress auto-negotiation */ ··· 1246 1258 xgbe_an_init(pdata); 1247 1259 xgbe_an_restart(pdata); 1248 1260 1249 - return 0; 1250 - } 1251 - 1252 - static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) 1253 - { 1254 - int ret; 1255 - 1256 - mutex_lock(&pdata->an_mutex); 1257 - 1258 - ret = __xgbe_phy_config_aneg(pdata); 1261 + out: 1259 1262 if (ret) 1260 1263 set_bit(XGBE_LINK_ERR, &pdata->dev_state); 1261 1264 else ··· 1255 1276 mutex_unlock(&pdata->an_mutex); 1256 1277 1257 1278 return ret; 1279 + } 1280 + 1281 + static int xgbe_phy_config_aneg(struct xgbe_prv_data *pdata) 1282 + { 1283 + return __xgbe_phy_config_aneg(pdata, true); 1284 + } 1285 + 1286 + static int xgbe_phy_reconfig_aneg(struct xgbe_prv_data *pdata) 1287 + { 1288 + return __xgbe_phy_config_aneg(pdata, false); 1258 1289 } 1259 1290 1260 1291 static bool xgbe_phy_aneg_done(struct xgbe_prv_data *pdata) ··· 1323 1334 1324 1335 pdata->phy.duplex = DUPLEX_FULL; 1325 1336 1326 - xgbe_set_mode(pdata, mode); 1337 + if (xgbe_set_mode(pdata, mode) && pdata->an_again) 1338 + xgbe_phy_reconfig_aneg(pdata); 1327 1339 } 1328 1340 1329 1341 static void xgbe_phy_status(struct xgbe_prv_data *pdata) ··· 1629 1639 phy_if->phy_valid_speed = xgbe_phy_valid_speed; 1630 1640 1631 1641 phy_if->an_isr = xgbe_an_combined_isr; 1642 + 1643 + phy_if->module_info = xgbe_phy_module_info; 1644 + phy_if->module_eeprom = xgbe_phy_module_eeprom; 1632 1645 }
+27 -9
drivers/net/ethernet/amd/xgbe/xgbe-pci.c
··· 335 335 pdata->awcr = XGBE_DMA_PCI_AWCR; 336 336 pdata->awarcr = XGBE_DMA_PCI_AWARCR; 337 337 338 + /* Read the port property registers */ 339 + pdata->pp0 = XP_IOREAD(pdata, XP_PROP_0); 340 + pdata->pp1 = XP_IOREAD(pdata, XP_PROP_1); 341 + pdata->pp2 = XP_IOREAD(pdata, XP_PROP_2); 342 + pdata->pp3 = XP_IOREAD(pdata, XP_PROP_3); 343 + pdata->pp4 = XP_IOREAD(pdata, XP_PROP_4); 344 + if (netif_msg_probe(pdata)) { 345 + dev_dbg(dev, "port property 0 = %#010x\n", pdata->pp0); 346 + dev_dbg(dev, "port property 1 = %#010x\n", pdata->pp1); 347 + dev_dbg(dev, "port property 2 = %#010x\n", pdata->pp2); 348 + dev_dbg(dev, "port property 3 = %#010x\n", pdata->pp3); 349 + dev_dbg(dev, "port property 4 = %#010x\n", pdata->pp4); 350 + } 351 + 338 352 /* Set the maximum channels and queues */ 339 - reg = XP_IOREAD(pdata, XP_PROP_1); 340 - pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA); 341 - pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA); 342 - pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES); 343 - pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES); 353 + pdata->tx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, 354 + MAX_TX_DMA); 355 + pdata->rx_max_channel_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, 356 + MAX_RX_DMA); 357 + pdata->tx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, 358 + MAX_TX_QUEUES); 359 + pdata->rx_max_q_count = XP_GET_BITS(pdata->pp1, XP_PROP_1, 360 + MAX_RX_QUEUES); 344 361 if (netif_msg_probe(pdata)) { 345 362 dev_dbg(dev, "max tx/rx channel count = %u/%u\n", 346 363 pdata->tx_max_channel_count, 347 - pdata->tx_max_channel_count); 364 + pdata->rx_max_channel_count); 348 365 dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n", 349 366 pdata->tx_max_q_count, pdata->rx_max_q_count); 350 367 } ··· 370 353 xgbe_set_counts(pdata); 371 354 372 355 /* Set the maximum fifo amounts */ 373 - reg = XP_IOREAD(pdata, XP_PROP_2); 374 - pdata->tx_max_fifo_size = XP_GET_BITS(reg, 
XP_PROP_2, TX_FIFO_SIZE); 356 + pdata->tx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, 357 + TX_FIFO_SIZE); 375 358 pdata->tx_max_fifo_size *= 16384; 376 359 pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size, 377 360 pdata->vdata->tx_max_fifo_size); 378 - pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE); 361 + pdata->rx_max_fifo_size = XP_GET_BITS(pdata->pp2, XP_PROP_2, 362 + RX_FIFO_SIZE); 379 363 pdata->rx_max_fifo_size *= 16384; 380 364 pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size, 381 365 pdata->vdata->rx_max_fifo_size);
+251 -98
drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c
··· 119 119 #include <linux/kmod.h> 120 120 #include <linux/mdio.h> 121 121 #include <linux/phy.h> 122 + #include <linux/ethtool.h> 122 123 123 124 #include "xgbe.h" 124 125 #include "xgbe-common.h" ··· 271 270 u8 vendor[32]; 272 271 }; 273 272 273 + #define XGBE_SFP_DIAGS_SUPPORTED(_x) \ 274 + ((_x)->extd[XGBE_SFP_EXTD_SFF_8472] && \ 275 + !((_x)->extd[XGBE_SFP_EXTD_DIAG] & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) 276 + 277 + #define XGBE_SFP_EEPROM_BASE_LEN 256 278 + #define XGBE_SFP_EEPROM_DIAG_LEN 256 279 + #define XGBE_SFP_EEPROM_MAX (XGBE_SFP_EEPROM_BASE_LEN + \ 280 + XGBE_SFP_EEPROM_DIAG_LEN) 281 + 274 282 #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE " 275 283 #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 " 276 284 ··· 337 327 338 328 unsigned int mdio_addr; 339 329 340 - unsigned int comm_owned; 341 - 342 330 /* SFP Support */ 343 331 enum xgbe_sfp_comm sfp_comm; 344 332 unsigned int sfp_mux_address; ··· 353 345 unsigned int sfp_rx_los; 354 346 unsigned int sfp_tx_fault; 355 347 unsigned int sfp_mod_absent; 356 - unsigned int sfp_diags; 357 348 unsigned int sfp_changed; 358 349 unsigned int sfp_phy_avail; 359 350 unsigned int sfp_cable_len; ··· 389 382 static int xgbe_phy_i2c_xfer(struct xgbe_prv_data *pdata, 390 383 struct xgbe_i2c_op *i2c_op) 391 384 { 392 - struct xgbe_phy_data *phy_data = pdata->phy_data; 393 - 394 - /* Be sure we own the bus */ 395 - if (WARN_ON(!phy_data->comm_owned)) 396 - return -EIO; 397 - 398 385 return pdata->i2c_if.i2c_xfer(pdata, i2c_op); 399 386 } 400 387 ··· 550 549 551 550 static void xgbe_phy_put_comm_ownership(struct xgbe_prv_data *pdata) 552 551 { 553 - struct xgbe_phy_data *phy_data = pdata->phy_data; 554 - 555 - phy_data->comm_owned = 0; 556 - 557 552 mutex_unlock(&xgbe_phy_comm_lock); 558 553 } 559 554 ··· 558 561 struct xgbe_phy_data *phy_data = pdata->phy_data; 559 562 unsigned long timeout; 560 563 unsigned int mutex_id; 561 - 562 - if (phy_data->comm_owned) 563 - return 0; 564 564 565 565 /* The I2C and MDIO/GPIO bus is multiplexed 
between multiple devices, 566 566 * the driver needs to take the software mutex and then the hardware ··· 587 593 XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id); 588 594 XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id); 589 595 590 - phy_data->comm_owned = 1; 591 596 return 0; 592 597 } 593 598 ··· 860 867 struct xgbe_phy_data *phy_data = pdata->phy_data; 861 868 unsigned int phy_id = phy_data->phydev->phy_id; 862 869 870 + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) 871 + return false; 872 + 863 873 if ((phy_id & 0xfffffff0) != 0x01ff0cc0) 864 874 return false; 865 875 ··· 888 892 return true; 889 893 } 890 894 895 + static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) 896 + { 897 + struct xgbe_phy_data *phy_data = pdata->phy_data; 898 + struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; 899 + unsigned int phy_id = phy_data->phydev->phy_id; 900 + int reg; 901 + 902 + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) 903 + return false; 904 + 905 + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], 906 + XGBE_BEL_FUSE_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN)) 907 + return false; 908 + 909 + /* For Bel-Fuse, use the extra AN flag */ 910 + pdata->an_again = 1; 911 + 912 + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], 913 + XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) 914 + return false; 915 + 916 + if ((phy_id & 0xfffffff0) != 0x03625d10) 917 + return false; 918 + 919 + /* Disable RGMII mode */ 920 + phy_write(phy_data->phydev, 0x18, 0x7007); 921 + reg = phy_read(phy_data->phydev, 0x18); 922 + phy_write(phy_data->phydev, 0x18, reg & ~0x0080); 923 + 924 + /* Enable fiber register bank */ 925 + phy_write(phy_data->phydev, 0x1c, 0x7c00); 926 + reg = phy_read(phy_data->phydev, 0x1c); 927 + reg &= 0x03ff; 928 + reg &= ~0x0001; 929 + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0001); 930 + 931 + /* Power down SerDes */ 932 + reg = phy_read(phy_data->phydev, 0x00); 933 + phy_write(phy_data->phydev, 0x00, reg | 0x00800); 934 
+ 935 + /* Configure SGMII-to-Copper mode */ 936 + phy_write(phy_data->phydev, 0x1c, 0x7c00); 937 + reg = phy_read(phy_data->phydev, 0x1c); 938 + reg &= 0x03ff; 939 + reg &= ~0x0006; 940 + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg | 0x0004); 941 + 942 + /* Power up SerDes */ 943 + reg = phy_read(phy_data->phydev, 0x00); 944 + phy_write(phy_data->phydev, 0x00, reg & ~0x00800); 945 + 946 + /* Enable copper register bank */ 947 + phy_write(phy_data->phydev, 0x1c, 0x7c00); 948 + reg = phy_read(phy_data->phydev, 0x1c); 949 + reg &= 0x03ff; 950 + reg &= ~0x0001; 951 + phy_write(phy_data->phydev, 0x1c, 0x8000 | 0x7c00 | reg); 952 + 953 + /* Power up SerDes */ 954 + reg = phy_read(phy_data->phydev, 0x00); 955 + phy_write(phy_data->phydev, 0x00, reg & ~0x00800); 956 + 957 + phy_data->phydev->supported = PHY_GBIT_FEATURES; 958 + phy_data->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; 959 + phy_data->phydev->advertising = phy_data->phydev->supported; 960 + 961 + netif_dbg(pdata, drv, pdata->netdev, 962 + "BelFuse PHY quirk in place\n"); 963 + 964 + return true; 965 + } 966 + 891 967 static void xgbe_phy_external_phy_quirks(struct xgbe_prv_data *pdata) 892 968 { 969 + if (xgbe_phy_belfuse_phy_quirks(pdata)) 970 + return; 971 + 893 972 if (xgbe_phy_finisar_phy_quirks(pdata)) 894 973 return; 895 974 } ··· 980 909 /* If we already have a PHY, just return */ 981 910 if (phy_data->phydev) 982 911 return 0; 912 + 913 + /* Clear the extra AN flag */ 914 + pdata->an_again = 0; 983 915 984 916 /* Check for the use of an external PHY */ 985 917 if (phy_data->phydev_mode == XGBE_MDIO_MODE_NONE) ··· 1108 1034 return false; 1109 1035 } 1110 1036 1111 - static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) 1112 - { 1113 - struct xgbe_phy_data *phy_data = pdata->phy_data; 1114 - struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; 1115 - 1116 - if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], 1117 - XGBE_BEL_FUSE_VENDOR, 
XGBE_SFP_BASE_VENDOR_NAME_LEN)) 1118 - return false; 1119 - 1120 - if (!memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_PN], 1121 - XGBE_BEL_FUSE_PARTNO, XGBE_SFP_BASE_VENDOR_PN_LEN)) { 1122 - phy_data->sfp_base = XGBE_SFP_BASE_1000_SX; 1123 - phy_data->sfp_cable = XGBE_SFP_CABLE_ACTIVE; 1124 - phy_data->sfp_speed = XGBE_SFP_SPEED_1000; 1125 - if (phy_data->sfp_changed) 1126 - netif_dbg(pdata, drv, pdata->netdev, 1127 - "Bel-Fuse SFP quirk in place\n"); 1128 - return true; 1129 - } 1130 - 1131 - return false; 1132 - } 1133 - 1134 - static bool xgbe_phy_sfp_parse_quirks(struct xgbe_prv_data *pdata) 1135 - { 1136 - if (xgbe_phy_belfuse_parse_quirks(pdata)) 1137 - return true; 1138 - 1139 - return false; 1140 - } 1141 - 1142 1037 static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) 1143 1038 { 1144 1039 struct xgbe_phy_data *phy_data = pdata->phy_data; ··· 1125 1082 /* Update transceiver signals (eeprom extd/options) */ 1126 1083 phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); 1127 1084 phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); 1128 - 1129 - if (xgbe_phy_sfp_parse_quirks(pdata)) 1130 - return; 1131 1085 1132 1086 /* Assume ACTIVE cable unless told it is PASSIVE */ 1133 1087 if (sfp_base[XGBE_SFP_BASE_CABLE] & XGBE_SFP_BASE_CABLE_PASSIVE) { ··· 1267 1227 1268 1228 memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom)); 1269 1229 1270 - if (sfp_eeprom.extd[XGBE_SFP_EXTD_SFF_8472]) { 1271 - u8 diag_type = sfp_eeprom.extd[XGBE_SFP_EXTD_DIAG]; 1272 - 1273 - if (!(diag_type & XGBE_SFP_EXTD_DIAG_ADDR_CHANGE)) 1274 - phy_data->sfp_diags = 1; 1275 - } 1276 - 1277 1230 xgbe_phy_free_phy_device(pdata); 1278 1231 } else { 1279 1232 phy_data->sfp_changed = 0; ··· 1316 1283 phy_data->sfp_rx_los = 0; 1317 1284 phy_data->sfp_tx_fault = 0; 1318 1285 phy_data->sfp_mod_absent = 1; 1319 - phy_data->sfp_diags = 0; 1320 1286 phy_data->sfp_base = XGBE_SFP_BASE_UNKNOWN; 1321 1287 phy_data->sfp_cable = XGBE_SFP_CABLE_UNKNOWN; 1322 1288 
phy_data->sfp_speed = XGBE_SFP_SPEED_UNKNOWN; ··· 1356 1324 xgbe_phy_sfp_phy_settings(pdata); 1357 1325 1358 1326 xgbe_phy_put_comm_ownership(pdata); 1327 + } 1328 + 1329 + static int xgbe_phy_module_eeprom(struct xgbe_prv_data *pdata, 1330 + struct ethtool_eeprom *eeprom, u8 *data) 1331 + { 1332 + struct xgbe_phy_data *phy_data = pdata->phy_data; 1333 + u8 eeprom_addr, eeprom_data[XGBE_SFP_EEPROM_MAX]; 1334 + struct xgbe_sfp_eeprom *sfp_eeprom; 1335 + unsigned int i, j, rem; 1336 + int ret; 1337 + 1338 + rem = eeprom->len; 1339 + 1340 + if (!eeprom->len) { 1341 + ret = -EINVAL; 1342 + goto done; 1343 + } 1344 + 1345 + if ((eeprom->offset + eeprom->len) > XGBE_SFP_EEPROM_MAX) { 1346 + ret = -EINVAL; 1347 + goto done; 1348 + } 1349 + 1350 + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) { 1351 + ret = -ENXIO; 1352 + goto done; 1353 + } 1354 + 1355 + if (!netif_running(pdata->netdev)) { 1356 + ret = -EIO; 1357 + goto done; 1358 + } 1359 + 1360 + if (phy_data->sfp_mod_absent) { 1361 + ret = -EIO; 1362 + goto done; 1363 + } 1364 + 1365 + ret = xgbe_phy_get_comm_ownership(pdata); 1366 + if (ret) { 1367 + ret = -EIO; 1368 + goto done; 1369 + } 1370 + 1371 + ret = xgbe_phy_sfp_get_mux(pdata); 1372 + if (ret) { 1373 + netdev_err(pdata->netdev, "I2C error setting SFP MUX\n"); 1374 + ret = -EIO; 1375 + goto put_own; 1376 + } 1377 + 1378 + /* Read the SFP serial ID eeprom */ 1379 + eeprom_addr = 0; 1380 + ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_SERIAL_ID_ADDRESS, 1381 + &eeprom_addr, sizeof(eeprom_addr), 1382 + eeprom_data, XGBE_SFP_EEPROM_BASE_LEN); 1383 + if (ret) { 1384 + netdev_err(pdata->netdev, 1385 + "I2C error reading SFP EEPROM\n"); 1386 + ret = -EIO; 1387 + goto put_mux; 1388 + } 1389 + 1390 + sfp_eeprom = (struct xgbe_sfp_eeprom *)eeprom_data; 1391 + 1392 + if (XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) { 1393 + /* Read the SFP diagnostic eeprom */ 1394 + eeprom_addr = 0; 1395 + ret = xgbe_phy_i2c_read(pdata, XGBE_SFP_DIAG_INFO_ADDRESS, 1396 + &eeprom_addr, 
sizeof(eeprom_addr), 1397 + eeprom_data + XGBE_SFP_EEPROM_BASE_LEN, 1398 + XGBE_SFP_EEPROM_DIAG_LEN); 1399 + if (ret) { 1400 + netdev_err(pdata->netdev, 1401 + "I2C error reading SFP DIAGS\n"); 1402 + ret = -EIO; 1403 + goto put_mux; 1404 + } 1405 + } 1406 + 1407 + for (i = 0, j = eeprom->offset; i < eeprom->len; i++, j++) { 1408 + if ((j >= XGBE_SFP_EEPROM_BASE_LEN) && 1409 + !XGBE_SFP_DIAGS_SUPPORTED(sfp_eeprom)) 1410 + break; 1411 + 1412 + data[i] = eeprom_data[j]; 1413 + rem--; 1414 + } 1415 + 1416 + put_mux: 1417 + xgbe_phy_sfp_put_mux(pdata); 1418 + 1419 + put_own: 1420 + xgbe_phy_put_comm_ownership(pdata); 1421 + 1422 + done: 1423 + eeprom->len -= rem; 1424 + 1425 + return ret; 1426 + } 1427 + 1428 + static int xgbe_phy_module_info(struct xgbe_prv_data *pdata, 1429 + struct ethtool_modinfo *modinfo) 1430 + { 1431 + struct xgbe_phy_data *phy_data = pdata->phy_data; 1432 + 1433 + if (phy_data->port_mode != XGBE_PORT_MODE_SFP) 1434 + return -ENXIO; 1435 + 1436 + if (!netif_running(pdata->netdev)) 1437 + return -EIO; 1438 + 1439 + if (phy_data->sfp_mod_absent) 1440 + return -EIO; 1441 + 1442 + if (XGBE_SFP_DIAGS_SUPPORTED(&phy_data->sfp_eeprom)) { 1443 + modinfo->type = ETH_MODULE_SFF_8472; 1444 + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 1445 + } else { 1446 + modinfo->type = ETH_MODULE_SFF_8079; 1447 + modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; 1448 + } 1449 + 1450 + return 0; 1359 1451 } 1360 1452 1361 1453 static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) ··· 1766 1610 /* With the KR re-driver we need to advertise a single speed */ 1767 1611 XGBE_CLR_ADV(dlks, 1000baseKX_Full); 1768 1612 XGBE_CLR_ADV(dlks, 10000baseKR_Full); 1613 + 1614 + /* Advertise FEC support is present */ 1615 + if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE) 1616 + XGBE_SET_ADV(dlks, 10000baseR_FEC); 1769 1617 1770 1618 switch (phy_data->port_mode) { 1771 1619 case XGBE_PORT_MODE_BACKPLANE: ··· 2581 2421 static void xgbe_phy_sfp_gpio_setup(struct 
xgbe_prv_data *pdata) 2582 2422 { 2583 2423 struct xgbe_phy_data *phy_data = pdata->phy_data; 2584 - unsigned int reg; 2585 - 2586 - reg = XP_IOREAD(pdata, XP_PROP_3); 2587 2424 2588 2425 phy_data->sfp_gpio_address = XGBE_GPIO_ADDRESS_PCA9555 + 2589 - XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR); 2426 + XP_GET_BITS(pdata->pp3, XP_PROP_3, 2427 + GPIO_ADDR); 2590 2428 2591 - phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK); 2429 + phy_data->sfp_gpio_mask = XP_GET_BITS(pdata->pp3, XP_PROP_3, 2430 + GPIO_MASK); 2592 2431 2593 - phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3, 2432 + phy_data->sfp_gpio_rx_los = XP_GET_BITS(pdata->pp3, XP_PROP_3, 2594 2433 GPIO_RX_LOS); 2595 - phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3, 2434 + phy_data->sfp_gpio_tx_fault = XP_GET_BITS(pdata->pp3, XP_PROP_3, 2596 2435 GPIO_TX_FAULT); 2597 - phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3, 2436 + phy_data->sfp_gpio_mod_absent = XP_GET_BITS(pdata->pp3, XP_PROP_3, 2598 2437 GPIO_MOD_ABS); 2599 - phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3, 2438 + phy_data->sfp_gpio_rate_select = XP_GET_BITS(pdata->pp3, XP_PROP_3, 2600 2439 GPIO_RATE_SELECT); 2601 2440 2602 2441 if (netif_msg_probe(pdata)) { ··· 2617 2458 static void xgbe_phy_sfp_comm_setup(struct xgbe_prv_data *pdata) 2618 2459 { 2619 2460 struct xgbe_phy_data *phy_data = pdata->phy_data; 2620 - unsigned int reg, mux_addr_hi, mux_addr_lo; 2461 + unsigned int mux_addr_hi, mux_addr_lo; 2621 2462 2622 - reg = XP_IOREAD(pdata, XP_PROP_4); 2623 - 2624 - mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI); 2625 - mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO); 2463 + mux_addr_hi = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_HI); 2464 + mux_addr_lo = XP_GET_BITS(pdata->pp4, XP_PROP_4, MUX_ADDR_LO); 2626 2465 if (mux_addr_lo == XGBE_SFP_DIRECT) 2627 2466 return; 2628 2467 2629 2468 phy_data->sfp_comm = XGBE_SFP_COMM_PCA9545; 2630 2469 phy_data->sfp_mux_address = (mux_addr_hi << 2) 
+ mux_addr_lo; 2631 - phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN); 2470 + phy_data->sfp_mux_channel = XP_GET_BITS(pdata->pp4, XP_PROP_4, 2471 + MUX_CHAN); 2632 2472 2633 2473 if (netif_msg_probe(pdata)) { 2634 2474 dev_dbg(pdata->dev, "SFP: mux_address=%#x\n", ··· 2750 2592 static int xgbe_phy_mdio_reset_setup(struct xgbe_prv_data *pdata) 2751 2593 { 2752 2594 struct xgbe_phy_data *phy_data = pdata->phy_data; 2753 - unsigned int reg; 2754 2595 2755 2596 if (phy_data->conn_type != XGBE_CONN_TYPE_MDIO) 2756 2597 return 0; 2757 2598 2758 - reg = XP_IOREAD(pdata, XP_PROP_3); 2759 - phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET); 2599 + phy_data->mdio_reset = XP_GET_BITS(pdata->pp3, XP_PROP_3, MDIO_RESET); 2760 2600 switch (phy_data->mdio_reset) { 2761 2601 case XGBE_MDIO_RESET_NONE: 2762 2602 case XGBE_MDIO_RESET_I2C_GPIO: ··· 2768 2612 2769 2613 if (phy_data->mdio_reset == XGBE_MDIO_RESET_I2C_GPIO) { 2770 2614 phy_data->mdio_reset_addr = XGBE_GPIO_ADDRESS_PCA9555 + 2771 - XP_GET_BITS(reg, XP_PROP_3, 2615 + XP_GET_BITS(pdata->pp3, XP_PROP_3, 2772 2616 MDIO_RESET_I2C_ADDR); 2773 - phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, 2617 + phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, 2774 2618 MDIO_RESET_I2C_GPIO); 2775 2619 } else if (phy_data->mdio_reset == XGBE_MDIO_RESET_INT_GPIO) { 2776 - phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3, 2620 + phy_data->mdio_reset_gpio = XP_GET_BITS(pdata->pp3, XP_PROP_3, 2777 2621 MDIO_RESET_INT_GPIO); 2778 2622 } 2779 2623 ··· 2863 2707 2864 2708 static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) 2865 2709 { 2866 - unsigned int reg; 2867 - 2868 - reg = XP_IOREAD(pdata, XP_PROP_0); 2869 - if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS)) 2710 + if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS)) 2870 2711 return false; 2871 - if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE)) 2712 + if (!XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE)) 2872 2713 return false; 
2873 2714 2874 2715 return true; ··· 3074 2921 struct ethtool_link_ksettings *lks = &pdata->phy.lks; 3075 2922 struct xgbe_phy_data *phy_data; 3076 2923 struct mii_bus *mii; 3077 - unsigned int reg; 3078 2924 int ret; 3079 2925 3080 2926 /* Check if enabled */ ··· 3092 2940 return -ENOMEM; 3093 2941 pdata->phy_data = phy_data; 3094 2942 3095 - reg = XP_IOREAD(pdata, XP_PROP_0); 3096 - phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE); 3097 - phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID); 3098 - phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS); 3099 - phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE); 3100 - phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR); 2943 + phy_data->port_mode = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_MODE); 2944 + phy_data->port_id = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_ID); 2945 + phy_data->port_speeds = XP_GET_BITS(pdata->pp0, XP_PROP_0, PORT_SPEEDS); 2946 + phy_data->conn_type = XP_GET_BITS(pdata->pp0, XP_PROP_0, CONN_TYPE); 2947 + phy_data->mdio_addr = XP_GET_BITS(pdata->pp0, XP_PROP_0, MDIO_ADDR); 3101 2948 if (netif_msg_probe(pdata)) { 3102 2949 dev_dbg(pdata->dev, "port mode=%u\n", phy_data->port_mode); 3103 2950 dev_dbg(pdata->dev, "port id=%u\n", phy_data->port_id); ··· 3105 2954 dev_dbg(pdata->dev, "mdio addr=%u\n", phy_data->mdio_addr); 3106 2955 } 3107 2956 3108 - reg = XP_IOREAD(pdata, XP_PROP_4); 3109 - phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT); 3110 - phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF); 3111 - phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR); 3112 - phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE); 3113 - phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL); 2957 + phy_data->redrv = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_PRESENT); 2958 + phy_data->redrv_if = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_IF); 2959 + phy_data->redrv_addr = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_ADDR); 
2960 + phy_data->redrv_lane = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_LANE); 2961 + phy_data->redrv_model = XP_GET_BITS(pdata->pp4, XP_PROP_4, REDRV_MODEL); 3114 2962 if (phy_data->redrv && netif_msg_probe(pdata)) { 3115 2963 dev_dbg(pdata->dev, "redrv present\n"); 3116 2964 dev_dbg(pdata->dev, "redrv i/f=%u\n", phy_data->redrv_if); ··· 3381 3231 3382 3232 phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; 3383 3233 phy_impl->kr_training_post = xgbe_phy_kr_training_post; 3234 + 3235 + phy_impl->module_info = xgbe_phy_module_info; 3236 + phy_impl->module_eeprom = xgbe_phy_module_eeprom; 3384 3237 }
+31
drivers/net/ethernet/amd/xgbe/xgbe.h
··· 144 144 #define XGBE_TX_DESC_MAX_PROC (XGBE_TX_DESC_CNT >> 1) 145 145 #define XGBE_RX_DESC_CNT 512 146 146 147 + #define XGBE_TX_DESC_CNT_MIN 64 148 + #define XGBE_TX_DESC_CNT_MAX 4096 149 + #define XGBE_RX_DESC_CNT_MIN 64 150 + #define XGBE_RX_DESC_CNT_MAX 4096 151 + 147 152 #define XGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1)) 148 153 149 154 /* Descriptors required for maximum contiguous TSO/GSO packet */ ··· 840 835 * Optional routines: 841 836 * an_pre, an_post 842 837 * kr_training_pre, kr_training_post 838 + * module_info, module_eeprom 843 839 */ 844 840 struct xgbe_phy_impl_if { 845 841 /* Perform Setup/teardown actions */ ··· 889 883 /* Pre/Post KR training enablement support */ 890 884 void (*kr_training_pre)(struct xgbe_prv_data *); 891 885 void (*kr_training_post)(struct xgbe_prv_data *); 886 + 887 + /* SFP module related info */ 888 + int (*module_info)(struct xgbe_prv_data *pdata, 889 + struct ethtool_modinfo *modinfo); 890 + int (*module_eeprom)(struct xgbe_prv_data *pdata, 891 + struct ethtool_eeprom *eeprom, u8 *data); 892 892 }; 893 893 894 894 struct xgbe_phy_if { ··· 916 904 917 905 /* For single interrupt support */ 918 906 irqreturn_t (*an_isr)(struct xgbe_prv_data *); 907 + 908 + /* For ethtool PHY support */ 909 + int (*module_info)(struct xgbe_prv_data *pdata, 910 + struct ethtool_modinfo *modinfo); 911 + int (*module_eeprom)(struct xgbe_prv_data *pdata, 912 + struct ethtool_eeprom *eeprom, u8 *data); 919 913 920 914 /* PHY implementation specific services */ 921 915 struct xgbe_phy_impl_if phy_impl; ··· 1045 1027 void __iomem *xprop_regs; /* XGBE property registers */ 1046 1028 void __iomem *xi2c_regs; /* XGBE I2C CSRs */ 1047 1029 1030 + /* Port property registers */ 1031 + unsigned int pp0; 1032 + unsigned int pp1; 1033 + unsigned int pp2; 1034 + unsigned int pp3; 1035 + unsigned int pp4; 1036 + 1048 1037 /* Overall device lock */ 1049 1038 spinlock_t lock; 1050 1039 ··· 1121 1096 unsigned int tx_desc_count; 1122 1097 unsigned int 
rx_ring_count; 1123 1098 unsigned int rx_desc_count; 1099 + 1100 + unsigned int new_tx_ring_count; 1101 + unsigned int new_rx_ring_count; 1124 1102 1125 1103 unsigned int tx_max_q_count; 1126 1104 unsigned int rx_max_q_count; ··· 1261 1233 enum xgbe_rx kr_state; 1262 1234 enum xgbe_rx kx_state; 1263 1235 struct work_struct an_work; 1236 + unsigned int an_again; 1264 1237 unsigned int an_supported; 1265 1238 unsigned int parallel_detect; 1266 1239 unsigned int fec_ability; ··· 1339 1310 int xgbe_powerdown(struct net_device *, unsigned int); 1340 1311 void xgbe_init_rx_coalesce(struct xgbe_prv_data *); 1341 1312 void xgbe_init_tx_coalesce(struct xgbe_prv_data *); 1313 + void xgbe_restart_dev(struct xgbe_prv_data *pdata); 1314 + void xgbe_full_restart_dev(struct xgbe_prv_data *pdata); 1342 1315 1343 1316 #ifdef CONFIG_DEBUG_FS 1344 1317 void xgbe_debugfs_init(struct xgbe_prv_data *);