Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mvneta-sgmii'

Stas Sergeev says:

====================
mvneta: SGMII-based in-band link state signaling

Currently the fixed-link DT binding is pre-configured and
cannot be changed at run time. This means that cable unplug
events are not detected, and the link parameters can't
be negotiated.

The following patches are needed when mvneta is used
in fixed-link mode (without MDIO).
They add an API to fixed_phy that allows the link status
to be updated, and use that API in the mvneta driver when
parsing the SGMII in-band status.

There is also another implementation that doesn't add any API
and does everything in mvneta driver locally:
https://lkml.org/lkml/2015/3/31/327
I'll let people decide which approach is better.
No strong opinion on my side.
====================

Signed-off-by: Stas Sergeev <stsp@users.sourceforge.net>
Signed-off-by: David S. Miller <davem@davemloft.net>

+133 -11
+95 -11
drivers/net/ethernet/marvell/mvneta.c
··· 100 100 #define MVNETA_TXQ_CMD 0x2448 101 101 #define MVNETA_TXQ_DISABLE_SHIFT 8 102 102 #define MVNETA_TXQ_ENABLE_MASK 0x000000ff 103 + #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 104 + #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) 103 105 #define MVNETA_ACC_MODE 0x2500 104 106 #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) 105 107 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff ··· 124 122 #define MVNETA_TX_INTR_MASK_ALL (0xff << 0) 125 123 #define MVNETA_RX_INTR_MASK(nr_rxqs) (((1 << nr_rxqs) - 1) << 8) 126 124 #define MVNETA_RX_INTR_MASK_ALL (0xff << 8) 125 + #define MVNETA_MISCINTR_INTR_MASK BIT(31) 127 126 128 127 #define MVNETA_INTR_OLD_CAUSE 0x25a8 129 128 #define MVNETA_INTR_OLD_MASK 0x25ac ··· 168 165 #define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc 169 166 #define MVNETA_GMAC0_PORT_ENABLE BIT(0) 170 167 #define MVNETA_GMAC_CTRL_2 0x2c08 168 + #define MVNETA_GMAC2_INBAND_AN_ENABLE BIT(0) 171 169 #define MVNETA_GMAC2_PCS_ENABLE BIT(3) 172 170 #define MVNETA_GMAC2_PORT_RGMII BIT(4) 173 171 #define MVNETA_GMAC2_PORT_RESET BIT(6) ··· 184 180 #define MVNETA_GMAC_AUTONEG_CONFIG 0x2c0c 185 181 #define MVNETA_GMAC_FORCE_LINK_DOWN BIT(0) 186 182 #define MVNETA_GMAC_FORCE_LINK_PASS BIT(1) 183 + #define MVNETA_GMAC_INBAND_AN_ENABLE BIT(2) 187 184 #define MVNETA_GMAC_CONFIG_MII_SPEED BIT(5) 188 185 #define MVNETA_GMAC_CONFIG_GMII_SPEED BIT(6) 189 186 #define MVNETA_GMAC_AN_SPEED_EN BIT(7) 187 + #define MVNETA_GMAC_AN_FLOW_CTRL_EN BIT(11) 190 188 #define MVNETA_GMAC_CONFIG_FULL_DUPLEX BIT(12) 191 189 #define MVNETA_GMAC_AN_DUPLEX_EN BIT(13) 192 190 #define MVNETA_MIB_COUNTERS_BASE 0x3080 ··· 310 304 unsigned int link; 311 305 unsigned int duplex; 312 306 unsigned int speed; 307 + int use_inband_status:1; 313 308 }; 314 309 315 310 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the ··· 1000 993 val = mvreg_read(pp, MVNETA_UNIT_CONTROL); 1001 994 val &= ~MVNETA_PHY_POLLING_ENABLE; 1002 995 mvreg_write(pp, MVNETA_UNIT_CONTROL, val); 996 + 997 + if 
(pp->use_inband_status) { 998 + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 999 + val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | 1000 + MVNETA_GMAC_FORCE_LINK_DOWN | 1001 + MVNETA_GMAC_AN_FLOW_CTRL_EN); 1002 + val |= MVNETA_GMAC_INBAND_AN_ENABLE | 1003 + MVNETA_GMAC_AN_SPEED_EN | 1004 + MVNETA_GMAC_AN_DUPLEX_EN; 1005 + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 1006 + val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); 1007 + val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; 1008 + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); 1009 + } 1003 1010 1004 1011 mvneta_set_ucast_table(pp, -1); 1005 1012 mvneta_set_special_mcast_table(pp, -1); ··· 2064 2043 return IRQ_HANDLED; 2065 2044 } 2066 2045 2046 + static int mvneta_fixed_link_update(struct mvneta_port *pp, 2047 + struct phy_device *phy) 2048 + { 2049 + struct fixed_phy_status status; 2050 + struct fixed_phy_status changed = {}; 2051 + u32 gmac_stat = mvreg_read(pp, MVNETA_GMAC_STATUS); 2052 + 2053 + status.link = !!(gmac_stat & MVNETA_GMAC_LINK_UP); 2054 + if (gmac_stat & MVNETA_GMAC_SPEED_1000) 2055 + status.speed = SPEED_1000; 2056 + else if (gmac_stat & MVNETA_GMAC_SPEED_100) 2057 + status.speed = SPEED_100; 2058 + else 2059 + status.speed = SPEED_10; 2060 + status.duplex = !!(gmac_stat & MVNETA_GMAC_FULL_DUPLEX); 2061 + changed.link = 1; 2062 + changed.speed = 1; 2063 + changed.duplex = 1; 2064 + fixed_phy_update_state(phy, &status, &changed); 2065 + return 0; 2066 + } 2067 + 2067 2068 /* NAPI handler 2068 2069 * Bits 0 - 7 of the causeRxTx register indicate that are transmitted 2069 2070 * packets on the corresponding TXQ (Bit 0 is for TX queue 1). 
··· 2106 2063 } 2107 2064 2108 2065 /* Read cause register */ 2109 - cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE) & 2110 - (MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2066 + cause_rx_tx = mvreg_read(pp, MVNETA_INTR_NEW_CAUSE); 2067 + if (cause_rx_tx & MVNETA_MISCINTR_INTR_MASK) { 2068 + u32 cause_misc = mvreg_read(pp, MVNETA_INTR_MISC_CAUSE); 2069 + 2070 + mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); 2071 + if (pp->use_inband_status && (cause_misc & 2072 + (MVNETA_CAUSE_PHY_STATUS_CHANGE | 2073 + MVNETA_CAUSE_LINK_CHANGE | 2074 + MVNETA_CAUSE_PSC_SYNC_CHANGE))) { 2075 + mvneta_fixed_link_update(pp, pp->phy_dev); 2076 + } 2077 + } 2111 2078 2112 2079 /* Release Tx descriptors */ 2113 2080 if (cause_rx_tx & MVNETA_TX_INTR_MASK_ALL) { ··· 2162 2109 napi_complete(napi); 2163 2110 local_irq_save(flags); 2164 2111 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2165 - MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2112 + MVNETA_RX_INTR_MASK(rxq_number) | 2113 + MVNETA_TX_INTR_MASK(txq_number) | 2114 + MVNETA_MISCINTR_INTR_MASK); 2166 2115 local_irq_restore(flags); 2167 2116 } 2168 2117 ··· 2428 2373 2429 2374 /* Unmask interrupts */ 2430 2375 mvreg_write(pp, MVNETA_INTR_NEW_MASK, 2431 - MVNETA_RX_INTR_MASK(rxq_number) | MVNETA_TX_INTR_MASK(txq_number)); 2376 + MVNETA_RX_INTR_MASK(rxq_number) | 2377 + MVNETA_TX_INTR_MASK(txq_number) | 2378 + MVNETA_MISCINTR_INTR_MASK); 2379 + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 2380 + MVNETA_CAUSE_PHY_STATUS_CHANGE | 2381 + MVNETA_CAUSE_LINK_CHANGE | 2382 + MVNETA_CAUSE_PSC_SYNC_CHANGE); 2432 2383 2433 2384 phy_start(pp->phy_dev); 2434 2385 netif_tx_start_all_queues(pp->dev); ··· 2584 2523 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2585 2524 val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | 2586 2525 MVNETA_GMAC_CONFIG_GMII_SPEED | 2587 - MVNETA_GMAC_CONFIG_FULL_DUPLEX | 2588 - MVNETA_GMAC_AN_SPEED_EN | 2589 - MVNETA_GMAC_AN_DUPLEX_EN); 2526 + MVNETA_GMAC_CONFIG_FULL_DUPLEX); 2590 2527 2591 2528 
if (phydev->duplex) 2592 2529 val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; ··· 2613 2554 2614 2555 if (status_change) { 2615 2556 if (phydev->link) { 2616 - u32 val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); 2617 - val |= (MVNETA_GMAC_FORCE_LINK_PASS | 2618 - MVNETA_GMAC_FORCE_LINK_DOWN); 2619 - mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2557 + if (!pp->use_inband_status) { 2558 + u32 val = mvreg_read(pp, 2559 + MVNETA_GMAC_AUTONEG_CONFIG); 2560 + val &= ~MVNETA_GMAC_FORCE_LINK_DOWN; 2561 + val |= MVNETA_GMAC_FORCE_LINK_PASS; 2562 + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 2563 + val); 2564 + } 2620 2565 mvneta_port_up(pp); 2621 2566 } else { 2567 + if (!pp->use_inband_status) { 2568 + u32 val = mvreg_read(pp, 2569 + MVNETA_GMAC_AUTONEG_CONFIG); 2570 + val &= ~MVNETA_GMAC_FORCE_LINK_PASS; 2571 + val |= MVNETA_GMAC_FORCE_LINK_DOWN; 2572 + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, 2573 + val); 2574 + } 2622 2575 mvneta_port_down(pp); 2623 2576 } 2624 2577 phy_print_status(phydev); ··· 2981 2910 return -EINVAL; 2982 2911 } 2983 2912 2913 + if (pp->use_inband_status) 2914 + ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE; 2915 + 2984 2916 /* Cancel Port Reset */ 2985 2917 ctrl &= ~MVNETA_GMAC2_PORT_RESET; 2986 2918 mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); ··· 3008 2934 char hw_mac_addr[ETH_ALEN]; 3009 2935 const char *mac_from; 3010 2936 int phy_mode; 2937 + int fixed_phy = 0; 3011 2938 int err; 3012 2939 3013 2940 /* Our multiqueue support is not complete, so for now, only ··· 3042 2967 dev_err(&pdev->dev, "cannot register fixed PHY\n"); 3043 2968 goto err_free_irq; 3044 2969 } 2970 + fixed_phy = 1; 3045 2971 3046 2972 /* In the case of a fixed PHY, the DT node associated 3047 2973 * to the PHY is the Ethernet MAC DT node. 
··· 3066 2990 pp = netdev_priv(dev); 3067 2991 pp->phy_node = phy_node; 3068 2992 pp->phy_interface = phy_mode; 2993 + pp->use_inband_status = (phy_mode == PHY_INTERFACE_MODE_SGMII) && 2994 + fixed_phy; 3069 2995 3070 2996 pp->clk = devm_clk_get(&pdev->dev, NULL); 3071 2997 if (IS_ERR(pp->clk)) { ··· 3144 3066 dev->dev_addr); 3145 3067 3146 3068 platform_set_drvdata(pdev, pp->dev); 3069 + 3070 + if (pp->use_inband_status) { 3071 + struct phy_device *phy = of_phy_find_device(dn); 3072 + 3073 + mvneta_fixed_link_update(pp, phy); 3074 + } 3147 3075 3148 3076 return 0; 3149 3077
+29
drivers/net/phy/fixed_phy.c
··· 183 183 } 184 184 EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); 185 185 186 + int fixed_phy_update_state(struct phy_device *phydev, 187 + const struct fixed_phy_status *status, 188 + const struct fixed_phy_status *changed) 189 + { 190 + struct fixed_mdio_bus *fmb = &platform_fmb; 191 + struct fixed_phy *fp; 192 + 193 + if (!phydev || !phydev->bus) 194 + return -EINVAL; 195 + 196 + list_for_each_entry(fp, &fmb->phys, node) { 197 + if (fp->addr == phydev->addr) { 198 + #define _UPD(x) if (changed->x) \ 199 + fp->status.x = status->x 200 + _UPD(link); 201 + _UPD(speed); 202 + _UPD(duplex); 203 + _UPD(pause); 204 + _UPD(asym_pause); 205 + #undef _UPD 206 + fixed_phy_update_regs(fp); 207 + return 0; 208 + } 209 + } 210 + 211 + return -ENOENT; 212 + } 213 + EXPORT_SYMBOL(fixed_phy_update_state); 214 + 186 215 int fixed_phy_add(unsigned int irq, int phy_addr, 187 216 struct fixed_phy_status *status) 188 217 {
+9
include/linux/phy_fixed.h
··· 21 21 extern int fixed_phy_set_link_update(struct phy_device *phydev, 22 22 int (*link_update)(struct net_device *, 23 23 struct fixed_phy_status *)); 24 + extern int fixed_phy_update_state(struct phy_device *phydev, 25 + const struct fixed_phy_status *status, 26 + const struct fixed_phy_status *changed); 24 27 #else 25 28 static inline int fixed_phy_add(unsigned int irq, int phy_id, 26 29 struct fixed_phy_status *status) ··· 43 40 static inline int fixed_phy_set_link_update(struct phy_device *phydev, 44 41 int (*link_update)(struct net_device *, 45 42 struct fixed_phy_status *)) 43 + { 44 + return -ENODEV; 45 + } 46 + static inline int fixed_phy_update_state(struct phy_device *phydev, 47 + const struct fixed_phy_status *status, 48 + const struct fixed_phy_status *changed) 46 49 { 47 50 return -ENODEV; 48 51 }