Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'enetc-bd-ring-cleanup'

Vladimir Oltean says:

====================
ENETC BD ring cleanup

The highlights of this patch set are:

- Installing a BPF program and changing PTP RX timestamping settings are
currently implemented through a port reconfiguration procedure which
triggers an AN restart on the PHY, and these procedures are not
generally guaranteed to leave the port in a sane state. Patches 9/12
and 11/12 address that.

- Attempting to put the port down (or trying to reconfigure it) causes
the driver to put up some resistance if it's bombarded with RX traffic
(it won't go down). Patch 12/12 addresses that.

The other 9 patches are just cleanup in the BD ring setup/teardown code,
which gradually brought the driver into a position where resolving
those 2 issues became possible.
====================

Link: https://lore.kernel.org/r/20230117230234.2950873-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+359 -168
+339 -167
drivers/net/ethernet/freescale/enetc/enetc.c
··· 1715 1715 si->hw_features |= ENETC_SI_F_PSFP; 1716 1716 } 1717 1717 1718 - static int enetc_dma_alloc_bdr(struct enetc_bdr *r, size_t bd_size) 1718 + static int enetc_dma_alloc_bdr(struct enetc_bdr_resource *res) 1719 1719 { 1720 - r->bd_base = dma_alloc_coherent(r->dev, r->bd_count * bd_size, 1721 - &r->bd_dma_base, GFP_KERNEL); 1722 - if (!r->bd_base) 1720 + size_t bd_base_size = res->bd_count * res->bd_size; 1721 + 1722 + res->bd_base = dma_alloc_coherent(res->dev, bd_base_size, 1723 + &res->bd_dma_base, GFP_KERNEL); 1724 + if (!res->bd_base) 1723 1725 return -ENOMEM; 1724 1726 1725 1727 /* h/w requires 128B alignment */ 1726 - if (!IS_ALIGNED(r->bd_dma_base, 128)) { 1727 - dma_free_coherent(r->dev, r->bd_count * bd_size, r->bd_base, 1728 - r->bd_dma_base); 1728 + if (!IS_ALIGNED(res->bd_dma_base, 128)) { 1729 + dma_free_coherent(res->dev, bd_base_size, res->bd_base, 1730 + res->bd_dma_base); 1729 1731 return -EINVAL; 1730 1732 } 1731 1733 1732 1734 return 0; 1733 1735 } 1734 1736 1735 - static int enetc_alloc_txbdr(struct enetc_bdr *txr) 1737 + static void enetc_dma_free_bdr(const struct enetc_bdr_resource *res) 1738 + { 1739 + size_t bd_base_size = res->bd_count * res->bd_size; 1740 + 1741 + dma_free_coherent(res->dev, bd_base_size, res->bd_base, 1742 + res->bd_dma_base); 1743 + } 1744 + 1745 + static int enetc_alloc_tx_resource(struct enetc_bdr_resource *res, 1746 + struct device *dev, size_t bd_count) 1736 1747 { 1737 1748 int err; 1738 1749 1739 - txr->tx_swbd = vzalloc(txr->bd_count * sizeof(struct enetc_tx_swbd)); 1740 - if (!txr->tx_swbd) 1750 + res->dev = dev; 1751 + res->bd_count = bd_count; 1752 + res->bd_size = sizeof(union enetc_tx_bd); 1753 + 1754 + res->tx_swbd = vzalloc(bd_count * sizeof(*res->tx_swbd)); 1755 + if (!res->tx_swbd) 1741 1756 return -ENOMEM; 1742 1757 1743 - err = enetc_dma_alloc_bdr(txr, sizeof(union enetc_tx_bd)); 1758 + err = enetc_dma_alloc_bdr(res); 1744 1759 if (err) 1745 1760 goto err_alloc_bdr; 1746 1761 1747 - 
txr->tso_headers = dma_alloc_coherent(txr->dev, 1748 - txr->bd_count * TSO_HEADER_SIZE, 1749 - &txr->tso_headers_dma, 1762 + res->tso_headers = dma_alloc_coherent(dev, bd_count * TSO_HEADER_SIZE, 1763 + &res->tso_headers_dma, 1750 1764 GFP_KERNEL); 1751 - if (!txr->tso_headers) { 1765 + if (!res->tso_headers) { 1752 1766 err = -ENOMEM; 1753 1767 goto err_alloc_tso; 1754 1768 } 1755 1769 1756 - txr->next_to_clean = 0; 1757 - txr->next_to_use = 0; 1758 - 1759 1770 return 0; 1760 1771 1761 1772 err_alloc_tso: 1762 - dma_free_coherent(txr->dev, txr->bd_count * sizeof(union enetc_tx_bd), 1763 - txr->bd_base, txr->bd_dma_base); 1764 - txr->bd_base = NULL; 1773 + enetc_dma_free_bdr(res); 1765 1774 err_alloc_bdr: 1766 - vfree(txr->tx_swbd); 1767 - txr->tx_swbd = NULL; 1775 + vfree(res->tx_swbd); 1776 + res->tx_swbd = NULL; 1768 1777 1769 1778 return err; 1770 1779 } 1771 1780 1772 - static void enetc_free_txbdr(struct enetc_bdr *txr) 1781 + static void enetc_free_tx_resource(const struct enetc_bdr_resource *res) 1773 1782 { 1774 - int size, i; 1775 - 1776 - for (i = 0; i < txr->bd_count; i++) 1777 - enetc_free_tx_frame(txr, &txr->tx_swbd[i]); 1778 - 1779 - size = txr->bd_count * sizeof(union enetc_tx_bd); 1780 - 1781 - dma_free_coherent(txr->dev, txr->bd_count * TSO_HEADER_SIZE, 1782 - txr->tso_headers, txr->tso_headers_dma); 1783 - txr->tso_headers = NULL; 1784 - 1785 - dma_free_coherent(txr->dev, size, txr->bd_base, txr->bd_dma_base); 1786 - txr->bd_base = NULL; 1787 - 1788 - vfree(txr->tx_swbd); 1789 - txr->tx_swbd = NULL; 1783 + dma_free_coherent(res->dev, res->bd_count * TSO_HEADER_SIZE, 1784 + res->tso_headers, res->tso_headers_dma); 1785 + enetc_dma_free_bdr(res); 1786 + vfree(res->tx_swbd); 1790 1787 } 1791 1788 1792 - static int enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) 1789 + static struct enetc_bdr_resource * 1790 + enetc_alloc_tx_resources(struct enetc_ndev_priv *priv) 1793 1791 { 1792 + struct enetc_bdr_resource *tx_res; 1794 1793 int i, err; 1795 
1794 1796 - for (i = 0; i < priv->num_tx_rings; i++) { 1797 - err = enetc_alloc_txbdr(priv->tx_ring[i]); 1795 + tx_res = kcalloc(priv->num_tx_rings, sizeof(*tx_res), GFP_KERNEL); 1796 + if (!tx_res) 1797 + return ERR_PTR(-ENOMEM); 1798 1798 1799 + for (i = 0; i < priv->num_tx_rings; i++) { 1800 + struct enetc_bdr *tx_ring = priv->tx_ring[i]; 1801 + 1802 + err = enetc_alloc_tx_resource(&tx_res[i], tx_ring->dev, 1803 + tx_ring->bd_count); 1799 1804 if (err) 1800 1805 goto fail; 1801 1806 } 1802 1807 1803 - return 0; 1808 + return tx_res; 1804 1809 1805 1810 fail: 1806 1811 while (i-- > 0) 1807 - enetc_free_txbdr(priv->tx_ring[i]); 1812 + enetc_free_tx_resource(&tx_res[i]); 1808 1813 1809 - return err; 1814 + kfree(tx_res); 1815 + 1816 + return ERR_PTR(err); 1810 1817 } 1811 1818 1812 - static void enetc_free_tx_resources(struct enetc_ndev_priv *priv) 1819 + static void enetc_free_tx_resources(const struct enetc_bdr_resource *tx_res, 1820 + size_t num_resources) 1813 1821 { 1814 - int i; 1822 + size_t i; 1815 1823 1816 - for (i = 0; i < priv->num_tx_rings; i++) 1817 - enetc_free_txbdr(priv->tx_ring[i]); 1824 + for (i = 0; i < num_resources; i++) 1825 + enetc_free_tx_resource(&tx_res[i]); 1826 + 1827 + kfree(tx_res); 1818 1828 } 1819 1829 1820 - static int enetc_alloc_rxbdr(struct enetc_bdr *rxr, bool extended) 1830 + static int enetc_alloc_rx_resource(struct enetc_bdr_resource *res, 1831 + struct device *dev, size_t bd_count, 1832 + bool extended) 1821 1833 { 1822 - size_t size = sizeof(union enetc_rx_bd); 1823 1834 int err; 1824 1835 1825 - rxr->rx_swbd = vzalloc(rxr->bd_count * sizeof(struct enetc_rx_swbd)); 1826 - if (!rxr->rx_swbd) 1836 + res->dev = dev; 1837 + res->bd_count = bd_count; 1838 + res->bd_size = sizeof(union enetc_rx_bd); 1839 + if (extended) 1840 + res->bd_size *= 2; 1841 + 1842 + res->rx_swbd = vzalloc(bd_count * sizeof(struct enetc_rx_swbd)); 1843 + if (!res->rx_swbd) 1827 1844 return -ENOMEM; 1828 1845 1829 - if (extended) 1830 - size *= 2; 1831 - 
1832 - err = enetc_dma_alloc_bdr(rxr, size); 1846 + err = enetc_dma_alloc_bdr(res); 1833 1847 if (err) { 1834 - vfree(rxr->rx_swbd); 1848 + vfree(res->rx_swbd); 1835 1849 return err; 1836 1850 } 1837 1851 1838 - rxr->next_to_clean = 0; 1839 - rxr->next_to_use = 0; 1840 - rxr->next_to_alloc = 0; 1841 - rxr->ext_en = extended; 1842 - 1843 1852 return 0; 1844 1853 } 1845 1854 1846 - static void enetc_free_rxbdr(struct enetc_bdr *rxr) 1855 + static void enetc_free_rx_resource(const struct enetc_bdr_resource *res) 1847 1856 { 1848 - int size; 1849 - 1850 - size = rxr->bd_count * sizeof(union enetc_rx_bd); 1851 - 1852 - dma_free_coherent(rxr->dev, size, rxr->bd_base, rxr->bd_dma_base); 1853 - rxr->bd_base = NULL; 1854 - 1855 - vfree(rxr->rx_swbd); 1856 - rxr->rx_swbd = NULL; 1857 + enetc_dma_free_bdr(res); 1858 + vfree(res->rx_swbd); 1857 1859 } 1858 1860 1859 - static int enetc_alloc_rx_resources(struct enetc_ndev_priv *priv) 1861 + static struct enetc_bdr_resource * 1862 + enetc_alloc_rx_resources(struct enetc_ndev_priv *priv, bool extended) 1860 1863 { 1861 - bool extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 1864 + struct enetc_bdr_resource *rx_res; 1862 1865 int i, err; 1863 1866 1864 - for (i = 0; i < priv->num_rx_rings; i++) { 1865 - err = enetc_alloc_rxbdr(priv->rx_ring[i], extended); 1867 + rx_res = kcalloc(priv->num_rx_rings, sizeof(*rx_res), GFP_KERNEL); 1868 + if (!rx_res) 1869 + return ERR_PTR(-ENOMEM); 1866 1870 1871 + for (i = 0; i < priv->num_rx_rings; i++) { 1872 + struct enetc_bdr *rx_ring = priv->rx_ring[i]; 1873 + 1874 + err = enetc_alloc_rx_resource(&rx_res[i], rx_ring->dev, 1875 + rx_ring->bd_count, extended); 1867 1876 if (err) 1868 1877 goto fail; 1869 1878 } 1870 1879 1871 - return 0; 1880 + return rx_res; 1872 1881 1873 1882 fail: 1874 1883 while (i-- > 0) 1875 - enetc_free_rxbdr(priv->rx_ring[i]); 1884 + enetc_free_rx_resource(&rx_res[i]); 1876 1885 1877 - return err; 1886 + kfree(rx_res); 1887 + 1888 + return ERR_PTR(err); 1878 
1889 } 1879 1890 1880 - static void enetc_free_rx_resources(struct enetc_ndev_priv *priv) 1891 + static void enetc_free_rx_resources(const struct enetc_bdr_resource *rx_res, 1892 + size_t num_resources) 1893 + { 1894 + size_t i; 1895 + 1896 + for (i = 0; i < num_resources; i++) 1897 + enetc_free_rx_resource(&rx_res[i]); 1898 + 1899 + kfree(rx_res); 1900 + } 1901 + 1902 + static void enetc_assign_tx_resource(struct enetc_bdr *tx_ring, 1903 + const struct enetc_bdr_resource *res) 1904 + { 1905 + tx_ring->bd_base = res ? res->bd_base : NULL; 1906 + tx_ring->bd_dma_base = res ? res->bd_dma_base : 0; 1907 + tx_ring->tx_swbd = res ? res->tx_swbd : NULL; 1908 + tx_ring->tso_headers = res ? res->tso_headers : NULL; 1909 + tx_ring->tso_headers_dma = res ? res->tso_headers_dma : 0; 1910 + } 1911 + 1912 + static void enetc_assign_rx_resource(struct enetc_bdr *rx_ring, 1913 + const struct enetc_bdr_resource *res) 1914 + { 1915 + rx_ring->bd_base = res ? res->bd_base : NULL; 1916 + rx_ring->bd_dma_base = res ? res->bd_dma_base : 0; 1917 + rx_ring->rx_swbd = res ? res->rx_swbd : NULL; 1918 + } 1919 + 1920 + static void enetc_assign_tx_resources(struct enetc_ndev_priv *priv, 1921 + const struct enetc_bdr_resource *res) 1881 1922 { 1882 1923 int i; 1883 1924 1884 - for (i = 0; i < priv->num_rx_rings; i++) 1885 - enetc_free_rxbdr(priv->rx_ring[i]); 1925 + if (priv->tx_res) 1926 + enetc_free_tx_resources(priv->tx_res, priv->num_tx_rings); 1927 + 1928 + for (i = 0; i < priv->num_tx_rings; i++) { 1929 + enetc_assign_tx_resource(priv->tx_ring[i], 1930 + res ? 
&res[i] : NULL); 1931 + } 1932 + 1933 + priv->tx_res = res; 1934 + } 1935 + 1936 + static void enetc_assign_rx_resources(struct enetc_ndev_priv *priv, 1937 + const struct enetc_bdr_resource *res) 1938 + { 1939 + int i; 1940 + 1941 + if (priv->rx_res) 1942 + enetc_free_rx_resources(priv->rx_res, priv->num_rx_rings); 1943 + 1944 + for (i = 0; i < priv->num_rx_rings; i++) { 1945 + enetc_assign_rx_resource(priv->rx_ring[i], 1946 + res ? &res[i] : NULL); 1947 + } 1948 + 1949 + priv->rx_res = res; 1886 1950 } 1887 1951 1888 1952 static void enetc_free_tx_ring(struct enetc_bdr *tx_ring) 1889 1953 { 1890 1954 int i; 1891 1955 1892 - if (!tx_ring->tx_swbd) 1893 - return; 1894 - 1895 1956 for (i = 0; i < tx_ring->bd_count; i++) { 1896 1957 struct enetc_tx_swbd *tx_swbd = &tx_ring->tx_swbd[i]; 1897 1958 1898 1959 enetc_free_tx_frame(tx_ring, tx_swbd); 1899 1960 } 1900 - 1901 - tx_ring->next_to_clean = 0; 1902 - tx_ring->next_to_use = 0; 1903 1961 } 1904 1962 1905 1963 static void enetc_free_rx_ring(struct enetc_bdr *rx_ring) 1906 1964 { 1907 1965 int i; 1908 - 1909 - if (!rx_ring->rx_swbd) 1910 - return; 1911 1966 1912 1967 for (i = 0; i < rx_ring->bd_count; i++) { 1913 1968 struct enetc_rx_swbd *rx_swbd = &rx_ring->rx_swbd[i]; ··· 1975 1920 __free_page(rx_swbd->page); 1976 1921 rx_swbd->page = NULL; 1977 1922 } 1978 - 1979 - rx_ring->next_to_clean = 0; 1980 - rx_ring->next_to_use = 0; 1981 - rx_ring->next_to_alloc = 0; 1982 1923 } 1983 1924 1984 1925 static void enetc_free_rxtx_rings(struct enetc_ndev_priv *priv) ··· 2088 2037 /* enable Tx ints by setting pkt thr to 1 */ 2089 2038 enetc_txbdr_wr(hw, idx, ENETC_TBICR0, ENETC_TBICR0_ICEN | 0x1); 2090 2039 2091 - tbmr = ENETC_TBMR_EN | ENETC_TBMR_SET_PRIO(tx_ring->prio); 2040 + tbmr = ENETC_TBMR_SET_PRIO(tx_ring->prio); 2092 2041 if (tx_ring->ndev->features & NETIF_F_HW_VLAN_CTAG_TX) 2093 2042 tbmr |= ENETC_TBMR_VIH; 2094 2043 ··· 2100 2049 tx_ring->idr = hw->reg + ENETC_SITXIDR; 2101 2050 } 2102 2051 2103 - static void 
enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2052 + static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring, 2053 + bool extended) 2104 2054 { 2105 2055 int idx = rx_ring->index; 2106 - u32 rbmr; 2056 + u32 rbmr = 0; 2107 2057 2108 2058 enetc_rxbdr_wr(hw, idx, ENETC_RBBAR0, 2109 2059 lower_32_bits(rx_ring->bd_dma_base)); ··· 2131 2079 /* enable Rx ints by setting pkt thr to 1 */ 2132 2080 enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); 2133 2081 2134 - rbmr = ENETC_RBMR_EN; 2135 - 2082 + rx_ring->ext_en = extended; 2136 2083 if (rx_ring->ext_en) 2137 2084 rbmr |= ENETC_RBMR_BDS; 2138 2085 ··· 2141 2090 rx_ring->rcir = hw->reg + ENETC_BDR(RX, idx, ENETC_RBCIR); 2142 2091 rx_ring->idr = hw->reg + ENETC_SIRXIDR; 2143 2092 2093 + rx_ring->next_to_clean = 0; 2094 + rx_ring->next_to_use = 0; 2095 + rx_ring->next_to_alloc = 0; 2096 + 2144 2097 enetc_lock_mdio(); 2145 2098 enetc_refill_rx_ring(rx_ring, enetc_bd_unused(rx_ring)); 2146 2099 enetc_unlock_mdio(); 2147 2100 2148 - /* enable ring */ 2149 2101 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 2150 2102 } 2151 2103 2152 - static void enetc_setup_bdrs(struct enetc_ndev_priv *priv) 2104 + static void enetc_setup_bdrs(struct enetc_ndev_priv *priv, bool extended) 2153 2105 { 2154 2106 struct enetc_hw *hw = &priv->si->hw; 2155 2107 int i; ··· 2161 2107 enetc_setup_txbdr(hw, priv->tx_ring[i]); 2162 2108 2163 2109 for (i = 0; i < priv->num_rx_rings; i++) 2164 - enetc_setup_rxbdr(hw, priv->rx_ring[i]); 2110 + enetc_setup_rxbdr(hw, priv->rx_ring[i], extended); 2165 2111 } 2166 2112 2167 - static void enetc_clear_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2113 + static void enetc_enable_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2114 + { 2115 + int idx = tx_ring->index; 2116 + u32 tbmr; 2117 + 2118 + tbmr = enetc_txbdr_rd(hw, idx, ENETC_TBMR); 2119 + tbmr |= ENETC_TBMR_EN; 2120 + enetc_txbdr_wr(hw, idx, ENETC_TBMR, tbmr); 2121 + } 2122 + 2123 + static 
void enetc_enable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2124 + { 2125 + int idx = rx_ring->index; 2126 + u32 rbmr; 2127 + 2128 + rbmr = enetc_rxbdr_rd(hw, idx, ENETC_RBMR); 2129 + rbmr |= ENETC_RBMR_EN; 2130 + enetc_rxbdr_wr(hw, idx, ENETC_RBMR, rbmr); 2131 + } 2132 + 2133 + static void enetc_enable_bdrs(struct enetc_ndev_priv *priv) 2134 + { 2135 + struct enetc_hw *hw = &priv->si->hw; 2136 + int i; 2137 + 2138 + for (i = 0; i < priv->num_tx_rings; i++) 2139 + enetc_enable_txbdr(hw, priv->tx_ring[i]); 2140 + 2141 + for (i = 0; i < priv->num_rx_rings; i++) 2142 + enetc_enable_rxbdr(hw, priv->rx_ring[i]); 2143 + } 2144 + 2145 + static void enetc_disable_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2168 2146 { 2169 2147 int idx = rx_ring->index; 2170 2148 ··· 2204 2118 enetc_rxbdr_wr(hw, idx, ENETC_RBMR, 0); 2205 2119 } 2206 2120 2207 - static void enetc_clear_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2121 + static void enetc_disable_txbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) 2208 2122 { 2209 - int delay = 8, timeout = 100; 2210 - int idx = tx_ring->index; 2123 + int idx = rx_ring->index; 2211 2124 2212 2125 /* disable EN bit on ring */ 2213 2126 enetc_txbdr_wr(hw, idx, ENETC_TBMR, 0); 2127 + } 2128 + 2129 + static void enetc_disable_bdrs(struct enetc_ndev_priv *priv) 2130 + { 2131 + struct enetc_hw *hw = &priv->si->hw; 2132 + int i; 2133 + 2134 + for (i = 0; i < priv->num_tx_rings; i++) 2135 + enetc_disable_txbdr(hw, priv->tx_ring[i]); 2136 + 2137 + for (i = 0; i < priv->num_rx_rings; i++) 2138 + enetc_disable_rxbdr(hw, priv->rx_ring[i]); 2139 + } 2140 + 2141 + static void enetc_wait_txbdr(struct enetc_hw *hw, struct enetc_bdr *tx_ring) 2142 + { 2143 + int delay = 8, timeout = 100; 2144 + int idx = tx_ring->index; 2214 2145 2215 2146 /* wait for busy to clear */ 2216 2147 while (delay < timeout && ··· 2241 2138 idx); 2242 2139 } 2243 2140 2244 - static void enetc_clear_bdrs(struct enetc_ndev_priv *priv) 2141 + static 
void enetc_wait_bdrs(struct enetc_ndev_priv *priv) 2245 2142 { 2246 2143 struct enetc_hw *hw = &priv->si->hw; 2247 2144 int i; 2248 2145 2249 2146 for (i = 0; i < priv->num_tx_rings; i++) 2250 - enetc_clear_txbdr(hw, priv->tx_ring[i]); 2251 - 2252 - for (i = 0; i < priv->num_rx_rings; i++) 2253 - enetc_clear_rxbdr(hw, priv->rx_ring[i]); 2254 - 2255 - udelay(1); 2147 + enetc_wait_txbdr(hw, priv->tx_ring[i]); 2256 2148 } 2257 2149 2258 2150 static int enetc_setup_irqs(struct enetc_ndev_priv *priv) ··· 2363 2265 struct ethtool_eee edata; 2364 2266 int err; 2365 2267 2366 - if (!priv->phylink) 2367 - return 0; /* phy-less mode */ 2268 + if (!priv->phylink) { 2269 + /* phy-less mode */ 2270 + netif_carrier_on(ndev); 2271 + return 0; 2272 + } 2368 2273 2369 2274 err = phylink_of_phy_connect(priv->phylink, priv->dev->of_node, 0); 2370 2275 if (err) { ··· 2378 2277 /* disable EEE autoneg, until ENETC driver supports it */ 2379 2278 memset(&edata, 0, sizeof(struct ethtool_eee)); 2380 2279 phylink_ethtool_set_eee(priv->phylink, &edata); 2280 + 2281 + phylink_start(priv->phylink); 2381 2282 2382 2283 return 0; 2383 2284 } ··· 2422 2319 enable_irq(irq); 2423 2320 } 2424 2321 2425 - if (priv->phylink) 2426 - phylink_start(priv->phylink); 2427 - else 2428 - netif_carrier_on(ndev); 2322 + enetc_enable_bdrs(priv); 2429 2323 2430 2324 netif_tx_start_all_queues(ndev); 2431 2325 } ··· 2430 2330 int enetc_open(struct net_device *ndev) 2431 2331 { 2432 2332 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2333 + struct enetc_bdr_resource *tx_res, *rx_res; 2433 2334 int num_stack_tx_queues; 2335 + bool extended; 2434 2336 int err; 2337 + 2338 + extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 2435 2339 2436 2340 err = enetc_setup_irqs(priv); 2437 2341 if (err) ··· 2445 2341 if (err) 2446 2342 goto err_phy_connect; 2447 2343 2448 - err = enetc_alloc_tx_resources(priv); 2449 - if (err) 2344 + tx_res = enetc_alloc_tx_resources(priv); 2345 + if (IS_ERR(tx_res)) { 2346 + err = 
PTR_ERR(tx_res); 2450 2347 goto err_alloc_tx; 2348 + } 2451 2349 2452 - err = enetc_alloc_rx_resources(priv); 2453 - if (err) 2350 + rx_res = enetc_alloc_rx_resources(priv, extended); 2351 + if (IS_ERR(rx_res)) { 2352 + err = PTR_ERR(rx_res); 2454 2353 goto err_alloc_rx; 2354 + } 2455 2355 2456 2356 num_stack_tx_queues = enetc_num_stack_tx_queues(priv); 2457 2357 ··· 2468 2360 goto err_set_queues; 2469 2361 2470 2362 enetc_tx_onestep_tstamp_init(priv); 2471 - enetc_setup_bdrs(priv); 2363 + enetc_assign_tx_resources(priv, tx_res); 2364 + enetc_assign_rx_resources(priv, rx_res); 2365 + enetc_setup_bdrs(priv, extended); 2472 2366 enetc_start(ndev); 2473 2367 2474 2368 return 0; 2475 2369 2476 2370 err_set_queues: 2477 - enetc_free_rx_resources(priv); 2371 + enetc_free_rx_resources(rx_res, priv->num_rx_rings); 2478 2372 err_alloc_rx: 2479 - enetc_free_tx_resources(priv); 2373 + enetc_free_tx_resources(tx_res, priv->num_tx_rings); 2480 2374 err_alloc_tx: 2481 2375 if (priv->phylink) 2482 2376 phylink_disconnect_phy(priv->phylink); ··· 2495 2385 2496 2386 netif_tx_stop_all_queues(ndev); 2497 2387 2388 + enetc_disable_bdrs(priv); 2389 + 2498 2390 for (i = 0; i < priv->bdr_int_num; i++) { 2499 2391 int irq = pci_irq_vector(priv->si->pdev, 2500 2392 ENETC_BDR_INT_BASE_IDX + i); ··· 2506 2394 napi_disable(&priv->int_vector[i]->napi); 2507 2395 } 2508 2396 2509 - if (priv->phylink) 2510 - phylink_stop(priv->phylink); 2511 - else 2512 - netif_carrier_off(ndev); 2397 + enetc_wait_bdrs(priv); 2513 2398 2514 2399 enetc_clear_interrupts(priv); 2515 2400 } ··· 2516 2407 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2517 2408 2518 2409 enetc_stop(ndev); 2519 - enetc_clear_bdrs(priv); 2520 2410 2521 - if (priv->phylink) 2411 + if (priv->phylink) { 2412 + phylink_stop(priv->phylink); 2522 2413 phylink_disconnect_phy(priv->phylink); 2414 + } else { 2415 + netif_carrier_off(ndev); 2416 + } 2417 + 2523 2418 enetc_free_rxtx_rings(priv); 2524 - enetc_free_rx_resources(priv); 2525 - 
enetc_free_tx_resources(priv); 2419 + 2420 + /* Avoids dangling pointers and also frees old resources */ 2421 + enetc_assign_rx_resources(priv, NULL); 2422 + enetc_assign_tx_resources(priv, NULL); 2423 + 2526 2424 enetc_free_irqs(priv); 2527 2425 2528 2426 return 0; 2427 + } 2428 + 2429 + static int enetc_reconfigure(struct enetc_ndev_priv *priv, bool extended, 2430 + int (*cb)(struct enetc_ndev_priv *priv, void *ctx), 2431 + void *ctx) 2432 + { 2433 + struct enetc_bdr_resource *tx_res, *rx_res; 2434 + int err; 2435 + 2436 + ASSERT_RTNL(); 2437 + 2438 + /* If the interface is down, run the callback right away, 2439 + * without reconfiguration. 2440 + */ 2441 + if (!netif_running(priv->ndev)) { 2442 + if (cb) 2443 + cb(priv, ctx); 2444 + 2445 + return 0; 2446 + } 2447 + 2448 + tx_res = enetc_alloc_tx_resources(priv); 2449 + if (IS_ERR(tx_res)) { 2450 + err = PTR_ERR(tx_res); 2451 + goto out; 2452 + } 2453 + 2454 + rx_res = enetc_alloc_rx_resources(priv, extended); 2455 + if (IS_ERR(rx_res)) { 2456 + err = PTR_ERR(rx_res); 2457 + goto out_free_tx_res; 2458 + } 2459 + 2460 + enetc_stop(priv->ndev); 2461 + enetc_free_rxtx_rings(priv); 2462 + 2463 + /* Interface is down, run optional callback now */ 2464 + if (cb) 2465 + cb(priv, ctx); 2466 + 2467 + enetc_assign_tx_resources(priv, tx_res); 2468 + enetc_assign_rx_resources(priv, rx_res); 2469 + enetc_setup_bdrs(priv, extended); 2470 + enetc_start(priv->ndev); 2471 + 2472 + return 0; 2473 + 2474 + out_free_tx_res: 2475 + enetc_free_tx_resources(tx_res, priv->num_tx_rings); 2476 + out: 2477 + return err; 2529 2478 } 2530 2479 2531 2480 int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data) ··· 2643 2476 return 0; 2644 2477 } 2645 2478 2646 - static int enetc_setup_xdp_prog(struct net_device *dev, struct bpf_prog *prog, 2647 - struct netlink_ext_ack *extack) 2479 + static int enetc_reconfigure_xdp_cb(struct enetc_ndev_priv *priv, void *ctx) 2648 2480 { 2649 - struct enetc_ndev_priv *priv = netdev_priv(dev); 
2650 - struct bpf_prog *old_prog; 2651 - bool is_up; 2481 + struct bpf_prog *old_prog, *prog = ctx; 2652 2482 int i; 2653 - 2654 - /* The buffer layout is changing, so we need to drain the old 2655 - * RX buffers and seed new ones. 2656 - */ 2657 - is_up = netif_running(dev); 2658 - if (is_up) 2659 - dev_close(dev); 2660 2483 2661 2484 old_prog = xchg(&priv->xdp_prog, prog); 2662 2485 if (old_prog) ··· 2663 2506 rx_ring->buffer_offset = ENETC_RXB_PAD; 2664 2507 } 2665 2508 2666 - if (is_up) 2667 - return dev_open(dev, extack); 2668 - 2669 2509 return 0; 2670 2510 } 2671 2511 2672 - int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp) 2512 + static int enetc_setup_xdp_prog(struct net_device *ndev, struct bpf_prog *prog, 2513 + struct netlink_ext_ack *extack) 2673 2514 { 2674 - switch (xdp->command) { 2515 + struct enetc_ndev_priv *priv = netdev_priv(ndev); 2516 + bool extended; 2517 + 2518 + extended = !!(priv->active_offloads & ENETC_F_RX_TSTAMP); 2519 + 2520 + /* The buffer layout is changing, so we need to drain the old 2521 + * RX buffers and seed new ones. 
2522 + */ 2523 + return enetc_reconfigure(priv, extended, enetc_reconfigure_xdp_cb, prog); 2524 + } 2525 + 2526 + int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf) 2527 + { 2528 + switch (bpf->command) { 2675 2529 case XDP_SETUP_PROG: 2676 - return enetc_setup_xdp_prog(dev, xdp->prog, xdp->extack); 2530 + return enetc_setup_xdp_prog(ndev, bpf->prog, bpf->extack); 2677 2531 default: 2678 2532 return -EINVAL; 2679 2533 } ··· 2779 2611 static int enetc_hwtstamp_set(struct net_device *ndev, struct ifreq *ifr) 2780 2612 { 2781 2613 struct enetc_ndev_priv *priv = netdev_priv(ndev); 2614 + int err, new_offloads = priv->active_offloads; 2782 2615 struct hwtstamp_config config; 2783 - int ao; 2784 2616 2785 2617 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 2786 2618 return -EFAULT; 2787 2619 2788 2620 switch (config.tx_type) { 2789 2621 case HWTSTAMP_TX_OFF: 2790 - priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2622 + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2791 2623 break; 2792 2624 case HWTSTAMP_TX_ON: 2793 - priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2794 - priv->active_offloads |= ENETC_F_TX_TSTAMP; 2625 + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2626 + new_offloads |= ENETC_F_TX_TSTAMP; 2795 2627 break; 2796 2628 case HWTSTAMP_TX_ONESTEP_SYNC: 2797 - priv->active_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2798 - priv->active_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; 2629 + new_offloads &= ~ENETC_F_TX_TSTAMP_MASK; 2630 + new_offloads |= ENETC_F_TX_ONESTEP_SYNC_TSTAMP; 2799 2631 break; 2800 2632 default: 2801 2633 return -ERANGE; 2802 2634 } 2803 2635 2804 - ao = priv->active_offloads; 2805 2636 switch (config.rx_filter) { 2806 2637 case HWTSTAMP_FILTER_NONE: 2807 - priv->active_offloads &= ~ENETC_F_RX_TSTAMP; 2638 + new_offloads &= ~ENETC_F_RX_TSTAMP; 2808 2639 break; 2809 2640 default: 2810 - priv->active_offloads |= ENETC_F_RX_TSTAMP; 2641 + new_offloads |= ENETC_F_RX_TSTAMP; 2811 2642 config.rx_filter = HWTSTAMP_FILTER_ALL; 
2812 2643 } 2813 2644 2814 - if (netif_running(ndev) && ao != priv->active_offloads) { 2815 - enetc_close(ndev); 2816 - enetc_open(ndev); 2645 + if ((new_offloads ^ priv->active_offloads) & ENETC_F_RX_TSTAMP) { 2646 + bool extended = !!(new_offloads & ENETC_F_RX_TSTAMP); 2647 + 2648 + err = enetc_reconfigure(priv, extended, NULL, NULL); 2649 + if (err) 2650 + return err; 2817 2651 } 2652 + 2653 + priv->active_offloads = new_offloads; 2818 2654 2819 2655 return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ? 2820 2656 -EFAULT : 0;
+20 -1
drivers/net/ethernet/freescale/enetc/enetc.h
··· 85 85 #define ENETC_TX_RING_DEFAULT_SIZE 2048 86 86 #define ENETC_DEFAULT_TX_WORK (ENETC_TX_RING_DEFAULT_SIZE / 2) 87 87 88 + struct enetc_bdr_resource { 89 + /* Input arguments saved for teardown */ 90 + struct device *dev; /* for DMA mapping */ 91 + size_t bd_count; 92 + size_t bd_size; 93 + 94 + /* Resource proper */ 95 + void *bd_base; /* points to Rx or Tx BD ring */ 96 + dma_addr_t bd_dma_base; 97 + union { 98 + struct enetc_tx_swbd *tx_swbd; 99 + struct enetc_rx_swbd *rx_swbd; 100 + }; 101 + char *tso_headers; 102 + dma_addr_t tso_headers_dma; 103 + }; 104 + 88 105 struct enetc_bdr { 89 106 struct device *dev; /* for DMA mapping */ 90 107 struct net_device *ndev; ··· 361 344 struct enetc_bdr **xdp_tx_ring; 362 345 struct enetc_bdr *tx_ring[16]; 363 346 struct enetc_bdr *rx_ring[16]; 347 + const struct enetc_bdr_resource *tx_res; 348 + const struct enetc_bdr_resource *rx_res; 364 349 365 350 struct enetc_cls_rule *cls_rules; 366 351 ··· 415 396 void enetc_set_features(struct net_device *ndev, netdev_features_t features); 416 397 int enetc_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd); 417 398 int enetc_setup_tc_mqprio(struct net_device *ndev, void *type_data); 418 - int enetc_setup_bpf(struct net_device *dev, struct netdev_bpf *xdp); 399 + int enetc_setup_bpf(struct net_device *ndev, struct netdev_bpf *bpf); 419 400 int enetc_xdp_xmit(struct net_device *ndev, int num_frames, 420 401 struct xdp_frame **frames, u32 flags); 421 402