Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'net-macb-various-cleanups'

Théo Lebrun says:

====================
net: macb: various cleanups

Fix many oddities inside the MACB driver. They accumulated in my
work-in-progress branch while working on MACB/GEM EyeQ5 support.

Part of this series has been seen on the LKML in March and then in June.
See below for a semblance of a changelog.

The initial goal was to post them alongside EyeQ5 support, but that
makes for too big of a series. It'll come afterwards, with new
features (interrupt coalescing, ethtool .set_channels() and XDP mostly).

[0]: https://lore.kernel.org/lkml/20250627-macb-v2-0-ff8207d0bb77@bootlin.com/
====================

Link: https://patch.msgid.link/20251014-macb-cleanup-v1-0-31cd266e22cd@bootlin.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+151 -201
+4 -4
Documentation/devicetree/bindings/net/cdns,macb.yaml
··· 47 47 - const: cdns,macb # Generic 48 48 49 49 - enum: 50 - - atmel,sama5d29-gem # GEM XL IP (10/100) on Atmel sama5d29 SoCs 51 50 - atmel,sama5d2-gem # GEM IP (10/100) on Atmel sama5d2 SoCs 51 + - atmel,sama5d29-gem # GEM XL IP (10/100) on Atmel sama5d29 SoCs 52 52 - atmel,sama5d3-gem # Gigabit IP on Atmel sama5d3 SoCs 53 53 - atmel,sama5d4-gem # GEM IP (10/100) on Atmel sama5d4 SoCs 54 + - cdns,emac # Generic 55 + - cdns,gem # Generic 56 + - cdns,macb # Generic 54 57 - cdns,np4-macb # NP4 SoC devices 55 58 - microchip,sama7g5-emac # Microchip SAMA7G5 ethernet interface 56 59 - microchip,sama7g5-gem # Microchip SAMA7G5 gigabit ethernet interface 57 60 - raspberrypi,rp1-gem # Raspberry Pi RP1 gigabit ethernet interface 58 61 - sifive,fu540-c000-gem # SiFive FU540-C000 SoC 59 - - cdns,emac # Generic 60 - - cdns,gem # Generic 61 - - cdns,macb # Generic 62 62 63 63 - items: 64 64 - enum:
+35 -36
drivers/net/ethernet/cadence/macb.h
··· 15 15 #include <linux/phy/phy.h> 16 16 #include <linux/workqueue.h> 17 17 18 - #if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT) || defined(CONFIG_MACB_USE_HWSTAMP) 19 - #define MACB_EXT_DESC 20 - #endif 21 - 22 18 #define MACB_GREGS_NBR 16 23 19 #define MACB_GREGS_VERSION 2 24 20 #define MACB_MAX_QUEUES 8 ··· 752 756 #define MACB_MAN_C45_CODE 2 753 757 754 758 /* Capability mask bits */ 755 - #define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001 756 - #define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002 757 - #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004 758 - #define MACB_CAPS_NO_GIGABIT_HALF 0x00000008 759 - #define MACB_CAPS_USRIO_DISABLED 0x00000010 760 - #define MACB_CAPS_JUMBO 0x00000020 761 - #define MACB_CAPS_GEM_HAS_PTP 0x00000040 762 - #define MACB_CAPS_BD_RD_PREFETCH 0x00000080 763 - #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 764 - #define MACB_CAPS_MIIONRGMII 0x00000200 765 - #define MACB_CAPS_NEED_TSUCLK 0x00000400 766 - #define MACB_CAPS_QUEUE_DISABLE 0x00000800 767 - #define MACB_CAPS_QBV 0x00001000 768 - #define MACB_CAPS_PCS 0x01000000 769 - #define MACB_CAPS_HIGH_SPEED 0x02000000 770 - #define MACB_CAPS_CLK_HW_CHG 0x04000000 771 - #define MACB_CAPS_MACB_IS_EMAC 0x08000000 772 - #define MACB_CAPS_FIFO_MODE 0x10000000 773 - #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000 774 - #define MACB_CAPS_SG_DISABLED 0x40000000 775 - #define MACB_CAPS_MACB_IS_GEM 0x80000000 759 + #define MACB_CAPS_ISR_CLEAR_ON_WRITE BIT(0) 760 + #define MACB_CAPS_USRIO_HAS_CLKEN BIT(1) 761 + #define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII BIT(2) 762 + #define MACB_CAPS_NO_GIGABIT_HALF BIT(3) 763 + #define MACB_CAPS_USRIO_DISABLED BIT(4) 764 + #define MACB_CAPS_JUMBO BIT(5) 765 + #define MACB_CAPS_GEM_HAS_PTP BIT(6) 766 + #define MACB_CAPS_BD_RD_PREFETCH BIT(7) 767 + #define MACB_CAPS_NEEDS_RSTONUBR BIT(8) 768 + #define MACB_CAPS_MIIONRGMII BIT(9) 769 + #define MACB_CAPS_NEED_TSUCLK BIT(10) 770 + #define MACB_CAPS_QUEUE_DISABLE BIT(11) 771 + #define MACB_CAPS_QBV BIT(12) 772 + #define 
MACB_CAPS_PCS BIT(13) 773 + #define MACB_CAPS_HIGH_SPEED BIT(14) 774 + #define MACB_CAPS_CLK_HW_CHG BIT(15) 775 + #define MACB_CAPS_MACB_IS_EMAC BIT(16) 776 + #define MACB_CAPS_FIFO_MODE BIT(17) 777 + #define MACB_CAPS_GIGABIT_MODE_AVAILABLE BIT(18) 778 + #define MACB_CAPS_SG_DISABLED BIT(19) 779 + #define MACB_CAPS_MACB_IS_GEM BIT(20) 780 + #define MACB_CAPS_DMA_64B BIT(21) 781 + #define MACB_CAPS_DMA_PTP BIT(22) 776 782 777 783 /* LSO settings */ 778 784 #define MACB_LSO_UFO_ENABLE 0x01 ··· 851 853 u32 ctrl; 852 854 }; 853 855 854 - #ifdef MACB_EXT_DESC 855 - #define HW_DMA_CAP_32B 0 856 - #define HW_DMA_CAP_64B (1 << 0) 857 - #define HW_DMA_CAP_PTP (1 << 1) 858 - #define HW_DMA_CAP_64B_PTP (HW_DMA_CAP_64B | HW_DMA_CAP_PTP) 859 - 860 856 struct macb_dma_desc_64 { 861 857 u32 addrh; 862 858 u32 resvd; ··· 860 868 u32 ts_1; 861 869 u32 ts_2; 862 870 }; 863 - #endif 864 871 865 872 /* DMA descriptor bitfields */ 866 873 #define MACB_RX_USED_OFFSET 0 ··· 1290 1299 unsigned int tx_ring_size; 1291 1300 1292 1301 unsigned int num_queues; 1293 - unsigned int queue_mask; 1294 1302 struct macb_queue queues[MACB_MAX_QUEUES]; 1295 1303 1296 1304 spinlock_t lock; ··· 1339 1349 1340 1350 struct phy *sgmii_phy; /* for ZynqMP SGMII mode */ 1341 1351 1342 - #ifdef MACB_EXT_DESC 1343 - uint8_t hw_dma_cap; 1344 - #endif 1345 1352 spinlock_t tsu_clk_lock; /* gem tsu clock locking */ 1346 1353 unsigned int tsu_rate; 1347 1354 struct ptp_clock *ptp_clock; ··· 1428 1441 { 1429 1442 return DIV_ROUND_UP(GENMASK(GEM_ON_TIME_SIZE - 1, 0) * 1430 1443 ENST_TIME_GRANULARITY_NS * 1000, (speed_mbps)); 1444 + } 1445 + 1446 + static inline bool macb_dma64(struct macb *bp) 1447 + { 1448 + return IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && 1449 + bp->caps & MACB_CAPS_DMA_64B; 1450 + } 1451 + 1452 + static inline bool macb_dma_ptp(struct macb *bp) 1453 + { 1454 + return IS_ENABLED(CONFIG_MACB_USE_HWSTAMP) && 1455 + bp->caps & MACB_CAPS_DMA_PTP; 1431 1456 } 1432 1457 1433 1458 /**
+103 -154
drivers/net/ethernet/cadence/macb_main.c
··· 6 6 */ 7 7 8 8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 9 - #include <linux/clk.h> 9 + #include <linux/circ_buf.h> 10 10 #include <linux/clk-provider.h> 11 + #include <linux/clk.h> 11 12 #include <linux/crc32.h> 13 + #include <linux/dma-mapping.h> 14 + #include <linux/etherdevice.h> 15 + #include <linux/firmware/xlnx-zynqmp.h> 16 + #include <linux/inetdevice.h> 17 + #include <linux/inetdevice.h> 18 + #include <linux/init.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/io.h> 21 + #include <linux/iopoll.h> 22 + #include <linux/ip.h> 23 + #include <linux/kernel.h> 12 24 #include <linux/module.h> 13 25 #include <linux/moduleparam.h> 14 - #include <linux/kernel.h> 15 - #include <linux/types.h> 16 - #include <linux/circ_buf.h> 17 - #include <linux/slab.h> 18 - #include <linux/init.h> 19 - #include <linux/io.h> 20 - #include <linux/interrupt.h> 21 26 #include <linux/netdevice.h> 22 - #include <linux/etherdevice.h> 23 - #include <linux/dma-mapping.h> 24 - #include <linux/platform_device.h> 25 - #include <linux/phylink.h> 26 27 #include <linux/of.h> 27 28 #include <linux/of_mdio.h> 28 29 #include <linux/of_net.h> 29 - #include <linux/ip.h> 30 - #include <linux/udp.h> 31 - #include <linux/tcp.h> 32 - #include <linux/iopoll.h> 33 30 #include <linux/phy/phy.h> 31 + #include <linux/phylink.h> 32 + #include <linux/platform_device.h> 34 33 #include <linux/pm_runtime.h> 35 34 #include <linux/ptp_classify.h> 36 35 #include <linux/reset.h> 37 - #include <linux/firmware/xlnx-zynqmp.h> 38 - #include <linux/inetdevice.h> 36 + #include <linux/slab.h> 37 + #include <linux/tcp.h> 38 + #include <linux/types.h> 39 + #include <linux/udp.h> 39 40 #include <net/pkt_sched.h> 40 41 #include "macb.h" 41 42 ··· 122 121 */ 123 122 static unsigned int macb_dma_desc_get_size(struct macb *bp) 124 123 { 125 - #ifdef MACB_EXT_DESC 126 - unsigned int desc_size; 124 + unsigned int desc_size = sizeof(struct macb_dma_desc); 127 125 128 - switch (bp->hw_dma_cap) { 129 - case HW_DMA_CAP_64B: 
130 - desc_size = sizeof(struct macb_dma_desc) 131 - + sizeof(struct macb_dma_desc_64); 132 - break; 133 - case HW_DMA_CAP_PTP: 134 - desc_size = sizeof(struct macb_dma_desc) 135 - + sizeof(struct macb_dma_desc_ptp); 136 - break; 137 - case HW_DMA_CAP_64B_PTP: 138 - desc_size = sizeof(struct macb_dma_desc) 139 - + sizeof(struct macb_dma_desc_64) 140 - + sizeof(struct macb_dma_desc_ptp); 141 - break; 142 - default: 143 - desc_size = sizeof(struct macb_dma_desc); 144 - } 126 + if (macb_dma64(bp)) 127 + desc_size += sizeof(struct macb_dma_desc_64); 128 + if (macb_dma_ptp(bp)) 129 + desc_size += sizeof(struct macb_dma_desc_ptp); 130 + 145 131 return desc_size; 146 - #endif 147 - return sizeof(struct macb_dma_desc); 148 132 } 149 133 150 134 static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx) 151 135 { 152 - #ifdef MACB_EXT_DESC 153 - switch (bp->hw_dma_cap) { 154 - case HW_DMA_CAP_64B: 155 - case HW_DMA_CAP_PTP: 156 - desc_idx <<= 1; 157 - break; 158 - case HW_DMA_CAP_64B_PTP: 159 - desc_idx *= 3; 160 - break; 161 - default: 162 - break; 163 - } 164 - #endif 165 - return desc_idx; 136 + return desc_idx * (1 + macb_dma64(bp) + macb_dma_ptp(bp)); 166 137 } 167 138 168 - #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 169 139 static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc) 170 140 { 171 141 return (struct macb_dma_desc_64 *)((void *)desc 172 142 + sizeof(struct macb_dma_desc)); 173 143 } 174 - #endif 175 144 176 145 /* Ring buffer accessors */ 177 146 static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index) ··· 463 492 struct macb_queue *queue; 464 493 unsigned int q; 465 494 466 - #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 467 495 /* Single register for all queues' high 32 bits. 
*/ 468 - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 496 + if (macb_dma64(bp)) { 469 497 macb_writel(bp, RBQPH, 470 498 upper_32_bits(bp->queues[0].rx_ring_dma)); 471 499 macb_writel(bp, TBQPH, 472 500 upper_32_bits(bp->queues[0].tx_ring_dma)); 473 501 } 474 - #endif 475 502 476 503 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { 477 504 queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma)); ··· 994 1025 995 1026 static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr) 996 1027 { 997 - #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 998 - struct macb_dma_desc_64 *desc_64; 1028 + if (macb_dma64(bp)) { 1029 + struct macb_dma_desc_64 *desc_64; 999 1030 1000 - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 1001 1031 desc_64 = macb_64b_desc(bp, desc); 1002 1032 desc_64->addrh = upper_32_bits(addr); 1003 1033 /* The low bits of RX address contain the RX_USED bit, clearing ··· 1005 1037 */ 1006 1038 dma_wmb(); 1007 1039 } 1008 - #endif 1040 + 1009 1041 desc->addr = lower_32_bits(addr); 1010 1042 } 1011 1043 1012 1044 static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc) 1013 1045 { 1014 1046 dma_addr_t addr = 0; 1015 - #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 1016 - struct macb_dma_desc_64 *desc_64; 1017 1047 1018 - if (bp->hw_dma_cap & HW_DMA_CAP_64B) { 1048 + if (macb_dma64(bp)) { 1049 + struct macb_dma_desc_64 *desc_64; 1050 + 1019 1051 desc_64 = macb_64b_desc(bp, desc); 1020 1052 addr = ((u64)(desc_64->addrh) << 32); 1021 1053 } 1022 - #endif 1023 1054 addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr)); 1024 - #ifdef CONFIG_MACB_USE_HWSTAMP 1025 - if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 1055 + if (macb_dma_ptp(bp)) 1026 1056 addr &= ~GEM_BIT(DMA_RXVALID); 1027 - #endif 1028 1057 return addr; 1029 1058 } 1030 1059 ··· 1989 2024 struct sk_buff *skb, 1990 2025 unsigned int hdrlen) 1991 2026 { 1992 - dma_addr_t mapping; 1993 - unsigned int len, entry, i, tx_head = queue->tx_head; 2027 + unsigned int f, nr_frags = 
skb_shinfo(skb)->nr_frags; 2028 + unsigned int len, i, tx_head = queue->tx_head; 2029 + u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 2030 + unsigned int eof = 1, mss_mfs = 0; 1994 2031 struct macb_tx_skb *tx_skb = NULL; 1995 2032 struct macb_dma_desc *desc; 1996 - unsigned int offset, size, count = 0; 1997 - unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags; 1998 - unsigned int eof = 1, mss_mfs = 0; 1999 - u32 ctrl, lso_ctrl = 0, seq_ctrl = 0; 2033 + unsigned int offset, size; 2034 + dma_addr_t mapping; 2000 2035 2001 2036 /* LSO */ 2002 2037 if (skb_shinfo(skb)->gso_size != 0) { ··· 2016 2051 2017 2052 offset = 0; 2018 2053 while (len) { 2019 - entry = macb_tx_ring_wrap(bp, tx_head); 2020 - tx_skb = &queue->tx_skb[entry]; 2054 + tx_skb = macb_tx_skb(queue, tx_head); 2021 2055 2022 2056 mapping = dma_map_single(&bp->pdev->dev, 2023 2057 skb->data + offset, ··· 2032 2068 2033 2069 len -= size; 2034 2070 offset += size; 2035 - count++; 2036 2071 tx_head++; 2037 2072 2038 - size = min(len, bp->max_tx_length); 2073 + size = umin(len, bp->max_tx_length); 2039 2074 } 2040 2075 2041 2076 /* Then, map paged data from fragments */ ··· 2044 2081 len = skb_frag_size(frag); 2045 2082 offset = 0; 2046 2083 while (len) { 2047 - size = min(len, bp->max_tx_length); 2048 - entry = macb_tx_ring_wrap(bp, tx_head); 2049 - tx_skb = &queue->tx_skb[entry]; 2084 + size = umin(len, bp->max_tx_length); 2085 + tx_skb = macb_tx_skb(queue, tx_head); 2050 2086 2051 2087 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 2052 2088 offset, size, DMA_TO_DEVICE); ··· 2060 2098 2061 2099 len -= size; 2062 2100 offset += size; 2063 - count++; 2064 2101 tx_head++; 2065 2102 } 2066 2103 } ··· 2081 2120 * to set the end of TX queue 2082 2121 */ 2083 2122 i = tx_head; 2084 - entry = macb_tx_ring_wrap(bp, i); 2085 2123 ctrl = MACB_BIT(TX_USED); 2086 - desc = macb_tx_desc(queue, entry); 2124 + desc = macb_tx_desc(queue, i); 2087 2125 desc->ctrl = ctrl; 2088 2126 2089 2127 if (lso_ctrl) { ··· 2102 2142 2103 2143 
do { 2104 2144 i--; 2105 - entry = macb_tx_ring_wrap(bp, i); 2106 - tx_skb = &queue->tx_skb[entry]; 2107 - desc = macb_tx_desc(queue, entry); 2145 + tx_skb = macb_tx_skb(queue, i); 2146 + desc = macb_tx_desc(queue, i); 2108 2147 2109 2148 ctrl = (u32)tx_skb->size; 2110 2149 if (eof) { 2111 2150 ctrl |= MACB_BIT(TX_LAST); 2112 2151 eof = 0; 2113 2152 } 2114 - if (unlikely(entry == (bp->tx_ring_size - 1))) 2153 + if (unlikely(macb_tx_ring_wrap(bp, i) == bp->tx_ring_size - 1)) 2115 2154 ctrl |= MACB_BIT(TX_WRAP); 2116 2155 2117 2156 /* First descriptor is header descriptor */ ··· 2138 2179 2139 2180 queue->tx_head = tx_head; 2140 2181 2141 - return count; 2182 + return 0; 2142 2183 2143 2184 dma_error: 2144 2185 netdev_err(bp->dev, "TX DMA map failed\n"); ··· 2149 2190 macb_tx_unmap(bp, tx_skb, 0); 2150 2191 } 2151 2192 2152 - return 0; 2193 + return -ENOMEM; 2153 2194 } 2154 2195 2155 2196 static netdev_features_t macb_features_check(struct sk_buff *skb, ··· 2277 2318 return ret; 2278 2319 } 2279 2320 2280 - #ifdef CONFIG_MACB_USE_HWSTAMP 2281 - if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && 2282 - (bp->hw_dma_cap & HW_DMA_CAP_PTP)) 2321 + if (macb_dma_ptp(bp) && 2322 + (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) 2283 2323 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 2284 - #endif 2285 2324 2286 2325 is_lso = (skb_shinfo(skb)->gso_size != 0); 2287 2326 ··· 2296 2339 return NETDEV_TX_BUSY; 2297 2340 } 2298 2341 } else 2299 - hdrlen = min(skb_headlen(skb), bp->max_tx_length); 2342 + hdrlen = umin(skb_headlen(skb), bp->max_tx_length); 2300 2343 2301 2344 #if defined(DEBUG) && defined(VERBOSE_DEBUG) 2302 2345 netdev_vdbg(bp->dev, ··· 2335 2378 } 2336 2379 2337 2380 /* Map socket buffer for DMA transfer */ 2338 - if (!macb_tx_map(bp, queue, skb, hdrlen)) { 2381 + if (macb_tx_map(bp, queue, skb, hdrlen)) { 2339 2382 dev_kfree_skb_any(skb); 2340 2383 goto unlock; 2341 2384 } ··· 2756 2799 dmacfg &= ~GEM_BIT(TXCOEN); 2757 2800 2758 2801 dmacfg &= 
~GEM_BIT(ADDR64); 2759 - #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 2760 - if (bp->hw_dma_cap & HW_DMA_CAP_64B) 2802 + if (macb_dma64(bp)) 2761 2803 dmacfg |= GEM_BIT(ADDR64); 2762 - #endif 2763 - #ifdef CONFIG_MACB_USE_HWSTAMP 2764 - if (bp->hw_dma_cap & HW_DMA_CAP_PTP) 2804 + if (macb_dma_ptp(bp)) 2765 2805 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT); 2766 - #endif 2767 2806 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n", 2768 2807 dmacfg); 2769 2808 gem_writel(bp, DMACFG, dmacfg); ··· 3535 3582 { 3536 3583 struct macb *bp = netdev_priv(dev); 3537 3584 3538 - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) { 3585 + if (!macb_dma_ptp(bp)) { 3539 3586 ethtool_op_get_ts_info(dev, info); 3540 3587 return 0; 3541 3588 } ··· 4061 4108 struct macb *bp = netdev_priv(ndev); 4062 4109 struct ethtool_link_ksettings kset; 4063 4110 struct macb_queue *queue; 4111 + u32 queue_mask; 4112 + u8 queue_id; 4064 4113 size_t i; 4065 4114 int err; 4066 4115 ··· 4114 4159 goto cleanup; 4115 4160 } 4116 4161 4117 - /* gate_mask must not select queues outside the valid queue_mask */ 4118 - if (entry->gate_mask & ~bp->queue_mask) { 4162 + /* gate_mask must not select queues outside the valid queues */ 4163 + queue_id = order_base_2(entry->gate_mask); 4164 + if (queue_id >= bp->num_queues) { 4119 4165 netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n", 4120 4166 i, entry->gate_mask, bp->num_queues); 4121 4167 err = -EINVAL; ··· 4150 4194 goto cleanup; 4151 4195 } 4152 4196 4153 - enst_queue[i].queue_id = order_base_2(entry->gate_mask); 4197 + enst_queue[i].queue_id = queue_id; 4154 4198 enst_queue[i].start_time_mask = 4155 4199 (start_time_sec << GEM_START_TIME_SEC_OFFSET) | 4156 4200 start_time_nsec; ··· 4178 4222 /* All validations passed - proceed with hardware configuration */ 4179 4223 scoped_guard(spinlock_irqsave, &bp->lock) { 4180 4224 /* Disable ENST queues if running before configuring */ 4225 + queue_mask = BIT_U32(bp->num_queues) - 1; 4181 4226 
gem_writel(bp, ENST_CONTROL, 4182 - bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); 4227 + queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); 4183 4228 4184 4229 for (i = 0; i < conf->num_entries; i++) { 4185 4230 queue = &bp->queues[enst_queue[i].queue_id]; ··· 4209 4252 { 4210 4253 struct macb *bp = netdev_priv(ndev); 4211 4254 struct macb_queue *queue; 4212 - u32 enst_disable_mask; 4255 + u32 queue_mask; 4213 4256 unsigned int q; 4214 4257 4215 4258 netdev_reset_tc(ndev); 4216 - enst_disable_mask = bp->queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET; 4259 + queue_mask = BIT_U32(bp->num_queues) - 1; 4217 4260 4218 4261 scoped_guard(spinlock_irqsave, &bp->lock) { 4219 4262 /* Single disable command for all queues */ 4220 - gem_writel(bp, ENST_CONTROL, enst_disable_mask); 4263 + gem_writel(bp, ENST_CONTROL, 4264 + queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET); 4221 4265 4222 4266 /* Clear all queue ENST registers in batch */ 4223 4267 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { ··· 4328 4370 "GEM doesn't support hardware ptp.\n"); 4329 4371 else { 4330 4372 #ifdef CONFIG_MACB_USE_HWSTAMP 4331 - bp->hw_dma_cap |= HW_DMA_CAP_PTP; 4373 + bp->caps |= MACB_CAPS_DMA_PTP; 4332 4374 bp->ptp_info = &gem_ptp_info; 4333 4375 #endif 4334 4376 } ··· 4341 4383 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps); 4342 4384 } 4343 4385 4344 - static void macb_probe_queues(void __iomem *mem, 4345 - bool native_io, 4346 - unsigned int *queue_mask, 4347 - unsigned int *num_queues) 4386 + static int macb_probe_queues(struct device *dev, void __iomem *mem, bool native_io) 4348 4387 { 4349 - *queue_mask = 0x1; 4350 - *num_queues = 1; 4388 + /* BIT(0) is never set but queue 0 always exists. */ 4389 + unsigned int queue_mask = 0x1; 4351 4390 4352 - /* is it macb or gem ? 
4353 - * 4354 - * We need to read directly from the hardware here because 4355 - * we are early in the probe process and don't have the 4356 - * MACB_CAPS_MACB_IS_GEM flag positioned 4357 - */ 4358 - if (!hw_is_gem(mem, native_io)) 4359 - return; 4391 + /* Use hw_is_gem() as MACB_CAPS_MACB_IS_GEM is not yet positioned. */ 4392 + if (hw_is_gem(mem, native_io)) { 4393 + if (native_io) 4394 + queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF; 4395 + else 4396 + queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF; 4360 4397 4361 - /* bit 0 is never set but queue 0 always exists */ 4362 - *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff; 4363 - *num_queues = hweight32(*queue_mask); 4398 + if (fls(queue_mask) != ffz(queue_mask)) { 4399 + dev_err(dev, "queue mask %#x has a hole\n", queue_mask); 4400 + return -EINVAL; 4401 + } 4402 + } 4403 + 4404 + return hweight32(queue_mask); 4364 4405 } 4365 4406 4366 4407 static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk, ··· 4477 4520 * register mapping but we don't want to test the queue index then 4478 4521 * compute the corresponding register offset at run time. 
4479 4522 */ 4480 - for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) { 4481 - if (!(bp->queue_mask & (1 << hw_q))) 4482 - continue; 4483 - 4523 + for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) { 4484 4524 queue = &bp->queues[q]; 4485 4525 queue->bp = bp; 4486 4526 spin_lock_init(&queue->tx_ptr_lock); ··· 4568 4614 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs 4569 4615 */ 4570 4616 reg = gem_readl(bp, DCFG8); 4571 - bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3), 4572 - GEM_BFEXT(T2SCR, reg)); 4617 + bp->max_tuples = umin((GEM_BFEXT(SCR2CMP, reg) / 3), 4618 + GEM_BFEXT(T2SCR, reg)); 4573 4619 INIT_LIST_HEAD(&bp->rx_fs_list.list); 4574 4620 if (bp->max_tuples > 0) { 4575 4621 /* also needs one ethtype match to check IPv4 */ ··· 5378 5424 static int macb_probe(struct platform_device *pdev) 5379 5425 { 5380 5426 const struct macb_config *macb_config = &default_gem_config; 5381 - int (*clk_init)(struct platform_device *, struct clk **, 5382 - struct clk **, struct clk **, struct clk **, 5383 - struct clk **) = macb_config->clk_init; 5384 - int (*init)(struct platform_device *) = macb_config->init; 5385 5427 struct device_node *np = pdev->dev.of_node; 5386 5428 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL; 5387 5429 struct clk *tsu_clk = NULL; 5388 - unsigned int queue_mask, num_queues; 5389 - bool native_io; 5390 5430 phy_interface_t interface; 5391 5431 struct net_device *dev; 5392 5432 struct resource *regs; 5393 5433 u32 wtrmrk_rst_val; 5394 5434 void __iomem *mem; 5395 5435 struct macb *bp; 5436 + int num_queues; 5437 + bool native_io; 5396 5438 int err, val; 5397 5439 5398 5440 mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs); ··· 5399 5449 const struct of_device_id *match; 5400 5450 5401 5451 match = of_match_node(macb_dt_ids, np); 5402 - if (match && match->data) { 5452 + if (match && match->data) 5403 5453 macb_config = match->data; 5404 - clk_init = macb_config->clk_init; 5405 - init = 
macb_config->init; 5406 - } 5407 5454 } 5408 5455 5409 - err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); 5456 + err = macb_config->clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk); 5410 5457 if (err) 5411 5458 return err; 5412 5459 ··· 5414 5467 pm_runtime_enable(&pdev->dev); 5415 5468 native_io = hw_is_native_io(mem); 5416 5469 5417 - macb_probe_queues(mem, native_io, &queue_mask, &num_queues); 5470 + num_queues = macb_probe_queues(&pdev->dev, mem, native_io); 5471 + if (num_queues < 0) { 5472 + err = num_queues; 5473 + goto err_disable_clocks; 5474 + } 5475 + 5418 5476 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 5419 5477 if (!dev) { 5420 5478 err = -ENOMEM; ··· 5443 5491 bp->macb_reg_writel = hw_writel; 5444 5492 } 5445 5493 bp->num_queues = num_queues; 5446 - bp->queue_mask = queue_mask; 5447 - if (macb_config) 5448 - bp->dma_burst_length = macb_config->dma_burst_length; 5494 + bp->dma_burst_length = macb_config->dma_burst_length; 5449 5495 bp->pclk = pclk; 5450 5496 bp->hclk = hclk; 5451 5497 bp->tx_clk = tx_clk; 5452 5498 bp->rx_clk = rx_clk; 5453 5499 bp->tsu_clk = tsu_clk; 5454 - if (macb_config) 5455 - bp->jumbo_max_len = macb_config->jumbo_max_len; 5500 + bp->jumbo_max_len = macb_config->jumbo_max_len; 5456 5501 5457 5502 if (!hw_is_gem(bp->regs, bp->native_io)) 5458 5503 bp->max_tx_length = MACB_MAX_TX_LEN; ··· 5495 5546 dev_err(&pdev->dev, "failed to set DMA mask\n"); 5496 5547 goto err_out_free_netdev; 5497 5548 } 5498 - bp->hw_dma_cap |= HW_DMA_CAP_64B; 5549 + bp->caps |= MACB_CAPS_DMA_64B; 5499 5550 } 5500 5551 #endif 5501 5552 platform_set_drvdata(pdev, dev); ··· 5543 5594 bp->phy_interface = interface; 5544 5595 5545 5596 /* IP specific init */ 5546 - err = init(pdev); 5597 + err = macb_config->init(pdev); 5547 5598 if (err) 5548 5599 goto err_out_free_netdev; 5549 5600
+9 -7
drivers/net/ethernet/cadence/macb_ptp.c
··· 28 28 static struct macb_dma_desc_ptp *macb_ptp_desc(struct macb *bp, 29 29 struct macb_dma_desc *desc) 30 30 { 31 - if (bp->hw_dma_cap == HW_DMA_CAP_PTP) 32 - return (struct macb_dma_desc_ptp *) 33 - ((u8 *)desc + sizeof(struct macb_dma_desc)); 34 - if (bp->hw_dma_cap == HW_DMA_CAP_64B_PTP) 31 + if (!macb_dma_ptp(bp)) 32 + return NULL; 33 + 34 + if (macb_dma64(bp)) 35 35 return (struct macb_dma_desc_ptp *) 36 36 ((u8 *)desc + sizeof(struct macb_dma_desc) 37 37 + sizeof(struct macb_dma_desc_64)); 38 - return NULL; 38 + else 39 + return (struct macb_dma_desc_ptp *) 40 + ((u8 *)desc + sizeof(struct macb_dma_desc)); 39 41 } 40 42 41 43 static int gem_tsu_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts, ··· 382 380 struct macb *bp = netdev_priv(dev); 383 381 384 382 *tstamp_config = bp->tstamp_config; 385 - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) 383 + if (!macb_dma_ptp(bp)) 386 384 return -EOPNOTSUPP; 387 385 388 386 return 0; ··· 409 407 struct macb *bp = netdev_priv(dev); 410 408 u32 regval; 411 409 412 - if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) 410 + if (!macb_dma_ptp(bp)) 413 411 return -EOPNOTSUPP; 414 412 415 413 switch (tstamp_config->tx_type) {