
tsnep: Add TSN endpoint Ethernet MAC driver

The TSN endpoint Ethernet MAC is an FPGA-based network device for
real-time communication.

It is integrated as an Ethernet controller with ethtool and PTP support.
For real-time communication, TC_SETUP_QDISC_TAPRIO is supported.

Signed-off-by: Gerhard Engleder <gerhard@engleder-embedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
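For context: the taprio schedule reaches the driver through the ndo_setup_tc callback, which the main driver file below wires to tsnep_tc_setup(). tsnep_tc.c itself is not part of this excerpt, so the following is only a minimal sketch of the usual dispatch pattern (example_setup_tc and its placeholder body are illustrative, not the actual implementation):

static int example_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			    void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_TAPRIO:
		/* type_data points to a struct tc_taprio_qopt_offload carrying
		 * base time, cycle time and the gate control entries; a driver
		 * translates these into its hardware gate control list.
		 */
		return 0; /* placeholder for the actual GCL programming */
	default:
		return -EOPNOTSUPP;
	}
}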

Authored by Gerhard Engleder, committed by David S. Miller
403f69bb 603094b2

+3511
+1
drivers/net/ethernet/Kconfig
··· 73 73 source "drivers/net/ethernet/dec/Kconfig" 74 74 source "drivers/net/ethernet/dlink/Kconfig" 75 75 source "drivers/net/ethernet/emulex/Kconfig" 76 + source "drivers/net/ethernet/engleder/Kconfig" 76 77 source "drivers/net/ethernet/ezchip/Kconfig" 77 78 source "drivers/net/ethernet/faraday/Kconfig" 78 79 source "drivers/net/ethernet/freescale/Kconfig"
+1
drivers/net/ethernet/Makefile
··· 36 36 obj-$(CONFIG_NET_VENDOR_DEC) += dec/ 37 37 obj-$(CONFIG_NET_VENDOR_DLINK) += dlink/ 38 38 obj-$(CONFIG_NET_VENDOR_EMULEX) += emulex/ 39 + obj-$(CONFIG_NET_VENDOR_ENGLEDER) += engleder/ 39 40 obj-$(CONFIG_NET_VENDOR_EZCHIP) += ezchip/ 40 41 obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/ 41 42 obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
+38
drivers/net/ethernet/engleder/Kconfig
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # Engleder network device configuration 4 + # 5 + 6 + config NET_VENDOR_ENGLEDER 7 + bool "Engleder devices" 8 + default y 9 + help 10 + If you have a network (Ethernet) card belonging to this class, say Y. 11 + 12 + Note that the answer to this question doesn't directly affect the 13 + kernel: saying N will just cause the configurator to skip all 14 + the questions about Engleder devices. If you say Y, you will be asked 15 + for your specific card in the following questions. 16 + 17 + if NET_VENDOR_ENGLEDER 18 + 19 + config TSNEP 20 + tristate "TSN endpoint support" 21 + depends on PTP_1588_CLOCK_OPTIONAL 22 + select PHYLIB 23 + help 24 + Support for the Engleder TSN endpoint Ethernet MAC IP Core. 25 + 26 + To compile this driver as a module, choose M here. The module will be 27 + called tsnep. 28 + 29 + config TSNEP_SELFTESTS 30 + bool "TSN endpoint self test support" 31 + default n 32 + depends on TSNEP 33 + help 34 + This enables self test support within the TSN endpoint driver. 35 + 36 + If unsure, say N. 37 + 38 + endif # NET_VENDOR_ENGLEDER
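A minimal configuration fragment that builds the driver as a module with the optional self tests (assuming the PTP_1588_CLOCK_OPTIONAL dependency is otherwise satisfied) would be:

CONFIG_NET_VENDOR_ENGLEDER=y
CONFIG_TSNEP=m
CONFIG_TSNEP_SELFTESTS=y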
+10
drivers/net/ethernet/engleder/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # 3 + # Makefile for the Engleder Ethernet drivers 4 + # 5 + 6 + obj-$(CONFIG_TSNEP) += tsnep.o 7 + 8 + tsnep-objs := tsnep_main.o tsnep_ethtool.o tsnep_ptp.o tsnep_tc.o \ 9 + $(tsnep-y) 10 + tsnep-$(CONFIG_TSNEP_SELFTESTS) += tsnep_selftests.o
+190
drivers/net/ethernet/engleder/tsnep.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ 3 + 4 + #ifndef _TSNEP_H 5 + #define _TSNEP_H 6 + 7 + #include "tsnep_hw.h" 8 + 9 + #include <linux/platform_device.h> 10 + #include <linux/dma-mapping.h> 11 + #include <linux/etherdevice.h> 12 + #include <linux/phy.h> 13 + #include <linux/ethtool.h> 14 + #include <linux/net_tstamp.h> 15 + #include <linux/ptp_clock_kernel.h> 16 + #include <linux/miscdevice.h> 17 + 18 + #define TSNEP "tsnep" 19 + 20 + #define TSNEP_RING_SIZE 256 21 + #define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE) 22 + #define TSNEP_RING_PAGE_COUNT (TSNEP_RING_SIZE / TSNEP_RING_ENTRIES_PER_PAGE) 23 + 24 + #define TSNEP_QUEUES 1 25 + 26 + struct tsnep_gcl { 27 + void __iomem *addr; 28 + 29 + u64 base_time; 30 + u64 cycle_time; 31 + u64 cycle_time_extension; 32 + 33 + struct tsnep_gcl_operation operation[TSNEP_GCL_COUNT]; 34 + int count; 35 + 36 + u64 change_limit; 37 + 38 + u64 start_time; 39 + bool change; 40 + }; 41 + 42 + struct tsnep_tx_entry { 43 + struct tsnep_tx_desc *desc; 44 + struct tsnep_tx_desc_wb *desc_wb; 45 + dma_addr_t desc_dma; 46 + bool owner_user_flag; 47 + 48 + u32 properties; 49 + 50 + struct sk_buff *skb; 51 + size_t len; 52 + DEFINE_DMA_UNMAP_ADDR(dma); 53 + }; 54 + 55 + struct tsnep_tx { 56 + struct tsnep_adapter *adapter; 57 + void __iomem *addr; 58 + 59 + void *page[TSNEP_RING_PAGE_COUNT]; 60 + dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT]; 61 + 62 + /* TX ring lock */ 63 + spinlock_t lock; 64 + struct tsnep_tx_entry entry[TSNEP_RING_SIZE]; 65 + int write; 66 + int read; 67 + u32 owner_counter; 68 + int increment_owner_counter; 69 + 70 + u32 packets; 71 + u32 bytes; 72 + u32 dropped; 73 + }; 74 + 75 + struct tsnep_rx_entry { 76 + struct tsnep_rx_desc *desc; 77 + struct tsnep_rx_desc_wb *desc_wb; 78 + dma_addr_t desc_dma; 79 + 80 + u32 properties; 81 + 82 + struct sk_buff *skb; 83 + size_t len; 84 + DEFINE_DMA_UNMAP_ADDR(dma); 85 + }; 86 + 87 + struct tsnep_rx { 88 + struct tsnep_adapter *adapter; 89 + void __iomem *addr; 90 + 91 + void *page[TSNEP_RING_PAGE_COUNT]; 92 + dma_addr_t page_dma[TSNEP_RING_PAGE_COUNT]; 93 + 94 + struct tsnep_rx_entry entry[TSNEP_RING_SIZE]; 95 + int read; 96 + u32 owner_counter; 97 + int increment_owner_counter; 98 + 99 + u32 packets; 100 + u32 bytes; 101 + u32 dropped; 102 + u32 multicast; 103 + }; 104 + 105 + struct tsnep_queue { 106 + struct tsnep_adapter *adapter; 107 + 108 + struct tsnep_tx *tx; 109 + struct tsnep_rx *rx; 110 + 111 + struct napi_struct napi; 112 + 113 + u32 irq_mask; 114 + }; 115 + 116 + struct tsnep_adapter { 117 + struct net_device *netdev; 118 + u8 mac_address[ETH_ALEN]; 119 + struct mii_bus *mdiobus; 120 + bool suppress_preamble; 121 + phy_interface_t phy_mode; 122 + struct phy_device *phydev; 123 + int msg_enable; 124 + 125 + struct platform_device *pdev; 126 + struct device *dmadev; 127 + void __iomem *addr; 128 + unsigned long size; 129 + int irq; 130 + 131 + bool gate_control; 132 + /* gate control lock */ 133 + struct mutex gate_control_lock; 134 + bool gate_control_active; 135 + struct tsnep_gcl gcl[2]; 136 + int next_gcl; 137 + 138 + struct hwtstamp_config hwtstamp_config; 139 + struct ptp_clock *ptp_clock; 140 + struct ptp_clock_info ptp_clock_info; 141 + /* ptp clock lock */ 142 + spinlock_t ptp_lock; 143 + 144 + int num_tx_queues; 145 + struct tsnep_tx tx[TSNEP_MAX_QUEUES]; 146 + int num_rx_queues; 147 + struct tsnep_rx rx[TSNEP_MAX_QUEUES]; 148 + 149 + int num_queues; 150 + struct tsnep_queue 
queue[TSNEP_MAX_QUEUES]; 151 + }; 152 + 153 + extern const struct ethtool_ops tsnep_ethtool_ops; 154 + 155 + int tsnep_ptp_init(struct tsnep_adapter *adapter); 156 + void tsnep_ptp_cleanup(struct tsnep_adapter *adapter); 157 + int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd); 158 + 159 + int tsnep_tc_init(struct tsnep_adapter *adapter); 160 + void tsnep_tc_cleanup(struct tsnep_adapter *adapter); 161 + int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type, 162 + void *type_data); 163 + 164 + #if IS_ENABLED(CONFIG_TSNEP_SELFTESTS) 165 + int tsnep_ethtool_get_test_count(void); 166 + void tsnep_ethtool_get_test_strings(u8 *data); 167 + void tsnep_ethtool_self_test(struct net_device *netdev, 168 + struct ethtool_test *eth_test, u64 *data); 169 + #else 170 + static inline int tsnep_ethtool_get_test_count(void) 171 + { 172 + return -EOPNOTSUPP; 173 + } 174 + 175 + static inline void tsnep_ethtool_get_test_strings(u8 *data) 176 + { 177 + /* not enabled */ 178 + } 179 + 180 + static inline void tsnep_ethtool_self_test(struct net_device *dev, 181 + struct ethtool_test *eth_test, 182 + u64 *data) 183 + { 184 + /* not enabled */ 185 + } 186 + #endif /* CONFIG_TSNEP_SELFTESTS */ 187 + 188 + void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time); 189 + 190 + #endif /* _TSNEP_H */
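For orientation, the ring sizing constants above work out as follows on a system with 4 KiB pages (an assumption; PAGE_SIZE is architecture dependent, and TSNEP_DESC_SIZE is 256 per tsnep_hw.h):

/* TSNEP_RING_ENTRIES_PER_PAGE = 4096 / 256 = 16 descriptors per page
 * TSNEP_RING_PAGE_COUNT       = 256 / 16   = 16 DMA-coherent pages per ring
 */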
+293
drivers/net/ethernet/engleder/tsnep_ethtool.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ 3 + 4 + #include "tsnep.h" 5 + 6 + static const char tsnep_stats_strings[][ETH_GSTRING_LEN] = { 7 + "rx_packets", 8 + "rx_bytes", 9 + "rx_dropped", 10 + "rx_multicast", 11 + "rx_phy_errors", 12 + "rx_forwarded_phy_errors", 13 + "rx_invalid_frame_errors", 14 + "tx_packets", 15 + "tx_bytes", 16 + "tx_dropped", 17 + }; 18 + 19 + struct tsnep_stats { 20 + u64 rx_packets; 21 + u64 rx_bytes; 22 + u64 rx_dropped; 23 + u64 rx_multicast; 24 + u64 rx_phy_errors; 25 + u64 rx_forwarded_phy_errors; 26 + u64 rx_invalid_frame_errors; 27 + u64 tx_packets; 28 + u64 tx_bytes; 29 + u64 tx_dropped; 30 + }; 31 + 32 + #define TSNEP_STATS_COUNT (sizeof(struct tsnep_stats) / sizeof(u64)) 33 + 34 + static const char tsnep_rx_queue_stats_strings[][ETH_GSTRING_LEN] = { 35 + "rx_%d_packets", 36 + "rx_%d_bytes", 37 + "rx_%d_dropped", 38 + "rx_%d_multicast", 39 + "rx_%d_no_descriptor_errors", 40 + "rx_%d_buffer_too_small_errors", 41 + "rx_%d_fifo_overflow_errors", 42 + "rx_%d_invalid_frame_errors", 43 + }; 44 + 45 + struct tsnep_rx_queue_stats { 46 + u64 rx_packets; 47 + u64 rx_bytes; 48 + u64 rx_dropped; 49 + u64 rx_multicast; 50 + u64 rx_no_descriptor_errors; 51 + u64 rx_buffer_too_small_errors; 52 + u64 rx_fifo_overflow_errors; 53 + u64 rx_invalid_frame_errors; 54 + }; 55 + 56 + #define TSNEP_RX_QUEUE_STATS_COUNT (sizeof(struct tsnep_rx_queue_stats) / \ 57 + sizeof(u64)) 58 + 59 + static const char tsnep_tx_queue_stats_strings[][ETH_GSTRING_LEN] = { 60 + "tx_%d_packets", 61 + "tx_%d_bytes", 62 + "tx_%d_dropped", 63 + }; 64 + 65 + struct tsnep_tx_queue_stats { 66 + u64 tx_packets; 67 + u64 tx_bytes; 68 + u64 tx_dropped; 69 + }; 70 + 71 + #define TSNEP_TX_QUEUE_STATS_COUNT (sizeof(struct tsnep_tx_queue_stats) / \ 72 + sizeof(u64)) 73 + 74 + static void tsnep_ethtool_get_drvinfo(struct net_device *netdev, 75 + struct ethtool_drvinfo *drvinfo) 76 + { 77 + struct tsnep_adapter *adapter = netdev_priv(netdev); 78 + 79 + strscpy(drvinfo->driver, TSNEP, sizeof(drvinfo->driver)); 80 + strscpy(drvinfo->bus_info, dev_name(&adapter->pdev->dev), 81 + sizeof(drvinfo->bus_info)); 82 + } 83 + 84 + static int tsnep_ethtool_get_regs_len(struct net_device *netdev) 85 + { 86 + struct tsnep_adapter *adapter = netdev_priv(netdev); 87 + int len; 88 + int num_additional_queues; 89 + 90 + len = TSNEP_MAC_SIZE; 91 + 92 + /* first queue pair is within TSNEP_MAC_SIZE, only queues additional to 93 + * the first queue pair extend the register length by TSNEP_QUEUE_SIZE 94 + */ 95 + num_additional_queues = 96 + max(adapter->num_tx_queues, adapter->num_rx_queues) - 1; 97 + len += TSNEP_QUEUE_SIZE * num_additional_queues; 98 + 99 + return len; 100 + } 101 + 102 + static void tsnep_ethtool_get_regs(struct net_device *netdev, 103 + struct ethtool_regs *regs, 104 + void *p) 105 + { 106 + struct tsnep_adapter *adapter = netdev_priv(netdev); 107 + 108 + regs->version = 1; 109 + 110 + memcpy_fromio(p, adapter->addr, regs->len); 111 + } 112 + 113 + static u32 tsnep_ethtool_get_msglevel(struct net_device *netdev) 114 + { 115 + struct tsnep_adapter *adapter = netdev_priv(netdev); 116 + 117 + return adapter->msg_enable; 118 + } 119 + 120 + static void tsnep_ethtool_set_msglevel(struct net_device *netdev, u32 data) 121 + { 122 + struct tsnep_adapter *adapter = netdev_priv(netdev); 123 + 124 + adapter->msg_enable = data; 125 + } 126 + 127 + static void tsnep_ethtool_get_strings(struct net_device *netdev, u32 stringset, 128 + u8 *data) 129 + 
{ 130 + struct tsnep_adapter *adapter = netdev_priv(netdev); 131 + int rx_count = adapter->num_rx_queues; 132 + int tx_count = adapter->num_tx_queues; 133 + int i, j; 134 + 135 + switch (stringset) { 136 + case ETH_SS_STATS: 137 + memcpy(data, tsnep_stats_strings, sizeof(tsnep_stats_strings)); 138 + data += sizeof(tsnep_stats_strings); 139 + 140 + for (i = 0; i < rx_count; i++) { 141 + for (j = 0; j < TSNEP_RX_QUEUE_STATS_COUNT; j++) { 142 + snprintf(data, ETH_GSTRING_LEN, 143 + tsnep_rx_queue_stats_strings[j], i); 144 + data += ETH_GSTRING_LEN; 145 + } 146 + } 147 + 148 + for (i = 0; i < tx_count; i++) { 149 + for (j = 0; j < TSNEP_TX_QUEUE_STATS_COUNT; j++) { 150 + snprintf(data, ETH_GSTRING_LEN, 151 + tsnep_tx_queue_stats_strings[j], i); 152 + data += ETH_GSTRING_LEN; 153 + } 154 + } 155 + break; 156 + case ETH_SS_TEST: 157 + tsnep_ethtool_get_test_strings(data); 158 + break; 159 + } 160 + } 161 + 162 + static void tsnep_ethtool_get_ethtool_stats(struct net_device *netdev, 163 + struct ethtool_stats *stats, 164 + u64 *data) 165 + { 166 + struct tsnep_adapter *adapter = netdev_priv(netdev); 167 + int rx_count = adapter->num_rx_queues; 168 + int tx_count = adapter->num_tx_queues; 169 + struct tsnep_stats tsnep_stats; 170 + struct tsnep_rx_queue_stats tsnep_rx_queue_stats; 171 + struct tsnep_tx_queue_stats tsnep_tx_queue_stats; 172 + u32 reg; 173 + int i; 174 + 175 + memset(&tsnep_stats, 0, sizeof(tsnep_stats)); 176 + for (i = 0; i < adapter->num_rx_queues; i++) { 177 + tsnep_stats.rx_packets += adapter->rx[i].packets; 178 + tsnep_stats.rx_bytes += adapter->rx[i].bytes; 179 + tsnep_stats.rx_dropped += adapter->rx[i].dropped; 180 + tsnep_stats.rx_multicast += adapter->rx[i].multicast; 181 + } 182 + reg = ioread32(adapter->addr + ECM_STAT); 183 + tsnep_stats.rx_phy_errors = 184 + (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT; 185 + tsnep_stats.rx_forwarded_phy_errors = 186 + (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT; 187 + tsnep_stats.rx_invalid_frame_errors = 188 + (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT; 189 + for (i = 0; i < adapter->num_tx_queues; i++) { 190 + tsnep_stats.tx_packets += adapter->tx[i].packets; 191 + tsnep_stats.tx_bytes += adapter->tx[i].bytes; 192 + tsnep_stats.tx_dropped += adapter->tx[i].dropped; 193 + } 194 + memcpy(data, &tsnep_stats, sizeof(tsnep_stats)); 195 + data += TSNEP_STATS_COUNT; 196 + 197 + for (i = 0; i < rx_count; i++) { 198 + memset(&tsnep_rx_queue_stats, 0, sizeof(tsnep_rx_queue_stats)); 199 + tsnep_rx_queue_stats.rx_packets = adapter->rx[i].packets; 200 + tsnep_rx_queue_stats.rx_bytes = adapter->rx[i].bytes; 201 + tsnep_rx_queue_stats.rx_dropped = adapter->rx[i].dropped; 202 + tsnep_rx_queue_stats.rx_multicast = adapter->rx[i].multicast; 203 + reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + 204 + TSNEP_RX_STATISTIC); 205 + tsnep_rx_queue_stats.rx_no_descriptor_errors = 206 + (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >> 207 + TSNEP_RX_STATISTIC_NO_DESC_SHIFT; 208 + tsnep_rx_queue_stats.rx_buffer_too_small_errors = 209 + (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >> 210 + TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT; 211 + tsnep_rx_queue_stats.rx_fifo_overflow_errors = 212 + (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >> 213 + TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT; 214 + tsnep_rx_queue_stats.rx_invalid_frame_errors = 215 + (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >> 216 + TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT; 217 + memcpy(data, &tsnep_rx_queue_stats, 218 + sizeof(tsnep_rx_queue_stats)); 219 + data += 
TSNEP_RX_QUEUE_STATS_COUNT; 220 + } 221 + 222 + for (i = 0; i < tx_count; i++) { 223 + memset(&tsnep_tx_queue_stats, 0, sizeof(tsnep_tx_queue_stats)); 224 + tsnep_tx_queue_stats.tx_packets += adapter->tx[i].packets; 225 + tsnep_tx_queue_stats.tx_bytes += adapter->tx[i].bytes; 226 + tsnep_tx_queue_stats.tx_dropped += adapter->tx[i].dropped; 227 + memcpy(data, &tsnep_tx_queue_stats, 228 + sizeof(tsnep_tx_queue_stats)); 229 + data += TSNEP_TX_QUEUE_STATS_COUNT; 230 + } 231 + } 232 + 233 + static int tsnep_ethtool_get_sset_count(struct net_device *netdev, int sset) 234 + { 235 + struct tsnep_adapter *adapter = netdev_priv(netdev); 236 + int rx_count; 237 + int tx_count; 238 + 239 + switch (sset) { 240 + case ETH_SS_STATS: 241 + rx_count = adapter->num_rx_queues; 242 + tx_count = adapter->num_tx_queues; 243 + return TSNEP_STATS_COUNT + 244 + TSNEP_RX_QUEUE_STATS_COUNT * rx_count + 245 + TSNEP_TX_QUEUE_STATS_COUNT * tx_count; 246 + case ETH_SS_TEST: 247 + return tsnep_ethtool_get_test_count(); 248 + default: 249 + return -EOPNOTSUPP; 250 + } 251 + } 252 + 253 + static int tsnep_ethtool_get_ts_info(struct net_device *dev, 254 + struct ethtool_ts_info *info) 255 + { 256 + struct tsnep_adapter *adapter = netdev_priv(dev); 257 + 258 + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | 259 + SOF_TIMESTAMPING_RX_SOFTWARE | 260 + SOF_TIMESTAMPING_SOFTWARE | 261 + SOF_TIMESTAMPING_TX_HARDWARE | 262 + SOF_TIMESTAMPING_RX_HARDWARE | 263 + SOF_TIMESTAMPING_RAW_HARDWARE; 264 + 265 + if (adapter->ptp_clock) 266 + info->phc_index = ptp_clock_index(adapter->ptp_clock); 267 + else 268 + info->phc_index = -1; 269 + 270 + info->tx_types = BIT(HWTSTAMP_TX_OFF) | 271 + BIT(HWTSTAMP_TX_ON); 272 + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | 273 + BIT(HWTSTAMP_FILTER_ALL); 274 + 275 + return 0; 276 + } 277 + 278 + const struct ethtool_ops tsnep_ethtool_ops = { 279 + .get_drvinfo = tsnep_ethtool_get_drvinfo, 280 + .get_regs_len = tsnep_ethtool_get_regs_len, 281 + .get_regs = tsnep_ethtool_get_regs, 282 + .get_msglevel = tsnep_ethtool_get_msglevel, 283 + .set_msglevel = tsnep_ethtool_set_msglevel, 284 + .nway_reset = phy_ethtool_nway_reset, 285 + .get_link = ethtool_op_get_link, 286 + .self_test = tsnep_ethtool_self_test, 287 + .get_strings = tsnep_ethtool_get_strings, 288 + .get_ethtool_stats = tsnep_ethtool_get_ethtool_stats, 289 + .get_sset_count = tsnep_ethtool_get_sset_count, 290 + .get_ts_info = tsnep_ethtool_get_ts_info, 291 + .get_link_ksettings = phy_ethtool_get_link_ksettings, 292 + .set_link_ksettings = phy_ethtool_set_link_ksettings, 293 + };
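The per-queue string tables above are printf-style templates; tsnep_ethtool_get_strings() fills in the queue index, so for queue 0 the statistics reported via ethtool are named as follows (illustrative expansion only):

/* rx_0_packets, rx_0_bytes, rx_0_dropped, rx_0_multicast,
 * rx_0_no_descriptor_errors, rx_0_buffer_too_small_errors,
 * rx_0_fifo_overflow_errors, rx_0_invalid_frame_errors,
 * tx_0_packets, tx_0_bytes, tx_0_dropped
 */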
+230
drivers/net/ethernet/engleder/tsnep_hw.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ 3 + 4 + /* Hardware definition of TSNEP and EtherCAT MAC device */ 5 + 6 + #ifndef _TSNEP_HW_H 7 + #define _TSNEP_HW_H 8 + 9 + #include <linux/types.h> 10 + 11 + /* type */ 12 + #define ECM_TYPE 0x0000 13 + #define ECM_REVISION_MASK 0x000000FF 14 + #define ECM_REVISION_SHIFT 0 15 + #define ECM_VERSION_MASK 0x0000FF00 16 + #define ECM_VERSION_SHIFT 8 17 + #define ECM_QUEUE_COUNT_MASK 0x00070000 18 + #define ECM_QUEUE_COUNT_SHIFT 16 19 + #define ECM_GATE_CONTROL 0x02000000 20 + 21 + /* system time */ 22 + #define ECM_SYSTEM_TIME_LOW 0x0008 23 + #define ECM_SYSTEM_TIME_HIGH 0x000C 24 + 25 + /* clock */ 26 + #define ECM_CLOCK_RATE 0x0010 27 + #define ECM_CLOCK_RATE_OFFSET_MASK 0x7FFFFFFF 28 + #define ECM_CLOCK_RATE_OFFSET_SIGN 0x80000000 29 + 30 + /* interrupt */ 31 + #define ECM_INT_ENABLE 0x0018 32 + #define ECM_INT_ACTIVE 0x001C 33 + #define ECM_INT_ACKNOWLEDGE 0x001C 34 + #define ECM_INT_LINK 0x00000020 35 + #define ECM_INT_TX_0 0x00000100 36 + #define ECM_INT_RX_0 0x00000200 37 + #define ECM_INT_ALL 0x7FFFFFFF 38 + #define ECM_INT_DISABLE 0x80000000 39 + 40 + /* reset */ 41 + #define ECM_RESET 0x0020 42 + #define ECM_RESET_COMMON 0x00000001 43 + #define ECM_RESET_CHANNEL 0x00000100 44 + #define ECM_RESET_TXRX 0x00010000 45 + 46 + /* control and status */ 47 + #define ECM_STATUS 0x0080 48 + #define ECM_LINK_MODE_OFF 0x01000000 49 + #define ECM_LINK_MODE_100 0x02000000 50 + #define ECM_LINK_MODE_1000 0x04000000 51 + #define ECM_NO_LINK 0x01000000 52 + #define ECM_LINK_MODE_MASK 0x06000000 53 + 54 + /* management data */ 55 + #define ECM_MD_CONTROL 0x0084 56 + #define ECM_MD_STATUS 0x0084 57 + #define ECM_MD_PREAMBLE 0x00000001 58 + #define ECM_MD_READ 0x00000004 59 + #define ECM_MD_WRITE 0x00000002 60 + #define ECM_MD_ADDR_MASK 0x000000F8 61 + #define ECM_MD_ADDR_SHIFT 3 62 + #define ECM_MD_PHY_ADDR_MASK 0x00001F00 63 + #define ECM_MD_PHY_ADDR_SHIFT 8 64 + #define ECM_MD_BUSY 0x00000001 65 + #define ECM_MD_DATA_MASK 0xFFFF0000 66 + #define ECM_MD_DATA_SHIFT 16 67 + 68 + /* statistic */ 69 + #define ECM_STAT 0x00B0 70 + #define ECM_STAT_RX_ERR_MASK 0x000000FF 71 + #define ECM_STAT_RX_ERR_SHIFT 0 72 + #define ECM_STAT_INV_FRM_MASK 0x0000FF00 73 + #define ECM_STAT_INV_FRM_SHIFT 8 74 + #define ECM_STAT_FWD_RX_ERR_MASK 0x00FF0000 75 + #define ECM_STAT_FWD_RX_ERR_SHIFT 16 76 + 77 + /* tsnep */ 78 + #define TSNEP_MAC_SIZE 0x4000 79 + #define TSNEP_QUEUE_SIZE 0x1000 80 + #define TSNEP_QUEUE(n) ({ typeof(n) __n = (n); \ 81 + (__n) == 0 ? 
\ 82 + 0 : \ 83 + TSNEP_MAC_SIZE + TSNEP_QUEUE_SIZE * ((__n) - 1); }) 84 + #define TSNEP_MAX_QUEUES 8 85 + #define TSNEP_MAX_FRAME_SIZE (2 * 1024) /* hardware supports actually 16k */ 86 + #define TSNEP_DESC_SIZE 256 87 + #define TSNEP_DESC_OFFSET 128 88 + 89 + /* tsnep register */ 90 + #define TSNEP_INFO 0x0100 91 + #define TSNEP_INFO_RX_ASSIGN 0x00010000 92 + #define TSNEP_INFO_TX_TIME 0x00020000 93 + #define TSNEP_CONTROL 0x0108 94 + #define TSNEP_CONTROL_TX_RESET 0x00000001 95 + #define TSNEP_CONTROL_TX_ENABLE 0x00000002 96 + #define TSNEP_CONTROL_TX_DMA_ERROR 0x00000010 97 + #define TSNEP_CONTROL_TX_DESC_ERROR 0x00000020 98 + #define TSNEP_CONTROL_RX_RESET 0x00000100 99 + #define TSNEP_CONTROL_RX_ENABLE 0x00000200 100 + #define TSNEP_CONTROL_RX_DISABLE 0x00000400 101 + #define TSNEP_CONTROL_RX_DMA_ERROR 0x00001000 102 + #define TSNEP_CONTROL_RX_DESC_ERROR 0x00002000 103 + #define TSNEP_TX_DESC_ADDR_LOW 0x0140 104 + #define TSNEP_TX_DESC_ADDR_HIGH 0x0144 105 + #define TSNEP_RX_DESC_ADDR_LOW 0x0180 106 + #define TSNEP_RX_DESC_ADDR_HIGH 0x0184 107 + #define TSNEP_RESET_OWNER_COUNTER 0x01 108 + #define TSNEP_RX_STATISTIC 0x0190 109 + #define TSNEP_RX_STATISTIC_NO_DESC_MASK 0x000000FF 110 + #define TSNEP_RX_STATISTIC_NO_DESC_SHIFT 0 111 + #define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK 0x0000FF00 112 + #define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT 8 113 + #define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK 0x00FF0000 114 + #define TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT 16 115 + #define TSNEP_RX_STATISTIC_INVALID_FRAME_MASK 0xFF000000 116 + #define TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT 24 117 + #define TSNEP_RX_STATISTIC_NO_DESC 0x0190 118 + #define TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL 0x0191 119 + #define TSNEP_RX_STATISTIC_FIFO_OVERFLOW 0x0192 120 + #define TSNEP_RX_STATISTIC_INVALID_FRAME 0x0193 121 + #define TSNEP_RX_ASSIGN 0x01A0 122 + #define TSNEP_RX_ASSIGN_ETHER_TYPE_ACTIVE 0x00000001 123 + #define TSNEP_RX_ASSIGN_ETHER_TYPE_MASK 0xFFFF0000 124 + #define TSNEP_RX_ASSIGN_ETHER_TYPE_SHIFT 16 125 + #define TSNEP_MAC_ADDRESS_LOW 0x0800 126 + #define TSNEP_MAC_ADDRESS_HIGH 0x0804 127 + #define TSNEP_RX_FILTER 0x0806 128 + #define TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS 0x0001 129 + #define TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS 0x0002 130 + #define TSNEP_GC 0x0808 131 + #define TSNEP_GC_ENABLE_A 0x00000002 132 + #define TSNEP_GC_ENABLE_B 0x00000004 133 + #define TSNEP_GC_DISABLE 0x00000008 134 + #define TSNEP_GC_ENABLE_TIMEOUT 0x00000010 135 + #define TSNEP_GC_ACTIVE_A 0x00000002 136 + #define TSNEP_GC_ACTIVE_B 0x00000004 137 + #define TSNEP_GC_CHANGE_AB 0x00000008 138 + #define TSNEP_GC_TIMEOUT_ACTIVE 0x00000010 139 + #define TSNEP_GC_TIMEOUT_SIGNAL 0x00000020 140 + #define TSNEP_GC_LIST_ERROR 0x00000080 141 + #define TSNEP_GC_OPEN 0x00FF0000 142 + #define TSNEP_GC_OPEN_SHIFT 16 143 + #define TSNEP_GC_NEXT_OPEN 0xFF000000 144 + #define TSNEP_GC_NEXT_OPEN_SHIFT 24 145 + #define TSNEP_GC_TIMEOUT 131072 146 + #define TSNEP_GC_TIME 0x080C 147 + #define TSNEP_GC_CHANGE 0x0810 148 + #define TSNEP_GCL_A 0x2000 149 + #define TSNEP_GCL_B 0x2800 150 + #define TSNEP_GCL_SIZE SZ_2K 151 + 152 + /* tsnep gate control list operation */ 153 + struct tsnep_gcl_operation { 154 + u32 properties; 155 + u32 interval; 156 + }; 157 + 158 + #define TSNEP_GCL_COUNT (TSNEP_GCL_SIZE / sizeof(struct tsnep_gcl_operation)) 159 + #define TSNEP_GCL_MASK 0x000000FF 160 + #define TSNEP_GCL_INSERT 0x20000000 161 + #define TSNEP_GCL_CHANGE 0x40000000 162 + #define TSNEP_GCL_LAST 0x80000000 163 + #define TSNEP_GCL_MIN_INTERVAL 
32 164 + 165 + /* tsnep TX/RX descriptor */ 166 + #define TSNEP_DESC_SIZE 256 167 + #define TSNEP_DESC_SIZE_DATA_AFTER 2048 168 + #define TSNEP_DESC_OFFSET 128 169 + #define TSNEP_DESC_OWNER_COUNTER_MASK 0xC0000000 170 + #define TSNEP_DESC_OWNER_COUNTER_SHIFT 30 171 + #define TSNEP_DESC_LENGTH_MASK 0x00003FFF 172 + #define TSNEP_DESC_INTERRUPT_FLAG 0x00040000 173 + #define TSNEP_DESC_EXTENDED_WRITEBACK_FLAG 0x00080000 174 + #define TSNEP_DESC_NO_LINK_FLAG 0x01000000 175 + 176 + /* tsnep TX descriptor */ 177 + struct tsnep_tx_desc { 178 + __le32 properties; 179 + __le32 more_properties; 180 + __le32 reserved[2]; 181 + __le64 next; 182 + __le64 tx; 183 + }; 184 + 185 + #define TSNEP_TX_DESC_OWNER_MASK 0xE0000000 186 + #define TSNEP_TX_DESC_OWNER_USER_FLAG 0x20000000 187 + #define TSNEP_TX_DESC_LAST_FRAGMENT_FLAG 0x00010000 188 + #define TSNEP_TX_DESC_DATA_AFTER_DESC_FLAG 0x00020000 189 + 190 + /* tsnep TX descriptor writeback */ 191 + struct tsnep_tx_desc_wb { 192 + __le32 properties; 193 + __le32 reserved1[3]; 194 + __le64 timestamp; 195 + __le32 dma_delay; 196 + __le32 reserved2; 197 + }; 198 + 199 + #define TSNEP_TX_DESC_UNDERRUN_ERROR_FLAG 0x00010000 200 + #define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_MASK 0x0000FFFC 201 + #define TSNEP_TX_DESC_DMA_DELAY_FIRST_DATA_SHIFT 2 202 + #define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_MASK 0xFFFC0000 203 + #define TSNEP_TX_DESC_DMA_DELAY_LAST_DATA_SHIFT 18 204 + #define TSNEP_TX_DESC_DMA_DELAY_NS 64 205 + 206 + /* tsnep RX descriptor */ 207 + struct tsnep_rx_desc { 208 + __le32 properties; 209 + __le32 reserved[3]; 210 + __le64 next; 211 + __le64 rx; 212 + }; 213 + 214 + #define TSNEP_RX_DESC_BUFFER_SIZE_MASK 0x00003FFC 215 + 216 + /* tsnep RX descriptor writeback */ 217 + struct tsnep_rx_desc_wb { 218 + __le32 properties; 219 + __le32 reserved[7]; 220 + }; 221 + 222 + /* tsnep RX inline meta */ 223 + struct tsnep_rx_inline { 224 + __le64 reserved; 225 + __le64 timestamp; 226 + }; 227 + 228 + #define TSNEP_RX_INLINE_METADATA_SIZE (sizeof(struct tsnep_rx_inline)) 229 + 230 + #endif /* _TSNEP_HW_H */
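As a worked example of the register layout implied by TSNEP_QUEUE(n), with TSNEP_MAC_SIZE 0x4000 and TSNEP_QUEUE_SIZE 0x1000 (the first queue pair lives inside the MAC register block, as the ethtool get_regs_len() comment also notes):

/* TSNEP_QUEUE(0) = 0x0000
 * TSNEP_QUEUE(1) = 0x4000 + 0x1000 * 0 = 0x4000
 * TSNEP_QUEUE(2) = 0x4000 + 0x1000 * 1 = 0x5000
 */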
+1273
drivers/net/ethernet/engleder/tsnep_main.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ 3 + 4 + /* TSN endpoint Ethernet MAC driver 5 + * 6 + * The TSN endpoint Ethernet MAC is a FPGA based network device for real-time 7 + * communication. It is designed for endpoints within TSN (Time Sensitive 8 + * Networking) networks; e.g., for PLCs in the industrial automation case. 9 + * 10 + * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used 11 + * by the driver. 12 + * 13 + * More information can be found here: 14 + * - www.embedded-experts.at/tsn 15 + * - www.engleder-embedded.com 16 + */ 17 + 18 + #include "tsnep.h" 19 + #include "tsnep_hw.h" 20 + 21 + #include <linux/module.h> 22 + #include <linux/of.h> 23 + #include <linux/of_net.h> 24 + #include <linux/of_mdio.h> 25 + #include <linux/interrupt.h> 26 + #include <linux/etherdevice.h> 27 + #include <linux/phy.h> 28 + #include <linux/iopoll.h> 29 + 30 + #define RX_SKB_LENGTH (round_up(TSNEP_RX_INLINE_METADATA_SIZE + ETH_HLEN + \ 31 + TSNEP_MAX_FRAME_SIZE + ETH_FCS_LEN, 4)) 32 + #define RX_SKB_RESERVE ((16 - TSNEP_RX_INLINE_METADATA_SIZE) + NET_IP_ALIGN) 33 + #define RX_SKB_ALLOC_LENGTH (RX_SKB_RESERVE + RX_SKB_LENGTH) 34 + 35 + #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 36 + #define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF)) 37 + #else 38 + #define DMA_ADDR_HIGH(dma_addr) ((u32)(0)) 39 + #endif 40 + #define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF)) 41 + 42 + static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask) 43 + { 44 + iowrite32(mask, adapter->addr + ECM_INT_ENABLE); 45 + } 46 + 47 + static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask) 48 + { 49 + mask |= ECM_INT_DISABLE; 50 + iowrite32(mask, adapter->addr + ECM_INT_ENABLE); 51 + } 52 + 53 + static irqreturn_t tsnep_irq(int irq, void *arg) 54 + { 55 + struct tsnep_adapter *adapter = arg; 56 + u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE); 57 + 58 + /* acknowledge interrupt */ 59 + if (active != 0) 60 + iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE); 61 + 62 + /* handle link interrupt */ 63 + if ((active & ECM_INT_LINK) != 0) { 64 + if (adapter->netdev->phydev) 65 + phy_mac_interrupt(adapter->netdev->phydev); 66 + } 67 + 68 + /* handle TX/RX queue 0 interrupt */ 69 + if ((active & adapter->queue[0].irq_mask) != 0) { 70 + if (adapter->netdev) { 71 + tsnep_disable_irq(adapter, adapter->queue[0].irq_mask); 72 + napi_schedule(&adapter->queue[0].napi); 73 + } 74 + } 75 + 76 + return IRQ_HANDLED; 77 + } 78 + 79 + static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum) 80 + { 81 + struct tsnep_adapter *adapter = bus->priv; 82 + u32 md; 83 + int retval; 84 + 85 + if (regnum & MII_ADDR_C45) 86 + return -EOPNOTSUPP; 87 + 88 + md = ECM_MD_READ; 89 + if (!adapter->suppress_preamble) 90 + md |= ECM_MD_PREAMBLE; 91 + md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK; 92 + md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK; 93 + iowrite32(md, adapter->addr + ECM_MD_CONTROL); 94 + retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, 95 + !(md & ECM_MD_BUSY), 16, 1000); 96 + if (retval != 0) 97 + return retval; 98 + 99 + return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT; 100 + } 101 + 102 + static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum, 103 + u16 val) 104 + { 105 + struct tsnep_adapter *adapter = bus->priv; 106 + u32 md; 107 + int retval; 108 + 109 + if (regnum & MII_ADDR_C45) 110 + return 
-EOPNOTSUPP; 111 + 112 + md = ECM_MD_WRITE; 113 + if (!adapter->suppress_preamble) 114 + md |= ECM_MD_PREAMBLE; 115 + md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK; 116 + md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK; 117 + md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK; 118 + iowrite32(md, adapter->addr + ECM_MD_CONTROL); 119 + retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md, 120 + !(md & ECM_MD_BUSY), 16, 1000); 121 + if (retval != 0) 122 + return retval; 123 + 124 + return 0; 125 + } 126 + 127 + static void tsnep_phy_link_status_change(struct net_device *netdev) 128 + { 129 + struct tsnep_adapter *adapter = netdev_priv(netdev); 130 + struct phy_device *phydev = netdev->phydev; 131 + u32 mode; 132 + 133 + if (phydev->link) { 134 + switch (phydev->speed) { 135 + case SPEED_100: 136 + mode = ECM_LINK_MODE_100; 137 + break; 138 + case SPEED_1000: 139 + mode = ECM_LINK_MODE_1000; 140 + break; 141 + default: 142 + mode = ECM_LINK_MODE_OFF; 143 + break; 144 + } 145 + iowrite32(mode, adapter->addr + ECM_STATUS); 146 + } 147 + 148 + phy_print_status(netdev->phydev); 149 + } 150 + 151 + static int tsnep_phy_open(struct tsnep_adapter *adapter) 152 + { 153 + struct phy_device *phydev; 154 + struct ethtool_eee ethtool_eee; 155 + int retval; 156 + 157 + retval = phy_connect_direct(adapter->netdev, adapter->phydev, 158 + tsnep_phy_link_status_change, 159 + adapter->phy_mode); 160 + if (retval) 161 + return retval; 162 + phydev = adapter->netdev->phydev; 163 + 164 + /* MAC supports only 100Mbps|1000Mbps full duplex 165 + * SPE (Single Pair Ethernet) is also an option but not implemented yet 166 + */ 167 + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); 168 + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT); 169 + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); 170 + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); 171 + 172 + /* disable EEE autoneg, EEE not supported by TSNEP */ 173 + memset(&ethtool_eee, 0, sizeof(ethtool_eee)); 174 + phy_ethtool_set_eee(adapter->phydev, &ethtool_eee); 175 + 176 + adapter->phydev->irq = PHY_MAC_INTERRUPT; 177 + phy_start(adapter->phydev); 178 + 179 + return 0; 180 + } 181 + 182 + static void tsnep_phy_close(struct tsnep_adapter *adapter) 183 + { 184 + phy_stop(adapter->netdev->phydev); 185 + phy_disconnect(adapter->netdev->phydev); 186 + adapter->netdev->phydev = NULL; 187 + } 188 + 189 + static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) 190 + { 191 + struct device *dmadev = tx->adapter->dmadev; 192 + int i; 193 + 194 + memset(tx->entry, 0, sizeof(tx->entry)); 195 + 196 + for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { 197 + if (tx->page[i]) { 198 + dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i], 199 + tx->page_dma[i]); 200 + tx->page[i] = NULL; 201 + tx->page_dma[i] = 0; 202 + } 203 + } 204 + } 205 + 206 + static int tsnep_tx_ring_init(struct tsnep_tx *tx) 207 + { 208 + struct device *dmadev = tx->adapter->dmadev; 209 + struct tsnep_tx_entry *entry; 210 + struct tsnep_tx_entry *next_entry; 211 + int i, j; 212 + int retval; 213 + 214 + for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { 215 + tx->page[i] = 216 + dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i], 217 + GFP_KERNEL); 218 + if (!tx->page[i]) { 219 + retval = -ENOMEM; 220 + goto alloc_failed; 221 + } 222 + for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) { 223 + entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; 224 + entry->desc_wb = (struct tsnep_tx_desc_wb *) 225 + 
(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j); 226 + entry->desc = (struct tsnep_tx_desc *) 227 + (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); 228 + entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; 229 + } 230 + } 231 + for (i = 0; i < TSNEP_RING_SIZE; i++) { 232 + entry = &tx->entry[i]; 233 + next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE]; 234 + entry->desc->next = __cpu_to_le64(next_entry->desc_dma); 235 + } 236 + 237 + return 0; 238 + 239 + alloc_failed: 240 + tsnep_tx_ring_cleanup(tx); 241 + return retval; 242 + } 243 + 244 + static void tsnep_tx_activate(struct tsnep_tx *tx, int index, bool last) 245 + { 246 + struct tsnep_tx_entry *entry = &tx->entry[index]; 247 + 248 + entry->properties = 0; 249 + if (entry->skb) { 250 + entry->properties = 251 + skb_pagelen(entry->skb) & TSNEP_DESC_LENGTH_MASK; 252 + entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; 253 + if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) 254 + entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG; 255 + 256 + /* toggle user flag to prevent false acknowledge 257 + * 258 + * Only the first fragment is acknowledged. For all other 259 + * fragments no acknowledge is done and the last written owner 260 + * counter stays in the writeback descriptor. Therefore, it is 261 + * possible that the last written owner counter is identical to 262 + * the new incremented owner counter and a false acknowledge is 263 + * detected before the real acknowledge has been done by 264 + * hardware. 265 + * 266 + * The user flag is used to prevent this situation. The user 267 + * flag is copied to the writeback descriptor by the hardware 268 + * and is used as additional acknowledge data. By toggeling the 269 + * user flag only for the first fragment (which is 270 + * acknowledged), it is guaranteed that the last acknowledge 271 + * done for this descriptor has used a different user flag and 272 + * cannot be detected as false acknowledge. 
273 + */ 274 + entry->owner_user_flag = !entry->owner_user_flag; 275 + } 276 + if (last) 277 + entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG; 278 + if (index == tx->increment_owner_counter) { 279 + tx->owner_counter++; 280 + if (tx->owner_counter == 4) 281 + tx->owner_counter = 1; 282 + tx->increment_owner_counter--; 283 + if (tx->increment_owner_counter < 0) 284 + tx->increment_owner_counter = TSNEP_RING_SIZE - 1; 285 + } 286 + entry->properties |= 287 + (tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & 288 + TSNEP_DESC_OWNER_COUNTER_MASK; 289 + if (entry->owner_user_flag) 290 + entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG; 291 + entry->desc->more_properties = 292 + __cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK); 293 + 294 + /* descriptor properties shall be written last, because valid data is 295 + * signaled there 296 + */ 297 + dma_wmb(); 298 + 299 + entry->desc->properties = __cpu_to_le32(entry->properties); 300 + } 301 + 302 + static int tsnep_tx_desc_available(struct tsnep_tx *tx) 303 + { 304 + if (tx->read <= tx->write) 305 + return TSNEP_RING_SIZE - tx->write + tx->read - 1; 306 + else 307 + return tx->read - tx->write - 1; 308 + } 309 + 310 + static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) 311 + { 312 + struct device *dmadev = tx->adapter->dmadev; 313 + struct tsnep_tx_entry *entry; 314 + unsigned int len; 315 + dma_addr_t dma; 316 + int i; 317 + 318 + for (i = 0; i < count; i++) { 319 + entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; 320 + 321 + if (i == 0) { 322 + len = skb_headlen(skb); 323 + dma = dma_map_single(dmadev, skb->data, len, 324 + DMA_TO_DEVICE); 325 + } else { 326 + len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]); 327 + dma = skb_frag_dma_map(dmadev, 328 + &skb_shinfo(skb)->frags[i - 1], 329 + 0, len, DMA_TO_DEVICE); 330 + } 331 + if (dma_mapping_error(dmadev, dma)) 332 + return -ENOMEM; 333 + 334 + entry->len = len; 335 + dma_unmap_addr_set(entry, dma, dma); 336 + 337 + entry->desc->tx = __cpu_to_le64(dma); 338 + } 339 + 340 + return 0; 341 + } 342 + 343 + static void tsnep_tx_unmap(struct tsnep_tx *tx, int count) 344 + { 345 + struct device *dmadev = tx->adapter->dmadev; 346 + struct tsnep_tx_entry *entry; 347 + int i; 348 + 349 + for (i = 0; i < count; i++) { 350 + entry = &tx->entry[(tx->read + i) % TSNEP_RING_SIZE]; 351 + 352 + if (entry->len) { 353 + if (i == 0) 354 + dma_unmap_single(dmadev, 355 + dma_unmap_addr(entry, dma), 356 + dma_unmap_len(entry, len), 357 + DMA_TO_DEVICE); 358 + else 359 + dma_unmap_page(dmadev, 360 + dma_unmap_addr(entry, dma), 361 + dma_unmap_len(entry, len), 362 + DMA_TO_DEVICE); 363 + entry->len = 0; 364 + } 365 + } 366 + } 367 + 368 + static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, 369 + struct tsnep_tx *tx) 370 + { 371 + unsigned long flags; 372 + int count = 1; 373 + struct tsnep_tx_entry *entry; 374 + int i; 375 + int retval; 376 + 377 + if (skb_shinfo(skb)->nr_frags > 0) 378 + count += skb_shinfo(skb)->nr_frags; 379 + 380 + spin_lock_irqsave(&tx->lock, flags); 381 + 382 + if (tsnep_tx_desc_available(tx) < count) { 383 + /* ring full, shall not happen because queue is stopped if full 384 + * below 385 + */ 386 + netif_stop_queue(tx->adapter->netdev); 387 + 388 + spin_unlock_irqrestore(&tx->lock, flags); 389 + 390 + return NETDEV_TX_BUSY; 391 + } 392 + 393 + entry = &tx->entry[tx->write]; 394 + entry->skb = skb; 395 + 396 + retval = tsnep_tx_map(skb, tx, count); 397 + if (retval != 0) { 398 + tsnep_tx_unmap(tx, count); 399 + 
dev_kfree_skb_any(entry->skb); 400 + entry->skb = NULL; 401 + 402 + tx->dropped++; 403 + 404 + spin_unlock_irqrestore(&tx->lock, flags); 405 + 406 + netdev_err(tx->adapter->netdev, "TX DMA map failed\n"); 407 + 408 + return NETDEV_TX_OK; 409 + } 410 + 411 + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) 412 + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 413 + 414 + for (i = 0; i < count; i++) 415 + tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, 416 + i == (count - 1)); 417 + tx->write = (tx->write + count) % TSNEP_RING_SIZE; 418 + 419 + skb_tx_timestamp(skb); 420 + 421 + /* descriptor properties shall be valid before hardware is notified */ 422 + dma_wmb(); 423 + 424 + iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL); 425 + 426 + if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) { 427 + /* ring can get full with next frame */ 428 + netif_stop_queue(tx->adapter->netdev); 429 + } 430 + 431 + tx->packets++; 432 + tx->bytes += skb_pagelen(entry->skb) + ETH_FCS_LEN; 433 + 434 + spin_unlock_irqrestore(&tx->lock, flags); 435 + 436 + return NETDEV_TX_OK; 437 + } 438 + 439 + static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) 440 + { 441 + unsigned long flags; 442 + int budget = 128; 443 + struct tsnep_tx_entry *entry; 444 + int count; 445 + 446 + spin_lock_irqsave(&tx->lock, flags); 447 + 448 + do { 449 + if (tx->read == tx->write) 450 + break; 451 + 452 + entry = &tx->entry[tx->read]; 453 + if ((__le32_to_cpu(entry->desc_wb->properties) & 454 + TSNEP_TX_DESC_OWNER_MASK) != 455 + (entry->properties & TSNEP_TX_DESC_OWNER_MASK)) 456 + break; 457 + 458 + /* descriptor properties shall be read first, because valid data 459 + * is signaled there 460 + */ 461 + dma_rmb(); 462 + 463 + count = 1; 464 + if (skb_shinfo(entry->skb)->nr_frags > 0) 465 + count += skb_shinfo(entry->skb)->nr_frags; 466 + 467 + tsnep_tx_unmap(tx, count); 468 + 469 + if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) && 470 + (__le32_to_cpu(entry->desc_wb->properties) & 471 + TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) { 472 + struct skb_shared_hwtstamps hwtstamps; 473 + u64 timestamp = 474 + __le64_to_cpu(entry->desc_wb->timestamp); 475 + 476 + memset(&hwtstamps, 0, sizeof(hwtstamps)); 477 + hwtstamps.hwtstamp = ns_to_ktime(timestamp); 478 + 479 + skb_tstamp_tx(entry->skb, &hwtstamps); 480 + } 481 + 482 + napi_consume_skb(entry->skb, budget); 483 + entry->skb = NULL; 484 + 485 + tx->read = (tx->read + count) % TSNEP_RING_SIZE; 486 + 487 + budget--; 488 + } while (likely(budget)); 489 + 490 + if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && 491 + netif_queue_stopped(tx->adapter->netdev)) { 492 + netif_wake_queue(tx->adapter->netdev); 493 + } 494 + 495 + spin_unlock_irqrestore(&tx->lock, flags); 496 + 497 + return (budget != 0); 498 + } 499 + 500 + static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr, 501 + struct tsnep_tx *tx) 502 + { 503 + dma_addr_t dma; 504 + int retval; 505 + 506 + memset(tx, 0, sizeof(*tx)); 507 + tx->adapter = adapter; 508 + tx->addr = addr; 509 + 510 + retval = tsnep_tx_ring_init(tx); 511 + if (retval) 512 + return retval; 513 + 514 + dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; 515 + iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); 516 + iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); 517 + tx->owner_counter = 1; 518 + tx->increment_owner_counter = TSNEP_RING_SIZE - 1; 519 + 520 + spin_lock_init(&tx->lock); 521 + 522 + return 0; 523 + } 524 + 525 + static void 
tsnep_tx_close(struct tsnep_tx *tx) 526 + { 527 + u32 val; 528 + 529 + readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, 530 + ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000, 531 + 1000000); 532 + 533 + tsnep_tx_ring_cleanup(tx); 534 + } 535 + 536 + static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) 537 + { 538 + struct device *dmadev = rx->adapter->dmadev; 539 + struct tsnep_rx_entry *entry; 540 + int i; 541 + 542 + for (i = 0; i < TSNEP_RING_SIZE; i++) { 543 + entry = &rx->entry[i]; 544 + if (dma_unmap_addr(entry, dma)) 545 + dma_unmap_single(dmadev, dma_unmap_addr(entry, dma), 546 + dma_unmap_len(entry, len), 547 + DMA_FROM_DEVICE); 548 + if (entry->skb) 549 + dev_kfree_skb(entry->skb); 550 + } 551 + 552 + memset(rx->entry, 0, sizeof(rx->entry)); 553 + 554 + for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { 555 + if (rx->page[i]) { 556 + dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i], 557 + rx->page_dma[i]); 558 + rx->page[i] = NULL; 559 + rx->page_dma[i] = 0; 560 + } 561 + } 562 + } 563 + 564 + static int tsnep_rx_alloc_and_map_skb(struct tsnep_rx *rx, 565 + struct tsnep_rx_entry *entry) 566 + { 567 + struct device *dmadev = rx->adapter->dmadev; 568 + struct sk_buff *skb; 569 + dma_addr_t dma; 570 + 571 + skb = __netdev_alloc_skb(rx->adapter->netdev, RX_SKB_ALLOC_LENGTH, 572 + GFP_ATOMIC | GFP_DMA); 573 + if (!skb) 574 + return -ENOMEM; 575 + 576 + skb_reserve(skb, RX_SKB_RESERVE); 577 + 578 + dma = dma_map_single(dmadev, skb->data, RX_SKB_LENGTH, 579 + DMA_FROM_DEVICE); 580 + if (dma_mapping_error(dmadev, dma)) { 581 + dev_kfree_skb(skb); 582 + return -ENOMEM; 583 + } 584 + 585 + entry->skb = skb; 586 + entry->len = RX_SKB_LENGTH; 587 + dma_unmap_addr_set(entry, dma, dma); 588 + entry->desc->rx = __cpu_to_le64(dma); 589 + 590 + return 0; 591 + } 592 + 593 + static int tsnep_rx_ring_init(struct tsnep_rx *rx) 594 + { 595 + struct device *dmadev = rx->adapter->dmadev; 596 + struct tsnep_rx_entry *entry; 597 + struct tsnep_rx_entry *next_entry; 598 + int i, j; 599 + int retval; 600 + 601 + for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) { 602 + rx->page[i] = 603 + dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i], 604 + GFP_KERNEL); 605 + if (!rx->page[i]) { 606 + retval = -ENOMEM; 607 + goto failed; 608 + } 609 + for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) { 610 + entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j]; 611 + entry->desc_wb = (struct tsnep_rx_desc_wb *) 612 + (((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j); 613 + entry->desc = (struct tsnep_rx_desc *) 614 + (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); 615 + entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j; 616 + } 617 + } 618 + for (i = 0; i < TSNEP_RING_SIZE; i++) { 619 + entry = &rx->entry[i]; 620 + next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE]; 621 + entry->desc->next = __cpu_to_le64(next_entry->desc_dma); 622 + 623 + retval = tsnep_rx_alloc_and_map_skb(rx, entry); 624 + if (retval) 625 + goto failed; 626 + } 627 + 628 + return 0; 629 + 630 + failed: 631 + tsnep_rx_ring_cleanup(rx); 632 + return retval; 633 + } 634 + 635 + static void tsnep_rx_activate(struct tsnep_rx *rx, int index) 636 + { 637 + struct tsnep_rx_entry *entry = &rx->entry[index]; 638 + 639 + /* RX_SKB_LENGTH is a multiple of 4 */ 640 + entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; 641 + entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; 642 + if (index == rx->increment_owner_counter) { 643 + rx->owner_counter++; 644 + if (rx->owner_counter == 4) 645 + rx->owner_counter = 1; 646 + rx->increment_owner_counter--; 647 + 
if (rx->increment_owner_counter < 0) 648 + rx->increment_owner_counter = TSNEP_RING_SIZE - 1; 649 + } 650 + entry->properties |= 651 + (rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) & 652 + TSNEP_DESC_OWNER_COUNTER_MASK; 653 + 654 + /* descriptor properties shall be written last, because valid data is 655 + * signaled there 656 + */ 657 + dma_wmb(); 658 + 659 + entry->desc->properties = __cpu_to_le32(entry->properties); 660 + } 661 + 662 + static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, 663 + int budget) 664 + { 665 + struct device *dmadev = rx->adapter->dmadev; 666 + int done = 0; 667 + struct tsnep_rx_entry *entry; 668 + struct sk_buff *skb; 669 + size_t len; 670 + dma_addr_t dma; 671 + int length; 672 + bool enable = false; 673 + int retval; 674 + 675 + while (likely(done < budget)) { 676 + entry = &rx->entry[rx->read]; 677 + if ((__le32_to_cpu(entry->desc_wb->properties) & 678 + TSNEP_DESC_OWNER_COUNTER_MASK) != 679 + (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) 680 + break; 681 + 682 + /* descriptor properties shall be read first, because valid data 683 + * is signaled there 684 + */ 685 + dma_rmb(); 686 + 687 + skb = entry->skb; 688 + len = dma_unmap_len(entry, len); 689 + dma = dma_unmap_addr(entry, dma); 690 + 691 + /* forward skb only if allocation is successful, otherwise 692 + * skb is reused and frame dropped 693 + */ 694 + retval = tsnep_rx_alloc_and_map_skb(rx, entry); 695 + if (!retval) { 696 + dma_unmap_single(dmadev, dma, len, DMA_FROM_DEVICE); 697 + 698 + length = __le32_to_cpu(entry->desc_wb->properties) & 699 + TSNEP_DESC_LENGTH_MASK; 700 + skb_put(skb, length - ETH_FCS_LEN); 701 + if (rx->adapter->hwtstamp_config.rx_filter == 702 + HWTSTAMP_FILTER_ALL) { 703 + struct skb_shared_hwtstamps *hwtstamps = 704 + skb_hwtstamps(skb); 705 + struct tsnep_rx_inline *rx_inline = 706 + (struct tsnep_rx_inline *)skb->data; 707 + u64 timestamp = 708 + __le64_to_cpu(rx_inline->timestamp); 709 + 710 + memset(hwtstamps, 0, sizeof(*hwtstamps)); 711 + hwtstamps->hwtstamp = ns_to_ktime(timestamp); 712 + } 713 + skb_pull(skb, TSNEP_RX_INLINE_METADATA_SIZE); 714 + skb->protocol = eth_type_trans(skb, 715 + rx->adapter->netdev); 716 + 717 + rx->packets++; 718 + rx->bytes += length - TSNEP_RX_INLINE_METADATA_SIZE; 719 + if (skb->pkt_type == PACKET_MULTICAST) 720 + rx->multicast++; 721 + 722 + napi_gro_receive(napi, skb); 723 + done++; 724 + } else { 725 + rx->dropped++; 726 + } 727 + 728 + tsnep_rx_activate(rx, rx->read); 729 + 730 + enable = true; 731 + 732 + rx->read = (rx->read + 1) % TSNEP_RING_SIZE; 733 + } 734 + 735 + if (enable) { 736 + /* descriptor properties shall be valid before hardware is 737 + * notified 738 + */ 739 + dma_wmb(); 740 + 741 + iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); 742 + } 743 + 744 + return done; 745 + } 746 + 747 + static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr, 748 + struct tsnep_rx *rx) 749 + { 750 + dma_addr_t dma; 751 + int i; 752 + int retval; 753 + 754 + memset(rx, 0, sizeof(*rx)); 755 + rx->adapter = adapter; 756 + rx->addr = addr; 757 + 758 + retval = tsnep_rx_ring_init(rx); 759 + if (retval) 760 + return retval; 761 + 762 + dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; 763 + iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); 764 + iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); 765 + rx->owner_counter = 1; 766 + rx->increment_owner_counter = TSNEP_RING_SIZE - 1; 767 + 768 + for (i = 0; i < TSNEP_RING_SIZE; i++) 769 + 
tsnep_rx_activate(rx, i); 770 + 771 + /* descriptor properties shall be valid before hardware is notified */ 772 + dma_wmb(); 773 + 774 + iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); 775 + 776 + return 0; 777 + } 778 + 779 + static void tsnep_rx_close(struct tsnep_rx *rx) 780 + { 781 + u32 val; 782 + 783 + iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); 784 + readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, 785 + ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000, 786 + 1000000); 787 + 788 + tsnep_rx_ring_cleanup(rx); 789 + } 790 + 791 + static int tsnep_poll(struct napi_struct *napi, int budget) 792 + { 793 + struct tsnep_queue *queue = container_of(napi, struct tsnep_queue, 794 + napi); 795 + bool complete = true; 796 + int done = 0; 797 + 798 + if (queue->tx) 799 + complete = tsnep_tx_poll(queue->tx, budget); 800 + 801 + if (queue->rx) { 802 + done = tsnep_rx_poll(queue->rx, napi, budget); 803 + if (done >= budget) 804 + complete = false; 805 + } 806 + 807 + /* if all work not completed, return budget and keep polling */ 808 + if (!complete) 809 + return budget; 810 + 811 + if (likely(napi_complete_done(napi, done))) 812 + tsnep_enable_irq(queue->adapter, queue->irq_mask); 813 + 814 + return min(done, budget - 1); 815 + } 816 + 817 + static int tsnep_netdev_open(struct net_device *netdev) 818 + { 819 + struct tsnep_adapter *adapter = netdev_priv(netdev); 820 + int i; 821 + void __iomem *addr; 822 + int tx_queue_index = 0; 823 + int rx_queue_index = 0; 824 + int retval; 825 + 826 + retval = tsnep_phy_open(adapter); 827 + if (retval) 828 + return retval; 829 + 830 + for (i = 0; i < adapter->num_queues; i++) { 831 + adapter->queue[i].adapter = adapter; 832 + if (adapter->queue[i].tx) { 833 + addr = adapter->addr + TSNEP_QUEUE(tx_queue_index); 834 + retval = tsnep_tx_open(adapter, addr, 835 + adapter->queue[i].tx); 836 + if (retval) 837 + goto failed; 838 + tx_queue_index++; 839 + } 840 + if (adapter->queue[i].rx) { 841 + addr = adapter->addr + TSNEP_QUEUE(rx_queue_index); 842 + retval = tsnep_rx_open(adapter, addr, 843 + adapter->queue[i].rx); 844 + if (retval) 845 + goto failed; 846 + rx_queue_index++; 847 + } 848 + } 849 + 850 + retval = netif_set_real_num_tx_queues(adapter->netdev, 851 + adapter->num_tx_queues); 852 + if (retval) 853 + goto failed; 854 + retval = netif_set_real_num_rx_queues(adapter->netdev, 855 + adapter->num_rx_queues); 856 + if (retval) 857 + goto failed; 858 + 859 + for (i = 0; i < adapter->num_queues; i++) { 860 + netif_napi_add(adapter->netdev, &adapter->queue[i].napi, 861 + tsnep_poll, 64); 862 + napi_enable(&adapter->queue[i].napi); 863 + 864 + tsnep_enable_irq(adapter, adapter->queue[i].irq_mask); 865 + } 866 + 867 + return 0; 868 + 869 + failed: 870 + for (i = 0; i < adapter->num_queues; i++) { 871 + if (adapter->queue[i].rx) 872 + tsnep_rx_close(adapter->queue[i].rx); 873 + if (adapter->queue[i].tx) 874 + tsnep_tx_close(adapter->queue[i].tx); 875 + } 876 + tsnep_phy_close(adapter); 877 + return retval; 878 + } 879 + 880 + static int tsnep_netdev_close(struct net_device *netdev) 881 + { 882 + struct tsnep_adapter *adapter = netdev_priv(netdev); 883 + int i; 884 + 885 + for (i = 0; i < adapter->num_queues; i++) { 886 + tsnep_disable_irq(adapter, adapter->queue[i].irq_mask); 887 + 888 + napi_disable(&adapter->queue[i].napi); 889 + netif_napi_del(&adapter->queue[i].napi); 890 + 891 + if (adapter->queue[i].rx) 892 + tsnep_rx_close(adapter->queue[i].rx); 893 + if (adapter->queue[i].tx) 894 + 
tsnep_tx_close(adapter->queue[i].tx); 895 + } 896 + 897 + tsnep_phy_close(adapter); 898 + 899 + return 0; 900 + } 901 + 902 + static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb, 903 + struct net_device *netdev) 904 + { 905 + struct tsnep_adapter *adapter = netdev_priv(netdev); 906 + u16 queue_mapping = skb_get_queue_mapping(skb); 907 + 908 + if (queue_mapping >= adapter->num_tx_queues) 909 + queue_mapping = 0; 910 + 911 + return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]); 912 + } 913 + 914 + static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr, 915 + int cmd) 916 + { 917 + if (!netif_running(netdev)) 918 + return -EINVAL; 919 + if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP) 920 + return tsnep_ptp_ioctl(netdev, ifr, cmd); 921 + return phy_mii_ioctl(netdev->phydev, ifr, cmd); 922 + } 923 + 924 + static void tsnep_netdev_set_multicast(struct net_device *netdev) 925 + { 926 + struct tsnep_adapter *adapter = netdev_priv(netdev); 927 + 928 + u16 rx_filter = 0; 929 + 930 + /* configured MAC address and broadcasts are never filtered */ 931 + if (netdev->flags & IFF_PROMISC) { 932 + rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS; 933 + rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS; 934 + } else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) { 935 + rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS; 936 + } 937 + iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER); 938 + } 939 + 940 + static void tsnep_netdev_get_stats64(struct net_device *netdev, 941 + struct rtnl_link_stats64 *stats) 942 + { 943 + struct tsnep_adapter *adapter = netdev_priv(netdev); 944 + u32 reg; 945 + u32 val; 946 + int i; 947 + 948 + for (i = 0; i < adapter->num_tx_queues; i++) { 949 + stats->tx_packets += adapter->tx[i].packets; 950 + stats->tx_bytes += adapter->tx[i].bytes; 951 + stats->tx_dropped += adapter->tx[i].dropped; 952 + } 953 + for (i = 0; i < adapter->num_rx_queues; i++) { 954 + stats->rx_packets += adapter->rx[i].packets; 955 + stats->rx_bytes += adapter->rx[i].bytes; 956 + stats->rx_dropped += adapter->rx[i].dropped; 957 + stats->multicast += adapter->rx[i].multicast; 958 + 959 + reg = ioread32(adapter->addr + TSNEP_QUEUE(i) + 960 + TSNEP_RX_STATISTIC); 961 + val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >> 962 + TSNEP_RX_STATISTIC_NO_DESC_SHIFT; 963 + stats->rx_dropped += val; 964 + val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >> 965 + TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT; 966 + stats->rx_dropped += val; 967 + val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >> 968 + TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT; 969 + stats->rx_errors += val; 970 + stats->rx_fifo_errors += val; 971 + val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >> 972 + TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT; 973 + stats->rx_errors += val; 974 + stats->rx_frame_errors += val; 975 + } 976 + 977 + reg = ioread32(adapter->addr + ECM_STAT); 978 + val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT; 979 + stats->rx_errors += val; 980 + val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT; 981 + stats->rx_errors += val; 982 + stats->rx_crc_errors += val; 983 + val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT; 984 + stats->rx_errors += val; 985 + } 986 + 987 + static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr) 988 + { 989 + iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW); 990 + iowrite16(*(u16 *)(addr + sizeof(u32)), 991 + adapter->addr + TSNEP_MAC_ADDRESS_HIGH); 992 + 993 + 
ether_addr_copy(adapter->mac_address, addr); 994 + netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n", 995 + addr); 996 + } 997 + 998 + static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr) 999 + { 1000 + struct tsnep_adapter *adapter = netdev_priv(netdev); 1001 + struct sockaddr *sock_addr = addr; 1002 + int retval; 1003 + 1004 + retval = eth_prepare_mac_addr_change(netdev, sock_addr); 1005 + if (retval) 1006 + return retval; 1007 + ether_addr_copy(netdev->dev_addr, sock_addr->sa_data); 1008 + tsnep_mac_set_address(adapter, sock_addr->sa_data); 1009 + 1010 + return 0; 1011 + } 1012 + 1013 + static const struct net_device_ops tsnep_netdev_ops = { 1014 + .ndo_open = tsnep_netdev_open, 1015 + .ndo_stop = tsnep_netdev_close, 1016 + .ndo_start_xmit = tsnep_netdev_xmit_frame, 1017 + .ndo_eth_ioctl = tsnep_netdev_ioctl, 1018 + .ndo_set_rx_mode = tsnep_netdev_set_multicast, 1019 + 1020 + .ndo_get_stats64 = tsnep_netdev_get_stats64, 1021 + .ndo_set_mac_address = tsnep_netdev_set_mac_address, 1022 + .ndo_setup_tc = tsnep_tc_setup, 1023 + }; 1024 + 1025 + static int tsnep_mac_init(struct tsnep_adapter *adapter) 1026 + { 1027 + int retval; 1028 + 1029 + /* initialize RX filtering, at least configured MAC address and 1030 + * broadcast are not filtered 1031 + */ 1032 + iowrite16(0, adapter->addr + TSNEP_RX_FILTER); 1033 + 1034 + /* try to get MAC address in the following order: 1035 + * - device tree 1036 + * - valid MAC address already set 1037 + * - MAC address register if valid 1038 + * - random MAC address 1039 + */ 1040 + retval = of_get_mac_address(adapter->pdev->dev.of_node, 1041 + adapter->mac_address); 1042 + if (retval == -EPROBE_DEFER) 1043 + return retval; 1044 + if (retval && !is_valid_ether_addr(adapter->mac_address)) { 1045 + *(u32 *)adapter->mac_address = 1046 + ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW); 1047 + *(u16 *)(adapter->mac_address + sizeof(u32)) = 1048 + ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH); 1049 + if (!is_valid_ether_addr(adapter->mac_address)) 1050 + eth_random_addr(adapter->mac_address); 1051 + } 1052 + 1053 + tsnep_mac_set_address(adapter, adapter->mac_address); 1054 + ether_addr_copy(adapter->netdev->dev_addr, adapter->mac_address); 1055 + 1056 + return 0; 1057 + } 1058 + 1059 + static int tsnep_mdio_init(struct tsnep_adapter *adapter) 1060 + { 1061 + struct device_node *np = adapter->pdev->dev.of_node; 1062 + int retval; 1063 + 1064 + if (np) { 1065 + np = of_get_child_by_name(np, "mdio"); 1066 + if (!np) 1067 + return 0; 1068 + 1069 + adapter->suppress_preamble = 1070 + of_property_read_bool(np, "suppress-preamble"); 1071 + } 1072 + 1073 + adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev); 1074 + if (!adapter->mdiobus) { 1075 + retval = -ENOMEM; 1076 + 1077 + goto out; 1078 + } 1079 + 1080 + adapter->mdiobus->priv = (void *)adapter; 1081 + adapter->mdiobus->parent = &adapter->pdev->dev; 1082 + adapter->mdiobus->read = tsnep_mdiobus_read; 1083 + adapter->mdiobus->write = tsnep_mdiobus_write; 1084 + adapter->mdiobus->name = TSNEP "-mdiobus"; 1085 + snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s", 1086 + adapter->pdev->name); 1087 + 1088 + /* do not scan broadcast address */ 1089 + adapter->mdiobus->phy_mask = 0x0000001; 1090 + 1091 + retval = of_mdiobus_register(adapter->mdiobus, np); 1092 + if (np) 1093 + of_node_put(np); 1094 + out: 1095 + 1096 + return retval; 1097 + } 1098 + 1099 + static int tsnep_phy_init(struct tsnep_adapter *adapter) 1100 + { 1101 + struct device_node *phy_node; 1102 + int 
retval; 1103 + 1104 + retval = of_get_phy_mode(adapter->pdev->dev.of_node, 1105 + &adapter->phy_mode); 1106 + if (retval) 1107 + adapter->phy_mode = PHY_INTERFACE_MODE_GMII; 1108 + 1109 + phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle", 1110 + 0); 1111 + adapter->phydev = of_phy_find_device(phy_node); 1112 + of_node_put(phy_node); 1113 + if (!adapter->phydev && adapter->mdiobus) 1114 + adapter->phydev = phy_find_first(adapter->mdiobus); 1115 + if (!adapter->phydev) 1116 + return -EIO; 1117 + 1118 + return 0; 1119 + } 1120 + 1121 + static int tsnep_probe(struct platform_device *pdev) 1122 + { 1123 + struct tsnep_adapter *adapter; 1124 + struct net_device *netdev; 1125 + struct resource *io; 1126 + u32 type; 1127 + int revision; 1128 + int version; 1129 + int retval; 1130 + 1131 + netdev = devm_alloc_etherdev_mqs(&pdev->dev, 1132 + sizeof(struct tsnep_adapter), 1133 + TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES); 1134 + if (!netdev) 1135 + return -ENODEV; 1136 + SET_NETDEV_DEV(netdev, &pdev->dev); 1137 + adapter = netdev_priv(netdev); 1138 + platform_set_drvdata(pdev, adapter); 1139 + adapter->pdev = pdev; 1140 + adapter->dmadev = &pdev->dev; 1141 + adapter->netdev = netdev; 1142 + adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE | 1143 + NETIF_MSG_LINK | NETIF_MSG_IFUP | 1144 + NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED; 1145 + 1146 + netdev->min_mtu = ETH_MIN_MTU; 1147 + netdev->max_mtu = TSNEP_MAX_FRAME_SIZE; 1148 + 1149 + mutex_init(&adapter->gate_control_lock); 1150 + 1151 + io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1152 + adapter->addr = devm_ioremap_resource(&pdev->dev, io); 1153 + if (IS_ERR(adapter->addr)) 1154 + return PTR_ERR(adapter->addr); 1155 + adapter->size = io->end - io->start + 1; 1156 + adapter->irq = platform_get_irq(pdev, 0); 1157 + netdev->mem_start = io->start; 1158 + netdev->mem_end = io->end; 1159 + netdev->irq = adapter->irq; 1160 + 1161 + type = ioread32(adapter->addr + ECM_TYPE); 1162 + revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT; 1163 + version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT; 1164 + adapter->gate_control = type & ECM_GATE_CONTROL; 1165 + 1166 + adapter->num_tx_queues = TSNEP_QUEUES; 1167 + adapter->num_rx_queues = TSNEP_QUEUES; 1168 + adapter->num_queues = TSNEP_QUEUES; 1169 + adapter->queue[0].tx = &adapter->tx[0]; 1170 + adapter->queue[0].rx = &adapter->rx[0]; 1171 + adapter->queue[0].irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0; 1172 + 1173 + tsnep_disable_irq(adapter, ECM_INT_ALL); 1174 + retval = devm_request_irq(&adapter->pdev->dev, adapter->irq, tsnep_irq, 1175 + 0, TSNEP, adapter); 1176 + if (retval != 0) { 1177 + dev_err(&adapter->pdev->dev, "can't get assigned irq %d.\n", 1178 + adapter->irq); 1179 + return retval; 1180 + } 1181 + tsnep_enable_irq(adapter, ECM_INT_LINK); 1182 + 1183 + retval = tsnep_mac_init(adapter); 1184 + if (retval) 1185 + goto mac_init_failed; 1186 + 1187 + retval = tsnep_mdio_init(adapter); 1188 + if (retval) 1189 + goto mdio_init_failed; 1190 + 1191 + retval = tsnep_phy_init(adapter); 1192 + if (retval) 1193 + goto phy_init_failed; 1194 + 1195 + retval = tsnep_ptp_init(adapter); 1196 + if (retval) 1197 + goto ptp_init_failed; 1198 + 1199 + retval = tsnep_tc_init(adapter); 1200 + if (retval) 1201 + goto tc_init_failed; 1202 + 1203 + netdev->netdev_ops = &tsnep_netdev_ops; 1204 + netdev->ethtool_ops = &tsnep_ethtool_ops; 1205 + netdev->features = NETIF_F_SG; 1206 + netdev->hw_features = netdev->features; 1207 + 1208 + /* carrier off reporting is important to ethtool even BEFORE 
open */ 1209 + netif_carrier_off(netdev); 1210 + 1211 + retval = register_netdev(netdev); 1212 + if (retval) 1213 + goto register_failed; 1214 + 1215 + dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version, 1216 + revision); 1217 + if (adapter->gate_control) 1218 + dev_info(&adapter->pdev->dev, "gate control detected\n"); 1219 + 1220 + return 0; 1221 + 1222 + register_failed: 1223 + tsnep_tc_cleanup(adapter); 1224 + tc_init_failed: 1225 + tsnep_ptp_cleanup(adapter); 1226 + ptp_init_failed: 1227 + phy_init_failed: 1228 + if (adapter->mdiobus) 1229 + mdiobus_unregister(adapter->mdiobus); 1230 + mdio_init_failed: 1231 + mac_init_failed: 1232 + tsnep_disable_irq(adapter, ECM_INT_ALL); 1233 + return retval; 1234 + } 1235 + 1236 + static int tsnep_remove(struct platform_device *pdev) 1237 + { 1238 + struct tsnep_adapter *adapter = platform_get_drvdata(pdev); 1239 + 1240 + unregister_netdev(adapter->netdev); 1241 + 1242 + tsnep_tc_cleanup(adapter); 1243 + 1244 + tsnep_ptp_cleanup(adapter); 1245 + 1246 + if (adapter->mdiobus) 1247 + mdiobus_unregister(adapter->mdiobus); 1248 + 1249 + tsnep_disable_irq(adapter, ECM_INT_ALL); 1250 + 1251 + return 0; 1252 + } 1253 + 1254 + static const struct of_device_id tsnep_of_match[] = { 1255 + { .compatible = "engleder,tsnep", }, 1256 + { }, 1257 + }; 1258 + MODULE_DEVICE_TABLE(of, tsnep_of_match); 1259 + 1260 + static struct platform_driver tsnep_driver = { 1261 + .driver = { 1262 + .name = TSNEP, 1263 + .owner = THIS_MODULE, 1264 + .of_match_table = of_match_ptr(tsnep_of_match), 1265 + }, 1266 + .probe = tsnep_probe, 1267 + .remove = tsnep_remove, 1268 + }; 1269 + module_platform_driver(tsnep_driver); 1270 + 1271 + MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>"); 1272 + MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver"); 1273 + MODULE_LICENSE("GPL");
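tsnep_mac_init() above falls back from the device tree address to the 32-bit TSNEP_MAC_ADDRESS_LOW and 16-bit TSNEP_MAC_ADDRESS_HIGH registers, and finally to a random address. Below is a minimal userspace sketch of that assembly and fallback order; tsnep_rd32()/tsnep_rd16(), their return values and valid_ether() are made-up stand-ins for the hardware reads and is_valid_ether_addr(), and a little-endian register layout is assumed, matching the driver's pointer casts.

/* Userspace sketch (not driver code) of the address fallback in
 * tsnep_mac_init(). tsnep_rd32()/tsnep_rd16() and valid_ether() are
 * made-up stand-ins for ioread32()/ioread16() and is_valid_ether_addr();
 * a little-endian register layout is assumed.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static uint32_t tsnep_rd32(void) { return 0x563412aa; } /* hypothetical MAC_ADDRESS_LOW */
static uint16_t tsnep_rd16(void) { return 0x9a78; }     /* hypothetical MAC_ADDRESS_HIGH */

/* same idea as is_valid_ether_addr(): not all-zero and not multicast */
static bool valid_ether(const uint8_t *a)
{
	static const uint8_t zero[6];

	return memcmp(a, zero, 6) != 0 && !(a[0] & 0x01);
}

int main(void)
{
	uint8_t mac[6] = { 0 };		/* of_get_mac_address() failed */
	uint32_t low;
	uint16_t high;
	int i;

	if (!valid_ether(mac)) {
		low = tsnep_rd32();
		high = tsnep_rd16();
		/* low register holds octets 0..3, high register octets 4..5 */
		memcpy(mac, &low, sizeof(low));
		memcpy(mac + sizeof(low), &high, sizeof(high));
	}
	if (!valid_ether(mac)) {
		/* last resort in the driver: eth_random_addr() */
		for (i = 0; i < 6; i++)
			mac[i] = rand() & 0xff;
		mac[0] = (mac[0] & 0xfe) | 0x02; /* unicast, locally administered */
	}

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}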
+221
drivers/net/ethernet/engleder/tsnep_ptp.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ 3 + 4 + #include "tsnep.h" 5 + 6 + void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time) 7 + { 8 + u32 high_before; 9 + u32 low; 10 + u32 high; 11 + 12 + /* read high dword twice to detect overrun */ 13 + high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); 14 + do { 15 + low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW); 16 + high_before = high; 17 + high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); 18 + } while (high != high_before); 19 + *time = (((u64)high) << 32) | ((u64)low); 20 + } 21 + 22 + int tsnep_ptp_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 23 + { 24 + struct tsnep_adapter *adapter = netdev_priv(netdev); 25 + struct hwtstamp_config config; 26 + 27 + if (!ifr) 28 + return -EINVAL; 29 + 30 + if (cmd == SIOCSHWTSTAMP) { 31 + if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 32 + return -EFAULT; 33 + 34 + if (config.flags) 35 + return -EINVAL; 36 + 37 + switch (config.tx_type) { 38 + case HWTSTAMP_TX_OFF: 39 + case HWTSTAMP_TX_ON: 40 + break; 41 + default: 42 + return -ERANGE; 43 + } 44 + 45 + switch (config.rx_filter) { 46 + case HWTSTAMP_FILTER_NONE: 47 + break; 48 + case HWTSTAMP_FILTER_ALL: 49 + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 50 + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 51 + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 52 + case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 53 + case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 54 + case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 55 + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 56 + case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 57 + case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 58 + case HWTSTAMP_FILTER_PTP_V2_EVENT: 59 + case HWTSTAMP_FILTER_PTP_V2_SYNC: 60 + case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 61 + case HWTSTAMP_FILTER_NTP_ALL: 62 + config.rx_filter = HWTSTAMP_FILTER_ALL; 63 + break; 64 + default: 65 + return -ERANGE; 66 + } 67 + 68 + memcpy(&adapter->hwtstamp_config, &config, 69 + sizeof(adapter->hwtstamp_config)); 70 + } 71 + 72 + if (copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config, 73 + sizeof(adapter->hwtstamp_config))) 74 + return -EFAULT; 75 + 76 + return 0; 77 + } 78 + 79 + static int tsnep_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) 80 + { 81 + struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter, 82 + ptp_clock_info); 83 + bool negative = false; 84 + u64 rate_offset; 85 + 86 + if (scaled_ppm < 0) { 87 + scaled_ppm = -scaled_ppm; 88 + negative = true; 89 + } 90 + 91 + /* convert from 16 bit to 32 bit binary fractional, divide by 1000000 to 92 + * eliminate ppm, multiply with 8 to compensate 8ns clock cycle time, 93 + * simplify calculation because 15625 * 8 = 1000000 / 8 94 + */ 95 + rate_offset = scaled_ppm; 96 + rate_offset <<= 16 - 3; 97 + rate_offset = div_u64(rate_offset, 15625); 98 + 99 + rate_offset &= ECM_CLOCK_RATE_OFFSET_MASK; 100 + if (negative) 101 + rate_offset |= ECM_CLOCK_RATE_OFFSET_SIGN; 102 + iowrite32(rate_offset & 0xFFFFFFFF, adapter->addr + ECM_CLOCK_RATE); 103 + 104 + return 0; 105 + } 106 + 107 + static int tsnep_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) 108 + { 109 + struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter, 110 + ptp_clock_info); 111 + u64 system_time; 112 + unsigned long flags; 113 + 114 + spin_lock_irqsave(&adapter->ptp_lock, flags); 115 + 116 + tsnep_get_system_time(adapter, &system_time); 117 + 118 + system_time += delta; 119 + 120 + /* high dword is buffered in hardware and synchronously 
written to 121 + * system time when low dword is written 122 + */ 123 + iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH); 124 + iowrite32(system_time & 0xFFFFFFFF, 125 + adapter->addr + ECM_SYSTEM_TIME_LOW); 126 + 127 + spin_unlock_irqrestore(&adapter->ptp_lock, flags); 128 + 129 + return 0; 130 + } 131 + 132 + static int tsnep_ptp_gettimex64(struct ptp_clock_info *ptp, 133 + struct timespec64 *ts, 134 + struct ptp_system_timestamp *sts) 135 + { 136 + struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter, 137 + ptp_clock_info); 138 + u32 high_before; 139 + u32 low; 140 + u32 high; 141 + u64 system_time; 142 + 143 + /* read high dword twice to detect overrun */ 144 + high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); 145 + do { 146 + ptp_read_system_prets(sts); 147 + low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW); 148 + ptp_read_system_postts(sts); 149 + high_before = high; 150 + high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); 151 + } while (high != high_before); 152 + system_time = (((u64)high) << 32) | ((u64)low); 153 + 154 + *ts = ns_to_timespec64(system_time); 155 + 156 + return 0; 157 + } 158 + 159 + static int tsnep_ptp_settime64(struct ptp_clock_info *ptp, 160 + const struct timespec64 *ts) 161 + { 162 + struct tsnep_adapter *adapter = container_of(ptp, struct tsnep_adapter, 163 + ptp_clock_info); 164 + u64 system_time = timespec64_to_ns(ts); 165 + unsigned long flags; 166 + 167 + spin_lock_irqsave(&adapter->ptp_lock, flags); 168 + 169 + /* high dword is buffered in hardware and synchronously written to 170 + * system time when low dword is written 171 + */ 172 + iowrite32(system_time >> 32, adapter->addr + ECM_SYSTEM_TIME_HIGH); 173 + iowrite32(system_time & 0xFFFFFFFF, 174 + adapter->addr + ECM_SYSTEM_TIME_LOW); 175 + 176 + spin_unlock_irqrestore(&adapter->ptp_lock, flags); 177 + 178 + return 0; 179 + } 180 + 181 + int tsnep_ptp_init(struct tsnep_adapter *adapter) 182 + { 183 + int retval = 0; 184 + 185 + adapter->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; 186 + adapter->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF; 187 + 188 + snprintf(adapter->ptp_clock_info.name, 16, "%s", TSNEP); 189 + adapter->ptp_clock_info.owner = THIS_MODULE; 190 + /* at most 2^-1ns adjustment every clock cycle for 8ns clock cycle time, 191 + * stay slightly below because only bits below 2^-1ns are supported 192 + */ 193 + adapter->ptp_clock_info.max_adj = (500000000 / 8 - 1); 194 + adapter->ptp_clock_info.adjfine = tsnep_ptp_adjfine; 195 + adapter->ptp_clock_info.adjtime = tsnep_ptp_adjtime; 196 + adapter->ptp_clock_info.gettimex64 = tsnep_ptp_gettimex64; 197 + adapter->ptp_clock_info.settime64 = tsnep_ptp_settime64; 198 + 199 + spin_lock_init(&adapter->ptp_lock); 200 + 201 + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, 202 + &adapter->pdev->dev); 203 + if (IS_ERR(adapter->ptp_clock)) { 204 + netdev_err(adapter->netdev, "ptp_clock_register failed\n"); 205 + 206 + retval = PTR_ERR(adapter->ptp_clock); 207 + adapter->ptp_clock = NULL; 208 + } else if (adapter->ptp_clock) { 209 + netdev_info(adapter->netdev, "PHC added\n"); 210 + } 211 + 212 + return retval; 213 + } 214 + 215 + void tsnep_ptp_cleanup(struct tsnep_adapter *adapter) 216 + { 217 + if (adapter->ptp_clock) { 218 + ptp_clock_unregister(adapter->ptp_clock); 219 + netdev_info(adapter->netdev, "PHC removed\n"); 220 + } 221 + }
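tsnep_ptp_adjfine() above turns the 16.16 fixed-point ppm value from the PTP core into a rate offset that the hardware applies every 8 ns clock cycle, in units of 2^-32 ns: shifting by 16 - 3 and dividing by 15625 is the same as multiplying by 2^16 * 8 / 10^6. A standalone sketch of that arithmetic, with a value chosen only for the check:

/* Standalone check (not driver code) of the scaled_ppm conversion in
 * tsnep_ptp_adjfine(): scaled_ppm is ppm in 16.16 fixed point, the result
 * is the rate offset in 2^-32 ns applied every 8 ns clock cycle.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static uint64_t tsnep_rate_offset(long scaled_ppm)
{
	uint64_t rate_offset = (uint64_t)labs(scaled_ppm);

	/* x * 2^16 * 8 / 10^6 == x * 2^19 / (2^6 * 15625) == (x << 13) / 15625 */
	rate_offset <<= 16 - 3;
	return rate_offset / 15625;
}

int main(void)
{
	long one_ppm = 65536;	/* 1 ppm in 16.16 fixed point */

	printf("rate offset for 1 ppm:   %" PRIu64 "\n", tsnep_rate_offset(one_ppm));
	printf("expected ~8e-6 ns/cycle: %.0f\n", 8e-6 * 4294967296.0);
	return 0;
}

For 1 ppm this prints 34359, one below 8e-6 ns * 2^32 ~ 34359.7 because of the integer division; the sign handling via ECM_CLOCK_RATE_OFFSET_SIGN stays in the driver.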
+811
drivers/net/ethernet/engleder/tsnep_selftests.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ 3 + 4 + #include "tsnep.h" 5 + 6 + #include <net/pkt_sched.h> 7 + 8 + enum tsnep_test { 9 + TSNEP_TEST_ENABLE = 0, 10 + TSNEP_TEST_TAPRIO, 11 + TSNEP_TEST_TAPRIO_CHANGE, 12 + TSNEP_TEST_TAPRIO_EXTENSION, 13 + }; 14 + 15 + static const char tsnep_test_strings[][ETH_GSTRING_LEN] = { 16 + "Enable timeout (offline)", 17 + "TAPRIO (offline)", 18 + "TAPRIO change (offline)", 19 + "TAPRIO extension (offline)", 20 + }; 21 + 22 + #define TSNEP_TEST_COUNT (sizeof(tsnep_test_strings) / ETH_GSTRING_LEN) 23 + 24 + static bool enable_gc_timeout(struct tsnep_adapter *adapter) 25 + { 26 + iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC); 27 + if (!(ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_ACTIVE)) 28 + return false; 29 + 30 + return true; 31 + } 32 + 33 + static bool gc_timeout_signaled(struct tsnep_adapter *adapter) 34 + { 35 + if (ioread32(adapter->addr + TSNEP_GC) & TSNEP_GC_TIMEOUT_SIGNAL) 36 + return true; 37 + 38 + return false; 39 + } 40 + 41 + static bool ack_gc_timeout(struct tsnep_adapter *adapter) 42 + { 43 + iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC); 44 + if (ioread32(adapter->addr + TSNEP_GC) & 45 + (TSNEP_GC_TIMEOUT_ACTIVE | TSNEP_GC_TIMEOUT_SIGNAL)) 46 + return false; 47 + return true; 48 + } 49 + 50 + static bool enable_gc(struct tsnep_adapter *adapter, bool a) 51 + { 52 + u8 enable; 53 + u8 active; 54 + 55 + if (a) { 56 + enable = TSNEP_GC_ENABLE_A; 57 + active = TSNEP_GC_ACTIVE_A; 58 + } else { 59 + enable = TSNEP_GC_ENABLE_B; 60 + active = TSNEP_GC_ACTIVE_B; 61 + } 62 + 63 + iowrite8(enable, adapter->addr + TSNEP_GC); 64 + if (!(ioread32(adapter->addr + TSNEP_GC) & active)) 65 + return false; 66 + 67 + return true; 68 + } 69 + 70 + static bool disable_gc(struct tsnep_adapter *adapter) 71 + { 72 + iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC); 73 + if (ioread32(adapter->addr + TSNEP_GC) & 74 + (TSNEP_GC_ACTIVE_A | TSNEP_GC_ACTIVE_B)) 75 + return false; 76 + 77 + return true; 78 + } 79 + 80 + static bool gc_delayed_enable(struct tsnep_adapter *adapter, bool a, int delay) 81 + { 82 + u64 before, after; 83 + u32 time; 84 + bool enabled; 85 + 86 + if (!disable_gc(adapter)) 87 + return false; 88 + 89 + before = ktime_get_ns(); 90 + 91 + if (!enable_gc_timeout(adapter)) 92 + return false; 93 + 94 + /* for start time after timeout, the timeout can guarantee, that enable 95 + * is blocked if too late 96 + */ 97 + time = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW); 98 + time += TSNEP_GC_TIMEOUT; 99 + iowrite32(time, adapter->addr + TSNEP_GC_TIME); 100 + 101 + ndelay(delay); 102 + 103 + enabled = enable_gc(adapter, a); 104 + after = ktime_get_ns(); 105 + 106 + if (delay > TSNEP_GC_TIMEOUT) { 107 + /* timeout must have blocked enable */ 108 + if (enabled) 109 + return false; 110 + } else if ((after - before) < TSNEP_GC_TIMEOUT * 14 / 16) { 111 + /* timeout must not have blocked enable */ 112 + if (!enabled) 113 + return false; 114 + } 115 + 116 + if (enabled) { 117 + if (gc_timeout_signaled(adapter)) 118 + return false; 119 + } else { 120 + if (!gc_timeout_signaled(adapter)) 121 + return false; 122 + if (!ack_gc_timeout(adapter)) 123 + return false; 124 + } 125 + 126 + if (!disable_gc(adapter)) 127 + return false; 128 + 129 + return true; 130 + } 131 + 132 + static bool tsnep_test_gc_enable(struct tsnep_adapter *adapter) 133 + { 134 + int i; 135 + 136 + iowrite32(0x80000001, adapter->addr + TSNEP_GCL_A + 0); 137 + 
iowrite32(100000, adapter->addr + TSNEP_GCL_A + 4); 138 + 139 + for (i = 0; i < 200000; i += 100) { 140 + if (!gc_delayed_enable(adapter, true, i)) 141 + return false; 142 + } 143 + 144 + iowrite32(0x80000001, adapter->addr + TSNEP_GCL_B + 0); 145 + iowrite32(100000, adapter->addr + TSNEP_GCL_B + 4); 146 + 147 + for (i = 0; i < 200000; i += 100) { 148 + if (!gc_delayed_enable(adapter, false, i)) 149 + return false; 150 + } 151 + 152 + return true; 153 + } 154 + 155 + static void delay_base_time(struct tsnep_adapter *adapter, 156 + struct tc_taprio_qopt_offload *qopt, s64 ms) 157 + { 158 + u64 system_time; 159 + u64 base_time = ktime_to_ns(qopt->base_time); 160 + u64 n; 161 + 162 + tsnep_get_system_time(adapter, &system_time); 163 + system_time += ms * 1000000; 164 + n = div64_u64(system_time - base_time, qopt->cycle_time); 165 + 166 + qopt->base_time = ktime_add_ns(qopt->base_time, 167 + (n + 1) * qopt->cycle_time); 168 + } 169 + 170 + static void get_gate_state(struct tsnep_adapter *adapter, u32 *gc, u32 *gc_time, 171 + u64 *system_time) 172 + { 173 + u32 time_high_before; 174 + u32 time_low; 175 + u32 time_high; 176 + u32 gc_time_before; 177 + 178 + time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); 179 + *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME); 180 + do { 181 + time_low = ioread32(adapter->addr + ECM_SYSTEM_TIME_LOW); 182 + *gc = ioread32(adapter->addr + TSNEP_GC); 183 + 184 + gc_time_before = *gc_time; 185 + *gc_time = ioread32(adapter->addr + TSNEP_GC_TIME); 186 + time_high_before = time_high; 187 + time_high = ioread32(adapter->addr + ECM_SYSTEM_TIME_HIGH); 188 + } while ((time_high != time_high_before) || 189 + (*gc_time != gc_time_before)); 190 + 191 + *system_time = (((u64)time_high) << 32) | ((u64)time_low); 192 + } 193 + 194 + static int get_operation(struct tsnep_gcl *gcl, u64 system_time, u64 *next) 195 + { 196 + u64 n = div64_u64(system_time - gcl->base_time, gcl->cycle_time); 197 + u64 cycle_start = gcl->base_time + gcl->cycle_time * n; 198 + int i; 199 + 200 + *next = cycle_start; 201 + for (i = 0; i < gcl->count; i++) { 202 + *next += gcl->operation[i].interval; 203 + if (*next > system_time) 204 + break; 205 + } 206 + 207 + return i; 208 + } 209 + 210 + static bool check_gate(struct tsnep_adapter *adapter) 211 + { 212 + u32 gc_time; 213 + u32 gc; 214 + u64 system_time; 215 + struct tsnep_gcl *curr; 216 + struct tsnep_gcl *prev; 217 + u64 next_time; 218 + u8 gate_open; 219 + u8 next_gate_open; 220 + 221 + get_gate_state(adapter, &gc, &gc_time, &system_time); 222 + 223 + if (gc & TSNEP_GC_ACTIVE_A) { 224 + curr = &adapter->gcl[0]; 225 + prev = &adapter->gcl[1]; 226 + } else if (gc & TSNEP_GC_ACTIVE_B) { 227 + curr = &adapter->gcl[1]; 228 + prev = &adapter->gcl[0]; 229 + } else { 230 + return false; 231 + } 232 + if (curr->start_time <= system_time) { 233 + /* GCL is already active */ 234 + int index; 235 + 236 + index = get_operation(curr, system_time, &next_time); 237 + gate_open = curr->operation[index].properties & TSNEP_GCL_MASK; 238 + if (index == curr->count - 1) 239 + index = 0; 240 + else 241 + index++; 242 + next_gate_open = 243 + curr->operation[index].properties & TSNEP_GCL_MASK; 244 + } else if (curr->change) { 245 + /* operation of previous GCL is active */ 246 + int index; 247 + u64 start_before; 248 + u64 n; 249 + 250 + index = get_operation(prev, system_time, &next_time); 251 + next_time = curr->start_time; 252 + start_before = prev->base_time; 253 + n = div64_u64(curr->start_time - start_before, 254 + prev->cycle_time); 255 + 
start_before += n * prev->cycle_time; 256 + if (curr->start_time == start_before) 257 + start_before -= prev->cycle_time; 258 + if (((start_before + prev->cycle_time_extension) >= 259 + curr->start_time) && 260 + (curr->start_time - prev->cycle_time_extension <= 261 + system_time)) { 262 + /* extend */ 263 + index = prev->count - 1; 264 + } 265 + gate_open = prev->operation[index].properties & TSNEP_GCL_MASK; 266 + next_gate_open = 267 + curr->operation[0].properties & TSNEP_GCL_MASK; 268 + } else { 269 + /* GCL is waiting for start */ 270 + next_time = curr->start_time; 271 + gate_open = 0xFF; 272 + next_gate_open = curr->operation[0].properties & TSNEP_GCL_MASK; 273 + } 274 + 275 + if (gc_time != (next_time & 0xFFFFFFFF)) { 276 + dev_err(&adapter->pdev->dev, "gate control time 0x%x!=0x%llx\n", 277 + gc_time, next_time); 278 + return false; 279 + } 280 + if (((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT) != gate_open) { 281 + dev_err(&adapter->pdev->dev, 282 + "gate control open 0x%02x!=0x%02x\n", 283 + ((gc & TSNEP_GC_OPEN) >> TSNEP_GC_OPEN_SHIFT), 284 + gate_open); 285 + return false; 286 + } 287 + if (((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT) != 288 + next_gate_open) { 289 + dev_err(&adapter->pdev->dev, 290 + "gate control next open 0x%02x!=0x%02x\n", 291 + ((gc & TSNEP_GC_NEXT_OPEN) >> TSNEP_GC_NEXT_OPEN_SHIFT), 292 + next_gate_open); 293 + return false; 294 + } 295 + 296 + return true; 297 + } 298 + 299 + static bool check_gate_duration(struct tsnep_adapter *adapter, s64 ms) 300 + { 301 + ktime_t start = ktime_get(); 302 + 303 + do { 304 + if (!check_gate(adapter)) 305 + return false; 306 + } while (ktime_ms_delta(ktime_get(), start) < ms); 307 + 308 + return true; 309 + } 310 + 311 + static bool enable_check_taprio(struct tsnep_adapter *adapter, 312 + struct tc_taprio_qopt_offload *qopt, s64 ms) 313 + { 314 + int retval; 315 + 316 + retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, qopt); 317 + if (retval) 318 + return false; 319 + 320 + if (!check_gate_duration(adapter, ms)) 321 + return false; 322 + 323 + return true; 324 + } 325 + 326 + static bool disable_taprio(struct tsnep_adapter *adapter) 327 + { 328 + struct tc_taprio_qopt_offload qopt; 329 + int retval; 330 + 331 + memset(&qopt, 0, sizeof(qopt)); 332 + qopt.enable = 0; 333 + retval = tsnep_tc_setup(adapter->netdev, TC_SETUP_QDISC_TAPRIO, &qopt); 334 + if (retval) 335 + return false; 336 + 337 + return true; 338 + } 339 + 340 + static bool run_taprio(struct tsnep_adapter *adapter, 341 + struct tc_taprio_qopt_offload *qopt, s64 ms) 342 + { 343 + if (!enable_check_taprio(adapter, qopt, ms)) 344 + return false; 345 + 346 + if (!disable_taprio(adapter)) 347 + return false; 348 + 349 + return true; 350 + } 351 + 352 + static bool tsnep_test_taprio(struct tsnep_adapter *adapter) 353 + { 354 + struct tc_taprio_qopt_offload *qopt; 355 + int i; 356 + 357 + qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL); 358 + if (!qopt) 359 + return false; 360 + for (i = 0; i < 255; i++) 361 + qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; 362 + 363 + qopt->enable = 1; 364 + qopt->base_time = ktime_set(0, 0); 365 + qopt->cycle_time = 1500000; 366 + qopt->cycle_time_extension = 0; 367 + qopt->entries[0].gate_mask = 0x02; 368 + qopt->entries[0].interval = 200000; 369 + qopt->entries[1].gate_mask = 0x03; 370 + qopt->entries[1].interval = 800000; 371 + qopt->entries[2].gate_mask = 0x07; 372 + qopt->entries[2].interval = 240000; 373 + qopt->entries[3].gate_mask = 0x01; 374 + qopt->entries[3].interval = 80000; 
375 + qopt->entries[4].gate_mask = 0x04; 376 + qopt->entries[4].interval = 70000; 377 + qopt->entries[5].gate_mask = 0x06; 378 + qopt->entries[5].interval = 60000; 379 + qopt->entries[6].gate_mask = 0x0F; 380 + qopt->entries[6].interval = 50000; 381 + qopt->num_entries = 7; 382 + if (!run_taprio(adapter, qopt, 100)) 383 + goto failed; 384 + 385 + qopt->enable = 1; 386 + qopt->base_time = ktime_set(0, 0); 387 + qopt->cycle_time = 411854; 388 + qopt->cycle_time_extension = 0; 389 + qopt->entries[0].gate_mask = 0x17; 390 + qopt->entries[0].interval = 23842; 391 + qopt->entries[1].gate_mask = 0x16; 392 + qopt->entries[1].interval = 13482; 393 + qopt->entries[2].gate_mask = 0x15; 394 + qopt->entries[2].interval = 49428; 395 + qopt->entries[3].gate_mask = 0x14; 396 + qopt->entries[3].interval = 38189; 397 + qopt->entries[4].gate_mask = 0x13; 398 + qopt->entries[4].interval = 92321; 399 + qopt->entries[5].gate_mask = 0x12; 400 + qopt->entries[5].interval = 71239; 401 + qopt->entries[6].gate_mask = 0x11; 402 + qopt->entries[6].interval = 69932; 403 + qopt->entries[7].gate_mask = 0x10; 404 + qopt->entries[7].interval = 53421; 405 + qopt->num_entries = 8; 406 + if (!run_taprio(adapter, qopt, 100)) 407 + goto failed; 408 + 409 + qopt->enable = 1; 410 + qopt->base_time = ktime_set(0, 0); 411 + delay_base_time(adapter, qopt, 12); 412 + qopt->cycle_time = 125000; 413 + qopt->cycle_time_extension = 0; 414 + qopt->entries[0].gate_mask = 0x27; 415 + qopt->entries[0].interval = 15000; 416 + qopt->entries[1].gate_mask = 0x26; 417 + qopt->entries[1].interval = 15000; 418 + qopt->entries[2].gate_mask = 0x25; 419 + qopt->entries[2].interval = 12500; 420 + qopt->entries[3].gate_mask = 0x24; 421 + qopt->entries[3].interval = 17500; 422 + qopt->entries[4].gate_mask = 0x23; 423 + qopt->entries[4].interval = 10000; 424 + qopt->entries[5].gate_mask = 0x22; 425 + qopt->entries[5].interval = 11000; 426 + qopt->entries[6].gate_mask = 0x21; 427 + qopt->entries[6].interval = 9000; 428 + qopt->entries[7].gate_mask = 0x20; 429 + qopt->entries[7].interval = 10000; 430 + qopt->entries[8].gate_mask = 0x20; 431 + qopt->entries[8].interval = 12500; 432 + qopt->entries[9].gate_mask = 0x20; 433 + qopt->entries[9].interval = 12500; 434 + qopt->num_entries = 10; 435 + if (!run_taprio(adapter, qopt, 100)) 436 + goto failed; 437 + 438 + kfree(qopt); 439 + 440 + return true; 441 + 442 + failed: 443 + disable_taprio(adapter); 444 + kfree(qopt); 445 + 446 + return false; 447 + } 448 + 449 + static bool tsnep_test_taprio_change(struct tsnep_adapter *adapter) 450 + { 451 + struct tc_taprio_qopt_offload *qopt; 452 + int i; 453 + 454 + qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL); 455 + if (!qopt) 456 + return false; 457 + for (i = 0; i < 255; i++) 458 + qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; 459 + 460 + qopt->enable = 1; 461 + qopt->base_time = ktime_set(0, 0); 462 + qopt->cycle_time = 100000; 463 + qopt->cycle_time_extension = 0; 464 + qopt->entries[0].gate_mask = 0x30; 465 + qopt->entries[0].interval = 20000; 466 + qopt->entries[1].gate_mask = 0x31; 467 + qopt->entries[1].interval = 80000; 468 + qopt->num_entries = 2; 469 + if (!enable_check_taprio(adapter, qopt, 100)) 470 + goto failed; 471 + 472 + /* change to identical */ 473 + if (!enable_check_taprio(adapter, qopt, 100)) 474 + goto failed; 475 + delay_base_time(adapter, qopt, 17); 476 + if (!enable_check_taprio(adapter, qopt, 100)) 477 + goto failed; 478 + 479 + /* change to same cycle time */ 480 + qopt->base_time = ktime_set(0, 0); 481 + 
qopt->entries[0].gate_mask = 0x42; 482 + qopt->entries[1].gate_mask = 0x43; 483 + delay_base_time(adapter, qopt, 2); 484 + if (!enable_check_taprio(adapter, qopt, 100)) 485 + goto failed; 486 + qopt->base_time = ktime_set(0, 0); 487 + qopt->entries[0].gate_mask = 0x54; 488 + qopt->entries[0].interval = 33333; 489 + qopt->entries[1].gate_mask = 0x55; 490 + qopt->entries[1].interval = 66667; 491 + delay_base_time(adapter, qopt, 23); 492 + if (!enable_check_taprio(adapter, qopt, 100)) 493 + goto failed; 494 + qopt->base_time = ktime_set(0, 0); 495 + qopt->entries[0].gate_mask = 0x66; 496 + qopt->entries[0].interval = 50000; 497 + qopt->entries[1].gate_mask = 0x67; 498 + qopt->entries[1].interval = 25000; 499 + qopt->entries[2].gate_mask = 0x68; 500 + qopt->entries[2].interval = 25000; 501 + qopt->num_entries = 3; 502 + delay_base_time(adapter, qopt, 11); 503 + if (!enable_check_taprio(adapter, qopt, 100)) 504 + goto failed; 505 + 506 + /* change to multiple of cycle time */ 507 + qopt->base_time = ktime_set(0, 0); 508 + qopt->cycle_time = 200000; 509 + qopt->entries[0].gate_mask = 0x79; 510 + qopt->entries[0].interval = 50000; 511 + qopt->entries[1].gate_mask = 0x7A; 512 + qopt->entries[1].interval = 150000; 513 + qopt->num_entries = 2; 514 + delay_base_time(adapter, qopt, 11); 515 + if (!enable_check_taprio(adapter, qopt, 100)) 516 + goto failed; 517 + qopt->base_time = ktime_set(0, 0); 518 + qopt->cycle_time = 1000000; 519 + qopt->entries[0].gate_mask = 0x7B; 520 + qopt->entries[0].interval = 125000; 521 + qopt->entries[1].gate_mask = 0x7C; 522 + qopt->entries[1].interval = 250000; 523 + qopt->entries[2].gate_mask = 0x7D; 524 + qopt->entries[2].interval = 375000; 525 + qopt->entries[3].gate_mask = 0x7E; 526 + qopt->entries[3].interval = 250000; 527 + qopt->num_entries = 4; 528 + delay_base_time(adapter, qopt, 3); 529 + if (!enable_check_taprio(adapter, qopt, 100)) 530 + goto failed; 531 + 532 + /* change to shorter cycle time */ 533 + qopt->base_time = ktime_set(0, 0); 534 + qopt->cycle_time = 333333; 535 + qopt->entries[0].gate_mask = 0x8F; 536 + qopt->entries[0].interval = 166666; 537 + qopt->entries[1].gate_mask = 0x80; 538 + qopt->entries[1].interval = 166667; 539 + qopt->num_entries = 2; 540 + delay_base_time(adapter, qopt, 11); 541 + if (!enable_check_taprio(adapter, qopt, 100)) 542 + goto failed; 543 + qopt->base_time = ktime_set(0, 0); 544 + qopt->cycle_time = 62500; 545 + qopt->entries[0].gate_mask = 0x81; 546 + qopt->entries[0].interval = 31250; 547 + qopt->entries[1].gate_mask = 0x82; 548 + qopt->entries[1].interval = 15625; 549 + qopt->entries[2].gate_mask = 0x83; 550 + qopt->entries[2].interval = 15625; 551 + qopt->num_entries = 3; 552 + delay_base_time(adapter, qopt, 1); 553 + if (!enable_check_taprio(adapter, qopt, 100)) 554 + goto failed; 555 + 556 + /* change to longer cycle time */ 557 + qopt->base_time = ktime_set(0, 0); 558 + qopt->cycle_time = 400000; 559 + qopt->entries[0].gate_mask = 0x84; 560 + qopt->entries[0].interval = 100000; 561 + qopt->entries[1].gate_mask = 0x85; 562 + qopt->entries[1].interval = 100000; 563 + qopt->entries[2].gate_mask = 0x86; 564 + qopt->entries[2].interval = 100000; 565 + qopt->entries[3].gate_mask = 0x87; 566 + qopt->entries[3].interval = 100000; 567 + qopt->num_entries = 4; 568 + delay_base_time(adapter, qopt, 7); 569 + if (!enable_check_taprio(adapter, qopt, 100)) 570 + goto failed; 571 + qopt->base_time = ktime_set(0, 0); 572 + qopt->cycle_time = 1700000; 573 + qopt->entries[0].gate_mask = 0x88; 574 + qopt->entries[0].interval = 200000; 
575 + qopt->entries[1].gate_mask = 0x89; 576 + qopt->entries[1].interval = 300000; 577 + qopt->entries[2].gate_mask = 0x8A; 578 + qopt->entries[2].interval = 600000; 579 + qopt->entries[3].gate_mask = 0x8B; 580 + qopt->entries[3].interval = 100000; 581 + qopt->entries[4].gate_mask = 0x8C; 582 + qopt->entries[4].interval = 500000; 583 + qopt->num_entries = 5; 584 + delay_base_time(adapter, qopt, 6); 585 + if (!enable_check_taprio(adapter, qopt, 100)) 586 + goto failed; 587 + 588 + if (!disable_taprio(adapter)) 589 + goto failed; 590 + 591 + kfree(qopt); 592 + 593 + return true; 594 + 595 + failed: 596 + disable_taprio(adapter); 597 + kfree(qopt); 598 + 599 + return false; 600 + } 601 + 602 + static bool tsnep_test_taprio_extension(struct tsnep_adapter *adapter) 603 + { 604 + struct tc_taprio_qopt_offload *qopt; 605 + int i; 606 + 607 + qopt = kzalloc(struct_size(qopt, entries, 255), GFP_KERNEL); 608 + if (!qopt) 609 + return false; 610 + for (i = 0; i < 255; i++) 611 + qopt->entries[i].command = TC_TAPRIO_CMD_SET_GATES; 612 + 613 + qopt->enable = 1; 614 + qopt->base_time = ktime_set(0, 0); 615 + qopt->cycle_time = 100000; 616 + qopt->cycle_time_extension = 50000; 617 + qopt->entries[0].gate_mask = 0x90; 618 + qopt->entries[0].interval = 20000; 619 + qopt->entries[1].gate_mask = 0x91; 620 + qopt->entries[1].interval = 80000; 621 + qopt->num_entries = 2; 622 + if (!enable_check_taprio(adapter, qopt, 100)) 623 + goto failed; 624 + 625 + /* change to different phase */ 626 + qopt->base_time = ktime_set(0, 50000); 627 + qopt->entries[0].gate_mask = 0x92; 628 + qopt->entries[0].interval = 33000; 629 + qopt->entries[1].gate_mask = 0x93; 630 + qopt->entries[1].interval = 67000; 631 + qopt->num_entries = 2; 632 + delay_base_time(adapter, qopt, 2); 633 + if (!enable_check_taprio(adapter, qopt, 100)) 634 + goto failed; 635 + 636 + /* change to different phase and longer cycle time */ 637 + qopt->base_time = ktime_set(0, 0); 638 + qopt->cycle_time = 1000000; 639 + qopt->cycle_time_extension = 700000; 640 + qopt->entries[0].gate_mask = 0x94; 641 + qopt->entries[0].interval = 400000; 642 + qopt->entries[1].gate_mask = 0x95; 643 + qopt->entries[1].interval = 600000; 644 + qopt->num_entries = 2; 645 + delay_base_time(adapter, qopt, 7); 646 + if (!enable_check_taprio(adapter, qopt, 100)) 647 + goto failed; 648 + qopt->base_time = ktime_set(0, 700000); 649 + qopt->cycle_time = 2000000; 650 + qopt->cycle_time_extension = 1900000; 651 + qopt->entries[0].gate_mask = 0x96; 652 + qopt->entries[0].interval = 400000; 653 + qopt->entries[1].gate_mask = 0x97; 654 + qopt->entries[1].interval = 1600000; 655 + qopt->num_entries = 2; 656 + delay_base_time(adapter, qopt, 9); 657 + if (!enable_check_taprio(adapter, qopt, 100)) 658 + goto failed; 659 + 660 + /* change to different phase and shorter cycle time */ 661 + qopt->base_time = ktime_set(0, 0); 662 + qopt->cycle_time = 1500000; 663 + qopt->cycle_time_extension = 700000; 664 + qopt->entries[0].gate_mask = 0x98; 665 + qopt->entries[0].interval = 400000; 666 + qopt->entries[1].gate_mask = 0x99; 667 + qopt->entries[1].interval = 600000; 668 + qopt->entries[2].gate_mask = 0x9A; 669 + qopt->entries[2].interval = 500000; 670 + qopt->num_entries = 3; 671 + delay_base_time(adapter, qopt, 3); 672 + if (!enable_check_taprio(adapter, qopt, 100)) 673 + goto failed; 674 + qopt->base_time = ktime_set(0, 100000); 675 + qopt->cycle_time = 500000; 676 + qopt->cycle_time_extension = 300000; 677 + qopt->entries[0].gate_mask = 0x9B; 678 + qopt->entries[0].interval = 150000; 679 + 
qopt->entries[1].gate_mask = 0x9C; 680 + qopt->entries[1].interval = 350000; 681 + qopt->num_entries = 2; 682 + delay_base_time(adapter, qopt, 9); 683 + if (!enable_check_taprio(adapter, qopt, 100)) 684 + goto failed; 685 + 686 + /* change to different cycle time */ 687 + qopt->base_time = ktime_set(0, 0); 688 + qopt->cycle_time = 1000000; 689 + qopt->cycle_time_extension = 700000; 690 + qopt->entries[0].gate_mask = 0xAD; 691 + qopt->entries[0].interval = 400000; 692 + qopt->entries[1].gate_mask = 0xAE; 693 + qopt->entries[1].interval = 300000; 694 + qopt->entries[2].gate_mask = 0xAF; 695 + qopt->entries[2].interval = 300000; 696 + qopt->num_entries = 3; 697 + if (!enable_check_taprio(adapter, qopt, 100)) 698 + goto failed; 699 + qopt->base_time = ktime_set(0, 0); 700 + qopt->cycle_time = 400000; 701 + qopt->cycle_time_extension = 100000; 702 + qopt->entries[0].gate_mask = 0xA0; 703 + qopt->entries[0].interval = 200000; 704 + qopt->entries[1].gate_mask = 0xA1; 705 + qopt->entries[1].interval = 200000; 706 + qopt->num_entries = 2; 707 + delay_base_time(adapter, qopt, 19); 708 + if (!enable_check_taprio(adapter, qopt, 100)) 709 + goto failed; 710 + qopt->base_time = ktime_set(0, 0); 711 + qopt->cycle_time = 500000; 712 + qopt->cycle_time_extension = 499999; 713 + qopt->entries[0].gate_mask = 0xB2; 714 + qopt->entries[0].interval = 100000; 715 + qopt->entries[1].gate_mask = 0xB3; 716 + qopt->entries[1].interval = 100000; 717 + qopt->entries[2].gate_mask = 0xB4; 718 + qopt->entries[2].interval = 100000; 719 + qopt->entries[3].gate_mask = 0xB5; 720 + qopt->entries[3].interval = 200000; 721 + qopt->num_entries = 4; 722 + delay_base_time(adapter, qopt, 19); 723 + if (!enable_check_taprio(adapter, qopt, 100)) 724 + goto failed; 725 + qopt->base_time = ktime_set(0, 0); 726 + qopt->cycle_time = 6000000; 727 + qopt->cycle_time_extension = 5999999; 728 + qopt->entries[0].gate_mask = 0xC6; 729 + qopt->entries[0].interval = 1000000; 730 + qopt->entries[1].gate_mask = 0xC7; 731 + qopt->entries[1].interval = 1000000; 732 + qopt->entries[2].gate_mask = 0xC8; 733 + qopt->entries[2].interval = 1000000; 734 + qopt->entries[3].gate_mask = 0xC9; 735 + qopt->entries[3].interval = 1500000; 736 + qopt->entries[4].gate_mask = 0xCA; 737 + qopt->entries[4].interval = 1500000; 738 + qopt->num_entries = 5; 739 + delay_base_time(adapter, qopt, 1); 740 + if (!enable_check_taprio(adapter, qopt, 100)) 741 + goto failed; 742 + 743 + if (!disable_taprio(adapter)) 744 + goto failed; 745 + 746 + kfree(qopt); 747 + 748 + return true; 749 + 750 + failed: 751 + disable_taprio(adapter); 752 + kfree(qopt); 753 + 754 + return false; 755 + } 756 + 757 + int tsnep_ethtool_get_test_count(void) 758 + { 759 + return TSNEP_TEST_COUNT; 760 + } 761 + 762 + void tsnep_ethtool_get_test_strings(u8 *data) 763 + { 764 + memcpy(data, tsnep_test_strings, sizeof(tsnep_test_strings)); 765 + } 766 + 767 + void tsnep_ethtool_self_test(struct net_device *netdev, 768 + struct ethtool_test *eth_test, u64 *data) 769 + { 770 + struct tsnep_adapter *adapter = netdev_priv(netdev); 771 + 772 + eth_test->len = TSNEP_TEST_COUNT; 773 + 774 + if (eth_test->flags != ETH_TEST_FL_OFFLINE) { 775 + /* no tests are done online */ 776 + data[TSNEP_TEST_ENABLE] = 0; 777 + data[TSNEP_TEST_TAPRIO] = 0; 778 + data[TSNEP_TEST_TAPRIO_CHANGE] = 0; 779 + data[TSNEP_TEST_TAPRIO_EXTENSION] = 0; 780 + 781 + return; 782 + } 783 + 784 + if (tsnep_test_gc_enable(adapter)) { 785 + data[TSNEP_TEST_ENABLE] = 0; 786 + } else { 787 + eth_test->flags |= ETH_TEST_FL_FAILED; 788 + 
data[TSNEP_TEST_ENABLE] = 1; 789 + } 790 + 791 + if (tsnep_test_taprio(adapter)) { 792 + data[TSNEP_TEST_TAPRIO] = 0; 793 + } else { 794 + eth_test->flags |= ETH_TEST_FL_FAILED; 795 + data[TSNEP_TEST_TAPRIO] = 1; 796 + } 797 + 798 + if (tsnep_test_taprio_change(adapter)) { 799 + data[TSNEP_TEST_TAPRIO_CHANGE] = 0; 800 + } else { 801 + eth_test->flags |= ETH_TEST_FL_FAILED; 802 + data[TSNEP_TEST_TAPRIO_CHANGE] = 1; 803 + } 804 + 805 + if (tsnep_test_taprio_extension(adapter)) { 806 + data[TSNEP_TEST_TAPRIO_EXTENSION] = 0; 807 + } else { 808 + eth_test->flags |= ETH_TEST_FL_FAILED; 809 + data[TSNEP_TEST_TAPRIO_EXTENSION] = 1; 810 + } 811 + }
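The TAPRIO self tests above repeatedly call delay_base_time() to push qopt->base_time forward by whole cycles so that it lands in the first cycle starting after "now + ms milliseconds", keeping every schedule change ahead of the hardware clock. A small sketch of that calculation with made-up times:

/* Sketch (not driver code) of the base time adjustment done by
 * delay_base_time() in the self tests: move base_time forward by whole
 * cycles so it lands in the first cycle starting after "now + ms".
 * All times below are made up.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t delay_base_time(uint64_t now_ns, uint64_t base_time_ns,
				uint64_t cycle_time_ns, uint64_t ms)
{
	uint64_t target = now_ns + ms * 1000000ULL;
	uint64_t n = (target - base_time_ns) / cycle_time_ns;

	return base_time_ns + (n + 1) * cycle_time_ns;
}

int main(void)
{
	uint64_t base = 0;		/* qopt->base_time of the test schedule */
	uint64_t cycle = 125000;	/* 125 us cycle used by one of the tests */
	uint64_t now = 3000000123ULL;	/* hypothetical PHC time in ns */
	uint64_t delayed = delay_base_time(now, base, cycle, 12);

	printf("new base time:    %" PRIu64 " ns\n", delayed);
	printf("lead over target: %" PRIu64 " ns\n",
	       delayed - (now + 12 * 1000000ULL));
	return 0;
}

Because only whole cycles are added, the delayed base time keeps the phase chosen by the test, for example the 50000 ns offset used in the extension tests.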
+443
drivers/net/ethernet/engleder/tsnep_tc.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */ 3 + 4 + #include "tsnep.h" 5 + 6 + #include <net/pkt_sched.h> 7 + 8 + /* save one operation at the end for additional operation at list change */ 9 + #define TSNEP_MAX_GCL_NUM (TSNEP_GCL_COUNT - 1) 10 + 11 + static int tsnep_validate_gcl(struct tc_taprio_qopt_offload *qopt) 12 + { 13 + int i; 14 + u64 cycle_time; 15 + 16 + if (!qopt->cycle_time) 17 + return -ERANGE; 18 + if (qopt->num_entries > TSNEP_MAX_GCL_NUM) 19 + return -EINVAL; 20 + cycle_time = 0; 21 + for (i = 0; i < qopt->num_entries; i++) { 22 + if (qopt->entries[i].command != TC_TAPRIO_CMD_SET_GATES) 23 + return -EINVAL; 24 + if (qopt->entries[i].gate_mask & ~TSNEP_GCL_MASK) 25 + return -EINVAL; 26 + if (qopt->entries[i].interval < TSNEP_GCL_MIN_INTERVAL) 27 + return -EINVAL; 28 + cycle_time += qopt->entries[i].interval; 29 + } 30 + if (qopt->cycle_time != cycle_time) 31 + return -EINVAL; 32 + if (qopt->cycle_time_extension >= qopt->cycle_time) 33 + return -EINVAL; 34 + 35 + return 0; 36 + } 37 + 38 + static void tsnep_write_gcl_operation(struct tsnep_gcl *gcl, int index, 39 + u32 properties, u32 interval, bool flush) 40 + { 41 + void __iomem *addr = gcl->addr + 42 + sizeof(struct tsnep_gcl_operation) * index; 43 + 44 + gcl->operation[index].properties = properties; 45 + gcl->operation[index].interval = interval; 46 + 47 + iowrite32(properties, addr); 48 + iowrite32(interval, addr + sizeof(u32)); 49 + 50 + if (flush) { 51 + /* flush write with read access */ 52 + ioread32(addr); 53 + } 54 + } 55 + 56 + static u64 tsnep_change_duration(struct tsnep_gcl *gcl, int index) 57 + { 58 + u64 duration; 59 + int count; 60 + 61 + /* change needs to be triggered one or two operations before start of 62 + * new gate control list 63 + * - change is triggered at start of operation (minimum one operation) 64 + * - operation with adjusted interval is inserted on demand to exactly 65 + * meet the start of the new gate control list (optional) 66 + * 67 + * additionally properties are read directly after start of previous 68 + * operation 69 + * 70 + * therefore, three operations needs to be considered for the limit 71 + */ 72 + duration = 0; 73 + count = 3; 74 + while (count) { 75 + duration += gcl->operation[index].interval; 76 + 77 + index--; 78 + if (index < 0) 79 + index = gcl->count - 1; 80 + 81 + count--; 82 + } 83 + 84 + return duration; 85 + } 86 + 87 + static void tsnep_write_gcl(struct tsnep_gcl *gcl, 88 + struct tc_taprio_qopt_offload *qopt) 89 + { 90 + int i; 91 + u32 properties; 92 + u64 extend; 93 + u64 cut; 94 + 95 + gcl->base_time = ktime_to_ns(qopt->base_time); 96 + gcl->cycle_time = qopt->cycle_time; 97 + gcl->cycle_time_extension = qopt->cycle_time_extension; 98 + 99 + for (i = 0; i < qopt->num_entries; i++) { 100 + properties = qopt->entries[i].gate_mask; 101 + if (i == (qopt->num_entries - 1)) 102 + properties |= TSNEP_GCL_LAST; 103 + 104 + tsnep_write_gcl_operation(gcl, i, properties, 105 + qopt->entries[i].interval, true); 106 + } 107 + gcl->count = qopt->num_entries; 108 + 109 + /* calculate change limit; i.e., the time needed between enable and 110 + * start of new gate control list 111 + */ 112 + 113 + /* case 1: extend cycle time for change 114 + * - change duration of last operation 115 + * - cycle time extension 116 + */ 117 + extend = tsnep_change_duration(gcl, gcl->count - 1); 118 + extend += gcl->cycle_time_extension; 119 + 120 + /* case 2: cut cycle time for change 121 + * - maximum change 
duration 122 + */ 123 + cut = 0; 124 + for (i = 0; i < gcl->count; i++) 125 + cut = max(cut, tsnep_change_duration(gcl, i)); 126 + 127 + /* use maximum, because the actual case (extend or cut) can be 128 + * determined only after limit is known (chicken-and-egg problem) 129 + */ 130 + gcl->change_limit = max(extend, cut); 131 + } 132 + 133 + static u64 tsnep_gcl_start_after(struct tsnep_gcl *gcl, u64 limit) 134 + { 135 + u64 start = gcl->base_time; 136 + u64 n; 137 + 138 + if (start <= limit) { 139 + n = div64_u64(limit - start, gcl->cycle_time); 140 + start += (n + 1) * gcl->cycle_time; 141 + } 142 + 143 + return start; 144 + } 145 + 146 + static u64 tsnep_gcl_start_before(struct tsnep_gcl *gcl, u64 limit) 147 + { 148 + u64 start = gcl->base_time; 149 + u64 n; 150 + 151 + n = div64_u64(limit - start, gcl->cycle_time); 152 + start += n * gcl->cycle_time; 153 + if (start == limit) 154 + start -= gcl->cycle_time; 155 + 156 + return start; 157 + } 158 + 159 + static u64 tsnep_set_gcl_change(struct tsnep_gcl *gcl, int index, u64 change, 160 + bool insert) 161 + { 162 + /* previous operation triggers change and properties are evaluated at 163 + * start of operation 164 + */ 165 + if (index == 0) 166 + index = gcl->count - 1; 167 + else 168 + index = index - 1; 169 + change -= gcl->operation[index].interval; 170 + 171 + /* optionally change to new list with additional operation in between */ 172 + if (insert) { 173 + void __iomem *addr = gcl->addr + 174 + sizeof(struct tsnep_gcl_operation) * index; 175 + 176 + gcl->operation[index].properties |= TSNEP_GCL_INSERT; 177 + iowrite32(gcl->operation[index].properties, addr); 178 + } 179 + 180 + return change; 181 + } 182 + 183 + static void tsnep_clean_gcl(struct tsnep_gcl *gcl) 184 + { 185 + int i; 186 + u32 mask = TSNEP_GCL_LAST | TSNEP_GCL_MASK; 187 + void __iomem *addr; 188 + 189 + /* search for insert operation and reset properties */ 190 + for (i = 0; i < gcl->count; i++) { 191 + if (gcl->operation[i].properties & ~mask) { 192 + addr = gcl->addr + 193 + sizeof(struct tsnep_gcl_operation) * i; 194 + 195 + gcl->operation[i].properties &= mask; 196 + iowrite32(gcl->operation[i].properties, addr); 197 + 198 + break; 199 + } 200 + } 201 + } 202 + 203 + static u64 tsnep_insert_gcl_operation(struct tsnep_gcl *gcl, int ref, 204 + u64 change, u32 interval) 205 + { 206 + u32 properties; 207 + 208 + properties = gcl->operation[ref].properties & TSNEP_GCL_MASK; 209 + /* change to new list directly after inserted operation */ 210 + properties |= TSNEP_GCL_CHANGE; 211 + 212 + /* last operation of list is reserved to insert operation */ 213 + tsnep_write_gcl_operation(gcl, TSNEP_GCL_COUNT - 1, properties, 214 + interval, false); 215 + 216 + return tsnep_set_gcl_change(gcl, ref, change, true); 217 + } 218 + 219 + static u64 tsnep_extend_gcl(struct tsnep_gcl *gcl, u64 start, u32 extension) 220 + { 221 + int ref = gcl->count - 1; 222 + u32 interval = gcl->operation[ref].interval + extension; 223 + 224 + start -= gcl->operation[ref].interval; 225 + 226 + return tsnep_insert_gcl_operation(gcl, ref, start, interval); 227 + } 228 + 229 + static u64 tsnep_cut_gcl(struct tsnep_gcl *gcl, u64 start, u64 cycle_time) 230 + { 231 + u64 sum = 0; 232 + int i; 233 + 234 + /* find operation which shall be cutted */ 235 + for (i = 0; i < gcl->count; i++) { 236 + u64 sum_tmp = sum + gcl->operation[i].interval; 237 + u64 interval; 238 + 239 + /* sum up operations as long as cycle time is not exceeded */ 240 + if (sum_tmp > cycle_time) 241 + break; 242 + 243 + /* remaining interval 
must be big enough for hardware */ 244 + interval = cycle_time - sum_tmp; 245 + if (interval > 0 && interval < TSNEP_GCL_MIN_INTERVAL) 246 + break; 247 + 248 + sum = sum_tmp; 249 + } 250 + if (sum == cycle_time) { 251 + /* no need to cut operation itself or whole cycle 252 + * => change exactly at operation 253 + */ 254 + return tsnep_set_gcl_change(gcl, i, start + sum, false); 255 + } 256 + return tsnep_insert_gcl_operation(gcl, i, start + sum, 257 + cycle_time - sum); 258 + } 259 + 260 + static int tsnep_enable_gcl(struct tsnep_adapter *adapter, 261 + struct tsnep_gcl *gcl, struct tsnep_gcl *curr) 262 + { 263 + u64 system_time; 264 + u64 timeout; 265 + u64 limit; 266 + 267 + /* estimate timeout limit after timeout enable, actually timeout limit 268 + * in hardware will be earlier than estimate so we are on the safe side 269 + */ 270 + tsnep_get_system_time(adapter, &system_time); 271 + timeout = system_time + TSNEP_GC_TIMEOUT; 272 + 273 + if (curr) 274 + limit = timeout + curr->change_limit; 275 + else 276 + limit = timeout; 277 + 278 + gcl->start_time = tsnep_gcl_start_after(gcl, limit); 279 + 280 + /* gate control time register is only 32bit => time shall be in the near 281 + * future (no driver support for far future implemented) 282 + */ 283 + if ((gcl->start_time - system_time) >= U32_MAX) 284 + return -EAGAIN; 285 + 286 + if (curr) { 287 + /* change gate control list */ 288 + u64 last; 289 + u64 change; 290 + 291 + last = tsnep_gcl_start_before(curr, gcl->start_time); 292 + if ((last + curr->cycle_time) == gcl->start_time) 293 + change = tsnep_cut_gcl(curr, last, 294 + gcl->start_time - last); 295 + else if (((gcl->start_time - last) <= 296 + curr->cycle_time_extension) || 297 + ((gcl->start_time - last) <= TSNEP_GCL_MIN_INTERVAL)) 298 + change = tsnep_extend_gcl(curr, last, 299 + gcl->start_time - last); 300 + else 301 + change = tsnep_cut_gcl(curr, last, 302 + gcl->start_time - last); 303 + 304 + WARN_ON(change <= timeout); 305 + gcl->change = true; 306 + iowrite32(change & 0xFFFFFFFF, adapter->addr + TSNEP_GC_CHANGE); 307 + } else { 308 + /* start gate control list */ 309 + WARN_ON(gcl->start_time <= timeout); 310 + gcl->change = false; 311 + iowrite32(gcl->start_time & 0xFFFFFFFF, 312 + adapter->addr + TSNEP_GC_TIME); 313 + } 314 + 315 + return 0; 316 + } 317 + 318 + static int tsnep_taprio(struct tsnep_adapter *adapter, 319 + struct tc_taprio_qopt_offload *qopt) 320 + { 321 + struct tsnep_gcl *gcl; 322 + struct tsnep_gcl *curr; 323 + int retval; 324 + 325 + if (!adapter->gate_control) 326 + return -EOPNOTSUPP; 327 + 328 + if (!qopt->enable) { 329 + /* disable gate control if active */ 330 + mutex_lock(&adapter->gate_control_lock); 331 + 332 + if (adapter->gate_control_active) { 333 + iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC); 334 + adapter->gate_control_active = false; 335 + } 336 + 337 + mutex_unlock(&adapter->gate_control_lock); 338 + 339 + return 0; 340 + } 341 + 342 + retval = tsnep_validate_gcl(qopt); 343 + if (retval) 344 + return retval; 345 + 346 + mutex_lock(&adapter->gate_control_lock); 347 + 348 + gcl = &adapter->gcl[adapter->next_gcl]; 349 + tsnep_write_gcl(gcl, qopt); 350 + 351 + /* select current gate control list if active */ 352 + if (adapter->gate_control_active) { 353 + if (adapter->next_gcl == 0) 354 + curr = &adapter->gcl[1]; 355 + else 356 + curr = &adapter->gcl[0]; 357 + } else { 358 + curr = NULL; 359 + } 360 + 361 + for (;;) { 362 + /* start timeout which discards late enable, this helps ensuring 363 + * that start/change time are in the 
future at enable 364 + */ 365 + iowrite8(TSNEP_GC_ENABLE_TIMEOUT, adapter->addr + TSNEP_GC); 366 + 367 + retval = tsnep_enable_gcl(adapter, gcl, curr); 368 + if (retval) { 369 + mutex_unlock(&adapter->gate_control_lock); 370 + 371 + return retval; 372 + } 373 + 374 + /* enable gate control list */ 375 + if (adapter->next_gcl == 0) 376 + iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC); 377 + else 378 + iowrite8(TSNEP_GC_ENABLE_B, adapter->addr + TSNEP_GC); 379 + 380 + /* done if timeout did not happen */ 381 + if (!(ioread32(adapter->addr + TSNEP_GC) & 382 + TSNEP_GC_TIMEOUT_SIGNAL)) 383 + break; 384 + 385 + /* timeout is acknowledged with any enable */ 386 + iowrite8(TSNEP_GC_ENABLE_A, adapter->addr + TSNEP_GC); 387 + 388 + if (curr) 389 + tsnep_clean_gcl(curr); 390 + 391 + /* retry because of timeout */ 392 + } 393 + 394 + adapter->gate_control_active = true; 395 + 396 + if (adapter->next_gcl == 0) 397 + adapter->next_gcl = 1; 398 + else 399 + adapter->next_gcl = 0; 400 + 401 + mutex_unlock(&adapter->gate_control_lock); 402 + 403 + return 0; 404 + } 405 + 406 + int tsnep_tc_setup(struct net_device *netdev, enum tc_setup_type type, 407 + void *type_data) 408 + { 409 + struct tsnep_adapter *adapter = netdev_priv(netdev); 410 + 411 + switch (type) { 412 + case TC_SETUP_QDISC_TAPRIO: 413 + return tsnep_taprio(adapter, type_data); 414 + default: 415 + return -EOPNOTSUPP; 416 + } 417 + } 418 + 419 + int tsnep_tc_init(struct tsnep_adapter *adapter) 420 + { 421 + if (!adapter->gate_control) 422 + return 0; 423 + 424 + /* open all gates */ 425 + iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC); 426 + iowrite32(TSNEP_GC_OPEN | TSNEP_GC_NEXT_OPEN, adapter->addr + TSNEP_GC); 427 + 428 + adapter->gcl[0].addr = adapter->addr + TSNEP_GCL_A; 429 + adapter->gcl[1].addr = adapter->addr + TSNEP_GCL_B; 430 + 431 + return 0; 432 + } 433 + 434 + void tsnep_tc_cleanup(struct tsnep_adapter *adapter) 435 + { 436 + if (!adapter->gate_control) 437 + return; 438 + 439 + if (adapter->gate_control_active) { 440 + iowrite8(TSNEP_GC_DISABLE, adapter->addr + TSNEP_GC); 441 + adapter->gate_control_active = false; 442 + } 443 + }
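When a new gate control list is enabled while another one is running, tsnep_enable_gcl() above picks a start time that is a whole number of cycles after the new list's base time and lies beyond "timeout + change limit", then looks up the last cycle start of the running list before that point. A minimal userspace sketch of the two cycle-boundary helpers, with made-up times:

/* Userspace sketch (not driver code) of the cycle-boundary helpers
 * tsnep_gcl_start_after() and tsnep_gcl_start_before(): the first cycle
 * start after a limit and the last one strictly before it. Values are
 * made up; the limit is assumed to lie after the base time.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct gcl {
	uint64_t base_time;
	uint64_t cycle_time;
};

static uint64_t gcl_start_after(const struct gcl *gcl, uint64_t limit)
{
	uint64_t start = gcl->base_time;

	if (start <= limit)
		start += ((limit - start) / gcl->cycle_time + 1) * gcl->cycle_time;

	return start;
}

static uint64_t gcl_start_before(const struct gcl *gcl, uint64_t limit)
{
	uint64_t start = gcl->base_time;
	uint64_t n = (limit - start) / gcl->cycle_time;

	start += n * gcl->cycle_time;
	if (start == limit)	/* "before" must be strictly before the limit */
		start -= gcl->cycle_time;

	return start;
}

int main(void)
{
	struct gcl gcl = { .base_time = 1000, .cycle_time = 100000 };
	uint64_t limit = 2000000;	/* stand-in for "timeout + change limit" */

	printf("start after:  %" PRIu64 "\n", gcl_start_after(&gcl, limit));
	printf("start before: %" PRIu64 "\n", gcl_start_before(&gcl, limit));
	return 0;
}

In the driver, the gap between the new start time and this "start before" value of the running list is then compared against its cycle time and cycle_time_extension to choose between tsnep_cut_gcl() and tsnep_extend_gcl().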