Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v4.9 2998 lines 86 kB view raw
1/* Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver 2 * 3 * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC). 4 * This version introduced a lot of changes which breaks backwards 5 * compatibility the non-QoS IP from Synopsys (used in the ST Micro drivers). 6 * Some fields differ between version 4.00a and 4.10a, mainly the interrupt 7 * bit fields. The driver could be made compatible with 4.00, if all relevant 8 * HW erratas are handled. 9 * 10 * The GMAC is highly configurable at synthesis time. This driver has been 11 * developed for a subset of the total available feature set. Currently 12 * it supports: 13 * - TSO 14 * - Checksum offload for RX and TX. 15 * - Energy efficient ethernet. 16 * - GMII phy interface. 17 * - The statistics module. 18 * - Single RX and TX queue. 19 * 20 * Copyright (C) 2015 Axis Communications AB. 21 * 22 * This program is free software; you can redistribute it and/or modify it 23 * under the terms and conditions of the GNU General Public License, 24 * version 2, as published by the Free Software Foundation. 
25 */ 26 27#include <linux/clk.h> 28#include <linux/module.h> 29#include <linux/kernel.h> 30#include <linux/init.h> 31#include <linux/io.h> 32#include <linux/ethtool.h> 33#include <linux/stat.h> 34#include <linux/types.h> 35 36#include <linux/slab.h> 37#include <linux/delay.h> 38#include <linux/mm.h> 39#include <linux/netdevice.h> 40#include <linux/etherdevice.h> 41#include <linux/platform_device.h> 42 43#include <linux/phy.h> 44#include <linux/mii.h> 45#include <linux/dma-mapping.h> 46#include <linux/vmalloc.h> 47 48#include <linux/device.h> 49#include <linux/bitrev.h> 50#include <linux/crc32.h> 51 52#include <linux/of.h> 53#include <linux/interrupt.h> 54#include <linux/clocksource.h> 55#include <linux/net_tstamp.h> 56#include <linux/pm_runtime.h> 57#include <linux/of_net.h> 58#include <linux/of_address.h> 59#include <linux/of_mdio.h> 60#include <linux/timer.h> 61#include <linux/tcp.h> 62 63#define DRIVER_NAME "dwceqos" 64#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver" 65#define DRIVER_VERSION "0.9" 66 67#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ 68 NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 69 70#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */ 71 72#define DWCEQOS_LPI_TIMER_MIN 8 73#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1) 74 75#define DWCEQOS_RX_BUF_SIZE 2048 76 77#define DWCEQOS_RX_DCNT 256 78#define DWCEQOS_TX_DCNT 256 79 80#define DWCEQOS_HASH_TABLE_SIZE 64 81 82/* The size field in the DMA descriptor is 14 bits */ 83#define BYTES_PER_DMA_DESC 16376 84 85/* Hardware registers */ 86#define START_MAC_REG_OFFSET 0x0000 87#define MAX_MAC_REG_OFFSET 0x0bd0 88#define START_MTL_REG_OFFSET 0x0c00 89#define MAX_MTL_REG_OFFSET 0x0d7c 90#define START_DMA_REG_OFFSET 0x1000 91#define MAX_DMA_REG_OFFSET 0x117C 92 93#define REG_SPACE_SIZE 0x1800 94 95/* DMA */ 96#define REG_DWCEQOS_DMA_MODE 0x1000 97#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004 98#define REG_DWCEQOS_DMA_IS 0x1008 99#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c 100 101/* 
DMA channel registers */ 102#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100 103#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104 104#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108 105#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114 106#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c 107#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120 108#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128 109#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c 110#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130 111#define REG_DWCEQOS_DMA_CH0_IE 0x1134 112#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144 113#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c 114#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154 115#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c 116#define REG_DWCEQOS_DMA_CH0_STA 0x1160 117 118#define DWCEQOS_DMA_MODE_TXPR BIT(11) 119#define DWCEQOS_DMA_MODE_DA BIT(1) 120 121#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31) 122#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0) 123#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12) 124 125#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \ 126 (((x) << 16) & 0x000F0000) 127#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3 128#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16) 129 130#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \ 131 (((x) << 24) & 0x0F000000) 132#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3 133#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24) 134 135#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1) 136#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \ 137 (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK) 138#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1) 139 140#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16) 141#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18) 142 143#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16) 144#define DWCEQOS_DMA_CH_CTRL_START BIT(0) 145#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1) 146#define DWCEQOS_DMA_CH_TX_OSP BIT(4) 147#define DWCEQOS_DMA_CH_TX_TSE BIT(12) 148 149#define DWCEQOS_DMA_CH0_IE_NIE 
BIT(15) 150#define DWCEQOS_DMA_CH0_IE_AIE BIT(14) 151#define DWCEQOS_DMA_CH0_IE_RIE BIT(6) 152#define DWCEQOS_DMA_CH0_IE_TIE BIT(0) 153#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12) 154#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7) 155 156#define DWCEQOS_DMA_IS_DC0IS BIT(0) 157#define DWCEQOS_DMA_IS_MTLIS BIT(16) 158#define DWCEQOS_DMA_IS_MACIS BIT(17) 159 160#define DWCEQOS_DMA_CH0_IS_TI BIT(0) 161#define DWCEQOS_DMA_CH0_IS_RI BIT(6) 162#define DWCEQOS_DMA_CH0_IS_RBU BIT(7) 163#define DWCEQOS_DMA_CH0_IS_FBE BIT(12) 164#define DWCEQOS_DMA_CH0_IS_CDE BIT(13) 165#define DWCEQOS_DMA_CH0_IS_AIS BIT(14) 166 167#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16) 168#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16) 169#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17) 170 171#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19) 172#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19) 173#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20) 174 175/* DMA descriptor bits for RX normal descriptor (read format) */ 176#define DWCEQOS_DMA_RDES3_OWN BIT(31) 177#define DWCEQOS_DMA_RDES3_INTE BIT(30) 178#define DWCEQOS_DMA_RDES3_BUF2V BIT(25) 179#define DWCEQOS_DMA_RDES3_BUF1V BIT(24) 180 181/* DMA descriptor bits for RX normal descriptor (write back format) */ 182#define DWCEQOS_DMA_RDES1_IPCE BIT(7) 183#define DWCEQOS_DMA_RDES3_ES BIT(15) 184#define DWCEQOS_DMA_RDES3_E_JT BIT(14) 185#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff) 186#define DWCEQOS_DMA_RDES1_PT 0x00000007 187#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0) 188#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1) 189#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003 190 191/* DMA descriptor bits for TX normal descriptor (read format) */ 192#define DWCEQOS_DMA_TDES2_IOC BIT(31) 193#define DWCEQOS_DMA_TDES3_OWN BIT(31) 194#define DWCEQOS_DMA_TDES3_CTXT BIT(30) 195#define DWCEQOS_DMA_TDES3_FD BIT(29) 196#define DWCEQOS_DMA_TDES3_LD BIT(28) 197#define DWCEQOS_DMA_TDES3_CIPH BIT(16) 198#define DWCEQOS_DMA_TDES3_CIPP BIT(17) 199#define DWCEQOS_DMA_TDES3_CA 0x00030000 200#define 
DWCEQOS_DMA_TDES3_TSE BIT(18) 201#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19) 202#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16) 203 204#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26) 205 206/* DMA channel states */ 207#define DMA_TX_CH_STOPPED 0 208#define DMA_TX_CH_SUSPENDED 6 209 210#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12) 211 212/* MTL */ 213#define REG_DWCEQOS_MTL_OPER 0x0c00 214#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c 215#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08 216#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38 217 218#define REG_DWCEQOS_MTL_IS 0x0c20 219#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00 220#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30 221#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34 222#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c 223 224#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c 225 226#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060 227 228#define DWCEQOS_MTL_TXQ_TXQEN BIT(3) 229#define DWCEQOS_MTL_TXQ_TSF BIT(1) 230#define DWCEQOS_MTL_TXQ_FTQ BIT(0) 231#define DWCEQOS_MTL_TXQ_TTC512 0x00000070 232 233#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8) 234 235#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12) 236#define DWCEQOS_MTL_RXQ_EHFC BIT(7) 237#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6) 238#define DWCEQOS_MTL_RXQ_FEP BIT(4) 239#define DWCEQOS_MTL_RXQ_FUP BIT(3) 240#define DWCEQOS_MTL_RXQ_RSF BIT(5) 241#define DWCEQOS_MTL_RXQ_RTC32 BIT(0) 242 243/* MAC */ 244#define REG_DWCEQOS_MAC_CFG 0x0000 245#define REG_DWCEQOS_MAC_EXT_CFG 0x0004 246#define REG_DWCEQOS_MAC_PKT_FILT 0x0008 247#define REG_DWCEQOS_MAC_WD_TO 0x000c 248#define REG_DWCEQOS_HASTABLE_LO 0x0010 249#define REG_DWCEQOS_HASTABLE_HI 0x0014 250#define REG_DWCEQOS_MAC_IS 0x00b0 251#define REG_DWCEQOS_MAC_IE 0x00b4 252#define REG_DWCEQOS_MAC_STAT 0x00b8 253#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200 254#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204 255#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300 256#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304 257#define REG_DWCEQOS_MAC_RXQ0_CTRL0 
#define REG_DWCEQOS_MAC_HW_FEATURE0      0x011c
#define REG_DWCEQOS_MAC_HW_FEATURE1      0x0120
#define REG_DWCEQOS_MAC_HW_FEATURE2      0x0124
#define REG_DWCEQOS_MAC_HASHTABLE_LO     0x0010
#define REG_DWCEQOS_MAC_HASHTABLE_HI     0x0014
#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS  0x00d0
#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL  0x00d4
#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER  0x00d8
#define REG_DWCEQOS_MAC_1US_TIC_COUNTER  0x00dc
#define REG_DWCEQOS_MAC_RX_FLOW_CTRL     0x0090
#define REG_DWCEQOS_MAC_Q0_TX_FLOW       0x0070

#define DWCEQOS_MAC_CFG_ACS              BIT(20)
#define DWCEQOS_MAC_CFG_JD               BIT(17)
#define DWCEQOS_MAC_CFG_JE               BIT(16)
#define DWCEQOS_MAC_CFG_PS               BIT(15)
#define DWCEQOS_MAC_CFG_FES              BIT(14)
#define DWCEQOS_MAC_CFG_DM               BIT(13)
#define DWCEQOS_MAC_CFG_DO               BIT(10)
#define DWCEQOS_MAC_CFG_TE               BIT(1)
#define DWCEQOS_MAC_CFG_IPC              BIT(27)
#define DWCEQOS_MAC_CFG_RE               BIT(0)

/* Per-entry MAC address high/low register pair. */
#define DWCEQOS_ADDR_HIGH(reg)           (0x00000300 + (reg * 8))
#define DWCEQOS_ADDR_LOW(reg)            (0x00000304 + (reg * 8))

#define DWCEQOS_MAC_IS_LPI_INT           BIT(5)
#define DWCEQOS_MAC_IS_MMC_INT           BIT(8)

#define DWCEQOS_MAC_RXQ_EN               BIT(1)
#define DWCEQOS_MAC_MAC_ADDR_HI_EN       BIT(31)
#define DWCEQOS_MAC_PKT_FILT_RA          BIT(31)
#define DWCEQOS_MAC_PKT_FILT_HPF         BIT(10)
#define DWCEQOS_MAC_PKT_FILT_SAF         BIT(9)
#define DWCEQOS_MAC_PKT_FILT_SAIF        BIT(8)
#define DWCEQOS_MAC_PKT_FILT_DBF         BIT(5)
#define DWCEQOS_MAC_PKT_FILT_PM          BIT(4)
#define DWCEQOS_MAC_PKT_FILT_DAIF        BIT(3)
#define DWCEQOS_MAC_PKT_FILT_HMC         BIT(2)
#define DWCEQOS_MAC_PKT_FILT_HUC         BIT(1)
#define DWCEQOS_MAC_PKT_FILT_PR          BIT(0)

/* MDIO clock-range selection (CSR field) and the encodings for the
 * supported CSR clock frequencies (in MHz buckets).
 */
#define DWCEQOS_MAC_MDIO_ADDR_CR(x)      (((x & 15)) << 8)
#define DWCEQOS_MAC_MDIO_ADDR_CR_20      2
#define DWCEQOS_MAC_MDIO_ADDR_CR_35      3
#define DWCEQOS_MAC_MDIO_ADDR_CR_60      0
#define DWCEQOS_MAC_MDIO_ADDR_CR_100     1
#define DWCEQOS_MAC_MDIO_ADDR_CR_150     4
#define DWCEQOS_MAC_MDIO_ADDR_CR_250     5
#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ   0x0000000c
#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE  BIT(2)
#define DWCEQOS_MAC_MDIO_ADDR_GB         BIT(0)

#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN   BIT(0)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX   BIT(1)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN   BIT(2)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX   BIT(3)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST   BIT(8)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST   BIT(9)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN    BIT(16)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS      BIT(17)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN    BIT(18)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA   BIT(19)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE    BIT(20)
#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE  BIT(21)

#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x)   ((x) & GENMASK(11, 0))

#define DWCEQOS_LPI_CTRL_ENABLE_EEE      (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
					  DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
					  DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)

#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE     BIT(0)

#define DWCEQOS_MAC_Q0_TX_FLOW_TFE       BIT(1)
#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time)  ((time) << 16)
#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)

/* Features */
#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
#define DWCEQOS_MAC_HW_FEATURE0_HDSEL    BIT(2)
#define DWCEQOS_MAC_HW_FEATURE0_EEESEL   BIT(13)
#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL  BIT(1)
#define DWCEQOS_MAC_HW_FEATURE0_MIISEL   BIT(0)

#define DWCEQOS_MAC_HW_FEATURE1_TSOEN    BIT(18)
#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6)
#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))

/* Number of perfect-filter MAC address slots synthesized into the core. */
#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
	(1 + (((feature1) & 0x1fc0000) >> 18))

#define DWCEQOS_MDIO_PHYADDR(x)     (((x) & 0x1f) << 21)
#define DWCEQOS_MDIO_PHYREG(x)      (((x) & 0x1f) << 16)

#define DWCEQOS_DMA_MODE_SWR            BIT(0)

#define DWCEQOS_DWCEQOS_RX_BUF_SIZE     2048

/* Mac Management Counters */
#define REG_DWCEQOS_MMC_CTRL             0x0700
#define REG_DWCEQOS_MMC_RXIRQ            0x0704
#define REG_DWCEQOS_MMC_TXIRQ            0x0708
#define REG_DWCEQOS_MMC_RXIRQMASK        0x070c
#define REG_DWCEQOS_MMC_TXIRQMASK        0x0710

#define DWCEQOS_MMC_CTRL_CNTRST          BIT(0)
#define DWCEQOS_MMC_CTRL_RSTONRD         BIT(2)

#define DWC_MMC_TXLPITRANSCNTR           0x07F0
#define DWC_MMC_TXLPIUSCNTR              0x07EC
#define DWC_MMC_TXOVERSIZE_G             0x0778
#define DWC_MMC_TXVLANPACKETS_G          0x0774
#define DWC_MMC_TXPAUSEPACKETS           0x0770
#define DWC_MMC_TXEXCESSDEF              0x076C
#define DWC_MMC_TXPACKETCOUNT_G          0x0768
#define DWC_MMC_TXOCTETCOUNT_G           0x0764
#define DWC_MMC_TXCARRIERERROR           0x0760
#define DWC_MMC_TXEXCESSCOL              0x075C
#define DWC_MMC_TXLATECOL                0x0758
#define DWC_MMC_TXDEFERRED               0x0754
#define DWC_MMC_TXMULTICOL_G             0x0750
#define DWC_MMC_TXSINGLECOL_G            0x074C
#define DWC_MMC_TXUNDERFLOWERROR         0x0748
#define DWC_MMC_TXBROADCASTPACKETS_GB    0x0744
#define DWC_MMC_TXMULTICASTPACKETS_GB    0x0740
#define DWC_MMC_TXUNICASTPACKETS_GB      0x073C
#define DWC_MMC_TX1024TOMAXOCTETS_GB     0x0738
#define DWC_MMC_TX512TO1023OCTETS_GB     0x0734
#define DWC_MMC_TX256TO511OCTETS_GB      0x0730
#define DWC_MMC_TX128TO255OCTETS_GB      0x072C
#define DWC_MMC_TX65TO127OCTETS_GB       0x0728
#define DWC_MMC_TX64OCTETS_GB            0x0724
#define DWC_MMC_TXMULTICASTPACKETS_G     0x0720
#define DWC_MMC_TXBROADCASTPACKETS_G     0x071C
#define DWC_MMC_TXPACKETCOUNT_GB         0x0718
#define DWC_MMC_TXOCTETCOUNT_GB          0x0714

#define DWC_MMC_RXLPITRANSCNTR           0x07F8
#define DWC_MMC_RXLPIUSCNTR              0x07F4
#define DWC_MMC_RXCTRLPACKETS_G          0x07E4
#define DWC_MMC_RXRCVERROR               0x07E0
#define DWC_MMC_RXWATCHDOG               0x07DC
#define DWC_MMC_RXVLANPACKETS_GB         0x07D8
#define DWC_MMC_RXFIFOOVERFLOW           0x07D4
#define DWC_MMC_RXPAUSEPACKETS           0x07D0
#define DWC_MMC_RXOUTOFRANGETYPE         0x07CC
#define DWC_MMC_RXLENGTHERROR            0x07C8
#define DWC_MMC_RXUNICASTPACKETS_G       0x07C4
DWC_MMC_RXUNICASTPACKETS_G 0x07C4 408#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0 409#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC 410#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8 411#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4 412#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0 413#define DWC_MMC_RX64OCTETS_GB 0x07AC 414#define DWC_MMC_RXOVERSIZE_G 0x07A8 415#define DWC_MMC_RXUNDERSIZE_G 0x07A4 416#define DWC_MMC_RXJABBERERROR 0x07A0 417#define DWC_MMC_RXRUNTERROR 0x079C 418#define DWC_MMC_RXALIGNMENTERROR 0x0798 419#define DWC_MMC_RXCRCERROR 0x0794 420#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790 421#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C 422#define DWC_MMC_RXOCTETCOUNT_G 0x0788 423#define DWC_MMC_RXOCTETCOUNT_GB 0x0784 424#define DWC_MMC_RXPACKETCOUNT_GB 0x0780 425 426static int debug = -1; 427module_param(debug, int, 0); 428MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)"); 429 430/* DMA ring descriptor. These are used as support descriptors for the HW DMA */ 431struct ring_desc { 432 struct sk_buff *skb; 433 dma_addr_t mapping; 434 size_t len; 435}; 436 437/* DMA hardware descriptor */ 438struct dwceqos_dma_desc { 439 u32 des0; 440 u32 des1; 441 u32 des2; 442 u32 des3; 443} ____cacheline_aligned; 444 445struct dwceqos_mmc_counters { 446 __u64 txlpitranscntr; 447 __u64 txpiuscntr; 448 __u64 txoversize_g; 449 __u64 txvlanpackets_g; 450 __u64 txpausepackets; 451 __u64 txexcessdef; 452 __u64 txpacketcount_g; 453 __u64 txoctetcount_g; 454 __u64 txcarriererror; 455 __u64 txexcesscol; 456 __u64 txlatecol; 457 __u64 txdeferred; 458 __u64 txmulticol_g; 459 __u64 txsinglecol_g; 460 __u64 txunderflowerror; 461 __u64 txbroadcastpackets_gb; 462 __u64 txmulticastpackets_gb; 463 __u64 txunicastpackets_gb; 464 __u64 tx1024tomaxoctets_gb; 465 __u64 tx512to1023octets_gb; 466 __u64 tx256to511octets_gb; 467 __u64 tx128to255octets_gb; 468 __u64 tx65to127octets_gb; 469 __u64 tx64octets_gb; 470 __u64 txmulticastpackets_g; 471 __u64 txbroadcastpackets_g; 472 __u64 
txpacketcount_gb; 473 __u64 txoctetcount_gb; 474 475 __u64 rxlpitranscntr; 476 __u64 rxlpiuscntr; 477 __u64 rxctrlpackets_g; 478 __u64 rxrcverror; 479 __u64 rxwatchdog; 480 __u64 rxvlanpackets_gb; 481 __u64 rxfifooverflow; 482 __u64 rxpausepackets; 483 __u64 rxoutofrangetype; 484 __u64 rxlengtherror; 485 __u64 rxunicastpackets_g; 486 __u64 rx1024tomaxoctets_gb; 487 __u64 rx512to1023octets_gb; 488 __u64 rx256to511octets_gb; 489 __u64 rx128to255octets_gb; 490 __u64 rx65to127octets_gb; 491 __u64 rx64octets_gb; 492 __u64 rxoversize_g; 493 __u64 rxundersize_g; 494 __u64 rxjabbererror; 495 __u64 rxrunterror; 496 __u64 rxalignmenterror; 497 __u64 rxcrcerror; 498 __u64 rxmulticastpackets_g; 499 __u64 rxbroadcastpackets_g; 500 __u64 rxoctetcount_g; 501 __u64 rxoctetcount_gb; 502 __u64 rxpacketcount_gb; 503}; 504 505/* Ethtool statistics */ 506 507struct dwceqos_stat { 508 const char stat_name[ETH_GSTRING_LEN]; 509 int offset; 510}; 511 512#define STAT_ITEM(name, var) \ 513 {\ 514 name,\ 515 offsetof(struct dwceqos_mmc_counters, var),\ 516 } 517 518static const struct dwceqos_stat dwceqos_ethtool_stats[] = { 519 STAT_ITEM("tx_bytes", txoctetcount_gb), 520 STAT_ITEM("tx_packets", txpacketcount_gb), 521 STAT_ITEM("tx_unicst_packets", txunicastpackets_gb), 522 STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb), 523 STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb), 524 STAT_ITEM("tx_pause_packets", txpausepackets), 525 STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb), 526 STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb), 527 STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb), 528 STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb), 529 STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb), 530 STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb), 531 STAT_ITEM("tx_underflow_errors", txunderflowerror), 532 STAT_ITEM("tx_lpi_count", txlpitranscntr), 533 534 STAT_ITEM("rx_bytes", rxoctetcount_gb), 535 
STAT_ITEM("rx_packets", rxpacketcount_gb), 536 STAT_ITEM("rx_unicast_packets", rxunicastpackets_g), 537 STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g), 538 STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g), 539 STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb), 540 STAT_ITEM("rx_pause_packets", rxpausepackets), 541 STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb), 542 STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb), 543 STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb), 544 STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb), 545 STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb), 546 STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb), 547 STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow), 548 STAT_ITEM("rx_oversize_packets", rxoversize_g), 549 STAT_ITEM("rx_undersize_packets", rxundersize_g), 550 STAT_ITEM("rx_jabbers", rxjabbererror), 551 STAT_ITEM("rx_align_errors", rxalignmenterror), 552 STAT_ITEM("rx_crc_errors", rxcrcerror), 553 STAT_ITEM("rx_lpi_count", rxlpitranscntr), 554}; 555 556/* Configuration of AXI bus parameters. 557 * These values depend on the parameters set on the MAC core as well 558 * as the AXI interconnect. 559 */ 560struct dwceqos_bus_cfg { 561 /* Enable AXI low-power interface. */ 562 bool en_lpi; 563 /* Limit on number of outstanding AXI write requests. */ 564 u32 write_requests; 565 /* Limit on number of outstanding AXI read requests. */ 566 u32 read_requests; 567 /* Bitmap of allowed AXI burst lengths, 4-256 beats. 
*/ 568 u32 burst_map; 569 /* DMA Programmable burst length*/ 570 u32 tx_pbl; 571 u32 rx_pbl; 572}; 573 574struct dwceqos_flowcontrol { 575 int autoneg; 576 int rx; 577 int rx_current; 578 int tx; 579 int tx_current; 580}; 581 582struct net_local { 583 void __iomem *baseaddr; 584 struct clk *phy_ref_clk; 585 struct clk *apb_pclk; 586 587 struct device_node *phy_node; 588 struct net_device *ndev; 589 struct platform_device *pdev; 590 591 u32 msg_enable; 592 593 struct tasklet_struct tx_bdreclaim_tasklet; 594 struct workqueue_struct *txtimeout_handler_wq; 595 struct work_struct txtimeout_reinit; 596 597 phy_interface_t phy_interface; 598 struct mii_bus *mii_bus; 599 600 unsigned int link; 601 unsigned int speed; 602 unsigned int duplex; 603 604 struct napi_struct napi; 605 606 /* DMA Descriptor Areas */ 607 struct ring_desc *rx_skb; 608 struct ring_desc *tx_skb; 609 610 struct dwceqos_dma_desc *tx_descs; 611 struct dwceqos_dma_desc *rx_descs; 612 613 /* DMA Mapped Descriptor areas*/ 614 dma_addr_t tx_descs_addr; 615 dma_addr_t rx_descs_addr; 616 dma_addr_t tx_descs_tail_addr; 617 dma_addr_t rx_descs_tail_addr; 618 619 size_t tx_free; 620 size_t tx_next; 621 size_t rx_cur; 622 size_t tx_cur; 623 624 /* Spinlocks for accessing DMA Descriptors */ 625 spinlock_t tx_lock; 626 627 /* Spinlock for register read-modify-writes. */ 628 spinlock_t hw_lock; 629 630 u32 feature0; 631 u32 feature1; 632 u32 feature2; 633 634 struct dwceqos_bus_cfg bus_cfg; 635 bool en_tx_lpi_clockgating; 636 637 int eee_enabled; 638 int eee_active; 639 int csr_val; 640 u32 gso_size; 641 642 struct dwceqos_mmc_counters mmc_counters; 643 /* Protect the mmc_counter updates. */ 644 spinlock_t stats_lock; 645 u32 mmc_rx_counters_mask; 646 u32 mmc_tx_counters_mask; 647 648 struct dwceqos_flowcontrol flowcontrol; 649 650 /* Tracks the intermediate state of phy started but hardware 651 * init not finished yet. 
652 */ 653 bool phy_defer; 654}; 655 656static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask, 657 u32 tx_mask); 658 659static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr, 660 unsigned int reg_n); 661static int dwceqos_stop(struct net_device *ndev); 662static int dwceqos_open(struct net_device *ndev); 663static void dwceqos_tx_poll_demand(struct net_local *lp); 664 665static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable); 666static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable); 667 668static void dwceqos_reset_state(struct net_local *lp); 669 670#define dwceqos_read(lp, reg) \ 671 readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg)) 672#define dwceqos_write(lp, reg, val) \ 673 writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg)) 674 675static void dwceqos_reset_state(struct net_local *lp) 676{ 677 lp->link = 0; 678 lp->speed = 0; 679 lp->duplex = DUPLEX_UNKNOWN; 680 lp->flowcontrol.rx_current = 0; 681 lp->flowcontrol.tx_current = 0; 682 lp->eee_active = 0; 683 lp->eee_enabled = 0; 684} 685 686static void print_descriptor(struct net_local *lp, int index, int tx) 687{ 688 struct dwceqos_dma_desc *dd; 689 690 if (tx) 691 dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index]; 692 else 693 dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index]; 694 695 pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? 
"TX" : "RX", 696 index, dd); 697 pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2, 698 dd->des3); 699} 700 701static void print_status(struct net_local *lp) 702{ 703 size_t desci, i; 704 705 pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free, 706 lp->tx_cur, lp->tx_next); 707 708 print_descriptor(lp, lp->rx_cur, 0); 709 710 for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0; 711 i < DWCEQOS_TX_DCNT; 712 ++i) { 713 print_descriptor(lp, desci, 1); 714 desci = (desci + 1) % DWCEQOS_TX_DCNT; 715 } 716 717 pr_info("DMA_Debug_Status0: 0x%08x\n", 718 dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0)); 719 pr_info("DMA_CH0_Status: 0x%08x\n", 720 dwceqos_read(lp, REG_DWCEQOS_DMA_IS)); 721 pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n", 722 dwceqos_read(lp, 0x1144)); 723 pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n", 724 dwceqos_read(lp, 0x1154)); 725 pr_info("MTL_Debug_Status: 0x%08x\n", 726 dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST)); 727 pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n", 728 dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST)); 729 pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n", 730 dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST)); 731 pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n", 732 dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC), 733 dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC)); 734} 735 736static void dwceqos_mdio_set_csr(struct net_local *lp) 737{ 738 int rate = clk_get_rate(lp->apb_pclk); 739 740 if (rate <= 20000000) 741 lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20; 742 else if (rate <= 35000000) 743 lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35; 744 else if (rate <= 60000000) 745 lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60; 746 else if (rate <= 100000000) 747 lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100; 748 else if (rate <= 150000000) 749 lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150; 750 else if (rate <= 250000000) 751 lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250; 752} 753 754/* Simple MDIO functions implementing mii_bus */ 755static 
int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg) 756{ 757 struct net_local *lp = bus->priv; 758 u32 regval; 759 int i; 760 int data; 761 762 regval = DWCEQOS_MDIO_PHYADDR(mii_id) | 763 DWCEQOS_MDIO_PHYREG(phyreg) | 764 DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | 765 DWCEQOS_MAC_MDIO_ADDR_GB | 766 DWCEQOS_MAC_MDIO_ADDR_GOC_READ; 767 dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); 768 769 for (i = 0; i < 5; ++i) { 770 usleep_range(64, 128); 771 if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & 772 DWCEQOS_MAC_MDIO_ADDR_GB)) 773 break; 774 } 775 776 data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA); 777 if (i == 5) { 778 netdev_warn(lp->ndev, "MDIO read timed out\n"); 779 data = 0xffff; 780 } 781 782 return data & 0xffff; 783} 784 785static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg, 786 u16 value) 787{ 788 struct net_local *lp = bus->priv; 789 u32 regval; 790 int i; 791 792 dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value); 793 794 regval = DWCEQOS_MDIO_PHYADDR(mii_id) | 795 DWCEQOS_MDIO_PHYREG(phyreg) | 796 DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) | 797 DWCEQOS_MAC_MDIO_ADDR_GB | 798 DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE; 799 dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval); 800 801 for (i = 0; i < 5; ++i) { 802 usleep_range(64, 128); 803 if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) & 804 DWCEQOS_MAC_MDIO_ADDR_GB)) 805 break; 806 } 807 if (i == 5) 808 netdev_warn(lp->ndev, "MDIO write timed out\n"); 809 return 0; 810} 811 812static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) 813{ 814 struct net_local *lp = netdev_priv(ndev); 815 struct phy_device *phydev = ndev->phydev; 816 817 if (!netif_running(ndev)) 818 return -EINVAL; 819 820 if (!phydev) 821 return -ENODEV; 822 823 switch (cmd) { 824 case SIOCGMIIPHY: 825 case SIOCGMIIREG: 826 case SIOCSMIIREG: 827 return phy_mii_ioctl(phydev, rq, cmd); 828 default: 829 dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd); 830 return 
-EOPNOTSUPP; 831 } 832} 833 834static void dwceqos_link_down(struct net_local *lp) 835{ 836 u32 regval; 837 unsigned long flags; 838 839 /* Indicate link down to the LPI state machine */ 840 spin_lock_irqsave(&lp->hw_lock, flags); 841 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); 842 regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; 843 dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); 844 spin_unlock_irqrestore(&lp->hw_lock, flags); 845} 846 847static void dwceqos_link_up(struct net_local *lp) 848{ 849 struct net_device *ndev = lp->ndev; 850 u32 regval; 851 unsigned long flags; 852 853 /* Indicate link up to the LPI state machine */ 854 spin_lock_irqsave(&lp->hw_lock, flags); 855 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); 856 regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS; 857 dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); 858 spin_unlock_irqrestore(&lp->hw_lock, flags); 859 860 lp->eee_active = !phy_init_eee(ndev->phydev, 0); 861 862 /* Check for changed EEE capability */ 863 if (!lp->eee_active && lp->eee_enabled) { 864 lp->eee_enabled = 0; 865 866 spin_lock_irqsave(&lp->hw_lock, flags); 867 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); 868 regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; 869 dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); 870 spin_unlock_irqrestore(&lp->hw_lock, flags); 871 } 872} 873 874static void dwceqos_set_speed(struct net_local *lp) 875{ 876 struct net_device *ndev = lp->ndev; 877 struct phy_device *phydev = ndev->phydev; 878 u32 regval; 879 880 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); 881 regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES | 882 DWCEQOS_MAC_CFG_DM); 883 884 if (phydev->duplex) 885 regval |= DWCEQOS_MAC_CFG_DM; 886 if (phydev->speed == SPEED_10) { 887 regval |= DWCEQOS_MAC_CFG_PS; 888 } else if (phydev->speed == SPEED_100) { 889 regval |= DWCEQOS_MAC_CFG_PS | 890 DWCEQOS_MAC_CFG_FES; 891 } else if (phydev->speed != SPEED_1000) { 892 netdev_err(lp->ndev, 
893 "unknown PHY speed %d\n", 894 phydev->speed); 895 return; 896 } 897 898 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval); 899} 900 901static void dwceqos_adjust_link(struct net_device *ndev) 902{ 903 struct net_local *lp = netdev_priv(ndev); 904 struct phy_device *phydev = ndev->phydev; 905 int status_change = 0; 906 907 if (lp->phy_defer) 908 return; 909 910 if (phydev->link) { 911 if ((lp->speed != phydev->speed) || 912 (lp->duplex != phydev->duplex)) { 913 dwceqos_set_speed(lp); 914 915 lp->speed = phydev->speed; 916 lp->duplex = phydev->duplex; 917 status_change = 1; 918 } 919 920 if (lp->flowcontrol.autoneg) { 921 lp->flowcontrol.rx = phydev->pause || 922 phydev->asym_pause; 923 lp->flowcontrol.tx = phydev->pause || 924 phydev->asym_pause; 925 } 926 927 if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) { 928 if (netif_msg_link(lp)) 929 netdev_dbg(ndev, "set rx flow to %d\n", 930 lp->flowcontrol.rx); 931 dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx); 932 lp->flowcontrol.rx_current = lp->flowcontrol.rx; 933 } 934 if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) { 935 if (netif_msg_link(lp)) 936 netdev_dbg(ndev, "set tx flow to %d\n", 937 lp->flowcontrol.tx); 938 dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx); 939 lp->flowcontrol.tx_current = lp->flowcontrol.tx; 940 } 941 } 942 943 if (phydev->link != lp->link) { 944 lp->link = phydev->link; 945 status_change = 1; 946 } 947 948 if (status_change) { 949 if (phydev->link) { 950 netif_trans_update(lp->ndev); 951 dwceqos_link_up(lp); 952 } else { 953 dwceqos_link_down(lp); 954 } 955 phy_print_status(phydev); 956 } 957} 958 959static int dwceqos_mii_probe(struct net_device *ndev) 960{ 961 struct net_local *lp = netdev_priv(ndev); 962 struct phy_device *phydev = NULL; 963 964 if (lp->phy_node) { 965 phydev = of_phy_connect(lp->ndev, 966 lp->phy_node, 967 &dwceqos_adjust_link, 968 0, 969 lp->phy_interface); 970 971 if (!phydev) { 972 netdev_err(ndev, "no PHY found\n"); 973 return -1; 974 } 975 } else 
{ 976 netdev_err(ndev, "no PHY configured\n"); 977 return -ENODEV; 978 } 979 980 if (netif_msg_probe(lp)) 981 phy_attached_info(phydev); 982 983 phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause | 984 SUPPORTED_Asym_Pause; 985 986 lp->link = 0; 987 lp->speed = 0; 988 lp->duplex = DUPLEX_UNKNOWN; 989 lp->flowcontrol.autoneg = AUTONEG_ENABLE; 990 991 return 0; 992} 993 994static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index) 995{ 996 struct sk_buff *new_skb; 997 dma_addr_t new_skb_baddr = 0; 998 999 new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); 1000 if (!new_skb) { 1001 netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index); 1002 goto err_out; 1003 } 1004 1005 new_skb_baddr = dma_map_single(lp->ndev->dev.parent, 1006 new_skb->data, DWCEQOS_RX_BUF_SIZE, 1007 DMA_FROM_DEVICE); 1008 if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { 1009 netdev_err(lp->ndev, "DMA map error\n"); 1010 dev_kfree_skb(new_skb); 1011 new_skb = NULL; 1012 goto err_out; 1013 } 1014 1015 lp->rx_descs[index].des0 = new_skb_baddr; 1016 lp->rx_descs[index].des1 = 0; 1017 lp->rx_descs[index].des2 = 0; 1018 lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE | 1019 DWCEQOS_DMA_RDES3_BUF1V | 1020 DWCEQOS_DMA_RDES3_OWN; 1021 1022 lp->rx_skb[index].mapping = new_skb_baddr; 1023 lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE; 1024 1025err_out: 1026 lp->rx_skb[index].skb = new_skb; 1027} 1028 1029static void dwceqos_clean_rings(struct net_local *lp) 1030{ 1031 int i; 1032 1033 if (lp->rx_skb) { 1034 for (i = 0; i < DWCEQOS_RX_DCNT; i++) { 1035 if (lp->rx_skb[i].skb) { 1036 dma_unmap_single(lp->ndev->dev.parent, 1037 lp->rx_skb[i].mapping, 1038 lp->rx_skb[i].len, 1039 DMA_FROM_DEVICE); 1040 1041 dev_kfree_skb(lp->rx_skb[i].skb); 1042 lp->rx_skb[i].skb = NULL; 1043 lp->rx_skb[i].mapping = 0; 1044 } 1045 } 1046 } 1047 1048 if (lp->tx_skb) { 1049 for (i = 0; i < DWCEQOS_TX_DCNT; i++) { 1050 if (lp->tx_skb[i].skb) { 1051 dev_kfree_skb(lp->tx_skb[i].skb); 1052 
lp->tx_skb[i].skb = NULL; 1053 } 1054 if (lp->tx_skb[i].mapping) { 1055 dma_unmap_single(lp->ndev->dev.parent, 1056 lp->tx_skb[i].mapping, 1057 lp->tx_skb[i].len, 1058 DMA_TO_DEVICE); 1059 lp->tx_skb[i].mapping = 0; 1060 } 1061 } 1062 } 1063} 1064 1065static void dwceqos_descriptor_free(struct net_local *lp) 1066{ 1067 int size; 1068 1069 dwceqos_clean_rings(lp); 1070 1071 kfree(lp->tx_skb); 1072 lp->tx_skb = NULL; 1073 kfree(lp->rx_skb); 1074 lp->rx_skb = NULL; 1075 1076 size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); 1077 if (lp->rx_descs) { 1078 dma_free_coherent(lp->ndev->dev.parent, size, 1079 (void *)(lp->rx_descs), lp->rx_descs_addr); 1080 lp->rx_descs = NULL; 1081 } 1082 1083 size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); 1084 if (lp->tx_descs) { 1085 dma_free_coherent(lp->ndev->dev.parent, size, 1086 (void *)(lp->tx_descs), lp->tx_descs_addr); 1087 lp->tx_descs = NULL; 1088 } 1089} 1090 1091static int dwceqos_descriptor_init(struct net_local *lp) 1092{ 1093 int size; 1094 u32 i; 1095 1096 lp->gso_size = 0; 1097 1098 lp->tx_skb = NULL; 1099 lp->rx_skb = NULL; 1100 lp->rx_descs = NULL; 1101 lp->tx_descs = NULL; 1102 1103 /* Reset the DMA indexes */ 1104 lp->rx_cur = 0; 1105 lp->tx_cur = 0; 1106 lp->tx_next = 0; 1107 lp->tx_free = DWCEQOS_TX_DCNT; 1108 1109 /* Allocate Ring descriptors */ 1110 size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc); 1111 lp->rx_skb = kzalloc(size, GFP_KERNEL); 1112 if (!lp->rx_skb) 1113 goto err_out; 1114 1115 size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc); 1116 lp->tx_skb = kzalloc(size, GFP_KERNEL); 1117 if (!lp->tx_skb) 1118 goto err_out; 1119 1120 /* Allocate DMA descriptors */ 1121 size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc); 1122 lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, 1123 &lp->rx_descs_addr, GFP_KERNEL); 1124 if (!lp->rx_descs) 1125 goto err_out; 1126 lp->rx_descs_tail_addr = lp->rx_descs_addr + 1127 sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT; 1128 1129 size 
= DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc); 1130 lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size, 1131 &lp->tx_descs_addr, GFP_KERNEL); 1132 if (!lp->tx_descs) 1133 goto err_out; 1134 lp->tx_descs_tail_addr = lp->tx_descs_addr + 1135 sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT; 1136 1137 /* Initialize RX Ring Descriptors and buffers */ 1138 for (i = 0; i < DWCEQOS_RX_DCNT; ++i) { 1139 dwceqos_alloc_rxring_desc(lp, i); 1140 if (!(lp->rx_skb[lp->rx_cur].skb)) 1141 goto err_out; 1142 } 1143 1144 /* Initialize TX Descriptors */ 1145 for (i = 0; i < DWCEQOS_TX_DCNT; ++i) { 1146 lp->tx_descs[i].des0 = 0; 1147 lp->tx_descs[i].des1 = 0; 1148 lp->tx_descs[i].des2 = 0; 1149 lp->tx_descs[i].des3 = 0; 1150 } 1151 1152 /* Make descriptor writes visible to the DMA. */ 1153 wmb(); 1154 1155 return 0; 1156 1157err_out: 1158 dwceqos_descriptor_free(lp); 1159 return -ENOMEM; 1160} 1161 1162static int dwceqos_packet_avail(struct net_local *lp) 1163{ 1164 return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN); 1165} 1166 1167static void dwceqos_get_hwfeatures(struct net_local *lp) 1168{ 1169 lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0); 1170 lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1); 1171 lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2); 1172} 1173 1174static void dwceqos_dma_enable_txirq(struct net_local *lp) 1175{ 1176 u32 regval; 1177 unsigned long flags; 1178 1179 spin_lock_irqsave(&lp->hw_lock, flags); 1180 regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); 1181 regval |= DWCEQOS_DMA_CH0_IE_TIE; 1182 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); 1183 spin_unlock_irqrestore(&lp->hw_lock, flags); 1184} 1185 1186static void dwceqos_dma_disable_txirq(struct net_local *lp) 1187{ 1188 u32 regval; 1189 unsigned long flags; 1190 1191 spin_lock_irqsave(&lp->hw_lock, flags); 1192 regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); 1193 regval &= ~DWCEQOS_DMA_CH0_IE_TIE; 1194 dwceqos_write(lp, 
REG_DWCEQOS_DMA_CH0_IE, regval); 1195 spin_unlock_irqrestore(&lp->hw_lock, flags); 1196} 1197 1198static void dwceqos_dma_enable_rxirq(struct net_local *lp) 1199{ 1200 u32 regval; 1201 unsigned long flags; 1202 1203 spin_lock_irqsave(&lp->hw_lock, flags); 1204 regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); 1205 regval |= DWCEQOS_DMA_CH0_IE_RIE; 1206 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); 1207 spin_unlock_irqrestore(&lp->hw_lock, flags); 1208} 1209 1210static void dwceqos_dma_disable_rxirq(struct net_local *lp) 1211{ 1212 u32 regval; 1213 unsigned long flags; 1214 1215 spin_lock_irqsave(&lp->hw_lock, flags); 1216 regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE); 1217 regval &= ~DWCEQOS_DMA_CH0_IE_RIE; 1218 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval); 1219 spin_unlock_irqrestore(&lp->hw_lock, flags); 1220} 1221 1222static void dwceqos_enable_mmc_interrupt(struct net_local *lp) 1223{ 1224 dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0); 1225 dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0); 1226} 1227 1228static int dwceqos_mii_init(struct net_local *lp) 1229{ 1230 int ret = -ENXIO; 1231 struct resource res; 1232 struct device_node *mdionode; 1233 1234 mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio"); 1235 1236 if (!mdionode) 1237 return 0; 1238 1239 lp->mii_bus = mdiobus_alloc(); 1240 if (!lp->mii_bus) { 1241 ret = -ENOMEM; 1242 goto err_out; 1243 } 1244 1245 lp->mii_bus->name = "DWCEQOS MII bus"; 1246 lp->mii_bus->read = &dwceqos_mdio_read; 1247 lp->mii_bus->write = &dwceqos_mdio_write; 1248 lp->mii_bus->priv = lp; 1249 lp->mii_bus->parent = &lp->pdev->dev; 1250 1251 of_address_to_resource(lp->pdev->dev.of_node, 0, &res); 1252 snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx", 1253 (unsigned long long)res.start); 1254 if (of_mdiobus_register(lp->mii_bus, mdionode)) 1255 goto err_out_free_mdiobus; 1256 1257 return 0; 1258 1259err_out_free_mdiobus: 1260 mdiobus_free(lp->mii_bus); 1261err_out: 1262 of_node_put(mdionode); 1263 
return ret; 1264} 1265 1266/* DMA reset. When issued also resets all MTL and MAC registers as well */ 1267static void dwceqos_reset_hw(struct net_local *lp) 1268{ 1269 /* Wait (at most) 0.5 seconds for DMA reset*/ 1270 int i = 5000; 1271 u32 reg; 1272 1273 /* Force gigabit to guarantee a TX clock for GMII. */ 1274 reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); 1275 reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES); 1276 reg |= DWCEQOS_MAC_CFG_DM; 1277 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg); 1278 1279 dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR); 1280 1281 do { 1282 udelay(100); 1283 i--; 1284 reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE); 1285 } while ((reg & DWCEQOS_DMA_MODE_SWR) && i); 1286 /* We might experience a timeout if the chip clock mux is broken */ 1287 if (!i) 1288 netdev_err(lp->ndev, "DMA reset timed out!\n"); 1289} 1290 1291static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status) 1292{ 1293 if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) { 1294 netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n", 1295 dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ? 1296 "read" : "write", 1297 dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ? 1298 "descr" : "data", 1299 dma_status); 1300 1301 print_status(lp); 1302 } 1303 if (dma_status & DWCEQOS_DMA_CH0_IS_REB) { 1304 netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n", 1305 dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ? 1306 "read" : "write", 1307 dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ? 1308 "descr" : "data", 1309 dma_status); 1310 1311 print_status(lp); 1312 } 1313} 1314 1315static void dwceqos_mmc_interrupt(struct net_local *lp) 1316{ 1317 unsigned long flags; 1318 1319 spin_lock_irqsave(&lp->stats_lock, flags); 1320 1321 /* A latched mmc interrupt can not be masked, we must read 1322 * all the counters with an interrupt pending. 
1323 */ 1324 dwceqos_read_mmc_counters(lp, 1325 dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ), 1326 dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ)); 1327 1328 spin_unlock_irqrestore(&lp->stats_lock, flags); 1329} 1330 1331static void dwceqos_mac_interrupt(struct net_local *lp) 1332{ 1333 u32 cause; 1334 1335 cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS); 1336 1337 if (cause & DWCEQOS_MAC_IS_MMC_INT) 1338 dwceqos_mmc_interrupt(lp); 1339} 1340 1341static irqreturn_t dwceqos_interrupt(int irq, void *dev_id) 1342{ 1343 struct net_device *ndev = dev_id; 1344 struct net_local *lp = netdev_priv(ndev); 1345 1346 u32 cause; 1347 u32 dma_status; 1348 irqreturn_t ret = IRQ_NONE; 1349 1350 cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS); 1351 /* DMA Channel 0 Interrupt */ 1352 if (cause & DWCEQOS_DMA_IS_DC0IS) { 1353 dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA); 1354 1355 /* Transmit Interrupt */ 1356 if (dma_status & DWCEQOS_DMA_CH0_IS_TI) { 1357 tasklet_schedule(&lp->tx_bdreclaim_tasklet); 1358 dwceqos_dma_disable_txirq(lp); 1359 } 1360 1361 /* Receive Interrupt */ 1362 if (dma_status & DWCEQOS_DMA_CH0_IS_RI) { 1363 /* Disable RX IRQs */ 1364 dwceqos_dma_disable_rxirq(lp); 1365 napi_schedule(&lp->napi); 1366 } 1367 1368 /* Fatal Bus Error interrupt */ 1369 if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) { 1370 dwceqos_fatal_bus_error(lp, dma_status); 1371 1372 /* errata 9000831707 */ 1373 dma_status |= DWCEQOS_DMA_CH0_IS_TEB | 1374 DWCEQOS_DMA_CH0_IS_REB; 1375 } 1376 1377 /* Ack all DMA Channel 0 IRQs */ 1378 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status); 1379 ret = IRQ_HANDLED; 1380 } 1381 1382 if (cause & DWCEQOS_DMA_IS_MTLIS) { 1383 u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL); 1384 1385 dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val); 1386 ret = IRQ_HANDLED; 1387 } 1388 1389 if (cause & DWCEQOS_DMA_IS_MACIS) { 1390 dwceqos_mac_interrupt(lp); 1391 ret = IRQ_HANDLED; 1392 } 1393 return ret; 1394} 1395 1396static void dwceqos_set_rx_flowcontrol(struct 
net_local *lp, bool enable) 1397{ 1398 u32 regval; 1399 unsigned long flags; 1400 1401 spin_lock_irqsave(&lp->hw_lock, flags); 1402 1403 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL); 1404 if (enable) 1405 regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE; 1406 else 1407 regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE; 1408 dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval); 1409 1410 spin_unlock_irqrestore(&lp->hw_lock, flags); 1411} 1412 1413static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable) 1414{ 1415 u32 regval; 1416 unsigned long flags; 1417 1418 spin_lock_irqsave(&lp->hw_lock, flags); 1419 1420 /* MTL flow control */ 1421 regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); 1422 if (enable) 1423 regval |= DWCEQOS_MTL_RXQ_EHFC; 1424 else 1425 regval &= ~DWCEQOS_MTL_RXQ_EHFC; 1426 1427 dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); 1428 1429 /* MAC flow control */ 1430 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW); 1431 if (enable) 1432 regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE; 1433 else 1434 regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE; 1435 dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); 1436 1437 spin_unlock_irqrestore(&lp->hw_lock, flags); 1438} 1439 1440static void dwceqos_configure_flow_control(struct net_local *lp) 1441{ 1442 u32 regval; 1443 unsigned long flags; 1444 int RQS, RFD, RFA; 1445 1446 spin_lock_irqsave(&lp->hw_lock, flags); 1447 1448 regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER); 1449 1450 /* The queue size is in units of 256 bytes. We want 512 bytes units for 1451 * the threshold fields. 1452 */ 1453 RQS = ((regval >> 20) & 0x3FF) + 1; 1454 RQS /= 2; 1455 1456 /* The thresholds are relative to a full queue, with a bias 1457 * of 1 KiByte below full. 
1458 */ 1459 RFD = RQS / 2 - 2; 1460 RFA = RQS / 8 - 2; 1461 1462 regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8); 1463 1464 if (RFD >= 0 && RFA >= 0) { 1465 dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); 1466 } else { 1467 netdev_warn(lp->ndev, 1468 "FIFO too small for flow control."); 1469 } 1470 1471 regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) | 1472 DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS; 1473 1474 dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval); 1475 1476 spin_unlock_irqrestore(&lp->hw_lock, flags); 1477} 1478 1479static void dwceqos_configure_clock(struct net_local *lp) 1480{ 1481 unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000; 1482 1483 BUG_ON(!rate_mhz); 1484 1485 dwceqos_write(lp, 1486 REG_DWCEQOS_MAC_1US_TIC_COUNTER, 1487 DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1)); 1488} 1489 1490static void dwceqos_configure_bus(struct net_local *lp) 1491{ 1492 u32 sysbus_reg; 1493 1494 /* N.B. We do not support the Fixed Burst mode because it 1495 * opens a race window by making HW access to DMA descriptors 1496 * non-atomic. 
1497 */ 1498 1499 sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL; 1500 1501 if (lp->bus_cfg.en_lpi) 1502 sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI; 1503 1504 if (lp->bus_cfg.burst_map) 1505 sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( 1506 lp->bus_cfg.burst_map); 1507 else 1508 sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST( 1509 DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT); 1510 1511 if (lp->bus_cfg.read_requests) 1512 sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( 1513 lp->bus_cfg.read_requests - 1); 1514 else 1515 sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT( 1516 DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT); 1517 1518 if (lp->bus_cfg.write_requests) 1519 sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( 1520 lp->bus_cfg.write_requests - 1); 1521 else 1522 sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT( 1523 DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT); 1524 1525 if (netif_msg_hw(lp)) 1526 netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg); 1527 1528 dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg); 1529} 1530 1531static void dwceqos_init_hw(struct net_local *lp) 1532{ 1533 struct net_device *ndev = lp->ndev; 1534 u32 regval; 1535 u32 buswidth; 1536 u32 dma_skip; 1537 1538 /* Software reset */ 1539 dwceqos_reset_hw(lp); 1540 1541 dwceqos_configure_bus(lp); 1542 1543 /* Probe data bus width, 32/64/128 bits. */ 1544 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF); 1545 regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL); 1546 buswidth = (regval ^ 0xF) + 1; 1547 1548 /* Cache-align dma descriptors. 
*/ 1549 dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth; 1550 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL, 1551 DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) | 1552 DWCEQOS_DMA_CH_CTRL_PBLX8); 1553 1554 /* Initialize DMA Channel 0 */ 1555 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1); 1556 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1); 1557 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST, 1558 (u32)lp->tx_descs_addr); 1559 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST, 1560 (u32)lp->rx_descs_addr); 1561 1562 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 1563 lp->tx_descs_tail_addr); 1564 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL, 1565 lp->rx_descs_tail_addr); 1566 1567 if (lp->bus_cfg.tx_pbl) 1568 regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl); 1569 else 1570 regval = DWCEQOS_DMA_CH_CTRL_PBL(2); 1571 1572 /* Enable TSO if the HW support it */ 1573 if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN) 1574 regval |= DWCEQOS_DMA_CH_TX_TSE; 1575 1576 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval); 1577 1578 if (lp->bus_cfg.rx_pbl) 1579 regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl); 1580 else 1581 regval = DWCEQOS_DMA_CH_CTRL_PBL(2); 1582 1583 regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE); 1584 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); 1585 1586 regval |= DWCEQOS_DMA_CH_CTRL_START; 1587 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval); 1588 1589 /* Initialize MTL Queues */ 1590 regval = DWCEQOS_MTL_SCHALG_STRICT; 1591 dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval); 1592 1593 regval = DWCEQOS_MTL_TXQ_SIZE( 1594 DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) | 1595 DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF | 1596 DWCEQOS_MTL_TXQ_TTC512; 1597 dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval); 1598 1599 regval = DWCEQOS_MTL_RXQ_SIZE( 1600 DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) | 1601 DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP 
| DWCEQOS_MTL_RXQ_RSF; 1602 dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval); 1603 1604 dwceqos_configure_flow_control(lp); 1605 1606 /* Initialize MAC */ 1607 dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0); 1608 1609 lp->eee_enabled = 0; 1610 1611 dwceqos_configure_clock(lp); 1612 1613 /* MMC counters */ 1614 1615 /* probe implemented counters */ 1616 dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u); 1617 dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u); 1618 lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK); 1619 lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK); 1620 1621 dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST | 1622 DWCEQOS_MMC_CTRL_RSTONRD); 1623 dwceqos_enable_mmc_interrupt(lp); 1624 1625 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0); 1626 dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0); 1627 1628 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC | 1629 DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); 1630 1631 /* Start TX DMA */ 1632 regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL); 1633 dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, 1634 regval | DWCEQOS_DMA_CH_CTRL_START); 1635 1636 /* Enable MAC TX/RX */ 1637 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG); 1638 dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, 1639 regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE); 1640 1641 lp->phy_defer = false; 1642 mutex_lock(&ndev->phydev->lock); 1643 phy_read_status(ndev->phydev); 1644 dwceqos_adjust_link(lp->ndev); 1645 mutex_unlock(&ndev->phydev->lock); 1646} 1647 1648static void dwceqos_tx_reclaim(unsigned long data) 1649{ 1650 struct net_device *ndev = (struct net_device *)data; 1651 struct net_local *lp = netdev_priv(ndev); 1652 unsigned int tx_bytes = 0; 1653 unsigned int tx_packets = 0; 1654 1655 spin_lock(&lp->tx_lock); 1656 1657 while (lp->tx_free < DWCEQOS_TX_DCNT) { 1658 struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur]; 1659 struct ring_desc *rd = 
&lp->tx_skb[lp->tx_cur]; 1660 1661 /* Descriptor still being held by DMA ? */ 1662 if (dd->des3 & DWCEQOS_DMA_TDES3_OWN) 1663 break; 1664 1665 if (rd->mapping) 1666 dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len, 1667 DMA_TO_DEVICE); 1668 1669 if (unlikely(rd->skb)) { 1670 ++tx_packets; 1671 tx_bytes += rd->skb->len; 1672 dev_consume_skb_any(rd->skb); 1673 } 1674 1675 rd->skb = NULL; 1676 rd->mapping = 0; 1677 lp->tx_free++; 1678 lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT; 1679 1680 if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) && 1681 (dd->des3 & DWCEQOS_DMA_RDES3_ES)) { 1682 if (netif_msg_tx_err(lp)) 1683 netdev_err(ndev, "TX Error, TDES3 = 0x%x\n", 1684 dd->des3); 1685 if (netif_msg_hw(lp)) 1686 print_status(lp); 1687 } 1688 } 1689 spin_unlock(&lp->tx_lock); 1690 1691 netdev_completed_queue(ndev, tx_packets, tx_bytes); 1692 1693 dwceqos_dma_enable_txirq(lp); 1694 netif_wake_queue(ndev); 1695} 1696 1697static int dwceqos_rx(struct net_local *lp, int budget) 1698{ 1699 struct sk_buff *skb; 1700 u32 tot_size = 0; 1701 unsigned int n_packets = 0; 1702 unsigned int n_descs = 0; 1703 u32 len; 1704 1705 struct dwceqos_dma_desc *dd; 1706 struct sk_buff *new_skb; 1707 dma_addr_t new_skb_baddr = 0; 1708 1709 while (n_descs < budget) { 1710 if (!dwceqos_packet_avail(lp)) 1711 break; 1712 1713 new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE); 1714 if (!new_skb) { 1715 netdev_err(lp->ndev, "no memory for new sk_buff\n"); 1716 break; 1717 } 1718 1719 /* Get dma handle of skb->data */ 1720 new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent, 1721 new_skb->data, 1722 DWCEQOS_RX_BUF_SIZE, 1723 DMA_FROM_DEVICE); 1724 if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) { 1725 netdev_err(lp->ndev, "DMA map error\n"); 1726 dev_kfree_skb(new_skb); 1727 break; 1728 } 1729 1730 /* Read descriptor data after reading owner bit. 
*/ 1731 dma_rmb(); 1732 1733 dd = &lp->rx_descs[lp->rx_cur]; 1734 len = DWCEQOS_DMA_RDES3_PL(dd->des3); 1735 skb = lp->rx_skb[lp->rx_cur].skb; 1736 1737 /* Unmap old buffer */ 1738 dma_unmap_single(lp->ndev->dev.parent, 1739 lp->rx_skb[lp->rx_cur].mapping, 1740 lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE); 1741 1742 /* Discard packet on reception error or bad checksum */ 1743 if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) || 1744 (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) { 1745 dev_kfree_skb(skb); 1746 skb = NULL; 1747 } else { 1748 skb_put(skb, len); 1749 skb->protocol = eth_type_trans(skb, lp->ndev); 1750 switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) { 1751 case DWCEQOS_DMA_RDES1_PT_UDP: 1752 case DWCEQOS_DMA_RDES1_PT_TCP: 1753 case DWCEQOS_DMA_RDES1_PT_ICMP: 1754 skb->ip_summed = CHECKSUM_UNNECESSARY; 1755 break; 1756 default: 1757 skb->ip_summed = CHECKSUM_NONE; 1758 break; 1759 } 1760 } 1761 1762 if (unlikely(!skb)) { 1763 if (netif_msg_rx_err(lp)) 1764 netdev_dbg(lp->ndev, "rx error: des3=%X\n", 1765 lp->rx_descs[lp->rx_cur].des3); 1766 } else { 1767 tot_size += skb->len; 1768 n_packets++; 1769 1770 netif_receive_skb(skb); 1771 } 1772 1773 lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr; 1774 lp->rx_descs[lp->rx_cur].des1 = 0; 1775 lp->rx_descs[lp->rx_cur].des2 = 0; 1776 /* The DMA must observe des0/1/2 written before des3. */ 1777 wmb(); 1778 lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE | 1779 DWCEQOS_DMA_RDES3_OWN | 1780 DWCEQOS_DMA_RDES3_BUF1V; 1781 1782 lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr; 1783 lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE; 1784 lp->rx_skb[lp->rx_cur].skb = new_skb; 1785 1786 n_descs++; 1787 lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT; 1788 } 1789 1790 /* Make sure any ownership update is written to the descriptors before 1791 * DMA wakeup. 
 */
	wmb();

	/* Ack the receive interrupt, then kick the RX DMA by moving the ring
	 * tail pointer past the re-armed descriptors.
	 */
	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
	/* Wake up RX by writing tail pointer */
	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
		      lp->rx_descs_tail_addr);

	return n_descs;
}

/* NAPI poll handler: receive up to @budget frames. If the ring drained
 * below the budget, leave polling mode and re-enable the RX interrupt;
 * otherwise report the full budget so NAPI schedules us again.
 */
static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
{
	struct net_local *lp = container_of(napi, struct net_local, napi);
	int work_done = 0;

	work_done = dwceqos_rx(lp, budget - work_done);

	if (!dwceqos_packet_avail(lp) && work_done < budget) {
		napi_complete(napi);
		dwceqos_dma_enable_rxirq(lp);
	} else {
		work_done = budget;
	}

	return work_done;
}

/* Reinitialize function if a TX timed out */
static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
{
	struct net_local *lp = container_of(data, struct net_local,
					    txtimeout_reinit);

	netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
		   DWCEQOS_TX_TIMEOUT);

	if (netif_msg_hw(lp))
		print_status(lp);

	/* Recover with a full administrative down/up cycle under rtnl,
	 * exactly as ndo_stop/ndo_open would be called.
	 */
	rtnl_lock();
	dwceqos_stop(lp->ndev);
	dwceqos_open(lp->ndev);
	rtnl_unlock();
}

/* DT Probing function called by main probe.
 * Reads the optional MAC address and the optional snps,* bus-configuration
 * properties into lp->bus_cfg. Missing u32 properties leave the bus_cfg
 * fields untouched (of_property_read_u32() does not write on failure).
 */
static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct net_local *lp;
	const void *mac_address;
	struct dwceqos_bus_cfg *bus_cfg;
	struct device_node *np = pdev->dev.of_node;

	ndev = platform_get_drvdata(pdev);
	lp = netdev_priv(ndev);
	bus_cfg = &lp->bus_cfg;

	/* Set the MAC address. */
	mac_address = of_get_mac_address(pdev->dev.of_node);
	if (mac_address)
		ether_addr_copy(ndev->dev_addr, mac_address);

	/* These are all optional parameters */
	lp->en_tx_lpi_clockgating = of_property_read_bool(np,
		"snps,en-tx-lpi-clockgating");
	bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
	of_property_read_u32(np, "snps,write-requests",
			     &bus_cfg->write_requests);
	of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
	of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
	of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
	of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);

	netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
		   bus_cfg->en_lpi,
		   bus_cfg->write_requests,
		   bus_cfg->read_requests,
		   bus_cfg->burst_map,
		   bus_cfg->rx_pbl,
		   bus_cfg->tx_pbl);

	return 0;
}

/* ndo_open: allocate the descriptor rings, bring up PHY and hardware, and
 * only then unmask the DMA channel interrupts.
 */
static int dwceqos_open(struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);
	int res;

	dwceqos_reset_state(lp);
	res = dwceqos_descriptor_init(lp);
	if (res) {
		netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
		return res;
	}
	netdev_reset_queue(ndev);

	/* The dwceqos reset state machine requires all phy clocks to complete,
	 * hence the unusual init order with phy_start first.
	 */
	lp->phy_defer = true;
	phy_start(ndev->phydev);
	dwceqos_init_hw(lp);
	napi_enable(&lp->napi);

	netif_start_queue(ndev);
	tasklet_enable(&lp->tx_bdreclaim_tasklet);

	/* Enable Interrupts -- do this only after we enable NAPI and the
	 * tasklet.
	 */
	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
		      DWCEQOS_DMA_CH0_IE_NIE |
		      DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
		      DWCEQOS_DMA_CH0_IE_AIE |
		      DWCEQOS_DMA_CH0_IE_FBEE);

	return 0;
}

/* True when the TX DMA channel state machine reports "suspended", i.e. all
 * queued descriptors have been processed.
 * (Name misspells "dwceqos"; kept as-is since callers use it.)
 */
static bool dweqos_is_tx_dma_suspended(struct net_local *lp)
{
	u32 reg;

	reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
	reg = DMA_GET_TX_STATE_CH0(reg);

	return reg == DMA_TX_CH_SUSPENDED;
}

static void dwceqos_drain_dma(struct net_local *lp)
{
	/* Wait for all pending TX buffers to be sent. Upper limit based
	 * on max frame size on a 10 Mbit link.
	 */
	size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;

	while (!dweqos_is_tx_dma_suspended(lp) && limit--)
		usleep_range(100, 200);
}

/* ndo_stop: quiesce software contexts first (tasklet, NAPI), then the
 * queue, then drain and reset the hardware before freeing the rings.
 */
static int dwceqos_stop(struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);

	tasklet_disable(&lp->tx_bdreclaim_tasklet);
	napi_disable(&lp->napi);

	/* Stop all tx before we drain the tx dma.
 */
	netif_tx_lock_bh(lp->ndev);
	netif_stop_queue(ndev);
	netif_tx_unlock_bh(lp->ndev);

	dwceqos_drain_dma(lp);
	dwceqos_reset_hw(lp);
	phy_stop(ndev->phydev);

	dwceqos_descriptor_free(lp);

	return 0;
}

/* Write a TSO context descriptor carrying the new MSS (@gso_size) into the
 * TX ring and advance the ring index.
 */
static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
				    unsigned short gso_size)
{
	struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];

	dd->des0 = 0;
	dd->des1 = 0;
	dd->des2 = gso_size;
	dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;

	lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
}

/* Kick the TX DMA by (re)writing the descriptor ring tail pointer. */
static void dwceqos_tx_poll_demand(struct net_local *lp)
{
	dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
		      lp->tx_descs_tail_addr);
}

/* Book-keeping for one in-flight transmit request. */
struct dwceqos_tx {
	size_t nr_descriptors;		/* descriptors this skb will consume */
	size_t initial_descriptor;	/* ring index of the first descriptor */
	size_t last_descriptor;		/* ring index of the last descriptor */
	size_t prev_gso_size;		/* gso_size to restore on rollback */
	size_t network_header_len;	/* header bytes (incl. TCP hdr if GSO) */
};

/* Pre-compute how many descriptors @skb needs and snapshot the ring state
 * so a failed transmit can be rolled back.
 */
static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
			       struct dwceqos_tx *tx)
{
	size_t n = 1;
	size_t i;

	/* A GSO packet with a changed MSS needs an extra context descriptor. */
	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
		++n;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		/* Each fragment may span several max-sized descriptors. */
		n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
		     BYTES_PER_DMA_DESC;
	}

	tx->nr_descriptors = n;
	tx->initial_descriptor = lp->tx_next;
	tx->last_descriptor = lp->tx_next;
	tx->prev_gso_size = lp->gso_size;

	tx->network_header_len = skb_transport_offset(skb);
	if (skb_is_gso(skb))
		tx->network_header_len += tcp_hdrlen(skb);
}

/* Map the linear part of @skb and fill the first data descriptor. Emits a
 * context descriptor first if the MSS changed.
 */
static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
			     struct dwceqos_tx *tx)
{
	struct ring_desc *rd;
	struct dwceqos_dma_desc *dd;
	size_t payload_len;
	dma_addr_t dma_handle;

	if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
		dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
		lp->gso_size = skb_shinfo(skb)->gso_size;
	}

	dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
				    skb_headlen(skb), DMA_TO_DEVICE);

	if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
		netdev_err(lp->ndev, "TX DMA Mapping error\n");
		return -ENOMEM;
	}

	rd = &lp->tx_skb[lp->tx_next];
	dd = &lp->tx_descs[lp->tx_next];

	rd->skb = NULL;
	rd->len = skb_headlen(skb);
	rd->mapping = dma_handle;

	/* Set up DMA Descriptor */
	dd->des0 = dma_handle;

	if (skb_is_gso(skb)) {
		/* TSO: split header and payload; des1 points at the payload
		 * and des3 carries the total TCP payload length.
		 */
		payload_len = skb_headlen(skb) - tx->network_header_len;

		if (payload_len)
			dd->des1 = dma_handle + tx->network_header_len;
		dd->des2 = tx->network_header_len |
			   DWCEQOS_DMA_DES2_B2L(payload_len);
		dd->des3 = DWCEQOS_DMA_TDES3_TSE |
			   DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
			   (skb->len - tx->network_header_len);
	} else {
		dd->des1 = 0;
		dd->des2 = skb_headlen(skb);
		dd->des3 = skb->len;

		switch (skb->ip_summed) {
		case CHECKSUM_PARTIAL:
			dd->des3 |= DWCEQOS_DMA_TDES3_CA;
			/* fall through */
		case CHECKSUM_NONE:
		case CHECKSUM_UNNECESSARY:
		case CHECKSUM_COMPLETE:
		default:
			break;
		}
	}

	dd->des3 |= DWCEQOS_DMA_TDES3_FD;
	/* The first descriptor's OWN bit is set last, in tx_finalize(), so
	 * the DMA cannot start on a half-built chain.
	 */
	if (lp->tx_next != tx->initial_descriptor)
		dd->des3 |= DWCEQOS_DMA_TDES3_OWN;

	tx->last_descriptor = lp->tx_next;
	lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;

	return 0;
}

/* Map each page fragment of @skb and fill one or more data descriptors per
 * fragment (large fragments are split at the 16376-byte descriptor limit).
 */
static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
			    struct dwceqos_tx *tx)
{
	struct ring_desc *rd = NULL;
	struct dwceqos_dma_desc *dd;
	dma_addr_t dma_handle;
	size_t i;

	/* Setup more ring and DMA descriptor if the packet is fragmented */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		size_t frag_size;
		size_t consumed_size;

		/* Map DMA Area */
		dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
					      skb_frag_size(frag),
					      DMA_TO_DEVICE);
		if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
			netdev_err(lp->ndev, "DMA Mapping error\n");
			return -ENOMEM;
		}

		/* order-3 fragments span more than one descriptor. */
		frag_size = skb_frag_size(frag);
		consumed_size = 0;
		while (consumed_size < frag_size) {
			size_t dma_size = min_t(size_t, 16376,
						frag_size - consumed_size);

			rd = &lp->tx_skb[lp->tx_next];
			memset(rd, 0, sizeof(*rd));

			dd = &lp->tx_descs[lp->tx_next];

			/* Set DMA Descriptor fields */
			dd->des0 = dma_handle + consumed_size;
			dd->des1 = 0;
			dd->des2 = dma_size;

			if (skb_is_gso(skb))
				dd->des3 = (skb->len - tx->network_header_len);
			else
				dd->des3 = skb->len;

			dd->des3 |= DWCEQOS_DMA_TDES3_OWN;

			tx->last_descriptor = lp->tx_next;
			lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
			consumed_size += dma_size;
		}

		/* Only the last ring_desc of a fragment records the mapping,
		 * so reclaim/rollback unmaps each fragment exactly once.
		 */
		rd->len = skb_frag_size(frag);
		rd->mapping = dma_handle;
	}

	return 0;
}

/* Close the descriptor chain (LD + IOC on the last descriptor), hand the
 * chain to hardware by setting OWN on the first descriptor, and kick TX.
 */
static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
				struct dwceqos_tx *tx)
{
	lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
	lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;

	lp->tx_skb[tx->last_descriptor].skb = skb;

	/* Make all descriptor updates visible to the DMA before setting the
	 * owner bit.
	 */
	wmb();

	lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;

	/* Make the owner bit visible before TX wakeup. */
	wmb();

	dwceqos_tx_poll_demand(lp);
}

/* Undo a partially built transmit: unmap buffers, clear the descriptors and
 * restore the ring index and gso state captured in tx_prepare().
 */
static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
{
	size_t i = tx->initial_descriptor;

	while (i != lp->tx_next) {
		if (lp->tx_skb[i].mapping)
			dma_unmap_single(lp->ndev->dev.parent,
					 lp->tx_skb[i].mapping,
					 lp->tx_skb[i].len,
					 DMA_TO_DEVICE);

		lp->tx_skb[i].mapping = 0;
		lp->tx_skb[i].skb = NULL;

		memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));

		i = (i + 1) % DWCEQOS_TX_DCNT;
	}

	lp->tx_next = tx->initial_descriptor;
	lp->gso_size = tx->prev_gso_size;
}

/* ndo_start_xmit: build the descriptor chain for @skb, then publish it to
 * the hardware under tx_lock. On mapping failure the skb is dropped (the
 * chain is rolled back and 0/NETDEV_TX_OK returned, as retrying would fail
 * the same way).
 */
static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);
	struct dwceqos_tx trans;
	int err;

	dwceqos_tx_prepare(skb, lp, &trans);
	if (lp->tx_free < trans.nr_descriptors) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	err = dwceqos_tx_linear(skb, lp, &trans);
	if (err)
		goto tx_error;

	err = dwceqos_tx_frags(skb, lp, &trans);
	if (err)
		goto tx_error;

	WARN_ON(lp->tx_next !=
		((trans.initial_descriptor + trans.nr_descriptors) %
		 DWCEQOS_TX_DCNT));

	spin_lock_bh(&lp->tx_lock);
	lp->tx_free -= trans.nr_descriptors;
	dwceqos_tx_finalize(skb, lp, &trans);
	netdev_sent_queue(ndev, skb->len);
	spin_unlock_bh(&lp->tx_lock);

	netif_trans_update(ndev);
	return 0;

tx_error:
	dwceqos_tx_rollback(lp, &trans);
	dev_kfree_skb(skb);
	return 0;
}

/* Set MAC address and then update HW accordingly */
static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
{
	struct net_local *lp = netdev_priv(ndev);
	struct sockaddr *hwaddr = (struct sockaddr *)addr;

	if (netif_running(ndev))
		return -EBUSY;

	if (!is_valid_ether_addr(hwaddr->sa_data))
		return
		       -EADDRNOTAVAIL;

	memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);

	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
	return 0;
}

/* ndo_tx_timeout: defer recovery to the workqueue since the reinit path
 * sleeps (rtnl + full down/up) and this hook may not.
 */
static void dwceqos_tx_timeout(struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);

	queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
}

/* Program perfect-filter address slot @reg_n with @addr and enable it. */
static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
				  unsigned int reg_n)
{
	unsigned long data;

	/* High register: upper two address bytes plus the enable bit. */
	data = (addr[5] << 8) | addr[4];
	dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
		      data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
	data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
	dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
}

static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
{
	/* Do not disable MAC address 0 */
	if (reg_n != 0)
		dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
}

/* ndo_set_rx_mode: program promiscuous/all-multi modes, the multicast hash
 * filter, and the perfect unicast address slots (slot 0 is the device's own
 * address, so extra unicast addresses start at slot 1).
 */
static void dwceqos_set_rx_mode(struct net_device *ndev)
{
	struct net_local *lp = netdev_priv(ndev);
	u32 regval = 0;
	u32 mc_filter[2];
	int reg = 1;
	struct netdev_hw_addr *ha;
	unsigned int max_mac_addr;

	max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);

	if (ndev->flags & IFF_PROMISC) {
		regval = DWCEQOS_MAC_PKT_FILT_PR;
	} else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
		    (ndev->flags & IFF_ALLMULTI))) {
		/* Too many groups for the hash table: pass all multicast. */
		regval = DWCEQOS_MAC_PKT_FILT_PM;
		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
	} else if (!netdev_mc_empty(ndev)) {
		regval = DWCEQOS_MAC_PKT_FILT_HMC;
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, ndev) {
			/* The upper 6 bits of the calculated CRC are used to
			 * index the contens of the hash table
			 */
			int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
			/* The most significant bit determines the register
			 * to use (H/L) while the other 5 bits determine
			 * the bit within the register.
			 */
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
		}
		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
		dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
	}
	if (netdev_uc_count(ndev) > max_mac_addr) {
		/* More unicast addresses than perfect-filter slots: fall
		 * back to promiscuous mode.
		 */
		regval |= DWCEQOS_MAC_PKT_FILT_PR;
	} else {
		netdev_for_each_uc_addr(ha, ndev) {
			dwceqos_set_umac_addr(lp, ha->addr, reg);
			reg++;
		}
		for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
			dwceqos_disable_umac_addr(lp, reg);
	}
	dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void dwceqos_poll_controller(struct net_device *ndev)
{
	disable_irq(ndev->irq);
	dwceqos_interrupt(ndev->irq, ndev);
	enable_irq(ndev->irq);
}
#endif

/* Accumulate the hardware MMC counters selected by @rx_mask/@tx_mask into
 * lp->mmc_counters. Each mask bit matches the hardware's counter-interrupt
 * bit layout. The values are accumulated ("+=") into the software copy;
 * NOTE(review): this presumes the MMC block is configured clear-on-read --
 * the MMC control setup is outside this hunk, confirm there.
 * Caller must hold lp->stats_lock.
 */
static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
				      u32 tx_mask)
{
	if (tx_mask & BIT(27))
		lp->mmc_counters.txlpitranscntr +=
			dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
	if (tx_mask & BIT(26))
		lp->mmc_counters.txpiuscntr +=
			dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
	if (tx_mask & BIT(25))
		lp->mmc_counters.txoversize_g +=
			dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
	if (tx_mask & BIT(24))
		lp->mmc_counters.txvlanpackets_g +=
			dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
	if (tx_mask & BIT(23))
		lp->mmc_counters.txpausepackets +=
			dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
	if (tx_mask & BIT(22))
		lp->mmc_counters.txexcessdef +=
			dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
	if (tx_mask & BIT(21))
		lp->mmc_counters.txpacketcount_g +=
			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
	if (tx_mask & BIT(20))
		lp->mmc_counters.txoctetcount_g +=
			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
	if (tx_mask & BIT(19))
		lp->mmc_counters.txcarriererror +=
			dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
	if (tx_mask & BIT(18))
		lp->mmc_counters.txexcesscol +=
			dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
	if (tx_mask & BIT(17))
		lp->mmc_counters.txlatecol +=
			dwceqos_read(lp, DWC_MMC_TXLATECOL);
	if (tx_mask & BIT(16))
		lp->mmc_counters.txdeferred +=
			dwceqos_read(lp, DWC_MMC_TXDEFERRED);
	if (tx_mask & BIT(15))
		lp->mmc_counters.txmulticol_g +=
			dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
	if (tx_mask & BIT(14))
		lp->mmc_counters.txsinglecol_g +=
			dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
	if (tx_mask & BIT(13))
		lp->mmc_counters.txunderflowerror +=
			dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
	if (tx_mask & BIT(12))
		lp->mmc_counters.txbroadcastpackets_gb +=
			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
	if (tx_mask & BIT(11))
		lp->mmc_counters.txmulticastpackets_gb +=
			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
	if (tx_mask & BIT(10))
		lp->mmc_counters.txunicastpackets_gb +=
			dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
	if (tx_mask & BIT(9))
		lp->mmc_counters.tx1024tomaxoctets_gb +=
			dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
	if (tx_mask & BIT(8))
		lp->mmc_counters.tx512to1023octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
	if (tx_mask & BIT(7))
		lp->mmc_counters.tx256to511octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
	if (tx_mask & BIT(6))
		lp->mmc_counters.tx128to255octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
	if (tx_mask & BIT(5))
		lp->mmc_counters.tx65to127octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
	if (tx_mask & BIT(4))
		lp->mmc_counters.tx64octets_gb +=
			dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
	if (tx_mask & BIT(3))
		lp->mmc_counters.txmulticastpackets_g +=
			dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
	if (tx_mask & BIT(2))
		lp->mmc_counters.txbroadcastpackets_g +=
			dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
	if (tx_mask & BIT(1))
		lp->mmc_counters.txpacketcount_gb +=
			dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
	if (tx_mask & BIT(0))
		lp->mmc_counters.txoctetcount_gb +=
			dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);

	if (rx_mask & BIT(27))
		lp->mmc_counters.rxlpitranscntr +=
			dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
	if (rx_mask & BIT(26))
		lp->mmc_counters.rxlpiuscntr +=
			dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
	if (rx_mask & BIT(25))
		lp->mmc_counters.rxctrlpackets_g +=
			dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
	if (rx_mask & BIT(24))
		lp->mmc_counters.rxrcverror +=
			dwceqos_read(lp, DWC_MMC_RXRCVERROR);
	if (rx_mask & BIT(23))
		lp->mmc_counters.rxwatchdog +=
			dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
	if (rx_mask & BIT(22))
		lp->mmc_counters.rxvlanpackets_gb +=
			dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
	if (rx_mask & BIT(21))
		lp->mmc_counters.rxfifooverflow +=
			dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
	if (rx_mask & BIT(20))
		lp->mmc_counters.rxpausepackets +=
			dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
	if (rx_mask & BIT(19))
		lp->mmc_counters.rxoutofrangetype +=
			dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
	if (rx_mask & BIT(18))
		lp->mmc_counters.rxlengtherror +=
			dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
	if (rx_mask & BIT(17))
		lp->mmc_counters.rxunicastpackets_g +=
			dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
	if (rx_mask & BIT(16))
		lp->mmc_counters.rx1024tomaxoctets_gb +=
			dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
	if (rx_mask & BIT(15))
		lp->mmc_counters.rx512to1023octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
	if (rx_mask & BIT(14))
		lp->mmc_counters.rx256to511octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
	if (rx_mask & BIT(13))
		lp->mmc_counters.rx128to255octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
	if (rx_mask & BIT(12))
		lp->mmc_counters.rx65to127octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
	if (rx_mask & BIT(11))
		lp->mmc_counters.rx64octets_gb +=
			dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
	if (rx_mask & BIT(10))
		lp->mmc_counters.rxoversize_g +=
			dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
	if (rx_mask & BIT(9))
		lp->mmc_counters.rxundersize_g +=
			dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
	if (rx_mask & BIT(8))
		lp->mmc_counters.rxjabbererror +=
			dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
	if (rx_mask & BIT(7))
		lp->mmc_counters.rxrunterror +=
			dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
	if (rx_mask & BIT(6))
		lp->mmc_counters.rxalignmenterror +=
			dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
	if (rx_mask & BIT(5))
		lp->mmc_counters.rxcrcerror +=
			dwceqos_read(lp, DWC_MMC_RXCRCERROR);
	if (rx_mask & BIT(4))
		lp->mmc_counters.rxmulticastpackets_g +=
			dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
	if (rx_mask & BIT(3))
		lp->mmc_counters.rxbroadcastpackets_g +=
			dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
	if (rx_mask & BIT(2))
		lp->mmc_counters.rxoctetcount_g +=
			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
	if (rx_mask & BIT(1))
		lp->mmc_counters.rxoctetcount_gb +=
			dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
	if (rx_mask & BIT(0))
		lp->mmc_counters.rxpacketcount_gb +=
			dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
}

/* ndo_get_stats64: refresh the MMC counter snapshot under stats_lock and
 * translate it into rtnl_link_stats64.
 */
static struct rtnl_link_stats64*
dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
{
	unsigned long flags;
	struct net_local *lp = netdev_priv(ndev);
	struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;

	spin_lock_irqsave(&lp->stats_lock, flags);
	dwceqos_read_mmc_counters(lp,
lp->mmc_rx_counters_mask, 2502 lp->mmc_tx_counters_mask); 2503 spin_unlock_irqrestore(&lp->stats_lock, flags); 2504 2505 s->rx_packets = hwstats->rxpacketcount_gb; 2506 s->rx_bytes = hwstats->rxoctetcount_gb; 2507 s->rx_errors = hwstats->rxpacketcount_gb - 2508 hwstats->rxbroadcastpackets_g - 2509 hwstats->rxmulticastpackets_g - 2510 hwstats->rxunicastpackets_g; 2511 s->multicast = hwstats->rxmulticastpackets_g; 2512 s->rx_length_errors = hwstats->rxlengtherror; 2513 s->rx_crc_errors = hwstats->rxcrcerror; 2514 s->rx_fifo_errors = hwstats->rxfifooverflow; 2515 2516 s->tx_packets = hwstats->txpacketcount_gb; 2517 s->tx_bytes = hwstats->txoctetcount_gb; 2518 2519 if (lp->mmc_tx_counters_mask & BIT(21)) 2520 s->tx_errors = hwstats->txpacketcount_gb - 2521 hwstats->txpacketcount_g; 2522 else 2523 s->tx_errors = hwstats->txunderflowerror + 2524 hwstats->txcarriererror; 2525 2526 return s; 2527} 2528 2529static void 2530dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed) 2531{ 2532 const struct net_local *lp = netdev_priv(ndev); 2533 2534 strcpy(ed->driver, lp->pdev->dev.driver->name); 2535 strcpy(ed->version, DRIVER_VERSION); 2536} 2537 2538static void dwceqos_get_pauseparam(struct net_device *ndev, 2539 struct ethtool_pauseparam *pp) 2540{ 2541 const struct net_local *lp = netdev_priv(ndev); 2542 2543 pp->autoneg = lp->flowcontrol.autoneg; 2544 pp->tx_pause = lp->flowcontrol.tx; 2545 pp->rx_pause = lp->flowcontrol.rx; 2546} 2547 2548static int dwceqos_set_pauseparam(struct net_device *ndev, 2549 struct ethtool_pauseparam *pp) 2550{ 2551 struct net_local *lp = netdev_priv(ndev); 2552 int ret = 0; 2553 2554 lp->flowcontrol.autoneg = pp->autoneg; 2555 if (pp->autoneg) { 2556 ndev->phydev->advertising |= ADVERTISED_Pause; 2557 ndev->phydev->advertising |= ADVERTISED_Asym_Pause; 2558 } else { 2559 ndev->phydev->advertising &= ~ADVERTISED_Pause; 2560 ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause; 2561 lp->flowcontrol.rx = pp->rx_pause; 2562 
lp->flowcontrol.tx = pp->tx_pause; 2563 } 2564 2565 if (netif_running(ndev)) 2566 ret = phy_start_aneg(ndev->phydev); 2567 2568 return ret; 2569} 2570 2571static void dwceqos_get_strings(struct net_device *ndev, u32 stringset, 2572 u8 *data) 2573{ 2574 size_t i; 2575 2576 if (stringset != ETH_SS_STATS) 2577 return; 2578 2579 for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { 2580 memcpy(data, dwceqos_ethtool_stats[i].stat_name, 2581 ETH_GSTRING_LEN); 2582 data += ETH_GSTRING_LEN; 2583 } 2584} 2585 2586static void dwceqos_get_ethtool_stats(struct net_device *ndev, 2587 struct ethtool_stats *stats, u64 *data) 2588{ 2589 struct net_local *lp = netdev_priv(ndev); 2590 unsigned long flags; 2591 size_t i; 2592 u8 *mmcstat = (u8 *)&lp->mmc_counters; 2593 2594 spin_lock_irqsave(&lp->stats_lock, flags); 2595 dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask, 2596 lp->mmc_tx_counters_mask); 2597 spin_unlock_irqrestore(&lp->stats_lock, flags); 2598 2599 for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) { 2600 memcpy(data, 2601 mmcstat + dwceqos_ethtool_stats[i].offset, 2602 sizeof(u64)); 2603 data++; 2604 } 2605} 2606 2607static int dwceqos_get_sset_count(struct net_device *ndev, int sset) 2608{ 2609 if (sset == ETH_SS_STATS) 2610 return ARRAY_SIZE(dwceqos_ethtool_stats); 2611 2612 return -EOPNOTSUPP; 2613} 2614 2615static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs, 2616 void *space) 2617{ 2618 const struct net_local *lp = netdev_priv(dev); 2619 u32 *reg_space = (u32 *)space; 2620 int reg_offset; 2621 int reg_ix = 0; 2622 2623 /* MAC registers */ 2624 for (reg_offset = START_MAC_REG_OFFSET; 2625 reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { 2626 reg_space[reg_ix] = dwceqos_read(lp, reg_offset); 2627 reg_ix++; 2628 } 2629 /* MTL registers */ 2630 for (reg_offset = START_MTL_REG_OFFSET; 2631 reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) { 2632 reg_space[reg_ix] = dwceqos_read(lp, reg_offset); 2633 reg_ix++; 2634 } 2635 
2636 /* DMA registers */ 2637 for (reg_offset = START_DMA_REG_OFFSET; 2638 reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) { 2639 reg_space[reg_ix] = dwceqos_read(lp, reg_offset); 2640 reg_ix++; 2641 } 2642 2643 BUG_ON(4 * reg_ix > REG_SPACE_SIZE); 2644} 2645 2646static int dwceqos_get_regs_len(struct net_device *dev) 2647{ 2648 return REG_SPACE_SIZE; 2649} 2650 2651static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl) 2652{ 2653 return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off"; 2654} 2655 2656static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl) 2657{ 2658 return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off"; 2659} 2660 2661static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata) 2662{ 2663 struct net_local *lp = netdev_priv(ndev); 2664 u32 lpi_status; 2665 u32 lpi_enabled; 2666 2667 if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) 2668 return -EOPNOTSUPP; 2669 2670 edata->eee_active = lp->eee_active; 2671 edata->eee_enabled = lp->eee_enabled; 2672 edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER); 2673 lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); 2674 lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA); 2675 edata->tx_lpi_enabled = lpi_enabled; 2676 2677 if (netif_msg_hw(lp)) { 2678 u32 regval; 2679 2680 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); 2681 2682 netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n", 2683 dwceqos_get_rx_lpi_state(regval), 2684 dwceqos_get_tx_lpi_state(regval)); 2685 } 2686 2687 return phy_ethtool_get_eee(ndev->phydev, edata); 2688} 2689 2690static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata) 2691{ 2692 struct net_local *lp = netdev_priv(ndev); 2693 u32 regval; 2694 unsigned long flags; 2695 2696 if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL)) 2697 return -EOPNOTSUPP; 2698 2699 if (edata->eee_enabled && !lp->eee_active) 2700 return -EOPNOTSUPP; 
2701 2702 if (edata->tx_lpi_enabled) { 2703 if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN || 2704 edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX) 2705 return -EINVAL; 2706 } 2707 2708 lp->eee_enabled = edata->eee_enabled; 2709 2710 if (edata->eee_enabled && edata->tx_lpi_enabled) { 2711 dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER, 2712 edata->tx_lpi_timer); 2713 2714 spin_lock_irqsave(&lp->hw_lock, flags); 2715 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); 2716 regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE; 2717 if (lp->en_tx_lpi_clockgating) 2718 regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE; 2719 dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); 2720 spin_unlock_irqrestore(&lp->hw_lock, flags); 2721 } else { 2722 spin_lock_irqsave(&lp->hw_lock, flags); 2723 regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS); 2724 regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE; 2725 dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval); 2726 spin_unlock_irqrestore(&lp->hw_lock, flags); 2727 } 2728 2729 return phy_ethtool_set_eee(ndev->phydev, edata); 2730} 2731 2732static u32 dwceqos_get_msglevel(struct net_device *ndev) 2733{ 2734 const struct net_local *lp = netdev_priv(ndev); 2735 2736 return lp->msg_enable; 2737} 2738 2739static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel) 2740{ 2741 struct net_local *lp = netdev_priv(ndev); 2742 2743 lp->msg_enable = msglevel; 2744} 2745 2746static const struct ethtool_ops dwceqos_ethtool_ops = { 2747 .get_drvinfo = dwceqos_get_drvinfo, 2748 .get_link = ethtool_op_get_link, 2749 .get_pauseparam = dwceqos_get_pauseparam, 2750 .set_pauseparam = dwceqos_set_pauseparam, 2751 .get_strings = dwceqos_get_strings, 2752 .get_ethtool_stats = dwceqos_get_ethtool_stats, 2753 .get_sset_count = dwceqos_get_sset_count, 2754 .get_regs = dwceqos_get_regs, 2755 .get_regs_len = dwceqos_get_regs_len, 2756 .get_eee = dwceqos_get_eee, 2757 .set_eee = dwceqos_set_eee, 2758 .get_msglevel = dwceqos_get_msglevel, 2759 
	.set_msglevel = dwceqos_set_msglevel,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops netdev_ops = {
	.ndo_open		= dwceqos_open,
	.ndo_stop		= dwceqos_stop,
	.ndo_start_xmit		= dwceqos_start_xmit,
	.ndo_set_rx_mode	= dwceqos_set_rx_mode,
	.ndo_set_mac_address	= dwceqos_set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= dwceqos_poll_controller,
#endif
	.ndo_do_ioctl		= dwceqos_ioctl,
	.ndo_tx_timeout		= dwceqos_tx_timeout,
	.ndo_get_stats64	= dwceqos_get_stats64,
};

static const struct of_device_id dwceq_of_match[] = {
	{ .compatible = "snps,dwc-qos-ethernet-4.10", },
	{}
};
MODULE_DEVICE_TABLE(of, dwceq_of_match);

/* Platform probe: map registers, enable clocks, discover hardware features,
 * set up PHY/MDIO, parse DT configuration and register the net device.
 * Resources are released in reverse acquisition order on the error paths.
 */
static int dwceqos_probe(struct platform_device *pdev)
{
	struct resource *r_mem = NULL;
	struct net_device *ndev;
	struct net_local *lp;
	int ret = -ENXIO;

	r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r_mem) {
		dev_err(&pdev->dev, "no IO resource defined.\n");
		return -ENXIO;
	}

	ndev = alloc_etherdev(sizeof(*lp));
	if (!ndev) {
		dev_err(&pdev->dev, "etherdev allocation failed.\n");
		return -ENOMEM;
	}

	SET_NETDEV_DEV(ndev, &pdev->dev);

	lp = netdev_priv(ndev);
	lp->ndev = ndev;
	lp->pdev = pdev;
	lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);

	spin_lock_init(&lp->tx_lock);
	spin_lock_init(&lp->hw_lock);
	spin_lock_init(&lp->stats_lock);

	lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(lp->apb_pclk)) {
		dev_err(&pdev->dev, "apb_pclk clock not found.\n");
		ret = PTR_ERR(lp->apb_pclk);
		goto err_out_free_netdev;
	}

	ret = clk_prepare_enable(lp->apb_pclk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable APER clock.\n");
		goto err_out_free_netdev;
	}

	lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
	if (IS_ERR(lp->baseaddr)) {
		dev_err(&pdev->dev, "failed to map baseaddress.\n");
		ret = PTR_ERR(lp->baseaddr);
		goto err_out_clk_dis_aper;
	}

	/* NOTE(review): platform_get_irq() may return a negative errno;
	 * the value is used unchecked below in devm_request_irq().
	 */
	ndev->irq = platform_get_irq(pdev, 0);
	ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
	ndev->netdev_ops = &netdev_ops;
	ndev->ethtool_ops = &dwceqos_ethtool_ops;
	ndev->base_addr = r_mem->start;

	dwceqos_get_hwfeatures(lp);
	dwceqos_mdio_set_csr(lp);

	/* Advertise only the offloads the synthesized IP actually has. */
	ndev->hw_features = NETIF_F_SG;

	if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

	if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
		ndev->hw_features |= NETIF_F_RXCSUM;

	ndev->features = ndev->hw_features;

	lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
	if (IS_ERR(lp->phy_ref_clk)) {
		dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
		ret = PTR_ERR(lp->phy_ref_clk);
		goto err_out_clk_dis_aper;
	}

	ret = clk_prepare_enable(lp->phy_ref_clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable device clock.\n");
		goto err_out_clk_dis_aper;
	}

	lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
					"phy-handle", 0);
	if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
		ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
		if (ret < 0) {
			dev_err(&pdev->dev, "invalid fixed-link");
			goto err_out_clk_dis_phy;
		}

		lp->phy_node = of_node_get(lp->pdev->dev.of_node);
	}

	ret = of_get_phy_mode(lp->pdev->dev.of_node);
	if (ret < 0) {
		dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
		goto err_out_deregister_fixed_link;
	}

	lp->phy_interface = ret;

	ret = dwceqos_mii_init(lp);
	if (ret) {
		dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
		goto err_out_deregister_fixed_link;
	}

	ret = dwceqos_mii_probe(ndev);
	if (ret != 0) {
		netdev_err(ndev, "mii_probe fail.\n");
		ret = -ENXIO;
		goto err_out_deregister_fixed_link;
	}

	dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);

	tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
		     (unsigned long)ndev);
	tasklet_disable(&lp->tx_bdreclaim_tasklet);

	/* NOTE(review): alloc_workqueue() can return NULL; the result is
	 * not checked before use in dwceqos_tx_timeout().
	 */
	lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME,
						   WQ_MEM_RECLAIM, 0);
	INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);

	platform_set_drvdata(pdev, ndev);
	ret = dwceqos_probe_config_dt(pdev);
	if (ret) {
		dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
			ret);
		goto err_out_deregister_fixed_link;
	}
	dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
		 pdev->id, ndev->base_addr, ndev->irq);

	ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
			       ndev->name, ndev);
	if (ret) {
		dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
			ndev->irq, ret);
		goto err_out_deregister_fixed_link;
	}

	if (netif_msg_probe(lp))
		netdev_dbg(ndev, "net_local@%p\n", lp);

	netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_deregister_fixed_link;
	}

	return 0;

err_out_deregister_fixed_link:
	if (of_phy_is_fixed_link(pdev->dev.of_node))
		of_phy_deregister_fixed_link(pdev->dev.of_node);
err_out_clk_dis_phy:
	clk_disable_unprepare(lp->phy_ref_clk);
err_out_clk_dis_aper:
	clk_disable_unprepare(lp->apb_pclk);
err_out_free_netdev:
	of_node_put(lp->phy_node);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);
	return ret;
}

/* Platform remove: tear down PHY, MDIO bus, netdev and clocks. */
static int dwceqos_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct net_local *lp;

	if (ndev) {
		lp = netdev_priv(ndev);

		if (ndev->phydev) {
			phy_disconnect(ndev->phydev);
			if (of_phy_is_fixed_link(pdev->dev.of_node))
				of_phy_deregister_fixed_link(pdev->dev.of_node);
		}
		mdiobus_unregister(lp->mii_bus);
		mdiobus_free(lp->mii_bus);

		unregister_netdev(ndev);

		clk_disable_unprepare(lp->phy_ref_clk);
		clk_disable_unprepare(lp->apb_pclk);

		free_netdev(ndev);
	}

	return 0;
}

static struct platform_driver dwceqos_driver = {
	.probe   = dwceqos_probe,
	.remove  = dwceqos_remove,
	.driver  = {
		.name  = DRIVER_NAME,
		.of_match_table = dwceq_of_match,
	},
};

module_platform_driver(dwceqos_driver);

MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");