Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver

This driver adds support for Xilinx 10/100/1000 AXI Ethernet.

It can be used, for instance, on Xilinx boards with a Microblaze
architecture like the ML605.

The patch is against the latest net-next tree and checkpatch clean.

Signed-off-by: Ariane Keller <ariane.keller@tik.ee.ethz.ch>
Signed-off-by: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by danborkmann@iogearbox.net and committed by David S. Miller
(commit 8a3b7a25, parent a44acd55)

+2444
+6
MAINTAINERS
··· 7469 7469 F: Documentation/filesystems/xfs.txt 7470 7470 F: fs/xfs/ 7471 7471 7472 + XILINX AXI ETHERNET DRIVER 7473 + M: Ariane Keller <ariane.keller@tik.ee.ethz.ch> 7474 + M: Daniel Borkmann <daniel.borkmann@tik.ee.ethz.ch> 7475 + S: Maintained 7476 + F: drivers/net/ethernet/xilinx/xilinx_axienet* 7477 + 7472 7478 XILINX SYSTEMACE DRIVER 7473 7479 M: Grant Likely <grant.likely@secretlab.ca> 7474 7480 W: http://www.secretlab.ca/
+8
drivers/net/ethernet/xilinx/Kconfig
··· 25 25 ---help--- 26 26 This driver supports the 10/100 Ethernet Lite from Xilinx. 27 27 28 + config XILINX_AXI_EMAC 29 + tristate "Xilinx 10/100/1000 AXI Ethernet support" 30 + depends on (PPC32 || MICROBLAZE) 31 + select PHYLIB 32 + ---help--- 33 + This driver supports the 10/100/1000 Ethernet from Xilinx for the 34 + AXI bus interface used in Xilinx Virtex FPGAs. 35 + 28 36 config XILINX_LL_TEMAC 29 37 tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver" 30 38 depends on (PPC || MICROBLAZE)
+2
drivers/net/ethernet/xilinx/Makefile
··· 5 5 ll_temac-objs := ll_temac_main.o ll_temac_mdio.o 6 6 obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o 7 7 obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o 8 + xilinx_emac-objs := xilinx_axienet_main.o xilinx_axienet_mdio.o 9 + obj-$(CONFIG_XILINX_AXI_EMAC) += xilinx_emac.o
+508
drivers/net/ethernet/xilinx/xilinx_axienet.h
··· 1 + /* 2 + * Definitions for Xilinx Axi Ethernet device driver. 3 + * 4 + * Copyright (c) 2009 Secret Lab Technologies, Ltd. 5 + * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 6 + * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 7 + * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 8 + */ 9 + 10 + #ifndef XILINX_AXIENET_H 11 + #define XILINX_AXIENET_H 12 + 13 + #include <linux/netdevice.h> 14 + #include <linux/spinlock.h> 15 + #include <linux/interrupt.h> 16 + 17 + /* Packet size info */ 18 + #define XAE_HDR_SIZE 14 /* Size of Ethernet header */ 19 + #define XAE_HDR_VLAN_SIZE 18 /* Size of an Ethernet hdr + VLAN */ 20 + #define XAE_TRL_SIZE 4 /* Size of Ethernet trailer (FCS) */ 21 + #define XAE_MTU 1500 /* Max MTU of an Ethernet frame */ 22 + #define XAE_JUMBO_MTU 9000 /* Max MTU of a jumbo Eth. frame */ 23 + 24 + #define XAE_MAX_FRAME_SIZE (XAE_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE) 25 + #define XAE_MAX_VLAN_FRAME_SIZE (XAE_MTU + XAE_HDR_VLAN_SIZE + XAE_TRL_SIZE) 26 + #define XAE_MAX_JUMBO_FRAME_SIZE (XAE_JUMBO_MTU + XAE_HDR_SIZE + XAE_TRL_SIZE) 27 + 28 + /* Configuration options */ 29 + 30 + /* Accept all incoming packets. Default: disabled (cleared) */ 31 + #define XAE_OPTION_PROMISC (1 << 0) 32 + 33 + /* Jumbo frame support for Tx & Rx. Default: disabled (cleared) */ 34 + #define XAE_OPTION_JUMBO (1 << 1) 35 + 36 + /* VLAN Rx & Tx frame support. Default: disabled (cleared) */ 37 + #define XAE_OPTION_VLAN (1 << 2) 38 + 39 + /* Enable recognition of flow control frames on Rx. Default: enabled (set) */ 40 + #define XAE_OPTION_FLOW_CONTROL (1 << 4) 41 + 42 + /* Strip FCS and PAD from incoming frames. Note: PAD from VLAN frames is not 43 + * stripped. Default: disabled (set) */ 44 + #define XAE_OPTION_FCS_STRIP (1 << 5) 45 + 46 + /* Generate FCS field and add PAD automatically for outgoing frames. 
47 + * Default: enabled (set) */ 48 + #define XAE_OPTION_FCS_INSERT (1 << 6) 49 + 50 + /* Enable Length/Type error checking for incoming frames. When this option is 51 + * set, the MAC will filter frames that have a mismatched type/length field 52 + * and if XAE_OPTION_REPORT_RXERR is set, the user is notified when these 53 + * types of frames are encountered. When this option is cleared, the MAC will 54 + * allow these types of frames to be received. Default: enabled (set) */ 55 + #define XAE_OPTION_LENTYPE_ERR (1 << 7) 56 + 57 + /* Enable the transmitter. Default: enabled (set) */ 58 + #define XAE_OPTION_TXEN (1 << 11) 59 + 60 + /* Enable the receiver. Default: enabled (set) */ 61 + #define XAE_OPTION_RXEN (1 << 12) 62 + 63 + /* Default options set when device is initialized or reset */ 64 + #define XAE_OPTION_DEFAULTS \ 65 + (XAE_OPTION_TXEN | \ 66 + XAE_OPTION_FLOW_CONTROL | \ 67 + XAE_OPTION_RXEN) 68 + 69 + /* Axi DMA Register definitions */ 70 + 71 + #define XAXIDMA_TX_CR_OFFSET 0x00000000 /* Channel control */ 72 + #define XAXIDMA_TX_SR_OFFSET 0x00000004 /* Status */ 73 + #define XAXIDMA_TX_CDESC_OFFSET 0x00000008 /* Current descriptor pointer */ 74 + #define XAXIDMA_TX_TDESC_OFFSET 0x00000010 /* Tail descriptor pointer */ 75 + 76 + #define XAXIDMA_RX_CR_OFFSET 0x00000030 /* Channel control */ 77 + #define XAXIDMA_RX_SR_OFFSET 0x00000034 /* Status */ 78 + #define XAXIDMA_RX_CDESC_OFFSET 0x00000038 /* Current descriptor pointer */ 79 + #define XAXIDMA_RX_TDESC_OFFSET 0x00000040 /* Tail descriptor pointer */ 80 + 81 + #define XAXIDMA_CR_RUNSTOP_MASK 0x00000001 /* Start/stop DMA channel */ 82 + #define XAXIDMA_CR_RESET_MASK 0x00000004 /* Reset DMA engine */ 83 + 84 + #define XAXIDMA_BD_NDESC_OFFSET 0x00 /* Next descriptor pointer */ 85 + #define XAXIDMA_BD_BUFA_OFFSET 0x08 /* Buffer address */ 86 + #define XAXIDMA_BD_CTRL_LEN_OFFSET 0x18 /* Control/buffer length */ 87 + #define XAXIDMA_BD_STS_OFFSET 0x1C /* Status */ 88 + #define XAXIDMA_BD_USR0_OFFSET 0x20 /* 
User IP specific word0 */ 89 + #define XAXIDMA_BD_USR1_OFFSET 0x24 /* User IP specific word1 */ 90 + #define XAXIDMA_BD_USR2_OFFSET 0x28 /* User IP specific word2 */ 91 + #define XAXIDMA_BD_USR3_OFFSET 0x2C /* User IP specific word3 */ 92 + #define XAXIDMA_BD_USR4_OFFSET 0x30 /* User IP specific word4 */ 93 + #define XAXIDMA_BD_ID_OFFSET 0x34 /* Sw ID */ 94 + #define XAXIDMA_BD_HAS_STSCNTRL_OFFSET 0x38 /* Whether has stscntrl strm */ 95 + #define XAXIDMA_BD_HAS_DRE_OFFSET 0x3C /* Whether has DRE */ 96 + 97 + #define XAXIDMA_BD_HAS_DRE_SHIFT 8 /* Whether has DRE shift */ 98 + #define XAXIDMA_BD_HAS_DRE_MASK 0xF00 /* Whether has DRE mask */ 99 + #define XAXIDMA_BD_WORDLEN_MASK 0xFF /* Whether has DRE mask */ 100 + 101 + #define XAXIDMA_BD_CTRL_LENGTH_MASK 0x007FFFFF /* Requested len */ 102 + #define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ 103 + #define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ 104 + #define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits */ 105 + 106 + #define XAXIDMA_DELAY_MASK 0xFF000000 /* Delay timeout counter */ 107 + #define XAXIDMA_COALESCE_MASK 0x00FF0000 /* Coalesce counter */ 108 + 109 + #define XAXIDMA_DELAY_SHIFT 24 110 + #define XAXIDMA_COALESCE_SHIFT 16 111 + 112 + #define XAXIDMA_IRQ_IOC_MASK 0x00001000 /* Completion intr */ 113 + #define XAXIDMA_IRQ_DELAY_MASK 0x00002000 /* Delay interrupt */ 114 + #define XAXIDMA_IRQ_ERROR_MASK 0x00004000 /* Error interrupt */ 115 + #define XAXIDMA_IRQ_ALL_MASK 0x00007000 /* All interrupts */ 116 + 117 + /* Default TX/RX Threshold and waitbound values for SGDMA mode */ 118 + #define XAXIDMA_DFT_TX_THRESHOLD 24 119 + #define XAXIDMA_DFT_TX_WAITBOUND 254 120 + #define XAXIDMA_DFT_RX_THRESHOLD 24 121 + #define XAXIDMA_DFT_RX_WAITBOUND 254 122 + 123 + #define XAXIDMA_BD_CTRL_TXSOF_MASK 0x08000000 /* First tx packet */ 124 + #define XAXIDMA_BD_CTRL_TXEOF_MASK 0x04000000 /* Last tx packet */ 125 + #define XAXIDMA_BD_CTRL_ALL_MASK 0x0C000000 /* All control bits 
*/ 126 + 127 + #define XAXIDMA_BD_STS_ACTUAL_LEN_MASK 0x007FFFFF /* Actual len */ 128 + #define XAXIDMA_BD_STS_COMPLETE_MASK 0x80000000 /* Completed */ 129 + #define XAXIDMA_BD_STS_DEC_ERR_MASK 0x40000000 /* Decode error */ 130 + #define XAXIDMA_BD_STS_SLV_ERR_MASK 0x20000000 /* Slave error */ 131 + #define XAXIDMA_BD_STS_INT_ERR_MASK 0x10000000 /* Internal err */ 132 + #define XAXIDMA_BD_STS_ALL_ERR_MASK 0x70000000 /* All errors */ 133 + #define XAXIDMA_BD_STS_RXSOF_MASK 0x08000000 /* First rx pkt */ 134 + #define XAXIDMA_BD_STS_RXEOF_MASK 0x04000000 /* Last rx pkt */ 135 + #define XAXIDMA_BD_STS_ALL_MASK 0xFC000000 /* All status bits */ 136 + 137 + #define XAXIDMA_BD_MINIMUM_ALIGNMENT 0x40 138 + 139 + /* Axi Ethernet registers definition */ 140 + #define XAE_RAF_OFFSET 0x00000000 /* Reset and Address filter */ 141 + #define XAE_TPF_OFFSET 0x00000004 /* Tx Pause Frame */ 142 + #define XAE_IFGP_OFFSET 0x00000008 /* Tx Inter-frame gap adjustment*/ 143 + #define XAE_IS_OFFSET 0x0000000C /* Interrupt status */ 144 + #define XAE_IP_OFFSET 0x00000010 /* Interrupt pending */ 145 + #define XAE_IE_OFFSET 0x00000014 /* Interrupt enable */ 146 + #define XAE_TTAG_OFFSET 0x00000018 /* Tx VLAN TAG */ 147 + #define XAE_RTAG_OFFSET 0x0000001C /* Rx VLAN TAG */ 148 + #define XAE_UAWL_OFFSET 0x00000020 /* Unicast address word lower */ 149 + #define XAE_UAWU_OFFSET 0x00000024 /* Unicast address word upper */ 150 + #define XAE_TPID0_OFFSET 0x00000028 /* VLAN TPID0 register */ 151 + #define XAE_TPID1_OFFSET 0x0000002C /* VLAN TPID1 register */ 152 + #define XAE_PPST_OFFSET 0x00000030 /* PCS PMA Soft Temac Status Reg */ 153 + #define XAE_RCW0_OFFSET 0x00000400 /* Rx Configuration Word 0 */ 154 + #define XAE_RCW1_OFFSET 0x00000404 /* Rx Configuration Word 1 */ 155 + #define XAE_TC_OFFSET 0x00000408 /* Tx Configuration */ 156 + #define XAE_FCC_OFFSET 0x0000040C /* Flow Control Configuration */ 157 + #define XAE_EMMC_OFFSET 0x00000410 /* EMAC mode configuration */ 158 + #define 
XAE_PHYC_OFFSET 0x00000414 /* RGMII/SGMII configuration */ 159 + #define XAE_MDIO_MC_OFFSET 0x00000500 /* MII Management Config */ 160 + #define XAE_MDIO_MCR_OFFSET 0x00000504 /* MII Management Control */ 161 + #define XAE_MDIO_MWD_OFFSET 0x00000508 /* MII Management Write Data */ 162 + #define XAE_MDIO_MRD_OFFSET 0x0000050C /* MII Management Read Data */ 163 + #define XAE_MDIO_MIS_OFFSET 0x00000600 /* MII Management Interrupt Status */ 164 + #define XAE_MDIO_MIP_OFFSET 0x00000620 /* MII Mgmt Interrupt Pending 165 + * register offset */ 166 + #define XAE_MDIO_MIE_OFFSET 0x00000640 /* MII Management Interrupt Enable 167 + * register offset */ 168 + #define XAE_MDIO_MIC_OFFSET 0x00000660 /* MII Management Interrupt Clear 169 + * register offset. */ 170 + #define XAE_UAW0_OFFSET 0x00000700 /* Unicast address word 0 */ 171 + #define XAE_UAW1_OFFSET 0x00000704 /* Unicast address word 1 */ 172 + #define XAE_FMI_OFFSET 0x00000708 /* Filter Mask Index */ 173 + #define XAE_AF0_OFFSET 0x00000710 /* Address Filter 0 */ 174 + #define XAE_AF1_OFFSET 0x00000714 /* Address Filter 1 */ 175 + 176 + #define XAE_TX_VLAN_DATA_OFFSET 0x00004000 /* TX VLAN data table address */ 177 + #define XAE_RX_VLAN_DATA_OFFSET 0x00008000 /* RX VLAN data table address */ 178 + #define XAE_MCAST_TABLE_OFFSET 0x00020000 /* Multicast table address */ 179 + 180 + /* Bit Masks for Axi Ethernet RAF register */ 181 + #define XAE_RAF_MCSTREJ_MASK 0x00000002 /* Reject receive multicast 182 + * destination address */ 183 + #define XAE_RAF_BCSTREJ_MASK 0x00000004 /* Reject receive broadcast 184 + * destination address */ 185 + #define XAE_RAF_TXVTAGMODE_MASK 0x00000018 /* Tx VLAN TAG mode */ 186 + #define XAE_RAF_RXVTAGMODE_MASK 0x00000060 /* Rx VLAN TAG mode */ 187 + #define XAE_RAF_TXVSTRPMODE_MASK 0x00000180 /* Tx VLAN STRIP mode */ 188 + #define XAE_RAF_RXVSTRPMODE_MASK 0x00000600 /* Rx VLAN STRIP mode */ 189 + #define XAE_RAF_NEWFNCENBL_MASK 0x00000800 /* New function mode */ 190 + #define 
XAE_RAF_EMULTIFLTRENBL_MASK 0x00001000 /* Exteneded Multicast 191 + * Filtering mode 192 + */ 193 + #define XAE_RAF_STATSRST_MASK 0x00002000 /* Stats. Counter Reset */ 194 + #define XAE_RAF_RXBADFRMEN_MASK 0x00004000 /* Recv Bad Frame Enable */ 195 + #define XAE_RAF_TXVTAGMODE_SHIFT 3 /* Tx Tag mode shift bits */ 196 + #define XAE_RAF_RXVTAGMODE_SHIFT 5 /* Rx Tag mode shift bits */ 197 + #define XAE_RAF_TXVSTRPMODE_SHIFT 7 /* Tx strip mode shift bits*/ 198 + #define XAE_RAF_RXVSTRPMODE_SHIFT 9 /* Rx Strip mode shift bits*/ 199 + 200 + /* Bit Masks for Axi Ethernet TPF and IFGP registers */ 201 + #define XAE_TPF_TPFV_MASK 0x0000FFFF /* Tx pause frame value */ 202 + #define XAE_IFGP0_IFGP_MASK 0x0000007F /* Transmit inter-frame 203 + * gap adjustment value */ 204 + 205 + /* Bit Masks for Axi Ethernet IS, IE and IP registers, Same masks apply 206 + * for all 3 registers. */ 207 + #define XAE_INT_HARDACSCMPLT_MASK 0x00000001 /* Hard register access 208 + * complete */ 209 + #define XAE_INT_AUTONEG_MASK 0x00000002 /* Auto negotiation 210 + * complete */ 211 + #define XAE_INT_RXCMPIT_MASK 0x00000004 /* Rx complete */ 212 + #define XAE_INT_RXRJECT_MASK 0x00000008 /* Rx frame rejected */ 213 + #define XAE_INT_RXFIFOOVR_MASK 0x00000010 /* Rx fifo overrun */ 214 + #define XAE_INT_TXCMPIT_MASK 0x00000020 /* Tx complete */ 215 + #define XAE_INT_RXDCMLOCK_MASK 0x00000040 /* Rx Dcm Lock */ 216 + #define XAE_INT_MGTRDY_MASK 0x00000080 /* MGT clock Lock */ 217 + #define XAE_INT_PHYRSTCMPLT_MASK 0x00000100 /* Phy Reset complete */ 218 + #define XAE_INT_ALL_MASK 0x0000003F /* All the ints */ 219 + 220 + #define XAE_INT_RECV_ERROR_MASK \ 221 + (XAE_INT_RXRJECT_MASK | XAE_INT_RXFIFOOVR_MASK) /* INT bits that 222 + * indicate receive 223 + * errors */ 224 + 225 + /* Bit masks for Axi Ethernet VLAN TPID Word 0 register */ 226 + #define XAE_TPID_0_MASK 0x0000FFFF /* TPID 0 */ 227 + #define XAE_TPID_1_MASK 0xFFFF0000 /* TPID 1 */ 228 + 229 + /* Bit masks for Axi Ethernet VLAN TPID Word 1 
register */ 230 + #define XAE_TPID_2_MASK 0x0000FFFF /* TPID 0 */ 231 + #define XAE_TPID_3_MASK 0xFFFF0000 /* TPID 1 */ 232 + 233 + /* Bit masks for Axi Ethernet RCW1 register */ 234 + #define XAE_RCW1_RST_MASK 0x80000000 /* Reset */ 235 + #define XAE_RCW1_JUM_MASK 0x40000000 /* Jumbo frame enable */ 236 + #define XAE_RCW1_FCS_MASK 0x20000000 /* In-Band FCS enable 237 + * (FCS not stripped) */ 238 + #define XAE_RCW1_RX_MASK 0x10000000 /* Receiver enable */ 239 + #define XAE_RCW1_VLAN_MASK 0x08000000 /* VLAN frame enable */ 240 + #define XAE_RCW1_LT_DIS_MASK 0x02000000 /* Length/type field valid check 241 + * disable */ 242 + #define XAE_RCW1_CL_DIS_MASK 0x01000000 /* Control frame Length check 243 + * disable */ 244 + #define XAE_RCW1_PAUSEADDR_MASK 0x0000FFFF /* Pause frame source address 245 + * bits [47:32]. Bits [31:0] are 246 + * stored in register RCW0 */ 247 + 248 + /* Bit masks for Axi Ethernet TC register */ 249 + #define XAE_TC_RST_MASK 0x80000000 /* Reset */ 250 + #define XAE_TC_JUM_MASK 0x40000000 /* Jumbo frame enable */ 251 + #define XAE_TC_FCS_MASK 0x20000000 /* In-Band FCS enable 252 + * (FCS not generated) */ 253 + #define XAE_TC_TX_MASK 0x10000000 /* Transmitter enable */ 254 + #define XAE_TC_VLAN_MASK 0x08000000 /* VLAN frame enable */ 255 + #define XAE_TC_IFG_MASK 0x02000000 /* Inter-frame gap adjustment 256 + * enable */ 257 + 258 + /* Bit masks for Axi Ethernet FCC register */ 259 + #define XAE_FCC_FCRX_MASK 0x20000000 /* Rx flow control enable */ 260 + #define XAE_FCC_FCTX_MASK 0x40000000 /* Tx flow control enable */ 261 + 262 + /* Bit masks for Axi Ethernet EMMC register */ 263 + #define XAE_EMMC_LINKSPEED_MASK 0xC0000000 /* Link speed */ 264 + #define XAE_EMMC_RGMII_MASK 0x20000000 /* RGMII mode enable */ 265 + #define XAE_EMMC_SGMII_MASK 0x10000000 /* SGMII mode enable */ 266 + #define XAE_EMMC_GPCS_MASK 0x08000000 /* 1000BaseX mode enable */ 267 + #define XAE_EMMC_HOST_MASK 0x04000000 /* Host interface enable */ 268 + #define 
XAE_EMMC_TX16BIT 0x02000000 /* 16 bit Tx client enable */ 269 + #define XAE_EMMC_RX16BIT 0x01000000 /* 16 bit Rx client enable */ 270 + #define XAE_EMMC_LINKSPD_10 0x00000000 /* Link Speed mask for 10 Mbit */ 271 + #define XAE_EMMC_LINKSPD_100 0x40000000 /* Link Speed mask for 100 Mbit */ 272 + #define XAE_EMMC_LINKSPD_1000 0x80000000 /* Link Speed mask for 1000 Mbit */ 273 + 274 + /* Bit masks for Axi Ethernet PHYC register */ 275 + #define XAE_PHYC_SGMIILINKSPEED_MASK 0xC0000000 /* SGMII link speed mask*/ 276 + #define XAE_PHYC_RGMIILINKSPEED_MASK 0x0000000C /* RGMII link speed */ 277 + #define XAE_PHYC_RGMIIHD_MASK 0x00000002 /* RGMII Half-duplex */ 278 + #define XAE_PHYC_RGMIILINK_MASK 0x00000001 /* RGMII link status */ 279 + #define XAE_PHYC_RGLINKSPD_10 0x00000000 /* RGMII link 10 Mbit */ 280 + #define XAE_PHYC_RGLINKSPD_100 0x00000004 /* RGMII link 100 Mbit */ 281 + #define XAE_PHYC_RGLINKSPD_1000 0x00000008 /* RGMII link 1000 Mbit */ 282 + #define XAE_PHYC_SGLINKSPD_10 0x00000000 /* SGMII link 10 Mbit */ 283 + #define XAE_PHYC_SGLINKSPD_100 0x40000000 /* SGMII link 100 Mbit */ 284 + #define XAE_PHYC_SGLINKSPD_1000 0x80000000 /* SGMII link 1000 Mbit */ 285 + 286 + /* Bit masks for Axi Ethernet MDIO interface MC register */ 287 + #define XAE_MDIO_MC_MDIOEN_MASK 0x00000040 /* MII management enable */ 288 + #define XAE_MDIO_MC_CLOCK_DIVIDE_MAX 0x3F /* Maximum MDIO divisor */ 289 + 290 + /* Bit masks for Axi Ethernet MDIO interface MCR register */ 291 + #define XAE_MDIO_MCR_PHYAD_MASK 0x1F000000 /* Phy Address Mask */ 292 + #define XAE_MDIO_MCR_PHYAD_SHIFT 24 /* Phy Address Shift */ 293 + #define XAE_MDIO_MCR_REGAD_MASK 0x001F0000 /* Reg Address Mask */ 294 + #define XAE_MDIO_MCR_REGAD_SHIFT 16 /* Reg Address Shift */ 295 + #define XAE_MDIO_MCR_OP_MASK 0x0000C000 /* Operation Code Mask */ 296 + #define XAE_MDIO_MCR_OP_SHIFT 13 /* Operation Code Shift */ 297 + #define XAE_MDIO_MCR_OP_READ_MASK 0x00008000 /* Op Code Read Mask */ 298 + #define 
XAE_MDIO_MCR_OP_WRITE_MASK 0x00004000 /* Op Code Write Mask */ 299 + #define XAE_MDIO_MCR_INITIATE_MASK 0x00000800 /* Ready Mask */ 300 + #define XAE_MDIO_MCR_READY_MASK 0x00000080 /* Ready Mask */ 301 + 302 + /* Bit masks for Axi Ethernet MDIO interface MIS, MIP, MIE, MIC registers */ 303 + #define XAE_MDIO_INT_MIIM_RDY_MASK 0x00000001 /* MIIM Interrupt */ 304 + 305 + /* Bit masks for Axi Ethernet UAW1 register */ 306 + #define XAE_UAW1_UNICASTADDR_MASK 0x0000FFFF /* Station address bits 307 + * [47:32]; Station address 308 + * bits [31:0] are stored in 309 + * register UAW0 */ 310 + 311 + /* Bit masks for Axi Ethernet FMI register */ 312 + #define XAE_FMI_PM_MASK 0x80000000 /* Promis. mode enable */ 313 + #define XAE_FMI_IND_MASK 0x00000003 /* Index Mask */ 314 + 315 + #define XAE_MDIO_DIV_DFT 29 /* Default MDIO clock divisor */ 316 + 317 + /* Defines for different options for C_PHY_TYPE parameter in Axi Ethernet IP */ 318 + #define XAE_PHY_TYPE_MII 0 319 + #define XAE_PHY_TYPE_GMII 1 320 + #define XAE_PHY_TYPE_RGMII_1_3 2 321 + #define XAE_PHY_TYPE_RGMII_2_0 3 322 + #define XAE_PHY_TYPE_SGMII 4 323 + #define XAE_PHY_TYPE_1000BASE_X 5 324 + 325 + #define XAE_MULTICAST_CAM_TABLE_NUM 4 /* Total number of entries in the 326 + * hardware multicast table. 
*/ 327 + 328 + /* Axi Ethernet Synthesis features */ 329 + #define XAE_FEATURE_PARTIAL_RX_CSUM (1 << 0) 330 + #define XAE_FEATURE_PARTIAL_TX_CSUM (1 << 1) 331 + #define XAE_FEATURE_FULL_RX_CSUM (1 << 2) 332 + #define XAE_FEATURE_FULL_TX_CSUM (1 << 3) 333 + 334 + #define XAE_NO_CSUM_OFFLOAD 0 335 + 336 + #define XAE_FULL_CSUM_STATUS_MASK 0x00000038 337 + #define XAE_IP_UDP_CSUM_VALIDATED 0x00000003 338 + #define XAE_IP_TCP_CSUM_VALIDATED 0x00000002 339 + 340 + #define DELAY_OF_ONE_MILLISEC 1000 341 + 342 + /** 343 + * struct axidma_bd - Axi Dma buffer descriptor layout 344 + * @next: MM2S/S2MM Next Descriptor Pointer 345 + * @reserved1: Reserved and not used 346 + * @phys: MM2S/S2MM Buffer Address 347 + * @reserved2: Reserved and not used 348 + * @reserved3: Reserved and not used 349 + * @reserved4: Reserved and not used 350 + * @cntrl: MM2S/S2MM Control value 351 + * @status: MM2S/S2MM Status value 352 + * @app0: MM2S/S2MM User Application Field 0. 353 + * @app1: MM2S/S2MM User Application Field 1. 354 + * @app2: MM2S/S2MM User Application Field 2. 355 + * @app3: MM2S/S2MM User Application Field 3. 356 + * @app4: MM2S/S2MM User Application Field 4. 357 + * @sw_id_offset: MM2S/S2MM Sw ID 358 + * @reserved5: Reserved and not used 359 + * @reserved6: Reserved and not used 360 + */ 361 + struct axidma_bd { 362 + u32 next; /* Physical address of next buffer descriptor */ 363 + u32 reserved1; 364 + u32 phys; 365 + u32 reserved2; 366 + u32 reserved3; 367 + u32 reserved4; 368 + u32 cntrl; 369 + u32 status; 370 + u32 app0; 371 + u32 app1; /* TX start << 16 | insert */ 372 + u32 app2; /* TX csum seed */ 373 + u32 app3; 374 + u32 app4; 375 + u32 sw_id_offset; 376 + u32 reserved5; 377 + u32 reserved6; 378 + }; 379 + 380 + /** 381 + * struct axienet_local - axienet private per device data 382 + * @ndev: Pointer for net_device to which it will be attached. 
383 + * @dev: Pointer to device structure 384 + * @phy_dev: Pointer to PHY device structure attached to the axienet_local 385 + * @phy_node: Pointer to device node structure 386 + * @mii_bus: Pointer to MII bus structure 387 + * @mdio_irqs: IRQs table for MDIO bus required in mii_bus structure 388 + * @regs: Base address for the axienet_local device address space 389 + * @dma_regs: Base address for the axidma device address space 390 + * @dma_err_tasklet: Tasklet structure to process Axi DMA errors 391 + * @tx_irq: Axidma TX IRQ number 392 + * @rx_irq: Axidma RX IRQ number 393 + * @temac_type: axienet type to identify between soft and hard temac 394 + * @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X 395 + * @options: AxiEthernet option word 396 + * @last_link: Phy link state in which the PHY was negotiated earlier 397 + * @features: Stores the extended features supported by the axienet hw 398 + * @tx_bd_v: Virtual address of the TX buffer descriptor ring 399 + * @tx_bd_p: Physical address(start address) of the TX buffer descr. ring 400 + * @rx_bd_v: Virtual address of the RX buffer descriptor ring 401 + * @rx_bd_p: Physical address(start address) of the RX buffer descr. ring 402 + * @tx_bd_ci: Stores the index of the Tx buffer descriptor in the ring being 403 + * accessed currently. Used while alloc. BDs before a TX starts 404 + * @tx_bd_tail: Stores the index of the Tx buffer descriptor in the ring being 405 + * accessed currently. Used while processing BDs after the TX 406 + * completed. 407 + * @rx_bd_ci: Stores the index of the Rx buffer descriptor in the ring being 408 + * accessed currently. 409 + * @max_frm_size: Stores the maximum size of the frame that can be that 410 + * Txed/Rxed in the existing hardware. If jumbo option is 411 + * supported, the maximum frame size would be 9k. Else it is 412 + * 1522 bytes (assuming support for basic VLAN) 413 + * @jumbo_support: Stores hardware configuration for jumbo support. 
If hardware 414 + * can handle jumbo packets, this entry will be 1, else 0. 415 + */ 416 + struct axienet_local { 417 + struct net_device *ndev; 418 + struct device *dev; 419 + 420 + /* Connection to PHY device */ 421 + struct phy_device *phy_dev; /* Pointer to PHY device */ 422 + struct device_node *phy_node; 423 + 424 + /* MDIO bus data */ 425 + struct mii_bus *mii_bus; /* MII bus reference */ 426 + int mdio_irqs[PHY_MAX_ADDR]; /* IRQs table for MDIO bus */ 427 + 428 + /* IO registers, dma functions and IRQs */ 429 + void __iomem *regs; 430 + void __iomem *dma_regs; 431 + 432 + struct tasklet_struct dma_err_tasklet; 433 + 434 + int tx_irq; 435 + int rx_irq; 436 + u32 temac_type; 437 + u32 phy_type; 438 + 439 + u32 options; /* Current options word */ 440 + u32 last_link; 441 + u32 features; 442 + 443 + /* Buffer descriptors */ 444 + struct axidma_bd *tx_bd_v; 445 + dma_addr_t tx_bd_p; 446 + struct axidma_bd *rx_bd_v; 447 + dma_addr_t rx_bd_p; 448 + u32 tx_bd_ci; 449 + u32 tx_bd_tail; 450 + u32 rx_bd_ci; 451 + 452 + u32 max_frm_size; 453 + u32 jumbo_support; 454 + 455 + int csum_offload_on_tx_path; 456 + int csum_offload_on_rx_path; 457 + 458 + u32 coalesce_count_rx; 459 + u32 coalesce_count_tx; 460 + }; 461 + 462 + /** 463 + * struct axiethernet_option - Used to set axi ethernet hardware options 464 + * @opt: Option to be set. 465 + * @reg: Register offset to be written for setting the option 466 + * @m_or: Mask to be ORed for setting the option in the register 467 + */ 468 + struct axienet_option { 469 + u32 opt; 470 + u32 reg; 471 + u32 m_or; 472 + }; 473 + 474 + /** 475 + * axienet_ior - Memory mapped Axi Ethernet register read 476 + * @lp: Pointer to axienet local structure 477 + * @offset: Address offset from the base address of Axi Ethernet core 478 + * 479 + * returns: The contents of the Axi Ethernet register 480 + * 481 + * This function returns the contents of the corresponding register. 
482 + */ 483 + static inline u32 axienet_ior(struct axienet_local *lp, off_t offset) 484 + { 485 + return in_be32(lp->regs + offset); 486 + } 487 + 488 + /** 489 + * axienet_iow - Memory mapped Axi Ethernet register write 490 + * @lp: Pointer to axienet local structure 491 + * @offset: Address offset from the base address of Axi Ethernet core 492 + * @value: Value to be written into the Axi Ethernet register 493 + * 494 + * This function writes the desired value into the corresponding Axi Ethernet 495 + * register. 496 + */ 497 + static inline void axienet_iow(struct axienet_local *lp, off_t offset, 498 + u32 value) 499 + { 500 + out_be32((lp->regs + offset), value); 501 + } 502 + 503 + /* Function prototypes visible in xilinx_axienet_mdio.c for other files */ 504 + int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np); 505 + int axienet_mdio_wait_until_ready(struct axienet_local *lp); 506 + void axienet_mdio_teardown(struct axienet_local *lp); 507 + 508 + #endif /* XILINX_AXI_ENET_H */
+1682
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 1 + /* 2 + * Xilinx Axi Ethernet device driver 3 + * 4 + * Copyright (c) 2008 Nissin Systems Co., Ltd., Yoshio Kashiwagi 5 + * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net> 6 + * Copyright (c) 2008-2009 Secret Lab Technologies Ltd. 7 + * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 8 + * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 9 + * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 10 + * 11 + * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6 12 + * and Spartan6. 13 + * 14 + * TODO: 15 + * - Add Axi Fifo support. 16 + * - Factor out Axi DMA code into separate driver. 17 + * - Test and fix basic multicast filtering. 18 + * - Add support for extended multicast filtering. 19 + * - Test basic VLAN support. 20 + * - Add support for extended VLAN support. 21 + */ 22 + 23 + #include <linux/delay.h> 24 + #include <linux/etherdevice.h> 25 + #include <linux/init.h> 26 + #include <linux/module.h> 27 + #include <linux/netdevice.h> 28 + #include <linux/of_mdio.h> 29 + #include <linux/of_platform.h> 30 + #include <linux/of_address.h> 31 + #include <linux/skbuff.h> 32 + #include <linux/spinlock.h> 33 + #include <linux/phy.h> 34 + #include <linux/mii.h> 35 + #include <linux/ethtool.h> 36 + 37 + #include "xilinx_axienet.h" 38 + 39 + /* Descriptors defines for Tx and Rx DMA - 2^n for the best performance */ 40 + #define TX_BD_NUM 64 41 + #define RX_BD_NUM 128 42 + 43 + /* Must be shorter than length of ethtool_drvinfo.driver field to fit */ 44 + #define DRIVER_NAME "xaxienet" 45 + #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver" 46 + #define DRIVER_VERSION "1.00a" 47 + 48 + #define AXIENET_REGS_N 32 49 + 50 + /* Match table for of_platform binding */ 51 + static struct of_device_id axienet_of_match[] __devinitdata = { 52 + { .compatible = "xlnx,axi-ethernet-1.00.a", }, 53 + { .compatible = "xlnx,axi-ethernet-1.01.a", }, 54 + { .compatible = 
"xlnx,axi-ethernet-2.01.a", }, 55 + {}, 56 + }; 57 + 58 + MODULE_DEVICE_TABLE(of, axienet_of_match); 59 + 60 + /* Option table for setting up Axi Ethernet hardware options */ 61 + static struct axienet_option axienet_options[] = { 62 + /* Turn on jumbo packet support for both Rx and Tx */ 63 + { 64 + .opt = XAE_OPTION_JUMBO, 65 + .reg = XAE_TC_OFFSET, 66 + .m_or = XAE_TC_JUM_MASK, 67 + }, { 68 + .opt = XAE_OPTION_JUMBO, 69 + .reg = XAE_RCW1_OFFSET, 70 + .m_or = XAE_RCW1_JUM_MASK, 71 + }, { /* Turn on VLAN packet support for both Rx and Tx */ 72 + .opt = XAE_OPTION_VLAN, 73 + .reg = XAE_TC_OFFSET, 74 + .m_or = XAE_TC_VLAN_MASK, 75 + }, { 76 + .opt = XAE_OPTION_VLAN, 77 + .reg = XAE_RCW1_OFFSET, 78 + .m_or = XAE_RCW1_VLAN_MASK, 79 + }, { /* Turn on FCS stripping on receive packets */ 80 + .opt = XAE_OPTION_FCS_STRIP, 81 + .reg = XAE_RCW1_OFFSET, 82 + .m_or = XAE_RCW1_FCS_MASK, 83 + }, { /* Turn on FCS insertion on transmit packets */ 84 + .opt = XAE_OPTION_FCS_INSERT, 85 + .reg = XAE_TC_OFFSET, 86 + .m_or = XAE_TC_FCS_MASK, 87 + }, { /* Turn off length/type field checking on receive packets */ 88 + .opt = XAE_OPTION_LENTYPE_ERR, 89 + .reg = XAE_RCW1_OFFSET, 90 + .m_or = XAE_RCW1_LT_DIS_MASK, 91 + }, { /* Turn on Rx flow control */ 92 + .opt = XAE_OPTION_FLOW_CONTROL, 93 + .reg = XAE_FCC_OFFSET, 94 + .m_or = XAE_FCC_FCRX_MASK, 95 + }, { /* Turn on Tx flow control */ 96 + .opt = XAE_OPTION_FLOW_CONTROL, 97 + .reg = XAE_FCC_OFFSET, 98 + .m_or = XAE_FCC_FCTX_MASK, 99 + }, { /* Turn on promiscuous frame filtering */ 100 + .opt = XAE_OPTION_PROMISC, 101 + .reg = XAE_FMI_OFFSET, 102 + .m_or = XAE_FMI_PM_MASK, 103 + }, { /* Enable transmitter */ 104 + .opt = XAE_OPTION_TXEN, 105 + .reg = XAE_TC_OFFSET, 106 + .m_or = XAE_TC_TX_MASK, 107 + }, { /* Enable receiver */ 108 + .opt = XAE_OPTION_RXEN, 109 + .reg = XAE_RCW1_OFFSET, 110 + .m_or = XAE_RCW1_RX_MASK, 111 + }, 112 + {} 113 + }; 114 + 115 + /** 116 + * axienet_dma_in32 - Memory mapped Axi DMA register read 117 + * @lp: 
Pointer to axienet local structure 118 + * @reg: Address offset from the base address of the Axi DMA core 119 + * 120 + * returns: The contents of the Axi DMA register 121 + * 122 + * This function returns the contents of the corresponding Axi DMA register. 123 + */ 124 + static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) 125 + { 126 + return in_be32(lp->dma_regs + reg); 127 + } 128 + 129 + /** 130 + * axienet_dma_out32 - Memory mapped Axi DMA register write. 131 + * @lp: Pointer to axienet local structure 132 + * @reg: Address offset from the base address of the Axi DMA core 133 + * @value: Value to be written into the Axi DMA register 134 + * 135 + * This function writes the desired value into the corresponding Axi DMA 136 + * register. 137 + */ 138 + static inline void axienet_dma_out32(struct axienet_local *lp, 139 + off_t reg, u32 value) 140 + { 141 + out_be32((lp->dma_regs + reg), value); 142 + } 143 + 144 + /** 145 + * axienet_dma_bd_release - Release buffer descriptor rings 146 + * @ndev: Pointer to the net_device structure 147 + * 148 + * This function is used to release the descriptors allocated in 149 + * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet 150 + * driver stop api is called. 
151 + */ 152 + static void axienet_dma_bd_release(struct net_device *ndev) 153 + { 154 + int i; 155 + struct axienet_local *lp = netdev_priv(ndev); 156 + 157 + for (i = 0; i < RX_BD_NUM; i++) { 158 + dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys, 159 + lp->max_frm_size, DMA_FROM_DEVICE); 160 + dev_kfree_skb((struct sk_buff *) 161 + (lp->rx_bd_v[i].sw_id_offset)); 162 + } 163 + 164 + if (lp->rx_bd_v) { 165 + dma_free_coherent(ndev->dev.parent, 166 + sizeof(*lp->rx_bd_v) * RX_BD_NUM, 167 + lp->rx_bd_v, 168 + lp->rx_bd_p); 169 + } 170 + if (lp->tx_bd_v) { 171 + dma_free_coherent(ndev->dev.parent, 172 + sizeof(*lp->tx_bd_v) * TX_BD_NUM, 173 + lp->tx_bd_v, 174 + lp->tx_bd_p); 175 + } 176 + } 177 + 178 + /** 179 + * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA 180 + * @ndev: Pointer to the net_device structure 181 + * 182 + * returns: 0, on success 183 + * -ENOMEM, on failure 184 + * 185 + * This function is called to initialize the Rx and Tx DMA descriptor 186 + * rings. This initializes the descriptors with required default values 187 + * and is called when Axi Ethernet driver reset is called. 188 + */ 189 + static int axienet_dma_bd_init(struct net_device *ndev) 190 + { 191 + u32 cr; 192 + int i; 193 + struct sk_buff *skb; 194 + struct axienet_local *lp = netdev_priv(ndev); 195 + 196 + /* Reset the indexes which are used for accessing the BDs */ 197 + lp->tx_bd_ci = 0; 198 + lp->tx_bd_tail = 0; 199 + lp->rx_bd_ci = 0; 200 + 201 + /* 202 + * Allocate the Tx and Rx buffer descriptors. 
203 + */ 204 + lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, 205 + sizeof(*lp->tx_bd_v) * TX_BD_NUM, 206 + &lp->tx_bd_p, 207 + GFP_KERNEL); 208 + if (!lp->tx_bd_v) { 209 + dev_err(&ndev->dev, "unable to allocate DMA Tx buffer " 210 + "descriptors"); 211 + goto out; 212 + } 213 + 214 + lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, 215 + sizeof(*lp->rx_bd_v) * RX_BD_NUM, 216 + &lp->rx_bd_p, 217 + GFP_KERNEL); 218 + if (!lp->rx_bd_v) { 219 + dev_err(&ndev->dev, "unable to allocate DMA Rx buffer " 220 + "descriptors"); 221 + goto out; 222 + } 223 + 224 + memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM); 225 + for (i = 0; i < TX_BD_NUM; i++) { 226 + lp->tx_bd_v[i].next = lp->tx_bd_p + 227 + sizeof(*lp->tx_bd_v) * 228 + ((i + 1) % TX_BD_NUM); 229 + } 230 + 231 + memset(lp->rx_bd_v, 0, sizeof(*lp->rx_bd_v) * RX_BD_NUM); 232 + for (i = 0; i < RX_BD_NUM; i++) { 233 + lp->rx_bd_v[i].next = lp->rx_bd_p + 234 + sizeof(*lp->rx_bd_v) * 235 + ((i + 1) % RX_BD_NUM); 236 + 237 + skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 238 + if (!skb) { 239 + dev_err(&ndev->dev, "alloc_skb error %d\n", i); 240 + goto out; 241 + } 242 + 243 + lp->rx_bd_v[i].sw_id_offset = (u32) skb; 244 + lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent, 245 + skb->data, 246 + lp->max_frm_size, 247 + DMA_FROM_DEVICE); 248 + lp->rx_bd_v[i].cntrl = lp->max_frm_size; 249 + } 250 + 251 + /* Start updating the Rx channel control register */ 252 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 253 + /* Update the interrupt coalesce count */ 254 + cr = ((cr & ~XAXIDMA_COALESCE_MASK) | 255 + ((lp->coalesce_count_rx) << XAXIDMA_COALESCE_SHIFT)); 256 + /* Update the delay timer count */ 257 + cr = ((cr & ~XAXIDMA_DELAY_MASK) | 258 + (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 259 + /* Enable coalesce, delay timer and error interrupts */ 260 + cr |= XAXIDMA_IRQ_ALL_MASK; 261 + /* Write to the Rx channel control register */ 262 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 
263 + 264 + /* Start updating the Tx channel control register */ 265 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 266 + /* Update the interrupt coalesce count */ 267 + cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | 268 + ((lp->coalesce_count_tx) << XAXIDMA_COALESCE_SHIFT)); 269 + /* Update the delay timer count */ 270 + cr = (((cr & ~XAXIDMA_DELAY_MASK)) | 271 + (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 272 + /* Enable coalesce, delay timer and error interrupts */ 273 + cr |= XAXIDMA_IRQ_ALL_MASK; 274 + /* Write to the Tx channel control register */ 275 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 276 + 277 + /* Populate the tail pointer and bring the Rx Axi DMA engine out of 278 + * halted state. This will make the Rx side ready for reception.*/ 279 + axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 280 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 281 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, 282 + cr | XAXIDMA_CR_RUNSTOP_MASK); 283 + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 284 + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 285 + 286 + /* Write to the RS (Run-stop) bit in the Tx channel control register. 287 + * Tx channel is now ready to run. But only after we write to the 288 + * tail pointer register that the Tx channel will start transmitting */ 289 + axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 290 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 291 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, 292 + cr | XAXIDMA_CR_RUNSTOP_MASK); 293 + 294 + return 0; 295 + out: 296 + axienet_dma_bd_release(ndev); 297 + return -ENOMEM; 298 + } 299 + 300 + /** 301 + * axienet_set_mac_address - Write the MAC address 302 + * @ndev: Pointer to the net_device structure 303 + * @address: 6 byte Address to be written as MAC address 304 + * 305 + * This function is called to initialize the MAC address of the Axi Ethernet 306 + * core. It writes to the UAW0 and UAW1 registers of the core. 
/**
 * axienet_set_mac_address - Write the MAC address
 * @ndev:	Pointer to the net_device structure
 * @address:	6 byte Address to be written as MAC address
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It writes to the UAW0 and UAW1 registers of the core.
 */
static void axienet_set_mac_address(struct net_device *ndev, void *address)
{
	struct axienet_local *lp = netdev_priv(ndev);

	if (address)
		memcpy(ndev->dev_addr, address, ETH_ALEN);
	/* Fall back to a random address when no valid one is available. */
	if (!is_valid_ether_addr(ndev->dev_addr))
		random_ether_addr(ndev->dev_addr);

	/* Set up unicast MAC address filter set its mac address */
	axienet_iow(lp, XAE_UAW0_OFFSET,
		    (ndev->dev_addr[0]) |
		    (ndev->dev_addr[1] << 8) |
		    (ndev->dev_addr[2] << 16) |
		    (ndev->dev_addr[3] << 24));
	/* UAW1 carries the two high address bytes; preserve its other bits. */
	axienet_iow(lp, XAE_UAW1_OFFSET,
		    (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
		      ~XAE_UAW1_UNICASTADDR_MASK) |
		     (ndev->dev_addr[4] |
		      (ndev->dev_addr[5] << 8))));
}

/**
 * netdev_set_mac_address - Write the MAC address (from outside the driver)
 * @ndev:	Pointer to the net_device structure
 * @p:		6 byte Address to be written as MAC address
 *
 * returns: 0 for all conditions. Presently, there is no failure case.
 *
 * This function is called to initialize the MAC address of the Axi Ethernet
 * core. It calls the core specific axienet_set_mac_address. This is the
 * function that goes into net_device_ops structure entry ndo_set_mac_address.
 */
static int netdev_set_mac_address(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	/* NOTE(review): the address is not validated here; an invalid address
	 * silently becomes a random one in axienet_set_mac_address. */
	axienet_set_mac_address(ndev, addr->sa_data);
	return 0;
}

/**
 * axienet_set_multicast_list - Prepare the multicast table
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to initialize the multicast table during
 * initialization. The Axi Ethernet basic multicast support has a four-entry
 * multicast table which is initialized here. Additionally this function
 * goes into the net_device_ops structure entry ndo_set_multicast_list. This
 * means whenever the multicast table entries need to be updated this
 * function gets called.
 */
static void axienet_set_multicast_list(struct net_device *ndev)
{
	int i;
	u32 reg, af0reg, af1reg;
	struct axienet_local *lp = netdev_priv(ndev);

	if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
	    netdev_mc_count(ndev) > XAE_MULTICAST_CAM_TABLE_NUM) {
		/* We must make the kernel realize we had to move into
		 * promiscuous mode. If it was a promiscuous mode request
		 * the flag is already set. If not we set it. */
		ndev->flags |= IFF_PROMISC;
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg |= XAE_FMI_PM_MASK;
		axienet_iow(lp, XAE_FMI_OFFSET, reg);
		dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
	} else if (!netdev_mc_empty(ndev)) {
		struct netdev_hw_addr *ha;

		i = 0;
		netdev_for_each_mc_addr(ha, ndev) {
			/* Entries beyond the 4-entry CAM are dropped. */
			if (i >= XAE_MULTICAST_CAM_TABLE_NUM)
				break;

			/* Pack the 6-byte address into the two AF words. */
			af0reg = (ha->addr[0]);
			af0reg |= (ha->addr[1] << 8);
			af0reg |= (ha->addr[2] << 16);
			af0reg |= (ha->addr[3] << 24);

			af1reg = (ha->addr[4]);
			af1reg |= (ha->addr[5] << 8);

			/* The low byte of FMI selects which CAM entry the
			 * following AF0/AF1 writes target. */
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
			axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
			i++;
		}
	} else {
		reg = axienet_ior(lp, XAE_FMI_OFFSET);
		reg &= ~XAE_FMI_PM_MASK;

		axienet_iow(lp, XAE_FMI_OFFSET, reg);

		/* No multicast addresses: clear every CAM entry. */
		for (i = 0; i < XAE_MULTICAST_CAM_TABLE_NUM; i++) {
			reg = axienet_ior(lp, XAE_FMI_OFFSET) & 0xFFFFFF00;
			reg |= i;

			axienet_iow(lp, XAE_FMI_OFFSET, reg);
			axienet_iow(lp, XAE_AF0_OFFSET, 0);
			axienet_iow(lp, XAE_AF1_OFFSET, 0);
		}

		dev_info(&ndev->dev, "Promiscuous mode disabled.\n");
	}
}
axienet_setoptions - Set an Axi Ethernet option 420 + * @ndev: Pointer to the net_device structure 421 + * @options: Option to be enabled/disabled 422 + * 423 + * The Axi Ethernet core has multiple features which can be selectively turned 424 + * on or off. The typical options could be jumbo frame option, basic VLAN 425 + * option, promiscuous mode option etc. This function is used to set or clear 426 + * these options in the Axi Ethernet hardware. This is done through 427 + * axienet_option structure . 428 + */ 429 + static void axienet_setoptions(struct net_device *ndev, u32 options) 430 + { 431 + int reg; 432 + struct axienet_local *lp = netdev_priv(ndev); 433 + struct axienet_option *tp = &axienet_options[0]; 434 + 435 + while (tp->opt) { 436 + reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); 437 + if (options & tp->opt) 438 + reg |= tp->m_or; 439 + axienet_iow(lp, tp->reg, reg); 440 + tp++; 441 + } 442 + 443 + lp->options |= options; 444 + } 445 + 446 + static void __axienet_device_reset(struct axienet_local *lp, 447 + struct device *dev, off_t offset) 448 + { 449 + u32 timeout; 450 + /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset 451 + * process of Axi DMA takes a while to complete as all pending 452 + * commands/transfers will be flushed or completed during this 453 + * reset process. */ 454 + axienet_dma_out32(lp, offset, XAXIDMA_CR_RESET_MASK); 455 + timeout = DELAY_OF_ONE_MILLISEC; 456 + while (axienet_dma_in32(lp, offset) & XAXIDMA_CR_RESET_MASK) { 457 + udelay(1); 458 + if (--timeout == 0) { 459 + dev_err(dev, "axienet_device_reset DMA " 460 + "reset timeout!\n"); 461 + break; 462 + } 463 + } 464 + } 465 + 466 + /** 467 + * axienet_device_reset - Reset and initialize the Axi Ethernet hardware. 468 + * @ndev: Pointer to the net_device structure 469 + * 470 + * This function is called to reset and initialize the Axi Ethernet core. This 471 + * is typically called during initialization. 
/**
 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
 * @ndev:	Pointer to the net_device structure
 *
 * This function is called to reset and initialize the Axi Ethernet core. This
 * is typically called during initialization. It does a reset of the Axi DMA
 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
 * core.
 */
static void axienet_device_reset(struct net_device *ndev)
{
	u32 axienet_status;
	struct axienet_local *lp = netdev_priv(ndev);

	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET);
	__axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET);

	/* Default to the standard VLAN frame size; enable jumbo frames only
	 * when the MTU requires it and the hardware supports it. */
	lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
	lp->options &= (~XAE_OPTION_JUMBO);

	if ((ndev->mtu > XAE_MTU) &&
	    (ndev->mtu <= XAE_JUMBO_MTU) &&
	    (lp->jumbo_support)) {
		lp->max_frm_size = ndev->mtu + XAE_HDR_VLAN_SIZE +
				   XAE_TRL_SIZE;
		lp->options |= XAE_OPTION_JUMBO;
	}

	/* NOTE(review): a BD allocation failure is only logged here; the
	 * device is left without usable rings. */
	if (axienet_dma_bd_init(ndev)) {
		dev_err(&ndev->dev, "axienet_device_reset descriptor "
			"allocation failed\n");
	}

	/* Disable the receiver while reconfiguring. */
	axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
	axienet_status &= ~XAE_RCW1_RX_MASK;
	axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);

	/* Acknowledge any pending receive-reject interrupt. */
	axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
	if (axienet_status & XAE_INT_RXRJECT_MASK)
		axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);

	axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);

	/* Sync default options with HW but leave receiver and
	 * transmitter disabled.*/
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));
	axienet_set_mac_address(ndev, NULL);
	axienet_set_multicast_list(ndev);
	axienet_setoptions(ndev, lp->options);

	ndev->trans_start = jiffies;
}
524 + * @ndev: Pointer to the net_device structure 525 + * 526 + * This function is called to change the speed and duplex setting after 527 + * auto negotiation is done by the PHY. This is the function that gets 528 + * registered with the PHY interface through the "of_phy_connect" call. 529 + */ 530 + static void axienet_adjust_link(struct net_device *ndev) 531 + { 532 + u32 emmc_reg; 533 + u32 link_state; 534 + u32 setspeed = 1; 535 + struct axienet_local *lp = netdev_priv(ndev); 536 + struct phy_device *phy = lp->phy_dev; 537 + 538 + link_state = phy->speed | (phy->duplex << 1) | phy->link; 539 + if (lp->last_link != link_state) { 540 + if ((phy->speed == SPEED_10) || (phy->speed == SPEED_100)) { 541 + if (lp->phy_type == XAE_PHY_TYPE_1000BASE_X) 542 + setspeed = 0; 543 + } else { 544 + if ((phy->speed == SPEED_1000) && 545 + (lp->phy_type == XAE_PHY_TYPE_MII)) 546 + setspeed = 0; 547 + } 548 + 549 + if (setspeed == 1) { 550 + emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET); 551 + emmc_reg &= ~XAE_EMMC_LINKSPEED_MASK; 552 + 553 + switch (phy->speed) { 554 + case SPEED_1000: 555 + emmc_reg |= XAE_EMMC_LINKSPD_1000; 556 + break; 557 + case SPEED_100: 558 + emmc_reg |= XAE_EMMC_LINKSPD_100; 559 + break; 560 + case SPEED_10: 561 + emmc_reg |= XAE_EMMC_LINKSPD_10; 562 + break; 563 + default: 564 + dev_err(&ndev->dev, "Speed other than 10, 100 " 565 + "or 1Gbps is not supported\n"); 566 + break; 567 + } 568 + 569 + axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg); 570 + lp->last_link = link_state; 571 + phy_print_status(phy); 572 + } else { 573 + dev_err(&ndev->dev, "Error setting Axi Ethernet " 574 + "mac speed\n"); 575 + } 576 + } 577 + } 578 + 579 + /** 580 + * axienet_start_xmit_done - Invoked once a transmit is completed by the 581 + * Axi DMA Tx channel. 582 + * @ndev: Pointer to the net_device structure 583 + * 584 + * This function is invoked from the Axi DMA Tx isr to notify the completion 585 + * of transmit operation. 
It clears fields in the corresponding Tx BDs and 586 + * unmaps the corresponding buffer so that CPU can regain ownership of the 587 + * buffer. It finally invokes "netif_wake_queue" to restart transmission if 588 + * required. 589 + */ 590 + static void axienet_start_xmit_done(struct net_device *ndev) 591 + { 592 + u32 size = 0; 593 + u32 packets = 0; 594 + struct axienet_local *lp = netdev_priv(ndev); 595 + struct axidma_bd *cur_p; 596 + unsigned int status = 0; 597 + 598 + cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; 599 + status = cur_p->status; 600 + while (status & XAXIDMA_BD_STS_COMPLETE_MASK) { 601 + dma_unmap_single(ndev->dev.parent, cur_p->phys, 602 + (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), 603 + DMA_TO_DEVICE); 604 + if (cur_p->app4) 605 + dev_kfree_skb_irq((struct sk_buff *)cur_p->app4); 606 + /*cur_p->phys = 0;*/ 607 + cur_p->app0 = 0; 608 + cur_p->app1 = 0; 609 + cur_p->app2 = 0; 610 + cur_p->app4 = 0; 611 + cur_p->status = 0; 612 + 613 + size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK; 614 + packets++; 615 + 616 + lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; 617 + cur_p = &lp->tx_bd_v[lp->tx_bd_ci]; 618 + status = cur_p->status; 619 + } 620 + 621 + ndev->stats.tx_packets += packets; 622 + ndev->stats.tx_bytes += size; 623 + netif_wake_queue(ndev); 624 + } 625 + 626 + /** 627 + * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy 628 + * @lp: Pointer to the axienet_local structure 629 + * @num_frag: The number of BDs to check for 630 + * 631 + * returns: 0, on success 632 + * NETDEV_TX_BUSY, if any of the descriptors are not free 633 + * 634 + * This function is invoked before BDs are allocated and transmission starts. 635 + * This function returns 0 if a BD or group of BDs can be allocated for 636 + * transmission. If the BD or any of the BDs are not free the function 637 + * returns a busy status. This is invoked from axienet_start_xmit. 
638 + */ 639 + static inline int axienet_check_tx_bd_space(struct axienet_local *lp, 640 + int num_frag) 641 + { 642 + struct axidma_bd *cur_p; 643 + cur_p = &lp->tx_bd_v[(lp->tx_bd_tail + num_frag) % TX_BD_NUM]; 644 + if (cur_p->status & XAXIDMA_BD_STS_ALL_MASK) 645 + return NETDEV_TX_BUSY; 646 + return 0; 647 + } 648 + 649 + /** 650 + * axienet_start_xmit - Starts the transmission. 651 + * @skb: sk_buff pointer that contains data to be Txed. 652 + * @ndev: Pointer to net_device structure. 653 + * 654 + * returns: NETDEV_TX_OK, on success 655 + * NETDEV_TX_BUSY, if any of the descriptors are not free 656 + * 657 + * This function is invoked from upper layers to initiate transmission. The 658 + * function uses the next available free BDs and populates their fields to 659 + * start the transmission. Additionally if checksum offloading is supported, 660 + * it populates AXI Stream Control fields with appropriate values. 661 + */ 662 + static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev) 663 + { 664 + u32 ii; 665 + u32 num_frag; 666 + u32 csum_start_off; 667 + u32 csum_index_off; 668 + skb_frag_t *frag; 669 + dma_addr_t tail_p; 670 + struct axienet_local *lp = netdev_priv(ndev); 671 + struct axidma_bd *cur_p; 672 + 673 + num_frag = skb_shinfo(skb)->nr_frags; 674 + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 675 + 676 + if (axienet_check_tx_bd_space(lp, num_frag)) { 677 + if (!netif_queue_stopped(ndev)) 678 + netif_stop_queue(ndev); 679 + return NETDEV_TX_BUSY; 680 + } 681 + 682 + if (skb->ip_summed == CHECKSUM_PARTIAL) { 683 + if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { 684 + /* Tx Full Checksum Offload Enabled */ 685 + cur_p->app0 |= 2; 686 + } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { 687 + csum_start_off = skb_transport_offset(skb); 688 + csum_index_off = csum_start_off + skb->csum_offset; 689 + /* Tx Partial Checksum Offload Enabled */ 690 + cur_p->app0 |= 1; 691 + cur_p->app1 = (csum_start_off << 16) | csum_index_off; 692 + } 693 
+ } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 694 + cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ 695 + } 696 + 697 + cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; 698 + cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, 699 + skb_headlen(skb), DMA_TO_DEVICE); 700 + 701 + for (ii = 0; ii < num_frag; ii++) { 702 + lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 703 + cur_p = &lp->tx_bd_v[lp->tx_bd_tail]; 704 + frag = &skb_shinfo(skb)->frags[ii]; 705 + cur_p->phys = dma_map_single(ndev->dev.parent, 706 + skb_frag_address(frag), 707 + skb_frag_size(frag), 708 + DMA_TO_DEVICE); 709 + cur_p->cntrl = skb_frag_size(frag); 710 + } 711 + 712 + cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; 713 + cur_p->app4 = (unsigned long)skb; 714 + 715 + tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail; 716 + /* Start the transfer */ 717 + axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p); 718 + lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM; 719 + 720 + return NETDEV_TX_OK; 721 + } 722 + 723 + /** 724 + * axienet_recv - Is called from Axi DMA Rx Isr to complete the received 725 + * BD processing. 726 + * @ndev: Pointer to net_device structure. 727 + * 728 + * This function is invoked from the Axi DMA Rx isr to process the Rx BDs. It 729 + * does minimal processing and invokes "netif_rx" to complete further 730 + * processing. 
731 + */ 732 + static void axienet_recv(struct net_device *ndev) 733 + { 734 + u32 length; 735 + u32 csumstatus; 736 + u32 size = 0; 737 + u32 packets = 0; 738 + dma_addr_t tail_p; 739 + struct axienet_local *lp = netdev_priv(ndev); 740 + struct sk_buff *skb, *new_skb; 741 + struct axidma_bd *cur_p; 742 + 743 + tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; 744 + cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; 745 + 746 + while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { 747 + skb = (struct sk_buff *) (cur_p->sw_id_offset); 748 + length = cur_p->app4 & 0x0000FFFF; 749 + 750 + dma_unmap_single(ndev->dev.parent, cur_p->phys, 751 + lp->max_frm_size, 752 + DMA_FROM_DEVICE); 753 + 754 + skb_put(skb, length); 755 + skb->protocol = eth_type_trans(skb, ndev); 756 + /*skb_checksum_none_assert(skb);*/ 757 + skb->ip_summed = CHECKSUM_NONE; 758 + 759 + /* if we're doing Rx csum offload, set it up */ 760 + if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { 761 + csumstatus = (cur_p->app2 & 762 + XAE_FULL_CSUM_STATUS_MASK) >> 3; 763 + if ((csumstatus == XAE_IP_TCP_CSUM_VALIDATED) || 764 + (csumstatus == XAE_IP_UDP_CSUM_VALIDATED)) { 765 + skb->ip_summed = CHECKSUM_UNNECESSARY; 766 + } 767 + } else if ((lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) != 0 && 768 + skb->protocol == __constant_htons(ETH_P_IP) && 769 + skb->len > 64) { 770 + skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); 771 + skb->ip_summed = CHECKSUM_COMPLETE; 772 + } 773 + 774 + netif_rx(skb); 775 + 776 + size += length; 777 + packets++; 778 + 779 + new_skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); 780 + if (!new_skb) { 781 + dev_err(&ndev->dev, "no memory for new sk_buff\n"); 782 + return; 783 + } 784 + cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data, 785 + lp->max_frm_size, 786 + DMA_FROM_DEVICE); 787 + cur_p->cntrl = lp->max_frm_size; 788 + cur_p->status = 0; 789 + cur_p->sw_id_offset = (u32) new_skb; 790 + 791 + lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM; 792 + cur_p = 
&lp->rx_bd_v[lp->rx_bd_ci]; 793 + } 794 + 795 + ndev->stats.rx_packets += packets; 796 + ndev->stats.rx_bytes += size; 797 + 798 + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p); 799 + } 800 + 801 + /** 802 + * axienet_tx_irq - Tx Done Isr. 803 + * @irq: irq number 804 + * @_ndev: net_device pointer 805 + * 806 + * returns: IRQ_HANDLED for all cases. 807 + * 808 + * This is the Axi DMA Tx done Isr. It invokes "axienet_start_xmit_done" 809 + * to complete the BD processing. 810 + */ 811 + static irqreturn_t axienet_tx_irq(int irq, void *_ndev) 812 + { 813 + u32 cr; 814 + unsigned int status; 815 + struct net_device *ndev = _ndev; 816 + struct axienet_local *lp = netdev_priv(ndev); 817 + 818 + status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET); 819 + if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) { 820 + axienet_start_xmit_done(lp->ndev); 821 + goto out; 822 + } 823 + if (!(status & XAXIDMA_IRQ_ALL_MASK)) 824 + dev_err(&ndev->dev, "No interrupts asserted in Tx path"); 825 + if (status & XAXIDMA_IRQ_ERROR_MASK) { 826 + dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status); 827 + dev_err(&ndev->dev, "Current BD is at: 0x%x\n", 828 + (lp->tx_bd_v[lp->tx_bd_ci]).phys); 829 + 830 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 831 + /* Disable coalesce, delay timer and error interrupts */ 832 + cr &= (~XAXIDMA_IRQ_ALL_MASK); 833 + /* Write to the Tx channel control register */ 834 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 835 + 836 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 837 + /* Disable coalesce, delay timer and error interrupts */ 838 + cr &= (~XAXIDMA_IRQ_ALL_MASK); 839 + /* Write to the Rx channel control register */ 840 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 841 + 842 + tasklet_schedule(&lp->dma_err_tasklet); 843 + } 844 + out: 845 + axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status); 846 + return IRQ_HANDLED; 847 + } 848 + 849 + /** 850 + * axienet_rx_irq - Rx Isr. 
/**
 * axienet_rx_irq - Rx Isr.
 * @irq:	irq number
 * @_ndev:	net_device pointer
 *
 * returns: IRQ_HANDLED for all cases.
 *
 * This is the Axi DMA Rx Isr. It invokes "axienet_recv" to complete the BD
 * processing. On a DMA error it masks both channels' interrupts and defers
 * recovery to the dma_err_tasklet.
 */
static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
{
	u32 cr;
	unsigned int status;
	struct net_device *ndev = _ndev;
	struct axienet_local *lp = netdev_priv(ndev);

	status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
	if (status & (XAXIDMA_IRQ_IOC_MASK | XAXIDMA_IRQ_DELAY_MASK)) {
		axienet_recv(lp->ndev);
		goto out;
	}
	if (!(status & XAXIDMA_IRQ_ALL_MASK))
		dev_err(&ndev->dev, "No interrupts asserted in Rx path");
	if (status & XAXIDMA_IRQ_ERROR_MASK) {
		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* Finally write to the Tx channel control register */
		axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);

		cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
		/* Disable coalesce, delay timer and error interrupts */
		cr &= (~XAXIDMA_IRQ_ALL_MASK);
		/* write to the Rx channel control register */
		axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);

		tasklet_schedule(&lp->dma_err_tasklet);
	}
out:
	/* Acknowledge the interrupt cause bits we observed. */
	axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
	return IRQ_HANDLED;
}

/**
 * axienet_open - Driver open routine.
 * @ndev:	Pointer to net_device structure
 *
 * returns: 0, on success.
 *	    -ENODEV, if PHY cannot be connected to
 *	    non-zero error value on failure
 *
 * This is the driver open routine. It calls phy_start to start the PHY device.
 * It also allocates interrupt service routines, enables the interrupt lines
 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
 * descriptors are initialized.
 */
static int axienet_open(struct net_device *ndev)
{
	int ret, mdio_mcreg;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_open()\n");

	mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;
	/* Disable the MDIO interface till Axi Ethernet Reset is completed.
	 * When we do an Axi Ethernet reset, it resets the complete core
	 * including the MDIO. If MDIO is not disabled when the reset
	 * process is started, MDIO will be broken afterwards. */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET,
		    (mdio_mcreg & (~XAE_MDIO_MC_MDIOEN_MASK)));
	axienet_device_reset(ndev);
	/* Enable the MDIO */
	axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg);
	ret = axienet_mdio_wait_until_ready(lp);
	if (ret < 0)
		return ret;

	/* Attach and start the PHY if one was found in the device tree. */
	if (lp->phy_node) {
		lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
					     axienet_adjust_link, 0,
					     PHY_INTERFACE_MODE_GMII);
		if (!lp->phy_dev) {
			dev_err(lp->dev, "of_phy_connect() failed\n");
			return -ENODEV;
		}
		phy_start(lp->phy_dev);
	}

	/* Enable interrupts for Axi DMA Tx */
	ret = request_irq(lp->tx_irq, axienet_tx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_tx_irq;
	/* Enable interrupts for Axi DMA Rx */
	ret = request_irq(lp->rx_irq, axienet_rx_irq, 0, ndev->name, ndev);
	if (ret)
		goto err_rx_irq;
	/* Enable tasklets for Axi DMA error handling */
	tasklet_enable(&lp->dma_err_tasklet);
	return 0;

err_rx_irq:
	free_irq(lp->tx_irq, ndev);
err_tx_irq:
	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;
	dev_err(lp->dev, "request_irq() failed\n");
	return ret;
}

/**
 * axienet_stop - Driver stop routine.
 * @ndev:	Pointer to net_device structure
 *
 * returns: 0, on success.
 *
 * This is the driver stop routine. It calls phy_disconnect to stop the PHY
 * device. It also removes the interrupt handlers and disables the interrupts.
 * The Axi DMA Tx/Rx BDs are released.
 */
static int axienet_stop(struct net_device *ndev)
{
	u32 cr;
	struct axienet_local *lp = netdev_priv(ndev);

	dev_dbg(&ndev->dev, "axienet_close()\n");

	/* Halt both DMA channels by clearing their run-stop bits, then
	 * disable the MAC transmitter and receiver. */
	cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
	axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET,
			  cr & (~XAXIDMA_CR_RUNSTOP_MASK));
	axienet_setoptions(ndev, lp->options &
			   ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN));

	tasklet_disable(&lp->dma_err_tasklet);

	free_irq(lp->tx_irq, ndev);
	free_irq(lp->rx_irq, ndev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);
	lp->phy_dev = NULL;

	axienet_dma_bd_release(ndev);
	return 0;
}
1016 + */ 1017 + static int axienet_change_mtu(struct net_device *ndev, int new_mtu) 1018 + { 1019 + struct axienet_local *lp = netdev_priv(ndev); 1020 + 1021 + if (netif_running(ndev)) 1022 + return -EBUSY; 1023 + if (lp->jumbo_support) { 1024 + if ((new_mtu > XAE_JUMBO_MTU) || (new_mtu < 64)) 1025 + return -EINVAL; 1026 + ndev->mtu = new_mtu; 1027 + } else { 1028 + if ((new_mtu > XAE_MTU) || (new_mtu < 64)) 1029 + return -EINVAL; 1030 + ndev->mtu = new_mtu; 1031 + } 1032 + 1033 + return 0; 1034 + } 1035 + 1036 + #ifdef CONFIG_NET_POLL_CONTROLLER 1037 + /** 1038 + * axienet_poll_controller - Axi Ethernet poll mechanism. 1039 + * @ndev: Pointer to net_device structure 1040 + * 1041 + * This implements Rx/Tx ISR poll mechanisms. The interrupts are disabled prior 1042 + * to polling the ISRs and are enabled back after the polling is done. 1043 + */ 1044 + static void axienet_poll_controller(struct net_device *ndev) 1045 + { 1046 + struct axienet_local *lp = netdev_priv(ndev); 1047 + disable_irq(lp->tx_irq); 1048 + disable_irq(lp->rx_irq); 1049 + axienet_rx_irq(lp->tx_irq, ndev); 1050 + axienet_tx_irq(lp->rx_irq, ndev); 1051 + enable_irq(lp->tx_irq); 1052 + enable_irq(lp->rx_irq); 1053 + } 1054 + #endif 1055 + 1056 + static const struct net_device_ops axienet_netdev_ops = { 1057 + .ndo_open = axienet_open, 1058 + .ndo_stop = axienet_stop, 1059 + .ndo_start_xmit = axienet_start_xmit, 1060 + .ndo_change_mtu = axienet_change_mtu, 1061 + .ndo_set_mac_address = netdev_set_mac_address, 1062 + .ndo_validate_addr = eth_validate_addr, 1063 + .ndo_set_rx_mode = axienet_set_multicast_list, 1064 + #ifdef CONFIG_NET_POLL_CONTROLLER 1065 + .ndo_poll_controller = axienet_poll_controller, 1066 + #endif 1067 + }; 1068 + 1069 + /** 1070 + * axienet_ethtools_get_settings - Get Axi Ethernet settings related to PHY. 1071 + * @ndev: Pointer to net_device structure 1072 + * @ecmd: Pointer to ethtool_cmd structure 1073 + * 1074 + * This implements ethtool command for getting PHY settings. 
If PHY could 1075 + * not be found, the function returns -ENODEV. This function calls the 1076 + * relevant PHY ethtool API to get the PHY settings. 1077 + * Issue "ethtool ethX" under linux prompt to execute this function. 1078 + */ 1079 + static int axienet_ethtools_get_settings(struct net_device *ndev, 1080 + struct ethtool_cmd *ecmd) 1081 + { 1082 + struct axienet_local *lp = netdev_priv(ndev); 1083 + struct phy_device *phydev = lp->phy_dev; 1084 + if (!phydev) 1085 + return -ENODEV; 1086 + return phy_ethtool_gset(phydev, ecmd); 1087 + } 1088 + 1089 + /** 1090 + * axienet_ethtools_set_settings - Set PHY settings as passed in the argument. 1091 + * @ndev: Pointer to net_device structure 1092 + * @ecmd: Pointer to ethtool_cmd structure 1093 + * 1094 + * This implements ethtool command for setting various PHY settings. If PHY 1095 + * could not be found, the function returns -ENODEV. This function calls the 1096 + * relevant PHY ethtool API to set the PHY. 1097 + * Issue e.g. "ethtool -s ethX speed 1000" under linux prompt to execute this 1098 + * function. 1099 + */ 1100 + static int axienet_ethtools_set_settings(struct net_device *ndev, 1101 + struct ethtool_cmd *ecmd) 1102 + { 1103 + struct axienet_local *lp = netdev_priv(ndev); 1104 + struct phy_device *phydev = lp->phy_dev; 1105 + if (!phydev) 1106 + return -ENODEV; 1107 + return phy_ethtool_sset(phydev, ecmd); 1108 + } 1109 + 1110 + /** 1111 + * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information. 1112 + * @ndev: Pointer to net_device structure 1113 + * @ed: Pointer to ethtool_drvinfo structure 1114 + * 1115 + * This implements ethtool command for getting the driver information. 1116 + * Issue "ethtool -i ethX" under linux prompt to execute this function. 
1117 + */ 1118 + static void axienet_ethtools_get_drvinfo(struct net_device *ndev, 1119 + struct ethtool_drvinfo *ed) 1120 + { 1121 + memset(ed, 0, sizeof(struct ethtool_drvinfo)); 1122 + strcpy(ed->driver, DRIVER_NAME); 1123 + strcpy(ed->version, DRIVER_VERSION); 1124 + ed->regdump_len = sizeof(u32) * AXIENET_REGS_N; 1125 + } 1126 + 1127 + /** 1128 + * axienet_ethtools_get_regs_len - Get the total regs length present in the 1129 + * AxiEthernet core. 1130 + * @ndev: Pointer to net_device structure 1131 + * 1132 + * This implements ethtool command for getting the total register length 1133 + * information. 1134 + */ 1135 + static int axienet_ethtools_get_regs_len(struct net_device *ndev) 1136 + { 1137 + return sizeof(u32) * AXIENET_REGS_N; 1138 + } 1139 + 1140 + /** 1141 + * axienet_ethtools_get_regs - Dump the contents of all registers present 1142 + * in AxiEthernet core. 1143 + * @ndev: Pointer to net_device structure 1144 + * @regs: Pointer to ethtool_regs structure 1145 + * @ret: Void pointer used to return the contents of the registers. 1146 + * 1147 + * This implements ethtool command for getting the Axi Ethernet register dump. 1148 + * Issue "ethtool -d ethX" to execute this function. 
1149 + */ 1150 + static void axienet_ethtools_get_regs(struct net_device *ndev, 1151 + struct ethtool_regs *regs, void *ret) 1152 + { 1153 + u32 *data = (u32 *) ret; 1154 + size_t len = sizeof(u32) * AXIENET_REGS_N; 1155 + struct axienet_local *lp = netdev_priv(ndev); 1156 + 1157 + regs->version = 0; 1158 + regs->len = len; 1159 + 1160 + memset(data, 0, len); 1161 + data[0] = axienet_ior(lp, XAE_RAF_OFFSET); 1162 + data[1] = axienet_ior(lp, XAE_TPF_OFFSET); 1163 + data[2] = axienet_ior(lp, XAE_IFGP_OFFSET); 1164 + data[3] = axienet_ior(lp, XAE_IS_OFFSET); 1165 + data[4] = axienet_ior(lp, XAE_IP_OFFSET); 1166 + data[5] = axienet_ior(lp, XAE_IE_OFFSET); 1167 + data[6] = axienet_ior(lp, XAE_TTAG_OFFSET); 1168 + data[7] = axienet_ior(lp, XAE_RTAG_OFFSET); 1169 + data[8] = axienet_ior(lp, XAE_UAWL_OFFSET); 1170 + data[9] = axienet_ior(lp, XAE_UAWU_OFFSET); 1171 + data[10] = axienet_ior(lp, XAE_TPID0_OFFSET); 1172 + data[11] = axienet_ior(lp, XAE_TPID1_OFFSET); 1173 + data[12] = axienet_ior(lp, XAE_PPST_OFFSET); 1174 + data[13] = axienet_ior(lp, XAE_RCW0_OFFSET); 1175 + data[14] = axienet_ior(lp, XAE_RCW1_OFFSET); 1176 + data[15] = axienet_ior(lp, XAE_TC_OFFSET); 1177 + data[16] = axienet_ior(lp, XAE_FCC_OFFSET); 1178 + data[17] = axienet_ior(lp, XAE_EMMC_OFFSET); 1179 + data[18] = axienet_ior(lp, XAE_PHYC_OFFSET); 1180 + data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1181 + data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET); 1182 + data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET); 1183 + data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET); 1184 + data[23] = axienet_ior(lp, XAE_MDIO_MIS_OFFSET); 1185 + data[24] = axienet_ior(lp, XAE_MDIO_MIP_OFFSET); 1186 + data[25] = axienet_ior(lp, XAE_MDIO_MIE_OFFSET); 1187 + data[26] = axienet_ior(lp, XAE_MDIO_MIC_OFFSET); 1188 + data[27] = axienet_ior(lp, XAE_UAW0_OFFSET); 1189 + data[28] = axienet_ior(lp, XAE_UAW1_OFFSET); 1190 + data[29] = axienet_ior(lp, XAE_FMI_OFFSET); 1191 + data[30] = axienet_ior(lp, XAE_AF0_OFFSET); 1192 + data[31] 
= axienet_ior(lp, XAE_AF1_OFFSET); 1193 + } 1194 + 1195 + /** 1196 + * axienet_ethtools_get_pauseparam - Get the pause parameter setting for 1197 + * Tx and Rx paths. 1198 + * @ndev: Pointer to net_device structure 1199 + * @epauseparm: Pointer to ethtool_pauseparam structure. 1200 + * 1201 + * This implements ethtool command for getting axi ethernet pause frame 1202 + * setting. Issue "ethtool -a ethX" to execute this function. 1203 + */ 1204 + static void 1205 + axienet_ethtools_get_pauseparam(struct net_device *ndev, 1206 + struct ethtool_pauseparam *epauseparm) 1207 + { 1208 + u32 regval; 1209 + struct axienet_local *lp = netdev_priv(ndev); 1210 + epauseparm->autoneg = 0; 1211 + regval = axienet_ior(lp, XAE_FCC_OFFSET); 1212 + epauseparm->tx_pause = regval & XAE_FCC_FCTX_MASK; 1213 + epauseparm->rx_pause = regval & XAE_FCC_FCRX_MASK; 1214 + } 1215 + 1216 + /** 1217 + * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control) 1218 + * settings. 1219 + * @ndev: Pointer to net_device structure 1220 + * @epauseparam:Pointer to ethtool_pauseparam structure 1221 + * 1222 + * This implements ethtool command for enabling flow control on Rx and Tx 1223 + * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this 1224 + * function. 
1225 + */ 1226 + static int 1227 + axienet_ethtools_set_pauseparam(struct net_device *ndev, 1228 + struct ethtool_pauseparam *epauseparm) 1229 + { 1230 + u32 regval = 0; 1231 + struct axienet_local *lp = netdev_priv(ndev); 1232 + 1233 + if (netif_running(ndev)) { 1234 + printk(KERN_ERR "%s: Please stop netif before applying " 1235 + "configruation\n", ndev->name); 1236 + return -EFAULT; 1237 + } 1238 + 1239 + regval = axienet_ior(lp, XAE_FCC_OFFSET); 1240 + if (epauseparm->tx_pause) 1241 + regval |= XAE_FCC_FCTX_MASK; 1242 + else 1243 + regval &= ~XAE_FCC_FCTX_MASK; 1244 + if (epauseparm->rx_pause) 1245 + regval |= XAE_FCC_FCRX_MASK; 1246 + else 1247 + regval &= ~XAE_FCC_FCRX_MASK; 1248 + axienet_iow(lp, XAE_FCC_OFFSET, regval); 1249 + 1250 + return 0; 1251 + } 1252 + 1253 + /** 1254 + * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. 1255 + * @ndev: Pointer to net_device structure 1256 + * @ecoalesce: Pointer to ethtool_coalesce structure 1257 + * 1258 + * This implements ethtool command for getting the DMA interrupt coalescing 1259 + * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to 1260 + * execute this function. 1261 + */ 1262 + static int axienet_ethtools_get_coalesce(struct net_device *ndev, 1263 + struct ethtool_coalesce *ecoalesce) 1264 + { 1265 + u32 regval = 0; 1266 + struct axienet_local *lp = netdev_priv(ndev); 1267 + regval = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1268 + ecoalesce->rx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 1269 + >> XAXIDMA_COALESCE_SHIFT; 1270 + regval = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1271 + ecoalesce->tx_max_coalesced_frames = (regval & XAXIDMA_COALESCE_MASK) 1272 + >> XAXIDMA_COALESCE_SHIFT; 1273 + return 0; 1274 + } 1275 + 1276 + /** 1277 + * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. 
1278 + * @ndev: Pointer to net_device structure 1279 + * @ecoalesce: Pointer to ethtool_coalesce structure 1280 + * 1281 + * This implements ethtool command for setting the DMA interrupt coalescing 1282 + * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux 1283 + * prompt to execute this function. 1284 + */ 1285 + static int axienet_ethtools_set_coalesce(struct net_device *ndev, 1286 + struct ethtool_coalesce *ecoalesce) 1287 + { 1288 + struct axienet_local *lp = netdev_priv(ndev); 1289 + 1290 + if (netif_running(ndev)) { 1291 + printk(KERN_ERR "%s: Please stop netif before applying " 1292 + "configruation\n", ndev->name); 1293 + return -EFAULT; 1294 + } 1295 + 1296 + if ((ecoalesce->rx_coalesce_usecs) || 1297 + (ecoalesce->rx_coalesce_usecs_irq) || 1298 + (ecoalesce->rx_max_coalesced_frames_irq) || 1299 + (ecoalesce->tx_coalesce_usecs) || 1300 + (ecoalesce->tx_coalesce_usecs_irq) || 1301 + (ecoalesce->tx_max_coalesced_frames_irq) || 1302 + (ecoalesce->stats_block_coalesce_usecs) || 1303 + (ecoalesce->use_adaptive_rx_coalesce) || 1304 + (ecoalesce->use_adaptive_tx_coalesce) || 1305 + (ecoalesce->pkt_rate_low) || 1306 + (ecoalesce->rx_coalesce_usecs_low) || 1307 + (ecoalesce->rx_max_coalesced_frames_low) || 1308 + (ecoalesce->tx_coalesce_usecs_low) || 1309 + (ecoalesce->tx_max_coalesced_frames_low) || 1310 + (ecoalesce->pkt_rate_high) || 1311 + (ecoalesce->rx_coalesce_usecs_high) || 1312 + (ecoalesce->rx_max_coalesced_frames_high) || 1313 + (ecoalesce->tx_coalesce_usecs_high) || 1314 + (ecoalesce->tx_max_coalesced_frames_high) || 1315 + (ecoalesce->rate_sample_interval)) 1316 + return -EOPNOTSUPP; 1317 + if (ecoalesce->rx_max_coalesced_frames) 1318 + lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; 1319 + if (ecoalesce->tx_max_coalesced_frames) 1320 + lp->coalesce_count_tx = ecoalesce->tx_max_coalesced_frames; 1321 + 1322 + return 0; 1323 + } 1324 + 1325 + static struct ethtool_ops axienet_ethtool_ops = { 1326 + .get_settings = 
axienet_ethtools_get_settings, 1327 + .set_settings = axienet_ethtools_set_settings, 1328 + .get_drvinfo = axienet_ethtools_get_drvinfo, 1329 + .get_regs_len = axienet_ethtools_get_regs_len, 1330 + .get_regs = axienet_ethtools_get_regs, 1331 + .get_link = ethtool_op_get_link, 1332 + .get_pauseparam = axienet_ethtools_get_pauseparam, 1333 + .set_pauseparam = axienet_ethtools_set_pauseparam, 1334 + .get_coalesce = axienet_ethtools_get_coalesce, 1335 + .set_coalesce = axienet_ethtools_set_coalesce, 1336 + }; 1337 + 1338 + /** 1339 + * axienet_dma_err_handler - Tasklet handler for Axi DMA Error 1340 + * @data: Data passed 1341 + * 1342 + * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the 1343 + * Tx/Rx BDs. 1344 + */ 1345 + static void axienet_dma_err_handler(unsigned long data) 1346 + { 1347 + u32 axienet_status; 1348 + u32 cr, i; 1349 + int mdio_mcreg; 1350 + struct axienet_local *lp = (struct axienet_local *) data; 1351 + struct net_device *ndev = lp->ndev; 1352 + struct axidma_bd *cur_p; 1353 + 1354 + axienet_setoptions(ndev, lp->options & 1355 + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1356 + mdio_mcreg = axienet_ior(lp, XAE_MDIO_MC_OFFSET); 1357 + axienet_mdio_wait_until_ready(lp); 1358 + /* Disable the MDIO interface till Axi Ethernet Reset is completed. 1359 + * When we do an Axi Ethernet reset, it resets the complete core 1360 + * including the MDIO. So if MDIO is not disabled when the reset 1361 + * process is started, MDIO will be broken afterwards. 
*/ 1362 + axienet_iow(lp, XAE_MDIO_MC_OFFSET, (mdio_mcreg & 1363 + ~XAE_MDIO_MC_MDIOEN_MASK)); 1364 + 1365 + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_TX_CR_OFFSET); 1366 + __axienet_device_reset(lp, &ndev->dev, XAXIDMA_RX_CR_OFFSET); 1367 + 1368 + axienet_iow(lp, XAE_MDIO_MC_OFFSET, mdio_mcreg); 1369 + axienet_mdio_wait_until_ready(lp); 1370 + 1371 + for (i = 0; i < TX_BD_NUM; i++) { 1372 + cur_p = &lp->tx_bd_v[i]; 1373 + if (cur_p->phys) 1374 + dma_unmap_single(ndev->dev.parent, cur_p->phys, 1375 + (cur_p->cntrl & 1376 + XAXIDMA_BD_CTRL_LENGTH_MASK), 1377 + DMA_TO_DEVICE); 1378 + if (cur_p->app4) 1379 + dev_kfree_skb_irq((struct sk_buff *) cur_p->app4); 1380 + cur_p->phys = 0; 1381 + cur_p->cntrl = 0; 1382 + cur_p->status = 0; 1383 + cur_p->app0 = 0; 1384 + cur_p->app1 = 0; 1385 + cur_p->app2 = 0; 1386 + cur_p->app3 = 0; 1387 + cur_p->app4 = 0; 1388 + cur_p->sw_id_offset = 0; 1389 + } 1390 + 1391 + for (i = 0; i < RX_BD_NUM; i++) { 1392 + cur_p = &lp->rx_bd_v[i]; 1393 + cur_p->status = 0; 1394 + cur_p->app0 = 0; 1395 + cur_p->app1 = 0; 1396 + cur_p->app2 = 0; 1397 + cur_p->app3 = 0; 1398 + cur_p->app4 = 0; 1399 + } 1400 + 1401 + lp->tx_bd_ci = 0; 1402 + lp->tx_bd_tail = 0; 1403 + lp->rx_bd_ci = 0; 1404 + 1405 + /* Start updating the Rx channel control register */ 1406 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1407 + /* Update the interrupt coalesce count */ 1408 + cr = ((cr & ~XAXIDMA_COALESCE_MASK) | 1409 + (XAXIDMA_DFT_RX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); 1410 + /* Update the delay timer count */ 1411 + cr = ((cr & ~XAXIDMA_DELAY_MASK) | 1412 + (XAXIDMA_DFT_RX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 1413 + /* Enable coalesce, delay timer and error interrupts */ 1414 + cr |= XAXIDMA_IRQ_ALL_MASK; 1415 + /* Finally write to the Rx channel control register */ 1416 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr); 1417 + 1418 + /* Start updating the Tx channel control register */ 1419 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1420 + /* 
Update the interrupt coalesce count */ 1421 + cr = (((cr & ~XAXIDMA_COALESCE_MASK)) | 1422 + (XAXIDMA_DFT_TX_THRESHOLD << XAXIDMA_COALESCE_SHIFT)); 1423 + /* Update the delay timer count */ 1424 + cr = (((cr & ~XAXIDMA_DELAY_MASK)) | 1425 + (XAXIDMA_DFT_TX_WAITBOUND << XAXIDMA_DELAY_SHIFT)); 1426 + /* Enable coalesce, delay timer and error interrupts */ 1427 + cr |= XAXIDMA_IRQ_ALL_MASK; 1428 + /* Finally write to the Tx channel control register */ 1429 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr); 1430 + 1431 + /* Populate the tail pointer and bring the Rx Axi DMA engine out of 1432 + * halted state. This will make the Rx side ready for reception.*/ 1433 + axienet_dma_out32(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); 1434 + cr = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET); 1435 + axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, 1436 + cr | XAXIDMA_CR_RUNSTOP_MASK); 1437 + axienet_dma_out32(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + 1438 + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); 1439 + 1440 + /* Write to the RS (Run-stop) bit in the Tx channel control register. 1441 + * Tx channel is now ready to run. 
But only after we write to the 1442 + * tail pointer register that the Tx channel will start transmitting */ 1443 + axienet_dma_out32(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); 1444 + cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET); 1445 + axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, 1446 + cr | XAXIDMA_CR_RUNSTOP_MASK); 1447 + 1448 + axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET); 1449 + axienet_status &= ~XAE_RCW1_RX_MASK; 1450 + axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status); 1451 + 1452 + axienet_status = axienet_ior(lp, XAE_IP_OFFSET); 1453 + if (axienet_status & XAE_INT_RXRJECT_MASK) 1454 + axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK); 1455 + axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK); 1456 + 1457 + /* Sync default options with HW but leave receiver and 1458 + * transmitter disabled.*/ 1459 + axienet_setoptions(ndev, lp->options & 1460 + ~(XAE_OPTION_TXEN | XAE_OPTION_RXEN)); 1461 + axienet_set_mac_address(ndev, NULL); 1462 + axienet_set_multicast_list(ndev); 1463 + axienet_setoptions(ndev, lp->options); 1464 + } 1465 + 1466 + /** 1467 + * axienet_of_probe - Axi Ethernet probe function. 1468 + * @op: Pointer to platform device structure. 1469 + * @match: Pointer to device id structure 1470 + * 1471 + * returns: 0, on success 1472 + * Non-zero error value on failure. 1473 + * 1474 + * This is the probe routine for Axi Ethernet driver. This is called before 1475 + * any other driver routines are invoked. It allocates and sets up the Ethernet 1476 + * device. Parses through device tree and populates fields of 1477 + * axienet_local. It registers the Ethernet device. 
1478 + */ 1479 + static int __devinit axienet_of_probe(struct platform_device *op) 1480 + { 1481 + __be32 *p; 1482 + int size, ret = 0; 1483 + struct device_node *np; 1484 + struct axienet_local *lp; 1485 + struct net_device *ndev; 1486 + const void *addr; 1487 + 1488 + ndev = alloc_etherdev(sizeof(*lp)); 1489 + if (!ndev) { 1490 + dev_err(&op->dev, "could not allocate device.\n"); 1491 + return -ENOMEM; 1492 + } 1493 + 1494 + ether_setup(ndev); 1495 + dev_set_drvdata(&op->dev, ndev); 1496 + 1497 + SET_NETDEV_DEV(ndev, &op->dev); 1498 + ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ 1499 + ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; 1500 + ndev->netdev_ops = &axienet_netdev_ops; 1501 + ndev->ethtool_ops = &axienet_ethtool_ops; 1502 + 1503 + lp = netdev_priv(ndev); 1504 + lp->ndev = ndev; 1505 + lp->dev = &op->dev; 1506 + lp->options = XAE_OPTION_DEFAULTS; 1507 + /* Map device registers */ 1508 + lp->regs = of_iomap(op->dev.of_node, 0); 1509 + if (!lp->regs) { 1510 + dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); 1511 + goto nodev; 1512 + } 1513 + /* Setup checksum offload, but default to off if not specified */ 1514 + lp->features = 0; 1515 + 1516 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,txcsum", NULL); 1517 + if (p) { 1518 + switch (be32_to_cpup(p)) { 1519 + case 1: 1520 + lp->csum_offload_on_tx_path = 1521 + XAE_FEATURE_PARTIAL_TX_CSUM; 1522 + lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; 1523 + /* Can checksum TCP/UDP over IPv4. */ 1524 + ndev->features |= NETIF_F_IP_CSUM; 1525 + break; 1526 + case 2: 1527 + lp->csum_offload_on_tx_path = 1528 + XAE_FEATURE_FULL_TX_CSUM; 1529 + lp->features |= XAE_FEATURE_FULL_TX_CSUM; 1530 + /* Can checksum TCP/UDP over IPv4. 
*/ 1531 + ndev->features |= NETIF_F_IP_CSUM; 1532 + break; 1533 + default: 1534 + lp->csum_offload_on_tx_path = XAE_NO_CSUM_OFFLOAD; 1535 + } 1536 + } 1537 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxcsum", NULL); 1538 + if (p) { 1539 + switch (be32_to_cpup(p)) { 1540 + case 1: 1541 + lp->csum_offload_on_rx_path = 1542 + XAE_FEATURE_PARTIAL_RX_CSUM; 1543 + lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; 1544 + break; 1545 + case 2: 1546 + lp->csum_offload_on_rx_path = 1547 + XAE_FEATURE_FULL_RX_CSUM; 1548 + lp->features |= XAE_FEATURE_FULL_RX_CSUM; 1549 + break; 1550 + default: 1551 + lp->csum_offload_on_rx_path = XAE_NO_CSUM_OFFLOAD; 1552 + } 1553 + } 1554 + /* For supporting jumbo frames, the Axi Ethernet hardware must have 1555 + * a larger Rx/Tx Memory. Typically, the size must be more than or 1556 + * equal to 16384 bytes, so that we can enable jumbo option and start 1557 + * supporting jumbo frames. Here we check for memory allocated for 1558 + * Rx/Tx in the hardware from the device-tree and accordingly set 1559 + * flags. 
*/ 1560 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,rxmem", NULL); 1561 + if (p) { 1562 + if ((be32_to_cpup(p)) >= 0x4000) 1563 + lp->jumbo_support = 1; 1564 + } 1565 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type", 1566 + NULL); 1567 + if (p) 1568 + lp->temac_type = be32_to_cpup(p); 1569 + p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL); 1570 + if (p) 1571 + lp->phy_type = be32_to_cpup(p); 1572 + 1573 + /* Find the DMA node, map the DMA registers, and decode the DMA IRQs */ 1574 + np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); 1575 + if (!np) { 1576 + dev_err(&op->dev, "could not find DMA node\n"); 1577 + goto err_iounmap; 1578 + } 1579 + lp->dma_regs = of_iomap(np, 0); 1580 + if (lp->dma_regs) { 1581 + dev_dbg(&op->dev, "MEM base: %p\n", lp->dma_regs); 1582 + } else { 1583 + dev_err(&op->dev, "unable to map DMA registers\n"); 1584 + of_node_put(np); 1585 + } 1586 + lp->rx_irq = irq_of_parse_and_map(np, 1); 1587 + lp->tx_irq = irq_of_parse_and_map(np, 0); 1588 + of_node_put(np); 1589 + if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) { 1590 + dev_err(&op->dev, "could not determine irqs\n"); 1591 + ret = -ENOMEM; 1592 + goto err_iounmap_2; 1593 + } 1594 + 1595 + /* Retrieve the MAC address */ 1596 + addr = of_get_property(op->dev.of_node, "local-mac-address", &size); 1597 + if ((!addr) || (size != 6)) { 1598 + dev_err(&op->dev, "could not find MAC address\n"); 1599 + ret = -ENODEV; 1600 + goto err_iounmap_2; 1601 + } 1602 + axienet_set_mac_address(ndev, (void *) addr); 1603 + 1604 + lp->coalesce_count_rx = XAXIDMA_DFT_RX_THRESHOLD; 1605 + lp->coalesce_count_tx = XAXIDMA_DFT_TX_THRESHOLD; 1606 + 1607 + lp->phy_node = of_parse_phandle(op->dev.of_node, "phy-handle", 0); 1608 + ret = axienet_mdio_setup(lp, op->dev.of_node); 1609 + if (ret) 1610 + dev_warn(&op->dev, "error registering MDIO bus\n"); 1611 + 1612 + ret = register_netdev(lp->ndev); 1613 + if (ret) { 1614 + dev_err(lp->dev, 
"register_netdev() error (%i)\n", ret); 1615 + goto err_iounmap_2; 1616 + } 1617 + 1618 + tasklet_init(&lp->dma_err_tasklet, axienet_dma_err_handler, 1619 + (unsigned long) lp); 1620 + tasklet_disable(&lp->dma_err_tasklet); 1621 + 1622 + return 0; 1623 + 1624 + err_iounmap_2: 1625 + if (lp->dma_regs) 1626 + iounmap(lp->dma_regs); 1627 + err_iounmap: 1628 + iounmap(lp->regs); 1629 + nodev: 1630 + free_netdev(ndev); 1631 + ndev = NULL; 1632 + return ret; 1633 + } 1634 + 1635 + static int __devexit axienet_of_remove(struct platform_device *op) 1636 + { 1637 + struct net_device *ndev = dev_get_drvdata(&op->dev); 1638 + struct axienet_local *lp = netdev_priv(ndev); 1639 + 1640 + axienet_mdio_teardown(lp); 1641 + unregister_netdev(ndev); 1642 + 1643 + if (lp->phy_node) 1644 + of_node_put(lp->phy_node); 1645 + lp->phy_node = NULL; 1646 + 1647 + dev_set_drvdata(&op->dev, NULL); 1648 + 1649 + iounmap(lp->regs); 1650 + if (lp->dma_regs) 1651 + iounmap(lp->dma_regs); 1652 + free_netdev(ndev); 1653 + 1654 + return 0; 1655 + } 1656 + 1657 + static struct platform_driver axienet_of_driver = { 1658 + .probe = axienet_of_probe, 1659 + .remove = __devexit_p(axienet_of_remove), 1660 + .driver = { 1661 + .owner = THIS_MODULE, 1662 + .name = "xilinx_axienet", 1663 + .of_match_table = axienet_of_match, 1664 + }, 1665 + }; 1666 + 1667 + static int __init axienet_init(void) 1668 + { 1669 + return platform_driver_register(&axienet_of_driver); 1670 + } 1671 + 1672 + static void __exit axienet_exit(void) 1673 + { 1674 + platform_driver_unregister(&axienet_of_driver); 1675 + } 1676 + 1677 + module_init(axienet_init); 1678 + module_exit(axienet_exit); 1679 + 1680 + MODULE_DESCRIPTION("Xilinx Axi Ethernet driver"); 1681 + MODULE_AUTHOR("Xilinx"); 1682 + MODULE_LICENSE("GPL");
+238
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
··· 1 + /* 2 + * MDIO bus driver for the Xilinx Axi Ethernet device 3 + * 4 + * Copyright (c) 2009 Secret Lab Technologies, Ltd. 5 + * Copyright (c) 2010 Xilinx, Inc. All rights reserved. 6 + * Copyright (c) 2012 Daniel Borkmann, <daniel.borkmann@tik.ee.ethz.ch> 7 + * Copyright (c) 2012 Ariane Keller, <ariane.keller@tik.ee.ethz.ch> 8 + */ 9 + 10 + #include <linux/of_address.h> 11 + #include <linux/of_mdio.h> 12 + #include <linux/jiffies.h> 13 + 14 + #include "xilinx_axienet.h" 15 + 16 + #define MAX_MDIO_FREQ 2500000 /* 2.5 MHz */ 17 + #define DEFAULT_CLOCK_DIVISOR XAE_MDIO_DIV_DFT 18 + 19 + /* Wait till MDIO interface is ready to accept a new transaction.*/ 20 + int axienet_mdio_wait_until_ready(struct axienet_local *lp) 21 + { 22 + long end = jiffies + 2; 23 + while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) & 24 + XAE_MDIO_MCR_READY_MASK)) { 25 + if (end - jiffies <= 0) { 26 + WARN_ON(1); 27 + return -ETIMEDOUT; 28 + } 29 + udelay(1); 30 + } 31 + return 0; 32 + } 33 + 34 + /** 35 + * axienet_mdio_read - MDIO interface read function 36 + * @bus: Pointer to mii bus structure 37 + * @phy_id: Address of the PHY device 38 + * @reg: PHY register to read 39 + * 40 + * returns: The register contents on success, -ETIMEDOUT on a timeout 41 + * 42 + * Reads the contents of the requested register from the requested PHY 43 + * address by first writing the details into MCR register. After a while 44 + * the register MRD is read to obtain the PHY register content. 
45 + */ 46 + static int axienet_mdio_read(struct mii_bus *bus, int phy_id, int reg) 47 + { 48 + u32 rc; 49 + int ret; 50 + struct axienet_local *lp = bus->priv; 51 + 52 + ret = axienet_mdio_wait_until_ready(lp); 53 + if (ret < 0) 54 + return ret; 55 + 56 + axienet_iow(lp, XAE_MDIO_MCR_OFFSET, 57 + (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) & 58 + XAE_MDIO_MCR_PHYAD_MASK) | 59 + ((reg << XAE_MDIO_MCR_REGAD_SHIFT) & 60 + XAE_MDIO_MCR_REGAD_MASK) | 61 + XAE_MDIO_MCR_INITIATE_MASK | 62 + XAE_MDIO_MCR_OP_READ_MASK)); 63 + 64 + ret = axienet_mdio_wait_until_ready(lp); 65 + if (ret < 0) 66 + return ret; 67 + 68 + rc = axienet_ior(lp, XAE_MDIO_MRD_OFFSET) & 0x0000FFFF; 69 + 70 + dev_dbg(lp->dev, "axienet_mdio_read(phy_id=%i, reg=%x) == %x\n", 71 + phy_id, reg, rc); 72 + 73 + return rc; 74 + } 75 + 76 + /** 77 + * axienet_mdio_write - MDIO interface write function 78 + * @bus: Pointer to mii bus structure 79 + * @phy_id: Address of the PHY device 80 + * @reg: PHY register to write to 81 + * @val: Value to be written into the register 82 + * 83 + * returns: 0 on success, -ETIMEDOUT on a timeout 84 + * 85 + * Writes the value to the requested register by first writing the value 86 + * into MWD register. The the MCR register is then appropriately setup 87 + * to finish the write operation. 
88 + */ 89 + static int axienet_mdio_write(struct mii_bus *bus, int phy_id, int reg, 90 + u16 val) 91 + { 92 + int ret; 93 + struct axienet_local *lp = bus->priv; 94 + 95 + dev_dbg(lp->dev, "axienet_mdio_write(phy_id=%i, reg=%x, val=%x)\n", 96 + phy_id, reg, val); 97 + 98 + ret = axienet_mdio_wait_until_ready(lp); 99 + if (ret < 0) 100 + return ret; 101 + 102 + axienet_iow(lp, XAE_MDIO_MWD_OFFSET, (u32) val); 103 + axienet_iow(lp, XAE_MDIO_MCR_OFFSET, 104 + (((phy_id << XAE_MDIO_MCR_PHYAD_SHIFT) & 105 + XAE_MDIO_MCR_PHYAD_MASK) | 106 + ((reg << XAE_MDIO_MCR_REGAD_SHIFT) & 107 + XAE_MDIO_MCR_REGAD_MASK) | 108 + XAE_MDIO_MCR_INITIATE_MASK | 109 + XAE_MDIO_MCR_OP_WRITE_MASK)); 110 + 111 + ret = axienet_mdio_wait_until_ready(lp); 112 + if (ret < 0) 113 + return ret; 114 + return 0; 115 + } 116 + 117 + /** 118 + * axienet_mdio_setup - MDIO setup function 119 + * @lp: Pointer to axienet local data structure. 120 + * @np: Pointer to device node 121 + * 122 + * returns: 0 on success, -ETIMEDOUT on a timeout, -ENOMEM when 123 + * mdiobus_alloc (to allocate memory for mii bus structure) fails. 124 + * 125 + * Sets up the MDIO interface by initializing the MDIO clock and enabling the 126 + * MDIO interface in hardware. Register the MDIO interface. 
127 + **/ 128 + int axienet_mdio_setup(struct axienet_local *lp, struct device_node *np) 129 + { 130 + int ret; 131 + u32 clk_div, host_clock; 132 + u32 *property_p; 133 + struct mii_bus *bus; 134 + struct resource res; 135 + struct device_node *np1; 136 + 137 + /* clk_div can be calculated by deriving it from the equation: 138 + * fMDIO = fHOST / ((1 + clk_div) * 2) 139 + * 140 + * Where fMDIO <= 2500000, so we get: 141 + * fHOST / ((1 + clk_div) * 2) <= 2500000 142 + * 143 + * Then we get: 144 + * 1 / ((1 + clk_div) * 2) <= (2500000 / fHOST) 145 + * 146 + * Then we get: 147 + * 1 / (1 + clk_div) <= ((2500000 * 2) / fHOST) 148 + * 149 + * Then we get: 150 + * 1 / (1 + clk_div) <= (5000000 / fHOST) 151 + * 152 + * So: 153 + * (1 + clk_div) >= (fHOST / 5000000) 154 + * 155 + * And finally: 156 + * clk_div >= (fHOST / 5000000) - 1 157 + * 158 + * fHOST can be read from the flattened device tree as property 159 + * "clock-frequency" from the CPU 160 + */ 161 + 162 + np1 = of_find_node_by_name(NULL, "cpu"); 163 + if (!np1) { 164 + printk(KERN_WARNING "%s(): Could not find CPU device node.", 165 + __func__); 166 + printk(KERN_WARNING "Setting MDIO clock divisor to " 167 + "default %d\n", DEFAULT_CLOCK_DIVISOR); 168 + clk_div = DEFAULT_CLOCK_DIVISOR; 169 + goto issue; 170 + } 171 + property_p = (u32 *) of_get_property(np1, "clock-frequency", NULL); 172 + if (!property_p) { 173 + printk(KERN_WARNING "%s(): Could not find CPU property: " 174 + "clock-frequency.", __func__); 175 + printk(KERN_WARNING "Setting MDIO clock divisor to " 176 + "default %d\n", DEFAULT_CLOCK_DIVISOR); 177 + clk_div = DEFAULT_CLOCK_DIVISOR; 178 + goto issue; 179 + } 180 + 181 + host_clock = be32_to_cpup(property_p); 182 + clk_div = (host_clock / (MAX_MDIO_FREQ * 2)) - 1; 183 + /* If there is any remainder from the division of 184 + * fHOST / (MAX_MDIO_FREQ * 2), then we need to add 185 + * 1 to the clock divisor or we will surely be above 2.5 MHz */ 186 + if (host_clock % (MAX_MDIO_FREQ * 2)) 187 + 
clk_div++; 188 + 189 + printk(KERN_DEBUG "%s(): Setting MDIO clock divisor to %u based " 190 + "on %u Hz host clock.\n", __func__, clk_div, host_clock); 191 + 192 + of_node_put(np1); 193 + issue: 194 + axienet_iow(lp, XAE_MDIO_MC_OFFSET, 195 + (((u32) clk_div) | XAE_MDIO_MC_MDIOEN_MASK)); 196 + 197 + ret = axienet_mdio_wait_until_ready(lp); 198 + if (ret < 0) 199 + return ret; 200 + 201 + bus = mdiobus_alloc(); 202 + if (!bus) 203 + return -ENOMEM; 204 + 205 + np1 = of_get_parent(lp->phy_node); 206 + of_address_to_resource(np1, 0, &res); 207 + snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx", 208 + (unsigned long long) res.start); 209 + 210 + bus->priv = lp; 211 + bus->name = "Xilinx Axi Ethernet MDIO"; 212 + bus->read = axienet_mdio_read; 213 + bus->write = axienet_mdio_write; 214 + bus->parent = lp->dev; 215 + bus->irq = lp->mdio_irqs; /* preallocated IRQ table */ 216 + lp->mii_bus = bus; 217 + 218 + ret = of_mdiobus_register(bus, np1); 219 + if (ret) { 220 + mdiobus_free(bus); 221 + return ret; 222 + } 223 + return 0; 224 + } 225 + 226 + /** 227 + * axienet_mdio_teardown - MDIO remove function 228 + * @lp: Pointer to axienet local data structure. 229 + * 230 + * Unregisters the MDIO and frees any associate memory for mii bus. 231 + */ 232 + void axienet_mdio_teardown(struct axienet_local *lp) 233 + { 234 + mdiobus_unregister(lp->mii_bus); 235 + kfree(lp->mii_bus->irq); 236 + mdiobus_free(lp->mii_bus); 237 + lp->mii_bus = NULL; 238 + }