Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: emac: emac gigabit ethernet controller driver

Add support for the Qualcomm Technologies, Inc. EMAC gigabit Ethernet
controller.

This driver supports the following features:
1) Checksum offload.
2) Interrupt coalescing support.
3) SGMII phy.
4) phylib interface for external phy

Based on original work by
Niranjana Vishwanathapura <nvishwan@codeaurora.org>
Gilad Avidov <gavidov@codeaurora.org>

Signed-off-by: Timur Tabi <timur@codeaurora.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Timur Tabi and committed by
David S. Miller
b9b17deb 04bed143

+3974
+111
Documentation/devicetree/bindings/net/qcom-emac.txt
··· 1 + Qualcomm Technologies EMAC Gigabit Ethernet Controller 2 + 3 + This network controller consists of two devices: a MAC and an SGMII 4 + internal PHY. Each device is represented by a device tree node. A phandle 5 + connects the MAC node to its corresponding internal phy node. Another 6 + phandle points to the external PHY node. 7 + 8 + Required properties: 9 + 10 + MAC node: 11 + - compatible : Should be "qcom,fsm9900-emac". 12 + - reg : Offset and length of the register regions for the device 13 + - interrupts : Interrupt number used by this controller 14 + - mac-address : The 6-byte MAC address. If present, it is the default 15 + MAC address. 16 + - internal-phy : phandle to the internal PHY node 17 + - phy-handle : phandle to the external PHY node 18 + 19 + Internal PHY node: 20 + - compatible : Should be "qcom,fsm9900-emac-sgmii" or "qcom,qdf2432-emac-sgmii". 21 + - reg : Offset and length of the register region(s) for the device 22 + - interrupts : Interrupt number used by this controller 23 + 24 + The external phy child node: 25 + - reg : The phy address 26 + 27 + Example: 28 + 29 + FSM9900: 30 + 31 + soc { 32 + #address-cells = <1>; 33 + #size-cells = <1>; 34 + 35 + emac0: ethernet@feb20000 { 36 + compatible = "qcom,fsm9900-emac"; 37 + reg = <0xfeb20000 0x10000>, 38 + <0xfeb36000 0x1000>; 39 + interrupts = <76>; 40 + 41 + clocks = <&gcc 0>, <&gcc 1>, <&gcc 3>, <&gcc 4>, <&gcc 5>, 42 + <&gcc 6>, <&gcc 7>; 43 + clock-names = "axi_clk", "cfg_ahb_clk", "high_speed_clk", 44 + "mdio_clk", "tx_clk", "rx_clk", "sys_clk"; 45 + 46 + internal-phy = <&emac_sgmii>; 47 + 48 + phy-handle = <&phy0>; 49 + 50 + #address-cells = <1>; 51 + #size-cells = <0>; 52 + phy0: ethernet-phy@0 { 53 + reg = <0>; 54 + }; 55 + 56 + pinctrl-names = "default"; 57 + pinctrl-0 = <&mdio_pins_a>; 58 + }; 59 + 60 + emac_sgmii: ethernet@feb38000 { 61 + compatible = "qcom,fsm9900-emac-sgmii"; 62 + reg = <0xfeb38000 0x1000>; 63 + interrupts = <80>; 64 + }; 65 + 66 + tlmm: pinctrl@fd510000 { 67 
+ compatible = "qcom,fsm9900-pinctrl"; 68 + 69 + mdio_pins_a: mdio { 70 + state { 71 + pins = "gpio123", "gpio124"; 72 + function = "mdio"; 73 + }; 74 + }; 75 + }; 76 + 77 + 78 + QDF2432: 79 + 80 + soc { 81 + #address-cells = <2>; 82 + #size-cells = <2>; 83 + 84 + emac0: ethernet@38800000 { 85 + compatible = "qcom,fsm9900-emac"; 86 + reg = <0x0 0x38800000 0x0 0x10000>, 87 + <0x0 0x38816000 0x0 0x1000>; 88 + interrupts = <0 256 4>; 89 + 90 + clocks = <&gcc 0>, <&gcc 1>, <&gcc 3>, <&gcc 4>, <&gcc 5>, 91 + <&gcc 6>, <&gcc 7>; 92 + clock-names = "axi_clk", "cfg_ahb_clk", "high_speed_clk", 93 + "mdio_clk", "tx_clk", "rx_clk", "sys_clk"; 94 + 95 + internal-phy = <&emac_sgmii>; 96 + 97 + phy-handle = <&phy0>; 98 + 99 + #address-cells = <1>; 100 + #size-cells = <0>; 101 + phy0: ethernet-phy@4 { 102 + reg = <4>; 103 + }; 104 + }; 105 + 106 + emac_sgmii: ethernet@410400 { 107 + compatible = "qcom,qdf2432-emac-sgmii"; 108 + reg = <0x0 0x00410400 0x0 0xc00>, /* Base address */ 109 + <0x0 0x00410000 0x0 0x400>; /* Per-lane digital */ 110 + interrupts = <0 254 1>; 111 + };
+6
MAINTAINERS
··· 9696 9696 S: Supported 9697 9697 F: drivers/net/wireless/ath/ath10k/ 9698 9698 9699 + QUALCOMM EMAC GIGABIT ETHERNET DRIVER 9700 + M: Timur Tabi <timur@codeaurora.org> 9701 + L: netdev@vger.kernel.org 9702 + S: Supported 9703 + F: drivers/net/ethernet/qualcomm/emac/ 9704 + 9699 9705 QUALCOMM HEXAGON ARCHITECTURE 9700 9706 M: Richard Kuo <rkuo@codeaurora.org> 9701 9707 L: linux-hexagon@vger.kernel.org
+12
drivers/net/ethernet/qualcomm/Kconfig
··· 24 24 To compile this driver as a module, choose M here. The module 25 25 will be called qcaspi. 26 26 27 + config QCOM_EMAC 28 + tristate "Qualcomm Technologies, Inc. EMAC Gigabit Ethernet support" 29 + select CRC32 30 + select PHYLIB 31 + ---help--- 32 + This driver supports the Qualcomm Technologies, Inc. Gigabit 33 + Ethernet Media Access Controller (EMAC). The controller 34 + supports IEEE 802.3-2002, half-duplex mode at 10/100 Mb/s, 35 + full-duplex mode at 10/100/1000Mb/s, Wake On LAN (WOL) for 36 + low power, Receive-Side Scaling (RSS), and IEEE 1588-2008 37 + Precision Clock Synchronization Protocol. 38 + 27 39 endif # NET_VENDOR_QUALCOMM
+2
drivers/net/ethernet/qualcomm/Makefile
··· 4 4 5 5 obj-$(CONFIG_QCA7000) += qcaspi.o 6 6 qcaspi-objs := qca_spi.o qca_framing.o qca_7k.o qca_debug.o 7 + 8 + obj-y += emac/
+7
drivers/net/ethernet/qualcomm/emac/Makefile
··· 1 + # 2 + # Makefile for the Qualcomm Technologies, Inc. EMAC Gigabit Ethernet driver 3 + # 4 + 5 + obj-$(CONFIG_QCOM_EMAC) += qcom-emac.o 6 + 7 + qcom-emac-objs := emac.o emac-mac.o emac-phy.o emac-sgmii.o
+1528
drivers/net/ethernet/qualcomm/emac/emac-mac.c
··· 1 + /* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 2 + * 3 + * This program is free software; you can redistribute it and/or modify 4 + * it under the terms of the GNU General Public License version 2 and 5 + * only version 2 as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope that it will be useful, 8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 + * GNU General Public License for more details. 11 + */ 12 + 13 + /* Qualcomm Technologies, Inc. EMAC Ethernet Controller MAC layer support 14 + */ 15 + 16 + #include <linux/tcp.h> 17 + #include <linux/ip.h> 18 + #include <linux/ipv6.h> 19 + #include <linux/crc32.h> 20 + #include <linux/if_vlan.h> 21 + #include <linux/jiffies.h> 22 + #include <linux/phy.h> 23 + #include <linux/of.h> 24 + #include <net/ip6_checksum.h> 25 + #include "emac.h" 26 + #include "emac-sgmii.h" 27 + 28 + /* EMAC base register offsets */ 29 + #define EMAC_MAC_CTRL 0x001480 30 + #define EMAC_WOL_CTRL0 0x0014a0 31 + #define EMAC_RSS_KEY0 0x0014b0 32 + #define EMAC_H1TPD_BASE_ADDR_LO 0x0014e0 33 + #define EMAC_H2TPD_BASE_ADDR_LO 0x0014e4 34 + #define EMAC_H3TPD_BASE_ADDR_LO 0x0014e8 35 + #define EMAC_INTER_SRAM_PART9 0x001534 36 + #define EMAC_DESC_CTRL_0 0x001540 37 + #define EMAC_DESC_CTRL_1 0x001544 38 + #define EMAC_DESC_CTRL_2 0x001550 39 + #define EMAC_DESC_CTRL_10 0x001554 40 + #define EMAC_DESC_CTRL_12 0x001558 41 + #define EMAC_DESC_CTRL_13 0x00155c 42 + #define EMAC_DESC_CTRL_3 0x001560 43 + #define EMAC_DESC_CTRL_4 0x001564 44 + #define EMAC_DESC_CTRL_5 0x001568 45 + #define EMAC_DESC_CTRL_14 0x00156c 46 + #define EMAC_DESC_CTRL_15 0x001570 47 + #define EMAC_DESC_CTRL_16 0x001574 48 + #define EMAC_DESC_CTRL_6 0x001578 49 + #define EMAC_DESC_CTRL_8 0x001580 50 + #define EMAC_DESC_CTRL_9 0x001584 51 + #define EMAC_DESC_CTRL_11 0x001588 52 + #define EMAC_TXQ_CTRL_0 0x001590 53 + 
#define EMAC_TXQ_CTRL_1 0x001594 54 + #define EMAC_TXQ_CTRL_2 0x001598 55 + #define EMAC_RXQ_CTRL_0 0x0015a0 56 + #define EMAC_RXQ_CTRL_1 0x0015a4 57 + #define EMAC_RXQ_CTRL_2 0x0015a8 58 + #define EMAC_RXQ_CTRL_3 0x0015ac 59 + #define EMAC_BASE_CPU_NUMBER 0x0015b8 60 + #define EMAC_DMA_CTRL 0x0015c0 61 + #define EMAC_MAILBOX_0 0x0015e0 62 + #define EMAC_MAILBOX_5 0x0015e4 63 + #define EMAC_MAILBOX_6 0x0015e8 64 + #define EMAC_MAILBOX_13 0x0015ec 65 + #define EMAC_MAILBOX_2 0x0015f4 66 + #define EMAC_MAILBOX_3 0x0015f8 67 + #define EMAC_MAILBOX_11 0x00160c 68 + #define EMAC_AXI_MAST_CTRL 0x001610 69 + #define EMAC_MAILBOX_12 0x001614 70 + #define EMAC_MAILBOX_9 0x001618 71 + #define EMAC_MAILBOX_10 0x00161c 72 + #define EMAC_ATHR_HEADER_CTRL 0x001620 73 + #define EMAC_CLK_GATE_CTRL 0x001814 74 + #define EMAC_MISC_CTRL 0x001990 75 + #define EMAC_MAILBOX_7 0x0019e0 76 + #define EMAC_MAILBOX_8 0x0019e4 77 + #define EMAC_MAILBOX_15 0x001bd4 78 + #define EMAC_MAILBOX_16 0x001bd8 79 + 80 + /* EMAC_MAC_CTRL */ 81 + #define SINGLE_PAUSE_MODE 0x10000000 82 + #define DEBUG_MODE 0x08000000 83 + #define BROAD_EN 0x04000000 84 + #define MULTI_ALL 0x02000000 85 + #define RX_CHKSUM_EN 0x01000000 86 + #define HUGE 0x00800000 87 + #define SPEED(x) (((x) & 0x3) << 20) 88 + #define SPEED_MASK SPEED(0x3) 89 + #define SIMR 0x00080000 90 + #define TPAUSE 0x00010000 91 + #define PROM_MODE 0x00008000 92 + #define VLAN_STRIP 0x00004000 93 + #define PRLEN_BMSK 0x00003c00 94 + #define PRLEN_SHFT 10 95 + #define HUGEN 0x00000200 96 + #define FLCHK 0x00000100 97 + #define PCRCE 0x00000080 98 + #define CRCE 0x00000040 99 + #define FULLD 0x00000020 100 + #define MAC_LP_EN 0x00000010 101 + #define RXFC 0x00000008 102 + #define TXFC 0x00000004 103 + #define RXEN 0x00000002 104 + #define TXEN 0x00000001 105 + 106 + 107 + /* EMAC_WOL_CTRL0 */ 108 + #define LK_CHG_PME 0x20 109 + #define LK_CHG_EN 0x10 110 + #define MG_FRAME_PME 0x8 111 + #define MG_FRAME_EN 0x4 112 + #define WK_FRAME_EN 0x1 113 + 114 
+ /* EMAC_DESC_CTRL_3 */ 115 + #define RFD_RING_SIZE_BMSK 0xfff 116 + 117 + /* EMAC_DESC_CTRL_4 */ 118 + #define RX_BUFFER_SIZE_BMSK 0xffff 119 + 120 + /* EMAC_DESC_CTRL_6 */ 121 + #define RRD_RING_SIZE_BMSK 0xfff 122 + 123 + /* EMAC_DESC_CTRL_9 */ 124 + #define TPD_RING_SIZE_BMSK 0xffff 125 + 126 + /* EMAC_TXQ_CTRL_0 */ 127 + #define NUM_TXF_BURST_PREF_BMSK 0xffff0000 128 + #define NUM_TXF_BURST_PREF_SHFT 16 129 + #define LS_8023_SP 0x80 130 + #define TXQ_MODE 0x40 131 + #define TXQ_EN 0x20 132 + #define IP_OP_SP 0x10 133 + #define NUM_TPD_BURST_PREF_BMSK 0xf 134 + #define NUM_TPD_BURST_PREF_SHFT 0 135 + 136 + /* EMAC_TXQ_CTRL_1 */ 137 + #define JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK 0x7ff 138 + 139 + /* EMAC_TXQ_CTRL_2 */ 140 + #define TXF_HWM_BMSK 0xfff0000 141 + #define TXF_LWM_BMSK 0xfff 142 + 143 + /* EMAC_RXQ_CTRL_0 */ 144 + #define RXQ_EN BIT(31) 145 + #define CUT_THRU_EN BIT(30) 146 + #define RSS_HASH_EN BIT(29) 147 + #define NUM_RFD_BURST_PREF_BMSK 0x3f00000 148 + #define NUM_RFD_BURST_PREF_SHFT 20 149 + #define IDT_TABLE_SIZE_BMSK 0x1ff00 150 + #define IDT_TABLE_SIZE_SHFT 8 151 + #define SP_IPV6 0x80 152 + 153 + /* EMAC_RXQ_CTRL_1 */ 154 + #define JUMBO_1KAH_BMSK 0xf000 155 + #define JUMBO_1KAH_SHFT 12 156 + #define RFD_PREF_LOW_TH 0x10 157 + #define RFD_PREF_LOW_THRESHOLD_BMSK 0xfc0 158 + #define RFD_PREF_LOW_THRESHOLD_SHFT 6 159 + #define RFD_PREF_UP_TH 0x10 160 + #define RFD_PREF_UP_THRESHOLD_BMSK 0x3f 161 + #define RFD_PREF_UP_THRESHOLD_SHFT 0 162 + 163 + /* EMAC_RXQ_CTRL_2 */ 164 + #define RXF_DOF_THRESFHOLD 0x1a0 165 + #define RXF_DOF_THRESHOLD_BMSK 0xfff0000 166 + #define RXF_DOF_THRESHOLD_SHFT 16 167 + #define RXF_UOF_THRESFHOLD 0xbe 168 + #define RXF_UOF_THRESHOLD_BMSK 0xfff 169 + #define RXF_UOF_THRESHOLD_SHFT 0 170 + 171 + /* EMAC_RXQ_CTRL_3 */ 172 + #define RXD_TIMER_BMSK 0xffff0000 173 + #define RXD_THRESHOLD_BMSK 0xfff 174 + #define RXD_THRESHOLD_SHFT 0 175 + 176 + /* EMAC_DMA_CTRL */ 177 + #define DMAW_DLY_CNT_BMSK 0xf0000 178 + #define 
DMAW_DLY_CNT_SHFT 16 179 + #define DMAR_DLY_CNT_BMSK 0xf800 180 + #define DMAR_DLY_CNT_SHFT 11 181 + #define DMAR_REQ_PRI 0x400 182 + #define REGWRBLEN_BMSK 0x380 183 + #define REGWRBLEN_SHFT 7 184 + #define REGRDBLEN_BMSK 0x70 185 + #define REGRDBLEN_SHFT 4 186 + #define OUT_ORDER_MODE 0x4 187 + #define ENH_ORDER_MODE 0x2 188 + #define IN_ORDER_MODE 0x1 189 + 190 + /* EMAC_MAILBOX_13 */ 191 + #define RFD3_PROC_IDX_BMSK 0xfff0000 192 + #define RFD3_PROC_IDX_SHFT 16 193 + #define RFD3_PROD_IDX_BMSK 0xfff 194 + #define RFD3_PROD_IDX_SHFT 0 195 + 196 + /* EMAC_MAILBOX_2 */ 197 + #define NTPD_CONS_IDX_BMSK 0xffff0000 198 + #define NTPD_CONS_IDX_SHFT 16 199 + 200 + /* EMAC_MAILBOX_3 */ 201 + #define RFD0_CONS_IDX_BMSK 0xfff 202 + #define RFD0_CONS_IDX_SHFT 0 203 + 204 + /* EMAC_MAILBOX_11 */ 205 + #define H3TPD_PROD_IDX_BMSK 0xffff0000 206 + #define H3TPD_PROD_IDX_SHFT 16 207 + 208 + /* EMAC_AXI_MAST_CTRL */ 209 + #define DATA_BYTE_SWAP 0x8 210 + #define MAX_BOUND 0x2 211 + #define MAX_BTYPE 0x1 212 + 213 + /* EMAC_MAILBOX_12 */ 214 + #define H3TPD_CONS_IDX_BMSK 0xffff0000 215 + #define H3TPD_CONS_IDX_SHFT 16 216 + 217 + /* EMAC_MAILBOX_9 */ 218 + #define H2TPD_PROD_IDX_BMSK 0xffff 219 + #define H2TPD_PROD_IDX_SHFT 0 220 + 221 + /* EMAC_MAILBOX_10 */ 222 + #define H1TPD_CONS_IDX_BMSK 0xffff0000 223 + #define H1TPD_CONS_IDX_SHFT 16 224 + #define H2TPD_CONS_IDX_BMSK 0xffff 225 + #define H2TPD_CONS_IDX_SHFT 0 226 + 227 + /* EMAC_ATHR_HEADER_CTRL */ 228 + #define HEADER_CNT_EN 0x2 229 + #define HEADER_ENABLE 0x1 230 + 231 + /* EMAC_MAILBOX_0 */ 232 + #define RFD0_PROC_IDX_BMSK 0xfff0000 233 + #define RFD0_PROC_IDX_SHFT 16 234 + #define RFD0_PROD_IDX_BMSK 0xfff 235 + #define RFD0_PROD_IDX_SHFT 0 236 + 237 + /* EMAC_MAILBOX_5 */ 238 + #define RFD1_PROC_IDX_BMSK 0xfff0000 239 + #define RFD1_PROC_IDX_SHFT 16 240 + #define RFD1_PROD_IDX_BMSK 0xfff 241 + #define RFD1_PROD_IDX_SHFT 0 242 + 243 + /* EMAC_MISC_CTRL */ 244 + #define RX_UNCPL_INT_EN 0x1 245 + 246 + /* EMAC_MAILBOX_7 
*/ 247 + #define RFD2_CONS_IDX_BMSK 0xfff0000 248 + #define RFD2_CONS_IDX_SHFT 16 249 + #define RFD1_CONS_IDX_BMSK 0xfff 250 + #define RFD1_CONS_IDX_SHFT 0 251 + 252 + /* EMAC_MAILBOX_8 */ 253 + #define RFD3_CONS_IDX_BMSK 0xfff 254 + #define RFD3_CONS_IDX_SHFT 0 255 + 256 + /* EMAC_MAILBOX_15 */ 257 + #define NTPD_PROD_IDX_BMSK 0xffff 258 + #define NTPD_PROD_IDX_SHFT 0 259 + 260 + /* EMAC_MAILBOX_16 */ 261 + #define H1TPD_PROD_IDX_BMSK 0xffff 262 + #define H1TPD_PROD_IDX_SHFT 0 263 + 264 + #define RXQ0_RSS_HSTYP_IPV6_TCP_EN 0x20 265 + #define RXQ0_RSS_HSTYP_IPV6_EN 0x10 266 + #define RXQ0_RSS_HSTYP_IPV4_TCP_EN 0x8 267 + #define RXQ0_RSS_HSTYP_IPV4_EN 0x4 268 + 269 + /* EMAC_EMAC_WRAPPER_TX_TS_INX */ 270 + #define EMAC_WRAPPER_TX_TS_EMPTY BIT(31) 271 + #define EMAC_WRAPPER_TX_TS_INX_BMSK 0xffff 272 + 273 + struct emac_skb_cb { 274 + u32 tpd_idx; 275 + unsigned long jiffies; 276 + }; 277 + 278 + #define EMAC_SKB_CB(skb) ((struct emac_skb_cb *)(skb)->cb) 279 + #define EMAC_RSS_IDT_SIZE 256 280 + #define JUMBO_1KAH 0x4 281 + #define RXD_TH 0x100 282 + #define EMAC_TPD_LAST_FRAGMENT 0x80000000 283 + #define EMAC_TPD_TSTAMP_SAVE 0x80000000 284 + 285 + /* EMAC Errors in emac_rrd.word[3] */ 286 + #define EMAC_RRD_L4F BIT(14) 287 + #define EMAC_RRD_IPF BIT(15) 288 + #define EMAC_RRD_CRC BIT(21) 289 + #define EMAC_RRD_FAE BIT(22) 290 + #define EMAC_RRD_TRN BIT(23) 291 + #define EMAC_RRD_RNT BIT(24) 292 + #define EMAC_RRD_INC BIT(25) 293 + #define EMAC_RRD_FOV BIT(29) 294 + #define EMAC_RRD_LEN BIT(30) 295 + 296 + /* Error bits that will result in a received frame being discarded */ 297 + #define EMAC_RRD_ERROR (EMAC_RRD_IPF | EMAC_RRD_CRC | EMAC_RRD_FAE | \ 298 + EMAC_RRD_TRN | EMAC_RRD_RNT | EMAC_RRD_INC | \ 299 + EMAC_RRD_FOV | EMAC_RRD_LEN) 300 + #define EMAC_RRD_STATS_DW_IDX 3 301 + 302 + #define EMAC_RRD(RXQ, SIZE, IDX) ((RXQ)->rrd.v_addr + (SIZE * (IDX))) 303 + #define EMAC_RFD(RXQ, SIZE, IDX) ((RXQ)->rfd.v_addr + (SIZE * (IDX))) 304 + #define EMAC_TPD(TXQ, SIZE, IDX) 
((TXQ)->tpd.v_addr + (SIZE * (IDX))) 305 + 306 + #define GET_RFD_BUFFER(RXQ, IDX) (&((RXQ)->rfd.rfbuff[(IDX)])) 307 + #define GET_TPD_BUFFER(RTQ, IDX) (&((RTQ)->tpd.tpbuff[(IDX)])) 308 + 309 + #define EMAC_TX_POLL_HWTXTSTAMP_THRESHOLD 8 310 + 311 + #define ISR_RX_PKT (\ 312 + RX_PKT_INT0 |\ 313 + RX_PKT_INT1 |\ 314 + RX_PKT_INT2 |\ 315 + RX_PKT_INT3) 316 + 317 + #define EMAC_MAC_IRQ_RES "core0" 318 + 319 + void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr) 320 + { 321 + u32 crc32, bit, reg, mta; 322 + 323 + /* Calculate the CRC of the MAC address */ 324 + crc32 = ether_crc(ETH_ALEN, addr); 325 + 326 + /* The HASH Table is an array of 2 32-bit registers. It is 327 + * treated like an array of 64 bits (BitArray[hash_value]). 328 + * Use the upper 6 bits of the above CRC as the hash value. 329 + */ 330 + reg = (crc32 >> 31) & 0x1; 331 + bit = (crc32 >> 26) & 0x1F; 332 + 333 + mta = readl(adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2)); 334 + mta |= BIT(bit); 335 + writel(mta, adpt->base + EMAC_HASH_TAB_REG0 + (reg << 2)); 336 + } 337 + 338 + void emac_mac_multicast_addr_clear(struct emac_adapter *adpt) 339 + { 340 + writel(0, adpt->base + EMAC_HASH_TAB_REG0); 341 + writel(0, adpt->base + EMAC_HASH_TAB_REG1); 342 + } 343 + 344 + /* definitions for RSS */ 345 + #define EMAC_RSS_KEY(_i, _type) \ 346 + (EMAC_RSS_KEY0 + ((_i) * sizeof(_type))) 347 + #define EMAC_RSS_TBL(_i, _type) \ 348 + (EMAC_IDT_TABLE0 + ((_i) * sizeof(_type))) 349 + 350 + /* Config MAC modes */ 351 + void emac_mac_mode_config(struct emac_adapter *adpt) 352 + { 353 + struct net_device *netdev = adpt->netdev; 354 + u32 mac; 355 + 356 + mac = readl(adpt->base + EMAC_MAC_CTRL); 357 + mac &= ~(VLAN_STRIP | PROM_MODE | MULTI_ALL | MAC_LP_EN); 358 + 359 + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) 360 + mac |= VLAN_STRIP; 361 + 362 + if (netdev->flags & IFF_PROMISC) 363 + mac |= PROM_MODE; 364 + 365 + if (netdev->flags & IFF_ALLMULTI) 366 + mac |= MULTI_ALL; 367 + 368 + writel(mac, 
adpt->base + EMAC_MAC_CTRL); 369 + } 370 + 371 + /* Config descriptor rings */ 372 + static void emac_mac_dma_rings_config(struct emac_adapter *adpt) 373 + { 374 + static const unsigned short tpd_q_offset[] = { 375 + EMAC_DESC_CTRL_8, EMAC_H1TPD_BASE_ADDR_LO, 376 + EMAC_H2TPD_BASE_ADDR_LO, EMAC_H3TPD_BASE_ADDR_LO}; 377 + static const unsigned short rfd_q_offset[] = { 378 + EMAC_DESC_CTRL_2, EMAC_DESC_CTRL_10, 379 + EMAC_DESC_CTRL_12, EMAC_DESC_CTRL_13}; 380 + static const unsigned short rrd_q_offset[] = { 381 + EMAC_DESC_CTRL_5, EMAC_DESC_CTRL_14, 382 + EMAC_DESC_CTRL_15, EMAC_DESC_CTRL_16}; 383 + 384 + /* TPD (Transmit Packet Descriptor) */ 385 + writel(upper_32_bits(adpt->tx_q.tpd.dma_addr), 386 + adpt->base + EMAC_DESC_CTRL_1); 387 + 388 + writel(lower_32_bits(adpt->tx_q.tpd.dma_addr), 389 + adpt->base + tpd_q_offset[0]); 390 + 391 + writel(adpt->tx_q.tpd.count & TPD_RING_SIZE_BMSK, 392 + adpt->base + EMAC_DESC_CTRL_9); 393 + 394 + /* RFD (Receive Free Descriptor) & RRD (Receive Return Descriptor) */ 395 + writel(upper_32_bits(adpt->rx_q.rfd.dma_addr), 396 + adpt->base + EMAC_DESC_CTRL_0); 397 + 398 + writel(lower_32_bits(adpt->rx_q.rfd.dma_addr), 399 + adpt->base + rfd_q_offset[0]); 400 + writel(lower_32_bits(adpt->rx_q.rrd.dma_addr), 401 + adpt->base + rrd_q_offset[0]); 402 + 403 + writel(adpt->rx_q.rfd.count & RFD_RING_SIZE_BMSK, 404 + adpt->base + EMAC_DESC_CTRL_3); 405 + writel(adpt->rx_q.rrd.count & RRD_RING_SIZE_BMSK, 406 + adpt->base + EMAC_DESC_CTRL_6); 407 + 408 + writel(adpt->rxbuf_size & RX_BUFFER_SIZE_BMSK, 409 + adpt->base + EMAC_DESC_CTRL_4); 410 + 411 + writel(0, adpt->base + EMAC_DESC_CTRL_11); 412 + 413 + /* Load all of the base addresses above and ensure that triggering HW to 414 + * read ring pointers is flushed 415 + */ 416 + writel(1, adpt->base + EMAC_INTER_SRAM_PART9); 417 + } 418 + 419 + /* Config transmit parameters */ 420 + static void emac_mac_tx_config(struct emac_adapter *adpt) 421 + { 422 + u32 val; 423 + 424 + 
writel((EMAC_MAX_TX_OFFLOAD_THRESH >> 3) & 425 + JUMBO_TASK_OFFLOAD_THRESHOLD_BMSK, adpt->base + EMAC_TXQ_CTRL_1); 426 + 427 + val = (adpt->tpd_burst << NUM_TPD_BURST_PREF_SHFT) & 428 + NUM_TPD_BURST_PREF_BMSK; 429 + 430 + val |= TXQ_MODE | LS_8023_SP; 431 + val |= (0x0100 << NUM_TXF_BURST_PREF_SHFT) & 432 + NUM_TXF_BURST_PREF_BMSK; 433 + 434 + writel(val, adpt->base + EMAC_TXQ_CTRL_0); 435 + emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_2, 436 + (TXF_HWM_BMSK | TXF_LWM_BMSK), 0); 437 + } 438 + 439 + /* Config receive parameters */ 440 + static void emac_mac_rx_config(struct emac_adapter *adpt) 441 + { 442 + u32 val; 443 + 444 + val = (adpt->rfd_burst << NUM_RFD_BURST_PREF_SHFT) & 445 + NUM_RFD_BURST_PREF_BMSK; 446 + val |= (SP_IPV6 | CUT_THRU_EN); 447 + 448 + writel(val, adpt->base + EMAC_RXQ_CTRL_0); 449 + 450 + val = readl(adpt->base + EMAC_RXQ_CTRL_1); 451 + val &= ~(JUMBO_1KAH_BMSK | RFD_PREF_LOW_THRESHOLD_BMSK | 452 + RFD_PREF_UP_THRESHOLD_BMSK); 453 + val |= (JUMBO_1KAH << JUMBO_1KAH_SHFT) | 454 + (RFD_PREF_LOW_TH << RFD_PREF_LOW_THRESHOLD_SHFT) | 455 + (RFD_PREF_UP_TH << RFD_PREF_UP_THRESHOLD_SHFT); 456 + writel(val, adpt->base + EMAC_RXQ_CTRL_1); 457 + 458 + val = readl(adpt->base + EMAC_RXQ_CTRL_2); 459 + val &= ~(RXF_DOF_THRESHOLD_BMSK | RXF_UOF_THRESHOLD_BMSK); 460 + val |= (RXF_DOF_THRESFHOLD << RXF_DOF_THRESHOLD_SHFT) | 461 + (RXF_UOF_THRESFHOLD << RXF_UOF_THRESHOLD_SHFT); 462 + writel(val, adpt->base + EMAC_RXQ_CTRL_2); 463 + 464 + val = readl(adpt->base + EMAC_RXQ_CTRL_3); 465 + val &= ~(RXD_TIMER_BMSK | RXD_THRESHOLD_BMSK); 466 + val |= RXD_TH << RXD_THRESHOLD_SHFT; 467 + writel(val, adpt->base + EMAC_RXQ_CTRL_3); 468 + } 469 + 470 + /* Config dma */ 471 + static void emac_mac_dma_config(struct emac_adapter *adpt) 472 + { 473 + u32 dma_ctrl = DMAR_REQ_PRI; 474 + 475 + switch (adpt->dma_order) { 476 + case emac_dma_ord_in: 477 + dma_ctrl |= IN_ORDER_MODE; 478 + break; 479 + case emac_dma_ord_enh: 480 + dma_ctrl |= ENH_ORDER_MODE; 481 + break; 482 + 
case emac_dma_ord_out: 483 + dma_ctrl |= OUT_ORDER_MODE; 484 + break; 485 + default: 486 + break; 487 + } 488 + 489 + dma_ctrl |= (((u32)adpt->dmar_block) << REGRDBLEN_SHFT) & 490 + REGRDBLEN_BMSK; 491 + dma_ctrl |= (((u32)adpt->dmaw_block) << REGWRBLEN_SHFT) & 492 + REGWRBLEN_BMSK; 493 + dma_ctrl |= (((u32)adpt->dmar_dly_cnt) << DMAR_DLY_CNT_SHFT) & 494 + DMAR_DLY_CNT_BMSK; 495 + dma_ctrl |= (((u32)adpt->dmaw_dly_cnt) << DMAW_DLY_CNT_SHFT) & 496 + DMAW_DLY_CNT_BMSK; 497 + 498 + /* config DMA and ensure that configuration is flushed to HW */ 499 + writel(dma_ctrl, adpt->base + EMAC_DMA_CTRL); 500 + } 501 + 502 + /* set MAC address */ 503 + static void emac_set_mac_address(struct emac_adapter *adpt, u8 *addr) 504 + { 505 + u32 sta; 506 + 507 + /* for example: 00-A0-C6-11-22-33 508 + * 0<-->C6112233, 1<-->00A0. 509 + */ 510 + 511 + /* low 32bit word */ 512 + sta = (((u32)addr[2]) << 24) | (((u32)addr[3]) << 16) | 513 + (((u32)addr[4]) << 8) | (((u32)addr[5])); 514 + writel(sta, adpt->base + EMAC_MAC_STA_ADDR0); 515 + 516 + /* hight 32bit word */ 517 + sta = (((u32)addr[0]) << 8) | (u32)addr[1]; 518 + writel(sta, adpt->base + EMAC_MAC_STA_ADDR1); 519 + } 520 + 521 + static void emac_mac_config(struct emac_adapter *adpt) 522 + { 523 + struct net_device *netdev = adpt->netdev; 524 + unsigned int max_frame; 525 + u32 val; 526 + 527 + emac_set_mac_address(adpt, netdev->dev_addr); 528 + 529 + max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 530 + adpt->rxbuf_size = netdev->mtu > EMAC_DEF_RX_BUF_SIZE ? 
531 + ALIGN(max_frame, 8) : EMAC_DEF_RX_BUF_SIZE; 532 + 533 + emac_mac_dma_rings_config(adpt); 534 + 535 + writel(netdev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, 536 + adpt->base + EMAC_MAX_FRAM_LEN_CTRL); 537 + 538 + emac_mac_tx_config(adpt); 539 + emac_mac_rx_config(adpt); 540 + emac_mac_dma_config(adpt); 541 + 542 + val = readl(adpt->base + EMAC_AXI_MAST_CTRL); 543 + val &= ~(DATA_BYTE_SWAP | MAX_BOUND); 544 + val |= MAX_BTYPE; 545 + writel(val, adpt->base + EMAC_AXI_MAST_CTRL); 546 + writel(0, adpt->base + EMAC_CLK_GATE_CTRL); 547 + writel(RX_UNCPL_INT_EN, adpt->base + EMAC_MISC_CTRL); 548 + } 549 + 550 + void emac_mac_reset(struct emac_adapter *adpt) 551 + { 552 + emac_mac_stop(adpt); 553 + 554 + emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, SOFT_RST); 555 + usleep_range(100, 150); /* reset may take up to 100usec */ 556 + 557 + /* interrupt clear-on-read */ 558 + emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN); 559 + } 560 + 561 + void emac_mac_start(struct emac_adapter *adpt) 562 + { 563 + struct phy_device *phydev = adpt->phydev; 564 + u32 mac, csr1; 565 + 566 + /* enable tx queue */ 567 + emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, 0, TXQ_EN); 568 + 569 + /* enable rx queue */ 570 + emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, 0, RXQ_EN); 571 + 572 + /* enable mac control */ 573 + mac = readl(adpt->base + EMAC_MAC_CTRL); 574 + csr1 = readl(adpt->csr + EMAC_EMAC_WRAPPER_CSR1); 575 + 576 + mac |= TXEN | RXEN; /* enable RX/TX */ 577 + 578 + /* We don't have ethtool support yet, so force flow-control mode 579 + * to 'full' always. 
580 + */ 581 + mac |= TXFC | RXFC; 582 + 583 + /* setup link speed */ 584 + mac &= ~SPEED_MASK; 585 + if (phydev->speed == SPEED_1000) { 586 + mac |= SPEED(2); 587 + csr1 |= FREQ_MODE; 588 + } else { 589 + mac |= SPEED(1); 590 + csr1 &= ~FREQ_MODE; 591 + } 592 + 593 + if (phydev->duplex == DUPLEX_FULL) 594 + mac |= FULLD; 595 + else 596 + mac &= ~FULLD; 597 + 598 + /* other parameters */ 599 + mac |= (CRCE | PCRCE); 600 + mac |= ((adpt->preamble << PRLEN_SHFT) & PRLEN_BMSK); 601 + mac |= BROAD_EN; 602 + mac |= FLCHK; 603 + mac &= ~RX_CHKSUM_EN; 604 + mac &= ~(HUGEN | VLAN_STRIP | TPAUSE | SIMR | HUGE | MULTI_ALL | 605 + DEBUG_MODE | SINGLE_PAUSE_MODE); 606 + 607 + writel_relaxed(csr1, adpt->csr + EMAC_EMAC_WRAPPER_CSR1); 608 + 609 + writel_relaxed(mac, adpt->base + EMAC_MAC_CTRL); 610 + 611 + /* enable interrupt read clear, low power sleep mode and 612 + * the irq moderators 613 + */ 614 + 615 + writel_relaxed(adpt->irq_mod, adpt->base + EMAC_IRQ_MOD_TIM_INIT); 616 + writel_relaxed(INT_RD_CLR_EN | LPW_MODE | IRQ_MODERATOR_EN | 617 + IRQ_MODERATOR2_EN, adpt->base + EMAC_DMA_MAS_CTRL); 618 + 619 + emac_mac_mode_config(adpt); 620 + 621 + emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL, 622 + (HEADER_ENABLE | HEADER_CNT_EN), 0); 623 + 624 + emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN); 625 + } 626 + 627 + void emac_mac_stop(struct emac_adapter *adpt) 628 + { 629 + emac_reg_update32(adpt->base + EMAC_RXQ_CTRL_0, RXQ_EN, 0); 630 + emac_reg_update32(adpt->base + EMAC_TXQ_CTRL_0, TXQ_EN, 0); 631 + emac_reg_update32(adpt->base + EMAC_MAC_CTRL, TXEN | RXEN, 0); 632 + usleep_range(1000, 1050); /* stopping mac may take upto 1msec */ 633 + } 634 + 635 + /* Free all descriptors of given transmit queue */ 636 + static void emac_tx_q_descs_free(struct emac_adapter *adpt) 637 + { 638 + struct emac_tx_queue *tx_q = &adpt->tx_q; 639 + unsigned int i; 640 + size_t size; 641 + 642 + /* ring already cleared, nothing to do */ 643 + if (!tx_q->tpd.tpbuff) 644 + 
return; 645 + 646 + for (i = 0; i < tx_q->tpd.count; i++) { 647 + struct emac_buffer *tpbuf = GET_TPD_BUFFER(tx_q, i); 648 + 649 + if (tpbuf->dma_addr) { 650 + dma_unmap_single(adpt->netdev->dev.parent, 651 + tpbuf->dma_addr, tpbuf->length, 652 + DMA_TO_DEVICE); 653 + tpbuf->dma_addr = 0; 654 + } 655 + if (tpbuf->skb) { 656 + dev_kfree_skb_any(tpbuf->skb); 657 + tpbuf->skb = NULL; 658 + } 659 + } 660 + 661 + size = sizeof(struct emac_buffer) * tx_q->tpd.count; 662 + memset(tx_q->tpd.tpbuff, 0, size); 663 + 664 + /* clear the descriptor ring */ 665 + memset(tx_q->tpd.v_addr, 0, tx_q->tpd.size); 666 + 667 + tx_q->tpd.consume_idx = 0; 668 + tx_q->tpd.produce_idx = 0; 669 + } 670 + 671 + /* Free all descriptors of given receive queue */ 672 + static void emac_rx_q_free_descs(struct emac_adapter *adpt) 673 + { 674 + struct device *dev = adpt->netdev->dev.parent; 675 + struct emac_rx_queue *rx_q = &adpt->rx_q; 676 + unsigned int i; 677 + size_t size; 678 + 679 + /* ring already cleared, nothing to do */ 680 + if (!rx_q->rfd.rfbuff) 681 + return; 682 + 683 + for (i = 0; i < rx_q->rfd.count; i++) { 684 + struct emac_buffer *rfbuf = GET_RFD_BUFFER(rx_q, i); 685 + 686 + if (rfbuf->dma_addr) { 687 + dma_unmap_single(dev, rfbuf->dma_addr, rfbuf->length, 688 + DMA_FROM_DEVICE); 689 + rfbuf->dma_addr = 0; 690 + } 691 + if (rfbuf->skb) { 692 + dev_kfree_skb(rfbuf->skb); 693 + rfbuf->skb = NULL; 694 + } 695 + } 696 + 697 + size = sizeof(struct emac_buffer) * rx_q->rfd.count; 698 + memset(rx_q->rfd.rfbuff, 0, size); 699 + 700 + /* clear the descriptor rings */ 701 + memset(rx_q->rrd.v_addr, 0, rx_q->rrd.size); 702 + rx_q->rrd.produce_idx = 0; 703 + rx_q->rrd.consume_idx = 0; 704 + 705 + memset(rx_q->rfd.v_addr, 0, rx_q->rfd.size); 706 + rx_q->rfd.produce_idx = 0; 707 + rx_q->rfd.consume_idx = 0; 708 + } 709 + 710 + /* Free all buffers associated with given transmit queue */ 711 + static void emac_tx_q_bufs_free(struct emac_adapter *adpt) 712 + { 713 + struct emac_tx_queue *tx_q = 
&adpt->tx_q; 714 + 715 + emac_tx_q_descs_free(adpt); 716 + 717 + kfree(tx_q->tpd.tpbuff); 718 + tx_q->tpd.tpbuff = NULL; 719 + tx_q->tpd.v_addr = NULL; 720 + tx_q->tpd.dma_addr = 0; 721 + tx_q->tpd.size = 0; 722 + } 723 + 724 + /* Allocate TX descriptor ring for the given transmit queue */ 725 + static int emac_tx_q_desc_alloc(struct emac_adapter *adpt, 726 + struct emac_tx_queue *tx_q) 727 + { 728 + struct emac_ring_header *ring_header = &adpt->ring_header; 729 + size_t size; 730 + 731 + size = sizeof(struct emac_buffer) * tx_q->tpd.count; 732 + tx_q->tpd.tpbuff = kzalloc(size, GFP_KERNEL); 733 + if (!tx_q->tpd.tpbuff) 734 + return -ENOMEM; 735 + 736 + tx_q->tpd.size = tx_q->tpd.count * (adpt->tpd_size * 4); 737 + tx_q->tpd.dma_addr = ring_header->dma_addr + ring_header->used; 738 + tx_q->tpd.v_addr = ring_header->v_addr + ring_header->used; 739 + ring_header->used += ALIGN(tx_q->tpd.size, 8); 740 + tx_q->tpd.produce_idx = 0; 741 + tx_q->tpd.consume_idx = 0; 742 + 743 + return 0; 744 + } 745 + 746 + /* Free all buffers associated with given transmit queue */ 747 + static void emac_rx_q_bufs_free(struct emac_adapter *adpt) 748 + { 749 + struct emac_rx_queue *rx_q = &adpt->rx_q; 750 + 751 + emac_rx_q_free_descs(adpt); 752 + 753 + kfree(rx_q->rfd.rfbuff); 754 + rx_q->rfd.rfbuff = NULL; 755 + 756 + rx_q->rfd.v_addr = NULL; 757 + rx_q->rfd.dma_addr = 0; 758 + rx_q->rfd.size = 0; 759 + 760 + rx_q->rrd.v_addr = NULL; 761 + rx_q->rrd.dma_addr = 0; 762 + rx_q->rrd.size = 0; 763 + } 764 + 765 + /* Allocate RX descriptor rings for the given receive queue */ 766 + static int emac_rx_descs_alloc(struct emac_adapter *adpt) 767 + { 768 + struct emac_ring_header *ring_header = &adpt->ring_header; 769 + struct emac_rx_queue *rx_q = &adpt->rx_q; 770 + size_t size; 771 + 772 + size = sizeof(struct emac_buffer) * rx_q->rfd.count; 773 + rx_q->rfd.rfbuff = kzalloc(size, GFP_KERNEL); 774 + if (!rx_q->rfd.rfbuff) 775 + return -ENOMEM; 776 + 777 + rx_q->rrd.size = rx_q->rrd.count * 
(adpt->rrd_size * 4); 778 + rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4); 779 + 780 + rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used; 781 + rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used; 782 + ring_header->used += ALIGN(rx_q->rrd.size, 8); 783 + 784 + rx_q->rfd.dma_addr = ring_header->dma_addr + ring_header->used; 785 + rx_q->rfd.v_addr = ring_header->v_addr + ring_header->used; 786 + ring_header->used += ALIGN(rx_q->rfd.size, 8); 787 + 788 + rx_q->rrd.produce_idx = 0; 789 + rx_q->rrd.consume_idx = 0; 790 + 791 + rx_q->rfd.produce_idx = 0; 792 + rx_q->rfd.consume_idx = 0; 793 + 794 + return 0; 795 + } 796 + 797 + /* Allocate all TX and RX descriptor rings */ 798 + int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt) 799 + { 800 + struct emac_ring_header *ring_header = &adpt->ring_header; 801 + struct device *dev = adpt->netdev->dev.parent; 802 + unsigned int num_tx_descs = adpt->tx_desc_cnt; 803 + unsigned int num_rx_descs = adpt->rx_desc_cnt; 804 + int ret; 805 + 806 + adpt->tx_q.tpd.count = adpt->tx_desc_cnt; 807 + 808 + adpt->rx_q.rrd.count = adpt->rx_desc_cnt; 809 + adpt->rx_q.rfd.count = adpt->rx_desc_cnt; 810 + 811 + /* Ring DMA buffer. Each ring may need up to 8 bytes for alignment, 812 + * hence the additional padding bytes are allocated. 
813 + */ 814 + ring_header->size = num_tx_descs * (adpt->tpd_size * 4) + 815 + num_rx_descs * (adpt->rfd_size * 4) + 816 + num_rx_descs * (adpt->rrd_size * 4) + 817 + 8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */ 818 + 819 + ring_header->used = 0; 820 + ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size, 821 + &ring_header->dma_addr, 822 + GFP_KERNEL); 823 + if (!ring_header->v_addr) 824 + return -ENOMEM; 825 + 826 + ring_header->used = ALIGN(ring_header->dma_addr, 8) - 827 + ring_header->dma_addr; 828 + 829 + ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q); 830 + if (ret) { 831 + netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n"); 832 + goto err_alloc_tx; 833 + } 834 + 835 + ret = emac_rx_descs_alloc(adpt); 836 + if (ret) { 837 + netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n"); 838 + goto err_alloc_rx; 839 + } 840 + 841 + return 0; 842 + 843 + err_alloc_rx: 844 + emac_tx_q_bufs_free(adpt); 845 + err_alloc_tx: 846 + dma_free_coherent(dev, ring_header->size, 847 + ring_header->v_addr, ring_header->dma_addr); 848 + 849 + ring_header->v_addr = NULL; 850 + ring_header->dma_addr = 0; 851 + ring_header->size = 0; 852 + ring_header->used = 0; 853 + 854 + return ret; 855 + } 856 + 857 + /* Free all TX and RX descriptor rings */ 858 + void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt) 859 + { 860 + struct emac_ring_header *ring_header = &adpt->ring_header; 861 + struct device *dev = adpt->netdev->dev.parent; 862 + 863 + emac_tx_q_bufs_free(adpt); 864 + emac_rx_q_bufs_free(adpt); 865 + 866 + dma_free_coherent(dev, ring_header->size, 867 + ring_header->v_addr, ring_header->dma_addr); 868 + 869 + ring_header->v_addr = NULL; 870 + ring_header->dma_addr = 0; 871 + ring_header->size = 0; 872 + ring_header->used = 0; 873 + } 874 + 875 + /* Initialize descriptor rings */ 876 + static void emac_mac_rx_tx_ring_reset_all(struct emac_adapter *adpt) 877 + { 878 + unsigned int i; 879 + 880 + adpt->tx_q.tpd.produce_idx = 0; 881 + 
adpt->tx_q.tpd.consume_idx = 0; 882 + for (i = 0; i < adpt->tx_q.tpd.count; i++) 883 + adpt->tx_q.tpd.tpbuff[i].dma_addr = 0; 884 + 885 + adpt->rx_q.rrd.produce_idx = 0; 886 + adpt->rx_q.rrd.consume_idx = 0; 887 + adpt->rx_q.rfd.produce_idx = 0; 888 + adpt->rx_q.rfd.consume_idx = 0; 889 + for (i = 0; i < adpt->rx_q.rfd.count; i++) 890 + adpt->rx_q.rfd.rfbuff[i].dma_addr = 0; 891 + } 892 + 893 + /* Produce new receive free descriptor */ 894 + static void emac_mac_rx_rfd_create(struct emac_adapter *adpt, 895 + struct emac_rx_queue *rx_q, 896 + dma_addr_t addr) 897 + { 898 + u32 *hw_rfd = EMAC_RFD(rx_q, adpt->rfd_size, rx_q->rfd.produce_idx); 899 + 900 + *(hw_rfd++) = lower_32_bits(addr); 901 + *hw_rfd = upper_32_bits(addr); 902 + 903 + if (++rx_q->rfd.produce_idx == rx_q->rfd.count) 904 + rx_q->rfd.produce_idx = 0; 905 + } 906 + 907 + /* Fill up receive queue's RFD with preallocated receive buffers */ 908 + static void emac_mac_rx_descs_refill(struct emac_adapter *adpt, 909 + struct emac_rx_queue *rx_q) 910 + { 911 + struct emac_buffer *curr_rxbuf; 912 + struct emac_buffer *next_rxbuf; 913 + unsigned int count = 0; 914 + u32 next_produce_idx; 915 + 916 + next_produce_idx = rx_q->rfd.produce_idx + 1; 917 + if (next_produce_idx == rx_q->rfd.count) 918 + next_produce_idx = 0; 919 + 920 + curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx); 921 + next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx); 922 + 923 + /* this always has a blank rx_buffer*/ 924 + while (!next_rxbuf->dma_addr) { 925 + struct sk_buff *skb; 926 + int ret; 927 + 928 + skb = netdev_alloc_skb_ip_align(adpt->netdev, adpt->rxbuf_size); 929 + if (!skb) 930 + break; 931 + 932 + curr_rxbuf->dma_addr = 933 + dma_map_single(adpt->netdev->dev.parent, skb->data, 934 + curr_rxbuf->length, DMA_FROM_DEVICE); 935 + ret = dma_mapping_error(adpt->netdev->dev.parent, 936 + curr_rxbuf->dma_addr); 937 + if (ret) { 938 + dev_kfree_skb(skb); 939 + break; 940 + } 941 + curr_rxbuf->skb = skb; 942 + curr_rxbuf->length = 
adpt->rxbuf_size; 943 + 944 + emac_mac_rx_rfd_create(adpt, rx_q, curr_rxbuf->dma_addr); 945 + next_produce_idx = rx_q->rfd.produce_idx + 1; 946 + if (next_produce_idx == rx_q->rfd.count) 947 + next_produce_idx = 0; 948 + 949 + curr_rxbuf = GET_RFD_BUFFER(rx_q, rx_q->rfd.produce_idx); 950 + next_rxbuf = GET_RFD_BUFFER(rx_q, next_produce_idx); 951 + count++; 952 + } 953 + 954 + if (count) { 955 + u32 prod_idx = (rx_q->rfd.produce_idx << rx_q->produce_shift) & 956 + rx_q->produce_mask; 957 + emac_reg_update32(adpt->base + rx_q->produce_reg, 958 + rx_q->produce_mask, prod_idx); 959 + } 960 + } 961 + 962 + static void emac_adjust_link(struct net_device *netdev) 963 + { 964 + struct emac_adapter *adpt = netdev_priv(netdev); 965 + struct phy_device *phydev = netdev->phydev; 966 + 967 + if (phydev->link) 968 + emac_mac_start(adpt); 969 + else 970 + emac_mac_stop(adpt); 971 + 972 + phy_print_status(phydev); 973 + } 974 + 975 + /* Bringup the interface/HW */ 976 + int emac_mac_up(struct emac_adapter *adpt) 977 + { 978 + struct net_device *netdev = adpt->netdev; 979 + struct emac_irq *irq = &adpt->irq; 980 + int ret; 981 + 982 + emac_mac_rx_tx_ring_reset_all(adpt); 983 + emac_mac_config(adpt); 984 + 985 + ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq); 986 + if (ret) { 987 + netdev_err(adpt->netdev, "could not request %s irq\n", 988 + EMAC_MAC_IRQ_RES); 989 + return ret; 990 + } 991 + 992 + emac_mac_rx_descs_refill(adpt, &adpt->rx_q); 993 + 994 + ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link, 995 + PHY_INTERFACE_MODE_SGMII); 996 + if (ret) { 997 + netdev_err(adpt->netdev, "could not connect phy\n"); 998 + free_irq(irq->irq, irq); 999 + return ret; 1000 + } 1001 + 1002 + /* enable mac irq */ 1003 + writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS); 1004 + writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK); 1005 + 1006 + adpt->phydev->irq = PHY_IGNORE_INTERRUPT; 1007 + phy_start(adpt->phydev); 1008 + 1009 + napi_enable(&adpt->rx_q.napi); 
1010 + netif_start_queue(netdev); 1011 + 1012 + return 0; 1013 + } 1014 + 1015 + /* Bring down the interface/HW */ 1016 + void emac_mac_down(struct emac_adapter *adpt) 1017 + { 1018 + struct net_device *netdev = adpt->netdev; 1019 + 1020 + netif_stop_queue(netdev); 1021 + napi_disable(&adpt->rx_q.napi); 1022 + 1023 + phy_stop(adpt->phydev); 1024 + phy_disconnect(adpt->phydev); 1025 + 1026 + /* disable mac irq */ 1027 + writel(DIS_INT, adpt->base + EMAC_INT_STATUS); 1028 + writel(0, adpt->base + EMAC_INT_MASK); 1029 + synchronize_irq(adpt->irq.irq); 1030 + free_irq(adpt->irq.irq, &adpt->irq); 1031 + 1032 + emac_mac_reset(adpt); 1033 + 1034 + emac_tx_q_descs_free(adpt); 1035 + netdev_reset_queue(adpt->netdev); 1036 + emac_rx_q_free_descs(adpt); 1037 + } 1038 + 1039 + /* Consume next received packet descriptor */ 1040 + static bool emac_rx_process_rrd(struct emac_adapter *adpt, 1041 + struct emac_rx_queue *rx_q, 1042 + struct emac_rrd *rrd) 1043 + { 1044 + u32 *hw_rrd = EMAC_RRD(rx_q, adpt->rrd_size, rx_q->rrd.consume_idx); 1045 + 1046 + rrd->word[3] = *(hw_rrd + 3); 1047 + 1048 + if (!RRD_UPDT(rrd)) 1049 + return false; 1050 + 1051 + rrd->word[4] = 0; 1052 + rrd->word[5] = 0; 1053 + 1054 + rrd->word[0] = *(hw_rrd++); 1055 + rrd->word[1] = *(hw_rrd++); 1056 + rrd->word[2] = *(hw_rrd++); 1057 + 1058 + if (unlikely(RRD_NOR(rrd) != 1)) { 1059 + netdev_err(adpt->netdev, 1060 + "error: multi-RFD not support yet! 
nor:%lu\n", 1061 + RRD_NOR(rrd)); 1062 + } 1063 + 1064 + /* mark rrd as processed */ 1065 + RRD_UPDT_SET(rrd, 0); 1066 + *hw_rrd = rrd->word[3]; 1067 + 1068 + if (++rx_q->rrd.consume_idx == rx_q->rrd.count) 1069 + rx_q->rrd.consume_idx = 0; 1070 + 1071 + return true; 1072 + } 1073 + 1074 + /* Produce new transmit descriptor */ 1075 + static void emac_tx_tpd_create(struct emac_adapter *adpt, 1076 + struct emac_tx_queue *tx_q, struct emac_tpd *tpd) 1077 + { 1078 + u32 *hw_tpd; 1079 + 1080 + tx_q->tpd.last_produce_idx = tx_q->tpd.produce_idx; 1081 + hw_tpd = EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.produce_idx); 1082 + 1083 + if (++tx_q->tpd.produce_idx == tx_q->tpd.count) 1084 + tx_q->tpd.produce_idx = 0; 1085 + 1086 + *(hw_tpd++) = tpd->word[0]; 1087 + *(hw_tpd++) = tpd->word[1]; 1088 + *(hw_tpd++) = tpd->word[2]; 1089 + *hw_tpd = tpd->word[3]; 1090 + } 1091 + 1092 + /* Mark the last transmit descriptor as such (for the transmit packet) */ 1093 + static void emac_tx_tpd_mark_last(struct emac_adapter *adpt, 1094 + struct emac_tx_queue *tx_q) 1095 + { 1096 + u32 *hw_tpd = 1097 + EMAC_TPD(tx_q, adpt->tpd_size, tx_q->tpd.last_produce_idx); 1098 + u32 tmp_tpd; 1099 + 1100 + tmp_tpd = *(hw_tpd + 1); 1101 + tmp_tpd |= EMAC_TPD_LAST_FRAGMENT; 1102 + *(hw_tpd + 1) = tmp_tpd; 1103 + } 1104 + 1105 + static void emac_rx_rfd_clean(struct emac_rx_queue *rx_q, struct emac_rrd *rrd) 1106 + { 1107 + struct emac_buffer *rfbuf = rx_q->rfd.rfbuff; 1108 + u32 consume_idx = RRD_SI(rrd); 1109 + unsigned int i; 1110 + 1111 + for (i = 0; i < RRD_NOR(rrd); i++) { 1112 + rfbuf[consume_idx].skb = NULL; 1113 + if (++consume_idx == rx_q->rfd.count) 1114 + consume_idx = 0; 1115 + } 1116 + 1117 + rx_q->rfd.consume_idx = consume_idx; 1118 + rx_q->rfd.process_idx = consume_idx; 1119 + } 1120 + 1121 + /* Push the received skb to upper layers */ 1122 + static void emac_receive_skb(struct emac_rx_queue *rx_q, 1123 + struct sk_buff *skb, 1124 + u16 vlan_tag, bool vlan_flag) 1125 + { 1126 + if 
(vlan_flag) { 1127 + u16 vlan; 1128 + 1129 + EMAC_TAG_TO_VLAN(vlan_tag, vlan); 1130 + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); 1131 + } 1132 + 1133 + napi_gro_receive(&rx_q->napi, skb); 1134 + } 1135 + 1136 + /* Process receive event */ 1137 + void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q, 1138 + int *num_pkts, int max_pkts) 1139 + { 1140 + u32 proc_idx, hw_consume_idx, num_consume_pkts; 1141 + struct net_device *netdev = adpt->netdev; 1142 + struct emac_buffer *rfbuf; 1143 + unsigned int count = 0; 1144 + struct emac_rrd rrd; 1145 + struct sk_buff *skb; 1146 + u32 reg; 1147 + 1148 + reg = readl_relaxed(adpt->base + rx_q->consume_reg); 1149 + 1150 + hw_consume_idx = (reg & rx_q->consume_mask) >> rx_q->consume_shift; 1151 + num_consume_pkts = (hw_consume_idx >= rx_q->rrd.consume_idx) ? 1152 + (hw_consume_idx - rx_q->rrd.consume_idx) : 1153 + (hw_consume_idx + rx_q->rrd.count - rx_q->rrd.consume_idx); 1154 + 1155 + do { 1156 + if (!num_consume_pkts) 1157 + break; 1158 + 1159 + if (!emac_rx_process_rrd(adpt, rx_q, &rrd)) 1160 + break; 1161 + 1162 + if (likely(RRD_NOR(&rrd) == 1)) { 1163 + /* good receive */ 1164 + rfbuf = GET_RFD_BUFFER(rx_q, RRD_SI(&rrd)); 1165 + dma_unmap_single(adpt->netdev->dev.parent, 1166 + rfbuf->dma_addr, rfbuf->length, 1167 + DMA_FROM_DEVICE); 1168 + rfbuf->dma_addr = 0; 1169 + skb = rfbuf->skb; 1170 + } else { 1171 + netdev_err(adpt->netdev, 1172 + "error: multi-RFD not support yet!\n"); 1173 + break; 1174 + } 1175 + emac_rx_rfd_clean(rx_q, &rrd); 1176 + num_consume_pkts--; 1177 + count++; 1178 + 1179 + /* Due to a HW issue in L4 check sum detection (UDP/TCP frags 1180 + * with DF set are marked as error), drop packets based on the 1181 + * error mask rather than the summary bit (ignoring L4F errors) 1182 + */ 1183 + if (rrd.word[EMAC_RRD_STATS_DW_IDX] & EMAC_RRD_ERROR) { 1184 + netif_dbg(adpt, rx_status, adpt->netdev, 1185 + "Drop error packet[RRD: 0x%x:0x%x:0x%x:0x%x]\n", 1186 + rrd.word[0], 
rrd.word[1], 1187 + rrd.word[2], rrd.word[3]); 1188 + 1189 + dev_kfree_skb(skb); 1190 + continue; 1191 + } 1192 + 1193 + skb_put(skb, RRD_PKT_SIZE(&rrd) - ETH_FCS_LEN); 1194 + skb->dev = netdev; 1195 + skb->protocol = eth_type_trans(skb, skb->dev); 1196 + if (netdev->features & NETIF_F_RXCSUM) 1197 + skb->ip_summed = RRD_L4F(&rrd) ? 1198 + CHECKSUM_NONE : CHECKSUM_UNNECESSARY; 1199 + else 1200 + skb_checksum_none_assert(skb); 1201 + 1202 + emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd), 1203 + (bool)RRD_CVTAG(&rrd)); 1204 + 1205 + netdev->last_rx = jiffies; 1206 + (*num_pkts)++; 1207 + } while (*num_pkts < max_pkts); 1208 + 1209 + if (count) { 1210 + proc_idx = (rx_q->rfd.process_idx << rx_q->process_shft) & 1211 + rx_q->process_mask; 1212 + emac_reg_update32(adpt->base + rx_q->process_reg, 1213 + rx_q->process_mask, proc_idx); 1214 + emac_mac_rx_descs_refill(adpt, rx_q); 1215 + } 1216 + } 1217 + 1218 + /* get the number of free transmit descriptors */ 1219 + static unsigned int emac_tpd_num_free_descs(struct emac_tx_queue *tx_q) 1220 + { 1221 + u32 produce_idx = tx_q->tpd.produce_idx; 1222 + u32 consume_idx = tx_q->tpd.consume_idx; 1223 + 1224 + return (consume_idx > produce_idx) ? 
1225 + (consume_idx - produce_idx - 1) : 1226 + (tx_q->tpd.count + consume_idx - produce_idx - 1); 1227 + } 1228 + 1229 + /* Process transmit event */ 1230 + void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q) 1231 + { 1232 + u32 reg = readl_relaxed(adpt->base + tx_q->consume_reg); 1233 + u32 hw_consume_idx, pkts_compl = 0, bytes_compl = 0; 1234 + struct emac_buffer *tpbuf; 1235 + 1236 + hw_consume_idx = (reg & tx_q->consume_mask) >> tx_q->consume_shift; 1237 + 1238 + while (tx_q->tpd.consume_idx != hw_consume_idx) { 1239 + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.consume_idx); 1240 + if (tpbuf->dma_addr) { 1241 + dma_unmap_single(adpt->netdev->dev.parent, 1242 + tpbuf->dma_addr, tpbuf->length, 1243 + DMA_TO_DEVICE); 1244 + tpbuf->dma_addr = 0; 1245 + } 1246 + 1247 + if (tpbuf->skb) { 1248 + pkts_compl++; 1249 + bytes_compl += tpbuf->skb->len; 1250 + dev_kfree_skb_irq(tpbuf->skb); 1251 + tpbuf->skb = NULL; 1252 + } 1253 + 1254 + if (++tx_q->tpd.consume_idx == tx_q->tpd.count) 1255 + tx_q->tpd.consume_idx = 0; 1256 + } 1257 + 1258 + netdev_completed_queue(adpt->netdev, pkts_compl, bytes_compl); 1259 + 1260 + if (netif_queue_stopped(adpt->netdev)) 1261 + if (emac_tpd_num_free_descs(tx_q) > (MAX_SKB_FRAGS + 1)) 1262 + netif_wake_queue(adpt->netdev); 1263 + } 1264 + 1265 + /* Initialize all queue data structures */ 1266 + void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev, 1267 + struct emac_adapter *adpt) 1268 + { 1269 + adpt->rx_q.netdev = adpt->netdev; 1270 + 1271 + adpt->rx_q.produce_reg = EMAC_MAILBOX_0; 1272 + adpt->rx_q.produce_mask = RFD0_PROD_IDX_BMSK; 1273 + adpt->rx_q.produce_shift = RFD0_PROD_IDX_SHFT; 1274 + 1275 + adpt->rx_q.process_reg = EMAC_MAILBOX_0; 1276 + adpt->rx_q.process_mask = RFD0_PROC_IDX_BMSK; 1277 + adpt->rx_q.process_shft = RFD0_PROC_IDX_SHFT; 1278 + 1279 + adpt->rx_q.consume_reg = EMAC_MAILBOX_3; 1280 + adpt->rx_q.consume_mask = RFD0_CONS_IDX_BMSK; 1281 + adpt->rx_q.consume_shift = RFD0_CONS_IDX_SHFT; 
1282 + 1283 + adpt->rx_q.irq = &adpt->irq; 1284 + adpt->rx_q.intr = adpt->irq.mask & ISR_RX_PKT; 1285 + 1286 + adpt->tx_q.produce_reg = EMAC_MAILBOX_15; 1287 + adpt->tx_q.produce_mask = NTPD_PROD_IDX_BMSK; 1288 + adpt->tx_q.produce_shift = NTPD_PROD_IDX_SHFT; 1289 + 1290 + adpt->tx_q.consume_reg = EMAC_MAILBOX_2; 1291 + adpt->tx_q.consume_mask = NTPD_CONS_IDX_BMSK; 1292 + adpt->tx_q.consume_shift = NTPD_CONS_IDX_SHFT; 1293 + } 1294 + 1295 + /* Fill up transmit descriptors with TSO and Checksum offload information */ 1296 + static int emac_tso_csum(struct emac_adapter *adpt, 1297 + struct emac_tx_queue *tx_q, 1298 + struct sk_buff *skb, 1299 + struct emac_tpd *tpd) 1300 + { 1301 + unsigned int hdr_len; 1302 + int ret; 1303 + 1304 + if (skb_is_gso(skb)) { 1305 + if (skb_header_cloned(skb)) { 1306 + ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); 1307 + if (unlikely(ret)) 1308 + return ret; 1309 + } 1310 + 1311 + if (skb->protocol == htons(ETH_P_IP)) { 1312 + u32 pkt_len = ((unsigned char *)ip_hdr(skb) - skb->data) 1313 + + ntohs(ip_hdr(skb)->tot_len); 1314 + if (skb->len > pkt_len) 1315 + pskb_trim(skb, pkt_len); 1316 + } 1317 + 1318 + hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1319 + if (unlikely(skb->len == hdr_len)) { 1320 + /* we only need to do csum */ 1321 + netif_warn(adpt, tx_err, adpt->netdev, 1322 + "tso not needed for packet with 0 data\n"); 1323 + goto do_csum; 1324 + } 1325 + 1326 + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { 1327 + ip_hdr(skb)->check = 0; 1328 + tcp_hdr(skb)->check = 1329 + ~csum_tcpudp_magic(ip_hdr(skb)->saddr, 1330 + ip_hdr(skb)->daddr, 1331 + 0, IPPROTO_TCP, 0); 1332 + TPD_IPV4_SET(tpd, 1); 1333 + } 1334 + 1335 + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { 1336 + /* ipv6 tso need an extra tpd */ 1337 + struct emac_tpd extra_tpd; 1338 + 1339 + memset(tpd, 0, sizeof(*tpd)); 1340 + memset(&extra_tpd, 0, sizeof(extra_tpd)); 1341 + 1342 + ipv6_hdr(skb)->payload_len = 0; 1343 + tcp_hdr(skb)->check = 1344 + 
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, 1345 + &ipv6_hdr(skb)->daddr, 1346 + 0, IPPROTO_TCP, 0); 1347 + TPD_PKT_LEN_SET(&extra_tpd, skb->len); 1348 + TPD_LSO_SET(&extra_tpd, 1); 1349 + TPD_LSOV_SET(&extra_tpd, 1); 1350 + emac_tx_tpd_create(adpt, tx_q, &extra_tpd); 1351 + TPD_LSOV_SET(tpd, 1); 1352 + } 1353 + 1354 + TPD_LSO_SET(tpd, 1); 1355 + TPD_TCPHDR_OFFSET_SET(tpd, skb_transport_offset(skb)); 1356 + TPD_MSS_SET(tpd, skb_shinfo(skb)->gso_size); 1357 + return 0; 1358 + } 1359 + 1360 + do_csum: 1361 + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1362 + unsigned int css, cso; 1363 + 1364 + cso = skb_transport_offset(skb); 1365 + if (unlikely(cso & 0x1)) { 1366 + netdev_err(adpt->netdev, 1367 + "error: payload offset should be even\n"); 1368 + return -EINVAL; 1369 + } 1370 + css = cso + skb->csum_offset; 1371 + 1372 + TPD_PAYLOAD_OFFSET_SET(tpd, cso >> 1); 1373 + TPD_CXSUM_OFFSET_SET(tpd, css >> 1); 1374 + TPD_CSX_SET(tpd, 1); 1375 + } 1376 + 1377 + return 0; 1378 + } 1379 + 1380 + /* Fill up transmit descriptors */ 1381 + static void emac_tx_fill_tpd(struct emac_adapter *adpt, 1382 + struct emac_tx_queue *tx_q, struct sk_buff *skb, 1383 + struct emac_tpd *tpd) 1384 + { 1385 + unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 1386 + unsigned int first = tx_q->tpd.produce_idx; 1387 + unsigned int len = skb_headlen(skb); 1388 + struct emac_buffer *tpbuf = NULL; 1389 + unsigned int mapped_len = 0; 1390 + unsigned int i; 1391 + int count = 0; 1392 + int ret; 1393 + 1394 + /* if Large Segment Offload is (in TCP Segmentation Offload struct) */ 1395 + if (TPD_LSO(tpd)) { 1396 + mapped_len = skb_transport_offset(skb) + tcp_hdrlen(skb); 1397 + 1398 + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); 1399 + tpbuf->length = mapped_len; 1400 + tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, 1401 + skb->data, tpbuf->length, 1402 + DMA_TO_DEVICE); 1403 + ret = dma_mapping_error(adpt->netdev->dev.parent, 1404 + tpbuf->dma_addr); 1405 + if (ret) 1406 + goto 
error; 1407 + 1408 + TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr)); 1409 + TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr)); 1410 + TPD_BUF_LEN_SET(tpd, tpbuf->length); 1411 + emac_tx_tpd_create(adpt, tx_q, tpd); 1412 + count++; 1413 + } 1414 + 1415 + if (mapped_len < len) { 1416 + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); 1417 + tpbuf->length = len - mapped_len; 1418 + tpbuf->dma_addr = dma_map_single(adpt->netdev->dev.parent, 1419 + skb->data + mapped_len, 1420 + tpbuf->length, DMA_TO_DEVICE); 1421 + ret = dma_mapping_error(adpt->netdev->dev.parent, 1422 + tpbuf->dma_addr); 1423 + if (ret) 1424 + goto error; 1425 + 1426 + TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr)); 1427 + TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr)); 1428 + TPD_BUF_LEN_SET(tpd, tpbuf->length); 1429 + emac_tx_tpd_create(adpt, tx_q, tpd); 1430 + count++; 1431 + } 1432 + 1433 + for (i = 0; i < nr_frags; i++) { 1434 + struct skb_frag_struct *frag; 1435 + 1436 + frag = &skb_shinfo(skb)->frags[i]; 1437 + 1438 + tpbuf = GET_TPD_BUFFER(tx_q, tx_q->tpd.produce_idx); 1439 + tpbuf->length = frag->size; 1440 + tpbuf->dma_addr = dma_map_page(adpt->netdev->dev.parent, 1441 + frag->page.p, frag->page_offset, 1442 + tpbuf->length, DMA_TO_DEVICE); 1443 + ret = dma_mapping_error(adpt->netdev->dev.parent, 1444 + tpbuf->dma_addr); 1445 + if (ret) 1446 + goto error; 1447 + 1448 + TPD_BUFFER_ADDR_L_SET(tpd, lower_32_bits(tpbuf->dma_addr)); 1449 + TPD_BUFFER_ADDR_H_SET(tpd, upper_32_bits(tpbuf->dma_addr)); 1450 + TPD_BUF_LEN_SET(tpd, tpbuf->length); 1451 + emac_tx_tpd_create(adpt, tx_q, tpd); 1452 + count++; 1453 + } 1454 + 1455 + /* The last tpd */ 1456 + wmb(); 1457 + emac_tx_tpd_mark_last(adpt, tx_q); 1458 + 1459 + /* The last buffer info contain the skb address, 1460 + * so it will be freed after unmap 1461 + */ 1462 + tpbuf->skb = skb; 1463 + 1464 + return; 1465 + 1466 + error: 1467 + /* One of the memory mappings failed, so undo everything */ 1468 + 
tx_q->tpd.produce_idx = first; 1469 + 1470 + while (count--) { 1471 + tpbuf = GET_TPD_BUFFER(tx_q, first); 1472 + dma_unmap_page(adpt->netdev->dev.parent, tpbuf->dma_addr, 1473 + tpbuf->length, DMA_TO_DEVICE); 1474 + tpbuf->dma_addr = 0; 1475 + tpbuf->length = 0; 1476 + 1477 + if (++first == tx_q->tpd.count) 1478 + first = 0; 1479 + } 1480 + 1481 + dev_kfree_skb(skb); 1482 + } 1483 + 1484 + /* Transmit the packet using specified transmit queue */ 1485 + int emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q, 1486 + struct sk_buff *skb) 1487 + { 1488 + struct emac_tpd tpd; 1489 + u32 prod_idx; 1490 + 1491 + memset(&tpd, 0, sizeof(tpd)); 1492 + 1493 + if (emac_tso_csum(adpt, tx_q, skb, &tpd) != 0) { 1494 + dev_kfree_skb_any(skb); 1495 + return NETDEV_TX_OK; 1496 + } 1497 + 1498 + if (skb_vlan_tag_present(skb)) { 1499 + u16 tag; 1500 + 1501 + EMAC_VLAN_TO_TAG(skb_vlan_tag_get(skb), tag); 1502 + TPD_CVLAN_TAG_SET(&tpd, tag); 1503 + TPD_INSTC_SET(&tpd, 1); 1504 + } 1505 + 1506 + if (skb_network_offset(skb) != ETH_HLEN) 1507 + TPD_TYP_SET(&tpd, 1); 1508 + 1509 + emac_tx_fill_tpd(adpt, tx_q, skb, &tpd); 1510 + 1511 + netdev_sent_queue(adpt->netdev, skb->len); 1512 + 1513 + /* Make sure the are enough free descriptors to hold one 1514 + * maximum-sized SKB. We need one desc for each fragment, 1515 + * one for the checksum (emac_tso_csum), one for TSO, and 1516 + * and one for the SKB header. 1517 + */ 1518 + if (emac_tpd_num_free_descs(tx_q) < (MAX_SKB_FRAGS + 3)) 1519 + netif_stop_queue(adpt->netdev); 1520 + 1521 + /* update produce idx */ 1522 + prod_idx = (tx_q->tpd.produce_idx << tx_q->produce_shift) & 1523 + tx_q->produce_mask; 1524 + emac_reg_update32(adpt->base + tx_q->produce_reg, 1525 + tx_q->produce_mask, prod_idx); 1526 + 1527 + return NETDEV_TX_OK; 1528 + }
+248
drivers/net/ethernet/qualcomm/emac/emac-mac.h
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* EMAC DMA HW engine uses three rings:
 * Tx:
 *   TPD: Transmit Packet Descriptor ring.
 * Rx:
 *   RFD: Receive Free Descriptor ring.
 *     Ring of descriptors with empty buffers to be filled by Rx HW.
 *   RRD: Receive Return Descriptor ring.
 *     Ring of descriptors with buffers filled with received data.
 */

#ifndef _EMAC_HW_H_
#define _EMAC_HW_H_

/* EMAC_CSR register offsets */
#define EMAC_EMAC_WRAPPER_CSR1		0x000000
#define EMAC_EMAC_WRAPPER_CSR2		0x000004
#define EMAC_EMAC_WRAPPER_TX_TS_LO	0x000104
#define EMAC_EMAC_WRAPPER_TX_TS_HI	0x000108
#define EMAC_EMAC_WRAPPER_TX_TS_INX	0x00010c

/* DMA Order Settings */
enum emac_dma_order {
	emac_dma_ord_in = 1,
	emac_dma_ord_enh = 2,
	emac_dma_ord_out = 4
};

/* DMA request block size (in bytes) */
enum emac_dma_req_block {
	emac_dma_req_128 = 0,
	emac_dma_req_256 = 1,
	emac_dma_req_512 = 2,
	emac_dma_req_1024 = 3,
	emac_dma_req_2048 = 4,
	emac_dma_req_4096 = 5
};

/* Return the value of bits lo...hi (inclusive) of the little-endian
 * descriptor word 'val'.
 */
#define BITS_GET(val, lo, hi) ((le32_to_cpu(val) & GENMASK((hi), (lo))) >> lo)
/* Replace bits lo...hi of the little-endian descriptor word 'val' with
 * 'new_val'; all other bits are preserved.
 */
#define BITS_SET(val, lo, hi, new_val) \
	val = cpu_to_le32((le32_to_cpu(val) & (~GENMASK((hi), (lo)))) | \
		(((new_val) << (lo)) & GENMASK((hi), (lo))))

/* RRD (Receive Return Descriptor)
 * Descriptor words are little-endian; the accessor macros below convert
 * via le32_to_cpu()/cpu_to_le32().
 */
struct emac_rrd {
	u32 word[6];

/* number of RFD */
#define RRD_NOR(rrd)			BITS_GET((rrd)->word[0], 16, 19)
/* start consumer index of rfd-ring */
#define RRD_SI(rrd)			BITS_GET((rrd)->word[0], 20, 31)
/* vlan-tag (CVID, CFI and PRI) */
#define RRD_CVALN_TAG(rrd)		BITS_GET((rrd)->word[2], 0, 15)
/* length of the packet */
#define RRD_PKT_SIZE(rrd)		BITS_GET((rrd)->word[3], 0, 13)
/* L4(TCP/UDP) checksum failed */
#define RRD_L4F(rrd)			BITS_GET((rrd)->word[3], 14, 14)
/* vlan tagged */
#define RRD_CVTAG(rrd)			BITS_GET((rrd)->word[3], 16, 16)
/* When set, indicates that the descriptor is updated by the IP core.
 * When cleared, indicates that the descriptor is invalid.
 */
#define RRD_UPDT(rrd)			BITS_GET((rrd)->word[3], 31, 31)
#define RRD_UPDT_SET(rrd, val)		BITS_SET((rrd)->word[3], 31, 31, val)
/* timestamp low */
#define RRD_TS_LOW(rrd)			BITS_GET((rrd)->word[4], 0, 29)
/* timestamp high */
#define RRD_TS_HI(rrd)			le32_to_cpu((rrd)->word[5])
};

/* TPD (Transmit Packet Descriptor) */
struct emac_tpd {
	u32 word[4];

/* Number of bytes of the transmit packet. (include 4-byte CRC) */
#define TPD_BUF_LEN_SET(tpd, val)	BITS_SET((tpd)->word[0], 0, 15, val)
/* Custom Checksum Offload: When set, ask IP core to offload custom checksum */
#define TPD_CSX_SET(tpd, val)		BITS_SET((tpd)->word[1], 8, 8, val)
/* TCP Large Send Offload: When set, ask IP core to do offload TCP Large Send */
#define TPD_LSO(tpd)			BITS_GET((tpd)->word[1], 12, 12)
#define TPD_LSO_SET(tpd, val)		BITS_SET((tpd)->word[1], 12, 12, val)
/* Large Send Offload Version: When set, indicates this is an LSOv2
 * (for both IPv4 and IPv6). When cleared, indicates this is an LSOv1
 * (only for IPv4).
 */
#define TPD_LSOV_SET(tpd, val)		BITS_SET((tpd)->word[1], 13, 13, val)
/* IPv4 packet: When set, indicates this is an IPv4 packet, this bit is only
 * for LSOV2 format.
 */
#define TPD_IPV4_SET(tpd, val)		BITS_SET((tpd)->word[1], 16, 16, val)
/* 0: Ethernet   frame (DA+SA+TYPE+DATA+CRC)
 * 1: IEEE 802.3 frame (DA+SA+LEN+DSAP+SSAP+CTL+ORG+TYPE+DATA+CRC)
 */
#define TPD_TYP_SET(tpd, val)		BITS_SET((tpd)->word[1], 17, 17, val)
/* Low-32bit Buffer Address */
#define TPD_BUFFER_ADDR_L_SET(tpd, val)	((tpd)->word[2] = cpu_to_le32(val))
/* CVLAN Tag to be inserted if INS_VLAN_TAG is set, CVLAN TPID based on global
 * register configuration.
 */
#define TPD_CVLAN_TAG_SET(tpd, val)	BITS_SET((tpd)->word[3], 0, 15, val)
/* Insert CVlan Tag: When set, ask MAC to insert CVLAN TAG to outgoing packet
 */
#define TPD_INSTC_SET(tpd, val)		BITS_SET((tpd)->word[3], 17, 17, val)
/* High-14bit Buffer Address, So, the 64b-bit address is
 * {DESC_CTRL_11_TX_DATA_HIADDR[17:0],(register) BUFFER_ADDR_H, BUFFER_ADDR_L}
 */
#define TPD_BUFFER_ADDR_H_SET(tpd, val)	BITS_SET((tpd)->word[3], 18, 30, val)
/* Format D. Word offset from the 1st byte of this packet to start to calculate
 * the custom checksum.
 */
#define TPD_PAYLOAD_OFFSET_SET(tpd, val) BITS_SET((tpd)->word[1], 0, 7, val)
/* Format D. Word offset from the 1st byte of this packet to fill the custom
 * checksum to
 */
#define TPD_CXSUM_OFFSET_SET(tpd, val)	BITS_SET((tpd)->word[1], 18, 25, val)

/* Format C. TCP Header offset from the 1st byte of this packet. (byte unit) */
#define TPD_TCPHDR_OFFSET_SET(tpd, val)	BITS_SET((tpd)->word[1], 0, 7, val)
/* Format C. MSS (Maximum Segment Size) got from the protocol layer.
 * (byte unit)
 */
#define TPD_MSS_SET(tpd, val)		BITS_SET((tpd)->word[1], 18, 30, val)
/* packet length in ext tpd */
#define TPD_PKT_LEN_SET(tpd, val)	((tpd)->word[2] = cpu_to_le32(val))
};

/* emac_ring_header represents a single, contiguous block of DMA space
 * mapped for the three descriptor rings (tpd, rfd, rrd)
 */
struct emac_ring_header {
	void			*v_addr;	/* virtual address */
	dma_addr_t		 dma_addr;	/* dma address */
	size_t			 size;		/* length in bytes */
	size_t			 used;		/* bytes handed out so far */
};

/* emac_buffer is wrapper around a pointer to a socket buffer
 * so a DMA handle can be stored along with the skb
 */
struct emac_buffer {
	struct sk_buff		*skb;		/* socket buffer */
	u16			 length;	/* rx buffer length */
	dma_addr_t		 dma_addr;	/* dma address */
};

/* receive free descriptor (rfd) ring */
struct emac_rfd_ring {
	struct emac_buffer	*rfbuff;
	u32			*v_addr;	/* virtual address */
	dma_addr_t		 dma_addr;	/* dma address */
	size_t			 size;		/* length in bytes */
	unsigned int		 count;		/* number of desc in the ring */
	unsigned int		 produce_idx;
	unsigned int		 process_idx;
	unsigned int		 consume_idx;	/* unused */
};

/* Receive Return Descriptor (RRD) ring */
struct emac_rrd_ring {
	u32			*v_addr;	/* virtual address */
	dma_addr_t		 dma_addr;	/* physical address */
	size_t			 size;		/* length in bytes */
	unsigned int		 count;		/* number of desc in the ring */
	unsigned int		 produce_idx;	/* unused */
	unsigned int		 consume_idx;
};

/* Rx queue */
struct emac_rx_queue {
	struct net_device	*netdev;	/* netdev ring belongs to */
	struct emac_rrd_ring	 rrd;
	struct emac_rfd_ring	 rfd;
	struct napi_struct	 napi;
	struct emac_irq		*irq;

	u32			 intr;
	u32			 produce_mask;
	u32			 process_mask;
	u32			 consume_mask;

	u16			 produce_reg;
	u16			 process_reg;
	u16			 consume_reg;

	u8			 produce_shift;
	u8			 process_shft;
	u8			 consume_shift;
};

/* Transmit Packet Descriptor (tpd) ring */
struct emac_tpd_ring {
	struct emac_buffer	*tpbuff;
	u32			*v_addr;	/* virtual address */
	dma_addr_t		 dma_addr;	/* dma address */

	size_t			 size;		/* length in bytes */
	unsigned int		 count;		/* number of desc in the ring */
	unsigned int		 produce_idx;
	unsigned int		 consume_idx;
	unsigned int		 last_produce_idx;
};

/* Tx queue */
struct emac_tx_queue {
	struct emac_tpd_ring	 tpd;

	u32			 produce_mask;
	u32			 consume_mask;

	u16			 max_packets;	/* max packets per interrupt */
	u16			 produce_reg;
	u16			 consume_reg;

	u8			 produce_shift;
	u8			 consume_shift;
};

struct emac_adapter;

int  emac_mac_up(struct emac_adapter *adpt);
void emac_mac_down(struct emac_adapter *adpt);
void emac_mac_reset(struct emac_adapter *adpt);
void emac_mac_start(struct emac_adapter *adpt);
void emac_mac_stop(struct emac_adapter *adpt);
void emac_mac_mode_config(struct emac_adapter *adpt);
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
			 int *num_pkts, int max_pkts);
int  emac_mac_tx_buf_send(struct emac_adapter *adpt, struct emac_tx_queue *tx_q,
			  struct sk_buff *skb);
void emac_mac_tx_process(struct emac_adapter *adpt, struct emac_tx_queue *tx_q);
void emac_mac_rx_tx_ring_init_all(struct platform_device *pdev,
				  struct emac_adapter *adpt);
int  emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt);
void emac_mac_rx_tx_rings_free_all(struct emac_adapter *adpt);
void emac_mac_multicast_addr_clear(struct emac_adapter *adpt);
void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr);

#endif /*_EMAC_HW_H_*/
+204
drivers/net/ethernet/qualcomm/emac/emac-phy.c
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/* Qualcomm Technologies, Inc. EMAC PHY Controller driver.
 */

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
#include "emac.h"
#include "emac-mac.h"
#include "emac-phy.h"
#include "emac-sgmii.h"

/* EMAC base register offsets */
#define EMAC_MDIO_CTRL			0x001414
#define EMAC_PHY_STS			0x001418
#define EMAC_MDIO_EX_CTRL		0x001440

/* EMAC_MDIO_CTRL */
#define MDIO_MODE			BIT(30)
#define MDIO_PR				BIT(29)
#define MDIO_AP_EN			BIT(28)
#define MDIO_BUSY			BIT(27)
#define MDIO_CLK_SEL_BMSK		0x7000000
#define MDIO_CLK_SEL_SHFT		24
#define MDIO_START			BIT(23)
#define SUP_PREAMBLE			BIT(22)
#define MDIO_RD_NWR			BIT(21)
#define MDIO_REG_ADDR_BMSK		0x1f0000
#define MDIO_REG_ADDR_SHFT		16
#define MDIO_DATA_BMSK			0xffff
#define MDIO_DATA_SHFT			0

/* EMAC_PHY_STS */
#define PHY_ADDR_BMSK			0x1f0000
#define PHY_ADDR_SHFT			16

#define MDIO_CLK_25_4			0
#define MDIO_CLK_25_28			7

#define MDIO_WAIT_TIMES			1000

#define EMAC_LINK_SPEED_DEFAULT (\
		EMAC_LINK_SPEED_10_HALF  |\
		EMAC_LINK_SPEED_10_FULL  |\
		EMAC_LINK_SPEED_100_HALF |\
		EMAC_LINK_SPEED_100_FULL |\
		EMAC_LINK_SPEED_1GB_FULL)

/**
 * emac_phy_mdio_autopoll_disable() - disable mdio autopoll
 * @adpt: the emac adapter
 *
 * The autopoll feature takes over the MDIO bus.  In order for
 * the PHY driver to be able to talk to the PHY over the MDIO
 * bus, we need to temporarily disable the autopoll feature.
 */
static int emac_phy_mdio_autopoll_disable(struct emac_adapter *adpt)
{
	u32 val;

	/* disable autopoll */
	emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, MDIO_AP_EN, 0);

	/* wait for any mdio polling to complete */
	if (!readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, val,
				!(val & MDIO_BUSY), 100, MDIO_WAIT_TIMES * 100))
		return 0;

	/* failed to disable; ensure it is enabled before returning */
	emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);

	return -EBUSY;
}

/**
 * emac_phy_mdio_autopoll_enable() - enable mdio autopoll
 * @adpt: the emac adapter
 *
 * The EMAC has the ability to poll the external PHY on the MDIO
 * bus for link state changes.  This eliminates the need for the
 * driver to poll the phy.  If the link state does change,
 * the EMAC issues an interrupt on behalf of the PHY.
 */
static void emac_phy_mdio_autopoll_enable(struct emac_adapter *adpt)
{
	emac_reg_update32(adpt->base + EMAC_MDIO_CTRL, 0, MDIO_AP_EN);
}

/* mii_bus read callback: read one PHY register over the EMAC MDIO master.
 * Autopoll is disabled around the transaction so the bus is free, and
 * re-enabled before returning.  Returns the register value or a negative
 * errno.
 */
static int emac_mdio_read(struct mii_bus *bus, int addr, int regnum)
{
	struct emac_adapter *adpt = bus->priv;
	u32 reg;
	int ret;

	ret = emac_phy_mdio_autopoll_disable(adpt);
	if (ret)
		return ret;

	/* select the phy address the transaction targets */
	emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
			  (addr << PHY_ADDR_SHFT));

	reg = SUP_PREAMBLE |
	      ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
	      ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
	      MDIO_START | MDIO_RD_NWR;

	writel(reg, adpt->base + EMAC_MDIO_CTRL);

	/* wait for the hardware to clear START/BUSY, then extract the data */
	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
			       !(reg & (MDIO_START | MDIO_BUSY)),
			       100, MDIO_WAIT_TIMES * 100))
		ret = -EIO;
	else
		ret = (reg >> MDIO_DATA_SHFT) & MDIO_DATA_BMSK;

	emac_phy_mdio_autopoll_enable(adpt);

	return ret;
}

/* mii_bus write callback: write one PHY register over the EMAC MDIO master.
 * Mirrors emac_mdio_read(), but without MDIO_RD_NWR and with the data
 * folded into the control word.  Returns 0 or a negative errno.
 */
static int emac_mdio_write(struct mii_bus *bus, int addr, int regnum, u16 val)
{
	struct emac_adapter *adpt = bus->priv;
	u32 reg;
	int ret;

	ret = emac_phy_mdio_autopoll_disable(adpt);
	if (ret)
		return ret;

	/* select the phy address the transaction targets */
	emac_reg_update32(adpt->base + EMAC_PHY_STS, PHY_ADDR_BMSK,
			  (addr << PHY_ADDR_SHFT));

	reg = SUP_PREAMBLE |
	      ((MDIO_CLK_25_4 << MDIO_CLK_SEL_SHFT) & MDIO_CLK_SEL_BMSK) |
	      ((regnum << MDIO_REG_ADDR_SHFT) & MDIO_REG_ADDR_BMSK) |
	      ((val << MDIO_DATA_SHFT) & MDIO_DATA_BMSK) |
	      MDIO_START;

	writel(reg, adpt->base + EMAC_MDIO_CTRL);

	if (readl_poll_timeout(adpt->base + EMAC_MDIO_CTRL, reg,
			       !(reg & (MDIO_START | MDIO_BUSY)), 100,
			       MDIO_WAIT_TIMES * 100))
		ret = -EIO;

	emac_phy_mdio_autopoll_enable(adpt);

	return ret;
}

/*
Configure the MDIO bus and connect the external PHY */ 167 + int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt) 168 + { 169 + struct device_node *np = pdev->dev.of_node; 170 + struct device_node *phy_np; 171 + struct mii_bus *mii_bus; 172 + int ret; 173 + 174 + /* Create the mii_bus object for talking to the MDIO bus */ 175 + adpt->mii_bus = mii_bus = devm_mdiobus_alloc(&pdev->dev); 176 + if (!mii_bus) 177 + return -ENOMEM; 178 + 179 + mii_bus->name = "emac-mdio"; 180 + snprintf(mii_bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); 181 + mii_bus->read = emac_mdio_read; 182 + mii_bus->write = emac_mdio_write; 183 + mii_bus->parent = &pdev->dev; 184 + mii_bus->priv = adpt; 185 + 186 + ret = of_mdiobus_register(mii_bus, np); 187 + if (ret) { 188 + dev_err(&pdev->dev, "could not register mdio bus\n"); 189 + return ret; 190 + } 191 + 192 + phy_np = of_parse_phandle(np, "phy-handle", 0); 193 + adpt->phydev = of_phy_find_device(phy_np); 194 + if (!adpt->phydev) { 195 + dev_err(&pdev->dev, "could not find external phy\n"); 196 + mdiobus_unregister(mii_bus); 197 + return -ENODEV; 198 + } 199 + 200 + if (adpt->phydev->drv) 201 + phy_attached_print(adpt->phydev, NULL); 202 + 203 + return 0; 204 + }
+33
drivers/net/ethernet/qualcomm/emac/emac-phy.h
··· 1 + /* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 2 + * 3 + * This program is free software; you can redistribute it and/or modify 4 + * it under the terms of the GNU General Public License version 2 and 5 + * only version 2 as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope that it will be useful, 8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 + * GNU General Public License for more details. 11 + */ 12 + 13 + #ifndef _EMAC_PHY_H_ 14 + #define _EMAC_PHY_H_ 15 + 16 + typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt); 17 + 18 + /** emac_phy - internal emac phy 19 + * @base base address 20 + * @digital per-lane digital block 21 + * @initialize initialization function 22 + */ 23 + struct emac_phy { 24 + void __iomem *base; 25 + void __iomem *digital; 26 + emac_sgmii_initialize initialize; 27 + }; 28 + 29 + struct emac_adapter; 30 + 31 + int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt); 32 + 33 + #endif /* _EMAC_PHY_H_ */
+721
drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
··· 1 + /* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 2 + * 3 + * This program is free software; you can redistribute it and/or modify 4 + * it under the terms of the GNU General Public License version 2 and 5 + * only version 2 as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope that it will be useful, 8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 + * GNU General Public License for more details. 11 + */ 12 + 13 + /* Qualcomm Technologies, Inc. EMAC SGMII Controller driver. 14 + */ 15 + 16 + #include <linux/iopoll.h> 17 + #include <linux/of_device.h> 18 + #include "emac.h" 19 + #include "emac-mac.h" 20 + #include "emac-sgmii.h" 21 + 22 + /* EMAC_QSERDES register offsets */ 23 + #define EMAC_QSERDES_COM_SYS_CLK_CTRL 0x000000 24 + #define EMAC_QSERDES_COM_PLL_CNTRL 0x000014 25 + #define EMAC_QSERDES_COM_PLL_IP_SETI 0x000018 26 + #define EMAC_QSERDES_COM_PLL_CP_SETI 0x000024 27 + #define EMAC_QSERDES_COM_PLL_IP_SETP 0x000028 28 + #define EMAC_QSERDES_COM_PLL_CP_SETP 0x00002c 29 + #define EMAC_QSERDES_COM_SYSCLK_EN_SEL 0x000038 30 + #define EMAC_QSERDES_COM_RESETSM_CNTRL 0x000040 31 + #define EMAC_QSERDES_COM_PLLLOCK_CMP1 0x000044 32 + #define EMAC_QSERDES_COM_PLLLOCK_CMP2 0x000048 33 + #define EMAC_QSERDES_COM_PLLLOCK_CMP3 0x00004c 34 + #define EMAC_QSERDES_COM_PLLLOCK_CMP_EN 0x000050 35 + #define EMAC_QSERDES_COM_DEC_START1 0x000064 36 + #define EMAC_QSERDES_COM_DIV_FRAC_START1 0x000098 37 + #define EMAC_QSERDES_COM_DIV_FRAC_START2 0x00009c 38 + #define EMAC_QSERDES_COM_DIV_FRAC_START3 0x0000a0 39 + #define EMAC_QSERDES_COM_DEC_START2 0x0000a4 40 + #define EMAC_QSERDES_COM_PLL_CRCTRL 0x0000ac 41 + #define EMAC_QSERDES_COM_RESET_SM 0x0000bc 42 + #define EMAC_QSERDES_TX_BIST_MODE_LANENO 0x000100 43 + #define EMAC_QSERDES_TX_TX_EMP_POST1_LVL 0x000108 44 + #define EMAC_QSERDES_TX_TX_DRV_LVL 0x00010c 45 + 
#define EMAC_QSERDES_TX_LANE_MODE 0x000150 46 + #define EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN 0x000170 47 + #define EMAC_QSERDES_RX_CDR_CONTROL 0x000200 48 + #define EMAC_QSERDES_RX_CDR_CONTROL2 0x000210 49 + #define EMAC_QSERDES_RX_RX_EQ_GAIN12 0x000230 50 + 51 + /* EMAC_SGMII register offsets */ 52 + #define EMAC_SGMII_PHY_SERDES_START 0x000000 53 + #define EMAC_SGMII_PHY_CMN_PWR_CTRL 0x000004 54 + #define EMAC_SGMII_PHY_RX_PWR_CTRL 0x000008 55 + #define EMAC_SGMII_PHY_TX_PWR_CTRL 0x00000C 56 + #define EMAC_SGMII_PHY_LANE_CTRL1 0x000018 57 + #define EMAC_SGMII_PHY_AUTONEG_CFG2 0x000048 58 + #define EMAC_SGMII_PHY_CDR_CTRL0 0x000058 59 + #define EMAC_SGMII_PHY_SPEED_CFG1 0x000074 60 + #define EMAC_SGMII_PHY_POW_DWN_CTRL0 0x000080 61 + #define EMAC_SGMII_PHY_RESET_CTRL 0x0000a8 62 + #define EMAC_SGMII_PHY_IRQ_CMD 0x0000ac 63 + #define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x0000b0 64 + #define EMAC_SGMII_PHY_INTERRUPT_MASK 0x0000b4 65 + #define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x0000b8 66 + #define EMAC_SGMII_PHY_RX_CHK_STATUS 0x0000d4 67 + #define EMAC_SGMII_PHY_AUTONEG0_STATUS 0x0000e0 68 + #define EMAC_SGMII_PHY_AUTONEG1_STATUS 0x0000e4 69 + 70 + /* EMAC_QSERDES_COM_PLL_IP_SETI */ 71 + #define PLL_IPSETI(x) ((x) & 0x3f) 72 + 73 + /* EMAC_QSERDES_COM_PLL_CP_SETI */ 74 + #define PLL_CPSETI(x) ((x) & 0xff) 75 + 76 + /* EMAC_QSERDES_COM_PLL_IP_SETP */ 77 + #define PLL_IPSETP(x) ((x) & 0x3f) 78 + 79 + /* EMAC_QSERDES_COM_PLL_CP_SETP */ 80 + #define PLL_CPSETP(x) ((x) & 0x1f) 81 + 82 + /* EMAC_QSERDES_COM_PLL_CRCTRL */ 83 + #define PLL_RCTRL(x) (((x) & 0xf) << 4) 84 + #define PLL_CCTRL(x) ((x) & 0xf) 85 + 86 + /* SGMII v2 PHY registers per lane */ 87 + #define EMAC_SGMII_PHY_LN_OFFSET 0x0400 88 + 89 + /* SGMII v2 digital lane registers */ 90 + #define EMAC_SGMII_LN_DRVR_CTRL0 0x00C 91 + #define EMAC_SGMII_LN_DRVR_TAP_EN 0x018 92 + #define EMAC_SGMII_LN_TX_MARGINING 0x01C 93 + #define EMAC_SGMII_LN_TX_PRE 0x020 94 + #define EMAC_SGMII_LN_TX_POST 0x024 95 + #define 
EMAC_SGMII_LN_TX_BAND_MODE 0x060 96 + #define EMAC_SGMII_LN_LANE_MODE 0x064 97 + #define EMAC_SGMII_LN_PARALLEL_RATE 0x078 98 + #define EMAC_SGMII_LN_CML_CTRL_MODE0 0x0B8 99 + #define EMAC_SGMII_LN_MIXER_CTRL_MODE0 0x0D0 100 + #define EMAC_SGMII_LN_VGA_INITVAL 0x134 101 + #define EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0 0x17C 102 + #define EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0 0x188 103 + #define EMAC_SGMII_LN_UCDR_SO_CONFIG 0x194 104 + #define EMAC_SGMII_LN_RX_BAND 0x19C 105 + #define EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0 0x1B8 106 + #define EMAC_SGMII_LN_RSM_CONFIG 0x1F0 107 + #define EMAC_SGMII_LN_SIGDET_ENABLES 0x224 108 + #define EMAC_SGMII_LN_SIGDET_CNTRL 0x228 109 + #define EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL 0x22C 110 + #define EMAC_SGMII_LN_RX_EN_SIGNAL 0x2A0 111 + #define EMAC_SGMII_LN_RX_MISC_CNTRL0 0x2AC 112 + #define EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV 0x2BC 113 + 114 + /* SGMII v2 digital lane register values */ 115 + #define UCDR_STEP_BY_TWO_MODE0 BIT(7) 116 + #define UCDR_xO_GAIN_MODE(x) ((x) & 0x7f) 117 + #define UCDR_ENABLE BIT(6) 118 + #define UCDR_SO_SATURATION(x) ((x) & 0x3f) 119 + #define SIGDET_LP_BYP_PS4 BIT(7) 120 + #define SIGDET_EN_PS0_TO_PS2 BIT(6) 121 + #define EN_ACCOUPLEVCM_SW_MUX BIT(5) 122 + #define EN_ACCOUPLEVCM_SW BIT(4) 123 + #define RX_SYNC_EN BIT(3) 124 + #define RXTERM_HIGHZ_PS5 BIT(2) 125 + #define SIGDET_EN_PS3 BIT(1) 126 + #define EN_ACCOUPLE_VCM_PS3 BIT(0) 127 + #define UFS_MODE BIT(5) 128 + #define TXVAL_VALID_INIT BIT(4) 129 + #define TXVAL_VALID_MUX BIT(3) 130 + #define TXVAL_VALID BIT(2) 131 + #define USB3P1_MODE BIT(1) 132 + #define KR_PCIGEN3_MODE BIT(0) 133 + #define PRE_EN BIT(3) 134 + #define POST_EN BIT(2) 135 + #define MAIN_EN_MUX BIT(1) 136 + #define MAIN_EN BIT(0) 137 + #define TX_MARGINING_MUX BIT(6) 138 + #define TX_MARGINING(x) ((x) & 0x3f) 139 + #define TX_PRE_MUX BIT(6) 140 + #define TX_PRE(x) ((x) & 0x3f) 141 + #define TX_POST_MUX BIT(6) 142 + #define TX_POST(x) ((x) & 0x3f) 143 + #define CML_GEAR_MODE(x) (((x) & 7) << 3) 
144 + #define CML2CMOS_IBOOST_MODE(x) ((x) & 7) 145 + #define MIXER_LOADB_MODE(x) (((x) & 0xf) << 2) 146 + #define MIXER_DATARATE_MODE(x) ((x) & 3) 147 + #define VGA_THRESH_DFE(x) ((x) & 0x3f) 148 + #define SIGDET_LP_BYP_PS0_TO_PS2 BIT(5) 149 + #define SIGDET_LP_BYP_MUX BIT(4) 150 + #define SIGDET_LP_BYP BIT(3) 151 + #define SIGDET_EN_MUX BIT(2) 152 + #define SIGDET_EN BIT(1) 153 + #define SIGDET_FLT_BYP BIT(0) 154 + #define SIGDET_LVL(x) (((x) & 0xf) << 4) 155 + #define SIGDET_BW_CTRL(x) ((x) & 0xf) 156 + #define SIGDET_DEGLITCH_CTRL(x) (((x) & 0xf) << 1) 157 + #define SIGDET_DEGLITCH_BYP BIT(0) 158 + #define INVERT_PCS_RX_CLK BIT(7) 159 + #define PWM_EN BIT(6) 160 + #define RXBIAS_SEL(x) (((x) & 0x3) << 4) 161 + #define EBDAC_SIGN BIT(3) 162 + #define EDAC_SIGN BIT(2) 163 + #define EN_AUXTAP1SIGN_INVERT BIT(1) 164 + #define EN_DAC_CHOPPING BIT(0) 165 + #define DRVR_LOGIC_CLK_EN BIT(4) 166 + #define DRVR_LOGIC_CLK_DIV(x) ((x) & 0xf) 167 + #define PARALLEL_RATE_MODE2(x) (((x) & 0x3) << 4) 168 + #define PARALLEL_RATE_MODE1(x) (((x) & 0x3) << 2) 169 + #define PARALLEL_RATE_MODE0(x) ((x) & 0x3) 170 + #define BAND_MODE2(x) (((x) & 0x3) << 4) 171 + #define BAND_MODE1(x) (((x) & 0x3) << 2) 172 + #define BAND_MODE0(x) ((x) & 0x3) 173 + #define LANE_SYNC_MODE BIT(5) 174 + #define LANE_MODE(x) ((x) & 0x1f) 175 + #define CDR_PD_SEL_MODE0(x) (((x) & 0x3) << 5) 176 + #define EN_DLL_MODE0 BIT(4) 177 + #define EN_IQ_DCC_MODE0 BIT(3) 178 + #define EN_IQCAL_MODE0 BIT(2) 179 + #define EN_QPATH_MODE0 BIT(1) 180 + #define EN_EPATH_MODE0 BIT(0) 181 + #define FORCE_TSYNC_ACK BIT(7) 182 + #define FORCE_CMN_ACK BIT(6) 183 + #define FORCE_CMN_READY BIT(5) 184 + #define EN_RCLK_DEGLITCH BIT(4) 185 + #define BYPASS_RSM_CDR_RESET BIT(3) 186 + #define BYPASS_RSM_TSYNC BIT(2) 187 + #define BYPASS_RSM_SAMP_CAL BIT(1) 188 + #define BYPASS_RSM_DLL_CAL BIT(0) 189 + 190 + /* EMAC_QSERDES_COM_SYS_CLK_CTRL */ 191 + #define SYSCLK_CM BIT(4) 192 + #define SYSCLK_AC_COUPLE BIT(3) 193 + 194 + /* 
EMAC_QSERDES_COM_PLL_CNTRL */ 195 + #define OCP_EN BIT(5) 196 + #define PLL_DIV_FFEN BIT(2) 197 + #define PLL_DIV_ORD BIT(1) 198 + 199 + /* EMAC_QSERDES_COM_SYSCLK_EN_SEL */ 200 + #define SYSCLK_SEL_CMOS BIT(3) 201 + 202 + /* EMAC_QSERDES_COM_RESETSM_CNTRL */ 203 + #define FRQ_TUNE_MODE BIT(4) 204 + 205 + /* EMAC_QSERDES_COM_PLLLOCK_CMP_EN */ 206 + #define PLLLOCK_CMP_EN BIT(0) 207 + 208 + /* EMAC_QSERDES_COM_DEC_START1 */ 209 + #define DEC_START1_MUX BIT(7) 210 + #define DEC_START1(x) ((x) & 0x7f) 211 + 212 + /* EMAC_QSERDES_COM_DIV_FRAC_START1 * EMAC_QSERDES_COM_DIV_FRAC_START2 */ 213 + #define DIV_FRAC_START_MUX BIT(7) 214 + #define DIV_FRAC_START(x) ((x) & 0x7f) 215 + 216 + /* EMAC_QSERDES_COM_DIV_FRAC_START3 */ 217 + #define DIV_FRAC_START3_MUX BIT(4) 218 + #define DIV_FRAC_START3(x) ((x) & 0xf) 219 + 220 + /* EMAC_QSERDES_COM_DEC_START2 */ 221 + #define DEC_START2_MUX BIT(1) 222 + #define DEC_START2 BIT(0) 223 + 224 + /* EMAC_QSERDES_COM_RESET_SM */ 225 + #define READY BIT(5) 226 + 227 + /* EMAC_QSERDES_TX_TX_EMP_POST1_LVL */ 228 + #define TX_EMP_POST1_LVL_MUX BIT(5) 229 + #define TX_EMP_POST1_LVL(x) ((x) & 0x1f) 230 + #define TX_EMP_POST1_LVL_BMSK 0x1f 231 + #define TX_EMP_POST1_LVL_SHFT 0 232 + 233 + /* EMAC_QSERDES_TX_TX_DRV_LVL */ 234 + #define TX_DRV_LVL_MUX BIT(4) 235 + #define TX_DRV_LVL(x) ((x) & 0xf) 236 + 237 + /* EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN */ 238 + #define EMP_EN_MUX BIT(1) 239 + #define EMP_EN BIT(0) 240 + 241 + /* EMAC_QSERDES_RX_CDR_CONTROL & EMAC_QSERDES_RX_CDR_CONTROL2 */ 242 + #define HBW_PD_EN BIT(7) 243 + #define SECONDORDERENABLE BIT(6) 244 + #define FIRSTORDER_THRESH(x) (((x) & 0x7) << 3) 245 + #define SECONDORDERGAIN(x) ((x) & 0x7) 246 + 247 + /* EMAC_QSERDES_RX_RX_EQ_GAIN12 */ 248 + #define RX_EQ_GAIN2(x) (((x) & 0xf) << 4) 249 + #define RX_EQ_GAIN1(x) ((x) & 0xf) 250 + 251 + /* EMAC_SGMII_PHY_SERDES_START */ 252 + #define SERDES_START BIT(0) 253 + 254 + /* EMAC_SGMII_PHY_CMN_PWR_CTRL */ 255 + #define BIAS_EN BIT(6) 256 + #define 
PLL_EN BIT(5) 257 + #define SYSCLK_EN BIT(4) 258 + #define CLKBUF_L_EN BIT(3) 259 + #define PLL_TXCLK_EN BIT(1) 260 + #define PLL_RXCLK_EN BIT(0) 261 + 262 + /* EMAC_SGMII_PHY_RX_PWR_CTRL */ 263 + #define L0_RX_SIGDET_EN BIT(7) 264 + #define L0_RX_TERM_MODE(x) (((x) & 3) << 4) 265 + #define L0_RX_I_EN BIT(1) 266 + 267 + /* EMAC_SGMII_PHY_TX_PWR_CTRL */ 268 + #define L0_TX_EN BIT(5) 269 + #define L0_CLKBUF_EN BIT(4) 270 + #define L0_TRAN_BIAS_EN BIT(1) 271 + 272 + /* EMAC_SGMII_PHY_LANE_CTRL1 */ 273 + #define L0_RX_EQUALIZE_ENABLE BIT(6) 274 + #define L0_RESET_TSYNC_EN BIT(4) 275 + #define L0_DRV_LVL(x) ((x) & 0xf) 276 + 277 + /* EMAC_SGMII_PHY_AUTONEG_CFG2 */ 278 + #define FORCE_AN_TX_CFG BIT(5) 279 + #define FORCE_AN_RX_CFG BIT(4) 280 + #define AN_ENABLE BIT(0) 281 + 282 + /* EMAC_SGMII_PHY_SPEED_CFG1 */ 283 + #define DUPLEX_MODE BIT(4) 284 + #define SPDMODE_1000 BIT(1) 285 + #define SPDMODE_100 BIT(0) 286 + #define SPDMODE_10 0 287 + #define SPDMODE_BMSK 3 288 + #define SPDMODE_SHFT 0 289 + 290 + /* EMAC_SGMII_PHY_POW_DWN_CTRL0 */ 291 + #define PWRDN_B BIT(0) 292 + #define CDR_MAX_CNT(x) ((x) & 0xff) 293 + 294 + /* EMAC_QSERDES_TX_BIST_MODE_LANENO */ 295 + #define BIST_LANE_NUMBER(x) (((x) & 3) << 5) 296 + #define BISTMODE(x) ((x) & 0x1f) 297 + 298 + /* EMAC_QSERDES_COM_PLLLOCK_CMPx */ 299 + #define PLLLOCK_CMP(x) ((x) & 0xff) 300 + 301 + /* EMAC_SGMII_PHY_RESET_CTRL */ 302 + #define PHY_SW_RESET BIT(0) 303 + 304 + /* EMAC_SGMII_PHY_IRQ_CMD */ 305 + #define IRQ_GLOBAL_CLEAR BIT(0) 306 + 307 + /* EMAC_SGMII_PHY_INTERRUPT_MASK */ 308 + #define DECODE_CODE_ERR BIT(7) 309 + #define DECODE_DISP_ERR BIT(6) 310 + #define PLL_UNLOCK BIT(5) 311 + #define AN_ILLEGAL_TERM BIT(4) 312 + #define SYNC_FAIL BIT(3) 313 + #define AN_START BIT(2) 314 + #define AN_END BIT(1) 315 + #define AN_REQUEST BIT(0) 316 + 317 + #define SGMII_PHY_IRQ_CLR_WAIT_TIME 10 318 + 319 + #define SGMII_PHY_INTERRUPT_ERR (\ 320 + DECODE_CODE_ERR |\ 321 + DECODE_DISP_ERR) 322 + 323 + #define 
SGMII_ISR_AN_MASK (\ 324 + AN_REQUEST |\ 325 + AN_START |\ 326 + AN_END |\ 327 + AN_ILLEGAL_TERM |\ 328 + PLL_UNLOCK |\ 329 + SYNC_FAIL) 330 + 331 + #define SGMII_ISR_MASK (\ 332 + SGMII_PHY_INTERRUPT_ERR |\ 333 + SGMII_ISR_AN_MASK) 334 + 335 + /* SGMII TX_CONFIG */ 336 + #define TXCFG_LINK 0x8000 337 + #define TXCFG_MODE_BMSK 0x1c00 338 + #define TXCFG_1000_FULL 0x1800 339 + #define TXCFG_100_FULL 0x1400 340 + #define TXCFG_100_HALF 0x0400 341 + #define TXCFG_10_FULL 0x1000 342 + #define TXCFG_10_HALF 0x0000 343 + 344 + #define SERDES_START_WAIT_TIMES 100 345 + 346 + struct emac_reg_write { 347 + unsigned int offset; 348 + u32 val; 349 + }; 350 + 351 + static void emac_reg_write_all(void __iomem *base, 352 + const struct emac_reg_write *itr, size_t size) 353 + { 354 + size_t i; 355 + 356 + for (i = 0; i < size; ++itr, ++i) 357 + writel(itr->val, base + itr->offset); 358 + } 359 + 360 + static const struct emac_reg_write physical_coding_sublayer_programming_v1[] = { 361 + {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)}, 362 + {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B}, 363 + {EMAC_SGMII_PHY_CMN_PWR_CTRL, 364 + BIAS_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | PLL_RXCLK_EN}, 365 + {EMAC_SGMII_PHY_TX_PWR_CTRL, L0_TX_EN | L0_CLKBUF_EN | L0_TRAN_BIAS_EN}, 366 + {EMAC_SGMII_PHY_RX_PWR_CTRL, 367 + L0_RX_SIGDET_EN | L0_RX_TERM_MODE(1) | L0_RX_I_EN}, 368 + {EMAC_SGMII_PHY_CMN_PWR_CTRL, 369 + BIAS_EN | PLL_EN | SYSCLK_EN | CLKBUF_L_EN | PLL_TXCLK_EN | 370 + PLL_RXCLK_EN}, 371 + {EMAC_SGMII_PHY_LANE_CTRL1, 372 + L0_RX_EQUALIZE_ENABLE | L0_RESET_TSYNC_EN | L0_DRV_LVL(15)}, 373 + }; 374 + 375 + static const struct emac_reg_write sysclk_refclk_setting[] = { 376 + {EMAC_QSERDES_COM_SYSCLK_EN_SEL, SYSCLK_SEL_CMOS}, 377 + {EMAC_QSERDES_COM_SYS_CLK_CTRL, SYSCLK_CM | SYSCLK_AC_COUPLE}, 378 + }; 379 + 380 + static const struct emac_reg_write pll_setting[] = { 381 + {EMAC_QSERDES_COM_PLL_IP_SETI, PLL_IPSETI(1)}, 382 + {EMAC_QSERDES_COM_PLL_CP_SETI, PLL_CPSETI(59)}, 383 + 
{EMAC_QSERDES_COM_PLL_IP_SETP, PLL_IPSETP(10)}, 384 + {EMAC_QSERDES_COM_PLL_CP_SETP, PLL_CPSETP(9)}, 385 + {EMAC_QSERDES_COM_PLL_CRCTRL, PLL_RCTRL(15) | PLL_CCTRL(11)}, 386 + {EMAC_QSERDES_COM_PLL_CNTRL, OCP_EN | PLL_DIV_FFEN | PLL_DIV_ORD}, 387 + {EMAC_QSERDES_COM_DEC_START1, DEC_START1_MUX | DEC_START1(2)}, 388 + {EMAC_QSERDES_COM_DEC_START2, DEC_START2_MUX | DEC_START2}, 389 + {EMAC_QSERDES_COM_DIV_FRAC_START1, 390 + DIV_FRAC_START_MUX | DIV_FRAC_START(85)}, 391 + {EMAC_QSERDES_COM_DIV_FRAC_START2, 392 + DIV_FRAC_START_MUX | DIV_FRAC_START(42)}, 393 + {EMAC_QSERDES_COM_DIV_FRAC_START3, 394 + DIV_FRAC_START3_MUX | DIV_FRAC_START3(3)}, 395 + {EMAC_QSERDES_COM_PLLLOCK_CMP1, PLLLOCK_CMP(43)}, 396 + {EMAC_QSERDES_COM_PLLLOCK_CMP2, PLLLOCK_CMP(104)}, 397 + {EMAC_QSERDES_COM_PLLLOCK_CMP3, PLLLOCK_CMP(0)}, 398 + {EMAC_QSERDES_COM_PLLLOCK_CMP_EN, PLLLOCK_CMP_EN}, 399 + {EMAC_QSERDES_COM_RESETSM_CNTRL, FRQ_TUNE_MODE}, 400 + }; 401 + 402 + static const struct emac_reg_write cdr_setting[] = { 403 + {EMAC_QSERDES_RX_CDR_CONTROL, 404 + SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(2)}, 405 + {EMAC_QSERDES_RX_CDR_CONTROL2, 406 + SECONDORDERENABLE | FIRSTORDER_THRESH(3) | SECONDORDERGAIN(4)}, 407 + }; 408 + 409 + static const struct emac_reg_write tx_rx_setting[] = { 410 + {EMAC_QSERDES_TX_BIST_MODE_LANENO, 0}, 411 + {EMAC_QSERDES_TX_TX_DRV_LVL, TX_DRV_LVL_MUX | TX_DRV_LVL(15)}, 412 + {EMAC_QSERDES_TX_TRAN_DRVR_EMP_EN, EMP_EN_MUX | EMP_EN}, 413 + {EMAC_QSERDES_TX_TX_EMP_POST1_LVL, 414 + TX_EMP_POST1_LVL_MUX | TX_EMP_POST1_LVL(1)}, 415 + {EMAC_QSERDES_RX_RX_EQ_GAIN12, RX_EQ_GAIN2(15) | RX_EQ_GAIN1(15)}, 416 + {EMAC_QSERDES_TX_LANE_MODE, LANE_MODE(8)}, 417 + }; 418 + 419 + static const struct emac_reg_write sgmii_v2_laned[] = { 420 + /* CDR Settings */ 421 + {EMAC_SGMII_LN_UCDR_FO_GAIN_MODE0, 422 + UCDR_STEP_BY_TWO_MODE0 | UCDR_xO_GAIN_MODE(10)}, 423 + {EMAC_SGMII_LN_UCDR_SO_GAIN_MODE0, UCDR_xO_GAIN_MODE(6)}, 424 + {EMAC_SGMII_LN_UCDR_SO_CONFIG, UCDR_ENABLE | 
UCDR_SO_SATURATION(12)}, 425 + 426 + /* TX/RX Settings */ 427 + {EMAC_SGMII_LN_RX_EN_SIGNAL, SIGDET_LP_BYP_PS4 | SIGDET_EN_PS0_TO_PS2}, 428 + 429 + {EMAC_SGMII_LN_DRVR_CTRL0, TXVAL_VALID_INIT | KR_PCIGEN3_MODE}, 430 + {EMAC_SGMII_LN_DRVR_TAP_EN, MAIN_EN}, 431 + {EMAC_SGMII_LN_TX_MARGINING, TX_MARGINING_MUX | TX_MARGINING(25)}, 432 + {EMAC_SGMII_LN_TX_PRE, TX_PRE_MUX}, 433 + {EMAC_SGMII_LN_TX_POST, TX_POST_MUX}, 434 + 435 + {EMAC_SGMII_LN_CML_CTRL_MODE0, 436 + CML_GEAR_MODE(1) | CML2CMOS_IBOOST_MODE(1)}, 437 + {EMAC_SGMII_LN_MIXER_CTRL_MODE0, 438 + MIXER_LOADB_MODE(12) | MIXER_DATARATE_MODE(1)}, 439 + {EMAC_SGMII_LN_VGA_INITVAL, VGA_THRESH_DFE(31)}, 440 + {EMAC_SGMII_LN_SIGDET_ENABLES, 441 + SIGDET_LP_BYP_PS0_TO_PS2 | SIGDET_FLT_BYP}, 442 + {EMAC_SGMII_LN_SIGDET_CNTRL, SIGDET_LVL(8)}, 443 + 444 + {EMAC_SGMII_LN_SIGDET_DEGLITCH_CNTRL, SIGDET_DEGLITCH_CTRL(4)}, 445 + {EMAC_SGMII_LN_RX_MISC_CNTRL0, 0}, 446 + {EMAC_SGMII_LN_DRVR_LOGIC_CLKDIV, 447 + DRVR_LOGIC_CLK_EN | DRVR_LOGIC_CLK_DIV(4)}, 448 + 449 + {EMAC_SGMII_LN_PARALLEL_RATE, PARALLEL_RATE_MODE0(1)}, 450 + {EMAC_SGMII_LN_TX_BAND_MODE, BAND_MODE0(2)}, 451 + {EMAC_SGMII_LN_RX_BAND, BAND_MODE0(3)}, 452 + {EMAC_SGMII_LN_LANE_MODE, LANE_MODE(26)}, 453 + {EMAC_SGMII_LN_RX_RCVR_PATH1_MODE0, CDR_PD_SEL_MODE0(3)}, 454 + {EMAC_SGMII_LN_RSM_CONFIG, BYPASS_RSM_SAMP_CAL | BYPASS_RSM_DLL_CAL}, 455 + }; 456 + 457 + static const struct emac_reg_write physical_coding_sublayer_programming_v2[] = { 458 + {EMAC_SGMII_PHY_POW_DWN_CTRL0, PWRDN_B}, 459 + {EMAC_SGMII_PHY_CDR_CTRL0, CDR_MAX_CNT(15)}, 460 + {EMAC_SGMII_PHY_TX_PWR_CTRL, 0}, 461 + {EMAC_SGMII_PHY_LANE_CTRL1, L0_RX_EQUALIZE_ENABLE}, 462 + }; 463 + 464 + static int emac_sgmii_link_init(struct emac_adapter *adpt) 465 + { 466 + struct phy_device *phydev = adpt->phydev; 467 + struct emac_phy *phy = &adpt->phy; 468 + u32 val; 469 + 470 + val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); 471 + 472 + if (phydev->autoneg == AUTONEG_ENABLE) { 473 + val &= ~(FORCE_AN_RX_CFG | 
FORCE_AN_TX_CFG); 474 + val |= AN_ENABLE; 475 + writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); 476 + } else { 477 + u32 speed_cfg; 478 + 479 + switch (phydev->speed) { 480 + case SPEED_10: 481 + speed_cfg = SPDMODE_10; 482 + break; 483 + case SPEED_100: 484 + speed_cfg = SPDMODE_100; 485 + break; 486 + case SPEED_1000: 487 + speed_cfg = SPDMODE_1000; 488 + break; 489 + default: 490 + return -EINVAL; 491 + } 492 + 493 + if (phydev->duplex == DUPLEX_FULL) 494 + speed_cfg |= DUPLEX_MODE; 495 + 496 + val &= ~AN_ENABLE; 497 + writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1); 498 + writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2); 499 + } 500 + 501 + return 0; 502 + } 503 + 504 + static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits) 505 + { 506 + struct emac_phy *phy = &adpt->phy; 507 + u32 status; 508 + 509 + writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); 510 + writel_relaxed(IRQ_GLOBAL_CLEAR, phy->base + EMAC_SGMII_PHY_IRQ_CMD); 511 + /* Ensure interrupt clear command is written to HW */ 512 + wmb(); 513 + 514 + /* After set the IRQ_GLOBAL_CLEAR bit, the status clearing must 515 + * be confirmed before clearing the bits in other registers. 516 + * It takes a few cycles for hw to clear the interrupt status. 
517 + */ 518 + if (readl_poll_timeout_atomic(phy->base + 519 + EMAC_SGMII_PHY_INTERRUPT_STATUS, 520 + status, !(status & irq_bits), 1, 521 + SGMII_PHY_IRQ_CLR_WAIT_TIME)) { 522 + netdev_err(adpt->netdev, 523 + "error: failed clear SGMII irq: status:0x%x bits:0x%x\n", 524 + status, irq_bits); 525 + return -EIO; 526 + } 527 + 528 + /* Finalize clearing procedure */ 529 + writel_relaxed(0, phy->base + EMAC_SGMII_PHY_IRQ_CMD); 530 + writel_relaxed(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR); 531 + 532 + /* Ensure that clearing procedure finalization is written to HW */ 533 + wmb(); 534 + 535 + return 0; 536 + } 537 + 538 + int emac_sgmii_init_v1(struct emac_adapter *adpt) 539 + { 540 + struct emac_phy *phy = &adpt->phy; 541 + unsigned int i; 542 + int ret; 543 + 544 + ret = emac_sgmii_link_init(adpt); 545 + if (ret) 546 + return ret; 547 + 548 + emac_reg_write_all(phy->base, physical_coding_sublayer_programming_v1, 549 + ARRAY_SIZE(physical_coding_sublayer_programming_v1)); 550 + emac_reg_write_all(phy->base, sysclk_refclk_setting, 551 + ARRAY_SIZE(sysclk_refclk_setting)); 552 + emac_reg_write_all(phy->base, pll_setting, ARRAY_SIZE(pll_setting)); 553 + emac_reg_write_all(phy->base, cdr_setting, ARRAY_SIZE(cdr_setting)); 554 + emac_reg_write_all(phy->base, tx_rx_setting, 555 + ARRAY_SIZE(tx_rx_setting)); 556 + 557 + /* Power up the Ser/Des engine */ 558 + writel(SERDES_START, phy->base + EMAC_SGMII_PHY_SERDES_START); 559 + 560 + for (i = 0; i < SERDES_START_WAIT_TIMES; i++) { 561 + if (readl(phy->base + EMAC_QSERDES_COM_RESET_SM) & READY) 562 + break; 563 + usleep_range(100, 200); 564 + } 565 + 566 + if (i == SERDES_START_WAIT_TIMES) { 567 + netdev_err(adpt->netdev, "error: ser/des failed to start\n"); 568 + return -EIO; 569 + } 570 + /* Mask out all the SGMII Interrupt */ 571 + writel(0, phy->base + EMAC_SGMII_PHY_INTERRUPT_MASK); 572 + 573 + emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); 574 + 575 + return 0; 576 + } 577 + 578 + int emac_sgmii_init_v2(struct 
emac_adapter *adpt) 579 + { 580 + struct emac_phy *phy = &adpt->phy; 581 + void __iomem *phy_regs = phy->base; 582 + void __iomem *laned = phy->digital; 583 + unsigned int i; 584 + u32 lnstatus; 585 + int ret; 586 + 587 + ret = emac_sgmii_link_init(adpt); 588 + if (ret) 589 + return ret; 590 + 591 + /* PCS lane-x init */ 592 + emac_reg_write_all(phy->base, physical_coding_sublayer_programming_v2, 593 + ARRAY_SIZE(physical_coding_sublayer_programming_v2)); 594 + 595 + /* SGMII lane-x init */ 596 + emac_reg_write_all(phy->digital, 597 + sgmii_v2_laned, ARRAY_SIZE(sgmii_v2_laned)); 598 + 599 + /* Power up PCS and start reset lane state machine */ 600 + 601 + writel(0, phy_regs + EMAC_SGMII_PHY_RESET_CTRL); 602 + writel(1, laned + SGMII_LN_RSM_START); 603 + 604 + /* Wait for c_ready assertion */ 605 + for (i = 0; i < SERDES_START_WAIT_TIMES; i++) { 606 + lnstatus = readl(phy_regs + SGMII_PHY_LN_LANE_STATUS); 607 + if (lnstatus & BIT(1)) 608 + break; 609 + usleep_range(100, 200); 610 + } 611 + 612 + if (i == SERDES_START_WAIT_TIMES) { 613 + netdev_err(adpt->netdev, "SGMII failed to start\n"); 614 + return -EIO; 615 + } 616 + 617 + /* Disable digital and SERDES loopback */ 618 + writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN0); 619 + writel(0, phy_regs + SGMII_PHY_LN_BIST_GEN2); 620 + writel(0, phy_regs + SGMII_PHY_LN_CDR_CTRL1); 621 + 622 + /* Mask out all the SGMII Interrupt */ 623 + writel(0, phy_regs + EMAC_SGMII_PHY_INTERRUPT_MASK); 624 + 625 + emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR); 626 + 627 + return 0; 628 + } 629 + 630 + static void emac_sgmii_reset_prepare(struct emac_adapter *adpt) 631 + { 632 + struct emac_phy *phy = &adpt->phy; 633 + u32 val; 634 + 635 + /* Reset PHY */ 636 + val = readl(phy->base + EMAC_EMAC_WRAPPER_CSR2); 637 + writel(((val & ~PHY_RESET) | PHY_RESET), phy->base + 638 + EMAC_EMAC_WRAPPER_CSR2); 639 + /* Ensure phy-reset command is written to HW before the release cmd */ 640 + msleep(50); 641 + val = readl(phy->base + 
EMAC_EMAC_WRAPPER_CSR2); 642 + writel((val & ~PHY_RESET), phy->base + EMAC_EMAC_WRAPPER_CSR2); 643 + /* Ensure phy-reset release command is written to HW before initializing 644 + * SGMII 645 + */ 646 + msleep(50); 647 + } 648 + 649 + void emac_sgmii_reset(struct emac_adapter *adpt) 650 + { 651 + int ret; 652 + 653 + clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); 654 + emac_sgmii_reset_prepare(adpt); 655 + 656 + ret = adpt->phy.initialize(adpt); 657 + if (ret) 658 + netdev_err(adpt->netdev, 659 + "could not reinitialize internal PHY (error=%i)\n", 660 + ret); 661 + 662 + clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000); 663 + } 664 + 665 + static const struct of_device_id emac_sgmii_dt_match[] = { 666 + { 667 + .compatible = "qcom,fsm9900-emac-sgmii", 668 + .data = emac_sgmii_init_v1, 669 + }, 670 + { 671 + .compatible = "qcom,qdf2432-emac-sgmii", 672 + .data = emac_sgmii_init_v2, 673 + }, 674 + {} 675 + }; 676 + 677 + int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt) 678 + { 679 + struct platform_device *sgmii_pdev = NULL; 680 + struct emac_phy *phy = &adpt->phy; 681 + struct resource *res; 682 + const struct of_device_id *match; 683 + struct device_node *np; 684 + 685 + np = of_parse_phandle(pdev->dev.of_node, "internal-phy", 0); 686 + if (!np) { 687 + dev_err(&pdev->dev, "missing internal-phy property\n"); 688 + return -ENODEV; 689 + } 690 + 691 + sgmii_pdev = of_find_device_by_node(np); 692 + if (!sgmii_pdev) { 693 + dev_err(&pdev->dev, "invalid internal-phy property\n"); 694 + return -ENODEV; 695 + } 696 + 697 + match = of_match_device(emac_sgmii_dt_match, &sgmii_pdev->dev); 698 + if (!match) { 699 + dev_err(&pdev->dev, "unrecognized internal phy node\n"); 700 + return -ENODEV; 701 + } 702 + 703 + phy->initialize = (emac_sgmii_initialize)match->data; 704 + 705 + /* Base address is the first address */ 706 + res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0); 707 + phy->base = 
devm_ioremap_resource(&sgmii_pdev->dev, res); 708 + if (IS_ERR(phy->base)) 709 + return PTR_ERR(phy->base); 710 + 711 + /* v2 SGMII has a per-lane digital digital, so parse it if it exists */ 712 + res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 1); 713 + if (res) { 714 + phy->digital = devm_ioremap_resource(&sgmii_pdev->dev, res); 715 + if (IS_ERR(phy->base)) 716 + return PTR_ERR(phy->base); 717 + 718 + } 719 + 720 + return phy->initialize(adpt); 721 + }
+24
drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
··· 1 + /* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved. 2 + * 3 + * This program is free software; you can redistribute it and/or modify 4 + * it under the terms of the GNU General Public License version 2 and 5 + * only version 2 as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope that it will be useful, 8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 + * GNU General Public License for more details. 11 + */ 12 + 13 + #ifndef _EMAC_SGMII_H_ 14 + #define _EMAC_SGMII_H_ 15 + 16 + struct emac_adapter; 17 + struct platform_device; 18 + 19 + int emac_sgmii_init_v1(struct emac_adapter *adpt); 20 + int emac_sgmii_init_v2(struct emac_adapter *adpt); 21 + int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt); 22 + void emac_sgmii_reset(struct emac_adapter *adpt); 23 + 24 + #endif
+743
drivers/net/ethernet/qualcomm/emac/emac.c
··· 1 + /* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 2 + * 3 + * This program is free software; you can redistribute it and/or modify 4 + * it under the terms of the GNU General Public License version 2 and 5 + * only version 2 as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope that it will be useful, 8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 + * GNU General Public License for more details. 11 + */ 12 + 13 + /* Qualcomm Technologies, Inc. EMAC Gigabit Ethernet Driver */ 14 + 15 + #include <linux/if_ether.h> 16 + #include <linux/if_vlan.h> 17 + #include <linux/interrupt.h> 18 + #include <linux/io.h> 19 + #include <linux/module.h> 20 + #include <linux/of.h> 21 + #include <linux/of_net.h> 22 + #include <linux/of_device.h> 23 + #include <linux/phy.h> 24 + #include <linux/platform_device.h> 25 + #include "emac.h" 26 + #include "emac-mac.h" 27 + #include "emac-phy.h" 28 + #include "emac-sgmii.h" 29 + 30 + #define EMAC_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \ 31 + NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP) 32 + 33 + #define EMAC_RRD_SIZE 4 34 + /* The RRD size if timestamping is enabled: */ 35 + #define EMAC_TS_RRD_SIZE 6 36 + #define EMAC_TPD_SIZE 4 37 + #define EMAC_RFD_SIZE 2 38 + 39 + #define REG_MAC_RX_STATUS_BIN EMAC_RXMAC_STATC_REG0 40 + #define REG_MAC_RX_STATUS_END EMAC_RXMAC_STATC_REG22 41 + #define REG_MAC_TX_STATUS_BIN EMAC_TXMAC_STATC_REG0 42 + #define REG_MAC_TX_STATUS_END EMAC_TXMAC_STATC_REG24 43 + 44 + #define RXQ0_NUM_RFD_PREF_DEF 8 45 + #define TXQ0_NUM_TPD_PREF_DEF 5 46 + 47 + #define EMAC_PREAMBLE_DEF 7 48 + 49 + #define DMAR_DLY_CNT_DEF 15 50 + #define DMAW_DLY_CNT_DEF 4 51 + 52 + #define IMR_NORMAL_MASK (\ 53 + ISR_ERROR |\ 54 + ISR_GPHY_LINK |\ 55 + ISR_TX_PKT |\ 56 + GPHY_WAKEUP_INT) 57 + 58 + #define IMR_EXTENDED_MASK (\ 59 + SW_MAN_INT |\ 60 + 
ISR_OVER |\ 61 + ISR_ERROR |\ 62 + ISR_GPHY_LINK |\ 63 + ISR_TX_PKT |\ 64 + GPHY_WAKEUP_INT) 65 + 66 + #define ISR_TX_PKT (\ 67 + TX_PKT_INT |\ 68 + TX_PKT_INT1 |\ 69 + TX_PKT_INT2 |\ 70 + TX_PKT_INT3) 71 + 72 + #define ISR_GPHY_LINK (\ 73 + GPHY_LINK_UP_INT |\ 74 + GPHY_LINK_DOWN_INT) 75 + 76 + #define ISR_OVER (\ 77 + RFD0_UR_INT |\ 78 + RFD1_UR_INT |\ 79 + RFD2_UR_INT |\ 80 + RFD3_UR_INT |\ 81 + RFD4_UR_INT |\ 82 + RXF_OF_INT |\ 83 + TXF_UR_INT) 84 + 85 + #define ISR_ERROR (\ 86 + DMAR_TO_INT |\ 87 + DMAW_TO_INT |\ 88 + TXQ_TO_INT) 89 + 90 + /* in sync with enum emac_clk_id */ 91 + static const char * const emac_clk_name[] = { 92 + "axi_clk", "cfg_ahb_clk", "high_speed_clk", "mdio_clk", "tx_clk", 93 + "rx_clk", "sys_clk" 94 + }; 95 + 96 + void emac_reg_update32(void __iomem *addr, u32 mask, u32 val) 97 + { 98 + u32 data = readl(addr); 99 + 100 + writel(((data & ~mask) | val), addr); 101 + } 102 + 103 + /* reinitialize */ 104 + int emac_reinit_locked(struct emac_adapter *adpt) 105 + { 106 + int ret; 107 + 108 + mutex_lock(&adpt->reset_lock); 109 + 110 + emac_mac_down(adpt); 111 + emac_sgmii_reset(adpt); 112 + ret = emac_mac_up(adpt); 113 + 114 + mutex_unlock(&adpt->reset_lock); 115 + 116 + return ret; 117 + } 118 + 119 + /* NAPI */ 120 + static int emac_napi_rtx(struct napi_struct *napi, int budget) 121 + { 122 + struct emac_rx_queue *rx_q = 123 + container_of(napi, struct emac_rx_queue, napi); 124 + struct emac_adapter *adpt = netdev_priv(rx_q->netdev); 125 + struct emac_irq *irq = rx_q->irq; 126 + int work_done = 0; 127 + 128 + emac_mac_rx_process(adpt, rx_q, &work_done, budget); 129 + 130 + if (work_done < budget) { 131 + napi_complete(napi); 132 + 133 + irq->mask |= rx_q->intr; 134 + writel(irq->mask, adpt->base + EMAC_INT_MASK); 135 + } 136 + 137 + return work_done; 138 + } 139 + 140 + /* Transmit the packet */ 141 + static int emac_start_xmit(struct sk_buff *skb, struct net_device *netdev) 142 + { 143 + struct emac_adapter *adpt = netdev_priv(netdev); 144 + 
145 + return emac_mac_tx_buf_send(adpt, &adpt->tx_q, skb); 146 + } 147 + 148 + irqreturn_t emac_isr(int _irq, void *data) 149 + { 150 + struct emac_irq *irq = data; 151 + struct emac_adapter *adpt = 152 + container_of(irq, struct emac_adapter, irq); 153 + struct emac_rx_queue *rx_q = &adpt->rx_q; 154 + u32 isr, status; 155 + 156 + /* disable the interrupt */ 157 + writel(0, adpt->base + EMAC_INT_MASK); 158 + 159 + isr = readl_relaxed(adpt->base + EMAC_INT_STATUS); 160 + 161 + status = isr & irq->mask; 162 + if (status == 0) 163 + goto exit; 164 + 165 + if (status & ISR_ERROR) { 166 + netif_warn(adpt, intr, adpt->netdev, 167 + "warning: error irq status 0x%lx\n", 168 + status & ISR_ERROR); 169 + /* reset MAC */ 170 + schedule_work(&adpt->work_thread); 171 + } 172 + 173 + /* Schedule the napi for receive queue with interrupt 174 + * status bit set 175 + */ 176 + if (status & rx_q->intr) { 177 + if (napi_schedule_prep(&rx_q->napi)) { 178 + irq->mask &= ~rx_q->intr; 179 + __napi_schedule(&rx_q->napi); 180 + } 181 + } 182 + 183 + if (status & TX_PKT_INT) 184 + emac_mac_tx_process(adpt, &adpt->tx_q); 185 + 186 + if (status & ISR_OVER) 187 + net_warn_ratelimited("warning: TX/RX overflow\n"); 188 + 189 + /* link event */ 190 + if (status & ISR_GPHY_LINK) 191 + phy_mac_interrupt(adpt->phydev, !!(status & GPHY_LINK_UP_INT)); 192 + 193 + exit: 194 + /* enable the interrupt */ 195 + writel(irq->mask, adpt->base + EMAC_INT_MASK); 196 + 197 + return IRQ_HANDLED; 198 + } 199 + 200 + /* Configure VLAN tag strip/insert feature */ 201 + static int emac_set_features(struct net_device *netdev, 202 + netdev_features_t features) 203 + { 204 + netdev_features_t changed = features ^ netdev->features; 205 + struct emac_adapter *adpt = netdev_priv(netdev); 206 + 207 + /* We only need to reprogram the hardware if the VLAN tag features 208 + * have changed, and if it's already running. 
209 + */ 210 + if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX))) 211 + return 0; 212 + 213 + if (!netif_running(netdev)) 214 + return 0; 215 + 216 + /* emac_mac_mode_config() uses netdev->features to configure the EMAC, 217 + * so make sure it's set first. 218 + */ 219 + netdev->features = features; 220 + 221 + return emac_reinit_locked(adpt); 222 + } 223 + 224 + /* Configure Multicast and Promiscuous modes */ 225 + static void emac_rx_mode_set(struct net_device *netdev) 226 + { 227 + struct emac_adapter *adpt = netdev_priv(netdev); 228 + struct netdev_hw_addr *ha; 229 + 230 + emac_mac_mode_config(adpt); 231 + 232 + /* update multicast address filtering */ 233 + emac_mac_multicast_addr_clear(adpt); 234 + netdev_for_each_mc_addr(ha, netdev) 235 + emac_mac_multicast_addr_set(adpt, ha->addr); 236 + } 237 + 238 + /* Change the Maximum Transfer Unit (MTU) */ 239 + static int emac_change_mtu(struct net_device *netdev, int new_mtu) 240 + { 241 + unsigned int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; 242 + struct emac_adapter *adpt = netdev_priv(netdev); 243 + 244 + if ((max_frame < EMAC_MIN_ETH_FRAME_SIZE) || 245 + (max_frame > EMAC_MAX_ETH_FRAME_SIZE)) { 246 + netdev_err(adpt->netdev, "error: invalid MTU setting\n"); 247 + return -EINVAL; 248 + } 249 + 250 + netif_info(adpt, hw, adpt->netdev, 251 + "changing MTU from %d to %d\n", netdev->mtu, 252 + new_mtu); 253 + netdev->mtu = new_mtu; 254 + 255 + if (netif_running(netdev)) 256 + return emac_reinit_locked(adpt); 257 + 258 + return 0; 259 + } 260 + 261 + /* Called when the network interface is made active */ 262 + static int emac_open(struct net_device *netdev) 263 + { 264 + struct emac_adapter *adpt = netdev_priv(netdev); 265 + int ret; 266 + 267 + /* allocate rx/tx dma buffer & descriptors */ 268 + ret = emac_mac_rx_tx_rings_alloc_all(adpt); 269 + if (ret) { 270 + netdev_err(adpt->netdev, "error allocating rx/tx rings\n"); 271 + return ret; 272 + } 273 + 274 + ret = 
emac_mac_up(adpt); 275 + if (ret) { 276 + emac_mac_rx_tx_rings_free_all(adpt); 277 + return ret; 278 + } 279 + 280 + emac_mac_start(adpt); 281 + 282 + return 0; 283 + } 284 + 285 + /* Called when the network interface is disabled */ 286 + static int emac_close(struct net_device *netdev) 287 + { 288 + struct emac_adapter *adpt = netdev_priv(netdev); 289 + 290 + mutex_lock(&adpt->reset_lock); 291 + 292 + emac_mac_down(adpt); 293 + emac_mac_rx_tx_rings_free_all(adpt); 294 + 295 + mutex_unlock(&adpt->reset_lock); 296 + 297 + return 0; 298 + } 299 + 300 + /* Respond to a TX hang */ 301 + static void emac_tx_timeout(struct net_device *netdev) 302 + { 303 + struct emac_adapter *adpt = netdev_priv(netdev); 304 + 305 + schedule_work(&adpt->work_thread); 306 + } 307 + 308 + /* IOCTL support for the interface */ 309 + static int emac_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) 310 + { 311 + if (!netif_running(netdev)) 312 + return -EINVAL; 313 + 314 + if (!netdev->phydev) 315 + return -ENODEV; 316 + 317 + return phy_mii_ioctl(netdev->phydev, ifr, cmd); 318 + } 319 + 320 + /* Provide network statistics info for the interface */ 321 + static struct rtnl_link_stats64 *emac_get_stats64(struct net_device *netdev, 322 + struct rtnl_link_stats64 *net_stats) 323 + { 324 + struct emac_adapter *adpt = netdev_priv(netdev); 325 + unsigned int addr = REG_MAC_RX_STATUS_BIN; 326 + struct emac_stats *stats = &adpt->stats; 327 + u64 *stats_itr = &adpt->stats.rx_ok; 328 + u32 val; 329 + 330 + spin_lock(&stats->lock); 331 + 332 + while (addr <= REG_MAC_RX_STATUS_END) { 333 + val = readl_relaxed(adpt->base + addr); 334 + *stats_itr += val; 335 + stats_itr++; 336 + addr += sizeof(u32); 337 + } 338 + 339 + /* additional rx status */ 340 + val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG23); 341 + adpt->stats.rx_crc_align += val; 342 + val = readl_relaxed(adpt->base + EMAC_RXMAC_STATC_REG24); 343 + adpt->stats.rx_jabbers += val; 344 + 345 + /* update tx status */ 346 + addr = 
REG_MAC_TX_STATUS_BIN; 347 + stats_itr = &adpt->stats.tx_ok; 348 + 349 + while (addr <= REG_MAC_TX_STATUS_END) { 350 + val = readl_relaxed(adpt->base + addr); 351 + *stats_itr += val; 352 + ++stats_itr; 353 + addr += sizeof(u32); 354 + } 355 + 356 + /* additional tx status */ 357 + val = readl_relaxed(adpt->base + EMAC_TXMAC_STATC_REG25); 358 + adpt->stats.tx_col += val; 359 + 360 + /* return parsed statistics */ 361 + net_stats->rx_packets = stats->rx_ok; 362 + net_stats->tx_packets = stats->tx_ok; 363 + net_stats->rx_bytes = stats->rx_byte_cnt; 364 + net_stats->tx_bytes = stats->tx_byte_cnt; 365 + net_stats->multicast = stats->rx_mcast; 366 + net_stats->collisions = stats->tx_1_col + stats->tx_2_col * 2 + 367 + stats->tx_late_col + stats->tx_abort_col; 368 + 369 + net_stats->rx_errors = stats->rx_frag + stats->rx_fcs_err + 370 + stats->rx_len_err + stats->rx_sz_ov + 371 + stats->rx_align_err; 372 + net_stats->rx_fifo_errors = stats->rx_rxf_ov; 373 + net_stats->rx_length_errors = stats->rx_len_err; 374 + net_stats->rx_crc_errors = stats->rx_fcs_err; 375 + net_stats->rx_frame_errors = stats->rx_align_err; 376 + net_stats->rx_over_errors = stats->rx_rxf_ov; 377 + net_stats->rx_missed_errors = stats->rx_rxf_ov; 378 + 379 + net_stats->tx_errors = stats->tx_late_col + stats->tx_abort_col + 380 + stats->tx_underrun + stats->tx_trunc; 381 + net_stats->tx_fifo_errors = stats->tx_underrun; 382 + net_stats->tx_aborted_errors = stats->tx_abort_col; 383 + net_stats->tx_window_errors = stats->tx_late_col; 384 + 385 + spin_unlock(&stats->lock); 386 + 387 + return net_stats; 388 + } 389 + 390 + static const struct net_device_ops emac_netdev_ops = { 391 + .ndo_open = emac_open, 392 + .ndo_stop = emac_close, 393 + .ndo_validate_addr = eth_validate_addr, 394 + .ndo_start_xmit = emac_start_xmit, 395 + .ndo_set_mac_address = eth_mac_addr, 396 + .ndo_change_mtu = emac_change_mtu, 397 + .ndo_do_ioctl = emac_ioctl, 398 + .ndo_tx_timeout = emac_tx_timeout, 399 + .ndo_get_stats64 = 
emac_get_stats64, 400 + .ndo_set_features = emac_set_features, 401 + .ndo_set_rx_mode = emac_rx_mode_set, 402 + }; 403 + 404 + /* Watchdog task routine, called to reinitialize the EMAC */ 405 + static void emac_work_thread(struct work_struct *work) 406 + { 407 + struct emac_adapter *adpt = 408 + container_of(work, struct emac_adapter, work_thread); 409 + 410 + emac_reinit_locked(adpt); 411 + } 412 + 413 + /* Initialize various data structures */ 414 + static void emac_init_adapter(struct emac_adapter *adpt) 415 + { 416 + u32 reg; 417 + 418 + /* descriptors */ 419 + adpt->tx_desc_cnt = EMAC_DEF_TX_DESCS; 420 + adpt->rx_desc_cnt = EMAC_DEF_RX_DESCS; 421 + 422 + /* dma */ 423 + adpt->dma_order = emac_dma_ord_out; 424 + adpt->dmar_block = emac_dma_req_4096; 425 + adpt->dmaw_block = emac_dma_req_128; 426 + adpt->dmar_dly_cnt = DMAR_DLY_CNT_DEF; 427 + adpt->dmaw_dly_cnt = DMAW_DLY_CNT_DEF; 428 + adpt->tpd_burst = TXQ0_NUM_TPD_PREF_DEF; 429 + adpt->rfd_burst = RXQ0_NUM_RFD_PREF_DEF; 430 + 431 + /* irq moderator */ 432 + reg = ((EMAC_DEF_RX_IRQ_MOD >> 1) << IRQ_MODERATOR2_INIT_SHFT) | 433 + ((EMAC_DEF_TX_IRQ_MOD >> 1) << IRQ_MODERATOR_INIT_SHFT); 434 + adpt->irq_mod = reg; 435 + 436 + /* others */ 437 + adpt->preamble = EMAC_PREAMBLE_DEF; 438 + } 439 + 440 + /* Get the clock */ 441 + static int emac_clks_get(struct platform_device *pdev, 442 + struct emac_adapter *adpt) 443 + { 444 + unsigned int i; 445 + 446 + for (i = 0; i < EMAC_CLK_CNT; i++) { 447 + struct clk *clk = devm_clk_get(&pdev->dev, emac_clk_name[i]); 448 + 449 + if (IS_ERR(clk)) { 450 + dev_err(&pdev->dev, 451 + "could not claim clock %s (error=%li)\n", 452 + emac_clk_name[i], PTR_ERR(clk)); 453 + 454 + return PTR_ERR(clk); 455 + } 456 + 457 + adpt->clk[i] = clk; 458 + } 459 + 460 + return 0; 461 + } 462 + 463 + /* Initialize clocks */ 464 + static int emac_clks_phase1_init(struct platform_device *pdev, 465 + struct emac_adapter *adpt) 466 + { 467 + int ret; 468 + 469 + ret = emac_clks_get(pdev, adpt); 470 + 
if (ret) 471 + return ret; 472 + 473 + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_AXI]); 474 + if (ret) 475 + return ret; 476 + 477 + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]); 478 + if (ret) 479 + return ret; 480 + 481 + ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000); 482 + if (ret) 483 + return ret; 484 + 485 + return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]); 486 + } 487 + 488 + /* Enable clocks; needs emac_clks_phase1_init to be called before */ 489 + static int emac_clks_phase2_init(struct platform_device *pdev, 490 + struct emac_adapter *adpt) 491 + { 492 + int ret; 493 + 494 + ret = clk_set_rate(adpt->clk[EMAC_CLK_TX], 125000000); 495 + if (ret) 496 + return ret; 497 + 498 + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_TX]); 499 + if (ret) 500 + return ret; 501 + 502 + ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 125000000); 503 + if (ret) 504 + return ret; 505 + 506 + ret = clk_set_rate(adpt->clk[EMAC_CLK_MDIO], 25000000); 507 + if (ret) 508 + return ret; 509 + 510 + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_MDIO]); 511 + if (ret) 512 + return ret; 513 + 514 + ret = clk_prepare_enable(adpt->clk[EMAC_CLK_RX]); 515 + if (ret) 516 + return ret; 517 + 518 + return clk_prepare_enable(adpt->clk[EMAC_CLK_SYS]); 519 + } 520 + 521 + static void emac_clks_teardown(struct emac_adapter *adpt) 522 + { 523 + 524 + unsigned int i; 525 + 526 + for (i = 0; i < EMAC_CLK_CNT; i++) 527 + clk_disable_unprepare(adpt->clk[i]); 528 + } 529 + 530 + /* Get the resources */ 531 + static int emac_probe_resources(struct platform_device *pdev, 532 + struct emac_adapter *adpt) 533 + { 534 + struct device_node *node = pdev->dev.of_node; 535 + struct net_device *netdev = adpt->netdev; 536 + struct resource *res; 537 + const void *maddr; 538 + int ret = 0; 539 + 540 + /* get mac address */ 541 + maddr = of_get_mac_address(node); 542 + if (!maddr) 543 + eth_hw_addr_random(netdev); 544 + else 545 + ether_addr_copy(netdev->dev_addr, maddr); 546 + 547 
+ /* Core 0 interrupt */ 548 + ret = platform_get_irq(pdev, 0); 549 + if (ret < 0) { 550 + dev_err(&pdev->dev, 551 + "error: missing core0 irq resource (error=%i)\n", ret); 552 + return ret; 553 + } 554 + adpt->irq.irq = ret; 555 + 556 + /* base register address */ 557 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 558 + adpt->base = devm_ioremap_resource(&pdev->dev, res); 559 + if (IS_ERR(adpt->base)) 560 + return PTR_ERR(adpt->base); 561 + 562 + /* CSR register address */ 563 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 564 + adpt->csr = devm_ioremap_resource(&pdev->dev, res); 565 + if (IS_ERR(adpt->csr)) 566 + return PTR_ERR(adpt->csr); 567 + 568 + netdev->base_addr = (unsigned long)adpt->base; 569 + 570 + return 0; 571 + } 572 + 573 + static const struct of_device_id emac_dt_match[] = { 574 + { 575 + .compatible = "qcom,fsm9900-emac", 576 + }, 577 + {} 578 + }; 579 + 580 + static int emac_probe(struct platform_device *pdev) 581 + { 582 + struct net_device *netdev; 583 + struct emac_adapter *adpt; 584 + struct emac_phy *phy; 585 + u16 devid, revid; 586 + u32 reg; 587 + int ret; 588 + 589 + /* The EMAC itself is capable of 64-bit DMA, so try that first. */ 590 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); 591 + if (ret) { 592 + /* Some platforms may restrict the EMAC's address bus to less 593 + * then the size of DDR. In this case, we need to try a 594 + * smaller mask. We could try every possible smaller mask, 595 + * but that's overkill. Instead, just fall to 32-bit, which 596 + * should always work. 
597 + */ 598 + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 599 + if (ret) { 600 + dev_err(&pdev->dev, "could not set DMA mask\n"); 601 + return ret; 602 + } 603 + } 604 + 605 + netdev = alloc_etherdev(sizeof(struct emac_adapter)); 606 + if (!netdev) 607 + return -ENOMEM; 608 + 609 + dev_set_drvdata(&pdev->dev, netdev); 610 + SET_NETDEV_DEV(netdev, &pdev->dev); 611 + 612 + adpt = netdev_priv(netdev); 613 + adpt->netdev = netdev; 614 + adpt->msg_enable = EMAC_MSG_DEFAULT; 615 + 616 + phy = &adpt->phy; 617 + 618 + mutex_init(&adpt->reset_lock); 619 + spin_lock_init(&adpt->stats.lock); 620 + 621 + adpt->irq.mask = RX_PKT_INT0 | IMR_NORMAL_MASK; 622 + 623 + ret = emac_probe_resources(pdev, adpt); 624 + if (ret) 625 + goto err_undo_netdev; 626 + 627 + /* initialize clocks */ 628 + ret = emac_clks_phase1_init(pdev, adpt); 629 + if (ret) { 630 + dev_err(&pdev->dev, "could not initialize clocks\n"); 631 + goto err_undo_netdev; 632 + } 633 + 634 + netdev->watchdog_timeo = EMAC_WATCHDOG_TIME; 635 + netdev->irq = adpt->irq.irq; 636 + 637 + adpt->rrd_size = EMAC_RRD_SIZE; 638 + adpt->tpd_size = EMAC_TPD_SIZE; 639 + adpt->rfd_size = EMAC_RFD_SIZE; 640 + 641 + netdev->netdev_ops = &emac_netdev_ops; 642 + 643 + emac_init_adapter(adpt); 644 + 645 + /* init external phy */ 646 + ret = emac_phy_config(pdev, adpt); 647 + if (ret) 648 + goto err_undo_clocks; 649 + 650 + /* init internal sgmii phy */ 651 + ret = emac_sgmii_config(pdev, adpt); 652 + if (ret) 653 + goto err_undo_mdiobus; 654 + 655 + /* enable clocks */ 656 + ret = emac_clks_phase2_init(pdev, adpt); 657 + if (ret) { 658 + dev_err(&pdev->dev, "could not initialize clocks\n"); 659 + goto err_undo_mdiobus; 660 + } 661 + 662 + emac_mac_reset(adpt); 663 + 664 + /* set hw features */ 665 + netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | 666 + NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_RX | 667 + NETIF_F_HW_VLAN_CTAG_TX; 668 + netdev->hw_features = netdev->features; 669 + 670 + 
netdev->vlan_features |= NETIF_F_SG | NETIF_F_HW_CSUM | 671 + NETIF_F_TSO | NETIF_F_TSO6; 672 + 673 + INIT_WORK(&adpt->work_thread, emac_work_thread); 674 + 675 + /* Initialize queues */ 676 + emac_mac_rx_tx_ring_init_all(pdev, adpt); 677 + 678 + netif_napi_add(netdev, &adpt->rx_q.napi, emac_napi_rtx, 679 + NAPI_POLL_WEIGHT); 680 + 681 + ret = register_netdev(netdev); 682 + if (ret) { 683 + dev_err(&pdev->dev, "could not register net device\n"); 684 + goto err_undo_napi; 685 + } 686 + 687 + reg = readl_relaxed(adpt->base + EMAC_DMA_MAS_CTRL); 688 + devid = (reg & DEV_ID_NUM_BMSK) >> DEV_ID_NUM_SHFT; 689 + revid = (reg & DEV_REV_NUM_BMSK) >> DEV_REV_NUM_SHFT; 690 + reg = readl_relaxed(adpt->base + EMAC_CORE_HW_VERSION); 691 + 692 + netif_info(adpt, probe, netdev, 693 + "hardware id %d.%d, hardware version %d.%d.%d\n", 694 + devid, revid, 695 + (reg & MAJOR_BMSK) >> MAJOR_SHFT, 696 + (reg & MINOR_BMSK) >> MINOR_SHFT, 697 + (reg & STEP_BMSK) >> STEP_SHFT); 698 + 699 + return 0; 700 + 701 + err_undo_napi: 702 + netif_napi_del(&adpt->rx_q.napi); 703 + err_undo_mdiobus: 704 + mdiobus_unregister(adpt->mii_bus); 705 + err_undo_clocks: 706 + emac_clks_teardown(adpt); 707 + err_undo_netdev: 708 + free_netdev(netdev); 709 + 710 + return ret; 711 + } 712 + 713 + static int emac_remove(struct platform_device *pdev) 714 + { 715 + struct net_device *netdev = dev_get_drvdata(&pdev->dev); 716 + struct emac_adapter *adpt = netdev_priv(netdev); 717 + 718 + unregister_netdev(netdev); 719 + netif_napi_del(&adpt->rx_q.napi); 720 + 721 + emac_clks_teardown(adpt); 722 + 723 + mdiobus_unregister(adpt->mii_bus); 724 + free_netdev(netdev); 725 + dev_set_drvdata(&pdev->dev, NULL); 726 + 727 + return 0; 728 + } 729 + 730 + static struct platform_driver emac_platform_driver = { 731 + .probe = emac_probe, 732 + .remove = emac_remove, 733 + .driver = { 734 + .owner = THIS_MODULE, 735 + .name = "qcom-emac", 736 + .of_match_table = emac_dt_match, 737 + }, 738 + }; 739 + 740 + 
module_platform_driver(emac_platform_driver); 741 + 742 + MODULE_LICENSE("GPL v2"); 743 + MODULE_ALIAS("platform:qcom-emac");
+335
drivers/net/ethernet/qualcomm/emac/emac.h
··· 1 + /* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved. 2 + * 3 + * This program is free software; you can redistribute it and/or modify 4 + * it under the terms of the GNU General Public License version 2 and 5 + * only version 2 as published by the Free Software Foundation. 6 + * 7 + * This program is distributed in the hope that it will be useful, 8 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 + * GNU General Public License for more details. 11 + */ 12 + 13 + #ifndef _EMAC_H_ 14 + #define _EMAC_H_ 15 + 16 + #include <linux/irqreturn.h> 17 + #include <linux/netdevice.h> 18 + #include <linux/clk.h> 19 + #include <linux/platform_device.h> 20 + #include "emac-mac.h" 21 + #include "emac-phy.h" 22 + 23 + /* EMAC base register offsets */ 24 + #define EMAC_DMA_MAS_CTRL 0x001400 25 + #define EMAC_IRQ_MOD_TIM_INIT 0x001408 26 + #define EMAC_BLK_IDLE_STS 0x00140c 27 + #define EMAC_PHY_LINK_DELAY 0x00141c 28 + #define EMAC_SYS_ALIV_CTRL 0x001434 29 + #define EMAC_MAC_IPGIFG_CTRL 0x001484 30 + #define EMAC_MAC_STA_ADDR0 0x001488 31 + #define EMAC_MAC_STA_ADDR1 0x00148c 32 + #define EMAC_HASH_TAB_REG0 0x001490 33 + #define EMAC_HASH_TAB_REG1 0x001494 34 + #define EMAC_MAC_HALF_DPLX_CTRL 0x001498 35 + #define EMAC_MAX_FRAM_LEN_CTRL 0x00149c 36 + #define EMAC_INT_STATUS 0x001600 37 + #define EMAC_INT_MASK 0x001604 38 + #define EMAC_RXMAC_STATC_REG0 0x001700 39 + #define EMAC_RXMAC_STATC_REG22 0x001758 40 + #define EMAC_TXMAC_STATC_REG0 0x001760 41 + #define EMAC_TXMAC_STATC_REG24 0x0017c0 42 + #define EMAC_CORE_HW_VERSION 0x001974 43 + #define EMAC_IDT_TABLE0 0x001b00 44 + #define EMAC_RXMAC_STATC_REG23 0x001bc8 45 + #define EMAC_RXMAC_STATC_REG24 0x001bcc 46 + #define EMAC_TXMAC_STATC_REG25 0x001bd0 47 + #define EMAC_INT1_MASK 0x001bf0 48 + #define EMAC_INT1_STATUS 0x001bf4 49 + #define EMAC_INT2_MASK 0x001bf8 50 + #define EMAC_INT2_STATUS 0x001bfc 51 + #define 
EMAC_INT3_MASK 0x001c00 52 + #define EMAC_INT3_STATUS 0x001c04 53 + 54 + /* EMAC_DMA_MAS_CTRL */ 55 + #define DEV_ID_NUM_BMSK 0x7f000000 56 + #define DEV_ID_NUM_SHFT 24 57 + #define DEV_REV_NUM_BMSK 0xff0000 58 + #define DEV_REV_NUM_SHFT 16 59 + #define INT_RD_CLR_EN 0x4000 60 + #define IRQ_MODERATOR2_EN 0x800 61 + #define IRQ_MODERATOR_EN 0x400 62 + #define LPW_CLK_SEL 0x80 63 + #define LPW_STATE 0x20 64 + #define LPW_MODE 0x10 65 + #define SOFT_RST 0x1 66 + 67 + /* EMAC_IRQ_MOD_TIM_INIT */ 68 + #define IRQ_MODERATOR2_INIT_BMSK 0xffff0000 69 + #define IRQ_MODERATOR2_INIT_SHFT 16 70 + #define IRQ_MODERATOR_INIT_BMSK 0xffff 71 + #define IRQ_MODERATOR_INIT_SHFT 0 72 + 73 + /* EMAC_INT_STATUS */ 74 + #define DIS_INT BIT(31) 75 + #define PTP_INT BIT(30) 76 + #define RFD4_UR_INT BIT(29) 77 + #define TX_PKT_INT3 BIT(26) 78 + #define TX_PKT_INT2 BIT(25) 79 + #define TX_PKT_INT1 BIT(24) 80 + #define RX_PKT_INT3 BIT(19) 81 + #define RX_PKT_INT2 BIT(18) 82 + #define RX_PKT_INT1 BIT(17) 83 + #define RX_PKT_INT0 BIT(16) 84 + #define TX_PKT_INT BIT(15) 85 + #define TXQ_TO_INT BIT(14) 86 + #define GPHY_WAKEUP_INT BIT(13) 87 + #define GPHY_LINK_DOWN_INT BIT(12) 88 + #define GPHY_LINK_UP_INT BIT(11) 89 + #define DMAW_TO_INT BIT(10) 90 + #define DMAR_TO_INT BIT(9) 91 + #define TXF_UR_INT BIT(8) 92 + #define RFD3_UR_INT BIT(7) 93 + #define RFD2_UR_INT BIT(6) 94 + #define RFD1_UR_INT BIT(5) 95 + #define RFD0_UR_INT BIT(4) 96 + #define RXF_OF_INT BIT(3) 97 + #define SW_MAN_INT BIT(2) 98 + 99 + /* EMAC_MAILBOX_6 */ 100 + #define RFD2_PROC_IDX_BMSK 0xfff0000 101 + #define RFD2_PROC_IDX_SHFT 16 102 + #define RFD2_PROD_IDX_BMSK 0xfff 103 + #define RFD2_PROD_IDX_SHFT 0 104 + 105 + /* EMAC_CORE_HW_VERSION */ 106 + #define MAJOR_BMSK 0xf0000000 107 + #define MAJOR_SHFT 28 108 + #define MINOR_BMSK 0xfff0000 109 + #define MINOR_SHFT 16 110 + #define STEP_BMSK 0xffff 111 + #define STEP_SHFT 0 112 + 113 + /* EMAC_EMAC_WRAPPER_CSR1 */ 114 + #define TX_INDX_FIFO_SYNC_RST BIT(23) 115 + #define 
TX_TS_FIFO_SYNC_RST BIT(22) 116 + #define RX_TS_FIFO2_SYNC_RST BIT(21) 117 + #define RX_TS_FIFO1_SYNC_RST BIT(20) 118 + #define TX_TS_ENABLE BIT(16) 119 + #define DIS_1588_CLKS BIT(11) 120 + #define FREQ_MODE BIT(9) 121 + #define ENABLE_RRD_TIMESTAMP BIT(3) 122 + 123 + /* EMAC_EMAC_WRAPPER_CSR2 */ 124 + #define HDRIVE_BMSK 0x3000 125 + #define HDRIVE_SHFT 12 126 + #define SLB_EN BIT(9) 127 + #define PLB_EN BIT(8) 128 + #define WOL_EN BIT(3) 129 + #define PHY_RESET BIT(0) 130 + 131 + #define EMAC_DEV_ID 0x0040 132 + 133 + /* SGMII v2 per lane registers */ 134 + #define SGMII_LN_RSM_START 0x029C 135 + 136 + /* SGMII v2 PHY common registers */ 137 + #define SGMII_PHY_CMN_CTRL 0x0408 138 + #define SGMII_PHY_CMN_RESET_CTRL 0x0410 139 + 140 + /* SGMII v2 PHY registers per lane */ 141 + #define SGMII_PHY_LN_OFFSET 0x0400 142 + #define SGMII_PHY_LN_LANE_STATUS 0x00DC 143 + #define SGMII_PHY_LN_BIST_GEN0 0x008C 144 + #define SGMII_PHY_LN_BIST_GEN1 0x0090 145 + #define SGMII_PHY_LN_BIST_GEN2 0x0094 146 + #define SGMII_PHY_LN_BIST_GEN3 0x0098 147 + #define SGMII_PHY_LN_CDR_CTRL1 0x005C 148 + 149 + enum emac_clk_id { 150 + EMAC_CLK_AXI, 151 + EMAC_CLK_CFG_AHB, 152 + EMAC_CLK_HIGH_SPEED, 153 + EMAC_CLK_MDIO, 154 + EMAC_CLK_TX, 155 + EMAC_CLK_RX, 156 + EMAC_CLK_SYS, 157 + EMAC_CLK_CNT 158 + }; 159 + 160 + #define EMAC_LINK_SPEED_UNKNOWN 0x0 161 + #define EMAC_LINK_SPEED_10_HALF BIT(0) 162 + #define EMAC_LINK_SPEED_10_FULL BIT(1) 163 + #define EMAC_LINK_SPEED_100_HALF BIT(2) 164 + #define EMAC_LINK_SPEED_100_FULL BIT(3) 165 + #define EMAC_LINK_SPEED_1GB_FULL BIT(5) 166 + 167 + #define EMAC_MAX_SETUP_LNK_CYCLE 100 168 + 169 + /* Wake On Lan */ 170 + #define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */ 171 + #define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */ 172 + 173 + struct emac_stats { 174 + /* rx */ 175 + u64 rx_ok; /* good packets */ 176 + u64 rx_bcast; /* good broadcast packets */ 177 + u64 rx_mcast; /* good multicast packets */ 178 + u64 rx_pause; /* pause packet */ 
179 + u64 rx_ctrl; /* control packets other than pause frame. */ 180 + u64 rx_fcs_err; /* packets with bad FCS. */ 181 + u64 rx_len_err; /* packets with length mismatch */ 182 + u64 rx_byte_cnt; /* good bytes count (without FCS) */ 183 + u64 rx_runt; /* runt packets */ 184 + u64 rx_frag; /* fragment count */ 185 + u64 rx_sz_64; /* packets that are 64 bytes */ 186 + u64 rx_sz_65_127; /* packets that are 65-127 bytes */ 187 + u64 rx_sz_128_255; /* packets that are 128-255 bytes */ 188 + u64 rx_sz_256_511; /* packets that are 256-511 bytes */ 189 + u64 rx_sz_512_1023; /* packets that are 512-1023 bytes */ 190 + u64 rx_sz_1024_1518; /* packets that are 1024-1518 bytes */ 191 + u64 rx_sz_1519_max; /* packets that are 1519-MTU bytes*/ 192 + u64 rx_sz_ov; /* packets that are >MTU bytes (truncated) */ 193 + u64 rx_rxf_ov; /* packets dropped due to RX FIFO overflow */ 194 + u64 rx_align_err; /* alignment errors */ 195 + u64 rx_bcast_byte_cnt; /* broadcast packets byte count (without FCS) */ 196 + u64 rx_mcast_byte_cnt; /* multicast packets byte count (without FCS) */ 197 + u64 rx_err_addr; /* packets dropped due to address filtering */ 198 + u64 rx_crc_align; /* CRC align errors */ 199 + u64 rx_jabbers; /* jabbers */ 200 + 201 + /* tx */ 202 + u64 tx_ok; /* good packets */ 203 + u64 tx_bcast; /* good broadcast packets */ 204 + u64 tx_mcast; /* good multicast packets */ 205 + u64 tx_pause; /* pause packets */ 206 + u64 tx_exc_defer; /* packets with excessive deferral */ 207 + u64 tx_ctrl; /* control packets other than pause frame */ 208 + u64 tx_defer; /* packets that are deferred. 
*/ 209 + u64 tx_byte_cnt; /* good bytes count (without FCS) */ 210 + u64 tx_sz_64; /* packets that are 64 bytes */ 211 + u64 tx_sz_65_127; /* packets that are 65-127 bytes */ 212 + u64 tx_sz_128_255; /* packets that are 128-255 bytes */ 213 + u64 tx_sz_256_511; /* packets that are 256-511 bytes */ 214 + u64 tx_sz_512_1023; /* packets that are 512-1023 bytes */ 215 + u64 tx_sz_1024_1518; /* packets that are 1024-1518 bytes */ 216 + u64 tx_sz_1519_max; /* packets that are 1519-MTU bytes */ 217 + u64 tx_1_col; /* packets single prior collision */ 218 + u64 tx_2_col; /* packets with multiple prior collisions */ 219 + u64 tx_late_col; /* packets with late collisions */ 220 + u64 tx_abort_col; /* packets aborted due to excess collisions */ 221 + u64 tx_underrun; /* packets aborted due to FIFO underrun */ 222 + u64 tx_rd_eop; /* count of reads beyond EOP */ 223 + u64 tx_len_err; /* packets with length mismatch */ 224 + u64 tx_trunc; /* packets truncated due to size >MTU */ 225 + u64 tx_bcast_byte; /* broadcast packets byte count (without FCS) */ 226 + u64 tx_mcast_byte; /* multicast packets byte count (without FCS) */ 227 + u64 tx_col; /* collisions */ 228 + 229 + spinlock_t lock; /* prevent multiple simultaneous readers */ 230 + }; 231 + 232 + /* RSS hstype Definitions */ 233 + #define EMAC_RSS_HSTYP_IPV4_EN 0x00000001 234 + #define EMAC_RSS_HSTYP_TCP4_EN 0x00000002 235 + #define EMAC_RSS_HSTYP_IPV6_EN 0x00000004 236 + #define EMAC_RSS_HSTYP_TCP6_EN 0x00000008 237 + #define EMAC_RSS_HSTYP_ALL_EN (\ 238 + EMAC_RSS_HSTYP_IPV4_EN |\ 239 + EMAC_RSS_HSTYP_TCP4_EN |\ 240 + EMAC_RSS_HSTYP_IPV6_EN |\ 241 + EMAC_RSS_HSTYP_TCP6_EN) 242 + 243 + #define EMAC_VLAN_TO_TAG(_vlan, _tag) \ 244 + (_tag = ((((_vlan) >> 8) & 0xFF) | (((_vlan) & 0xFF) << 8))) 245 + 246 + #define EMAC_TAG_TO_VLAN(_tag, _vlan) \ 247 + (_vlan = ((((_tag) >> 8) & 0xFF) | (((_tag) & 0xFF) << 8))) 248 + 249 + #define EMAC_DEF_RX_BUF_SIZE 1536 250 + #define EMAC_MAX_JUMBO_PKT_SIZE (9 * 1024) 251 + #define 
EMAC_MAX_TX_OFFLOAD_THRESH (9 * 1024) 252 + 253 + #define EMAC_MAX_ETH_FRAME_SIZE EMAC_MAX_JUMBO_PKT_SIZE 254 + #define EMAC_MIN_ETH_FRAME_SIZE 68 255 + 256 + #define EMAC_DEF_TX_QUEUES 1 257 + #define EMAC_DEF_RX_QUEUES 1 258 + 259 + #define EMAC_MIN_TX_DESCS 128 260 + #define EMAC_MIN_RX_DESCS 128 261 + 262 + #define EMAC_MAX_TX_DESCS 16383 263 + #define EMAC_MAX_RX_DESCS 2047 264 + 265 + #define EMAC_DEF_TX_DESCS 512 266 + #define EMAC_DEF_RX_DESCS 256 267 + 268 + #define EMAC_DEF_RX_IRQ_MOD 250 269 + #define EMAC_DEF_TX_IRQ_MOD 250 270 + 271 + #define EMAC_WATCHDOG_TIME (5 * HZ) 272 + 273 + /* by default check link every 4 seconds */ 274 + #define EMAC_TRY_LINK_TIMEOUT (4 * HZ) 275 + 276 + /* emac_irq per-device (per-adapter) irq properties. 277 + * @irq: irq number. 278 + * @mask mask to use over status register. 279 + */ 280 + struct emac_irq { 281 + unsigned int irq; 282 + u32 mask; 283 + }; 284 + 285 + /* The device's main data structure */ 286 + struct emac_adapter { 287 + struct net_device *netdev; 288 + struct mii_bus *mii_bus; 289 + struct phy_device *phydev; 290 + 291 + void __iomem *base; 292 + void __iomem *csr; 293 + 294 + struct emac_phy phy; 295 + struct emac_stats stats; 296 + 297 + struct emac_irq irq; 298 + struct clk *clk[EMAC_CLK_CNT]; 299 + 300 + /* All Descriptor memory */ 301 + struct emac_ring_header ring_header; 302 + struct emac_tx_queue tx_q; 303 + struct emac_rx_queue rx_q; 304 + unsigned int tx_desc_cnt; 305 + unsigned int rx_desc_cnt; 306 + unsigned int rrd_size; /* in quad words */ 307 + unsigned int rfd_size; /* in quad words */ 308 + unsigned int tpd_size; /* in quad words */ 309 + 310 + unsigned int rxbuf_size; 311 + 312 + /* Ring parameter */ 313 + u8 tpd_burst; 314 + u8 rfd_burst; 315 + unsigned int dmaw_dly_cnt; 316 + unsigned int dmar_dly_cnt; 317 + enum emac_dma_req_block dmar_block; 318 + enum emac_dma_req_block dmaw_block; 319 + enum emac_dma_order dma_order; 320 + 321 + u32 irq_mod; 322 + u32 preamble; 323 + 324 + struct 
work_struct work_thread; 325 + 326 + u16 msg_enable; 327 + 328 + struct mutex reset_lock; 329 + }; 330 + 331 + int emac_reinit_locked(struct emac_adapter *adpt); 332 + void emac_reg_update32(void __iomem *addr, u32 mask, u32 val); 333 + irqreturn_t emac_isr(int irq, void *data); 334 + 335 + #endif /* _EMAC_H_ */