Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'txgbe'

Jiawen Wu says:

====================
net: WangXun txgbe ethernet driver

This patch series adds support for WangXun 10 gigabit NIC, to initialize
hardware, set mac address, and register netdev.

Change log:
v6: address comments:
Jakub Kicinski: check with scripts/kernel-doc
v5: address comments:
Jakub Kicinski: clean build with W=1 C=1
v4: address comments:
Andrew Lunn: https://lore.kernel.org/all/YzXROBtztWopeeaA@lunn.ch/
v3: address comments:
Andrew Lunn: remove hw function ops, reorder functions, use BIT(n)
for register bit offset, move the same code of txgbe
and ngbe to libwx
v2: address comments:
Andrew Lunn: https://lore.kernel.org/netdev/YvRhld5rD%2FxgITEg@lunn.ch/
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1273 -19
+6
drivers/net/ethernet/wangxun/Kconfig
··· 16 16 17 17 if NET_VENDOR_WANGXUN 18 18 19 + config LIBWX 20 + tristate 21 + help 22 + Common library for Wangxun(R) Ethernet drivers. 23 + 19 24 config NGBE 20 25 tristate "Wangxun(R) GbE PCI Express adapters support" 21 26 depends on PCI ··· 37 32 config TXGBE 38 33 tristate "Wangxun(R) 10GbE PCI Express adapters support" 39 34 depends on PCI 35 + select LIBWX 40 36 help 41 37 This driver supports Wangxun(R) 10GbE PCI Express family of 42 38 adapters.
+1
drivers/net/ethernet/wangxun/Makefile
··· 3 3 # Makefile for the Wangxun network device drivers. 4 4 # 5 5 6 + obj-$(CONFIG_LIBWX) += libwx/ 6 7 obj-$(CONFIG_TXGBE) += txgbe/ 7 8 obj-$(CONFIG_NGBE) += ngbe/
+7
drivers/net/ethernet/wangxun/libwx/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0 2 + # Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. 3 + # 4 + 5 + obj-$(CONFIG_LIBWX) += libwx.o 6 + 7 + libwx-objs := wx_hw.o
+475
drivers/net/ethernet/wangxun/libwx/wx_hw.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ 3 + 4 + #include <linux/etherdevice.h> 5 + #include <linux/if_ether.h> 6 + #include <linux/iopoll.h> 7 + #include <linux/pci.h> 8 + 9 + #include "wx_type.h" 10 + #include "wx_hw.h" 11 + 12 + static void wx_intr_disable(struct wx_hw *wxhw, u64 qmask) 13 + { 14 + u32 mask; 15 + 16 + mask = (qmask & 0xFFFFFFFF); 17 + if (mask) 18 + wr32(wxhw, WX_PX_IMS(0), mask); 19 + 20 + if (wxhw->mac.type == wx_mac_sp) { 21 + mask = (qmask >> 32); 22 + if (mask) 23 + wr32(wxhw, WX_PX_IMS(1), mask); 24 + } 25 + } 26 + 27 + /* cmd_addr is used for some special command: 28 + * 1. to be sector address, when implemented erase sector command 29 + * 2. to be flash address when implemented read, write flash address 30 + */ 31 + static int wx_fmgr_cmd_op(struct wx_hw *wxhw, u32 cmd, u32 cmd_addr) 32 + { 33 + u32 cmd_val = 0, val = 0; 34 + 35 + cmd_val = WX_SPI_CMD_CMD(cmd) | 36 + WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) | 37 + cmd_addr; 38 + wr32(wxhw, WX_SPI_CMD, cmd_val); 39 + 40 + return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000, 41 + false, wxhw, WX_SPI_STATUS); 42 + } 43 + 44 + static int wx_flash_read_dword(struct wx_hw *wxhw, u32 addr, u32 *data) 45 + { 46 + int ret = 0; 47 + 48 + ret = wx_fmgr_cmd_op(wxhw, WX_SPI_CMD_READ_DWORD, addr); 49 + if (ret < 0) 50 + return ret; 51 + 52 + *data = rd32(wxhw, WX_SPI_DATA); 53 + 54 + return ret; 55 + } 56 + 57 + int wx_check_flash_load(struct wx_hw *hw, u32 check_bit) 58 + { 59 + u32 reg = 0; 60 + int err = 0; 61 + 62 + /* if there's flash existing */ 63 + if (!(rd32(hw, WX_SPI_STATUS) & 64 + WX_SPI_STATUS_FLASH_BYPASS)) { 65 + /* wait hw load flash done */ 66 + err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000, 67 + false, hw, WX_SPI_ILDR_STATUS); 68 + if (err < 0) 69 + wx_err(hw, "Check flash load timeout.\n"); 70 + } 71 + 72 + return err; 73 + } 74 + EXPORT_SYMBOL(wx_check_flash_load); 75 + 76 + /** 77 + * 
wx_get_mac_addr - Generic get MAC address 78 + * @wxhw: pointer to hardware structure 79 + * @mac_addr: Adapter MAC address 80 + * 81 + * Reads the adapter's MAC address from first Receive Address Register (RAR0) 82 + * A reset of the adapter must be performed prior to calling this function 83 + * in order for the MAC address to have been loaded from the EEPROM into RAR0 84 + **/ 85 + void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr) 86 + { 87 + u32 rar_high; 88 + u32 rar_low; 89 + u16 i; 90 + 91 + wr32(wxhw, WX_PSR_MAC_SWC_IDX, 0); 92 + rar_high = rd32(wxhw, WX_PSR_MAC_SWC_AD_H); 93 + rar_low = rd32(wxhw, WX_PSR_MAC_SWC_AD_L); 94 + 95 + for (i = 0; i < 2; i++) 96 + mac_addr[i] = (u8)(rar_high >> (1 - i) * 8); 97 + 98 + for (i = 0; i < 4; i++) 99 + mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8); 100 + } 101 + EXPORT_SYMBOL(wx_get_mac_addr); 102 + 103 + /** 104 + * wx_set_rar - Set Rx address register 105 + * @wxhw: pointer to hardware structure 106 + * @index: Receive address register to write 107 + * @addr: Address to put into receive address register 108 + * @pools: VMDq "set" or "pool" index 109 + * @enable_addr: set flag that address is active 110 + * 111 + * Puts an ethernet address into a receive address register. 
112 + **/ 113 + int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, 114 + u32 enable_addr) 115 + { 116 + u32 rar_entries = wxhw->mac.num_rar_entries; 117 + u32 rar_low, rar_high; 118 + 119 + /* Make sure we are using a valid rar index range */ 120 + if (index >= rar_entries) { 121 + wx_err(wxhw, "RAR index %d is out of range.\n", index); 122 + return -EINVAL; 123 + } 124 + 125 + /* select the MAC address */ 126 + wr32(wxhw, WX_PSR_MAC_SWC_IDX, index); 127 + 128 + /* setup VMDq pool mapping */ 129 + wr32(wxhw, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF); 130 + if (wxhw->mac.type == wx_mac_sp) 131 + wr32(wxhw, WX_PSR_MAC_SWC_VM_H, pools >> 32); 132 + 133 + /* HW expects these in little endian so we reverse the byte 134 + * order from network order (big endian) to little endian 135 + * 136 + * Some parts put the VMDq setting in the extra RAH bits, 137 + * so save everything except the lower 16 bits that hold part 138 + * of the address and the address valid bit. 139 + */ 140 + rar_low = ((u32)addr[5] | 141 + ((u32)addr[4] << 8) | 142 + ((u32)addr[3] << 16) | 143 + ((u32)addr[2] << 24)); 144 + rar_high = ((u32)addr[1] | 145 + ((u32)addr[0] << 8)); 146 + if (enable_addr != 0) 147 + rar_high |= WX_PSR_MAC_SWC_AD_H_AV; 148 + 149 + wr32(wxhw, WX_PSR_MAC_SWC_AD_L, rar_low); 150 + wr32m(wxhw, WX_PSR_MAC_SWC_AD_H, 151 + (WX_PSR_MAC_SWC_AD_H_AD(~0) | 152 + WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) | 153 + WX_PSR_MAC_SWC_AD_H_AV), 154 + rar_high); 155 + 156 + return 0; 157 + } 158 + EXPORT_SYMBOL(wx_set_rar); 159 + 160 + /** 161 + * wx_clear_rar - Remove Rx address register 162 + * @wxhw: pointer to hardware structure 163 + * @index: Receive address register to write 164 + * 165 + * Clears an ethernet address from a receive address register. 
166 + **/ 167 + int wx_clear_rar(struct wx_hw *wxhw, u32 index) 168 + { 169 + u32 rar_entries = wxhw->mac.num_rar_entries; 170 + 171 + /* Make sure we are using a valid rar index range */ 172 + if (index >= rar_entries) { 173 + wx_err(wxhw, "RAR index %d is out of range.\n", index); 174 + return -EINVAL; 175 + } 176 + 177 + /* Some parts put the VMDq setting in the extra RAH bits, 178 + * so save everything except the lower 16 bits that hold part 179 + * of the address and the address valid bit. 180 + */ 181 + wr32(wxhw, WX_PSR_MAC_SWC_IDX, index); 182 + 183 + wr32(wxhw, WX_PSR_MAC_SWC_VM_L, 0); 184 + wr32(wxhw, WX_PSR_MAC_SWC_VM_H, 0); 185 + 186 + wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0); 187 + wr32m(wxhw, WX_PSR_MAC_SWC_AD_H, 188 + (WX_PSR_MAC_SWC_AD_H_AD(~0) | 189 + WX_PSR_MAC_SWC_AD_H_ADTYPE(~0) | 190 + WX_PSR_MAC_SWC_AD_H_AV), 191 + 0); 192 + 193 + return 0; 194 + } 195 + EXPORT_SYMBOL(wx_clear_rar); 196 + 197 + /** 198 + * wx_clear_vmdq - Disassociate a VMDq pool index from a rx address 199 + * @wxhw: pointer to hardware struct 200 + * @rar: receive address register index to disassociate 201 + * @vmdq: VMDq pool index to remove from the rar 202 + **/ 203 + static int wx_clear_vmdq(struct wx_hw *wxhw, u32 rar, u32 __maybe_unused vmdq) 204 + { 205 + u32 rar_entries = wxhw->mac.num_rar_entries; 206 + u32 mpsar_lo, mpsar_hi; 207 + 208 + /* Make sure we are using a valid rar index range */ 209 + if (rar >= rar_entries) { 210 + wx_err(wxhw, "RAR index %d is out of range.\n", rar); 211 + return -EINVAL; 212 + } 213 + 214 + wr32(wxhw, WX_PSR_MAC_SWC_IDX, rar); 215 + mpsar_lo = rd32(wxhw, WX_PSR_MAC_SWC_VM_L); 216 + mpsar_hi = rd32(wxhw, WX_PSR_MAC_SWC_VM_H); 217 + 218 + if (!mpsar_lo && !mpsar_hi) 219 + return 0; 220 + 221 + /* was that the last pool using this rar? 
*/ 222 + if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) 223 + wx_clear_rar(wxhw, rar); 224 + 225 + return 0; 226 + } 227 + 228 + /** 229 + * wx_init_uta_tables - Initialize the Unicast Table Array 230 + * @wxhw: pointer to hardware structure 231 + **/ 232 + static void wx_init_uta_tables(struct wx_hw *wxhw) 233 + { 234 + int i; 235 + 236 + wx_dbg(wxhw, " Clearing UTA\n"); 237 + 238 + for (i = 0; i < 128; i++) 239 + wr32(wxhw, WX_PSR_UC_TBL(i), 0); 240 + } 241 + 242 + /** 243 + * wx_init_rx_addrs - Initializes receive address filters. 244 + * @wxhw: pointer to hardware structure 245 + * 246 + * Places the MAC address in receive address register 0 and clears the rest 247 + * of the receive address registers. Clears the multicast table. Assumes 248 + * the receiver is in reset when the routine is called. 249 + **/ 250 + void wx_init_rx_addrs(struct wx_hw *wxhw) 251 + { 252 + u32 rar_entries = wxhw->mac.num_rar_entries; 253 + u32 psrctl; 254 + int i; 255 + 256 + /* If the current mac address is valid, assume it is a software override 257 + * to the permanent address. 258 + * Otherwise, use the permanent address from the eeprom. 259 + */ 260 + if (!is_valid_ether_addr(wxhw->mac.addr)) { 261 + /* Get the MAC address from the RAR0 for later reference */ 262 + wx_get_mac_addr(wxhw, wxhw->mac.addr); 263 + wx_dbg(wxhw, "Keeping Current RAR0 Addr = %pM\n", wxhw->mac.addr); 264 + } else { 265 + /* Setup the receive address. */ 266 + wx_dbg(wxhw, "Overriding MAC Address in RAR[0]\n"); 267 + wx_dbg(wxhw, "New MAC Addr = %pM\n", wxhw->mac.addr); 268 + 269 + wx_set_rar(wxhw, 0, wxhw->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV); 270 + 271 + if (wxhw->mac.type == wx_mac_sp) { 272 + /* clear VMDq pool/queue selection for RAR 0 */ 273 + wx_clear_vmdq(wxhw, 0, WX_CLEAR_VMDQ_ALL); 274 + } 275 + } 276 + 277 + /* Zero out the other receive addresses. 
*/ 278 + wx_dbg(wxhw, "Clearing RAR[1-%d]\n", rar_entries - 1); 279 + for (i = 1; i < rar_entries; i++) { 280 + wr32(wxhw, WX_PSR_MAC_SWC_IDX, i); 281 + wr32(wxhw, WX_PSR_MAC_SWC_AD_L, 0); 282 + wr32(wxhw, WX_PSR_MAC_SWC_AD_H, 0); 283 + } 284 + 285 + /* Clear the MTA */ 286 + wxhw->addr_ctrl.mta_in_use = 0; 287 + psrctl = rd32(wxhw, WX_PSR_CTL); 288 + psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE); 289 + psrctl |= wxhw->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT; 290 + wr32(wxhw, WX_PSR_CTL, psrctl); 291 + wx_dbg(wxhw, " Clearing MTA\n"); 292 + for (i = 0; i < wxhw->mac.mcft_size; i++) 293 + wr32(wxhw, WX_PSR_MC_TBL(i), 0); 294 + 295 + wx_init_uta_tables(wxhw); 296 + } 297 + EXPORT_SYMBOL(wx_init_rx_addrs); 298 + 299 + void wx_disable_rx(struct wx_hw *wxhw) 300 + { 301 + u32 pfdtxgswc; 302 + u32 rxctrl; 303 + 304 + rxctrl = rd32(wxhw, WX_RDB_PB_CTL); 305 + if (rxctrl & WX_RDB_PB_CTL_RXEN) { 306 + pfdtxgswc = rd32(wxhw, WX_PSR_CTL); 307 + if (pfdtxgswc & WX_PSR_CTL_SW_EN) { 308 + pfdtxgswc &= ~WX_PSR_CTL_SW_EN; 309 + wr32(wxhw, WX_PSR_CTL, pfdtxgswc); 310 + wxhw->mac.set_lben = true; 311 + } else { 312 + wxhw->mac.set_lben = false; 313 + } 314 + rxctrl &= ~WX_RDB_PB_CTL_RXEN; 315 + wr32(wxhw, WX_RDB_PB_CTL, rxctrl); 316 + 317 + if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || 318 + ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { 319 + /* disable mac receiver */ 320 + wr32m(wxhw, WX_MAC_RX_CFG, 321 + WX_MAC_RX_CFG_RE, 0); 322 + } 323 + } 324 + } 325 + EXPORT_SYMBOL(wx_disable_rx); 326 + 327 + /** 328 + * wx_disable_pcie_master - Disable PCI-express master access 329 + * @wxhw: pointer to hardware structure 330 + * 331 + * Disables PCI-Express master access and verifies there are no pending 332 + * requests. 
333 + **/ 334 + int wx_disable_pcie_master(struct wx_hw *wxhw) 335 + { 336 + int status = 0; 337 + u32 val; 338 + 339 + /* Always set this bit to ensure any future transactions are blocked */ 340 + pci_clear_master(wxhw->pdev); 341 + 342 + /* Exit if master requests are blocked */ 343 + if (!(rd32(wxhw, WX_PX_TRANSACTION_PENDING))) 344 + return 0; 345 + 346 + /* Poll for master request bit to clear */ 347 + status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT, 348 + false, wxhw, WX_PX_TRANSACTION_PENDING); 349 + if (status < 0) 350 + wx_err(wxhw, "PCIe transaction pending bit did not clear.\n"); 351 + 352 + return status; 353 + } 354 + EXPORT_SYMBOL(wx_disable_pcie_master); 355 + 356 + /** 357 + * wx_stop_adapter - Generic stop Tx/Rx units 358 + * @wxhw: pointer to hardware structure 359 + * 360 + * Sets the adapter_stopped flag within wx_hw struct. Clears interrupts, 361 + * disables transmit and receive units. The adapter_stopped flag is used by 362 + * the shared code and drivers to determine if the adapter is in a stopped 363 + * state and should not touch the hardware. 364 + **/ 365 + int wx_stop_adapter(struct wx_hw *wxhw) 366 + { 367 + u16 i; 368 + 369 + /* Set the adapter_stopped flag so other driver functions stop touching 370 + * the hardware 371 + */ 372 + wxhw->adapter_stopped = true; 373 + 374 + /* Disable the receive unit */ 375 + wx_disable_rx(wxhw); 376 + 377 + /* Set interrupt mask to stop interrupts from being generated */ 378 + wx_intr_disable(wxhw, WX_INTR_ALL); 379 + 380 + /* Clear any pending interrupts, flush previous writes */ 381 + wr32(wxhw, WX_PX_MISC_IC, 0xffffffff); 382 + wr32(wxhw, WX_BME_CTL, 0x3); 383 + 384 + /* Disable the transmit unit. Each queue must be disabled. 
*/ 385 + for (i = 0; i < wxhw->mac.max_tx_queues; i++) { 386 + wr32m(wxhw, WX_PX_TR_CFG(i), 387 + WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE, 388 + WX_PX_TR_CFG_SWFLSH); 389 + } 390 + 391 + /* Disable the receive unit by stopping each queue */ 392 + for (i = 0; i < wxhw->mac.max_rx_queues; i++) { 393 + wr32m(wxhw, WX_PX_RR_CFG(i), 394 + WX_PX_RR_CFG_RR_EN, 0); 395 + } 396 + 397 + /* flush all queues disables */ 398 + WX_WRITE_FLUSH(wxhw); 399 + 400 + /* Prevent the PCI-E bus from hanging by disabling PCI-E master 401 + * access and verify no pending requests 402 + */ 403 + return wx_disable_pcie_master(wxhw); 404 + } 405 + EXPORT_SYMBOL(wx_stop_adapter); 406 + 407 + void wx_reset_misc(struct wx_hw *wxhw) 408 + { 409 + int i; 410 + 411 + /* receive packets that size > 2048 */ 412 + wr32m(wxhw, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE); 413 + 414 + /* clear counters on read */ 415 + wr32m(wxhw, WX_MMC_CONTROL, 416 + WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD); 417 + 418 + wr32m(wxhw, WX_MAC_RX_FLOW_CTRL, 419 + WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE); 420 + 421 + wr32(wxhw, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR); 422 + 423 + wr32m(wxhw, WX_MIS_RST_ST, 424 + WX_MIS_RST_ST_RST_INIT, 0x1E00); 425 + 426 + /* errata 4: initialize mng flex tbl and wakeup flex tbl*/ 427 + wr32(wxhw, WX_PSR_MNG_FLEX_SEL, 0); 428 + for (i = 0; i < 16; i++) { 429 + wr32(wxhw, WX_PSR_MNG_FLEX_DW_L(i), 0); 430 + wr32(wxhw, WX_PSR_MNG_FLEX_DW_H(i), 0); 431 + wr32(wxhw, WX_PSR_MNG_FLEX_MSK(i), 0); 432 + } 433 + wr32(wxhw, WX_PSR_LAN_FLEX_SEL, 0); 434 + for (i = 0; i < 16; i++) { 435 + wr32(wxhw, WX_PSR_LAN_FLEX_DW_L(i), 0); 436 + wr32(wxhw, WX_PSR_LAN_FLEX_DW_H(i), 0); 437 + wr32(wxhw, WX_PSR_LAN_FLEX_MSK(i), 0); 438 + } 439 + 440 + /* set pause frame dst mac addr */ 441 + wr32(wxhw, WX_RDB_PFCMACDAL, 0xC2000001); 442 + wr32(wxhw, WX_RDB_PFCMACDAH, 0x0180); 443 + } 444 + EXPORT_SYMBOL(wx_reset_misc); 445 + 446 + int wx_sw_init(struct wx_hw *wxhw) 447 + { 448 + struct pci_dev *pdev 
= wxhw->pdev; 449 + u32 ssid = 0; 450 + int err = 0; 451 + 452 + wxhw->vendor_id = pdev->vendor; 453 + wxhw->device_id = pdev->device; 454 + wxhw->revision_id = pdev->revision; 455 + wxhw->oem_svid = pdev->subsystem_vendor; 456 + wxhw->oem_ssid = pdev->subsystem_device; 457 + wxhw->bus.device = PCI_SLOT(pdev->devfn); 458 + wxhw->bus.func = PCI_FUNC(pdev->devfn); 459 + 460 + if (wxhw->oem_svid == PCI_VENDOR_ID_WANGXUN) { 461 + wxhw->subsystem_vendor_id = pdev->subsystem_vendor; 462 + wxhw->subsystem_device_id = pdev->subsystem_device; 463 + } else { 464 + err = wx_flash_read_dword(wxhw, 0xfffdc, &ssid); 465 + if (!err) 466 + wxhw->subsystem_device_id = swab16((u16)ssid); 467 + 468 + return err; 469 + } 470 + 471 + return 0; 472 + } 473 + EXPORT_SYMBOL(wx_sw_init); 474 + 475 + MODULE_LICENSE("GPL");
+18
drivers/net/ethernet/wangxun/libwx/wx_hw.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ 3 + 4 + #ifndef _WX_HW_H_ 5 + #define _WX_HW_H_ 6 + 7 + int wx_check_flash_load(struct wx_hw *hw, u32 check_bit); 8 + void wx_get_mac_addr(struct wx_hw *wxhw, u8 *mac_addr); 9 + int wx_set_rar(struct wx_hw *wxhw, u32 index, u8 *addr, u64 pools, u32 enable_addr); 10 + int wx_clear_rar(struct wx_hw *wxhw, u32 index); 11 + void wx_init_rx_addrs(struct wx_hw *wxhw); 12 + void wx_disable_rx(struct wx_hw *wxhw); 13 + int wx_disable_pcie_master(struct wx_hw *wxhw); 14 + int wx_stop_adapter(struct wx_hw *wxhw); 15 + void wx_reset_misc(struct wx_hw *wxhw); 16 + int wx_sw_init(struct wx_hw *wxhw); 17 + 18 + #endif /* _WX_HW_H_ */
+237
drivers/net/ethernet/wangxun/libwx/wx_type.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ 3 + 4 + #ifndef _WX_TYPE_H_ 5 + #define _WX_TYPE_H_ 6 + 7 + /* Vendor ID */ 8 + #ifndef PCI_VENDOR_ID_WANGXUN 9 + #define PCI_VENDOR_ID_WANGXUN 0x8088 10 + #endif 11 + 12 + #define WX_NCSI_SUP 0x8000 13 + #define WX_NCSI_MASK 0x8000 14 + #define WX_WOL_SUP 0x4000 15 + #define WX_WOL_MASK 0x4000 16 + 17 + /**************** Global Registers ****************************/ 18 + /* chip control Registers */ 19 + #define WX_MIS_PWR 0x10000 20 + #define WX_MIS_RST 0x1000C 21 + #define WX_MIS_RST_LAN_RST(_i) BIT((_i) + 1) 22 + #define WX_MIS_RST_ST 0x10030 23 + #define WX_MIS_RST_ST_RST_INI_SHIFT 8 24 + #define WX_MIS_RST_ST_RST_INIT (0xFF << WX_MIS_RST_ST_RST_INI_SHIFT) 25 + 26 + /* FMGR Registers */ 27 + #define WX_SPI_CMD 0x10104 28 + #define WX_SPI_CMD_READ_DWORD 0x1 29 + #define WX_SPI_CLK_DIV 0x3 30 + #define WX_SPI_CMD_CMD(_v) (((_v) & 0x7) << 28) 31 + #define WX_SPI_CMD_CLK(_v) (((_v) & 0x7) << 25) 32 + #define WX_SPI_CMD_ADDR(_v) (((_v) & 0xFFFFFF)) 33 + #define WX_SPI_DATA 0x10108 34 + #define WX_SPI_DATA_BYPASS BIT(31) 35 + #define WX_SPI_DATA_STATUS(_v) (((_v) & 0xFF) << 16) 36 + #define WX_SPI_DATA_OP_DONE BIT(0) 37 + #define WX_SPI_STATUS 0x1010C 38 + #define WX_SPI_STATUS_OPDONE BIT(0) 39 + #define WX_SPI_STATUS_FLASH_BYPASS BIT(31) 40 + #define WX_SPI_ILDR_STATUS 0x10120 41 + 42 + /* Sensors for PVT(Process Voltage Temperature) */ 43 + #define WX_TS_EN 0x10304 44 + #define WX_TS_EN_ENA BIT(0) 45 + #define WX_TS_ALARM_THRE 0x1030C 46 + #define WX_TS_DALARM_THRE 0x10310 47 + #define WX_TS_INT_EN 0x10314 48 + #define WX_TS_INT_EN_DALARM_INT_EN BIT(1) 49 + #define WX_TS_INT_EN_ALARM_INT_EN BIT(0) 50 + #define WX_TS_ALARM_ST 0x10318 51 + #define WX_TS_ALARM_ST_DALARM BIT(1) 52 + #define WX_TS_ALARM_ST_ALARM BIT(0) 53 + 54 + /*********************** Transmit DMA registers **************************/ 55 + /* transmit global control */ 56 + #define 
WX_TDM_CTL 0x18000 57 + /* TDM CTL BIT */ 58 + #define WX_TDM_CTL_TE BIT(0) /* Transmit Enable */ 59 + 60 + /***************************** RDB registers *********************************/ 61 + /* receive packet buffer */ 62 + #define WX_RDB_PB_CTL 0x19000 63 + #define WX_RDB_PB_CTL_RXEN BIT(31) /* Enable Receiver */ 64 + #define WX_RDB_PB_CTL_DISABLED BIT(0) 65 + /* statistic */ 66 + #define WX_RDB_PFCMACDAL 0x19210 67 + #define WX_RDB_PFCMACDAH 0x19214 68 + 69 + /******************************* PSR Registers *******************************/ 70 + /* psr control */ 71 + #define WX_PSR_CTL 0x15000 72 + /* Header split receive */ 73 + #define WX_PSR_CTL_SW_EN BIT(18) 74 + #define WX_PSR_CTL_RSC_ACK BIT(17) 75 + #define WX_PSR_CTL_RSC_DIS BIT(16) 76 + #define WX_PSR_CTL_PCSD BIT(13) 77 + #define WX_PSR_CTL_IPPCSE BIT(12) 78 + #define WX_PSR_CTL_BAM BIT(10) 79 + #define WX_PSR_CTL_UPE BIT(9) 80 + #define WX_PSR_CTL_MPE BIT(8) 81 + #define WX_PSR_CTL_MFE BIT(7) 82 + #define WX_PSR_CTL_MO_SHIFT 5 83 + #define WX_PSR_CTL_MO (0x3 << WX_PSR_CTL_MO_SHIFT) 84 + #define WX_PSR_CTL_TPE BIT(4) 85 + /* mcasst/ucast overflow tbl */ 86 + #define WX_PSR_MC_TBL(_i) (0x15200 + ((_i) * 4)) 87 + #define WX_PSR_UC_TBL(_i) (0x15400 + ((_i) * 4)) 88 + 89 + /* Management */ 90 + #define WX_PSR_MNG_FLEX_SEL 0x1582C 91 + #define WX_PSR_MNG_FLEX_DW_L(_i) (0x15A00 + ((_i) * 16)) 92 + #define WX_PSR_MNG_FLEX_DW_H(_i) (0x15A04 + ((_i) * 16)) 93 + #define WX_PSR_MNG_FLEX_MSK(_i) (0x15A08 + ((_i) * 16)) 94 + #define WX_PSR_LAN_FLEX_SEL 0x15B8C 95 + #define WX_PSR_LAN_FLEX_DW_L(_i) (0x15C00 + ((_i) * 16)) 96 + #define WX_PSR_LAN_FLEX_DW_H(_i) (0x15C04 + ((_i) * 16)) 97 + #define WX_PSR_LAN_FLEX_MSK(_i) (0x15C08 + ((_i) * 16)) 98 + 99 + /* mac switcher */ 100 + #define WX_PSR_MAC_SWC_AD_L 0x16200 101 + #define WX_PSR_MAC_SWC_AD_H 0x16204 102 + #define WX_PSR_MAC_SWC_AD_H_AD(v) (((v) & 0xFFFF)) 103 + #define WX_PSR_MAC_SWC_AD_H_ADTYPE(v) (((v) & 0x1) << 30) 104 + #define WX_PSR_MAC_SWC_AD_H_AV BIT(31) 
105 + #define WX_PSR_MAC_SWC_VM_L 0x16208 106 + #define WX_PSR_MAC_SWC_VM_H 0x1620C 107 + #define WX_PSR_MAC_SWC_IDX 0x16210 108 + #define WX_CLEAR_VMDQ_ALL 0xFFFFFFFFU 109 + 110 + /************************************* ETH MAC *****************************/ 111 + #define WX_MAC_TX_CFG 0x11000 112 + #define WX_MAC_TX_CFG_TE BIT(0) 113 + #define WX_MAC_RX_CFG 0x11004 114 + #define WX_MAC_RX_CFG_RE BIT(0) 115 + #define WX_MAC_RX_CFG_JE BIT(8) 116 + #define WX_MAC_PKT_FLT 0x11008 117 + #define WX_MAC_PKT_FLT_PR BIT(0) /* promiscuous mode */ 118 + #define WX_MAC_RX_FLOW_CTRL 0x11090 119 + #define WX_MAC_RX_FLOW_CTRL_RFE BIT(0) /* receive fc enable */ 120 + #define WX_MMC_CONTROL 0x11800 121 + #define WX_MMC_CONTROL_RSTONRD BIT(2) /* reset on read */ 122 + 123 + /********************************* BAR registers ***************************/ 124 + /* Interrupt Registers */ 125 + #define WX_BME_CTL 0x12020 126 + #define WX_PX_MISC_IC 0x100 127 + #define WX_PX_IMS(_i) (0x140 + (_i) * 4) 128 + #define WX_PX_TRANSACTION_PENDING 0x168 129 + 130 + /* transmit DMA Registers */ 131 + #define WX_PX_TR_CFG(_i) (0x03010 + ((_i) * 0x40)) 132 + /* Transmit Config masks */ 133 + #define WX_PX_TR_CFG_ENABLE BIT(0) /* Ena specific Tx Queue */ 134 + #define WX_PX_TR_CFG_TR_SIZE_SHIFT 1 /* tx desc number per ring */ 135 + #define WX_PX_TR_CFG_SWFLSH BIT(26) /* Tx Desc. 
wr-bk flushing */ 136 + #define WX_PX_TR_CFG_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ 137 + #define WX_PX_TR_CFG_THRE_SHIFT 8 138 + 139 + /* Receive DMA Registers */ 140 + #define WX_PX_RR_CFG(_i) (0x01010 + ((_i) * 0x40)) 141 + /* PX_RR_CFG bit definitions */ 142 + #define WX_PX_RR_CFG_RR_EN BIT(0) 143 + 144 + /* Number of 80 microseconds we wait for PCI Express master disable */ 145 + #define WX_PCI_MASTER_DISABLE_TIMEOUT 80000 146 + 147 + /* Bus parameters */ 148 + struct wx_bus_info { 149 + u8 func; 150 + u16 device; 151 + }; 152 + 153 + struct wx_thermal_sensor_data { 154 + s16 temp; 155 + s16 alarm_thresh; 156 + s16 dalarm_thresh; 157 + }; 158 + 159 + enum wx_mac_type { 160 + wx_mac_unknown = 0, 161 + wx_mac_sp, 162 + wx_mac_em 163 + }; 164 + 165 + struct wx_mac_info { 166 + enum wx_mac_type type; 167 + bool set_lben; 168 + u8 addr[ETH_ALEN]; 169 + u8 perm_addr[ETH_ALEN]; 170 + s32 mc_filter_type; 171 + u32 mcft_size; 172 + u32 num_rar_entries; 173 + u32 max_tx_queues; 174 + u32 max_rx_queues; 175 + struct wx_thermal_sensor_data sensor; 176 + }; 177 + 178 + struct wx_addr_filter_info { 179 + u32 num_mc_addrs; 180 + u32 mta_in_use; 181 + bool user_set_promisc; 182 + }; 183 + 184 + struct wx_hw { 185 + u8 __iomem *hw_addr; 186 + struct pci_dev *pdev; 187 + struct wx_bus_info bus; 188 + struct wx_mac_info mac; 189 + struct wx_addr_filter_info addr_ctrl; 190 + u16 device_id; 191 + u16 vendor_id; 192 + u16 subsystem_device_id; 193 + u16 subsystem_vendor_id; 194 + u8 revision_id; 195 + u16 oem_ssid; 196 + u16 oem_svid; 197 + bool adapter_stopped; 198 + }; 199 + 200 + #define WX_INTR_ALL (~0ULL) 201 + 202 + /* register operations */ 203 + #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 204 + #define rd32(a, reg) readl((a)->hw_addr + (reg)) 205 + 206 + static inline u32 207 + rd32m(struct wx_hw *wxhw, u32 reg, u32 mask) 208 + { 209 + u32 val; 210 + 211 + val = rd32(wxhw, reg); 212 + return val & mask; 213 + } 214 + 215 + static inline void 216 
+ wr32m(struct wx_hw *wxhw, u32 reg, u32 mask, u32 field) 217 + { 218 + u32 val; 219 + 220 + val = rd32(wxhw, reg); 221 + val = ((val & ~mask) | (field & mask)); 222 + 223 + wr32(wxhw, reg, val); 224 + } 225 + 226 + /* On some domestic CPU platforms, sometimes IO is not synchronized with 227 + * flushing memory, here use readl() to flush PCI read and write. 228 + */ 229 + #define WX_WRITE_FLUSH(H) rd32(H, WX_MIS_PWR) 230 + 231 + #define wx_err(wxhw, fmt, arg...) \ 232 + dev_err(&(wxhw)->pdev->dev, fmt, ##arg) 233 + 234 + #define wx_dbg(wxhw, fmt, arg...) \ 235 + dev_dbg(&(wxhw)->pdev->dev, fmt, ##arg) 236 + 237 + #endif /* _WX_TYPE_H_ */
+2 -1
drivers/net/ethernet/wangxun/txgbe/Makefile
··· 6 6 7 7 obj-$(CONFIG_TXGBE) += txgbe.o 8 8 9 - txgbe-objs := txgbe_main.o 9 + txgbe-objs := txgbe_main.o \ 10 + txgbe_hw.o
+20 -2
drivers/net/ethernet/wangxun/txgbe/txgbe.h
··· 4 4 #ifndef _TXGBE_H_ 5 5 #define _TXGBE_H_ 6 6 7 - #include "txgbe_type.h" 8 - 9 7 #define TXGBE_MAX_FDIR_INDICES 63 10 8 11 9 #define TXGBE_MAX_RX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) 12 10 #define TXGBE_MAX_TX_QUEUES (TXGBE_MAX_FDIR_INDICES + 1) 11 + 12 + #define TXGBE_SP_MAX_TX_QUEUES 128 13 + #define TXGBE_SP_MAX_RX_QUEUES 128 14 + #define TXGBE_SP_RAR_ENTRIES 128 15 + #define TXGBE_SP_MC_TBL_SIZE 128 16 + 17 + struct txgbe_mac_addr { 18 + u8 addr[ETH_ALEN]; 19 + u16 state; /* bitmask */ 20 + u64 pools; 21 + }; 22 + 23 + #define TXGBE_MAC_STATE_DEFAULT 0x1 24 + #define TXGBE_MAC_STATE_MODIFIED 0x2 25 + #define TXGBE_MAC_STATE_IN_USE 0x4 13 26 14 27 /* board specific private data structure */ 15 28 struct txgbe_adapter { ··· 30 17 /* OS defined structs */ 31 18 struct net_device *netdev; 32 19 struct pci_dev *pdev; 20 + 21 + /* structs defined in txgbe_type.h */ 22 + struct txgbe_hw hw; 23 + u16 msg_enable; 24 + struct txgbe_mac_addr *mac_table; 33 25 }; 34 26 35 27 extern char txgbe_driver_name[];
+99
drivers/net/ethernet/wangxun/txgbe/txgbe_hw.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ 3 + 4 + #include <linux/etherdevice.h> 5 + #include <linux/if_ether.h> 6 + #include <linux/string.h> 7 + #include <linux/iopoll.h> 8 + #include <linux/types.h> 9 + #include <linux/pci.h> 10 + 11 + #include "../libwx/wx_type.h" 12 + #include "../libwx/wx_hw.h" 13 + #include "txgbe_type.h" 14 + #include "txgbe_hw.h" 15 + #include "txgbe.h" 16 + 17 + /** 18 + * txgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds 19 + * @hw: pointer to hardware structure 20 + * 21 + * Inits the thermal sensor thresholds according to the NVM map 22 + * and save off the threshold and location values into mac.thermal_sensor_data 23 + **/ 24 + static void txgbe_init_thermal_sensor_thresh(struct txgbe_hw *hw) 25 + { 26 + struct wx_hw *wxhw = &hw->wxhw; 27 + struct wx_thermal_sensor_data *data = &wxhw->mac.sensor; 28 + 29 + memset(data, 0, sizeof(struct wx_thermal_sensor_data)); 30 + 31 + /* Only support thermal sensors attached to SP physical port 0 */ 32 + if (wxhw->bus.func) 33 + return; 34 + 35 + wr32(wxhw, TXGBE_TS_CTL, TXGBE_TS_CTL_EVAL_MD); 36 + 37 + wr32(wxhw, WX_TS_INT_EN, 38 + WX_TS_INT_EN_ALARM_INT_EN | WX_TS_INT_EN_DALARM_INT_EN); 39 + wr32(wxhw, WX_TS_EN, WX_TS_EN_ENA); 40 + 41 + data->alarm_thresh = 100; 42 + wr32(wxhw, WX_TS_ALARM_THRE, 677); 43 + data->dalarm_thresh = 90; 44 + wr32(wxhw, WX_TS_DALARM_THRE, 614); 45 + } 46 + 47 + static void txgbe_reset_misc(struct txgbe_hw *hw) 48 + { 49 + struct wx_hw *wxhw = &hw->wxhw; 50 + 51 + wx_reset_misc(wxhw); 52 + txgbe_init_thermal_sensor_thresh(hw); 53 + } 54 + 55 + /** 56 + * txgbe_reset_hw - Perform hardware reset 57 + * @hw: pointer to hardware structure 58 + * 59 + * Resets the hardware by resetting the transmit and receive units, masks 60 + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) 61 + * reset. 
62 + **/ 63 + int txgbe_reset_hw(struct txgbe_hw *hw) 64 + { 65 + struct wx_hw *wxhw = &hw->wxhw; 66 + u32 reset = 0; 67 + int status; 68 + 69 + /* Call adapter stop to disable tx/rx and clear interrupts */ 70 + status = wx_stop_adapter(wxhw); 71 + if (status != 0) 72 + return status; 73 + 74 + reset = WX_MIS_RST_LAN_RST(wxhw->bus.func); 75 + wr32(wxhw, WX_MIS_RST, reset | rd32(wxhw, WX_MIS_RST)); 76 + 77 + WX_WRITE_FLUSH(wxhw); 78 + usleep_range(10, 100); 79 + 80 + status = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(wxhw->bus.func)); 81 + if (status != 0) 82 + return status; 83 + 84 + txgbe_reset_misc(hw); 85 + 86 + /* Store the permanent mac address */ 87 + wx_get_mac_addr(wxhw, wxhw->mac.perm_addr); 88 + 89 + /* Store MAC address from RAR0, clear receive address registers, and 90 + * clear the multicast table. Also reset num_rar_entries to 128, 91 + * since we modify this value when programming the SAN MAC address. 92 + */ 93 + wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; 94 + wx_init_rx_addrs(wxhw); 95 + 96 + pci_set_master(wxhw->pdev); 97 + 98 + return 0; 99 + }
+9
drivers/net/ethernet/wangxun/txgbe/txgbe_hw.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */ 3 + 4 + #ifndef _TXGBE_HW_H_ 5 + #define _TXGBE_HW_H_ 6 + 7 + int txgbe_reset_hw(struct txgbe_hw *hw); 8 + 9 + #endif /* _TXGBE_HW_H_ */
+381 -1
drivers/net/ethernet/wangxun/txgbe/txgbe_main.c
··· 8 8 #include <linux/string.h> 9 9 #include <linux/aer.h> 10 10 #include <linux/etherdevice.h> 11 + #include <net/ip.h> 11 12 13 + #include "../libwx/wx_type.h" 14 + #include "../libwx/wx_hw.h" 15 + #include "txgbe_type.h" 16 + #include "txgbe_hw.h" 12 17 #include "txgbe.h" 13 18 14 19 char txgbe_driver_name[] = "txgbe"; ··· 35 30 36 31 #define DEFAULT_DEBUG_LEVEL_SHIFT 3 37 32 33 + static void txgbe_check_minimum_link(struct txgbe_adapter *adapter) 34 + { 35 + struct pci_dev *pdev; 36 + 37 + pdev = adapter->pdev; 38 + pcie_print_link_status(pdev); 39 + } 40 + 41 + /** 42 + * txgbe_enumerate_functions - Get the number of ports this device has 43 + * @adapter: adapter structure 44 + * 45 + * This function enumerates the physical functions co-located on a single slot, 46 + * in order to determine how many ports a device has. This is most useful in 47 + * determining the required GT/s of PCIe bandwidth necessary for optimal 48 + * performance. 49 + **/ 50 + static int txgbe_enumerate_functions(struct txgbe_adapter *adapter) 51 + { 52 + struct pci_dev *entry, *pdev = adapter->pdev; 53 + int physfns = 0; 54 + 55 + list_for_each_entry(entry, &pdev->bus->devices, bus_list) { 56 + /* When the devices on the bus don't all match our device ID, 57 + * we can't reliably determine the correct number of 58 + * functions. This can occur if a function has been direct 59 + * attached to a virtual machine using VT-d. 
60 + */ 61 + if (entry->vendor != pdev->vendor || 62 + entry->device != pdev->device) 63 + return -EINVAL; 64 + 65 + physfns++; 66 + } 67 + 68 + return physfns; 69 + } 70 + 71 + static void txgbe_sync_mac_table(struct txgbe_adapter *adapter) 72 + { 73 + struct txgbe_hw *hw = &adapter->hw; 74 + struct wx_hw *wxhw = &hw->wxhw; 75 + int i; 76 + 77 + for (i = 0; i < wxhw->mac.num_rar_entries; i++) { 78 + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_MODIFIED) { 79 + if (adapter->mac_table[i].state & TXGBE_MAC_STATE_IN_USE) { 80 + wx_set_rar(wxhw, i, 81 + adapter->mac_table[i].addr, 82 + adapter->mac_table[i].pools, 83 + WX_PSR_MAC_SWC_AD_H_AV); 84 + } else { 85 + wx_clear_rar(wxhw, i); 86 + } 87 + adapter->mac_table[i].state &= ~(TXGBE_MAC_STATE_MODIFIED); 88 + } 89 + } 90 + } 91 + 92 + /* this function destroys the first RAR entry */ 93 + static void txgbe_mac_set_default_filter(struct txgbe_adapter *adapter, 94 + u8 *addr) 95 + { 96 + struct wx_hw *wxhw = &adapter->hw.wxhw; 97 + 98 + memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); 99 + adapter->mac_table[0].pools = 1ULL; 100 + adapter->mac_table[0].state = (TXGBE_MAC_STATE_DEFAULT | 101 + TXGBE_MAC_STATE_IN_USE); 102 + wx_set_rar(wxhw, 0, adapter->mac_table[0].addr, 103 + adapter->mac_table[0].pools, 104 + WX_PSR_MAC_SWC_AD_H_AV); 105 + } 106 + 107 + static void txgbe_flush_sw_mac_table(struct txgbe_adapter *adapter) 108 + { 109 + struct wx_hw *wxhw = &adapter->hw.wxhw; 110 + u32 i; 111 + 112 + for (i = 0; i < wxhw->mac.num_rar_entries; i++) { 113 + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; 114 + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; 115 + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); 116 + adapter->mac_table[i].pools = 0; 117 + } 118 + txgbe_sync_mac_table(adapter); 119 + } 120 + 121 + static int txgbe_del_mac_filter(struct txgbe_adapter *adapter, u8 *addr, u16 pool) 122 + { 123 + struct wx_hw *wxhw = &adapter->hw.wxhw; 124 + u32 i; 125 + 126 + if 
(is_zero_ether_addr(addr)) 127 + return -EINVAL; 128 + 129 + /* search table for addr, if found, set to 0 and sync */ 130 + for (i = 0; i < wxhw->mac.num_rar_entries; i++) { 131 + if (ether_addr_equal(addr, adapter->mac_table[i].addr)) { 132 + if (adapter->mac_table[i].pools & (1ULL << pool)) { 133 + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; 134 + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; 135 + adapter->mac_table[i].pools &= ~(1ULL << pool); 136 + txgbe_sync_mac_table(adapter); 137 + } 138 + return 0; 139 + } 140 + 141 + if (adapter->mac_table[i].pools != (1 << pool)) 142 + continue; 143 + if (!ether_addr_equal(addr, adapter->mac_table[i].addr)) 144 + continue; 145 + 146 + adapter->mac_table[i].state |= TXGBE_MAC_STATE_MODIFIED; 147 + adapter->mac_table[i].state &= ~TXGBE_MAC_STATE_IN_USE; 148 + memset(adapter->mac_table[i].addr, 0, ETH_ALEN); 149 + adapter->mac_table[i].pools = 0; 150 + txgbe_sync_mac_table(adapter); 151 + return 0; 152 + } 153 + return -ENOMEM; 154 + } 155 + 156 + static void txgbe_reset(struct txgbe_adapter *adapter) 157 + { 158 + struct net_device *netdev = adapter->netdev; 159 + struct txgbe_hw *hw = &adapter->hw; 160 + u8 old_addr[ETH_ALEN]; 161 + int err; 162 + 163 + err = txgbe_reset_hw(hw); 164 + if (err != 0) 165 + dev_err(&adapter->pdev->dev, "Hardware Error: %d\n", err); 166 + 167 + /* do not flush user set addresses */ 168 + memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); 169 + txgbe_flush_sw_mac_table(adapter); 170 + txgbe_mac_set_default_filter(adapter, old_addr); 171 + } 172 + 173 + static void txgbe_disable_device(struct txgbe_adapter *adapter) 174 + { 175 + struct net_device *netdev = adapter->netdev; 176 + struct wx_hw *wxhw = &adapter->hw.wxhw; 177 + 178 + wx_disable_pcie_master(wxhw); 179 + /* disable receives */ 180 + wx_disable_rx(wxhw); 181 + 182 + netif_carrier_off(netdev); 183 + netif_tx_disable(netdev); 184 + 185 + if (wxhw->bus.func < 2) 186 + wr32m(wxhw, TXGBE_MIS_PRB_CTL, 
TXGBE_MIS_PRB_CTL_LAN_UP(wxhw->bus.func), 0); 187 + else 188 + dev_err(&adapter->pdev->dev, 189 + "%s: invalid bus lan id %d\n", 190 + __func__, wxhw->bus.func); 191 + 192 + if (!(((wxhw->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) || 193 + ((wxhw->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) { 194 + /* disable mac transmiter */ 195 + wr32m(wxhw, WX_MAC_TX_CFG, WX_MAC_TX_CFG_TE, 0); 196 + } 197 + 198 + /* Disable the Tx DMA engine */ 199 + wr32m(wxhw, WX_TDM_CTL, WX_TDM_CTL_TE, 0); 200 + } 201 + 202 + static void txgbe_down(struct txgbe_adapter *adapter) 203 + { 204 + txgbe_disable_device(adapter); 205 + txgbe_reset(adapter); 206 + } 207 + 208 + /** 209 + * txgbe_sw_init - Initialize general software structures (struct txgbe_adapter) 210 + * @adapter: board private structure to initialize 211 + **/ 212 + static int txgbe_sw_init(struct txgbe_adapter *adapter) 213 + { 214 + struct pci_dev *pdev = adapter->pdev; 215 + struct txgbe_hw *hw = &adapter->hw; 216 + struct wx_hw *wxhw = &hw->wxhw; 217 + int err; 218 + 219 + wxhw->hw_addr = adapter->io_addr; 220 + wxhw->pdev = pdev; 221 + 222 + /* PCI config space info */ 223 + err = wx_sw_init(wxhw); 224 + if (err < 0) { 225 + netif_err(adapter, probe, adapter->netdev, 226 + "read of internal subsystem device id failed\n"); 227 + return err; 228 + } 229 + 230 + switch (wxhw->device_id) { 231 + case TXGBE_DEV_ID_SP1000: 232 + case TXGBE_DEV_ID_WX1820: 233 + wxhw->mac.type = wx_mac_sp; 234 + break; 235 + default: 236 + wxhw->mac.type = wx_mac_unknown; 237 + break; 238 + } 239 + 240 + wxhw->mac.num_rar_entries = TXGBE_SP_RAR_ENTRIES; 241 + wxhw->mac.max_tx_queues = TXGBE_SP_MAX_TX_QUEUES; 242 + wxhw->mac.max_rx_queues = TXGBE_SP_MAX_RX_QUEUES; 243 + wxhw->mac.mcft_size = TXGBE_SP_MC_TBL_SIZE; 244 + 245 + adapter->mac_table = kcalloc(wxhw->mac.num_rar_entries, 246 + sizeof(struct txgbe_mac_addr), 247 + GFP_KERNEL); 248 + if (!adapter->mac_table) { 249 + netif_err(adapter, probe, adapter->netdev, 250 + "mac_table 
allocation failed\n"); 251 + return -ENOMEM; 252 + } 253 + 254 + return 0; 255 + } 256 + 257 + /** 258 + * txgbe_open - Called when a network interface is made active 259 + * @netdev: network interface device structure 260 + * 261 + * Returns 0 on success, negative value on failure 262 + * 263 + * The open entry point is called when a network interface is made 264 + * active by the system (IFF_UP). 265 + **/ 266 + static int txgbe_open(struct net_device *netdev) 267 + { 268 + return 0; 269 + } 270 + 271 + /** 272 + * txgbe_close_suspend - actions necessary to both suspend and close flows 273 + * @adapter: the private adapter struct 274 + * 275 + * This function should contain the necessary work common to both suspending 276 + * and closing of the device. 277 + */ 278 + static void txgbe_close_suspend(struct txgbe_adapter *adapter) 279 + { 280 + txgbe_disable_device(adapter); 281 + } 282 + 283 + /** 284 + * txgbe_close - Disables a network interface 285 + * @netdev: network interface device structure 286 + * 287 + * Returns 0, this is not allowed to fail 288 + * 289 + * The close entry point is called when an interface is de-activated 290 + * by the OS. The hardware is still under the drivers control, but 291 + * needs to be disabled. A global MAC reset is issued to stop the 292 + * hardware, and all transmit and receive resources are freed. 
293 + **/ 294 + static int txgbe_close(struct net_device *netdev) 295 + { 296 + struct txgbe_adapter *adapter = netdev_priv(netdev); 297 + 298 + txgbe_down(adapter); 299 + 300 + return 0; 301 + } 302 + 38 303 static void txgbe_dev_shutdown(struct pci_dev *pdev, bool *enable_wake) 39 304 { 40 305 struct txgbe_adapter *adapter = pci_get_drvdata(pdev); 41 306 struct net_device *netdev = adapter->netdev; 42 307 43 308 netif_device_detach(netdev); 309 + 310 + rtnl_lock(); 311 + if (netif_running(netdev)) 312 + txgbe_close_suspend(adapter); 313 + rtnl_unlock(); 44 314 45 315 pci_disable_device(pdev); 46 316 } ··· 332 52 } 333 53 } 334 54 55 + static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb, 56 + struct net_device *netdev) 57 + { 58 + return NETDEV_TX_OK; 59 + } 60 + 61 + /** 62 + * txgbe_set_mac - Change the Ethernet Address of the NIC 63 + * @netdev: network interface device structure 64 + * @p: pointer to an address structure 65 + * 66 + * Returns 0 on success, negative on failure 67 + **/ 68 + static int txgbe_set_mac(struct net_device *netdev, void *p) 69 + { 70 + struct txgbe_adapter *adapter = netdev_priv(netdev); 71 + struct wx_hw *wxhw = &adapter->hw.wxhw; 72 + struct sockaddr *addr = p; 73 + int retval; 74 + 75 + retval = eth_prepare_mac_addr_change(netdev, addr); 76 + if (retval) 77 + return retval; 78 + 79 + txgbe_del_mac_filter(adapter, wxhw->mac.addr, 0); 80 + eth_hw_addr_set(netdev, addr->sa_data); 81 + memcpy(wxhw->mac.addr, addr->sa_data, netdev->addr_len); 82 + 83 + txgbe_mac_set_default_filter(adapter, wxhw->mac.addr); 84 + 85 + return 0; 86 + } 87 + 88 + static const struct net_device_ops txgbe_netdev_ops = { 89 + .ndo_open = txgbe_open, 90 + .ndo_stop = txgbe_close, 91 + .ndo_start_xmit = txgbe_xmit_frame, 92 + .ndo_validate_addr = eth_validate_addr, 93 + .ndo_set_mac_address = txgbe_set_mac, 94 + }; 95 + 335 96 /** 336 97 * txgbe_probe - Device Initialization Routine 337 98 * @pdev: PCI device information struct ··· 388 67 const struct 
pci_device_id __always_unused *ent) 389 68 { 390 69 struct txgbe_adapter *adapter = NULL; 70 + struct txgbe_hw *hw = NULL; 71 + struct wx_hw *wxhw = NULL; 391 72 struct net_device *netdev; 392 - int err; 73 + int err, expected_gts; 393 74 394 75 err = pci_enable_device_mem(pdev); 395 76 if (err) ··· 430 107 adapter = netdev_priv(netdev); 431 108 adapter->netdev = netdev; 432 109 adapter->pdev = pdev; 110 + hw = &adapter->hw; 111 + wxhw = &hw->wxhw; 112 + adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; 433 113 434 114 adapter->io_addr = devm_ioremap(&pdev->dev, 435 115 pci_resource_start(pdev, 0), ··· 442 116 goto err_pci_release_regions; 443 117 } 444 118 119 + netdev->netdev_ops = &txgbe_netdev_ops; 120 + 121 + /* setup the private structure */ 122 + err = txgbe_sw_init(adapter); 123 + if (err) 124 + goto err_free_mac_table; 125 + 126 + /* check if flash load is done after hw power up */ 127 + err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PERST); 128 + if (err) 129 + goto err_free_mac_table; 130 + err = wx_check_flash_load(wxhw, TXGBE_SPI_ILDR_STATUS_PWRRST); 131 + if (err) 132 + goto err_free_mac_table; 133 + 134 + err = txgbe_reset_hw(hw); 135 + if (err) { 136 + dev_err(&pdev->dev, "HW Init failed: %d\n", err); 137 + goto err_free_mac_table; 138 + } 139 + 445 140 netdev->features |= NETIF_F_HIGHDMA; 141 + 142 + eth_hw_addr_set(netdev, wxhw->mac.perm_addr); 143 + txgbe_mac_set_default_filter(adapter, wxhw->mac.perm_addr); 144 + 145 + err = register_netdev(netdev); 146 + if (err) 147 + goto err_free_mac_table; 446 148 447 149 pci_set_drvdata(pdev, adapter); 448 150 151 + /* calculate the expected PCIe bandwidth required for optimal 152 + * performance. Note that some older parts will never have enough 153 + * bandwidth due to being older generation PCIe parts. We clamp these 154 + * parts to ensure that no warning is displayed, as this could confuse 155 + * users otherwise. 
156 + */ 157 + expected_gts = txgbe_enumerate_functions(adapter) * 10; 158 + 159 + /* don't check link if we failed to enumerate functions */ 160 + if (expected_gts > 0) 161 + txgbe_check_minimum_link(adapter); 162 + else 163 + dev_warn(&pdev->dev, "Failed to enumerate PF devices.\n"); 164 + 165 + netif_info(adapter, probe, netdev, "%pM\n", netdev->dev_addr); 166 + 449 167 return 0; 450 168 169 + err_free_mac_table: 170 + kfree(adapter->mac_table); 451 171 err_pci_release_regions: 452 172 pci_disable_pcie_error_reporting(pdev); 453 173 pci_release_selected_regions(pdev, ··· 514 142 **/ 515 143 static void txgbe_remove(struct pci_dev *pdev) 516 144 { 145 + struct txgbe_adapter *adapter = pci_get_drvdata(pdev); 146 + struct net_device *netdev; 147 + 148 + netdev = adapter->netdev; 149 + unregister_netdev(netdev); 150 + 517 151 pci_release_selected_regions(pdev, 518 152 pci_select_bars(pdev, IORESOURCE_MEM)); 153 + 154 + kfree(adapter->mac_table); 519 155 520 156 pci_disable_pcie_error_reporting(pdev); 521 157
+18 -15
drivers/net/ethernet/wangxun/txgbe/txgbe_type.h
/* drivers/net/ethernet/wangxun/txgbe/txgbe_type.h — post-patch state of the
 * hunks shown in this diff. This patch removes the local <linux/types.h> /
 * <linux/netdevice.h> includes and the PCI_VENDOR_ID_WANGXUN / TXGBE_NCSI_* /
 * TXGBE_WOL_* / TXGBE_DEV_MASK defines (their replacements now live in the
 * shared libwx wx_type.h). NOTE(review): as a consequence this header assumes
 * struct wx_hw and BIT() are already visible — txgbe_main.c includes
 * ../libwx/wx_type.h before this header; confirm all other users do the same.
 */

#ifndef _TXGBE_TYPE_H_
#define _TXGBE_TYPE_H_

/* Device IDs */
#define TXGBE_DEV_ID_SP1000                     0x1001
#define TXGBE_DEV_ID_WX1820                     0x2001

/* ... unchanged context elided ... */

#define TXGBE_ID_WX1820_MAC_SGMII               0x2060
#define TXGBE_ID_MAC_SGMII                      0x60

/* Combined interface */
#define TXGBE_ID_SFI_XAUI                       0x50

/* Revision ID */
#define TXGBE_SP_MPW  1

/**************** SP Registers ****************************/
/* chip control Registers */
#define TXGBE_MIS_PRB_CTL                       0x10010
/* LAN0 uses bit 1, LAN1 uses bit 0 — hence the inverted index */
#define TXGBE_MIS_PRB_CTL_LAN_UP(_i)            BIT(1 - (_i))
/* FMGR Registers */
#define TXGBE_SPI_ILDR_STATUS                   0x10120
#define TXGBE_SPI_ILDR_STATUS_PERST             BIT(0) /* PCIE_PERST is done */
#define TXGBE_SPI_ILDR_STATUS_PWRRST            BIT(1) /* Power on reset is done */
#define TXGBE_SPI_ILDR_STATUS_LAN_SW_RST(_i)    BIT((_i) + 9) /* lan soft reset done */

/* Sensors for PVT(Process Voltage Temperature) */
#define TXGBE_TS_CTL                            0x10300
#define TXGBE_TS_CTL_EVAL_MD                    BIT(31)

/* txgbe-specific hw state; embeds the common libwx hw struct first so the
 * two can be cast between freely
 */
struct txgbe_hw {
	struct wx_hw wxhw;
};

#endif /* _TXGBE_TYPE_H_ */