Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

staging: et131x: Put all .c files into one big file

Created one big .c file for the driver, moving the contents of all
driver .c files into it.

Signed-off-by: Mark Einon <mark.einon@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

Authored by Mark Einon; committed by Greg Kroah-Hartman
d2796743 cd8eca6f

+5102 -5862
-11
drivers/staging/et131x/Makefile
··· 3 3 # 4 4 5 5 obj-$(CONFIG_ET131X) += et131x.o 6 - 7 - et131x-y := et1310_eeprom.o \ 8 - et1310_mac.o \ 9 - et1310_phy.o \ 10 - et1310_pm.o \ 11 - et1310_rx.o \ 12 - et1310_tx.o \ 13 - et131x_initpci.o \ 14 - et131x_ethtool.o \ 15 - et131x_isr.o \ 16 - et131x_netdev.o
-408
drivers/staging/et131x/et1310_eeprom.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 4 - * 5 - * Copyright © 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et1310_eeprom.c - Code used to access the device's EEPROM 14 - * 15 - *------------------------------------------------------------------------------ 16 - * 17 - * SOFTWARE LICENSE 18 - * 19 - * This software is provided subject to the following terms and conditions, 20 - * which you should read carefully before using the software. Using this 21 - * software indicates your acceptance of these terms and conditions. If you do 22 - * not agree with these terms and conditions, do not use the software. 23 - * 24 - * Copyright © 2005 Agere Systems Inc. 25 - * All rights reserved. 26 - * 27 - * Redistribution and use in source or binary forms, with or without 28 - * modifications, are permitted provided that the following conditions are met: 29 - * 30 - * . Redistributions of source code must retain the above copyright notice, this 31 - * list of conditions and the following Disclaimer as comments in the code as 32 - * well as in the documentation and/or other materials provided with the 33 - * distribution. 34 - * 35 - * . Redistributions in binary form must reproduce the above copyright notice, 36 - * this list of conditions and the following Disclaimer in the documentation 37 - * and/or other materials provided with the distribution. 38 - * 39 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 40 - * may be used to endorse or promote products derived from this software 41 - * without specific prior written permission. 
42 - * 43 - * Disclaimer 44 - * 45 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 46 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 47 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 48 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 49 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 50 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 51 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 52 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 53 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 54 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 55 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 56 - * DAMAGE. 57 - * 58 - */ 59 - 60 - #include "et131x_defs.h" 61 - 62 - #include <linux/pci.h> 63 - #include <linux/init.h> 64 - #include <linux/module.h> 65 - #include <linux/types.h> 66 - #include <linux/kernel.h> 67 - 68 - #include <linux/sched.h> 69 - #include <linux/ptrace.h> 70 - #include <linux/ctype.h> 71 - #include <linux/string.h> 72 - #include <linux/timer.h> 73 - #include <linux/interrupt.h> 74 - #include <linux/in.h> 75 - #include <linux/delay.h> 76 - #include <linux/bitops.h> 77 - #include <linux/io.h> 78 - #include <asm/system.h> 79 - 80 - #include <linux/netdevice.h> 81 - #include <linux/etherdevice.h> 82 - #include <linux/skbuff.h> 83 - #include <linux/if_arp.h> 84 - #include <linux/ioport.h> 85 - 86 - #include "et1310_phy.h" 87 - #include "et131x_adapter.h" 88 - #include "et131x.h" 89 - 90 - /* 91 - * EEPROM Defines 92 - */ 93 - 94 - /* LBCIF Register Groups (addressed via 32-bit offsets) */ 95 - #define LBCIF_DWORD0_GROUP 0xAC 96 - #define LBCIF_DWORD1_GROUP 0xB0 97 - 98 - /* LBCIF Registers (addressed via 8-bit offsets) */ 
99 - #define LBCIF_ADDRESS_REGISTER 0xAC 100 - #define LBCIF_DATA_REGISTER 0xB0 101 - #define LBCIF_CONTROL_REGISTER 0xB1 102 - #define LBCIF_STATUS_REGISTER 0xB2 103 - 104 - /* LBCIF Control Register Bits */ 105 - #define LBCIF_CONTROL_SEQUENTIAL_READ 0x01 106 - #define LBCIF_CONTROL_PAGE_WRITE 0x02 107 - #define LBCIF_CONTROL_EEPROM_RELOAD 0x08 108 - #define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20 109 - #define LBCIF_CONTROL_I2C_WRITE 0x40 110 - #define LBCIF_CONTROL_LBCIF_ENABLE 0x80 111 - 112 - /* LBCIF Status Register Bits */ 113 - #define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01 114 - #define LBCIF_STATUS_I2C_IDLE 0x02 115 - #define LBCIF_STATUS_ACK_ERROR 0x04 116 - #define LBCIF_STATUS_GENERAL_ERROR 0x08 117 - #define LBCIF_STATUS_CHECKSUM_ERROR 0x40 118 - #define LBCIF_STATUS_EEPROM_PRESENT 0x80 119 - 120 - /* Miscellaneous Constraints */ 121 - #define MAX_NUM_REGISTER_POLLS 1000 122 - #define MAX_NUM_WRITE_RETRIES 2 123 - 124 - static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) 125 - { 126 - u32 reg; 127 - int i; 128 - 129 - /* 130 - * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and 131 - * bits 7,1:0 both equal to 1, at least once after reset. 132 - * Subsequent operations need only to check that bits 1:0 are equal 133 - * to 1 prior to starting a single byte read/write 134 - */ 135 - 136 - for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { 137 - /* Read registers grouped in DWORD1 */ 138 - if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg)) 139 - return -EIO; 140 - 141 - /* I2C idle and Phy Queue Avail both true */ 142 - if ((reg & 0x3000) == 0x3000) { 143 - if (status) 144 - *status = reg; 145 - return reg & 0xFF; 146 - } 147 - } 148 - return -ETIMEDOUT; 149 - } 150 - 151 - 152 - /** 153 - * eeprom_write - Write a byte to the ET1310's EEPROM 154 - * @adapter: pointer to our private adapter structure 155 - * @addr: the address to write 156 - * @data: the value to write 157 - * 158 - * Returns 1 for a successful write. 
159 - */ 160 - static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) 161 - { 162 - struct pci_dev *pdev = adapter->pdev; 163 - int index = 0; 164 - int retries; 165 - int err = 0; 166 - int i2c_wack = 0; 167 - int writeok = 0; 168 - u32 status; 169 - u32 val = 0; 170 - 171 - /* 172 - * For an EEPROM, an I2C single byte write is defined as a START 173 - * condition followed by the device address, EEPROM address, one byte 174 - * of data and a STOP condition. The STOP condition will trigger the 175 - * EEPROM's internally timed write cycle to the nonvolatile memory. 176 - * All inputs are disabled during this write cycle and the EEPROM will 177 - * not respond to any access until the internal write is complete. 178 - */ 179 - 180 - err = eeprom_wait_ready(pdev, NULL); 181 - if (err) 182 - return err; 183 - 184 - /* 185 - * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0, 186 - * and bits 1:0 both =0. Bit 5 should be set according to the 187 - * type of EEPROM being accessed (1=two byte addressing, 0=one 188 - * byte addressing). 189 - */ 190 - if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 191 - LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE)) 192 - return -EIO; 193 - 194 - i2c_wack = 1; 195 - 196 - /* Prepare EEPROM address for Step 3 */ 197 - 198 - for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) { 199 - /* Write the address to the LBCIF Address Register */ 200 - if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 201 - break; 202 - /* 203 - * Write the data to the LBCIF Data Register (the I2C write 204 - * will begin). 205 - */ 206 - if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data)) 207 - break; 208 - /* 209 - * Monitor bit 1:0 of the LBCIF Status Register. When bits 210 - * 1:0 are both equal to 1, the I2C write has completed and the 211 - * internal write cycle of the EEPROM is about to start. 
212 - * (bits 1:0 = 01 is a legal state while waiting from both 213 - * equal to 1, but bits 1:0 = 10 is invalid and implies that 214 - * something is broken). 215 - */ 216 - err = eeprom_wait_ready(pdev, &status); 217 - if (err < 0) 218 - return 0; 219 - 220 - /* 221 - * Check bit 3 of the LBCIF Status Register. If equal to 1, 222 - * an error has occurred.Don't break here if we are revision 223 - * 1, this is so we do a blind write for load bug. 224 - */ 225 - if ((status & LBCIF_STATUS_GENERAL_ERROR) 226 - && adapter->pdev->revision == 0) 227 - break; 228 - 229 - /* 230 - * Check bit 2 of the LBCIF Status Register. If equal to 1 an 231 - * ACK error has occurred on the address phase of the write. 232 - * This could be due to an actual hardware failure or the 233 - * EEPROM may still be in its internal write cycle from a 234 - * previous write. This write operation was ignored and must be 235 - *repeated later. 236 - */ 237 - if (status & LBCIF_STATUS_ACK_ERROR) { 238 - /* 239 - * This could be due to an actual hardware failure 240 - * or the EEPROM may still be in its internal write 241 - * cycle from a previous write. This write operation 242 - * was ignored and must be repeated later. 243 - */ 244 - udelay(10); 245 - continue; 246 - } 247 - 248 - writeok = 1; 249 - break; 250 - } 251 - 252 - /* 253 - * Set bit 6 of the LBCIF Control Register = 0. 
254 - */ 255 - udelay(10); 256 - 257 - while (i2c_wack) { 258 - if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 259 - LBCIF_CONTROL_LBCIF_ENABLE)) 260 - writeok = 0; 261 - 262 - /* Do read until internal ACK_ERROR goes away meaning write 263 - * completed 264 - */ 265 - do { 266 - pci_write_config_dword(pdev, 267 - LBCIF_ADDRESS_REGISTER, 268 - addr); 269 - do { 270 - pci_read_config_dword(pdev, 271 - LBCIF_DATA_REGISTER, &val); 272 - } while ((val & 0x00010000) == 0); 273 - } while (val & 0x00040000); 274 - 275 - if ((val & 0xFF00) != 0xC000 || index == 10000) 276 - break; 277 - index++; 278 - } 279 - return writeok ? 0 : -EIO; 280 - } 281 - 282 - /** 283 - * eeprom_read - Read a byte from the ET1310's EEPROM 284 - * @adapter: pointer to our private adapter structure 285 - * @addr: the address from which to read 286 - * @pdata: a pointer to a byte in which to store the value of the read 287 - * @eeprom_id: the ID of the EEPROM 288 - * @addrmode: how the EEPROM is to be accessed 289 - * 290 - * Returns 1 for a successful read 291 - */ 292 - static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata) 293 - { 294 - struct pci_dev *pdev = adapter->pdev; 295 - int err; 296 - u32 status; 297 - 298 - /* 299 - * A single byte read is similar to the single byte write, with the 300 - * exception of the data flow: 301 - */ 302 - 303 - err = eeprom_wait_ready(pdev, NULL); 304 - if (err) 305 - return err; 306 - /* 307 - * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0, 308 - * and bits 1:0 both =0. Bit 5 should be set according to the type 309 - * of EEPROM being accessed (1=two byte addressing, 0=one byte 310 - * addressing). 311 - */ 312 - if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 313 - LBCIF_CONTROL_LBCIF_ENABLE)) 314 - return -EIO; 315 - /* 316 - * Write the address to the LBCIF Address Register (I2C read will 317 - * begin). 
318 - */ 319 - if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 320 - return -EIO; 321 - /* 322 - * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read 323 - * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure 324 - * has occurred). 325 - */ 326 - err = eeprom_wait_ready(pdev, &status); 327 - if (err < 0) 328 - return err; 329 - /* 330 - * Regardless of error status, read data byte from LBCIF Data 331 - * Register. 332 - */ 333 - *pdata = err; 334 - /* 335 - * Check bit 2 of the LBCIF Status Register. If = 1, 336 - * then an error has occurred. 337 - */ 338 - return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0; 339 - } 340 - 341 - int et131x_init_eeprom(struct et131x_adapter *adapter) 342 - { 343 - struct pci_dev *pdev = adapter->pdev; 344 - u8 eestatus; 345 - 346 - /* We first need to check the EEPROM Status code located at offset 347 - * 0xB2 of config space 348 - */ 349 - pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, 350 - &eestatus); 351 - 352 - /* THIS IS A WORKAROUND: 353 - * I need to call this function twice to get my card in a 354 - * LG M1 Express Dual running. I tried also a msleep before this 355 - * function, because I thougth there could be some time condidions 356 - * but it didn't work. Call the whole function twice also work. 357 - */ 358 - if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { 359 - dev_err(&pdev->dev, 360 - "Could not read PCI config space for EEPROM Status\n"); 361 - return -EIO; 362 - } 363 - 364 - /* Determine if the error(s) we care about are present. If they are 365 - * present we need to fail. 
366 - */ 367 - if (eestatus & 0x4C) { 368 - int write_failed = 0; 369 - if (pdev->revision == 0x01) { 370 - int i; 371 - static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; 372 - 373 - /* Re-write the first 4 bytes if we have an eeprom 374 - * present and the revision id is 1, this fixes the 375 - * corruption seen with 1310 B Silicon 376 - */ 377 - for (i = 0; i < 3; i++) 378 - if (eeprom_write(adapter, i, eedata[i]) < 0) 379 - write_failed = 1; 380 - } 381 - if (pdev->revision != 0x01 || write_failed) { 382 - dev_err(&pdev->dev, 383 - "Fatal EEPROM Status Error - 0x%04x\n", eestatus); 384 - 385 - /* This error could mean that there was an error 386 - * reading the eeprom or that the eeprom doesn't exist. 387 - * We will treat each case the same and not try to 388 - * gather additional information that normally would 389 - * come from the eeprom, like MAC Address 390 - */ 391 - adapter->has_eeprom = 0; 392 - return -EIO; 393 - } 394 - } 395 - adapter->has_eeprom = 1; 396 - 397 - /* Read the EEPROM for information regarding LED behavior. Refer to 398 - * ET1310_phy.c, et131x_xcvr_init(), for its use. 399 - */ 400 - eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]); 401 - eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]); 402 - 403 - if (adapter->eeprom_data[0] != 0xcd) 404 - /* Disable all optional features */ 405 - adapter->eeprom_data[1] = 0x00; 406 - 407 - return 0; 408 - }
-654
drivers/staging/et131x/et1310_mac.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 4 - * 5 - * Copyright © 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et1310_mac.c - All code and routines pertaining to the MAC 14 - * 15 - *------------------------------------------------------------------------------ 16 - * 17 - * SOFTWARE LICENSE 18 - * 19 - * This software is provided subject to the following terms and conditions, 20 - * which you should read carefully before using the software. Using this 21 - * software indicates your acceptance of these terms and conditions. If you do 22 - * not agree with these terms and conditions, do not use the software. 23 - * 24 - * Copyright © 2005 Agere Systems Inc. 25 - * All rights reserved. 26 - * 27 - * Redistribution and use in source or binary forms, with or without 28 - * modifications, are permitted provided that the following conditions are met: 29 - * 30 - * . Redistributions of source code must retain the above copyright notice, this 31 - * list of conditions and the following Disclaimer as comments in the code as 32 - * well as in the documentation and/or other materials provided with the 33 - * distribution. 34 - * 35 - * . Redistributions in binary form must reproduce the above copyright notice, 36 - * this list of conditions and the following Disclaimer in the documentation 37 - * and/or other materials provided with the distribution. 38 - * 39 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 40 - * may be used to endorse or promote products derived from this software 41 - * without specific prior written permission. 
42 - * 43 - * Disclaimer 44 - * 45 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 46 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 47 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 48 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 49 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 50 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 51 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 52 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 53 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 54 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 55 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 56 - * DAMAGE. 57 - * 58 - */ 59 - 60 - #include "et131x_defs.h" 61 - 62 - #include <linux/init.h> 63 - #include <linux/module.h> 64 - #include <linux/types.h> 65 - #include <linux/kernel.h> 66 - 67 - #include <linux/sched.h> 68 - #include <linux/ptrace.h> 69 - #include <linux/ctype.h> 70 - #include <linux/string.h> 71 - #include <linux/timer.h> 72 - #include <linux/interrupt.h> 73 - #include <linux/in.h> 74 - #include <linux/delay.h> 75 - #include <linux/io.h> 76 - #include <linux/bitops.h> 77 - #include <linux/pci.h> 78 - #include <asm/system.h> 79 - 80 - #include <linux/netdevice.h> 81 - #include <linux/etherdevice.h> 82 - #include <linux/skbuff.h> 83 - #include <linux/if_arp.h> 84 - #include <linux/ioport.h> 85 - #include <linux/crc32.h> 86 - #include <linux/phy.h> 87 - 88 - #include "et1310_phy.h" 89 - #include "et131x_adapter.h" 90 - #include "et131x.h" 91 - 92 - #define COUNTER_WRAP_16_BIT 0x10000 93 - #define COUNTER_WRAP_12_BIT 0x1000 94 - 95 - /** 96 - * et1310_config_mac_regs1 - Initialize the first part of MAC regs 97 - * @adapter: 
pointer to our adapter structure 98 - */ 99 - void et1310_config_mac_regs1(struct et131x_adapter *adapter) 100 - { 101 - struct mac_regs __iomem *macregs = &adapter->regs->mac; 102 - u32 station1; 103 - u32 station2; 104 - u32 ipg; 105 - 106 - /* First we need to reset everything. Write to MAC configuration 107 - * register 1 to perform reset. 108 - */ 109 - writel(0xC00F0000, &macregs->cfg1); 110 - 111 - /* Next lets configure the MAC Inter-packet gap register */ 112 - ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */ 113 - ipg |= 0x50 << 8; /* ifg enforce 0x50 */ 114 - writel(ipg, &macregs->ipg); 115 - 116 - /* Next lets configure the MAC Half Duplex register */ 117 - /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */ 118 - writel(0x00A1F037, &macregs->hfdp); 119 - 120 - /* Next lets configure the MAC Interface Control register */ 121 - writel(0, &macregs->if_ctrl); 122 - 123 - /* Let's move on to setting up the mii management configuration */ 124 - writel(0x07, &macregs->mii_mgmt_cfg); /* Clock reset 0x7 */ 125 - 126 - /* Next lets configure the MAC Station Address register. These 127 - * values are read from the EEPROM during initialization and stored 128 - * in the adapter structure. We write what is stored in the adapter 129 - * structure to the MAC Station Address registers high and low. This 130 - * station address is used for generating and checking pause control 131 - * packets. 132 - */ 133 - station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) | 134 - (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT); 135 - station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) | 136 - (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) | 137 - (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) | 138 - adapter->addr[2]; 139 - writel(station1, &macregs->station_addr_1); 140 - writel(station2, &macregs->station_addr_2); 141 - 142 - /* Max ethernet packet in bytes that will passed by the mac without 143 - * being truncated. 
Allow the MAC to pass 4 more than our max packet 144 - * size. This is 4 for the Ethernet CRC. 145 - * 146 - * Packets larger than (registry_jumbo_packet) that do not contain a 147 - * VLAN ID will be dropped by the Rx function. 148 - */ 149 - writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len); 150 - 151 - /* clear out MAC config reset */ 152 - writel(0, &macregs->cfg1); 153 - } 154 - 155 - /** 156 - * et1310_config_mac_regs2 - Initialize the second part of MAC regs 157 - * @adapter: pointer to our adapter structure 158 - */ 159 - void et1310_config_mac_regs2(struct et131x_adapter *adapter) 160 - { 161 - int32_t delay = 0; 162 - struct mac_regs __iomem *mac = &adapter->regs->mac; 163 - struct phy_device *phydev = adapter->phydev; 164 - u32 cfg1; 165 - u32 cfg2; 166 - u32 ifctrl; 167 - u32 ctl; 168 - 169 - ctl = readl(&adapter->regs->txmac.ctl); 170 - cfg1 = readl(&mac->cfg1); 171 - cfg2 = readl(&mac->cfg2); 172 - ifctrl = readl(&mac->if_ctrl); 173 - 174 - /* Set up the if mode bits */ 175 - cfg2 &= ~0x300; 176 - if (phydev && phydev->speed == SPEED_1000) { 177 - cfg2 |= 0x200; 178 - /* Phy mode bit */ 179 - ifctrl &= ~(1 << 24); 180 - } else { 181 - cfg2 |= 0x100; 182 - ifctrl |= (1 << 24); 183 - } 184 - 185 - /* We need to enable Rx/Tx */ 186 - cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW; 187 - /* Initialize loop back to off */ 188 - cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW); 189 - if (adapter->flowcontrol == FLOW_RXONLY || 190 - adapter->flowcontrol == FLOW_BOTH) 191 - cfg1 |= CFG1_RX_FLOW; 192 - writel(cfg1, &mac->cfg1); 193 - 194 - /* Now we need to initialize the MAC Configuration 2 register */ 195 - /* preamble 7, check length, huge frame off, pad crc, crc enable 196 - full duplex off */ 197 - cfg2 |= 0x7016; 198 - cfg2 &= ~0x0021; 199 - 200 - /* Turn on duplex if needed */ 201 - if (phydev && phydev->duplex == DUPLEX_FULL) 202 - cfg2 |= 0x01; 203 - 204 - ifctrl &= ~(1 << 26); 205 - if (phydev && phydev->duplex == DUPLEX_HALF) 206 - 
ifctrl |= (1<<26); /* Enable ghd */ 207 - 208 - writel(ifctrl, &mac->if_ctrl); 209 - writel(cfg2, &mac->cfg2); 210 - 211 - do { 212 - udelay(10); 213 - delay++; 214 - cfg1 = readl(&mac->cfg1); 215 - } while ((cfg1 & CFG1_WAIT) != CFG1_WAIT && delay < 100); 216 - 217 - if (delay == 100) { 218 - dev_warn(&adapter->pdev->dev, 219 - "Syncd bits did not respond correctly cfg1 word 0x%08x\n", 220 - cfg1); 221 - } 222 - 223 - /* Enable txmac */ 224 - ctl |= 0x09; /* TX mac enable, FC disable */ 225 - writel(ctl, &adapter->regs->txmac.ctl); 226 - 227 - /* Ready to start the RXDMA/TXDMA engine */ 228 - if (adapter->flags & fMP_ADAPTER_LOWER_POWER) { 229 - et131x_rx_dma_enable(adapter); 230 - et131x_tx_dma_enable(adapter); 231 - } 232 - } 233 - 234 - void et1310_config_rxmac_regs(struct et131x_adapter *adapter) 235 - { 236 - struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 237 - struct phy_device *phydev = adapter->phydev; 238 - u32 sa_lo; 239 - u32 sa_hi = 0; 240 - u32 pf_ctrl = 0; 241 - 242 - /* Disable the MAC while it is being configured (also disable WOL) */ 243 - writel(0x8, &rxmac->ctrl); 244 - 245 - /* Initialize WOL to disabled. */ 246 - writel(0, &rxmac->crc0); 247 - writel(0, &rxmac->crc12); 248 - writel(0, &rxmac->crc34); 249 - 250 - /* We need to set the WOL mask0 - mask4 next. We initialize it to 251 - * its default Values of 0x00000000 because there are not WOL masks 252 - * as of this time. 
253 - */ 254 - writel(0, &rxmac->mask0_word0); 255 - writel(0, &rxmac->mask0_word1); 256 - writel(0, &rxmac->mask0_word2); 257 - writel(0, &rxmac->mask0_word3); 258 - 259 - writel(0, &rxmac->mask1_word0); 260 - writel(0, &rxmac->mask1_word1); 261 - writel(0, &rxmac->mask1_word2); 262 - writel(0, &rxmac->mask1_word3); 263 - 264 - writel(0, &rxmac->mask2_word0); 265 - writel(0, &rxmac->mask2_word1); 266 - writel(0, &rxmac->mask2_word2); 267 - writel(0, &rxmac->mask2_word3); 268 - 269 - writel(0, &rxmac->mask3_word0); 270 - writel(0, &rxmac->mask3_word1); 271 - writel(0, &rxmac->mask3_word2); 272 - writel(0, &rxmac->mask3_word3); 273 - 274 - writel(0, &rxmac->mask4_word0); 275 - writel(0, &rxmac->mask4_word1); 276 - writel(0, &rxmac->mask4_word2); 277 - writel(0, &rxmac->mask4_word3); 278 - 279 - /* Lets setup the WOL Source Address */ 280 - sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) | 281 - (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) | 282 - (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) | 283 - adapter->addr[5]; 284 - writel(sa_lo, &rxmac->sa_lo); 285 - 286 - sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) | 287 - adapter->addr[1]; 288 - writel(sa_hi, &rxmac->sa_hi); 289 - 290 - /* Disable all Packet Filtering */ 291 - writel(0, &rxmac->pf_ctrl); 292 - 293 - /* Let's initialize the Unicast Packet filtering address */ 294 - if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) { 295 - et1310_setup_device_for_unicast(adapter); 296 - pf_ctrl |= 4; /* Unicast filter */ 297 - } else { 298 - writel(0, &rxmac->uni_pf_addr1); 299 - writel(0, &rxmac->uni_pf_addr2); 300 - writel(0, &rxmac->uni_pf_addr3); 301 - } 302 - 303 - /* Let's initialize the Multicast hash */ 304 - if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { 305 - pf_ctrl |= 2; /* Multicast filter */ 306 - et1310_setup_device_for_multicast(adapter); 307 - } 308 - 309 - /* Runt packet filtering. Didn't work in version A silicon. 
*/ 310 - pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16; 311 - pf_ctrl |= 8; /* Fragment filter */ 312 - 313 - if (adapter->registry_jumbo_packet > 8192) 314 - /* In order to transmit jumbo packets greater than 8k, the 315 - * FIFO between RxMAC and RxDMA needs to be reduced in size 316 - * to (16k - Jumbo packet size). In order to implement this, 317 - * we must use "cut through" mode in the RxMAC, which chops 318 - * packets down into segments which are (max_size * 16). In 319 - * this case we selected 256 bytes, since this is the size of 320 - * the PCI-Express TLP's that the 1310 uses. 321 - * 322 - * seg_en on, fc_en off, size 0x10 323 - */ 324 - writel(0x41, &rxmac->mcif_ctrl_max_seg); 325 - else 326 - writel(0, &rxmac->mcif_ctrl_max_seg); 327 - 328 - /* Initialize the MCIF water marks */ 329 - writel(0, &rxmac->mcif_water_mark); 330 - 331 - /* Initialize the MIF control */ 332 - writel(0, &rxmac->mif_ctrl); 333 - 334 - /* Initialize the Space Available Register */ 335 - writel(0, &rxmac->space_avail); 336 - 337 - /* Initialize the the mif_ctrl register 338 - * bit 3: Receive code error. One or more nibbles were signaled as 339 - * errors during the reception of the packet. Clear this 340 - * bit in Gigabit, set it in 100Mbit. This was derived 341 - * experimentally at UNH. 342 - * bit 4: Receive CRC error. The packet's CRC did not match the 343 - * internally generated CRC. 344 - * bit 5: Receive length check error. Indicates that frame length 345 - * field value in the packet does not match the actual data 346 - * byte length and is not a type field. 347 - * bit 16: Receive frame truncated. 348 - * bit 17: Drop packet enable 349 - */ 350 - if (phydev && phydev->speed == SPEED_100) 351 - writel(0x30038, &rxmac->mif_ctrl); 352 - else 353 - writel(0x30030, &rxmac->mif_ctrl); 354 - 355 - /* Finally we initialize RxMac to be enabled & WOL disabled. Packet 356 - * filter is always enabled since it is where the runt packets are 357 - * supposed to be dropped. 
For version A silicon, runt packet 358 - * dropping doesn't work, so it is disabled in the pf_ctrl register, 359 - * but we still leave the packet filter on. 360 - */ 361 - writel(pf_ctrl, &rxmac->pf_ctrl); 362 - writel(0x9, &rxmac->ctrl); 363 - } 364 - 365 - void et1310_config_txmac_regs(struct et131x_adapter *adapter) 366 - { 367 - struct txmac_regs __iomem *txmac = &adapter->regs->txmac; 368 - 369 - /* We need to update the Control Frame Parameters 370 - * cfpt - control frame pause timer set to 64 (0x40) 371 - * cfep - control frame extended pause timer set to 0x0 372 - */ 373 - if (adapter->flowcontrol == FLOW_NONE) 374 - writel(0, &txmac->cf_param); 375 - else 376 - writel(0x40, &txmac->cf_param); 377 - } 378 - 379 - void et1310_config_macstat_regs(struct et131x_adapter *adapter) 380 - { 381 - struct macstat_regs __iomem *macstat = 382 - &adapter->regs->macstat; 383 - 384 - /* Next we need to initialize all the macstat registers to zero on 385 - * the device. 386 - */ 387 - writel(0, &macstat->txrx_0_64_byte_frames); 388 - writel(0, &macstat->txrx_65_127_byte_frames); 389 - writel(0, &macstat->txrx_128_255_byte_frames); 390 - writel(0, &macstat->txrx_256_511_byte_frames); 391 - writel(0, &macstat->txrx_512_1023_byte_frames); 392 - writel(0, &macstat->txrx_1024_1518_byte_frames); 393 - writel(0, &macstat->txrx_1519_1522_gvln_frames); 394 - 395 - writel(0, &macstat->rx_bytes); 396 - writel(0, &macstat->rx_packets); 397 - writel(0, &macstat->rx_fcs_errs); 398 - writel(0, &macstat->rx_multicast_packets); 399 - writel(0, &macstat->rx_broadcast_packets); 400 - writel(0, &macstat->rx_control_frames); 401 - writel(0, &macstat->rx_pause_frames); 402 - writel(0, &macstat->rx_unknown_opcodes); 403 - writel(0, &macstat->rx_align_errs); 404 - writel(0, &macstat->rx_frame_len_errs); 405 - writel(0, &macstat->rx_code_errs); 406 - writel(0, &macstat->rx_carrier_sense_errs); 407 - writel(0, &macstat->rx_undersize_packets); 408 - writel(0, &macstat->rx_oversize_packets); 409 - 
writel(0, &macstat->rx_fragment_packets); 410 - writel(0, &macstat->rx_jabbers); 411 - writel(0, &macstat->rx_drops); 412 - 413 - writel(0, &macstat->tx_bytes); 414 - writel(0, &macstat->tx_packets); 415 - writel(0, &macstat->tx_multicast_packets); 416 - writel(0, &macstat->tx_broadcast_packets); 417 - writel(0, &macstat->tx_pause_frames); 418 - writel(0, &macstat->tx_deferred); 419 - writel(0, &macstat->tx_excessive_deferred); 420 - writel(0, &macstat->tx_single_collisions); 421 - writel(0, &macstat->tx_multiple_collisions); 422 - writel(0, &macstat->tx_late_collisions); 423 - writel(0, &macstat->tx_excessive_collisions); 424 - writel(0, &macstat->tx_total_collisions); 425 - writel(0, &macstat->tx_pause_honored_frames); 426 - writel(0, &macstat->tx_drops); 427 - writel(0, &macstat->tx_jabbers); 428 - writel(0, &macstat->tx_fcs_errs); 429 - writel(0, &macstat->tx_control_frames); 430 - writel(0, &macstat->tx_oversize_frames); 431 - writel(0, &macstat->tx_undersize_frames); 432 - writel(0, &macstat->tx_fragments); 433 - writel(0, &macstat->carry_reg1); 434 - writel(0, &macstat->carry_reg2); 435 - 436 - /* Unmask any counters that we want to track the overflow of. 437 - * Initially this will be all counters. It may become clear later 438 - * that we do not need to track all counters. 
439 - */ 440 - writel(0xFFFFBE32, &macstat->carry_reg1_mask); 441 - writel(0xFFFE7E8B, &macstat->carry_reg2_mask); 442 - } 443 - 444 - void et1310_config_flow_control(struct et131x_adapter *adapter) 445 - { 446 - struct phy_device *phydev = adapter->phydev; 447 - 448 - if (phydev->duplex == DUPLEX_HALF) { 449 - adapter->flowcontrol = FLOW_NONE; 450 - } else { 451 - char remote_pause, remote_async_pause; 452 - 453 - et1310_phy_access_mii_bit(adapter, 454 - TRUEPHY_BIT_READ, 5, 10, &remote_pause); 455 - et1310_phy_access_mii_bit(adapter, 456 - TRUEPHY_BIT_READ, 5, 11, 457 - &remote_async_pause); 458 - 459 - if ((remote_pause == TRUEPHY_BIT_SET) && 460 - (remote_async_pause == TRUEPHY_BIT_SET)) { 461 - adapter->flowcontrol = adapter->wanted_flow; 462 - } else if ((remote_pause == TRUEPHY_BIT_SET) && 463 - (remote_async_pause == TRUEPHY_BIT_CLEAR)) { 464 - if (adapter->wanted_flow == FLOW_BOTH) 465 - adapter->flowcontrol = FLOW_BOTH; 466 - else 467 - adapter->flowcontrol = FLOW_NONE; 468 - } else if ((remote_pause == TRUEPHY_BIT_CLEAR) && 469 - (remote_async_pause == TRUEPHY_BIT_CLEAR)) { 470 - adapter->flowcontrol = FLOW_NONE; 471 - } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT && 472 - remote_async_pause == TRUEPHY_SET_BIT) */ 473 - if (adapter->wanted_flow == FLOW_BOTH) 474 - adapter->flowcontrol = FLOW_RXONLY; 475 - else 476 - adapter->flowcontrol = FLOW_NONE; 477 - } 478 - } 479 - } 480 - 481 - /** 482 - * et1310_update_macstat_host_counters - Update the local copy of the statistics 483 - * @adapter: pointer to the adapter structure 484 - */ 485 - void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) 486 - { 487 - struct ce_stats *stats = &adapter->stats; 488 - struct macstat_regs __iomem *macstat = 489 - &adapter->regs->macstat; 490 - 491 - stats->tx_collisions += readl(&macstat->tx_total_collisions); 492 - stats->tx_first_collisions += readl(&macstat->tx_single_collisions); 493 - stats->tx_deferred += readl(&macstat->tx_deferred); 494 - 
stats->tx_excessive_collisions += 495 - readl(&macstat->tx_multiple_collisions); 496 - stats->tx_late_collisions += readl(&macstat->tx_late_collisions); 497 - stats->tx_underflows += readl(&macstat->tx_undersize_frames); 498 - stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); 499 - 500 - stats->rx_align_errs += readl(&macstat->rx_align_errs); 501 - stats->rx_crc_errs += readl(&macstat->rx_code_errs); 502 - stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); 503 - stats->rx_overflows += readl(&macstat->rx_oversize_packets); 504 - stats->rx_code_violations += readl(&macstat->rx_fcs_errs); 505 - stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); 506 - stats->rx_other_errs += readl(&macstat->rx_fragment_packets); 507 - } 508 - 509 - /** 510 - * et1310_handle_macstat_interrupt 511 - * @adapter: pointer to the adapter structure 512 - * 513 - * One of the MACSTAT counters has wrapped. Update the local copy of 514 - * the statistics held in the adapter structure, checking the "wrap" 515 - * bit for each counter. 516 - */ 517 - void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) 518 - { 519 - u32 carry_reg1; 520 - u32 carry_reg2; 521 - 522 - /* Read the interrupt bits from the register(s). These are Clear On 523 - * Write. 524 - */ 525 - carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); 526 - carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); 527 - 528 - writel(carry_reg1, &adapter->regs->macstat.carry_reg1); 529 - writel(carry_reg2, &adapter->regs->macstat.carry_reg2); 530 - 531 - /* We need to do update the host copy of all the MAC_STAT counters. 532 - * For each counter, check it's overflow bit. If the overflow bit is 533 - * set, then increment the host version of the count by one complete 534 - * revolution of the counter. This routine is called when the counter 535 - * block indicates that one of the counters has wrapped. 
536 - */ 537 - if (carry_reg1 & (1 << 14)) 538 - adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; 539 - if (carry_reg1 & (1 << 8)) 540 - adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; 541 - if (carry_reg1 & (1 << 7)) 542 - adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; 543 - if (carry_reg1 & (1 << 2)) 544 - adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; 545 - if (carry_reg1 & (1 << 6)) 546 - adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; 547 - if (carry_reg1 & (1 << 3)) 548 - adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; 549 - if (carry_reg1 & (1 << 0)) 550 - adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; 551 - if (carry_reg2 & (1 << 16)) 552 - adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; 553 - if (carry_reg2 & (1 << 15)) 554 - adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; 555 - if (carry_reg2 & (1 << 6)) 556 - adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; 557 - if (carry_reg2 & (1 << 8)) 558 - adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; 559 - if (carry_reg2 & (1 << 5)) 560 - adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; 561 - if (carry_reg2 & (1 << 4)) 562 - adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; 563 - if (carry_reg2 & (1 << 2)) 564 - adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; 565 - } 566 - 567 - void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) 568 - { 569 - struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 570 - uint32_t nIndex; 571 - uint32_t result; 572 - uint32_t hash1 = 0; 573 - uint32_t hash2 = 0; 574 - uint32_t hash3 = 0; 575 - uint32_t hash4 = 0; 576 - u32 pm_csr; 577 - 578 - /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision 579 - * the multi-cast LIST. If it is NOT specified, (and "ALL" is not 580 - * specified) then we should pass NO multi-cast addresses to the 581 - * driver. 
582 - */ 583 - if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { 584 - /* Loop through our multicast array and set up the device */ 585 - for (nIndex = 0; nIndex < adapter->multicast_addr_count; 586 - nIndex++) { 587 - result = ether_crc(6, adapter->multicast_list[nIndex]); 588 - 589 - result = (result & 0x3F800000) >> 23; 590 - 591 - if (result < 32) { 592 - hash1 |= (1 << result); 593 - } else if ((31 < result) && (result < 64)) { 594 - result -= 32; 595 - hash2 |= (1 << result); 596 - } else if ((63 < result) && (result < 96)) { 597 - result -= 64; 598 - hash3 |= (1 << result); 599 - } else { 600 - result -= 96; 601 - hash4 |= (1 << result); 602 - } 603 - } 604 - } 605 - 606 - /* Write out the new hash to the device */ 607 - pm_csr = readl(&adapter->regs->global.pm_csr); 608 - if (!et1310_in_phy_coma(adapter)) { 609 - writel(hash1, &rxmac->multi_hash1); 610 - writel(hash2, &rxmac->multi_hash2); 611 - writel(hash3, &rxmac->multi_hash3); 612 - writel(hash4, &rxmac->multi_hash4); 613 - } 614 - } 615 - 616 - void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) 617 - { 618 - struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 619 - u32 uni_pf1; 620 - u32 uni_pf2; 621 - u32 uni_pf3; 622 - u32 pm_csr; 623 - 624 - /* Set up unicast packet filter reg 3 to be the first two octets of 625 - * the MAC address for both address 626 - * 627 - * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the 628 - * MAC address for second address 629 - * 630 - * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the 631 - * MAC address for first address 632 - */ 633 - uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) | 634 - (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) | 635 - (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) | 636 - adapter->addr[1]; 637 - 638 - uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) | 639 - (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) | 640 - (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) | 641 - 
adapter->addr[5]; 642 - 643 - uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) | 644 - (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) | 645 - (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) | 646 - adapter->addr[5]; 647 - 648 - pm_csr = readl(&adapter->regs->global.pm_csr); 649 - if (!et1310_in_phy_coma(adapter)) { 650 - writel(uni_pf1, &rxmac->uni_pf_addr1); 651 - writel(uni_pf2, &rxmac->uni_pf_addr2); 652 - writel(uni_pf3, &rxmac->uni_pf_addr3); 653 - } 654 - }
-375
drivers/staging/et131x/et1310_phy.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs 4 - * 5 - * Copyright * 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et1310_phy.c - Routines for configuring and accessing the PHY 14 - * 15 - *------------------------------------------------------------------------------ 16 - * 17 - * SOFTWARE LICENSE 18 - * 19 - * This software is provided subject to the following terms and conditions, 20 - * which you should read carefully before using the software. Using this 21 - * software indicates your acceptance of these terms and conditions. If you do 22 - * not agree with these terms and conditions, do not use the software. 23 - * 24 - * Copyright * 2005 Agere Systems Inc. 25 - * All rights reserved. 26 - * 27 - * Redistribution and use in source or binary forms, with or without 28 - * modifications, are permitted provided that the following conditions are met: 29 - * 30 - * . Redistributions of source code must retain the above copyright notice, this 31 - * list of conditions and the following Disclaimer as comments in the code as 32 - * well as in the documentation and/or other materials provided with the 33 - * distribution. 34 - * 35 - * . Redistributions in binary form must reproduce the above copyright notice, 36 - * this list of conditions and the following Disclaimer in the documentation 37 - * and/or other materials provided with the distribution. 38 - * 39 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 40 - * may be used to endorse or promote products derived from this software 41 - * without specific prior written permission. 
42 - * 43 - * Disclaimer 44 - * 45 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 46 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 47 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 48 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 49 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 50 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 51 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 52 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 53 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 54 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 55 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 56 - * DAMAGE. 57 - * 58 - */ 59 - 60 - #include "et131x_defs.h" 61 - 62 - #include <linux/pci.h> 63 - #include <linux/init.h> 64 - #include <linux/module.h> 65 - #include <linux/types.h> 66 - #include <linux/kernel.h> 67 - 68 - #include <linux/sched.h> 69 - #include <linux/ptrace.h> 70 - #include <linux/ctype.h> 71 - #include <linux/string.h> 72 - #include <linux/timer.h> 73 - #include <linux/interrupt.h> 74 - #include <linux/in.h> 75 - #include <linux/delay.h> 76 - #include <linux/io.h> 77 - #include <linux/bitops.h> 78 - #include <asm/system.h> 79 - 80 - #include <linux/netdevice.h> 81 - #include <linux/etherdevice.h> 82 - #include <linux/skbuff.h> 83 - #include <linux/if_arp.h> 84 - #include <linux/ioport.h> 85 - #include <linux/random.h> 86 - #include <linux/phy.h> 87 - 88 - #include "et1310_phy.h" 89 - 90 - #include "et131x_adapter.h" 91 - 92 - #include "et1310_address_map.h" 93 - #include "et1310_tx.h" 94 - #include "et1310_rx.h" 95 - 96 - #include "et131x.h" 97 - 98 - int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) 99 - 
{ 100 - struct net_device *netdev = bus->priv; 101 - struct et131x_adapter *adapter = netdev_priv(netdev); 102 - u16 value; 103 - int ret; 104 - 105 - ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); 106 - 107 - if (ret < 0) 108 - return ret; 109 - else 110 - return value; 111 - } 112 - 113 - int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value) 114 - { 115 - struct net_device *netdev = bus->priv; 116 - struct et131x_adapter *adapter = netdev_priv(netdev); 117 - 118 - return et131x_mii_write(adapter, reg, value); 119 - } 120 - 121 - int et131x_mdio_reset(struct mii_bus *bus) 122 - { 123 - struct net_device *netdev = bus->priv; 124 - struct et131x_adapter *adapter = netdev_priv(netdev); 125 - 126 - et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); 127 - 128 - return 0; 129 - } 130 - 131 - int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) 132 - { 133 - struct phy_device *phydev = adapter->phydev; 134 - 135 - if (!phydev) 136 - return -EIO; 137 - 138 - return et131x_phy_mii_read(adapter, phydev->addr, reg, value); 139 - } 140 - 141 - /** 142 - * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC 143 - * @adapter: pointer to our private adapter structure 144 - * @addr: the address of the transceiver 145 - * @reg: the register to read 146 - * @value: pointer to a 16-bit value in which the value will be stored 147 - * 148 - * Returns 0 on success, errno on failure (as defined in errno.h) 149 - */ 150 - int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, 151 - u8 reg, u16 *value) 152 - { 153 - struct mac_regs __iomem *mac = &adapter->regs->mac; 154 - int status = 0; 155 - u32 delay = 0; 156 - u32 mii_addr; 157 - u32 mii_cmd; 158 - u32 mii_indicator; 159 - 160 - /* Save a local copy of the registers we are dealing with so we can 161 - * set them back 162 - */ 163 - mii_addr = readl(&mac->mii_mgmt_addr); 164 - mii_cmd = readl(&mac->mii_mgmt_cmd); 165 - 166 - /* Stop the current 
operation */ 167 - writel(0, &mac->mii_mgmt_cmd); 168 - 169 - /* Set up the register we need to read from on the correct PHY */ 170 - writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr); 171 - 172 - writel(0x1, &mac->mii_mgmt_cmd); 173 - 174 - do { 175 - udelay(50); 176 - delay++; 177 - mii_indicator = readl(&mac->mii_mgmt_indicator); 178 - } while ((mii_indicator & MGMT_WAIT) && delay < 50); 179 - 180 - /* If we hit the max delay, we could not read the register */ 181 - if (delay == 50) { 182 - dev_warn(&adapter->pdev->dev, 183 - "reg 0x%08x could not be read\n", reg); 184 - dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", 185 - mii_indicator); 186 - 187 - status = -EIO; 188 - } 189 - 190 - /* If we hit here we were able to read the register and we need to 191 - * return the value to the caller */ 192 - *value = readl(&mac->mii_mgmt_stat) & 0xFFFF; 193 - 194 - /* Stop the read operation */ 195 - writel(0, &mac->mii_mgmt_cmd); 196 - 197 - /* set the registers we touched back to the state at which we entered 198 - * this function 199 - */ 200 - writel(mii_addr, &mac->mii_mgmt_addr); 201 - writel(mii_cmd, &mac->mii_mgmt_cmd); 202 - 203 - return status; 204 - } 205 - 206 - /** 207 - * et131x_mii_write - Write to a PHY register through the MII interface of the MAC 208 - * @adapter: pointer to our private adapter structure 209 - * @reg: the register to read 210 - * @value: 16-bit value to write 211 - * 212 - * FIXME: one caller in netdev still 213 - * 214 - * Return 0 on success, errno on failure (as defined in errno.h) 215 - */ 216 - int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value) 217 - { 218 - struct mac_regs __iomem *mac = &adapter->regs->mac; 219 - struct phy_device *phydev = adapter->phydev; 220 - int status = 0; 221 - u8 addr; 222 - u32 delay = 0; 223 - u32 mii_addr; 224 - u32 mii_cmd; 225 - u32 mii_indicator; 226 - 227 - if (!phydev) 228 - return -EIO; 229 - 230 - addr = phydev->addr; 231 - 232 - /* Save a local copy of the registers we 
are dealing with so we can 233 - * set them back 234 - */ 235 - mii_addr = readl(&mac->mii_mgmt_addr); 236 - mii_cmd = readl(&mac->mii_mgmt_cmd); 237 - 238 - /* Stop the current operation */ 239 - writel(0, &mac->mii_mgmt_cmd); 240 - 241 - /* Set up the register we need to write to on the correct PHY */ 242 - writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr); 243 - 244 - /* Add the value to write to the registers to the mac */ 245 - writel(value, &mac->mii_mgmt_ctrl); 246 - 247 - do { 248 - udelay(50); 249 - delay++; 250 - mii_indicator = readl(&mac->mii_mgmt_indicator); 251 - } while ((mii_indicator & MGMT_BUSY) && delay < 100); 252 - 253 - /* If we hit the max delay, we could not write the register */ 254 - if (delay == 100) { 255 - u16 tmp; 256 - 257 - dev_warn(&adapter->pdev->dev, 258 - "reg 0x%08x could not be written", reg); 259 - dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", 260 - mii_indicator); 261 - dev_warn(&adapter->pdev->dev, "command is 0x%08x\n", 262 - readl(&mac->mii_mgmt_cmd)); 263 - 264 - et131x_mii_read(adapter, reg, &tmp); 265 - 266 - status = -EIO; 267 - } 268 - /* Stop the write operation */ 269 - writel(0, &mac->mii_mgmt_cmd); 270 - 271 - /* 272 - * set the registers we touched back to the state at which we entered 273 - * this function 274 - */ 275 - writel(mii_addr, &mac->mii_mgmt_addr); 276 - writel(mii_cmd, &mac->mii_mgmt_cmd); 277 - 278 - return status; 279 - } 280 - 281 - /** 282 - * et1310_phy_power_down - PHY power control 283 - * @adapter: device to control 284 - * @down: true for off/false for back on 285 - * 286 - * one hundred, ten, one thousand megs 287 - * How would you like to have your LAN accessed 288 - * Can't you see that this code processed 289 - * Phy power, phy power.. 
290 - */ 291 - void et1310_phy_power_down(struct et131x_adapter *adapter, bool down) 292 - { 293 - u16 data; 294 - 295 - et131x_mii_read(adapter, MII_BMCR, &data); 296 - data &= ~BMCR_PDOWN; 297 - if (down) 298 - data |= BMCR_PDOWN; 299 - et131x_mii_write(adapter, MII_BMCR, data); 300 - } 301 - 302 - /* Still used from _mac for BIT_READ */ 303 - void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action, 304 - u16 regnum, u16 bitnum, u8 *value) 305 - { 306 - u16 reg; 307 - u16 mask = 0x0001 << bitnum; 308 - 309 - /* Read the requested register */ 310 - et131x_mii_read(adapter, regnum, &reg); 311 - 312 - switch (action) { 313 - case TRUEPHY_BIT_READ: 314 - *value = (reg & mask) >> bitnum; 315 - break; 316 - 317 - case TRUEPHY_BIT_SET: 318 - et131x_mii_write(adapter, regnum, reg | mask); 319 - break; 320 - 321 - case TRUEPHY_BIT_CLEAR: 322 - et131x_mii_write(adapter, regnum, reg & ~mask); 323 - break; 324 - 325 - default: 326 - break; 327 - } 328 - } 329 - 330 - /** 331 - * et131x_xcvr_init - Init the phy if we are setting it into force mode 332 - * @adapter: pointer to our private adapter structure 333 - * 334 - */ 335 - void et131x_xcvr_init(struct et131x_adapter *adapter) 336 - { 337 - u16 imr; 338 - u16 isr; 339 - u16 lcr2; 340 - 341 - et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr); 342 - et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr); 343 - 344 - /* Set the link status interrupt only. Bad behavior when link status 345 - * and auto neg are set, we run into a nested interrupt problem 346 - */ 347 - imr |= (ET_PHY_INT_MASK_AUTONEGSTAT & 348 - ET_PHY_INT_MASK_LINKSTAT & 349 - ET_PHY_INT_MASK_ENABLE); 350 - 351 - et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr); 352 - 353 - /* Set the LED behavior such that LED 1 indicates speed (off = 354 - * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates 355 - * link and activity (on for link, blink off for activity). 
356 - * 357 - * NOTE: Some customizations have been added here for specific 358 - * vendors; The LED behavior is now determined by vendor data in the 359 - * EEPROM. However, the above description is the default. 360 - */ 361 - if ((adapter->eeprom_data[1] & 0x4) == 0) { 362 - et131x_mii_read(adapter, PHY_LED_2, &lcr2); 363 - 364 - lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T); 365 - lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); 366 - 367 - if ((adapter->eeprom_data[1] & 0x8) == 0) 368 - lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); 369 - else 370 - lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); 371 - 372 - et131x_mii_write(adapter, PHY_LED_2, lcr2); 373 - } 374 - } 375 -
-204
drivers/staging/et131x/et1310_pm.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 4 - * 5 - * Copyright © 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et1310_pm.c - All power management related code (not completely implemented) 14 - * 15 - *------------------------------------------------------------------------------ 16 - * 17 - * SOFTWARE LICENSE 18 - * 19 - * This software is provided subject to the following terms and conditions, 20 - * which you should read carefully before using the software. Using this 21 - * software indicates your acceptance of these terms and conditions. If you do 22 - * not agree with these terms and conditions, do not use the software. 23 - * 24 - * Copyright © 2005 Agere Systems Inc. 25 - * All rights reserved. 26 - * 27 - * Redistribution and use in source or binary forms, with or without 28 - * modifications, are permitted provided that the following conditions are met: 29 - * 30 - * . Redistributions of source code must retain the above copyright notice, this 31 - * list of conditions and the following Disclaimer as comments in the code as 32 - * well as in the documentation and/or other materials provided with the 33 - * distribution. 34 - * 35 - * . Redistributions in binary form must reproduce the above copyright notice, 36 - * this list of conditions and the following Disclaimer in the documentation 37 - * and/or other materials provided with the distribution. 38 - * 39 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 40 - * may be used to endorse or promote products derived from this software 41 - * without specific prior written permission. 
42 - * 43 - * Disclaimer 44 - * 45 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 46 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 47 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 48 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 49 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 50 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 51 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 52 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 53 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 54 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 55 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 56 - * DAMAGE. 57 - * 58 - */ 59 - 60 - #include "et131x_defs.h" 61 - 62 - #include <linux/init.h> 63 - #include <linux/module.h> 64 - #include <linux/types.h> 65 - #include <linux/kernel.h> 66 - 67 - #include <linux/sched.h> 68 - #include <linux/ptrace.h> 69 - #include <linux/ctype.h> 70 - #include <linux/string.h> 71 - #include <linux/timer.h> 72 - #include <linux/interrupt.h> 73 - #include <linux/in.h> 74 - #include <linux/delay.h> 75 - #include <linux/io.h> 76 - #include <linux/bitops.h> 77 - #include <asm/system.h> 78 - 79 - #include <linux/netdevice.h> 80 - #include <linux/etherdevice.h> 81 - #include <linux/skbuff.h> 82 - #include <linux/if_arp.h> 83 - #include <linux/ioport.h> 84 - 85 - #include "et1310_phy.h" 86 - #include "et1310_rx.h" 87 - #include "et131x_adapter.h" 88 - #include "et131x.h" 89 - 90 - /** 91 - * et1310_in_phy_coma - check if the device is in phy coma 92 - * @adapter: pointer to our adapter structure 93 - * 94 - * Returns 0 if the device is not in phy coma, 1 if it is in phy coma 95 - */ 96 - int 
et1310_in_phy_coma(struct et131x_adapter *adapter) 97 - { 98 - u32 pmcsr; 99 - 100 - pmcsr = readl(&adapter->regs->global.pm_csr); 101 - 102 - return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; 103 - } 104 - 105 - /** 106 - * et1310_enable_phy_coma - called when network cable is unplugged 107 - * @adapter: pointer to our adapter structure 108 - * 109 - * driver receive an phy status change interrupt while in D0 and check that 110 - * phy_status is down. 111 - * 112 - * -- gate off JAGCore; 113 - * -- set gigE PHY in Coma mode 114 - * -- wake on phy_interrupt; Perform software reset JAGCore, 115 - * re-initialize jagcore and gigE PHY 116 - * 117 - * Add D0-ASPM-PhyLinkDown Support: 118 - * -- while in D0, when there is a phy_interrupt indicating phy link 119 - * down status, call the MPSetPhyComa routine to enter this active 120 - * state power saving mode 121 - * -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt 122 - * indicating linkup status, call the MPDisablePhyComa routine to 123 - * restore JAGCore and gigE PHY 124 - */ 125 - void et1310_enable_phy_coma(struct et131x_adapter *adapter) 126 - { 127 - unsigned long flags; 128 - u32 pmcsr; 129 - 130 - pmcsr = readl(&adapter->regs->global.pm_csr); 131 - 132 - /* Save the GbE PHY speed and duplex modes. Need to restore this 133 - * when cable is plugged back in 134 - */ 135 - /* 136 - * TODO - when PM is re-enabled, check if we need to 137 - * perform a similar task as this - 138 - * adapter->pdown_speed = adapter->ai_force_speed; 139 - * adapter->pdown_duplex = adapter->ai_force_duplex; 140 - */ 141 - 142 - /* Stop sending packets. 
*/ 143 - spin_lock_irqsave(&adapter->send_hw_lock, flags); 144 - adapter->flags |= fMP_ADAPTER_LOWER_POWER; 145 - spin_unlock_irqrestore(&adapter->send_hw_lock, flags); 146 - 147 - /* Wait for outstanding Receive packets */ 148 - 149 - et131x_disable_txrx(adapter->netdev); 150 - 151 - /* Gate off JAGCore 3 clock domains */ 152 - pmcsr &= ~ET_PMCSR_INIT; 153 - writel(pmcsr, &adapter->regs->global.pm_csr); 154 - 155 - /* Program gigE PHY in to Coma mode */ 156 - pmcsr |= ET_PM_PHY_SW_COMA; 157 - writel(pmcsr, &adapter->regs->global.pm_csr); 158 - } 159 - 160 - /** 161 - * et1310_disable_phy_coma - Disable the Phy Coma Mode 162 - * @adapter: pointer to our adapter structure 163 - */ 164 - void et1310_disable_phy_coma(struct et131x_adapter *adapter) 165 - { 166 - u32 pmcsr; 167 - 168 - pmcsr = readl(&adapter->regs->global.pm_csr); 169 - 170 - /* Disable phy_sw_coma register and re-enable JAGCore clocks */ 171 - pmcsr |= ET_PMCSR_INIT; 172 - pmcsr &= ~ET_PM_PHY_SW_COMA; 173 - writel(pmcsr, &adapter->regs->global.pm_csr); 174 - 175 - /* Restore the GbE PHY speed and duplex modes; 176 - * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY 177 - */ 178 - /* TODO - when PM is re-enabled, check if we need to 179 - * perform a similar task as this - 180 - * adapter->ai_force_speed = adapter->pdown_speed; 181 - * adapter->ai_force_duplex = adapter->pdown_duplex; 182 - */ 183 - 184 - /* Re-initialize the send structures */ 185 - et131x_init_send(adapter); 186 - 187 - /* Reset the RFD list and re-start RU */ 188 - et131x_reset_recv(adapter); 189 - 190 - /* Bring the device back to the state it was during init prior to 191 - * autonegotiation being complete. This way, when we get the auto-neg 192 - * complete interrupt, we can complete init by calling ConfigMacREGS2. 193 - */ 194 - et131x_soft_reset(adapter); 195 - 196 - /* setup et1310 as per the documentation ?? 
*/ 197 - et131x_adapter_setup(adapter); 198 - 199 - /* Allow Tx to restart */ 200 - adapter->flags &= ~fMP_ADAPTER_LOWER_POWER; 201 - 202 - et131x_enable_txrx(adapter->netdev); 203 - } 204 -
-1160
drivers/staging/et131x/et1310_rx.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 4 - * 5 - * Copyright © 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et1310_rx.c - Routines used to perform data reception 14 - * 15 - *------------------------------------------------------------------------------ 16 - * 17 - * SOFTWARE LICENSE 18 - * 19 - * This software is provided subject to the following terms and conditions, 20 - * which you should read carefully before using the software. Using this 21 - * software indicates your acceptance of these terms and conditions. If you do 22 - * not agree with these terms and conditions, do not use the software. 23 - * 24 - * Copyright © 2005 Agere Systems Inc. 25 - * All rights reserved. 26 - * 27 - * Redistribution and use in source or binary forms, with or without 28 - * modifications, are permitted provided that the following conditions are met: 29 - * 30 - * . Redistributions of source code must retain the above copyright notice, this 31 - * list of conditions and the following Disclaimer as comments in the code as 32 - * well as in the documentation and/or other materials provided with the 33 - * distribution. 34 - * 35 - * . Redistributions in binary form must reproduce the above copyright notice, 36 - * this list of conditions and the following Disclaimer in the documentation 37 - * and/or other materials provided with the distribution. 38 - * 39 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 40 - * may be used to endorse or promote products derived from this software 41 - * without specific prior written permission. 
42 - * 43 - * Disclaimer 44 - * 45 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 46 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 47 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 48 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 49 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 50 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 51 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 52 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 53 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 54 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 55 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 56 - * DAMAGE. 57 - * 58 - */ 59 - 60 - #include "et131x_defs.h" 61 - 62 - #include <linux/pci.h> 63 - #include <linux/init.h> 64 - #include <linux/module.h> 65 - #include <linux/types.h> 66 - #include <linux/kernel.h> 67 - 68 - #include <linux/sched.h> 69 - #include <linux/ptrace.h> 70 - #include <linux/slab.h> 71 - #include <linux/ctype.h> 72 - #include <linux/string.h> 73 - #include <linux/timer.h> 74 - #include <linux/interrupt.h> 75 - #include <linux/in.h> 76 - #include <linux/delay.h> 77 - #include <linux/io.h> 78 - #include <linux/bitops.h> 79 - #include <asm/system.h> 80 - 81 - #include <linux/netdevice.h> 82 - #include <linux/etherdevice.h> 83 - #include <linux/skbuff.h> 84 - #include <linux/if_arp.h> 85 - #include <linux/ioport.h> 86 - #include <linux/phy.h> 87 - 88 - #include "et1310_phy.h" 89 - #include "et131x_adapter.h" 90 - #include "et1310_rx.h" 91 - #include "et131x.h" 92 - 93 - static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) 94 - { 95 - u32 tmp_free_buff_ring = *free_buff_ring; 96 - tmp_free_buff_ring++; 
97 - /* This works for all cases where limit < 1024. The 1023 case 98 - works because 1023++ is 1024 which means the if condition is not 99 - taken but the carry of the bit into the wrap bit toggles the wrap 100 - value correctly */ 101 - if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { 102 - tmp_free_buff_ring &= ~ET_DMA10_MASK; 103 - tmp_free_buff_ring ^= ET_DMA10_WRAP; 104 - } 105 - /* For the 1023 case */ 106 - tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP); 107 - *free_buff_ring = tmp_free_buff_ring; 108 - return tmp_free_buff_ring; 109 - } 110 - 111 - /** 112 - * et131x_rx_dma_memory_alloc 113 - * @adapter: pointer to our private adapter structure 114 - * 115 - * Returns 0 on success and errno on failure (as defined in errno.h) 116 - * 117 - * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, 118 - * and the Packet Status Ring. 119 - */ 120 - int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) 121 - { 122 - u32 i, j; 123 - u32 bufsize; 124 - u32 pktstat_ringsize, fbr_chunksize; 125 - struct rx_ring *rx_ring; 126 - 127 - /* Setup some convenience pointers */ 128 - rx_ring = &adapter->rx_ring; 129 - 130 - /* Alloc memory for the lookup table */ 131 - #ifdef USE_FBR0 132 - rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); 133 - #endif 134 - rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); 135 - 136 - /* The first thing we will do is configure the sizes of the buffer 137 - * rings. These will change based on jumbo packet support. Larger 138 - * jumbo packets increases the size of each entry in FBR0, and the 139 - * number of entries in FBR0, while at the same time decreasing the 140 - * number of entries in FBR1. 141 - * 142 - * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 143 - * entries are huge in order to accommodate a "jumbo" frame, then it 144 - * will have less entries. 
Conversely, FBR1 will now be relied upon 145 - * to carry more "normal" frames, thus it's entry size also increases 146 - * and the number of entries goes up too (since it now carries 147 - * "small" + "regular" packets. 148 - * 149 - * In this scheme, we try to maintain 512 entries between the two 150 - * rings. Also, FBR1 remains a constant size - when it's size doubles 151 - * the number of entries halves. FBR0 increases in size, however. 152 - */ 153 - 154 - if (adapter->registry_jumbo_packet < 2048) { 155 - #ifdef USE_FBR0 156 - rx_ring->fbr0_buffsize = 256; 157 - rx_ring->fbr0_num_entries = 512; 158 - #endif 159 - rx_ring->fbr1_buffsize = 2048; 160 - rx_ring->fbr1_num_entries = 512; 161 - } else if (adapter->registry_jumbo_packet < 4096) { 162 - #ifdef USE_FBR0 163 - rx_ring->fbr0_buffsize = 512; 164 - rx_ring->fbr0_num_entries = 1024; 165 - #endif 166 - rx_ring->fbr1_buffsize = 4096; 167 - rx_ring->fbr1_num_entries = 512; 168 - } else { 169 - #ifdef USE_FBR0 170 - rx_ring->fbr0_buffsize = 1024; 171 - rx_ring->fbr0_num_entries = 768; 172 - #endif 173 - rx_ring->fbr1_buffsize = 16384; 174 - rx_ring->fbr1_num_entries = 128; 175 - } 176 - 177 - #ifdef USE_FBR0 178 - adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr0_num_entries + 179 - adapter->rx_ring.fbr1_num_entries; 180 - #else 181 - adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr1_num_entries; 182 - #endif 183 - 184 - /* Allocate an area of memory for Free Buffer Ring 1 */ 185 - bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr1_num_entries) + 0xfff; 186 - rx_ring->fbr1_ring_virtaddr = pci_alloc_consistent(adapter->pdev, 187 - bufsize, 188 - &rx_ring->fbr1_ring_physaddr); 189 - if (!rx_ring->fbr1_ring_virtaddr) { 190 - dev_err(&adapter->pdev->dev, 191 - "Cannot alloc memory for Free Buffer Ring 1\n"); 192 - return -ENOMEM; 193 - } 194 - 195 - /* Save physical address 196 - * 197 - * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, 198 - * ALWAYS returns SAC (32-bit) addresses. 
If DAC (64-bit) addresses 199 - * are ever returned, make sure the high part is retrieved here 200 - * before storing the adjusted address. 201 - */ 202 - rx_ring->fbr1_real_physaddr = rx_ring->fbr1_ring_physaddr; 203 - 204 - /* Align Free Buffer Ring 1 on a 4K boundary */ 205 - et131x_align_allocated_memory(adapter, 206 - &rx_ring->fbr1_real_physaddr, 207 - &rx_ring->fbr1_offset, 0x0FFF); 208 - 209 - rx_ring->fbr1_ring_virtaddr = 210 - (void *)((u8 *) rx_ring->fbr1_ring_virtaddr + 211 - rx_ring->fbr1_offset); 212 - 213 - #ifdef USE_FBR0 214 - /* Allocate an area of memory for Free Buffer Ring 0 */ 215 - bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr0_num_entries) + 0xfff; 216 - rx_ring->fbr0_ring_virtaddr = pci_alloc_consistent(adapter->pdev, 217 - bufsize, 218 - &rx_ring->fbr0_ring_physaddr); 219 - if (!rx_ring->fbr0_ring_virtaddr) { 220 - dev_err(&adapter->pdev->dev, 221 - "Cannot alloc memory for Free Buffer Ring 0\n"); 222 - return -ENOMEM; 223 - } 224 - 225 - /* Save physical address 226 - * 227 - * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, 228 - * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 229 - * are ever returned, make sure the high part is retrieved here before 230 - * storing the adjusted address. 231 - */ 232 - rx_ring->fbr0_real_physaddr = rx_ring->fbr0_ring_physaddr; 233 - 234 - /* Align Free Buffer Ring 0 on a 4K boundary */ 235 - et131x_align_allocated_memory(adapter, 236 - &rx_ring->fbr0_real_physaddr, 237 - &rx_ring->fbr0_offset, 0x0FFF); 238 - 239 - rx_ring->fbr0_ring_virtaddr = 240 - (void *)((u8 *) rx_ring->fbr0_ring_virtaddr + 241 - rx_ring->fbr0_offset); 242 - #endif 243 - for (i = 0; i < (rx_ring->fbr1_num_entries / FBR_CHUNKS); i++) { 244 - u64 fbr1_offset; 245 - u64 fbr1_tmp_physaddr; 246 - u32 fbr1_align; 247 - 248 - /* This code allocates an area of memory big enough for N 249 - * free buffers + (buffer_size - 1) so that the buffers can 250 - * be aligned on 4k boundaries. 
If each buffer were aligned 251 - * to a buffer_size boundary, the effect would be to double 252 - * the size of FBR0. By allocating N buffers at once, we 253 - * reduce this overhead. 254 - */ 255 - if (rx_ring->fbr1_buffsize > 4096) 256 - fbr1_align = 4096; 257 - else 258 - fbr1_align = rx_ring->fbr1_buffsize; 259 - 260 - fbr_chunksize = 261 - (FBR_CHUNKS * rx_ring->fbr1_buffsize) + fbr1_align - 1; 262 - rx_ring->fbr1_mem_virtaddrs[i] = 263 - pci_alloc_consistent(adapter->pdev, fbr_chunksize, 264 - &rx_ring->fbr1_mem_physaddrs[i]); 265 - 266 - if (!rx_ring->fbr1_mem_virtaddrs[i]) { 267 - dev_err(&adapter->pdev->dev, 268 - "Could not alloc memory\n"); 269 - return -ENOMEM; 270 - } 271 - 272 - /* See NOTE in "Save Physical Address" comment above */ 273 - fbr1_tmp_physaddr = rx_ring->fbr1_mem_physaddrs[i]; 274 - 275 - et131x_align_allocated_memory(adapter, 276 - &fbr1_tmp_physaddr, 277 - &fbr1_offset, (fbr1_align - 1)); 278 - 279 - for (j = 0; j < FBR_CHUNKS; j++) { 280 - u32 index = (i * FBR_CHUNKS) + j; 281 - 282 - /* Save the Virtual address of this index for quick 283 - * access later 284 - */ 285 - rx_ring->fbr[1]->virt[index] = 286 - (u8 *) rx_ring->fbr1_mem_virtaddrs[i] + 287 - (j * rx_ring->fbr1_buffsize) + fbr1_offset; 288 - 289 - /* now store the physical address in the descriptor 290 - * so the device can access it 291 - */ 292 - rx_ring->fbr[1]->bus_high[index] = 293 - (u32) (fbr1_tmp_physaddr >> 32); 294 - rx_ring->fbr[1]->bus_low[index] = 295 - (u32) fbr1_tmp_physaddr; 296 - 297 - fbr1_tmp_physaddr += rx_ring->fbr1_buffsize; 298 - 299 - rx_ring->fbr[1]->buffer1[index] = 300 - rx_ring->fbr[1]->virt[index]; 301 - rx_ring->fbr[1]->buffer2[index] = 302 - rx_ring->fbr[1]->virt[index] - 4; 303 - } 304 - } 305 - 306 - #ifdef USE_FBR0 307 - /* Same for FBR0 (if in use) */ 308 - for (i = 0; i < (rx_ring->fbr0_num_entries / FBR_CHUNKS); i++) { 309 - u64 fbr0_offset; 310 - u64 fbr0_tmp_physaddr; 311 - 312 - fbr_chunksize = 313 - ((FBR_CHUNKS + 1) * 
rx_ring->fbr0_buffsize) - 1; 314 - rx_ring->fbr0_mem_virtaddrs[i] = 315 - pci_alloc_consistent(adapter->pdev, fbr_chunksize, 316 - &rx_ring->fbr0_mem_physaddrs[i]); 317 - 318 - if (!rx_ring->fbr0_mem_virtaddrs[i]) { 319 - dev_err(&adapter->pdev->dev, 320 - "Could not alloc memory\n"); 321 - return -ENOMEM; 322 - } 323 - 324 - /* See NOTE in "Save Physical Address" comment above */ 325 - fbr0_tmp_physaddr = rx_ring->fbr0_mem_physaddrs[i]; 326 - 327 - et131x_align_allocated_memory(adapter, 328 - &fbr0_tmp_physaddr, 329 - &fbr0_offset, 330 - rx_ring->fbr0_buffsize - 1); 331 - 332 - for (j = 0; j < FBR_CHUNKS; j++) { 333 - u32 index = (i * FBR_CHUNKS) + j; 334 - 335 - rx_ring->fbr[0]->virt[index] = 336 - (u8 *) rx_ring->fbr0_mem_virtaddrs[i] + 337 - (j * rx_ring->fbr0_buffsize) + fbr0_offset; 338 - 339 - rx_ring->fbr[0]->bus_high[index] = 340 - (u32) (fbr0_tmp_physaddr >> 32); 341 - rx_ring->fbr[0]->bus_low[index] = 342 - (u32) fbr0_tmp_physaddr; 343 - 344 - fbr0_tmp_physaddr += rx_ring->fbr0_buffsize; 345 - 346 - rx_ring->fbr[0]->buffer1[index] = 347 - rx_ring->fbr[0]->virt[index]; 348 - rx_ring->fbr[0]->buffer2[index] = 349 - rx_ring->fbr[0]->virt[index] - 4; 350 - } 351 - } 352 - #endif 353 - 354 - /* Allocate an area of memory for FIFO of Packet Status ring entries */ 355 - pktstat_ringsize = 356 - sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; 357 - 358 - rx_ring->ps_ring_virtaddr = pci_alloc_consistent(adapter->pdev, 359 - pktstat_ringsize, 360 - &rx_ring->ps_ring_physaddr); 361 - 362 - if (!rx_ring->ps_ring_virtaddr) { 363 - dev_err(&adapter->pdev->dev, 364 - "Cannot alloc memory for Packet Status Ring\n"); 365 - return -ENOMEM; 366 - } 367 - printk(KERN_INFO "Packet Status Ring %lx\n", 368 - (unsigned long) rx_ring->ps_ring_physaddr); 369 - 370 - /* 371 - * NOTE : pci_alloc_consistent(), used above to alloc DMA regions, 372 - * ALWAYS returns SAC (32-bit) addresses. 
If DAC (64-bit) addresses 373 - * are ever returned, make sure the high part is retrieved here before 374 - * storing the adjusted address. 375 - */ 376 - 377 - /* Allocate an area of memory for writeback of status information */ 378 - rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev, 379 - sizeof(struct rx_status_block), 380 - &rx_ring->rx_status_bus); 381 - if (!rx_ring->rx_status_block) { 382 - dev_err(&adapter->pdev->dev, 383 - "Cannot alloc memory for Status Block\n"); 384 - return -ENOMEM; 385 - } 386 - rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; 387 - printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus); 388 - 389 - /* Recv 390 - * pci_pool_create initializes a lookaside list. After successful 391 - * creation, nonpaged fixed-size blocks can be allocated from and 392 - * freed to the lookaside list. 393 - * RFDs will be allocated from this pool. 394 - */ 395 - rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name, 396 - sizeof(struct rfd), 397 - 0, 398 - SLAB_CACHE_DMA | 399 - SLAB_HWCACHE_ALIGN, 400 - NULL); 401 - 402 - adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE; 403 - 404 - /* The RFDs are going to be put on lists later on, so initialize the 405 - * lists now. 406 - */ 407 - INIT_LIST_HEAD(&rx_ring->recv_list); 408 - return 0; 409 - } 410 - 411 - /** 412 - * et131x_rx_dma_memory_free - Free all memory allocated within this module. 
413 - * @adapter: pointer to our private adapter structure 414 - */ 415 - void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) 416 - { 417 - u32 index; 418 - u32 bufsize; 419 - u32 pktstat_ringsize; 420 - struct rfd *rfd; 421 - struct rx_ring *rx_ring; 422 - 423 - /* Setup some convenience pointers */ 424 - rx_ring = &adapter->rx_ring; 425 - 426 - /* Free RFDs and associated packet descriptors */ 427 - WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); 428 - 429 - while (!list_empty(&rx_ring->recv_list)) { 430 - rfd = (struct rfd *) list_entry(rx_ring->recv_list.next, 431 - struct rfd, list_node); 432 - 433 - list_del(&rfd->list_node); 434 - rfd->skb = NULL; 435 - kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd); 436 - } 437 - 438 - /* Free Free Buffer Ring 1 */ 439 - if (rx_ring->fbr1_ring_virtaddr) { 440 - /* First the packet memory */ 441 - for (index = 0; index < 442 - (rx_ring->fbr1_num_entries / FBR_CHUNKS); index++) { 443 - if (rx_ring->fbr1_mem_virtaddrs[index]) { 444 - u32 fbr1_align; 445 - 446 - if (rx_ring->fbr1_buffsize > 4096) 447 - fbr1_align = 4096; 448 - else 449 - fbr1_align = rx_ring->fbr1_buffsize; 450 - 451 - bufsize = 452 - (rx_ring->fbr1_buffsize * FBR_CHUNKS) + 453 - fbr1_align - 1; 454 - 455 - pci_free_consistent(adapter->pdev, 456 - bufsize, 457 - rx_ring->fbr1_mem_virtaddrs[index], 458 - rx_ring->fbr1_mem_physaddrs[index]); 459 - 460 - rx_ring->fbr1_mem_virtaddrs[index] = NULL; 461 - } 462 - } 463 - 464 - /* Now the FIFO itself */ 465 - rx_ring->fbr1_ring_virtaddr = (void *)((u8 *) 466 - rx_ring->fbr1_ring_virtaddr - rx_ring->fbr1_offset); 467 - 468 - bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr1_num_entries) 469 - + 0xfff; 470 - 471 - pci_free_consistent(adapter->pdev, bufsize, 472 - rx_ring->fbr1_ring_virtaddr, 473 - rx_ring->fbr1_ring_physaddr); 474 - 475 - rx_ring->fbr1_ring_virtaddr = NULL; 476 - } 477 - 478 - #ifdef USE_FBR0 479 - /* Now the same for Free Buffer Ring 0 */ 480 - if (rx_ring->fbr0_ring_virtaddr) 
{ 481 - /* First the packet memory */ 482 - for (index = 0; index < 483 - (rx_ring->fbr0_num_entries / FBR_CHUNKS); index++) { 484 - if (rx_ring->fbr0_mem_virtaddrs[index]) { 485 - bufsize = 486 - (rx_ring->fbr0_buffsize * 487 - (FBR_CHUNKS + 1)) - 1; 488 - 489 - pci_free_consistent(adapter->pdev, 490 - bufsize, 491 - rx_ring->fbr0_mem_virtaddrs[index], 492 - rx_ring->fbr0_mem_physaddrs[index]); 493 - 494 - rx_ring->fbr0_mem_virtaddrs[index] = NULL; 495 - } 496 - } 497 - 498 - /* Now the FIFO itself */ 499 - rx_ring->fbr0_ring_virtaddr = (void *)((u8 *) 500 - rx_ring->fbr0_ring_virtaddr - rx_ring->fbr0_offset); 501 - 502 - bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr0_num_entries) 503 - + 0xfff; 504 - 505 - pci_free_consistent(adapter->pdev, 506 - bufsize, 507 - rx_ring->fbr0_ring_virtaddr, 508 - rx_ring->fbr0_ring_physaddr); 509 - 510 - rx_ring->fbr0_ring_virtaddr = NULL; 511 - } 512 - #endif 513 - 514 - /* Free Packet Status Ring */ 515 - if (rx_ring->ps_ring_virtaddr) { 516 - pktstat_ringsize = 517 - sizeof(struct pkt_stat_desc) * 518 - adapter->rx_ring.psr_num_entries; 519 - 520 - pci_free_consistent(adapter->pdev, pktstat_ringsize, 521 - rx_ring->ps_ring_virtaddr, 522 - rx_ring->ps_ring_physaddr); 523 - 524 - rx_ring->ps_ring_virtaddr = NULL; 525 - } 526 - 527 - /* Free area of memory for the writeback of status information */ 528 - if (rx_ring->rx_status_block) { 529 - pci_free_consistent(adapter->pdev, 530 - sizeof(struct rx_status_block), 531 - rx_ring->rx_status_block, rx_ring->rx_status_bus); 532 - rx_ring->rx_status_block = NULL; 533 - } 534 - 535 - /* Free receive buffer pool */ 536 - 537 - /* Free receive packet pool */ 538 - 539 - /* Destroy the lookaside (RFD) pool */ 540 - if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) { 541 - kmem_cache_destroy(rx_ring->recv_lookaside); 542 - adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE; 543 - } 544 - 545 - /* Free the FBR Lookup Table */ 546 - #ifdef USE_FBR0 547 - kfree(rx_ring->fbr[0]); 548 - #endif 549 
- 550 - kfree(rx_ring->fbr[1]); 551 - 552 - /* Reset Counters */ 553 - rx_ring->num_ready_recv = 0; 554 - } 555 - 556 - /** 557 - * et131x_init_recv - Initialize receive data structures. 558 - * @adapter: pointer to our private adapter structure 559 - * 560 - * Returns 0 on success and errno on failure (as defined in errno.h) 561 - */ 562 - int et131x_init_recv(struct et131x_adapter *adapter) 563 - { 564 - int status = -ENOMEM; 565 - struct rfd *rfd = NULL; 566 - u32 rfdct; 567 - u32 numrfd = 0; 568 - struct rx_ring *rx_ring; 569 - 570 - /* Setup some convenience pointers */ 571 - rx_ring = &adapter->rx_ring; 572 - 573 - /* Setup each RFD */ 574 - for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) { 575 - rfd = kmem_cache_alloc(rx_ring->recv_lookaside, 576 - GFP_ATOMIC | GFP_DMA); 577 - 578 - if (!rfd) { 579 - dev_err(&adapter->pdev->dev, 580 - "Couldn't alloc RFD out of kmem_cache\n"); 581 - status = -ENOMEM; 582 - continue; 583 - } 584 - 585 - rfd->skb = NULL; 586 - 587 - /* Add this RFD to the recv_list */ 588 - list_add_tail(&rfd->list_node, &rx_ring->recv_list); 589 - 590 - /* Increment both the available RFD's, and the total RFD's. 
*/ 591 - rx_ring->num_ready_recv++; 592 - numrfd++; 593 - } 594 - 595 - if (numrfd > NIC_MIN_NUM_RFD) 596 - status = 0; 597 - 598 - rx_ring->num_rfd = numrfd; 599 - 600 - if (status != 0) { 601 - kmem_cache_free(rx_ring->recv_lookaside, rfd); 602 - dev_err(&adapter->pdev->dev, 603 - "Allocation problems in et131x_init_recv\n"); 604 - } 605 - return status; 606 - } 607 - 608 - /** 609 - * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence 610 - * @adapter: pointer to our adapter structure 611 - */ 612 - void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) 613 - { 614 - struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; 615 - struct rx_ring *rx_local = &adapter->rx_ring; 616 - struct fbr_desc *fbr_entry; 617 - u32 entry; 618 - u32 psr_num_des; 619 - unsigned long flags; 620 - 621 - /* Halt RXDMA to perform the reconfigure. */ 622 - et131x_rx_dma_disable(adapter); 623 - 624 - /* Load the completion writeback physical address 625 - * 626 - * NOTE : pci_alloc_consistent(), used above to alloc DMA regions, 627 - * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 628 - * are ever returned, make sure the high part is retrieved here 629 - * before storing the adjusted address. 
630 - */ 631 - writel((u32) ((u64)rx_local->rx_status_bus >> 32), 632 - &rx_dma->dma_wb_base_hi); 633 - writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo); 634 - 635 - memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block)); 636 - 637 - /* Set the address and parameters of the packet status ring into the 638 - * 1310's registers 639 - */ 640 - writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32), 641 - &rx_dma->psr_base_hi); 642 - writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo); 643 - writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des); 644 - writel(0, &rx_dma->psr_full_offset); 645 - 646 - psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF; 647 - writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100, 648 - &rx_dma->psr_min_des); 649 - 650 - spin_lock_irqsave(&adapter->rcv_lock, flags); 651 - 652 - /* These local variables track the PSR in the adapter structure */ 653 - rx_local->local_psr_full = 0; 654 - 655 - /* Now's the best time to initialize FBR1 contents */ 656 - fbr_entry = (struct fbr_desc *) rx_local->fbr1_ring_virtaddr; 657 - for (entry = 0; entry < rx_local->fbr1_num_entries; entry++) { 658 - fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry]; 659 - fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry]; 660 - fbr_entry->word2 = entry; 661 - fbr_entry++; 662 - } 663 - 664 - /* Set the address and parameters of Free buffer ring 1 (and 0 if 665 - * required) into the 1310's registers 666 - */ 667 - writel((u32) (rx_local->fbr1_real_physaddr >> 32), 668 - &rx_dma->fbr1_base_hi); 669 - writel((u32) rx_local->fbr1_real_physaddr, &rx_dma->fbr1_base_lo); 670 - writel(rx_local->fbr1_num_entries - 1, &rx_dma->fbr1_num_des); 671 - writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset); 672 - 673 - /* This variable tracks the free buffer ring 1 full position, so it 674 - * has to match the above. 
675 - */ 676 - rx_local->local_fbr1_full = ET_DMA10_WRAP; 677 - writel( 678 - ((rx_local->fbr1_num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, 679 - &rx_dma->fbr1_min_des); 680 - 681 - #ifdef USE_FBR0 682 - /* Now's the best time to initialize FBR0 contents */ 683 - fbr_entry = (struct fbr_desc *) rx_local->fbr0_ring_virtaddr; 684 - for (entry = 0; entry < rx_local->fbr0_num_entries; entry++) { 685 - fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry]; 686 - fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry]; 687 - fbr_entry->word2 = entry; 688 - fbr_entry++; 689 - } 690 - 691 - writel((u32) (rx_local->fbr0_real_physaddr >> 32), 692 - &rx_dma->fbr0_base_hi); 693 - writel((u32) rx_local->fbr0_real_physaddr, &rx_dma->fbr0_base_lo); 694 - writel(rx_local->fbr0_num_entries - 1, &rx_dma->fbr0_num_des); 695 - writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset); 696 - 697 - /* This variable tracks the free buffer ring 0 full position, so it 698 - * has to match the above. 699 - */ 700 - rx_local->local_fbr0_full = ET_DMA10_WRAP; 701 - writel( 702 - ((rx_local->fbr0_num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1, 703 - &rx_dma->fbr0_min_des); 704 - #endif 705 - 706 - /* Program the number of packets we will receive before generating an 707 - * interrupt. 708 - * For version B silicon, this value gets updated once autoneg is 709 - *complete. 710 - */ 711 - writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done); 712 - 713 - /* The "time_done" is not working correctly to coalesce interrupts 714 - * after a given time period, but rather is giving us an interrupt 715 - * regardless of whether we have received packets. 716 - * This value gets updated once autoneg is complete. 717 - */ 718 - writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time); 719 - 720 - spin_unlock_irqrestore(&adapter->rcv_lock, flags); 721 - } 722 - 723 - /** 724 - * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate. 
725 - * @adapter: pointer to our adapter structure 726 - */ 727 - void et131x_set_rx_dma_timer(struct et131x_adapter *adapter) 728 - { 729 - struct phy_device *phydev = adapter->phydev; 730 - 731 - if (!phydev) 732 - return; 733 - 734 - /* For version B silicon, we do not use the RxDMA timer for 10 and 100 735 - * Mbits/s line rates. We do not enable and RxDMA interrupt coalescing. 736 - */ 737 - if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) { 738 - writel(0, &adapter->regs->rxdma.max_pkt_time); 739 - writel(1, &adapter->regs->rxdma.num_pkt_done); 740 - } 741 - } 742 - 743 - /** 744 - * NICReturnRFD - Recycle a RFD and put it back onto the receive list 745 - * @adapter: pointer to our adapter 746 - * @rfd: pointer to the RFD 747 - */ 748 - static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd) 749 - { 750 - struct rx_ring *rx_local = &adapter->rx_ring; 751 - struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; 752 - u16 buff_index = rfd->bufferindex; 753 - u8 ring_index = rfd->ringindex; 754 - unsigned long flags; 755 - 756 - /* We don't use any of the OOB data besides status. Otherwise, we 757 - * need to clean up OOB data 758 - */ 759 - if ( 760 - #ifdef USE_FBR0 761 - (ring_index == 0 && buff_index < rx_local->fbr0_num_entries) || 762 - #endif 763 - (ring_index == 1 && buff_index < rx_local->fbr1_num_entries)) { 764 - spin_lock_irqsave(&adapter->fbr_lock, flags); 765 - 766 - if (ring_index == 1) { 767 - struct fbr_desc *next = 768 - (struct fbr_desc *) (rx_local->fbr1_ring_virtaddr) + 769 - INDEX10(rx_local->local_fbr1_full); 770 - 771 - /* Handle the Free Buffer Ring advancement here. 
Write 772 - * the PA / Buffer Index for the returned buffer into 773 - * the oldest (next to be freed)FBR entry 774 - */ 775 - next->addr_hi = rx_local->fbr[1]->bus_high[buff_index]; 776 - next->addr_lo = rx_local->fbr[1]->bus_low[buff_index]; 777 - next->word2 = buff_index; 778 - 779 - writel(bump_free_buff_ring(&rx_local->local_fbr1_full, 780 - rx_local->fbr1_num_entries - 1), 781 - &rx_dma->fbr1_full_offset); 782 - } 783 - #ifdef USE_FBR0 784 - else { 785 - struct fbr_desc *next = (struct fbr_desc *) 786 - rx_local->fbr0_ring_virtaddr + 787 - INDEX10(rx_local->local_fbr0_full); 788 - 789 - /* Handle the Free Buffer Ring advancement here. Write 790 - * the PA / Buffer Index for the returned buffer into 791 - * the oldest (next to be freed) FBR entry 792 - */ 793 - next->addr_hi = rx_local->fbr[0]->bus_high[buff_index]; 794 - next->addr_lo = rx_local->fbr[0]->bus_low[buff_index]; 795 - next->word2 = buff_index; 796 - 797 - writel(bump_free_buff_ring(&rx_local->local_fbr0_full, 798 - rx_local->fbr0_num_entries - 1), 799 - &rx_dma->fbr0_full_offset); 800 - } 801 - #endif 802 - spin_unlock_irqrestore(&adapter->fbr_lock, flags); 803 - } else { 804 - dev_err(&adapter->pdev->dev, 805 - "%s illegal Buffer Index returned\n", __func__); 806 - } 807 - 808 - /* The processing on this RFD is done, so put it back on the tail of 809 - * our list 810 - */ 811 - spin_lock_irqsave(&adapter->rcv_lock, flags); 812 - list_add_tail(&rfd->list_node, &rx_local->recv_list); 813 - rx_local->num_ready_recv++; 814 - spin_unlock_irqrestore(&adapter->rcv_lock, flags); 815 - 816 - WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd); 817 - } 818 - 819 - /** 820 - * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310 821 - * @adapter: pointer to our adapter structure 822 - */ 823 - void et131x_rx_dma_disable(struct et131x_adapter *adapter) 824 - { 825 - u32 csr; 826 - /* Setup the receive dma configuration register */ 827 - writel(0x00002001, &adapter->regs->rxdma.csr); 828 - csr = 
readl(&adapter->regs->rxdma.csr); 829 - if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */ 830 - udelay(5); 831 - csr = readl(&adapter->regs->rxdma.csr); 832 - if ((csr & 0x00020000) == 0) 833 - dev_err(&adapter->pdev->dev, 834 - "RX Dma failed to enter halt state. CSR 0x%08x\n", 835 - csr); 836 - } 837 - } 838 - 839 - /** 840 - * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310. 841 - * @adapter: pointer to our adapter structure 842 - */ 843 - void et131x_rx_dma_enable(struct et131x_adapter *adapter) 844 - { 845 - /* Setup the receive dma configuration register for normal operation */ 846 - u32 csr = 0x2000; /* FBR1 enable */ 847 - 848 - if (adapter->rx_ring.fbr1_buffsize == 4096) 849 - csr |= 0x0800; 850 - else if (adapter->rx_ring.fbr1_buffsize == 8192) 851 - csr |= 0x1000; 852 - else if (adapter->rx_ring.fbr1_buffsize == 16384) 853 - csr |= 0x1800; 854 - #ifdef USE_FBR0 855 - csr |= 0x0400; /* FBR0 enable */ 856 - if (adapter->rx_ring.fbr0_buffsize == 256) 857 - csr |= 0x0100; 858 - else if (adapter->rx_ring.fbr0_buffsize == 512) 859 - csr |= 0x0200; 860 - else if (adapter->rx_ring.fbr0_buffsize == 1024) 861 - csr |= 0x0300; 862 - #endif 863 - writel(csr, &adapter->regs->rxdma.csr); 864 - 865 - csr = readl(&adapter->regs->rxdma.csr); 866 - if ((csr & 0x00020000) != 0) { 867 - udelay(5); 868 - csr = readl(&adapter->regs->rxdma.csr); 869 - if ((csr & 0x00020000) != 0) { 870 - dev_err(&adapter->pdev->dev, 871 - "RX Dma failed to exit halt state. CSR 0x%08x\n", 872 - csr); 873 - } 874 - } 875 - } 876 - 877 - /** 878 - * nic_rx_pkts - Checks the hardware for available packets 879 - * @adapter: pointer to our adapter 880 - * 881 - * Returns rfd, a pointer to our MPRFD. 882 - * 883 - * Checks the hardware for available packets, using completion ring 884 - * If packets are available, it gets an RFD from the recv_list, attaches 885 - * the packet to it, puts the RFD in the RecvPendList, and also returns 886 - * the pointer to the RFD. 
887 - */ 888 - static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) 889 - { 890 - struct rx_ring *rx_local = &adapter->rx_ring; 891 - struct rx_status_block *status; 892 - struct pkt_stat_desc *psr; 893 - struct rfd *rfd; 894 - u32 i; 895 - u8 *buf; 896 - unsigned long flags; 897 - struct list_head *element; 898 - u8 ring_index; 899 - u16 buff_index; 900 - u32 len; 901 - u32 word0; 902 - u32 word1; 903 - 904 - /* RX Status block is written by the DMA engine prior to every 905 - * interrupt. It contains the next to be used entry in the Packet 906 - * Status Ring, and also the two Free Buffer rings. 907 - */ 908 - status = rx_local->rx_status_block; 909 - word1 = status->word1 >> 16; /* Get the useful bits */ 910 - 911 - /* Check the PSR and wrap bits do not match */ 912 - if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) 913 - /* Looks like this ring is not updated yet */ 914 - return NULL; 915 - 916 - /* The packet status ring indicates that data is available. */ 917 - psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) + 918 - (rx_local->local_psr_full & 0xFFF); 919 - 920 - /* Grab any information that is required once the PSR is 921 - * advanced, since we can no longer rely on the memory being 922 - * accurate 923 - */ 924 - len = psr->word1 & 0xFFFF; 925 - ring_index = (psr->word1 >> 26) & 0x03; 926 - buff_index = (psr->word1 >> 16) & 0x3FF; 927 - word0 = psr->word0; 928 - 929 - /* Indicate that we have used this PSR entry. 
*/ 930 - /* FIXME wrap 12 */ 931 - add_12bit(&rx_local->local_psr_full, 1); 932 - if ( 933 - (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) { 934 - /* Clear psr full and toggle the wrap bit */ 935 - rx_local->local_psr_full &= ~0xFFF; 936 - rx_local->local_psr_full ^= 0x1000; 937 - } 938 - 939 - writel(rx_local->local_psr_full, 940 - &adapter->regs->rxdma.psr_full_offset); 941 - 942 - #ifndef USE_FBR0 943 - if (ring_index != 1) 944 - return NULL; 945 - #endif 946 - 947 - #ifdef USE_FBR0 948 - if (ring_index > 1 || 949 - (ring_index == 0 && 950 - buff_index > rx_local->fbr0_num_entries - 1) || 951 - (ring_index == 1 && 952 - buff_index > rx_local->fbr1_num_entries - 1)) 953 - #else 954 - if (ring_index != 1 || buff_index > rx_local->fbr1_num_entries - 1) 955 - #endif 956 - { 957 - /* Illegal buffer or ring index cannot be used by S/W*/ 958 - dev_err(&adapter->pdev->dev, 959 - "NICRxPkts PSR Entry %d indicates " 960 - "length of %d and/or bad bi(%d)\n", 961 - rx_local->local_psr_full & 0xFFF, 962 - len, buff_index); 963 - return NULL; 964 - } 965 - 966 - /* Get and fill the RFD. */ 967 - spin_lock_irqsave(&adapter->rcv_lock, flags); 968 - 969 - rfd = NULL; 970 - element = rx_local->recv_list.next; 971 - rfd = (struct rfd *) list_entry(element, struct rfd, list_node); 972 - 973 - if (rfd == NULL) { 974 - spin_unlock_irqrestore(&adapter->rcv_lock, flags); 975 - return NULL; 976 - } 977 - 978 - list_del(&rfd->list_node); 979 - rx_local->num_ready_recv--; 980 - 981 - spin_unlock_irqrestore(&adapter->rcv_lock, flags); 982 - 983 - rfd->bufferindex = buff_index; 984 - rfd->ringindex = ring_index; 985 - 986 - /* In V1 silicon, there is a bug which screws up filtering of 987 - * runt packets. Therefore runt packet filtering is disabled 988 - * in the MAC and the packets are dropped here. They are 989 - * also counted here. 
990 - */ 991 - if (len < (NIC_MIN_PACKET_SIZE + 4)) { 992 - adapter->stats.rx_other_errs++; 993 - len = 0; 994 - } 995 - 996 - if (len) { 997 - /* Determine if this is a multicast packet coming in */ 998 - if ((word0 & ALCATEL_MULTICAST_PKT) && 999 - !(word0 & ALCATEL_BROADCAST_PKT)) { 1000 - /* Promiscuous mode and Multicast mode are 1001 - * not mutually exclusive as was first 1002 - * thought. I guess Promiscuous is just 1003 - * considered a super-set of the other 1004 - * filters. Generally filter is 0x2b when in 1005 - * promiscuous mode. 1006 - */ 1007 - if ((adapter->packet_filter & 1008 - ET131X_PACKET_TYPE_MULTICAST) 1009 - && !(adapter->packet_filter & 1010 - ET131X_PACKET_TYPE_PROMISCUOUS) 1011 - && !(adapter->packet_filter & 1012 - ET131X_PACKET_TYPE_ALL_MULTICAST)) { 1013 - buf = rx_local->fbr[ring_index]-> 1014 - virt[buff_index]; 1015 - 1016 - /* Loop through our list to see if the 1017 - * destination address of this packet 1018 - * matches one in our list. 1019 - */ 1020 - for (i = 0; i < adapter->multicast_addr_count; 1021 - i++) { 1022 - if (buf[0] == 1023 - adapter->multicast_list[i][0] 1024 - && buf[1] == 1025 - adapter->multicast_list[i][1] 1026 - && buf[2] == 1027 - adapter->multicast_list[i][2] 1028 - && buf[3] == 1029 - adapter->multicast_list[i][3] 1030 - && buf[4] == 1031 - adapter->multicast_list[i][4] 1032 - && buf[5] == 1033 - adapter->multicast_list[i][5]) { 1034 - break; 1035 - } 1036 - } 1037 - 1038 - /* If our index is equal to the number 1039 - * of Multicast address we have, then 1040 - * this means we did not find this 1041 - * packet's matching address in our 1042 - * list. Set the len to zero, 1043 - * so we free our RFD when we return 1044 - * from this function. 
1045 - */ 1046 - if (i == adapter->multicast_addr_count) 1047 - len = 0; 1048 - } 1049 - 1050 - if (len > 0) 1051 - adapter->stats.multicast_pkts_rcvd++; 1052 - } else if (word0 & ALCATEL_BROADCAST_PKT) 1053 - adapter->stats.broadcast_pkts_rcvd++; 1054 - else 1055 - /* Not sure what this counter measures in 1056 - * promiscuous mode. Perhaps we should check 1057 - * the MAC address to see if it is directed 1058 - * to us in promiscuous mode. 1059 - */ 1060 - adapter->stats.unicast_pkts_rcvd++; 1061 - } 1062 - 1063 - if (len > 0) { 1064 - struct sk_buff *skb = NULL; 1065 - 1066 - /*rfd->len = len - 4; */ 1067 - rfd->len = len; 1068 - 1069 - skb = dev_alloc_skb(rfd->len + 2); 1070 - if (!skb) { 1071 - dev_err(&adapter->pdev->dev, 1072 - "Couldn't alloc an SKB for Rx\n"); 1073 - return NULL; 1074 - } 1075 - 1076 - adapter->net_stats.rx_bytes += rfd->len; 1077 - 1078 - memcpy(skb_put(skb, rfd->len), 1079 - rx_local->fbr[ring_index]->virt[buff_index], 1080 - rfd->len); 1081 - 1082 - skb->dev = adapter->netdev; 1083 - skb->protocol = eth_type_trans(skb, adapter->netdev); 1084 - skb->ip_summed = CHECKSUM_NONE; 1085 - 1086 - netif_rx(skb); 1087 - } else { 1088 - rfd->len = 0; 1089 - } 1090 - 1091 - nic_return_rfd(adapter, rfd); 1092 - return rfd; 1093 - } 1094 - 1095 - /** 1096 - * et131x_reset_recv - Reset the receive list 1097 - * @adapter: pointer to our adapter 1098 - * 1099 - * Assumption, Rcv spinlock has been acquired. 1100 - */ 1101 - void et131x_reset_recv(struct et131x_adapter *adapter) 1102 - { 1103 - WARN_ON(list_empty(&adapter->rx_ring.recv_list)); 1104 - } 1105 - 1106 - /** 1107 - * et131x_handle_recv_interrupt - Interrupt handler for receive processing 1108 - * @adapter: pointer to our adapter 1109 - * 1110 - * Assumption, Rcv spinlock has been acquired. 
1111 - */ 1112 - void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) 1113 - { 1114 - struct rfd *rfd = NULL; 1115 - u32 count = 0; 1116 - bool done = true; 1117 - 1118 - /* Process up to available RFD's */ 1119 - while (count < NUM_PACKETS_HANDLED) { 1120 - if (list_empty(&adapter->rx_ring.recv_list)) { 1121 - WARN_ON(adapter->rx_ring.num_ready_recv != 0); 1122 - done = false; 1123 - break; 1124 - } 1125 - 1126 - rfd = nic_rx_pkts(adapter); 1127 - 1128 - if (rfd == NULL) 1129 - break; 1130 - 1131 - /* Do not receive any packets until a filter has been set. 1132 - * Do not receive any packets until we have link. 1133 - * If length is zero, return the RFD in order to advance the 1134 - * Free buffer ring. 1135 - */ 1136 - if (!adapter->packet_filter || 1137 - !netif_carrier_ok(adapter->netdev) || 1138 - rfd->len == 0) 1139 - continue; 1140 - 1141 - /* Increment the number of packets we received */ 1142 - adapter->net_stats.rx_packets++; 1143 - 1144 - /* Set the status on the packet, either resources or success */ 1145 - if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) { 1146 - dev_warn(&adapter->pdev->dev, 1147 - "RFD's are running out\n"); 1148 - } 1149 - count++; 1150 - } 1151 - 1152 - if (count == NUM_PACKETS_HANDLED || !done) { 1153 - adapter->rx_ring.unfinished_receives = true; 1154 - writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 1155 - &adapter->regs->global.watchdog_timer); 1156 - } else 1157 - /* Watchdog timer will disable itself if appropriate. */ 1158 - adapter->rx_ring.unfinished_receives = false; 1159 - } 1160 -
-793
drivers/staging/et131x/et1310_tx.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 4 - * 5 - * Copyright © 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et1310_tx.c - Routines used to perform data transmission. 14 - * 15 - *------------------------------------------------------------------------------ 16 - * 17 - * SOFTWARE LICENSE 18 - * 19 - * This software is provided subject to the following terms and conditions, 20 - * which you should read carefully before using the software. Using this 21 - * software indicates your acceptance of these terms and conditions. If you do 22 - * not agree with these terms and conditions, do not use the software. 23 - * 24 - * Copyright © 2005 Agere Systems Inc. 25 - * All rights reserved. 26 - * 27 - * Redistribution and use in source or binary forms, with or without 28 - * modifications, are permitted provided that the following conditions are met: 29 - * 30 - * . Redistributions of source code must retain the above copyright notice, this 31 - * list of conditions and the following Disclaimer as comments in the code as 32 - * well as in the documentation and/or other materials provided with the 33 - * distribution. 34 - * 35 - * . Redistributions in binary form must reproduce the above copyright notice, 36 - * this list of conditions and the following Disclaimer in the documentation 37 - * and/or other materials provided with the distribution. 38 - * 39 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 40 - * may be used to endorse or promote products derived from this software 41 - * without specific prior written permission. 
42 - * 43 - * Disclaimer 44 - * 45 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 46 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 47 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 48 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 49 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 50 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 51 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 52 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 53 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 54 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 55 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 56 - * DAMAGE. 57 - * 58 - */ 59 - 60 - #include "et131x_defs.h" 61 - 62 - #include <linux/pci.h> 63 - #include <linux/init.h> 64 - #include <linux/module.h> 65 - #include <linux/types.h> 66 - #include <linux/kernel.h> 67 - 68 - #include <linux/sched.h> 69 - #include <linux/ptrace.h> 70 - #include <linux/slab.h> 71 - #include <linux/ctype.h> 72 - #include <linux/string.h> 73 - #include <linux/timer.h> 74 - #include <linux/interrupt.h> 75 - #include <linux/in.h> 76 - #include <linux/delay.h> 77 - #include <linux/io.h> 78 - #include <linux/bitops.h> 79 - #include <asm/system.h> 80 - 81 - #include <linux/netdevice.h> 82 - #include <linux/etherdevice.h> 83 - #include <linux/skbuff.h> 84 - #include <linux/if_arp.h> 85 - #include <linux/ioport.h> 86 - #include <linux/phy.h> 87 - 88 - #include "et1310_phy.h" 89 - #include "et131x_adapter.h" 90 - #include "et1310_tx.h" 91 - #include "et131x.h" 92 - 93 - /** 94 - * et131x_tx_dma_memory_alloc 95 - * @adapter: pointer to our private adapter structure 96 - * 97 - * Returns 0 on success and errno on failure 
(as defined in errno.h). 98 - * 99 - * Allocates memory that will be visible both to the device and to the CPU. 100 - * The OS will pass us packets, pointers to which we will insert in the Tx 101 - * Descriptor queue. The device will read this queue to find the packets in 102 - * memory. The device will update the "status" in memory each time it xmits a 103 - * packet. 104 - */ 105 - int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) 106 - { 107 - int desc_size = 0; 108 - struct tx_ring *tx_ring = &adapter->tx_ring; 109 - 110 - /* Allocate memory for the TCB's (Transmit Control Block) */ 111 - adapter->tx_ring.tcb_ring = 112 - kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA); 113 - if (!adapter->tx_ring.tcb_ring) { 114 - dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n"); 115 - return -ENOMEM; 116 - } 117 - 118 - /* Allocate enough memory for the Tx descriptor ring, and allocate 119 - * some extra so that the ring can be aligned on a 4k boundary. 120 - */ 121 - desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; 122 - tx_ring->tx_desc_ring = 123 - (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size, 124 - &tx_ring->tx_desc_ring_pa); 125 - if (!adapter->tx_ring.tx_desc_ring) { 126 - dev_err(&adapter->pdev->dev, 127 - "Cannot alloc memory for Tx Ring\n"); 128 - return -ENOMEM; 129 - } 130 - 131 - /* Save physical address 132 - * 133 - * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, 134 - * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 135 - * are ever returned, make sure the high part is retrieved here before 136 - * storing the adjusted address. 
137 - */ 138 - /* Allocate memory for the Tx status block */ 139 - tx_ring->tx_status = pci_alloc_consistent(adapter->pdev, 140 - sizeof(u32), 141 - &tx_ring->tx_status_pa); 142 - if (!adapter->tx_ring.tx_status_pa) { 143 - dev_err(&adapter->pdev->dev, 144 - "Cannot alloc memory for Tx status block\n"); 145 - return -ENOMEM; 146 - } 147 - return 0; 148 - } 149 - 150 - /** 151 - * et131x_tx_dma_memory_free - Free all memory allocated within this module 152 - * @adapter: pointer to our private adapter structure 153 - * 154 - * Returns 0 on success and errno on failure (as defined in errno.h). 155 - */ 156 - void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) 157 - { 158 - int desc_size = 0; 159 - 160 - if (adapter->tx_ring.tx_desc_ring) { 161 - /* Free memory relating to Tx rings here */ 162 - desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) 163 - + 4096 - 1; 164 - pci_free_consistent(adapter->pdev, 165 - desc_size, 166 - adapter->tx_ring.tx_desc_ring, 167 - adapter->tx_ring.tx_desc_ring_pa); 168 - adapter->tx_ring.tx_desc_ring = NULL; 169 - } 170 - 171 - /* Free memory for the Tx status block */ 172 - if (adapter->tx_ring.tx_status) { 173 - pci_free_consistent(adapter->pdev, 174 - sizeof(u32), 175 - adapter->tx_ring.tx_status, 176 - adapter->tx_ring.tx_status_pa); 177 - 178 - adapter->tx_ring.tx_status = NULL; 179 - } 180 - /* Free the memory for the tcb structures */ 181 - kfree(adapter->tx_ring.tcb_ring); 182 - } 183 - 184 - /** 185 - * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. 186 - * @adapter: pointer to our private adapter structure 187 - * 188 - * Configure the transmit engine with the ring buffers we have created 189 - * and prepare it for use. 190 - */ 191 - void et131x_config_tx_dma_regs(struct et131x_adapter *adapter) 192 - { 193 - struct txdma_regs __iomem *txdma = &adapter->regs->txdma; 194 - 195 - /* Load the hardware with the start of the transmit descriptor ring. 
*/ 196 - writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32), 197 - &txdma->pr_base_hi); 198 - writel((u32) adapter->tx_ring.tx_desc_ring_pa, 199 - &txdma->pr_base_lo); 200 - 201 - /* Initialise the transmit DMA engine */ 202 - writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des); 203 - 204 - /* Load the completion writeback physical address */ 205 - writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32), 206 - &txdma->dma_wb_base_hi); 207 - writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo); 208 - 209 - *adapter->tx_ring.tx_status = 0; 210 - 211 - writel(0, &txdma->service_request); 212 - adapter->tx_ring.send_idx = 0; 213 - } 214 - 215 - /** 216 - * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310 217 - * @adapter: pointer to our adapter structure 218 - */ 219 - void et131x_tx_dma_disable(struct et131x_adapter *adapter) 220 - { 221 - /* Setup the tramsmit dma configuration register */ 222 - writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT, 223 - &adapter->regs->txdma.csr); 224 - } 225 - 226 - /** 227 - * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310. 228 - * @adapter: pointer to our adapter structure 229 - * 230 - * Mainly used after a return to the D0 (full-power) state from a lower state. 
231 - */ 232 - void et131x_tx_dma_enable(struct et131x_adapter *adapter) 233 - { 234 - /* Setup the transmit dma configuration register for normal 235 - * operation 236 - */ 237 - writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT), 238 - &adapter->regs->txdma.csr); 239 - } 240 - 241 - /** 242 - * et131x_init_send - Initialize send data structures 243 - * @adapter: pointer to our private adapter structure 244 - */ 245 - void et131x_init_send(struct et131x_adapter *adapter) 246 - { 247 - struct tcb *tcb; 248 - u32 ct; 249 - struct tx_ring *tx_ring; 250 - 251 - /* Setup some convenience pointers */ 252 - tx_ring = &adapter->tx_ring; 253 - tcb = adapter->tx_ring.tcb_ring; 254 - 255 - tx_ring->tcb_qhead = tcb; 256 - 257 - memset(tcb, 0, sizeof(struct tcb) * NUM_TCB); 258 - 259 - /* Go through and set up each TCB */ 260 - for (ct = 0; ct++ < NUM_TCB; tcb++) 261 - /* Set the link pointer in HW TCB to the next TCB in the 262 - * chain 263 - */ 264 - tcb->next = tcb + 1; 265 - 266 - /* Set the tail pointer */ 267 - tcb--; 268 - tx_ring->tcb_qtail = tcb; 269 - tcb->next = NULL; 270 - /* Curr send queue should now be empty */ 271 - tx_ring->send_head = NULL; 272 - tx_ring->send_tail = NULL; 273 - } 274 - 275 - /** 276 - * nic_send_packet - NIC specific send handler for version B silicon. 277 - * @adapter: pointer to our adapter 278 - * @tcb: pointer to struct tcb 279 - * 280 - * Returns 0 or errno. 281 - */ 282 - static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb) 283 - { 284 - u32 i; 285 - struct tx_desc desc[24]; /* 24 x 16 byte */ 286 - u32 frag = 0; 287 - u32 thiscopy, remainder; 288 - struct sk_buff *skb = tcb->skb; 289 - u32 nr_frags = skb_shinfo(skb)->nr_frags + 1; 290 - struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0]; 291 - unsigned long flags; 292 - struct phy_device *phydev = adapter->phydev; 293 - 294 - /* Part of the optimizations of this send routine restrict us to 295 - * sending 24 fragments at a pass. 
In practice we should never see 296 - * more than 5 fragments. 297 - * 298 - * NOTE: The older version of this function (below) can handle any 299 - * number of fragments. If needed, we can call this function, 300 - * although it is less efficient. 301 - */ 302 - if (nr_frags > 23) 303 - return -EIO; 304 - 305 - memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1)); 306 - 307 - for (i = 0; i < nr_frags; i++) { 308 - /* If there is something in this element, lets get a 309 - * descriptor from the ring and get the necessary data 310 - */ 311 - if (i == 0) { 312 - /* If the fragments are smaller than a standard MTU, 313 - * then map them to a single descriptor in the Tx 314 - * Desc ring. However, if they're larger, as is 315 - * possible with support for jumbo packets, then 316 - * split them each across 2 descriptors. 317 - * 318 - * This will work until we determine why the hardware 319 - * doesn't seem to like large fragments. 320 - */ 321 - if ((skb->len - skb->data_len) <= 1514) { 322 - desc[frag].addr_hi = 0; 323 - /* Low 16bits are length, high is vlan and 324 - unused currently so zero */ 325 - desc[frag].len_vlan = 326 - skb->len - skb->data_len; 327 - 328 - /* NOTE: Here, the dma_addr_t returned from 329 - * pci_map_single() is implicitly cast as a 330 - * u32. Although dma_addr_t can be 331 - * 64-bit, the address returned by 332 - * pci_map_single() is always 32-bit 333 - * addressable (as defined by the pci/dma 334 - * subsystem) 335 - */ 336 - desc[frag++].addr_lo = 337 - pci_map_single(adapter->pdev, 338 - skb->data, 339 - skb->len - 340 - skb->data_len, 341 - PCI_DMA_TODEVICE); 342 - } else { 343 - desc[frag].addr_hi = 0; 344 - desc[frag].len_vlan = 345 - (skb->len - skb->data_len) / 2; 346 - 347 - /* NOTE: Here, the dma_addr_t returned from 348 - * pci_map_single() is implicitly cast as a 349 - * u32. 
Although dma_addr_t can be 350 - * 64-bit, the address returned by 351 - * pci_map_single() is always 32-bit 352 - * addressable (as defined by the pci/dma 353 - * subsystem) 354 - */ 355 - desc[frag++].addr_lo = 356 - pci_map_single(adapter->pdev, 357 - skb->data, 358 - ((skb->len - 359 - skb->data_len) / 2), 360 - PCI_DMA_TODEVICE); 361 - desc[frag].addr_hi = 0; 362 - 363 - desc[frag].len_vlan = 364 - (skb->len - skb->data_len) / 2; 365 - 366 - /* NOTE: Here, the dma_addr_t returned from 367 - * pci_map_single() is implicitly cast as a 368 - * u32. Although dma_addr_t can be 369 - * 64-bit, the address returned by 370 - * pci_map_single() is always 32-bit 371 - * addressable (as defined by the pci/dma 372 - * subsystem) 373 - */ 374 - desc[frag++].addr_lo = 375 - pci_map_single(adapter->pdev, 376 - skb->data + 377 - ((skb->len - 378 - skb->data_len) / 2), 379 - ((skb->len - 380 - skb->data_len) / 2), 381 - PCI_DMA_TODEVICE); 382 - } 383 - } else { 384 - desc[frag].addr_hi = 0; 385 - desc[frag].len_vlan = 386 - frags[i - 1].size; 387 - 388 - /* NOTE: Here, the dma_addr_t returned from 389 - * pci_map_page() is implicitly cast as a u32. 
390 - * Although dma_addr_t can be 64-bit, the address 391 - * returned by pci_map_page() is always 32-bit 392 - * addressable (as defined by the pci/dma subsystem) 393 - */ 394 - desc[frag++].addr_lo = 395 - pci_map_page(adapter->pdev, 396 - frags[i - 1].page, 397 - frags[i - 1].page_offset, 398 - frags[i - 1].size, 399 - PCI_DMA_TODEVICE); 400 - } 401 - } 402 - 403 - if (frag == 0) 404 - return -EIO; 405 - 406 - if (phydev && phydev->speed == SPEED_1000) { 407 - if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) { 408 - /* Last element & Interrupt flag */ 409 - desc[frag - 1].flags = 0x5; 410 - adapter->tx_ring.since_irq = 0; 411 - } else { /* Last element */ 412 - desc[frag - 1].flags = 0x1; 413 - } 414 - } else 415 - desc[frag - 1].flags = 0x5; 416 - 417 - desc[0].flags |= 2; /* First element flag */ 418 - 419 - tcb->index_start = adapter->tx_ring.send_idx; 420 - tcb->stale = 0; 421 - 422 - spin_lock_irqsave(&adapter->send_hw_lock, flags); 423 - 424 - thiscopy = NUM_DESC_PER_RING_TX - 425 - INDEX10(adapter->tx_ring.send_idx); 426 - 427 - if (thiscopy >= frag) { 428 - remainder = 0; 429 - thiscopy = frag; 430 - } else { 431 - remainder = frag - thiscopy; 432 - } 433 - 434 - memcpy(adapter->tx_ring.tx_desc_ring + 435 - INDEX10(adapter->tx_ring.send_idx), desc, 436 - sizeof(struct tx_desc) * thiscopy); 437 - 438 - add_10bit(&adapter->tx_ring.send_idx, thiscopy); 439 - 440 - if (INDEX10(adapter->tx_ring.send_idx) == 0 || 441 - INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) { 442 - adapter->tx_ring.send_idx &= ~ET_DMA10_MASK; 443 - adapter->tx_ring.send_idx ^= ET_DMA10_WRAP; 444 - } 445 - 446 - if (remainder) { 447 - memcpy(adapter->tx_ring.tx_desc_ring, 448 - desc + thiscopy, 449 - sizeof(struct tx_desc) * remainder); 450 - 451 - add_10bit(&adapter->tx_ring.send_idx, remainder); 452 - } 453 - 454 - if (INDEX10(adapter->tx_ring.send_idx) == 0) { 455 - if (adapter->tx_ring.send_idx) 456 - tcb->index = NUM_DESC_PER_RING_TX - 1; 457 - else 458 - 
tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1); 459 - } else 460 - tcb->index = adapter->tx_ring.send_idx - 1; 461 - 462 - spin_lock(&adapter->tcb_send_qlock); 463 - 464 - if (adapter->tx_ring.send_tail) 465 - adapter->tx_ring.send_tail->next = tcb; 466 - else 467 - adapter->tx_ring.send_head = tcb; 468 - 469 - adapter->tx_ring.send_tail = tcb; 470 - 471 - WARN_ON(tcb->next != NULL); 472 - 473 - adapter->tx_ring.used++; 474 - 475 - spin_unlock(&adapter->tcb_send_qlock); 476 - 477 - /* Write the new write pointer back to the device. */ 478 - writel(adapter->tx_ring.send_idx, 479 - &adapter->regs->txdma.service_request); 480 - 481 - /* For Gig only, we use Tx Interrupt coalescing. Enable the software 482 - * timer to wake us up if this packet isn't followed by N more. 483 - */ 484 - if (phydev && phydev->speed == SPEED_1000) { 485 - writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 486 - &adapter->regs->global.watchdog_timer); 487 - } 488 - spin_unlock_irqrestore(&adapter->send_hw_lock, flags); 489 - 490 - return 0; 491 - } 492 - 493 - /** 494 - * send_packet - Do the work to send a packet 495 - * @skb: the packet(s) to send 496 - * @adapter: a pointer to the device's private adapter structure 497 - * 498 - * Return 0 in almost all cases; non-zero value in extreme hard failure only. 
499 - * 500 - * Assumption: Send spinlock has been acquired 501 - */ 502 - static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter) 503 - { 504 - int status; 505 - struct tcb *tcb = NULL; 506 - u16 *shbufva; 507 - unsigned long flags; 508 - 509 - /* All packets must have at least a MAC address and a protocol type */ 510 - if (skb->len < ETH_HLEN) 511 - return -EIO; 512 - 513 - /* Get a TCB for this packet */ 514 - spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 515 - 516 - tcb = adapter->tx_ring.tcb_qhead; 517 - 518 - if (tcb == NULL) { 519 - spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 520 - return -ENOMEM; 521 - } 522 - 523 - adapter->tx_ring.tcb_qhead = tcb->next; 524 - 525 - if (adapter->tx_ring.tcb_qhead == NULL) 526 - adapter->tx_ring.tcb_qtail = NULL; 527 - 528 - spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 529 - 530 - tcb->skb = skb; 531 - 532 - if (skb->data != NULL && skb->len - skb->data_len >= 6) { 533 - shbufva = (u16 *) skb->data; 534 - 535 - if ((shbufva[0] == 0xffff) && 536 - (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) { 537 - tcb->flags |= fMP_DEST_BROAD; 538 - } else if ((shbufva[0] & 0x3) == 0x0001) { 539 - tcb->flags |= fMP_DEST_MULTI; 540 - } 541 - } 542 - 543 - tcb->next = NULL; 544 - 545 - /* Call the NIC specific send handler. */ 546 - status = nic_send_packet(adapter, tcb); 547 - 548 - if (status != 0) { 549 - spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 550 - 551 - if (adapter->tx_ring.tcb_qtail) 552 - adapter->tx_ring.tcb_qtail->next = tcb; 553 - else 554 - /* Apparently ready Q is empty. 
*/ 555 - adapter->tx_ring.tcb_qhead = tcb; 556 - 557 - adapter->tx_ring.tcb_qtail = tcb; 558 - spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 559 - return status; 560 - } 561 - WARN_ON(adapter->tx_ring.used > NUM_TCB); 562 - return 0; 563 - } 564 - 565 - /** 566 - * et131x_send_packets - This function is called by the OS to send packets 567 - * @skb: the packet(s) to send 568 - * @netdev:device on which to TX the above packet(s) 569 - * 570 - * Return 0 in almost all cases; non-zero value in extreme hard failure only 571 - */ 572 - int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev) 573 - { 574 - int status = 0; 575 - struct et131x_adapter *adapter = NULL; 576 - 577 - adapter = netdev_priv(netdev); 578 - 579 - /* Send these packets 580 - * 581 - * NOTE: The Linux Tx entry point is only given one packet at a time 582 - * to Tx, so the PacketCount and it's array used makes no sense here 583 - */ 584 - 585 - /* TCB is not available */ 586 - if (adapter->tx_ring.used >= NUM_TCB) { 587 - /* NOTE: If there's an error on send, no need to queue the 588 - * packet under Linux; if we just send an error up to the 589 - * netif layer, it will resend the skb to us. 
590 - */ 591 - status = -ENOMEM; 592 - } else { 593 - /* We need to see if the link is up; if it's not, make the 594 - * netif layer think we're good and drop the packet 595 - */ 596 - if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) || 597 - !netif_carrier_ok(netdev)) { 598 - dev_kfree_skb_any(skb); 599 - skb = NULL; 600 - 601 - adapter->net_stats.tx_dropped++; 602 - } else { 603 - status = send_packet(skb, adapter); 604 - if (status != 0 && status != -ENOMEM) { 605 - /* On any other error, make netif think we're 606 - * OK and drop the packet 607 - */ 608 - dev_kfree_skb_any(skb); 609 - skb = NULL; 610 - adapter->net_stats.tx_dropped++; 611 - } 612 - } 613 - } 614 - return status; 615 - } 616 - 617 - /** 618 - * free_send_packet - Recycle a struct tcb 619 - * @adapter: pointer to our adapter 620 - * @tcb: pointer to struct tcb 621 - * 622 - * Complete the packet if necessary 623 - * Assumption - Send spinlock has been acquired 624 - */ 625 - static inline void free_send_packet(struct et131x_adapter *adapter, 626 - struct tcb *tcb) 627 - { 628 - unsigned long flags; 629 - struct tx_desc *desc = NULL; 630 - struct net_device_stats *stats = &adapter->net_stats; 631 - 632 - if (tcb->flags & fMP_DEST_BROAD) 633 - atomic_inc(&adapter->stats.broadcast_pkts_xmtd); 634 - else if (tcb->flags & fMP_DEST_MULTI) 635 - atomic_inc(&adapter->stats.multicast_pkts_xmtd); 636 - else 637 - atomic_inc(&adapter->stats.unicast_pkts_xmtd); 638 - 639 - if (tcb->skb) { 640 - stats->tx_bytes += tcb->skb->len; 641 - 642 - /* Iterate through the TX descriptors on the ring 643 - * corresponding to this packet and umap the fragments 644 - * they point to 645 - */ 646 - do { 647 - desc = (struct tx_desc *) 648 - (adapter->tx_ring.tx_desc_ring + 649 - INDEX10(tcb->index_start)); 650 - 651 - pci_unmap_single(adapter->pdev, 652 - desc->addr_lo, 653 - desc->len_vlan, PCI_DMA_TODEVICE); 654 - 655 - add_10bit(&tcb->index_start, 1); 656 - if (INDEX10(tcb->index_start) >= 657 - NUM_DESC_PER_RING_TX) { 
658 - tcb->index_start &= ~ET_DMA10_MASK; 659 - tcb->index_start ^= ET_DMA10_WRAP; 660 - } 661 - } while (desc != (adapter->tx_ring.tx_desc_ring + 662 - INDEX10(tcb->index))); 663 - 664 - dev_kfree_skb_any(tcb->skb); 665 - } 666 - 667 - memset(tcb, 0, sizeof(struct tcb)); 668 - 669 - /* Add the TCB to the Ready Q */ 670 - spin_lock_irqsave(&adapter->tcb_ready_qlock, flags); 671 - 672 - adapter->net_stats.tx_packets++; 673 - 674 - if (adapter->tx_ring.tcb_qtail) 675 - adapter->tx_ring.tcb_qtail->next = tcb; 676 - else 677 - /* Apparently ready Q is empty. */ 678 - adapter->tx_ring.tcb_qhead = tcb; 679 - 680 - adapter->tx_ring.tcb_qtail = tcb; 681 - 682 - spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags); 683 - WARN_ON(adapter->tx_ring.used < 0); 684 - } 685 - 686 - /** 687 - * et131x_free_busy_send_packets - Free and complete the stopped active sends 688 - * @adapter: pointer to our adapter 689 - * 690 - * Assumption - Send spinlock has been acquired 691 - */ 692 - void et131x_free_busy_send_packets(struct et131x_adapter *adapter) 693 - { 694 - struct tcb *tcb; 695 - unsigned long flags; 696 - u32 freed = 0; 697 - 698 - /* Any packets being sent? 
Check the first TCB on the send list */ 699 - spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 700 - 701 - tcb = adapter->tx_ring.send_head; 702 - 703 - while (tcb != NULL && freed < NUM_TCB) { 704 - struct tcb *next = tcb->next; 705 - 706 - adapter->tx_ring.send_head = next; 707 - 708 - if (next == NULL) 709 - adapter->tx_ring.send_tail = NULL; 710 - 711 - adapter->tx_ring.used--; 712 - 713 - spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 714 - 715 - freed++; 716 - free_send_packet(adapter, tcb); 717 - 718 - spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 719 - 720 - tcb = adapter->tx_ring.send_head; 721 - } 722 - 723 - WARN_ON(freed == NUM_TCB); 724 - 725 - spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 726 - 727 - adapter->tx_ring.used = 0; 728 - } 729 - 730 - /** 731 - * et131x_handle_send_interrupt - Interrupt handler for sending processing 732 - * @adapter: pointer to our adapter 733 - * 734 - * Re-claim the send resources, complete sends and get more to send from 735 - * the send wait queue. 736 - * 737 - * Assumption - Send spinlock has been acquired 738 - */ 739 - void et131x_handle_send_interrupt(struct et131x_adapter *adapter) 740 - { 741 - unsigned long flags; 742 - u32 serviced; 743 - struct tcb *tcb; 744 - u32 index; 745 - 746 - serviced = readl(&adapter->regs->txdma.new_service_complete); 747 - index = INDEX10(serviced); 748 - 749 - /* Has the ring wrapped? 
Process any descriptors that do not have 750 - * the same "wrap" indicator as the current completion indicator 751 - */ 752 - spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 753 - 754 - tcb = adapter->tx_ring.send_head; 755 - 756 - while (tcb && 757 - ((serviced ^ tcb->index) & ET_DMA10_WRAP) && 758 - index < INDEX10(tcb->index)) { 759 - adapter->tx_ring.used--; 760 - adapter->tx_ring.send_head = tcb->next; 761 - if (tcb->next == NULL) 762 - adapter->tx_ring.send_tail = NULL; 763 - 764 - spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 765 - free_send_packet(adapter, tcb); 766 - spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 767 - 768 - /* Goto the next packet */ 769 - tcb = adapter->tx_ring.send_head; 770 - } 771 - while (tcb && 772 - !((serviced ^ tcb->index) & ET_DMA10_WRAP) 773 - && index > (tcb->index & ET_DMA10_MASK)) { 774 - adapter->tx_ring.used--; 775 - adapter->tx_ring.send_head = tcb->next; 776 - if (tcb->next == NULL) 777 - adapter->tx_ring.send_tail = NULL; 778 - 779 - spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 780 - free_send_packet(adapter, tcb); 781 - spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 782 - 783 - /* Goto the next packet */ 784 - tcb = adapter->tx_ring.send_head; 785 - } 786 - 787 - /* Wake up the queue when we hit a low-water mark */ 788 - if (adapter->tx_ring.used <= NUM_TCB / 3) 789 - netif_wake_queue(adapter->netdev); 790 - 791 - spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 792 - } 793 -
+5042
drivers/staging/et131x/et131x.c
··· 1 + /* 2 + * Agere Systems Inc. 3 + * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 4 + * 5 + * Copyright © 2005 Agere Systems Inc. 6 + * All rights reserved. 7 + * http://www.agere.com 8 + * 9 + * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 + * 11 + *------------------------------------------------------------------------------ 12 + * 13 + * SOFTWARE LICENSE 14 + * 15 + * This software is provided subject to the following terms and conditions, 16 + * which you should read carefully before using the software. Using this 17 + * software indicates your acceptance of these terms and conditions. If you do 18 + * not agree with these terms and conditions, do not use the software. 19 + * 20 + * Copyright © 2005 Agere Systems Inc. 21 + * All rights reserved. 22 + * 23 + * Redistribution and use in source or binary forms, with or without 24 + * modifications, are permitted provided that the following conditions are met: 25 + * 26 + * . Redistributions of source code must retain the above copyright notice, this 27 + * list of conditions and the following Disclaimer as comments in the code as 28 + * well as in the documentation and/or other materials provided with the 29 + * distribution. 30 + * 31 + * . Redistributions in binary form must reproduce the above copyright notice, 32 + * this list of conditions and the following Disclaimer in the documentation 33 + * and/or other materials provided with the distribution. 34 + * 35 + * . Neither the name of Agere Systems Inc. nor the names of the contributors 36 + * may be used to endorse or promote products derived from this software 37 + * without specific prior written permission. 38 + * 39 + * Disclaimer 40 + * 41 + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 42 + * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 43 + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
ANY 44 + * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 45 + * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 46 + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 47 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 48 + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 49 + * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 50 + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 51 + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 52 + * DAMAGE. 53 + * 54 + */ 55 + 56 + #include "et131x_defs.h" 57 + 58 + #include <linux/pci.h> 59 + #include <linux/init.h> 60 + #include <linux/module.h> 61 + #include <linux/types.h> 62 + #include <linux/kernel.h> 63 + 64 + #include <linux/sched.h> 65 + #include <linux/ptrace.h> 66 + #include <linux/slab.h> 67 + #include <linux/ctype.h> 68 + #include <linux/string.h> 69 + #include <linux/timer.h> 70 + #include <linux/interrupt.h> 71 + #include <linux/in.h> 72 + #include <linux/delay.h> 73 + #include <linux/bitops.h> 74 + #include <linux/io.h> 75 + #include <asm/system.h> 76 + 77 + #include <linux/netdevice.h> 78 + #include <linux/etherdevice.h> 79 + #include <linux/skbuff.h> 80 + #include <linux/if_arp.h> 81 + #include <linux/ioport.h> 82 + #include <linux/crc32.h> 83 + #include <linux/random.h> 84 + #include <linux/phy.h> 85 + 86 + #include "et1310_phy.h" 87 + #include "et131x_adapter.h" 88 + #include "et1310_address_map.h" 89 + #include "et1310_tx.h" 90 + #include "et1310_rx.h" 91 + #include "et131x.h" 92 + 93 + MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>"); 94 + MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>"); 95 + MODULE_LICENSE("Dual BSD/GPL"); 96 + MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver " 97 + "for the ET1310 by Agere Systems"); 98 + 99 + /* EEPROM functions */ 100 + 101 
+ static int eeprom_wait_ready(struct pci_dev *pdev, u32 *status) 102 + { 103 + u32 reg; 104 + int i; 105 + 106 + /* 107 + * 1. Check LBCIF Status Register for bits 6 & 3:2 all equal to 0 and 108 + * bits 7,1:0 both equal to 1, at least once after reset. 109 + * Subsequent operations need only to check that bits 1:0 are equal 110 + * to 1 prior to starting a single byte read/write 111 + */ 112 + 113 + for (i = 0; i < MAX_NUM_REGISTER_POLLS; i++) { 114 + /* Read registers grouped in DWORD1 */ 115 + if (pci_read_config_dword(pdev, LBCIF_DWORD1_GROUP, &reg)) 116 + return -EIO; 117 + 118 + /* I2C idle and Phy Queue Avail both true */ 119 + if ((reg & 0x3000) == 0x3000) { 120 + if (status) 121 + *status = reg; 122 + return reg & 0xFF; 123 + } 124 + } 125 + return -ETIMEDOUT; 126 + } 127 + 128 + 129 + /** 130 + * eeprom_write - Write a byte to the ET1310's EEPROM 131 + * @adapter: pointer to our private adapter structure 132 + * @addr: the address to write 133 + * @data: the value to write 134 + * 135 + * Returns 1 for a successful write. 136 + */ 137 + static int eeprom_write(struct et131x_adapter *adapter, u32 addr, u8 data) 138 + { 139 + struct pci_dev *pdev = adapter->pdev; 140 + int index = 0; 141 + int retries; 142 + int err = 0; 143 + int i2c_wack = 0; 144 + int writeok = 0; 145 + u32 status; 146 + u32 val = 0; 147 + 148 + /* 149 + * For an EEPROM, an I2C single byte write is defined as a START 150 + * condition followed by the device address, EEPROM address, one byte 151 + * of data and a STOP condition. The STOP condition will trigger the 152 + * EEPROM's internally timed write cycle to the nonvolatile memory. 153 + * All inputs are disabled during this write cycle and the EEPROM will 154 + * not respond to any access until the internal write is complete. 155 + */ 156 + 157 + err = eeprom_wait_ready(pdev, NULL); 158 + if (err) 159 + return err; 160 + 161 + /* 162 + * 2. Write to the LBCIF Control Register: bit 7=1, bit 6=1, bit 3=0, 163 + * and bits 1:0 both =0. 
Bit 5 should be set according to the 164 + * type of EEPROM being accessed (1=two byte addressing, 0=one 165 + * byte addressing). 166 + */ 167 + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 168 + LBCIF_CONTROL_LBCIF_ENABLE | LBCIF_CONTROL_I2C_WRITE)) 169 + return -EIO; 170 + 171 + i2c_wack = 1; 172 + 173 + /* Prepare EEPROM address for Step 3 */ 174 + 175 + for (retries = 0; retries < MAX_NUM_WRITE_RETRIES; retries++) { 176 + /* Write the address to the LBCIF Address Register */ 177 + if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 178 + break; 179 + /* 180 + * Write the data to the LBCIF Data Register (the I2C write 181 + * will begin). 182 + */ 183 + if (pci_write_config_byte(pdev, LBCIF_DATA_REGISTER, data)) 184 + break; 185 + /* 186 + * Monitor bit 1:0 of the LBCIF Status Register. When bits 187 + * 1:0 are both equal to 1, the I2C write has completed and the 188 + * internal write cycle of the EEPROM is about to start. 189 + * (bits 1:0 = 01 is a legal state while waiting from both 190 + * equal to 1, but bits 1:0 = 10 is invalid and implies that 191 + * something is broken). 192 + */ 193 + err = eeprom_wait_ready(pdev, &status); 194 + if (err < 0) 195 + return 0; 196 + 197 + /* 198 + * Check bit 3 of the LBCIF Status Register. If equal to 1, 199 + * an error has occurred.Don't break here if we are revision 200 + * 1, this is so we do a blind write for load bug. 201 + */ 202 + if ((status & LBCIF_STATUS_GENERAL_ERROR) 203 + && adapter->pdev->revision == 0) 204 + break; 205 + 206 + /* 207 + * Check bit 2 of the LBCIF Status Register. If equal to 1 an 208 + * ACK error has occurred on the address phase of the write. 209 + * This could be due to an actual hardware failure or the 210 + * EEPROM may still be in its internal write cycle from a 211 + * previous write. This write operation was ignored and must be 212 + *repeated later. 
213 + */ 214 + if (status & LBCIF_STATUS_ACK_ERROR) { 215 + /* 216 + * This could be due to an actual hardware failure 217 + * or the EEPROM may still be in its internal write 218 + * cycle from a previous write. This write operation 219 + * was ignored and must be repeated later. 220 + */ 221 + udelay(10); 222 + continue; 223 + } 224 + 225 + writeok = 1; 226 + break; 227 + } 228 + 229 + /* 230 + * Set bit 6 of the LBCIF Control Register = 0. 231 + */ 232 + udelay(10); 233 + 234 + while (i2c_wack) { 235 + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 236 + LBCIF_CONTROL_LBCIF_ENABLE)) 237 + writeok = 0; 238 + 239 + /* Do read until internal ACK_ERROR goes away meaning write 240 + * completed 241 + */ 242 + do { 243 + pci_write_config_dword(pdev, 244 + LBCIF_ADDRESS_REGISTER, 245 + addr); 246 + do { 247 + pci_read_config_dword(pdev, 248 + LBCIF_DATA_REGISTER, &val); 249 + } while ((val & 0x00010000) == 0); 250 + } while (val & 0x00040000); 251 + 252 + if ((val & 0xFF00) != 0xC000 || index == 10000) 253 + break; 254 + index++; 255 + } 256 + return writeok ? 0 : -EIO; 257 + } 258 + 259 + /** 260 + * eeprom_read - Read a byte from the ET1310's EEPROM 261 + * @adapter: pointer to our private adapter structure 262 + * @addr: the address from which to read 263 + * @pdata: a pointer to a byte in which to store the value of the read 264 + * @eeprom_id: the ID of the EEPROM 265 + * @addrmode: how the EEPROM is to be accessed 266 + * 267 + * Returns 1 for a successful read 268 + */ 269 + static int eeprom_read(struct et131x_adapter *adapter, u32 addr, u8 *pdata) 270 + { 271 + struct pci_dev *pdev = adapter->pdev; 272 + int err; 273 + u32 status; 274 + 275 + /* 276 + * A single byte read is similar to the single byte write, with the 277 + * exception of the data flow: 278 + */ 279 + 280 + err = eeprom_wait_ready(pdev, NULL); 281 + if (err) 282 + return err; 283 + /* 284 + * Write to the LBCIF Control Register: bit 7=1, bit 6=0, bit 3=0, 285 + * and bits 1:0 both =0. 
Bit 5 should be set according to the type 286 + * of EEPROM being accessed (1=two byte addressing, 0=one byte 287 + * addressing). 288 + */ 289 + if (pci_write_config_byte(pdev, LBCIF_CONTROL_REGISTER, 290 + LBCIF_CONTROL_LBCIF_ENABLE)) 291 + return -EIO; 292 + /* 293 + * Write the address to the LBCIF Address Register (I2C read will 294 + * begin). 295 + */ 296 + if (pci_write_config_dword(pdev, LBCIF_ADDRESS_REGISTER, addr)) 297 + return -EIO; 298 + /* 299 + * Monitor bit 0 of the LBCIF Status Register. When = 1, I2C read 300 + * is complete. (if bit 1 =1 and bit 0 stays = 0, a hardware failure 301 + * has occurred). 302 + */ 303 + err = eeprom_wait_ready(pdev, &status); 304 + if (err < 0) 305 + return err; 306 + /* 307 + * Regardless of error status, read data byte from LBCIF Data 308 + * Register. 309 + */ 310 + *pdata = err; 311 + /* 312 + * Check bit 2 of the LBCIF Status Register. If = 1, 313 + * then an error has occurred. 314 + */ 315 + return (status & LBCIF_STATUS_ACK_ERROR) ? -EIO : 0; 316 + } 317 + 318 + int et131x_init_eeprom(struct et131x_adapter *adapter) 319 + { 320 + struct pci_dev *pdev = adapter->pdev; 321 + u8 eestatus; 322 + 323 + /* We first need to check the EEPROM Status code located at offset 324 + * 0xB2 of config space 325 + */ 326 + pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, 327 + &eestatus); 328 + 329 + /* THIS IS A WORKAROUND: 330 + * I need to call this function twice to get my card in a 331 + * LG M1 Express Dual running. I tried also a msleep before this 332 + * function, because I thougth there could be some time condidions 333 + * but it didn't work. Call the whole function twice also work. 334 + */ 335 + if (pci_read_config_byte(pdev, ET1310_PCI_EEPROM_STATUS, &eestatus)) { 336 + dev_err(&pdev->dev, 337 + "Could not read PCI config space for EEPROM Status\n"); 338 + return -EIO; 339 + } 340 + 341 + /* Determine if the error(s) we care about are present. If they are 342 + * present we need to fail. 
343 + */ 344 + if (eestatus & 0x4C) { 345 + int write_failed = 0; 346 + if (pdev->revision == 0x01) { 347 + int i; 348 + static const u8 eedata[4] = { 0xFE, 0x13, 0x10, 0xFF }; 349 + 350 + /* Re-write the first 4 bytes if we have an eeprom 351 + * present and the revision id is 1, this fixes the 352 + * corruption seen with 1310 B Silicon 353 + */ 354 + for (i = 0; i < 3; i++) 355 + if (eeprom_write(adapter, i, eedata[i]) < 0) 356 + write_failed = 1; 357 + } 358 + if (pdev->revision != 0x01 || write_failed) { 359 + dev_err(&pdev->dev, 360 + "Fatal EEPROM Status Error - 0x%04x\n", eestatus); 361 + 362 + /* This error could mean that there was an error 363 + * reading the eeprom or that the eeprom doesn't exist. 364 + * We will treat each case the same and not try to 365 + * gather additional information that normally would 366 + * come from the eeprom, like MAC Address 367 + */ 368 + adapter->has_eeprom = 0; 369 + return -EIO; 370 + } 371 + } 372 + adapter->has_eeprom = 1; 373 + 374 + /* Read the EEPROM for information regarding LED behavior. Refer to 375 + * ET1310_phy.c, et131x_xcvr_init(), for its use. 376 + */ 377 + eeprom_read(adapter, 0x70, &adapter->eeprom_data[0]); 378 + eeprom_read(adapter, 0x71, &adapter->eeprom_data[1]); 379 + 380 + if (adapter->eeprom_data[0] != 0xcd) 381 + /* Disable all optional features */ 382 + adapter->eeprom_data[1] = 0x00; 383 + 384 + return 0; 385 + } 386 + 387 + /* MAC functions */ 388 + 389 + /** 390 + * et1310_config_mac_regs1 - Initialize the first part of MAC regs 391 + * @adapter: pointer to our adapter structure 392 + */ 393 + void et1310_config_mac_regs1(struct et131x_adapter *adapter) 394 + { 395 + struct mac_regs __iomem *macregs = &adapter->regs->mac; 396 + u32 station1; 397 + u32 station2; 398 + u32 ipg; 399 + 400 + /* First we need to reset everything. Write to MAC configuration 401 + * register 1 to perform reset. 
402 + */ 403 + writel(0xC00F0000, &macregs->cfg1); 404 + 405 + /* Next lets configure the MAC Inter-packet gap register */ 406 + ipg = 0x38005860; /* IPG1 0x38 IPG2 0x58 B2B 0x60 */ 407 + ipg |= 0x50 << 8; /* ifg enforce 0x50 */ 408 + writel(ipg, &macregs->ipg); 409 + 410 + /* Next lets configure the MAC Half Duplex register */ 411 + /* BEB trunc 0xA, Ex Defer, Rexmit 0xF Coll 0x37 */ 412 + writel(0x00A1F037, &macregs->hfdp); 413 + 414 + /* Next lets configure the MAC Interface Control register */ 415 + writel(0, &macregs->if_ctrl); 416 + 417 + /* Let's move on to setting up the mii management configuration */ 418 + writel(0x07, &macregs->mii_mgmt_cfg); /* Clock reset 0x7 */ 419 + 420 + /* Next lets configure the MAC Station Address register. These 421 + * values are read from the EEPROM during initialization and stored 422 + * in the adapter structure. We write what is stored in the adapter 423 + * structure to the MAC Station Address registers high and low. This 424 + * station address is used for generating and checking pause control 425 + * packets. 426 + */ 427 + station2 = (adapter->addr[1] << ET_MAC_STATION_ADDR2_OC2_SHIFT) | 428 + (adapter->addr[0] << ET_MAC_STATION_ADDR2_OC1_SHIFT); 429 + station1 = (adapter->addr[5] << ET_MAC_STATION_ADDR1_OC6_SHIFT) | 430 + (adapter->addr[4] << ET_MAC_STATION_ADDR1_OC5_SHIFT) | 431 + (adapter->addr[3] << ET_MAC_STATION_ADDR1_OC4_SHIFT) | 432 + adapter->addr[2]; 433 + writel(station1, &macregs->station_addr_1); 434 + writel(station2, &macregs->station_addr_2); 435 + 436 + /* Max ethernet packet in bytes that will passed by the mac without 437 + * being truncated. Allow the MAC to pass 4 more than our max packet 438 + * size. This is 4 for the Ethernet CRC. 439 + * 440 + * Packets larger than (registry_jumbo_packet) that do not contain a 441 + * VLAN ID will be dropped by the Rx function. 
442 + */ 443 + writel(adapter->registry_jumbo_packet + 4, &macregs->max_fm_len); 444 + 445 + /* clear out MAC config reset */ 446 + writel(0, &macregs->cfg1); 447 + } 448 + 449 + /** 450 + * et1310_config_mac_regs2 - Initialize the second part of MAC regs 451 + * @adapter: pointer to our adapter structure 452 + */ 453 + void et1310_config_mac_regs2(struct et131x_adapter *adapter) 454 + { 455 + int32_t delay = 0; 456 + struct mac_regs __iomem *mac = &adapter->regs->mac; 457 + struct phy_device *phydev = adapter->phydev; 458 + u32 cfg1; 459 + u32 cfg2; 460 + u32 ifctrl; 461 + u32 ctl; 462 + 463 + ctl = readl(&adapter->regs->txmac.ctl); 464 + cfg1 = readl(&mac->cfg1); 465 + cfg2 = readl(&mac->cfg2); 466 + ifctrl = readl(&mac->if_ctrl); 467 + 468 + /* Set up the if mode bits */ 469 + cfg2 &= ~0x300; 470 + if (phydev && phydev->speed == SPEED_1000) { 471 + cfg2 |= 0x200; 472 + /* Phy mode bit */ 473 + ifctrl &= ~(1 << 24); 474 + } else { 475 + cfg2 |= 0x100; 476 + ifctrl |= (1 << 24); 477 + } 478 + 479 + /* We need to enable Rx/Tx */ 480 + cfg1 |= CFG1_RX_ENABLE | CFG1_TX_ENABLE | CFG1_TX_FLOW; 481 + /* Initialize loop back to off */ 482 + cfg1 &= ~(CFG1_LOOPBACK | CFG1_RX_FLOW); 483 + if (adapter->flowcontrol == FLOW_RXONLY || 484 + adapter->flowcontrol == FLOW_BOTH) 485 + cfg1 |= CFG1_RX_FLOW; 486 + writel(cfg1, &mac->cfg1); 487 + 488 + /* Now we need to initialize the MAC Configuration 2 register */ 489 + /* preamble 7, check length, huge frame off, pad crc, crc enable 490 + full duplex off */ 491 + cfg2 |= 0x7016; 492 + cfg2 &= ~0x0021; 493 + 494 + /* Turn on duplex if needed */ 495 + if (phydev && phydev->duplex == DUPLEX_FULL) 496 + cfg2 |= 0x01; 497 + 498 + ifctrl &= ~(1 << 26); 499 + if (phydev && phydev->duplex == DUPLEX_HALF) 500 + ifctrl |= (1<<26); /* Enable ghd */ 501 + 502 + writel(ifctrl, &mac->if_ctrl); 503 + writel(cfg2, &mac->cfg2); 504 + 505 + do { 506 + udelay(10); 507 + delay++; 508 + cfg1 = readl(&mac->cfg1); 509 + } while ((cfg1 & CFG1_WAIT) != 
CFG1_WAIT && delay < 100); 510 + 511 + if (delay == 100) { 512 + dev_warn(&adapter->pdev->dev, 513 + "Syncd bits did not respond correctly cfg1 word 0x%08x\n", 514 + cfg1); 515 + } 516 + 517 + /* Enable txmac */ 518 + ctl |= 0x09; /* TX mac enable, FC disable */ 519 + writel(ctl, &adapter->regs->txmac.ctl); 520 + 521 + /* Ready to start the RXDMA/TXDMA engine */ 522 + if (adapter->flags & fMP_ADAPTER_LOWER_POWER) { 523 + et131x_rx_dma_enable(adapter); 524 + et131x_tx_dma_enable(adapter); 525 + } 526 + } 527 + 528 + void et1310_config_rxmac_regs(struct et131x_adapter *adapter) 529 + { 530 + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 531 + struct phy_device *phydev = adapter->phydev; 532 + u32 sa_lo; 533 + u32 sa_hi = 0; 534 + u32 pf_ctrl = 0; 535 + 536 + /* Disable the MAC while it is being configured (also disable WOL) */ 537 + writel(0x8, &rxmac->ctrl); 538 + 539 + /* Initialize WOL to disabled. */ 540 + writel(0, &rxmac->crc0); 541 + writel(0, &rxmac->crc12); 542 + writel(0, &rxmac->crc34); 543 + 544 + /* We need to set the WOL mask0 - mask4 next. We initialize it to 545 + * its default Values of 0x00000000 because there are not WOL masks 546 + * as of this time. 
547 + */ 548 + writel(0, &rxmac->mask0_word0); 549 + writel(0, &rxmac->mask0_word1); 550 + writel(0, &rxmac->mask0_word2); 551 + writel(0, &rxmac->mask0_word3); 552 + 553 + writel(0, &rxmac->mask1_word0); 554 + writel(0, &rxmac->mask1_word1); 555 + writel(0, &rxmac->mask1_word2); 556 + writel(0, &rxmac->mask1_word3); 557 + 558 + writel(0, &rxmac->mask2_word0); 559 + writel(0, &rxmac->mask2_word1); 560 + writel(0, &rxmac->mask2_word2); 561 + writel(0, &rxmac->mask2_word3); 562 + 563 + writel(0, &rxmac->mask3_word0); 564 + writel(0, &rxmac->mask3_word1); 565 + writel(0, &rxmac->mask3_word2); 566 + writel(0, &rxmac->mask3_word3); 567 + 568 + writel(0, &rxmac->mask4_word0); 569 + writel(0, &rxmac->mask4_word1); 570 + writel(0, &rxmac->mask4_word2); 571 + writel(0, &rxmac->mask4_word3); 572 + 573 + /* Lets setup the WOL Source Address */ 574 + sa_lo = (adapter->addr[2] << ET_WOL_LO_SA3_SHIFT) | 575 + (adapter->addr[3] << ET_WOL_LO_SA4_SHIFT) | 576 + (adapter->addr[4] << ET_WOL_LO_SA5_SHIFT) | 577 + adapter->addr[5]; 578 + writel(sa_lo, &rxmac->sa_lo); 579 + 580 + sa_hi = (u32) (adapter->addr[0] << ET_WOL_HI_SA1_SHIFT) | 581 + adapter->addr[1]; 582 + writel(sa_hi, &rxmac->sa_hi); 583 + 584 + /* Disable all Packet Filtering */ 585 + writel(0, &rxmac->pf_ctrl); 586 + 587 + /* Let's initialize the Unicast Packet filtering address */ 588 + if (adapter->packet_filter & ET131X_PACKET_TYPE_DIRECTED) { 589 + et1310_setup_device_for_unicast(adapter); 590 + pf_ctrl |= 4; /* Unicast filter */ 591 + } else { 592 + writel(0, &rxmac->uni_pf_addr1); 593 + writel(0, &rxmac->uni_pf_addr2); 594 + writel(0, &rxmac->uni_pf_addr3); 595 + } 596 + 597 + /* Let's initialize the Multicast hash */ 598 + if (!(adapter->packet_filter & ET131X_PACKET_TYPE_ALL_MULTICAST)) { 599 + pf_ctrl |= 2; /* Multicast filter */ 600 + et1310_setup_device_for_multicast(adapter); 601 + } 602 + 603 + /* Runt packet filtering. Didn't work in version A silicon. 
*/ 604 + pf_ctrl |= (NIC_MIN_PACKET_SIZE + 4) << 16; 605 + pf_ctrl |= 8; /* Fragment filter */ 606 + 607 + if (adapter->registry_jumbo_packet > 8192) 608 + /* In order to transmit jumbo packets greater than 8k, the 609 + * FIFO between RxMAC and RxDMA needs to be reduced in size 610 + * to (16k - Jumbo packet size). In order to implement this, 611 + * we must use "cut through" mode in the RxMAC, which chops 612 + * packets down into segments which are (max_size * 16). In 613 + * this case we selected 256 bytes, since this is the size of 614 + * the PCI-Express TLP's that the 1310 uses. 615 + * 616 + * seg_en on, fc_en off, size 0x10 617 + */ 618 + writel(0x41, &rxmac->mcif_ctrl_max_seg); 619 + else 620 + writel(0, &rxmac->mcif_ctrl_max_seg); 621 + 622 + /* Initialize the MCIF water marks */ 623 + writel(0, &rxmac->mcif_water_mark); 624 + 625 + /* Initialize the MIF control */ 626 + writel(0, &rxmac->mif_ctrl); 627 + 628 + /* Initialize the Space Available Register */ 629 + writel(0, &rxmac->space_avail); 630 + 631 + /* Initialize the the mif_ctrl register 632 + * bit 3: Receive code error. One or more nibbles were signaled as 633 + * errors during the reception of the packet. Clear this 634 + * bit in Gigabit, set it in 100Mbit. This was derived 635 + * experimentally at UNH. 636 + * bit 4: Receive CRC error. The packet's CRC did not match the 637 + * internally generated CRC. 638 + * bit 5: Receive length check error. Indicates that frame length 639 + * field value in the packet does not match the actual data 640 + * byte length and is not a type field. 641 + * bit 16: Receive frame truncated. 642 + * bit 17: Drop packet enable 643 + */ 644 + if (phydev && phydev->speed == SPEED_100) 645 + writel(0x30038, &rxmac->mif_ctrl); 646 + else 647 + writel(0x30030, &rxmac->mif_ctrl); 648 + 649 + /* Finally we initialize RxMac to be enabled & WOL disabled. Packet 650 + * filter is always enabled since it is where the runt packets are 651 + * supposed to be dropped. 
For version A silicon, runt packet 652 + * dropping doesn't work, so it is disabled in the pf_ctrl register, 653 + * but we still leave the packet filter on. 654 + */ 655 + writel(pf_ctrl, &rxmac->pf_ctrl); 656 + writel(0x9, &rxmac->ctrl); 657 + } 658 + 659 + void et1310_config_txmac_regs(struct et131x_adapter *adapter) 660 + { 661 + struct txmac_regs __iomem *txmac = &adapter->regs->txmac; 662 + 663 + /* We need to update the Control Frame Parameters 664 + * cfpt - control frame pause timer set to 64 (0x40) 665 + * cfep - control frame extended pause timer set to 0x0 666 + */ 667 + if (adapter->flowcontrol == FLOW_NONE) 668 + writel(0, &txmac->cf_param); 669 + else 670 + writel(0x40, &txmac->cf_param); 671 + } 672 + 673 + void et1310_config_macstat_regs(struct et131x_adapter *adapter) 674 + { 675 + struct macstat_regs __iomem *macstat = 676 + &adapter->regs->macstat; 677 + 678 + /* Next we need to initialize all the macstat registers to zero on 679 + * the device. 680 + */ 681 + writel(0, &macstat->txrx_0_64_byte_frames); 682 + writel(0, &macstat->txrx_65_127_byte_frames); 683 + writel(0, &macstat->txrx_128_255_byte_frames); 684 + writel(0, &macstat->txrx_256_511_byte_frames); 685 + writel(0, &macstat->txrx_512_1023_byte_frames); 686 + writel(0, &macstat->txrx_1024_1518_byte_frames); 687 + writel(0, &macstat->txrx_1519_1522_gvln_frames); 688 + 689 + writel(0, &macstat->rx_bytes); 690 + writel(0, &macstat->rx_packets); 691 + writel(0, &macstat->rx_fcs_errs); 692 + writel(0, &macstat->rx_multicast_packets); 693 + writel(0, &macstat->rx_broadcast_packets); 694 + writel(0, &macstat->rx_control_frames); 695 + writel(0, &macstat->rx_pause_frames); 696 + writel(0, &macstat->rx_unknown_opcodes); 697 + writel(0, &macstat->rx_align_errs); 698 + writel(0, &macstat->rx_frame_len_errs); 699 + writel(0, &macstat->rx_code_errs); 700 + writel(0, &macstat->rx_carrier_sense_errs); 701 + writel(0, &macstat->rx_undersize_packets); 702 + writel(0, &macstat->rx_oversize_packets); 703 + 
writel(0, &macstat->rx_fragment_packets); 704 + writel(0, &macstat->rx_jabbers); 705 + writel(0, &macstat->rx_drops); 706 + 707 + writel(0, &macstat->tx_bytes); 708 + writel(0, &macstat->tx_packets); 709 + writel(0, &macstat->tx_multicast_packets); 710 + writel(0, &macstat->tx_broadcast_packets); 711 + writel(0, &macstat->tx_pause_frames); 712 + writel(0, &macstat->tx_deferred); 713 + writel(0, &macstat->tx_excessive_deferred); 714 + writel(0, &macstat->tx_single_collisions); 715 + writel(0, &macstat->tx_multiple_collisions); 716 + writel(0, &macstat->tx_late_collisions); 717 + writel(0, &macstat->tx_excessive_collisions); 718 + writel(0, &macstat->tx_total_collisions); 719 + writel(0, &macstat->tx_pause_honored_frames); 720 + writel(0, &macstat->tx_drops); 721 + writel(0, &macstat->tx_jabbers); 722 + writel(0, &macstat->tx_fcs_errs); 723 + writel(0, &macstat->tx_control_frames); 724 + writel(0, &macstat->tx_oversize_frames); 725 + writel(0, &macstat->tx_undersize_frames); 726 + writel(0, &macstat->tx_fragments); 727 + writel(0, &macstat->carry_reg1); 728 + writel(0, &macstat->carry_reg2); 729 + 730 + /* Unmask any counters that we want to track the overflow of. 731 + * Initially this will be all counters. It may become clear later 732 + * that we do not need to track all counters. 
733 + */ 734 + writel(0xFFFFBE32, &macstat->carry_reg1_mask); 735 + writel(0xFFFE7E8B, &macstat->carry_reg2_mask); 736 + } 737 + 738 + void et1310_config_flow_control(struct et131x_adapter *adapter) 739 + { 740 + struct phy_device *phydev = adapter->phydev; 741 + 742 + if (phydev->duplex == DUPLEX_HALF) { 743 + adapter->flowcontrol = FLOW_NONE; 744 + } else { 745 + char remote_pause, remote_async_pause; 746 + 747 + et1310_phy_access_mii_bit(adapter, 748 + TRUEPHY_BIT_READ, 5, 10, &remote_pause); 749 + et1310_phy_access_mii_bit(adapter, 750 + TRUEPHY_BIT_READ, 5, 11, 751 + &remote_async_pause); 752 + 753 + if ((remote_pause == TRUEPHY_BIT_SET) && 754 + (remote_async_pause == TRUEPHY_BIT_SET)) { 755 + adapter->flowcontrol = adapter->wanted_flow; 756 + } else if ((remote_pause == TRUEPHY_BIT_SET) && 757 + (remote_async_pause == TRUEPHY_BIT_CLEAR)) { 758 + if (adapter->wanted_flow == FLOW_BOTH) 759 + adapter->flowcontrol = FLOW_BOTH; 760 + else 761 + adapter->flowcontrol = FLOW_NONE; 762 + } else if ((remote_pause == TRUEPHY_BIT_CLEAR) && 763 + (remote_async_pause == TRUEPHY_BIT_CLEAR)) { 764 + adapter->flowcontrol = FLOW_NONE; 765 + } else {/* if (remote_pause == TRUEPHY_CLEAR_BIT && 766 + remote_async_pause == TRUEPHY_SET_BIT) */ 767 + if (adapter->wanted_flow == FLOW_BOTH) 768 + adapter->flowcontrol = FLOW_RXONLY; 769 + else 770 + adapter->flowcontrol = FLOW_NONE; 771 + } 772 + } 773 + } 774 + 775 + /** 776 + * et1310_update_macstat_host_counters - Update the local copy of the statistics 777 + * @adapter: pointer to the adapter structure 778 + */ 779 + void et1310_update_macstat_host_counters(struct et131x_adapter *adapter) 780 + { 781 + struct ce_stats *stats = &adapter->stats; 782 + struct macstat_regs __iomem *macstat = 783 + &adapter->regs->macstat; 784 + 785 + stats->tx_collisions += readl(&macstat->tx_total_collisions); 786 + stats->tx_first_collisions += readl(&macstat->tx_single_collisions); 787 + stats->tx_deferred += readl(&macstat->tx_deferred); 788 + 
stats->tx_excessive_collisions += 789 + readl(&macstat->tx_multiple_collisions); 790 + stats->tx_late_collisions += readl(&macstat->tx_late_collisions); 791 + stats->tx_underflows += readl(&macstat->tx_undersize_frames); 792 + stats->tx_max_pkt_errs += readl(&macstat->tx_oversize_frames); 793 + 794 + stats->rx_align_errs += readl(&macstat->rx_align_errs); 795 + stats->rx_crc_errs += readl(&macstat->rx_code_errs); 796 + stats->rcvd_pkts_dropped += readl(&macstat->rx_drops); 797 + stats->rx_overflows += readl(&macstat->rx_oversize_packets); 798 + stats->rx_code_violations += readl(&macstat->rx_fcs_errs); 799 + stats->rx_length_errs += readl(&macstat->rx_frame_len_errs); 800 + stats->rx_other_errs += readl(&macstat->rx_fragment_packets); 801 + } 802 + 803 + /** 804 + * et1310_handle_macstat_interrupt 805 + * @adapter: pointer to the adapter structure 806 + * 807 + * One of the MACSTAT counters has wrapped. Update the local copy of 808 + * the statistics held in the adapter structure, checking the "wrap" 809 + * bit for each counter. 810 + */ 811 + void et1310_handle_macstat_interrupt(struct et131x_adapter *adapter) 812 + { 813 + u32 carry_reg1; 814 + u32 carry_reg2; 815 + 816 + /* Read the interrupt bits from the register(s). These are Clear On 817 + * Write. 818 + */ 819 + carry_reg1 = readl(&adapter->regs->macstat.carry_reg1); 820 + carry_reg2 = readl(&adapter->regs->macstat.carry_reg2); 821 + 822 + writel(carry_reg1, &adapter->regs->macstat.carry_reg1); 823 + writel(carry_reg2, &adapter->regs->macstat.carry_reg2); 824 + 825 + /* We need to do update the host copy of all the MAC_STAT counters. 826 + * For each counter, check it's overflow bit. If the overflow bit is 827 + * set, then increment the host version of the count by one complete 828 + * revolution of the counter. This routine is called when the counter 829 + * block indicates that one of the counters has wrapped. 
830 + */ 831 + if (carry_reg1 & (1 << 14)) 832 + adapter->stats.rx_code_violations += COUNTER_WRAP_16_BIT; 833 + if (carry_reg1 & (1 << 8)) 834 + adapter->stats.rx_align_errs += COUNTER_WRAP_12_BIT; 835 + if (carry_reg1 & (1 << 7)) 836 + adapter->stats.rx_length_errs += COUNTER_WRAP_16_BIT; 837 + if (carry_reg1 & (1 << 2)) 838 + adapter->stats.rx_other_errs += COUNTER_WRAP_16_BIT; 839 + if (carry_reg1 & (1 << 6)) 840 + adapter->stats.rx_crc_errs += COUNTER_WRAP_16_BIT; 841 + if (carry_reg1 & (1 << 3)) 842 + adapter->stats.rx_overflows += COUNTER_WRAP_16_BIT; 843 + if (carry_reg1 & (1 << 0)) 844 + adapter->stats.rcvd_pkts_dropped += COUNTER_WRAP_16_BIT; 845 + if (carry_reg2 & (1 << 16)) 846 + adapter->stats.tx_max_pkt_errs += COUNTER_WRAP_12_BIT; 847 + if (carry_reg2 & (1 << 15)) 848 + adapter->stats.tx_underflows += COUNTER_WRAP_12_BIT; 849 + if (carry_reg2 & (1 << 6)) 850 + adapter->stats.tx_first_collisions += COUNTER_WRAP_12_BIT; 851 + if (carry_reg2 & (1 << 8)) 852 + adapter->stats.tx_deferred += COUNTER_WRAP_12_BIT; 853 + if (carry_reg2 & (1 << 5)) 854 + adapter->stats.tx_excessive_collisions += COUNTER_WRAP_12_BIT; 855 + if (carry_reg2 & (1 << 4)) 856 + adapter->stats.tx_late_collisions += COUNTER_WRAP_12_BIT; 857 + if (carry_reg2 & (1 << 2)) 858 + adapter->stats.tx_collisions += COUNTER_WRAP_12_BIT; 859 + } 860 + 861 + void et1310_setup_device_for_multicast(struct et131x_adapter *adapter) 862 + { 863 + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 864 + uint32_t nIndex; 865 + uint32_t result; 866 + uint32_t hash1 = 0; 867 + uint32_t hash2 = 0; 868 + uint32_t hash3 = 0; 869 + uint32_t hash4 = 0; 870 + u32 pm_csr; 871 + 872 + /* If ET131X_PACKET_TYPE_MULTICAST is specified, then we provision 873 + * the multi-cast LIST. If it is NOT specified, (and "ALL" is not 874 + * specified) then we should pass NO multi-cast addresses to the 875 + * driver. 
876 + */ 877 + if (adapter->packet_filter & ET131X_PACKET_TYPE_MULTICAST) { 878 + /* Loop through our multicast array and set up the device */ 879 + for (nIndex = 0; nIndex < adapter->multicast_addr_count; 880 + nIndex++) { 881 + result = ether_crc(6, adapter->multicast_list[nIndex]); 882 + 883 + result = (result & 0x3F800000) >> 23; 884 + 885 + if (result < 32) { 886 + hash1 |= (1 << result); 887 + } else if ((31 < result) && (result < 64)) { 888 + result -= 32; 889 + hash2 |= (1 << result); 890 + } else if ((63 < result) && (result < 96)) { 891 + result -= 64; 892 + hash3 |= (1 << result); 893 + } else { 894 + result -= 96; 895 + hash4 |= (1 << result); 896 + } 897 + } 898 + } 899 + 900 + /* Write out the new hash to the device */ 901 + pm_csr = readl(&adapter->regs->global.pm_csr); 902 + if (!et1310_in_phy_coma(adapter)) { 903 + writel(hash1, &rxmac->multi_hash1); 904 + writel(hash2, &rxmac->multi_hash2); 905 + writel(hash3, &rxmac->multi_hash3); 906 + writel(hash4, &rxmac->multi_hash4); 907 + } 908 + } 909 + 910 + void et1310_setup_device_for_unicast(struct et131x_adapter *adapter) 911 + { 912 + struct rxmac_regs __iomem *rxmac = &adapter->regs->rxmac; 913 + u32 uni_pf1; 914 + u32 uni_pf2; 915 + u32 uni_pf3; 916 + u32 pm_csr; 917 + 918 + /* Set up unicast packet filter reg 3 to be the first two octets of 919 + * the MAC address for both address 920 + * 921 + * Set up unicast packet filter reg 2 to be the octets 2 - 5 of the 922 + * MAC address for second address 923 + * 924 + * Set up unicast packet filter reg 3 to be the octets 2 - 5 of the 925 + * MAC address for first address 926 + */ 927 + uni_pf3 = (adapter->addr[0] << ET_UNI_PF_ADDR2_1_SHIFT) | 928 + (adapter->addr[1] << ET_UNI_PF_ADDR2_2_SHIFT) | 929 + (adapter->addr[0] << ET_UNI_PF_ADDR1_1_SHIFT) | 930 + adapter->addr[1]; 931 + 932 + uni_pf2 = (adapter->addr[2] << ET_UNI_PF_ADDR2_3_SHIFT) | 933 + (adapter->addr[3] << ET_UNI_PF_ADDR2_4_SHIFT) | 934 + (adapter->addr[4] << ET_UNI_PF_ADDR2_5_SHIFT) | 935 + 
adapter->addr[5]; 936 + 937 + uni_pf1 = (adapter->addr[2] << ET_UNI_PF_ADDR1_3_SHIFT) | 938 + (adapter->addr[3] << ET_UNI_PF_ADDR1_4_SHIFT) | 939 + (adapter->addr[4] << ET_UNI_PF_ADDR1_5_SHIFT) | 940 + adapter->addr[5]; 941 + 942 + pm_csr = readl(&adapter->regs->global.pm_csr); 943 + if (!et1310_in_phy_coma(adapter)) { 944 + writel(uni_pf1, &rxmac->uni_pf_addr1); 945 + writel(uni_pf2, &rxmac->uni_pf_addr2); 946 + writel(uni_pf3, &rxmac->uni_pf_addr3); 947 + } 948 + } 949 + 950 + /* PHY functions */ 951 + 952 + int et131x_mdio_read(struct mii_bus *bus, int phy_addr, int reg) 953 + { 954 + struct net_device *netdev = bus->priv; 955 + struct et131x_adapter *adapter = netdev_priv(netdev); 956 + u16 value; 957 + int ret; 958 + 959 + ret = et131x_phy_mii_read(adapter, phy_addr, reg, &value); 960 + 961 + if (ret < 0) 962 + return ret; 963 + else 964 + return value; 965 + } 966 + 967 + int et131x_mdio_write(struct mii_bus *bus, int phy_addr, int reg, u16 value) 968 + { 969 + struct net_device *netdev = bus->priv; 970 + struct et131x_adapter *adapter = netdev_priv(netdev); 971 + 972 + return et131x_mii_write(adapter, reg, value); 973 + } 974 + 975 + int et131x_mdio_reset(struct mii_bus *bus) 976 + { 977 + struct net_device *netdev = bus->priv; 978 + struct et131x_adapter *adapter = netdev_priv(netdev); 979 + 980 + et131x_mii_write(adapter, MII_BMCR, BMCR_RESET); 981 + 982 + return 0; 983 + } 984 + 985 + int et131x_mii_read(struct et131x_adapter *adapter, u8 reg, u16 *value) 986 + { 987 + struct phy_device *phydev = adapter->phydev; 988 + 989 + if (!phydev) 990 + return -EIO; 991 + 992 + return et131x_phy_mii_read(adapter, phydev->addr, reg, value); 993 + } 994 + 995 + /** 996 + * et131x_phy_mii_read - Read from the PHY through the MII Interface on the MAC 997 + * @adapter: pointer to our private adapter structure 998 + * @addr: the address of the transceiver 999 + * @reg: the register to read 1000 + * @value: pointer to a 16-bit value in which the value will be stored 1001 
+ * 1002 + * Returns 0 on success, errno on failure (as defined in errno.h) 1003 + */ 1004 + int et131x_phy_mii_read(struct et131x_adapter *adapter, u8 addr, 1005 + u8 reg, u16 *value) 1006 + { 1007 + struct mac_regs __iomem *mac = &adapter->regs->mac; 1008 + int status = 0; 1009 + u32 delay = 0; 1010 + u32 mii_addr; 1011 + u32 mii_cmd; 1012 + u32 mii_indicator; 1013 + 1014 + /* Save a local copy of the registers we are dealing with so we can 1015 + * set them back 1016 + */ 1017 + mii_addr = readl(&mac->mii_mgmt_addr); 1018 + mii_cmd = readl(&mac->mii_mgmt_cmd); 1019 + 1020 + /* Stop the current operation */ 1021 + writel(0, &mac->mii_mgmt_cmd); 1022 + 1023 + /* Set up the register we need to read from on the correct PHY */ 1024 + writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr); 1025 + 1026 + writel(0x1, &mac->mii_mgmt_cmd); 1027 + 1028 + do { 1029 + udelay(50); 1030 + delay++; 1031 + mii_indicator = readl(&mac->mii_mgmt_indicator); 1032 + } while ((mii_indicator & MGMT_WAIT) && delay < 50); 1033 + 1034 + /* If we hit the max delay, we could not read the register */ 1035 + if (delay == 50) { 1036 + dev_warn(&adapter->pdev->dev, 1037 + "reg 0x%08x could not be read\n", reg); 1038 + dev_warn(&adapter->pdev->dev, "status is 0x%08x\n", 1039 + mii_indicator); 1040 + 1041 + status = -EIO; 1042 + } 1043 + 1044 + /* If we hit here we were able to read the register and we need to 1045 + * return the value to the caller */ 1046 + *value = readl(&mac->mii_mgmt_stat) & 0xFFFF; 1047 + 1048 + /* Stop the read operation */ 1049 + writel(0, &mac->mii_mgmt_cmd); 1050 + 1051 + /* set the registers we touched back to the state at which we entered 1052 + * this function 1053 + */ 1054 + writel(mii_addr, &mac->mii_mgmt_addr); 1055 + writel(mii_cmd, &mac->mii_mgmt_cmd); 1056 + 1057 + return status; 1058 + } 1059 + 1060 + /** 1061 + * et131x_mii_write - Write to a PHY register through the MII interface of the MAC 1062 + * @adapter: pointer to our private adapter structure 1063 + * 
 * @reg: the register to write
 * @value: 16-bit value to write
 *
 * FIXME: one caller in netdev still
 *
 * Return 0 on success, errno on failure (as defined in errno.h)
 */
int et131x_mii_write(struct et131x_adapter *adapter, u8 reg, u16 value)
{
	struct mac_regs __iomem *mac = &adapter->regs->mac;
	struct phy_device *phydev = adapter->phydev;
	int status = 0;
	u8 addr;
	u32 delay = 0;
	u32 mii_addr;
	u32 mii_cmd;
	u32 mii_indicator;

	if (!phydev)
		return -EIO;

	addr = phydev->addr;

	/* Save a local copy of the registers we are dealing with so we can
	 * set them back
	 */
	mii_addr = readl(&mac->mii_mgmt_addr);
	mii_cmd = readl(&mac->mii_mgmt_cmd);

	/* Stop the current operation */
	writel(0, &mac->mii_mgmt_cmd);

	/* Set up the register we need to write to on the correct PHY */
	writel(MII_ADDR(addr, reg), &mac->mii_mgmt_addr);

	/* Add the value to write to the registers to the mac */
	writel(value, &mac->mii_mgmt_ctrl);

	/* Poll up to 100 * 50us = 5ms for the management cycle to finish */
	do {
		udelay(50);
		delay++;
		mii_indicator = readl(&mac->mii_mgmt_indicator);
	} while ((mii_indicator & MGMT_BUSY) && delay < 100);

	/* If we hit the max delay, we could not write the register */
	if (delay == 100) {
		u16 tmp;

		dev_warn(&adapter->pdev->dev,
		    "reg 0x%08x could not be written", reg);
		dev_warn(&adapter->pdev->dev, "status is  0x%08x\n",
			    mii_indicator);
		dev_warn(&adapter->pdev->dev, "command is  0x%08x\n",
			    readl(&mac->mii_mgmt_cmd));

		/* Read-back is purely diagnostic; the result is discarded */
		et131x_mii_read(adapter, reg, &tmp);

		status = -EIO;
	}
	/* Stop the write operation */
	writel(0, &mac->mii_mgmt_cmd);

	/*
	 * set the registers we touched back to the state at which we entered
	 * this function
	 */
	writel(mii_addr, &mac->mii_mgmt_addr);
	writel(mii_cmd, &mac->mii_mgmt_cmd);

	return status;
}

/**
 * et1310_phy_power_down - PHY power control
 * @adapter: device to control
 * @down: true for off/false for back on
 *
 * one hundred, ten, one thousand megs
 * How would you like to have your LAN accessed
 * Can't you see that this code processed
 * Phy power, phy power..
 */
void et1310_phy_power_down(struct et131x_adapter *adapter, bool down)
{
	u16 data;

	/* Read-modify-write the BMCR power-down bit */
	et131x_mii_read(adapter, MII_BMCR, &data);
	data &= ~BMCR_PDOWN;
	if (down)
		data |= BMCR_PDOWN;
	et131x_mii_write(adapter, MII_BMCR, data);
}

/* Still used from _mac for BIT_READ */
/**
 * et1310_phy_access_mii_bit - read, set or clear a single bit of a PHY register
 * @adapter: pointer to our private adapter structure
 * @action: TRUEPHY_BIT_READ, TRUEPHY_BIT_SET or TRUEPHY_BIT_CLEAR
 * @regnum: PHY register number
 * @bitnum: bit position within the register
 * @value: for BIT_READ, receives 0 or 1; unused for set/clear
 */
void et1310_phy_access_mii_bit(struct et131x_adapter *adapter, u16 action,
			       u16 regnum, u16 bitnum, u8 *value)
{
	u16 reg;
	u16 mask = 0x0001 << bitnum;

	/* Read the requested register */
	et131x_mii_read(adapter, regnum, &reg);

	switch (action) {
	case TRUEPHY_BIT_READ:
		*value = (reg & mask) >> bitnum;
		break;

	case TRUEPHY_BIT_SET:
		et131x_mii_write(adapter, regnum, reg | mask);
		break;

	case TRUEPHY_BIT_CLEAR:
		et131x_mii_write(adapter, regnum, reg & ~mask);
		break;

	default:
		/* Unknown action: leave the register untouched */
		break;
	}
}

/**
 * et131x_xcvr_init - Init the phy if we are setting it into force mode
 * @adapter: pointer to our private adapter structure
 *
 */
void et131x_xcvr_init(struct et131x_adapter *adapter)
{
	u16 imr;
	u16 isr;
	u16 lcr2;

	/* Read (and discard) pending interrupt status, then the mask */
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, &isr);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, &imr);

	/* Set the link status interrupt only.
Bad behavior when link status 1199 + * and auto neg are set, we run into a nested interrupt problem 1200 + */ 1201 + imr |= (ET_PHY_INT_MASK_AUTONEGSTAT & 1202 + ET_PHY_INT_MASK_LINKSTAT & 1203 + ET_PHY_INT_MASK_ENABLE); 1204 + 1205 + et131x_mii_write(adapter, PHY_INTERRUPT_MASK, imr); 1206 + 1207 + /* Set the LED behavior such that LED 1 indicates speed (off = 1208 + * 10Mbits, blink = 100Mbits, on = 1000Mbits) and LED 2 indicates 1209 + * link and activity (on for link, blink off for activity). 1210 + * 1211 + * NOTE: Some customizations have been added here for specific 1212 + * vendors; The LED behavior is now determined by vendor data in the 1213 + * EEPROM. However, the above description is the default. 1214 + */ 1215 + if ((adapter->eeprom_data[1] & 0x4) == 0) { 1216 + et131x_mii_read(adapter, PHY_LED_2, &lcr2); 1217 + 1218 + lcr2 &= (ET_LED2_LED_100TX & ET_LED2_LED_1000T); 1219 + lcr2 |= (LED_VAL_LINKON_ACTIVE << LED_LINK_SHIFT); 1220 + 1221 + if ((adapter->eeprom_data[1] & 0x8) == 0) 1222 + lcr2 |= (LED_VAL_1000BT_100BTX << LED_TXRX_SHIFT); 1223 + else 1224 + lcr2 |= (LED_VAL_LINKON << LED_TXRX_SHIFT); 1225 + 1226 + et131x_mii_write(adapter, PHY_LED_2, lcr2); 1227 + } 1228 + } 1229 + 1230 + /* PM functions */ 1231 + 1232 + /** 1233 + * et1310_in_phy_coma - check if the device is in phy coma 1234 + * @adapter: pointer to our adapter structure 1235 + * 1236 + * Returns 0 if the device is not in phy coma, 1 if it is in phy coma 1237 + */ 1238 + int et1310_in_phy_coma(struct et131x_adapter *adapter) 1239 + { 1240 + u32 pmcsr; 1241 + 1242 + pmcsr = readl(&adapter->regs->global.pm_csr); 1243 + 1244 + return ET_PM_PHY_SW_COMA & pmcsr ? 1 : 0; 1245 + } 1246 + 1247 + /** 1248 + * et1310_enable_phy_coma - called when network cable is unplugged 1249 + * @adapter: pointer to our adapter structure 1250 + * 1251 + * driver receive an phy status change interrupt while in D0 and check that 1252 + * phy_status is down. 
 *
 *          -- gate off JAGCore;
 *          -- set gigE PHY in Coma mode
 *          -- wake on phy_interrupt; Perform software reset JAGCore,
 *             re-initialize jagcore and gigE PHY
 *
 *      Add D0-ASPM-PhyLinkDown Support:
 *          -- while in D0, when there is a phy_interrupt indicating phy link
 *             down status, call the MPSetPhyComa routine to enter this active
 *             state power saving mode
 *          -- while in D0-ASPM-PhyLinkDown mode, when there is a phy_interrupt
 *             indicating linkup status, call the MPDisablePhyComa routine to
 *             restore JAGCore and gigE PHY
 */
void et1310_enable_phy_coma(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Save the GbE PHY speed and duplex modes. Need to restore this
	 * when cable is plugged back in
	 */
	/*
	 * TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->pdown_speed = adapter->ai_force_speed;
	 * adapter->pdown_duplex = adapter->ai_force_duplex;
	 */

	/* Stop sending packets.  The flag is checked by the transmit path;
	 * the lock ensures it is seen consistently against in-flight sends.
	 */
	spin_lock_irqsave(&adapter->send_hw_lock, flags);
	adapter->flags |= fMP_ADAPTER_LOWER_POWER;
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	/* Wait for outstanding Receive packets */

	et131x_disable_txrx(adapter->netdev);

	/* Gate off JAGCore 3 clock domains */
	pmcsr &= ~ET_PMCSR_INIT;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Program gigE PHY in to Coma mode */
	pmcsr |= ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);
}

/**
 * et1310_disable_phy_coma - Disable the Phy Coma Mode
 * @adapter: pointer to our adapter structure
 */
void et1310_disable_phy_coma(struct et131x_adapter *adapter)
{
	u32 pmcsr;

	pmcsr = readl(&adapter->regs->global.pm_csr);

	/* Disable phy_sw_coma register and re-enable JAGCore clocks */
	pmcsr |= ET_PMCSR_INIT;
	pmcsr &= ~ET_PM_PHY_SW_COMA;
	writel(pmcsr, &adapter->regs->global.pm_csr);

	/* Restore the GbE PHY speed and duplex modes;
	 * Reset JAGCore; re-configure and initialize JAGCore and gigE PHY
	 */
	/* TODO - when PM is re-enabled, check if we need to
	 * perform a similar task as this -
	 * adapter->ai_force_speed = adapter->pdown_speed;
	 * adapter->ai_force_duplex = adapter->pdown_duplex;
	 */

	/* Re-initialize the send structures */
	et131x_init_send(adapter);

	/* Reset the RFD list and re-start RU */
	et131x_reset_recv(adapter);

	/* Bring the device back to the state it was during init prior to
	 * autonegotiation being complete.  This way, when we get the auto-neg
	 * complete interrupt, we can complete init by calling ConfigMacREGS2.
	 */
	et131x_soft_reset(adapter);

	/* setup et1310 as per the documentation ??
*/ 1339 + et131x_adapter_setup(adapter); 1340 + 1341 + /* Allow Tx to restart */ 1342 + adapter->flags &= ~fMP_ADAPTER_LOWER_POWER; 1343 + 1344 + et131x_enable_txrx(adapter->netdev); 1345 + } 1346 + 1347 + /* RX functions */ 1348 + 1349 + static inline u32 bump_free_buff_ring(u32 *free_buff_ring, u32 limit) 1350 + { 1351 + u32 tmp_free_buff_ring = *free_buff_ring; 1352 + tmp_free_buff_ring++; 1353 + /* This works for all cases where limit < 1024. The 1023 case 1354 + works because 1023++ is 1024 which means the if condition is not 1355 + taken but the carry of the bit into the wrap bit toggles the wrap 1356 + value correctly */ 1357 + if ((tmp_free_buff_ring & ET_DMA10_MASK) > limit) { 1358 + tmp_free_buff_ring &= ~ET_DMA10_MASK; 1359 + tmp_free_buff_ring ^= ET_DMA10_WRAP; 1360 + } 1361 + /* For the 1023 case */ 1362 + tmp_free_buff_ring &= (ET_DMA10_MASK|ET_DMA10_WRAP); 1363 + *free_buff_ring = tmp_free_buff_ring; 1364 + return tmp_free_buff_ring; 1365 + } 1366 + 1367 + /** 1368 + * et131x_rx_dma_memory_alloc 1369 + * @adapter: pointer to our private adapter structure 1370 + * 1371 + * Returns 0 on success and errno on failure (as defined in errno.h) 1372 + * 1373 + * Allocates Free buffer ring 1 for sure, free buffer ring 0 if required, 1374 + * and the Packet Status Ring. 1375 + */ 1376 + int et131x_rx_dma_memory_alloc(struct et131x_adapter *adapter) 1377 + { 1378 + u32 i, j; 1379 + u32 bufsize; 1380 + u32 pktstat_ringsize, fbr_chunksize; 1381 + struct rx_ring *rx_ring; 1382 + 1383 + /* Setup some convenience pointers */ 1384 + rx_ring = &adapter->rx_ring; 1385 + 1386 + /* Alloc memory for the lookup table */ 1387 + #ifdef USE_FBR0 1388 + rx_ring->fbr[0] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); 1389 + #endif 1390 + rx_ring->fbr[1] = kmalloc(sizeof(struct fbr_lookup), GFP_KERNEL); 1391 + 1392 + /* The first thing we will do is configure the sizes of the buffer 1393 + * rings. These will change based on jumbo packet support. 
Larger 1394 + * jumbo packets increases the size of each entry in FBR0, and the 1395 + * number of entries in FBR0, while at the same time decreasing the 1396 + * number of entries in FBR1. 1397 + * 1398 + * FBR1 holds "large" frames, FBR0 holds "small" frames. If FBR1 1399 + * entries are huge in order to accommodate a "jumbo" frame, then it 1400 + * will have less entries. Conversely, FBR1 will now be relied upon 1401 + * to carry more "normal" frames, thus it's entry size also increases 1402 + * and the number of entries goes up too (since it now carries 1403 + * "small" + "regular" packets. 1404 + * 1405 + * In this scheme, we try to maintain 512 entries between the two 1406 + * rings. Also, FBR1 remains a constant size - when it's size doubles 1407 + * the number of entries halves. FBR0 increases in size, however. 1408 + */ 1409 + 1410 + if (adapter->registry_jumbo_packet < 2048) { 1411 + #ifdef USE_FBR0 1412 + rx_ring->fbr0_buffsize = 256; 1413 + rx_ring->fbr0_num_entries = 512; 1414 + #endif 1415 + rx_ring->fbr1_buffsize = 2048; 1416 + rx_ring->fbr1_num_entries = 512; 1417 + } else if (adapter->registry_jumbo_packet < 4096) { 1418 + #ifdef USE_FBR0 1419 + rx_ring->fbr0_buffsize = 512; 1420 + rx_ring->fbr0_num_entries = 1024; 1421 + #endif 1422 + rx_ring->fbr1_buffsize = 4096; 1423 + rx_ring->fbr1_num_entries = 512; 1424 + } else { 1425 + #ifdef USE_FBR0 1426 + rx_ring->fbr0_buffsize = 1024; 1427 + rx_ring->fbr0_num_entries = 768; 1428 + #endif 1429 + rx_ring->fbr1_buffsize = 16384; 1430 + rx_ring->fbr1_num_entries = 128; 1431 + } 1432 + 1433 + #ifdef USE_FBR0 1434 + adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr0_num_entries + 1435 + adapter->rx_ring.fbr1_num_entries; 1436 + #else 1437 + adapter->rx_ring.psr_num_entries = adapter->rx_ring.fbr1_num_entries; 1438 + #endif 1439 + 1440 + /* Allocate an area of memory for Free Buffer Ring 1 */ 1441 + bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr1_num_entries) + 0xfff; 1442 + 
rx_ring->fbr1_ring_virtaddr = pci_alloc_consistent(adapter->pdev, 1443 + bufsize, 1444 + &rx_ring->fbr1_ring_physaddr); 1445 + if (!rx_ring->fbr1_ring_virtaddr) { 1446 + dev_err(&adapter->pdev->dev, 1447 + "Cannot alloc memory for Free Buffer Ring 1\n"); 1448 + return -ENOMEM; 1449 + } 1450 + 1451 + /* Save physical address 1452 + * 1453 + * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, 1454 + * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 1455 + * are ever returned, make sure the high part is retrieved here 1456 + * before storing the adjusted address. 1457 + */ 1458 + rx_ring->fbr1_real_physaddr = rx_ring->fbr1_ring_physaddr; 1459 + 1460 + /* Align Free Buffer Ring 1 on a 4K boundary */ 1461 + et131x_align_allocated_memory(adapter, 1462 + &rx_ring->fbr1_real_physaddr, 1463 + &rx_ring->fbr1_offset, 0x0FFF); 1464 + 1465 + rx_ring->fbr1_ring_virtaddr = 1466 + (void *)((u8 *) rx_ring->fbr1_ring_virtaddr + 1467 + rx_ring->fbr1_offset); 1468 + 1469 + #ifdef USE_FBR0 1470 + /* Allocate an area of memory for Free Buffer Ring 0 */ 1471 + bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr0_num_entries) + 0xfff; 1472 + rx_ring->fbr0_ring_virtaddr = pci_alloc_consistent(adapter->pdev, 1473 + bufsize, 1474 + &rx_ring->fbr0_ring_physaddr); 1475 + if (!rx_ring->fbr0_ring_virtaddr) { 1476 + dev_err(&adapter->pdev->dev, 1477 + "Cannot alloc memory for Free Buffer Ring 0\n"); 1478 + return -ENOMEM; 1479 + } 1480 + 1481 + /* Save physical address 1482 + * 1483 + * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, 1484 + * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 1485 + * are ever returned, make sure the high part is retrieved here before 1486 + * storing the adjusted address. 
1487 + */ 1488 + rx_ring->fbr0_real_physaddr = rx_ring->fbr0_ring_physaddr; 1489 + 1490 + /* Align Free Buffer Ring 0 on a 4K boundary */ 1491 + et131x_align_allocated_memory(adapter, 1492 + &rx_ring->fbr0_real_physaddr, 1493 + &rx_ring->fbr0_offset, 0x0FFF); 1494 + 1495 + rx_ring->fbr0_ring_virtaddr = 1496 + (void *)((u8 *) rx_ring->fbr0_ring_virtaddr + 1497 + rx_ring->fbr0_offset); 1498 + #endif 1499 + for (i = 0; i < (rx_ring->fbr1_num_entries / FBR_CHUNKS); i++) { 1500 + u64 fbr1_offset; 1501 + u64 fbr1_tmp_physaddr; 1502 + u32 fbr1_align; 1503 + 1504 + /* This code allocates an area of memory big enough for N 1505 + * free buffers + (buffer_size - 1) so that the buffers can 1506 + * be aligned on 4k boundaries. If each buffer were aligned 1507 + * to a buffer_size boundary, the effect would be to double 1508 + * the size of FBR0. By allocating N buffers at once, we 1509 + * reduce this overhead. 1510 + */ 1511 + if (rx_ring->fbr1_buffsize > 4096) 1512 + fbr1_align = 4096; 1513 + else 1514 + fbr1_align = rx_ring->fbr1_buffsize; 1515 + 1516 + fbr_chunksize = 1517 + (FBR_CHUNKS * rx_ring->fbr1_buffsize) + fbr1_align - 1; 1518 + rx_ring->fbr1_mem_virtaddrs[i] = 1519 + pci_alloc_consistent(adapter->pdev, fbr_chunksize, 1520 + &rx_ring->fbr1_mem_physaddrs[i]); 1521 + 1522 + if (!rx_ring->fbr1_mem_virtaddrs[i]) { 1523 + dev_err(&adapter->pdev->dev, 1524 + "Could not alloc memory\n"); 1525 + return -ENOMEM; 1526 + } 1527 + 1528 + /* See NOTE in "Save Physical Address" comment above */ 1529 + fbr1_tmp_physaddr = rx_ring->fbr1_mem_physaddrs[i]; 1530 + 1531 + et131x_align_allocated_memory(adapter, 1532 + &fbr1_tmp_physaddr, 1533 + &fbr1_offset, (fbr1_align - 1)); 1534 + 1535 + for (j = 0; j < FBR_CHUNKS; j++) { 1536 + u32 index = (i * FBR_CHUNKS) + j; 1537 + 1538 + /* Save the Virtual address of this index for quick 1539 + * access later 1540 + */ 1541 + rx_ring->fbr[1]->virt[index] = 1542 + (u8 *) rx_ring->fbr1_mem_virtaddrs[i] + 1543 + (j * rx_ring->fbr1_buffsize) + 
fbr1_offset; 1544 + 1545 + /* now store the physical address in the descriptor 1546 + * so the device can access it 1547 + */ 1548 + rx_ring->fbr[1]->bus_high[index] = 1549 + (u32) (fbr1_tmp_physaddr >> 32); 1550 + rx_ring->fbr[1]->bus_low[index] = 1551 + (u32) fbr1_tmp_physaddr; 1552 + 1553 + fbr1_tmp_physaddr += rx_ring->fbr1_buffsize; 1554 + 1555 + rx_ring->fbr[1]->buffer1[index] = 1556 + rx_ring->fbr[1]->virt[index]; 1557 + rx_ring->fbr[1]->buffer2[index] = 1558 + rx_ring->fbr[1]->virt[index] - 4; 1559 + } 1560 + } 1561 + 1562 + #ifdef USE_FBR0 1563 + /* Same for FBR0 (if in use) */ 1564 + for (i = 0; i < (rx_ring->fbr0_num_entries / FBR_CHUNKS); i++) { 1565 + u64 fbr0_offset; 1566 + u64 fbr0_tmp_physaddr; 1567 + 1568 + fbr_chunksize = 1569 + ((FBR_CHUNKS + 1) * rx_ring->fbr0_buffsize) - 1; 1570 + rx_ring->fbr0_mem_virtaddrs[i] = 1571 + pci_alloc_consistent(adapter->pdev, fbr_chunksize, 1572 + &rx_ring->fbr0_mem_physaddrs[i]); 1573 + 1574 + if (!rx_ring->fbr0_mem_virtaddrs[i]) { 1575 + dev_err(&adapter->pdev->dev, 1576 + "Could not alloc memory\n"); 1577 + return -ENOMEM; 1578 + } 1579 + 1580 + /* See NOTE in "Save Physical Address" comment above */ 1581 + fbr0_tmp_physaddr = rx_ring->fbr0_mem_physaddrs[i]; 1582 + 1583 + et131x_align_allocated_memory(adapter, 1584 + &fbr0_tmp_physaddr, 1585 + &fbr0_offset, 1586 + rx_ring->fbr0_buffsize - 1); 1587 + 1588 + for (j = 0; j < FBR_CHUNKS; j++) { 1589 + u32 index = (i * FBR_CHUNKS) + j; 1590 + 1591 + rx_ring->fbr[0]->virt[index] = 1592 + (u8 *) rx_ring->fbr0_mem_virtaddrs[i] + 1593 + (j * rx_ring->fbr0_buffsize) + fbr0_offset; 1594 + 1595 + rx_ring->fbr[0]->bus_high[index] = 1596 + (u32) (fbr0_tmp_physaddr >> 32); 1597 + rx_ring->fbr[0]->bus_low[index] = 1598 + (u32) fbr0_tmp_physaddr; 1599 + 1600 + fbr0_tmp_physaddr += rx_ring->fbr0_buffsize; 1601 + 1602 + rx_ring->fbr[0]->buffer1[index] = 1603 + rx_ring->fbr[0]->virt[index]; 1604 + rx_ring->fbr[0]->buffer2[index] = 1605 + rx_ring->fbr[0]->virt[index] - 4; 1606 + } 
1607 + } 1608 + #endif 1609 + 1610 + /* Allocate an area of memory for FIFO of Packet Status ring entries */ 1611 + pktstat_ringsize = 1612 + sizeof(struct pkt_stat_desc) * adapter->rx_ring.psr_num_entries; 1613 + 1614 + rx_ring->ps_ring_virtaddr = pci_alloc_consistent(adapter->pdev, 1615 + pktstat_ringsize, 1616 + &rx_ring->ps_ring_physaddr); 1617 + 1618 + if (!rx_ring->ps_ring_virtaddr) { 1619 + dev_err(&adapter->pdev->dev, 1620 + "Cannot alloc memory for Packet Status Ring\n"); 1621 + return -ENOMEM; 1622 + } 1623 + printk(KERN_INFO "Packet Status Ring %lx\n", 1624 + (unsigned long) rx_ring->ps_ring_physaddr); 1625 + 1626 + /* 1627 + * NOTE : pci_alloc_consistent(), used above to alloc DMA regions, 1628 + * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 1629 + * are ever returned, make sure the high part is retrieved here before 1630 + * storing the adjusted address. 1631 + */ 1632 + 1633 + /* Allocate an area of memory for writeback of status information */ 1634 + rx_ring->rx_status_block = pci_alloc_consistent(adapter->pdev, 1635 + sizeof(struct rx_status_block), 1636 + &rx_ring->rx_status_bus); 1637 + if (!rx_ring->rx_status_block) { 1638 + dev_err(&adapter->pdev->dev, 1639 + "Cannot alloc memory for Status Block\n"); 1640 + return -ENOMEM; 1641 + } 1642 + rx_ring->num_rfd = NIC_DEFAULT_NUM_RFD; 1643 + printk(KERN_INFO "PRS %lx\n", (unsigned long)rx_ring->rx_status_bus); 1644 + 1645 + /* Recv 1646 + * pci_pool_create initializes a lookaside list. After successful 1647 + * creation, nonpaged fixed-size blocks can be allocated from and 1648 + * freed to the lookaside list. 1649 + * RFDs will be allocated from this pool. 
1650 + */ 1651 + rx_ring->recv_lookaside = kmem_cache_create(adapter->netdev->name, 1652 + sizeof(struct rfd), 1653 + 0, 1654 + SLAB_CACHE_DMA | 1655 + SLAB_HWCACHE_ALIGN, 1656 + NULL); 1657 + 1658 + adapter->flags |= fMP_ADAPTER_RECV_LOOKASIDE; 1659 + 1660 + /* The RFDs are going to be put on lists later on, so initialize the 1661 + * lists now. 1662 + */ 1663 + INIT_LIST_HEAD(&rx_ring->recv_list); 1664 + return 0; 1665 + } 1666 + 1667 + /** 1668 + * et131x_rx_dma_memory_free - Free all memory allocated within this module. 1669 + * @adapter: pointer to our private adapter structure 1670 + */ 1671 + void et131x_rx_dma_memory_free(struct et131x_adapter *adapter) 1672 + { 1673 + u32 index; 1674 + u32 bufsize; 1675 + u32 pktstat_ringsize; 1676 + struct rfd *rfd; 1677 + struct rx_ring *rx_ring; 1678 + 1679 + /* Setup some convenience pointers */ 1680 + rx_ring = &adapter->rx_ring; 1681 + 1682 + /* Free RFDs and associated packet descriptors */ 1683 + WARN_ON(rx_ring->num_ready_recv != rx_ring->num_rfd); 1684 + 1685 + while (!list_empty(&rx_ring->recv_list)) { 1686 + rfd = (struct rfd *) list_entry(rx_ring->recv_list.next, 1687 + struct rfd, list_node); 1688 + 1689 + list_del(&rfd->list_node); 1690 + rfd->skb = NULL; 1691 + kmem_cache_free(adapter->rx_ring.recv_lookaside, rfd); 1692 + } 1693 + 1694 + /* Free Free Buffer Ring 1 */ 1695 + if (rx_ring->fbr1_ring_virtaddr) { 1696 + /* First the packet memory */ 1697 + for (index = 0; index < 1698 + (rx_ring->fbr1_num_entries / FBR_CHUNKS); index++) { 1699 + if (rx_ring->fbr1_mem_virtaddrs[index]) { 1700 + u32 fbr1_align; 1701 + 1702 + if (rx_ring->fbr1_buffsize > 4096) 1703 + fbr1_align = 4096; 1704 + else 1705 + fbr1_align = rx_ring->fbr1_buffsize; 1706 + 1707 + bufsize = 1708 + (rx_ring->fbr1_buffsize * FBR_CHUNKS) + 1709 + fbr1_align - 1; 1710 + 1711 + pci_free_consistent(adapter->pdev, 1712 + bufsize, 1713 + rx_ring->fbr1_mem_virtaddrs[index], 1714 + rx_ring->fbr1_mem_physaddrs[index]); 1715 + 1716 + 
				rx_ring->fbr1_mem_virtaddrs[index] = NULL;
			}
		}

		/* Now the FIFO itself; undo the alignment offset applied at
		 * allocation time before handing the pointer back.
		 */
		rx_ring->fbr1_ring_virtaddr = (void *)((u8 *)
			rx_ring->fbr1_ring_virtaddr - rx_ring->fbr1_offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr1_num_entries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev, bufsize,
				    rx_ring->fbr1_ring_virtaddr,
				    rx_ring->fbr1_ring_physaddr);

		rx_ring->fbr1_ring_virtaddr = NULL;
	}

#ifdef USE_FBR0
	/* Now the same for Free Buffer Ring 0 */
	if (rx_ring->fbr0_ring_virtaddr) {
		/* First the packet memory */
		for (index = 0; index <
		     (rx_ring->fbr0_num_entries / FBR_CHUNKS); index++) {
			if (rx_ring->fbr0_mem_virtaddrs[index]) {
				bufsize =
				    (rx_ring->fbr0_buffsize *
				     (FBR_CHUNKS + 1)) - 1;

				pci_free_consistent(adapter->pdev,
					bufsize,
					rx_ring->fbr0_mem_virtaddrs[index],
					rx_ring->fbr0_mem_physaddrs[index]);

				rx_ring->fbr0_mem_virtaddrs[index] = NULL;
			}
		}

		/* Now the FIFO itself */
		rx_ring->fbr0_ring_virtaddr = (void *)((u8 *)
			rx_ring->fbr0_ring_virtaddr - rx_ring->fbr0_offset);

		bufsize = (sizeof(struct fbr_desc) * rx_ring->fbr0_num_entries)
							    + 0xfff;

		pci_free_consistent(adapter->pdev,
				    bufsize,
				    rx_ring->fbr0_ring_virtaddr,
				    rx_ring->fbr0_ring_physaddr);

		rx_ring->fbr0_ring_virtaddr = NULL;
	}
#endif

	/* Free Packet Status Ring */
	if (rx_ring->ps_ring_virtaddr) {
		pktstat_ringsize =
		    sizeof(struct pkt_stat_desc) *
		    adapter->rx_ring.psr_num_entries;

		pci_free_consistent(adapter->pdev, pktstat_ringsize,
				    rx_ring->ps_ring_virtaddr,
				    rx_ring->ps_ring_physaddr);

		rx_ring->ps_ring_virtaddr = NULL;
	}

	/* Free area of memory for the writeback of status information */
	if (rx_ring->rx_status_block) {
		pci_free_consistent(adapter->pdev,
			sizeof(struct rx_status_block),
			rx_ring->rx_status_block, rx_ring->rx_status_bus);
		rx_ring->rx_status_block = NULL;
	}

	/* Free receive buffer pool */

	/* Free receive packet pool */

	/* Destroy the lookaside (RFD) pool */
	if (adapter->flags & fMP_ADAPTER_RECV_LOOKASIDE) {
		kmem_cache_destroy(rx_ring->recv_lookaside);
		adapter->flags &= ~fMP_ADAPTER_RECV_LOOKASIDE;
	}

	/* Free the FBR Lookup Table */
#ifdef USE_FBR0
	kfree(rx_ring->fbr[0]);
#endif

	kfree(rx_ring->fbr[1]);

	/* Reset Counters */
	rx_ring->num_ready_recv = 0;
}

/**
 * et131x_init_recv - Initialize receive data structures.
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success and errno on failure (as defined in errno.h)
 */
int et131x_init_recv(struct et131x_adapter *adapter)
{
	int status = -ENOMEM;
	struct rfd *rfd = NULL;
	u32 rfdct;
	u32 numrfd = 0;
	struct rx_ring *rx_ring;

	/* Setup some convenience pointers */
	rx_ring = &adapter->rx_ring;

	/* Setup each RFD */
	for (rfdct = 0; rfdct < rx_ring->num_rfd; rfdct++) {
		rfd = kmem_cache_alloc(rx_ring->recv_lookaside,
				       GFP_ATOMIC | GFP_DMA);

		if (!rfd) {
			dev_err(&adapter->pdev->dev,
				  "Couldn't alloc RFD out of kmem_cache\n");
			status = -ENOMEM;
			continue;
		}

		rfd->skb = NULL;

		/* Add this RFD to the recv_list */
		list_add_tail(&rfd->list_node, &rx_ring->recv_list);

		/* Increment both the available RFD's, and the total RFD's.
*/ 1847 + rx_ring->num_ready_recv++; 1848 + numrfd++; 1849 + } 1850 + 1851 + if (numrfd > NIC_MIN_NUM_RFD) 1852 + status = 0; 1853 + 1854 + rx_ring->num_rfd = numrfd; 1855 + 1856 + if (status != 0) { 1857 + kmem_cache_free(rx_ring->recv_lookaside, rfd); 1858 + dev_err(&adapter->pdev->dev, 1859 + "Allocation problems in et131x_init_recv\n"); 1860 + } 1861 + return status; 1862 + } 1863 + 1864 + /** 1865 + * et131x_config_rx_dma_regs - Start of Rx_DMA init sequence 1866 + * @adapter: pointer to our adapter structure 1867 + */ 1868 + void et131x_config_rx_dma_regs(struct et131x_adapter *adapter) 1869 + { 1870 + struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma; 1871 + struct rx_ring *rx_local = &adapter->rx_ring; 1872 + struct fbr_desc *fbr_entry; 1873 + u32 entry; 1874 + u32 psr_num_des; 1875 + unsigned long flags; 1876 + 1877 + /* Halt RXDMA to perform the reconfigure. */ 1878 + et131x_rx_dma_disable(adapter); 1879 + 1880 + /* Load the completion writeback physical address 1881 + * 1882 + * NOTE : pci_alloc_consistent(), used above to alloc DMA regions, 1883 + * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 1884 + * are ever returned, make sure the high part is retrieved here 1885 + * before storing the adjusted address. 
 */
	writel((u32) ((u64)rx_local->rx_status_bus >> 32),
	       &rx_dma->dma_wb_base_hi);
	writel((u32) rx_local->rx_status_bus, &rx_dma->dma_wb_base_lo);

	memset(rx_local->rx_status_block, 0, sizeof(struct rx_status_block));

	/* Set the address and parameters of the packet status ring into the
	 * 1310's registers
	 */
	writel((u32) ((u64)rx_local->ps_ring_physaddr >> 32),
	       &rx_dma->psr_base_hi);
	writel((u32) rx_local->ps_ring_physaddr, &rx_dma->psr_base_lo);
	writel(rx_local->psr_num_entries - 1, &rx_dma->psr_num_des);
	writel(0, &rx_dma->psr_full_offset);

	/* Low-water mark: interrupt when the PSR is this close to full */
	psr_num_des = readl(&rx_dma->psr_num_des) & 0xFFF;
	writel((psr_num_des * LO_MARK_PERCENT_FOR_PSR) / 100,
	       &rx_dma->psr_min_des);

	spin_lock_irqsave(&adapter->rcv_lock, flags);

	/* These local variables track the PSR in the adapter structure */
	rx_local->local_psr_full = 0;

	/* Now's the best time to initialize FBR1 contents */
	fbr_entry = (struct fbr_desc *) rx_local->fbr1_ring_virtaddr;
	for (entry = 0; entry < rx_local->fbr1_num_entries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[1]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[1]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	/* Set the address and parameters of Free buffer ring 1 (and 0 if
	 * required) into the 1310's registers
	 */
	writel((u32) (rx_local->fbr1_real_physaddr >> 32),
	       &rx_dma->fbr1_base_hi);
	writel((u32) rx_local->fbr1_real_physaddr, &rx_dma->fbr1_base_lo);
	writel(rx_local->fbr1_num_entries - 1, &rx_dma->fbr1_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr1_full_offset);

	/* This variable tracks the free buffer ring 1 full position, so it
	 * has to match the above.
	 */
	rx_local->local_fbr1_full = ET_DMA10_WRAP;
	writel(
	    ((rx_local->fbr1_num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	    &rx_dma->fbr1_min_des);

#ifdef USE_FBR0
	/* Now's the best time to initialize FBR0 contents */
	fbr_entry = (struct fbr_desc *) rx_local->fbr0_ring_virtaddr;
	for (entry = 0; entry < rx_local->fbr0_num_entries; entry++) {
		fbr_entry->addr_hi = rx_local->fbr[0]->bus_high[entry];
		fbr_entry->addr_lo = rx_local->fbr[0]->bus_low[entry];
		fbr_entry->word2 = entry;
		fbr_entry++;
	}

	writel((u32) (rx_local->fbr0_real_physaddr >> 32),
	       &rx_dma->fbr0_base_hi);
	writel((u32) rx_local->fbr0_real_physaddr, &rx_dma->fbr0_base_lo);
	writel(rx_local->fbr0_num_entries - 1, &rx_dma->fbr0_num_des);
	writel(ET_DMA10_WRAP, &rx_dma->fbr0_full_offset);

	/* This variable tracks the free buffer ring 0 full position, so it
	 * has to match the above.
	 */
	rx_local->local_fbr0_full = ET_DMA10_WRAP;
	writel(
	    ((rx_local->fbr0_num_entries * LO_MARK_PERCENT_FOR_RX) / 100) - 1,
	    &rx_dma->fbr0_min_des);
#endif

	/* Program the number of packets we will receive before generating an
	 * interrupt.
	 * For version B silicon, this value gets updated once autoneg is
	 * complete.
	 */
	writel(PARM_RX_NUM_BUFS_DEF, &rx_dma->num_pkt_done);

	/* The "time_done" is not working correctly to coalesce interrupts
	 * after a given time period, but rather is giving us an interrupt
	 * regardless of whether we have received packets.
	 * This value gets updated once autoneg is complete.
	 */
	writel(PARM_RX_TIME_INT_DEF, &rx_dma->max_pkt_time);

	spin_unlock_irqrestore(&adapter->rcv_lock, flags);
}

/**
 * et131x_set_rx_dma_timer - Set the heartbeat timer according to line rate.
 * @adapter: pointer to our adapter structure
 */
void et131x_set_rx_dma_timer(struct et131x_adapter *adapter)
{
	struct phy_device *phydev = adapter->phydev;

	/* No PHY attached yet: link speed is unknown, leave timers alone */
	if (!phydev)
		return;

	/* For version B silicon, we do not use the RxDMA timer for 10 and 100
	 * Mbits/s line rates. We do not enable RxDMA interrupt coalescing.
	 */
	if ((phydev->speed == SPEED_100) || (phydev->speed == SPEED_10)) {
		writel(0, &adapter->regs->rxdma.max_pkt_time);
		writel(1, &adapter->regs->rxdma.num_pkt_done);
	}
}

/**
 * nic_return_rfd - Recycle a RFD and put it back onto the receive list
 * @adapter: pointer to our adapter
 * @rfd: pointer to the RFD
 *
 * Pushes the buffer described by @rfd back onto the appropriate free
 * buffer ring (FBR0 or FBR1) and then re-queues the RFD itself on the
 * tail of the receive list so it can be reused.
 */
static void nic_return_rfd(struct et131x_adapter *adapter, struct rfd *rfd)
{
	struct rx_ring *rx_local = &adapter->rx_ring;
	struct rxdma_regs __iomem *rx_dma = &adapter->regs->rxdma;
	u16 buff_index = rfd->bufferindex;
	u8 ring_index = rfd->ringindex;
	unsigned long flags;

	/* We don't use any of the OOB data besides status. Otherwise, we
	 * need to clean up OOB data
	 */
	if (
#ifdef USE_FBR0
	    (ring_index == 0 && buff_index < rx_local->fbr0_num_entries) ||
#endif
	    (ring_index == 1 && buff_index < rx_local->fbr1_num_entries)) {
		spin_lock_irqsave(&adapter->fbr_lock, flags);

		if (ring_index == 1) {
			struct fbr_desc *next =
			    (struct fbr_desc *) (rx_local->fbr1_ring_virtaddr) +
					 INDEX10(rx_local->local_fbr1_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[1]->bus_high[buff_index];
			next->addr_lo = rx_local->fbr[1]->bus_low[buff_index];
			next->word2 = buff_index;

			/* Tell hardware about the newly freed entry AFTER the
			 * descriptor has been filled in.
			 */
			writel(bump_free_buff_ring(&rx_local->local_fbr1_full,
					rx_local->fbr1_num_entries - 1),
			       &rx_dma->fbr1_full_offset);
		}
#ifdef USE_FBR0
		else {
			struct fbr_desc *next = (struct fbr_desc *)
				rx_local->fbr0_ring_virtaddr +
					INDEX10(rx_local->local_fbr0_full);

			/* Handle the Free Buffer Ring advancement here. Write
			 * the PA / Buffer Index for the returned buffer into
			 * the oldest (next to be freed) FBR entry
			 */
			next->addr_hi = rx_local->fbr[0]->bus_high[buff_index];
			next->addr_lo = rx_local->fbr[0]->bus_low[buff_index];
			next->word2 = buff_index;

			writel(bump_free_buff_ring(&rx_local->local_fbr0_full,
					rx_local->fbr0_num_entries - 1),
			       &rx_dma->fbr0_full_offset);
		}
#endif
		spin_unlock_irqrestore(&adapter->fbr_lock, flags);
	} else {
		dev_err(&adapter->pdev->dev,
			"%s illegal Buffer Index returned\n", __func__);
	}

	/* The processing on this RFD is done, so put it back on the tail of
	 * our list
	 */
	spin_lock_irqsave(&adapter->rcv_lock, flags);
	list_add_tail(&rfd->list_node, &rx_local->recv_list);
	rx_local->num_ready_recv++;
	spin_unlock_irqrestore(&adapter->rcv_lock, flags);

	WARN_ON(rx_local->num_ready_recv > rx_local->num_rfd);
}

/**
 * et131x_rx_dma_disable - Stop of Rx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
void et131x_rx_dma_disable(struct et131x_adapter *adapter)
{
	u32 csr;
	/* Setup the receive dma configuration register */
writel(0x00002001, &adapter->regs->rxdma.csr); 2084 + csr = readl(&adapter->regs->rxdma.csr); 2085 + if ((csr & 0x00020000) == 0) { /* Check halt status (bit 17) */ 2086 + udelay(5); 2087 + csr = readl(&adapter->regs->rxdma.csr); 2088 + if ((csr & 0x00020000) == 0) 2089 + dev_err(&adapter->pdev->dev, 2090 + "RX Dma failed to enter halt state. CSR 0x%08x\n", 2091 + csr); 2092 + } 2093 + } 2094 + 2095 + /** 2096 + * et131x_rx_dma_enable - re-start of Rx_DMA on the ET1310. 2097 + * @adapter: pointer to our adapter structure 2098 + */ 2099 + void et131x_rx_dma_enable(struct et131x_adapter *adapter) 2100 + { 2101 + /* Setup the receive dma configuration register for normal operation */ 2102 + u32 csr = 0x2000; /* FBR1 enable */ 2103 + 2104 + if (adapter->rx_ring.fbr1_buffsize == 4096) 2105 + csr |= 0x0800; 2106 + else if (adapter->rx_ring.fbr1_buffsize == 8192) 2107 + csr |= 0x1000; 2108 + else if (adapter->rx_ring.fbr1_buffsize == 16384) 2109 + csr |= 0x1800; 2110 + #ifdef USE_FBR0 2111 + csr |= 0x0400; /* FBR0 enable */ 2112 + if (adapter->rx_ring.fbr0_buffsize == 256) 2113 + csr |= 0x0100; 2114 + else if (adapter->rx_ring.fbr0_buffsize == 512) 2115 + csr |= 0x0200; 2116 + else if (adapter->rx_ring.fbr0_buffsize == 1024) 2117 + csr |= 0x0300; 2118 + #endif 2119 + writel(csr, &adapter->regs->rxdma.csr); 2120 + 2121 + csr = readl(&adapter->regs->rxdma.csr); 2122 + if ((csr & 0x00020000) != 0) { 2123 + udelay(5); 2124 + csr = readl(&adapter->regs->rxdma.csr); 2125 + if ((csr & 0x00020000) != 0) { 2126 + dev_err(&adapter->pdev->dev, 2127 + "RX Dma failed to exit halt state. CSR 0x%08x\n", 2128 + csr); 2129 + } 2130 + } 2131 + } 2132 + 2133 + /** 2134 + * nic_rx_pkts - Checks the hardware for available packets 2135 + * @adapter: pointer to our adapter 2136 + * 2137 + * Returns rfd, a pointer to our MPRFD. 
2138 + * 2139 + * Checks the hardware for available packets, using completion ring 2140 + * If packets are available, it gets an RFD from the recv_list, attaches 2141 + * the packet to it, puts the RFD in the RecvPendList, and also returns 2142 + * the pointer to the RFD. 2143 + */ 2144 + static struct rfd *nic_rx_pkts(struct et131x_adapter *adapter) 2145 + { 2146 + struct rx_ring *rx_local = &adapter->rx_ring; 2147 + struct rx_status_block *status; 2148 + struct pkt_stat_desc *psr; 2149 + struct rfd *rfd; 2150 + u32 i; 2151 + u8 *buf; 2152 + unsigned long flags; 2153 + struct list_head *element; 2154 + u8 ring_index; 2155 + u16 buff_index; 2156 + u32 len; 2157 + u32 word0; 2158 + u32 word1; 2159 + 2160 + /* RX Status block is written by the DMA engine prior to every 2161 + * interrupt. It contains the next to be used entry in the Packet 2162 + * Status Ring, and also the two Free Buffer rings. 2163 + */ 2164 + status = rx_local->rx_status_block; 2165 + word1 = status->word1 >> 16; /* Get the useful bits */ 2166 + 2167 + /* Check the PSR and wrap bits do not match */ 2168 + if ((word1 & 0x1FFF) == (rx_local->local_psr_full & 0x1FFF)) 2169 + /* Looks like this ring is not updated yet */ 2170 + return NULL; 2171 + 2172 + /* The packet status ring indicates that data is available. */ 2173 + psr = (struct pkt_stat_desc *) (rx_local->ps_ring_virtaddr) + 2174 + (rx_local->local_psr_full & 0xFFF); 2175 + 2176 + /* Grab any information that is required once the PSR is 2177 + * advanced, since we can no longer rely on the memory being 2178 + * accurate 2179 + */ 2180 + len = psr->word1 & 0xFFFF; 2181 + ring_index = (psr->word1 >> 26) & 0x03; 2182 + buff_index = (psr->word1 >> 16) & 0x3FF; 2183 + word0 = psr->word0; 2184 + 2185 + /* Indicate that we have used this PSR entry. 
*/ 2186 + /* FIXME wrap 12 */ 2187 + add_12bit(&rx_local->local_psr_full, 1); 2188 + if ( 2189 + (rx_local->local_psr_full & 0xFFF) > rx_local->psr_num_entries - 1) { 2190 + /* Clear psr full and toggle the wrap bit */ 2191 + rx_local->local_psr_full &= ~0xFFF; 2192 + rx_local->local_psr_full ^= 0x1000; 2193 + } 2194 + 2195 + writel(rx_local->local_psr_full, 2196 + &adapter->regs->rxdma.psr_full_offset); 2197 + 2198 + #ifndef USE_FBR0 2199 + if (ring_index != 1) 2200 + return NULL; 2201 + #endif 2202 + 2203 + #ifdef USE_FBR0 2204 + if (ring_index > 1 || 2205 + (ring_index == 0 && 2206 + buff_index > rx_local->fbr0_num_entries - 1) || 2207 + (ring_index == 1 && 2208 + buff_index > rx_local->fbr1_num_entries - 1)) 2209 + #else 2210 + if (ring_index != 1 || buff_index > rx_local->fbr1_num_entries - 1) 2211 + #endif 2212 + { 2213 + /* Illegal buffer or ring index cannot be used by S/W*/ 2214 + dev_err(&adapter->pdev->dev, 2215 + "NICRxPkts PSR Entry %d indicates " 2216 + "length of %d and/or bad bi(%d)\n", 2217 + rx_local->local_psr_full & 0xFFF, 2218 + len, buff_index); 2219 + return NULL; 2220 + } 2221 + 2222 + /* Get and fill the RFD. */ 2223 + spin_lock_irqsave(&adapter->rcv_lock, flags); 2224 + 2225 + rfd = NULL; 2226 + element = rx_local->recv_list.next; 2227 + rfd = (struct rfd *) list_entry(element, struct rfd, list_node); 2228 + 2229 + if (rfd == NULL) { 2230 + spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2231 + return NULL; 2232 + } 2233 + 2234 + list_del(&rfd->list_node); 2235 + rx_local->num_ready_recv--; 2236 + 2237 + spin_unlock_irqrestore(&adapter->rcv_lock, flags); 2238 + 2239 + rfd->bufferindex = buff_index; 2240 + rfd->ringindex = ring_index; 2241 + 2242 + /* In V1 silicon, there is a bug which screws up filtering of 2243 + * runt packets. Therefore runt packet filtering is disabled 2244 + * in the MAC and the packets are dropped here. They are 2245 + * also counted here. 
2246 + */ 2247 + if (len < (NIC_MIN_PACKET_SIZE + 4)) { 2248 + adapter->stats.rx_other_errs++; 2249 + len = 0; 2250 + } 2251 + 2252 + if (len) { 2253 + /* Determine if this is a multicast packet coming in */ 2254 + if ((word0 & ALCATEL_MULTICAST_PKT) && 2255 + !(word0 & ALCATEL_BROADCAST_PKT)) { 2256 + /* Promiscuous mode and Multicast mode are 2257 + * not mutually exclusive as was first 2258 + * thought. I guess Promiscuous is just 2259 + * considered a super-set of the other 2260 + * filters. Generally filter is 0x2b when in 2261 + * promiscuous mode. 2262 + */ 2263 + if ((adapter->packet_filter & 2264 + ET131X_PACKET_TYPE_MULTICAST) 2265 + && !(adapter->packet_filter & 2266 + ET131X_PACKET_TYPE_PROMISCUOUS) 2267 + && !(adapter->packet_filter & 2268 + ET131X_PACKET_TYPE_ALL_MULTICAST)) { 2269 + buf = rx_local->fbr[ring_index]-> 2270 + virt[buff_index]; 2271 + 2272 + /* Loop through our list to see if the 2273 + * destination address of this packet 2274 + * matches one in our list. 2275 + */ 2276 + for (i = 0; i < adapter->multicast_addr_count; 2277 + i++) { 2278 + if (buf[0] == 2279 + adapter->multicast_list[i][0] 2280 + && buf[1] == 2281 + adapter->multicast_list[i][1] 2282 + && buf[2] == 2283 + adapter->multicast_list[i][2] 2284 + && buf[3] == 2285 + adapter->multicast_list[i][3] 2286 + && buf[4] == 2287 + adapter->multicast_list[i][4] 2288 + && buf[5] == 2289 + adapter->multicast_list[i][5]) { 2290 + break; 2291 + } 2292 + } 2293 + 2294 + /* If our index is equal to the number 2295 + * of Multicast address we have, then 2296 + * this means we did not find this 2297 + * packet's matching address in our 2298 + * list. Set the len to zero, 2299 + * so we free our RFD when we return 2300 + * from this function. 
2301 + */ 2302 + if (i == adapter->multicast_addr_count) 2303 + len = 0; 2304 + } 2305 + 2306 + if (len > 0) 2307 + adapter->stats.multicast_pkts_rcvd++; 2308 + } else if (word0 & ALCATEL_BROADCAST_PKT) 2309 + adapter->stats.broadcast_pkts_rcvd++; 2310 + else 2311 + /* Not sure what this counter measures in 2312 + * promiscuous mode. Perhaps we should check 2313 + * the MAC address to see if it is directed 2314 + * to us in promiscuous mode. 2315 + */ 2316 + adapter->stats.unicast_pkts_rcvd++; 2317 + } 2318 + 2319 + if (len > 0) { 2320 + struct sk_buff *skb = NULL; 2321 + 2322 + /*rfd->len = len - 4; */ 2323 + rfd->len = len; 2324 + 2325 + skb = dev_alloc_skb(rfd->len + 2); 2326 + if (!skb) { 2327 + dev_err(&adapter->pdev->dev, 2328 + "Couldn't alloc an SKB for Rx\n"); 2329 + return NULL; 2330 + } 2331 + 2332 + adapter->net_stats.rx_bytes += rfd->len; 2333 + 2334 + memcpy(skb_put(skb, rfd->len), 2335 + rx_local->fbr[ring_index]->virt[buff_index], 2336 + rfd->len); 2337 + 2338 + skb->dev = adapter->netdev; 2339 + skb->protocol = eth_type_trans(skb, adapter->netdev); 2340 + skb->ip_summed = CHECKSUM_NONE; 2341 + 2342 + netif_rx(skb); 2343 + } else { 2344 + rfd->len = 0; 2345 + } 2346 + 2347 + nic_return_rfd(adapter, rfd); 2348 + return rfd; 2349 + } 2350 + 2351 + /** 2352 + * et131x_reset_recv - Reset the receive list 2353 + * @adapter: pointer to our adapter 2354 + * 2355 + * Assumption, Rcv spinlock has been acquired. 2356 + */ 2357 + void et131x_reset_recv(struct et131x_adapter *adapter) 2358 + { 2359 + WARN_ON(list_empty(&adapter->rx_ring.recv_list)); 2360 + } 2361 + 2362 + /** 2363 + * et131x_handle_recv_interrupt - Interrupt handler for receive processing 2364 + * @adapter: pointer to our adapter 2365 + * 2366 + * Assumption, Rcv spinlock has been acquired. 
2367 + */ 2368 + void et131x_handle_recv_interrupt(struct et131x_adapter *adapter) 2369 + { 2370 + struct rfd *rfd = NULL; 2371 + u32 count = 0; 2372 + bool done = true; 2373 + 2374 + /* Process up to available RFD's */ 2375 + while (count < NUM_PACKETS_HANDLED) { 2376 + if (list_empty(&adapter->rx_ring.recv_list)) { 2377 + WARN_ON(adapter->rx_ring.num_ready_recv != 0); 2378 + done = false; 2379 + break; 2380 + } 2381 + 2382 + rfd = nic_rx_pkts(adapter); 2383 + 2384 + if (rfd == NULL) 2385 + break; 2386 + 2387 + /* Do not receive any packets until a filter has been set. 2388 + * Do not receive any packets until we have link. 2389 + * If length is zero, return the RFD in order to advance the 2390 + * Free buffer ring. 2391 + */ 2392 + if (!adapter->packet_filter || 2393 + !netif_carrier_ok(adapter->netdev) || 2394 + rfd->len == 0) 2395 + continue; 2396 + 2397 + /* Increment the number of packets we received */ 2398 + adapter->net_stats.rx_packets++; 2399 + 2400 + /* Set the status on the packet, either resources or success */ 2401 + if (adapter->rx_ring.num_ready_recv < RFD_LOW_WATER_MARK) { 2402 + dev_warn(&adapter->pdev->dev, 2403 + "RFD's are running out\n"); 2404 + } 2405 + count++; 2406 + } 2407 + 2408 + if (count == NUM_PACKETS_HANDLED || !done) { 2409 + adapter->rx_ring.unfinished_receives = true; 2410 + writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO, 2411 + &adapter->regs->global.watchdog_timer); 2412 + } else 2413 + /* Watchdog timer will disable itself if appropriate. */ 2414 + adapter->rx_ring.unfinished_receives = false; 2415 + } 2416 + 2417 + /* TX functions */ 2418 + 2419 + /** 2420 + * et131x_tx_dma_memory_alloc 2421 + * @adapter: pointer to our private adapter structure 2422 + * 2423 + * Returns 0 on success and errno on failure (as defined in errno.h). 2424 + * 2425 + * Allocates memory that will be visible both to the device and to the CPU. 2426 + * The OS will pass us packets, pointers to which we will insert in the Tx 2427 + * Descriptor queue. 
The device will read this queue to find the packets in 2428 + * memory. The device will update the "status" in memory each time it xmits a 2429 + * packet. 2430 + */ 2431 + int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter) 2432 + { 2433 + int desc_size = 0; 2434 + struct tx_ring *tx_ring = &adapter->tx_ring; 2435 + 2436 + /* Allocate memory for the TCB's (Transmit Control Block) */ 2437 + adapter->tx_ring.tcb_ring = 2438 + kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA); 2439 + if (!adapter->tx_ring.tcb_ring) { 2440 + dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n"); 2441 + return -ENOMEM; 2442 + } 2443 + 2444 + /* Allocate enough memory for the Tx descriptor ring, and allocate 2445 + * some extra so that the ring can be aligned on a 4k boundary. 2446 + */ 2447 + desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) + 4096 - 1; 2448 + tx_ring->tx_desc_ring = 2449 + (struct tx_desc *) pci_alloc_consistent(adapter->pdev, desc_size, 2450 + &tx_ring->tx_desc_ring_pa); 2451 + if (!adapter->tx_ring.tx_desc_ring) { 2452 + dev_err(&adapter->pdev->dev, 2453 + "Cannot alloc memory for Tx Ring\n"); 2454 + return -ENOMEM; 2455 + } 2456 + 2457 + /* Save physical address 2458 + * 2459 + * NOTE: pci_alloc_consistent(), used above to alloc DMA regions, 2460 + * ALWAYS returns SAC (32-bit) addresses. If DAC (64-bit) addresses 2461 + * are ever returned, make sure the high part is retrieved here before 2462 + * storing the adjusted address. 
2463 + */ 2464 + /* Allocate memory for the Tx status block */ 2465 + tx_ring->tx_status = pci_alloc_consistent(adapter->pdev, 2466 + sizeof(u32), 2467 + &tx_ring->tx_status_pa); 2468 + if (!adapter->tx_ring.tx_status_pa) { 2469 + dev_err(&adapter->pdev->dev, 2470 + "Cannot alloc memory for Tx status block\n"); 2471 + return -ENOMEM; 2472 + } 2473 + return 0; 2474 + } 2475 + 2476 + /** 2477 + * et131x_tx_dma_memory_free - Free all memory allocated within this module 2478 + * @adapter: pointer to our private adapter structure 2479 + * 2480 + * Returns 0 on success and errno on failure (as defined in errno.h). 2481 + */ 2482 + void et131x_tx_dma_memory_free(struct et131x_adapter *adapter) 2483 + { 2484 + int desc_size = 0; 2485 + 2486 + if (adapter->tx_ring.tx_desc_ring) { 2487 + /* Free memory relating to Tx rings here */ 2488 + desc_size = (sizeof(struct tx_desc) * NUM_DESC_PER_RING_TX) 2489 + + 4096 - 1; 2490 + pci_free_consistent(adapter->pdev, 2491 + desc_size, 2492 + adapter->tx_ring.tx_desc_ring, 2493 + adapter->tx_ring.tx_desc_ring_pa); 2494 + adapter->tx_ring.tx_desc_ring = NULL; 2495 + } 2496 + 2497 + /* Free memory for the Tx status block */ 2498 + if (adapter->tx_ring.tx_status) { 2499 + pci_free_consistent(adapter->pdev, 2500 + sizeof(u32), 2501 + adapter->tx_ring.tx_status, 2502 + adapter->tx_ring.tx_status_pa); 2503 + 2504 + adapter->tx_ring.tx_status = NULL; 2505 + } 2506 + /* Free the memory for the tcb structures */ 2507 + kfree(adapter->tx_ring.tcb_ring); 2508 + } 2509 + 2510 + /** 2511 + * et131x_config_tx_dma_regs - Set up the tx dma section of the JAGCore. 2512 + * @adapter: pointer to our private adapter structure 2513 + * 2514 + * Configure the transmit engine with the ring buffers we have created 2515 + * and prepare it for use. 
 */
void et131x_config_tx_dma_regs(struct et131x_adapter *adapter)
{
	struct txdma_regs __iomem *txdma = &adapter->regs->txdma;

	/* Load the hardware with the start of the transmit descriptor ring. */
	writel((u32) ((u64)adapter->tx_ring.tx_desc_ring_pa >> 32),
	       &txdma->pr_base_hi);
	writel((u32) adapter->tx_ring.tx_desc_ring_pa,
	       &txdma->pr_base_lo);

	/* Initialise the transmit DMA engine */
	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des);

	/* Load the completion writeback physical address */
	writel((u32)((u64)adapter->tx_ring.tx_status_pa >> 32),
	       &txdma->dma_wb_base_hi);
	writel((u32)adapter->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);

	/* Clear the completion writeback word before enabling service */
	*adapter->tx_ring.tx_status = 0;

	writel(0, &txdma->service_request);
	adapter->tx_ring.send_idx = 0;
}

/**
 * et131x_tx_dma_disable - Stop of Tx_DMA on the ET1310
 * @adapter: pointer to our adapter structure
 */
void et131x_tx_dma_disable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register */
	writel(ET_TXDMA_CSR_HALT|ET_TXDMA_SNGL_EPKT,
	       &adapter->regs->txdma.csr);
}

/**
 * et131x_tx_dma_enable - re-start of Tx_DMA on the ET1310.
 * @adapter: pointer to our adapter structure
 *
 * Mainly used after a return to the D0 (full-power) state from a lower state.
 */
void et131x_tx_dma_enable(struct et131x_adapter *adapter)
{
	/* Setup the transmit dma configuration register for normal
	 * operation
	 */
	writel(ET_TXDMA_SNGL_EPKT|(PARM_DMA_CACHE_DEF << ET_TXDMA_CACHE_SHIFT),
	       &adapter->regs->txdma.csr);
}

/**
 * et131x_init_send - Initialize send data structures
 * @adapter: pointer to our private adapter structure
 *
 * Zeroes the TCB array, links the TCBs into a singly-linked free list
 * (tcb_qhead..tcb_qtail), and empties the in-flight send queue.
 */
void et131x_init_send(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	u32 ct;
	struct tx_ring *tx_ring;

	/* Setup some convenience pointers */
	tx_ring = &adapter->tx_ring;
	tcb = adapter->tx_ring.tcb_ring;

	tx_ring->tcb_qhead = tcb;

	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);

	/* Go through and set up each TCB */
	for (ct = 0; ct++ < NUM_TCB; tcb++)
		/* Set the link pointer in HW TCB to the next TCB in the
		 * chain
		 */
		tcb->next = tcb + 1;

	/* Set the tail pointer: the loop leaves tcb one past the last
	 * element, so step back and terminate the list.
	 */
	tcb--;
	tx_ring->tcb_qtail = tcb;
	tcb->next = NULL;
	/* Curr send queue should now be empty */
	tx_ring->send_head = NULL;
	tx_ring->send_tail = NULL;
}

/**
 * nic_send_packet - NIC specific send handler for version B silicon.
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Returns 0 or errno.
 */
static int nic_send_packet(struct et131x_adapter *adapter, struct tcb *tcb)
{
	u32 i;
	struct tx_desc desc[24];	/* 24 x 16 byte */
	u32 frag = 0;
	u32 thiscopy, remainder;
	struct sk_buff *skb = tcb->skb;
	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
	unsigned long flags;
	struct phy_device *phydev = adapter->phydev;

	/* Part of the optimizations of this send routine restrict us to
	 * sending 24 fragments at a pass. In practice we should never see
	 * more than 5 fragments.
	 *
	 * NOTE: The older version of this function (below) can handle any
	 * number of fragments. If needed, we can call this function,
	 * although it is less efficient.
	 */
	if (nr_frags > 23)
		return -EIO;

	memset(desc, 0, sizeof(struct tx_desc) * (nr_frags + 1));

	for (i = 0; i < nr_frags; i++) {
		/* If there is something in this element, lets get a
		 * descriptor from the ring and get the necessary data
		 */
		if (i == 0) {
			/* If the fragments are smaller than a standard MTU,
			 * then map them to a single descriptor in the Tx
			 * Desc ring. However, if they're larger, as is
			 * possible with support for jumbo packets, then
			 * split them each across 2 descriptors.
			 *
			 * This will work until we determine why the hardware
			 * doesn't seem to like large fragments.
			 */
			if ((skb->len - skb->data_len) <= 1514) {
				desc[frag].addr_hi = 0;
				/* Low 16bits are length, high is vlan and
				   unused currently so zero */
				desc[frag].len_vlan =
					skb->len - skb->data_len;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data,
						       skb->len -
						       skb->data_len,
						       PCI_DMA_TODEVICE);
			} else {
				/* Jumbo linear data: split into two equal
				 * halves, one descriptor each.
				 */
				desc[frag].addr_hi = 0;
				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;

				/* NOTE: Here, the dma_addr_t returned from
				 * pci_map_single() is implicitly cast as a
				 * u32. Although dma_addr_t can be
				 * 64-bit, the address returned by
				 * pci_map_single() is always 32-bit
				 * addressable (as defined by the pci/dma
				 * subsystem)
				 */
				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data,
						       ((skb->len -
							 skb->data_len) / 2),
						       PCI_DMA_TODEVICE);
				desc[frag].addr_hi = 0;

				desc[frag].len_vlan =
					(skb->len - skb->data_len) / 2;

				desc[frag++].addr_lo =
					pci_map_single(adapter->pdev,
						       skb->data +
						       ((skb->len -
							 skb->data_len) / 2),
						       ((skb->len -
							 skb->data_len) / 2),
						       PCI_DMA_TODEVICE);
			}
		} else {
			/* Paged fragment i-1 maps to one descriptor */
			desc[frag].addr_hi = 0;
			desc[frag].len_vlan =
				frags[i - 1].size;

			/* NOTE: Here, the dma_addr_t returned from
			 * pci_map_page() is implicitly cast as a u32.
			 * Although dma_addr_t can be 64-bit, the address
			 * returned by pci_map_page() is always 32-bit
			 * addressable (as defined by the pci/dma subsystem)
			 */
			desc[frag++].addr_lo =
				pci_map_page(adapter->pdev,
					     frags[i - 1].page,
					     frags[i - 1].page_offset,
					     frags[i - 1].size,
					     PCI_DMA_TODEVICE);
		}
	}

	if (frag == 0)
		return -EIO;

	/* Flag values: bit0 = last element, bit2 = generate interrupt.
	 * At gigabit, only interrupt every PARM_TX_NUM_BUFS_DEF packets
	 * (Tx interrupt coalescing); otherwise interrupt on every packet.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		if (++adapter->tx_ring.since_irq == PARM_TX_NUM_BUFS_DEF) {
			/* Last element & Interrupt flag */
			desc[frag - 1].flags = 0x5;
			adapter->tx_ring.since_irq = 0;
		} else { /* Last element */
			desc[frag - 1].flags = 0x1;
		}
	} else
		desc[frag - 1].flags = 0x5;

	desc[0].flags |= 2;	/* First element flag */

	tcb->index_start = adapter->tx_ring.send_idx;
	tcb->stale = 0;

	spin_lock_irqsave(&adapter->send_hw_lock, flags);

	/* Copy descriptors into the ring; split the copy in two when it
	 * would run past the end of the ring (wrap-around).
	 */
	thiscopy = NUM_DESC_PER_RING_TX -
				INDEX10(adapter->tx_ring.send_idx);

	if (thiscopy >= frag) {
		remainder = 0;
		thiscopy = frag;
	} else {
		remainder = frag - thiscopy;
	}

	memcpy(adapter->tx_ring.tx_desc_ring +
	       INDEX10(adapter->tx_ring.send_idx), desc,
	       sizeof(struct tx_desc) * thiscopy);

	add_10bit(&adapter->tx_ring.send_idx, thiscopy);

	if (INDEX10(adapter->tx_ring.send_idx) == 0 ||
	    INDEX10(adapter->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
		adapter->tx_ring.send_idx &= ~ET_DMA10_MASK;
		adapter->tx_ring.send_idx ^= ET_DMA10_WRAP;
	}

	if (remainder) {
		memcpy(adapter->tx_ring.tx_desc_ring,
		       desc + thiscopy,
		       sizeof(struct tx_desc) * remainder);

		add_10bit(&adapter->tx_ring.send_idx, remainder);
	}

	if (INDEX10(adapter->tx_ring.send_idx) == 0) {
		if (adapter->tx_ring.send_idx)
			tcb->index = NUM_DESC_PER_RING_TX - 1;
		else
			tcb->index = ET_DMA10_WRAP|(NUM_DESC_PER_RING_TX - 1);
	} else
		tcb->index = adapter->tx_ring.send_idx - 1;

	spin_lock(&adapter->tcb_send_qlock);

	/* Queue this TCB on the in-flight send list */
	if (adapter->tx_ring.send_tail)
		adapter->tx_ring.send_tail->next = tcb;
	else
		adapter->tx_ring.send_head = tcb;

	adapter->tx_ring.send_tail = tcb;

	WARN_ON(tcb->next != NULL);

	adapter->tx_ring.used++;

	spin_unlock(&adapter->tcb_send_qlock);

	/* Write the new write pointer back to the device. */
	writel(adapter->tx_ring.send_idx,
	       &adapter->regs->txdma.service_request);

	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
	 * timer to wake us up if this packet isn't followed by N more.
	 */
	if (phydev && phydev->speed == SPEED_1000) {
		writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
		       &adapter->regs->global.watchdog_timer);
	}
	spin_unlock_irqrestore(&adapter->send_hw_lock, flags);

	return 0;
}

/**
 * send_packet - Do the work to send a packet
 * @skb: the packet(s) to send
 * @adapter: a pointer to the device's private adapter structure
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only.
 *
 * Assumption: Send spinlock has been acquired
 */
static int send_packet(struct sk_buff *skb, struct et131x_adapter *adapter)
{
	int status;
	struct tcb *tcb = NULL;
	u16 *shbufva;
	unsigned long flags;

	/* All packets must have at least a MAC address and a protocol type */
	if (skb->len < ETH_HLEN)
		return -EIO;

	/* Get a TCB for this packet */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	tcb = adapter->tx_ring.tcb_qhead;

	if (tcb == NULL) {
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return -ENOMEM;
	}

	adapter->tx_ring.tcb_qhead = tcb->next;

	if (adapter->tx_ring.tcb_qhead == NULL)
		adapter->tx_ring.tcb_qtail = NULL;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);

	tcb->skb = skb;

	/* Classify the destination (broadcast/multicast) from the first
	 * six bytes of the MAC header, viewed as three u16 words.
	 */
	if (skb->data != NULL && skb->len - skb->data_len >= 6) {
		shbufva = (u16 *) skb->data;

		if ((shbufva[0] == 0xffff) &&
		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
			tcb->flags |= fMP_DEST_BROAD;
		} else if ((shbufva[0] & 0x3) == 0x0001) {
			tcb->flags |= fMP_DEST_MULTI;
		}
	}

	tcb->next = NULL;

	/* Call the NIC specific send handler. */
	status = nic_send_packet(adapter, tcb);

	if (status != 0) {
		/* Hand the TCB back to the ready queue on failure */
		spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

		if (adapter->tx_ring.tcb_qtail)
			adapter->tx_ring.tcb_qtail->next = tcb;
		else
			/* Apparently ready Q is empty. */
			adapter->tx_ring.tcb_qhead = tcb;

		adapter->tx_ring.tcb_qtail = tcb;
		spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
		return status;
	}
	WARN_ON(adapter->tx_ring.used > NUM_TCB);
	return 0;
}

/**
 * et131x_send_packets - This function is called by the OS to send packets
 * @skb: the packet(s) to send
 * @netdev: device on which to TX the above packet(s)
 *
 * Return 0 in almost all cases; non-zero value in extreme hard failure only
 */
int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
{
	int status = 0;
	struct et131x_adapter *adapter = NULL;

	adapter = netdev_priv(netdev);

	/* Send these packets
	 *
	 * NOTE: The Linux Tx entry point is only given one packet at a time
	 * to Tx, so the PacketCount and it's array used makes no sense here
	 */

	/* TCB is not available */
	if (adapter->tx_ring.used >= NUM_TCB) {
		/* NOTE: If there's an error on send, no need to queue the
		 * packet under Linux; if we just send an error up to the
		 * netif layer, it will resend the skb to us.
		 */
		status = -ENOMEM;
	} else {
		/* We need to see if the link is up; if it's not, make the
		 * netif layer think we're good and drop the packet
		 */
		if ((adapter->flags & fMP_ADAPTER_FAIL_SEND_MASK) ||
		    !netif_carrier_ok(netdev)) {
			dev_kfree_skb_any(skb);
			skb = NULL;

			adapter->net_stats.tx_dropped++;
		} else {
			status = send_packet(skb, adapter);
			if (status != 0 && status != -ENOMEM) {
				/* On any other error, make netif think we're
				 * OK and drop the packet
				 */
				dev_kfree_skb_any(skb);
				skb = NULL;
				adapter->net_stats.tx_dropped++;
			}
		}
	}
	return status;
}

/**
 * free_send_packet - Recycle a struct tcb
 * @adapter: pointer to our adapter
 * @tcb: pointer to struct tcb
 *
 * Complete the packet if necessary
 * Assumption - Send spinlock has been acquired
 */
static inline void free_send_packet(struct et131x_adapter *adapter,
				    struct tcb *tcb)
{
	unsigned long flags;
	struct tx_desc *desc = NULL;
	struct net_device_stats *stats = &adapter->net_stats;

	if (tcb->flags & fMP_DEST_BROAD)
		atomic_inc(&adapter->stats.broadcast_pkts_xmtd);
	else if (tcb->flags & fMP_DEST_MULTI)
		atomic_inc(&adapter->stats.multicast_pkts_xmtd);
	else
		atomic_inc(&adapter->stats.unicast_pkts_xmtd);

	if (tcb->skb) {
		stats->tx_bytes += tcb->skb->len;

		/* Iterate through the TX descriptors on the ring
		 * corresponding to this packet and unmap the fragments
		 * they point to
		 */
		do {
			desc = (struct tx_desc *)
			       (adapter->tx_ring.tx_desc_ring +
					INDEX10(tcb->index_start));

			pci_unmap_single(adapter->pdev,
					 desc->addr_lo,
					 desc->len_vlan, PCI_DMA_TODEVICE);

			add_10bit(&tcb->index_start, 1);
			/* Handle 10-bit ring index wrap-around */
			if (INDEX10(tcb->index_start) >=
			    NUM_DESC_PER_RING_TX) {
				tcb->index_start &= ~ET_DMA10_MASK;
				tcb->index_start ^= ET_DMA10_WRAP;
			}
		} while (desc != (adapter->tx_ring.tx_desc_ring +
				INDEX10(tcb->index)));

		dev_kfree_skb_any(tcb->skb);
	}

	memset(tcb, 0, sizeof(struct tcb));

	/* Add the TCB to the Ready Q */
	spin_lock_irqsave(&adapter->tcb_ready_qlock, flags);

	adapter->net_stats.tx_packets++;

	if (adapter->tx_ring.tcb_qtail)
		adapter->tx_ring.tcb_qtail->next = tcb;
	else
		/* Apparently ready Q is empty. */
		adapter->tx_ring.tcb_qhead = tcb;

	adapter->tx_ring.tcb_qtail = tcb;

	spin_unlock_irqrestore(&adapter->tcb_ready_qlock, flags);
	/* NOTE(review): if tx_ring.used is unsigned this check can never
	 * fire — confirm the field's type.
	 */
	WARN_ON(adapter->tx_ring.used < 0);
}

/**
 * et131x_free_busy_send_packets - Free and complete the stopped active sends
 * @adapter: pointer to our adapter
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_free_busy_send_packets(struct et131x_adapter *adapter)
{
	struct tcb *tcb;
	unsigned long flags;
	u32 freed = 0;

	/* Any packets being sent? Check the first TCB on the send list */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	while (tcb != NULL && freed < NUM_TCB) {
		struct tcb *next = tcb->next;

		adapter->tx_ring.send_head = next;

		if (next == NULL)
			adapter->tx_ring.send_tail = NULL;

		adapter->tx_ring.used--;

		/* Drop the lock while recycling: free_send_packet takes
		 * the tcb_ready_qlock internally.
		 */
		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

		freed++;
		free_send_packet(adapter, tcb);

		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		tcb = adapter->tx_ring.send_head;
	}

	WARN_ON(freed == NUM_TCB);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);

	adapter->tx_ring.used = 0;
}

/**
 * et131x_handle_send_interrupt - Interrupt handler for sending processing
 * @adapter: pointer to our adapter
 *
 * Re-claim the send resources, complete sends and get more to send from
 * the send wait queue.
 *
 * Assumption - Send spinlock has been acquired
 */
void et131x_handle_send_interrupt(struct et131x_adapter *adapter)
{
	unsigned long flags;
	u32 serviced;
	struct tcb *tcb;
	u32 index;

	serviced = readl(&adapter->regs->txdma.new_service_complete);
	index = INDEX10(serviced);

	/* Has the ring wrapped? Process any descriptors that do not have
	 * the same "wrap" indicator as the current completion indicator
	 */
	spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

	tcb = adapter->tx_ring.send_head;

	while (tcb &&
	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
	       index < INDEX10(tcb->index)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}
	/* Same-wrap descriptors: complete everything up to the serviced
	 * index
	 */
	while (tcb &&
	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
	       && index > (tcb->index & ET_DMA10_MASK)) {
		adapter->tx_ring.used--;
		adapter->tx_ring.send_head = tcb->next;
		if (tcb->next == NULL)
			adapter->tx_ring.send_tail = NULL;

		spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
		free_send_packet(adapter, tcb);
		spin_lock_irqsave(&adapter->tcb_send_qlock, flags);

		/* Goto the next packet */
		tcb = adapter->tx_ring.send_head;
	}

	/* Wake up the queue when we hit a low-water mark */
	if (adapter->tx_ring.used <= NUM_TCB / 3)
		netif_wake_queue(adapter->netdev);

	spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags);
}

/* ETHTOOL functions */

/* Report the current link settings via the attached PHY */
static int et131x_get_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_gset(adapter->phydev, cmd);
}

static int et131x_set_settings(struct net_device *netdev,
			       struct ethtool_cmd *cmd)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);

	return phy_ethtool_sset(adapter->phydev, cmd);
}

/* Size in bytes of the register snapshot produced by et131x_get_regs() */
static int et131x_get_regs_len(struct net_device *netdev)
{
#define ET131X_REGS_LEN 256
	return ET131X_REGS_LEN * sizeof(u32);
}

/**
 * et131x_get_regs - dump PHY and JAGCore registers for ethtool -d
 * @netdev: net device to dump
 * @regs: ethtool regs header; version is filled in here
 * @regs_data: buffer of at least et131x_get_regs_len() bytes
 */
static void et131x_get_regs(struct net_device *netdev,
			    struct ethtool_regs *regs, void *regs_data)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct address_map __iomem *aregs = adapter->regs;
	u32 *regs_buff = regs_data;
	u32 num = 0;

	memset(regs_data, 0, et131x_get_regs_len(netdev));

	/* Pack a format marker, the PCI revision and the device id into
	 * the ethtool version field
	 */
	regs->version = (1 << 24) | (adapter->pdev->revision << 16) |
			adapter->pdev->device;

	/* PHY regs
	 * NOTE(review): each (u16 *) cast stores the 16-bit PHY value into
	 * only half of a 32-bit slot - this presumably assumes little-endian
	 * layout; confirm before relying on the upper halves being zero.
	 */
	et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]);
	/* Autoneg next page transmit reg */
	et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]);
	/* Link partner next page reg */
	et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_INTERRUPT_STATUS,
			(u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]);
	et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]);

	/* Global regs */
	regs_buff[num++] = readl(&aregs->global.txq_start_addr);
	regs_buff[num++] = readl(&aregs->global.txq_end_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_start_addr);
	regs_buff[num++] = readl(&aregs->global.rxq_end_addr);
	regs_buff[num++] = readl(&aregs->global.pm_csr);
	/* Cached interrupt status rather than a live read */
	regs_buff[num++] = adapter->stats.interrupt_status;
	regs_buff[num++] = readl(&aregs->global.int_mask);
	regs_buff[num++] = readl(&aregs->global.int_alias_clr_en);
	regs_buff[num++] = readl(&aregs->global.int_status_alias);
	regs_buff[num++] = readl(&aregs->global.sw_reset);
	regs_buff[num++] = readl(&aregs->global.slv_timer);
	regs_buff[num++] = readl(&aregs->global.msi_config);
	regs_buff[num++] = readl(&aregs->global.loopback);
	regs_buff[num++] = readl(&aregs->global.watchdog_timer);

	/* TXDMA regs */
	regs_buff[num++] = readl(&aregs->txdma.csr);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.pr_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.pr_num_des);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr);
	regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext);
	regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->txdma.service_request);
	regs_buff[num++] = readl(&aregs->txdma.service_complete);
	regs_buff[num++] = readl(&aregs->txdma.cache_rd_index);
	regs_buff[num++] = readl(&aregs->txdma.cache_wr_index);
	regs_buff[num++] = readl(&aregs->txdma.tx_dma_error);
	regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt);
	regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt);
	regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt);
	regs_buff[num++] = readl(&aregs->txdma.new_service_complete);
	regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt);

	/* RXDMA regs */
	regs_buff[num++] = readl(&aregs->rxdma.csr);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done);
	regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext);
	regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi);
	regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo);
	regs_buff[num++] = readl(&aregs->rxdma.psr_num_des);
regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); 3248 + regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); 3249 + regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); 3250 + regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); 3251 + regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); 3252 + regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); 3253 + regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); 3254 + regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); 3255 + regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); 3256 + regs_buff[num++] = readl(&aregs->rxdma.fbr0_rd_index); 3257 + regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); 3258 + regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); 3259 + regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); 3260 + regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); 3261 + regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); 3262 + regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); 3263 + regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); 3264 + regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); 3265 + } 3266 + 3267 + #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */ 3268 + static void et131x_get_drvinfo(struct net_device *netdev, 3269 + struct ethtool_drvinfo *info) 3270 + { 3271 + struct et131x_adapter *adapter = netdev_priv(netdev); 3272 + 3273 + strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN); 3274 + strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN); 3275 + strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN); 3276 + } 3277 + 3278 + static struct ethtool_ops et131x_ethtool_ops = { 3279 + .get_settings = et131x_get_settings, 3280 + .set_settings = et131x_set_settings, 3281 + .get_drvinfo = et131x_get_drvinfo, 3282 + .get_regs_len = et131x_get_regs_len, 3283 + .get_regs = et131x_get_regs, 3284 + .get_link = ethtool_op_get_link, 3285 + }; 3286 + 3287 + void et131x_set_ethtool_ops(struct net_device *netdev) 3288 + { 
	SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops);
}

/* PCI functions */

/**
 * et131x_hwaddr_init - set up the MAC Address on the ET1310
 * @adapter: pointer to our private adapter structure
 *
 * Reconciles the EEPROM-provided address (rom_addr) with the current
 * address (addr): an all-zero rom_addr means no EEPROM override exists.
 */
void et131x_hwaddr_init(struct et131x_adapter *adapter)
{
	/* If have our default mac from init and no mac address from
	 * EEPROM then we need to generate the last octet and set it on the
	 * device
	 */
	if (adapter->rom_addr[0] == 0x00 &&
	    adapter->rom_addr[1] == 0x00 &&
	    adapter->rom_addr[2] == 0x00 &&
	    adapter->rom_addr[3] == 0x00 &&
	    adapter->rom_addr[4] == 0x00 &&
	    adapter->rom_addr[5] == 0x00) {
		/*
		 * We need to randomly generate the last octet so we
		 * decrease our chances of setting the mac address to
		 * same as another one of our cards in the system
		 */
		get_random_bytes(&adapter->addr[5], 1);
		/*
		 * We have the default value in the register we are
		 * working with so we need to copy the current
		 * address into the permanent address
		 */
		memcpy(adapter->rom_addr,
		       adapter->addr, ETH_ALEN);
	} else {
		/* We do not have an override address, so set the
		 * current address to the permanent address and add
		 * it to the device
		 */
		memcpy(adapter->addr,
		       adapter->rom_addr, ETH_ALEN);
	}
}

/**
 * et131x_pci_init - initial PCI setup
 * @adapter: pointer to our private adapter structure
 * @pdev: our PCI device
 *
 * Perform the initial setup of PCI registers and if possible initialise
 * the MAC address.
 * At this point the I/O registers have yet to be mapped
 *
 * Returns 0 on success, -EIO on any EEPROM or config-space access failure.
 */
static int et131x_pci_init(struct et131x_adapter *adapter,
			   struct pci_dev *pdev)
{
	int i;
	u8 max_payload;
	u8 read_size_reg;

	if (et131x_init_eeprom(adapter) < 0)
		return -EIO;

	/* Let's set up the PORT LOGIC Register. First we need to know what
	 * the max_payload_size is
	 */
	if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for Max Payload Size\n");
		return -EIO;
	}

	/* Program the Ack/Nak latency and replay timers */
	max_payload &= 0x07;	/* Only the lower 3 bits are valid */

	if (max_payload < 2) {
		/* Per-payload-size timer values; index 0/1 selects the
		 * entry for the two smallest payload encodings
		 */
		static const u16 acknak[2] = { 0x76, 0xD0 };
		static const u16 replay[2] = { 0x1E0, 0x2ED };

		if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK,
					  acknak[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for ACK/NAK\n");
			return -EIO;
		}
		if (pci_write_config_word(pdev, ET1310_PCI_REPLAY,
					  replay[max_payload])) {
			dev_err(&pdev->dev,
				"Could not write PCI config space for Replay Timer\n");
			return -EIO;
		}
	}

	/* l0s and l1 latency timers. We are using default values.
	 * Representing 001 for L0s and 010 for L1
	 */
	if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) {
		dev_err(&pdev->dev,
			"Could not write PCI config space for Latency Timers\n");
		return -EIO;
	}

	/* Change the max read size to 2k
	 * NOTE(review): 0x51 is a vendor-specific config register - the
	 * mask/value below come from the original driver, not from a named
	 * constant; confirm against the ET1310 datasheet.
	 */
	if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) {
		dev_err(&pdev->dev,
			"Could not read PCI config space for Max read size\n");
		return -EIO;
	}

	read_size_reg &= 0x8f;
	read_size_reg |= 0x40;

	if (pci_write_config_byte(pdev, 0x51, read_size_reg)) {
		dev_err(&pdev->dev,
			"Could not write PCI config space for Max read size\n");
		return -EIO;
	}

	/* Get MAC address from config space if an eeprom exists, otherwise
	 * the MAC address there will not be valid
	 */
	if (!adapter->has_eeprom) {
		et131x_hwaddr_init(adapter);
		return 0;
	}

	for (i = 0; i < ETH_ALEN; i++) {
		if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i,
					 adapter->rom_addr + i)) {
			dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n");
			return -EIO;
		}
	}
	memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN);
	return 0;
}

/**
 * et131x_error_timer_handler
 * @data: timer-specific variable; here a pointer to our adapter structure
 *
 * The routine called when the error timer expires, to track the number of
 * recurring errors.
 */
void et131x_error_timer_handler(unsigned long data)
{
	struct et131x_adapter *adapter = (struct et131x_adapter *) data;
	struct phy_device *phydev = adapter->phydev;

	if (et1310_in_phy_coma(adapter)) {
		/* Bring the device immediately out of coma, to
		 * prevent it from sleeping indefinitely, this
		 * mechanism could be improved!
		 */
		et1310_disable_phy_coma(adapter);
		adapter->boot_coma = 20;
	} else {
		et1310_update_macstat_host_counters(adapter);
	}

	/* boot_coma counts timer ticks without link; it saturates at 11 */
	if (!phydev->link && adapter->boot_coma < 11)
		adapter->boot_coma++;

	/* After ten link-less ticks, put the PHY into coma to save power */
	if (adapter->boot_coma == 10) {
		if (!phydev->link) {
			if (!et1310_in_phy_coma(adapter)) {
				/* NOTE - This was originally a 'sync with
				 * interrupt'. How to do that under Linux?
				 */
				et131x_enable_interrupts(adapter);
				et1310_enable_phy_coma(adapter);
			}
		}
	}

	/* This is a periodic timer, so reschedule */
	mod_timer(&adapter->error_timer, jiffies +
		  TX_ERROR_PERIOD * HZ / 1000);
}

/**
 * et131x_configure_global_regs - configure JAGCore global regs
 * @adapter: pointer to our adapter structure
 *
 * Used to configure the global registers on the JAGCore
 */
void et131x_configure_global_regs(struct et131x_adapter *adapter)
{
	struct global_regs __iomem *regs = &adapter->regs->global;

	writel(0, &regs->rxq_start_addr);
	writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr);

	if (adapter->registry_jumbo_packet < 2048) {
		/* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word
		 * block of RAM that the driver can split between Tx
		 * and Rx as it desires. Our default is to split it
		 * 50/50:
		 */
		writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr);
		writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr);
	} else if (adapter->registry_jumbo_packet < 8192) {
		/* For jumbo packets > 2k but < 8k, split 50-50. */
		writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr);
		writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr);
	} else {
		/* 9216 is the only packet size greater than 8k that
		 * is available. The Tx buffer has to be big enough
		 * for one whole packet on the Tx side. We'll make
		 * the Tx 9408, and give the rest to Rx
		 */
		writel(0x01b3, &regs->rxq_end_addr);
		writel(0x01b4, &regs->txq_start_addr);
	}

	/* Initialize the loopback register. Disable all loopbacks. */
	writel(0, &regs->loopback);

	/* MSI Register */
	writel(0, &regs->msi_config);

	/* By default, disable the watchdog timer. It will be enabled when
	 * a packet is queued.
	 */
	writel(0, &regs->watchdog_timer);
}

/**
 * et131x_adapter_setup - Set the adapter up as per cassini+ documentation
 * @adapter: pointer to our private adapter structure
 *
 * Configures every hardware block in sequence: global regs, MAC, MMC,
 * RX/TX MAC, RX/TX DMA, MACSTAT, then powers up and initialises the PHY.
 * (The old "Returns 0 on success" note was stale - this function is void.)
 */
void et131x_adapter_setup(struct et131x_adapter *adapter)
{
	/* Configure the JAGCore */
	et131x_configure_global_regs(adapter);

	et1310_config_mac_regs1(adapter);

	/* Configure the MMC registers */
	/* All we need to do is initialize the Memory Control Register */
	writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl);

	et1310_config_rxmac_regs(adapter);
	et1310_config_txmac_regs(adapter);

	et131x_config_rx_dma_regs(adapter);
	et131x_config_tx_dma_regs(adapter);

	et1310_config_macstat_regs(adapter);

	et1310_phy_power_down(adapter, 0);
	et131x_xcvr_init(adapter);
}

/**
 * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310
 * @adapter: pointer to our private adapter structure
 */
void et131x_soft_reset(struct et131x_adapter *adapter)
{
	/* Disable MAC Core */
	writel(0xc00f0000, &adapter->regs->mac.cfg1);

	/* Set everything to a reset value */
	writel(0x7F, &adapter->regs->global.sw_reset);

	writel(0x000f0000, &adapter->regs->mac.cfg1);
	writel(0x00000000, &adapter->regs->mac.cfg1);
}

/**
 * et131x_align_allocated_memory - Align allocated memory on a given boundary
 * @adapter: pointer to our adapter structure (unused here; kept for the
 *           call-site signature)
 * @phys_addr: pointer to Physical address; rounded up in place
 * @offset: pointer to the offset variable; receives the number of bytes the
 *          caller must advance the matching virtual address
 * @mask: correct mask (alignment - 1, i.e. low bits that must be zero)
 */
void et131x_align_allocated_memory(struct et131x_adapter *adapter,
				   uint64_t *phys_addr,
				   uint64_t *offset, uint64_t mask)
{
	uint64_t new_addr;

	*offset = 0;

	new_addr = *phys_addr & ~mask;

	if (new_addr != *phys_addr) {
		/* Move to next aligned block */
		new_addr += mask + 1;
		/* Return offset for adjusting virt addr */
		*offset = new_addr - *phys_addr;
		/* Return new physical address */
		*phys_addr = new_addr;
	}
}

/**
 * et131x_adapter_memory_alloc
 * @adapter: pointer to our private adapter structure
 *
 * Returns 0 on success, errno on failure (as defined in errno.h).
 *
 * Allocate all the memory blocks for send, receive and others.
 * On failure, everything already allocated is unwound before returning.
 */
int et131x_adapter_memory_alloc(struct et131x_adapter *adapter)
{
	int status;

	/* Allocate memory for the Tx Ring */
	status = et131x_tx_dma_memory_alloc(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			"et131x_tx_dma_memory_alloc FAILED\n");
		return status;
	}
	/* Receive buffer memory allocation */
	status = et131x_rx_dma_memory_alloc(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			"et131x_rx_dma_memory_alloc FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		return status;
	}

	/* Init receive data structures */
	status = et131x_init_recv(adapter);
	if (status != 0) {
		dev_err(&adapter->pdev->dev,
			"et131x_init_recv FAILED\n");
		et131x_tx_dma_memory_free(adapter);
		et131x_rx_dma_memory_free(adapter);
	}
	return status;
}

/**
 * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx
 * @adapter: pointer to our private adapter structure
 */
void et131x_adapter_memory_free(struct et131x_adapter *adapter)
{
	/* Free DMA memory */
	et131x_tx_dma_memory_free(adapter);
	et131x_rx_dma_memory_free(adapter);
}

/* phylib callback: invoked whenever the PHY reports a link state change */
static void et131x_adjust_link(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = adapter->phydev;

	if (netif_carrier_ok(netdev)) {
		adapter->boot_coma = 20;

		if (phydev && phydev->speed == SPEED_10) {
			/*
			 * NOTE - Is there a way to query this without
			 * TruePHY?
			 * && TRU_QueryCoreType(adapter->hTruePhy, 0)==
			 * EMI_TRUEPHY_A13O) {
			 */
			u16 register18;

			/* Vendor-specific 10Mbit workaround sequence via the
			 * indirect PHY index/data registers
			 */
			et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
					 &register18);
			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
					 register18 | 0x4);
			et131x_mii_write(adapter, PHY_INDEX_REG,
					 register18 | 0x8402);
			et131x_mii_write(adapter, PHY_DATA_REG,
					 register18 | 511);
			et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
					 register18);
		}

		et1310_config_flow_control(adapter);

		/* Deepen the PHY TX FIFO for jumbo frames at gigabit speed */
		if (phydev && phydev->speed == SPEED_1000 &&
				adapter->registry_jumbo_packet > 2048) {
			u16 reg;

			et131x_mii_read(adapter, PHY_CONFIG, &reg);
			reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH;
			reg |= ET_PHY_CONFIG_FIFO_DEPTH_32;
			et131x_mii_write(adapter, PHY_CONFIG, reg);
		}

		et131x_set_rx_dma_timer(adapter);
		et1310_config_mac_regs2(adapter);
	}

	if (phydev && phydev->link != adapter->link) {
		/*
		 * Check to see if we are in coma mode and if
		 * so, disable it because we will not be able
		 * to read PHY values until we are out.
		 */
		if (et1310_in_phy_coma(adapter))
			et1310_disable_phy_coma(adapter);

		if (phydev->link) {
			adapter->boot_coma = 20;
		} else {
			dev_warn(&adapter->pdev->dev,
			    "Link down - cable problem ?\n");
			adapter->boot_coma = 0;

			if (phydev->speed == SPEED_10) {
				/* NOTE - Is there a way to query this without
				 * TruePHY?
				 * && TRU_QueryCoreType(adapter->hTruePhy, 0) ==
				 * EMI_TRUEPHY_A13O)
				 */
				u16 register18;

				et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG,
						 &register18);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18 | 0x4);
				et131x_mii_write(adapter, PHY_INDEX_REG,
						 register18 | 0x8402);
				et131x_mii_write(adapter, PHY_DATA_REG,
						 register18 | 511);
				et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG,
						 register18);
			}

			/* Free the packets being actively sent & stopped */
			et131x_free_busy_send_packets(adapter);

			/* Re-initialize the send structures */
			et131x_init_send(adapter);

			/* Reset the RFD list and re-start RU */
			et131x_reset_recv(adapter);

			/*
			 * Bring the device back to the state it was during
			 * init prior to autonegotiation being complete. This
			 * way, when we get the auto-neg complete interrupt,
			 * we can complete init by calling config_mac_regs2.
			 */
			et131x_soft_reset(adapter);

			/* Setup ET1310 as per the documentation */
			et131x_adapter_setup(adapter);

			/* perform reset of tx/rx */
			et131x_disable_txrx(netdev);
			et131x_enable_txrx(netdev);
		}

		adapter->link = phydev->link;

		phy_print_status(phydev);
	}
}

/**
 * et131x_mii_probe - find and attach the first PHY on our MII bus
 * @netdev: net device to attach the PHY to
 *
 * Returns 0 on success, -ENODEV if no PHY is found, or the phy_connect()
 * error. On success adapter->phydev is set.
 */
static int et131x_mii_probe(struct net_device *netdev)
{
	struct et131x_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	phydev = phy_find_first(adapter->mii_bus);
	if (!phydev) {
		dev_err(&adapter->pdev->dev, "no PHY found\n");
		return -ENODEV;
	}

	phydev = phy_connect(netdev, dev_name(&phydev->dev),
			&et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII);

	if (IS_ERR(phydev)) {
		dev_err(&adapter->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	phydev->supported &= (SUPPORTED_10baseT_Half
				| SUPPORTED_10baseT_Full
				| SUPPORTED_100baseT_Half
				| SUPPORTED_100baseT_Full
				| SUPPORTED_Autoneg
				| SUPPORTED_MII
				| SUPPORTED_TP);

	/* Only the gigabit variant advertises 1000baseT */
	if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST)
		phydev->supported |= SUPPORTED_1000baseT_Full;

	phydev->advertising = phydev->supported;
	adapter->phydev = phydev;

	dev_info(&adapter->pdev->dev, "attached PHY driver [%s] "
		 "(mii_bus:phy_addr=%s)\n",
		 phydev->drv->name, dev_name(&phydev->dev));

	return 0;
}

/**
 * et131x_adapter_init
 * @adapter: pointer to the private adapter struct
 * @pdev: pointer to the PCI device
 *
 * Initialize the data structures for the et131x_adapter object and link
 * them together with the platform provided device structures.
 */
static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev,
		struct pci_dev *pdev)
{
	static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 };

	struct et131x_adapter *adapter;

	/* Allocate private adapter struct and copy in relevant information */
	adapter = netdev_priv(netdev);
	adapter->pdev = pci_dev_get(pdev);
	adapter->netdev = netdev;

	/* Do the same for the netdev struct */
	netdev->irq = pdev->irq;
	netdev->base_addr = pci_resource_start(pdev, 0);

	/* Initialize spinlocks here */
	spin_lock_init(&adapter->lock);
	spin_lock_init(&adapter->tcb_send_qlock);
	spin_lock_init(&adapter->tcb_ready_qlock);
	spin_lock_init(&adapter->send_hw_lock);
	spin_lock_init(&adapter->rcv_lock);
	spin_lock_init(&adapter->rcv_pend_lock);
	spin_lock_init(&adapter->fbr_lock);
	spin_lock_init(&adapter->phy_lock);

	adapter->registry_jumbo_packet = 1514;	/* 1514-9216 */

	/* Set the MAC address to a default */
	memcpy(adapter->addr, default_mac, ETH_ALEN);

	return adapter;
}

/**
 * et131x_pci_setup - Perform device initialization
 * @pdev: a pointer to the device's pci_dev structure
 * @ent: this device's entry in the pci_device_id table
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 *
 * Registered in the pci_driver structure, this function is called when the
 * PCI subsystem finds a new PCI device which matches the information
 * contained in the pci_device_id table. This routine is the equivalent to
 * a device insertion routine.
3840 + */ 3841 + static int __devinit et131x_pci_setup(struct pci_dev *pdev, 3842 + const struct pci_device_id *ent) 3843 + { 3844 + int result; 3845 + int pm_cap; 3846 + struct net_device *netdev; 3847 + struct et131x_adapter *adapter; 3848 + int ii; 3849 + 3850 + result = pci_enable_device(pdev); 3851 + if (result) { 3852 + dev_err(&pdev->dev, "pci_enable_device() failed\n"); 3853 + goto err_out; 3854 + } 3855 + 3856 + /* Perform some basic PCI checks */ 3857 + if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 3858 + dev_err(&pdev->dev, "Can't find PCI device's base address\n"); 3859 + goto err_disable; 3860 + } 3861 + 3862 + if (pci_request_regions(pdev, DRIVER_NAME)) { 3863 + dev_err(&pdev->dev, "Can't get PCI resources\n"); 3864 + goto err_disable; 3865 + } 3866 + 3867 + pci_set_master(pdev); 3868 + 3869 + /* Query PCI for Power Mgmt Capabilities 3870 + * 3871 + * NOTE: Now reading PowerMgmt in another location; is this still 3872 + * needed? 3873 + */ 3874 + pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 3875 + if (!pm_cap) { 3876 + dev_err(&pdev->dev, 3877 + "Cannot find Power Management capabilities\n"); 3878 + result = -EIO; 3879 + goto err_release_res; 3880 + } 3881 + 3882 + /* Check the DMA addressing support of this device */ 3883 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 3884 + result = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 3885 + if (result) { 3886 + dev_err(&pdev->dev, 3887 + "Unable to obtain 64 bit DMA for consistent allocations\n"); 3888 + goto err_release_res; 3889 + } 3890 + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 3891 + result = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 3892 + if (result) { 3893 + dev_err(&pdev->dev, 3894 + "Unable to obtain 32 bit DMA for consistent allocations\n"); 3895 + goto err_release_res; 3896 + } 3897 + } else { 3898 + dev_err(&pdev->dev, "No usable DMA addressing method\n"); 3899 + result = -EIO; 3900 + goto err_release_res; 3901 + } 3902 + 3903 + /* Allocate 
netdev and private adapter structs */ 3904 + netdev = et131x_device_alloc(); 3905 + if (!netdev) { 3906 + dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); 3907 + result = -ENOMEM; 3908 + goto err_release_res; 3909 + } 3910 + 3911 + SET_NETDEV_DEV(netdev, &pdev->dev); 3912 + et131x_set_ethtool_ops(netdev); 3913 + 3914 + adapter = et131x_adapter_init(netdev, pdev); 3915 + 3916 + /* Initialise the PCI setup for the device */ 3917 + et131x_pci_init(adapter, pdev); 3918 + 3919 + /* Map the bus-relative registers to system virtual memory */ 3920 + adapter->regs = pci_ioremap_bar(pdev, 0); 3921 + if (!adapter->regs) { 3922 + dev_err(&pdev->dev, "Cannot map device registers\n"); 3923 + result = -ENOMEM; 3924 + goto err_free_dev; 3925 + } 3926 + 3927 + /* If Phy COMA mode was enabled when we went down, disable it here. */ 3928 + writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); 3929 + 3930 + /* Issue a global reset to the et1310 */ 3931 + et131x_soft_reset(adapter); 3932 + 3933 + /* Disable all interrupts (paranoid) */ 3934 + et131x_disable_interrupts(adapter); 3935 + 3936 + /* Allocate DMA memory */ 3937 + result = et131x_adapter_memory_alloc(adapter); 3938 + if (result) { 3939 + dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n"); 3940 + goto err_iounmap; 3941 + } 3942 + 3943 + /* Init send data structures */ 3944 + et131x_init_send(adapter); 3945 + 3946 + /* Set up the task structure for the ISR's deferred handler */ 3947 + INIT_WORK(&adapter->task, et131x_isr_handler); 3948 + 3949 + /* Copy address into the net_device struct */ 3950 + memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 3951 + 3952 + /* Init variable for counting how long we do not have link status */ 3953 + adapter->boot_coma = 0; 3954 + et1310_disable_phy_coma(adapter); 3955 + 3956 + /* Setup the mii_bus struct */ 3957 + adapter->mii_bus = mdiobus_alloc(); 3958 + if (!adapter->mii_bus) { 3959 + dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); 3960 + goto err_mem_free; 3961 + 
} 3962 + 3963 + adapter->mii_bus->name = "et131x_eth_mii"; 3964 + snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, "%x", 3965 + (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); 3966 + adapter->mii_bus->priv = netdev; 3967 + adapter->mii_bus->read = et131x_mdio_read; 3968 + adapter->mii_bus->write = et131x_mdio_write; 3969 + adapter->mii_bus->reset = et131x_mdio_reset; 3970 + adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 3971 + if (!adapter->mii_bus->irq) { 3972 + dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); 3973 + goto err_mdio_free; 3974 + } 3975 + 3976 + for (ii = 0; ii < PHY_MAX_ADDR; ii++) 3977 + adapter->mii_bus->irq[ii] = PHY_POLL; 3978 + 3979 + if (mdiobus_register(adapter->mii_bus)) { 3980 + dev_err(&pdev->dev, "failed to register MII bus\n"); 3981 + mdiobus_free(adapter->mii_bus); 3982 + goto err_mdio_free_irq; 3983 + } 3984 + 3985 + if (et131x_mii_probe(netdev)) { 3986 + dev_err(&pdev->dev, "failed to probe MII bus\n"); 3987 + goto err_mdio_unregister; 3988 + } 3989 + 3990 + /* Setup et1310 as per the documentation */ 3991 + et131x_adapter_setup(adapter); 3992 + 3993 + /* We can enable interrupts now 3994 + * 3995 + * NOTE - Because registration of interrupt handler is done in the 3996 + * device's open(), defer enabling device interrupts to that 3997 + * point 3998 + */ 3999 + 4000 + /* Register the net_device struct with the Linux network layer */ 4001 + result = register_netdev(netdev); 4002 + if (result != 0) { 4003 + dev_err(&pdev->dev, "register_netdev() failed\n"); 4004 + goto err_mdio_unregister; 4005 + } 4006 + 4007 + /* Register the net_device struct with the PCI subsystem. Save a copy 4008 + * of the PCI config space for this device now that the device has 4009 + * been initialized, just in case it needs to be quickly restored. 
4010 + */ 4011 + pci_set_drvdata(pdev, netdev); 4012 + pci_save_state(adapter->pdev); 4013 + 4014 + return result; 4015 + 4016 + err_mdio_unregister: 4017 + mdiobus_unregister(adapter->mii_bus); 4018 + err_mdio_free_irq: 4019 + kfree(adapter->mii_bus->irq); 4020 + err_mdio_free: 4021 + mdiobus_free(adapter->mii_bus); 4022 + err_mem_free: 4023 + et131x_adapter_memory_free(adapter); 4024 + err_iounmap: 4025 + iounmap(adapter->regs); 4026 + err_free_dev: 4027 + pci_dev_put(pdev); 4028 + free_netdev(netdev); 4029 + err_release_res: 4030 + pci_release_regions(pdev); 4031 + err_disable: 4032 + pci_disable_device(pdev); 4033 + err_out: 4034 + return result; 4035 + } 4036 + 4037 + /** 4038 + * et131x_pci_remove 4039 + * @pdev: a pointer to the device's pci_dev structure 4040 + * 4041 + * Registered in the pci_driver structure, this function is called when the 4042 + * PCI subsystem detects that a PCI device which matches the information 4043 + * contained in the pci_device_id table has been removed. 
 */
static void __devexit et131x_pci_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct et131x_adapter *adapter = netdev_priv(netdev);

	/* Tear down in roughly the reverse order of et131x_pci_setup() */
	unregister_netdev(netdev);
	mdiobus_unregister(adapter->mii_bus);
	kfree(adapter->mii_bus->irq);
	mdiobus_free(adapter->mii_bus);

	et131x_adapter_memory_free(adapter);	/* free DMA descriptor memory */
	iounmap(adapter->regs);			/* unmap BAR 0 registers */
	/* NOTE(review): pci_dev_put() here presumably balances a
	 * pci_dev_get() taken in probe -- confirm against et131x_pci_setup()
	 */
	pci_dev_put(pdev);

	free_netdev(netdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

#ifdef CONFIG_PM_SLEEP
/**
 * et131x_suspend - system-sleep suspend callback
 * @dev: generic device embedded in our pci_dev
 *
 * If the interface is running, detach it from the stack, bring the
 * hardware down and save PCI config space for et131x_resume().
 *
 * Always returns 0.
 */
static int et131x_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		netif_device_detach(netdev);
		et131x_down(netdev);
		pci_save_state(pdev);
	}

	return 0;
}

/**
 * et131x_resume - system-sleep resume callback
 * @dev: generic device embedded in our pci_dev
 *
 * Mirror image of et131x_suspend(). Always returns 0.
 */
static int et131x_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);

	if (netif_running(netdev)) {
		pci_restore_state(pdev);
		et131x_up(netdev);
		netif_device_attach(netdev);
	}

	return 0;
}

static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume);
#define ET131X_PM_OPS (&et131x_pm_ops)
#else
#define ET131X_PM_OPS NULL
#endif

/* PCI IDs claimed by this driver: gigabit and fast-ethernet ET131x parts */
static struct pci_device_id et131x_pci_table[] __devinitdata = {
	{ET131X_PCI_VENDOR_ID, ET131X_PCI_DEVICE_ID_GIG, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, 0UL},
	{ET131X_PCI_VENDOR_ID, ET131X_PCI_DEVICE_ID_FAST, PCI_ANY_ID,
	 PCI_ANY_ID, 0, 0, 0UL},
	{0,}
};

MODULE_DEVICE_TABLE(pci, et131x_pci_table);

static struct pci_driver et131x_driver = {
	.name = DRIVER_NAME,
	.id_table = et131x_pci_table,
	.probe = et131x_pci_setup,
	.remove = __devexit_p(et131x_pci_remove),
	.driver.pm = ET131X_PM_OPS,
};

/**
 * et131x_init_module - The "main" entry point called on driver initialization
 *
 * Returns 0 on success, errno on failure (as defined in errno.h)
 */
static int __init et131x_init_module(void)
{
	return pci_register_driver(&et131x_driver);
}

/**
 * et131x_cleanup_module - The entry point called on driver cleanup
 */
static void __exit et131x_cleanup_module(void)
{
	pci_unregister_driver(&et131x_driver);
}

module_init(et131x_init_module);
module_exit(et131x_cleanup_module);

/* ISR functions */

/**
 * et131x_enable_interrupts - enable interrupt
 * @adapter: et131x device
 *
 * Enable the appropriate interrupts on the ET131x according to our
 * configuration
 */
void et131x_enable_interrupts(struct et131x_adapter *adapter)
{
	u32 mask;

	/* Enable all global interrupts. The mask chosen differs when TX
	 * flow control is active -- see the INT_MASK_ENABLE* definitions
	 * for exactly which bits differ.
	 */
	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH)
		mask = INT_MASK_ENABLE;
	else
		mask = INT_MASK_ENABLE_NO_FLOW;

	writel(mask, &adapter->regs->global.int_mask);
}

/**
 * et131x_disable_interrupts - interrupt disable
 * @adapter: et131x device
 *
 * Block all interrupts from the et131x device at the device itself
 */
void et131x_disable_interrupts(struct et131x_adapter *adapter)
{
	/* Disable all global interrupts */
	writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask);
}
/**
 * et131x_isr - The Interrupt Service Routine for the driver.
 * @irq: the IRQ on which the interrupt was received.
 * @dev_id: device-specific info (here a pointer to a net_device struct)
 *
 * Runs in hard-irq context: masks device interrupts, snapshots the status
 * register, and defers the real servicing to et131x_isr_handler() via
 * schedule_work().
 *
 * Returns a value indicating if the interrupt was handled.
 */
irqreturn_t et131x_isr(int irq, void *dev_id)
{
	bool handled = true;
	struct net_device *netdev = (struct net_device *)dev_id;
	struct et131x_adapter *adapter = NULL;
	u32 status;

	if (!netif_device_present(netdev)) {
		handled = false;
		goto out;
	}

	adapter = netdev_priv(netdev);

	/* If the adapter is in low power state, then it should not
	 * recognize any interrupt
	 */

	/* Disable Device Interrupts */
	et131x_disable_interrupts(adapter);

	/* Get a copy of the value in the interrupt status register
	 * so we can process the interrupting section
	 */
	status = readl(&adapter->regs->global.int_status);

	/* Strip the bits that are always masked; which set is masked
	 * depends on whether TX flow control is in use (same split as
	 * et131x_enable_interrupts())
	 */
	if (adapter->flowcontrol == FLOW_TXONLY ||
	    adapter->flowcontrol == FLOW_BOTH) {
		status &= ~INT_MASK_ENABLE;
	} else {
		status &= ~INT_MASK_ENABLE_NO_FLOW;
	}

	/* Make sure this is our interrupt */
	if (!status) {
		handled = false;
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* This is our interrupt, so process accordingly */

	if (status & ET_INTR_WATCHDOG) {
		struct tcb *tcb = adapter->tx_ring.send_head;

		/* A TCB still at the head after a full watchdog period is
		 * considered stale -- force TX completion processing
		 */
		if (tcb)
			if (++tcb->stale > 1)
				status |= ET_INTR_TXDMA_ISR;

		if (adapter->rx_ring.unfinished_receives)
			status |= ET_INTR_RXDMA_XFR_DONE;
		else if (tcb == NULL)
			/* Nothing in flight -- stop the watchdog timer */
			writel(0, &adapter->regs->global.watchdog_timer);

		status &= ~ET_INTR_WATCHDOG;
	}

	if (status == 0) {
		/* This interrupt has in some way been "handled" by
		 * the ISR. Either it was a spurious Rx interrupt, or
		 * it was a Tx interrupt that has been filtered by
		 * the ISR.
		 */
		et131x_enable_interrupts(adapter);
		goto out;
	}

	/* We need to save the interrupt status value for use in our
	 * DPC. We will clear the software copy of that in that
	 * routine.
	 */
	adapter->stats.interrupt_status = status;

	/* Schedule the ISR handler as a bottom-half task in the
	 * kernel's tq_immediate queue, and mark the queue for
	 * execution
	 */
	schedule_work(&adapter->task);
out:
	return IRQ_RETVAL(handled);
}

/**
 * et131x_isr_handler - The ISR handler
 * @work: the work_struct embedded in the adapter (see INIT_WORK in probe);
 *	  the owning et131x_adapter is recovered via container_of()
 *
 * scheduled to run in a deferred context by the ISR. This is where the ISR's
 * work actually gets done.
 */
void et131x_isr_handler(struct work_struct *work)
{
	struct et131x_adapter *adapter =
		container_of(work, struct et131x_adapter, task);
	u32 status = adapter->stats.interrupt_status;
	struct address_map __iomem *iomem = adapter->regs;

	/*
	 * These first two are by far the most common. Once handled, we clear
	 * their two bits in the status word. If the word is now zero, we
	 * exit.
	 */
	/* Handle all the completed Transmit interrupts */
	if (status & ET_INTR_TXDMA_ISR)
		et131x_handle_send_interrupt(adapter);

	/* Handle all the completed Receives interrupts */
	if (status & ET_INTR_RXDMA_XFR_DONE)
		et131x_handle_recv_interrupt(adapter);

	/* Clear the two bits just serviced. NOTE(review): 0xffffffd7 is
	 * presumably ~(ET_INTR_TXDMA_ISR | ET_INTR_RXDMA_XFR_DONE) -- confirm
	 * against the ET_INTR_* bit definitions
	 */
	status &= 0xffffffd7;

	if (status) {
		/* Handle the TXDMA Error interrupt */
		if (status & ET_INTR_TXDMA_ERR) {
			u32 txdma_err;

			/* Following read also clears the register (COR) */
			txdma_err = readl(&iomem->txdma.tx_dma_error);

			dev_warn(&adapter->pdev->dev,
				    "TXDMA_ERR interrupt, error = %d\n",
				    txdma_err);
		}

		/* Handle Free Buffer Ring 0 and 1 Low interrupt */
		if (status &
		    (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) {
			/*
			 * This indicates the number of unused buffers in
			 * RXDMA free buffer ring 0 is <= the limit you
			 * programmed. Free buffer resources need to be
			 * returned. Free buffers are consumed as packets
			 * are passed from the network to the host. The host
			 * becomes aware of the packets from the contents of
			 * the packet status ring. This ring is queried when
			 * the packet done interrupt occurs. Packets are then
			 * passed to the OS. When the OS is done with the
			 * packets the resources can be returned to the
			 * ET1310 for re-use. This interrupt is one method of
			 * returning resources.
			 */

			/* If the user has flow control on, then we will
			 * send a pause packet, otherwise just exit
			 */
			if (adapter->flowcontrol == FLOW_TXONLY ||
			    adapter->flowcontrol == FLOW_BOTH) {
				u32 pm_csr;

				/* Tell the device to send a pause packet via
				 * the back pressure register (bp req and
				 * bp xon/xoff).
				 * NOTE(review): pm_csr is read but never
				 * used below -- possibly a deliberate
				 * register access, possibly dead code;
				 * confirm before removing.
				 */
				pm_csr = readl(&iomem->global.pm_csr);
				if (!et1310_in_phy_coma(adapter))
					writel(3, &iomem->txmac.bp_ctrl);
			}
		}

		/* Handle Packet Status Ring Low Interrupt */
		if (status & ET_INTR_RXDMA_STAT_LOW) {

			/*
			 * Same idea as with the two Free Buffer Rings.
			 * Packets going from the network to the host each
			 * consume a free buffer resource and a packet status
			 * resource. These resoures are passed to the OS.
			 * When the OS is done with the resources, they need
			 * to be returned to the ET1310. This is one method
			 * of returning the resources.
			 */
		}

		/* Handle RXDMA Error Interrupt */
		if (status & ET_INTR_RXDMA_ERR) {
			/*
			 * The rxdma_error interrupt is sent when a time-out
			 * on a request issued by the JAGCore has occurred or
			 * a completion is returned with an un-successful
			 * status. In both cases the request is considered
			 * complete. The JAGCore will automatically re-try the
			 * request in question. Normally information on events
			 * like these are sent to the host using the "Advanced
			 * Error Reporting" capability. This interrupt is
			 * another way of getting similar information. The
			 * only thing required is to clear the interrupt by
			 * reading the ISR in the global resources. The
			 * JAGCore will do a re-try on the request. Normally
			 * you should never see this interrupt. If you start
			 * to see this interrupt occurring frequently then
			 * something bad has occurred. A reset might be the
			 * thing to do.
			 */
			/* TRAP();*/

			/* NOTE(review): this reads txmac.tx_test while
			 * reporting an RXDMA error -- confirm the register
			 * is the intended one
			 */
			dev_warn(&adapter->pdev->dev,
				    "RxDMA_ERR interrupt, error %x\n",
				    readl(&iomem->txmac.tx_test));
		}

		/* Handle the Wake on LAN Event */
		if (status & ET_INTR_WOL) {
			/*
			 * This is a secondary interrupt for wake on LAN.
			 * The driver should never see this, if it does,
			 * something serious is wrong. We will TRAP the
			 * message when we are in DBG mode, otherwise we
			 * will ignore it.
			 */
			dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n");
		}

		/* Let's move on to the TxMac */
		if (status & ET_INTR_TXMAC) {
			u32 err = readl(&iomem->txmac.err);

			/*
			 * When any of the errors occur and TXMAC generates
			 * an interrupt to report these errors, it usually
			 * means that TXMAC has detected an error in the data
			 * stream retrieved from the on-chip Tx Q. All of
			 * these errors are catastrophic and TXMAC won't be
			 * able to recover data when these errors occur. In
			 * a nutshell, the whole Tx path will have to be reset
			 * and re-configured afterwards.
			 */
			dev_warn(&adapter->pdev->dev,
				    "TXMAC interrupt, error 0x%08x\n",
				    err);

			/* If we are debugging, we want to see this error,
			 * otherwise we just want the device to be reset and
			 * continue
			 */
		}

		/* Handle RXMAC Interrupt */
		if (status & ET_INTR_RXMAC) {
			/*
			 * These interrupts are catastrophic to the device,
			 * what we need to do is disable the interrupts and
			 * set the flag to cause us to reset so we can solve
			 * this issue.
			 */
			/* MP_SET_FLAG( adapter,
						fMP_ADAPTER_HARDWARE_ERROR); */

			dev_warn(&adapter->pdev->dev,
			  "RXMAC interrupt, error 0x%08x.  Requesting reset\n",
				    readl(&iomem->rxmac.err_reg));

			dev_warn(&adapter->pdev->dev,
				    "Enable 0x%08x, Diag 0x%08x\n",
				    readl(&iomem->rxmac.ctrl),
				    readl(&iomem->rxmac.rxq_diag));

			/*
			 * If we are debugging, we want to see this error,
			 * otherwise we just want the device to be reset and
			 * continue
			 */
		}

		/* Handle MAC_STAT Interrupt */
		if (status & ET_INTR_MAC_STAT) {
			/*
			 * This means at least one of the un-masked counters
			 * in the MAC_STAT block has rolled over. Use this
			 * to maintain the top, software managed bits of the
			 * counter(s).
			 */
			et1310_handle_macstat_interrupt(adapter);
		}

		/* Handle SLV Timeout Interrupt */
		if (status & ET_INTR_SLV_TIMEOUT) {
			/*
			 * This means a timeout has occurred on a read or
			 * write request to one of the JAGCore registers. The
			 * Global Resources block has terminated the request
			 * and on a read request, returned a "fake" value.
			 * The most likely reasons are: Bad Address or the
			 * addressed module is in a power-down state and
			 * can't respond.
			 */
		}
	}
	et131x_enable_interrupts(adapter);
}

/* NETDEV functions */

/**
 * et131x_stats - Return the current device statistics.
4475 + * @netdev: device whose stats are being queried 4476 + * 4477 + * Returns 0 on success, errno on failure (as defined in errno.h) 4478 + */ 4479 + static struct net_device_stats *et131x_stats(struct net_device *netdev) 4480 + { 4481 + struct et131x_adapter *adapter = netdev_priv(netdev); 4482 + struct net_device_stats *stats = &adapter->net_stats; 4483 + struct ce_stats *devstat = &adapter->stats; 4484 + 4485 + stats->rx_errors = devstat->rx_length_errs + 4486 + devstat->rx_align_errs + 4487 + devstat->rx_crc_errs + 4488 + devstat->rx_code_violations + 4489 + devstat->rx_other_errs; 4490 + stats->tx_errors = devstat->tx_max_pkt_errs; 4491 + stats->multicast = devstat->multicast_pkts_rcvd; 4492 + stats->collisions = devstat->tx_collisions; 4493 + 4494 + stats->rx_length_errors = devstat->rx_length_errs; 4495 + stats->rx_over_errors = devstat->rx_overflows; 4496 + stats->rx_crc_errors = devstat->rx_crc_errs; 4497 + 4498 + /* NOTE: These stats don't have corresponding values in CE_STATS, 4499 + * so we're going to have to update these directly from within the 4500 + * TX/RX code 4501 + */ 4502 + /* stats->rx_bytes = 20; devstat->; */ 4503 + /* stats->tx_bytes = 20; devstat->; */ 4504 + /* stats->rx_dropped = devstat->; */ 4505 + /* stats->tx_dropped = devstat->; */ 4506 + 4507 + /* NOTE: Not used, can't find analogous statistics */ 4508 + /* stats->rx_frame_errors = devstat->; */ 4509 + /* stats->rx_fifo_errors = devstat->; */ 4510 + /* stats->rx_missed_errors = devstat->; */ 4511 + 4512 + /* stats->tx_aborted_errors = devstat->; */ 4513 + /* stats->tx_carrier_errors = devstat->; */ 4514 + /* stats->tx_fifo_errors = devstat->; */ 4515 + /* stats->tx_heartbeat_errors = devstat->; */ 4516 + /* stats->tx_window_errors = devstat->; */ 4517 + return stats; 4518 + } 4519 + 4520 + /** 4521 + * et131x_enable_txrx - Enable tx/rx queues 4522 + * @netdev: device to be enabled 4523 + */ 4524 + void et131x_enable_txrx(struct net_device *netdev) 4525 + { 4526 + struct 
et131x_adapter *adapter = netdev_priv(netdev); 4527 + 4528 + /* Enable the Tx and Rx DMA engines (if not already enabled) */ 4529 + et131x_rx_dma_enable(adapter); 4530 + et131x_tx_dma_enable(adapter); 4531 + 4532 + /* Enable device interrupts */ 4533 + if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE) 4534 + et131x_enable_interrupts(adapter); 4535 + 4536 + /* We're ready to move some data, so start the queue */ 4537 + netif_start_queue(netdev); 4538 + } 4539 + 4540 + /** 4541 + * et131x_disable_txrx - Disable tx/rx queues 4542 + * @netdev: device to be disabled 4543 + */ 4544 + void et131x_disable_txrx(struct net_device *netdev) 4545 + { 4546 + struct et131x_adapter *adapter = netdev_priv(netdev); 4547 + 4548 + /* First thing is to stop the queue */ 4549 + netif_stop_queue(netdev); 4550 + 4551 + /* Stop the Tx and Rx DMA engines */ 4552 + et131x_rx_dma_disable(adapter); 4553 + et131x_tx_dma_disable(adapter); 4554 + 4555 + /* Disable device interrupts */ 4556 + et131x_disable_interrupts(adapter); 4557 + } 4558 + 4559 + /** 4560 + * et131x_up - Bring up a device for use. 4561 + * @netdev: device to be opened 4562 + */ 4563 + void et131x_up(struct net_device *netdev) 4564 + { 4565 + struct et131x_adapter *adapter = netdev_priv(netdev); 4566 + 4567 + et131x_enable_txrx(netdev); 4568 + phy_start(adapter->phydev); 4569 + } 4570 + 4571 + /** 4572 + * et131x_open - Open the device for use. 
4573 + * @netdev: device to be opened 4574 + * 4575 + * Returns 0 on success, errno on failure (as defined in errno.h) 4576 + */ 4577 + int et131x_open(struct net_device *netdev) 4578 + { 4579 + int result = 0; 4580 + struct et131x_adapter *adapter = netdev_priv(netdev); 4581 + 4582 + /* Start the timer to track NIC errors */ 4583 + init_timer(&adapter->error_timer); 4584 + adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; 4585 + adapter->error_timer.function = et131x_error_timer_handler; 4586 + adapter->error_timer.data = (unsigned long)adapter; 4587 + add_timer(&adapter->error_timer); 4588 + 4589 + /* Register our IRQ */ 4590 + result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED, 4591 + netdev->name, netdev); 4592 + if (result) { 4593 + dev_err(&adapter->pdev->dev, "could not register IRQ %d\n", 4594 + netdev->irq); 4595 + return result; 4596 + } 4597 + 4598 + adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE; 4599 + 4600 + et131x_up(netdev); 4601 + 4602 + return result; 4603 + } 4604 + 4605 + /** 4606 + * et131x_down - Bring down the device 4607 + * @netdev: device to be broght down 4608 + */ 4609 + void et131x_down(struct net_device *netdev) 4610 + { 4611 + struct et131x_adapter *adapter = netdev_priv(netdev); 4612 + 4613 + /* Save the timestamp for the TX watchdog, prevent a timeout */ 4614 + netdev->trans_start = jiffies; 4615 + 4616 + phy_stop(adapter->phydev); 4617 + et131x_disable_txrx(netdev); 4618 + } 4619 + 4620 + /** 4621 + * et131x_close - Close the device 4622 + * @netdev: device to be closed 4623 + * 4624 + * Returns 0 on success, errno on failure (as defined in errno.h) 4625 + */ 4626 + int et131x_close(struct net_device *netdev) 4627 + { 4628 + struct et131x_adapter *adapter = netdev_priv(netdev); 4629 + 4630 + et131x_down(netdev); 4631 + 4632 + adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; 4633 + free_irq(netdev->irq, netdev); 4634 + 4635 + /* Stop the error timer */ 4636 + return del_timer_sync(&adapter->error_timer); 
4637 + } 4638 + 4639 + /** 4640 + * et131x_ioctl - The I/O Control handler for the driver 4641 + * @netdev: device on which the control request is being made 4642 + * @reqbuf: a pointer to the IOCTL request buffer 4643 + * @cmd: the IOCTL command code 4644 + * 4645 + * Returns 0 on success, errno on failure (as defined in errno.h) 4646 + */ 4647 + static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) 4648 + { 4649 + struct et131x_adapter *adapter = netdev_priv(netdev); 4650 + 4651 + if (!adapter->phydev) 4652 + return -EINVAL; 4653 + 4654 + return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); 4655 + } 4656 + 4657 + /** 4658 + * et131x_set_packet_filter - Configures the Rx Packet filtering on the device 4659 + * @adapter: pointer to our private adapter structure 4660 + * 4661 + * FIXME: lot of dups with MAC code 4662 + * 4663 + * Returns 0 on success, errno on failure 4664 + */ 4665 + static int et131x_set_packet_filter(struct et131x_adapter *adapter) 4666 + { 4667 + int status = 0; 4668 + uint32_t filter = adapter->packet_filter; 4669 + u32 ctrl; 4670 + u32 pf_ctrl; 4671 + 4672 + ctrl = readl(&adapter->regs->rxmac.ctrl); 4673 + pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); 4674 + 4675 + /* Default to disabled packet filtering. Enable it in the individual 4676 + * case statements that require the device to filter something 4677 + */ 4678 + ctrl |= 0x04; 4679 + 4680 + /* Set us to be in promiscuous mode so we receive everything, this 4681 + * is also true when we get a packet filter of 0 4682 + */ 4683 + if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) 4684 + pf_ctrl &= ~7; /* Clear filter bits */ 4685 + else { 4686 + /* 4687 + * Set us up with Multicast packet filtering. Three cases are 4688 + * possible - (1) we have a multi-cast list, (2) we receive ALL 4689 + * multicast entries or (3) we receive none. 
4690 + */ 4691 + if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) 4692 + pf_ctrl &= ~2; /* Multicast filter bit */ 4693 + else { 4694 + et1310_setup_device_for_multicast(adapter); 4695 + pf_ctrl |= 2; 4696 + ctrl &= ~0x04; 4697 + } 4698 + 4699 + /* Set us up with Unicast packet filtering */ 4700 + if (filter & ET131X_PACKET_TYPE_DIRECTED) { 4701 + et1310_setup_device_for_unicast(adapter); 4702 + pf_ctrl |= 4; 4703 + ctrl &= ~0x04; 4704 + } 4705 + 4706 + /* Set us up with Broadcast packet filtering */ 4707 + if (filter & ET131X_PACKET_TYPE_BROADCAST) { 4708 + pf_ctrl |= 1; /* Broadcast filter bit */ 4709 + ctrl &= ~0x04; 4710 + } else 4711 + pf_ctrl &= ~1; 4712 + 4713 + /* Setup the receive mac configuration registers - Packet 4714 + * Filter control + the enable / disable for packet filter 4715 + * in the control reg. 4716 + */ 4717 + writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); 4718 + writel(ctrl, &adapter->regs->rxmac.ctrl); 4719 + } 4720 + return status; 4721 + } 4722 + 4723 + /** 4724 + * et131x_multicast - The handler to configure multicasting on the interface 4725 + * @netdev: a pointer to a net_device struct representing the device 4726 + */ 4727 + static void et131x_multicast(struct net_device *netdev) 4728 + { 4729 + struct et131x_adapter *adapter = netdev_priv(netdev); 4730 + uint32_t packet_filter = 0; 4731 + unsigned long flags; 4732 + struct netdev_hw_addr *ha; 4733 + int i; 4734 + 4735 + spin_lock_irqsave(&adapter->lock, flags); 4736 + 4737 + /* Before we modify the platform-independent filter flags, store them 4738 + * locally. This allows us to determine if anything's changed and if 4739 + * we even need to bother the hardware 4740 + */ 4741 + packet_filter = adapter->packet_filter; 4742 + 4743 + /* Clear the 'multicast' flag locally; because we only have a single 4744 + * flag to check multicast, and multiple multicast addresses can be 4745 + * set, this is the easiest way to determine if more than one 4746 + * multicast address is being set. 
4747 + */ 4748 + packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 4749 + 4750 + /* Check the net_device flags and set the device independent flags 4751 + * accordingly 4752 + */ 4753 + 4754 + if (netdev->flags & IFF_PROMISC) 4755 + adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; 4756 + else 4757 + adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; 4758 + 4759 + if (netdev->flags & IFF_ALLMULTI) 4760 + adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 4761 + 4762 + if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) 4763 + adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 4764 + 4765 + if (netdev_mc_count(netdev) < 1) { 4766 + adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; 4767 + adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 4768 + } else 4769 + adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; 4770 + 4771 + /* Set values in the private adapter struct */ 4772 + i = 0; 4773 + netdev_for_each_mc_addr(ha, netdev) { 4774 + if (i == NIC_MAX_MCAST_LIST) 4775 + break; 4776 + memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN); 4777 + } 4778 + adapter->multicast_addr_count = i; 4779 + 4780 + /* Are the new flags different from the previous ones? If not, then no 4781 + * action is required 4782 + * 4783 + * NOTE - This block will always update the multicast_list with the 4784 + * hardware, even if the addresses aren't the same. 
4785 + */ 4786 + if (packet_filter != adapter->packet_filter) { 4787 + /* Call the device's filter function */ 4788 + et131x_set_packet_filter(adapter); 4789 + } 4790 + spin_unlock_irqrestore(&adapter->lock, flags); 4791 + } 4792 + 4793 + /** 4794 + * et131x_tx - The handler to tx a packet on the device 4795 + * @skb: data to be Tx'd 4796 + * @netdev: device on which data is to be Tx'd 4797 + * 4798 + * Returns 0 on success, errno on failure (as defined in errno.h) 4799 + */ 4800 + static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) 4801 + { 4802 + int status = 0; 4803 + 4804 + /* Save the timestamp for the TX timeout watchdog */ 4805 + netdev->trans_start = jiffies; 4806 + 4807 + /* Call the device-specific data Tx routine */ 4808 + status = et131x_send_packets(skb, netdev); 4809 + 4810 + /* Check status and manage the netif queue if necessary */ 4811 + if (status != 0) { 4812 + if (status == -ENOMEM) { 4813 + /* Put the queue to sleep until resources are 4814 + * available 4815 + */ 4816 + netif_stop_queue(netdev); 4817 + status = NETDEV_TX_BUSY; 4818 + } else { 4819 + status = NETDEV_TX_OK; 4820 + } 4821 + } 4822 + return status; 4823 + } 4824 + 4825 + /** 4826 + * et131x_tx_timeout - Timeout handler 4827 + * @netdev: a pointer to a net_device struct representing the device 4828 + * 4829 + * The handler called when a Tx request times out. The timeout period is 4830 + * specified by the 'tx_timeo" element in the net_device structure (see 4831 + * et131x_alloc_device() to see how this value is set). 4832 + */ 4833 + static void et131x_tx_timeout(struct net_device *netdev) 4834 + { 4835 + struct et131x_adapter *adapter = netdev_priv(netdev); 4836 + struct tcb *tcb; 4837 + unsigned long flags; 4838 + 4839 + /* If the device is closed, ignore the timeout */ 4840 + if (~(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)) 4841 + return; 4842 + 4843 + /* Any nonrecoverable hardware error? 
4844 + * Checks adapter->flags for any failure in phy reading 4845 + */ 4846 + if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR) 4847 + return; 4848 + 4849 + /* Hardware failure? */ 4850 + if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) { 4851 + dev_err(&adapter->pdev->dev, "hardware error - reset\n"); 4852 + return; 4853 + } 4854 + 4855 + /* Is send stuck? */ 4856 + spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 4857 + 4858 + tcb = adapter->tx_ring.send_head; 4859 + 4860 + if (tcb != NULL) { 4861 + tcb->count++; 4862 + 4863 + if (tcb->count > NIC_SEND_HANG_THRESHOLD) { 4864 + spin_unlock_irqrestore(&adapter->tcb_send_qlock, 4865 + flags); 4866 + 4867 + dev_warn(&adapter->pdev->dev, 4868 + "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n", 4869 + tcb->index, 4870 + tcb->flags); 4871 + 4872 + adapter->net_stats.tx_errors++; 4873 + 4874 + /* perform reset of tx/rx */ 4875 + et131x_disable_txrx(netdev); 4876 + et131x_enable_txrx(netdev); 4877 + return; 4878 + } 4879 + } 4880 + 4881 + spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 4882 + } 4883 + 4884 + /** 4885 + * et131x_change_mtu - The handler called to change the MTU for the device 4886 + * @netdev: device whose MTU is to be changed 4887 + * @new_mtu: the desired MTU 4888 + * 4889 + * Returns 0 on success, errno on failure (as defined in errno.h) 4890 + */ 4891 + static int et131x_change_mtu(struct net_device *netdev, int new_mtu) 4892 + { 4893 + int result = 0; 4894 + struct et131x_adapter *adapter = netdev_priv(netdev); 4895 + 4896 + /* Make sure the requested MTU is valid */ 4897 + if (new_mtu < 64 || new_mtu > 9216) 4898 + return -EINVAL; 4899 + 4900 + et131x_disable_txrx(netdev); 4901 + et131x_handle_send_interrupt(adapter); 4902 + et131x_handle_recv_interrupt(adapter); 4903 + 4904 + /* Set the new MTU */ 4905 + netdev->mtu = new_mtu; 4906 + 4907 + /* Free Rx DMA memory */ 4908 + et131x_adapter_memory_free(adapter); 4909 + 4910 + /* Set the config parameter for Jumbo Packet support */ 
4911 + adapter->registry_jumbo_packet = new_mtu + 14; 4912 + et131x_soft_reset(adapter); 4913 + 4914 + /* Alloc and init Rx DMA memory */ 4915 + result = et131x_adapter_memory_alloc(adapter); 4916 + if (result != 0) { 4917 + dev_warn(&adapter->pdev->dev, 4918 + "Change MTU failed; couldn't re-alloc DMA memory\n"); 4919 + return result; 4920 + } 4921 + 4922 + et131x_init_send(adapter); 4923 + 4924 + et131x_hwaddr_init(adapter); 4925 + memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 4926 + 4927 + /* Init the device with the new settings */ 4928 + et131x_adapter_setup(adapter); 4929 + 4930 + et131x_enable_txrx(netdev); 4931 + 4932 + return result; 4933 + } 4934 + 4935 + /** 4936 + * et131x_set_mac_addr - handler to change the MAC address for the device 4937 + * @netdev: device whose MAC is to be changed 4938 + * @new_mac: the desired MAC address 4939 + * 4940 + * Returns 0 on success, errno on failure (as defined in errno.h) 4941 + * 4942 + * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 4943 + */ 4944 + static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) 4945 + { 4946 + int result = 0; 4947 + struct et131x_adapter *adapter = netdev_priv(netdev); 4948 + struct sockaddr *address = new_mac; 4949 + 4950 + /* begin blux */ 4951 + 4952 + if (adapter == NULL) 4953 + return -ENODEV; 4954 + 4955 + /* Make sure the requested MAC is valid */ 4956 + if (!is_valid_ether_addr(address->sa_data)) 4957 + return -EINVAL; 4958 + 4959 + et131x_disable_txrx(netdev); 4960 + et131x_handle_send_interrupt(adapter); 4961 + et131x_handle_recv_interrupt(adapter); 4962 + 4963 + /* Set the new MAC */ 4964 + /* netdev->set_mac_address = &new_mac; */ 4965 + 4966 + memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); 4967 + 4968 + printk(KERN_INFO "%s: Setting MAC address to %pM\n", 4969 + netdev->name, netdev->dev_addr); 4970 + 4971 + /* Free Rx DMA memory */ 4972 + et131x_adapter_memory_free(adapter); 4973 + 4974 + et131x_soft_reset(adapter); 4975 + 
4976 + /* Alloc and init Rx DMA memory */ 4977 + result = et131x_adapter_memory_alloc(adapter); 4978 + if (result != 0) { 4979 + dev_err(&adapter->pdev->dev, 4980 + "Change MAC failed; couldn't re-alloc DMA memory\n"); 4981 + return result; 4982 + } 4983 + 4984 + et131x_init_send(adapter); 4985 + 4986 + et131x_hwaddr_init(adapter); 4987 + 4988 + /* Init the device with the new settings */ 4989 + et131x_adapter_setup(adapter); 4990 + 4991 + et131x_enable_txrx(netdev); 4992 + 4993 + return result; 4994 + } 4995 + 4996 + static const struct net_device_ops et131x_netdev_ops = { 4997 + .ndo_open = et131x_open, 4998 + .ndo_stop = et131x_close, 4999 + .ndo_start_xmit = et131x_tx, 5000 + .ndo_set_multicast_list = et131x_multicast, 5001 + .ndo_tx_timeout = et131x_tx_timeout, 5002 + .ndo_change_mtu = et131x_change_mtu, 5003 + .ndo_set_mac_address = et131x_set_mac_addr, 5004 + .ndo_validate_addr = eth_validate_addr, 5005 + .ndo_get_stats = et131x_stats, 5006 + .ndo_do_ioctl = et131x_ioctl, 5007 + }; 5008 + 5009 + /** 5010 + * et131x_device_alloc 5011 + * 5012 + * Returns pointer to the allocated and initialized net_device struct for 5013 + * this device. 5014 + * 5015 + * Create instances of net_device and wl_private for the new adapter and 5016 + * register the device's entry points in the net_device structure. 5017 + */ 5018 + struct net_device *et131x_device_alloc(void) 5019 + { 5020 + struct net_device *netdev; 5021 + 5022 + /* Alloc net_device and adapter structs */ 5023 + netdev = alloc_etherdev(sizeof(struct et131x_adapter)); 5024 + 5025 + if (!netdev) { 5026 + printk(KERN_ERR "et131x: Alloc of net_device struct failed\n"); 5027 + return NULL; 5028 + } 5029 + 5030 + /* 5031 + * Setup the function registration table (and other data) for a 5032 + * net_device 5033 + */ 5034 + netdev->watchdog_timeo = ET131X_TX_TIMEOUT; 5035 + netdev->netdev_ops = &et131x_netdev_ops; 5036 + 5037 + /* Poll? 
*/ 5038 + /* netdev->poll = &et131x_poll; */ 5039 + /* netdev->poll_controller = &et131x_poll_controller; */ 5040 + return netdev; 5041 + } 5042 +
+60 -2
drivers/staging/et131x/et131x.h
··· 1 1 /* 2 - * Merged from files 3 - * 4 2 * Copyright © 2005 Agere Systems Inc. 5 3 * All rights reserved. 6 4 * http://www.agere.com ··· 48 50 49 51 #define DRIVER_NAME "et131x" 50 52 #define DRIVER_VERSION "v2.0" 53 + 54 + /* EEPROM defines */ 55 + 56 + /* LBCIF Register Groups (addressed via 32-bit offsets) */ 57 + #define LBCIF_DWORD0_GROUP 0xAC 58 + #define LBCIF_DWORD1_GROUP 0xB0 59 + 60 + /* LBCIF Registers (addressed via 8-bit offsets) */ 61 + #define LBCIF_ADDRESS_REGISTER 0xAC 62 + #define LBCIF_DATA_REGISTER 0xB0 63 + #define LBCIF_CONTROL_REGISTER 0xB1 64 + #define LBCIF_STATUS_REGISTER 0xB2 65 + 66 + /* LBCIF Control Register Bits */ 67 + #define LBCIF_CONTROL_SEQUENTIAL_READ 0x01 68 + #define LBCIF_CONTROL_PAGE_WRITE 0x02 69 + #define LBCIF_CONTROL_EEPROM_RELOAD 0x08 70 + #define LBCIF_CONTROL_TWO_BYTE_ADDR 0x20 71 + #define LBCIF_CONTROL_I2C_WRITE 0x40 72 + #define LBCIF_CONTROL_LBCIF_ENABLE 0x80 73 + 74 + /* LBCIF Status Register Bits */ 75 + #define LBCIF_STATUS_PHY_QUEUE_AVAIL 0x01 76 + #define LBCIF_STATUS_I2C_IDLE 0x02 77 + #define LBCIF_STATUS_ACK_ERROR 0x04 78 + #define LBCIF_STATUS_GENERAL_ERROR 0x08 79 + #define LBCIF_STATUS_CHECKSUM_ERROR 0x40 80 + #define LBCIF_STATUS_EEPROM_PRESENT 0x80 81 + 82 + /* Miscellaneous Constraints */ 83 + #define MAX_NUM_REGISTER_POLLS 1000 84 + #define MAX_NUM_WRITE_RETRIES 2 85 + 86 + /* MAC defines */ 87 + #define COUNTER_WRAP_16_BIT 0x10000 88 + #define COUNTER_WRAP_12_BIT 0x1000 89 + 90 + /* PCI defines */ 91 + #define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ 92 + #define INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ 93 + 94 + /* ISR defines */ 95 + /* 96 + * For interrupts, normal running is: 97 + * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, 98 + * watchdog_interrupt & txdma_xfer_done 99 + * 100 + * In both cases, when flow control is enabled for either Tx or bi-direction, 101 + * we additionally enable rx_fbr0_low and rx_fbr1_low, so we know when the 102 + * buffer rings are 
running low. 103 + */ 104 + #define INT_MASK_DISABLE 0xffffffff 105 + 106 + /* NOTE: Masking out MAC_STAT Interrupt for now... 107 + * #define INT_MASK_ENABLE 0xfff6bf17 108 + * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 109 + */ 110 + #define INT_MASK_ENABLE 0xfffebf17 111 + #define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 112 + 51 113 52 114 /* et131x_eeprom.c */ 53 115 int et131x_init_eeprom(struct et131x_adapter *adapter);
-201
drivers/staging/et131x/et131x_ethtool.c
··· 1 - /* 2 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 3 - * 4 - * This program is free software; you can redistribute it and/or modify 5 - * it under the terms of the GNU General Public License as published by 6 - * the Free Software Foundation; either version 2 of the License, or 7 - * (at your option) any later version. 8 - * 9 - * This program is distributed in the hope that it will be useful, 10 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 - * GNU General Public License for more details. 13 - * 14 - * You should have received a copy of the GNU General Public License 15 - * along with this program; if not, write to the Free Software 16 - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 17 - */ 18 - 19 - #include "et131x_defs.h" 20 - 21 - #include <linux/types.h> 22 - #include <linux/interrupt.h> 23 - #include <linux/netdevice.h> 24 - #include <linux/ethtool.h> 25 - #include <linux/phy.h> 26 - #include <linux/pci.h> 27 - 28 - #include "et131x_adapter.h" 29 - #include "et1310_phy.h" 30 - #include "et131x.h" 31 - 32 - static int et131x_get_settings(struct net_device *netdev, 33 - struct ethtool_cmd *cmd) 34 - { 35 - struct et131x_adapter *adapter = netdev_priv(netdev); 36 - 37 - return phy_ethtool_gset(adapter->phydev, cmd); 38 - } 39 - 40 - static int et131x_set_settings(struct net_device *netdev, 41 - struct ethtool_cmd *cmd) 42 - { 43 - struct et131x_adapter *adapter = netdev_priv(netdev); 44 - 45 - return phy_ethtool_sset(adapter->phydev, cmd); 46 - } 47 - 48 - static int et131x_get_regs_len(struct net_device *netdev) 49 - { 50 - #define ET131X_REGS_LEN 256 51 - return ET131X_REGS_LEN * sizeof(u32); 52 - } 53 - 54 - static void et131x_get_regs(struct net_device *netdev, 55 - struct ethtool_regs *regs, void *regs_data) 56 - { 57 - struct et131x_adapter *adapter = netdev_priv(netdev); 58 - struct address_map __iomem *aregs = 
adapter->regs; 59 - u32 *regs_buff = regs_data; 60 - u32 num = 0; 61 - 62 - memset(regs_data, 0, et131x_get_regs_len(netdev)); 63 - 64 - regs->version = (1 << 24) | (adapter->pdev->revision << 16) | 65 - adapter->pdev->device; 66 - 67 - /* PHY regs */ 68 - et131x_mii_read(adapter, MII_BMCR, (u16 *)&regs_buff[num++]); 69 - et131x_mii_read(adapter, MII_BMSR, (u16 *)&regs_buff[num++]); 70 - et131x_mii_read(adapter, MII_PHYSID1, (u16 *)&regs_buff[num++]); 71 - et131x_mii_read(adapter, MII_PHYSID2, (u16 *)&regs_buff[num++]); 72 - et131x_mii_read(adapter, MII_ADVERTISE, (u16 *)&regs_buff[num++]); 73 - et131x_mii_read(adapter, MII_LPA, (u16 *)&regs_buff[num++]); 74 - et131x_mii_read(adapter, MII_EXPANSION, (u16 *)&regs_buff[num++]); 75 - /* Autoneg next page transmit reg */ 76 - et131x_mii_read(adapter, 0x07, (u16 *)&regs_buff[num++]); 77 - /* Link partner next page reg */ 78 - et131x_mii_read(adapter, 0x08, (u16 *)&regs_buff[num++]); 79 - et131x_mii_read(adapter, MII_CTRL1000, (u16 *)&regs_buff[num++]); 80 - et131x_mii_read(adapter, MII_STAT1000, (u16 *)&regs_buff[num++]); 81 - et131x_mii_read(adapter, MII_ESTATUS, (u16 *)&regs_buff[num++]); 82 - et131x_mii_read(adapter, PHY_INDEX_REG, (u16 *)&regs_buff[num++]); 83 - et131x_mii_read(adapter, PHY_DATA_REG, (u16 *)&regs_buff[num++]); 84 - et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 85 - (u16 *)&regs_buff[num++]); 86 - et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL, 87 - (u16 *)&regs_buff[num++]); 88 - et131x_mii_read(adapter, PHY_LOOPBACK_CONTROL+1, 89 - (u16 *)&regs_buff[num++]); 90 - et131x_mii_read(adapter, PHY_REGISTER_MGMT_CONTROL, 91 - (u16 *)&regs_buff[num++]); 92 - et131x_mii_read(adapter, PHY_CONFIG, (u16 *)&regs_buff[num++]); 93 - et131x_mii_read(adapter, PHY_PHY_CONTROL, (u16 *)&regs_buff[num++]); 94 - et131x_mii_read(adapter, PHY_INTERRUPT_MASK, (u16 *)&regs_buff[num++]); 95 - et131x_mii_read(adapter, PHY_INTERRUPT_STATUS, 96 - (u16 *)&regs_buff[num++]); 97 - et131x_mii_read(adapter, PHY_PHY_STATUS, (u16 
*)&regs_buff[num++]); 98 - et131x_mii_read(adapter, PHY_LED_1, (u16 *)&regs_buff[num++]); 99 - et131x_mii_read(adapter, PHY_LED_2, (u16 *)&regs_buff[num++]); 100 - 101 - /* Global regs */ 102 - regs_buff[num++] = readl(&aregs->global.txq_start_addr); 103 - regs_buff[num++] = readl(&aregs->global.txq_end_addr); 104 - regs_buff[num++] = readl(&aregs->global.rxq_start_addr); 105 - regs_buff[num++] = readl(&aregs->global.rxq_end_addr); 106 - regs_buff[num++] = readl(&aregs->global.pm_csr); 107 - regs_buff[num++] = adapter->stats.interrupt_status; 108 - regs_buff[num++] = readl(&aregs->global.int_mask); 109 - regs_buff[num++] = readl(&aregs->global.int_alias_clr_en); 110 - regs_buff[num++] = readl(&aregs->global.int_status_alias); 111 - regs_buff[num++] = readl(&aregs->global.sw_reset); 112 - regs_buff[num++] = readl(&aregs->global.slv_timer); 113 - regs_buff[num++] = readl(&aregs->global.msi_config); 114 - regs_buff[num++] = readl(&aregs->global.loopback); 115 - regs_buff[num++] = readl(&aregs->global.watchdog_timer); 116 - 117 - /* TXDMA regs */ 118 - regs_buff[num++] = readl(&aregs->txdma.csr); 119 - regs_buff[num++] = readl(&aregs->txdma.pr_base_hi); 120 - regs_buff[num++] = readl(&aregs->txdma.pr_base_lo); 121 - regs_buff[num++] = readl(&aregs->txdma.pr_num_des); 122 - regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr); 123 - regs_buff[num++] = readl(&aregs->txdma.txq_wr_addr_ext); 124 - regs_buff[num++] = readl(&aregs->txdma.txq_rd_addr); 125 - regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_hi); 126 - regs_buff[num++] = readl(&aregs->txdma.dma_wb_base_lo); 127 - regs_buff[num++] = readl(&aregs->txdma.service_request); 128 - regs_buff[num++] = readl(&aregs->txdma.service_complete); 129 - regs_buff[num++] = readl(&aregs->txdma.cache_rd_index); 130 - regs_buff[num++] = readl(&aregs->txdma.cache_wr_index); 131 - regs_buff[num++] = readl(&aregs->txdma.tx_dma_error); 132 - regs_buff[num++] = readl(&aregs->txdma.desc_abort_cnt); 133 - regs_buff[num++] = 
readl(&aregs->txdma.payload_abort_cnt); 134 - regs_buff[num++] = readl(&aregs->txdma.writeback_abort_cnt); 135 - regs_buff[num++] = readl(&aregs->txdma.desc_timeout_cnt); 136 - regs_buff[num++] = readl(&aregs->txdma.payload_timeout_cnt); 137 - regs_buff[num++] = readl(&aregs->txdma.writeback_timeout_cnt); 138 - regs_buff[num++] = readl(&aregs->txdma.desc_error_cnt); 139 - regs_buff[num++] = readl(&aregs->txdma.payload_error_cnt); 140 - regs_buff[num++] = readl(&aregs->txdma.writeback_error_cnt); 141 - regs_buff[num++] = readl(&aregs->txdma.dropped_tlp_cnt); 142 - regs_buff[num++] = readl(&aregs->txdma.new_service_complete); 143 - regs_buff[num++] = readl(&aregs->txdma.ethernet_packet_cnt); 144 - 145 - /* RXDMA regs */ 146 - regs_buff[num++] = readl(&aregs->rxdma.csr); 147 - regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_hi); 148 - regs_buff[num++] = readl(&aregs->rxdma.dma_wb_base_lo); 149 - regs_buff[num++] = readl(&aregs->rxdma.num_pkt_done); 150 - regs_buff[num++] = readl(&aregs->rxdma.max_pkt_time); 151 - regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr); 152 - regs_buff[num++] = readl(&aregs->rxdma.rxq_rd_addr_ext); 153 - regs_buff[num++] = readl(&aregs->rxdma.rxq_wr_addr); 154 - regs_buff[num++] = readl(&aregs->rxdma.psr_base_hi); 155 - regs_buff[num++] = readl(&aregs->rxdma.psr_base_lo); 156 - regs_buff[num++] = readl(&aregs->rxdma.psr_num_des); 157 - regs_buff[num++] = readl(&aregs->rxdma.psr_avail_offset); 158 - regs_buff[num++] = readl(&aregs->rxdma.psr_full_offset); 159 - regs_buff[num++] = readl(&aregs->rxdma.psr_access_index); 160 - regs_buff[num++] = readl(&aregs->rxdma.psr_min_des); 161 - regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_lo); 162 - regs_buff[num++] = readl(&aregs->rxdma.fbr0_base_hi); 163 - regs_buff[num++] = readl(&aregs->rxdma.fbr0_num_des); 164 - regs_buff[num++] = readl(&aregs->rxdma.fbr0_avail_offset); 165 - regs_buff[num++] = readl(&aregs->rxdma.fbr0_full_offset); 166 - regs_buff[num++] = 
readl(&aregs->rxdma.fbr0_rd_index); 167 - regs_buff[num++] = readl(&aregs->rxdma.fbr0_min_des); 168 - regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_lo); 169 - regs_buff[num++] = readl(&aregs->rxdma.fbr1_base_hi); 170 - regs_buff[num++] = readl(&aregs->rxdma.fbr1_num_des); 171 - regs_buff[num++] = readl(&aregs->rxdma.fbr1_avail_offset); 172 - regs_buff[num++] = readl(&aregs->rxdma.fbr1_full_offset); 173 - regs_buff[num++] = readl(&aregs->rxdma.fbr1_rd_index); 174 - regs_buff[num++] = readl(&aregs->rxdma.fbr1_min_des); 175 - } 176 - 177 - #define ET131X_DRVINFO_LEN 32 /* value from ethtool.h */ 178 - static void et131x_get_drvinfo(struct net_device *netdev, 179 - struct ethtool_drvinfo *info) 180 - { 181 - struct et131x_adapter *adapter = netdev_priv(netdev); 182 - 183 - strncpy(info->driver, DRIVER_NAME, ET131X_DRVINFO_LEN); 184 - strncpy(info->version, DRIVER_VERSION, ET131X_DRVINFO_LEN); 185 - strncpy(info->bus_info, pci_name(adapter->pdev), ET131X_DRVINFO_LEN); 186 - } 187 - 188 - static struct ethtool_ops et131x_ethtool_ops = { 189 - .get_settings = et131x_get_settings, 190 - .set_settings = et131x_set_settings, 191 - .get_drvinfo = et131x_get_drvinfo, 192 - .get_regs_len = et131x_get_regs_len, 193 - .get_regs = et131x_get_regs, 194 - .get_link = ethtool_op_get_link, 195 - }; 196 - 197 - void et131x_set_ethtool_ops(struct net_device *netdev) 198 - { 199 - SET_ETHTOOL_OPS(netdev, &et131x_ethtool_ops); 200 - } 201 -
-950
drivers/staging/et131x/et131x_initpci.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs 4 - * 5 - * Copyright © 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et131x_initpci.c - Routines and data used to register the driver with the 14 - * PCI (and PCI Express) subsystem, as well as basic driver 15 - * init and startup. 16 - * 17 - *------------------------------------------------------------------------------ 18 - * 19 - * SOFTWARE LICENSE 20 - * 21 - * This software is provided subject to the following terms and conditions, 22 - * which you should read carefully before using the software. Using this 23 - * software indicates your acceptance of these terms and conditions. If you do 24 - * not agree with these terms and conditions, do not use the software. 25 - * 26 - * Copyright © 2005 Agere Systems Inc. 27 - * All rights reserved. 28 - * 29 - * Redistribution and use in source or binary forms, with or without 30 - * modifications, are permitted provided that the following conditions are met: 31 - * 32 - * . Redistributions of source code must retain the above copyright notice, this 33 - * list of conditions and the following Disclaimer as comments in the code as 34 - * well as in the documentation and/or other materials provided with the 35 - * distribution. 36 - * 37 - * . Redistributions in binary form must reproduce the above copyright notice, 38 - * this list of conditions and the following Disclaimer in the documentation 39 - * and/or other materials provided with the distribution. 40 - * 41 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 42 - * may be used to endorse or promote products derived from this software 43 - * without specific prior written permission. 
44 - * 45 - * Disclaimer 46 - * 47 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 48 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 49 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 50 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 51 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 52 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 53 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 54 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 55 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 56 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 57 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 58 - * DAMAGE. 59 - * 60 - */ 61 - 62 - #include "et131x_defs.h" 63 - 64 - #include <linux/pci.h> 65 - #include <linux/init.h> 66 - #include <linux/module.h> 67 - #include <linux/types.h> 68 - #include <linux/kernel.h> 69 - 70 - #include <linux/sched.h> 71 - #include <linux/ptrace.h> 72 - #include <linux/ctype.h> 73 - #include <linux/string.h> 74 - #include <linux/timer.h> 75 - #include <linux/interrupt.h> 76 - #include <linux/in.h> 77 - #include <linux/delay.h> 78 - #include <linux/io.h> 79 - #include <linux/bitops.h> 80 - #include <asm/system.h> 81 - 82 - #include <linux/netdevice.h> 83 - #include <linux/etherdevice.h> 84 - #include <linux/phy.h> 85 - #include <linux/skbuff.h> 86 - #include <linux/if_arp.h> 87 - #include <linux/ioport.h> 88 - #include <linux/random.h> 89 - 90 - #include "et1310_phy.h" 91 - 92 - #include "et131x_adapter.h" 93 - 94 - #include "et1310_address_map.h" 95 - #include "et1310_tx.h" 96 - #include "et1310_rx.h" 97 - #include "et131x.h" 98 - 99 - #define INTERNAL_MEM_SIZE 0x400 /* 1024 of internal memory */ 100 - #define 
INTERNAL_MEM_RX_OFFSET 0x1FF /* 50% Tx, 50% Rx */ 101 - 102 - /** 103 - * et131x_hwaddr_init - set up the MAC Address on the ET1310 104 - * @adapter: pointer to our private adapter structure 105 - */ 106 - void et131x_hwaddr_init(struct et131x_adapter *adapter) 107 - { 108 - /* If have our default mac from init and no mac address from 109 - * EEPROM then we need to generate the last octet and set it on the 110 - * device 111 - */ 112 - if (adapter->rom_addr[0] == 0x00 && 113 - adapter->rom_addr[1] == 0x00 && 114 - adapter->rom_addr[2] == 0x00 && 115 - adapter->rom_addr[3] == 0x00 && 116 - adapter->rom_addr[4] == 0x00 && 117 - adapter->rom_addr[5] == 0x00) { 118 - /* 119 - * We need to randomly generate the last octet so we 120 - * decrease our chances of setting the mac address to 121 - * same as another one of our cards in the system 122 - */ 123 - get_random_bytes(&adapter->addr[5], 1); 124 - /* 125 - * We have the default value in the register we are 126 - * working with so we need to copy the current 127 - * address into the permanent address 128 - */ 129 - memcpy(adapter->rom_addr, 130 - adapter->addr, ETH_ALEN); 131 - } else { 132 - /* We do not have an override address, so set the 133 - * current address to the permanent address and add 134 - * it to the device 135 - */ 136 - memcpy(adapter->addr, 137 - adapter->rom_addr, ETH_ALEN); 138 - } 139 - } 140 - 141 - /** 142 - * et131x_pci_init - initial PCI setup 143 - * @adapter: pointer to our private adapter structure 144 - * @pdev: our PCI device 145 - * 146 - * Perform the initial setup of PCI registers and if possible initialise 147 - * the MAC address. At this point the I/O registers have yet to be mapped 148 - */ 149 - static int et131x_pci_init(struct et131x_adapter *adapter, 150 - struct pci_dev *pdev) 151 - { 152 - int i; 153 - u8 max_payload; 154 - u8 read_size_reg; 155 - 156 - if (et131x_init_eeprom(adapter) < 0) 157 - return -EIO; 158 - 159 - /* Let's set up the PORT LOGIC Register. 
First we need to know what 160 - * the max_payload_size is 161 - */ 162 - if (pci_read_config_byte(pdev, ET1310_PCI_MAX_PYLD, &max_payload)) { 163 - dev_err(&pdev->dev, 164 - "Could not read PCI config space for Max Payload Size\n"); 165 - return -EIO; 166 - } 167 - 168 - /* Program the Ack/Nak latency and replay timers */ 169 - max_payload &= 0x07; /* Only the lower 3 bits are valid */ 170 - 171 - if (max_payload < 2) { 172 - static const u16 acknak[2] = { 0x76, 0xD0 }; 173 - static const u16 replay[2] = { 0x1E0, 0x2ED }; 174 - 175 - if (pci_write_config_word(pdev, ET1310_PCI_ACK_NACK, 176 - acknak[max_payload])) { 177 - dev_err(&pdev->dev, 178 - "Could not write PCI config space for ACK/NAK\n"); 179 - return -EIO; 180 - } 181 - if (pci_write_config_word(pdev, ET1310_PCI_REPLAY, 182 - replay[max_payload])) { 183 - dev_err(&pdev->dev, 184 - "Could not write PCI config space for Replay Timer\n"); 185 - return -EIO; 186 - } 187 - } 188 - 189 - /* l0s and l1 latency timers. We are using default values. 
190 - * Representing 001 for L0s and 010 for L1 191 - */ 192 - if (pci_write_config_byte(pdev, ET1310_PCI_L0L1LATENCY, 0x11)) { 193 - dev_err(&pdev->dev, 194 - "Could not write PCI config space for Latency Timers\n"); 195 - return -EIO; 196 - } 197 - 198 - /* Change the max read size to 2k */ 199 - if (pci_read_config_byte(pdev, 0x51, &read_size_reg)) { 200 - dev_err(&pdev->dev, 201 - "Could not read PCI config space for Max read size\n"); 202 - return -EIO; 203 - } 204 - 205 - read_size_reg &= 0x8f; 206 - read_size_reg |= 0x40; 207 - 208 - if (pci_write_config_byte(pdev, 0x51, read_size_reg)) { 209 - dev_err(&pdev->dev, 210 - "Could not write PCI config space for Max read size\n"); 211 - return -EIO; 212 - } 213 - 214 - /* Get MAC address from config space if an eeprom exists, otherwise 215 - * the MAC address there will not be valid 216 - */ 217 - if (!adapter->has_eeprom) { 218 - et131x_hwaddr_init(adapter); 219 - return 0; 220 - } 221 - 222 - for (i = 0; i < ETH_ALEN; i++) { 223 - if (pci_read_config_byte(pdev, ET1310_PCI_MAC_ADDRESS + i, 224 - adapter->rom_addr + i)) { 225 - dev_err(&pdev->dev, "Could not read PCI config space for MAC address\n"); 226 - return -EIO; 227 - } 228 - } 229 - memcpy(adapter->addr, adapter->rom_addr, ETH_ALEN); 230 - return 0; 231 - } 232 - 233 - /** 234 - * et131x_error_timer_handler 235 - * @data: timer-specific variable; here a pointer to our adapter structure 236 - * 237 - * The routine called when the error timer expires, to track the number of 238 - * recurring errors. 239 - */ 240 - void et131x_error_timer_handler(unsigned long data) 241 - { 242 - struct et131x_adapter *adapter = (struct et131x_adapter *) data; 243 - struct phy_device *phydev = adapter->phydev; 244 - 245 - if (et1310_in_phy_coma(adapter)) { 246 - /* Bring the device immediately out of coma, to 247 - * prevent it from sleeping indefinitely, this 248 - * mechanism could be improved! 
*/ 249 - et1310_disable_phy_coma(adapter); 250 - adapter->boot_coma = 20; 251 - } else { 252 - et1310_update_macstat_host_counters(adapter); 253 - } 254 - 255 - if (!phydev->link && adapter->boot_coma < 11) 256 - adapter->boot_coma++; 257 - 258 - if (adapter->boot_coma == 10) { 259 - if (!phydev->link) { 260 - if (!et1310_in_phy_coma(adapter)) { 261 - /* NOTE - This was originally a 'sync with 262 - * interrupt'. How to do that under Linux? 263 - */ 264 - et131x_enable_interrupts(adapter); 265 - et1310_enable_phy_coma(adapter); 266 - } 267 - } 268 - } 269 - 270 - /* This is a periodic timer, so reschedule */ 271 - mod_timer(&adapter->error_timer, jiffies + 272 - TX_ERROR_PERIOD * HZ / 1000); 273 - } 274 - 275 - /** 276 - * et131x_configure_global_regs - configure JAGCore global regs 277 - * @adapter: pointer to our adapter structure 278 - * 279 - * Used to configure the global registers on the JAGCore 280 - */ 281 - void et131x_configure_global_regs(struct et131x_adapter *adapter) 282 - { 283 - struct global_regs __iomem *regs = &adapter->regs->global; 284 - 285 - writel(0, &regs->rxq_start_addr); 286 - writel(INTERNAL_MEM_SIZE - 1, &regs->txq_end_addr); 287 - 288 - if (adapter->registry_jumbo_packet < 2048) { 289 - /* Tx / RxDMA and Tx/Rx MAC interfaces have a 1k word 290 - * block of RAM that the driver can split between Tx 291 - * and Rx as it desires. Our default is to split it 292 - * 50/50: 293 - */ 294 - writel(PARM_RX_MEM_END_DEF, &regs->rxq_end_addr); 295 - writel(PARM_RX_MEM_END_DEF + 1, &regs->txq_start_addr); 296 - } else if (adapter->registry_jumbo_packet < 8192) { 297 - /* For jumbo packets > 2k but < 8k, split 50-50. */ 298 - writel(INTERNAL_MEM_RX_OFFSET, &regs->rxq_end_addr); 299 - writel(INTERNAL_MEM_RX_OFFSET + 1, &regs->txq_start_addr); 300 - } else { 301 - /* 9216 is the only packet size greater than 8k that 302 - * is available. The Tx buffer has to be big enough 303 - * for one whole packet on the Tx side. 
We'll make 304 - * the Tx 9408, and give the rest to Rx 305 - */ 306 - writel(0x01b3, &regs->rxq_end_addr); 307 - writel(0x01b4, &regs->txq_start_addr); 308 - } 309 - 310 - /* Initialize the loopback register. Disable all loopbacks. */ 311 - writel(0, &regs->loopback); 312 - 313 - /* MSI Register */ 314 - writel(0, &regs->msi_config); 315 - 316 - /* By default, disable the watchdog timer. It will be enabled when 317 - * a packet is queued. 318 - */ 319 - writel(0, &regs->watchdog_timer); 320 - } 321 - 322 - /** 323 - * et131x_adapter_setup - Set the adapter up as per cassini+ documentation 324 - * @adapter: pointer to our private adapter structure 325 - * 326 - * Returns 0 on success, errno on failure (as defined in errno.h) 327 - */ 328 - void et131x_adapter_setup(struct et131x_adapter *adapter) 329 - { 330 - /* Configure the JAGCore */ 331 - et131x_configure_global_regs(adapter); 332 - 333 - et1310_config_mac_regs1(adapter); 334 - 335 - /* Configure the MMC registers */ 336 - /* All we need to do is initialize the Memory Control Register */ 337 - writel(ET_MMC_ENABLE, &adapter->regs->mmc.mmc_ctrl); 338 - 339 - et1310_config_rxmac_regs(adapter); 340 - et1310_config_txmac_regs(adapter); 341 - 342 - et131x_config_rx_dma_regs(adapter); 343 - et131x_config_tx_dma_regs(adapter); 344 - 345 - et1310_config_macstat_regs(adapter); 346 - 347 - et1310_phy_power_down(adapter, 0); 348 - et131x_xcvr_init(adapter); 349 - } 350 - 351 - /** 352 - * et131x_soft_reset - Issue a soft reset to the hardware, complete for ET1310 353 - * @adapter: pointer to our private adapter structure 354 - */ 355 - void et131x_soft_reset(struct et131x_adapter *adapter) 356 - { 357 - /* Disable MAC Core */ 358 - writel(0xc00f0000, &adapter->regs->mac.cfg1); 359 - 360 - /* Set everything to a reset value */ 361 - writel(0x7F, &adapter->regs->global.sw_reset); 362 - writel(0x000f0000, &adapter->regs->mac.cfg1); 363 - writel(0x00000000, &adapter->regs->mac.cfg1); 364 - } 365 - 366 - /** 367 - * 
et131x_align_allocated_memory - Align allocated memory on a given boundary 368 - * @adapter: pointer to our adapter structure 369 - * @phys_addr: pointer to Physical address 370 - * @offset: pointer to the offset variable 371 - * @mask: correct mask 372 - */ 373 - void et131x_align_allocated_memory(struct et131x_adapter *adapter, 374 - uint64_t *phys_addr, 375 - uint64_t *offset, uint64_t mask) 376 - { 377 - uint64_t new_addr; 378 - 379 - *offset = 0; 380 - 381 - new_addr = *phys_addr & ~mask; 382 - 383 - if (new_addr != *phys_addr) { 384 - /* Move to next aligned block */ 385 - new_addr += mask + 1; 386 - /* Return offset for adjusting virt addr */ 387 - *offset = new_addr - *phys_addr; 388 - /* Return new physical address */ 389 - *phys_addr = new_addr; 390 - } 391 - } 392 - 393 - /** 394 - * et131x_adapter_memory_alloc 395 - * @adapter: pointer to our private adapter structure 396 - * 397 - * Returns 0 on success, errno on failure (as defined in errno.h). 398 - * 399 - * Allocate all the memory blocks for send, receive and others. 
400 - */ 401 - int et131x_adapter_memory_alloc(struct et131x_adapter *adapter) 402 - { 403 - int status; 404 - 405 - /* Allocate memory for the Tx Ring */ 406 - status = et131x_tx_dma_memory_alloc(adapter); 407 - if (status != 0) { 408 - dev_err(&adapter->pdev->dev, 409 - "et131x_tx_dma_memory_alloc FAILED\n"); 410 - return status; 411 - } 412 - /* Receive buffer memory allocation */ 413 - status = et131x_rx_dma_memory_alloc(adapter); 414 - if (status != 0) { 415 - dev_err(&adapter->pdev->dev, 416 - "et131x_rx_dma_memory_alloc FAILED\n"); 417 - et131x_tx_dma_memory_free(adapter); 418 - return status; 419 - } 420 - 421 - /* Init receive data structures */ 422 - status = et131x_init_recv(adapter); 423 - if (status != 0) { 424 - dev_err(&adapter->pdev->dev, 425 - "et131x_init_recv FAILED\n"); 426 - et131x_tx_dma_memory_free(adapter); 427 - et131x_rx_dma_memory_free(adapter); 428 - } 429 - return status; 430 - } 431 - 432 - /** 433 - * et131x_adapter_memory_free - Free all memory allocated for use by Tx & Rx 434 - * @adapter: pointer to our private adapter structure 435 - */ 436 - void et131x_adapter_memory_free(struct et131x_adapter *adapter) 437 - { 438 - /* Free DMA memory */ 439 - et131x_tx_dma_memory_free(adapter); 440 - et131x_rx_dma_memory_free(adapter); 441 - } 442 - 443 - static void et131x_adjust_link(struct net_device *netdev) 444 - { 445 - struct et131x_adapter *adapter = netdev_priv(netdev); 446 - struct phy_device *phydev = adapter->phydev; 447 - 448 - if (netif_carrier_ok(netdev)) { 449 - adapter->boot_coma = 20; 450 - 451 - if (phydev && phydev->speed == SPEED_10) { 452 - /* 453 - * NOTE - Is there a way to query this without 454 - * TruePHY? 
455 - * && TRU_QueryCoreType(adapter->hTruePhy, 0)== 456 - * EMI_TRUEPHY_A13O) { 457 - */ 458 - u16 register18; 459 - 460 - et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 461 - &register18); 462 - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 463 - register18 | 0x4); 464 - et131x_mii_write(adapter, PHY_INDEX_REG, 465 - register18 | 0x8402); 466 - et131x_mii_write(adapter, PHY_DATA_REG, 467 - register18 | 511); 468 - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 469 - register18); 470 - } 471 - 472 - et1310_config_flow_control(adapter); 473 - 474 - if (phydev && phydev->speed == SPEED_1000 && 475 - adapter->registry_jumbo_packet > 2048) { 476 - u16 reg; 477 - 478 - et131x_mii_read(adapter, PHY_CONFIG, &reg); 479 - reg &= ~ET_PHY_CONFIG_TX_FIFO_DEPTH; 480 - reg |= ET_PHY_CONFIG_FIFO_DEPTH_32; 481 - et131x_mii_write(adapter, PHY_CONFIG, reg); 482 - } 483 - 484 - et131x_set_rx_dma_timer(adapter); 485 - et1310_config_mac_regs2(adapter); 486 - } 487 - 488 - if (phydev && phydev->link != adapter->link) { 489 - /* 490 - * Check to see if we are in coma mode and if 491 - * so, disable it because we will not be able 492 - * to read PHY values until we are out. 493 - */ 494 - if (et1310_in_phy_coma(adapter)) 495 - et1310_disable_phy_coma(adapter); 496 - 497 - if (phydev->link) { 498 - adapter->boot_coma = 20; 499 - } else { 500 - dev_warn(&adapter->pdev->dev, 501 - "Link down - cable problem ?\n"); 502 - adapter->boot_coma = 0; 503 - 504 - if (phydev->speed == SPEED_10) { 505 - /* NOTE - Is there a way to query this without 506 - * TruePHY? 
507 - * && TRU_QueryCoreType(adapter->hTruePhy, 0) == 508 - * EMI_TRUEPHY_A13O) 509 - */ 510 - u16 register18; 511 - 512 - et131x_mii_read(adapter, PHY_MPHY_CONTROL_REG, 513 - &register18); 514 - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 515 - register18 | 0x4); 516 - et131x_mii_write(adapter, PHY_INDEX_REG, 517 - register18 | 0x8402); 518 - et131x_mii_write(adapter, PHY_DATA_REG, 519 - register18 | 511); 520 - et131x_mii_write(adapter, PHY_MPHY_CONTROL_REG, 521 - register18); 522 - } 523 - 524 - /* Free the packets being actively sent & stopped */ 525 - et131x_free_busy_send_packets(adapter); 526 - 527 - /* Re-initialize the send structures */ 528 - et131x_init_send(adapter); 529 - 530 - /* Reset the RFD list and re-start RU */ 531 - et131x_reset_recv(adapter); 532 - 533 - /* 534 - * Bring the device back to the state it was during 535 - * init prior to autonegotiation being complete. This 536 - * way, when we get the auto-neg complete interrupt, 537 - * we can complete init by calling config_mac_regs2. 
538 - */ 539 - et131x_soft_reset(adapter); 540 - 541 - /* Setup ET1310 as per the documentation */ 542 - et131x_adapter_setup(adapter); 543 - 544 - /* perform reset of tx/rx */ 545 - et131x_disable_txrx(netdev); 546 - et131x_enable_txrx(netdev); 547 - } 548 - 549 - adapter->link = phydev->link; 550 - 551 - phy_print_status(phydev); 552 - } 553 - } 554 - 555 - static int et131x_mii_probe(struct net_device *netdev) 556 - { 557 - struct et131x_adapter *adapter = netdev_priv(netdev); 558 - struct phy_device *phydev = NULL; 559 - 560 - phydev = phy_find_first(adapter->mii_bus); 561 - if (!phydev) { 562 - dev_err(&adapter->pdev->dev, "no PHY found\n"); 563 - return -ENODEV; 564 - } 565 - 566 - phydev = phy_connect(netdev, dev_name(&phydev->dev), 567 - &et131x_adjust_link, 0, PHY_INTERFACE_MODE_MII); 568 - 569 - if (IS_ERR(phydev)) { 570 - dev_err(&adapter->pdev->dev, "Could not attach to PHY\n"); 571 - return PTR_ERR(phydev); 572 - } 573 - 574 - phydev->supported &= (SUPPORTED_10baseT_Half 575 - | SUPPORTED_10baseT_Full 576 - | SUPPORTED_100baseT_Half 577 - | SUPPORTED_100baseT_Full 578 - | SUPPORTED_Autoneg 579 - | SUPPORTED_MII 580 - | SUPPORTED_TP); 581 - 582 - if (adapter->pdev->device != ET131X_PCI_DEVICE_ID_FAST) 583 - phydev->supported |= SUPPORTED_1000baseT_Full; 584 - 585 - phydev->advertising = phydev->supported; 586 - adapter->phydev = phydev; 587 - 588 - dev_info(&adapter->pdev->dev, "attached PHY driver [%s] " 589 - "(mii_bus:phy_addr=%s)\n", 590 - phydev->drv->name, dev_name(&phydev->dev)); 591 - 592 - return 0; 593 - } 594 - 595 - /** 596 - * et131x_adapter_init 597 - * @adapter: pointer to the private adapter struct 598 - * @pdev: pointer to the PCI device 599 - * 600 - * Initialize the data structures for the et131x_adapter object and link 601 - * them together with the platform provided device structures. 
602 - */ 603 - static struct et131x_adapter *et131x_adapter_init(struct net_device *netdev, 604 - struct pci_dev *pdev) 605 - { 606 - static const u8 default_mac[] = { 0x00, 0x05, 0x3d, 0x00, 0x02, 0x00 }; 607 - 608 - struct et131x_adapter *adapter; 609 - 610 - /* Allocate private adapter struct and copy in relevant information */ 611 - adapter = netdev_priv(netdev); 612 - adapter->pdev = pci_dev_get(pdev); 613 - adapter->netdev = netdev; 614 - 615 - /* Do the same for the netdev struct */ 616 - netdev->irq = pdev->irq; 617 - netdev->base_addr = pci_resource_start(pdev, 0); 618 - 619 - /* Initialize spinlocks here */ 620 - spin_lock_init(&adapter->lock); 621 - spin_lock_init(&adapter->tcb_send_qlock); 622 - spin_lock_init(&adapter->tcb_ready_qlock); 623 - spin_lock_init(&adapter->send_hw_lock); 624 - spin_lock_init(&adapter->rcv_lock); 625 - spin_lock_init(&adapter->rcv_pend_lock); 626 - spin_lock_init(&adapter->fbr_lock); 627 - spin_lock_init(&adapter->phy_lock); 628 - 629 - adapter->registry_jumbo_packet = 1514; /* 1514-9216 */ 630 - 631 - /* Set the MAC address to a default */ 632 - memcpy(adapter->addr, default_mac, ETH_ALEN); 633 - 634 - return adapter; 635 - } 636 - 637 - /** 638 - * et131x_pci_setup - Perform device initialization 639 - * @pdev: a pointer to the device's pci_dev structure 640 - * @ent: this device's entry in the pci_device_id table 641 - * 642 - * Returns 0 on success, errno on failure (as defined in errno.h) 643 - * 644 - * Registered in the pci_driver structure, this function is called when the 645 - * PCI subsystem finds a new PCI device which matches the information 646 - * contained in the pci_device_id table. This routine is the equivalent to 647 - * a device insertion routine. 
648 - */ 649 - static int __devinit et131x_pci_setup(struct pci_dev *pdev, 650 - const struct pci_device_id *ent) 651 - { 652 - int result; 653 - int pm_cap; 654 - struct net_device *netdev; 655 - struct et131x_adapter *adapter; 656 - int ii; 657 - 658 - result = pci_enable_device(pdev); 659 - if (result) { 660 - dev_err(&pdev->dev, "pci_enable_device() failed\n"); 661 - goto err_out; 662 - } 663 - 664 - /* Perform some basic PCI checks */ 665 - if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 666 - dev_err(&pdev->dev, "Can't find PCI device's base address\n"); 667 - goto err_disable; 668 - } 669 - 670 - if (pci_request_regions(pdev, DRIVER_NAME)) { 671 - dev_err(&pdev->dev, "Can't get PCI resources\n"); 672 - goto err_disable; 673 - } 674 - 675 - pci_set_master(pdev); 676 - 677 - /* Query PCI for Power Mgmt Capabilities 678 - * 679 - * NOTE: Now reading PowerMgmt in another location; is this still 680 - * needed? 681 - */ 682 - pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM); 683 - if (!pm_cap) { 684 - dev_err(&pdev->dev, 685 - "Cannot find Power Management capabilities\n"); 686 - result = -EIO; 687 - goto err_release_res; 688 - } 689 - 690 - /* Check the DMA addressing support of this device */ 691 - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 692 - result = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); 693 - if (result) { 694 - dev_err(&pdev->dev, 695 - "Unable to obtain 64 bit DMA for consistent allocations\n"); 696 - goto err_release_res; 697 - } 698 - } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 699 - result = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 700 - if (result) { 701 - dev_err(&pdev->dev, 702 - "Unable to obtain 32 bit DMA for consistent allocations\n"); 703 - goto err_release_res; 704 - } 705 - } else { 706 - dev_err(&pdev->dev, "No usable DMA addressing method\n"); 707 - result = -EIO; 708 - goto err_release_res; 709 - } 710 - 711 - /* Allocate netdev and private adapter structs */ 712 - netdev = 
et131x_device_alloc(); 713 - if (!netdev) { 714 - dev_err(&pdev->dev, "Couldn't alloc netdev struct\n"); 715 - result = -ENOMEM; 716 - goto err_release_res; 717 - } 718 - 719 - SET_NETDEV_DEV(netdev, &pdev->dev); 720 - et131x_set_ethtool_ops(netdev); 721 - 722 - adapter = et131x_adapter_init(netdev, pdev); 723 - 724 - /* Initialise the PCI setup for the device */ 725 - et131x_pci_init(adapter, pdev); 726 - 727 - /* Map the bus-relative registers to system virtual memory */ 728 - adapter->regs = pci_ioremap_bar(pdev, 0); 729 - if (!adapter->regs) { 730 - dev_err(&pdev->dev, "Cannot map device registers\n"); 731 - result = -ENOMEM; 732 - goto err_free_dev; 733 - } 734 - 735 - /* If Phy COMA mode was enabled when we went down, disable it here. */ 736 - writel(ET_PMCSR_INIT, &adapter->regs->global.pm_csr); 737 - 738 - /* Issue a global reset to the et1310 */ 739 - et131x_soft_reset(adapter); 740 - 741 - /* Disable all interrupts (paranoid) */ 742 - et131x_disable_interrupts(adapter); 743 - 744 - /* Allocate DMA memory */ 745 - result = et131x_adapter_memory_alloc(adapter); 746 - if (result) { 747 - dev_err(&pdev->dev, "Could not alloc adapater memory (DMA)\n"); 748 - goto err_iounmap; 749 - } 750 - 751 - /* Init send data structures */ 752 - et131x_init_send(adapter); 753 - 754 - /* Set up the task structure for the ISR's deferred handler */ 755 - INIT_WORK(&adapter->task, et131x_isr_handler); 756 - 757 - /* Copy address into the net_device struct */ 758 - memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 759 - 760 - /* Init variable for counting how long we do not have link status */ 761 - adapter->boot_coma = 0; 762 - et1310_disable_phy_coma(adapter); 763 - 764 - /* Setup the mii_bus struct */ 765 - adapter->mii_bus = mdiobus_alloc(); 766 - if (!adapter->mii_bus) { 767 - dev_err(&pdev->dev, "Alloc of mii_bus struct failed\n"); 768 - goto err_mem_free; 769 - } 770 - 771 - adapter->mii_bus->name = "et131x_eth_mii"; 772 - snprintf(adapter->mii_bus->id, MII_BUS_ID_SIZE, 
"%x", 773 - (adapter->pdev->bus->number << 8) | adapter->pdev->devfn); 774 - adapter->mii_bus->priv = netdev; 775 - adapter->mii_bus->read = et131x_mdio_read; 776 - adapter->mii_bus->write = et131x_mdio_write; 777 - adapter->mii_bus->reset = et131x_mdio_reset; 778 - adapter->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 779 - if (!adapter->mii_bus->irq) { 780 - dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); 781 - goto err_mdio_free; 782 - } 783 - 784 - for (ii = 0; ii < PHY_MAX_ADDR; ii++) 785 - adapter->mii_bus->irq[ii] = PHY_POLL; 786 - 787 - if (mdiobus_register(adapter->mii_bus)) { 788 - dev_err(&pdev->dev, "failed to register MII bus\n"); 789 - mdiobus_free(adapter->mii_bus); 790 - goto err_mdio_free_irq; 791 - } 792 - 793 - if (et131x_mii_probe(netdev)) { 794 - dev_err(&pdev->dev, "failed to probe MII bus\n"); 795 - goto err_mdio_unregister; 796 - } 797 - 798 - /* Setup et1310 as per the documentation */ 799 - et131x_adapter_setup(adapter); 800 - 801 - /* We can enable interrupts now 802 - * 803 - * NOTE - Because registration of interrupt handler is done in the 804 - * device's open(), defer enabling device interrupts to that 805 - * point 806 - */ 807 - 808 - /* Register the net_device struct with the Linux network layer */ 809 - result = register_netdev(netdev); 810 - if (result != 0) { 811 - dev_err(&pdev->dev, "register_netdev() failed\n"); 812 - goto err_mdio_unregister; 813 - } 814 - 815 - /* Register the net_device struct with the PCI subsystem. Save a copy 816 - * of the PCI config space for this device now that the device has 817 - * been initialized, just in case it needs to be quickly restored. 
818 - */ 819 - pci_set_drvdata(pdev, netdev); 820 - pci_save_state(adapter->pdev); 821 - 822 - return result; 823 - 824 - err_mdio_unregister: 825 - mdiobus_unregister(adapter->mii_bus); 826 - err_mdio_free_irq: 827 - kfree(adapter->mii_bus->irq); 828 - err_mdio_free: 829 - mdiobus_free(adapter->mii_bus); 830 - err_mem_free: 831 - et131x_adapter_memory_free(adapter); 832 - err_iounmap: 833 - iounmap(adapter->regs); 834 - err_free_dev: 835 - pci_dev_put(pdev); 836 - free_netdev(netdev); 837 - err_release_res: 838 - pci_release_regions(pdev); 839 - err_disable: 840 - pci_disable_device(pdev); 841 - err_out: 842 - return result; 843 - } 844 - 845 - /** 846 - * et131x_pci_remove 847 - * @pdev: a pointer to the device's pci_dev structure 848 - * 849 - * Registered in the pci_driver structure, this function is called when the 850 - * PCI subsystem detects that a PCI device which matches the information 851 - * contained in the pci_device_id table has been removed. 852 - */ 853 - static void __devexit et131x_pci_remove(struct pci_dev *pdev) 854 - { 855 - struct net_device *netdev = pci_get_drvdata(pdev); 856 - struct et131x_adapter *adapter = netdev_priv(netdev); 857 - 858 - unregister_netdev(netdev); 859 - mdiobus_unregister(adapter->mii_bus); 860 - kfree(adapter->mii_bus->irq); 861 - mdiobus_free(adapter->mii_bus); 862 - 863 - et131x_adapter_memory_free(adapter); 864 - iounmap(adapter->regs); 865 - pci_dev_put(pdev); 866 - 867 - free_netdev(netdev); 868 - pci_release_regions(pdev); 869 - pci_disable_device(pdev); 870 - } 871 - 872 - #ifdef CONFIG_PM_SLEEP 873 - static int et131x_suspend(struct device *dev) 874 - { 875 - struct pci_dev *pdev = to_pci_dev(dev); 876 - struct net_device *netdev = pci_get_drvdata(pdev); 877 - 878 - if (netif_running(netdev)) { 879 - netif_device_detach(netdev); 880 - et131x_down(netdev); 881 - pci_save_state(pdev); 882 - } 883 - 884 - return 0; 885 - } 886 - 887 - static int et131x_resume(struct device *dev) 888 - { 889 - struct pci_dev 
*pdev = to_pci_dev(dev); 890 - struct net_device *netdev = pci_get_drvdata(pdev); 891 - 892 - if (netif_running(netdev)) { 893 - pci_restore_state(pdev); 894 - et131x_up(netdev); 895 - netif_device_attach(netdev); 896 - } 897 - 898 - return 0; 899 - } 900 - 901 - static SIMPLE_DEV_PM_OPS(et131x_pm_ops, et131x_suspend, et131x_resume); 902 - #define ET131X_PM_OPS (&et131x_pm_ops) 903 - #else 904 - #define ET131X_PM_OPS NULL 905 - #endif 906 - 907 - static struct pci_device_id et131x_pci_table[] __devinitdata = { 908 - {ET131X_PCI_VENDOR_ID, ET131X_PCI_DEVICE_ID_GIG, PCI_ANY_ID, 909 - PCI_ANY_ID, 0, 0, 0UL}, 910 - {ET131X_PCI_VENDOR_ID, ET131X_PCI_DEVICE_ID_FAST, PCI_ANY_ID, 911 - PCI_ANY_ID, 0, 0, 0UL}, 912 - {0,} 913 - }; 914 - 915 - MODULE_DEVICE_TABLE(pci, et131x_pci_table); 916 - 917 - static struct pci_driver et131x_driver = { 918 - .name = DRIVER_NAME, 919 - .id_table = et131x_pci_table, 920 - .probe = et131x_pci_setup, 921 - .remove = __devexit_p(et131x_pci_remove), 922 - .driver.pm = ET131X_PM_OPS, 923 - }; 924 - 925 - /** 926 - * et131x_init_module - The "main" entry point called on driver initialization 927 - * 928 - * Returns 0 on success, errno on failure (as defined in errno.h) 929 - */ 930 - static int __init et131x_init_module(void) 931 - { 932 - return pci_register_driver(&et131x_driver); 933 - } 934 - 935 - /** 936 - * et131x_cleanup_module - The entry point called on driver cleanup 937 - */ 938 - static void __exit et131x_cleanup_module(void) 939 - { 940 - pci_unregister_driver(&et131x_driver); 941 - } 942 - 943 - module_init(et131x_init_module); 944 - module_exit(et131x_cleanup_module); 945 - 946 - MODULE_AUTHOR("Victor Soriano <vjsoriano@agere.com>"); 947 - MODULE_AUTHOR("Mark Einon <mark.einon@gmail.com>"); 948 - MODULE_LICENSE("Dual BSD/GPL"); 949 - MODULE_DESCRIPTION("10/100/1000 Base-T Ethernet Driver " 950 - "for the ET1310 by Agere Systems");
-442
drivers/staging/et131x/et131x_isr.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1301 and ET131x series MACs 4 - * 5 - * Copyright © 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et131x_isr.c - File which contains the ISR, ISR handler, and related routines 14 - * for processing interrupts from the device. 15 - * 16 - *------------------------------------------------------------------------------ 17 - * 18 - * SOFTWARE LICENSE 19 - * 20 - * This software is provided subject to the following terms and conditions, 21 - * which you should read carefully before using the software. Using this 22 - * software indicates your acceptance of these terms and conditions. If you do 23 - * not agree with these terms and conditions, do not use the software. 24 - * 25 - * Copyright © 2005 Agere Systems Inc. 26 - * All rights reserved. 27 - * 28 - * Redistribution and use in source or binary forms, with or without 29 - * modifications, are permitted provided that the following conditions are met: 30 - * 31 - * . Redistributions of source code must retain the above copyright notice, this 32 - * list of conditions and the following Disclaimer as comments in the code as 33 - * well as in the documentation and/or other materials provided with the 34 - * distribution. 35 - * 36 - * . Redistributions in binary form must reproduce the above copyright notice, 37 - * this list of conditions and the following Disclaimer in the documentation 38 - * and/or other materials provided with the distribution. 39 - * 40 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 41 - * may be used to endorse or promote products derived from this software 42 - * without specific prior written permission. 
43 - * 44 - * Disclaimer 45 - * 46 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 47 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 48 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 49 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 50 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 51 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 52 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 53 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 54 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 55 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 56 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 57 - * DAMAGE. 58 - * 59 - */ 60 - 61 - #include "et131x_defs.h" 62 - 63 - #include <linux/init.h> 64 - #include <linux/module.h> 65 - #include <linux/types.h> 66 - #include <linux/kernel.h> 67 - 68 - #include <linux/sched.h> 69 - #include <linux/ptrace.h> 70 - #include <linux/ctype.h> 71 - #include <linux/string.h> 72 - #include <linux/timer.h> 73 - #include <linux/interrupt.h> 74 - #include <linux/in.h> 75 - #include <linux/delay.h> 76 - #include <linux/io.h> 77 - #include <linux/bitops.h> 78 - #include <linux/pci.h> 79 - #include <asm/system.h> 80 - 81 - #include <linux/netdevice.h> 82 - #include <linux/etherdevice.h> 83 - #include <linux/skbuff.h> 84 - #include <linux/if_arp.h> 85 - #include <linux/ioport.h> 86 - 87 - #include "et1310_phy.h" 88 - #include "et131x_adapter.h" 89 - #include "et131x.h" 90 - 91 - /* 92 - * For interrupts, normal running is: 93 - * rxdma_xfr_done, phy_interrupt, mac_stat_interrupt, 94 - * watchdog_interrupt & txdma_xfer_done 95 - * 96 - * In both cases, when flow control is enabled for either Tx or bi-direction, 
97 - * we additional enable rx_fbr0_low and rx_fbr1_low, so we know when the 98 - * buffer rings are running low. 99 - */ 100 - #define INT_MASK_DISABLE 0xffffffff 101 - 102 - /* NOTE: Masking out MAC_STAT Interrupt for now... 103 - * #define INT_MASK_ENABLE 0xfff6bf17 104 - * #define INT_MASK_ENABLE_NO_FLOW 0xfff6bfd7 105 - */ 106 - #define INT_MASK_ENABLE 0xfffebf17 107 - #define INT_MASK_ENABLE_NO_FLOW 0xfffebfd7 108 - 109 - 110 - /** 111 - * et131x_enable_interrupts - enable interrupt 112 - * @adapter: et131x device 113 - * 114 - * Enable the appropriate interrupts on the ET131x according to our 115 - * configuration 116 - */ 117 - 118 - void et131x_enable_interrupts(struct et131x_adapter *adapter) 119 - { 120 - u32 mask; 121 - 122 - /* Enable all global interrupts */ 123 - if (adapter->flowcontrol == FLOW_TXONLY || 124 - adapter->flowcontrol == FLOW_BOTH) 125 - mask = INT_MASK_ENABLE; 126 - else 127 - mask = INT_MASK_ENABLE_NO_FLOW; 128 - 129 - writel(mask, &adapter->regs->global.int_mask); 130 - } 131 - 132 - /** 133 - * et131x_disable_interrupts - interrupt disable 134 - * @adapter: et131x device 135 - * 136 - * Block all interrupts from the et131x device at the device itself 137 - */ 138 - 139 - void et131x_disable_interrupts(struct et131x_adapter *adapter) 140 - { 141 - /* Disable all global interrupts */ 142 - writel(INT_MASK_DISABLE, &adapter->regs->global.int_mask); 143 - } 144 - 145 - 146 - /** 147 - * et131x_isr - The Interrupt Service Routine for the driver. 148 - * @irq: the IRQ on which the interrupt was received. 149 - * @dev_id: device-specific info (here a pointer to a net_device struct) 150 - * 151 - * Returns a value indicating if the interrupt was handled. 
152 - */ 153 - 154 - irqreturn_t et131x_isr(int irq, void *dev_id) 155 - { 156 - bool handled = true; 157 - struct net_device *netdev = (struct net_device *)dev_id; 158 - struct et131x_adapter *adapter = NULL; 159 - u32 status; 160 - 161 - if (!netif_device_present(netdev)) { 162 - handled = false; 163 - goto out; 164 - } 165 - 166 - adapter = netdev_priv(netdev); 167 - 168 - /* If the adapter is in low power state, then it should not 169 - * recognize any interrupt 170 - */ 171 - 172 - /* Disable Device Interrupts */ 173 - et131x_disable_interrupts(adapter); 174 - 175 - /* Get a copy of the value in the interrupt status register 176 - * so we can process the interrupting section 177 - */ 178 - status = readl(&adapter->regs->global.int_status); 179 - 180 - if (adapter->flowcontrol == FLOW_TXONLY || 181 - adapter->flowcontrol == FLOW_BOTH) { 182 - status &= ~INT_MASK_ENABLE; 183 - } else { 184 - status &= ~INT_MASK_ENABLE_NO_FLOW; 185 - } 186 - 187 - /* Make sure this is our interrupt */ 188 - if (!status) { 189 - handled = false; 190 - et131x_enable_interrupts(adapter); 191 - goto out; 192 - } 193 - 194 - /* This is our interrupt, so process accordingly */ 195 - 196 - if (status & ET_INTR_WATCHDOG) { 197 - struct tcb *tcb = adapter->tx_ring.send_head; 198 - 199 - if (tcb) 200 - if (++tcb->stale > 1) 201 - status |= ET_INTR_TXDMA_ISR; 202 - 203 - if (adapter->rx_ring.unfinished_receives) 204 - status |= ET_INTR_RXDMA_XFR_DONE; 205 - else if (tcb == NULL) 206 - writel(0, &adapter->regs->global.watchdog_timer); 207 - 208 - status &= ~ET_INTR_WATCHDOG; 209 - } 210 - 211 - if (status == 0) { 212 - /* This interrupt has in some way been "handled" by 213 - * the ISR. Either it was a spurious Rx interrupt, or 214 - * it was a Tx interrupt that has been filtered by 215 - * the ISR. 216 - */ 217 - et131x_enable_interrupts(adapter); 218 - goto out; 219 - } 220 - 221 - /* We need to save the interrupt status value for use in our 222 - * DPC. 
We will clear the software copy of that in that 223 - * routine. 224 - */ 225 - adapter->stats.interrupt_status = status; 226 - 227 - /* Schedule the ISR handler as a bottom-half task in the 228 - * kernel's tq_immediate queue, and mark the queue for 229 - * execution 230 - */ 231 - schedule_work(&adapter->task); 232 - out: 233 - return IRQ_RETVAL(handled); 234 - } 235 - 236 - /** 237 - * et131x_isr_handler - The ISR handler 238 - * @p_adapter, a pointer to the device's private adapter structure 239 - * 240 - * scheduled to run in a deferred context by the ISR. This is where the ISR's 241 - * work actually gets done. 242 - */ 243 - void et131x_isr_handler(struct work_struct *work) 244 - { 245 - struct et131x_adapter *adapter = 246 - container_of(work, struct et131x_adapter, task); 247 - u32 status = adapter->stats.interrupt_status; 248 - struct address_map __iomem *iomem = adapter->regs; 249 - 250 - /* 251 - * These first two are by far the most common. Once handled, we clear 252 - * their two bits in the status word. If the word is now zero, we 253 - * exit. 
254 - */ 255 - /* Handle all the completed Transmit interrupts */ 256 - if (status & ET_INTR_TXDMA_ISR) 257 - et131x_handle_send_interrupt(adapter); 258 - 259 - /* Handle all the completed Receives interrupts */ 260 - if (status & ET_INTR_RXDMA_XFR_DONE) 261 - et131x_handle_recv_interrupt(adapter); 262 - 263 - status &= 0xffffffd7; 264 - 265 - if (status) { 266 - /* Handle the TXDMA Error interrupt */ 267 - if (status & ET_INTR_TXDMA_ERR) { 268 - u32 txdma_err; 269 - 270 - /* Following read also clears the register (COR) */ 271 - txdma_err = readl(&iomem->txdma.tx_dma_error); 272 - 273 - dev_warn(&adapter->pdev->dev, 274 - "TXDMA_ERR interrupt, error = %d\n", 275 - txdma_err); 276 - } 277 - 278 - /* Handle Free Buffer Ring 0 and 1 Low interrupt */ 279 - if (status & 280 - (ET_INTR_RXDMA_FB_R0_LOW | ET_INTR_RXDMA_FB_R1_LOW)) { 281 - /* 282 - * This indicates the number of unused buffers in 283 - * RXDMA free buffer ring 0 is <= the limit you 284 - * programmed. Free buffer resources need to be 285 - * returned. Free buffers are consumed as packets 286 - * are passed from the network to the host. The host 287 - * becomes aware of the packets from the contents of 288 - * the packet status ring. This ring is queried when 289 - * the packet done interrupt occurs. Packets are then 290 - * passed to the OS. When the OS is done with the 291 - * packets the resources can be returned to the 292 - * ET1310 for re-use. This interrupt is one method of 293 - * returning resources. 
294 - */ 295 - 296 - /* If the user has flow control on, then we will 297 - * send a pause packet, otherwise just exit 298 - */ 299 - if (adapter->flowcontrol == FLOW_TXONLY || 300 - adapter->flowcontrol == FLOW_BOTH) { 301 - u32 pm_csr; 302 - 303 - /* Tell the device to send a pause packet via 304 - * the back pressure register (bp req and 305 - * bp xon/xoff) 306 - */ 307 - pm_csr = readl(&iomem->global.pm_csr); 308 - if (!et1310_in_phy_coma(adapter)) 309 - writel(3, &iomem->txmac.bp_ctrl); 310 - } 311 - } 312 - 313 - /* Handle Packet Status Ring Low Interrupt */ 314 - if (status & ET_INTR_RXDMA_STAT_LOW) { 315 - 316 - /* 317 - * Same idea as with the two Free Buffer Rings. 318 - * Packets going from the network to the host each 319 - * consume a free buffer resource and a packet status 320 - * resource. These resoures are passed to the OS. 321 - * When the OS is done with the resources, they need 322 - * to be returned to the ET1310. This is one method 323 - * of returning the resources. 324 - */ 325 - } 326 - 327 - /* Handle RXDMA Error Interrupt */ 328 - if (status & ET_INTR_RXDMA_ERR) { 329 - /* 330 - * The rxdma_error interrupt is sent when a time-out 331 - * on a request issued by the JAGCore has occurred or 332 - * a completion is returned with an un-successful 333 - * status. In both cases the request is considered 334 - * complete. The JAGCore will automatically re-try the 335 - * request in question. Normally information on events 336 - * like these are sent to the host using the "Advanced 337 - * Error Reporting" capability. This interrupt is 338 - * another way of getting similar information. The 339 - * only thing required is to clear the interrupt by 340 - * reading the ISR in the global resources. The 341 - * JAGCore will do a re-try on the request. Normally 342 - * you should never see this interrupt. If you start 343 - * to see this interrupt occurring frequently then 344 - * something bad has occurred. A reset might be the 345 - * thing to do. 
346 - */ 347 - /* TRAP();*/ 348 - 349 - dev_warn(&adapter->pdev->dev, 350 - "RxDMA_ERR interrupt, error %x\n", 351 - readl(&iomem->txmac.tx_test)); 352 - } 353 - 354 - /* Handle the Wake on LAN Event */ 355 - if (status & ET_INTR_WOL) { 356 - /* 357 - * This is a secondary interrupt for wake on LAN. 358 - * The driver should never see this, if it does, 359 - * something serious is wrong. We will TRAP the 360 - * message when we are in DBG mode, otherwise we 361 - * will ignore it. 362 - */ 363 - dev_err(&adapter->pdev->dev, "WAKE_ON_LAN interrupt\n"); 364 - } 365 - 366 - /* Let's move on to the TxMac */ 367 - if (status & ET_INTR_TXMAC) { 368 - u32 err = readl(&iomem->txmac.err); 369 - 370 - /* 371 - * When any of the errors occur and TXMAC generates 372 - * an interrupt to report these errors, it usually 373 - * means that TXMAC has detected an error in the data 374 - * stream retrieved from the on-chip Tx Q. All of 375 - * these errors are catastrophic and TXMAC won't be 376 - * able to recover data when these errors occur. In 377 - * a nutshell, the whole Tx path will have to be reset 378 - * and re-configured afterwards. 379 - */ 380 - dev_warn(&adapter->pdev->dev, 381 - "TXMAC interrupt, error 0x%08x\n", 382 - err); 383 - 384 - /* If we are debugging, we want to see this error, 385 - * otherwise we just want the device to be reset and 386 - * continue 387 - */ 388 - } 389 - 390 - /* Handle RXMAC Interrupt */ 391 - if (status & ET_INTR_RXMAC) { 392 - /* 393 - * These interrupts are catastrophic to the device, 394 - * what we need to do is disable the interrupts and 395 - * set the flag to cause us to reset so we can solve 396 - * this issue. 397 - */ 398 - /* MP_SET_FLAG( adapter, 399 - fMP_ADAPTER_HARDWARE_ERROR); */ 400 - 401 - dev_warn(&adapter->pdev->dev, 402 - "RXMAC interrupt, error 0x%08x. 
Requesting reset\n", 403 - readl(&iomem->rxmac.err_reg)); 404 - 405 - dev_warn(&adapter->pdev->dev, 406 - "Enable 0x%08x, Diag 0x%08x\n", 407 - readl(&iomem->rxmac.ctrl), 408 - readl(&iomem->rxmac.rxq_diag)); 409 - 410 - /* 411 - * If we are debugging, we want to see this error, 412 - * otherwise we just want the device to be reset and 413 - * continue 414 - */ 415 - } 416 - 417 - /* Handle MAC_STAT Interrupt */ 418 - if (status & ET_INTR_MAC_STAT) { 419 - /* 420 - * This means at least one of the un-masked counters 421 - * in the MAC_STAT block has rolled over. Use this 422 - * to maintain the top, software managed bits of the 423 - * counter(s). 424 - */ 425 - et1310_handle_macstat_interrupt(adapter); 426 - } 427 - 428 - /* Handle SLV Timeout Interrupt */ 429 - if (status & ET_INTR_SLV_TIMEOUT) { 430 - /* 431 - * This means a timeout has occurred on a read or 432 - * write request to one of the JAGCore registers. The 433 - * Global Resources block has terminated the request 434 - * and on a read request, returned a "fake" value. 435 - * The most likely reasons are: Bad Address or the 436 - * addressed module is in a power-down state and 437 - * can't respond. 438 - */ 439 - } 440 - } 441 - et131x_enable_interrupts(adapter); 442 - }
-662
drivers/staging/et131x/et131x_netdev.c
··· 1 - /* 2 - * Agere Systems Inc. 3 - * 10/100/1000 Base-T Ethernet Driver for the ET1310 and ET131x series MACs 4 - * 5 - * Copyright © 2005 Agere Systems Inc. 6 - * All rights reserved. 7 - * http://www.agere.com 8 - * 9 - * Copyright (c) 2011 Mark Einon <mark.einon@gmail.com> 10 - * 11 - *------------------------------------------------------------------------------ 12 - * 13 - * et131x_netdev.c - Routines and data required by all Linux network devices. 14 - * 15 - *------------------------------------------------------------------------------ 16 - * 17 - * SOFTWARE LICENSE 18 - * 19 - * This software is provided subject to the following terms and conditions, 20 - * which you should read carefully before using the software. Using this 21 - * software indicates your acceptance of these terms and conditions. If you do 22 - * not agree with these terms and conditions, do not use the software. 23 - * 24 - * Copyright © 2005 Agere Systems Inc. 25 - * All rights reserved. 26 - * 27 - * Redistribution and use in source or binary forms, with or without 28 - * modifications, are permitted provided that the following conditions are met: 29 - * 30 - * . Redistributions of source code must retain the above copyright notice, this 31 - * list of conditions and the following Disclaimer as comments in the code as 32 - * well as in the documentation and/or other materials provided with the 33 - * distribution. 34 - * 35 - * . Redistributions in binary form must reproduce the above copyright notice, 36 - * this list of conditions and the following Disclaimer in the documentation 37 - * and/or other materials provided with the distribution. 38 - * 39 - * . Neither the name of Agere Systems Inc. nor the names of the contributors 40 - * may be used to endorse or promote products derived from this software 41 - * without specific prior written permission. 
42 - * 43 - * Disclaimer 44 - * 45 - * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, 46 - * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF 47 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY 48 - * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN 49 - * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY 50 - * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 51 - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 52 - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND 53 - * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT 54 - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 55 - * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH 56 - * DAMAGE. 57 - * 58 - */ 59 - 60 - #include "et131x_defs.h" 61 - 62 - #include <linux/init.h> 63 - #include <linux/module.h> 64 - #include <linux/types.h> 65 - #include <linux/kernel.h> 66 - 67 - #include <linux/sched.h> 68 - #include <linux/ptrace.h> 69 - #include <linux/ctype.h> 70 - #include <linux/string.h> 71 - #include <linux/timer.h> 72 - #include <linux/interrupt.h> 73 - #include <linux/in.h> 74 - #include <linux/delay.h> 75 - #include <linux/io.h> 76 - #include <linux/bitops.h> 77 - #include <linux/pci.h> 78 - #include <asm/system.h> 79 - 80 - #include <linux/mii.h> 81 - #include <linux/netdevice.h> 82 - #include <linux/etherdevice.h> 83 - #include <linux/skbuff.h> 84 - #include <linux/if_arp.h> 85 - #include <linux/ioport.h> 86 - #include <linux/phy.h> 87 - 88 - #include "et1310_phy.h" 89 - #include "et1310_tx.h" 90 - #include "et131x_adapter.h" 91 - #include "et131x.h" 92 - 93 - /** 94 - * et131x_stats - Return the current device statistics. 
95 - * @netdev: device whose stats are being queried 96 - * 97 - * Returns 0 on success, errno on failure (as defined in errno.h) 98 - */ 99 - static struct net_device_stats *et131x_stats(struct net_device *netdev) 100 - { 101 - struct et131x_adapter *adapter = netdev_priv(netdev); 102 - struct net_device_stats *stats = &adapter->net_stats; 103 - struct ce_stats *devstat = &adapter->stats; 104 - 105 - stats->rx_errors = devstat->rx_length_errs + 106 - devstat->rx_align_errs + 107 - devstat->rx_crc_errs + 108 - devstat->rx_code_violations + 109 - devstat->rx_other_errs; 110 - stats->tx_errors = devstat->tx_max_pkt_errs; 111 - stats->multicast = devstat->multicast_pkts_rcvd; 112 - stats->collisions = devstat->tx_collisions; 113 - 114 - stats->rx_length_errors = devstat->rx_length_errs; 115 - stats->rx_over_errors = devstat->rx_overflows; 116 - stats->rx_crc_errors = devstat->rx_crc_errs; 117 - 118 - /* NOTE: These stats don't have corresponding values in CE_STATS, 119 - * so we're going to have to update these directly from within the 120 - * TX/RX code 121 - */ 122 - /* stats->rx_bytes = 20; devstat->; */ 123 - /* stats->tx_bytes = 20; devstat->; */ 124 - /* stats->rx_dropped = devstat->; */ 125 - /* stats->tx_dropped = devstat->; */ 126 - 127 - /* NOTE: Not used, can't find analogous statistics */ 128 - /* stats->rx_frame_errors = devstat->; */ 129 - /* stats->rx_fifo_errors = devstat->; */ 130 - /* stats->rx_missed_errors = devstat->; */ 131 - 132 - /* stats->tx_aborted_errors = devstat->; */ 133 - /* stats->tx_carrier_errors = devstat->; */ 134 - /* stats->tx_fifo_errors = devstat->; */ 135 - /* stats->tx_heartbeat_errors = devstat->; */ 136 - /* stats->tx_window_errors = devstat->; */ 137 - return stats; 138 - } 139 - 140 - /** 141 - * et131x_enable_txrx - Enable tx/rx queues 142 - * @netdev: device to be enabled 143 - */ 144 - void et131x_enable_txrx(struct net_device *netdev) 145 - { 146 - struct et131x_adapter *adapter = netdev_priv(netdev); 147 - 148 - /* 
Enable the Tx and Rx DMA engines (if not already enabled) */ 149 - et131x_rx_dma_enable(adapter); 150 - et131x_tx_dma_enable(adapter); 151 - 152 - /* Enable device interrupts */ 153 - if (adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE) 154 - et131x_enable_interrupts(adapter); 155 - 156 - /* We're ready to move some data, so start the queue */ 157 - netif_start_queue(netdev); 158 - } 159 - 160 - /** 161 - * et131x_disable_txrx - Disable tx/rx queues 162 - * @netdev: device to be disabled 163 - */ 164 - void et131x_disable_txrx(struct net_device *netdev) 165 - { 166 - struct et131x_adapter *adapter = netdev_priv(netdev); 167 - 168 - /* First thing is to stop the queue */ 169 - netif_stop_queue(netdev); 170 - 171 - /* Stop the Tx and Rx DMA engines */ 172 - et131x_rx_dma_disable(adapter); 173 - et131x_tx_dma_disable(adapter); 174 - 175 - /* Disable device interrupts */ 176 - et131x_disable_interrupts(adapter); 177 - } 178 - 179 - /** 180 - * et131x_up - Bring up a device for use. 181 - * @netdev: device to be opened 182 - */ 183 - void et131x_up(struct net_device *netdev) 184 - { 185 - struct et131x_adapter *adapter = netdev_priv(netdev); 186 - 187 - et131x_enable_txrx(netdev); 188 - phy_start(adapter->phydev); 189 - } 190 - 191 - /** 192 - * et131x_open - Open the device for use. 
193 - * @netdev: device to be opened 194 - * 195 - * Returns 0 on success, errno on failure (as defined in errno.h) 196 - */ 197 - int et131x_open(struct net_device *netdev) 198 - { 199 - int result = 0; 200 - struct et131x_adapter *adapter = netdev_priv(netdev); 201 - 202 - /* Start the timer to track NIC errors */ 203 - init_timer(&adapter->error_timer); 204 - adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; 205 - adapter->error_timer.function = et131x_error_timer_handler; 206 - adapter->error_timer.data = (unsigned long)adapter; 207 - add_timer(&adapter->error_timer); 208 - 209 - /* Register our IRQ */ 210 - result = request_irq(netdev->irq, et131x_isr, IRQF_SHARED, 211 - netdev->name, netdev); 212 - if (result) { 213 - dev_err(&adapter->pdev->dev, "could not register IRQ %d\n", 214 - netdev->irq); 215 - return result; 216 - } 217 - 218 - adapter->flags |= fMP_ADAPTER_INTERRUPT_IN_USE; 219 - 220 - et131x_up(netdev); 221 - 222 - return result; 223 - } 224 - 225 - /** 226 - * et131x_down - Bring down the device 227 - * @netdev: device to be broght down 228 - */ 229 - void et131x_down(struct net_device *netdev) 230 - { 231 - struct et131x_adapter *adapter = netdev_priv(netdev); 232 - 233 - /* Save the timestamp for the TX watchdog, prevent a timeout */ 234 - netdev->trans_start = jiffies; 235 - 236 - phy_stop(adapter->phydev); 237 - et131x_disable_txrx(netdev); 238 - } 239 - 240 - /** 241 - * et131x_close - Close the device 242 - * @netdev: device to be closed 243 - * 244 - * Returns 0 on success, errno on failure (as defined in errno.h) 245 - */ 246 - int et131x_close(struct net_device *netdev) 247 - { 248 - struct et131x_adapter *adapter = netdev_priv(netdev); 249 - 250 - et131x_down(netdev); 251 - 252 - adapter->flags &= ~fMP_ADAPTER_INTERRUPT_IN_USE; 253 - free_irq(netdev->irq, netdev); 254 - 255 - /* Stop the error timer */ 256 - return del_timer_sync(&adapter->error_timer); 257 - } 258 - 259 - /** 260 - * et131x_ioctl - The I/O Control 
handler for the driver 261 - * @netdev: device on which the control request is being made 262 - * @reqbuf: a pointer to the IOCTL request buffer 263 - * @cmd: the IOCTL command code 264 - * 265 - * Returns 0 on success, errno on failure (as defined in errno.h) 266 - */ 267 - static int et131x_ioctl(struct net_device *netdev, struct ifreq *reqbuf, int cmd) 268 - { 269 - struct et131x_adapter *adapter = netdev_priv(netdev); 270 - 271 - if (!adapter->phydev) 272 - return -EINVAL; 273 - 274 - return phy_mii_ioctl(adapter->phydev, reqbuf, cmd); 275 - } 276 - 277 - /** 278 - * et131x_set_packet_filter - Configures the Rx Packet filtering on the device 279 - * @adapter: pointer to our private adapter structure 280 - * 281 - * FIXME: lot of dups with MAC code 282 - * 283 - * Returns 0 on success, errno on failure 284 - */ 285 - static int et131x_set_packet_filter(struct et131x_adapter *adapter) 286 - { 287 - int status = 0; 288 - uint32_t filter = adapter->packet_filter; 289 - u32 ctrl; 290 - u32 pf_ctrl; 291 - 292 - ctrl = readl(&adapter->regs->rxmac.ctrl); 293 - pf_ctrl = readl(&adapter->regs->rxmac.pf_ctrl); 294 - 295 - /* Default to disabled packet filtering. Enable it in the individual 296 - * case statements that require the device to filter something 297 - */ 298 - ctrl |= 0x04; 299 - 300 - /* Set us to be in promiscuous mode so we receive everything, this 301 - * is also true when we get a packet filter of 0 302 - */ 303 - if ((filter & ET131X_PACKET_TYPE_PROMISCUOUS) || filter == 0) 304 - pf_ctrl &= ~7; /* Clear filter bits */ 305 - else { 306 - /* 307 - * Set us up with Multicast packet filtering. Three cases are 308 - * possible - (1) we have a multi-cast list, (2) we receive ALL 309 - * multicast entries or (3) we receive none. 
310 - */ 311 - if (filter & ET131X_PACKET_TYPE_ALL_MULTICAST) 312 - pf_ctrl &= ~2; /* Multicast filter bit */ 313 - else { 314 - et1310_setup_device_for_multicast(adapter); 315 - pf_ctrl |= 2; 316 - ctrl &= ~0x04; 317 - } 318 - 319 - /* Set us up with Unicast packet filtering */ 320 - if (filter & ET131X_PACKET_TYPE_DIRECTED) { 321 - et1310_setup_device_for_unicast(adapter); 322 - pf_ctrl |= 4; 323 - ctrl &= ~0x04; 324 - } 325 - 326 - /* Set us up with Broadcast packet filtering */ 327 - if (filter & ET131X_PACKET_TYPE_BROADCAST) { 328 - pf_ctrl |= 1; /* Broadcast filter bit */ 329 - ctrl &= ~0x04; 330 - } else 331 - pf_ctrl &= ~1; 332 - 333 - /* Setup the receive mac configuration registers - Packet 334 - * Filter control + the enable / disable for packet filter 335 - * in the control reg. 336 - */ 337 - writel(pf_ctrl, &adapter->regs->rxmac.pf_ctrl); 338 - writel(ctrl, &adapter->regs->rxmac.ctrl); 339 - } 340 - return status; 341 - } 342 - 343 - /** 344 - * et131x_multicast - The handler to configure multicasting on the interface 345 - * @netdev: a pointer to a net_device struct representing the device 346 - */ 347 - static void et131x_multicast(struct net_device *netdev) 348 - { 349 - struct et131x_adapter *adapter = netdev_priv(netdev); 350 - uint32_t packet_filter = 0; 351 - unsigned long flags; 352 - struct netdev_hw_addr *ha; 353 - int i; 354 - 355 - spin_lock_irqsave(&adapter->lock, flags); 356 - 357 - /* Before we modify the platform-independent filter flags, store them 358 - * locally. This allows us to determine if anything's changed and if 359 - * we even need to bother the hardware 360 - */ 361 - packet_filter = adapter->packet_filter; 362 - 363 - /* Clear the 'multicast' flag locally; because we only have a single 364 - * flag to check multicast, and multiple multicast addresses can be 365 - * set, this is the easiest way to determine if more than one 366 - * multicast address is being set. 
367 - */ 368 - packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 369 - 370 - /* Check the net_device flags and set the device independent flags 371 - * accordingly 372 - */ 373 - 374 - if (netdev->flags & IFF_PROMISC) 375 - adapter->packet_filter |= ET131X_PACKET_TYPE_PROMISCUOUS; 376 - else 377 - adapter->packet_filter &= ~ET131X_PACKET_TYPE_PROMISCUOUS; 378 - 379 - if (netdev->flags & IFF_ALLMULTI) 380 - adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 381 - 382 - if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) 383 - adapter->packet_filter |= ET131X_PACKET_TYPE_ALL_MULTICAST; 384 - 385 - if (netdev_mc_count(netdev) < 1) { 386 - adapter->packet_filter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST; 387 - adapter->packet_filter &= ~ET131X_PACKET_TYPE_MULTICAST; 388 - } else 389 - adapter->packet_filter |= ET131X_PACKET_TYPE_MULTICAST; 390 - 391 - /* Set values in the private adapter struct */ 392 - i = 0; 393 - netdev_for_each_mc_addr(ha, netdev) { 394 - if (i == NIC_MAX_MCAST_LIST) 395 - break; 396 - memcpy(adapter->multicast_list[i++], ha->addr, ETH_ALEN); 397 - } 398 - adapter->multicast_addr_count = i; 399 - 400 - /* Are the new flags different from the previous ones? If not, then no 401 - * action is required 402 - * 403 - * NOTE - This block will always update the multicast_list with the 404 - * hardware, even if the addresses aren't the same. 
405 - */ 406 - if (packet_filter != adapter->packet_filter) { 407 - /* Call the device's filter function */ 408 - et131x_set_packet_filter(adapter); 409 - } 410 - spin_unlock_irqrestore(&adapter->lock, flags); 411 - } 412 - 413 - /** 414 - * et131x_tx - The handler to tx a packet on the device 415 - * @skb: data to be Tx'd 416 - * @netdev: device on which data is to be Tx'd 417 - * 418 - * Returns 0 on success, errno on failure (as defined in errno.h) 419 - */ 420 - static int et131x_tx(struct sk_buff *skb, struct net_device *netdev) 421 - { 422 - int status = 0; 423 - 424 - /* Save the timestamp for the TX timeout watchdog */ 425 - netdev->trans_start = jiffies; 426 - 427 - /* Call the device-specific data Tx routine */ 428 - status = et131x_send_packets(skb, netdev); 429 - 430 - /* Check status and manage the netif queue if necessary */ 431 - if (status != 0) { 432 - if (status == -ENOMEM) { 433 - /* Put the queue to sleep until resources are 434 - * available 435 - */ 436 - netif_stop_queue(netdev); 437 - status = NETDEV_TX_BUSY; 438 - } else { 439 - status = NETDEV_TX_OK; 440 - } 441 - } 442 - return status; 443 - } 444 - 445 - /** 446 - * et131x_tx_timeout - Timeout handler 447 - * @netdev: a pointer to a net_device struct representing the device 448 - * 449 - * The handler called when a Tx request times out. The timeout period is 450 - * specified by the 'tx_timeo" element in the net_device structure (see 451 - * et131x_alloc_device() to see how this value is set). 452 - */ 453 - static void et131x_tx_timeout(struct net_device *netdev) 454 - { 455 - struct et131x_adapter *adapter = netdev_priv(netdev); 456 - struct tcb *tcb; 457 - unsigned long flags; 458 - 459 - /* If the device is closed, ignore the timeout */ 460 - if (~(adapter->flags & fMP_ADAPTER_INTERRUPT_IN_USE)) 461 - return; 462 - 463 - /* Any nonrecoverable hardware error? 
464 - * Checks adapter->flags for any failure in phy reading 465 - */ 466 - if (adapter->flags & fMP_ADAPTER_NON_RECOVER_ERROR) 467 - return; 468 - 469 - /* Hardware failure? */ 470 - if (adapter->flags & fMP_ADAPTER_HARDWARE_ERROR) { 471 - dev_err(&adapter->pdev->dev, "hardware error - reset\n"); 472 - return; 473 - } 474 - 475 - /* Is send stuck? */ 476 - spin_lock_irqsave(&adapter->tcb_send_qlock, flags); 477 - 478 - tcb = adapter->tx_ring.send_head; 479 - 480 - if (tcb != NULL) { 481 - tcb->count++; 482 - 483 - if (tcb->count > NIC_SEND_HANG_THRESHOLD) { 484 - spin_unlock_irqrestore(&adapter->tcb_send_qlock, 485 - flags); 486 - 487 - dev_warn(&adapter->pdev->dev, 488 - "Send stuck - reset. tcb->WrIndex %x, flags 0x%08x\n", 489 - tcb->index, 490 - tcb->flags); 491 - 492 - adapter->net_stats.tx_errors++; 493 - 494 - /* perform reset of tx/rx */ 495 - et131x_disable_txrx(netdev); 496 - et131x_enable_txrx(netdev); 497 - return; 498 - } 499 - } 500 - 501 - spin_unlock_irqrestore(&adapter->tcb_send_qlock, flags); 502 - } 503 - 504 - /** 505 - * et131x_change_mtu - The handler called to change the MTU for the device 506 - * @netdev: device whose MTU is to be changed 507 - * @new_mtu: the desired MTU 508 - * 509 - * Returns 0 on success, errno on failure (as defined in errno.h) 510 - */ 511 - static int et131x_change_mtu(struct net_device *netdev, int new_mtu) 512 - { 513 - int result = 0; 514 - struct et131x_adapter *adapter = netdev_priv(netdev); 515 - 516 - /* Make sure the requested MTU is valid */ 517 - if (new_mtu < 64 || new_mtu > 9216) 518 - return -EINVAL; 519 - 520 - et131x_disable_txrx(netdev); 521 - et131x_handle_send_interrupt(adapter); 522 - et131x_handle_recv_interrupt(adapter); 523 - 524 - /* Set the new MTU */ 525 - netdev->mtu = new_mtu; 526 - 527 - /* Free Rx DMA memory */ 528 - et131x_adapter_memory_free(adapter); 529 - 530 - /* Set the config parameter for Jumbo Packet support */ 531 - adapter->registry_jumbo_packet = new_mtu + 14; 532 - 
et131x_soft_reset(adapter); 533 - 534 - /* Alloc and init Rx DMA memory */ 535 - result = et131x_adapter_memory_alloc(adapter); 536 - if (result != 0) { 537 - dev_warn(&adapter->pdev->dev, 538 - "Change MTU failed; couldn't re-alloc DMA memory\n"); 539 - return result; 540 - } 541 - 542 - et131x_init_send(adapter); 543 - 544 - et131x_hwaddr_init(adapter); 545 - memcpy(netdev->dev_addr, adapter->addr, ETH_ALEN); 546 - 547 - /* Init the device with the new settings */ 548 - et131x_adapter_setup(adapter); 549 - 550 - et131x_enable_txrx(netdev); 551 - 552 - return result; 553 - } 554 - 555 - /** 556 - * et131x_set_mac_addr - handler to change the MAC address for the device 557 - * @netdev: device whose MAC is to be changed 558 - * @new_mac: the desired MAC address 559 - * 560 - * Returns 0 on success, errno on failure (as defined in errno.h) 561 - * 562 - * IMPLEMENTED BY : blux http://berndlux.de 22.01.2007 21:14 563 - */ 564 - static int et131x_set_mac_addr(struct net_device *netdev, void *new_mac) 565 - { 566 - int result = 0; 567 - struct et131x_adapter *adapter = netdev_priv(netdev); 568 - struct sockaddr *address = new_mac; 569 - 570 - /* begin blux */ 571 - 572 - if (adapter == NULL) 573 - return -ENODEV; 574 - 575 - /* Make sure the requested MAC is valid */ 576 - if (!is_valid_ether_addr(address->sa_data)) 577 - return -EINVAL; 578 - 579 - et131x_disable_txrx(netdev); 580 - et131x_handle_send_interrupt(adapter); 581 - et131x_handle_recv_interrupt(adapter); 582 - 583 - /* Set the new MAC */ 584 - /* netdev->set_mac_address = &new_mac; */ 585 - 586 - memcpy(netdev->dev_addr, address->sa_data, netdev->addr_len); 587 - 588 - printk(KERN_INFO "%s: Setting MAC address to %pM\n", 589 - netdev->name, netdev->dev_addr); 590 - 591 - /* Free Rx DMA memory */ 592 - et131x_adapter_memory_free(adapter); 593 - 594 - et131x_soft_reset(adapter); 595 - 596 - /* Alloc and init Rx DMA memory */ 597 - result = et131x_adapter_memory_alloc(adapter); 598 - if (result != 0) { 599 - 
dev_err(&adapter->pdev->dev, 600 - "Change MAC failed; couldn't re-alloc DMA memory\n"); 601 - return result; 602 - } 603 - 604 - et131x_init_send(adapter); 605 - 606 - et131x_hwaddr_init(adapter); 607 - 608 - /* Init the device with the new settings */ 609 - et131x_adapter_setup(adapter); 610 - 611 - et131x_enable_txrx(netdev); 612 - 613 - return result; 614 - } 615 - 616 - static const struct net_device_ops et131x_netdev_ops = { 617 - .ndo_open = et131x_open, 618 - .ndo_stop = et131x_close, 619 - .ndo_start_xmit = et131x_tx, 620 - .ndo_set_multicast_list = et131x_multicast, 621 - .ndo_tx_timeout = et131x_tx_timeout, 622 - .ndo_change_mtu = et131x_change_mtu, 623 - .ndo_set_mac_address = et131x_set_mac_addr, 624 - .ndo_validate_addr = eth_validate_addr, 625 - .ndo_get_stats = et131x_stats, 626 - .ndo_do_ioctl = et131x_ioctl, 627 - }; 628 - 629 - /** 630 - * et131x_device_alloc 631 - * 632 - * Returns pointer to the allocated and initialized net_device struct for 633 - * this device. 634 - * 635 - * Create instances of net_device and wl_private for the new adapter and 636 - * register the device's entry points in the net_device structure. 637 - */ 638 - struct net_device *et131x_device_alloc(void) 639 - { 640 - struct net_device *netdev; 641 - 642 - /* Alloc net_device and adapter structs */ 643 - netdev = alloc_etherdev(sizeof(struct et131x_adapter)); 644 - 645 - if (!netdev) { 646 - printk(KERN_ERR "et131x: Alloc of net_device struct failed\n"); 647 - return NULL; 648 - } 649 - 650 - /* 651 - * Setup the function registration table (and other data) for a 652 - * net_device 653 - */ 654 - netdev->watchdog_timeo = ET131X_TX_TIMEOUT; 655 - netdev->netdev_ops = &et131x_netdev_ops; 656 - 657 - /* Poll? */ 658 - /* netdev->poll = &et131x_poll; */ 659 - /* netdev->poll_controller = &et131x_poll_controller; */ 660 - return netdev; 661 - } 662 -