Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.35-rc4 2328 lines 62 kB view raw
1/* 2 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter 3 * 4 * Copyright (C) 2003, 2007 IC Plus Corp 5 * 6 * Original Author: 7 * 8 * Craig Rich 9 * Sundance Technology, Inc. 10 * www.sundanceti.com 11 * craig_rich@sundanceti.com 12 * 13 * Current Maintainer: 14 * 15 * Sorbica Shieh. 16 * http://www.icplus.com.tw 17 * sorbica@icplus.com.tw 18 * 19 * Jesse Huang 20 * http://www.icplus.com.tw 21 * jesse@icplus.com.tw 22 */ 23#include <linux/crc32.h> 24#include <linux/ethtool.h> 25#include <linux/gfp.h> 26#include <linux/mii.h> 27#include <linux/mutex.h> 28 29#include <asm/div64.h> 30 31#define IPG_RX_RING_BYTES (sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH) 32#define IPG_TX_RING_BYTES (sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH) 33#define IPG_RESET_MASK \ 34 (IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \ 35 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \ 36 IPG_AC_AUTO_INIT) 37 38#define ipg_w32(val32, reg) iowrite32((val32), ioaddr + (reg)) 39#define ipg_w16(val16, reg) iowrite16((val16), ioaddr + (reg)) 40#define ipg_w8(val8, reg) iowrite8((val8), ioaddr + (reg)) 41 42#define ipg_r32(reg) ioread32(ioaddr + (reg)) 43#define ipg_r16(reg) ioread16(ioaddr + (reg)) 44#define ipg_r8(reg) ioread8(ioaddr + (reg)) 45 46enum { 47 netdev_io_size = 128 48}; 49 50#include "ipg.h" 51#define DRV_NAME "ipg" 52 53MODULE_AUTHOR("IC Plus Corp. 2003"); 54MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver"); 55MODULE_LICENSE("GPL"); 56 57/* 58 * Defaults 59 */ 60#define IPG_MAX_RXFRAME_SIZE 0x0600 61#define IPG_RXFRAG_SIZE 0x0600 62#define IPG_RXSUPPORT_SIZE 0x0600 63#define IPG_IS_JUMBO false 64 65/* 66 * Variable record -- index by leading revision/length 67 * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN 68 */ 69static unsigned short DefaultPhyParam[] = { 70 /* 11/12/03 IP1000A v1-3 rev=0x40 */ 71 /*-------------------------------------------------------------------------- 72 (0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2, 73 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6, 74 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700, 75 --------------------------------------------------------------------------*/ 76 /* 12/17/03 IP1000A v1-4 rev=0x40 */ 77 (0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, 78 0x0000, 79 30, 0x005e, 9, 0x0700, 80 /* 01/09/04 IP1000A v1-5 rev=0x41 */ 81 (0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31, 82 0x0000, 83 30, 0x005e, 9, 0x0700, 84 0x0000 85}; 86 87static const char *ipg_brand_name[] = { 88 "IC PLUS IP1000 1000/100/10 based NIC", 89 "Sundance Technology ST2021 based NIC", 90 "Tamarack Microelectronics TC9020/9021 based NIC", 91 "Tamarack Microelectronics TC9020/9021 based NIC", 92 "D-Link NIC IP1000A" 93}; 94 95static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = { 96 { PCI_VDEVICE(SUNDANCE, 0x1023), 0 }, 97 { PCI_VDEVICE(SUNDANCE, 0x2021), 1 }, 98 { PCI_VDEVICE(SUNDANCE, 0x1021), 2 }, 99 { PCI_VDEVICE(DLINK, 0x9021), 3 }, 100 { PCI_VDEVICE(DLINK, 0x4020), 4 }, 101 { 0, } 102}; 103 104MODULE_DEVICE_TABLE(pci, ipg_pci_tbl); 105 106static inline void __iomem *ipg_ioaddr(struct net_device *dev) 107{ 108 struct ipg_nic_private *sp = netdev_priv(dev); 109 return sp->ioaddr; 110} 111 112#ifdef IPG_DEBUG 113static void ipg_dump_rfdlist(struct net_device *dev) 114{ 115 struct ipg_nic_private *sp = netdev_priv(dev); 116 void __iomem *ioaddr = sp->ioaddr; 117 unsigned int i; 118 u32 offset; 119 120 IPG_DEBUG_MSG("_dump_rfdlist\n"); 121 122 printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current); 123 printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty); 124 printk(KERN_INFO "RFDList start address = %16.16lx\n", 125 (unsigned long) sp->rxd_map); 126 printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n", 127 ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0)); 128 129 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { 130 offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd; 131 printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i, 132 offset, (unsigned long) sp->rxd[i].next_desc); 133 offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd; 134 printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i, 135 offset, (unsigned long) sp->rxd[i].rfs); 136 offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd; 137 printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i, 138 offset, (unsigned long) sp->rxd[i].frag_info); 139 } 140} 141 142static void ipg_dump_tfdlist(struct net_device *dev) 143{ 144 struct ipg_nic_private *sp = netdev_priv(dev); 145 void __iomem *ioaddr = sp->ioaddr; 146 unsigned int i; 147 u32 offset; 148 149 IPG_DEBUG_MSG("_dump_tfdlist\n"); 150 151 printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current); 152 printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty); 153 printk(KERN_INFO "TFDList start address = %16.16lx\n", 154 (unsigned long) sp->txd_map); 155 printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n", 156 ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0)); 157 158 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { 159 offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd; 160 printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i, 161 offset, (unsigned long) sp->txd[i].next_desc); 162 163 offset = (u32) &sp->txd[i].tfc - (u32) sp->txd; 164 printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i, 165 offset, (unsigned long) sp->txd[i].tfc); 166 offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd; 167 printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i, 168 offset, (unsigned long) sp->txd[i].frag_info); 169 } 170} 171#endif 172 173static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data) 174{ 175 ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL); 176 ndelay(IPG_PC_PHYCTRLWAIT_NS); 177} 178 179static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data) 180{ 181 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data); 182 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data); 183} 184 185static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity) 186{ 187 phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR; 188 189 ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity); 190} 191 192static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity) 193{ 194 ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR | 195 phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL); 196} 197 198static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity) 199{ 200 u16 bit_data; 201 202 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity); 203 204 bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1; 205 206 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity); 207 208 return bit_data; 209} 210 211/* 212 * Read a register from the Physical Layer device located 213 * on the IPG NIC, using the IPG PHYCTRL register. 214 */ 215static int mdio_read(struct net_device *dev, int phy_id, int phy_reg) 216{ 217 void __iomem *ioaddr = ipg_ioaddr(dev); 218 /* 219 * The GMII mangement frame structure for a read is as follows: 220 * 221 * |Preamble|st|op|phyad|regad|ta| data |idle| 222 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | 223 * 224 * <32 1s> = 32 consecutive logic 1 values 225 * A = bit of Physical Layer device address (MSB first) 226 * R = bit of register address (MSB first) 227 * z = High impedance state 228 * D = bit of read data (MSB first) 229 * 230 * Transmission order is 'Preamble' field first, bits transmitted 231 * left to right (first to last). 232 */ 233 struct { 234 u32 field; 235 unsigned int len; 236 } p[] = { 237 { GMII_PREAMBLE, 32 }, /* Preamble */ 238 { GMII_ST, 2 }, /* ST */ 239 { GMII_READ, 2 }, /* OP */ 240 { phy_id, 5 }, /* PHYAD */ 241 { phy_reg, 5 }, /* REGAD */ 242 { 0x0000, 2 }, /* TA */ 243 { 0x0000, 16 }, /* DATA */ 244 { 0x0000, 1 } /* IDLE */ 245 }; 246 unsigned int i, j; 247 u8 polarity, data; 248 249 polarity = ipg_r8(PHY_CTRL); 250 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); 251 252 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ 253 for (j = 0; j < 5; j++) { 254 for (i = 0; i < p[j].len; i++) { 255 /* For each variable length field, the MSB must be 256 * transmitted first. Rotate through the field bits, 257 * starting with the MSB, and move each bit into the 258 * the 1st (2^1) bit position (this is the bit position 259 * corresponding to the MgmtData bit of the PhyCtrl 260 * register for the IPG). 261 * 262 * Example: ST = 01; 263 * 264 * First write a '0' to bit 1 of the PhyCtrl 265 * register, then write a '1' to bit 1 of the 266 * PhyCtrl register. 267 * 268 * To do this, right shift the MSB of ST by the value: 269 * [field length - 1 - #ST bits already written] 270 * then left shift this result by 1. 271 */ 272 data = (p[j].field >> (p[j].len - 1 - i)) << 1; 273 data &= IPG_PC_MGMTDATA; 274 data |= polarity | IPG_PC_MGMTDIR; 275 276 ipg_drive_phy_ctl_low_high(ioaddr, data); 277 } 278 } 279 280 send_three_state(ioaddr, polarity); 281 282 read_phy_bit(ioaddr, polarity); 283 284 /* 285 * For a read cycle, the bits for the next two fields (TA and 286 * DATA) are driven by the PHY (the IPG reads these bits). 287 */ 288 for (i = 0; i < p[6].len; i++) { 289 p[6].field |= 290 (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i)); 291 } 292 293 send_three_state(ioaddr, polarity); 294 send_three_state(ioaddr, polarity); 295 send_three_state(ioaddr, polarity); 296 send_end(ioaddr, polarity); 297 298 /* Return the value of the DATA field. */ 299 return p[6].field; 300} 301 302/* 303 * Write to a register from the Physical Layer device located 304 * on the IPG NIC, using the IPG PHYCTRL register. 305 */ 306static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val) 307{ 308 void __iomem *ioaddr = ipg_ioaddr(dev); 309 /* 310 * The GMII mangement frame structure for a read is as follows: 311 * 312 * |Preamble|st|op|phyad|regad|ta| data |idle| 313 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z | 314 * 315 * <32 1s> = 32 consecutive logic 1 values 316 * A = bit of Physical Layer device address (MSB first) 317 * R = bit of register address (MSB first) 318 * z = High impedance state 319 * D = bit of write data (MSB first) 320 * 321 * Transmission order is 'Preamble' field first, bits transmitted 322 * left to right (first to last). 323 */ 324 struct { 325 u32 field; 326 unsigned int len; 327 } p[] = { 328 { GMII_PREAMBLE, 32 }, /* Preamble */ 329 { GMII_ST, 2 }, /* ST */ 330 { GMII_WRITE, 2 }, /* OP */ 331 { phy_id, 5 }, /* PHYAD */ 332 { phy_reg, 5 }, /* REGAD */ 333 { 0x0002, 2 }, /* TA */ 334 { val & 0xffff, 16 }, /* DATA */ 335 { 0x0000, 1 } /* IDLE */ 336 }; 337 unsigned int i, j; 338 u8 polarity, data; 339 340 polarity = ipg_r8(PHY_CTRL); 341 polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY); 342 343 /* Create the Preamble, ST, OP, PHYAD, and REGAD field. */ 344 for (j = 0; j < 7; j++) { 345 for (i = 0; i < p[j].len; i++) { 346 /* For each variable length field, the MSB must be 347 * transmitted first. Rotate through the field bits, 348 * starting with the MSB, and move each bit into the 349 * the 1st (2^1) bit position (this is the bit position 350 * corresponding to the MgmtData bit of the PhyCtrl 351 * register for the IPG). 352 * 353 * Example: ST = 01; 354 * 355 * First write a '0' to bit 1 of the PhyCtrl 356 * register, then write a '1' to bit 1 of the 357 * PhyCtrl register. 358 * 359 * To do this, right shift the MSB of ST by the value: 360 * [field length - 1 - #ST bits already written] 361 * then left shift this result by 1. 362 */ 363 data = (p[j].field >> (p[j].len - 1 - i)) << 1; 364 data &= IPG_PC_MGMTDATA; 365 data |= polarity | IPG_PC_MGMTDIR; 366 367 ipg_drive_phy_ctl_low_high(ioaddr, data); 368 } 369 } 370 371 /* The last cycle is a tri-state, so read from the PHY. */ 372 for (j = 7; j < 8; j++) { 373 for (i = 0; i < p[j].len; i++) { 374 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity); 375 376 p[j].field |= ((ipg_r8(PHY_CTRL) & 377 IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i); 378 379 ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity); 380 } 381 } 382} 383 384static void ipg_set_led_mode(struct net_device *dev) 385{ 386 struct ipg_nic_private *sp = netdev_priv(dev); 387 void __iomem *ioaddr = sp->ioaddr; 388 u32 mode; 389 390 mode = ipg_r32(ASIC_CTRL); 391 mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED); 392 393 if ((sp->led_mode & 0x03) > 1) 394 mode |= IPG_AC_LED_MODE_BIT_1; /* Write Asic Control Bit 29 */ 395 396 if ((sp->led_mode & 0x01) == 1) 397 mode |= IPG_AC_LED_MODE; /* Write Asic Control Bit 14 */ 398 399 if ((sp->led_mode & 0x08) == 8) 400 mode |= IPG_AC_LED_SPEED; /* Write Asic Control Bit 27 */ 401 402 ipg_w32(mode, ASIC_CTRL); 403} 404 405static void ipg_set_phy_set(struct net_device *dev) 406{ 407 struct ipg_nic_private *sp = netdev_priv(dev); 408 void __iomem *ioaddr = sp->ioaddr; 409 int physet; 410 411 physet = ipg_r8(PHY_SET); 412 physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET); 413 physet |= ((sp->led_mode & 0x70) >> 4); 414 ipg_w8(physet, PHY_SET); 415} 416 417static int ipg_reset(struct net_device *dev, u32 resetflags) 418{ 419 /* Assert functional resets via the IPG AsicCtrl 420 * register as specified by the 'resetflags' input 421 * parameter. 422 */ 423 void __iomem *ioaddr = ipg_ioaddr(dev); 424 unsigned int timeout_count = 0; 425 426 IPG_DEBUG_MSG("_reset\n"); 427 428 ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL); 429 430 /* Delay added to account for problem with 10Mbps reset. */ 431 mdelay(IPG_AC_RESETWAIT); 432 433 while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) { 434 mdelay(IPG_AC_RESETWAIT); 435 if (++timeout_count > IPG_AC_RESET_TIMEOUT) 436 return -ETIME; 437 } 438 /* Set LED Mode in Asic Control */ 439 ipg_set_led_mode(dev); 440 441 /* Set PHYSet Register Value */ 442 ipg_set_phy_set(dev); 443 return 0; 444} 445 446/* Find the GMII PHY address. */ 447static int ipg_find_phyaddr(struct net_device *dev) 448{ 449 unsigned int phyaddr, i; 450 451 for (i = 0; i < 32; i++) { 452 u32 status; 453 454 /* Search for the correct PHY address among 32 possible. */ 455 phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32; 456 457 /* 10/22/03 Grace change verify from GMII_PHY_STATUS to 458 GMII_PHY_ID1 459 */ 460 461 status = mdio_read(dev, phyaddr, MII_BMSR); 462 463 if ((status != 0xFFFF) && (status != 0)) 464 return phyaddr; 465 } 466 467 return 0x1f; 468} 469 470/* 471 * Configure IPG based on result of IEEE 802.3 PHY 472 * auto-negotiation. 473 */ 474static int ipg_config_autoneg(struct net_device *dev) 475{ 476 struct ipg_nic_private *sp = netdev_priv(dev); 477 void __iomem *ioaddr = sp->ioaddr; 478 unsigned int txflowcontrol; 479 unsigned int rxflowcontrol; 480 unsigned int fullduplex; 481 u32 mac_ctrl_val; 482 u32 asicctrl; 483 u8 phyctrl; 484 485 IPG_DEBUG_MSG("_config_autoneg\n"); 486 487 asicctrl = ipg_r32(ASIC_CTRL); 488 phyctrl = ipg_r8(PHY_CTRL); 489 mac_ctrl_val = ipg_r32(MAC_CTRL); 490 491 /* Set flags for use in resolving auto-negotation, assuming 492 * non-1000Mbps, half duplex, no flow control. 493 */ 494 fullduplex = 0; 495 txflowcontrol = 0; 496 rxflowcontrol = 0; 497 498 /* To accomodate a problem in 10Mbps operation, 499 * set a global flag if PHY running in 10Mbps mode. 500 */ 501 sp->tenmbpsmode = 0; 502 503 printk(KERN_INFO "%s: Link speed = ", dev->name); 504 505 /* Determine actual speed of operation. */ 506 switch (phyctrl & IPG_PC_LINK_SPEED) { 507 case IPG_PC_LINK_SPEED_10MBPS: 508 printk("10Mbps.\n"); 509 printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n", 510 dev->name); 511 sp->tenmbpsmode = 1; 512 break; 513 case IPG_PC_LINK_SPEED_100MBPS: 514 printk("100Mbps.\n"); 515 break; 516 case IPG_PC_LINK_SPEED_1000MBPS: 517 printk("1000Mbps.\n"); 518 break; 519 default: 520 printk("undefined!\n"); 521 return 0; 522 } 523 524 if (phyctrl & IPG_PC_DUPLEX_STATUS) { 525 fullduplex = 1; 526 txflowcontrol = 1; 527 rxflowcontrol = 1; 528 } 529 530 /* Configure full duplex, and flow control. */ 531 if (fullduplex == 1) { 532 /* Configure IPG for full duplex operation. */ 533 printk(KERN_INFO "%s: setting full duplex, ", dev->name); 534 535 mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD; 536 537 if (txflowcontrol == 1) { 538 printk("TX flow control"); 539 mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE; 540 } else { 541 printk("no TX flow control"); 542 mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE; 543 } 544 545 if (rxflowcontrol == 1) { 546 printk(", RX flow control."); 547 mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE; 548 } else { 549 printk(", no RX flow control."); 550 mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE; 551 } 552 553 printk("\n"); 554 } else { 555 /* Configure IPG for half duplex operation. */ 556 printk(KERN_INFO "%s: setting half duplex, " 557 "no TX flow control, no RX flow control.\n", dev->name); 558 559 mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD & 560 ~IPG_MC_TX_FLOW_CONTROL_ENABLE & 561 ~IPG_MC_RX_FLOW_CONTROL_ENABLE; 562 } 563 ipg_w32(mac_ctrl_val, MAC_CTRL); 564 return 0; 565} 566 567/* Determine and configure multicast operation and set 568 * receive mode for IPG. 569 */ 570static void ipg_nic_set_multicast_list(struct net_device *dev) 571{ 572 void __iomem *ioaddr = ipg_ioaddr(dev); 573 struct netdev_hw_addr *ha; 574 unsigned int hashindex; 575 u32 hashtable[2]; 576 u8 receivemode; 577 578 IPG_DEBUG_MSG("_nic_set_multicast_list\n"); 579 580 receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST; 581 582 if (dev->flags & IFF_PROMISC) { 583 /* NIC to be configured in promiscuous mode. */ 584 receivemode = IPG_RM_RECEIVEALLFRAMES; 585 } else if ((dev->flags & IFF_ALLMULTI) || 586 ((dev->flags & IFF_MULTICAST) && 587 (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) { 588 /* NIC to be configured to receive all multicast 589 * frames. */ 590 receivemode |= IPG_RM_RECEIVEMULTICAST; 591 } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) { 592 /* NIC to be configured to receive selected 593 * multicast addresses. */ 594 receivemode |= IPG_RM_RECEIVEMULTICASTHASH; 595 } 596 597 /* Calculate the bits to set for the 64 bit, IPG HASHTABLE. 598 * The IPG applies a cyclic-redundancy-check (the same CRC 599 * used to calculate the frame data FCS) to the destination 600 * address all incoming multicast frames whose destination 601 * address has the multicast bit set. The least significant 602 * 6 bits of the CRC result are used as an addressing index 603 * into the hash table. If the value of the bit addressed by 604 * this index is a 1, the frame is passed to the host system. 605 */ 606 607 /* Clear hashtable. */ 608 hashtable[0] = 0x00000000; 609 hashtable[1] = 0x00000000; 610 611 /* Cycle through all multicast addresses to filter. */ 612 netdev_for_each_mc_addr(ha, dev) { 613 /* Calculate CRC result for each multicast address. */ 614 hashindex = crc32_le(0xffffffff, ha->addr, 615 ETH_ALEN); 616 617 /* Use only the least significant 6 bits. */ 618 hashindex = hashindex & 0x3F; 619 620 /* Within "hashtable", set bit number "hashindex" 621 * to a logic 1. 622 */ 623 set_bit(hashindex, (void *)hashtable); 624 } 625 626 /* Write the value of the hashtable, to the 4, 16 bit 627 * HASHTABLE IPG registers. 628 */ 629 ipg_w32(hashtable[0], HASHTABLE_0); 630 ipg_w32(hashtable[1], HASHTABLE_1); 631 632 ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE); 633 634 IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE)); 635} 636 637static int ipg_io_config(struct net_device *dev) 638{ 639 struct ipg_nic_private *sp = netdev_priv(dev); 640 void __iomem *ioaddr = ipg_ioaddr(dev); 641 u32 origmacctrl; 642 u32 restoremacctrl; 643 644 IPG_DEBUG_MSG("_io_config\n"); 645 646 origmacctrl = ipg_r32(MAC_CTRL); 647 648 restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE; 649 650 /* Based on compilation option, determine if FCS is to be 651 * stripped on receive frames by IPG. 652 */ 653 if (!IPG_STRIP_FCS_ON_RX) 654 restoremacctrl |= IPG_MC_RCV_FCS; 655 656 /* Determine if transmitter and/or receiver are 657 * enabled so we may restore MACCTRL correctly. 658 */ 659 if (origmacctrl & IPG_MC_TX_ENABLED) 660 restoremacctrl |= IPG_MC_TX_ENABLE; 661 662 if (origmacctrl & IPG_MC_RX_ENABLED) 663 restoremacctrl |= IPG_MC_RX_ENABLE; 664 665 /* Transmitter and receiver must be disabled before setting 666 * IFSSelect. 667 */ 668 ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) & 669 IPG_MC_RSVD_MASK, MAC_CTRL); 670 671 /* Now that transmitter and receiver are disabled, write 672 * to IFSSelect. 673 */ 674 ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL); 675 676 /* Set RECEIVEMODE register. */ 677 ipg_nic_set_multicast_list(dev); 678 679 ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE); 680 681 ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD); 682 ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH); 683 ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH); 684 ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD); 685 ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH); 686 ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH); 687 ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE | 688 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED | 689 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT | 690 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE); 691 ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH); 692 ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH); 693 694 /* IPG multi-frag frame bug workaround. 695 * Per silicon revision B3 eratta. 696 */ 697 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL); 698 699 /* IPG TX poll now bug workaround. 700 * Per silicon revision B3 eratta. 701 */ 702 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL); 703 704 /* IPG RX poll now bug workaround. 705 * Per silicon revision B3 eratta. 706 */ 707 ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL); 708 709 /* Now restore MACCTRL to original setting. */ 710 ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL); 711 712 /* Disable unused RMON statistics. */ 713 ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK); 714 715 /* Disable unused MIB statistics. */ 716 ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD | 717 IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES | 718 IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES | 719 IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK | 720 IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS | 721 IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK); 722 723 return 0; 724} 725 726/* 727 * Create a receive buffer within system memory and update 728 * NIC private structure appropriately. 729 */ 730static int ipg_get_rxbuff(struct net_device *dev, int entry) 731{ 732 struct ipg_nic_private *sp = netdev_priv(dev); 733 struct ipg_rx *rxfd = sp->rxd + entry; 734 struct sk_buff *skb; 735 u64 rxfragsize; 736 737 IPG_DEBUG_MSG("_get_rxbuff\n"); 738 739 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size); 740 if (!skb) { 741 sp->rx_buff[entry] = NULL; 742 return -ENOMEM; 743 } 744 745 /* Associate the receive buffer with the IPG NIC. */ 746 skb->dev = dev; 747 748 /* Save the address of the sk_buff structure. */ 749 sp->rx_buff[entry] = skb; 750 751 rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, 752 sp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 753 754 /* Set the RFD fragment length. */ 755 rxfragsize = sp->rxfrag_size; 756 rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN); 757 758 return 0; 759} 760 761static int init_rfdlist(struct net_device *dev) 762{ 763 struct ipg_nic_private *sp = netdev_priv(dev); 764 void __iomem *ioaddr = sp->ioaddr; 765 unsigned int i; 766 767 IPG_DEBUG_MSG("_init_rfdlist\n"); 768 769 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { 770 struct ipg_rx *rxfd = sp->rxd + i; 771 772 if (sp->rx_buff[i]) { 773 pci_unmap_single(sp->pdev, 774 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 775 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 776 dev_kfree_skb_irq(sp->rx_buff[i]); 777 sp->rx_buff[i] = NULL; 778 } 779 780 /* Clear out the RFS field. */ 781 rxfd->rfs = 0x0000000000000000; 782 783 if (ipg_get_rxbuff(dev, i) < 0) { 784 /* 785 * A receive buffer was not ready, break the 786 * RFD list here. 787 */ 788 IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n"); 789 790 /* Just in case we cannot allocate a single RFD. 791 * Should not occur. 792 */ 793 if (i == 0) { 794 printk(KERN_ERR "%s: No memory available" 795 " for RFD list.\n", dev->name); 796 return -ENOMEM; 797 } 798 } 799 800 rxfd->next_desc = cpu_to_le64(sp->rxd_map + 801 sizeof(struct ipg_rx)*(i + 1)); 802 } 803 sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map); 804 805 sp->rx_current = 0; 806 sp->rx_dirty = 0; 807 808 /* Write the location of the RFDList to the IPG. */ 809 ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0); 810 ipg_w32(0x00000000, RFD_LIST_PTR_1); 811 812 return 0; 813} 814 815static void init_tfdlist(struct net_device *dev) 816{ 817 struct ipg_nic_private *sp = netdev_priv(dev); 818 void __iomem *ioaddr = sp->ioaddr; 819 unsigned int i; 820 821 IPG_DEBUG_MSG("_init_tfdlist\n"); 822 823 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { 824 struct ipg_tx *txfd = sp->txd + i; 825 826 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); 827 828 if (sp->tx_buff[i]) { 829 dev_kfree_skb_irq(sp->tx_buff[i]); 830 sp->tx_buff[i] = NULL; 831 } 832 833 txfd->next_desc = cpu_to_le64(sp->txd_map + 834 sizeof(struct ipg_tx)*(i + 1)); 835 } 836 sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map); 837 838 sp->tx_current = 0; 839 sp->tx_dirty = 0; 840 841 /* Write the location of the TFDList to the IPG. */ 842 IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n", 843 (u32) sp->txd_map); 844 ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0); 845 ipg_w32(0x00000000, TFD_LIST_PTR_1); 846 847 sp->reset_current_tfd = 1; 848} 849 850/* 851 * Free all transmit buffers which have already been transfered 852 * via DMA to the IPG. 853 */ 854static void ipg_nic_txfree(struct net_device *dev) 855{ 856 struct ipg_nic_private *sp = netdev_priv(dev); 857 unsigned int released, pending, dirty; 858 859 IPG_DEBUG_MSG("_nic_txfree\n"); 860 861 pending = sp->tx_current - sp->tx_dirty; 862 dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH; 863 864 for (released = 0; released < pending; released++) { 865 struct sk_buff *skb = sp->tx_buff[dirty]; 866 struct ipg_tx *txfd = sp->txd + dirty; 867 868 IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc); 869 870 /* Look at each TFD's TFC field beginning 871 * at the last freed TFD up to the current TFD. 872 * If the TFDDone bit is set, free the associated 873 * buffer. 874 */ 875 if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE))) 876 break; 877 878 /* Free the transmit buffer. */ 879 if (skb) { 880 pci_unmap_single(sp->pdev, 881 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, 882 skb->len, PCI_DMA_TODEVICE); 883 884 dev_kfree_skb_irq(skb); 885 886 sp->tx_buff[dirty] = NULL; 887 } 888 dirty = (dirty + 1) % IPG_TFDLIST_LENGTH; 889 } 890 891 sp->tx_dirty += released; 892 893 if (netif_queue_stopped(dev) && 894 (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) { 895 netif_wake_queue(dev); 896 } 897} 898 899static void ipg_tx_timeout(struct net_device *dev) 900{ 901 struct ipg_nic_private *sp = netdev_priv(dev); 902 void __iomem *ioaddr = sp->ioaddr; 903 904 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK | 905 IPG_AC_FIFO); 906 907 spin_lock_irq(&sp->lock); 908 909 /* Re-configure after DMA reset. */ 910 if (ipg_io_config(dev) < 0) { 911 printk(KERN_INFO "%s: Error during re-configuration.\n", 912 dev->name); 913 } 914 915 init_tfdlist(dev); 916 917 spin_unlock_irq(&sp->lock); 918 919 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK, 920 MAC_CTRL); 921} 922 923/* 924 * For TxComplete interrupts, free all transmit 925 * buffers which have already been transfered via DMA 926 * to the IPG. 927 */ 928static void ipg_nic_txcleanup(struct net_device *dev) 929{ 930 struct ipg_nic_private *sp = netdev_priv(dev); 931 void __iomem *ioaddr = sp->ioaddr; 932 unsigned int i; 933 934 IPG_DEBUG_MSG("_nic_txcleanup\n"); 935 936 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { 937 /* Reading the TXSTATUS register clears the 938 * TX_COMPLETE interrupt. 939 */ 940 u32 txstatusdword = ipg_r32(TX_STATUS); 941 942 IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword); 943 944 /* Check for Transmit errors. Error bits only valid if 945 * TX_COMPLETE bit in the TXSTATUS register is a 1. 946 */ 947 if (!(txstatusdword & IPG_TS_TX_COMPLETE)) 948 break; 949 950 /* If in 10Mbps mode, indicate transmit is ready. */ 951 if (sp->tenmbpsmode) { 952 netif_wake_queue(dev); 953 } 954 955 /* Transmit error, increment stat counters. */ 956 if (txstatusdword & IPG_TS_TX_ERROR) { 957 IPG_DEBUG_MSG("Transmit error.\n"); 958 sp->stats.tx_errors++; 959 } 960 961 /* Late collision, re-enable transmitter. */ 962 if (txstatusdword & IPG_TS_LATE_COLLISION) { 963 IPG_DEBUG_MSG("Late collision on transmit.\n"); 964 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & 965 IPG_MC_RSVD_MASK, MAC_CTRL); 966 } 967 968 /* Maximum collisions, re-enable transmitter. */ 969 if (txstatusdword & IPG_TS_TX_MAX_COLL) { 970 IPG_DEBUG_MSG("Maximum collisions on transmit.\n"); 971 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & 972 IPG_MC_RSVD_MASK, MAC_CTRL); 973 } 974 975 /* Transmit underrun, reset and re-enable 976 * transmitter. 977 */ 978 if (txstatusdword & IPG_TS_TX_UNDERRUN) { 979 IPG_DEBUG_MSG("Transmitter underrun.\n"); 980 sp->stats.tx_fifo_errors++; 981 ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | 982 IPG_AC_NETWORK | IPG_AC_FIFO); 983 984 /* Re-configure after DMA reset. */ 985 if (ipg_io_config(dev) < 0) { 986 printk(KERN_INFO 987 "%s: Error during re-configuration.\n", 988 dev->name); 989 } 990 init_tfdlist(dev); 991 992 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & 993 IPG_MC_RSVD_MASK, MAC_CTRL); 994 } 995 } 996 997 ipg_nic_txfree(dev); 998} 999 1000/* Provides statistical information about the IPG NIC. */ 1001static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev) 1002{ 1003 struct ipg_nic_private *sp = netdev_priv(dev); 1004 void __iomem *ioaddr = sp->ioaddr; 1005 u16 temp1; 1006 u16 temp2; 1007 1008 IPG_DEBUG_MSG("_nic_get_stats\n"); 1009 1010 /* Check to see if the NIC has been initialized via nic_open, 1011 * before trying to read statistic registers. 1012 */ 1013 if (!test_bit(__LINK_STATE_START, &dev->state)) 1014 return &sp->stats; 1015 1016 sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK); 1017 sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK); 1018 sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK); 1019 sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK); 1020 temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS); 1021 sp->stats.rx_errors += temp1; 1022 sp->stats.rx_missed_errors += temp1; 1023 temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) + 1024 ipg_r32(IPG_LATECOLLISIONS); 1025 temp2 = ipg_r16(IPG_CARRIERSENSEERRORS); 1026 sp->stats.collisions += temp1; 1027 sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS); 1028 sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) + 1029 ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2; 1030 sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK); 1031 1032 /* detailed tx_errors */ 1033 sp->stats.tx_carrier_errors += temp2; 1034 1035 /* detailed rx_errors */ 1036 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + 1037 ipg_r16(IPG_FRAMETOOLONGERRRORS); 1038 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); 1039 1040 /* Unutilized IPG statistic registers. */ 1041 ipg_r32(IPG_MCSTFRAMESRCVDOK); 1042 1043 return &sp->stats; 1044} 1045 1046/* Restore used receive buffers. */ 1047static int ipg_nic_rxrestore(struct net_device *dev) 1048{ 1049 struct ipg_nic_private *sp = netdev_priv(dev); 1050 const unsigned int curr = sp->rx_current; 1051 unsigned int dirty = sp->rx_dirty; 1052 1053 IPG_DEBUG_MSG("_nic_rxrestore\n"); 1054 1055 for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) { 1056 unsigned int entry = dirty % IPG_RFDLIST_LENGTH; 1057 1058 /* rx_copybreak may poke hole here and there. */ 1059 if (sp->rx_buff[entry]) 1060 continue; 1061 1062 /* Generate a new receive buffer to replace the 1063 * current buffer (which will be released by the 1064 * Linux system). 1065 */ 1066 if (ipg_get_rxbuff(dev, entry) < 0) { 1067 IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n"); 1068 1069 break; 1070 } 1071 1072 /* Reset the RFS field. */ 1073 sp->rxd[entry].rfs = 0x0000000000000000; 1074 } 1075 sp->rx_dirty = dirty; 1076 1077 return 0; 1078} 1079 1080/* use jumboindex and jumbosize to control jumbo frame status 1081 * initial status is jumboindex=-1 and jumbosize=0 1082 * 1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done. 1083 * 2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving 1084 * 3. jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump 1085 * previous receiving and need to continue dumping the current one 1086 */ 1087enum { 1088 NORMAL_PACKET, 1089 ERROR_PACKET 1090}; 1091 1092enum { 1093 FRAME_NO_START_NO_END = 0, 1094 FRAME_WITH_START = 1, 1095 FRAME_WITH_END = 10, 1096 FRAME_WITH_START_WITH_END = 11 1097}; 1098 1099static void ipg_nic_rx_free_skb(struct net_device *dev) 1100{ 1101 struct ipg_nic_private *sp = netdev_priv(dev); 1102 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; 1103 1104 if (sp->rx_buff[entry]) { 1105 struct ipg_rx *rxfd = sp->rxd + entry; 1106 1107 pci_unmap_single(sp->pdev, 1108 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1109 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1110 dev_kfree_skb_irq(sp->rx_buff[entry]); 1111 sp->rx_buff[entry] = NULL; 1112 } 1113} 1114 1115static int ipg_nic_rx_check_frame_type(struct net_device *dev) 1116{ 1117 struct ipg_nic_private *sp = netdev_priv(dev); 1118 struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH); 1119 int type = FRAME_NO_START_NO_END; 1120 1121 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) 1122 type += FRAME_WITH_START; 1123 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND) 1124 type += FRAME_WITH_END; 1125 return type; 1126} 1127 1128static int ipg_nic_rx_check_error(struct net_device *dev) 1129{ 1130 struct ipg_nic_private *sp = netdev_priv(dev); 1131 unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH; 1132 struct ipg_rx *rxfd = sp->rxd + entry; 1133 1134 if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & 1135 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | 1136 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | 1137 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) { 1138 IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", 1139 (unsigned long) rxfd->rfs); 1140 1141 /* Increment general receive error statistic. */ 1142 sp->stats.rx_errors++; 1143 1144 /* Increment detailed receive error statistics. */ 1145 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { 1146 IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); 1147 1148 sp->stats.rx_fifo_errors++; 1149 } 1150 1151 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { 1152 IPG_DEBUG_MSG("RX runt occured.\n"); 1153 sp->stats.rx_length_errors++; 1154 } 1155 1156 /* Do nothing for IPG_RFS_RXOVERSIZEDFRAME, 1157 * error count handled by a IPG statistic register. 1158 */ 1159 1160 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { 1161 IPG_DEBUG_MSG("RX alignment error occured.\n"); 1162 sp->stats.rx_frame_errors++; 1163 } 1164 1165 /* Do nothing for IPG_RFS_RXFCSERROR, error count 1166 * handled by a IPG statistic register. 1167 */ 1168 1169 /* Free the memory associated with the RX 1170 * buffer since it is erroneous and we will 1171 * not pass it to higher layer processes. 1172 */ 1173 if (sp->rx_buff[entry]) { 1174 pci_unmap_single(sp->pdev, 1175 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1176 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1177 1178 dev_kfree_skb_irq(sp->rx_buff[entry]); 1179 sp->rx_buff[entry] = NULL; 1180 } 1181 return ERROR_PACKET; 1182 } 1183 return NORMAL_PACKET; 1184} 1185 1186static void ipg_nic_rx_with_start_and_end(struct net_device *dev, 1187 struct ipg_nic_private *sp, 1188 struct ipg_rx *rxfd, unsigned entry) 1189{ 1190 struct ipg_jumbo *jumbo = &sp->jumbo; 1191 struct sk_buff *skb; 1192 int framelen; 1193 1194 if (jumbo->found_start) { 1195 dev_kfree_skb_irq(jumbo->skb); 1196 jumbo->found_start = 0; 1197 jumbo->current_size = 0; 1198 jumbo->skb = NULL; 1199 } 1200 1201 /* 1: found error, 0 no error */ 1202 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) 1203 return; 1204 1205 skb = sp->rx_buff[entry]; 1206 if (!skb) 1207 return; 1208 1209 /* accept this frame and send to upper layer */ 1210 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; 1211 if (framelen > sp->rxfrag_size) 1212 framelen = sp->rxfrag_size; 1213 1214 skb_put(skb, framelen); 1215 skb->protocol = eth_type_trans(skb, dev); 1216 skb->ip_summed = CHECKSUM_NONE; 1217 netif_rx(skb); 1218 sp->rx_buff[entry] = NULL; 1219} 1220 1221static void ipg_nic_rx_with_start(struct net_device *dev, 1222 struct ipg_nic_private *sp, 1223 struct ipg_rx *rxfd, unsigned entry) 1224{ 1225 struct ipg_jumbo *jumbo = &sp->jumbo; 1226 struct pci_dev *pdev = sp->pdev; 1227 struct sk_buff *skb; 1228 1229 /* 1: found error, 0 no error */ 1230 if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET) 1231 return; 1232 1233 /* accept this frame and send to upper layer */ 1234 skb = sp->rx_buff[entry]; 1235 if (!skb) 1236 return; 1237 1238 if (jumbo->found_start) 1239 dev_kfree_skb_irq(jumbo->skb); 1240 1241 pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1242 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1243 1244 skb_put(skb, sp->rxfrag_size); 1245 1246 jumbo->found_start = 1; 1247 jumbo->current_size = sp->rxfrag_size; 1248 jumbo->skb = skb; 1249 1250 sp->rx_buff[entry] = NULL; 1251} 1252 1253static void ipg_nic_rx_with_end(struct net_device *dev, 1254 struct ipg_nic_private *sp, 1255 struct ipg_rx *rxfd, unsigned entry) 1256{ 1257 struct ipg_jumbo *jumbo = &sp->jumbo; 1258 1259 /* 1: found error, 0 no error */ 1260 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { 1261 struct sk_buff *skb = sp->rx_buff[entry]; 1262 1263 if (!skb) 1264 return; 1265 1266 if (jumbo->found_start) { 1267 int framelen, endframelen; 1268 1269 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; 1270 1271 endframelen = framelen - jumbo->current_size; 1272 if (framelen > sp->rxsupport_size) 1273 dev_kfree_skb_irq(jumbo->skb); 1274 else { 1275 memcpy(skb_put(jumbo->skb, endframelen), 1276 skb->data, endframelen); 1277 1278 jumbo->skb->protocol = 1279 eth_type_trans(jumbo->skb, dev); 1280 1281 jumbo->skb->ip_summed = CHECKSUM_NONE; 1282 netif_rx(jumbo->skb); 1283 } 1284 } 1285 1286 jumbo->found_start = 0; 1287 jumbo->current_size = 0; 1288 jumbo->skb = NULL; 1289 1290 ipg_nic_rx_free_skb(dev); 1291 } else { 1292 dev_kfree_skb_irq(jumbo->skb); 1293 jumbo->found_start = 0; 1294 jumbo->current_size = 0; 1295 jumbo->skb = NULL; 1296 } 1297} 1298 1299static void ipg_nic_rx_no_start_no_end(struct net_device *dev, 1300 struct ipg_nic_private *sp, 1301 struct ipg_rx *rxfd, unsigned entry) 1302{ 1303 struct ipg_jumbo *jumbo = &sp->jumbo; 1304 1305 /* 1: found error, 0 no error */ 1306 if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) { 1307 struct sk_buff *skb = sp->rx_buff[entry]; 1308 1309 if (skb) { 1310 if (jumbo->found_start) { 1311 jumbo->current_size += sp->rxfrag_size; 1312 if (jumbo->current_size <= sp->rxsupport_size) { 1313 memcpy(skb_put(jumbo->skb, 1314 sp->rxfrag_size), 1315 skb->data, sp->rxfrag_size); 1316 } 1317 } 1318 ipg_nic_rx_free_skb(dev); 1319 } 1320 } else { 1321 dev_kfree_skb_irq(jumbo->skb); 1322 jumbo->found_start = 0; 1323 jumbo->current_size = 0; 1324 jumbo->skb = NULL; 1325 } 1326} 1327 1328static int ipg_nic_rx_jumbo(struct net_device *dev) 1329{ 1330 struct ipg_nic_private *sp = netdev_priv(dev); 1331 unsigned int curr = sp->rx_current; 1332 void __iomem *ioaddr = sp->ioaddr; 1333 unsigned int i; 1334 1335 IPG_DEBUG_MSG("_nic_rx\n"); 1336 1337 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { 1338 unsigned int entry = curr % IPG_RFDLIST_LENGTH; 1339 struct ipg_rx *rxfd = sp->rxd + entry; 1340 1341 if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE))) 1342 break; 1343 1344 switch (ipg_nic_rx_check_frame_type(dev)) { 1345 case FRAME_WITH_START_WITH_END: 1346 ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry); 1347 break; 1348 case FRAME_WITH_START: 1349 ipg_nic_rx_with_start(dev, sp, rxfd, entry); 1350 break; 1351 case FRAME_WITH_END: 1352 ipg_nic_rx_with_end(dev, sp, rxfd, entry); 1353 break; 1354 case FRAME_NO_START_NO_END: 1355 ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry); 1356 break; 1357 } 1358 } 1359 1360 sp->rx_current = curr; 1361 1362 if (i == IPG_MAXRFDPROCESS_COUNT) { 1363 /* There are more RFDs to process, however the 1364 * allocated amount of RFD processing time has 1365 * expired. Assert Interrupt Requested to make 1366 * sure we come back to process the remaining RFDs. 1367 */ 1368 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); 1369 } 1370 1371 ipg_nic_rxrestore(dev); 1372 1373 return 0; 1374} 1375 1376static int ipg_nic_rx(struct net_device *dev) 1377{ 1378 /* Transfer received Ethernet frames to higher network layers. */ 1379 struct ipg_nic_private *sp = netdev_priv(dev); 1380 unsigned int curr = sp->rx_current; 1381 void __iomem *ioaddr = sp->ioaddr; 1382 struct ipg_rx *rxfd; 1383 unsigned int i; 1384 1385 IPG_DEBUG_MSG("_nic_rx\n"); 1386 1387#define __RFS_MASK \ 1388 cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND) 1389 1390 for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) { 1391 unsigned int entry = curr % IPG_RFDLIST_LENGTH; 1392 struct sk_buff *skb = sp->rx_buff[entry]; 1393 unsigned int framelen; 1394 1395 rxfd = sp->rxd + entry; 1396 1397 if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb) 1398 break; 1399 1400 /* Get received frame length. */ 1401 framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN; 1402 1403 /* Check for jumbo frame arrival with too small 1404 * RXFRAG_SIZE. 1405 */ 1406 if (framelen > sp->rxfrag_size) { 1407 IPG_DEBUG_MSG 1408 ("RFS FrameLen > allocated fragment size.\n"); 1409 1410 framelen = sp->rxfrag_size; 1411 } 1412 1413 if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) & 1414 (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME | 1415 IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR | 1416 IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) { 1417 1418 IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n", 1419 (unsigned long int) rxfd->rfs); 1420 1421 /* Increment general receive error statistic. */ 1422 sp->stats.rx_errors++; 1423 1424 /* Increment detailed receive error statistics. */ 1425 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) { 1426 IPG_DEBUG_MSG("RX FIFO overrun occured.\n"); 1427 sp->stats.rx_fifo_errors++; 1428 } 1429 1430 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) { 1431 IPG_DEBUG_MSG("RX runt occured.\n"); 1432 sp->stats.rx_length_errors++; 1433 } 1434 1435 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ; 1436 /* Do nothing, error count handled by a IPG 1437 * statistic register. 1438 */ 1439 1440 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) { 1441 IPG_DEBUG_MSG("RX alignment error occured.\n"); 1442 sp->stats.rx_frame_errors++; 1443 } 1444 1445 if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ; 1446 /* Do nothing, error count handled by a IPG 1447 * statistic register. 1448 */ 1449 1450 /* Free the memory associated with the RX 1451 * buffer since it is erroneous and we will 1452 * not pass it to higher layer processes. 1453 */ 1454 if (skb) { 1455 __le64 info = rxfd->frag_info; 1456 1457 pci_unmap_single(sp->pdev, 1458 le64_to_cpu(info) & ~IPG_RFI_FRAGLEN, 1459 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1460 1461 dev_kfree_skb_irq(skb); 1462 } 1463 } else { 1464 1465 /* Adjust the new buffer length to accomodate the size 1466 * of the received frame. 1467 */ 1468 skb_put(skb, framelen); 1469 1470 /* Set the buffer's protocol field to Ethernet. */ 1471 skb->protocol = eth_type_trans(skb, dev); 1472 1473 /* The IPG encountered an error with (or 1474 * there were no) IP/TCP/UDP checksums. 1475 * This may or may not indicate an invalid 1476 * IP/TCP/UDP frame was received. Let the 1477 * upper layer decide. 1478 */ 1479 skb->ip_summed = CHECKSUM_NONE; 1480 1481 /* Hand off frame for higher layer processing. 1482 * The function netif_rx() releases the sk_buff 1483 * when processing completes. 1484 */ 1485 netif_rx(skb); 1486 } 1487 1488 /* Assure RX buffer is not reused by IPG. */ 1489 sp->rx_buff[entry] = NULL; 1490 } 1491 1492 /* 1493 * If there are more RFDs to proces and the allocated amount of RFD 1494 * processing time has expired, assert Interrupt Requested to make 1495 * sure we come back to process the remaining RFDs. 1496 */ 1497 if (i == IPG_MAXRFDPROCESS_COUNT) 1498 ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL); 1499 1500#ifdef IPG_DEBUG 1501 /* Check if the RFD list contained no receive frame data. */ 1502 if (!i) 1503 sp->EmptyRFDListCount++; 1504#endif 1505 while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) && 1506 !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) && 1507 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) { 1508 unsigned int entry = curr++ % IPG_RFDLIST_LENGTH; 1509 1510 rxfd = sp->rxd + entry; 1511 1512 IPG_DEBUG_MSG("Frame requires multiple RFDs.\n"); 1513 1514 /* An unexpected event, additional code needed to handle 1515 * properly. So for the time being, just disregard the 1516 * frame. 1517 */ 1518 1519 /* Free the memory associated with the RX 1520 * buffer since it is erroneous and we will 1521 * not pass it to higher layer processes. 1522 */ 1523 if (sp->rx_buff[entry]) { 1524 pci_unmap_single(sp->pdev, 1525 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1526 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1527 dev_kfree_skb_irq(sp->rx_buff[entry]); 1528 } 1529 1530 /* Assure RX buffer is not reused by IPG. */ 1531 sp->rx_buff[entry] = NULL; 1532 } 1533 1534 sp->rx_current = curr; 1535 1536 /* Check to see if there are a minimum number of used 1537 * RFDs before restoring any (should improve performance.) 1538 */ 1539 if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE) 1540 ipg_nic_rxrestore(dev); 1541 1542 return 0; 1543} 1544 1545static void ipg_reset_after_host_error(struct work_struct *work) 1546{ 1547 struct ipg_nic_private *sp = 1548 container_of(work, struct ipg_nic_private, task.work); 1549 struct net_device *dev = sp->dev; 1550 1551 /* 1552 * Acknowledge HostError interrupt by resetting 1553 * IPG DMA and HOST. 1554 */ 1555 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); 1556 1557 init_rfdlist(dev); 1558 init_tfdlist(dev); 1559 1560 if (ipg_io_config(dev) < 0) { 1561 printk(KERN_INFO "%s: Cannot recover from PCI error.\n", 1562 dev->name); 1563 schedule_delayed_work(&sp->task, HZ); 1564 } 1565} 1566 1567static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst) 1568{ 1569 struct net_device *dev = dev_inst; 1570 struct ipg_nic_private *sp = netdev_priv(dev); 1571 void __iomem *ioaddr = sp->ioaddr; 1572 unsigned int handled = 0; 1573 u16 status; 1574 1575 IPG_DEBUG_MSG("_interrupt_handler\n"); 1576 1577 if (sp->is_jumbo) 1578 ipg_nic_rxrestore(dev); 1579 1580 spin_lock(&sp->lock); 1581 1582 /* Get interrupt source information, and acknowledge 1583 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly, 1584 * IntRequested, MacControlFrame, LinkEvent) interrupts 1585 * if issued. Also, all IPG interrupts are disabled by 1586 * reading IntStatusAck. 1587 */ 1588 status = ipg_r16(INT_STATUS_ACK); 1589 1590 IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status); 1591 1592 /* Shared IRQ of remove event. */ 1593 if (!(status & IPG_IS_RSVD_MASK)) 1594 goto out_enable; 1595 1596 handled = 1; 1597 1598 if (unlikely(!netif_running(dev))) 1599 goto out_unlock; 1600 1601 /* If RFDListEnd interrupt, restore all used RFDs. */ 1602 if (status & IPG_IS_RFD_LIST_END) { 1603 IPG_DEBUG_MSG("RFDListEnd Interrupt.\n"); 1604 1605 /* The RFD list end indicates an RFD was encountered 1606 * with a 0 NextPtr, or with an RFDDone bit set to 1 1607 * (indicating the RFD is not read for use by the 1608 * IPG.) Try to restore all RFDs. 1609 */ 1610 ipg_nic_rxrestore(dev); 1611 1612#ifdef IPG_DEBUG 1613 /* Increment the RFDlistendCount counter. */ 1614 sp->RFDlistendCount++; 1615#endif 1616 } 1617 1618 /* If RFDListEnd, RxDMAPriority, RxDMAComplete, or 1619 * IntRequested interrupt, process received frames. */ 1620 if ((status & IPG_IS_RX_DMA_PRIORITY) || 1621 (status & IPG_IS_RFD_LIST_END) || 1622 (status & IPG_IS_RX_DMA_COMPLETE) || 1623 (status & IPG_IS_INT_REQUESTED)) { 1624#ifdef IPG_DEBUG 1625 /* Increment the RFD list checked counter if interrupted 1626 * only to check the RFD list. */ 1627 if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END | 1628 IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) & 1629 (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE | 1630 IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE | 1631 IPG_IS_UPDATE_STATS))) 1632 sp->RFDListCheckedCount++; 1633#endif 1634 1635 if (sp->is_jumbo) 1636 ipg_nic_rx_jumbo(dev); 1637 else 1638 ipg_nic_rx(dev); 1639 } 1640 1641 /* If TxDMAComplete interrupt, free used TFDs. */ 1642 if (status & IPG_IS_TX_DMA_COMPLETE) 1643 ipg_nic_txfree(dev); 1644 1645 /* TxComplete interrupts indicate one of numerous actions. 1646 * Determine what action to take based on TXSTATUS register. 1647 */ 1648 if (status & IPG_IS_TX_COMPLETE) 1649 ipg_nic_txcleanup(dev); 1650 1651 /* If UpdateStats interrupt, update Linux Ethernet statistics */ 1652 if (status & IPG_IS_UPDATE_STATS) 1653 ipg_nic_get_stats(dev); 1654 1655 /* If HostError interrupt, reset IPG. */ 1656 if (status & IPG_IS_HOST_ERROR) { 1657 IPG_DDEBUG_MSG("HostError Interrupt\n"); 1658 1659 schedule_delayed_work(&sp->task, 0); 1660 } 1661 1662 /* If LinkEvent interrupt, resolve autonegotiation. */ 1663 if (status & IPG_IS_LINK_EVENT) { 1664 if (ipg_config_autoneg(dev) < 0) 1665 printk(KERN_INFO "%s: Auto-negotiation error.\n", 1666 dev->name); 1667 } 1668 1669 /* If MACCtrlFrame interrupt, do nothing. */ 1670 if (status & IPG_IS_MAC_CTRL_FRAME) 1671 IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n"); 1672 1673 /* If RxComplete interrupt, do nothing. */ 1674 if (status & IPG_IS_RX_COMPLETE) 1675 IPG_DEBUG_MSG("RxComplete interrupt.\n"); 1676 1677 /* If RxEarly interrupt, do nothing. */ 1678 if (status & IPG_IS_RX_EARLY) 1679 IPG_DEBUG_MSG("RxEarly interrupt.\n"); 1680 1681out_enable: 1682 /* Re-enable IPG interrupts. */ 1683 ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE | 1684 IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE | 1685 IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE); 1686out_unlock: 1687 spin_unlock(&sp->lock); 1688 1689 return IRQ_RETVAL(handled); 1690} 1691 1692static void ipg_rx_clear(struct ipg_nic_private *sp) 1693{ 1694 unsigned int i; 1695 1696 for (i = 0; i < IPG_RFDLIST_LENGTH; i++) { 1697 if (sp->rx_buff[i]) { 1698 struct ipg_rx *rxfd = sp->rxd + i; 1699 1700 dev_kfree_skb_irq(sp->rx_buff[i]); 1701 sp->rx_buff[i] = NULL; 1702 pci_unmap_single(sp->pdev, 1703 le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN, 1704 sp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1705 } 1706 } 1707} 1708 1709static void ipg_tx_clear(struct ipg_nic_private *sp) 1710{ 1711 unsigned int i; 1712 1713 for (i = 0; i < IPG_TFDLIST_LENGTH; i++) { 1714 if (sp->tx_buff[i]) { 1715 struct ipg_tx *txfd = sp->txd + i; 1716 1717 pci_unmap_single(sp->pdev, 1718 le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN, 1719 sp->tx_buff[i]->len, PCI_DMA_TODEVICE); 1720 1721 dev_kfree_skb_irq(sp->tx_buff[i]); 1722 1723 sp->tx_buff[i] = NULL; 1724 } 1725 } 1726} 1727 1728static int ipg_nic_open(struct net_device *dev) 1729{ 1730 struct ipg_nic_private *sp = netdev_priv(dev); 1731 void __iomem *ioaddr = sp->ioaddr; 1732 struct pci_dev *pdev = sp->pdev; 1733 int rc; 1734 1735 IPG_DEBUG_MSG("_nic_open\n"); 1736 1737 sp->rx_buf_sz = sp->rxsupport_size; 1738 1739 /* Check for interrupt line conflicts, and request interrupt 1740 * line for IPG. 1741 * 1742 * IMPORTANT: Disable IPG interrupts prior to registering 1743 * IRQ. 1744 */ 1745 ipg_w16(0x0000, INT_ENABLE); 1746 1747 /* Register the interrupt line to be used by the IPG within 1748 * the Linux system. 1749 */ 1750 rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED, 1751 dev->name, dev); 1752 if (rc < 0) { 1753 printk(KERN_INFO "%s: Error when requesting interrupt.\n", 1754 dev->name); 1755 goto out; 1756 } 1757 1758 dev->irq = pdev->irq; 1759 1760 rc = -ENOMEM; 1761 1762 sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES, 1763 &sp->rxd_map, GFP_KERNEL); 1764 if (!sp->rxd) 1765 goto err_free_irq_0; 1766 1767 sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES, 1768 &sp->txd_map, GFP_KERNEL); 1769 if (!sp->txd) 1770 goto err_free_rx_1; 1771 1772 rc = init_rfdlist(dev); 1773 if (rc < 0) { 1774 printk(KERN_INFO "%s: Error during configuration.\n", 1775 dev->name); 1776 goto err_free_tx_2; 1777 } 1778 1779 init_tfdlist(dev); 1780 1781 rc = ipg_io_config(dev); 1782 if (rc < 0) { 1783 printk(KERN_INFO "%s: Error during configuration.\n", 1784 dev->name); 1785 goto err_release_tfdlist_3; 1786 } 1787 1788 /* Resolve autonegotiation. */ 1789 if (ipg_config_autoneg(dev) < 0) 1790 printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name); 1791 1792 /* initialize JUMBO Frame control variable */ 1793 sp->jumbo.found_start = 0; 1794 sp->jumbo.current_size = 0; 1795 sp->jumbo.skb = NULL; 1796 1797 /* Enable transmit and receive operation of the IPG. */ 1798 ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) & 1799 IPG_MC_RSVD_MASK, MAC_CTRL); 1800 1801 netif_start_queue(dev); 1802out: 1803 return rc; 1804 1805err_release_tfdlist_3: 1806 ipg_tx_clear(sp); 1807 ipg_rx_clear(sp); 1808err_free_tx_2: 1809 dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); 1810err_free_rx_1: 1811 dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); 1812err_free_irq_0: 1813 free_irq(pdev->irq, dev); 1814 goto out; 1815} 1816 1817static int ipg_nic_stop(struct net_device *dev) 1818{ 1819 struct ipg_nic_private *sp = netdev_priv(dev); 1820 void __iomem *ioaddr = sp->ioaddr; 1821 struct pci_dev *pdev = sp->pdev; 1822 1823 IPG_DEBUG_MSG("_nic_stop\n"); 1824 1825 netif_stop_queue(dev); 1826 1827 IPG_DUMPTFDLIST(dev); 1828 1829 do { 1830 (void) ipg_r16(INT_STATUS_ACK); 1831 1832 ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA); 1833 1834 synchronize_irq(pdev->irq); 1835 } while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK); 1836 1837 ipg_rx_clear(sp); 1838 1839 ipg_tx_clear(sp); 1840 1841 pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map); 1842 pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map); 1843 1844 free_irq(pdev->irq, dev); 1845 1846 return 0; 1847} 1848 1849static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb, 1850 struct net_device *dev) 1851{ 1852 struct ipg_nic_private *sp = netdev_priv(dev); 1853 void __iomem *ioaddr = sp->ioaddr; 1854 unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH; 1855 unsigned long flags; 1856 struct ipg_tx *txfd; 1857 1858 IPG_DDEBUG_MSG("_nic_hard_start_xmit\n"); 1859 1860 /* If in 10Mbps mode, stop the transmit queue so 1861 * no more transmit frames are accepted. 1862 */ 1863 if (sp->tenmbpsmode) 1864 netif_stop_queue(dev); 1865 1866 if (sp->reset_current_tfd) { 1867 sp->reset_current_tfd = 0; 1868 entry = 0; 1869 } 1870 1871 txfd = sp->txd + entry; 1872 1873 sp->tx_buff[entry] = skb; 1874 1875 /* Clear all TFC fields, except TFDDONE. */ 1876 txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE); 1877 1878 /* Specify the TFC field within the TFD. */ 1879 txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED | 1880 (IPG_TFC_FRAMEID & sp->tx_current) | 1881 (IPG_TFC_FRAGCOUNT & (1 << 24))); 1882 /* 1883 * 16--17 (WordAlign) <- 3 (disable), 1884 * 0--15 (FrameId) <- sp->tx_current, 1885 * 24--27 (FragCount) <- 1 1886 */ 1887 1888 /* Request TxComplete interrupts at an interval defined 1889 * by the constant IPG_FRAMESBETWEENTXCOMPLETES. 1890 * Request TxComplete interrupt for every frame 1891 * if in 10Mbps mode to accomodate problem with 10Mbps 1892 * processing. 1893 */ 1894 if (sp->tenmbpsmode) 1895 txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE); 1896 txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE); 1897 /* Based on compilation option, determine if FCS is to be 1898 * appended to transmit frame by IPG. 1899 */ 1900 if (!(IPG_APPEND_FCS_ON_TX)) 1901 txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE); 1902 1903 /* Based on compilation option, determine if IP, TCP and/or 1904 * UDP checksums are to be added to transmit frame by IPG. 1905 */ 1906 if (IPG_ADD_IPCHECKSUM_ON_TX) 1907 txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE); 1908 1909 if (IPG_ADD_TCPCHECKSUM_ON_TX) 1910 txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE); 1911 1912 if (IPG_ADD_UDPCHECKSUM_ON_TX) 1913 txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE); 1914 1915 /* Based on compilation option, determine if VLAN tag info is to be 1916 * inserted into transmit frame by IPG. 1917 */ 1918 if (IPG_INSERT_MANUAL_VLAN_TAG) { 1919 txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT | 1920 ((u64) IPG_MANUAL_VLAN_VID << 32) | 1921 ((u64) IPG_MANUAL_VLAN_CFI << 44) | 1922 ((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45)); 1923 } 1924 1925 /* The fragment start location within system memory is defined 1926 * by the sk_buff structure's data field. The physical address 1927 * of this location within the system's virtual memory space 1928 * is determined using the IPG_HOST2BUS_MAP function. 1929 */ 1930 txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data, 1931 skb->len, PCI_DMA_TODEVICE)); 1932 1933 /* The length of the fragment within system memory is defined by 1934 * the sk_buff structure's len field. 1935 */ 1936 txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN & 1937 ((u64) (skb->len & 0xffff) << 48)); 1938 1939 /* Clear the TFDDone bit last to indicate the TFD is ready 1940 * for transfer to the IPG. 1941 */ 1942 txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE); 1943 1944 spin_lock_irqsave(&sp->lock, flags); 1945 1946 sp->tx_current++; 1947 1948 mmiowb(); 1949 1950 ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL); 1951 1952 if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH)) 1953 netif_stop_queue(dev); 1954 1955 spin_unlock_irqrestore(&sp->lock, flags); 1956 1957 return NETDEV_TX_OK; 1958} 1959 1960static void ipg_set_phy_default_param(unsigned char rev, 1961 struct net_device *dev, int phy_address) 1962{ 1963 unsigned short length; 1964 unsigned char revision; 1965 unsigned short *phy_param; 1966 unsigned short address, value; 1967 1968 phy_param = &DefaultPhyParam[0]; 1969 length = *phy_param & 0x00FF; 1970 revision = (unsigned char)((*phy_param) >> 8); 1971 phy_param++; 1972 while (length != 0) { 1973 if (rev == revision) { 1974 while (length > 1) { 1975 address = *phy_param; 1976 value = *(phy_param + 1); 1977 phy_param += 2; 1978 mdio_write(dev, phy_address, address, value); 1979 length -= 4; 1980 } 1981 break; 1982 } else { 1983 phy_param += length / 2; 1984 length = *phy_param & 0x00FF; 1985 revision = (unsigned char)((*phy_param) >> 8); 1986 phy_param++; 1987 } 1988 } 1989} 1990 1991static int read_eeprom(struct net_device *dev, int eep_addr) 1992{ 1993 void __iomem *ioaddr = ipg_ioaddr(dev); 1994 unsigned int i; 1995 int ret = 0; 1996 u16 value; 1997 1998 value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff); 1999 ipg_w16(value, EEPROM_CTRL); 2000 2001 for (i = 0; i < 1000; i++) { 2002 u16 data; 2003 2004 mdelay(10); 2005 data = ipg_r16(EEPROM_CTRL); 2006 if (!(data & IPG_EC_EEPROM_BUSY)) { 2007 ret = ipg_r16(EEPROM_DATA); 2008 break; 2009 } 2010 } 2011 return ret; 2012} 2013 2014static void ipg_init_mii(struct net_device *dev) 2015{ 2016 struct ipg_nic_private *sp = netdev_priv(dev); 2017 struct mii_if_info *mii_if = &sp->mii_if; 2018 int phyaddr; 2019 2020 mii_if->dev = dev; 2021 mii_if->mdio_read = mdio_read; 2022 mii_if->mdio_write = mdio_write; 2023 mii_if->phy_id_mask = 0x1f; 2024 mii_if->reg_num_mask = 0x1f; 2025 2026 mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev); 2027 2028 if (phyaddr != 0x1f) { 2029 u16 mii_phyctrl, mii_1000cr; 2030 u8 revisionid = 0; 2031 2032 mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000); 2033 mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF | 2034 GMII_PHY_1000BASETCONTROL_PreferMaster; 2035 mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr); 2036 2037 mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR); 2038 2039 /* Set default phyparam */ 2040 pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid); 2041 ipg_set_phy_default_param(revisionid, dev, phyaddr); 2042 2043 /* Reset PHY */ 2044 mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART; 2045 mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl); 2046 2047 } 2048} 2049 2050static int ipg_hw_init(struct net_device *dev) 2051{ 2052 struct ipg_nic_private *sp = netdev_priv(dev); 2053 void __iomem *ioaddr = sp->ioaddr; 2054 unsigned int i; 2055 int rc; 2056 2057 /* Read/Write and Reset EEPROM Value */ 2058 /* Read LED Mode Configuration from EEPROM */ 2059 sp->led_mode = read_eeprom(dev, 6); 2060 2061 /* Reset all functions within the IPG. Do not assert 2062 * RST_OUT as not compatible with some PHYs. 2063 */ 2064 rc = ipg_reset(dev, IPG_RESET_MASK); 2065 if (rc < 0) 2066 goto out; 2067 2068 ipg_init_mii(dev); 2069 2070 /* Read MAC Address from EEPROM */ 2071 for (i = 0; i < 3; i++) 2072 sp->station_addr[i] = read_eeprom(dev, 16 + i); 2073 2074 for (i = 0; i < 3; i++) 2075 ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i); 2076 2077 /* Set station address in ethernet_device structure. */ 2078 dev->dev_addr[0] = ipg_r16(STATION_ADDRESS_0) & 0x00ff; 2079 dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8; 2080 dev->dev_addr[2] = ipg_r16(STATION_ADDRESS_1) & 0x00ff; 2081 dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8; 2082 dev->dev_addr[4] = ipg_r16(STATION_ADDRESS_2) & 0x00ff; 2083 dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8; 2084out: 2085 return rc; 2086} 2087 2088static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 2089{ 2090 struct ipg_nic_private *sp = netdev_priv(dev); 2091 int rc; 2092 2093 mutex_lock(&sp->mii_mutex); 2094 rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL); 2095 mutex_unlock(&sp->mii_mutex); 2096 2097 return rc; 2098} 2099 2100static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu) 2101{ 2102 struct ipg_nic_private *sp = netdev_priv(dev); 2103 int err; 2104 2105 /* Function to accomodate changes to Maximum Transfer Unit 2106 * (or MTU) of IPG NIC. Cannot use default function since 2107 * the default will not allow for MTU > 1500 bytes. 2108 */ 2109 2110 IPG_DEBUG_MSG("_nic_change_mtu\n"); 2111 2112 /* 2113 * Check that the new MTU value is between 68 (14 byte header, 46 byte 2114 * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU. 2115 */ 2116 if (new_mtu < 68 || new_mtu > 10240) 2117 return -EINVAL; 2118 2119 err = ipg_nic_stop(dev); 2120 if (err) 2121 return err; 2122 2123 dev->mtu = new_mtu; 2124 2125 sp->max_rxframe_size = new_mtu; 2126 2127 sp->rxfrag_size = new_mtu; 2128 if (sp->rxfrag_size > 4088) 2129 sp->rxfrag_size = 4088; 2130 2131 sp->rxsupport_size = sp->max_rxframe_size; 2132 2133 if (new_mtu > 0x0600) 2134 sp->is_jumbo = true; 2135 else 2136 sp->is_jumbo = false; 2137 2138 return ipg_nic_open(dev); 2139} 2140 2141static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2142{ 2143 struct ipg_nic_private *sp = netdev_priv(dev); 2144 int rc; 2145 2146 mutex_lock(&sp->mii_mutex); 2147 rc = mii_ethtool_gset(&sp->mii_if, cmd); 2148 mutex_unlock(&sp->mii_mutex); 2149 2150 return rc; 2151} 2152 2153static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 2154{ 2155 struct ipg_nic_private *sp = netdev_priv(dev); 2156 int rc; 2157 2158 mutex_lock(&sp->mii_mutex); 2159 rc = mii_ethtool_sset(&sp->mii_if, cmd); 2160 mutex_unlock(&sp->mii_mutex); 2161 2162 return rc; 2163} 2164 2165static int ipg_nway_reset(struct net_device *dev) 2166{ 2167 struct ipg_nic_private *sp = netdev_priv(dev); 2168 int rc; 2169 2170 mutex_lock(&sp->mii_mutex); 2171 rc = mii_nway_restart(&sp->mii_if); 2172 mutex_unlock(&sp->mii_mutex); 2173 2174 return rc; 2175} 2176 2177static const struct ethtool_ops ipg_ethtool_ops = { 2178 .get_settings = ipg_get_settings, 2179 .set_settings = ipg_set_settings, 2180 .nway_reset = ipg_nway_reset, 2181}; 2182 2183static void __devexit ipg_remove(struct pci_dev *pdev) 2184{ 2185 struct net_device *dev = pci_get_drvdata(pdev); 2186 struct ipg_nic_private *sp = netdev_priv(dev); 2187 2188 IPG_DEBUG_MSG("_remove\n"); 2189 2190 /* Un-register Ethernet device. */ 2191 unregister_netdev(dev); 2192 2193 pci_iounmap(pdev, sp->ioaddr); 2194 2195 pci_release_regions(pdev); 2196 2197 free_netdev(dev); 2198 pci_disable_device(pdev); 2199 pci_set_drvdata(pdev, NULL); 2200} 2201 2202static const struct net_device_ops ipg_netdev_ops = { 2203 .ndo_open = ipg_nic_open, 2204 .ndo_stop = ipg_nic_stop, 2205 .ndo_start_xmit = ipg_nic_hard_start_xmit, 2206 .ndo_get_stats = ipg_nic_get_stats, 2207 .ndo_set_multicast_list = ipg_nic_set_multicast_list, 2208 .ndo_do_ioctl = ipg_ioctl, 2209 .ndo_tx_timeout = ipg_tx_timeout, 2210 .ndo_change_mtu = ipg_nic_change_mtu, 2211 .ndo_set_mac_address = eth_mac_addr, 2212 .ndo_validate_addr = eth_validate_addr, 2213}; 2214 2215static int __devinit ipg_probe(struct pci_dev *pdev, 2216 const struct pci_device_id *id) 2217{ 2218 unsigned int i = id->driver_data; 2219 struct ipg_nic_private *sp; 2220 struct net_device *dev; 2221 void __iomem *ioaddr; 2222 int rc; 2223 2224 rc = pci_enable_device(pdev); 2225 if (rc < 0) 2226 goto out; 2227 2228 printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]); 2229 2230 pci_set_master(pdev); 2231 2232 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); 2233 if (rc < 0) { 2234 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2235 if (rc < 0) { 2236 printk(KERN_ERR "%s: DMA config failed.\n", 2237 pci_name(pdev)); 2238 goto err_disable_0; 2239 } 2240 } 2241 2242 /* 2243 * Initialize net device. 2244 */ 2245 dev = alloc_etherdev(sizeof(struct ipg_nic_private)); 2246 if (!dev) { 2247 printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev)); 2248 rc = -ENOMEM; 2249 goto err_disable_0; 2250 } 2251 2252 sp = netdev_priv(dev); 2253 spin_lock_init(&sp->lock); 2254 mutex_init(&sp->mii_mutex); 2255 2256 sp->is_jumbo = IPG_IS_JUMBO; 2257 sp->rxfrag_size = IPG_RXFRAG_SIZE; 2258 sp->rxsupport_size = IPG_RXSUPPORT_SIZE; 2259 sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE; 2260 2261 /* Declare IPG NIC functions for Ethernet device methods. 2262 */ 2263 dev->netdev_ops = &ipg_netdev_ops; 2264 SET_NETDEV_DEV(dev, &pdev->dev); 2265 SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops); 2266 2267 rc = pci_request_regions(pdev, DRV_NAME); 2268 if (rc) 2269 goto err_free_dev_1; 2270 2271 ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1)); 2272 if (!ioaddr) { 2273 printk(KERN_ERR "%s cannot map MMIO\n", pci_name(pdev)); 2274 rc = -EIO; 2275 goto err_release_regions_2; 2276 } 2277 2278 /* Save the pointer to the PCI device information. */ 2279 sp->ioaddr = ioaddr; 2280 sp->pdev = pdev; 2281 sp->dev = dev; 2282 2283 INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error); 2284 2285 pci_set_drvdata(pdev, dev); 2286 2287 rc = ipg_hw_init(dev); 2288 if (rc < 0) 2289 goto err_unmap_3; 2290 2291 rc = register_netdev(dev); 2292 if (rc < 0) 2293 goto err_unmap_3; 2294 2295 printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name); 2296out: 2297 return rc; 2298 2299err_unmap_3: 2300 pci_iounmap(pdev, ioaddr); 2301err_release_regions_2: 2302 pci_release_regions(pdev); 2303err_free_dev_1: 2304 free_netdev(dev); 2305err_disable_0: 2306 pci_disable_device(pdev); 2307 goto out; 2308} 2309 2310static struct pci_driver ipg_pci_driver = { 2311 .name = IPG_DRIVER_NAME, 2312 .id_table = ipg_pci_tbl, 2313 .probe = ipg_probe, 2314 .remove = __devexit_p(ipg_remove), 2315}; 2316 2317static int __init ipg_init_module(void) 2318{ 2319 return pci_register_driver(&ipg_pci_driver); 2320} 2321 2322static void __exit ipg_exit_module(void) 2323{ 2324 pci_unregister_driver(&ipg_pci_driver); 2325} 2326 2327module_init(ipg_init_module); 2328module_exit(ipg_exit_module);