Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.24
/*
 * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
 *
 * Copyright (C) 2003, 2007 IC Plus Corp
 *
 * Original Author:
 *
 *   Craig Rich
 *   Sundance Technology, Inc.
 *   www.sundanceti.com
 *   craig_rich@sundanceti.com
 *
 * Current Maintainer:
 *
 *   Sorbica Shieh.
 *   http://www.icplus.com.tw
 *   sorbica@icplus.com.tw
 *
 *   Jesse Huang
 *   http://www.icplus.com.tw
 *   jesse@icplus.com.tw
 */
#include <linux/crc32.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/mutex.h>

#include <asm/div64.h>

#define IPG_RX_RING_BYTES	(sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
#define IPG_TX_RING_BYTES	(sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
#define IPG_RESET_MASK \
	(IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
	 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
	 IPG_AC_AUTO_INIT)

#define ipg_w32(val32, reg)	iowrite32((val32), ioaddr + (reg))
#define ipg_w16(val16, reg)	iowrite16((val16), ioaddr + (reg))
#define ipg_w8(val8, reg)	iowrite8((val8), ioaddr + (reg))

#define ipg_r32(reg)		ioread32(ioaddr + (reg))
#define ipg_r16(reg)		ioread16(ioaddr + (reg))
#define ipg_r8(reg)		ioread8(ioaddr + (reg))

#define JUMBO_FRAME_4k_ONLY
enum {
	netdev_io_size = 128
};

#include "ipg.h"
#define DRV_NAME	"ipg"

MODULE_AUTHOR("IC Plus Corp. 2003");
MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver "
		   DrvVer);
MODULE_LICENSE("GPL");

//variable record -- index by leading revision/length
//Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
static unsigned short DefaultPhyParam[] = {
	// 11/12/03 IP1000A v1-3 rev=0x40
	/*--------------------------------------------------------------------------
	(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
	27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
	31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7, 9, 0x0700,
	--------------------------------------------------------------------------*/
	// 12/17/03 IP1000A v1-4 rev=0x40
	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
	0x0000,
	30, 0x005e, 9, 0x0700,
	// 01/09/04 IP1000A v1-5 rev=0x41
	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
	0x0000,
	30, 0x005e, 9, 0x0700,
	0x0000
};

static const char *ipg_brand_name[] = {
	"IC PLUS IP1000 1000/100/10 based NIC",
	"Sundance Technology ST2021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
	"Tamarack Microelectronics TC9020/9021 based NIC",
	"D-Link NIC",
	"D-Link NIC IP1000A"
};

static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
	{ PCI_VDEVICE(SUNDANCE,	0x1023), 0 },
	{ PCI_VDEVICE(SUNDANCE,	0x2021), 1 },
	{ PCI_VDEVICE(SUNDANCE,	0x1021), 2 },
	{ PCI_VDEVICE(DLINK,	0x9021), 3 },
	{ PCI_VDEVICE(DLINK,	0x4000), 4 },
	{ PCI_VDEVICE(DLINK,	0x4020), 5 },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);

static inline void __iomem *ipg_ioaddr(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	return sp->ioaddr;
}

#ifdef IPG_DEBUG
static void ipg_dump_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	u32 offset;

	IPG_DEBUG_MSG("_dump_rfdlist\n");

	printk(KERN_INFO "rx_current = %2.2x\n", sp->rx_current);
	printk(KERN_INFO "rx_dirty = %2.2x\n", sp->rx_dirty);
	printk(KERN_INFO "RFDList start address = %16.16lx\n",
	       (unsigned long) sp->rxd_map);
	printk(KERN_INFO "RFDListPtr register = %8.8x%8.8x\n",
	       ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFDNextPtr = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].next_desc);
		offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x RFS = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].rfs);
		offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
		printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
		       offset, (unsigned long) sp->rxd[i].frag_info);
	}
}

static void ipg_dump_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	u32 offset;

	IPG_DEBUG_MSG("_dump_tfdlist\n");

	printk(KERN_INFO "tx_current = %2.2x\n", sp->tx_current);
	printk(KERN_INFO "tx_dirty = %2.2x\n", sp->tx_dirty);
	printk(KERN_INFO "TFDList start address = %16.16lx\n",
	       (unsigned long) sp->txd_map);
	printk(KERN_INFO "TFDListPtr register = %8.8x%8.8x\n",
	       ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFDNextPtr = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].next_desc);

		offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x TFC = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].tfc);
		offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
		printk(KERN_INFO "%2.2x %4.4x frag_info = %16.16lx\n", i,
		       offset, (unsigned long) sp->txd[i].frag_info);
	}
}
#endif

static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
{
	ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
	ndelay(IPG_PC_PHYCTRLWAIT_NS);
}

static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
{
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
}

static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;

	ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
}

static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
		phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
}

static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
{
	u16 bit_data;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);

	bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;

	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);

	return bit_data;
}

/*
 * Read a register from the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	/*
	 * The GMII management frame structure for a read is as follows:
	 *
	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
	 *
	 * <32 1s> = 32 consecutive logic 1 values
	 * A = bit of Physical Layer device address (MSB first)
	 * R = bit of register address (MSB first)
	 * z = High impedance state
	 * D = bit of read data (MSB first)
	 *
	 * Transmission order is 'Preamble' field first, bits transmitted
	 * left to right (first to last).
	 */
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE, 32 },	/* Preamble */
		{ GMII_ST, 2 },		/* ST */
		{ GMII_READ, 2 },	/* OP */
		{ phy_id, 5 },		/* PHYAD */
		{ phy_reg, 5 },		/* REGAD */
		{ 0x0000, 2 },		/* TA */
		{ 0x0000, 16 },		/* DATA */
		{ 0x0000, 1 }		/* IDLE */
	};
	unsigned int i, j;
	u8 polarity, data;

	polarity = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
	for (j = 0; j < 5; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* For each variable length field, the MSB must be
			 * transmitted first. Rotate through the field bits,
			 * starting with the MSB, and move each bit into the
			 * 1st (2^1) bit position (this is the bit position
			 * corresponding to the MgmtData bit of the PhyCtrl
			 * register for the IPG).
			 *
			 * Example: ST = 01;
			 *
			 *	First write a '0' to bit 1 of the PhyCtrl
			 *	register, then write a '1' to bit 1 of the
			 *	PhyCtrl register.
			 *
			 * To do this, right shift the MSB of ST by the value:
			 * [field length - 1 - #ST bits already written]
			 * then left shift this result by 1.
			 */
			data = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	send_three_state(ioaddr, polarity);

	read_phy_bit(ioaddr, polarity);

	/*
	 * For a read cycle, the bits for the next two fields (TA and
	 * DATA) are driven by the PHY (the IPG reads these bits).
	 */
	for (i = 0; i < p[6].len; i++) {
		p[6].field |=
		    (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
	}

	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_three_state(ioaddr, polarity);
	send_end(ioaddr, polarity);

	/* Return the value of the DATA field. */
	return p[6].field;
}

/*
 * Write to a register from the Physical Layer device located
 * on the IPG NIC, using the IPG PHYCTRL register.
 */
static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	/*
	 * The GMII management frame structure for a write is as follows:
	 *
	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
	 *
	 * <32 1s> = 32 consecutive logic 1 values
	 * A = bit of Physical Layer device address (MSB first)
	 * R = bit of register address (MSB first)
	 * z = High impedance state
	 * D = bit of write data (MSB first)
	 *
	 * Transmission order is 'Preamble' field first, bits transmitted
	 * left to right (first to last).
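	 * As in mdio_read() above, the frame is bit-banged through the
	 * PhyCtrl register's MgmtClk, MgmtData and MgmtDir bits, one bit
	 * per clock cycle.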
	 */
	struct {
		u32 field;
		unsigned int len;
	} p[] = {
		{ GMII_PREAMBLE, 32 },	/* Preamble */
		{ GMII_ST, 2 },		/* ST */
		{ GMII_WRITE, 2 },	/* OP */
		{ phy_id, 5 },		/* PHYAD */
		{ phy_reg, 5 },		/* REGAD */
		{ 0x0002, 2 },		/* TA */
		{ val & 0xffff, 16 },	/* DATA */
		{ 0x0000, 1 }		/* IDLE */
	};
	unsigned int i, j;
	u8 polarity, data;

	polarity = ipg_r8(PHY_CTRL);
	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);

	/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
	for (j = 0; j < 7; j++) {
		for (i = 0; i < p[j].len; i++) {
			/* For each variable length field, the MSB must be
			 * transmitted first. Rotate through the field bits,
			 * starting with the MSB, and move each bit into the
			 * 1st (2^1) bit position (this is the bit position
			 * corresponding to the MgmtData bit of the PhyCtrl
			 * register for the IPG).
			 *
			 * Example: ST = 01;
			 *
			 *	First write a '0' to bit 1 of the PhyCtrl
			 *	register, then write a '1' to bit 1 of the
			 *	PhyCtrl register.
			 *
			 * To do this, right shift the MSB of ST by the value:
			 * [field length - 1 - #ST bits already written]
			 * then left shift this result by 1.
			 */
			data = (p[j].field >> (p[j].len - 1 - i)) << 1;
			data &= IPG_PC_MGMTDATA;
			data |= polarity | IPG_PC_MGMTDIR;

			ipg_drive_phy_ctl_low_high(ioaddr, data);
		}
	}

	/* The last cycle is a tri-state, so read from the PHY. */
	for (j = 7; j < 8; j++) {
		for (i = 0; i < p[j].len; i++) {
			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);

			p[j].field |= ((ipg_r8(PHY_CTRL) &
				IPG_PC_MGMTDATA) >> 1) << (p[j].len - 1 - i);

			ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
		}
	}
}

/* Set LED_Mode JES20040127EEPROM */
static void ipg_set_led_mode(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	u32 mode;

	mode = ipg_r32(ASIC_CTRL);
	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);

	if ((sp->LED_Mode & 0x03) > 1)
		mode |= IPG_AC_LED_MODE_BIT_1;	/* Write Asic Control Bit 29 */

	if ((sp->LED_Mode & 0x01) == 1)
		mode |= IPG_AC_LED_MODE;	/* Write Asic Control Bit 14 */

	if ((sp->LED_Mode & 0x08) == 8)
		mode |= IPG_AC_LED_SPEED;	/* Write Asic Control Bit 27 */

	ipg_w32(mode, ASIC_CTRL);
}

/* Set PHYSet JES20040127EEPROM */
static void ipg_set_phy_set(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	int physet;

	physet = ipg_r8(PHY_SET);
	physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
	physet |= ((sp->LED_Mode & 0x70) >> 4);
	ipg_w8(physet, PHY_SET);
}

static int ipg_reset(struct net_device *dev, u32 resetflags)
{
	/* Assert functional resets via the IPG AsicCtrl
	 * register as specified by the 'resetflags' input
	 * parameter.
	 */
	void __iomem *ioaddr = ipg_ioaddr(dev);	//JES20040127EEPROM:
	unsigned int timeout_count = 0;

	IPG_DEBUG_MSG("_reset\n");

	ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);

	/* Delay added to account for problem with 10Mbps reset.
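	 * The loop below then polls the reset-busy flag in IPG_AC_RESETWAIT
	 * steps, giving up after IPG_AC_RESET_TIMEOUT iterations.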
	 */
	mdelay(IPG_AC_RESETWAIT);

	while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
		mdelay(IPG_AC_RESETWAIT);
		if (++timeout_count > IPG_AC_RESET_TIMEOUT)
			return -ETIME;
	}
	/* Set LED Mode in Asic Control JES20040127EEPROM */
	ipg_set_led_mode(dev);

	/* Set PHYSet Register Value JES20040127EEPROM */
	ipg_set_phy_set(dev);
	return 0;
}

/* Find the GMII PHY address. */
static int ipg_find_phyaddr(struct net_device *dev)
{
	unsigned int phyaddr, i;

	for (i = 0; i < 32; i++) {
		u32 status;

		/* Search for the correct PHY address among 32 possible. */
		phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;

		/* 10/22/03 Grace change verify from GMII_PHY_STATUS to
		   GMII_PHY_ID1
		 */

		status = mdio_read(dev, phyaddr, MII_BMSR);

		if ((status != 0xFFFF) && (status != 0))
			return phyaddr;
	}

	return 0x1f;
}

/*
 * Configure IPG based on result of IEEE 802.3 PHY
 * auto-negotiation.
 */
static int ipg_config_autoneg(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int txflowcontrol;
	unsigned int rxflowcontrol;
	unsigned int fullduplex;
	unsigned int gig;
	u32 mac_ctrl_val;
	u32 asicctrl;
	u8 phyctrl;

	IPG_DEBUG_MSG("_config_autoneg\n");

	asicctrl = ipg_r32(ASIC_CTRL);
	phyctrl = ipg_r8(PHY_CTRL);
	mac_ctrl_val = ipg_r32(MAC_CTRL);

	/* Set flags for use in resolving auto-negotiation, assuming
	 * non-1000Mbps, half duplex, no flow control.
	 */
	fullduplex = 0;
	txflowcontrol = 0;
	rxflowcontrol = 0;
	gig = 0;

	/* To accommodate a problem in 10Mbps operation,
	 * set a global flag if PHY running in 10Mbps mode.
	 */
	sp->tenmbpsmode = 0;

	printk(KERN_INFO "%s: Link speed = ", dev->name);

	/* Determine actual speed of operation. */
	switch (phyctrl & IPG_PC_LINK_SPEED) {
	case IPG_PC_LINK_SPEED_10MBPS:
		printk("10Mbps.\n");
		printk(KERN_INFO "%s: 10Mbps operational mode enabled.\n",
		       dev->name);
		sp->tenmbpsmode = 1;
		break;
	case IPG_PC_LINK_SPEED_100MBPS:
		printk("100Mbps.\n");
		break;
	case IPG_PC_LINK_SPEED_1000MBPS:
		printk("1000Mbps.\n");
		gig = 1;
		break;
	default:
		printk("undefined!\n");
		return 0;
	}

	if (phyctrl & IPG_PC_DUPLEX_STATUS) {
		fullduplex = 1;
		txflowcontrol = 1;
		rxflowcontrol = 1;
	}

	/* Configure full duplex, and flow control. */
	if (fullduplex == 1) {
		/* Configure IPG for full duplex operation. */
		printk(KERN_INFO "%s: setting full duplex, ", dev->name);

		mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;

		if (txflowcontrol == 1) {
			printk("TX flow control");
			mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
		} else {
			printk("no TX flow control");
			mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
		}

		if (rxflowcontrol == 1) {
			printk(", RX flow control.");
			mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
		} else {
			printk(", no RX flow control.");
			mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
		}

		printk("\n");
	} else {
		/* Configure IPG for half duplex operation.
		 */
		printk(KERN_INFO "%s: setting half duplex, "
		       "no TX flow control, no RX flow control.\n", dev->name);

		mac_ctrl_val &= ~IPG_MC_DUPLEX_SELECT_FD &
			~IPG_MC_TX_FLOW_CONTROL_ENABLE &
			~IPG_MC_RX_FLOW_CONTROL_ENABLE;
	}
	ipg_w32(mac_ctrl_val, MAC_CTRL);
	return 0;
}

/* Determine and configure multicast operation and set
 * receive mode for IPG.
 */
static void ipg_nic_set_multicast_list(struct net_device *dev)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	struct dev_mc_list *mc_list_ptr;
	unsigned int hashindex;
	u32 hashtable[2];
	u8 receivemode;

	IPG_DEBUG_MSG("_nic_set_multicast_list\n");

	receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;

	if (dev->flags & IFF_PROMISC) {
		/* NIC to be configured in promiscuous mode. */
		receivemode = IPG_RM_RECEIVEALLFRAMES;
	} else if ((dev->flags & IFF_ALLMULTI) ||
		   (dev->flags & IFF_MULTICAST &
		    (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
		/* NIC to be configured to receive all multicast
		 * frames. */
		receivemode |= IPG_RM_RECEIVEMULTICAST;
	} else if (dev->flags & IFF_MULTICAST & (dev->mc_count > 0)) {
		/* NIC to be configured to receive selected
		 * multicast addresses. */
		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
	}

	/* Calculate the bits to set for the 64 bit, IPG HASHTABLE.
	 * The IPG applies a cyclic-redundancy-check (the same CRC
	 * used to calculate the frame data FCS) to the destination
	 * address of all incoming multicast frames whose destination
	 * address has the multicast bit set. The least significant
	 * 6 bits of the CRC result are used as an addressing index
	 * into the hash table. If the value of the bit addressed by
	 * this index is a 1, the frame is passed to the host system.
	 */

	/* Clear hashtable. */
	hashtable[0] = 0x00000000;
	hashtable[1] = 0x00000000;

	/* Cycle through all multicast addresses to filter. */
	for (mc_list_ptr = dev->mc_list;
	     mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) {
		/* Calculate CRC result for each multicast address. */
		hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
				     ETH_ALEN);

		/* Use only the least significant 6 bits. */
		hashindex = hashindex & 0x3F;

		/* Within "hashtable", set bit number "hashindex"
		 * to a logic 1.
		 */
		set_bit(hashindex, (void *)hashtable);
	}

	/* Write the value of the hashtable, to the 4, 16 bit
	 * HASHTABLE IPG registers.
	 */
	ipg_w32(hashtable[0], HASHTABLE_0);
	ipg_w32(hashtable[1], HASHTABLE_1);

	ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);

	IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
}

static int ipg_io_config(struct net_device *dev)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	u32 origmacctrl;
	u32 restoremacctrl;

	IPG_DEBUG_MSG("_io_config\n");

	origmacctrl = ipg_r32(MAC_CTRL);

	restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;

	/* Based on compilation option, determine if FCS is to be
	 * stripped on receive frames by IPG.
	 */
	if (!IPG_STRIP_FCS_ON_RX)
		restoremacctrl |= IPG_MC_RCV_FCS;

	/* Determine if transmitter and/or receiver are
	 * enabled so we may restore MACCTRL correctly.
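	 * (IPG_MC_TX_ENABLED/RX_ENABLED report the current state; re-enabling
	 * is requested through the separate IPG_MC_TX_ENABLE/RX_ENABLE bits.)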
	 */
	if (origmacctrl & IPG_MC_TX_ENABLED)
		restoremacctrl |= IPG_MC_TX_ENABLE;

	if (origmacctrl & IPG_MC_RX_ENABLED)
		restoremacctrl |= IPG_MC_RX_ENABLE;

	/* Transmitter and receiver must be disabled before setting
	 * IFSSelect.
	 */
	ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Now that transmitter and receiver are disabled, write
	 * to IFSSelect.
	 */
	ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);

	/* Set RECEIVEMODE register. */
	ipg_nic_set_multicast_list(dev);

	ipg_w16(IPG_MAX_RXFRAME_SIZE, MAX_FRAME_SIZE);

	ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE, RX_DMA_POLL_PERIOD);
	ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
	ipg_w8(IPG_RXDMABURSTTHRESH_VALUE, RX_DMA_BURST_THRESH);
	ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE, TX_DMA_POLL_PERIOD);
	ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
	ipg_w8(IPG_TXDMABURSTTHRESH_VALUE, TX_DMA_BURST_THRESH);
	ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
		 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
		 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
		 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
	ipg_w16(IPG_FLOWONTHRESH_VALUE, FLOW_ON_THRESH);
	ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);

	/* IPG multi-frag frame bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);

	/* IPG TX poll now bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);

	/* IPG RX poll now bug workaround.
	 * Per silicon revision B3 errata.
	 */
	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);

	/* Now restore MACCTRL to original setting. */
	ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);

	/* Disable unused RMON statistics. */
	ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);

	/* Disable unused MIB statistics. */
	ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
		IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
		IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
		IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
		IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
		IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);

	return 0;
}

/*
 * Create a receive buffer within system memory and update
 * NIC private structure appropriately.
 */
static int ipg_get_rxbuff(struct net_device *dev, int entry)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + entry;
	struct sk_buff *skb;
	u64 rxfragsize;

	IPG_DEBUG_MSG("_get_rxbuff\n");

	skb = netdev_alloc_skb(dev, IPG_RXSUPPORT_SIZE + NET_IP_ALIGN);
	if (!skb) {
		sp->RxBuff[entry] = NULL;
		return -ENOMEM;
	}

	/* Adjust the data start location within the buffer to
	 * align IP address field to a 16 byte boundary.
	 */
	skb_reserve(skb, NET_IP_ALIGN);

	/* Associate the receive buffer with the IPG NIC. */
	skb->dev = dev;

	/* Save the address of the sk_buff structure. */
	sp->RxBuff[entry] = skb;

	rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		sp->rx_buf_sz, PCI_DMA_FROMDEVICE));

	/* Set the RFD fragment length.
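	 * The fragment length lives in bits 48..63 of frag_info
	 * (IPG_RFI_FRAGLEN), above the DMA address set up above.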
	 */
	rxfragsize = IPG_RXFRAG_SIZE;
	rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);

	return 0;
}

static int init_rfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_rfdlist\n");

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		struct ipg_rx *rxfd = sp->rxd + i;

		if (sp->RxBuff[i]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			IPG_DEV_KFREE_SKB(sp->RxBuff[i]);
			sp->RxBuff[i] = NULL;
		}

		/* Clear out the RFS field. */
		rxfd->rfs = 0x0000000000000000;

		if (ipg_get_rxbuff(dev, i) < 0) {
			/*
			 * A receive buffer was not ready, break the
			 * RFD list here.
			 */
			IPG_DEBUG_MSG("Cannot allocate Rx buffer.\n");

			/* Just in case we cannot allocate a single RFD.
			 * Should not occur.
			 */
			if (i == 0) {
				printk(KERN_ERR "%s: No memory available"
					" for RFD list.\n", dev->name);
				return -ENOMEM;
			}
		}

		rxfd->next_desc = cpu_to_le64(sp->rxd_map +
			sizeof(struct ipg_rx)*(i + 1));
	}
	sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);

	sp->rx_current = 0;
	sp->rx_dirty = 0;

	/* Write the location of the RFDList to the IPG. */
	ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
	ipg_w32(0x00000000, RFD_LIST_PTR_1);

	return 0;
}

static void init_tfdlist(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_init_tfdlist\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		struct ipg_tx *txfd = sp->txd + i;

		txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

		if (sp->TxBuff[i]) {
			IPG_DEV_KFREE_SKB(sp->TxBuff[i]);
			sp->TxBuff[i] = NULL;
		}

		txfd->next_desc = cpu_to_le64(sp->txd_map +
			sizeof(struct ipg_tx)*(i + 1));
	}
	sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);

	sp->tx_current = 0;
	sp->tx_dirty = 0;

	/* Write the location of the TFDList to the IPG. */
	IPG_DDEBUG_MSG("Starting TFDListPtr = %8.8x\n",
		       (u32) sp->txd_map);
	ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
	ipg_w32(0x00000000, TFD_LIST_PTR_1);

	sp->ResetCurrentTFD = 1;
}

/*
 * Free all transmit buffers which have already been transferred
 * via DMA to the IPG.
 */
static void ipg_nic_txfree(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int released, pending, dirty;

	IPG_DEBUG_MSG("_nic_txfree\n");

	pending = sp->tx_current - sp->tx_dirty;
	dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;

	for (released = 0; released < pending; released++) {
		struct sk_buff *skb = sp->TxBuff[dirty];
		struct ipg_tx *txfd = sp->txd + dirty;

		IPG_DEBUG_MSG("TFC = %16.16lx\n", (unsigned long) txfd->tfc);

		/* Look at each TFD's TFC field beginning
		 * at the last freed TFD up to the current TFD.
		 * If the TFDDone bit is set, free the associated
		 * buffer.
		 */
		if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
			break;

		/* Free the transmit buffer.
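		 * (tx_current and tx_dirty are free-running counters; the
		 * TFD ring slot is their value modulo IPG_TFDLIST_LENGTH.)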
		 */
		if (skb) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				skb->len, PCI_DMA_TODEVICE);

			IPG_DEV_KFREE_SKB(skb);

			sp->TxBuff[dirty] = NULL;
		}
		dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
	}

	sp->tx_dirty += released;

	if (netif_queue_stopped(dev) &&
	    (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
		netif_wake_queue(dev);
	}
}

static void ipg_tx_timeout(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;

	ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
		  IPG_AC_FIFO);

	spin_lock_irq(&sp->lock);

	/* Re-configure after DMA reset. */
	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Error during re-configuration.\n",
		       dev->name);
	}

	init_tfdlist(dev);

	spin_unlock_irq(&sp->lock);

	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
		MAC_CTRL);
}

/*
 * For TxComplete interrupts, free all transmit
 * buffers which have already been transferred via DMA
 * to the IPG.
 */
static void ipg_nic_txcleanup(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_txcleanup\n");

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		/* Reading the TXSTATUS register clears the
		 * TX_COMPLETE interrupt.
		 */
		u32 txstatusdword = ipg_r32(TX_STATUS);

		IPG_DEBUG_MSG("TxStatus = %8.8x\n", txstatusdword);

		/* Check for Transmit errors. Error bits only valid if
		 * TX_COMPLETE bit in the TXSTATUS register is a 1.
		 */
		if (!(txstatusdword & IPG_TS_TX_COMPLETE))
			break;

		/* If in 10Mbps mode, indicate transmit is ready. */
		if (sp->tenmbpsmode) {
			netif_wake_queue(dev);
		}

		/* Transmit error, increment stat counters. */
		if (txstatusdword & IPG_TS_TX_ERROR) {
			IPG_DEBUG_MSG("Transmit error.\n");
			sp->stats.tx_errors++;
		}

		/* Late collision, re-enable transmitter. */
		if (txstatusdword & IPG_TS_LATE_COLLISION) {
			IPG_DEBUG_MSG("Late collision on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Maximum collisions, re-enable transmitter. */
		if (txstatusdword & IPG_TS_TX_MAX_COLL) {
			IPG_DEBUG_MSG("Maximum collisions on transmit.\n");
			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}

		/* Transmit underrun, reset and re-enable
		 * transmitter.
		 */
		if (txstatusdword & IPG_TS_TX_UNDERRUN) {
			IPG_DEBUG_MSG("Transmitter underrun.\n");
			sp->stats.tx_fifo_errors++;
			ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
				  IPG_AC_NETWORK | IPG_AC_FIFO);

			/* Re-configure after DMA reset. */
			if (ipg_io_config(dev) < 0) {
				printk(KERN_INFO
				       "%s: Error during re-configuration.\n",
				       dev->name);
			}
			init_tfdlist(dev);

			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
				IPG_MC_RSVD_MASK, MAC_CTRL);
		}
	}

	ipg_nic_txfree(dev);
}

/* Provides statistical information about the IPG NIC.
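 * Hardware counter registers are read and accumulated into the
 * net_device_stats copy kept in the driver private structure.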
 */
static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	u16 temp1;
	u16 temp2;

	IPG_DEBUG_MSG("_nic_get_stats\n");

	/* Check to see if the NIC has been initialized via nic_open,
	 * before trying to read statistic registers.
	 */
	if (!test_bit(__LINK_STATE_START, &dev->state))
		return &sp->stats;

	sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
	sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
	sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
	sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
	temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
	sp->stats.rx_errors += temp1;
	sp->stats.rx_missed_errors += temp1;
	temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
		ipg_r32(IPG_LATECOLLISIONS);
	temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
	sp->stats.collisions += temp1;
	sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
	sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
		ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
	sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);

	/* detailed tx_errors */
	sp->stats.tx_carrier_errors += temp2;

	/* detailed rx_errors */
	sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
		ipg_r16(IPG_FRAMETOOLONGERRRORS);
	sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);

	/* Unutilized IPG statistic registers. */
	ipg_r32(IPG_MCSTFRAMESRCVDOK);

	return &sp->stats;
}

/* Restore used receive buffers. */
static int ipg_nic_rxrestore(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	const unsigned int curr = sp->rx_current;
	unsigned int dirty = sp->rx_dirty;

	IPG_DEBUG_MSG("_nic_rxrestore\n");

	for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
		unsigned int entry = dirty % IPG_RFDLIST_LENGTH;

		/* rx_copybreak may poke hole here and there. */
		if (sp->RxBuff[entry])
			continue;

		/* Generate a new receive buffer to replace the
		 * current buffer (which will be released by the
		 * Linux system).
		 */
		if (ipg_get_rxbuff(dev, entry) < 0) {
			IPG_DEBUG_MSG("Cannot allocate new Rx buffer.\n");

			break;
		}

		/* Reset the RFS field. */
		sp->rxd[entry].rfs = 0x0000000000000000;
	}
	sp->rx_dirty = dirty;

	return 0;
}

#ifdef JUMBO_FRAME

/* use jumboindex and jumbosize to control jumbo frame status
   initial status is jumboindex=-1 and jumbosize=0
   1. jumboindex = -1 and jumbosize=0 : previous jumbo frame has been done.
   2. jumboindex != -1 and jumbosize != 0 : jumbo frame is not over size and receiving
   3.
      jumboindex = -1 and jumbosize != 0 : jumbo frame is over size, already dump
      previous receiving and need to continue dumping the current one
*/
enum {
	NormalPacket,
	ErrorPacket
};

enum {
	Frame_NoStart_NoEnd = 0,
	Frame_WithStart = 1,
	Frame_WithEnd = 10,
	Frame_WithStart_WithEnd = 11
};

inline void ipg_nic_rx_free_skb(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;

	if (sp->RxBuff[entry]) {
		struct ipg_rx *rxfd = sp->rxd + entry;

		pci_unmap_single(sp->pdev,
			le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
			sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		IPG_DEV_KFREE_SKB(sp->RxBuff[entry]);
		sp->RxBuff[entry] = NULL;
	}
}

inline int ipg_nic_rx_check_frame_type(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
	int type = Frame_NoStart_NoEnd;

	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
		type += Frame_WithStart;
	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
		type += Frame_WithEnd;
	return type;
}

inline int ipg_nic_rx_check_error(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
	struct ipg_rx *rxfd = sp->rxd + entry;

	if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
	    (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
	     IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
	     IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
		IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
			      (unsigned long) rxfd->rfs);

		/* Increment general receive error statistic. */
		sp->stats.rx_errors++;

		/* Increment detailed receive error statistics. */
		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
			IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");

			sp->stats.rx_fifo_errors++;
		}

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
			IPG_DEBUG_MSG("RX runt occurred.\n");
			sp->stats.rx_length_errors++;
		}

		/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME,
		 * error count handled by an IPG statistic register.
		 */

		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
			IPG_DEBUG_MSG("RX alignment error occurred.\n");
			sp->stats.rx_frame_errors++;
		}

		/* Do nothing for IPG_RFS_RXFCSERROR, error count
		 * handled by an IPG statistic register.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->RxBuff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			IPG_DEV_KFREE_SKB(sp->RxBuff[entry]);
			sp->RxBuff[entry] = NULL;
		}
		return ErrorPacket;
	}
	return NormalPacket;
}

static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
					  struct ipg_nic_private *sp,
					  struct ipg_rx *rxfd, unsigned entry)
{
	struct SJumbo *jumbo = &sp->Jumbo;
	struct sk_buff *skb;
	int framelen;

	if (jumbo->FoundStart) {
		IPG_DEV_KFREE_SKB(jumbo->skb);
		jumbo->FoundStart = 0;
		jumbo->CurrentSize = 0;
		jumbo->skb = NULL;
	}

	// 1: found error, 0 no error
	if (ipg_nic_rx_check_error(dev) != NormalPacket)
		return;

	skb = sp->RxBuff[entry];
	if (!skb)
		return;

	// accept this frame and send to upper layer
	framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
	if (framelen > IPG_RXFRAG_SIZE)
		framelen = IPG_RXFRAG_SIZE;

	skb_put(skb, framelen);
	skb->protocol = eth_type_trans(skb, dev);
	skb->ip_summed = CHECKSUM_NONE;
	netif_rx(skb);
	dev->last_rx = jiffies;
	sp->RxBuff[entry] = NULL;
}

static void ipg_nic_rx_with_start(struct net_device *dev,
				  struct ipg_nic_private *sp,
				  struct ipg_rx *rxfd, unsigned entry)
{
	struct SJumbo *jumbo = &sp->Jumbo;
	struct pci_dev *pdev = sp->pdev;
	struct sk_buff *skb;

	// 1: found error, 0 no error
	if (ipg_nic_rx_check_error(dev) != NormalPacket)
		return;

	// accept this frame and send to upper layer
	skb = sp->RxBuff[entry];
	if (!skb)
		return;

	if (jumbo->FoundStart)
		IPG_DEV_KFREE_SKB(jumbo->skb);

	pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info & ~IPG_RFI_FRAGLEN),
			 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

	skb_put(skb, IPG_RXFRAG_SIZE);

	jumbo->FoundStart = 1;
	jumbo->CurrentSize = IPG_RXFRAG_SIZE;
	jumbo->skb = skb;

	sp->RxBuff[entry] = NULL;
	dev->last_rx = jiffies;
}

static void ipg_nic_rx_with_end(struct net_device *dev,
				struct ipg_nic_private *sp,
				struct ipg_rx *rxfd, unsigned entry)
{
	struct SJumbo *jumbo = &sp->Jumbo;

	//1: found error, 0 no error
	if (ipg_nic_rx_check_error(dev) == NormalPacket) {
		struct sk_buff *skb = sp->RxBuff[entry];

		if (!skb)
			return;

		if (jumbo->FoundStart) {
			int framelen, endframelen;

			framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

			endframelen = framelen - jumbo->CurrentSize;
			/*
			if (framelen > IPG_RXFRAG_SIZE)
				framelen = IPG_RXFRAG_SIZE;
			 */
			if (framelen > IPG_RXSUPPORT_SIZE)
				IPG_DEV_KFREE_SKB(jumbo->skb);
			else {
				memcpy(skb_put(jumbo->skb, endframelen),
				       skb->data, endframelen);

				jumbo->skb->protocol =
				    eth_type_trans(jumbo->skb, dev);

				jumbo->skb->ip_summed = CHECKSUM_NONE;
				netif_rx(jumbo->skb);
			}
		}

		dev->last_rx = jiffies;
		jumbo->FoundStart = 0;
		jumbo->CurrentSize = 0;
		jumbo->skb = NULL;

		ipg_nic_rx_free_skb(dev);
	} else {
		IPG_DEV_KFREE_SKB(jumbo->skb);
		jumbo->FoundStart = 0;
		jumbo->CurrentSize = 0;
		jumbo->skb = NULL;
	}
}

static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
				       struct ipg_nic_private *sp,
				       struct ipg_rx *rxfd, unsigned entry)
{
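	/* Middle fragment of a jumbo frame: when reassembly is in progress,
	 * append one full RX fragment to the frame being rebuilt.
	 */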
	struct SJumbo *jumbo = &sp->Jumbo;

	//1: found error, 0 no error
	if (ipg_nic_rx_check_error(dev) == NormalPacket) {
		struct sk_buff *skb = sp->RxBuff[entry];

		if (skb) {
			if (jumbo->FoundStart) {
				jumbo->CurrentSize += IPG_RXFRAG_SIZE;
				if (jumbo->CurrentSize <= IPG_RXSUPPORT_SIZE) {
					memcpy(skb_put(jumbo->skb,
						       IPG_RXFRAG_SIZE),
					       skb->data, IPG_RXFRAG_SIZE);
				}
			}
			dev->last_rx = jiffies;
			ipg_nic_rx_free_skb(dev);
		}
	} else {
		IPG_DEV_KFREE_SKB(jumbo->skb);
		jumbo->FoundStart = 0;
		jumbo->CurrentSize = 0;
		jumbo->skb = NULL;
	}
}

static int ipg_nic_rx(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx\n");

	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct ipg_rx *rxfd = sp->rxd + entry;

		if (!(rxfd->rfs & le64_to_cpu(IPG_RFS_RFDDONE)))
			break;

		switch (ipg_nic_rx_check_frame_type(dev)) {
		case Frame_WithStart_WithEnd:
			ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
			break;
		case Frame_WithStart:
			ipg_nic_rx_with_start(dev, sp, rxfd, entry);
			break;
		case Frame_WithEnd:
			ipg_nic_rx_with_end(dev, sp, rxfd, entry);
			break;
		case Frame_NoStart_NoEnd:
			ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
			break;
		}
	}

	sp->rx_current = curr;

	if (i == IPG_MAXRFDPROCESS_COUNT) {
		/* There are more RFDs to process, however the
		 * allocated amount of RFD processing time has
		 * expired. Assert Interrupt Requested to make
		 * sure we come back to process the remaining RFDs.
		 */
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
	}

	ipg_nic_rxrestore(dev);

	return 0;
}

#else
static int ipg_nic_rx(struct net_device *dev)
{
	/* Transfer received Ethernet frames to higher network layers. */
	struct ipg_nic_private *sp = netdev_priv(dev);
	unsigned int curr = sp->rx_current;
	void __iomem *ioaddr = sp->ioaddr;
	struct ipg_rx *rxfd;
	unsigned int i;

	IPG_DEBUG_MSG("_nic_rx\n");

#define __RFS_MASK \
	cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)

	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
		struct sk_buff *skb = sp->RxBuff[entry];
		unsigned int framelen;

		rxfd = sp->rxd + entry;

		if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
			break;

		/* Get received frame length. */
		framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;

		/* Check for jumbo frame arrival with too small
		 * RXFRAG_SIZE.
		 */
		if (framelen > IPG_RXFRAG_SIZE) {
			IPG_DEBUG_MSG
			    ("RFS FrameLen > allocated fragment size.\n");

			framelen = IPG_RXFRAG_SIZE;
		}

		if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
		     (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
		      IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
		      IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {

			IPG_DEBUG_MSG("Rx error, RFS = %16.16lx\n",
				      (unsigned long int) rxfd->rfs);

			/* Increment general receive error statistic.
			 */
			sp->stats.rx_errors++;

			/* Increment detailed receive error statistics. */
			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
				IPG_DEBUG_MSG("RX FIFO overrun occurred.\n");
				sp->stats.rx_fifo_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
				IPG_DEBUG_MSG("RX runt occurred.\n");
				sp->stats.rx_length_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXOVERSIZEDFRAME) ;
			/* Do nothing, error count handled by an IPG
			 * statistic register.
			 */

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
				IPG_DEBUG_MSG("RX alignment error occurred.\n");
				sp->stats.rx_frame_errors++;
			}

			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFCSERROR) ;
			/* Do nothing, error count handled by an IPG
			 * statistic register.
			 */

			/* Free the memory associated with the RX
			 * buffer since it is erroneous and we will
			 * not pass it to higher layer processes.
			 */
			if (skb) {
				__le64 info = rxfd->frag_info;

				pci_unmap_single(sp->pdev,
					le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
					sp->rx_buf_sz, PCI_DMA_FROMDEVICE);

				IPG_DEV_KFREE_SKB(skb);
			}
		} else {

			/* Adjust the new buffer length to accommodate the size
			 * of the received frame.
			 */
			skb_put(skb, framelen);

			/* Set the buffer's protocol field to Ethernet. */
			skb->protocol = eth_type_trans(skb, dev);

			/* If the frame contains an IP/TCP/UDP frame,
			 * determine if upper layer must check IP/TCP/UDP
			 * checksums.
			 *
			 * NOTE: DO NOT RELY ON THE TCP/UDP CHECKSUM
			 *       VERIFICATION FOR SILICON REVISIONS B3
			 *       AND EARLIER!
			 *
			 if ((le64_to_cpu(rxfd->rfs &
			      (IPG_RFS_TCPDETECTED | IPG_RFS_UDPDETECTED |
			       IPG_RFS_IPDETECTED))) &&
			     !(le64_to_cpu(rxfd->rfs &
			       (IPG_RFS_TCPERROR | IPG_RFS_UDPERROR |
				IPG_RFS_IPERROR)))) {
				* Indicate IP checksums were performed
				* by the IPG.
				*
				skb->ip_summed = CHECKSUM_UNNECESSARY;
			 } else
			 */
			{
				/* The IPG encountered an error with (or
				 * there were no) IP/TCP/UDP checksums.
				 * This may or may not indicate an invalid
				 * IP/TCP/UDP frame was received. Let the
				 * upper layer decide.
				 */
				skb->ip_summed = CHECKSUM_NONE;
			}

			/* Hand off frame for higher layer processing.
			 * The function netif_rx() releases the sk_buff
			 * when processing completes.
			 */
			netif_rx(skb);

			/* Record frame receive time (jiffies = Linux
			 * kernel current time stamp).
			 */
			dev->last_rx = jiffies;
		}

		/* Assure RX buffer is not reused by IPG. */
		sp->RxBuff[entry] = NULL;
	}

	/*
	 * If there are more RFDs to process and the allocated amount of RFD
	 * processing time has expired, assert Interrupt Requested to make
	 * sure we come back to process the remaining RFDs.
	 */
	if (i == IPG_MAXRFDPROCESS_COUNT)
		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);

#ifdef IPG_DEBUG
	/* Check if the RFD list contained no receive frame data.
	 */
	if (!i)
		sp->EmptyRFDListCount++;
#endif
	while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
	       !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
		 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
		unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;

		rxfd = sp->rxd + entry;

		IPG_DEBUG_MSG("Frame requires multiple RFDs.\n");

		/* An unexpected event, additional code needed to handle
		 * properly. So for the time being, just disregard the
		 * frame.
		 */

		/* Free the memory associated with the RX
		 * buffer since it is erroneous and we will
		 * not pass it to higher layer processes.
		 */
		if (sp->RxBuff[entry]) {
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
			IPG_DEV_KFREE_SKB(sp->RxBuff[entry]);
		}

		/* Assure RX buffer is not reused by IPG. */
		sp->RxBuff[entry] = NULL;
	}

	sp->rx_current = curr;

	/* Check to see if there are a minimum number of used
	 * RFDs before restoring any (should improve performance.)
	 */
	if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
		ipg_nic_rxrestore(dev);

	return 0;
}
#endif

static void ipg_reset_after_host_error(struct work_struct *work)
{
	struct ipg_nic_private *sp =
		container_of(work, struct ipg_nic_private, task.work);
	struct net_device *dev = sp->dev;

	IPG_DDEBUG_MSG("DMACtrl = %8.8x\n", ioread32(sp->ioaddr + IPG_DMACTRL));

	/*
	 * Acknowledge HostError interrupt by resetting
	 * IPG DMA and HOST.
	 */
	ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

	init_rfdlist(dev);
	init_tfdlist(dev);

	if (ipg_io_config(dev) < 0) {
		printk(KERN_INFO "%s: Cannot recover from PCI error.\n",
		       dev->name);
		schedule_delayed_work(&sp->task, HZ);
	}
}

static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
{
	struct net_device *dev = dev_inst;
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int handled = 0;
	u16 status;

	IPG_DEBUG_MSG("_interrupt_handler\n");

#ifdef JUMBO_FRAME
	ipg_nic_rxrestore(dev);
#endif
	spin_lock(&sp->lock);

	/* Get interrupt source information, and acknowledge
	 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
	 * IntRequested, MacControlFrame, LinkEvent) interrupts
	 * if issued. Also, all IPG interrupts are disabled by
	 * reading IntStatusAck.
	 */
	status = ipg_r16(INT_STATUS_ACK);

	IPG_DEBUG_MSG("IntStatusAck = %4.4x\n", status);

	/* Shared IRQ of remove event. */
	if (!(status & IPG_IS_RSVD_MASK))
		goto out_enable;

	handled = 1;

	if (unlikely(!netif_running(dev)))
		goto out_unlock;

	/* If RFDListEnd interrupt, restore all used RFDs. */
	if (status & IPG_IS_RFD_LIST_END) {
		IPG_DEBUG_MSG("RFDListEnd Interrupt.\n");

		/* The RFD list end indicates an RFD was encountered
		 * with a 0 NextPtr, or with an RFDDone bit set to 1
		 * (indicating the RFD is not ready for use by the
		 * IPG.) Try to restore all RFDs.
		 */
		ipg_nic_rxrestore(dev);

#ifdef IPG_DEBUG
		/* Increment the RFDlistendCount counter. */
		sp->RFDlistendCount++;
#endif
	}

	/* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
	 * IntRequested interrupt, process received frames. */
	if ((status & IPG_IS_RX_DMA_PRIORITY) ||
	    (status & IPG_IS_RFD_LIST_END) ||
	    (status & IPG_IS_RX_DMA_COMPLETE) ||
	    (status & IPG_IS_INT_REQUESTED)) {
#ifdef IPG_DEBUG
		/* Increment the RFD list checked counter if interrupted
		 * only to check the RFD list. */
		if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
				IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
			      (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
			       IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
			       IPG_IS_UPDATE_STATS)))
			sp->RFDListCheckedCount++;
#endif

		ipg_nic_rx(dev);
	}

	/* If TxDMAComplete interrupt, free used TFDs. */
	if (status & IPG_IS_TX_DMA_COMPLETE)
		ipg_nic_txfree(dev);

	/* TxComplete interrupts indicate one of numerous actions.
	 * Determine what action to take based on TXSTATUS register.
	 */
	if (status & IPG_IS_TX_COMPLETE)
		ipg_nic_txcleanup(dev);

	/* If UpdateStats interrupt, update Linux Ethernet statistics */
	if (status & IPG_IS_UPDATE_STATS)
		ipg_nic_get_stats(dev);

	/* If HostError interrupt, reset IPG. */
	if (status & IPG_IS_HOST_ERROR) {
		IPG_DDEBUG_MSG("HostError Interrupt\n");

		schedule_delayed_work(&sp->task, 0);
	}

	/* If LinkEvent interrupt, resolve autonegotiation. */
	if (status & IPG_IS_LINK_EVENT) {
		if (ipg_config_autoneg(dev) < 0)
			printk(KERN_INFO "%s: Auto-negotiation error.\n",
			       dev->name);
	}

	/* If MACCtrlFrame interrupt, do nothing. */
	if (status & IPG_IS_MAC_CTRL_FRAME)
		IPG_DEBUG_MSG("MACCtrlFrame interrupt.\n");

	/* If RxComplete interrupt, do nothing. */
	if (status & IPG_IS_RX_COMPLETE)
		IPG_DEBUG_MSG("RxComplete interrupt.\n");

	/* If RxEarly interrupt, do nothing. */
	if (status & IPG_IS_RX_EARLY)
		IPG_DEBUG_MSG("RxEarly interrupt.\n");

out_enable:
	/* Re-enable IPG interrupts.
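	 * (All IPG interrupts were masked when IntStatusAck was read at the
	 * top of the handler.)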
	 */
	ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
		IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
		IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
out_unlock:
	spin_unlock(&sp->lock);

	return IRQ_RETVAL(handled);
}

static void ipg_rx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
		if (sp->RxBuff[i]) {
			struct ipg_rx *rxfd = sp->rxd + i;

			IPG_DEV_KFREE_SKB(sp->RxBuff[i]);
			sp->RxBuff[i] = NULL;
			pci_unmap_single(sp->pdev,
				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
		}
	}
}

static void ipg_tx_clear(struct ipg_nic_private *sp)
{
	unsigned int i;

	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
		if (sp->TxBuff[i]) {
			struct ipg_tx *txfd = sp->txd + i;

			pci_unmap_single(sp->pdev,
				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
				sp->TxBuff[i]->len, PCI_DMA_TODEVICE);

			IPG_DEV_KFREE_SKB(sp->TxBuff[i]);

			sp->TxBuff[i] = NULL;
		}
	}
}

static int ipg_nic_open(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;
	int rc;

	IPG_DEBUG_MSG("_nic_open\n");

	sp->rx_buf_sz = IPG_RXSUPPORT_SIZE;

	/* Check for interrupt line conflicts, and request interrupt
	 * line for IPG.
	 *
	 * IMPORTANT: Disable IPG interrupts prior to registering
	 *            IRQ.
	 */
	ipg_w16(0x0000, INT_ENABLE);

	/* Register the interrupt line to be used by the IPG within
	 * the Linux system.
	 */
	rc = request_irq(pdev->irq, &ipg_interrupt_handler, IRQF_SHARED,
			 dev->name, dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error when requesting interrupt.\n",
		       dev->name);
		goto out;
	}

	dev->irq = pdev->irq;

	rc = -ENOMEM;

	sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
				     &sp->rxd_map, GFP_KERNEL);
	if (!sp->rxd)
		goto err_free_irq_0;

	sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
				     &sp->txd_map, GFP_KERNEL);
	if (!sp->txd)
		goto err_free_rx_1;

	rc = init_rfdlist(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_free_tx_2;
	}

	init_tfdlist(dev);

	rc = ipg_io_config(dev);
	if (rc < 0) {
		printk(KERN_INFO "%s: Error during configuration.\n",
		       dev->name);
		goto err_release_tfdlist_3;
	}

	/* Resolve autonegotiation. */
	if (ipg_config_autoneg(dev) < 0)
		printk(KERN_INFO "%s: Auto-negotiation error.\n", dev->name);

#ifdef JUMBO_FRAME
	/* initialize JUMBO Frame control variable */
	sp->Jumbo.FoundStart = 0;
	sp->Jumbo.CurrentSize = 0;
	sp->Jumbo.skb = 0;
	dev->mtu = IPG_TXFRAG_SIZE;
#endif

	/* Enable transmit and receive operation of the IPG.
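	 * MAC_CTRL updates are masked with IPG_MC_RSVD_MASK so that
	 * reserved bits are left clear.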
	 */
	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
		IPG_MC_RSVD_MASK, MAC_CTRL);

	netif_start_queue(dev);
out:
	return rc;

err_release_tfdlist_3:
	ipg_tx_clear(sp);
	ipg_rx_clear(sp);
err_free_tx_2:
	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
err_free_rx_1:
	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
err_free_irq_0:
	free_irq(pdev->irq, dev);
	goto out;
}

static int ipg_nic_stop(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	struct pci_dev *pdev = sp->pdev;

	IPG_DEBUG_MSG("_nic_stop\n");

	netif_stop_queue(dev);

	IPG_DDEBUG_MSG("RFDlistendCount = %i\n", sp->RFDlistendCount);
	IPG_DDEBUG_MSG("RFDListCheckedCount = %i\n", sp->rxdCheckedCount);
	IPG_DDEBUG_MSG("EmptyRFDListCount = %i\n", sp->EmptyRFDListCount);
	IPG_DUMPTFDLIST(dev);

	do {
		(void) ipg_r16(INT_STATUS_ACK);

		ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);

		synchronize_irq(pdev->irq);
	} while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);

	ipg_rx_clear(sp);

	ipg_tx_clear(sp);

	pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
	pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);

	free_irq(pdev->irq, dev);

	return 0;
}

static int ipg_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
	unsigned long flags;
	struct ipg_tx *txfd;

	IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");

	/* If in 10Mbps mode, stop the transmit queue so
	 * no more transmit frames are accepted.
	 */
	if (sp->tenmbpsmode)
		netif_stop_queue(dev);

	if (sp->ResetCurrentTFD) {
		sp->ResetCurrentTFD = 0;
		entry = 0;
	}

	txfd = sp->txd + entry;

	sp->TxBuff[entry] = skb;

	/* Clear all TFC fields, except TFDDONE. */
	txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

	/* Specify the TFC field within the TFD. */
	txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
		(IPG_TFC_FRAMEID & cpu_to_le64(sp->tx_current)) |
		(IPG_TFC_FRAGCOUNT & (1 << 24)));

	/* Request TxComplete interrupts at an interval defined
	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
	 * Request TxComplete interrupt for every frame
	 * if in 10Mbps mode to accommodate problem with 10Mbps
	 * processing.
	 */
	if (sp->tenmbpsmode)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
	txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
	/* Based on compilation option, determine if FCS is to be
	 * appended to transmit frame by IPG.
	 */
	if (!(IPG_APPEND_FCS_ON_TX))
		txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);

	/* Based on compilation option, determine if IP, TCP and/or
	 * UDP checksums are to be added to transmit frame by IPG.
static int ipg_nic_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
	unsigned long flags;
	struct ipg_tx *txfd;

	IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");

	/* If in 10Mbps mode, stop the transmit queue so
	 * no more transmit frames are accepted.
	 */
	if (sp->tenmbpsmode)
		netif_stop_queue(dev);

	if (sp->ResetCurrentTFD) {
		sp->ResetCurrentTFD = 0;
		entry = 0;
	}

	txfd = sp->txd + entry;

	sp->TxBuff[entry] = skb;

	/* Clear all TFC fields, except TFDDONE. */
	txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);

	/* Specify the TFC field within the TFD. */
	txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
		(IPG_TFC_FRAMEID & sp->tx_current) |
		(IPG_TFC_FRAGCOUNT & (1 << 24)));

	/* Request TxComplete interrupts at an interval defined
	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
	 * Request TxComplete interrupt for every frame
	 * if in 10Mbps mode to accommodate problem with 10Mbps
	 * processing.
	 */
	if (sp->tenmbpsmode)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
	txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);

	/* Based on compilation option, determine if FCS is to be
	 * appended to transmit frame by IPG.
	 */
	if (!(IPG_APPEND_FCS_ON_TX))
		txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);

	/* Based on compilation option, determine if IP, TCP and/or
	 * UDP checksums are to be added to transmit frame by IPG.
	 */
	if (IPG_ADD_IPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);

	if (IPG_ADD_TCPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);

	if (IPG_ADD_UDPCHECKSUM_ON_TX)
		txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);

	/* Based on compilation option, determine if VLAN tag info is to be
	 * inserted into transmit frame by IPG.
	 */
	if (IPG_INSERT_MANUAL_VLAN_TAG) {
		txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
			((u64) IPG_MANUAL_VLAN_VID << 32) |
			((u64) IPG_MANUAL_VLAN_CFI << 44) |
			((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
	}

	/* The fragment start location within system memory is defined
	 * by the sk_buff structure's data field. The physical address
	 * of this location within the system's virtual memory space
	 * is determined using the IPG_HOST2BUS_MAP function.
	 */
	txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
		skb->len, PCI_DMA_TODEVICE));

	/* The length of the fragment within system memory is defined by
	 * the sk_buff structure's len field.
	 */
	txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
		((u64) (skb->len & 0xffff) << 48));

	/* Clear the TFDDone bit last to indicate the TFD is ready
	 * for transfer to the IPG.
	 */
	txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);

	spin_lock_irqsave(&sp->lock, flags);

	sp->tx_current++;

	mmiowb();

	ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);

	if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&sp->lock, flags);

	return NETDEV_TX_OK;
}

static void ipg_set_phy_default_param(unsigned char rev,
				      struct net_device *dev, int phy_address)
{
	unsigned short length;
	unsigned char revision;
	unsigned short *phy_param;
	unsigned short address, value;

	phy_param = &DefaultPhyParam[0];
	length = *phy_param & 0x00FF;
	revision = (unsigned char)((*phy_param) >> 8);
	phy_param++;
	while (length != 0) {
		if (rev == revision) {
			/* Matching record: write each (register, value)
			 * pair to the PHY.  Every pair is two 16-bit words,
			 * i.e. four bytes of the record length.
			 */
			while (length > 1) {
				address = *phy_param;
				value = *(phy_param + 1);
				phy_param += 2;
				mdio_write(dev, phy_address, address, value);
				length -= 4;
			}
			break;
		} else {
			/* Skip this record's data words and read the next
			 * revision/length header.
			 */
			phy_param += length / 2;
			length = *phy_param & 0x00FF;
			revision = (unsigned char)((*phy_param) >> 8);
			phy_param++;
		}
	}
}
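
/*
 * Read one 16-bit word from the serial EEPROM: write the read opcode plus
 * the word address to EEPROM_CTRL, then poll the busy flag (with a generous
 * timeout) before picking the result up from EEPROM_DATA.  Returns 0 if the
 * EEPROM never becomes ready.
 */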
/* JES20040127EEPROM */
static int read_eeprom(struct net_device *dev, int eep_addr)
{
	void __iomem *ioaddr = ipg_ioaddr(dev);
	unsigned int i;
	int ret = 0;
	u16 value;

	value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
	ipg_w16(value, EEPROM_CTRL);

	for (i = 0; i < 1000; i++) {
		u16 data;

		mdelay(10);
		data = ipg_r16(EEPROM_CTRL);
		if (!(data & IPG_EC_EEPROM_BUSY)) {
			ret = ipg_r16(EEPROM_DATA);
			break;
		}
	}
	return ret;
}

static void ipg_init_mii(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	struct mii_if_info *mii_if = &sp->mii_if;
	int phyaddr;

	mii_if->dev = dev;
	mii_if->mdio_read = mdio_read;
	mii_if->mdio_write = mdio_write;
	mii_if->phy_id_mask = 0x1f;
	mii_if->reg_num_mask = 0x1f;

	mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);

	/* ipg_find_phyaddr() returns 0x1f when no PHY answered, in which
	 * case the PHY setup below is skipped.
	 */
	if (phyaddr != 0x1f) {
		u16 mii_phyctrl, mii_1000cr;
		u8 revisionid = 0;

		mii_1000cr = mdio_read(dev, phyaddr, MII_CTRL1000);
		mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
			GMII_PHY_1000BASETCONTROL_PreferMaster;
		mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);

		mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);

		/* Set default phyparam */
		pci_read_config_byte(sp->pdev, PCI_REVISION_ID, &revisionid);
		ipg_set_phy_default_param(revisionid, dev, phyaddr);

		/* Reset PHY */
		mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
		mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
	}
}

static int ipg_hw_init(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	void __iomem *ioaddr = sp->ioaddr;
	unsigned int i;
	int rc;

	/* Read/Write and Reset EEPROM Value Jesse20040128EEPROM_VALUE */
	/* Read LED Mode Configuration from EEPROM */
	sp->LED_Mode = read_eeprom(dev, 6);

	/* Reset all functions within the IPG. Do not assert
	 * RST_OUT as not compatible with some PHYs.
	 */
	rc = ipg_reset(dev, IPG_RESET_MASK);
	if (rc < 0)
		goto out;

	ipg_init_mii(dev);

	/* Read MAC Address from EEPROM */
	for (i = 0; i < 3; i++)
		sp->station_addr[i] = read_eeprom(dev, 16 + i);

	for (i = 0; i < 3; i++)
		ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);

	/* Set station address in ethernet_device structure. */
	dev->dev_addr[0] =  ipg_r16(STATION_ADDRESS_0) & 0x00ff;
	dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
	dev->dev_addr[2] =  ipg_r16(STATION_ADDRESS_1) & 0x00ff;
	dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
	dev->dev_addr[4] =  ipg_r16(STATION_ADDRESS_2) & 0x00ff;
	dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
out:
	return rc;
}

static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}
static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
{
	/* Function to accommodate changes to Maximum Transfer Unit
	 * (or MTU) of IPG NIC. Cannot use default function since
	 * the default will not allow for MTU > 1500 bytes.
	 */

	IPG_DEBUG_MSG("_nic_change_mtu\n");

	/* Check that the new MTU value is between 68 (the minimum
	 * IPv4 MTU) and IPG_MAX_RXFRAME_SIZE, which corresponds to
	 * the MAXFRAMESIZE register in the IPG.
	 */
	if ((new_mtu < 68) || (new_mtu > IPG_MAX_RXFRAME_SIZE))
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_gset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_ethtool_sset(&sp->mii_if, cmd);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static int ipg_nway_reset(struct net_device *dev)
{
	struct ipg_nic_private *sp = netdev_priv(dev);
	int rc;

	mutex_lock(&sp->mii_mutex);
	rc = mii_nway_restart(&sp->mii_if);
	mutex_unlock(&sp->mii_mutex);

	return rc;
}

static struct ethtool_ops ipg_ethtool_ops = {
	.get_settings = ipg_get_settings,
	.set_settings = ipg_set_settings,
	.nway_reset   = ipg_nway_reset,
};

static void __devexit ipg_remove(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct ipg_nic_private *sp = netdev_priv(dev);

	IPG_DEBUG_MSG("_remove\n");

	/* Un-register Ethernet device. */
	unregister_netdev(dev);

	pci_iounmap(pdev, sp->ioaddr);

	pci_release_regions(pdev);

	free_netdev(dev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
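
/*
 * Probe: enable the PCI device, switch on bus mastering, negotiate a 40-bit
 * DMA mask (falling back to 32-bit), allocate the net_device, wire up the
 * netdev methods and ethtool ops, claim the PCI regions, map BAR 1 for MMIO,
 * initialise the hardware and finally register the interface.  Failures
 * unwind in reverse order through the err_* labels.
 */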
static int __devinit ipg_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	unsigned int i = id->driver_data;
	struct ipg_nic_private *sp;
	struct net_device *dev;
	void __iomem *ioaddr;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc < 0)
		goto out;

	printk(KERN_INFO "%s: %s\n", pci_name(pdev), ipg_brand_name[i]);

	pci_set_master(pdev);

	rc = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
	if (rc < 0) {
		rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (rc < 0) {
			printk(KERN_ERR "%s: DMA config failed.\n",
			       pci_name(pdev));
			goto err_disable_0;
		}
	}

	/*
	 * Initialize net device.
	 */
	dev = alloc_etherdev(sizeof(struct ipg_nic_private));
	if (!dev) {
		printk(KERN_ERR "%s: alloc_etherdev failed\n", pci_name(pdev));
		rc = -ENOMEM;
		goto err_disable_0;
	}

	sp = netdev_priv(dev);
	spin_lock_init(&sp->lock);
	mutex_init(&sp->mii_mutex);

	/* Declare IPG NIC functions for Ethernet device methods. */
	dev->open = &ipg_nic_open;
	dev->stop = &ipg_nic_stop;
	dev->hard_start_xmit = &ipg_nic_hard_start_xmit;
	dev->get_stats = &ipg_nic_get_stats;
	dev->set_multicast_list = &ipg_nic_set_multicast_list;
	dev->do_ioctl = ipg_ioctl;
	dev->tx_timeout = ipg_tx_timeout;
	dev->change_mtu = &ipg_nic_change_mtu;

	SET_NETDEV_DEV(dev, &pdev->dev);
	SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc)
		goto err_free_dev_1;

	ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
	if (!ioaddr) {
		printk(KERN_ERR "%s: cannot map MMIO\n", pci_name(pdev));
		rc = -EIO;
		goto err_release_regions_2;
	}

	/* Save the pointer to the PCI device information. */
	sp->ioaddr = ioaddr;
	sp->pdev = pdev;
	sp->dev = dev;

	INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);

	pci_set_drvdata(pdev, dev);

	rc = ipg_hw_init(dev);
	if (rc < 0)
		goto err_unmap_3;

	rc = register_netdev(dev);
	if (rc < 0)
		goto err_unmap_3;

	printk(KERN_INFO "Ethernet device registered as: %s\n", dev->name);
out:
	return rc;

err_unmap_3:
	pci_iounmap(pdev, ioaddr);
err_release_regions_2:
	pci_release_regions(pdev);
err_free_dev_1:
	free_netdev(dev);
err_disable_0:
	pci_disable_device(pdev);
	goto out;
}

static struct pci_driver ipg_pci_driver = {
	.name		= IPG_DRIVER_NAME,
	.id_table	= ipg_pci_tbl,
	.probe		= ipg_probe,
	.remove		= __devexit_p(ipg_remove),
};

static int __init ipg_init_module(void)
{
	return pci_register_driver(&ipg_pci_driver);
}

static void __exit ipg_exit_module(void)
{
	pci_unregister_driver(&ipg_pci_driver);
}

module_init(ipg_init_module);
module_exit(ipg_exit_module);