Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.25-rc6 1867 lines 45 kB view raw
/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <kmliu@sis.com>
   Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
   genuine driver.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice.  This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.

 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <asm/irq.h>

/* Conditional printk helpers gated on the per-device netif_msg_* flags. */
#define net_drv(p, arg...)	if (netif_msg_drv(p)) \
					printk(arg)
#define net_probe(p, arg...)	if (netif_msg_probe(p)) \
					printk(arg)
#define net_link(p, arg...)	if (netif_msg_link(p)) \
					printk(arg)
#define net_intr(p, arg...)	if (netif_msg_intr(p)) \
					printk(arg)
#define net_tx_err(p, arg...)	if (netif_msg_tx_err(p)) \
					printk(arg)

#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.2"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
#define PFX DRV_NAME ": "

#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define MAC_ADDR_LEN		6

#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* Write/read MMIO register.  All of these expect a local `ioaddr`. */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))

/* Read back a register to flush posted PCI writes. */
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)

enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48,	// unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c,	// unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54,	// unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58,	// unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c,	// reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented		= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74,	// unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c,	// reserved
};

enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8,	/* DMA burst value (0-7) is shift this many bits */

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};

struct TxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

struct RxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	// RXOWN/TXOWN
	INTbit		= 0x40000000,	// RXINT/TXINT
	CRCbit		= 0x00020000,	// CRCOFF/CRCEN
	PADbit		= 0x00010000,	// PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000,	// TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000,	// multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publicly
	 * available documentation alas.
	 */
};

enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,
	EEROP	= 0x00000200,
	EEWOP	= 0x00000100	// unused
};

/* EEPROM Addresses */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};

enum sis190_feature {
	F_HAS_RGMII	= 1,
	F_PHY_88E1111	= 2,
	F_PHY_BCM5461	= 4
};

struct sis190_private {
	void __iomem *mmio_addr;
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;
	u32 rx_buf_sz;
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;
	struct timer_list timer;
	u32 msg_enable;
	struct mii_if_info mii_if;
	struct list_head first_phy;
	u32 features;
};

struct sis190_phy {
	struct list_head list;
	int phy_id;
	u16 id[2];
	u16 status;
	u8  type;
};

enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};

static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};

static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};

static struct pci_device_id sis190_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;

/* Post a command to GMIIControl and poll (up to ~100 ms) for completion. */
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	if (i > 99)
		printk(KERN_ERR PFX "PHY command failed !\n");
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
		(((u32) val) << EhnMIIdataShift));
}

static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));

	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}

/* mii_if_info adapter: write via the net_device's private ioaddr. */
static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
	struct sis190_private *tp = netdev_priv(dev);

	mdio_write(tp->mmio_addr, phy_id, reg, val);
}

/* mii_if_info adapter: read via the net_device's private ioaddr. */
static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mdio_read(tp->mmio_addr, phy_id, reg);
}

/* Read the register twice and return the second value. */
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}

/*
 * Read one 16 bit word from the EEPROM through the ROM interface.
 * Returns 0 when no EEPROM is present, 0xffff if the request times out.
 */
static u16 __devinit sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}

/* Mask all interrupts and acknowledge any pending ones. */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}

static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}

static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}

/*
 * Hand a descriptor back to the hardware.  The size/PSize fields must be
 * visible before the OWN bit is set, hence the wmb().
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}

static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}

/* Poison the descriptor so the hardware will never use it (OWN cleared). */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}

/* Allocate and DMA-map one Rx skb for @desc. Returns 0 or -ENOMEM. */
static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
			       struct RxDesc *desc, u32 rx_buf_sz)
{
	struct sk_buff *skb;
	dma_addr_t mapping;
	int ret = 0;

	skb = dev_alloc_skb(rx_buf_sz);
	if (!skb)
		goto err_out;

	*sk_buff = skb;

	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
				 PCI_DMA_FROMDEVICE);

	sis190_map_to_asic(desc, mapping, rx_buf_sz);
out:
	return ret;

err_out:
	ret = -ENOMEM;
	sis190_make_unusable_by_asic(desc);
	goto out;
}

/* Refill empty Rx slots in [start, end).  Returns the number refilled. */
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur;

	for (cur = start; cur < end; cur++) {
		int ret, i = cur % NUM_RX_DESC;

		if (tp->Rx_skbuff[i])
			continue;

		ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i,
					  tp->RxDescRing + i, tp->rx_buf_sz);
		if (ret < 0)
			break;
	}
	return cur - start;
}

/*
 * Copy small packets (< rx_copybreak) into a fresh skb so the original
 * buffer can be recycled.  Returns 0 on copy, -1 when the caller should
 * hand the original skb up instead.
 */
static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
				     struct RxDesc *desc, int rx_buf_sz)
{
	int ret = -1;

	if (pkt_size < rx_copybreak) {
		struct sk_buff *skb;

		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
		if (skb) {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
			*sk_buff = skb;
			sis190_give_to_asic(desc, rx_buf_sz);
			ret = 0;
		}
	}
	return ret;
}

/* Account a receive error in @stats.  Returns 0 when the packet is good. */
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	if ((status & CRCOK) && !(status & ErrMask))
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}

static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		// net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
		//	 status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			int pkt_size = (status & RxSizeMask) - 4;
			void (*pci_action)(struct pci_dev *, dma_addr_t,
				size_t, int) = pci_dma_sync_single_for_device;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				net_intr(tp, KERN_INFO
					 "%s: (frag) status = %08x.\n",
					 dev->name, status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			pci_dma_sync_single_for_cpu(tp->pci_dev,
				le32_to_cpu(desc->addr), tp->rx_buf_sz,
				PCI_DMA_FROMDEVICE);

			if (sis190_try_rx_copy(&skb, pkt_size, desc,
					       tp->rx_buf_sz)) {
				pci_action = pci_unmap_single;
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			pci_action(tp->pci_dev, le32_to_cpu(desc->addr),
				   tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			dev->last_rx = jiffies;
			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count && netif_msg_intr(tp))
		printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
	tp->dirty_rx += delta;

	if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
		printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);

	return count;
}

/* Unmap a sent skb and clear its Tx descriptor. */
static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE);

	memset(desc, 0x00, sizeof(*desc));
}

static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		struct sk_buff *skb;

		if (le32_to_cpu(txd->status) & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb->len;

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_kfree_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}

/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_interrupt(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);

	// net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);

	if (status & LinkChange) {
		net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	disable_irq(pdev->irq);
	sis190_interrupt(pdev->irq, dev);
	enable_irq(pdev->irq);
}
#endif

static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	pci_unmap_single(pdev, le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 PCI_DMA_FROMDEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	sis190_make_unusable_by_asic(desc);
}

static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

/* Reset ring state and fully populate the Rx ring.  0 or -ENOMEM. */
static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}

static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((dev->mc_count > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts. */
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else {
		struct dev_mc_list *mclist;
		unsigned int i;

		rx_mode = AcceptBroadcast | AcceptMyPhys;
		mc_filter[1] = mc_filter[0] = 0;
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {
			int bit_nr =
				ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
			mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
			rx_mode |= AcceptMulticast;
		}
	}

	spin_lock_irqsave(&tp->lock, flags);

	SIS_W16(RxMacControl, rx_mode | 0x2);
	SIS_W32(RxHashTable, mc_filter[0]);
	SIS_W32(RxHashTable + 4, mc_filter[1]);

	spin_unlock_irqrestore(&tp->lock, flags);
}

static void sis190_soft_reset(void __iomem *ioaddr)
{
	SIS_W32(IntrControl, 0x8000);
	SIS_PCI_COMMIT();
	SIS_W32(IntrControl, 0x0);
	sis190_asic_down(ioaddr);
}

static void sis190_hw_start(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;

	sis190_soft_reset(ioaddr);

	SIS_W32(TxDescStartAddr, tp->tx_dma);
	SIS_W32(RxDescStartAddr, tp->rx_dma);

	SIS_W32(IntrStatus, 0xffffffff);
	SIS_W32(IntrMask, 0x0);
	SIS_W32(GMIIControl, 0x0);
	SIS_W32(TxMacControl, 0x60);
	SIS_W16(RxMacControl, 0x02);
	SIS_W32(RxHashTable, 0x0);
	SIS_W32(0x6c, 0x0);
	SIS_W32(RxWolCtrl, 0x0);
	SIS_W32(RxWolData, 0x0);

	SIS_PCI_COMMIT();

	sis190_set_rx_mode(dev);

	/* Enable all known interrupts by setting the interrupt mask. */
	SIS_W32(IntrMask, sis190_intr_mask);

	SIS_W32(TxControl, 0x1a00 | CmdTxEnb);
	SIS_W32(RxControl, 0x1a1d);

	netif_start_queue(dev);
}

/*
 * Deferred PHY work: reschedule while the PHY is resetting or still
 * autonegotiating, otherwise program StationControl for the negotiated
 * speed/duplex and report carrier.
 */
static void sis190_phy_task(struct work_struct *work)
{
	struct sis190_private *tp =
		container_of(work, struct sis190_private, phy_task);
	struct net_device *dev = tp->dev;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id = tp->mii_if.phy_id;
	u16 val;

	rtnl_lock();

	if (!netif_running(dev))
		goto out_unlock;

	val = mdio_read(ioaddr, phy_id, MII_BMCR);
	if (val & BMCR_RESET) {
		// FIXME: needlessly high ? -- FR 02/07/2005
		mod_timer(&tp->timer, jiffies + HZ/10);
	} else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
		     BMSR_ANEGCOMPLETE)) {
		net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n",
			 dev->name);
		netif_carrier_off(dev);
		mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET);
		mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
	} else {
		/* Rejoice ! */
		struct {
			int val;
			u32 ctl;
			const char *msg;
		} reg31[] = {
			{ LPA_1000XFULL | LPA_SLCT, 0x07000c00 | 0x00001000,
			  "1000 Mbps Full Duplex" },
			{ LPA_1000XHALF | LPA_SLCT, 0x07000c00,
			  "1000 Mbps Half Duplex" },
			{ LPA_100FULL, 0x04000800 | 0x00001000,
			  "100 Mbps Full Duplex" },
			{ LPA_100HALF, 0x04000800,
			  "100 Mbps Half Duplex" },
			{ LPA_10FULL, 0x04000400 | 0x00001000,
			  "10 Mbps Full Duplex" },
			{ LPA_10HALF, 0x04000400,
			  "10 Mbps Half Duplex" },
			{ 0, 0x04000400, "unknown" }
		}, *p;
		u16 adv;

		val = mdio_read(ioaddr, phy_id, 0x1f);
		net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);

		val = mdio_read(ioaddr, phy_id, MII_LPA);
		adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
		net_link(tp, KERN_INFO "%s: mii lpa = %04x adv = %04x.\n",
			 dev->name, val, adv);

		val &= adv;

		for (p = reg31; p->val; p++) {
			if ((val & p->val) == p->val)
				break;
		}

		p->ctl |= SIS_R32(StationControl) & ~0x0f001c00;

		if ((tp->features & F_HAS_RGMII) &&
		    (tp->features & F_PHY_BCM5461)) {
			// Set Tx Delay in RGMII mode.
			mdio_write(ioaddr, phy_id, 0x18, 0xf1c7);
			udelay(200);
			mdio_write(ioaddr, phy_id, 0x1c, 0x8c00);
			p->ctl |= 0x03000000;
		}

		SIS_W32(StationControl, p->ctl);

		if (tp->features & F_HAS_RGMII) {
			SIS_W32(RGDelay, 0x0441);
			SIS_W32(RGDelay, 0x0440);
		}

		net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
			 p->msg);
		netif_carrier_on(dev);
	}

out_unlock:
	rtnl_unlock();
}

static void sis190_phy_timer(unsigned long __opaque)
{
	struct net_device *dev = (struct net_device *)__opaque;
	struct sis190_private *tp = netdev_priv(dev);

	if (likely(netif_running(dev)))
		schedule_work(&tp->phy_task);
}

static inline void sis190_delete_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	del_timer_sync(&tp->timer);
}

static inline void sis190_request_timer(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct timer_list *timer = &tp->timer;

	init_timer(timer);
	timer->expires = jiffies + SIS190_PHY_TIMEOUT;
	timer->data = (unsigned long)dev;
	timer->function = sis190_phy_timer;
	add_timer(timer);
}

/* Derive rx_buf_sz from the MTU, rounded up to an 8 byte multiple. */
static void sis190_set_rxbufsize(struct sis190_private *tp,
				 struct net_device *dev)
{
	unsigned int mtu = dev->mtu;

	tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
	/* RxDesc->size has a licence to kill the lower bits */
	if (tp->rx_buf_sz & 0x07) {
		tp->rx_buf_sz += 8;
		tp->rx_buf_sz &= RX_BUF_MASK;
	}
}

static int sis190_open(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;
	int rc = -ENOMEM;

	sis190_set_rxbufsize(tp, dev);

	/*
	 * Rx and Tx descriptors need 256 bytes alignment.
	 * pci_alloc_consistent() guarantees a stronger alignment.
	 */
	tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma);
	if (!tp->TxDescRing)
		goto out;

	tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma);
	if (!tp->RxDescRing)
		goto err_free_tx_0;

	rc = sis190_init_ring(dev);
	if (rc < 0)
		goto err_free_rx_1;

	sis190_request_timer(dev);

	rc = request_irq(dev->irq, sis190_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc < 0)
		goto err_release_timer_2;

	sis190_hw_start(dev);
out:
	return rc;

err_release_timer_2:
	sis190_delete_timer(dev);
	sis190_rx_clear(tp);
err_free_rx_1:
	pci_free_consistent(tp->pci_dev, RX_RING_BYTES, tp->RxDescRing,
		tp->rx_dma);
err_free_tx_0:
	pci_free_consistent(tp->pci_dev, TX_RING_BYTES, tp->TxDescRing,
		tp->tx_dma);
	goto out;
}

static void sis190_tx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_TX_DESC; i++) {
		struct sk_buff *skb = tp->Tx_skbuff[i];

		if (!skb)
			continue;

		sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i);
		tp->Tx_skbuff[i] = NULL;
		dev_kfree_skb(skb);

		tp->dev->stats.tx_dropped++;
	}
	tp->cur_tx = tp->dirty_tx = 0;
}

static void sis190_down(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int poll_locked = 0;

	sis190_delete_timer(dev);

	netif_stop_queue(dev);

	do {
		spin_lock_irq(&tp->lock);

		sis190_asic_down(ioaddr);

		spin_unlock_irq(&tp->lock);

		synchronize_irq(dev->irq);

		if (!poll_locked)
			poll_locked++;

		synchronize_sched();

	} while (SIS_R32(IntrMask));

	sis190_tx_clear(tp);
	sis190_rx_clear(tp);
}

static int sis190_close(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct pci_dev *pdev = tp->pci_dev;

	sis190_down(dev);

	free_irq(dev->irq, dev);

	pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma);
	pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma);

	tp->TxDescRing = NULL;
	tp->RxDescRing = NULL;

	return 0;
}

static int sis190_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	u32 len, entry, dirty_tx;
	struct TxDesc *desc;
	dma_addr_t mapping;

	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN)) {
			dev->stats.tx_dropped++;
			goto out;
		}
		len = ETH_ZLEN;
	} else {
		len = skb->len;
	}

	entry = tp->cur_tx % NUM_TX_DESC;
	desc = tp->TxDescRing + entry;

	if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
		netif_stop_queue(dev);
		net_tx_err(tp, KERN_ERR PFX
			   "%s: BUG! Tx Ring full when queue awake!\n",
			   dev->name);
		return NETDEV_TX_BUSY;
	}

	mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);

	tp->Tx_skbuff[entry] = skb;

	desc->PSize = cpu_to_le32(len);
	desc->addr = cpu_to_le32(mapping);

	desc->size = cpu_to_le32(len);
	if (entry == (NUM_TX_DESC - 1))
		desc->size |= cpu_to_le32(RingEnd);

	wmb();

	desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);

	tp->cur_tx++;

	smp_wmb();

	SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb);

	dev->trans_start = jiffies;

	dirty_tx = tp->dirty_tx;
	if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) {
		netif_stop_queue(dev);
		smp_rmb();
		if (dirty_tx != tp->dirty_tx)
			netif_wake_queue(dev);
	}
out:
	return NETDEV_TX_OK;
}

static void sis190_free_phy(struct list_head *first_phy)
{
	struct sis190_phy *cur, *next;

	list_for_each_entry_safe(cur, next, first_phy, list) {
		kfree(cur);
	}
}

/**
 * sis190_default_phy - Select default PHY for sis190 mac.
 * @dev: the net device to probe for
 *
 * Select first detected PHY with link as default.
 * If no one is link on, select PHY whose types is HOME as default.
 * If HOME doesn't exist, select LAN.
 */
static u16 sis190_default_phy(struct net_device *dev)
{
	struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan;
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	u16 status;

	phy_home = phy_default = phy_lan = NULL;

	list_for_each_entry(phy, &tp->first_phy, list) {
		status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR);

		// Link ON & Not select default PHY & not ghost PHY.
		if ((status & BMSR_LSTATUS) &&
		    !phy_default &&
		    (phy->type != UNKNOWN)) {
			phy_default = phy;
		} else {
			status = mdio_read(ioaddr, phy->phy_id, MII_BMCR);
			mdio_write(ioaddr, phy->phy_id, MII_BMCR,
				   status | BMCR_ANENABLE | BMCR_ISOLATE);
			if (phy->type == HOME)
				phy_home = phy;
			else if (phy->type == LAN)
				phy_lan = phy;
		}
	}

	if (!phy_default) {
		if (phy_home)
			phy_default = phy_home;
		else if (phy_lan)
			phy_default = phy_lan;
		else
			/*
			 * NOTE(review): list_entry() on the list head yields
			 * a bogus pointer if first_phy is empty -- presumably
			 * the probe path guarantees at least one PHY here;
			 * confirm against the caller.
			 */
			phy_default = list_entry(&tp->first_phy,
						 struct sis190_phy, list);
	}

	if (mii_if->phy_id != phy_default->phy_id) {
		mii_if->phy_id = phy_default->phy_id;
		net_probe(tp, KERN_INFO
		       "%s: Using transceiver at address %d as default.\n",
		       pci_name(tp->pci_dev), mii_if->phy_id);
	}

	status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
	status &= (~BMCR_ISOLATE);

	mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status);
	status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR);

	return status;
}

/* Identify one PHY against mii_chip_table and record its type/features. */
static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
			    struct sis190_phy *phy, unsigned int phy_id,
			    u16 mii_status)
{
	void __iomem *ioaddr = tp->mmio_addr;
	struct mii_chip_info *p;

	INIT_LIST_HEAD(&phy->list);
	phy->status = mii_status;
	phy->phy_id = phy_id;

	phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1);
	phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2);

	for (p = mii_chip_table; p->type; p++) {
		if ((p->id[0] == phy->id[0]) &&
		    (p->id[1] == (phy->id[1] & 0xfff0))) {
			break;
		}
	}

	if (p->id[1]) {
		phy->type = (p->type == MIX) ?
			((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
				LAN : HOME) : p->type;
		tp->features |= p->feature;
	} else
		phy->type = UNKNOWN;

	net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
		  pci_name(tp->pci_dev),
		  (phy->type == UNKNOWN) ? "Unknown PHY" : p->name, phy_id);
}

/* Marvell 88E1111 needs extra register writes, different for RGMII/GMII. */
static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp)
{
	if (tp->features & F_PHY_88E1111) {
		void __iomem *ioaddr = tp->mmio_addr;
		int phy_id = tp->mii_if.phy_id;
		u16 reg[2][2] = {
			{ 0x808b, 0x0ce1 },
			{ 0x808f, 0x0c60 }
		}, *p;

		p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1];

		mdio_write(ioaddr, phy_id, 0x1b, p[0]);
		udelay(200);
		mdio_write(ioaddr, phy_id, 0x14, p[1]);
		udelay(200);
	}
}

/**
 * sis190_mii_probe - Probe MII PHY for sis190
 * @dev: the net device to probe for
 *
 * Search for total of 32 possible mii phy addresses.
 * Identify and set current phy if found one,
 * return error if it failed to found.
 */
static int __devinit sis190_mii_probe(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	struct mii_if_info *mii_if = &tp->mii_if;
	void __iomem *ioaddr = tp->mmio_addr;
	int phy_id;
	int rc = 0;

	INIT_LIST_HEAD(&tp->first_phy);

	for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
		struct sis190_phy *phy;
		u16 status;

		status = mdio_read_latched(ioaddr, phy_id, MII_BMSR);

		// Try next mii if the current one is not accessible.
1344 if (status == 0xffff || status == 0x0000) 1345 continue; 1346 1347 phy = kmalloc(sizeof(*phy), GFP_KERNEL); 1348 if (!phy) { 1349 sis190_free_phy(&tp->first_phy); 1350 rc = -ENOMEM; 1351 goto out; 1352 } 1353 1354 sis190_init_phy(dev, tp, phy, phy_id, status); 1355 1356 list_add(&tp->first_phy, &phy->list); 1357 } 1358 1359 if (list_empty(&tp->first_phy)) { 1360 net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n", 1361 pci_name(tp->pci_dev)); 1362 rc = -EIO; 1363 goto out; 1364 } 1365 1366 /* Select default PHY for mac */ 1367 sis190_default_phy(dev); 1368 1369 sis190_mii_probe_88e1111_fixup(tp); 1370 1371 mii_if->dev = dev; 1372 mii_if->mdio_read = __mdio_read; 1373 mii_if->mdio_write = __mdio_write; 1374 mii_if->phy_id_mask = PHY_ID_ANY; 1375 mii_if->reg_num_mask = MII_REG_ANY; 1376out: 1377 return rc; 1378} 1379 1380static void sis190_mii_remove(struct net_device *dev) 1381{ 1382 struct sis190_private *tp = netdev_priv(dev); 1383 1384 sis190_free_phy(&tp->first_phy); 1385} 1386 1387static void sis190_release_board(struct pci_dev *pdev) 1388{ 1389 struct net_device *dev = pci_get_drvdata(pdev); 1390 struct sis190_private *tp = netdev_priv(dev); 1391 1392 iounmap(tp->mmio_addr); 1393 pci_release_regions(pdev); 1394 pci_disable_device(pdev); 1395 free_netdev(dev); 1396} 1397 1398static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev) 1399{ 1400 struct sis190_private *tp; 1401 struct net_device *dev; 1402 void __iomem *ioaddr; 1403 int rc; 1404 1405 dev = alloc_etherdev(sizeof(*tp)); 1406 if (!dev) { 1407 net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n"); 1408 rc = -ENOMEM; 1409 goto err_out_0; 1410 } 1411 1412 SET_NETDEV_DEV(dev, &pdev->dev); 1413 1414 tp = netdev_priv(dev); 1415 tp->dev = dev; 1416 tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); 1417 1418 rc = pci_enable_device(pdev); 1419 if (rc < 0) { 1420 net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev)); 1421 goto err_free_dev_1; 
1422 } 1423 1424 rc = -ENODEV; 1425 1426 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 1427 net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n", 1428 pci_name(pdev)); 1429 goto err_pci_disable_2; 1430 } 1431 if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) { 1432 net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n", 1433 pci_name(pdev)); 1434 goto err_pci_disable_2; 1435 } 1436 1437 rc = pci_request_regions(pdev, DRV_NAME); 1438 if (rc < 0) { 1439 net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n", 1440 pci_name(pdev)); 1441 goto err_pci_disable_2; 1442 } 1443 1444 rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK); 1445 if (rc < 0) { 1446 net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n", 1447 pci_name(pdev)); 1448 goto err_free_res_3; 1449 } 1450 1451 pci_set_master(pdev); 1452 1453 ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE); 1454 if (!ioaddr) { 1455 net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n", 1456 pci_name(pdev)); 1457 rc = -EIO; 1458 goto err_free_res_3; 1459 } 1460 1461 tp->pci_dev = pdev; 1462 tp->mmio_addr = ioaddr; 1463 1464 sis190_irq_mask_and_ack(ioaddr); 1465 1466 sis190_soft_reset(ioaddr); 1467out: 1468 return dev; 1469 1470err_free_res_3: 1471 pci_release_regions(pdev); 1472err_pci_disable_2: 1473 pci_disable_device(pdev); 1474err_free_dev_1: 1475 free_netdev(dev); 1476err_out_0: 1477 dev = ERR_PTR(rc); 1478 goto out; 1479} 1480 1481static void sis190_tx_timeout(struct net_device *dev) 1482{ 1483 struct sis190_private *tp = netdev_priv(dev); 1484 void __iomem *ioaddr = tp->mmio_addr; 1485 u8 tmp8; 1486 1487 /* Disable Tx, if not already */ 1488 tmp8 = SIS_R8(TxControl); 1489 if (tmp8 & CmdTxEnb) 1490 SIS_W8(TxControl, tmp8 & ~CmdTxEnb); 1491 1492 1493 net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n", 1494 dev->name, SIS_R32(TxControl), SIS_R32(TxSts)); 1495 1496 /* Disable interrupts by clearing the interrupt mask. 
*/ 1497 SIS_W32(IntrMask, 0x0000); 1498 1499 /* Stop a shared interrupt from scavenging while we are. */ 1500 spin_lock_irq(&tp->lock); 1501 sis190_tx_clear(tp); 1502 spin_unlock_irq(&tp->lock); 1503 1504 /* ...and finally, reset everything. */ 1505 sis190_hw_start(dev); 1506 1507 netif_wake_queue(dev); 1508} 1509 1510static void sis190_set_rgmii(struct sis190_private *tp, u8 reg) 1511{ 1512 tp->features |= (reg & 0x80) ? F_HAS_RGMII : 0; 1513} 1514 1515static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, 1516 struct net_device *dev) 1517{ 1518 struct sis190_private *tp = netdev_priv(dev); 1519 void __iomem *ioaddr = tp->mmio_addr; 1520 u16 sig; 1521 int i; 1522 1523 net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n", 1524 pci_name(pdev)); 1525 1526 /* Check to see if there is a sane EEPROM */ 1527 sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature); 1528 1529 if ((sig == 0xffff) || (sig == 0x0000)) { 1530 net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n", 1531 pci_name(pdev), sig); 1532 return -EIO; 1533 } 1534 1535 /* Get MAC address from EEPROM */ 1536 for (i = 0; i < MAC_ADDR_LEN / 2; i++) { 1537 u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); 1538 1539 ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w); 1540 } 1541 1542 sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo)); 1543 1544 return 0; 1545} 1546 1547/** 1548 * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model 1549 * @pdev: PCI device 1550 * @dev: network device to get address for 1551 * 1552 * SiS96x model, use APC CMOS RAM to store MAC address. 1553 * APC CMOS RAM is accessed through ISA bridge. 1554 * MAC address is read into @net_dev->dev_addr. 
1555 */ 1556static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev, 1557 struct net_device *dev) 1558{ 1559 static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 }; 1560 struct sis190_private *tp = netdev_priv(dev); 1561 struct pci_dev *isa_bridge; 1562 u8 reg, tmp8; 1563 unsigned int i; 1564 1565 net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n", 1566 pci_name(pdev)); 1567 1568 for (i = 0; i < ARRAY_SIZE(ids); i++) { 1569 isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL); 1570 if (isa_bridge) 1571 break; 1572 } 1573 1574 if (!isa_bridge) { 1575 net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n", 1576 pci_name(pdev)); 1577 return -EIO; 1578 } 1579 1580 /* Enable port 78h & 79h to access APC Registers. */ 1581 pci_read_config_byte(isa_bridge, 0x48, &tmp8); 1582 reg = (tmp8 & ~0x02); 1583 pci_write_config_byte(isa_bridge, 0x48, reg); 1584 udelay(50); 1585 pci_read_config_byte(isa_bridge, 0x48, &reg); 1586 1587 for (i = 0; i < MAC_ADDR_LEN; i++) { 1588 outb(0x9 + i, 0x78); 1589 dev->dev_addr[i] = inb(0x79); 1590 } 1591 1592 outb(0x12, 0x78); 1593 reg = inb(0x79); 1594 1595 sis190_set_rgmii(tp, reg); 1596 1597 /* Restore the value to ISA Bridge */ 1598 pci_write_config_byte(isa_bridge, 0x48, tmp8); 1599 pci_dev_put(isa_bridge); 1600 1601 return 0; 1602} 1603 1604/** 1605 * sis190_init_rxfilter - Initialize the Rx filter 1606 * @dev: network device to initialize 1607 * 1608 * Set receive filter address to our MAC address 1609 * and enable packet filtering. 1610 */ 1611static inline void sis190_init_rxfilter(struct net_device *dev) 1612{ 1613 struct sis190_private *tp = netdev_priv(dev); 1614 void __iomem *ioaddr = tp->mmio_addr; 1615 u16 ctl; 1616 int i; 1617 1618 ctl = SIS_R16(RxMacControl); 1619 /* 1620 * Disable packet filtering before setting filter. 1621 * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits 1622 * only and followed by RxMacAddr (6 bytes). Strange. 
-- FR 1623 */ 1624 SIS_W16(RxMacControl, ctl & ~0x0f00); 1625 1626 for (i = 0; i < MAC_ADDR_LEN; i++) 1627 SIS_W8(RxMacAddr + i, dev->dev_addr[i]); 1628 1629 SIS_W16(RxMacControl, ctl); 1630 SIS_PCI_COMMIT(); 1631} 1632 1633static int __devinit sis190_get_mac_addr(struct pci_dev *pdev, 1634 struct net_device *dev) 1635{ 1636 int rc; 1637 1638 rc = sis190_get_mac_addr_from_eeprom(pdev, dev); 1639 if (rc < 0) { 1640 u8 reg; 1641 1642 pci_read_config_byte(pdev, 0x73, &reg); 1643 1644 if (reg & 0x00000001) 1645 rc = sis190_get_mac_addr_from_apc(pdev, dev); 1646 } 1647 return rc; 1648} 1649 1650static void sis190_set_speed_auto(struct net_device *dev) 1651{ 1652 struct sis190_private *tp = netdev_priv(dev); 1653 void __iomem *ioaddr = tp->mmio_addr; 1654 int phy_id = tp->mii_if.phy_id; 1655 int val; 1656 1657 net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name); 1658 1659 val = mdio_read(ioaddr, phy_id, MII_ADVERTISE); 1660 1661 // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0 1662 // unchanged. 1663 mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) | 1664 ADVERTISE_100FULL | ADVERTISE_10FULL | 1665 ADVERTISE_100HALF | ADVERTISE_10HALF); 1666 1667 // Enable 1000 Full Mode. 1668 mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL); 1669 1670 // Enable auto-negotiation and restart auto-negotiation. 
1671 mdio_write(ioaddr, phy_id, MII_BMCR, 1672 BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET); 1673} 1674 1675static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1676{ 1677 struct sis190_private *tp = netdev_priv(dev); 1678 1679 return mii_ethtool_gset(&tp->mii_if, cmd); 1680} 1681 1682static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) 1683{ 1684 struct sis190_private *tp = netdev_priv(dev); 1685 1686 return mii_ethtool_sset(&tp->mii_if, cmd); 1687} 1688 1689static void sis190_get_drvinfo(struct net_device *dev, 1690 struct ethtool_drvinfo *info) 1691{ 1692 struct sis190_private *tp = netdev_priv(dev); 1693 1694 strcpy(info->driver, DRV_NAME); 1695 strcpy(info->version, DRV_VERSION); 1696 strcpy(info->bus_info, pci_name(tp->pci_dev)); 1697} 1698 1699static int sis190_get_regs_len(struct net_device *dev) 1700{ 1701 return SIS190_REGS_SIZE; 1702} 1703 1704static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs, 1705 void *p) 1706{ 1707 struct sis190_private *tp = netdev_priv(dev); 1708 unsigned long flags; 1709 1710 if (regs->len > SIS190_REGS_SIZE) 1711 regs->len = SIS190_REGS_SIZE; 1712 1713 spin_lock_irqsave(&tp->lock, flags); 1714 memcpy_fromio(p, tp->mmio_addr, regs->len); 1715 spin_unlock_irqrestore(&tp->lock, flags); 1716} 1717 1718static int sis190_nway_reset(struct net_device *dev) 1719{ 1720 struct sis190_private *tp = netdev_priv(dev); 1721 1722 return mii_nway_restart(&tp->mii_if); 1723} 1724 1725static u32 sis190_get_msglevel(struct net_device *dev) 1726{ 1727 struct sis190_private *tp = netdev_priv(dev); 1728 1729 return tp->msg_enable; 1730} 1731 1732static void sis190_set_msglevel(struct net_device *dev, u32 value) 1733{ 1734 struct sis190_private *tp = netdev_priv(dev); 1735 1736 tp->msg_enable = value; 1737} 1738 1739static const struct ethtool_ops sis190_ethtool_ops = { 1740 .get_settings = sis190_get_settings, 1741 .set_settings = sis190_set_settings, 1742 .get_drvinfo = 
sis190_get_drvinfo, 1743 .get_regs_len = sis190_get_regs_len, 1744 .get_regs = sis190_get_regs, 1745 .get_link = ethtool_op_get_link, 1746 .get_msglevel = sis190_get_msglevel, 1747 .set_msglevel = sis190_set_msglevel, 1748 .nway_reset = sis190_nway_reset, 1749}; 1750 1751static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1752{ 1753 struct sis190_private *tp = netdev_priv(dev); 1754 1755 return !netif_running(dev) ? -EINVAL : 1756 generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL); 1757} 1758 1759static int __devinit sis190_init_one(struct pci_dev *pdev, 1760 const struct pci_device_id *ent) 1761{ 1762 static int printed_version = 0; 1763 struct sis190_private *tp; 1764 struct net_device *dev; 1765 void __iomem *ioaddr; 1766 int rc; 1767 DECLARE_MAC_BUF(mac); 1768 1769 if (!printed_version) { 1770 net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n"); 1771 printed_version = 1; 1772 } 1773 1774 dev = sis190_init_board(pdev); 1775 if (IS_ERR(dev)) { 1776 rc = PTR_ERR(dev); 1777 goto out; 1778 } 1779 1780 pci_set_drvdata(pdev, dev); 1781 1782 tp = netdev_priv(dev); 1783 ioaddr = tp->mmio_addr; 1784 1785 rc = sis190_get_mac_addr(pdev, dev); 1786 if (rc < 0) 1787 goto err_release_board; 1788 1789 sis190_init_rxfilter(dev); 1790 1791 INIT_WORK(&tp->phy_task, sis190_phy_task); 1792 1793 dev->open = sis190_open; 1794 dev->stop = sis190_close; 1795 dev->do_ioctl = sis190_ioctl; 1796 dev->tx_timeout = sis190_tx_timeout; 1797 dev->watchdog_timeo = SIS190_TX_TIMEOUT; 1798 dev->hard_start_xmit = sis190_start_xmit; 1799#ifdef CONFIG_NET_POLL_CONTROLLER 1800 dev->poll_controller = sis190_netpoll; 1801#endif 1802 dev->set_multicast_list = sis190_set_rx_mode; 1803 SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); 1804 dev->irq = pdev->irq; 1805 dev->base_addr = (unsigned long) 0xdead; 1806 1807 spin_lock_init(&tp->lock); 1808 1809 rc = sis190_mii_probe(dev); 1810 if (rc < 0) 1811 goto err_release_board; 1812 1813 rc = register_netdev(dev); 1814 if (rc < 0) 
1815 goto err_remove_mii; 1816 1817 net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), " 1818 "%s\n", 1819 pci_name(pdev), sis_chip_info[ent->driver_data].name, 1820 ioaddr, dev->irq, print_mac(mac, dev->dev_addr)); 1821 1822 net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name, 1823 (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); 1824 1825 netif_carrier_off(dev); 1826 1827 sis190_set_speed_auto(dev); 1828out: 1829 return rc; 1830 1831err_remove_mii: 1832 sis190_mii_remove(dev); 1833err_release_board: 1834 sis190_release_board(pdev); 1835 goto out; 1836} 1837 1838static void __devexit sis190_remove_one(struct pci_dev *pdev) 1839{ 1840 struct net_device *dev = pci_get_drvdata(pdev); 1841 1842 sis190_mii_remove(dev); 1843 flush_scheduled_work(); 1844 unregister_netdev(dev); 1845 sis190_release_board(pdev); 1846 pci_set_drvdata(pdev, NULL); 1847} 1848 1849static struct pci_driver sis190_pci_driver = { 1850 .name = DRV_NAME, 1851 .id_table = sis190_pci_tbl, 1852 .probe = sis190_init_one, 1853 .remove = __devexit_p(sis190_remove_one), 1854}; 1855 1856static int __init sis190_init_module(void) 1857{ 1858 return pci_register_driver(&sis190_pci_driver); 1859} 1860 1861static void __exit sis190_cleanup_module(void) 1862{ 1863 pci_unregister_driver(&sis190_pci_driver); 1864} 1865 1866module_init(sis190_init_module); 1867module_exit(sis190_cleanup_module);