/*
 * meth.c -- O2 Builtin 10/100 Ethernet driver
 *
 * Copyright (C) 2001-2003 Ilya Volynets
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/device.h>      /* struct device, et al */
#include <linux/netdevice.h>   /* struct device, and other headers */
#include <linux/etherdevice.h> /* eth_type_trans */
#include <linux/ip.h>          /* struct iphdr */
#include <linux/tcp.h>         /* struct tcphdr */
#include <linux/skbuff.h>
#include <linux/mii.h>         /* MII definitions */

#include <asm/ip32/mace.h>
#include <asm/ip32/ip32_ints.h>

#include <asm/io.h>

#include "meth.h"

#ifndef MFE_DEBUG
#define MFE_DEBUG 0
#endif

#if MFE_DEBUG>=1
#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __func__ , ## args)
#define MFE_RX_DEBUG 2
#else
#define DPRINTK(str,args...)
#define MFE_RX_DEBUG 0
#endif


static const char *meth_str="SGI O2 Fast Ethernet";

#define HAVE_TX_TIMEOUT
/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)

#ifdef HAVE_TX_TIMEOUT
static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
#endif

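/*
 * All hardware access below goes through the memory-mapped MACE
 * register block reached via the global 'mace' pointer declared in
 * <asm/ip32/mace.h>.  The METH_* register bits, the ring sizes and the
 * tx_packet/rx_packet descriptor layouts come from "meth.h".
 */
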
/*
 * This structure is private to each device. It is used to pass
 * packets in and out, so there is place for a packet
 */
struct meth_private {
	/* in-memory copy of MAC Control register */
	unsigned long mac_ctrl;
	/* in-memory copy of DMA Control register */
	unsigned long dma_ctrl;
	/* address of PHY, used by mdio_* functions, initialized in mdio_probe */
	unsigned long phy_addr;
	tx_packet *tx_ring;
	dma_addr_t tx_ring_dma;
	struct sk_buff *tx_skbs[TX_RING_ENTRIES];
	dma_addr_t tx_skb_dmas[TX_RING_ENTRIES];
	unsigned long tx_read, tx_write, tx_count;

	rx_packet *rx_ring[RX_RING_ENTRIES];
	dma_addr_t rx_ring_dmas[RX_RING_ENTRIES];
	struct sk_buff *rx_skbs[RX_RING_ENTRIES];
	unsigned long rx_write;

	spinlock_t meth_lock;
};

static void meth_tx_timeout(struct net_device *dev);
static irqreturn_t meth_interrupt(int irq, void *dev_id);

/* global, initialized in ip32-setup.c */
char o2meth_eaddr[8]={0,0,0,0,0,0,0,0};

static inline void load_eaddr(struct net_device *dev)
{
	int i;
	u64 macaddr;

	DPRINTK("Loading MAC Address: %pM\n", dev->dev_addr);
	macaddr = 0;
	for (i = 0; i < 6; i++)
		macaddr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);

	mace->eth.mac_addr = macaddr;
}

/*
 * Waits for BUSY status of mdio bus to clear
 */
#define WAIT_FOR_PHY(___rval)					\
	while ((___rval = mace->eth.phy_data) & MDIO_BUSY) {	\
		udelay(25);					\
	}

/*read phy register, return value read */
static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg)
{
	unsigned long rval;
	WAIT_FOR_PHY(rval);
	mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f);
	udelay(25);
	mace->eth.phy_trans_go = 1;
	udelay(25);
	WAIT_FOR_PHY(rval);
	return rval & MDIO_DATA_MASK;
}

static int mdio_probe(struct meth_private *priv)
{
	int i;
	unsigned long p2, p3, flags;
	/* check if phy is detected already */
	if(priv->phy_addr>=0&&priv->phy_addr<32)
		return 0;
	spin_lock_irqsave(&priv->meth_lock, flags);
	for (i=0;i<32;++i){
		priv->phy_addr=i;
		p2=mdio_read(priv,2);
		p3=mdio_read(priv,3);
#if MFE_DEBUG>=2
		switch ((p2<<12)|(p3>>4)){
		case PHY_QS6612X:
			DPRINTK("PHY is QS6612X\n");
			break;
		case PHY_ICS1889:
			DPRINTK("PHY is ICS1889\n");
			break;
		case PHY_ICS1890:
			DPRINTK("PHY is ICS1890\n");
			break;
		case PHY_DP83840:
			DPRINTK("PHY is DP83840\n");
			break;
		}
#endif
		if(p2!=0xffff&&p2!=0x0000){
			DPRINTK("PHY code: %x\n",(p2<<12)|(p3>>4));
			break;
		}
	}
	spin_unlock_irqrestore(&priv->meth_lock, flags);
	if(priv->phy_addr<32) {
		return 0;
	}
	DPRINTK("Oopsie! PHY is not known!\n");
	priv->phy_addr=-1;
	return -ENODEV;
}

/*
 * Check the result of MII autonegotiation: register 4 is our
 * advertisement, register 5 is the link partner's ability, and their
 * AND is the set of modes both ends support.  In the standard MII bit
 * layout 0x0380 covers the 100 Mbit ability bits, 0x0100 is 100BASE-TX
 * full duplex, and the (negotiated & 0x01C0) == 0x0040 test catches
 * "only 10BASE-T full duplex in common".
 */
static void meth_check_link(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long mii_advertising = mdio_read(priv, 4);
	unsigned long mii_partner = mdio_read(priv, 5);
	unsigned long negotiated = mii_advertising & mii_partner;
	unsigned long duplex, speed;

	if (mii_partner == 0xffff)
		return;

	speed = (negotiated & 0x0380) ? METH_100MBIT : 0;
	duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ?
		 METH_PHY_FDX : 0;

	if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) {
		DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half");
		if (duplex)
			priv->mac_ctrl |= METH_PHY_FDX;
		else
			priv->mac_ctrl &= ~METH_PHY_FDX;
		mace->eth.mac_ctrl = priv->mac_ctrl;
	}

	if ((priv->mac_ctrl & METH_100MBIT) ^ speed) {
		DPRINTK("Setting %dMbs mode\n", speed ? 100 : 10);
		if (speed)
			priv->mac_ctrl |= METH_100MBIT;
		else
			priv->mac_ctrl &= ~METH_100MBIT;
		mace->eth.mac_ctrl = priv->mac_ctrl;
	}
}


static int meth_init_tx_ring(struct meth_private *priv)
{
	/* Init TX ring */
	priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE,
					   &priv->tx_ring_dma, GFP_ATOMIC);
	if (!priv->tx_ring)
		return -ENOMEM;
	memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE);
	priv->tx_count = priv->tx_read = priv->tx_write = 0;
	mace->eth.tx_ring_base = priv->tx_ring_dma;
	/* Now init skb save area */
	memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs));
	memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas));
	return 0;
}

static int meth_init_rx_ring(struct meth_private *priv)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0);
		/* 8byte status vector + 3quad padding + 2byte padding,
		 * to put data on 64bit aligned boundary */
		skb_reserve(priv->rx_skbs[i],METH_RX_HEAD);
		priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head);
		/* I'll need to re-sync it after each RX */
		priv->rx_ring_dmas[i] =
			dma_map_single(NULL, priv->rx_ring[i],
				       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		mace->eth.rx_fifo = priv->rx_ring_dmas[i];
	}
	priv->rx_write = 0;
	return 0;
}

static void meth_free_tx_ring(struct meth_private *priv)
{
	int i;

	/* Remove any pending skb */
	for (i = 0; i < TX_RING_ENTRIES; i++) {
		if (priv->tx_skbs[i])
			dev_kfree_skb(priv->tx_skbs[i]);
		priv->tx_skbs[i] = NULL;
	}
	dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring,
			  priv->tx_ring_dma);
}

/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */
static void meth_free_rx_ring(struct meth_private *priv)
{
	int i;

	for (i = 0; i < RX_RING_ENTRIES; i++) {
		dma_unmap_single(NULL, priv->rx_ring_dmas[i],
				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		priv->rx_ring[i] = 0;
		priv->rx_ring_dmas[i] = 0;
		kfree_skb(priv->rx_skbs[i]);
	}
}

int meth_reset(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);

	/* Reset card */
	mace->eth.mac_ctrl = SGI_MAC_RESET;
	udelay(1);
	mace->eth.mac_ctrl = 0;
	udelay(25);

	/* Load ethernet address */
	load_eaddr(dev);
	/* Should load some "errata", but later */

	/* Check for device */
	if (mdio_probe(priv) < 0) {
		DPRINTK("Unable to find PHY\n");
		return -ENODEV;
	}

	/* Initial mode: 10 | Half-duplex | Accept normal packets */
	priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG;
	if (dev->flags & IFF_PROMISC)
		priv->mac_ctrl |= METH_PROMISC;
	mace->eth.mac_ctrl = priv->mac_ctrl;

	/* Autonegotiate speed and duplex mode */
	meth_check_link(dev);

	/* Now set dma control, but don't enable DMA, yet */
	priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) |
			 (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	return 0;
}

/*============End Helper Routines=====================*/

/*
 * Open and close
 */
static int meth_open(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	int ret;

	priv->phy_addr = -1;	/* No PHY is known yet... */

	/* Initialize the hardware */
	ret = meth_reset(dev);
	if (ret < 0)
		return ret;

	/* Allocate the ring buffers */
	ret = meth_init_tx_ring(priv);
	if (ret < 0)
		return ret;
	ret = meth_init_rx_ring(priv);
	if (ret < 0)
		goto out_free_tx_ring;

	ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
	if (ret) {
		printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
		goto out_free_rx_ring;
	}

	/* Start DMA */
	priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/
			  METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	DPRINTK("About to start queue\n");
	netif_start_queue(dev);

	return 0;

out_free_rx_ring:
	meth_free_rx_ring(priv);
out_free_tx_ring:
	meth_free_tx_ring(priv);

	return ret;
}

static int meth_release(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);

	DPRINTK("Stopping queue\n");
	netif_stop_queue(dev); /* can't transmit any more */
	/* shut down DMA */
	priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN |
			    METH_DMA_RX_EN | METH_DMA_RX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;
	free_irq(dev->irq, dev);
	meth_free_tx_ring(priv);
	meth_free_rx_ring(priv);

	return 0;
}

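/*
 * RX path: the interrupt status word carries the chip's current
 * position in the 16-entry receive FIFO (METH_INT_RX_RPTR_MASK,
 * shifted down by 8 below); buffers are consumed from priv->rx_write
 * up to that position.  Each consumed buffer is handed to the stack
 * (or recycled on error), replaced with a freshly mapped skb, and the
 * new DMA address is written back into mace->eth.rx_fifo.
 */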
/*
 * Receive a packet: retrieve, encapsulate and pass over to upper levels
 */
static void meth_rx(struct net_device* dev, unsigned long int_status)
{
	struct sk_buff *skb;
	unsigned long status, flags;
	struct meth_private *priv = netdev_priv(dev);
	unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8;

	spin_lock_irqsave(&priv->meth_lock, flags);
	priv->dma_ctrl &= ~METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;
	spin_unlock_irqrestore(&priv->meth_lock, flags);

	if (int_status & METH_INT_RX_UNDERFLOW) {
		fifo_rptr = (fifo_rptr - 1) & 0x0f;
	}
	while (priv->rx_write != fifo_rptr) {
		dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write],
				 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		status = priv->rx_ring[priv->rx_write]->status.raw;
#if MFE_DEBUG
		if (!(status & METH_RX_ST_VALID)) {
			DPRINTK("Not received? status=%016lx\n",status);
		}
#endif
		if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) {
			int len = (status & 0xffff) - 4; /* omit CRC */
			/* length sanity check */
			if (len < 60 || len > 1518) {
				printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2Lx.\n",
				       dev->name, priv->rx_write,
				       priv->rx_ring[priv->rx_write]->status.raw);
				dev->stats.rx_errors++;
				dev->stats.rx_length_errors++;
				skb = priv->rx_skbs[priv->rx_write];
			} else {
				skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC);
				if (!skb) {
					/* Ouch! No memory! Drop packet on the floor */
					DPRINTK("No mem: dropping packet\n");
					dev->stats.rx_dropped++;
					skb = priv->rx_skbs[priv->rx_write];
				} else {
					struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write];
					/* 8byte status vector + 3quad padding + 2byte padding,
					 * to put data on 64bit aligned boundary */
					skb_reserve(skb, METH_RX_HEAD);
					/* Write metadata, and then pass to the receive level */
					skb_put(skb_c, len);
					priv->rx_skbs[priv->rx_write] = skb;
					skb_c->protocol = eth_type_trans(skb_c, dev);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
					netif_rx(skb_c);
				}
			}
		} else {
			dev->stats.rx_errors++;
			skb=priv->rx_skbs[priv->rx_write];
#if MFE_DEBUG>0
			printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status);
			if(status&METH_RX_ST_RCV_CODE_VIOLATION)
				printk(KERN_WARNING "Receive Code Violation\n");
			if(status&METH_RX_ST_CRC_ERR)
				printk(KERN_WARNING "CRC error\n");
			if(status&METH_RX_ST_INV_PREAMBLE_CTX)
				printk(KERN_WARNING "Invalid Preamble Context\n");
			if(status&METH_RX_ST_LONG_EVT_SEEN)
				printk(KERN_WARNING "Long Event Seen...\n");
			if(status&METH_RX_ST_BAD_PACKET)
				printk(KERN_WARNING "Bad Packet\n");
			if(status&METH_RX_ST_CARRIER_EVT_SEEN)
				printk(KERN_WARNING "Carrier Event Seen\n");
#endif
		}
		priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head;
		priv->rx_ring[priv->rx_write]->status.raw = 0;
		priv->rx_ring_dmas[priv->rx_write] =
			dma_map_single(NULL, priv->rx_ring[priv->rx_write],
				       METH_RX_BUFF_SIZE, DMA_FROM_DEVICE);
		mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write];
		ADVANCE_RX_PTR(priv->rx_write);
	}
	spin_lock_irqsave(&priv->meth_lock, flags);
	/* In case there was underflow, and Rx DMA was disabled */
	priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;
	mace->eth.int_stat = METH_INT_RX_THRESHOLD;
	spin_unlock_irqrestore(&priv->meth_lock, flags);
}

static int meth_tx_full(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);

	return (priv->tx_count >= TX_RING_ENTRIES - 1);
}

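/*
 * TX completion: the TX_INFO_RPTR field of the interrupt status
 * (shifted down by 16 below) is the hardware's descriptor read
 * pointer.  Every slot between priv->tx_read and that pointer has been
 * processed by the chip, so its skb can be freed and the slot reused.
 */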
static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long status, flags;
	struct sk_buff *skb;
	unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16;

	spin_lock_irqsave(&priv->meth_lock, flags);

	/* Stop DMA notification */
	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	while (priv->tx_read != rptr) {
		skb = priv->tx_skbs[priv->tx_read];
		status = priv->tx_ring[priv->tx_read].header.raw;
#if MFE_DEBUG>=1
		if (priv->tx_read == priv->tx_write)
			DPRINTK("Auchi! tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr);
#endif
		if (status & METH_TX_ST_DONE) {
			if (status & METH_TX_ST_SUCCESS){
				dev->stats.tx_packets++;
				dev->stats.tx_bytes += skb->len;
			} else {
				dev->stats.tx_errors++;
#if MFE_DEBUG>=1
				DPRINTK("TX error: status=%016lx <",status);
				if(status & METH_TX_ST_SUCCESS)
					printk(" SUCCESS");
				if(status & METH_TX_ST_TOOLONG)
					printk(" TOOLONG");
				if(status & METH_TX_ST_UNDERRUN)
					printk(" UNDERRUN");
				if(status & METH_TX_ST_EXCCOLL)
					printk(" EXCCOLL");
				if(status & METH_TX_ST_DEFER)
					printk(" DEFER");
				if(status & METH_TX_ST_LATECOLL)
					printk(" LATECOLL");
				printk(" >\n");
#endif
			}
		} else {
			DPRINTK("RPTR points us here, but packet not done?\n");
			break;
		}
		dev_kfree_skb_irq(skb);
		priv->tx_skbs[priv->tx_read] = NULL;
		priv->tx_ring[priv->tx_read].header.raw = 0;
		priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1);
		priv->tx_count--;
	}

	/* wake up queue if it was stopped */
	if (netif_queue_stopped(dev) && !meth_tx_full(dev)) {
		netif_wake_queue(dev);
	}

	mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT;
	spin_unlock_irqrestore(&priv->meth_lock, flags);
}

static void meth_error(struct net_device* dev, unsigned status)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "meth: error status: 0x%08x\n",status);
	/* check for errors too... */
	if (status & (METH_INT_TX_LINK_FAIL))
		printk(KERN_WARNING "meth: link failure\n");
	/* Should I do full reset in this case? */
	if (status & (METH_INT_MEM_ERROR))
		printk(KERN_WARNING "meth: memory error\n");
	if (status & (METH_INT_TX_ABORT))
		printk(KERN_WARNING "meth: aborted\n");
	if (status & (METH_INT_RX_OVERFLOW))
		printk(KERN_WARNING "meth: Rx overflow\n");
	if (status & (METH_INT_RX_UNDERFLOW)) {
		printk(KERN_WARNING "meth: Rx underflow\n");
		spin_lock_irqsave(&priv->meth_lock, flags);
		mace->eth.int_stat = METH_INT_RX_UNDERFLOW;
		/* more underflow interrupts will be delivered,
		 * effectively throwing us into an infinite loop.
		 * Thus I stop processing Rx in this case. */
		priv->dma_ctrl &= ~METH_DMA_RX_EN;
		mace->eth.dma_ctrl = priv->dma_ctrl;
		DPRINTK("Disabled meth Rx DMA temporarily\n");
		spin_unlock_irqrestore(&priv->meth_lock, flags);
	}
	mace->eth.int_stat = METH_INT_ERROR;
}

/*
 * The typical interrupt entry point
 */
static irqreturn_t meth_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct meth_private *priv = netdev_priv(dev);
	unsigned long status;

	status = mace->eth.int_stat;
	while (status & 0xff) {
		/* First handle errors - if we get Rx underflow,
		 * Rx DMA will be disabled, and Rx handler will reenable
		 * it. I don't think it's possible to get Rx underflow,
		 * without getting Rx interrupt */
		if (status & METH_INT_ERROR) {
			meth_error(dev, status);
		}
		if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) {
			/* a transmission is over: free the skb */
			meth_tx_cleanup(dev, status);
		}
		if (status & METH_INT_RX_THRESHOLD) {
			if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN))
				break;
			/* send it to meth_rx for handling */
			meth_rx(dev, status);
		}
		status = mace->eth.int_stat;
	}

	return IRQ_HANDLED;
}

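/*
 * TX descriptor preparation.  The TX ring is a single coherent DMA
 * allocation of TX_RING_ENTRIES descriptors, each 128 bytes: an 8-byte
 * command/status header followed by 120 bytes holding inline packet
 * data and/or "catenation" buffer pointers.  The header's low 16 bits
 * carry (packet length - 1); when n bytes are copied inline they sit
 * at the tail of the descriptor and (128 - n) is stored starting at
 * bit 16.  Packets of at most 120 bytes go entirely inline; larger
 * ones keep only the unaligned head inline and describe the
 * 8-byte-aligned remainder with one catenation buffer (TX_CATBUF1),
 * or two (TX_CATBUF2) when the data crosses a page boundary.
 */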
/*
 * Transmits packets that fit into TX descriptor (are <=120B)
 */
static void meth_tx_short_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;

	desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16);
	/* maybe I should set whole thing to 0 first... */
	skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len);
	if (skb->len < len)
		memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len);
}
#define TX_CATBUF1 BIT(25)
static void meth_tx_1page_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7);
	int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data);
	int buffer_len = skb->len - unaligned_len;
	dma_addr_t catbuf;

	desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1);

	/* unaligned part */
	if (unaligned_len) {
		skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
			      unaligned_len);
		desc->header.raw |= (128 - unaligned_len) << 16;
	}

	/* first page */
	catbuf = dma_map_single(NULL, buffer_data, buffer_len,
				DMA_TO_DEVICE);
	desc->data.cat_buf[0].form.start_addr = catbuf >> 3;
	desc->data.cat_buf[0].form.len = buffer_len - 1;
}
#define TX_CATBUF2 BIT(26)
static void meth_tx_2page_prepare(struct meth_private *priv,
				  struct sk_buff *skb)
{
	tx_packet *desc = &priv->tx_ring[priv->tx_write];
	void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7);
	void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data);
	int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data);
	int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data);
	int buffer2_len = skb->len - buffer1_len - unaligned_len;
	dma_addr_t catbuf1, catbuf2;

	desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1);
	/* unaligned part */
	if (unaligned_len){
		skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len),
			      unaligned_len);
		desc->header.raw |= (128 - unaligned_len) << 16;
	}

	/* first page */
	catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len,
				 DMA_TO_DEVICE);
	desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3;
	desc->data.cat_buf[0].form.len = buffer1_len - 1;
	/* second page */
	catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len,
				 DMA_TO_DEVICE);
	desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3;
	desc->data.cat_buf[1].form.len = buffer2_len - 1;
}

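/*
 * Queue one packet: pick the cheapest descriptor format for it, then
 * publish the new write pointer to the chip via the tx_info register.
 * The skb is remembered so meth_tx_cleanup() can free it once the
 * hardware reports the descriptor done.
 */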
static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb)
{
	/* Remember the skb, so we can free it at interrupt time */
	priv->tx_skbs[priv->tx_write] = skb;
	if (skb->len <= 120) {
		/* Whole packet fits into descriptor */
		meth_tx_short_prepare(priv, skb);
	} else if (PAGE_ALIGN((unsigned long)skb->data) !=
		   PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) {
		/* Packet crosses page boundary */
		meth_tx_2page_prepare(priv, skb);
	} else {
		/* Packet is in one page */
		meth_tx_1page_prepare(priv, skb);
	}
	priv->tx_write = (priv->tx_write + 1) & (TX_RING_ENTRIES - 1);
	mace->eth.tx_info = priv->tx_write;
	priv->tx_count++;
}

/*
 * Transmit a packet (called by the kernel)
 */
static int meth_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->meth_lock, flags);
	/* Stop DMA notification */
	priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN);
	mace->eth.dma_ctrl = priv->dma_ctrl;

	meth_add_to_tx_ring(priv, skb);
	dev->trans_start = jiffies; /* save the timestamp */

	/* If TX ring is full, tell the upper layer to stop sending packets */
	if (meth_tx_full(dev)) {
		printk(KERN_DEBUG "TX full: stopping\n");
		netif_stop_queue(dev);
	}

	/* Restart DMA notification */
	priv->dma_ctrl |= METH_DMA_TX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	spin_unlock_irqrestore(&priv->meth_lock, flags);

	return 0;
}

/*
 * Deal with a transmit timeout.
 */
static void meth_tx_timeout(struct net_device *dev)
{
	struct meth_private *priv = netdev_priv(dev);
	unsigned long flags;

	printk(KERN_WARNING "%s: transmit timed out\n", dev->name);

	/* Protect against concurrent rx interrupts */
	spin_lock_irqsave(&priv->meth_lock,flags);

	/* Try to reset the interface. */
	meth_reset(dev);

	dev->stats.tx_errors++;

	/* Clear all rings */
	meth_free_tx_ring(priv);
	meth_free_rx_ring(priv);
	meth_init_tx_ring(priv);
	meth_init_rx_ring(priv);

	/* Restart dma */
	priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN;
	mace->eth.dma_ctrl = priv->dma_ctrl;

	/* Enable interrupt */
	spin_unlock_irqrestore(&priv->meth_lock, flags);

	dev->trans_start = jiffies;
	netif_wake_queue(dev);

	return;
}

/*
 * Ioctl commands
 */
static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	/* XXX Not yet implemented */
	switch(cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
}

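/*
 * Not part of the original driver: a minimal sketch of how the MII
 * ioctls above could be serviced with the mdio_read() helper already
 * in this file (SIOCSMIIREG would additionally need an mdio_write()
 * helper, which this driver does not have).  Hypothetical and
 * illustrative only, hence kept out of the build.
 */
#if 0
static int meth_mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct meth_private *priv = netdev_priv(dev);
	struct mii_ioctl_data *data = if_mii(rq);

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = priv->phy_addr;
		return 0;
	case SIOCGMIIREG:
		data->val_out = mdio_read(priv, data->reg_num & 0x1f);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
#endif
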
static const struct net_device_ops meth_netdev_ops = {
	.ndo_open		= meth_open,
	.ndo_stop		= meth_release,
	.ndo_start_xmit		= meth_tx,
	.ndo_do_ioctl		= meth_ioctl,
	.ndo_tx_timeout		= meth_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

/*
 * The init function.
 */
static int __init meth_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct meth_private *priv;
	int err;

	dev = alloc_etherdev(sizeof(struct meth_private));
	if (!dev)
		return -ENOMEM;

	dev->netdev_ops		= &meth_netdev_ops;
	dev->watchdog_timeo	= timeout;
	dev->irq		= MACE_ETHERNET_IRQ;
	dev->base_addr		= (unsigned long)&mace->eth;
	memcpy(dev->dev_addr, o2meth_eaddr, 6);

	priv = netdev_priv(dev);
	spin_lock_init(&priv->meth_lock);
	SET_NETDEV_DEV(dev, &pdev->dev);

	err = register_netdev(dev);
	if (err) {
		free_netdev(dev);
		return err;
	}

	printk(KERN_INFO "%s: SGI MACE Ethernet rev. %d\n",
	       dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29));
	return 0;
}

static int __exit meth_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver meth_driver = {
	.probe	= meth_probe,
	.remove	= __devexit_p(meth_remove),
	.driver = {
		.name	= "meth",
		.owner	= THIS_MODULE,
	}
};

static int __init meth_init_module(void)
{
	int err;

	err = platform_driver_register(&meth_driver);
	if (err)
		printk(KERN_ERR "Driver registration failed\n");

	return err;
}

static void __exit meth_exit_module(void)
{
	platform_driver_unregister(&meth_driver);
}

module_init(meth_init_module);
module_exit(meth_exit_module);

MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>");
MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:meth");