/* Source snapshot: drivers/net/meth.c at Linux v2.6.24-rc2 (853 lines, 24 kB) */
1/* 2 * meth.c -- O2 Builtin 10/100 Ethernet driver 3 * 4 * Copyright (C) 2001-2003 Ilya Volynets 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 */ 11#include <linux/delay.h> 12#include <linux/dma-mapping.h> 13#include <linux/init.h> 14#include <linux/kernel.h> 15#include <linux/module.h> 16#include <linux/platform_device.h> 17#include <linux/slab.h> 18#include <linux/errno.h> 19#include <linux/types.h> 20#include <linux/interrupt.h> 21 22#include <linux/in.h> 23#include <linux/in6.h> 24#include <linux/device.h> /* struct device, et al */ 25#include <linux/netdevice.h> /* struct device, and other headers */ 26#include <linux/etherdevice.h> /* eth_type_trans */ 27#include <linux/ip.h> /* struct iphdr */ 28#include <linux/tcp.h> /* struct tcphdr */ 29#include <linux/skbuff.h> 30#include <linux/mii.h> /* MII definitions */ 31 32#include <asm/ip32/mace.h> 33#include <asm/ip32/ip32_ints.h> 34 35#include <asm/io.h> 36 37#include "meth.h" 38 39#ifndef MFE_DEBUG 40#define MFE_DEBUG 0 41#endif 42 43#if MFE_DEBUG>=1 44#define DPRINTK(str,args...) printk(KERN_DEBUG "meth: %s: " str, __FUNCTION__ , ## args) 45#define MFE_RX_DEBUG 2 46#else 47#define DPRINTK(str,args...) 48#define MFE_RX_DEBUG 0 49#endif 50 51 52static const char *meth_str="SGI O2 Fast Ethernet"; 53 54#define HAVE_TX_TIMEOUT 55/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */ 56#define TX_TIMEOUT (400*HZ/1000) 57 58#ifdef HAVE_TX_TIMEOUT 59static int timeout = TX_TIMEOUT; 60module_param(timeout, int, 0); 61#endif 62 63/* 64 * This structure is private to each device. 
It is used to pass 65 * packets in and out, so there is place for a packet 66 */ 67struct meth_private { 68 /* in-memory copy of MAC Control register */ 69 unsigned long mac_ctrl; 70 /* in-memory copy of DMA Control register */ 71 unsigned long dma_ctrl; 72 /* address of PHY, used by mdio_* functions, initialized in mdio_probe */ 73 unsigned long phy_addr; 74 tx_packet *tx_ring; 75 dma_addr_t tx_ring_dma; 76 struct sk_buff *tx_skbs[TX_RING_ENTRIES]; 77 dma_addr_t tx_skb_dmas[TX_RING_ENTRIES]; 78 unsigned long tx_read, tx_write, tx_count; 79 80 rx_packet *rx_ring[RX_RING_ENTRIES]; 81 dma_addr_t rx_ring_dmas[RX_RING_ENTRIES]; 82 struct sk_buff *rx_skbs[RX_RING_ENTRIES]; 83 unsigned long rx_write; 84 85 spinlock_t meth_lock; 86}; 87 88static void meth_tx_timeout(struct net_device *dev); 89static irqreturn_t meth_interrupt(int irq, void *dev_id); 90 91/* global, initialized in ip32-setup.c */ 92char o2meth_eaddr[8]={0,0,0,0,0,0,0,0}; 93 94static inline void load_eaddr(struct net_device *dev) 95{ 96 int i; 97 DECLARE_MAC_BUF(mac); 98 99 for (i = 0; i < 6; i++) 100 dev->dev_addr[i] = o2meth_eaddr[i]; 101 DPRINTK("Loading MAC Address: %s\n", print_mac(mac, dev->dev_addr)); 102 mace->eth.mac_addr = (*(unsigned long*)o2meth_eaddr) >> 16; 103} 104 105/* 106 * Waits for BUSY status of mdio bus to clear 107 */ 108#define WAIT_FOR_PHY(___rval) \ 109 while ((___rval = mace->eth.phy_data) & MDIO_BUSY) { \ 110 udelay(25); \ 111 } 112/*read phy register, return value read */ 113static unsigned long mdio_read(struct meth_private *priv, unsigned long phyreg) 114{ 115 unsigned long rval; 116 WAIT_FOR_PHY(rval); 117 mace->eth.phy_regs = (priv->phy_addr << 5) | (phyreg & 0x1f); 118 udelay(25); 119 mace->eth.phy_trans_go = 1; 120 udelay(25); 121 WAIT_FOR_PHY(rval); 122 return rval & MDIO_DATA_MASK; 123} 124 125static int mdio_probe(struct meth_private *priv) 126{ 127 int i; 128 unsigned long p2, p3; 129 /* check if phy is detected already */ 130 if(priv->phy_addr>=0&&priv->phy_addr<32) 
131 return 0; 132 spin_lock(&priv->meth_lock); 133 for (i=0;i<32;++i){ 134 priv->phy_addr=i; 135 p2=mdio_read(priv,2); 136 p3=mdio_read(priv,3); 137#if MFE_DEBUG>=2 138 switch ((p2<<12)|(p3>>4)){ 139 case PHY_QS6612X: 140 DPRINTK("PHY is QS6612X\n"); 141 break; 142 case PHY_ICS1889: 143 DPRINTK("PHY is ICS1889\n"); 144 break; 145 case PHY_ICS1890: 146 DPRINTK("PHY is ICS1890\n"); 147 break; 148 case PHY_DP83840: 149 DPRINTK("PHY is DP83840\n"); 150 break; 151 } 152#endif 153 if(p2!=0xffff&&p2!=0x0000){ 154 DPRINTK("PHY code: %x\n",(p2<<12)|(p3>>4)); 155 break; 156 } 157 } 158 spin_unlock(&priv->meth_lock); 159 if(priv->phy_addr<32) { 160 return 0; 161 } 162 DPRINTK("Oopsie! PHY is not known!\n"); 163 priv->phy_addr=-1; 164 return -ENODEV; 165} 166 167static void meth_check_link(struct net_device *dev) 168{ 169 struct meth_private *priv = netdev_priv(dev); 170 unsigned long mii_advertising = mdio_read(priv, 4); 171 unsigned long mii_partner = mdio_read(priv, 5); 172 unsigned long negotiated = mii_advertising & mii_partner; 173 unsigned long duplex, speed; 174 175 if (mii_partner == 0xffff) 176 return; 177 178 speed = (negotiated & 0x0380) ? METH_100MBIT : 0; 179 duplex = ((negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040) ? 180 METH_PHY_FDX : 0; 181 182 if ((priv->mac_ctrl & METH_PHY_FDX) ^ duplex) { 183 DPRINTK("Setting %s-duplex\n", duplex ? "full" : "half"); 184 if (duplex) 185 priv->mac_ctrl |= METH_PHY_FDX; 186 else 187 priv->mac_ctrl &= ~METH_PHY_FDX; 188 mace->eth.mac_ctrl = priv->mac_ctrl; 189 } 190 191 if ((priv->mac_ctrl & METH_100MBIT) ^ speed) { 192 DPRINTK("Setting %dMbs mode\n", speed ? 
100 : 10); 193 if (duplex) 194 priv->mac_ctrl |= METH_100MBIT; 195 else 196 priv->mac_ctrl &= ~METH_100MBIT; 197 mace->eth.mac_ctrl = priv->mac_ctrl; 198 } 199} 200 201 202static int meth_init_tx_ring(struct meth_private *priv) 203{ 204 /* Init TX ring */ 205 priv->tx_ring = dma_alloc_coherent(NULL, TX_RING_BUFFER_SIZE, 206 &priv->tx_ring_dma, GFP_ATOMIC); 207 if (!priv->tx_ring) 208 return -ENOMEM; 209 memset(priv->tx_ring, 0, TX_RING_BUFFER_SIZE); 210 priv->tx_count = priv->tx_read = priv->tx_write = 0; 211 mace->eth.tx_ring_base = priv->tx_ring_dma; 212 /* Now init skb save area */ 213 memset(priv->tx_skbs, 0, sizeof(priv->tx_skbs)); 214 memset(priv->tx_skb_dmas, 0, sizeof(priv->tx_skb_dmas)); 215 return 0; 216} 217 218static int meth_init_rx_ring(struct meth_private *priv) 219{ 220 int i; 221 222 for (i = 0; i < RX_RING_ENTRIES; i++) { 223 priv->rx_skbs[i] = alloc_skb(METH_RX_BUFF_SIZE, 0); 224 /* 8byte status vector + 3quad padding + 2byte padding, 225 * to put data on 64bit aligned boundary */ 226 skb_reserve(priv->rx_skbs[i],METH_RX_HEAD); 227 priv->rx_ring[i]=(rx_packet*)(priv->rx_skbs[i]->head); 228 /* I'll need to re-sync it after each RX */ 229 priv->rx_ring_dmas[i] = 230 dma_map_single(NULL, priv->rx_ring[i], 231 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); 232 mace->eth.rx_fifo = priv->rx_ring_dmas[i]; 233 } 234 priv->rx_write = 0; 235 return 0; 236} 237static void meth_free_tx_ring(struct meth_private *priv) 238{ 239 int i; 240 241 /* Remove any pending skb */ 242 for (i = 0; i < TX_RING_ENTRIES; i++) { 243 if (priv->tx_skbs[i]) 244 dev_kfree_skb(priv->tx_skbs[i]); 245 priv->tx_skbs[i] = NULL; 246 } 247 dma_free_coherent(NULL, TX_RING_BUFFER_SIZE, priv->tx_ring, 248 priv->tx_ring_dma); 249} 250 251/* Presumes RX DMA engine is stopped, and RX fifo ring is reset */ 252static void meth_free_rx_ring(struct meth_private *priv) 253{ 254 int i; 255 256 for (i = 0; i < RX_RING_ENTRIES; i++) { 257 dma_unmap_single(NULL, priv->rx_ring_dmas[i], 258 METH_RX_BUFF_SIZE, 
DMA_FROM_DEVICE); 259 priv->rx_ring[i] = 0; 260 priv->rx_ring_dmas[i] = 0; 261 kfree_skb(priv->rx_skbs[i]); 262 } 263} 264 265int meth_reset(struct net_device *dev) 266{ 267 struct meth_private *priv = netdev_priv(dev); 268 269 /* Reset card */ 270 mace->eth.mac_ctrl = SGI_MAC_RESET; 271 udelay(1); 272 mace->eth.mac_ctrl = 0; 273 udelay(25); 274 275 /* Load ethernet address */ 276 load_eaddr(dev); 277 /* Should load some "errata", but later */ 278 279 /* Check for device */ 280 if (mdio_probe(priv) < 0) { 281 DPRINTK("Unable to find PHY\n"); 282 return -ENODEV; 283 } 284 285 /* Initial mode: 10 | Half-duplex | Accept normal packets */ 286 priv->mac_ctrl = METH_ACCEPT_MCAST | METH_DEFAULT_IPG; 287 if (dev->flags | IFF_PROMISC) 288 priv->mac_ctrl |= METH_PROMISC; 289 mace->eth.mac_ctrl = priv->mac_ctrl; 290 291 /* Autonegotiate speed and duplex mode */ 292 meth_check_link(dev); 293 294 /* Now set dma control, but don't enable DMA, yet */ 295 priv->dma_ctrl = (4 << METH_RX_OFFSET_SHIFT) | 296 (RX_RING_ENTRIES << METH_RX_DEPTH_SHIFT); 297 mace->eth.dma_ctrl = priv->dma_ctrl; 298 299 return 0; 300} 301 302/*============End Helper Routines=====================*/ 303 304/* 305 * Open and close 306 */ 307static int meth_open(struct net_device *dev) 308{ 309 struct meth_private *priv = netdev_priv(dev); 310 int ret; 311 312 priv->phy_addr = -1; /* No PHY is known yet... 
*/ 313 314 /* Initialize the hardware */ 315 ret = meth_reset(dev); 316 if (ret < 0) 317 return ret; 318 319 /* Allocate the ring buffers */ 320 ret = meth_init_tx_ring(priv); 321 if (ret < 0) 322 return ret; 323 ret = meth_init_rx_ring(priv); 324 if (ret < 0) 325 goto out_free_tx_ring; 326 327 ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev); 328 if (ret) { 329 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq); 330 goto out_free_rx_ring; 331 } 332 333 /* Start DMA */ 334 priv->dma_ctrl |= METH_DMA_TX_EN | /*METH_DMA_TX_INT_EN |*/ 335 METH_DMA_RX_EN | METH_DMA_RX_INT_EN; 336 mace->eth.dma_ctrl = priv->dma_ctrl; 337 338 DPRINTK("About to start queue\n"); 339 netif_start_queue(dev); 340 341 return 0; 342 343out_free_rx_ring: 344 meth_free_rx_ring(priv); 345out_free_tx_ring: 346 meth_free_tx_ring(priv); 347 348 return ret; 349} 350 351static int meth_release(struct net_device *dev) 352{ 353 struct meth_private *priv = netdev_priv(dev); 354 355 DPRINTK("Stopping queue\n"); 356 netif_stop_queue(dev); /* can't transmit any more */ 357 /* shut down DMA */ 358 priv->dma_ctrl &= ~(METH_DMA_TX_EN | METH_DMA_TX_INT_EN | 359 METH_DMA_RX_EN | METH_DMA_RX_INT_EN); 360 mace->eth.dma_ctrl = priv->dma_ctrl; 361 free_irq(dev->irq, dev); 362 meth_free_tx_ring(priv); 363 meth_free_rx_ring(priv); 364 365 return 0; 366} 367 368/* 369 * Receive a packet: retrieve, encapsulate and pass over to upper levels 370 */ 371static void meth_rx(struct net_device* dev, unsigned long int_status) 372{ 373 struct sk_buff *skb; 374 unsigned long status; 375 struct meth_private *priv = netdev_priv(dev); 376 unsigned long fifo_rptr = (int_status & METH_INT_RX_RPTR_MASK) >> 8; 377 378 spin_lock(&priv->meth_lock); 379 priv->dma_ctrl &= ~METH_DMA_RX_INT_EN; 380 mace->eth.dma_ctrl = priv->dma_ctrl; 381 spin_unlock(&priv->meth_lock); 382 383 if (int_status & METH_INT_RX_UNDERFLOW) { 384 fifo_rptr = (fifo_rptr - 1) & 0x0f; 385 } 386 while (priv->rx_write != fifo_rptr) { 387 
dma_unmap_single(NULL, priv->rx_ring_dmas[priv->rx_write], 388 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); 389 status = priv->rx_ring[priv->rx_write]->status.raw; 390#if MFE_DEBUG 391 if (!(status & METH_RX_ST_VALID)) { 392 DPRINTK("Not received? status=%016lx\n",status); 393 } 394#endif 395 if ((!(status & METH_RX_STATUS_ERRORS)) && (status & METH_RX_ST_VALID)) { 396 int len = (status & 0xffff) - 4; /* omit CRC */ 397 /* length sanity check */ 398 if (len < 60 || len > 1518) { 399 printk(KERN_DEBUG "%s: bogus packet size: %ld, status=%#2lx.\n", 400 dev->name, priv->rx_write, 401 priv->rx_ring[priv->rx_write]->status.raw); 402 dev->stats.rx_errors++; 403 dev->stats.rx_length_errors++; 404 skb = priv->rx_skbs[priv->rx_write]; 405 } else { 406 skb = alloc_skb(METH_RX_BUFF_SIZE, GFP_ATOMIC); 407 if (!skb) { 408 /* Ouch! No memory! Drop packet on the floor */ 409 DPRINTK("No mem: dropping packet\n"); 410 dev->stats.rx_dropped++; 411 skb = priv->rx_skbs[priv->rx_write]; 412 } else { 413 struct sk_buff *skb_c = priv->rx_skbs[priv->rx_write]; 414 /* 8byte status vector + 3quad padding + 2byte padding, 415 * to put data on 64bit aligned boundary */ 416 skb_reserve(skb, METH_RX_HEAD); 417 /* Write metadata, and then pass to the receive level */ 418 skb_put(skb_c, len); 419 priv->rx_skbs[priv->rx_write] = skb; 420 skb_c->protocol = eth_type_trans(skb_c, dev); 421 dev->last_rx = jiffies; 422 dev->stats.rx_packets++; 423 dev->stats.rx_bytes += len; 424 netif_rx(skb_c); 425 } 426 } 427 } else { 428 dev->stats.rx_errors++; 429 skb=priv->rx_skbs[priv->rx_write]; 430#if MFE_DEBUG>0 431 printk(KERN_WARNING "meth: RX error: status=0x%016lx\n",status); 432 if(status&METH_RX_ST_RCV_CODE_VIOLATION) 433 printk(KERN_WARNING "Receive Code Violation\n"); 434 if(status&METH_RX_ST_CRC_ERR) 435 printk(KERN_WARNING "CRC error\n"); 436 if(status&METH_RX_ST_INV_PREAMBLE_CTX) 437 printk(KERN_WARNING "Invalid Preamble Context\n"); 438 if(status&METH_RX_ST_LONG_EVT_SEEN) 439 printk(KERN_WARNING "Long 
Event Seen...\n"); 440 if(status&METH_RX_ST_BAD_PACKET) 441 printk(KERN_WARNING "Bad Packet\n"); 442 if(status&METH_RX_ST_CARRIER_EVT_SEEN) 443 printk(KERN_WARNING "Carrier Event Seen\n"); 444#endif 445 } 446 priv->rx_ring[priv->rx_write] = (rx_packet*)skb->head; 447 priv->rx_ring[priv->rx_write]->status.raw = 0; 448 priv->rx_ring_dmas[priv->rx_write] = 449 dma_map_single(NULL, priv->rx_ring[priv->rx_write], 450 METH_RX_BUFF_SIZE, DMA_FROM_DEVICE); 451 mace->eth.rx_fifo = priv->rx_ring_dmas[priv->rx_write]; 452 ADVANCE_RX_PTR(priv->rx_write); 453 } 454 spin_lock(&priv->meth_lock); 455 /* In case there was underflow, and Rx DMA was disabled */ 456 priv->dma_ctrl |= METH_DMA_RX_INT_EN | METH_DMA_RX_EN; 457 mace->eth.dma_ctrl = priv->dma_ctrl; 458 mace->eth.int_stat = METH_INT_RX_THRESHOLD; 459 spin_unlock(&priv->meth_lock); 460} 461 462static int meth_tx_full(struct net_device *dev) 463{ 464 struct meth_private *priv = netdev_priv(dev); 465 466 return (priv->tx_count >= TX_RING_ENTRIES - 1); 467} 468 469static void meth_tx_cleanup(struct net_device* dev, unsigned long int_status) 470{ 471 struct meth_private *priv = netdev_priv(dev); 472 unsigned long status; 473 struct sk_buff *skb; 474 unsigned long rptr = (int_status&TX_INFO_RPTR) >> 16; 475 476 spin_lock(&priv->meth_lock); 477 478 /* Stop DMA notification */ 479 priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); 480 mace->eth.dma_ctrl = priv->dma_ctrl; 481 482 while (priv->tx_read != rptr) { 483 skb = priv->tx_skbs[priv->tx_read]; 484 status = priv->tx_ring[priv->tx_read].header.raw; 485#if MFE_DEBUG>=1 486 if (priv->tx_read == priv->tx_write) 487 DPRINTK("Auchi! 
tx_read=%d,tx_write=%d,rptr=%d?\n", priv->tx_read, priv->tx_write,rptr); 488#endif 489 if (status & METH_TX_ST_DONE) { 490 if (status & METH_TX_ST_SUCCESS){ 491 dev->stats.tx_packets++; 492 dev->stats.tx_bytes += skb->len; 493 } else { 494 dev->stats.tx_errors++; 495#if MFE_DEBUG>=1 496 DPRINTK("TX error: status=%016lx <",status); 497 if(status & METH_TX_ST_SUCCESS) 498 printk(" SUCCESS"); 499 if(status & METH_TX_ST_TOOLONG) 500 printk(" TOOLONG"); 501 if(status & METH_TX_ST_UNDERRUN) 502 printk(" UNDERRUN"); 503 if(status & METH_TX_ST_EXCCOLL) 504 printk(" EXCCOLL"); 505 if(status & METH_TX_ST_DEFER) 506 printk(" DEFER"); 507 if(status & METH_TX_ST_LATECOLL) 508 printk(" LATECOLL"); 509 printk(" >\n"); 510#endif 511 } 512 } else { 513 DPRINTK("RPTR points us here, but packet not done?\n"); 514 break; 515 } 516 dev_kfree_skb_irq(skb); 517 priv->tx_skbs[priv->tx_read] = NULL; 518 priv->tx_ring[priv->tx_read].header.raw = 0; 519 priv->tx_read = (priv->tx_read+1)&(TX_RING_ENTRIES-1); 520 priv->tx_count--; 521 } 522 523 /* wake up queue if it was stopped */ 524 if (netif_queue_stopped(dev) && !meth_tx_full(dev)) { 525 netif_wake_queue(dev); 526 } 527 528 mace->eth.int_stat = METH_INT_TX_EMPTY | METH_INT_TX_PKT; 529 spin_unlock(&priv->meth_lock); 530} 531 532static void meth_error(struct net_device* dev, unsigned status) 533{ 534 struct meth_private *priv = netdev_priv(dev); 535 536 printk(KERN_WARNING "meth: error status: 0x%08x\n",status); 537 /* check for errors too... */ 538 if (status & (METH_INT_TX_LINK_FAIL)) 539 printk(KERN_WARNING "meth: link failure\n"); 540 /* Should I do full reset in this case? 
*/ 541 if (status & (METH_INT_MEM_ERROR)) 542 printk(KERN_WARNING "meth: memory error\n"); 543 if (status & (METH_INT_TX_ABORT)) 544 printk(KERN_WARNING "meth: aborted\n"); 545 if (status & (METH_INT_RX_OVERFLOW)) 546 printk(KERN_WARNING "meth: Rx overflow\n"); 547 if (status & (METH_INT_RX_UNDERFLOW)) { 548 printk(KERN_WARNING "meth: Rx underflow\n"); 549 spin_lock(&priv->meth_lock); 550 mace->eth.int_stat = METH_INT_RX_UNDERFLOW; 551 /* more underflow interrupts will be delivered, 552 * effectively throwing us into an infinite loop. 553 * Thus I stop processing Rx in this case. */ 554 priv->dma_ctrl &= ~METH_DMA_RX_EN; 555 mace->eth.dma_ctrl = priv->dma_ctrl; 556 DPRINTK("Disabled meth Rx DMA temporarily\n"); 557 spin_unlock(&priv->meth_lock); 558 } 559 mace->eth.int_stat = METH_INT_ERROR; 560} 561 562/* 563 * The typical interrupt entry point 564 */ 565static irqreturn_t meth_interrupt(int irq, void *dev_id) 566{ 567 struct net_device *dev = (struct net_device *)dev_id; 568 struct meth_private *priv = netdev_priv(dev); 569 unsigned long status; 570 571 status = mace->eth.int_stat; 572 while (status & 0xff) { 573 /* First handle errors - if we get Rx underflow, 574 * Rx DMA will be disabled, and Rx handler will reenable 575 * it. 
I don't think it's possible to get Rx underflow, 576 * without getting Rx interrupt */ 577 if (status & METH_INT_ERROR) { 578 meth_error(dev, status); 579 } 580 if (status & (METH_INT_TX_EMPTY | METH_INT_TX_PKT)) { 581 /* a transmission is over: free the skb */ 582 meth_tx_cleanup(dev, status); 583 } 584 if (status & METH_INT_RX_THRESHOLD) { 585 if (!(priv->dma_ctrl & METH_DMA_RX_INT_EN)) 586 break; 587 /* send it to meth_rx for handling */ 588 meth_rx(dev, status); 589 } 590 status = mace->eth.int_stat; 591 } 592 593 return IRQ_HANDLED; 594} 595 596/* 597 * Transmits packets that fit into TX descriptor (are <=120B) 598 */ 599static void meth_tx_short_prepare(struct meth_private *priv, 600 struct sk_buff *skb) 601{ 602 tx_packet *desc = &priv->tx_ring[priv->tx_write]; 603 int len = (skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len; 604 605 desc->header.raw = METH_TX_CMD_INT_EN | (len-1) | ((128-len) << 16); 606 /* maybe I should set whole thing to 0 first... */ 607 skb_copy_from_linear_data(skb, desc->data.dt + (120 - len), skb->len); 608 if (skb->len < len) 609 memset(desc->data.dt + 120 - len + skb->len, 0, len-skb->len); 610} 611#define TX_CATBUF1 BIT(25) 612static void meth_tx_1page_prepare(struct meth_private *priv, 613 struct sk_buff *skb) 614{ 615 tx_packet *desc = &priv->tx_ring[priv->tx_write]; 616 void *buffer_data = (void *)(((unsigned long)skb->data + 7) & ~7); 617 int unaligned_len = (int)((unsigned long)buffer_data - (unsigned long)skb->data); 618 int buffer_len = skb->len - unaligned_len; 619 dma_addr_t catbuf; 620 621 desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | (skb->len - 1); 622 623 /* unaligned part */ 624 if (unaligned_len) { 625 skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), 626 unaligned_len); 627 desc->header.raw |= (128 - unaligned_len) << 16; 628 } 629 630 /* first page */ 631 catbuf = dma_map_single(NULL, buffer_data, buffer_len, 632 DMA_TO_DEVICE); 633 desc->data.cat_buf[0].form.start_addr = catbuf >> 3; 634 
desc->data.cat_buf[0].form.len = buffer_len - 1; 635} 636#define TX_CATBUF2 BIT(26) 637static void meth_tx_2page_prepare(struct meth_private *priv, 638 struct sk_buff *skb) 639{ 640 tx_packet *desc = &priv->tx_ring[priv->tx_write]; 641 void *buffer1_data = (void *)(((unsigned long)skb->data + 7) & ~7); 642 void *buffer2_data = (void *)PAGE_ALIGN((unsigned long)skb->data); 643 int unaligned_len = (int)((unsigned long)buffer1_data - (unsigned long)skb->data); 644 int buffer1_len = (int)((unsigned long)buffer2_data - (unsigned long)buffer1_data); 645 int buffer2_len = skb->len - buffer1_len - unaligned_len; 646 dma_addr_t catbuf1, catbuf2; 647 648 desc->header.raw = METH_TX_CMD_INT_EN | TX_CATBUF1 | TX_CATBUF2| (skb->len - 1); 649 /* unaligned part */ 650 if (unaligned_len){ 651 skb_copy_from_linear_data(skb, desc->data.dt + (120 - unaligned_len), 652 unaligned_len); 653 desc->header.raw |= (128 - unaligned_len) << 16; 654 } 655 656 /* first page */ 657 catbuf1 = dma_map_single(NULL, buffer1_data, buffer1_len, 658 DMA_TO_DEVICE); 659 desc->data.cat_buf[0].form.start_addr = catbuf1 >> 3; 660 desc->data.cat_buf[0].form.len = buffer1_len - 1; 661 /* second page */ 662 catbuf2 = dma_map_single(NULL, buffer2_data, buffer2_len, 663 DMA_TO_DEVICE); 664 desc->data.cat_buf[1].form.start_addr = catbuf2 >> 3; 665 desc->data.cat_buf[1].form.len = buffer2_len - 1; 666} 667 668static void meth_add_to_tx_ring(struct meth_private *priv, struct sk_buff *skb) 669{ 670 /* Remember the skb, so we can free it at interrupt time */ 671 priv->tx_skbs[priv->tx_write] = skb; 672 if (skb->len <= 120) { 673 /* Whole packet fits into descriptor */ 674 meth_tx_short_prepare(priv, skb); 675 } else if (PAGE_ALIGN((unsigned long)skb->data) != 676 PAGE_ALIGN((unsigned long)skb->data + skb->len - 1)) { 677 /* Packet crosses page boundary */ 678 meth_tx_2page_prepare(priv, skb); 679 } else { 680 /* Packet is in one page */ 681 meth_tx_1page_prepare(priv, skb); 682 } 683 priv->tx_write = (priv->tx_write 
+ 1) & (TX_RING_ENTRIES - 1); 684 mace->eth.tx_info = priv->tx_write; 685 priv->tx_count++; 686} 687 688/* 689 * Transmit a packet (called by the kernel) 690 */ 691static int meth_tx(struct sk_buff *skb, struct net_device *dev) 692{ 693 struct meth_private *priv = netdev_priv(dev); 694 unsigned long flags; 695 696 spin_lock_irqsave(&priv->meth_lock, flags); 697 /* Stop DMA notification */ 698 priv->dma_ctrl &= ~(METH_DMA_TX_INT_EN); 699 mace->eth.dma_ctrl = priv->dma_ctrl; 700 701 meth_add_to_tx_ring(priv, skb); 702 dev->trans_start = jiffies; /* save the timestamp */ 703 704 /* If TX ring is full, tell the upper layer to stop sending packets */ 705 if (meth_tx_full(dev)) { 706 printk(KERN_DEBUG "TX full: stopping\n"); 707 netif_stop_queue(dev); 708 } 709 710 /* Restart DMA notification */ 711 priv->dma_ctrl |= METH_DMA_TX_INT_EN; 712 mace->eth.dma_ctrl = priv->dma_ctrl; 713 714 spin_unlock_irqrestore(&priv->meth_lock, flags); 715 716 return 0; 717} 718 719/* 720 * Deal with a transmit timeout. 721 */ 722static void meth_tx_timeout(struct net_device *dev) 723{ 724 struct meth_private *priv = netdev_priv(dev); 725 unsigned long flags; 726 727 printk(KERN_WARNING "%s: transmit timed out\n", dev->name); 728 729 /* Protect against concurrent rx interrupts */ 730 spin_lock_irqsave(&priv->meth_lock,flags); 731 732 /* Try to reset the interface. 
*/ 733 meth_reset(dev); 734 735 dev->stats.tx_errors++; 736 737 /* Clear all rings */ 738 meth_free_tx_ring(priv); 739 meth_free_rx_ring(priv); 740 meth_init_tx_ring(priv); 741 meth_init_rx_ring(priv); 742 743 /* Restart dma */ 744 priv->dma_ctrl |= METH_DMA_TX_EN | METH_DMA_RX_EN | METH_DMA_RX_INT_EN; 745 mace->eth.dma_ctrl = priv->dma_ctrl; 746 747 /* Enable interrupt */ 748 spin_unlock_irqrestore(&priv->meth_lock, flags); 749 750 dev->trans_start = jiffies; 751 netif_wake_queue(dev); 752 753 return; 754} 755 756/* 757 * Ioctl commands 758 */ 759static int meth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 760{ 761 /* XXX Not yet implemented */ 762 switch(cmd) { 763 case SIOCGMIIPHY: 764 case SIOCGMIIREG: 765 case SIOCSMIIREG: 766 default: 767 return -EOPNOTSUPP; 768 } 769} 770 771/* 772 * Return statistics to the caller 773 */ 774/* 775 * The init function. 776 */ 777static int __init meth_probe(struct platform_device *pdev) 778{ 779 struct net_device *dev; 780 struct meth_private *priv; 781 int err; 782 783 dev = alloc_etherdev(sizeof(struct meth_private)); 784 if (!dev) 785 return -ENOMEM; 786 787 dev->open = meth_open; 788 dev->stop = meth_release; 789 dev->hard_start_xmit = meth_tx; 790 dev->do_ioctl = meth_ioctl; 791#ifdef HAVE_TX_TIMEOUT 792 dev->tx_timeout = meth_tx_timeout; 793 dev->watchdog_timeo = timeout; 794#endif 795 dev->irq = MACE_ETHERNET_IRQ; 796 dev->base_addr = (unsigned long)&mace->eth; 797 798 priv = netdev_priv(dev); 799 spin_lock_init(&priv->meth_lock); 800 SET_NETDEV_DEV(dev, &pdev->dev); 801 802 err = register_netdev(dev); 803 if (err) { 804 free_netdev(dev); 805 return err; 806 } 807 808 printk(KERN_INFO "%s: SGI MACE Ethernet rev. 
%d\n", 809 dev->name, (unsigned int)(mace->eth.mac_ctrl >> 29)); 810 return 0; 811} 812 813static int __exit meth_remove(struct platform_device *pdev) 814{ 815 struct net_device *dev = platform_get_drvdata(pdev); 816 817 unregister_netdev(dev); 818 free_netdev(dev); 819 platform_set_drvdata(pdev, NULL); 820 821 return 0; 822} 823 824static struct platform_driver meth_driver = { 825 .probe = meth_probe, 826 .remove = __devexit_p(meth_remove), 827 .driver = { 828 .name = "meth", 829 } 830}; 831 832static int __init meth_init_module(void) 833{ 834 int err; 835 836 err = platform_driver_register(&meth_driver); 837 if (err) 838 printk(KERN_ERR "Driver registration failed\n"); 839 840 return err; 841} 842 843static void __exit meth_exit_module(void) 844{ 845 platform_driver_unregister(&meth_driver); 846} 847 848module_init(meth_init_module); 849module_exit(meth_exit_module); 850 851MODULE_AUTHOR("Ilya Volynets <ilya@theIlya.com>"); 852MODULE_DESCRIPTION("SGI O2 Builtin Fast Ethernet driver"); 853MODULE_LICENSE("GPL");