/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
   Based on Apricot.c
   Written 1994 by Mark Evans.
   This driver is for the Apricot 82596 bus-master interface

   Modularised 12/94 Mark Evans


   Modified to support the 82596 ethernet chips on 680x0 VME boards.
   by Richard Hirst <richard@sleepie.demon.co.uk>
   Renamed to be 82596.c

   980825:  Changed to receive directly in to sk_buffs which are
   allocated at open() time.  Eliminates copy on incoming frames
   (small ones are still copied).  Shared data now held in a
   non-cached page, so we can run on 68060 in copyback mode.

   TBD:
   * look at deferring rx frames rather than discarding (as per tulip)
   * handle tx ring full as per tulip
   * performance test to tune rx_copybreak

   Most of my modifications relate to the braindead big-endian
   implementation by Intel.  When the i596 is operating in
   'big-endian' mode, it thinks a 32 bit value of 0x12345678
   should be stored as 0x56781234.  This is a real pain, when
   you have linked lists which are shared by the 680x0 and the
   i596.

   Driver skeleton
   Written 1993 by Donald Becker.
   Copyright 1993 United States Government as represented by the Director,
   National Security Agency. This software may only be used and distributed
   according to the terms of the GNU General Public License as modified by SRC,
   incorporated herein by reference.

   The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403

 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>

static char version[] __initdata =
	"82596.c $Revision: 1.5 $\n";

#define DRV_NAME	"82596"

/* DEBUG flags
 */

#define DEB_INIT	0x0001
#define DEB_PROBE	0x0002
#define DEB_SERIOUS	0x0004
#define DEB_ERRORS	0x0008
#define DEB_MULTI	0x0010
#define DEB_TDR		0x0020
#define DEB_OPEN	0x0040
#define DEB_RESET	0x0080
#define DEB_ADDCMD	0x0100
#define DEB_STATUS	0x0200
#define DEB_STARTTX	0x0400
#define DEB_RXADDR	0x0800
#define DEB_TXADDR	0x1000
#define DEB_RXFRAME	0x2000
#define DEB_INTS	0x4000
#define DEB_STRUCT	0x8000
#define DEB_ANY		0xffff

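/* DEB(flag, statement) executes 'statement' only when the corresponding
 * DEB_* bit is set in i596_debug.
 */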
#define DEB(x,y)	if (i596_debug & (x)) y


#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE)
#define ENABLE_MVME16x_NET
#endif
#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE)
#define ENABLE_BVME6000_NET
#endif
#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE)
#define ENABLE_APRICOT
#endif

#ifdef ENABLE_MVME16x_NET
#include <asm/mvme16xhw.h>
#endif
#ifdef ENABLE_BVME6000_NET
#include <asm/bvme6000hw.h>
#endif

/*
 * Define various macros for Channel Attention, word swapping etc., dependent
 * on architecture.  MVME and BVME are 680x0 based, otherwise it is Intel.
 */

#ifdef __mc68000__
#define WSWAPrfd(x)  ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPrbd(x)  ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPscb(x)  ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPcmd(x)  ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPtbd(x)  ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define WSWAPchar(x) ((char *)            (((u32)(x)<<16) | ((((u32)(x)))>>16)))
#define ISCP_BUSY	0x00010000
#define MACH_IS_APRICOT	0
#else
#define WSWAPrfd(x)     ((struct i596_rfd *)((long)x))
#define WSWAPrbd(x)     ((struct i596_rbd *)((long)x))
#define WSWAPiscp(x)    ((struct i596_iscp *)((long)x))
#define WSWAPscb(x)     ((struct i596_scb *)((long)x))
#define WSWAPcmd(x)     ((struct i596_cmd *)((long)x))
#define WSWAPtbd(x)     ((struct i596_tbd *)((long)x))
#define WSWAPchar(x)    ((char *)((long)x))
#define ISCP_BUSY	0x0001
#define MACH_IS_APRICOT	1
#endif
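/*
 * For example, on the 680x0 boards WSWAPcmd(0x12345678) yields 0x56781234,
 * matching the word order the i596 expects; on the Apricot (Intel) build
 * the WSWAP* macros are plain casts and no swap is performed.
 */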

/*
 * The MPU_PORT command allows direct access to the 82596. With PORT access
 * the following commands are available (p5-18). The 32-bit port command
 * must be word-swapped with the most significant word written first.
 * This only applies to VME boards.
 */
#define PORT_RESET		0x00	/* reset 82596 */
#define PORT_SELFTEST		0x01	/* selftest */
#define PORT_ALTSCP		0x02	/* alternate SCB address */
#define PORT_ALTDUMP		0x03	/* Alternate DUMP address */

static int i596_debug = (DEB_SERIOUS|DEB_PROBE);

MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");

module_param(i596_debug, int, 0);
MODULE_PARM_DESC(i596_debug, "i82596 debug mask");


/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PKT_BUF_SZ	1536
#define MAX_MC_CNT	64

#define I596_TOTAL_SIZE 17

#define I596_NULL ((void *)0xffffffff)

#define CMD_EOL		0x8000	/* The last command of the list, stop. */
#define CMD_SUSP	0x4000	/* Suspend after doing cmd. */
#define CMD_INTR	0x2000	/* Interrupt after doing cmd. */

#define CMD_FLEX	0x0008	/* Enable flexible memory model */

enum commands {
	CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
	CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
};

#define STAT_C		0x8000	/* Set to 0 after execution */
#define STAT_B		0x4000	/* Command being executed */
#define STAT_OK		0x2000	/* Command executed ok */
#define STAT_A		0x1000	/* Command aborted */

#define CUC_START	0x0100
#define CUC_RESUME	0x0200
#define CUC_SUSPEND	0x0300
#define CUC_ABORT	0x0400
#define RX_START	0x0010
#define RX_RESUME	0x0020
#define RX_SUSPEND	0x0030
#define RX_ABORT	0x0040

#define TX_TIMEOUT	5


struct i596_reg {
	unsigned short porthi;
	unsigned short portlo;
	unsigned long ca;
};

#define EOF		0x8000
#define SIZE_MASK	0x3fff

struct i596_tbd {
	unsigned short size;
	unsigned short pad;
	struct i596_tbd *next;
	char *data;
};

/* The command structure has two 'next' pointers; v_next is the address of
 * the next command as seen by the CPU, b_next is the address of the next
 * command as seen by the 82596.  The b_next pointer, as used by the 82596
 * always references the status field of the next command, rather than the
 * v_next field, because the 82596 is unaware of v_next.  It may seem more
 * logical to put v_next at the end of the structure, but we cannot do that
 * because the 82596 expects other fields to be there, depending on command
 * type.
 */

struct i596_cmd {
	struct i596_cmd *v_next;	/* Address from CPUs viewpoint */
	unsigned short status;
	unsigned short command;
	struct i596_cmd *b_next;	/* Address from i596 viewpoint */
};

struct tx_cmd {
	struct i596_cmd cmd;
	struct i596_tbd *tbd;
	unsigned short size;
	unsigned short pad;
	struct sk_buff *skb;	/* So we can free it after tx */
};

struct tdr_cmd {
	struct i596_cmd cmd;
	unsigned short status;
	unsigned short pad;
};

struct mc_cmd {
	struct i596_cmd cmd;
	short mc_cnt;
	char mc_addrs[MAX_MC_CNT*6];
};

struct sa_cmd {
	struct i596_cmd cmd;
	char eth_addr[8];
};

struct cf_cmd {
	struct i596_cmd cmd;
	char i596_config[16];
};

struct i596_rfd {
	unsigned short stat;
	unsigned short cmd;
	struct i596_rfd *b_next;	/* Address from i596 viewpoint */
	struct i596_rbd *rbd;
	unsigned short count;
	unsigned short size;
	struct i596_rfd *v_next;	/* Address from CPUs viewpoint */
	struct i596_rfd *v_prev;
};

struct i596_rbd {
	unsigned short count;
	unsigned short zero1;
	struct i596_rbd *b_next;
	unsigned char *b_data;		/* Address from i596 viewpoint */
	unsigned short size;
	unsigned short zero2;
	struct sk_buff *skb;
	struct i596_rbd *v_next;
	struct i596_rbd *b_addr;	/* This rbd addr from i596 view */
	unsigned char *v_data;		/* Address from CPUs viewpoint */
};

#define TX_RING_SIZE 64
#define RX_RING_SIZE 16

struct i596_scb {
	unsigned short status;
	unsigned short command;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	unsigned long crc_err;
	unsigned long align_err;
	unsigned long resource_err;
	unsigned long over_err;
	unsigned long rcvdt_err;
	unsigned long short_err;
	unsigned short t_on;
	unsigned short t_off;
};

struct i596_iscp {
	unsigned long stat;
	struct i596_scb *scb;
};

struct i596_scp {
	unsigned long sysbus;
	unsigned long pad;
	struct i596_iscp *iscp;
};

struct i596_private {
	volatile struct i596_scp scp;
	volatile struct i596_iscp iscp;
	volatile struct i596_scb scb;
	struct sa_cmd sa_cmd;
	struct cf_cmd cf_cmd;
	struct tdr_cmd tdr_cmd;
	struct mc_cmd mc_cmd;
	unsigned long stat;
	int last_restart __attribute__((aligned(4)));
	struct i596_rfd *rfd_head;
	struct i596_rbd *rbd_head;
	struct i596_cmd *cmd_tail;
	struct i596_cmd *cmd_head;
	int cmd_backlog;
	unsigned long last_cmd;
	struct i596_rfd rfds[RX_RING_SIZE];
	struct i596_rbd rbds[RX_RING_SIZE];
	struct tx_cmd tx_cmds[TX_RING_SIZE];
	struct i596_tbd tbds[TX_RING_SIZE];
	int next_tx_cmd;
	spinlock_t lock;
};

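/*
 * Default parameter block for the CmdConfigure command; 14 of these bytes
 * are copied into cf_cmd.i596_config when the chip is initialised.
 */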
static char init_setup[] =
{
	0x8E,		/* length, prefetch on */
	0xC8,		/* fifo to 8, monitor off */
#ifdef CONFIG_VME
	0xc0,		/* don't save bad frames */
#else
	0x80,		/* don't save bad frames */
#endif
	0x2E,		/* No source address insertion, 8 byte preamble */
	0x00,		/* priority and backoff defaults */
	0x60,		/* interframe spacing */
	0x00,		/* slot time LSB */
	0xf2,		/* slot time and retries */
	0x00,		/* promiscuous mode */
	0x00,		/* collision detect */
	0x40,		/* minimum frame length */
	0xff,
	0x00,
	0x7f /*	*multi IA */ };

static int i596_open(struct net_device *dev);
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t i596_interrupt(int irq, void *dev_id);
static int i596_close(struct net_device *dev);
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
static void i596_tx_timeout (struct net_device *dev);
static void print_eth(unsigned char *buf, char *str);
static void set_multicast_list(struct net_device *dev);

static int rx_ring_size = RX_RING_SIZE;
static int ticks_limit = 25;
static int max_cmd_backlog = TX_RING_SIZE-1;

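/*
 * CA() asserts Channel Attention so the 82596 (re)examines the SCB;
 * MPU_PORT() hands the chip a 32-bit PORT command (reset, alternate SCP
 * address, etc.) as two 16-bit halves on the VME boards.
 */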
static inline void CA(struct net_device *dev)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		((struct i596_reg *) dev->base_addr)->ca = 1;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile u32 i;

		i = *(volatile u32 *) (dev->base_addr);
	}
#endif
#ifdef ENABLE_APRICOT
	if (MACH_IS_APRICOT) {
		outw(0, (short) (dev->base_addr) + 4);
	}
#endif
}


static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x)
{
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		struct i596_reg *p = (struct i596_reg *) (dev->base_addr);
		p->porthi = ((c) | (u32) (x)) & 0xffff;
		p->portlo = ((c) | (u32) (x)) >> 16;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		u32 v = (u32) (c) | (u32) (x);
		v = ((u32) (v) << 16) | ((u32) (v) >> 16);
		*(volatile u32 *) dev->base_addr = v;
		udelay(1);
		*(volatile u32 *) dev->base_addr = v;
	}
#endif
}


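/*
 * Busy-wait helpers: poll a command/status word that the chip is expected
 * to clear, in 10us steps, for at most 'delcnt' iterations before reporting
 * a timeout.
 */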
static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->iscp.stat)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
{
	while (--delcnt && lp->scb.command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
		     dev->name, str, lp->scb.status, lp->scb.command);
		return -1;
	}
	else
		return 0;
}


static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str)
{
	volatile struct i596_cmd *c = cmd;

	while (--delcnt && c->command)
		udelay(10);
	if (!delcnt) {
		printk(KERN_ERR "%s: %s.\n", dev->name, str);
		return -1;
	}
	else
		return 0;
}


static void i596_display_data(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_cmd *cmd;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n",
	       &lp->scp, lp->scp.sysbus, lp->scp.iscp);
	printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n",
	       &lp->iscp, lp->iscp.stat, lp->iscp.scb);
	printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x,"
	       " .cmd = %p, .rfd = %p\n",
	       &lp->scb, lp->scb.status, lp->scb.command,
	       lp->scb.cmd, lp->scb.rfd);
	printk(KERN_ERR "   errors: crc %lx, align %lx, resource %lx,"
	       " over %lx, rcvdt %lx, short %lx\n",
	       lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
	       lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
	cmd = lp->cmd_head;
	while (cmd != I596_NULL) {
		printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n",
		       cmd, cmd->status, cmd->command, cmd->b_next);
		cmd = cmd->v_next;
	}
	rfd = lp->rfd_head;
	printk(KERN_ERR "rfd_head = %p\n", rfd);
	do {
		printk(KERN_ERR "   %p .stat %04x, .cmd %04x, b_next %p, rbd %p,"
		       " count %04x\n",
		       rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
		       rfd->count);
		rfd = rfd->v_next;
	} while (rfd != lp->rfd_head);
	rbd = lp->rbd_head;
	printk(KERN_ERR "rbd_head = %p\n", rbd);
	do {
		printk(KERN_ERR "   %p .count %04x, b_next %p, b_data %p, size %04x\n",
		       rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
		rbd = rbd->v_next;
	} while (rbd != lp->rbd_head);
}


#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
static irqreturn_t i596_error(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x28] = 1;
		pcc2[0x2b] = 0x1d;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
	printk(KERN_ERR "%s: Error interrupt\n", dev->name);
	i596_display_data(dev);
	return IRQ_HANDLED;
}
#endif

static inline void init_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;

	/* First build the Receive Buffer Descriptor List */

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ);

		if (skb == NULL)
			panic("82596: alloc_skb() failed");
		skb->dev = dev;
		rbd->v_next = rbd+1;
		rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
		rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
		rbd->skb = skb;
		rbd->v_data = skb->data;
		rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
		rbd->size = PKT_BUF_SZ;
#ifdef __mc68000__
		cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
#endif
	}
	lp->rbd_head = lp->rbds;
	rbd = lp->rbds + rx_ring_size - 1;
	rbd->v_next = lp->rbds;
	rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds));

	/* Now build the Receive Frame Descriptor List */

	for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
		rfd->rbd = I596_NULL;
		rfd->v_next = rfd+1;
		rfd->v_prev = rfd-1;
		rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1));
		rfd->cmd = CMD_FLEX;
	}
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd = lp->rfds;
	rfd->rbd = lp->rbd_head;
	rfd->v_prev = lp->rfds + rx_ring_size - 1;
	rfd = lp->rfds + rx_ring_size - 1;
	rfd->v_next = lp->rfds;
	rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds));
	rfd->cmd = CMD_EOL|CMD_FLEX;
}

static inline void remove_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rbd *rbd;
	int i;

	for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
		if (rbd->skb == NULL)
			break;
		dev_kfree_skb(rbd->skb);
	}
}


static void rebuild_rx_bufs(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int i;

	/* Ensure rx frame/buffer descriptors are tidy */

	for (i = 0; i < rx_ring_size; i++) {
		lp->rfds[i].rbd = I596_NULL;
		lp->rfds[i].cmd = CMD_FLEX;
	}
	lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
	lp->rfd_head = lp->rfds;
	lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds));
	lp->rbd_head = lp->rbds;
	lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds));
}


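/*
 * Reset the chip, point it at the SCP/ISCP/SCB held in lp (via PORT_ALTSCP
 * on the VME boards, I/O writes on the Apricot), wait for the ISCP busy
 * flag to clear, then queue the initial Configure, SASetup and TDR commands
 * and start the receive unit.
 */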
static int init_i596_mem(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT)
	short ioaddr = dev->base_addr;
#endif
	unsigned long flags;

	MPU_PORT(dev, PORT_RESET, NULL);

	udelay(100);		/* Wait 100us - seems to help */

#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints for now */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x48;
		/* Following disables snooping.  Snooping is not required
		 * as we make appropriate use of non-cached pages for
		 * shared data, and cache_push/cache_clear.
		 */
		pcc2[0x2b] = 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

	/* change the scp address */

	MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp));

#elif defined(ENABLE_APRICOT)

	{
		u32 scp = virt_to_bus(&lp->scp);

		/* change the scp address */
		outw(0, ioaddr);
		outw(0, ioaddr);
		outb(4, ioaddr + 0xf);
		outw(scp | 2, ioaddr);
		outw(scp >> 16, ioaddr);
	}
#endif

	lp->last_cmd = jiffies;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x)
		lp->scp.sysbus = 0x00000054;
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000)
		lp->scp.sysbus = 0x0000004c;
#endif
#ifdef ENABLE_APRICOT
	if (MACH_IS_APRICOT)
		lp->scp.sysbus = 0x00440000;
#endif

	lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp));
	lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb));
	lp->iscp.stat = ISCP_BUSY;
	lp->cmd_backlog = 0;

	lp->cmd_head = lp->scb.cmd = I596_NULL;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		lp->scb.t_on  = 7 * 25;
		lp->scb.t_off = 1 * 25;
	}
#endif

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));

#if defined(ENABLE_APRICOT)
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);

	if (wait_istat(dev,lp,1000,"initialization timed out"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name));

	/* Ensure rx frame/buffer descriptors are tidy */
	rebuild_rx_bufs(dev);
	lp->scb.command = 0;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Enable ints, etc. now */
		pcc2[0x2a] = 0x55;	/* Edge sensitive */
		pcc2[0x2b] = 0x15;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 3;
	}
#endif


	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name));
	memcpy(lp->cf_cmd.i596_config, init_setup, 14);
	lp->cf_cmd.cmd.command = CmdConfigure;
	i596_add_cmd(dev, &lp->cf_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
	memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
	lp->sa_cmd.cmd.command = CmdSASetup;
	i596_add_cmd(dev, &lp->sa_cmd.cmd);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
	lp->tdr_cmd.cmd.command = CmdTDR;
	i596_add_cmd(dev, &lp->tdr_cmd.cmd);

	spin_lock_irqsave (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) {
		spin_unlock_irqrestore (&lp->lock, flags);
		goto failed;
	}
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
	lp->scb.command = RX_START;
	CA(dev);

	spin_unlock_irqrestore (&lp->lock, flags);

	if (wait_cmd(dev,lp,1000,"RX_START not processed"))
		goto failed;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name));
	return 0;

failed:
	printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name);
	MPU_PORT(dev, PORT_RESET, NULL);
	return -1;
}

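/*
 * Walk the RFD ring handling completed frames.  Frames longer than
 * rx_copybreak keep the skb already on the ring (a fresh skb is allocated
 * to replace it); shorter frames are copied into a new, smaller skb.
 */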
static inline int i596_rx(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct i596_rfd *rfd;
	struct i596_rbd *rbd;
	int frames = 0;

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n",
			lp->rfd_head, lp->rbd_head));

	rfd = lp->rfd_head;		/* Ref next frame to check */

	while ((rfd->stat) & STAT_C) {	/* Loop while complete frames */
		if (rfd->rbd == I596_NULL)
			rbd = I596_NULL;
		else if (rfd->rbd == lp->rbd_head->b_addr)
			rbd = lp->rbd_head;
		else {
			printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name);
			/* XXX Now what? */
			rbd = I596_NULL;
		}
		DEB(DEB_RXFRAME, printk(KERN_DEBUG "  rfd %p, rfd.rbd %p, rfd.stat %04x\n",
			rfd, rfd->rbd, rfd->stat));

		if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) {
			/* a good frame */
			int pkt_len = rbd->count & 0x3fff;
			struct sk_buff *skb = rbd->skb;
			int rx_in_place = 0;

			DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
			frames++;

			/* Check if the packet is long enough to just accept
			 * without copying to a properly sized skbuff.
			 */

			if (pkt_len > rx_copybreak) {
				struct sk_buff *newskb;

				/* Get fresh skbuff to replace filled one. */
				newskb = dev_alloc_skb(PKT_BUF_SZ);
				if (newskb == NULL) {
					skb = NULL;	/* drop pkt */
					goto memory_squeeze;
				}
				/* Pass up the skb already on the Rx ring. */
				skb_put(skb, pkt_len);
				rx_in_place = 1;
				rbd->skb = newskb;
				newskb->dev = dev;
				rbd->v_data = newskb->data;
				rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
#ifdef __mc68000__
				cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
#endif
			}
			else
				skb = dev_alloc_skb(pkt_len + 2);
memory_squeeze:
			if (skb == NULL) {
				/* XXX tulip.c can defer packets here!! */
				printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
				dev->stats.rx_dropped++;
			}
			else {
				if (!rx_in_place) {
					/* 16 byte align the data fields */
					skb_reserve(skb, 2);
					memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
				}
				skb->protocol=eth_type_trans(skb,dev);
				skb->len = pkt_len;
#ifdef __mc68000__
				cache_clear(virt_to_phys(rbd->skb->data),
						pkt_len);
#endif
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes+=pkt_len;
			}
		}
		else {
			DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n",
					dev->name, rfd->stat));
			dev->stats.rx_errors++;
			if ((rfd->stat) & 0x0001)
				dev->stats.collisions++;
			if ((rfd->stat) & 0x0080)
				dev->stats.rx_length_errors++;
			if ((rfd->stat) & 0x0100)
				dev->stats.rx_over_errors++;
			if ((rfd->stat) & 0x0200)
				dev->stats.rx_fifo_errors++;
			if ((rfd->stat) & 0x0400)
				dev->stats.rx_frame_errors++;
			if ((rfd->stat) & 0x0800)
				dev->stats.rx_crc_errors++;
			if ((rfd->stat) & 0x1000)
				dev->stats.rx_length_errors++;
		}

		/* Clear the buffer descriptor count and EOF + F flags */

		if (rbd != I596_NULL && (rbd->count & 0x4000)) {
			rbd->count = 0;
			lp->rbd_head = rbd->v_next;
		}

		/* Tidy the frame descriptor, marking it as end of list */

		rfd->rbd = I596_NULL;
		rfd->stat = 0;
		rfd->cmd = CMD_EOL|CMD_FLEX;
		rfd->count = 0;

		/* Remove end-of-list from old end descriptor */

		rfd->v_prev->cmd = CMD_FLEX;

		/* Update record of next frame descriptor to process */

		lp->scb.rfd = rfd->b_next;
		lp->rfd_head = rfd->v_next;
		rfd = lp->rfd_head;
	}

	DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames));

	return 0;
}


static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
{
	struct i596_cmd *ptr;

	while (lp->cmd_head != I596_NULL) {
		ptr = lp->cmd_head;
		lp->cmd_head = ptr->v_next;
		lp->cmd_backlog--;

		switch ((ptr->command) & 0x7) {
		case CmdTx:
			{
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				dev_kfree_skb(skb);

				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;

				ptr->v_next = ptr->b_next = I596_NULL;
				tx_cmd->cmd.command = 0;  /* Mark as free */
				break;
			}
		default:
			ptr->v_next = ptr->b_next = I596_NULL;
		}
	}

	wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out");
	lp->scb.cmd = I596_NULL;
}

static void i596_reset(struct net_device *dev, struct i596_private *lp,
			int ioaddr)
{
	unsigned long flags;

	DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n"));

	spin_lock_irqsave (&lp->lock, flags);

	wait_cmd(dev,lp,100,"i596_reset timed out");

	netif_stop_queue(dev);

	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	/* wait for shutdown */
	wait_cmd(dev,lp,1000,"i596_reset 2 timed out");
	spin_unlock_irqrestore (&lp->lock, flags);

	i596_cleanup_cmd(dev,lp);
	i596_rx(dev);

	netif_start_queue(dev);
	init_i596_mem(dev);
}

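/*
 * Append a command to the command-unit list.  If the list was empty the
 * command unit is (re)started with CUC_START; if the backlog exceeds
 * max_cmd_backlog and no command has completed within ticks_limit jiffies,
 * the chip is reset.
 */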
static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	unsigned long flags;

	DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n"));

	cmd->status = 0;
	cmd->command |= (CMD_EOL | CMD_INTR);
	cmd->v_next = cmd->b_next = I596_NULL;

	spin_lock_irqsave (&lp->lock, flags);

	if (lp->cmd_head != I596_NULL) {
		lp->cmd_tail->v_next = cmd;
		lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status));
	} else {
		lp->cmd_head = cmd;
		wait_cmd(dev,lp,100,"i596_add_cmd timed out");
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status));
		lp->scb.command = CUC_START;
		CA(dev);
	}
	lp->cmd_tail = cmd;
	lp->cmd_backlog++;

	spin_unlock_irqrestore (&lp->lock, flags);

	if (lp->cmd_backlog > max_cmd_backlog) {
		unsigned long tickssofar = jiffies - lp->last_cmd;

		if (tickssofar < ticks_limit)
			return;

		printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name);

		i596_reset(dev, lp, ioaddr);
	}
}

static int i596_open(struct net_device *dev)
{
	int res = 0;

	DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));

	if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
		printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
		return -EAGAIN;
	}
#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (request_irq(0x56, i596_error, 0, "i82596_error", dev))
			return -EAGAIN;
	}
#endif
	init_rx_bufs(dev);

	netif_start_queue(dev);

	/* Initialize the 82596 memory */
	if (init_i596_mem(dev)) {
		res = -EAGAIN;
		free_irq(dev->irq, dev);
	}

	return res;
}

static void i596_tx_timeout (struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;

	/* Transmitter timeout, serious problems. */
	DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n",
			dev->name));

	dev->stats.tx_errors++;

	/* Try to restart the adaptor */
	if (lp->last_restart == dev->stats.tx_packets) {
		DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n"));
		/* Shutdown and restart */
		i596_reset (dev, lp, ioaddr);
	} else {
		/* Issue a channel attention signal */
		DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n"));
		lp->scb.command = CUC_START | RX_START;
		CA (dev);
		lp->last_restart = dev->stats.tx_packets;
	}

	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue (dev);
}

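/*
 * Queue a packet for transmission.  The next tx_cmd/tbd pair in the ring is
 * claimed; if that slot's command word is still non-zero the ring is full
 * and the packet is dropped.
 */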
static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	struct tx_cmd *tx_cmd;
	struct i596_tbd *tbd;
	short length = skb->len;

	DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n",
				dev->name, skb->len, skb->data));

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
	netif_stop_queue(dev);

	tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
	tbd = lp->tbds + lp->next_tx_cmd;

	if (tx_cmd->cmd.command) {
		printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n",
				dev->name);
		dev->stats.tx_dropped++;

		dev_kfree_skb(skb);
	} else {
		if (++lp->next_tx_cmd == TX_RING_SIZE)
			lp->next_tx_cmd = 0;
		tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd));
		tbd->next = I596_NULL;

		tx_cmd->cmd.command = CMD_FLEX | CmdTx;
		tx_cmd->skb = skb;

		tx_cmd->pad = 0;
		tx_cmd->size = 0;
		tbd->pad = 0;
		tbd->size = EOF | length;

		tbd->data = WSWAPchar(virt_to_bus(skb->data));

#ifdef __mc68000__
		cache_push(virt_to_phys(skb->data), length);
#endif
		DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
		i596_add_cmd(dev, &tx_cmd->cmd);

		dev->stats.tx_packets++;
		dev->stats.tx_bytes += length;
	}

	netif_start_queue(dev);

	return NETDEV_TX_OK;
}

static void print_eth(unsigned char *add, char *str)
{
	printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n",
	       add, add + 6, add, add[12], add[13], str);
}

static int io = 0x300;
static int irq = 10;

static const struct net_device_ops i596_netdev_ops = {
	.ndo_open		= i596_open,
	.ndo_stop		= i596_close,
	.ndo_start_xmit		= i596_start_xmit,
	.ndo_set_multicast_list	= set_multicast_list,
	.ndo_tx_timeout		= i596_tx_timeout,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

struct net_device * __init i82596_probe(int unit)
{
	struct net_device *dev;
	int i;
	struct i596_private *lp;
	char eth_addr[8];
	static int probed;
	int err;

	if (probed)
		return ERR_PTR(-ENODEV);
	probed++;

	dev = alloc_etherdev(0);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	} else {
		dev->base_addr = io;
		dev->irq = irq;
	}

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) {
			printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n");
			err = -ENODEV;
			goto out;
		}
		memcpy(eth_addr, (void *) 0xfffc1f2c, 6);	/* YUCK! Get addr from NOVRAM */
		dev->base_addr = MVME_I596_BASE;
		dev->irq = (unsigned) MVME16x_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE;
		unsigned char msr = rtc[3];
		int i;

		rtc[3] |= 0x80;
		for (i = 0; i < 6; i++)
			eth_addr[i] = rtc[i * 4 + 7];	/* Stored in RTC RAM at offset 1 */
		rtc[3] = msr;
		dev->base_addr = BVME_I596_BASE;
		dev->irq = (unsigned) BVME_IRQ_I596;
		goto found;
	}
#endif
#ifdef ENABLE_APRICOT
	{
		int checksum = 0;
		int ioaddr = 0x300;

		/* this is easy the ethernet interface can only be at 0x300 */
		/* first check nothing is already registered here */

		if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) {
			printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr);
			err = -EBUSY;
			goto out;
		}

		dev->base_addr = ioaddr;

		for (i = 0; i < 8; i++) {
			eth_addr[i] = inb(ioaddr + 8 + i);
			checksum += eth_addr[i];
		}

		/* checksum is a multiple of 0x100, got this wrong first time
		   some machines have 0x100, some 0x200.
		   The DOS driver doesn't even bother with the checksum.
		   Some other boards trip the checksum.. but then appear as
		   ether address 0.  Trap these - AC */

		if ((checksum % 0x100) ||
		    (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) {
			err = -ENODEV;
			goto out1;
		}

		dev->irq = 10;
		goto found;
	}
#endif
	err = -ENODEV;
	goto out;

found:
	dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0);
	if (!dev->mem_start) {
		err = -ENOMEM;
		goto out1;
	}

	DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));

	for (i = 0; i < 6; i++)
		DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i]));

	DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq));

	DEB(DEB_PROBE,printk(KERN_INFO "%s", version));

	/* The 82596-specific entries in the device structure. */
	dev->netdev_ops = &i596_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ml_priv = (void *)(dev->mem_start);

	lp = dev->ml_priv;
	DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), "
			"lp->scb at 0x%08lx\n",
			dev->name, (unsigned long)lp,
			sizeof(struct i596_private), (unsigned long)&lp->scb));
	memset((void *) lp, 0, sizeof(struct i596_private));

#ifdef __mc68000__
	cache_push(virt_to_phys((void *)(dev->mem_start)), 4096);
	cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096);
	kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER);
#endif
	lp->scb.command = 0;
	lp->scb.cmd = I596_NULL;
	lp->scb.rfd = I596_NULL;
	spin_lock_init(&lp->lock);

	err = register_netdev(dev);
	if (err)
		goto out2;
	return dev;
out2:
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */
	kernel_set_cachemode((void *)(dev->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev->mem_start));
out1:
#ifdef ENABLE_APRICOT
	release_region(dev->base_addr, I596_TOTAL_SIZE);
#endif
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

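/*
 * Interrupt handler: reap completed commands from the CU list (freeing
 * transmitted skbs and reporting TDR results), pass received frames to
 * i596_rx(), and restart the receive unit if it has gone inactive.
 */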
static irqreturn_t i596_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct i596_private *lp;
	short ioaddr;
	unsigned short status, ack_cmd = 0;
	int handled = 0;

#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) {
			i596_error(irq, dev_id);
			return IRQ_HANDLED;
		}
	}
#endif
	if (dev == NULL) {
		printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq);
		return IRQ_NONE;
	}

	ioaddr = dev->base_addr;
	lp = dev->ml_priv;

	spin_lock (&lp->lock);

	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	status = lp->scb.status;

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
			dev->name, irq, status));

	ack_cmd = status & 0xf000;

	if ((status & 0x8000) || (status & 0x2000)) {
		struct i596_cmd *ptr;

		handled = 1;
		if ((status & 0x8000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name));
		if ((status & 0x2000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));

		while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) {
			ptr = lp->cmd_head;

			DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n",
				       lp->cmd_head->status, lp->cmd_head->command));
			lp->cmd_head = ptr->v_next;
			lp->cmd_backlog--;

			switch ((ptr->command) & 0x7) {
			case CmdTx:
			    {
				struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
				struct sk_buff *skb = tx_cmd->skb;

				if ((ptr->status) & STAT_OK) {
					DEB(DEB_TXADDR,print_eth(skb->data, "tx-done"));
				} else {
					dev->stats.tx_errors++;
					if ((ptr->status) & 0x0020)
						dev->stats.collisions++;
					if (!((ptr->status) & 0x0040))
						dev->stats.tx_heartbeat_errors++;
					if ((ptr->status) & 0x0400)
						dev->stats.tx_carrier_errors++;
					if ((ptr->status) & 0x0800)
						dev->stats.collisions++;
					if ((ptr->status) & 0x1000)
						dev->stats.tx_aborted_errors++;
				}

				dev_kfree_skb_irq(skb);

				tx_cmd->cmd.command = 0; /* Mark free */
				break;
			    }
			case CmdTDR:
			    {
				unsigned short status = ((struct tdr_cmd *)ptr)->status;

				if (status & 0x8000) {
					DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name));
				} else {
					if (status & 0x4000)
						printk(KERN_ERR "%s: Transceiver problem.\n", dev->name);
					if (status & 0x2000)
						printk(KERN_ERR "%s: Termination problem.\n", dev->name);
					if (status & 0x1000)
						printk(KERN_ERR "%s: Short circuit.\n", dev->name);

					DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff));
				}
				break;
			    }
			case CmdConfigure:
			case CmdMulticastList:
				/* Zap command so set_multicast_list() knows it is free */
				ptr->command = 0;
				break;
			}
			ptr->v_next = ptr->b_next = I596_NULL;
			lp->last_cmd = jiffies;
		}

		ptr = lp->cmd_head;
		while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) {
			ptr->command &= 0x1fff;
			ptr = ptr->v_next;
		}

		if ((lp->cmd_head != I596_NULL))
			ack_cmd |= CUC_START;
		lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status));
	}
	if ((status & 0x1000) || (status & 0x4000)) {
		if ((status & 0x4000))
			DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name));
		i596_rx(dev);
		/* Only RX_START if stopped - RGH 07-07-96 */
		if (status & 0x1000) {
			if (netif_running(dev)) {
				DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
				ack_cmd |= RX_START;
				dev->stats.rx_errors++;
				dev->stats.rx_fifo_errors++;
				rebuild_rx_bufs(dev);
			}
		}
	}
	wait_cmd(dev,lp,100,"i596 interrupt, timeout");
	lp->scb.command = ack_cmd;

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		/* Ack the interrupt */

		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		pcc2[0x2a] |= 0x08;
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
		*ethirq = 3;
	}
#endif
#ifdef ENABLE_APRICOT
	(void) inb(ioaddr + 0x10);
	outb(4, ioaddr + 0xf);
#endif
	CA(dev);

	DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));

	spin_unlock (&lp->lock);
	return IRQ_RETVAL(handled);
}

static int i596_close(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	unsigned long flags;

	netif_stop_queue(dev);

	DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
		       dev->name, lp->scb.status));

	spin_lock_irqsave(&lp->lock, flags);

	wait_cmd(dev,lp,100,"close1 timed out");
	lp->scb.command = CUC_ABORT | RX_ABORT;
	CA(dev);

	wait_cmd(dev,lp,100,"close2 timed out");

	spin_unlock_irqrestore(&lp->lock, flags);
	DEB(DEB_STRUCT,i596_display_data(dev));
	i596_cleanup_cmd(dev,lp);

#ifdef ENABLE_MVME16x_NET
	if (MACH_IS_MVME16x) {
		volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;

		/* Disable all ints */
		pcc2[0x28] = 1;
		pcc2[0x2a] = 0x40;
		pcc2[0x2b] = 0x40;	/* Set snooping bits now! */
	}
#endif
#ifdef ENABLE_BVME6000_NET
	if (MACH_IS_BVME6000) {
		volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG;

		*ethirq = 1;
	}
#endif

	free_irq(dev->irq, dev);
	remove_rx_bufs(dev);

	return 0;
}

/*
 *    Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	struct i596_private *lp = dev->ml_priv;
	int config = 0, cnt;

	DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
		dev->name, netdev_mc_count(dev),
		dev->flags & IFF_PROMISC ? "ON" : "OFF",
		dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));

	if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out"))
		return;

	if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] |= 0x01;
		config = 1;
	}
	if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
		lp->cf_cmd.i596_config[8] &= ~0x01;
		config = 1;
	}
	if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] &= ~0x20;
		config = 1;
	}
	if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
		lp->cf_cmd.i596_config[11] |= 0x20;
		config = 1;
	}
	if (config) {
		lp->cf_cmd.cmd.command = CmdConfigure;
		i596_add_cmd(dev, &lp->cf_cmd.cmd);
	}

	cnt = netdev_mc_count(dev);
	if (cnt > MAX_MC_CNT)
	{
		cnt = MAX_MC_CNT;
		printk(KERN_ERR "%s: Only %d multicast addresses supported",
			dev->name, cnt);
	}

	if (!netdev_mc_empty(dev)) {
		struct netdev_hw_addr *ha;
		unsigned char *cp;
		struct mc_cmd *cmd;

		if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out"))
			return;
		cmd = &lp->mc_cmd;
		cmd->cmd.command = CmdMulticastList;
		cmd->mc_cnt = cnt * ETH_ALEN;
		cp = cmd->mc_addrs;
		netdev_for_each_mc_addr(ha, dev) {
			if (!cnt--)
				break;
			memcpy(cp, ha->addr, ETH_ALEN);
			if (i596_debug > 1)
				DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
						dev->name, cp));
			cp += ETH_ALEN;
		}
		i596_add_cmd(dev, &cmd->cmd);
	}
}

#ifdef MODULE
static struct net_device *dev_82596;

#ifdef ENABLE_APRICOT
module_param(irq, int, 0);
MODULE_PARM_DESC(irq, "Apricot IRQ number");
#endif

static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "i82596 debug mask");

int __init init_module(void)
{
	if (debug >= 0)
		i596_debug = debug;
	dev_82596 = i82596_probe(-1);
	if (IS_ERR(dev_82596))
		return PTR_ERR(dev_82596);
	return 0;
}

void __exit cleanup_module(void)
{
	unregister_netdev(dev_82596);
#ifdef __mc68000__
	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING,
	 * XXX which may be invalid (CONFIG_060_WRITETHROUGH)
	 */

	kernel_set_cachemode((void *)(dev_82596->mem_start), 4096,
			IOMAP_FULL_CACHING);
#endif
	free_page ((u32)(dev_82596->mem_start));
#ifdef ENABLE_APRICOT
	/* If we don't do this, we can't re-insmod it later. */
	release_region(dev_82596->base_addr, I596_TOTAL_SIZE);
#endif
	free_netdev(dev_82596);
}

#endif				/* MODULE */