Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.34-rc5 1609 lines 41 kB view raw
1/* 82596.c: A generic 82596 ethernet driver for linux. */ 2/* 3 Based on Apricot.c 4 Written 1994 by Mark Evans. 5 This driver is for the Apricot 82596 bus-master interface 6 7 Modularised 12/94 Mark Evans 8 9 10 Modified to support the 82596 ethernet chips on 680x0 VME boards. 11 by Richard Hirst <richard@sleepie.demon.co.uk> 12 Renamed to be 82596.c 13 14 980825: Changed to receive directly in to sk_buffs which are 15 allocated at open() time. Eliminates copy on incoming frames 16 (small ones are still copied). Shared data now held in a 17 non-cached page, so we can run on 68060 in copyback mode. 18 19 TBD: 20 * look at deferring rx frames rather than discarding (as per tulip) 21 * handle tx ring full as per tulip 22 * performance test to tune rx_copybreak 23 24 Most of my modifications relate to the braindead big-endian 25 implementation by Intel. When the i596 is operating in 26 'big-endian' mode, it thinks a 32 bit value of 0x12345678 27 should be stored as 0x56781234. This is a real pain, when 28 you have linked lists which are shared by the 680x0 and the 29 i596. 30 31 Driver skeleton 32 Written 1993 by Donald Becker. 33 Copyright 1993 United States Government as represented by the Director, 34 National Security Agency. This software may only be used and distributed 35 according to the terms of the GNU General Public License as modified by SRC, 36 incorporated herein by reference. 37 38 The author may be reached as becker@scyld.com, or C/O 39 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403 40 41 */ 42 43#include <linux/module.h> 44#include <linux/kernel.h> 45#include <linux/string.h> 46#include <linux/errno.h> 47#include <linux/ioport.h> 48#include <linux/interrupt.h> 49#include <linux/delay.h> 50#include <linux/netdevice.h> 51#include <linux/etherdevice.h> 52#include <linux/skbuff.h> 53#include <linux/init.h> 54#include <linux/bitops.h> 55#include <linux/gfp.h> 56 57#include <asm/io.h> 58#include <asm/dma.h> 59#include <asm/pgtable.h> 60#include <asm/cacheflush.h> 61 62static char version[] __initdata = 63 "82596.c $Revision: 1.5 $\n"; 64 65#define DRV_NAME "82596" 66 67/* DEBUG flags 68 */ 69 70#define DEB_INIT 0x0001 71#define DEB_PROBE 0x0002 72#define DEB_SERIOUS 0x0004 73#define DEB_ERRORS 0x0008 74#define DEB_MULTI 0x0010 75#define DEB_TDR 0x0020 76#define DEB_OPEN 0x0040 77#define DEB_RESET 0x0080 78#define DEB_ADDCMD 0x0100 79#define DEB_STATUS 0x0200 80#define DEB_STARTTX 0x0400 81#define DEB_RXADDR 0x0800 82#define DEB_TXADDR 0x1000 83#define DEB_RXFRAME 0x2000 84#define DEB_INTS 0x4000 85#define DEB_STRUCT 0x8000 86#define DEB_ANY 0xffff 87 88 89#define DEB(x,y) if (i596_debug & (x)) y 90 91 92#if defined(CONFIG_MVME16x_NET) || defined(CONFIG_MVME16x_NET_MODULE) 93#define ENABLE_MVME16x_NET 94#endif 95#if defined(CONFIG_BVME6000_NET) || defined(CONFIG_BVME6000_NET_MODULE) 96#define ENABLE_BVME6000_NET 97#endif 98#if defined(CONFIG_APRICOT) || defined(CONFIG_APRICOT_MODULE) 99#define ENABLE_APRICOT 100#endif 101 102#ifdef ENABLE_MVME16x_NET 103#include <asm/mvme16xhw.h> 104#endif 105#ifdef ENABLE_BVME6000_NET 106#include <asm/bvme6000hw.h> 107#endif 108 109/* 110 * Define various macros for Channel Attention, word swapping etc., dependent 111 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel. 112 */ 113 114#ifdef __mc68000__ 115#define WSWAPrfd(x) ((struct i596_rfd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) 116#define WSWAPrbd(x) ((struct i596_rbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) 117#define WSWAPiscp(x) ((struct i596_iscp *)(((u32)(x)<<16) | ((((u32)(x)))>>16))) 118#define WSWAPscb(x) ((struct i596_scb *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) 119#define WSWAPcmd(x) ((struct i596_cmd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) 120#define WSWAPtbd(x) ((struct i596_tbd *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) 121#define WSWAPchar(x) ((char *) (((u32)(x)<<16) | ((((u32)(x)))>>16))) 122#define ISCP_BUSY 0x00010000 123#define MACH_IS_APRICOT 0 124#else 125#define WSWAPrfd(x) ((struct i596_rfd *)((long)x)) 126#define WSWAPrbd(x) ((struct i596_rbd *)((long)x)) 127#define WSWAPiscp(x) ((struct i596_iscp *)((long)x)) 128#define WSWAPscb(x) ((struct i596_scb *)((long)x)) 129#define WSWAPcmd(x) ((struct i596_cmd *)((long)x)) 130#define WSWAPtbd(x) ((struct i596_tbd *)((long)x)) 131#define WSWAPchar(x) ((char *)((long)x)) 132#define ISCP_BUSY 0x0001 133#define MACH_IS_APRICOT 1 134#endif 135 136/* 137 * The MPU_PORT command allows direct access to the 82596. With PORT access 138 * the following commands are available (p5-18). The 32-bit port command 139 * must be word-swapped with the most significant word written first. 140 * This only applies to VME boards. 141 */ 142#define PORT_RESET 0x00 /* reset 82596 */ 143#define PORT_SELFTEST 0x01 /* selftest */ 144#define PORT_ALTSCP 0x02 /* alternate SCB address */ 145#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */ 146 147static int i596_debug = (DEB_SERIOUS|DEB_PROBE); 148 149MODULE_AUTHOR("Richard Hirst"); 150MODULE_DESCRIPTION("i82596 driver"); 151MODULE_LICENSE("GPL"); 152 153module_param(i596_debug, int, 0); 154MODULE_PARM_DESC(i596_debug, "i82596 debug mask"); 155 156 157/* Copy frames shorter than rx_copybreak, otherwise pass on up in 158 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha). 159 */ 160static int rx_copybreak = 100; 161 162#define PKT_BUF_SZ 1536 163#define MAX_MC_CNT 64 164 165#define I596_TOTAL_SIZE 17 166 167#define I596_NULL ((void *)0xffffffff) 168 169#define CMD_EOL 0x8000 /* The last command of the list, stop. */ 170#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */ 171#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */ 172 173#define CMD_FLEX 0x0008 /* Enable flexible memory model */ 174 175enum commands { 176 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3, 177 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7 178}; 179 180#define STAT_C 0x8000 /* Set to 0 after execution */ 181#define STAT_B 0x4000 /* Command being executed */ 182#define STAT_OK 0x2000 /* Command executed ok */ 183#define STAT_A 0x1000 /* Command aborted */ 184 185#define CUC_START 0x0100 186#define CUC_RESUME 0x0200 187#define CUC_SUSPEND 0x0300 188#define CUC_ABORT 0x0400 189#define RX_START 0x0010 190#define RX_RESUME 0x0020 191#define RX_SUSPEND 0x0030 192#define RX_ABORT 0x0040 193 194#define TX_TIMEOUT 5 195 196 197struct i596_reg { 198 unsigned short porthi; 199 unsigned short portlo; 200 unsigned long ca; 201}; 202 203#define EOF 0x8000 204#define SIZE_MASK 0x3fff 205 206struct i596_tbd { 207 unsigned short size; 208 unsigned short pad; 209 struct i596_tbd *next; 210 char *data; 211}; 212 213/* The command structure has two 'next' pointers; v_next is the address of 214 * the next command as seen by the CPU, b_next is the address of the next 215 * command as seen by the 82596. The b_next pointer, as used by the 82596 216 * always references the status field of the next command, rather than the 217 * v_next field, because the 82596 is unaware of v_next. It may seem more 218 * logical to put v_next at the end of the structure, but we cannot do that 219 * because the 82596 expects other fields to be there, depending on command 220 * type. 221 */ 222 223struct i596_cmd { 224 struct i596_cmd *v_next; /* Address from CPUs viewpoint */ 225 unsigned short status; 226 unsigned short command; 227 struct i596_cmd *b_next; /* Address from i596 viewpoint */ 228}; 229 230struct tx_cmd { 231 struct i596_cmd cmd; 232 struct i596_tbd *tbd; 233 unsigned short size; 234 unsigned short pad; 235 struct sk_buff *skb; /* So we can free it after tx */ 236}; 237 238struct tdr_cmd { 239 struct i596_cmd cmd; 240 unsigned short status; 241 unsigned short pad; 242}; 243 244struct mc_cmd { 245 struct i596_cmd cmd; 246 short mc_cnt; 247 char mc_addrs[MAX_MC_CNT*6]; 248}; 249 250struct sa_cmd { 251 struct i596_cmd cmd; 252 char eth_addr[8]; 253}; 254 255struct cf_cmd { 256 struct i596_cmd cmd; 257 char i596_config[16]; 258}; 259 260struct i596_rfd { 261 unsigned short stat; 262 unsigned short cmd; 263 struct i596_rfd *b_next; /* Address from i596 viewpoint */ 264 struct i596_rbd *rbd; 265 unsigned short count; 266 unsigned short size; 267 struct i596_rfd *v_next; /* Address from CPUs viewpoint */ 268 struct i596_rfd *v_prev; 269}; 270 271struct i596_rbd { 272 unsigned short count; 273 unsigned short zero1; 274 struct i596_rbd *b_next; 275 unsigned char *b_data; /* Address from i596 viewpoint */ 276 unsigned short size; 277 unsigned short zero2; 278 struct sk_buff *skb; 279 struct i596_rbd *v_next; 280 struct i596_rbd *b_addr; /* This rbd addr from i596 view */ 281 unsigned char *v_data; /* Address from CPUs viewpoint */ 282}; 283 284#define TX_RING_SIZE 64 285#define RX_RING_SIZE 16 286 287struct i596_scb { 288 unsigned short status; 289 unsigned short command; 290 struct i596_cmd *cmd; 291 struct i596_rfd *rfd; 292 unsigned long crc_err; 293 unsigned long align_err; 294 unsigned long resource_err; 295 unsigned long over_err; 296 unsigned long rcvdt_err; 297 unsigned long short_err; 298 unsigned short t_on; 299 unsigned short t_off; 300}; 301 302struct i596_iscp { 303 unsigned long stat; 304 struct i596_scb *scb; 305}; 306 307struct i596_scp { 308 unsigned long sysbus; 309 unsigned long pad; 310 struct i596_iscp *iscp; 311}; 312 313struct i596_private { 314 volatile struct i596_scp scp; 315 volatile struct i596_iscp iscp; 316 volatile struct i596_scb scb; 317 struct sa_cmd sa_cmd; 318 struct cf_cmd cf_cmd; 319 struct tdr_cmd tdr_cmd; 320 struct mc_cmd mc_cmd; 321 unsigned long stat; 322 int last_restart __attribute__((aligned(4))); 323 struct i596_rfd *rfd_head; 324 struct i596_rbd *rbd_head; 325 struct i596_cmd *cmd_tail; 326 struct i596_cmd *cmd_head; 327 int cmd_backlog; 328 unsigned long last_cmd; 329 struct i596_rfd rfds[RX_RING_SIZE]; 330 struct i596_rbd rbds[RX_RING_SIZE]; 331 struct tx_cmd tx_cmds[TX_RING_SIZE]; 332 struct i596_tbd tbds[TX_RING_SIZE]; 333 int next_tx_cmd; 334 spinlock_t lock; 335}; 336 337static char init_setup[] = 338{ 339 0x8E, /* length, prefetch on */ 340 0xC8, /* fifo to 8, monitor off */ 341#ifdef CONFIG_VME 342 0xc0, /* don't save bad frames */ 343#else 344 0x80, /* don't save bad frames */ 345#endif 346 0x2E, /* No source address insertion, 8 byte preamble */ 347 0x00, /* priority and backoff defaults */ 348 0x60, /* interframe spacing */ 349 0x00, /* slot time LSB */ 350 0xf2, /* slot time and retries */ 351 0x00, /* promiscuous mode */ 352 0x00, /* collision detect */ 353 0x40, /* minimum frame length */ 354 0xff, 355 0x00, 356 0x7f /* *multi IA */ }; 357 358static int i596_open(struct net_device *dev); 359static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev); 360static irqreturn_t i596_interrupt(int irq, void *dev_id); 361static int i596_close(struct net_device *dev); 362static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd); 363static void i596_tx_timeout (struct net_device *dev); 364static void print_eth(unsigned char *buf, char *str); 365static void set_multicast_list(struct net_device *dev); 366 367static int rx_ring_size = RX_RING_SIZE; 368static int ticks_limit = 25; 369static int max_cmd_backlog = TX_RING_SIZE-1; 370 371 372static inline void CA(struct net_device *dev) 373{ 374#ifdef ENABLE_MVME16x_NET 375 if (MACH_IS_MVME16x) { 376 ((struct i596_reg *) dev->base_addr)->ca = 1; 377 } 378#endif 379#ifdef ENABLE_BVME6000_NET 380 if (MACH_IS_BVME6000) { 381 volatile u32 i; 382 383 i = *(volatile u32 *) (dev->base_addr); 384 } 385#endif 386#ifdef ENABLE_APRICOT 387 if (MACH_IS_APRICOT) { 388 outw(0, (short) (dev->base_addr) + 4); 389 } 390#endif 391} 392 393 394static inline void MPU_PORT(struct net_device *dev, int c, volatile void *x) 395{ 396#ifdef ENABLE_MVME16x_NET 397 if (MACH_IS_MVME16x) { 398 struct i596_reg *p = (struct i596_reg *) (dev->base_addr); 399 p->porthi = ((c) | (u32) (x)) & 0xffff; 400 p->portlo = ((c) | (u32) (x)) >> 16; 401 } 402#endif 403#ifdef ENABLE_BVME6000_NET 404 if (MACH_IS_BVME6000) { 405 u32 v = (u32) (c) | (u32) (x); 406 v = ((u32) (v) << 16) | ((u32) (v) >> 16); 407 *(volatile u32 *) dev->base_addr = v; 408 udelay(1); 409 *(volatile u32 *) dev->base_addr = v; 410 } 411#endif 412} 413 414 415static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) 416{ 417 while (--delcnt && lp->iscp.stat) 418 udelay(10); 419 if (!delcnt) { 420 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", 421 dev->name, str, lp->scb.status, lp->scb.command); 422 return -1; 423 } 424 else 425 return 0; 426} 427 428 429static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str) 430{ 431 while (--delcnt && lp->scb.command) 432 udelay(10); 433 if (!delcnt) { 434 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n", 435 dev->name, str, lp->scb.status, lp->scb.command); 436 return -1; 437 } 438 else 439 return 0; 440} 441 442 443static inline int wait_cfg(struct net_device *dev, struct i596_cmd *cmd, int delcnt, char *str) 444{ 445 volatile struct i596_cmd *c = cmd; 446 447 while (--delcnt && c->command) 448 udelay(10); 449 if (!delcnt) { 450 printk(KERN_ERR "%s: %s.\n", dev->name, str); 451 return -1; 452 } 453 else 454 return 0; 455} 456 457 458static void i596_display_data(struct net_device *dev) 459{ 460 struct i596_private *lp = dev->ml_priv; 461 struct i596_cmd *cmd; 462 struct i596_rfd *rfd; 463 struct i596_rbd *rbd; 464 465 printk(KERN_ERR "lp and scp at %p, .sysbus = %08lx, .iscp = %p\n", 466 &lp->scp, lp->scp.sysbus, lp->scp.iscp); 467 printk(KERN_ERR "iscp at %p, iscp.stat = %08lx, .scb = %p\n", 468 &lp->iscp, lp->iscp.stat, lp->iscp.scb); 469 printk(KERN_ERR "scb at %p, scb.status = %04x, .command = %04x," 470 " .cmd = %p, .rfd = %p\n", 471 &lp->scb, lp->scb.status, lp->scb.command, 472 lp->scb.cmd, lp->scb.rfd); 473 printk(KERN_ERR " errors: crc %lx, align %lx, resource %lx," 474 " over %lx, rcvdt %lx, short %lx\n", 475 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err, 476 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err); 477 cmd = lp->cmd_head; 478 while (cmd != I596_NULL) { 479 printk(KERN_ERR "cmd at %p, .status = %04x, .command = %04x, .b_next = %p\n", 480 cmd, cmd->status, cmd->command, cmd->b_next); 481 cmd = cmd->v_next; 482 } 483 rfd = lp->rfd_head; 484 printk(KERN_ERR "rfd_head = %p\n", rfd); 485 do { 486 printk(KERN_ERR " %p .stat %04x, .cmd %04x, b_next %p, rbd %p," 487 " count %04x\n", 488 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd, 489 rfd->count); 490 rfd = rfd->v_next; 491 } while (rfd != lp->rfd_head); 492 rbd = lp->rbd_head; 493 printk(KERN_ERR "rbd_head = %p\n", rbd); 494 do { 495 printk(KERN_ERR " %p .count %04x, b_next %p, b_data %p, size %04x\n", 496 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size); 497 rbd = rbd->v_next; 498 } while (rbd != lp->rbd_head); 499} 500 501 502#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET) 503static irqreturn_t i596_error(int irq, void *dev_id) 504{ 505 struct net_device *dev = dev_id; 506#ifdef ENABLE_MVME16x_NET 507 if (MACH_IS_MVME16x) { 508 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; 509 510 pcc2[0x28] = 1; 511 pcc2[0x2b] = 0x1d; 512 } 513#endif 514#ifdef ENABLE_BVME6000_NET 515 if (MACH_IS_BVME6000) { 516 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; 517 518 *ethirq = 1; 519 *ethirq = 3; 520 } 521#endif 522 printk(KERN_ERR "%s: Error interrupt\n", dev->name); 523 i596_display_data(dev); 524 return IRQ_HANDLED; 525} 526#endif 527 528static inline void init_rx_bufs(struct net_device *dev) 529{ 530 struct i596_private *lp = dev->ml_priv; 531 int i; 532 struct i596_rfd *rfd; 533 struct i596_rbd *rbd; 534 535 /* First build the Receive Buffer Descriptor List */ 536 537 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { 538 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ); 539 540 if (skb == NULL) 541 panic("82596: alloc_skb() failed"); 542 skb->dev = dev; 543 rbd->v_next = rbd+1; 544 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1)); 545 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd)); 546 rbd->skb = skb; 547 rbd->v_data = skb->data; 548 rbd->b_data = WSWAPchar(virt_to_bus(skb->data)); 549 rbd->size = PKT_BUF_SZ; 550#ifdef __mc68000__ 551 cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ); 552#endif 553 } 554 lp->rbd_head = lp->rbds; 555 rbd = lp->rbds + rx_ring_size - 1; 556 rbd->v_next = lp->rbds; 557 rbd->b_next = WSWAPrbd(virt_to_bus(lp->rbds)); 558 559 /* Now build the Receive Frame Descriptor List */ 560 561 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) { 562 rfd->rbd = I596_NULL; 563 rfd->v_next = rfd+1; 564 rfd->v_prev = rfd-1; 565 rfd->b_next = WSWAPrfd(virt_to_bus(rfd+1)); 566 rfd->cmd = CMD_FLEX; 567 } 568 lp->rfd_head = lp->rfds; 569 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds)); 570 rfd = lp->rfds; 571 rfd->rbd = lp->rbd_head; 572 rfd->v_prev = lp->rfds + rx_ring_size - 1; 573 rfd = lp->rfds + rx_ring_size - 1; 574 rfd->v_next = lp->rfds; 575 rfd->b_next = WSWAPrfd(virt_to_bus(lp->rfds)); 576 rfd->cmd = CMD_EOL|CMD_FLEX; 577} 578 579static inline void remove_rx_bufs(struct net_device *dev) 580{ 581 struct i596_private *lp = dev->ml_priv; 582 struct i596_rbd *rbd; 583 int i; 584 585 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) { 586 if (rbd->skb == NULL) 587 break; 588 dev_kfree_skb(rbd->skb); 589 } 590} 591 592 593static void rebuild_rx_bufs(struct net_device *dev) 594{ 595 struct i596_private *lp = dev->ml_priv; 596 int i; 597 598 /* Ensure rx frame/buffer descriptors are tidy */ 599 600 for (i = 0; i < rx_ring_size; i++) { 601 lp->rfds[i].rbd = I596_NULL; 602 lp->rfds[i].cmd = CMD_FLEX; 603 } 604 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX; 605 lp->rfd_head = lp->rfds; 606 lp->scb.rfd = WSWAPrfd(virt_to_bus(lp->rfds)); 607 lp->rbd_head = lp->rbds; 608 lp->rfds[0].rbd = WSWAPrbd(virt_to_bus(lp->rbds)); 609} 610 611 612static int init_i596_mem(struct net_device *dev) 613{ 614 struct i596_private *lp = dev->ml_priv; 615#if !defined(ENABLE_MVME16x_NET) && !defined(ENABLE_BVME6000_NET) || defined(ENABLE_APRICOT) 616 short ioaddr = dev->base_addr; 617#endif 618 unsigned long flags; 619 620 MPU_PORT(dev, PORT_RESET, NULL); 621 622 udelay(100); /* Wait 100us - seems to help */ 623 624#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET) 625#ifdef ENABLE_MVME16x_NET 626 if (MACH_IS_MVME16x) { 627 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; 628 629 /* Disable all ints for now */ 630 pcc2[0x28] = 1; 631 pcc2[0x2a] = 0x48; 632 /* Following disables snooping. Snooping is not required 633 * as we make appropriate use of non-cached pages for 634 * shared data, and cache_push/cache_clear. 635 */ 636 pcc2[0x2b] = 0x08; 637 } 638#endif 639#ifdef ENABLE_BVME6000_NET 640 if (MACH_IS_BVME6000) { 641 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; 642 643 *ethirq = 1; 644 } 645#endif 646 647 /* change the scp address */ 648 649 MPU_PORT(dev, PORT_ALTSCP, (void *)virt_to_bus((void *)&lp->scp)); 650 651#elif defined(ENABLE_APRICOT) 652 653 { 654 u32 scp = virt_to_bus(&lp->scp); 655 656 /* change the scp address */ 657 outw(0, ioaddr); 658 outw(0, ioaddr); 659 outb(4, ioaddr + 0xf); 660 outw(scp | 2, ioaddr); 661 outw(scp >> 16, ioaddr); 662 } 663#endif 664 665 lp->last_cmd = jiffies; 666 667#ifdef ENABLE_MVME16x_NET 668 if (MACH_IS_MVME16x) 669 lp->scp.sysbus = 0x00000054; 670#endif 671#ifdef ENABLE_BVME6000_NET 672 if (MACH_IS_BVME6000) 673 lp->scp.sysbus = 0x0000004c; 674#endif 675#ifdef ENABLE_APRICOT 676 if (MACH_IS_APRICOT) 677 lp->scp.sysbus = 0x00440000; 678#endif 679 680 lp->scp.iscp = WSWAPiscp(virt_to_bus((void *)&lp->iscp)); 681 lp->iscp.scb = WSWAPscb(virt_to_bus((void *)&lp->scb)); 682 lp->iscp.stat = ISCP_BUSY; 683 lp->cmd_backlog = 0; 684 685 lp->cmd_head = lp->scb.cmd = I596_NULL; 686 687#ifdef ENABLE_BVME6000_NET 688 if (MACH_IS_BVME6000) { 689 lp->scb.t_on = 7 * 25; 690 lp->scb.t_off = 1 * 25; 691 } 692#endif 693 694 DEB(DEB_INIT,printk(KERN_DEBUG "%s: starting i82596.\n", dev->name)); 695 696#if defined(ENABLE_APRICOT) 697 (void) inb(ioaddr + 0x10); 698 outb(4, ioaddr + 0xf); 699#endif 700 CA(dev); 701 702 if (wait_istat(dev,lp,1000,"initialization timed out")) 703 goto failed; 704 DEB(DEB_INIT,printk(KERN_DEBUG "%s: i82596 initialization successful\n", dev->name)); 705 706 /* Ensure rx frame/buffer descriptors are tidy */ 707 rebuild_rx_bufs(dev); 708 lp->scb.command = 0; 709 710#ifdef ENABLE_MVME16x_NET 711 if (MACH_IS_MVME16x) { 712 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; 713 714 /* Enable ints, etc. now */ 715 pcc2[0x2a] = 0x55; /* Edge sensitive */ 716 pcc2[0x2b] = 0x15; 717 } 718#endif 719#ifdef ENABLE_BVME6000_NET 720 if (MACH_IS_BVME6000) { 721 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; 722 723 *ethirq = 3; 724 } 725#endif 726 727 728 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdConfigure\n", dev->name)); 729 memcpy(lp->cf_cmd.i596_config, init_setup, 14); 730 lp->cf_cmd.cmd.command = CmdConfigure; 731 i596_add_cmd(dev, &lp->cf_cmd.cmd); 732 733 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name)); 734 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6); 735 lp->sa_cmd.cmd.command = CmdSASetup; 736 i596_add_cmd(dev, &lp->sa_cmd.cmd); 737 738 DEB(DEB_INIT,printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name)); 739 lp->tdr_cmd.cmd.command = CmdTDR; 740 i596_add_cmd(dev, &lp->tdr_cmd.cmd); 741 742 spin_lock_irqsave (&lp->lock, flags); 743 744 if (wait_cmd(dev,lp,1000,"timed out waiting to issue RX_START")) { 745 spin_unlock_irqrestore (&lp->lock, flags); 746 goto failed; 747 } 748 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name)); 749 lp->scb.command = RX_START; 750 CA(dev); 751 752 spin_unlock_irqrestore (&lp->lock, flags); 753 754 if (wait_cmd(dev,lp,1000,"RX_START not processed")) 755 goto failed; 756 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Receive unit started OK\n", dev->name)); 757 return 0; 758 759failed: 760 printk(KERN_CRIT "%s: Failed to initialise 82596\n", dev->name); 761 MPU_PORT(dev, PORT_RESET, NULL); 762 return -1; 763} 764 765static inline int i596_rx(struct net_device *dev) 766{ 767 struct i596_private *lp = dev->ml_priv; 768 struct i596_rfd *rfd; 769 struct i596_rbd *rbd; 770 int frames = 0; 771 772 DEB(DEB_RXFRAME,printk(KERN_DEBUG "i596_rx(), rfd_head %p, rbd_head %p\n", 773 lp->rfd_head, lp->rbd_head)); 774 775 rfd = lp->rfd_head; /* Ref next frame to check */ 776 777 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */ 778 if (rfd->rbd == I596_NULL) 779 rbd = I596_NULL; 780 else if (rfd->rbd == lp->rbd_head->b_addr) 781 rbd = lp->rbd_head; 782 else { 783 printk(KERN_CRIT "%s: rbd chain broken!\n", dev->name); 784 /* XXX Now what? */ 785 rbd = I596_NULL; 786 } 787 DEB(DEB_RXFRAME, printk(KERN_DEBUG " rfd %p, rfd.rbd %p, rfd.stat %04x\n", 788 rfd, rfd->rbd, rfd->stat)); 789 790 if (rbd != I596_NULL && ((rfd->stat) & STAT_OK)) { 791 /* a good frame */ 792 int pkt_len = rbd->count & 0x3fff; 793 struct sk_buff *skb = rbd->skb; 794 int rx_in_place = 0; 795 796 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received")); 797 frames++; 798 799 /* Check if the packet is long enough to just accept 800 * without copying to a properly sized skbuff. 801 */ 802 803 if (pkt_len > rx_copybreak) { 804 struct sk_buff *newskb; 805 806 /* Get fresh skbuff to replace filled one. */ 807 newskb = dev_alloc_skb(PKT_BUF_SZ); 808 if (newskb == NULL) { 809 skb = NULL; /* drop pkt */ 810 goto memory_squeeze; 811 } 812 /* Pass up the skb already on the Rx ring. */ 813 skb_put(skb, pkt_len); 814 rx_in_place = 1; 815 rbd->skb = newskb; 816 newskb->dev = dev; 817 rbd->v_data = newskb->data; 818 rbd->b_data = WSWAPchar(virt_to_bus(newskb->data)); 819#ifdef __mc68000__ 820 cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ); 821#endif 822 } 823 else 824 skb = dev_alloc_skb(pkt_len + 2); 825memory_squeeze: 826 if (skb == NULL) { 827 /* XXX tulip.c can defer packets here!! */ 828 printk(KERN_WARNING "%s: i596_rx Memory squeeze, dropping packet.\n", dev->name); 829 dev->stats.rx_dropped++; 830 } 831 else { 832 if (!rx_in_place) { 833 /* 16 byte align the data fields */ 834 skb_reserve(skb, 2); 835 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len); 836 } 837 skb->protocol=eth_type_trans(skb,dev); 838 skb->len = pkt_len; 839#ifdef __mc68000__ 840 cache_clear(virt_to_phys(rbd->skb->data), 841 pkt_len); 842#endif 843 netif_rx(skb); 844 dev->stats.rx_packets++; 845 dev->stats.rx_bytes+=pkt_len; 846 } 847 } 848 else { 849 DEB(DEB_ERRORS, printk(KERN_DEBUG "%s: Error, rfd.stat = 0x%04x\n", 850 dev->name, rfd->stat)); 851 dev->stats.rx_errors++; 852 if ((rfd->stat) & 0x0001) 853 dev->stats.collisions++; 854 if ((rfd->stat) & 0x0080) 855 dev->stats.rx_length_errors++; 856 if ((rfd->stat) & 0x0100) 857 dev->stats.rx_over_errors++; 858 if ((rfd->stat) & 0x0200) 859 dev->stats.rx_fifo_errors++; 860 if ((rfd->stat) & 0x0400) 861 dev->stats.rx_frame_errors++; 862 if ((rfd->stat) & 0x0800) 863 dev->stats.rx_crc_errors++; 864 if ((rfd->stat) & 0x1000) 865 dev->stats.rx_length_errors++; 866 } 867 868 /* Clear the buffer descriptor count and EOF + F flags */ 869 870 if (rbd != I596_NULL && (rbd->count & 0x4000)) { 871 rbd->count = 0; 872 lp->rbd_head = rbd->v_next; 873 } 874 875 /* Tidy the frame descriptor, marking it as end of list */ 876 877 rfd->rbd = I596_NULL; 878 rfd->stat = 0; 879 rfd->cmd = CMD_EOL|CMD_FLEX; 880 rfd->count = 0; 881 882 /* Remove end-of-list from old end descriptor */ 883 884 rfd->v_prev->cmd = CMD_FLEX; 885 886 /* Update record of next frame descriptor to process */ 887 888 lp->scb.rfd = rfd->b_next; 889 lp->rfd_head = rfd->v_next; 890 rfd = lp->rfd_head; 891 } 892 893 DEB(DEB_RXFRAME,printk(KERN_DEBUG "frames %d\n", frames)); 894 895 return 0; 896} 897 898 899static void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp) 900{ 901 struct i596_cmd *ptr; 902 903 while (lp->cmd_head != I596_NULL) { 904 ptr = lp->cmd_head; 905 lp->cmd_head = ptr->v_next; 906 lp->cmd_backlog--; 907 908 switch ((ptr->command) & 0x7) { 909 case CmdTx: 910 { 911 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; 912 struct sk_buff *skb = tx_cmd->skb; 913 914 dev_kfree_skb(skb); 915 916 dev->stats.tx_errors++; 917 dev->stats.tx_aborted_errors++; 918 919 ptr->v_next = ptr->b_next = I596_NULL; 920 tx_cmd->cmd.command = 0; /* Mark as free */ 921 break; 922 } 923 default: 924 ptr->v_next = ptr->b_next = I596_NULL; 925 } 926 } 927 928 wait_cmd(dev,lp,100,"i596_cleanup_cmd timed out"); 929 lp->scb.cmd = I596_NULL; 930} 931 932static void i596_reset(struct net_device *dev, struct i596_private *lp, 933 int ioaddr) 934{ 935 unsigned long flags; 936 937 DEB(DEB_RESET,printk(KERN_DEBUG "i596_reset\n")); 938 939 spin_lock_irqsave (&lp->lock, flags); 940 941 wait_cmd(dev,lp,100,"i596_reset timed out"); 942 943 netif_stop_queue(dev); 944 945 lp->scb.command = CUC_ABORT | RX_ABORT; 946 CA(dev); 947 948 /* wait for shutdown */ 949 wait_cmd(dev,lp,1000,"i596_reset 2 timed out"); 950 spin_unlock_irqrestore (&lp->lock, flags); 951 952 i596_cleanup_cmd(dev,lp); 953 i596_rx(dev); 954 955 netif_start_queue(dev); 956 init_i596_mem(dev); 957} 958 959static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd) 960{ 961 struct i596_private *lp = dev->ml_priv; 962 int ioaddr = dev->base_addr; 963 unsigned long flags; 964 965 DEB(DEB_ADDCMD,printk(KERN_DEBUG "i596_add_cmd\n")); 966 967 cmd->status = 0; 968 cmd->command |= (CMD_EOL | CMD_INTR); 969 cmd->v_next = cmd->b_next = I596_NULL; 970 971 spin_lock_irqsave (&lp->lock, flags); 972 973 if (lp->cmd_head != I596_NULL) { 974 lp->cmd_tail->v_next = cmd; 975 lp->cmd_tail->b_next = WSWAPcmd(virt_to_bus(&cmd->status)); 976 } else { 977 lp->cmd_head = cmd; 978 wait_cmd(dev,lp,100,"i596_add_cmd timed out"); 979 lp->scb.cmd = WSWAPcmd(virt_to_bus(&cmd->status)); 980 lp->scb.command = CUC_START; 981 CA(dev); 982 } 983 lp->cmd_tail = cmd; 984 lp->cmd_backlog++; 985 986 spin_unlock_irqrestore (&lp->lock, flags); 987 988 if (lp->cmd_backlog > max_cmd_backlog) { 989 unsigned long tickssofar = jiffies - lp->last_cmd; 990 991 if (tickssofar < ticks_limit) 992 return; 993 994 printk(KERN_NOTICE "%s: command unit timed out, status resetting.\n", dev->name); 995 996 i596_reset(dev, lp, ioaddr); 997 } 998} 999 1000static int i596_open(struct net_device *dev) 1001{ 1002 int res = 0; 1003 1004 DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq)); 1005 1006 if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) { 1007 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq); 1008 return -EAGAIN; 1009 } 1010#ifdef ENABLE_MVME16x_NET 1011 if (MACH_IS_MVME16x) { 1012 if (request_irq(0x56, i596_error, 0, "i82596_error", dev)) 1013 return -EAGAIN; 1014 } 1015#endif 1016 init_rx_bufs(dev); 1017 1018 netif_start_queue(dev); 1019 1020 /* Initialize the 82596 memory */ 1021 if (init_i596_mem(dev)) { 1022 res = -EAGAIN; 1023 free_irq(dev->irq, dev); 1024 } 1025 1026 return res; 1027} 1028 1029static void i596_tx_timeout (struct net_device *dev) 1030{ 1031 struct i596_private *lp = dev->ml_priv; 1032 int ioaddr = dev->base_addr; 1033 1034 /* Transmitter timeout, serious problems. */ 1035 DEB(DEB_ERRORS,printk(KERN_ERR "%s: transmit timed out, status resetting.\n", 1036 dev->name)); 1037 1038 dev->stats.tx_errors++; 1039 1040 /* Try to restart the adaptor */ 1041 if (lp->last_restart == dev->stats.tx_packets) { 1042 DEB(DEB_ERRORS,printk(KERN_ERR "Resetting board.\n")); 1043 /* Shutdown and restart */ 1044 i596_reset (dev, lp, ioaddr); 1045 } else { 1046 /* Issue a channel attention signal */ 1047 DEB(DEB_ERRORS,printk(KERN_ERR "Kicking board.\n")); 1048 lp->scb.command = CUC_START | RX_START; 1049 CA (dev); 1050 lp->last_restart = dev->stats.tx_packets; 1051 } 1052 1053 dev->trans_start = jiffies; 1054 netif_wake_queue (dev); 1055} 1056 1057static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev) 1058{ 1059 struct i596_private *lp = dev->ml_priv; 1060 struct tx_cmd *tx_cmd; 1061 struct i596_tbd *tbd; 1062 short length = skb->len; 1063 dev->trans_start = jiffies; 1064 1065 DEB(DEB_STARTTX,printk(KERN_DEBUG "%s: i596_start_xmit(%x,%p) called\n", 1066 dev->name, skb->len, skb->data)); 1067 1068 if (skb->len < ETH_ZLEN) { 1069 if (skb_padto(skb, ETH_ZLEN)) 1070 return NETDEV_TX_OK; 1071 length = ETH_ZLEN; 1072 } 1073 netif_stop_queue(dev); 1074 1075 tx_cmd = lp->tx_cmds + lp->next_tx_cmd; 1076 tbd = lp->tbds + lp->next_tx_cmd; 1077 1078 if (tx_cmd->cmd.command) { 1079 printk(KERN_NOTICE "%s: xmit ring full, dropping packet.\n", 1080 dev->name); 1081 dev->stats.tx_dropped++; 1082 1083 dev_kfree_skb(skb); 1084 } else { 1085 if (++lp->next_tx_cmd == TX_RING_SIZE) 1086 lp->next_tx_cmd = 0; 1087 tx_cmd->tbd = WSWAPtbd(virt_to_bus(tbd)); 1088 tbd->next = I596_NULL; 1089 1090 tx_cmd->cmd.command = CMD_FLEX | CmdTx; 1091 tx_cmd->skb = skb; 1092 1093 tx_cmd->pad = 0; 1094 tx_cmd->size = 0; 1095 tbd->pad = 0; 1096 tbd->size = EOF | length; 1097 1098 tbd->data = WSWAPchar(virt_to_bus(skb->data)); 1099 1100#ifdef __mc68000__ 1101 cache_push(virt_to_phys(skb->data), length); 1102#endif 1103 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued")); 1104 i596_add_cmd(dev, &tx_cmd->cmd); 1105 1106 dev->stats.tx_packets++; 1107 dev->stats.tx_bytes += length; 1108 } 1109 1110 netif_start_queue(dev); 1111 1112 return NETDEV_TX_OK; 1113} 1114 1115static void print_eth(unsigned char *add, char *str) 1116{ 1117 printk(KERN_DEBUG "i596 0x%p, %pM --> %pM %02X%02X, %s\n", 1118 add, add + 6, add, add[12], add[13], str); 1119} 1120 1121static int io = 0x300; 1122static int irq = 10; 1123 1124static const struct net_device_ops i596_netdev_ops = { 1125 .ndo_open = i596_open, 1126 .ndo_stop = i596_close, 1127 .ndo_start_xmit = i596_start_xmit, 1128 .ndo_set_multicast_list = set_multicast_list, 1129 .ndo_tx_timeout = i596_tx_timeout, 1130 .ndo_change_mtu = eth_change_mtu, 1131 .ndo_set_mac_address = eth_mac_addr, 1132 .ndo_validate_addr = eth_validate_addr, 1133}; 1134 1135struct net_device * __init i82596_probe(int unit) 1136{ 1137 struct net_device *dev; 1138 int i; 1139 struct i596_private *lp; 1140 char eth_addr[8]; 1141 static int probed; 1142 int err; 1143 1144 if (probed) 1145 return ERR_PTR(-ENODEV); 1146 probed++; 1147 1148 dev = alloc_etherdev(0); 1149 if (!dev) 1150 return ERR_PTR(-ENOMEM); 1151 1152 if (unit >= 0) { 1153 sprintf(dev->name, "eth%d", unit); 1154 netdev_boot_setup_check(dev); 1155 } else { 1156 dev->base_addr = io; 1157 dev->irq = irq; 1158 } 1159 1160#ifdef ENABLE_MVME16x_NET 1161 if (MACH_IS_MVME16x) { 1162 if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) { 1163 printk(KERN_NOTICE "Ethernet probe disabled - chip not present\n"); 1164 err = -ENODEV; 1165 goto out; 1166 } 1167 memcpy(eth_addr, (void *) 0xfffc1f2c, 6); /* YUCK! Get addr from NOVRAM */ 1168 dev->base_addr = MVME_I596_BASE; 1169 dev->irq = (unsigned) MVME16x_IRQ_I596; 1170 goto found; 1171 } 1172#endif 1173#ifdef ENABLE_BVME6000_NET 1174 if (MACH_IS_BVME6000) { 1175 volatile unsigned char *rtc = (unsigned char *) BVME_RTC_BASE; 1176 unsigned char msr = rtc[3]; 1177 int i; 1178 1179 rtc[3] |= 0x80; 1180 for (i = 0; i < 6; i++) 1181 eth_addr[i] = rtc[i * 4 + 7]; /* Stored in RTC RAM at offset 1 */ 1182 rtc[3] = msr; 1183 dev->base_addr = BVME_I596_BASE; 1184 dev->irq = (unsigned) BVME_IRQ_I596; 1185 goto found; 1186 } 1187#endif 1188#ifdef ENABLE_APRICOT 1189 { 1190 int checksum = 0; 1191 int ioaddr = 0x300; 1192 1193 /* this is easy the ethernet interface can only be at 0x300 */ 1194 /* first check nothing is already registered here */ 1195 1196 if (!request_region(ioaddr, I596_TOTAL_SIZE, DRV_NAME)) { 1197 printk(KERN_ERR "82596: IO address 0x%04x in use\n", ioaddr); 1198 err = -EBUSY; 1199 goto out; 1200 } 1201 1202 dev->base_addr = ioaddr; 1203 1204 for (i = 0; i < 8; i++) { 1205 eth_addr[i] = inb(ioaddr + 8 + i); 1206 checksum += eth_addr[i]; 1207 } 1208 1209 /* checksum is a multiple of 0x100, got this wrong first time 1210 some machines have 0x100, some 0x200. The DOS driver doesn't 1211 even bother with the checksum. 1212 Some other boards trip the checksum.. but then appear as 1213 ether address 0. Trap these - AC */ 1214 1215 if ((checksum % 0x100) || 1216 (memcmp(eth_addr, "\x00\x00\x49", 3) != 0)) { 1217 err = -ENODEV; 1218 goto out1; 1219 } 1220 1221 dev->irq = 10; 1222 goto found; 1223 } 1224#endif 1225 err = -ENODEV; 1226 goto out; 1227 1228found: 1229 dev->mem_start = (int)__get_free_pages(GFP_ATOMIC, 0); 1230 if (!dev->mem_start) { 1231 err = -ENOMEM; 1232 goto out1; 1233 } 1234 1235 DEB(DEB_PROBE,printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr)); 1236 1237 for (i = 0; i < 6; i++) 1238 DEB(DEB_PROBE,printk(" %2.2X", dev->dev_addr[i] = eth_addr[i])); 1239 1240 DEB(DEB_PROBE,printk(" IRQ %d.\n", dev->irq)); 1241 1242 DEB(DEB_PROBE,printk(KERN_INFO "%s", version)); 1243 1244 /* The 82596-specific entries in the device structure. */ 1245 dev->netdev_ops = &i596_netdev_ops; 1246 dev->watchdog_timeo = TX_TIMEOUT; 1247 1248 dev->ml_priv = (void *)(dev->mem_start); 1249 1250 lp = dev->ml_priv; 1251 DEB(DEB_INIT,printk(KERN_DEBUG "%s: lp at 0x%08lx (%zd bytes), " 1252 "lp->scb at 0x%08lx\n", 1253 dev->name, (unsigned long)lp, 1254 sizeof(struct i596_private), (unsigned long)&lp->scb)); 1255 memset((void *) lp, 0, sizeof(struct i596_private)); 1256 1257#ifdef __mc68000__ 1258 cache_push(virt_to_phys((void *)(dev->mem_start)), 4096); 1259 cache_clear(virt_to_phys((void *)(dev->mem_start)), 4096); 1260 kernel_set_cachemode((void *)(dev->mem_start), 4096, IOMAP_NOCACHE_SER); 1261#endif 1262 lp->scb.command = 0; 1263 lp->scb.cmd = I596_NULL; 1264 lp->scb.rfd = I596_NULL; 1265 spin_lock_init(&lp->lock); 1266 1267 err = register_netdev(dev); 1268 if (err) 1269 goto out2; 1270 return dev; 1271out2: 1272#ifdef __mc68000__ 1273 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, 1274 * XXX which may be invalid (CONFIG_060_WRITETHROUGH) 1275 */ 1276 kernel_set_cachemode((void *)(dev->mem_start), 4096, 1277 IOMAP_FULL_CACHING); 1278#endif 1279 free_page ((u32)(dev->mem_start)); 1280out1: 1281#ifdef ENABLE_APRICOT 1282 release_region(dev->base_addr, I596_TOTAL_SIZE); 1283#endif 1284out: 1285 free_netdev(dev); 1286 return ERR_PTR(err); 1287} 1288 1289static irqreturn_t i596_interrupt(int irq, void *dev_id) 1290{ 1291 struct net_device *dev = dev_id; 1292 struct i596_private *lp; 1293 short ioaddr; 1294 unsigned short status, ack_cmd = 0; 1295 int handled = 0; 1296 1297#ifdef ENABLE_BVME6000_NET 1298 if (MACH_IS_BVME6000) { 1299 if (*(char *) BVME_LOCAL_IRQ_STAT & BVME_ETHERR) { 1300 i596_error(irq, dev_id); 1301 return IRQ_HANDLED; 1302 } 1303 } 1304#endif 1305 if (dev == NULL) { 1306 printk(KERN_ERR "i596_interrupt(): irq %d for unknown device.\n", irq); 1307 return IRQ_NONE; 1308 } 1309 1310 ioaddr = dev->base_addr; 1311 lp = dev->ml_priv; 1312 1313 spin_lock (&lp->lock); 1314 1315 wait_cmd(dev,lp,100,"i596 interrupt, timeout"); 1316 status = lp->scb.status; 1317 1318 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt, IRQ %d, status %4.4x.\n", 1319 dev->name, irq, status)); 1320 1321 ack_cmd = status & 0xf000; 1322 1323 if ((status & 0x8000) || (status & 0x2000)) { 1324 struct i596_cmd *ptr; 1325 1326 handled = 1; 1327 if ((status & 0x8000)) 1328 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt completed command.\n", dev->name)); 1329 if ((status & 0x2000)) 1330 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700)); 1331 1332 while ((lp->cmd_head != I596_NULL) && (lp->cmd_head->status & STAT_C)) { 1333 ptr = lp->cmd_head; 1334 1335 DEB(DEB_STATUS,printk(KERN_DEBUG "cmd_head->status = %04x, ->command = %04x\n", 1336 lp->cmd_head->status, lp->cmd_head->command)); 1337 lp->cmd_head = ptr->v_next; 1338 lp->cmd_backlog--; 1339 1340 switch ((ptr->command) & 0x7) { 1341 case CmdTx: 1342 { 1343 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr; 1344 struct sk_buff *skb = tx_cmd->skb; 1345 1346 if ((ptr->status) & STAT_OK) { 1347 DEB(DEB_TXADDR,print_eth(skb->data, "tx-done")); 1348 } else { 1349 dev->stats.tx_errors++; 1350 if ((ptr->status) & 0x0020) 1351 dev->stats.collisions++; 1352 if (!((ptr->status) & 0x0040)) 1353 dev->stats.tx_heartbeat_errors++; 1354 if ((ptr->status) & 0x0400) 1355 dev->stats.tx_carrier_errors++; 1356 if ((ptr->status) & 0x0800) 1357 dev->stats.collisions++; 1358 if ((ptr->status) & 0x1000) 1359 dev->stats.tx_aborted_errors++; 1360 } 1361 1362 dev_kfree_skb_irq(skb); 1363 1364 tx_cmd->cmd.command = 0; /* Mark free */ 1365 break; 1366 } 1367 case CmdTDR: 1368 { 1369 unsigned short status = ((struct tdr_cmd *)ptr)->status; 1370 1371 if (status & 0x8000) { 1372 DEB(DEB_TDR,printk(KERN_INFO "%s: link ok.\n", dev->name)); 1373 } else { 1374 if (status & 0x4000) 1375 printk(KERN_ERR "%s: Transceiver problem.\n", dev->name); 1376 if (status & 0x2000) 1377 printk(KERN_ERR "%s: Termination problem.\n", dev->name); 1378 if (status & 0x1000) 1379 printk(KERN_ERR "%s: Short circuit.\n", dev->name); 1380 1381 DEB(DEB_TDR,printk(KERN_INFO "%s: Time %d.\n", dev->name, status & 0x07ff)); 1382 } 1383 break; 1384 } 1385 case CmdConfigure: 1386 case CmdMulticastList: 1387 /* Zap command so set_multicast_list() knows it is free */ 1388 ptr->command = 0; 1389 break; 1390 } 1391 ptr->v_next = ptr->b_next = I596_NULL; 1392 lp->last_cmd = jiffies; 1393 } 1394 1395 ptr = lp->cmd_head; 1396 while ((ptr != I596_NULL) && (ptr != lp->cmd_tail)) { 1397 ptr->command &= 0x1fff; 1398 ptr = ptr->v_next; 1399 } 1400 1401 if ((lp->cmd_head != I596_NULL)) 1402 ack_cmd |= CUC_START; 1403 lp->scb.cmd = WSWAPcmd(virt_to_bus(&lp->cmd_head->status)); 1404 } 1405 if ((status & 0x1000) || (status & 0x4000)) { 1406 if ((status & 0x4000)) 1407 DEB(DEB_INTS,printk(KERN_DEBUG "%s: i596 interrupt received a frame.\n", dev->name)); 1408 i596_rx(dev); 1409 /* Only RX_START if stopped - RGH 07-07-96 */ 1410 if (status & 0x1000) { 1411 if (netif_running(dev)) { 1412 DEB(DEB_ERRORS,printk(KERN_ERR "%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status)); 1413 ack_cmd |= RX_START; 1414 dev->stats.rx_errors++; 1415 dev->stats.rx_fifo_errors++; 1416 rebuild_rx_bufs(dev); 1417 } 1418 } 1419 } 1420 wait_cmd(dev,lp,100,"i596 interrupt, timeout"); 1421 lp->scb.command = ack_cmd; 1422 1423#ifdef ENABLE_MVME16x_NET 1424 if (MACH_IS_MVME16x) { 1425 /* Ack the interrupt */ 1426 1427 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; 1428 1429 pcc2[0x2a] |= 0x08; 1430 } 1431#endif 1432#ifdef ENABLE_BVME6000_NET 1433 if (MACH_IS_BVME6000) { 1434 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; 1435 1436 *ethirq = 1; 1437 *ethirq = 3; 1438 } 1439#endif 1440#ifdef ENABLE_APRICOT 1441 (void) inb(ioaddr + 0x10); 1442 outb(4, ioaddr + 0xf); 1443#endif 1444 CA(dev); 1445 1446 DEB(DEB_INTS,printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name)); 1447 1448 spin_unlock (&lp->lock); 1449 return IRQ_RETVAL(handled); 1450} 1451 1452static int i596_close(struct net_device *dev) 1453{ 1454 struct i596_private *lp = dev->ml_priv; 1455 unsigned long flags; 1456 1457 netif_stop_queue(dev); 1458 1459 DEB(DEB_INIT,printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n", 1460 dev->name, lp->scb.status)); 1461 1462 spin_lock_irqsave(&lp->lock, flags); 1463 1464 wait_cmd(dev,lp,100,"close1 timed out"); 1465 lp->scb.command = CUC_ABORT | RX_ABORT; 1466 CA(dev); 1467 1468 wait_cmd(dev,lp,100,"close2 timed out"); 1469 1470 spin_unlock_irqrestore(&lp->lock, flags); 1471 DEB(DEB_STRUCT,i596_display_data(dev)); 1472 i596_cleanup_cmd(dev,lp); 1473 1474#ifdef ENABLE_MVME16x_NET 1475 if (MACH_IS_MVME16x) { 1476 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000; 1477 1478 /* Disable all ints */ 1479 pcc2[0x28] = 1; 1480 pcc2[0x2a] = 0x40; 1481 pcc2[0x2b] = 0x40; /* Set snooping bits now! */ 1482 } 1483#endif 1484#ifdef ENABLE_BVME6000_NET 1485 if (MACH_IS_BVME6000) { 1486 volatile unsigned char *ethirq = (unsigned char *) BVME_ETHIRQ_REG; 1487 1488 *ethirq = 1; 1489 } 1490#endif 1491 1492 free_irq(dev->irq, dev); 1493 remove_rx_bufs(dev); 1494 1495 return 0; 1496} 1497 1498/* 1499 * Set or clear the multicast filter for this adaptor. 1500 */ 1501 1502static void set_multicast_list(struct net_device *dev) 1503{ 1504 struct i596_private *lp = dev->ml_priv; 1505 int config = 0, cnt; 1506 1507 DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n", 1508 dev->name, netdev_mc_count(dev), 1509 dev->flags & IFF_PROMISC ? "ON" : "OFF", 1510 dev->flags & IFF_ALLMULTI ? "ON" : "OFF")); 1511 1512 if (wait_cfg(dev, &lp->cf_cmd.cmd, 1000, "config change request timed out")) 1513 return; 1514 1515 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) { 1516 lp->cf_cmd.i596_config[8] |= 0x01; 1517 config = 1; 1518 } 1519 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) { 1520 lp->cf_cmd.i596_config[8] &= ~0x01; 1521 config = 1; 1522 } 1523 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) { 1524 lp->cf_cmd.i596_config[11] &= ~0x20; 1525 config = 1; 1526 } 1527 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) { 1528 lp->cf_cmd.i596_config[11] |= 0x20; 1529 config = 1; 1530 } 1531 if (config) { 1532 lp->cf_cmd.cmd.command = CmdConfigure; 1533 i596_add_cmd(dev, &lp->cf_cmd.cmd); 1534 } 1535 1536 cnt = netdev_mc_count(dev); 1537 if (cnt > MAX_MC_CNT) 1538 { 1539 cnt = MAX_MC_CNT; 1540 printk(KERN_ERR "%s: Only %d multicast addresses supported", 1541 dev->name, cnt); 1542 } 1543 1544 if (!netdev_mc_empty(dev)) { 1545 struct dev_mc_list *dmi; 1546 unsigned char *cp; 1547 struct mc_cmd *cmd; 1548 1549 if (wait_cfg(dev, &lp->mc_cmd.cmd, 1000, "multicast list change request timed out")) 1550 return; 1551 cmd = &lp->mc_cmd; 1552 cmd->cmd.command = CmdMulticastList; 1553 cmd->mc_cnt = cnt * ETH_ALEN; 1554 cp = cmd->mc_addrs; 1555 netdev_for_each_mc_addr(dmi, dev) { 1556 if (!cnt--) 1557 break; 1558 memcpy(cp, dmi->dmi_addr, ETH_ALEN); 1559 if (i596_debug > 1) 1560 DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n", 1561 dev->name, cp)); 1562 cp += ETH_ALEN; 1563 } 1564 i596_add_cmd(dev, &cmd->cmd); 1565 } 1566} 1567 1568#ifdef MODULE 1569static struct net_device *dev_82596; 1570 1571#ifdef ENABLE_APRICOT 1572module_param(irq, int, 0); 1573MODULE_PARM_DESC(irq, "Apricot IRQ number"); 1574#endif 1575 1576static int debug = -1; 1577module_param(debug, int, 0); 1578MODULE_PARM_DESC(debug, "i82596 debug mask"); 1579 1580int __init init_module(void) 1581{ 1582 if (debug >= 0) 1583 i596_debug = debug; 1584 dev_82596 = i82596_probe(-1); 1585 if (IS_ERR(dev_82596)) 1586 return PTR_ERR(dev_82596); 1587 return 0; 1588} 1589 1590void __exit cleanup_module(void) 1591{ 1592 unregister_netdev(dev_82596); 1593#ifdef __mc68000__ 1594 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, 1595 * XXX which may be invalid (CONFIG_060_WRITETHROUGH) 1596 */ 1597 1598 kernel_set_cachemode((void *)(dev_82596->mem_start), 4096, 1599 IOMAP_FULL_CACHING); 1600#endif 1601 free_page ((u32)(dev_82596->mem_start)); 1602#ifdef ENABLE_APRICOT 1603 /* If we don't do this, we can't re-insmod it later. */ 1604 release_region(dev_82596->base_addr, I596_TOTAL_SIZE); 1605#endif 1606 free_netdev(dev_82596); 1607} 1608 1609#endif /* MODULE */