Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.26-rc6 (1265 lines, 33 kB)
/*
 * Intel IXP4xx Ethernet driver for Linux
 *
 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * Ethernet port config (0x00 is not present on IXP42X):
 *
 * logical port        0x00        0x10        0x20
 * NPE                 0 (NPE-A)   1 (NPE-B)   2 (NPE-C)
 * physical PortId     2           0           1
 * TX queue            23          24          25
 * RX-free queue       26          27          28
 * TX-done queue is always 31, per-port RX and TX-ready queues are configurable
 *
 *
 * Queue entries:
 * bits 0 -> 1  - NPE ID (RX and TX-done)
 * bits 0 -> 2  - priority (TX, per 802.1D)
 * bits 3 -> 4  - port ID (user-set?)
 * bits 5 -> 31 - physical descriptor address
 */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mii.h>
#include <linux/platform_device.h>
#include <asm/arch/npe.h>
#include <asm/arch/qmgr.h>

#define DEBUG_QUEUES		0
#define DEBUG_DESC		0
#define DEBUG_RX		0
#define DEBUG_TX		0
#define DEBUG_PKT_BYTES		0
#define DEBUG_MDIO		0
#define DEBUG_CLOSE		0

#define DRV_NAME		"ixp4xx_eth"

#define MAX_NPES		3

#define RX_DESCS		64 /* also length of all RX queues */
#define TX_DESCS		16 /* also length of all TX queues */
#define TXDONE_QUEUE_LEN	64 /* dwords */

#define POOL_ALLOC_SIZE		(sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define REGS_SIZE		0x1000
#define MAX_MRU			1536 /* 0x600 */
#define RX_BUFF_SIZE		ALIGN((NET_IP_ALIGN) + MAX_MRU, 4)

#define NAPI_WEIGHT		16
#define MDIO_INTERVAL		(3 * HZ)
#define MAX_MDIO_RETRIES	100 /* microseconds, typically 30 cycles */
#define MAX_MII_RESET_RETRIES	100 /* mdio_read() cycles, typically 4 */
#define MAX_CLOSE_WAIT		1000 /* microseconds, typically 2-3 cycles */

#define NPE_ID(port_id)		((port_id) >> 4)
#define PHYSICAL_ID(port_id)	((NPE_ID(port_id) + 2) % 3)
#define TX_QUEUE(port_id)	(NPE_ID(port_id) + 23)
#define RXFREE_QUEUE(port_id)	(NPE_ID(port_id) + 26)
#define TXDONE_QUEUE		31
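
/*
 * Editorial note: a worked example of the macros above. For logical
 * port 0x10 (NPE-B): NPE_ID(0x10) = 1, PHYSICAL_ID(0x10) = (1 + 2) % 3 = 0,
 * TX_QUEUE(0x10) = 24 and RXFREE_QUEUE(0x10) = 27, matching the port
 * config table in the header comment.
 */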

/* TX Control Registers */
#define TX_CNTRL0_TX_EN		0x01
#define TX_CNTRL0_HALFDUPLEX	0x02
#define TX_CNTRL0_RETRY		0x04
#define TX_CNTRL0_PAD_EN	0x08
#define TX_CNTRL0_APPEND_FCS	0x10
#define TX_CNTRL0_2DEFER	0x20
#define TX_CNTRL0_RMII		0x40 /* reduced MII */
#define TX_CNTRL1_RETRIES	0x0F /* 4 bits */

/* RX Control Registers */
#define RX_CNTRL0_RX_EN		0x01
#define RX_CNTRL0_PADSTRIP_EN	0x02
#define RX_CNTRL0_SEND_FCS	0x04
#define RX_CNTRL0_PAUSE_EN	0x08
#define RX_CNTRL0_LOOP_EN	0x10
#define RX_CNTRL0_ADDR_FLTR_EN	0x20
#define RX_CNTRL0_RX_RUNT_EN	0x40
#define RX_CNTRL0_BCAST_DIS	0x80
#define RX_CNTRL1_DEFER_EN	0x01

/* Core Control Register */
#define CORE_RESET		0x01
#define CORE_RX_FIFO_FLUSH	0x02
#define CORE_TX_FIFO_FLUSH	0x04
#define CORE_SEND_JAM		0x08
#define CORE_MDC_EN		0x10 /* MDIO using NPE-B ETH-0 only */

#define DEFAULT_TX_CNTRL0	(TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY |	\
				 TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \
				 TX_CNTRL0_2DEFER)
#define DEFAULT_RX_CNTRL0	RX_CNTRL0_RX_EN
#define DEFAULT_CORE_CNTRL	CORE_MDC_EN


/* NPE message codes */
#define NPE_GETSTATUS				0x00
#define NPE_EDB_SETPORTADDRESS			0x01
#define NPE_EDB_GETMACADDRESSDATABASE		0x02
#define NPE_EDB_SETMACADDRESSSDATABASE		0x03
#define NPE_GETSTATS				0x04
#define NPE_RESETSTATS				0x05
#define NPE_SETMAXFRAMELENGTHS			0x06
#define NPE_VLAN_SETRXTAGMODE			0x07
#define NPE_VLAN_SETDEFAULTRXVID		0x08
#define NPE_VLAN_SETPORTVLANTABLEENTRY		0x09
#define NPE_VLAN_SETPORTVLANTABLERANGE		0x0A
#define NPE_VLAN_SETRXQOSENTRY			0x0B
#define NPE_VLAN_SETPORTIDEXTRACTIONMODE	0x0C
#define NPE_STP_SETBLOCKINGSTATE		0x0D
#define NPE_FW_SETFIREWALLMODE			0x0E
#define NPE_PC_SETFRAMECONTROLDURATIONID	0x0F
#define NPE_PC_SETAPMACTABLE			0x11
#define NPE_SETLOOPBACK_MODE			0x12
#define NPE_PC_SETBSSIDTABLE			0x13
#define NPE_ADDRESS_FILTER_CONFIG		0x14
#define NPE_APPENDFCSCONFIG			0x15
#define NPE_NOTIFY_MAC_RECOVERY_DONE		0x16
#define NPE_MAC_RECOVERY_START			0x17


#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_kfree_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif

struct eth_regs {
	u32 tx_control[2], __res1[2];		/* 000 */
	u32 rx_control[2], __res2[2];		/* 010 */
	u32 random_seed, __res3[3];		/* 020 */
	u32 partial_empty_threshold, __res4;	/* 030 */
	u32 partial_full_threshold, __res5;	/* 038 */
	u32 tx_start_bytes, __res6[3];		/* 040 */
	u32 tx_deferral, rx_deferral, __res7[2];/* 050 */
	u32 tx_2part_deferral[2], __res8[2];	/* 060 */
	u32 slot_time, __res9[3];		/* 070 */
	u32 mdio_command[4];			/* 080 */
	u32 mdio_status[4];			/* 090 */
	u32 mcast_mask[6], __res10[2];		/* 0A0 */
	u32 mcast_addr[6], __res11[2];		/* 0C0 */
	u32 int_clock_threshold, __res12[3];	/* 0E0 */
	u32 hw_addr[6], __res13[61];		/* 0F0 */
	u32 core_control;			/* 1FC */
};

struct port {
	struct resource *mem_res;
	struct eth_regs __iomem *regs;
	struct npe *npe;
	struct net_device *netdev;
	struct napi_struct napi;
	struct net_device_stats stat;
	struct mii_if_info mii;
	struct delayed_work mdio_thread;
	struct eth_plat_info *plat;
	buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
	struct desc *desc_tab;	/* coherent */
	u32 desc_tab_phys;
	int id;			/* logical port ID */
	u16 mii_bmcr;
};

/* NPE message structure */
struct msg {
#ifdef __ARMEB__
	u8 cmd, eth_id, byte2, byte3;
	u8 byte4, byte5, byte6, byte7;
#else
	u8 byte3, byte2, eth_id, cmd;
	u8 byte7, byte6, byte5, byte4;
#endif
};
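
/*
 * Editorial note: the little-endian variant of struct msg above mirrors
 * the byte order within each 32-bit word, so cmd, eth_id and the
 * numbered bytes end up at the same positions in the two words the NPE
 * sees, whichever endianness the kernel was built for.
 */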

/* Ethernet packet descriptor */
struct desc {
	u32 next;		/* pointer to next buffer, unused */

#ifdef __ARMEB__
	u16 buf_len;		/* buffer length */
	u16 pkt_len;		/* packet length */
	u32 data;		/* pointer to data buffer in RAM */
	u8 dest_id;
	u8 src_id;
	u16 flags;
	u8 qos;
	u8 padlen;
	u16 vlan_tci;
#else
	u16 pkt_len;		/* packet length */
	u16 buf_len;		/* buffer length */
	u32 data;		/* pointer to data buffer in RAM */
	u16 flags;
	u8 src_id;
	u8 dest_id;
	u16 vlan_tci;
	u8 padlen;
	u8 qos;
#endif

#ifdef __ARMEB__
	u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3;
	u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1;
	u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5;
#else
	u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0;
	u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4;
	u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2;
#endif
};


#define rx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 (n) * sizeof(struct desc))
#define rx_desc_ptr(port, n)	(&(port)->desc_tab[n])

#define tx_desc_phys(port, n)	((port)->desc_tab_phys +		\
				 ((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n)	(&(port)->desc_tab[(n) + RX_DESCS])

#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
	int i;
	for (i = 0; i < cnt; i++)
		dest[i] = swab32(src[i]);
}
#endif
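
/*
 * Editorial note: the NPE expects frame data in big-endian 32-bit words,
 * so little-endian builds copy packets through memcpy_swab32() (see
 * eth_poll() and eth_xmit() below) rather than DMA-ing skb data
 * directly, as the big-endian (__ARMEB__) code paths do.
 */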

static spinlock_t mdio_lock;
static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */
static int ports_open;
static struct port *npe_port_tab[MAX_NPES];
static struct dma_pool *dma_pool;


static u16 mdio_cmd(struct net_device *dev, int phy_id, int location,
		    int write, u16 cmd)
{
	int cycles = 0;

	if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) {
		printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name);
		return 0;
	}

	if (write) {
		__raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]);
		__raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]);
	}
	__raw_writel(((phy_id << 5) | location) & 0xFF,
		     &mdio_regs->mdio_command[2]);
	__raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */,
		     &mdio_regs->mdio_command[3]);

	while ((cycles < MAX_MDIO_RETRIES) &&
	       (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) {
		udelay(1);
		cycles++;
	}

	if (cycles == MAX_MDIO_RETRIES) {
		printk(KERN_ERR "%s: MII write failed\n", dev->name);
		return 0;
	}

#if DEBUG_MDIO
	printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name,
	       cycles);
#endif

	if (write)
		return 0;

	if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) {
		printk(KERN_ERR "%s: MII read failed\n", dev->name);
		return 0;
	}

	return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) |
	       (__raw_readl(&mdio_regs->mdio_status[1]) << 8);
}
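
/*
 * Editorial note: one MDIO transaction is spread across the four
 * byte-wide mdio_command registers: write data in [0] (low byte) and
 * [1] (high byte), the register number plus the low PHY address bits
 * in [2], and the remaining PHY address bits, the write flag and the
 * GO bit (0x80) in [3]; the same 0x80 bit doubles as the busy flag
 * polled by the loop above.
 */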

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	unsigned long flags;
	u16 val;

	spin_lock_irqsave(&mdio_lock, flags);
	val = mdio_cmd(dev, phy_id, location, 0, 0);
	spin_unlock_irqrestore(&mdio_lock, flags);
	return val;
}

static void mdio_write(struct net_device *dev, int phy_id, int location,
		       int val)
{
	unsigned long flags;

	spin_lock_irqsave(&mdio_lock, flags);
	mdio_cmd(dev, phy_id, location, 1, val);
	spin_unlock_irqrestore(&mdio_lock, flags);
}

static void phy_reset(struct net_device *dev, int phy_id)
{
	struct port *port = netdev_priv(dev);
	int cycles = 0;

	mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET);

	while (cycles < MAX_MII_RESET_RETRIES) {
		if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) {
#if DEBUG_MDIO
			printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n",
			       dev->name, cycles);
#endif
			return;
		}
		udelay(1);
		cycles++;
	}

	printk(KERN_ERR "%s: MII reset failed\n", dev->name);
}

static void eth_set_duplex(struct port *port)
{
	if (port->mii.full_duplex)
		__raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
	else
		__raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX,
			     &port->regs->tx_control[0]);
}


static void phy_check_media(struct port *port, int init)
{
	if (mii_check_media(&port->mii, 1, init))
		eth_set_duplex(port);
	if (port->mii.force_media) { /* mii_check_media() doesn't work */
		struct net_device *dev = port->netdev;
		int cur_link = mii_link_ok(&port->mii);
		int prev_link = netif_carrier_ok(dev);

		if (!prev_link && cur_link) {
			printk(KERN_INFO "%s: link up\n", dev->name);
			netif_carrier_on(dev);
		} else if (prev_link && !cur_link) {
			printk(KERN_INFO "%s: link down\n", dev->name);
			netif_carrier_off(dev);
		}
	}
}


static void mdio_thread(struct work_struct *work)
{
	struct port *port = container_of(work, struct port, mdio_thread.work);

	phy_check_media(port, 0);
	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);
}


static inline void debug_pkt(struct net_device *dev, const char *func,
			     u8 *data, int len)
{
#if DEBUG_PKT_BYTES
	int i;

	printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len);
	for (i = 0; i < len; i++) {
		if (i >= DEBUG_PKT_BYTES)
			break;
		printk("%s%02X",
		       ((i == 6) || (i == 12) || (i >= 14)) ? " " : "",
		       data[i]);
	}
	printk("\n");
#endif
}


static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
	printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X"
	       " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n",
	       phys, desc->next, desc->buf_len, desc->pkt_len,
	       desc->data, desc->dest_id, desc->src_id, desc->flags,
	       desc->qos, desc->padlen, desc->vlan_tci,
	       desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2,
	       desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5,
	       desc->src_mac_0, desc->src_mac_1, desc->src_mac_2,
	       desc->src_mac_3, desc->src_mac_4, desc->src_mac_5);
#endif
}

static inline void debug_queue(unsigned int queue, int is_get, u32 phys)
{
#if DEBUG_QUEUES
	static struct {
		int queue;
		char *name;
	} names[] = {
		{ TX_QUEUE(0x10), "TX#0 " },
		{ TX_QUEUE(0x20), "TX#1 " },
		{ TX_QUEUE(0x00), "TX#2 " },
		{ RXFREE_QUEUE(0x10), "RX-free#0 " },
		{ RXFREE_QUEUE(0x20), "RX-free#1 " },
		{ RXFREE_QUEUE(0x00), "RX-free#2 " },
		{ TXDONE_QUEUE, "TX-done " },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(names); i++)
		if (names[i].queue == queue)
			break;

	printk(KERN_DEBUG "Queue %i %s%s %X\n", queue,
	       i < ARRAY_SIZE(names) ? names[i].name : "",
	       is_get ? "->" : "<-", phys);
#endif
}

static inline u32 queue_get_entry(unsigned int queue)
{
	u32 phys = qmgr_get_entry(queue);
	debug_queue(queue, 1, phys);
	return phys;
}

static inline int queue_get_desc(unsigned int queue, struct port *port,
				 int is_tx)
{
	u32 phys, tab_phys, n_desc;
	struct desc *tab;

	if (!(phys = queue_get_entry(queue)))
		return -1;

	phys &= ~0x1F; /* mask out non-address bits */
	tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
	tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
	n_desc = (phys - tab_phys) / sizeof(struct desc);
	BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
	debug_desc(phys, &tab[n_desc]);
	BUG_ON(tab[n_desc].next);
	return n_desc;
}

static inline void queue_put_desc(unsigned int queue, u32 phys,
				  struct desc *desc)
{
	debug_queue(queue, 0, phys);
	debug_desc(phys, desc);
	BUG_ON(phys & 0x1F);
	qmgr_put_entry(queue, phys);
	BUG_ON(qmgr_stat_overflow(queue));
}
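
/*
 * Editorial note: struct desc is 32 bytes and the dma_pool is created
 * with 32-byte alignment (see init_queues() below), so bits 0-4 of a
 * queue entry are free for metadata (NPE ID, priority, port ID; see the
 * header comment). queue_get_desc() masks them off with ~0x1F before
 * turning the physical address into a table index, and queue_put_desc()
 * asserts that the address it enqueues really is aligned.
 */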


static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
	dma_unmap_single(&port->netdev->dev, desc->data,
			 desc->buf_len, DMA_TO_DEVICE);
#else
	dma_unmap_single(&port->netdev->dev, desc->data & ~3,
			 ALIGN((desc->data & 3) + desc->buf_len, 4),
			 DMA_TO_DEVICE);
#endif
}


static void eth_rx_irq(void *pdev)
{
	struct net_device *dev = pdev;
	struct port *port = netdev_priv(dev);

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name);
#endif
	qmgr_disable_irq(port->plat->rxq);
	netif_rx_schedule(dev, &port->napi);
}

static int eth_poll(struct napi_struct *napi, int budget)
{
	struct port *port = container_of(napi, struct port, napi);
	struct net_device *dev = port->netdev;
	unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id);
	int received = 0;

#if DEBUG_RX
	printk(KERN_DEBUG "%s: eth_poll\n", dev->name);
#endif

	while (received < budget) {
		struct sk_buff *skb;
		struct desc *desc;
		int n;
#ifdef __ARMEB__
		struct sk_buff *temp;
		u32 phys;
#endif

		if ((n = queue_get_desc(rxq, port, 0)) < 0) {
			received = 0; /* No packet received */
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n",
			       dev->name);
#endif
			netif_rx_complete(dev, napi);
			qmgr_enable_irq(rxq);
			if (!qmgr_stat_empty(rxq) &&
			    netif_rx_reschedule(dev, napi)) {
#if DEBUG_RX
				printk(KERN_DEBUG "%s: eth_poll"
				       " netif_rx_reschedule succeeded\n",
				       dev->name);
#endif
				qmgr_disable_irq(rxq);
				continue;
			}
#if DEBUG_RX
			printk(KERN_DEBUG "%s: eth_poll all done\n",
			       dev->name);
#endif
			return 0; /* all work done */
		}

		desc = rx_desc_ptr(port, n);

#ifdef __ARMEB__
		if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) {
			phys = dma_map_single(&dev->dev, skb->data,
					      RX_BUFF_SIZE, DMA_FROM_DEVICE);
			if (dma_mapping_error(phys)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
#else
		skb = netdev_alloc_skb(dev,
				       ALIGN(NET_IP_ALIGN + desc->pkt_len, 4));
#endif

		if (!skb) {
			port->stat.rx_dropped++;
			/* put the desc back on RX-ready queue */
			desc->buf_len = MAX_MRU;
			desc->pkt_len = 0;
			queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
			continue;
		}

		/* process received frame */
#ifdef __ARMEB__
		temp = skb;
		skb = port->rx_buff_tab[n];
		dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN,
				 RX_BUFF_SIZE, DMA_FROM_DEVICE);
#else
		dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN,
				RX_BUFF_SIZE, DMA_FROM_DEVICE);
		memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
			      ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4);
#endif
		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, desc->pkt_len);

		debug_pkt(dev, "eth_poll", skb->data, skb->len);

		skb->protocol = eth_type_trans(skb, dev);
		dev->last_rx = jiffies;
		port->stat.rx_packets++;
		port->stat.rx_bytes += skb->len;
		netif_receive_skb(skb);

		/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
		port->rx_buff_tab[n] = temp;
		desc->data = phys + NET_IP_ALIGN;
#endif
		desc->buf_len = MAX_MRU;
		desc->pkt_len = 0;
		queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
		received++;
	}

#if DEBUG_RX
	printk(KERN_DEBUG "eth_poll(): end, not all work done\n");
#endif
	return received; /* not all work done */
}
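
/*
 * Editorial note: when the RX queue runs dry, eth_poll() completes NAPI
 * and re-enables the queue IRQ, then looks at the queue once more: a
 * frame arriving in that window would otherwise sit unnoticed until the
 * next interrupt, so a successful netif_rx_reschedule() disables the
 * IRQ again and resumes polling.
 */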


static void eth_txdone_irq(void *unused)
{
	u32 phys;

#if DEBUG_TX
	printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n");
#endif
	while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) {
		u32 npe_id, n_desc;
		struct port *port;
		struct desc *desc;
		int start;

		npe_id = phys & 3;
		BUG_ON(npe_id >= MAX_NPES);
		port = npe_port_tab[npe_id];
		BUG_ON(!port);
		phys &= ~0x1F; /* mask out non-address bits */
		n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc);
		BUG_ON(n_desc >= TX_DESCS);
		desc = tx_desc_ptr(port, n_desc);
		debug_desc(phys, desc);

		if (port->tx_buff_tab[n_desc]) { /* not the draining packet */
			port->stat.tx_packets++;
			port->stat.tx_bytes += desc->pkt_len;

			dma_unmap_tx(port, desc);
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n",
			       port->netdev->name, port->tx_buff_tab[n_desc]);
#endif
			free_buffer_irq(port->tx_buff_tab[n_desc]);
			port->tx_buff_tab[n_desc] = NULL;
		}

		start = qmgr_stat_empty(port->plat->txreadyq);
		queue_put_desc(port->plat->txreadyq, phys, desc);
		if (start) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n",
			       port->netdev->name);
#endif
			netif_wake_queue(port->netdev);
		}
	}
}
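
/*
 * Editorial note: TXDONE_QUEUE is shared by all ports, so the handler
 * first recovers the owning port from the NPE ID in bits 0-1 of the
 * queue entry (phys & 3) and only then strips the metadata bits from
 * the descriptor address.
 */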

static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	unsigned int txreadyq = port->plat->txreadyq;
	int len, offset, bytes, n;
	void *mem;
	u32 phys;
	struct desc *desc;

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit\n", dev->name);
#endif

	if (unlikely(skb->len > MAX_MRU)) {
		dev_kfree_skb(skb);
		port->stat.tx_errors++;
		return NETDEV_TX_OK;
	}

	debug_pkt(dev, "eth_xmit", skb->data, skb->len);

	len = skb->len;
#ifdef __ARMEB__
	offset = 0; /* no need to keep alignment */
	bytes = len;
	mem = skb->data;
#else
	offset = (int)skb->data & 3; /* keep 32-bit alignment */
	bytes = ALIGN(offset + len, 4);
	if (!(mem = kmalloc(bytes, GFP_ATOMIC))) {
		dev_kfree_skb(skb);
		port->stat.tx_dropped++;
		return NETDEV_TX_OK;
	}
	memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4);
	dev_kfree_skb(skb);
#endif

	phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
	if (dma_mapping_error(phys)) {
#ifdef __ARMEB__
		dev_kfree_skb(skb);
#else
		kfree(mem);
#endif
		port->stat.tx_dropped++;
		return NETDEV_TX_OK;
	}

	n = queue_get_desc(txreadyq, port, 1);
	BUG_ON(n < 0);
	desc = tx_desc_ptr(port, n);

#ifdef __ARMEB__
	port->tx_buff_tab[n] = skb;
#else
	port->tx_buff_tab[n] = mem;
#endif
	desc->data = phys + offset;
	desc->buf_len = desc->pkt_len = len;

	/* NPE firmware pads short frames with zeros internally */
	wmb();
	queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc);
	dev->trans_start = jiffies;

	if (qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
		printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name);
#endif
		netif_stop_queue(dev);
		/* we could miss TX ready interrupt */
		if (!qmgr_stat_empty(txreadyq)) {
#if DEBUG_TX
			printk(KERN_DEBUG "%s: eth_xmit ready again\n",
			       dev->name);
#endif
			netif_wake_queue(dev);
		}
	}

#if DEBUG_TX
	printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name);
#endif
	return NETDEV_TX_OK;
}
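
/*
 * Editorial note: on little-endian builds eth_xmit() frees the skb
 * early and sends from a byte-swapped kmalloc() bounce buffer, so
 * tx_buff_tab[] holds skbs on big-endian kernels but raw buffers on
 * little-endian ones; that difference is what the buffer_t /
 * free_buffer abstraction near the top of the file hides.
 */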


static struct net_device_stats *eth_stats(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	return &port->stat;
}

static void eth_set_mcast_list(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct dev_mc_list *mclist = dev->mc_list;
	u8 diffs[ETH_ALEN], *addr;
	int cnt = dev->mc_count, i;

	if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
		__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
			     &port->regs->rx_control[0]);
		return;
	}

	memset(diffs, 0, ETH_ALEN);
	addr = mclist->dmi_addr; /* first MAC address */

	while (--cnt && (mclist = mclist->next))
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ mclist->dmi_addr[i];

	for (i = 0; i < ETH_ALEN; i++) {
		__raw_writel(addr[i], &port->regs->mcast_addr[i]);
		__raw_writel(~diffs[i], &port->regs->mcast_mask[i]);
	}

	__raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN,
		     &port->regs->rx_control[0]);
}
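
/*
 * Editorial note: the MAC has a single address/mask multicast filter,
 * so the loop above ORs together the XOR of each list entry against the
 * first address; a mask bit is kept only where every address agrees.
 * E.g. for 01:00:5E:00:00:01 and 01:00:5E:00:00:03 all mask bytes stay
 * 0xFF except the last, which becomes ~0x02 = 0xFD, presumably
 * accepting any address that matches the first one in the unmasked
 * bits.
 */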


static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct port *port = netdev_priv(dev);
	unsigned int duplex_chg;
	int err;

	if (!netif_running(dev))
		return -EINVAL;
	err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg);
	if (duplex_chg)
		eth_set_duplex(port);
	return err;
}


static int request_queues(struct port *port)
{
	int err;

	err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0);
	if (err)
		return err;

	err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0);
	if (err)
		goto rel_rxfree;

	err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0);
	if (err)
		goto rel_rx;

	err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0);
	if (err)
		goto rel_tx;

	/* TX-done queue handles skbs sent out by the NPEs */
	if (!ports_open) {
		err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0);
		if (err)
			goto rel_txready;
	}
	return 0;

rel_txready:
	qmgr_release_queue(port->plat->txreadyq);
rel_tx:
	qmgr_release_queue(TX_QUEUE(port->id));
rel_rx:
	qmgr_release_queue(port->plat->rxq);
rel_rxfree:
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	printk(KERN_DEBUG "%s: unable to request hardware queues\n",
	       port->netdev->name);
	return err;
}

static void release_queues(struct port *port)
{
	qmgr_release_queue(RXFREE_QUEUE(port->id));
	qmgr_release_queue(port->plat->rxq);
	qmgr_release_queue(TX_QUEUE(port->id));
	qmgr_release_queue(port->plat->txreadyq);

	if (!ports_open)
		qmgr_release_queue(TXDONE_QUEUE);
}

static int init_queues(struct port *port)
{
	int i;

	if (!ports_open)
		if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
						 POOL_ALLOC_SIZE, 32, 0)))
			return -ENOMEM;

	if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
					      &port->desc_tab_phys)))
		return -ENOMEM;
	memset(port->desc_tab, 0, POOL_ALLOC_SIZE);
	memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
	memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));

	/* Setup RX buffers */
	for (i = 0; i < RX_DESCS; i++) {
		struct desc *desc = rx_desc_ptr(port, i);
		buffer_t *buff; /* skb or kmalloc()ated memory */
		void *data;
#ifdef __ARMEB__
		if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE)))
			return -ENOMEM;
		data = buff->data;
#else
		if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL)))
			return -ENOMEM;
		data = buff;
#endif
		desc->buf_len = MAX_MRU;
		desc->data = dma_map_single(&port->netdev->dev, data,
					    RX_BUFF_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(desc->data)) {
			free_buffer(buff);
			return -EIO;
		}
		desc->data += NET_IP_ALIGN;
		port->rx_buff_tab[i] = buff;
	}

	return 0;
}

static void destroy_queues(struct port *port)
{
	int i;

	if (port->desc_tab) {
		for (i = 0; i < RX_DESCS; i++) {
			struct desc *desc = rx_desc_ptr(port, i);
			buffer_t *buff = port->rx_buff_tab[i];
			if (buff) {
				dma_unmap_single(&port->netdev->dev,
						 desc->data - NET_IP_ALIGN,
						 RX_BUFF_SIZE,
						 DMA_FROM_DEVICE);
				free_buffer(buff);
			}
		}
		for (i = 0; i < TX_DESCS; i++) {
			struct desc *desc = tx_desc_ptr(port, i);
			buffer_t *buff = port->tx_buff_tab[i];
			if (buff) {
				dma_unmap_tx(port, desc);
				free_buffer(buff);
			}
		}
		dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
		port->desc_tab = NULL;
	}

	if (!ports_open && dma_pool) {
		dma_pool_destroy(dma_pool);
		dma_pool = NULL;
	}
}

static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
	}

	mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr);

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	phy_check_media(port, 1);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);
	schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	netif_rx_schedule(dev, &port->napi);
	return 0;
}

static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS; /* allocated RX buffers */
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do { /* drain RX buffers */
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* we have to inject some packet */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; /* cancel TX */

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) &
			 ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */
	mdio_write(dev, port->plat->phy, MII_BMCR,
		   port->mii_bmcr | BMCR_PDOWN);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	cancel_rearming_delayed_work(&port->mdio_thread);
	destroy_queues(port);
	release_queues(port);
	return 0;
}
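
/*
 * Editorial note: eth_close() reclaims RX buffers still held by the NPE
 * by switching the port into loopback and, whenever the TX queue is
 * empty, injecting dummy 1-byte frames; each looped-back frame returns
 * through the RX queue, letting the drain loop account for one more
 * buffer.
 */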

static int __devinit eth_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	struct eth_plat_info *plat = pdev->dev.platform_data;
	u32 regs_phys;
	int err;

	if (!(dev = alloc_etherdev(sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	port = netdev_priv(dev);
	port->netdev = dev;
	port->id = pdev->id;

	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
		regs_phys = IXP4XX_EthA_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEB:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
		regs_phys = IXP4XX_EthB_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEC:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
		regs_phys = IXP4XX_EthC_BASE_PHYS;
		break;
	default:
		err = -ENOSYS;
		goto err_free;
	}

	dev->open = eth_open;
	dev->hard_start_xmit = eth_xmit;
	dev->stop = eth_close;
	dev->get_stats = eth_stats;
	dev->do_ioctl = eth_ioctl;
	dev->set_multicast_list = eth_set_mcast_list;
	dev->tx_queue_len = 100;

	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
		err = -EIO;
		goto err_free;
	}

	if (register_netdev(dev)) {
		err = -EIO;
		goto err_npe_rel;
	}

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_unreg;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, dev);

	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	port->mii.dev = dev;
	port->mii.mdio_read = mdio_read;
	port->mii.mdio_write = mdio_write;
	port->mii.phy_id = plat->phy;
	port->mii.phy_id_mask = 0x1F;
	port->mii.reg_num_mask = 0x1F;

	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
	       npe_name(port->npe));

	phy_reset(dev, plat->phy);
	port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) &
			 ~(BMCR_RESET | BMCR_PDOWN);
	mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN);

	INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread);
	return 0;

err_unreg:
	unregister_netdev(dev);
err_npe_rel:
	npe_release(port->npe);
err_free:
	free_netdev(dev);
	return err;
}

static int __devexit eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct port *port = netdev_priv(dev);

	unregister_netdev(dev);
	npe_port_tab[NPE_ID(port->id)] = NULL;
	platform_set_drvdata(pdev, NULL);
	npe_release(port->npe);
	release_resource(port->mem_res);
	free_netdev(dev);
	return 0;
}

static struct platform_driver drv = {
	.driver.name	= DRV_NAME,
	.probe		= eth_init_one,
	.remove		= eth_remove_one,
};

static int __init eth_init_module(void)
{
	if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0))
		return -ENOSYS;

	/* All MII PHY accesses use NPE-B Ethernet registers */
	spin_lock_init(&mdio_lock);
	mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
	__raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control);

	return platform_driver_register(&drv);
}

static void __exit eth_cleanup_module(void)
{
	platform_driver_unregister(&drv);
}

MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_eth");
module_init(eth_init_module);
module_exit(eth_cleanup_module);