Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
drivers/net/sh_eth.c at v2.6.29-rc7

/*
 * SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2008 Nobuhiro Iwamatsu
 * Copyright (C) 2008 Renesas Solutions Corp.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>

#include "sh_eth.h"

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

/*
 * Program the hardware MAC address from dev->dev_addr.
 */
static void update_mac_address(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		  (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
		  ioaddr + MAHR);
	ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
		  ioaddr + MALR);
}
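
/*
 * Worked example: for the address 00:11:22:33:44:55 the two writes above
 * produce MAHR = 0x00112233 and MALR = 0x00004455; read_mac_address()
 * below simply reverses this packing.
 */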

/*
 * Get the MAC address from the SuperH MAC address registers.
 *
 * The SuperH Ethernet device has no ROM for its MAC address.
 * This driver picks up the MAC address that was programmed by the
 * bootloader (U-Boot or sh-ipl+g), so a MAC address must be set in the
 * bootloader before this device is used.
 */
static void read_mac_address(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
	ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
	ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
	ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
	ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
	ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
}

struct bb_info {
	struct mdiobb_ctrl ctrl;
	u32 addr;
	u32 mmd_msk;	/* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(u32 addr, u32 msk)
{
	ctrl_outl(ctrl_inl(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(u32 addr, u32 msk)
{
	ctrl_outl((ctrl_inl(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(u32 addr, u32 msk)
{
	return (ctrl_inl(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);
	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

/* Chip Reset */
static void sh_eth_reset(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	int cnt = 100;

	ctrl_outl(EDSR_ENALL, ioaddr + EDSR);
	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	while (cnt > 0) {
		if (!(ctrl_inl(ioaddr + EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt == 0)
		printk(KERN_ERR "Device reset failed\n");

	/* Table Init */
	ctrl_outl(0x0, ioaddr + TDLAR);
	ctrl_outl(0x0, ioaddr + TDFAR);
	ctrl_outl(0x0, ioaddr + TDFXR);
	ctrl_outl(0x0, ioaddr + TDFFR);
	ctrl_outl(0x0, ioaddr + RDLAR);
	ctrl_outl(0x0, ioaddr + RDFAR);
	ctrl_outl(0x0, ioaddr + RDFXR);
	ctrl_outl(0x0, ioaddr + RDFFR);
#else
	ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
	mdelay(3);
	ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
#endif
}
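
/*
 * Reset note: on SH7763 the software-reset bits in EDMR (mask 0x3)
 * self-clear when the reset completes, so completion is polled for up
 * to 100 ms; on the other CPU subtypes EDMR_SRST is simply asserted,
 * held for 3 ms and deasserted.
 */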

/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < RX_RING_SIZE; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr, reserve = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb->dev = ndev;	/* Mark as being used by this device. */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
		reserve = SH7763_SKB_ALIGN
			- ((uint32_t)skb->data & (SH7763_SKB_ALIGN - 1));
		if (reserve)
			skb_reserve(skb, reserve);
#else
		skb_reserve(skb, RX_OFFSET);
#endif
		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = (u32)skb->data & ~0x3UL;
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
		/* Rx descriptor address set */
		if (i == 0) {
			ctrl_outl((u32)rxdesc, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			ctrl_outl((u32)rxdesc, ioaddr + RDFAR);
#endif
		}
	}

	/* Rx descriptor address set */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	ctrl_outl((u32)rxdesc, ioaddr + RDFXR);
	ctrl_outl(0x1, ioaddr + RDFFR);
#endif

	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			ctrl_outl((u32)txdesc, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			ctrl_outl((u32)txdesc, ioaddr + TDFAR);
#endif
		}
	}

	/* Tx descriptor address set */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	ctrl_outl((u32)txdesc, ioaddr + TDFXR);
	ctrl_outl(0x1, ioaddr + TDFFR);
#endif

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
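
/*
 * Ring layout note: both rings are plain arrays of descriptors; the
 * final entry is flagged with RD_RDEL (Rx) or TD_TDLE (Tx) so the
 * EDMAC wraps back to the first descriptor instead of running off the
 * end of the array.
 */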

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/*
	 * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
				 GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
				 GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);

	if (!mdp->rx_ring) {
		printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
		       ndev->name, rx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
		       ndev->name, tx_ringsize);
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);

	return ret;
}
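
/*
 * Worked example for the sizing above: with an MTU of 1500 the Rx
 * buffer becomes (((1500 + 26 + 7) & ~7) + 2 + 16) = 1528 + 18 = 1546
 * bytes; MTUs of 1492 or less just use the fixed PKT_BUF_SZ.
 */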

static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);

	/* all sh_eth int mask */
	ctrl_outl(0, ioaddr + EESIPR);

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	ctrl_outl(EDMR_EL, ioaddr + EDMR);
#else
	ctrl_outl(0, ioaddr + EDMR);	/* Endian change */
#endif

	/* FIFO size set */
	ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
	ctrl_outl(0, ioaddr + TFTR);

	/* Frame recv control */
	ctrl_outl(0, ioaddr + RMCR);

	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	/* Burst cycle set */
	ctrl_outl(0x800, ioaddr + BCULR);
#endif

	ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);

#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
	ctrl_outl(0, ioaddr + TRIMD);
#endif

	/* Recv frame limit set register */
	ctrl_outl(RFLR_VALUE, ioaddr + RFLR);

	ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
	ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);

	/* PAUSE Prohibition */
	val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	ctrl_outl(val, ioaddr + ECMR);

	/* E-MAC Status Register clear */
	ctrl_outl(ECSR_INIT, ioaddr + ECSR);

	/* E-MAC Interrupt Enable register */
	ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
	ctrl_outl(APR_AP, ioaddr + APR);
	ctrl_outl(MPR_MP, ioaddr + MPR);
	ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
	ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
#endif

	/* Setting the Rx mode will start the Rx process. */
	ctrl_outl(EDRRR_R, ioaddr + EDRRR);

	netif_start_queue(ndev);

	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int freeNum = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
		entry = mdp->dirty_tx % TX_RING_SIZE;
		txdesc = &mdp->tx_ring[entry];
		if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
			break;
		/* Free the original skb. */
		if (mdp->tx_skbuff[entry]) {
			dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
			mdp->tx_skbuff[entry] = NULL;
			freeNum++;
		}
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		if (entry >= TX_RING_SIZE - 1)
			txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

		mdp->stats.tx_packets++;
		mdp->stats.tx_bytes += txdesc->buffer_length;
	}
	return freeNum;
}
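
/*
 * Completion note: sh_eth_txfree() walks from dirty_tx towards cur_tx
 * and stops at the first descriptor the EDMAC still owns (TD_TACT set),
 * so it only ever reclaims buffers whose DMA has already finished.
 */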

/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status, reserve = 0;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer size is rounded up to a 16-byte boundary. */
		rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			skb->dev = ndev;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			reserve = SH7763_SKB_ALIGN
				- ((uint32_t)skb->data & (SH7763_SKB_ALIGN - 1));
			if (reserve)
				skb_reserve(skb, reserve);
#else
			skb_reserve(skb, RX_OFFSET);
#endif
			skb->ip_summed = CHECKSUM_NONE;
			rxdesc->addr = (u32)skb->data & ~0x3UL;
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
		ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);

	return 0;
}
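
/*
 * Ownership note: RD_RACT doubles as the ownership flag. The receive
 * loop only touches descriptors with RD_RACT clear and hands each one
 * back by setting it again, while the EDRRR_R write restarts the
 * receive DMA whenever the engine stopped on an empty ring.
 */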

/* error control function */
static void sh_eth_error(struct net_device *ndev, int intr_status)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	u32 felic_stat;

	if (intr_status & EESR_ECI) {
		felic_stat = ctrl_inl(ioaddr + ECSR);
		ctrl_outl(felic_stat, ioaddr + ECSR);	/* clear int */
		if (felic_stat & ECSR_ICD)
			mdp->stats.tx_carrier_errors++;
		if (felic_stat & ECSR_LCHNG) {
			/* Link Changed */
			u32 link_stat = (ctrl_inl(ioaddr + PSR));
			if (!(link_stat & PHY_ST_LINK)) {
				/* Link Down : disable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) &
					  ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
			} else {
				/* Link Up */
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) &
					  ~DMAC_M_ECI, ioaddr + EESIPR);
				/* clear int */
				ctrl_outl(ctrl_inl(ioaddr + ECSR),
					  ioaddr + ECSR);
				ctrl_outl(ctrl_inl(ioaddr + EESIPR) |
					  DMAC_M_ECI, ioaddr + EESIPR);
				/* enable tx and rx */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) |
					  (ECMR_RE | ECMR_TE), ioaddr + ECMR);
			}
		}
	}

	if (intr_status & EESR_TWB) {
		/* Write-back end. Unused write-back interrupt. */
		if (intr_status & EESR_TABT)	/* Transmit Abort int */
			mdp->stats.tx_aborted_errors++;
	}

	if (intr_status & EESR_RABT) {
		/* Receive Abort int */
		if (intr_status & EESR_RFRMER) {
			/* Receive Frame Overflow int */
			mdp->stats.rx_frame_errors++;
			printk(KERN_ERR "Receive Frame Overflow\n");
		}
	}
#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
	if (intr_status & EESR_ADE) {
		if (intr_status & EESR_TDE) {
			if (intr_status & EESR_TFE)
				mdp->stats.tx_fifo_errors++;
		}
	}
#endif

	if (intr_status & EESR_RDE) {
		/* Receive Descriptor Empty int */
		mdp->stats.rx_over_errors++;

		if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R)
			ctrl_outl(EDRRR_R, ioaddr + EDRRR);
		printk(KERN_ERR "Receive Descriptor Empty\n");
	}
	if (intr_status & EESR_RFE) {
		/* Receive FIFO Overflow int */
		mdp->stats.rx_fifo_errors++;
		printk(KERN_ERR "Receive FIFO Overflow\n");
	}
	if (intr_status & (EESR_TWB | EESR_TABT |
#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
			   EESR_ADE |
#endif
			   EESR_TDE | EESR_TFE)) {
		/* Tx error */
		u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR);
		/* dmesg */
		printk(KERN_ERR "%s:TX error. status=%8.8x cur_tx=%8.8x ",
		       ndev->name, intr_status, mdp->cur_tx);
		printk(KERN_ERR "dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
		       mdp->dirty_tx, (u32) ndev->state, edtrr);
		/* dirty buffer free */
		sh_eth_txfree(ndev);

		/* SH7712 BUG */
		if (edtrr ^ EDTRR_TRNS) {
			/* tx dma start */
			ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);
		}
		/* wakeup */
		netif_wake_queue(ndev);
	}
}

static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
{
	struct net_device *ndev = netdev;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr, boguscnt = RX_RING_SIZE;
	u32 intr_status = 0;

	ioaddr = ndev->base_addr;
	spin_lock(&mdp->lock);

	/* Get interrupt status */
	intr_status = ctrl_inl(ioaddr + EESR);
	/* Clear interrupt */
	ctrl_outl(intr_status, ioaddr + EESR);

	if (intr_status & (EESR_FRC |	/* Frame recv */
			   EESR_RMAF |	/* Multicast address recv */
			   EESR_RRF |	/* Bit frame recv */
			   EESR_RTLF |	/* Long frame recv */
			   EESR_RTSF |	/* Short frame recv */
			   EESR_PRE |	/* PHY-LSI recv error */
			   EESR_CERF)) {	/* Recv frame CRC error */
		sh_eth_rx(ndev);
	}

	/* Tx Check */
	if (intr_status & TX_CHECK) {
		sh_eth_txfree(ndev);
		netif_wake_queue(ndev);
	}

	if (intr_status & EESR_ERR_CHECK)
		sh_eth_error(ndev, intr_status);

	if (--boguscnt < 0) {
		printk(KERN_WARNING
		       "%s: Too much work at interrupt, status=0x%4.4x.\n",
		       ndev->name, intr_status);
	}

	spin_unlock(&mdp->lock);

	return IRQ_HANDLED;
}

static void sh_eth_timer(unsigned long data)
{
	struct net_device *ndev = (struct net_device *)data;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	mod_timer(&mdp->timer, jiffies + (10 * HZ));
}
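
/*
 * Interrupt-handling note: sh_eth_interrupt() reads EESR once and
 * writes the value straight back, acknowledging every pending source
 * in a single pass; the saved intr_status then drives the Rx path, the
 * Tx-reclaim path and the error path above.
 */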

/* PHY state control function */
static void sh_eth_adjust_link(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;
	u32 ioaddr = ndev->base_addr;
	int new_state = 0;

	if (phydev->link != PHY_DOWN) {
		if (phydev->duplex != mdp->duplex) {
			new_state = 1;
			mdp->duplex = phydev->duplex;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			if (mdp->duplex) {	/* FULL */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM,
					  ioaddr + ECMR);
			} else {		/* Half */
				ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM,
					  ioaddr + ECMR);
			}
#endif
		}

		if (phydev->speed != mdp->speed) {
			new_state = 1;
			mdp->speed = phydev->speed;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
			switch (mdp->speed) {
			case 10:	/* 10BASE */
				ctrl_outl(GECMR_10, ioaddr + GECMR);
				break;
			case 100:	/* 100BASE */
				ctrl_outl(GECMR_100, ioaddr + GECMR);
				break;
			case 1000:	/* 1000BASE */
				ctrl_outl(GECMR_1000, ioaddr + GECMR);
				break;
			default:
				break;
			}
#endif
		}
		if (mdp->link == PHY_DOWN) {
			ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF)
				  | ECMR_DM, ioaddr + ECMR);
			new_state = 1;
			mdp->link = phydev->link;
		}
	} else if (mdp->link) {
		new_state = 1;
		mdp->link = PHY_DOWN;
		mdp->speed = 0;
		mdp->duplex = -1;
	}

	if (new_state)
		phy_print_status(phydev);
}

/* PHY init function */
static int sh_eth_phy_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	char phy_id[BUS_ID_SIZE];
	struct phy_device *phydev = NULL;

	snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
		 mdp->mii_bus->id, mdp->phy_id);

	mdp->link = PHY_DOWN;
	mdp->speed = 0;
	mdp->duplex = -1;

	/* Try to connect to the PHY */
	phydev = phy_connect(ndev, phy_id, &sh_eth_adjust_link,
			     0, PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		dev_err(&ndev->dev, "phy_connect failed\n");
		return PTR_ERR(phydev);
	}
	dev_info(&ndev->dev, "attached phy %i to driver %s\n",
		 phydev->addr, phydev->drv->name);

	mdp->phydev = phydev;

	return 0;
}

/* PHY control start function */
static int sh_eth_phy_start(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret;

	ret = sh_eth_phy_init(ndev);
	if (ret)
		return ret;

	/* reset phy - this also wakes it from PDOWN */
	phy_write(mdp->phydev, MII_BMCR, BMCR_RESET);
	phy_start(mdp->phydev);

	return 0;
}

/* network device open function */
static int sh_eth_open(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	ret = request_irq(ndev->irq, &sh_eth_interrupt, 0, ndev->name, ndev);
	if (ret) {
		printk(KERN_ERR "Cannot assign IRQ number to %s\n", CARDNAME);
		return ret;
	}

	/* Descriptor set */
	ret = sh_eth_ring_init(ndev);
	if (ret)
		goto out_free_irq;

	/* device init */
	ret = sh_eth_dev_init(ndev);
	if (ret)
		goto out_free_irq;

	/* PHY control start */
	ret = sh_eth_phy_start(ndev);
	if (ret)
		goto out_free_irq;

	/* Set the timer to check for link beat. */
	init_timer(&mdp->timer);
	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
	setup_timer(&mdp->timer, sh_eth_timer, (unsigned long)ndev);

	return ret;

out_free_irq:
	free_irq(ndev->irq, ndev);
	return ret;
}
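
/*
 * Open-path ordering: the IRQ is requested first, then the descriptor
 * rings are allocated, the hardware is brought up (sh_eth_dev_init()
 * already starts the Rx engine and the Tx queue), and only then is the
 * PHY connected and started.
 */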

/* Timeout function */
static void sh_eth_tx_timeout(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	struct sh_eth_rxdesc *rxdesc;
	int i;

	netif_stop_queue(ndev);

	/* warning message out. */
	printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
	       " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR));

	/* tx_errors count up */
	mdp->stats.tx_errors++;

	/* timer off */
	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		rxdesc = &mdp->rx_ring[i];
		rxdesc->status = 0;
		rxdesc->addr = 0xBADF00D0;
		if (mdp->rx_skbuff[i])
			dev_kfree_skb(mdp->rx_skbuff[i]);
		mdp->rx_skbuff[i] = NULL;
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (mdp->tx_skbuff[i])
			dev_kfree_skb(mdp->tx_skbuff[i]);
		mdp->tx_skbuff[i] = NULL;
	}

	/* device init */
	sh_eth_dev_init(ndev);

	/* timer on */
	mdp->timer.expires = jiffies + (24 * HZ) / 10;	/* 2.4 sec. */
	add_timer(&mdp->timer);
}

/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return 1;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	txdesc->addr = (u32)(skb->data);
	/* soft swap. */
	swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
	/* write back */
	__flush_purge_region(skb->data, skb->len);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
		ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);

	ndev->trans_start = jiffies;

	return 0;
}
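
/*
 * Transmit hand-off: the queue is stopped once the ring comes within
 * four entries of full and no descriptor can be reclaimed; the data
 * cache is written back with __flush_purge_region() because the EDMAC
 * reads memory directly, and setting TD_TACT plus the EDTRR_TRNS kick
 * transfers ownership of the descriptor to the controller.
 */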

/* device close function */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	ctrl_outl(0x0000, ioaddr + EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	ctrl_outl(0, ioaddr + EDTRR);
	ctrl_outl(0, ioaddr + EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free Rx DMA buffer */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free Tx DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	return 0;
}

static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;

	mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
	ctrl_outl(0, ioaddr + TROCR);	/* (write clear) */
	mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
	ctrl_outl(0, ioaddr + CDCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR);
	ctrl_outl(0, ioaddr + LCCR);	/* (write clear) */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);	/* CERCR */
	ctrl_outl(0, ioaddr + CERCR);	/* (write clear) */
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);	/* CEECR */
	ctrl_outl(0, ioaddr + CEECR);	/* (write clear) */
#else
	mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
	ctrl_outl(0, ioaddr + CNDCR);	/* (write clear) */
#endif
	return &mdp->stats;
}
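
/*
 * The statistics registers above (TROCR, CDCR, LCCR, ...) are hardware
 * event counters: each is accumulated into net_device_stats and then
 * zeroed, so every read-and-clear pass picks up only the events that
 * occurred since the previous call.
 */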

/* ioctl to device function */
static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq,
			   int cmd)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct phy_device *phydev = mdp->phydev;

	if (!netif_running(ndev))
		return -EINVAL;

	if (!phydev)
		return -ENODEV;

	return phy_mii_ioctl(phydev, if_mii(rq), cmd);
}

/* Multicast reception mode set */
static void sh_eth_set_multicast_list(struct net_device *ndev)
{
	u32 ioaddr = ndev->base_addr;

	if (ndev->flags & IFF_PROMISC) {
		/* Set promiscuous. */
		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
			  ioaddr + ECMR);
	} else {
		/* Normal, unicast/broadcast-only mode. */
		ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
			  ioaddr + ECMR);
	}
}

/* SuperH's TSU register init function */
static void sh_eth_tsu_init(u32 ioaddr)
{
	ctrl_outl(0, ioaddr + TSU_FWEN0);	/* Disable forward(0->1) */
	ctrl_outl(0, ioaddr + TSU_FWEN1);	/* Disable forward(1->0) */
	ctrl_outl(0, ioaddr + TSU_FCM);		/* forward fifo 3k-3k */
	ctrl_outl(0xc, ioaddr + TSU_BSYSL0);
	ctrl_outl(0xc, ioaddr + TSU_BSYSL1);
	ctrl_outl(0, ioaddr + TSU_PRISL0);
	ctrl_outl(0, ioaddr + TSU_PRISL1);
	ctrl_outl(0, ioaddr + TSU_FWSL0);
	ctrl_outl(0, ioaddr + TSU_FWSL1);
	ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
	ctrl_outl(0, ioaddr + TSU_QTAG0);	/* Disable QTAG(0->1) */
	ctrl_outl(0, ioaddr + TSU_QTAG1);	/* Disable QTAG(1->0) */
#else
	ctrl_outl(0, ioaddr + TSU_QTAGM0);	/* Disable QTAG(0->1) */
	ctrl_outl(0, ioaddr + TSU_QTAGM1);	/* Disable QTAG(1->0) */
#endif
	ctrl_outl(0, ioaddr + TSU_FWSR);	/* all interrupt status clear */
	ctrl_outl(0, ioaddr + TSU_FWINMK);	/* Disable all interrupt */
	ctrl_outl(0, ioaddr + TSU_TEN);		/* Disable all CAM entry */
	ctrl_outl(0, ioaddr + TSU_POST1);	/* Disable CAM entry [ 0- 7] */
	ctrl_outl(0, ioaddr + TSU_POST2);	/* Disable CAM entry [ 8-15] */
	ctrl_outl(0, ioaddr + TSU_POST3);	/* Disable CAM entry [16-23] */
	ctrl_outl(0, ioaddr + TSU_POST4);	/* Disable CAM entry [24-31] */
}

/* MDIO bus release function */
static int sh_mdio_release(struct net_device *ndev)
{
	struct mii_bus *bus = dev_get_drvdata(&ndev->dev);

	/* unregister mdio bus */
	mdiobus_unregister(bus);

	/* remove mdio bus info from net_device */
	dev_set_drvdata(&ndev->dev, NULL);

	/* free bitbang info */
	free_mdio_bitbang(bus);

	return 0;
}

/* MDIO bus init function */
static int sh_mdio_init(struct net_device *ndev, int id)
{
	int ret, i;
	struct bb_info *bitbang;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* create bit control struct for PHY */
	bitbang = kzalloc(sizeof(struct bb_info), GFP_KERNEL);
	if (!bitbang) {
		ret = -ENOMEM;
		goto out;
	}

	/* bitbang init */
	bitbang->addr = ndev->base_addr + PIR;
	bitbang->mdi_msk = 0x08;
	bitbang->mdo_msk = 0x04;
	bitbang->mmd_msk = 0x02;	/* MMD */
	bitbang->mdc_msk = 0x01;
	bitbang->ctrl.ops = &bb_ops;

	/* MII controller setting */
	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!mdp->mii_bus) {
		ret = -ENOMEM;
		goto out_free_bitbang;
	}

	/* Hook up MII support for ethtool */
	mdp->mii_bus->name = "sh_mii";
	mdp->mii_bus->parent = &ndev->dev;
	snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%x", id);

	/* PHY IRQ */
	mdp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!mdp->mii_bus->irq) {
		ret = -ENOMEM;
		goto out_free_bus;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		mdp->mii_bus->irq[i] = PHY_POLL;

	/* register mdio bus */
	ret = mdiobus_register(mdp->mii_bus);
	if (ret)
		goto out_free_irq;

	dev_set_drvdata(&ndev->dev, mdp->mii_bus);

	return 0;

out_free_irq:
	kfree(mdp->mii_bus->irq);

out_free_bus:
	free_mdio_bitbang(mdp->mii_bus);

out_free_bitbang:
	kfree(bitbang);

out:
	return ret;
}
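
/*
 * MDIO note: the PHY is reached by bit-banging the PIR register through
 * the generic mdio-bitbang layer; bits 0x01, 0x02, 0x04 and 0x08 map to
 * MDC, the MMD direction control, MDO and MDI respectively, as set up
 * in sh_mdio_init() above.
 */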

static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, i, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp;
	struct sh_eth_plat_data *pd;

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		ret = -EINVAL;
		goto out;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev) {
		printk(KERN_ERR "%s: could not allocate device.\n", CARDNAME);
		ret = -ENOMEM;
		goto out;
	}

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	/* Fill in the fields of the device structure with ethernet values. */
	ether_setup(ndev);

	mdp = netdev_priv(ndev);
	spin_lock_init(&mdp->lock);

	pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
	/* get PHY ID */
	mdp->phy_id = pd->phy;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;

	/* set function */
	ndev->open = sh_eth_open;
	ndev->hard_start_xmit = sh_eth_start_xmit;
	ndev->stop = sh_eth_close;
	ndev->get_stats = sh_eth_get_stats;
	ndev->set_multicast_list = sh_eth_set_multicast_list;
	ndev->do_ioctl = sh_eth_do_ioctl;
	ndev->tx_timeout = sh_eth_tx_timeout;
	ndev->watchdog_timeo = TX_TIMEOUT;

	mdp->post_rx = POST_RX >> (devno << 1);
	mdp->post_fw = POST_FW >> (devno << 1);

	/* read and set MAC address */
	read_mac_address(ndev);

	/* First device only init */
	if (!devno) {
#if defined(ARSTR)
		/* reset device */
		ctrl_outl(ARSTR_ARSTR, ARSTR);
		mdelay(1);
#endif

#if defined(SH_TSU_ADDR)
		/* TSU init (Init only) */
		sh_eth_tsu_init(SH_TSU_ADDR);
#endif
	}

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_release;

	/* mdio bus init */
	ret = sh_mdio_init(ndev, pdev->id);
	if (ret)
		goto out_unregister;

	/* print device information */
	printk(KERN_INFO "%s: %s at 0x%x, ",
	       ndev->name, CARDNAME, (u32) ndev->base_addr);

	for (i = 0; i < 5; i++)
		printk("%02X:", ndev->dev_addr[i]);
	printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);

	platform_set_drvdata(pdev, ndev);

	return ret;

out_unregister:
	unregister_netdev(ndev);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

out:
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);

	sh_mdio_release(ndev);
	unregister_netdev(ndev);
	flush_scheduled_work();

	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
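
/*
 * Probe-time contract: board setup code is expected to register a
 * platform device carrying one IORESOURCE_MEM entry for the register
 * block, one IRQ, and a struct sh_eth_plat_data (PHY address and EDMAC
 * endianness) as platform_data; pd is dereferenced unconditionally
 * above, so a device registered without platform_data would oops here.
 */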

static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.driver = {
		.name = CARDNAME,
	},
};

static int __init sh_eth_init(void)
{
	return platform_driver_register(&sh_eth_driver);
}

static void __exit sh_eth_cleanup(void)
{
	platform_driver_unregister(&sh_eth_driver);
}

module_init(sh_eth_init);
module_exit(sh_eth_cleanup);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");
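
/*
 * Usage sketch (illustrative, not part of this file): board code binds
 * this driver by registering a platform device whose name matches
 * CARDNAME. The base address and IRQ below are hypothetical; the
 * platform-data field names are the ones dereferenced in
 * sh_eth_drv_probe() above.
 *
 *	static struct resource sh_eth_resources[] = {
 *		{
 *			.start	= 0xA7000000,	// hypothetical register base
 *			.end	= 0xA70001FF,
 *			.flags	= IORESOURCE_MEM,
 *		}, {
 *			.start	= 57,		// hypothetical IRQ number
 *			.flags	= IORESOURCE_IRQ,
 *		},
 *	};
 *
 *	static struct sh_eth_plat_data sh_eth_pdata = {
 *		.phy		= 1,		// PHY address on the MDIO bus
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *	};
 *
 *	static struct platform_device sh_eth_device = {
 *		.name		= CARDNAME,	// as defined in sh_eth.h
 *		.id		= 0,
 *		.dev		= { .platform_data = &sh_eth_pdata },
 *		.num_resources	= ARRAY_SIZE(sh_eth_resources),
 *		.resource	= sh_eth_resources,
 *	};
 *
 *	platform_device_register(&sh_eth_device);
 */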