Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

at v2.6.33-rc3, 1292 lines, 36 kB
/*
 * Copyright (C) 2006, 2007 Eugene Konev
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/moduleparam.h>

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <asm/gpio.h>
#include <asm/atomic.h>

MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");

static int debug_level = 8;
static int dumb_switch;

/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);

MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");

#define CPMAC_VERSION "0.5.1"
/* frame size + 802.1q tag */
#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + 4)
#define CPMAC_QUEUES 8

/* Ethernet registers */
#define CPMAC_TX_CONTROL 0x0004
#define CPMAC_TX_TEARDOWN 0x0008
#define CPMAC_RX_CONTROL 0x0014
#define CPMAC_RX_TEARDOWN 0x0018
#define CPMAC_MBP 0x0100
# define MBP_RXPASSCRC 0x40000000
# define MBP_RXQOS 0x20000000
# define MBP_RXNOCHAIN 0x10000000
# define MBP_RXCMF 0x01000000
# define MBP_RXSHORT 0x00800000
# define MBP_RXCEF 0x00400000
# define MBP_RXPROMISC 0x00200000
# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
# define MBP_RXBCAST 0x00002000
# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
# define MBP_RXMCAST 0x00000020
# define MBP_MCASTCHAN(channel) ((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE 0x0104
#define CPMAC_UNICAST_CLEAR 0x0108
#define CPMAC_MAX_LENGTH 0x010c
#define CPMAC_BUFFER_OFFSET 0x0110
#define CPMAC_MAC_CONTROL 0x0160
# define MAC_TXPTYPE 0x00000200
# define MAC_TXPACE 0x00000040
# define MAC_MII 0x00000020
# define MAC_TXFLOW 0x00000010
# define MAC_RXFLOW 0x00000008
# define MAC_MTEST 0x00000004
# define MAC_LOOPBACK 0x00000002
# define MAC_FDX 0x00000001
#define CPMAC_MAC_STATUS 0x0164
# define MAC_STATUS_QOS 0x00000004
# define MAC_STATUS_RXFLOW 0x00000002
# define MAC_STATUS_TXFLOW 0x00000001
#define CPMAC_TX_INT_ENABLE 0x0178
#define CPMAC_TX_INT_CLEAR 0x017c
#define CPMAC_MAC_INT_VECTOR 0x0180
# define MAC_INT_STATUS 0x00080000
# define MAC_INT_HOST 0x00040000
# define MAC_INT_RX 0x00020000
# define MAC_INT_TX 0x00010000
#define CPMAC_MAC_EOI_VECTOR 0x0184
#define CPMAC_RX_INT_ENABLE 0x0198
#define CPMAC_RX_INT_CLEAR 0x019c
#define CPMAC_MAC_INT_ENABLE 0x01a8
#define CPMAC_MAC_INT_CLEAR 0x01ac
#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID 0x01d0
#define CPMAC_MAC_ADDR_HI 0x01d4
#define CPMAC_MAC_HASH_LO 0x01d8
#define CPMAC_MAC_HASH_HI 0x01dc
#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
#define CPMAC_REG_END 0x0680
/*
 * Rx/Tx statistics
 * TODO: use some of them to fill stats in cpmac_stats()
 */
#define CPMAC_STATS_RX_GOOD 0x0200
#define CPMAC_STATS_RX_BCAST 0x0204
#define CPMAC_STATS_RX_MCAST 0x0208
#define CPMAC_STATS_RX_PAUSE 0x020c
#define CPMAC_STATS_RX_CRC 0x0210
#define CPMAC_STATS_RX_ALIGN 0x0214
#define CPMAC_STATS_RX_OVER 0x0218
#define CPMAC_STATS_RX_JABBER 0x021c
#define CPMAC_STATS_RX_UNDER 0x0220
#define CPMAC_STATS_RX_FRAG 0x0224
#define CPMAC_STATS_RX_FILTER 0x0228
#define CPMAC_STATS_RX_QOSFILTER 0x022c
#define CPMAC_STATS_RX_OCTETS 0x0230

#define CPMAC_STATS_TX_GOOD 0x0234
#define CPMAC_STATS_TX_BCAST 0x0238
#define CPMAC_STATS_TX_MCAST 0x023c
#define CPMAC_STATS_TX_PAUSE 0x0240
#define CPMAC_STATS_TX_DEFER 0x0244
#define CPMAC_STATS_TX_COLLISION 0x0248
#define CPMAC_STATS_TX_SINGLECOLL 0x024c
#define CPMAC_STATS_TX_MULTICOLL 0x0250
#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
#define CPMAC_STATS_TX_LATECOLL 0x0258
#define CPMAC_STATS_TX_UNDERRUN 0x025c
#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
#define CPMAC_STATS_TX_OCTETS 0x0264

#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
					    (reg)))

/* MDIO bus */
#define CPMAC_MDIO_VERSION 0x0000
#define CPMAC_MDIO_CONTROL 0x0004
# define MDIOC_IDLE 0x80000000
# define MDIOC_ENABLE 0x40000000
# define MDIOC_PREAMBLE 0x00100000
# define MDIOC_FAULT 0x00080000
# define MDIOC_FAULTDETECT 0x00040000
# define MDIOC_INTTEST 0x00020000
# define MDIOC_CLKDIV(div) ((div) & 0xff)
#define CPMAC_MDIO_ALIVE 0x0008
#define CPMAC_MDIO_LINK 0x000c
#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
# define MDIO_BUSY 0x80000000
# define MDIO_WRITE 0x40000000
# define MDIO_REG(reg) (((reg) & 0x1f) << 21)
# define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
# define MDIO_DATA(data) ((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
# define PHYSEL_LINKSEL 0x00000040
# define PHYSEL_LINKINT 0x00000020

struct cpmac_desc {
	u32 hw_next;
	u32 hw_data;
	u16 buflen;
	u16 bufflags;
	u16 datalen;
	u16 dataflags;
#define CPMAC_SOP 0x8000
#define CPMAC_EOP 0x4000
#define CPMAC_OWN 0x2000
#define CPMAC_EOQ 0x1000
	struct sk_buff *skb;
	struct cpmac_desc *next;
	struct cpmac_desc *prev;
	dma_addr_t mapping;
	dma_addr_t data_mapping;
};

struct cpmac_priv {
	spinlock_t lock;
	spinlock_t rx_lock;
	struct cpmac_desc *rx_head;
	int ring_size;
	struct cpmac_desc *desc_ring;
	dma_addr_t dma_ring;
	void __iomem *regs;
	struct mii_bus *mii_bus;
	struct phy_device *phy;
	char phy_name[MII_BUS_ID_SIZE + 3];
	int oldlink, oldspeed, oldduplex;
	u32 msg_enable;
	struct net_device *dev;
	struct work_struct reset_work;
	struct platform_device *pdev;
	struct napi_struct napi;
	atomic_t reset_pending;
};

static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);

static void cpmac_dump_regs(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	for (i = 0; i < CPMAC_REG_END; i += 4) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
			       priv->regs + i);
		}
		printk(" %08x", cpmac_read(priv->regs, i));
	}
	printk("\n");
}

static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
	int i;
	printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
	for (i = 0; i < sizeof(*desc) / 4; i++)
		printk(" %08x", ((u32 *)desc)[i]);
	printk("\n");
}

static void cpmac_dump_all_desc(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *dump = priv->rx_head;
	do {
		cpmac_dump_desc(dev, dump);
		dump = dump->next;
	} while (dump != priv->rx_head);
}

static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
	int i;
	printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
	for (i = 0; i < skb->len; i++) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk(KERN_DEBUG "%s: data[%p]:", dev->name,
			       skb->data + i);
		}
		printk(" %02x", ((u8 *)skb->data)[i]);
	}
	printk("\n");
}

static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	u32 val;

	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
		    MDIO_PHY(phy_id));
	while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
		cpu_relax();
	return MDIO_DATA(val);
}

static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
			    int reg, u16 val)
{
	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
		    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
	return 0;
}

static int cpmac_mdio_reset(struct mii_bus *bus)
{
	ar7_device_reset(AR7_RESET_BIT_MDIO);
	cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
		    MDIOC_CLKDIV(ar7_cpmac_freq() / 2200000 - 1));
	return 0;
}

static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };

static struct mii_bus *cpmac_mii;

static int cpmac_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP)
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != dev->base_addr)
		return -EOPNOTSUPP;

	/* ignore other fields */
	return 0;
}

static void cpmac_set_multicast_list(struct net_device *dev)
{
	struct dev_mc_list *iter;
	int i;
	u8 tmp;
	u32 mbp, bit, hash[2] = { 0, };
	struct cpmac_priv *priv = netdev_priv(dev);

	mbp = cpmac_read(priv->regs, CPMAC_MBP);
	if (dev->flags & IFF_PROMISC) {
		cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
			    MBP_RXPROMISC);
	} else {
		cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
		if (dev->flags & IFF_ALLMULTI) {
			/* enable all multicast mode */
			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
		} else {
			/*
			 * cpmac uses some strange mac address hashing
			 * (not crc32)
			 */
			for (i = 0, iter = dev->mc_list; i < dev->mc_count;
			     i++, iter = iter->next) {
				bit = 0;
				tmp = iter->dmi_addr[0];
				bit ^= (tmp >> 2) ^ (tmp << 4);
				tmp = iter->dmi_addr[1];
				bit ^= (tmp >> 4) ^ (tmp << 2);
				tmp = iter->dmi_addr[2];
				bit ^= (tmp >> 6) ^ tmp;
				tmp = iter->dmi_addr[3];
				bit ^= (tmp >> 2) ^ (tmp << 4);
				tmp = iter->dmi_addr[4];
				bit ^= (tmp >> 4) ^ (tmp << 2);
				tmp = iter->dmi_addr[5];
				bit ^= (tmp >> 6) ^ tmp;
				bit &= 0x3f;
				hash[bit / 32] |= 1 << (bit % 32);
			}

			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
		}
	}
}

static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: spurious interrupt\n",
			       priv->dev->name);
		return NULL;
	}

	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		desc->skb->ip_summed = CHECKSUM_NONE;
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			printk(KERN_DEBUG "%s: received packet:\n",
			       priv->dev->name);
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: low on skbs, dropping packet\n",
			       priv->dev->name);
		priv->dev->stats.rx_dropped++;
	}

	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}

static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
					       " duplicate EOQ: %p and %p\n",
					       priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size) */

	if (!restart &&
	    (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
	    == CPMAC_EOQ &&
	    (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
				       "restart rx from a descriptor that's "
				       "not free: %p\n",
				       priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
		       "Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
		       priv->dev->name,
		       cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
		       cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;

}

static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	netif_stop_subqueue(dev, queue);

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}

static void cpmac_end_xmit(struct net_device *dev, int queue)
{
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	desc = &priv->desc_ring[queue];
	cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
	if (likely(desc->skb)) {
		spin_lock(&priv->lock);
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += desc->skb->len;
		spin_unlock(&priv->lock);
		dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
				 DMA_TO_DEVICE);

		if (unlikely(netif_msg_tx_done(priv)))
			printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
			       desc->skb, desc->skb->len);

		dev_kfree_skb_irq(desc->skb);
		desc->skb = NULL;
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	} else {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING
			       "%s: end_xmit: spurious interrupt\n", dev->name);
		if (__netif_subqueue_stopped(dev, queue))
			netif_wake_subqueue(dev, queue);
	}
}

static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}

static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = priv->pdev->dev.platform_data;

	ar7_device_reset(pdata->reset_bit);
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);

	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
		    MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
		    (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);

	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
		    MAC_FDX);
}

static void cpmac_clear_rx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *desc;
	int i;
	if (unlikely(!priv->rx_head))
		return;
	desc = priv->rx_head;
	for (i = 0; i < priv->ring_size; i++) {
		if ((desc->dataflags & CPMAC_OWN) == 0) {
			if (netif_msg_rx_err(priv) && net_ratelimit())
				printk(KERN_WARNING "%s: packet dropped\n",
				       dev->name);
			if (unlikely(netif_msg_hw(priv)))
				cpmac_dump_desc(dev, desc);
			desc->dataflags = CPMAC_OWN;
			dev->stats.rx_dropped++;
		}
		desc->hw_next = desc->next->mapping;
		desc = desc->next;
	}
	priv->rx_head->prev->hw_next = 0;
}

static void cpmac_clear_tx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int i;
	if (unlikely(!priv->desc_ring))
		return;
	for (i = 0; i < CPMAC_QUEUES; i++) {
		priv->desc_ring[i].dataflags = 0;
		if (priv->desc_ring[i].skb) {
			dev_kfree_skb_any(priv->desc_ring[i].skb);
			priv->desc_ring[i].skb = NULL;
		}
	}
}

static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}

static void cpmac_check_status(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
	int rx_channel = (macstatus >> 8) & 7;
	int rx_code = (macstatus >> 12) & 15;
	int tx_channel = (macstatus >> 16) & 7;
	int tx_code = (macstatus >> 20) & 15;

	if (rx_code || tx_code) {
		if (netif_msg_drv(priv) && net_ratelimit()) {
			/* Can't find any documentation on what these
			 * error codes actually are. So just log them and hope..
			 */
			if (rx_code)
				printk(KERN_WARNING "%s: host error %d on rx "
				       "channel %d (macstatus %08x), resetting\n",
				       dev->name, rx_code, rx_channel, macstatus);
			if (tx_code)
				printk(KERN_WARNING "%s: host error %d on tx "
				       "channel %d (macstatus %08x), resetting\n",
				       dev->name, tx_code, tx_channel, macstatus);
		}

		netif_tx_stop_all_queues(dev);
		cpmac_hw_stop(dev);
		if (schedule_work(&priv->reset_work))
			atomic_inc(&priv->reset_pending);
		if (unlikely(netif_msg_hw(priv)))
			cpmac_dump_regs(dev);
	}
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}

static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
		       status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
		cpmac_check_status(dev);

	return IRQ_HANDLED;
}

static void cpmac_tx_timeout(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	dev->stats.tx_errors++;
	spin_unlock(&priv->lock);
	if (netif_msg_tx_err(priv) && net_ratelimit())
		printk(KERN_WARNING "%s: transmit timeout\n", dev->name);

	atomic_inc(&priv->reset_pending);
	barrier();
	cpmac_clear_tx(dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
}

static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	if (!(netif_running(dev)))
		return -EINVAL;
	if (!priv->phy)
		return -EINVAL;
	if ((cmd == SIOCGMIIPHY) || (cmd == SIOCGMIIREG) ||
	    (cmd == SIOCSMIIREG))
		return phy_mii_ioctl(priv->phy, if_mii(ifr), cmd);

	return -EOPNOTSUPP;
}

static int cpmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (priv->phy)
		return phy_ethtool_gset(priv->phy, cmd);

	return -EINVAL;
}

static int cpmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (priv->phy)
		return phy_ethtool_sset(priv->phy, cmd);

	return -EINVAL;
}

static void cpmac_get_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	ring->rx_max_pending = 1024;
	ring->rx_mini_max_pending = 1;
	ring->rx_jumbo_max_pending = 1;
	ring->tx_max_pending = 1;

	ring->rx_pending = priv->ring_size;
	ring->rx_mini_pending = 1;
	ring->rx_jumbo_pending = 1;
	ring->tx_pending = 1;
}

static int cpmac_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (netif_running(dev))
		return -EBUSY;
	priv->ring_size = ring->rx_pending;
	return 0;
}

static void cpmac_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	strcpy(info->driver, "cpmac");
	strcpy(info->version, CPMAC_VERSION);
	info->fw_version[0] = '\0';
	sprintf(info->bus_info, "%s", "cpmac");
	info->regdump_len = 0;
}

static const struct ethtool_ops cpmac_ethtool_ops = {
	.get_settings = cpmac_get_settings,
	.set_settings = cpmac_set_settings,
	.get_drvinfo = cpmac_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cpmac_get_ringparam,
	.set_ringparam = cpmac_set_ringparam,
};

static void cpmac_adjust_link(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	int new_state = 0;

	spin_lock(&priv->lock);
	if (priv->phy->link) {
		netif_tx_start_all_queues(dev);
		if (priv->phy->duplex != priv->oldduplex) {
			new_state = 1;
			priv->oldduplex = priv->phy->duplex;
		}

		if (priv->phy->speed != priv->oldspeed) {
			new_state = 1;
			priv->oldspeed = priv->phy->speed;
		}

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		new_state = 1;
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv) && net_ratelimit())
		phy_print_status(priv->phy);

	spin_unlock(&priv->lock);
}

static int cpmac_open(struct net_device *dev)
{
	int i, size, res;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;
	struct cpmac_desc *desc;
	struct sk_buff *skb;

	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	if (!request_mem_region(mem->start, mem->end - mem->start, dev->name)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to request registers\n",
			       dev->name);
		res = -ENXIO;
		goto fail_reserve;
	}

	priv->regs = ioremap(mem->start, mem->end - mem->start);
	if (!priv->regs) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to remap registers\n",
			       dev->name);
		res = -ENXIO;
		goto fail_remap;
	}

	size = priv->ring_size + CPMAC_QUEUES;
	priv->desc_ring = dma_alloc_coherent(&dev->dev,
					     sizeof(struct cpmac_desc) * size,
					     &priv->dma_ring,
					     GFP_KERNEL);
	if (!priv->desc_ring) {
		res = -ENOMEM;
		goto fail_alloc;
	}

	for (i = 0; i < size; i++)
		priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
		if (unlikely(!skb)) {
			res = -ENOMEM;
			goto fail_desc;
		}
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		desc->buflen = CPMAC_SKB_SIZE;
		desc->dataflags = CPMAC_OWN;
		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
		desc->next->prev = desc;
		desc->hw_next = (u32)desc->next->mapping;
	}

	priv->rx_head->prev->hw_next = (u32)0;

	if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED,
			       dev->name, dev))) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: failed to obtain irq\n",
			       dev->name);
		goto fail_irq;
	}

	atomic_set(&priv->reset_pending, 0);
	INIT_WORK(&priv->reset_work, cpmac_hw_error);
	cpmac_hw_start(dev);

	napi_enable(&priv->napi);
	priv->phy->state = PHY_CHANGELINK;
	phy_start(priv->phy);

	return 0;

fail_irq:
fail_desc:
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}
fail_alloc:
	kfree(priv->desc_ring);
	iounmap(priv->regs);

fail_remap:
	release_mem_region(mem->start, mem->end - mem->start);

fail_reserve:
	return res;
}

static int cpmac_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;

	netif_tx_stop_all_queues(dev);

	cancel_work_sync(&priv->reset_work);
	napi_disable(&priv->napi);
	phy_stop(priv->phy);

	cpmac_hw_stop(dev);

	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
	cpmac_write(priv->regs, CPMAC_MBP, 0);

	free_irq(dev->irq, dev);
	iounmap(priv->regs);
	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	release_mem_region(mem->start, mem->end - mem->start);
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}

	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
			  (CPMAC_QUEUES + priv->ring_size),
			  priv->desc_ring, priv->dma_ring);
	return 0;
}

static const struct net_device_ops cpmac_netdev_ops = {
	.ndo_open = cpmac_open,
	.ndo_stop = cpmac_stop,
	.ndo_start_xmit = cpmac_start_xmit,
	.ndo_tx_timeout = cpmac_tx_timeout,
	.ndo_set_multicast_list = cpmac_set_multicast_list,
	.ndo_do_ioctl = cpmac_ioctl,
	.ndo_set_config = cpmac_config,
	.ndo_change_mtu = eth_change_mtu,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_mac_address = eth_mac_addr,
};

static int external_switch;

static int __devinit cpmac_probe(struct platform_device *pdev)
{
	int rc, phy_id;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	struct resource *mem;
	struct cpmac_priv *priv;
	struct net_device *dev;
	struct plat_cpmac_data *pdata;

	pdata = pdev->dev.platform_data;

	if (external_switch || dumb_switch) {
		strncpy(mdio_bus_id, "0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	} else {
		for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
			if (!(pdata->phy_mask & (1 << phy_id)))
				continue;
			if (!cpmac_mii->phy_map[phy_id])
				continue;
			strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
			break;
		}
	}

	if (phy_id == PHY_MAX_ADDR) {
		dev_err(&pdev->dev, "no PHY present\n");
		return -ENODEV;
	}

	dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);

	if (!dev) {
		printk(KERN_ERR "cpmac: Unable to allocate net_device\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pdev, dev);
	priv = netdev_priv(dev);

	priv->pdev = pdev;
	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!mem) {
		rc = -ENODEV;
		goto fail;
	}

	dev->irq = platform_get_irq_byname(pdev, "irq");

	dev->netdev_ops = &cpmac_netdev_ops;
	dev->ethtool_ops = &cpmac_ethtool_ops;

	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->rx_lock);
	priv->dev = dev;
	priv->ring_size = 64;
	priv->msg_enable = netif_msg_init(debug_level, 0xff);
	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));

	snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id);

	priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (IS_ERR(priv->phy)) {
		if (netif_msg_drv(priv))
			printk(KERN_ERR "%s: Could not attach to PHY\n",
			       dev->name);
		return PTR_ERR(priv->phy);
	}

	if ((rc = register_netdev(dev))) {
		printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
		       dev->name);
		goto fail;
	}

	if (netif_msg_probe(priv)) {
		printk(KERN_INFO
		       "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
		       "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
		       priv->phy_name, dev->dev_addr);
	}
	return 0;

fail:
	free_netdev(dev);
	return rc;
}

static int __devexit cpmac_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	unregister_netdev(dev);
	free_netdev(dev);
	return 0;
}

static struct platform_driver cpmac_driver = {
	.driver.name = "cpmac",
	.driver.owner = THIS_MODULE,
	.probe = cpmac_probe,
	.remove = __devexit_p(cpmac_remove),
};

int __devinit cpmac_init(void)
{
	u32 mask;
	int i, res;

	cpmac_mii = mdiobus_alloc();
	if (cpmac_mii == NULL)
		return -ENOMEM;

	cpmac_mii->name = "cpmac-mii";
	cpmac_mii->read = cpmac_mdio_read;
	cpmac_mii->write = cpmac_mdio_write;
	cpmac_mii->reset = cpmac_mdio_reset;
	cpmac_mii->irq = mii_irqs;

	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

	if (!cpmac_mii->priv) {
		printk(KERN_ERR "Can't ioremap mdio registers\n");
		res = -ENXIO;
		goto fail_alloc;
	}

#warning FIXME: unhardcode gpio&reset bits
	ar7_gpio_disable(26);
	ar7_gpio_disable(27);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
	ar7_device_reset(AR7_RESET_BIT_EPHY);

	cpmac_mii->reset(cpmac_mii);

	for (i = 0; i < 300; i++)
		if ((mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE)))
			break;
		else
			msleep(10);

	mask &= 0x7fffffff;
	if (mask & (mask - 1)) {
		external_switch = 1;
		mask = 0;
	}

	cpmac_mii->phy_mask = ~(mask | 0x80000000);
	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "1");

	res = mdiobus_register(cpmac_mii);
	if (res)
		goto fail_mii;

	res = platform_driver_register(&cpmac_driver);
	if (res)
		goto fail_cpmac;

	return 0;

fail_cpmac:
	mdiobus_unregister(cpmac_mii);

fail_mii:
	iounmap(cpmac_mii->priv);

fail_alloc:
	mdiobus_free(cpmac_mii);

	return res;
}

void __devexit cpmac_exit(void)
{
	platform_driver_unregister(&cpmac_driver);
	mdiobus_unregister(cpmac_mii);
	mdiobus_free(cpmac_mii);
	iounmap(cpmac_mii->priv);
}

module_init(cpmac_init);
module_exit(cpmac_exit);
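
Editor's note: the multicast filter in cpmac_set_multicast_list() above reduces each MAC address to a 6-bit bucket with a simple XOR/shift scheme (the driver's own comment points out it is not the usual CRC-32 hash) and then sets the corresponding bit in CPMAC_MAC_HASH_LO or CPMAC_MAC_HASH_HI. The following is a minimal, hypothetical userspace sketch of that same computation, not part of the driver, useful only for checking by hand which hash bit a given address would select; the function name cpmac_mcast_hash is an invention of this note.

/* Standalone sketch: 6-bit multicast hash as computed in
 * cpmac_set_multicast_list(); compile and run in userspace. */
#include <stdint.h>
#include <stdio.h>

static unsigned int cpmac_mcast_hash(const uint8_t addr[6])
{
	unsigned int bit = 0;
	uint8_t tmp;

	tmp = addr[0];
	bit ^= (tmp >> 2) ^ (tmp << 4);
	tmp = addr[1];
	bit ^= (tmp >> 4) ^ (tmp << 2);
	tmp = addr[2];
	bit ^= (tmp >> 6) ^ tmp;
	tmp = addr[3];
	bit ^= (tmp >> 2) ^ (tmp << 4);
	tmp = addr[4];
	bit ^= (tmp >> 4) ^ (tmp << 2);
	tmp = addr[5];
	bit ^= (tmp >> 6) ^ tmp;

	return bit & 0x3f;	/* one of 64 bits across HASH_LO/HASH_HI */
}

int main(void)
{
	/* example address: IPv4 all-hosts multicast MAC */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	unsigned int bit = cpmac_mcast_hash(addr);

	printf("hash bit %u -> CPMAC_MAC_HASH_%s, mask 0x%08x\n",
	       bit, bit < 32 ? "LO" : "HI", 1u << (bit % 32));
	return 0;
}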