Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'cpmac-next'

Varka Bhadram says:

====================
This patch series cleans up the AR7 CPMAC Ethernet controller driver
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+148 -139
+148 -139
drivers/net/ethernet/ti/cpmac.c
··· 67 67 #define CPMAC_RX_CONTROL 0x0014 68 68 #define CPMAC_RX_TEARDOWN 0x0018 69 69 #define CPMAC_MBP 0x0100 70 - # define MBP_RXPASSCRC 0x40000000 71 - # define MBP_RXQOS 0x20000000 72 - # define MBP_RXNOCHAIN 0x10000000 73 - # define MBP_RXCMF 0x01000000 74 - # define MBP_RXSHORT 0x00800000 75 - # define MBP_RXCEF 0x00400000 76 - # define MBP_RXPROMISC 0x00200000 77 - # define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) 78 - # define MBP_RXBCAST 0x00002000 79 - # define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) 80 - # define MBP_RXMCAST 0x00000020 81 - # define MBP_MCASTCHAN(channel) ((channel) & 0x7) 70 + #define MBP_RXPASSCRC 0x40000000 71 + #define MBP_RXQOS 0x20000000 72 + #define MBP_RXNOCHAIN 0x10000000 73 + #define MBP_RXCMF 0x01000000 74 + #define MBP_RXSHORT 0x00800000 75 + #define MBP_RXCEF 0x00400000 76 + #define MBP_RXPROMISC 0x00200000 77 + #define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16) 78 + #define MBP_RXBCAST 0x00002000 79 + #define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8) 80 + #define MBP_RXMCAST 0x00000020 81 + #define MBP_MCASTCHAN(channel) ((channel) & 0x7) 82 82 #define CPMAC_UNICAST_ENABLE 0x0104 83 83 #define CPMAC_UNICAST_CLEAR 0x0108 84 84 #define CPMAC_MAX_LENGTH 0x010c 85 85 #define CPMAC_BUFFER_OFFSET 0x0110 86 86 #define CPMAC_MAC_CONTROL 0x0160 87 - # define MAC_TXPTYPE 0x00000200 88 - # define MAC_TXPACE 0x00000040 89 - # define MAC_MII 0x00000020 90 - # define MAC_TXFLOW 0x00000010 91 - # define MAC_RXFLOW 0x00000008 92 - # define MAC_MTEST 0x00000004 93 - # define MAC_LOOPBACK 0x00000002 94 - # define MAC_FDX 0x00000001 87 + #define MAC_TXPTYPE 0x00000200 88 + #define MAC_TXPACE 0x00000040 89 + #define MAC_MII 0x00000020 90 + #define MAC_TXFLOW 0x00000010 91 + #define MAC_RXFLOW 0x00000008 92 + #define MAC_MTEST 0x00000004 93 + #define MAC_LOOPBACK 0x00000002 94 + #define MAC_FDX 0x00000001 95 95 #define CPMAC_MAC_STATUS 0x0164 96 - # define MAC_STATUS_QOS 0x00000004 97 - # define 
MAC_STATUS_RXFLOW 0x00000002 98 - # define MAC_STATUS_TXFLOW 0x00000001 96 + #define MAC_STATUS_QOS 0x00000004 97 + #define MAC_STATUS_RXFLOW 0x00000002 98 + #define MAC_STATUS_TXFLOW 0x00000001 99 99 #define CPMAC_TX_INT_ENABLE 0x0178 100 100 #define CPMAC_TX_INT_CLEAR 0x017c 101 101 #define CPMAC_MAC_INT_VECTOR 0x0180 102 - # define MAC_INT_STATUS 0x00080000 103 - # define MAC_INT_HOST 0x00040000 104 - # define MAC_INT_RX 0x00020000 105 - # define MAC_INT_TX 0x00010000 102 + #define MAC_INT_STATUS 0x00080000 103 + #define MAC_INT_HOST 0x00040000 104 + #define MAC_INT_RX 0x00020000 105 + #define MAC_INT_TX 0x00010000 106 106 #define CPMAC_MAC_EOI_VECTOR 0x0184 107 107 #define CPMAC_RX_INT_ENABLE 0x0198 108 108 #define CPMAC_RX_INT_CLEAR 0x019c ··· 118 118 #define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4) 119 119 #define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4) 120 120 #define CPMAC_REG_END 0x0680 121 - /* 122 - * Rx/Tx statistics 121 + 122 + /* Rx/Tx statistics 123 123 * TODO: use some of them to fill stats in cpmac_stats() 124 124 */ 125 125 #define CPMAC_STATS_RX_GOOD 0x0200 ··· 157 157 /* MDIO bus */ 158 158 #define CPMAC_MDIO_VERSION 0x0000 159 159 #define CPMAC_MDIO_CONTROL 0x0004 160 - # define MDIOC_IDLE 0x80000000 161 - # define MDIOC_ENABLE 0x40000000 162 - # define MDIOC_PREAMBLE 0x00100000 163 - # define MDIOC_FAULT 0x00080000 164 - # define MDIOC_FAULTDETECT 0x00040000 165 - # define MDIOC_INTTEST 0x00020000 166 - # define MDIOC_CLKDIV(div) ((div) & 0xff) 160 + #define MDIOC_IDLE 0x80000000 161 + #define MDIOC_ENABLE 0x40000000 162 + #define MDIOC_PREAMBLE 0x00100000 163 + #define MDIOC_FAULT 0x00080000 164 + #define MDIOC_FAULTDETECT 0x00040000 165 + #define MDIOC_INTTEST 0x00020000 166 + #define MDIOC_CLKDIV(div) ((div) & 0xff) 167 167 #define CPMAC_MDIO_ALIVE 0x0008 168 168 #define CPMAC_MDIO_LINK 0x000c 169 169 #define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8) 170 - # define MDIO_BUSY 0x80000000 171 - # define MDIO_WRITE 
0x40000000 172 - # define MDIO_REG(reg) (((reg) & 0x1f) << 21) 173 - # define MDIO_PHY(phy) (((phy) & 0x1f) << 16) 174 - # define MDIO_DATA(data) ((data) & 0xffff) 170 + #define MDIO_BUSY 0x80000000 171 + #define MDIO_WRITE 0x40000000 172 + #define MDIO_REG(reg) (((reg) & 0x1f) << 21) 173 + #define MDIO_PHY(phy) (((phy) & 0x1f) << 16) 174 + #define MDIO_DATA(data) ((data) & 0xffff) 175 175 #define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8) 176 - # define PHYSEL_LINKSEL 0x00000040 177 - # define PHYSEL_LINKINT 0x00000020 176 + #define PHYSEL_LINKSEL 0x00000040 177 + #define PHYSEL_LINKINT 0x00000020 178 178 179 179 struct cpmac_desc { 180 180 u32 hw_next; ··· 224 224 { 225 225 int i; 226 226 struct cpmac_priv *priv = netdev_priv(dev); 227 + 227 228 for (i = 0; i < CPMAC_REG_END; i += 4) { 228 229 if (i % 16 == 0) { 229 230 if (i) 230 231 pr_cont("\n"); 231 - printk(KERN_DEBUG "%s: reg[%p]:", dev->name, 232 - priv->regs + i); 232 + netdev_dbg(dev, "reg[%p]:", priv->regs + i); 233 233 } 234 - printk(" %08x", cpmac_read(priv->regs, i)); 234 + pr_debug(" %08x", cpmac_read(priv->regs, i)); 235 235 } 236 - printk("\n"); 236 + pr_debug("\n"); 237 237 } 238 238 239 239 static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) 240 240 { 241 241 int i; 242 - printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc); 242 + 243 + netdev_dbg(dev, "desc[%p]:", desc); 243 244 for (i = 0; i < sizeof(*desc) / 4; i++) 244 - printk(" %08x", ((u32 *)desc)[i]); 245 - printk("\n"); 245 + pr_debug(" %08x", ((u32 *)desc)[i]); 246 + pr_debug("\n"); 246 247 } 247 248 248 249 static void cpmac_dump_all_desc(struct net_device *dev) 249 250 { 250 251 struct cpmac_priv *priv = netdev_priv(dev); 251 252 struct cpmac_desc *dump = priv->rx_head; 253 + 252 254 do { 253 255 cpmac_dump_desc(dev, dump); 254 256 dump = dump->next; ··· 260 258 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) 261 259 { 262 260 int i; 263 - printk(KERN_DEBUG "%s: skb 0x%p, 
len=%d\n", dev->name, skb, skb->len); 261 + 262 + netdev_dbg(dev, "skb 0x%p, len=%d\n", skb, skb->len); 264 263 for (i = 0; i < skb->len; i++) { 265 264 if (i % 16 == 0) { 266 265 if (i) 267 266 pr_cont("\n"); 268 - printk(KERN_DEBUG "%s: data[%p]:", dev->name, 269 - skb->data + i); 267 + netdev_dbg(dev, "data[%p]:", skb->data + i); 270 268 } 271 - printk(" %02x", ((u8 *)skb->data)[i]); 269 + pr_debug(" %02x", ((u8 *)skb->data)[i]); 272 270 } 273 - printk("\n"); 271 + pr_debug("\n"); 274 272 } 275 273 276 274 static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg) ··· 283 281 MDIO_PHY(phy_id)); 284 282 while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY) 285 283 cpu_relax(); 284 + 286 285 return MDIO_DATA(val); 287 286 } 288 287 ··· 294 291 cpu_relax(); 295 292 cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE | 296 293 MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val)); 294 + 297 295 return 0; 298 296 } 299 297 ··· 304 300 305 301 cpmac_clk = clk_get(&bus->dev, "cpmac"); 306 302 if (IS_ERR(cpmac_clk)) { 307 - printk(KERN_ERR "unable to get cpmac clock\n"); 303 + pr_err("unable to get cpmac clock\n"); 308 304 return -1; 309 305 } 310 306 ar7_device_reset(AR7_RESET_BIT_MDIO); 311 307 cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE | 312 308 MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1)); 309 + 313 310 return 0; 314 311 } 315 312 ··· 336 331 cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff); 337 332 cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff); 338 333 } else { 339 - /* 340 - * cpmac uses some strange mac address hashing 334 + /* cpmac uses some strange mac address hashing 341 335 * (not crc32) 342 336 */ 343 337 netdev_for_each_mc_addr(ha, dev) { ··· 373 369 cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); 374 370 if (unlikely(!desc->datalen)) { 375 371 if (netif_msg_rx_err(priv) && net_ratelimit()) 376 - printk(KERN_WARNING "%s: rx: spurious interrupt\n", 377 - 
priv->dev->name); 372 + netdev_warn(priv->dev, "rx: spurious interrupt\n"); 373 + 378 374 return NULL; 379 375 } 380 376 ··· 394 390 DMA_FROM_DEVICE); 395 391 desc->hw_data = (u32)desc->data_mapping; 396 392 if (unlikely(netif_msg_pktdata(priv))) { 397 - printk(KERN_DEBUG "%s: received packet:\n", 398 - priv->dev->name); 393 + netdev_dbg(priv->dev, "received packet:\n"); 399 394 cpmac_dump_skb(priv->dev, result); 400 395 } 401 396 } else { 402 397 if (netif_msg_rx_err(priv) && net_ratelimit()) 403 - printk(KERN_WARNING 404 - "%s: low on skbs, dropping packet\n", 405 - priv->dev->name); 398 + netdev_warn(priv->dev, 399 + "low on skbs, dropping packet\n"); 400 + 406 401 priv->dev->stats.rx_dropped++; 407 402 } 408 403 ··· 421 418 spin_lock(&priv->rx_lock); 422 419 if (unlikely(!priv->rx_head)) { 423 420 if (netif_msg_rx_err(priv) && net_ratelimit()) 424 - printk(KERN_WARNING "%s: rx: polling, but no queue\n", 425 - priv->dev->name); 421 + netdev_warn(priv->dev, "rx: polling, but no queue\n"); 422 + 426 423 spin_unlock(&priv->rx_lock); 427 424 napi_complete(napi); 428 425 return 0; ··· 435 432 436 433 if ((desc->dataflags & CPMAC_EOQ) != 0) { 437 434 /* The last update to eoq->hw_next didn't happen 438 - * soon enough, and the receiver stopped here. 439 - *Remember this descriptor so we can restart 440 - * the receiver after freeing some space. 441 - */ 435 + * soon enough, and the receiver stopped here. 436 + * Remember this descriptor so we can restart 437 + * the receiver after freeing some space. 
438 + */ 442 439 if (unlikely(restart)) { 443 440 if (netif_msg_rx_err(priv)) 444 - printk(KERN_ERR "%s: poll found a" 445 - " duplicate EOQ: %p and %p\n", 446 - priv->dev->name, restart, desc); 441 + netdev_err(priv->dev, "poll found a" 442 + " duplicate EOQ: %p and %p\n", 443 + restart, desc); 447 444 goto fatal_error; 448 445 } 449 446 ··· 460 457 461 458 if (desc != priv->rx_head) { 462 459 /* We freed some buffers, but not the whole ring, 463 - * add what we did free to the rx list */ 460 + * add what we did free to the rx list 461 + */ 464 462 desc->prev->hw_next = (u32)0; 465 463 priv->rx_head->prev->hw_next = priv->rx_head->mapping; 466 464 } 467 465 468 466 /* Optimization: If we did not actually process an EOQ (perhaps because 469 467 * of quota limits), check to see if the tail of the queue has EOQ set. 470 - * We should immediately restart in that case so that the receiver can 471 - * restart and run in parallel with more packet processing. 472 - * This lets us handle slightly larger bursts before running 473 - * out of ring space (assuming dev->weight < ring_size) */ 468 + * We should immediately restart in that case so that the receiver can 469 + * restart and run in parallel with more packet processing. 470 + * This lets us handle slightly larger bursts before running 471 + * out of ring space (assuming dev->weight < ring_size) 472 + */ 474 473 475 474 if (!restart && 476 475 (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) 477 476 == CPMAC_EOQ && 478 477 (priv->rx_head->dataflags & CPMAC_OWN) != 0) { 479 478 /* reset EOQ so the poll loop (above) doesn't try to 480 - * restart this when it eventually gets to this descriptor. 481 - */ 479 + * restart this when it eventually gets to this descriptor. 
480 + */ 482 481 priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; 483 482 restart = priv->rx_head; 484 483 } ··· 489 484 priv->dev->stats.rx_errors++; 490 485 priv->dev->stats.rx_fifo_errors++; 491 486 if (netif_msg_rx_err(priv) && net_ratelimit()) 492 - printk(KERN_WARNING "%s: rx dma ring overrun\n", 493 - priv->dev->name); 487 + netdev_warn(priv->dev, "rx dma ring overrun\n"); 494 488 495 489 if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { 496 490 if (netif_msg_drv(priv)) 497 - printk(KERN_ERR "%s: cpmac_poll is trying to " 498 - "restart rx from a descriptor that's " 499 - "not free: %p\n", 500 - priv->dev->name, restart); 491 + netdev_err(priv->dev, "cpmac_poll is trying " 492 + "to restart rx from a descriptor " 493 + "that's not free: %p\n", restart); 501 494 goto fatal_error; 502 495 } 503 496 ··· 505 502 priv->rx_head = desc; 506 503 spin_unlock(&priv->rx_lock); 507 504 if (unlikely(netif_msg_rx_status(priv))) 508 - printk(KERN_DEBUG "%s: poll processed %d packets\n", 509 - priv->dev->name, received); 505 + netdev_dbg(priv->dev, "poll processed %d packets\n", received); 506 + 510 507 if (processed == 0) { 511 508 /* we ran out of packets to read, 512 - * revert to interrupt-driven mode */ 509 + * revert to interrupt-driven mode 510 + */ 513 511 napi_complete(napi); 514 512 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 515 513 return 0; ··· 520 516 521 517 fatal_error: 522 518 /* Something went horribly wrong. 523 - * Reset hardware to try to recover rather than wedging. */ 524 - 519 + * Reset hardware to try to recover rather than wedging. 520 + */ 525 521 if (netif_msg_drv(priv)) { 526 - printk(KERN_ERR "%s: cpmac_poll is confused. " 527 - "Resetting hardware\n", priv->dev->name); 522 + netdev_err(priv->dev, "cpmac_poll is confused. 
" 523 + "Resetting hardware\n"); 528 524 cpmac_dump_all_desc(priv->dev); 529 - printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", 530 - priv->dev->name, 531 - cpmac_read(priv->regs, CPMAC_RX_PTR(0)), 532 - cpmac_read(priv->regs, CPMAC_RX_ACK(0))); 525 + netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", 526 + cpmac_read(priv->regs, CPMAC_RX_PTR(0)), 527 + cpmac_read(priv->regs, CPMAC_RX_ACK(0))); 533 528 } 534 529 535 530 spin_unlock(&priv->rx_lock); ··· 540 537 cpmac_hw_stop(priv->dev); 541 538 if (!schedule_work(&priv->reset_work)) 542 539 atomic_dec(&priv->reset_pending); 540 + 543 541 return 0; 544 542 545 543 } ··· 564 560 desc = &priv->desc_ring[queue]; 565 561 if (unlikely(desc->dataflags & CPMAC_OWN)) { 566 562 if (netif_msg_tx_err(priv) && net_ratelimit()) 567 - printk(KERN_WARNING "%s: tx dma ring full\n", 568 - dev->name); 563 + netdev_warn(dev, "tx dma ring full\n"); 564 + 569 565 return NETDEV_TX_BUSY; 570 566 } 571 567 ··· 579 575 desc->datalen = len; 580 576 desc->buflen = len; 581 577 if (unlikely(netif_msg_tx_queued(priv))) 582 - printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb, 583 - skb->len); 578 + netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len); 584 579 if (unlikely(netif_msg_hw(priv))) 585 580 cpmac_dump_desc(dev, desc); 586 581 if (unlikely(netif_msg_pktdata(priv))) ··· 605 602 DMA_TO_DEVICE); 606 603 607 604 if (unlikely(netif_msg_tx_done(priv))) 608 - printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name, 609 - desc->skb, desc->skb->len); 605 + netdev_dbg(dev, "sent 0x%p, len=%d\n", 606 + desc->skb, desc->skb->len); 610 607 611 608 dev_kfree_skb_irq(desc->skb); 612 609 desc->skb = NULL; ··· 614 611 netif_wake_subqueue(dev, queue); 615 612 } else { 616 613 if (netif_msg_tx_err(priv) && net_ratelimit()) 617 - printk(KERN_WARNING 618 - "%s: end_xmit: spurious interrupt\n", dev->name); 614 + netdev_warn(dev, "end_xmit: spurious interrupt\n"); 619 615 if (__netif_subqueue_stopped(dev, queue)) 620 616 
netif_wake_subqueue(dev, queue); 621 617 } ··· 689 687 struct cpmac_priv *priv = netdev_priv(dev); 690 688 struct cpmac_desc *desc; 691 689 int i; 690 + 692 691 if (unlikely(!priv->rx_head)) 693 692 return; 694 693 desc = priv->rx_head; 695 694 for (i = 0; i < priv->ring_size; i++) { 696 695 if ((desc->dataflags & CPMAC_OWN) == 0) { 697 696 if (netif_msg_rx_err(priv) && net_ratelimit()) 698 - printk(KERN_WARNING "%s: packet dropped\n", 699 - dev->name); 697 + netdev_warn(dev, "packet dropped\n"); 700 698 if (unlikely(netif_msg_hw(priv))) 701 699 cpmac_dump_desc(dev, desc); 702 700 desc->dataflags = CPMAC_OWN; ··· 712 710 { 713 711 struct cpmac_priv *priv = netdev_priv(dev); 714 712 int i; 713 + 715 714 if (unlikely(!priv->desc_ring)) 716 715 return; 717 716 for (i = 0; i < CPMAC_QUEUES; i++) { ··· 754 751 if (rx_code || tx_code) { 755 752 if (netif_msg_drv(priv) && net_ratelimit()) { 756 753 /* Can't find any documentation on what these 757 - *error codes actually are. So just log them and hope.. 754 + * error codes actually are. So just log them and hope.. 
758 755 */ 759 756 if (rx_code) 760 - printk(KERN_WARNING "%s: host error %d on rx " 761 - "channel %d (macstatus %08x), resetting\n", 762 - dev->name, rx_code, rx_channel, macstatus); 757 + netdev_warn(dev, "host error %d on rx " 758 + "channel %d (macstatus %08x), resetting\n", 759 + rx_code, rx_channel, macstatus); 763 760 if (tx_code) 764 - printk(KERN_WARNING "%s: host error %d on tx " 765 - "channel %d (macstatus %08x), resetting\n", 766 - dev->name, tx_code, tx_channel, macstatus); 761 + netdev_warn(dev, "host error %d on tx " 762 + "channel %d (macstatus %08x), resetting\n", 763 + tx_code, tx_channel, macstatus); 767 764 } 768 765 769 766 netif_tx_stop_all_queues(dev); ··· 788 785 status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR); 789 786 790 787 if (unlikely(netif_msg_intr(priv))) 791 - printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name, 792 - status); 788 + netdev_dbg(dev, "interrupt status: 0x%08x\n", status); 793 789 794 790 if (status & MAC_INT_TX) 795 791 cpmac_end_xmit(dev, (status & 7)); ··· 817 815 dev->stats.tx_errors++; 818 816 spin_unlock(&priv->lock); 819 817 if (netif_msg_tx_err(priv) && net_ratelimit()) 820 - printk(KERN_WARNING "%s: transmit timeout\n", dev->name); 818 + netdev_warn(dev, "transmit timeout\n"); 821 819 822 820 atomic_inc(&priv->reset_pending); 823 821 barrier(); ··· 831 829 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 832 830 { 833 831 struct cpmac_priv *priv = netdev_priv(dev); 832 + 834 833 if (!(netif_running(dev))) 835 834 return -EINVAL; 836 835 if (!priv->phy) ··· 887 884 if (netif_running(dev)) 888 885 return -EBUSY; 889 886 priv->ring_size = ring->rx_pending; 887 + 890 888 return 0; 891 889 } 892 890 ··· 955 951 mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs"); 956 952 if (!request_mem_region(mem->start, resource_size(mem), dev->name)) { 957 953 if (netif_msg_drv(priv)) 958 - printk(KERN_ERR "%s: failed to request registers\n", 959 - dev->name); 954 + 
netdev_err(dev, "failed to request registers\n"); 955 + 960 956 res = -ENXIO; 961 957 goto fail_reserve; 962 958 } ··· 964 960 priv->regs = ioremap(mem->start, resource_size(mem)); 965 961 if (!priv->regs) { 966 962 if (netif_msg_drv(priv)) 967 - printk(KERN_ERR "%s: failed to remap registers\n", 968 - dev->name); 963 + netdev_err(dev, "failed to remap registers\n"); 964 + 969 965 res = -ENXIO; 970 966 goto fail_remap; 971 967 } ··· 1007 1003 res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev); 1008 1004 if (res) { 1009 1005 if (netif_msg_drv(priv)) 1010 - printk(KERN_ERR "%s: failed to obtain irq\n", 1011 - dev->name); 1006 + netdev_err(dev, "failed to obtain irq\n"); 1007 + 1012 1008 goto fail_irq; 1013 1009 } 1014 1010 ··· 1081 1077 dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * 1082 1078 (CPMAC_QUEUES + priv->ring_size), 1083 1079 priv->desc_ring, priv->dma_ring); 1080 + 1084 1081 return 0; 1085 1082 } 1086 1083 ··· 1126 1121 1127 1122 if (phy_id == PHY_MAX_ADDR) { 1128 1123 dev_err(&pdev->dev, "no PHY present, falling back " 1129 - "to switch on MDIO bus 0\n"); 1124 + "to switch on MDIO bus 0\n"); 1130 1125 strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */ 1131 1126 phy_id = pdev->id; 1132 1127 } ··· 1142 1137 mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); 1143 1138 if (!mem) { 1144 1139 rc = -ENODEV; 1145 - goto fail; 1140 + goto out; 1146 1141 } 1147 1142 1148 1143 dev->irq = platform_get_irq_byname(pdev, "irq"); ··· 1167 1162 1168 1163 if (IS_ERR(priv->phy)) { 1169 1164 if (netif_msg_drv(priv)) 1170 - printk(KERN_ERR "%s: Could not attach to PHY\n", 1171 - dev->name); 1165 + dev_err(&pdev->dev, "Could not attach to PHY\n"); 1166 + 1172 1167 rc = PTR_ERR(priv->phy); 1173 - goto fail; 1168 + goto out; 1174 1169 } 1175 1170 1176 1171 rc = register_netdev(dev); 1177 1172 if (rc) { 1178 - printk(KERN_ERR "cpmac: error %i registering device %s\n", rc, 1179 - dev->name); 1173 + dev_err(&pdev->dev, 
"Could not register net device\n"); 1180 1174 goto fail; 1181 1175 } 1182 1176 1183 1177 if (netif_msg_probe(priv)) { 1184 - printk(KERN_INFO 1185 - "cpmac: device %s (regs: %p, irq: %d, phy: %s, " 1186 - "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq, 1187 - priv->phy_name, dev->dev_addr); 1178 + dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, " 1179 + "mac: %pM\n", (void *)mem->start, dev->irq, 1180 + priv->phy_name, dev->dev_addr); 1188 1181 } 1182 + 1189 1183 return 0; 1190 1184 1191 1185 fail: 1192 1186 free_netdev(dev); 1187 + out: 1193 1188 return rc; 1194 1189 } 1195 1190 1196 1191 static int cpmac_remove(struct platform_device *pdev) 1197 1192 { 1198 1193 struct net_device *dev = platform_get_drvdata(pdev); 1194 + 1199 1195 unregister_netdev(dev); 1200 1196 free_netdev(dev); 1197 + 1201 1198 return 0; 1202 1199 } 1203 1200 1204 1201 static struct platform_driver cpmac_driver = { 1205 - .driver.name = "cpmac", 1206 - .driver.owner = THIS_MODULE, 1207 - .probe = cpmac_probe, 1202 + .driver = { 1203 + .name = "cpmac", 1204 + .owner = THIS_MODULE, 1205 + }, 1206 + .probe = cpmac_probe, 1208 1207 .remove = cpmac_remove, 1209 1208 }; 1210 1209 ··· 1230 1221 cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256); 1231 1222 1232 1223 if (!cpmac_mii->priv) { 1233 - printk(KERN_ERR "Can't ioremap mdio registers\n"); 1224 + pr_err("Can't ioremap mdio registers\n"); 1234 1225 res = -ENXIO; 1235 1226 goto fail_alloc; 1236 1227 }