AR7 ethernet: small post-merge cleanups and fixes

Signed-off-by: Matteo Croce <technoboy85@gmail.com>
Signed-off-by: Eugene Konev <ejka@imfi.kspu.ru>
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
Signed-off-by: Jeff Garzik <jeff@garzik.org>

authored by Matteo Croce and committed by Jeff Garzik (commits 6cd043d9, 02bae212)

+12 -19
drivers/net/cpmac.c
··· 460 struct cpmac_desc *desc; 461 struct cpmac_priv *priv = netdev_priv(dev); 462 463 - if (unlikely(skb_padto(skb, ETH_ZLEN))) { 464 - if (netif_msg_tx_err(priv) && net_ratelimit()) 465 - printk(KERN_WARNING 466 - "%s: tx: padding failed, dropping\n", dev->name); 467 - spin_lock(&priv->lock); 468 - dev->stats.tx_dropped++; 469 - spin_unlock(&priv->lock); 470 - return -ENOMEM; 471 - } 472 473 len = max(skb->len, ETH_ZLEN); 474 - queue = skb_get_queue_mapping(skb); 475 #ifdef CONFIG_NETDEVICES_MULTIQUEUE 476 netif_stop_subqueue(dev, queue); 477 #else ··· 474 desc = &priv->desc_ring[queue]; 475 if (unlikely(desc->dataflags & CPMAC_OWN)) { 476 if (netif_msg_tx_err(priv) && net_ratelimit()) 477 - printk(KERN_WARNING "%s: tx dma ring full, dropping\n", 478 dev->name); 479 - spin_lock(&priv->lock); 480 - dev->stats.tx_dropped++; 481 - spin_unlock(&priv->lock); 482 - dev_kfree_skb_any(skb); 483 - return -ENOMEM; 484 } 485 486 spin_lock(&priv->lock); ··· 498 cpmac_dump_skb(dev, skb); 499 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); 500 501 - return 0; 502 } 503 504 static void cpmac_end_xmit(struct net_device *dev, int queue) ··· 635 int i; 636 if (unlikely(!priv->desc_ring)) 637 return; 638 - for (i = 0; i < CPMAC_QUEUES; i++) 639 if (priv->desc_ring[i].skb) { 640 dev_kfree_skb_any(priv->desc_ring[i].skb); 641 if (netif_subqueue_stopped(dev, i)) 642 netif_wake_subqueue(dev, i); 643 } 644 } 645 646 static void cpmac_hw_error(struct work_struct *work) ··· 718 #ifdef CONFIG_NETDEVICES_MULTIQUEUE 719 for (i = 0; i < CPMAC_QUEUES; i++) 720 if (priv->desc_ring[i].skb) { 721 dev_kfree_skb_any(priv->desc_ring[i].skb); 722 netif_wake_subqueue(dev, i); 723 break; 724 } 725 #else 726 if (priv->desc_ring[0].skb) 727 dev_kfree_skb_any(priv->desc_ring[0].skb); 728 netif_wake_queue(dev); ··· 787 { 788 struct cpmac_priv *priv = netdev_priv(dev); 789 790 - if (dev->flags && IFF_UP) 791 return -EBUSY; 792 priv->ring_size = ring->rx_pending; 793 return 0;
··· 460 struct cpmac_desc *desc; 461 struct cpmac_priv *priv = netdev_priv(dev); 462 463 + if (unlikely(skb_padto(skb, ETH_ZLEN))) 464 + return NETDEV_TX_OK; 465 466 len = max(skb->len, ETH_ZLEN); 467 + queue = skb->queue_mapping; 468 #ifdef CONFIG_NETDEVICES_MULTIQUEUE 469 netif_stop_subqueue(dev, queue); 470 #else ··· 481 desc = &priv->desc_ring[queue]; 482 if (unlikely(desc->dataflags & CPMAC_OWN)) { 483 if (netif_msg_tx_err(priv) && net_ratelimit()) 484 + printk(KERN_WARNING "%s: tx dma ring full\n", 485 dev->name); 486 + return NETDEV_TX_BUSY; 487 } 488 489 spin_lock(&priv->lock); ··· 509 cpmac_dump_skb(dev, skb); 510 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); 511 512 + return NETDEV_TX_OK; 513 } 514 515 static void cpmac_end_xmit(struct net_device *dev, int queue) ··· 646 int i; 647 if (unlikely(!priv->desc_ring)) 648 return; 649 + for (i = 0; i < CPMAC_QUEUES; i++) { 650 + priv->desc_ring[i].dataflags = 0; 651 if (priv->desc_ring[i].skb) { 652 dev_kfree_skb_any(priv->desc_ring[i].skb); 653 if (netif_subqueue_stopped(dev, i)) 654 netif_wake_subqueue(dev, i); 655 } 656 + } 657 } 658 659 static void cpmac_hw_error(struct work_struct *work) ··· 727 #ifdef CONFIG_NETDEVICES_MULTIQUEUE 728 for (i = 0; i < CPMAC_QUEUES; i++) 729 if (priv->desc_ring[i].skb) { 730 + priv->desc_ring[i].dataflags = 0; 731 dev_kfree_skb_any(priv->desc_ring[i].skb); 732 netif_wake_subqueue(dev, i); 733 break; 734 } 735 #else 736 + priv->desc_ring[0].dataflags = 0; 737 if (priv->desc_ring[0].skb) 738 dev_kfree_skb_any(priv->desc_ring[0].skb); 739 netif_wake_queue(dev); ··· 794 { 795 struct cpmac_priv *priv = netdev_priv(dev); 796 797 + if (netif_running(dev)) 798 return -EBUSY; 799 priv->ring_size = ring->rx_pending; 800 return 0;