Merge branch 'upstream-davem' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

+852 -575
+1 -1
drivers/net/3c509.c
··· 1063 1063 struct sk_buff *skb; 1064 1064 1065 1065 skb = dev_alloc_skb(pkt_len+5); 1066 - dev->stats.rx_bytes += pkt_len; 1067 1066 if (el3_debug > 4) 1068 1067 printk("Receiving packet size %d status %4.4x.\n", 1069 1068 pkt_len, rx_status); ··· 1077 1078 skb->protocol = eth_type_trans(skb,dev); 1078 1079 netif_rx(skb); 1079 1080 dev->last_rx = jiffies; 1081 + dev->stats.rx_bytes += pkt_len; 1080 1082 dev->stats.rx_packets++; 1081 1083 continue; 1082 1084 }
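The 3c509 hunk above moves the rx_bytes update so it sits next to rx_packets, after the frame has actually been handed to the stack. As a point of reference only, here is a minimal sketch of that ordering; hypothetical_rx_one and the copy step are placeholders, not the driver's code.

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

/* Hedged sketch: bump the RX statistics only after the skb has been
 * delivered, so an allocation failure shows up as rx_dropped instead
 * of inflating rx_bytes. */
static void hypothetical_rx_one(struct net_device *dev, unsigned int pkt_len)
{
	struct sk_buff *skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);

	if (skb == NULL) {
		dev->stats.rx_dropped++;
		return;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	/* ... copy pkt_len bytes from the adapter into skb_put(skb, pkt_len) ... */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
	dev->stats.rx_bytes += pkt_len;	/* counted once it is actually delivered */
	dev->stats.rx_packets++;
}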
+1 -6
drivers/net/au1000_eth.c
··· 1239 1239 */ 1240 1240 static irqreturn_t au1000_interrupt(int irq, void *dev_id) 1241 1241 { 1242 - struct net_device *dev = (struct net_device *) dev_id; 1243 - 1244 - if (dev == NULL) { 1245 - printk(KERN_ERR "%s: isr: null dev ptr\n", dev->name); 1246 - return IRQ_RETVAL(1); 1247 - } 1242 + struct net_device *dev = dev_id; 1248 1243 1249 1244 /* Handle RX interrupts first to minimize chance of overrun */ 1250 1245
-1
drivers/net/bfin_mac.c
··· 22 22 #include <linux/crc32.h> 23 23 #include <linux/device.h> 24 24 #include <linux/spinlock.h> 25 - #include <linux/ethtool.h> 26 25 #include <linux/mii.h> 27 26 #include <linux/phy.h> 28 27 #include <linux/netdevice.h>
+179 -55
drivers/net/cpmac.c
··· 38 38 #include <linux/platform_device.h> 39 39 #include <linux/dma-mapping.h> 40 40 #include <asm/gpio.h> 41 + #include <asm/atomic.h> 41 42 42 43 MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>"); 43 44 MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)"); ··· 188 187 #define CPMAC_EOQ 0x1000 189 188 struct sk_buff *skb; 190 189 struct cpmac_desc *next; 190 + struct cpmac_desc *prev; 191 191 dma_addr_t mapping; 192 192 dma_addr_t data_mapping; 193 193 }; ··· 210 208 struct work_struct reset_work; 211 209 struct platform_device *pdev; 212 210 struct napi_struct napi; 211 + atomic_t reset_pending; 213 212 }; 214 213 215 214 static irqreturn_t cpmac_irq(int, void *); ··· 242 239 for (i = 0; i < sizeof(*desc) / 4; i++) 243 240 printk(" %08x", ((u32 *)desc)[i]); 244 241 printk("\n"); 242 + } 243 + 244 + static void cpmac_dump_all_desc(struct net_device *dev) 245 + { 246 + struct cpmac_priv *priv = netdev_priv(dev); 247 + struct cpmac_desc *dump = priv->rx_head; 248 + do { 249 + cpmac_dump_desc(dev, dump); 250 + dump = dump->next; 251 + } while (dump != priv->rx_head); 245 252 } 246 253 247 254 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb) ··· 425 412 static int cpmac_poll(struct napi_struct *napi, int budget) 426 413 { 427 414 struct sk_buff *skb; 428 - struct cpmac_desc *desc; 429 - int received = 0; 415 + struct cpmac_desc *desc, *restart; 430 416 struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi); 417 + int received = 0, processed = 0; 431 418 432 419 spin_lock(&priv->rx_lock); 433 420 if (unlikely(!priv->rx_head)) { 434 421 if (netif_msg_rx_err(priv) && net_ratelimit()) 435 422 printk(KERN_WARNING "%s: rx: polling, but no queue\n", 436 423 priv->dev->name); 424 + spin_unlock(&priv->rx_lock); 437 425 netif_rx_complete(priv->dev, napi); 438 426 return 0; 439 427 } 440 428 441 429 desc = priv->rx_head; 430 + restart = NULL; 442 431 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { 432 + processed++; 433 + 434 + if ((desc->dataflags & CPMAC_EOQ) != 0) { 435 + /* The last update to eoq->hw_next didn't happen 436 + * soon enough, and the receiver stopped here. 437 + *Remember this descriptor so we can restart 438 + * the receiver after freeing some space. 439 + */ 440 + if (unlikely(restart)) { 441 + if (netif_msg_rx_err(priv)) 442 + printk(KERN_ERR "%s: poll found a" 443 + " duplicate EOQ: %p and %p\n", 444 + priv->dev->name, restart, desc); 445 + goto fatal_error; 446 + } 447 + 448 + restart = desc->next; 449 + } 450 + 443 451 skb = cpmac_rx_one(priv, desc); 444 452 if (likely(skb)) { 445 453 netif_receive_skb(skb); ··· 469 435 desc = desc->next; 470 436 } 471 437 438 + if (desc != priv->rx_head) { 439 + /* We freed some buffers, but not the whole ring, 440 + * add what we did free to the rx list */ 441 + desc->prev->hw_next = (u32)0; 442 + priv->rx_head->prev->hw_next = priv->rx_head->mapping; 443 + } 444 + 445 + /* Optimization: If we did not actually process an EOQ (perhaps because 446 + * of quota limits), check to see if the tail of the queue has EOQ set. 447 + * We should immediately restart in that case so that the receiver can 448 + * restart and run in parallel with more packet processing. 
449 + * This lets us handle slightly larger bursts before running 450 + * out of ring space (assuming dev->weight < ring_size) */ 451 + 452 + if (!restart && 453 + (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ)) 454 + == CPMAC_EOQ && 455 + (priv->rx_head->dataflags & CPMAC_OWN) != 0) { 456 + /* reset EOQ so the poll loop (above) doesn't try to 457 + * restart this when it eventually gets to this descriptor. 458 + */ 459 + priv->rx_head->prev->dataflags &= ~CPMAC_EOQ; 460 + restart = priv->rx_head; 461 + } 462 + 463 + if (restart) { 464 + priv->dev->stats.rx_errors++; 465 + priv->dev->stats.rx_fifo_errors++; 466 + if (netif_msg_rx_err(priv) && net_ratelimit()) 467 + printk(KERN_WARNING "%s: rx dma ring overrun\n", 468 + priv->dev->name); 469 + 470 + if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) { 471 + if (netif_msg_drv(priv)) 472 + printk(KERN_ERR "%s: cpmac_poll is trying to " 473 + "restart rx from a descriptor that's " 474 + "not free: %p\n", 475 + priv->dev->name, restart); 476 + goto fatal_error; 477 + } 478 + 479 + cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping); 480 + } 481 + 472 482 priv->rx_head = desc; 473 483 spin_unlock(&priv->rx_lock); 474 484 if (unlikely(netif_msg_rx_status(priv))) 475 485 printk(KERN_DEBUG "%s: poll processed %d packets\n", 476 486 priv->dev->name, received); 477 - if (desc->dataflags & CPMAC_OWN) { 487 + if (processed == 0) { 488 + /* we ran out of packets to read, 489 + * revert to interrupt-driven mode */ 478 490 netif_rx_complete(priv->dev, napi); 479 - cpmac_write(priv->regs, CPMAC_RX_PTR(0), (u32)desc->mapping); 480 491 cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1); 481 492 return 0; 482 493 } 483 494 484 495 return 1; 496 + 497 + fatal_error: 498 + /* Something went horribly wrong. 499 + * Reset hardware to try to recover rather than wedging. */ 500 + 501 + if (netif_msg_drv(priv)) { 502 + printk(KERN_ERR "%s: cpmac_poll is confused. 
" 503 + "Resetting hardware\n", priv->dev->name); 504 + cpmac_dump_all_desc(priv->dev); 505 + printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n", 506 + priv->dev->name, 507 + cpmac_read(priv->regs, CPMAC_RX_PTR(0)), 508 + cpmac_read(priv->regs, CPMAC_RX_ACK(0))); 509 + } 510 + 511 + spin_unlock(&priv->rx_lock); 512 + netif_rx_complete(priv->dev, napi); 513 + netif_stop_queue(priv->dev); 514 + napi_disable(&priv->napi); 515 + 516 + atomic_inc(&priv->reset_pending); 517 + cpmac_hw_stop(priv->dev); 518 + if (!schedule_work(&priv->reset_work)) 519 + atomic_dec(&priv->reset_pending); 520 + return 0; 521 + 485 522 } 486 523 487 524 static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev) ··· 560 455 int queue, len; 561 456 struct cpmac_desc *desc; 562 457 struct cpmac_priv *priv = netdev_priv(dev); 458 + 459 + if (unlikely(atomic_read(&priv->reset_pending))) 460 + return NETDEV_TX_BUSY; 563 461 564 462 if (unlikely(skb_padto(skb, ETH_ZLEN))) 565 463 return NETDEV_TX_OK; ··· 729 621 desc->dataflags = CPMAC_OWN; 730 622 dev->stats.rx_dropped++; 731 623 } 624 + desc->hw_next = desc->next->mapping; 732 625 desc = desc->next; 733 626 } 627 + priv->rx_head->prev->hw_next = 0; 734 628 } 735 629 736 630 static void cpmac_clear_tx(struct net_device *dev) ··· 745 635 priv->desc_ring[i].dataflags = 0; 746 636 if (priv->desc_ring[i].skb) { 747 637 dev_kfree_skb_any(priv->desc_ring[i].skb); 748 - if (netif_subqueue_stopped(dev, i)) 749 - netif_wake_subqueue(dev, i); 638 + priv->desc_ring[i].skb = NULL; 750 639 } 751 640 } 752 641 } 753 642 754 643 static void cpmac_hw_error(struct work_struct *work) 755 644 { 645 + int i; 756 646 struct cpmac_priv *priv = 757 647 container_of(work, struct cpmac_priv, reset_work); 758 648 ··· 761 651 spin_unlock(&priv->rx_lock); 762 652 cpmac_clear_tx(priv->dev); 763 653 cpmac_hw_start(priv->dev); 764 - napi_enable(&priv->napi); 765 - netif_start_queue(priv->dev); 654 + barrier(); 655 + atomic_dec(&priv->reset_pending); 656 + 657 + for (i = 0; i < CPMAC_QUEUES; i++) 658 + netif_wake_subqueue(priv->dev, i); 659 + netif_wake_queue(priv->dev); 660 + cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3); 661 + } 662 + 663 + static void cpmac_check_status(struct net_device *dev) 664 + { 665 + struct cpmac_priv *priv = netdev_priv(dev); 666 + 667 + u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS); 668 + int rx_channel = (macstatus >> 8) & 7; 669 + int rx_code = (macstatus >> 12) & 15; 670 + int tx_channel = (macstatus >> 16) & 7; 671 + int tx_code = (macstatus >> 20) & 15; 672 + 673 + if (rx_code || tx_code) { 674 + if (netif_msg_drv(priv) && net_ratelimit()) { 675 + /* Can't find any documentation on what these 676 + *error codes actually are. So just log them and hope.. 
677 + */ 678 + if (rx_code) 679 + printk(KERN_WARNING "%s: host error %d on rx " 680 + "channel %d (macstatus %08x), resetting\n", 681 + dev->name, rx_code, rx_channel, macstatus); 682 + if (tx_code) 683 + printk(KERN_WARNING "%s: host error %d on tx " 684 + "channel %d (macstatus %08x), resetting\n", 685 + dev->name, tx_code, tx_channel, macstatus); 686 + } 687 + 688 + netif_stop_queue(dev); 689 + cpmac_hw_stop(dev); 690 + if (schedule_work(&priv->reset_work)) 691 + atomic_inc(&priv->reset_pending); 692 + if (unlikely(netif_msg_hw(priv))) 693 + cpmac_dump_regs(dev); 694 + } 695 + cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff); 766 696 } 767 697 768 698 static irqreturn_t cpmac_irq(int irq, void *dev_id) ··· 833 683 834 684 cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0); 835 685 836 - if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) { 837 - if (netif_msg_drv(priv) && net_ratelimit()) 838 - printk(KERN_ERR "%s: hw error, resetting...\n", 839 - dev->name); 840 - netif_stop_queue(dev); 841 - napi_disable(&priv->napi); 842 - cpmac_hw_stop(dev); 843 - schedule_work(&priv->reset_work); 844 - if (unlikely(netif_msg_hw(priv))) 845 - cpmac_dump_regs(dev); 846 - } 686 + if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS))) 687 + cpmac_check_status(dev); 847 688 848 689 return IRQ_HANDLED; 849 690 } 850 691 851 692 static void cpmac_tx_timeout(struct net_device *dev) 852 693 { 853 - struct cpmac_priv *priv = netdev_priv(dev); 854 694 int i; 695 + struct cpmac_priv *priv = netdev_priv(dev); 855 696 856 697 spin_lock(&priv->lock); 857 698 dev->stats.tx_errors++; 858 699 spin_unlock(&priv->lock); 859 700 if (netif_msg_tx_err(priv) && net_ratelimit()) 860 701 printk(KERN_WARNING "%s: transmit timeout\n", dev->name); 861 - /* 862 - * FIXME: waking up random queue is not the best thing to 863 - * do... on the other hand why we got here at all? 864 - */ 865 - #ifdef CONFIG_NETDEVICES_MULTIQUEUE 702 + 703 + atomic_inc(&priv->reset_pending); 704 + barrier(); 705 + cpmac_clear_tx(dev); 706 + barrier(); 707 + atomic_dec(&priv->reset_pending); 708 + 709 + netif_wake_queue(priv->dev); 866 710 for (i = 0; i < CPMAC_QUEUES; i++) 867 - if (priv->desc_ring[i].skb) { 868 - priv->desc_ring[i].dataflags = 0; 869 - dev_kfree_skb_any(priv->desc_ring[i].skb); 870 - netif_wake_subqueue(dev, i); 871 - break; 872 - } 873 - #else 874 - priv->desc_ring[0].dataflags = 0; 875 - if (priv->desc_ring[0].skb) 876 - dev_kfree_skb_any(priv->desc_ring[0].skb); 877 - netif_wake_queue(dev); 878 - #endif 711 + netif_wake_subqueue(dev, i); 879 712 } 880 713 881 714 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ··· 1034 901 desc->buflen = CPMAC_SKB_SIZE; 1035 902 desc->dataflags = CPMAC_OWN; 1036 903 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; 904 + desc->next->prev = desc; 1037 905 desc->hw_next = (u32)desc->next->mapping; 1038 906 } 907 + 908 + priv->rx_head->prev->hw_next = (u32)0; 1039 909 1040 910 if ((res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, 1041 911 dev->name, dev))) { ··· 1048 912 goto fail_irq; 1049 913 } 1050 914 915 + atomic_set(&priv->reset_pending, 0); 1051 916 INIT_WORK(&priv->reset_work, cpmac_hw_error); 1052 917 cpmac_hw_start(dev); 1053 918 ··· 1144 1007 1145 1008 if (phy_id == PHY_MAX_ADDR) { 1146 1009 if (external_switch || dumb_switch) { 1147 - struct fixed_phy_status status = {}; 1148 - 1149 - /* 1150 - * FIXME: this should be in the platform code! 
1151 - * Since there is not platform code at all (that is, 1152 - * no mainline users of that driver), place it here 1153 - * for now. 1154 - */ 1155 - phy_id = 0; 1156 - status.link = 1; 1157 - status.duplex = 1; 1158 - status.speed = 100; 1159 - fixed_phy_add(PHY_POLL, phy_id, &status); 1010 + mdio_bus_id = 0; /* fixed phys bus */ 1011 + phy_id = pdev->id; 1160 1012 } else { 1161 - printk(KERN_ERR "cpmac: no PHY present\n"); 1013 + dev_err(&pdev->dev, "no PHY present\n"); 1162 1014 return -ENODEV; 1163 1015 } 1164 1016 } ··· 1190 1064 priv->msg_enable = netif_msg_init(debug_level, 0xff); 1191 1065 memcpy(dev->dev_addr, pdata->dev_addr, sizeof(dev->dev_addr)); 1192 1066 1193 - snprintf(priv->phy_name, BUS_ID_SIZE, PHY_ID_FMT, mdio_bus_id, phy_id); 1194 - 1195 - priv->phy = phy_connect(dev, priv->phy_name, &cpmac_adjust_link, 0, 1196 - PHY_INTERFACE_MODE_MII); 1067 + priv->phy = phy_connect(dev, cpmac_mii.phy_map[phy_id]->dev.bus_id, 1068 + &cpmac_adjust_link, 0, PHY_INTERFACE_MODE_MII); 1197 1069 if (IS_ERR(priv->phy)) { 1198 1070 if (netif_msg_drv(priv)) 1199 1071 printk(KERN_ERR "%s: Could not attach to PHY\n",
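Among other things, the cpmac rework above introduces an atomic reset_pending flag that is raised before the hardware is torn down and dropped once cpmac_hw_error() has rebuilt the rings. A rough sketch of how such a gate keeps the transmit path out of a half-reset device; struct my_priv and my_start_xmit are illustrative names, not cpmac's.

#include <linux/netdevice.h>
#include <asm/atomic.h>

struct my_priv {
	atomic_t reset_pending;	/* nonzero while a hardware reset is in flight */
	/* ... rings, locks, register mapping ... */
};

static int my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	/* While a reset is pending the descriptor rings may be getting
	 * rebuilt; ask the core to retry instead of queueing into them. */
	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	/* ... map the skb, fill a descriptor, kick the DMA engine ... */
	return NETDEV_TX_OK;
}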
+1 -1
drivers/net/dm9000.c
··· 903 903 if (netif_msg_ifdown(db)) 904 904 dev_dbg(db->dev, "shutting down %s\n", ndev->name); 905 905 906 - cancel_delayed_work(&db->phy_poll); 906 + cancel_delayed_work_sync(&db->phy_poll); 907 907 908 908 netif_stop_queue(ndev); 909 909 netif_carrier_off(ndev);
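The dm9000 change swaps cancel_delayed_work() for cancel_delayed_work_sync(): the plain variant only removes a not-yet-started work item, while _sync also waits for one that is already executing. A hedged sketch of the shutdown ordering this buys; my_board and my_stop are placeholder names.

#include <linux/netdevice.h>
#include <linux/workqueue.h>

struct my_board {
	struct delayed_work phy_poll;	/* periodic PHY state poll */
	/* ... */
};

static int my_stop(struct net_device *ndev)
{
	struct my_board *db = netdev_priv(ndev);

	/* The _sync variant also waits for an instance already running on
	 * another CPU, so it cannot touch the chip after we stop it. */
	cancel_delayed_work_sync(&db->phy_poll);

	netif_stop_queue(ndev);
	netif_carrier_off(ndev);
	/* ... power the device down, free its IRQ ... */
	return 0;
}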
+2 -2
drivers/net/e1000e/netdev.c
··· 4201 4201 struct e1000_adapter *adapter; 4202 4202 struct e1000_hw *hw; 4203 4203 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data]; 4204 - unsigned long mmio_start, mmio_len; 4205 - unsigned long flash_start, flash_len; 4204 + resource_size_t mmio_start, mmio_len; 4205 + resource_size_t flash_start, flash_len; 4206 4206 4207 4207 static int cards_found; 4208 4208 int i, err, pci_using_dac;
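The e1000e hunk switches the BAR bookkeeping from unsigned long to resource_size_t, which is 64-bit whenever the platform can place resources above 4 GB even if unsigned long is only 32-bit. A minimal sketch of reading a BAR with the matching type; the bar index and helper name are illustrative.

#include <linux/pci.h>
#include <linux/io.h>

/* Sketch: keep BAR start/length in resource_size_t so a region mapped
 * above 4 GB is not silently truncated on a 32-bit kernel. */
static void __iomem *my_map_bar0(struct pci_dev *pdev)
{
	resource_size_t mmio_start = pci_resource_start(pdev, 0);
	resource_size_t mmio_len = pci_resource_len(pdev, 0);

	return ioremap(mmio_start, mmio_len);
}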
+2 -3
drivers/net/ehea/ehea_main.c
··· 2213 2213 goto out; 2214 2214 } 2215 2215 2216 - memset(cb1->vlan_filter, 0, sizeof(cb1->vlan_filter)); 2217 - 2218 2216 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2219 2217 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2220 2218 if (hret != H_SUCCESS) ··· 3176 3178 3177 3179 static void ehea_shutdown_single_port(struct ehea_port *port) 3178 3180 { 3181 + struct ehea_adapter *adapter = port->adapter; 3179 3182 unregister_netdev(port->netdev); 3180 3183 ehea_unregister_port(port); 3181 3184 kfree(port->mc_list); 3182 3185 free_netdev(port->netdev); 3183 - port->adapter->active_ports--; 3186 + adapter->active_ports--; 3184 3187 } 3185 3188 3186 3189 static int ehea_setup_ports(struct ehea_adapter *adapter)
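The ehea_shutdown_single_port() fix is the classic save-before-free ordering: free_netdev() releases the memory the port structure lives in, so anything still needed from it must be copied to a local first. Sketched generically below; my_port, my_adapter and the helper are hypothetical.

#include <linux/netdevice.h>
#include <linux/slab.h>

struct my_adapter { int active_ports; /* ... */ };

struct my_port {				/* lives inside netdev_priv(netdev) */
	struct my_adapter *adapter;
	struct net_device *netdev;
	void *mc_list;
};

static void my_shutdown_single_port(struct my_port *port)
{
	struct my_adapter *adapter = port->adapter;	/* save before the frees */

	unregister_netdev(port->netdev);
	kfree(port->mc_list);
	free_netdev(port->netdev);	/* frees the memory 'port' lives in... */
	adapter->active_ports--;	/* ...so only the saved pointer is safe here */
}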
+1
drivers/net/forcedeth.c
··· 5823 5823 writel(txreg, base + NvRegTransmitPoll); 5824 5824 5825 5825 rc = nv_open(dev); 5826 + nv_set_multicast(dev); 5826 5827 out: 5827 5828 return rc; 5828 5829 }
+1 -1
drivers/net/fs_enet/fs_enet-main.c
··· 1093 1093 if (registered) 1094 1094 unregister_netdev(ndev); 1095 1095 1096 - if (fep != NULL) { 1096 + if (fep && fep->ops) { 1097 1097 (*fep->ops->free_bd)(ndev); 1098 1098 (*fep->ops->cleanup_data)(ndev); 1099 1099 }
+2 -1
drivers/net/hamradio/scc.c
··· 1340 1340 case PARAM_RTS: 1341 1341 if ( !(scc->wreg[R5] & RTS) ) 1342 1342 { 1343 - if (arg != TX_OFF) 1343 + if (arg != TX_OFF) { 1344 1344 scc_key_trx(scc, TX_ON); 1345 1345 scc_start_tx_timer(scc, t_txdelay, scc->kiss.txdelay); 1346 + } 1346 1347 } else { 1347 1348 if (arg == TX_OFF) 1348 1349 {
+1 -1
drivers/net/myri10ge/myri10ge.c
··· 631 631 return status; 632 632 } 633 633 634 - int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) 634 + static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp) 635 635 { 636 636 struct myri10ge_cmd cmd; 637 637 int status;
+3 -1
drivers/net/pcmcia/fmvj18x_cs.c
··· 391 391 cardtype = CONTEC; 392 392 break; 393 393 case MANFID_FUJITSU: 394 - if (link->card_id == PRODID_FUJITSU_MBH10302) 394 + if (link->conf.ConfigBase == 0x0fe0) 395 + cardtype = MBH10302; 396 + else if (link->card_id == PRODID_FUJITSU_MBH10302) 395 397 /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302), 396 398 but these are MBH10304 based card. */ 397 399 cardtype = MBH10304;
+8 -4
drivers/net/pcmcia/xirc2ps_cs.c
··· 1461 1461 set_multicast_list(struct net_device *dev) 1462 1462 { 1463 1463 unsigned int ioaddr = dev->base_addr; 1464 + unsigned value; 1464 1465 1465 1466 SelectPage(0x42); 1467 + value = GetByte(XIRCREG42_SWC1) & 0xC0; 1468 + 1466 1469 if (dev->flags & IFF_PROMISC) { /* snoop */ 1467 - PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */ 1470 + PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */ 1468 1471 } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) { 1469 - PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */ 1472 + PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */ 1470 1473 } else if (dev->mc_count) { 1471 1474 /* the chip can filter 9 addresses perfectly */ 1472 - PutByte(XIRCREG42_SWC1, 0x01); 1475 + PutByte(XIRCREG42_SWC1, value | 0x01); 1473 1476 SelectPage(0x40); 1474 1477 PutByte(XIRCREG40_CMD0, Offline); 1475 1478 set_addresses(dev); 1476 1479 SelectPage(0x40); 1477 1480 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1478 1481 } else { /* standard usage */ 1479 - PutByte(XIRCREG42_SWC1, 0x00); 1482 + PutByte(XIRCREG42_SWC1, value | 0x00); 1480 1483 } 1481 1484 SelectPage(0); 1482 1485 } ··· 1725 1722 1726 1723 /* enable receiver and put the mac online */ 1727 1724 if (full) { 1725 + set_multicast_list(dev); 1728 1726 SelectPage(0x40); 1729 1727 PutByte(XIRCREG40_CMD0, EnableRecv | Online); 1730 1728 }
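The xirc2ps change stops set_multicast_list() from clobbering the top two bits of SWC1 by reading them back and OR-ing in only the receive-mode bits. The same read-modify-write idea in a tiny self-contained form; the constants echo the hunk, but the helper itself is illustrative.

#define SWC1_KEEP_MASK	0xC0u	/* bits owned elsewhere: always preserved */
#define SWC1_PROMISC	0x06u	/* MPE + PME */
#define SWC1_ALLMULTI	0x02u	/* MPE only */
#define SWC1_MCFILTER	0x01u	/* use the 9-entry hardware filter */

static unsigned char my_swc1_value(unsigned char old, int promisc,
				   int allmulti, int mc_count)
{
	unsigned char value = old & SWC1_KEEP_MASK;	/* read-modify-write */

	if (promisc)
		return value | SWC1_PROMISC;
	if (allmulti || mc_count > 9)
		return value | SWC1_ALLMULTI;
	if (mc_count)
		return value | SWC1_MCFILTER;
	return value;			/* standard usage: no extra receive modes */
}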
+2 -2
drivers/net/pcnet32.c
··· 325 325 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs, 326 326 void *ptr); 327 327 static void pcnet32_purge_tx_ring(struct net_device *dev); 328 - static int pcnet32_alloc_ring(struct net_device *dev, char *name); 328 + static int pcnet32_alloc_ring(struct net_device *dev, const char *name); 329 329 static void pcnet32_free_ring(struct net_device *dev); 330 330 static void pcnet32_check_media(struct net_device *dev, int verbose); 331 331 ··· 1983 1983 } 1984 1984 1985 1985 /* if any allocation fails, caller must also call pcnet32_free_ring */ 1986 - static int pcnet32_alloc_ring(struct net_device *dev, char *name) 1986 + static int pcnet32_alloc_ring(struct net_device *dev, const char *name) 1987 1987 { 1988 1988 struct pcnet32_private *lp = netdev_priv(dev); 1989 1989
+1 -1
drivers/net/phy/Kconfig
··· 5 5 menuconfig PHYLIB 6 6 tristate "PHY Device support and infrastructure" 7 7 depends on !S390 8 - depends on NET_ETHERNET && (BROKEN || !S390) 8 + depends on NET_ETHERNET 9 9 help 10 10 Ethernet controllers are usually attached to PHY 11 11 devices. This option provides infrastructure for
+1
drivers/net/phy/phy_device.c
··· 207 207 208 208 return 0; 209 209 } 210 + EXPORT_SYMBOL(get_phy_id); 210 211 211 212 /** 212 213 * get_phy_device - reads the specified PHY device and returns its @phy_device struct
+1 -1
drivers/net/s2io-regs.h
··· 250 250 u64 tx_mat0_n[0x8]; 251 251 #define TX_MAT_SET(fifo, msi) vBIT(msi, (8 * fifo), 8) 252 252 253 - u8 unused_1[0x8]; 253 + u64 xmsi_mask_reg; 254 254 u64 stat_byte_cnt; 255 255 #define STAT_BC(n) vBIT(n,4,12) 256 256
+293 -201
drivers/net/s2io.c
··· 86 86 #include "s2io.h" 87 87 #include "s2io-regs.h" 88 88 89 - #define DRV_VERSION "2.0.26.23" 89 + #define DRV_VERSION "2.0.26.24" 90 90 91 91 /* S2io Driver name & version. */ 92 92 static char s2io_driver_name[] = "Neterion"; ··· 1113 1113 struct pci_dev *tdev = NULL; 1114 1114 while ((tdev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, tdev)) != NULL) { 1115 1115 if (tdev->vendor == NEC_VENID && tdev->device == NEC_DEVID) { 1116 - if (tdev->bus == s2io_pdev->bus->parent) 1116 + if (tdev->bus == s2io_pdev->bus->parent) { 1117 1117 pci_dev_put(tdev); 1118 1118 return 1; 1119 + } 1119 1120 } 1120 1121 } 1121 1122 return 0; ··· 1220 1219 TTI_DATA1_MEM_TX_URNG_B(0x10) | 1221 1220 TTI_DATA1_MEM_TX_URNG_C(0x30) | 1222 1221 TTI_DATA1_MEM_TX_TIMER_AC_EN; 1223 - 1224 - if (use_continuous_tx_intrs && (link == LINK_UP)) 1225 - val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; 1222 + if (i == 0) 1223 + if (use_continuous_tx_intrs && (link == LINK_UP)) 1224 + val64 |= TTI_DATA1_MEM_TX_TIMER_CI_EN; 1226 1225 writeq(val64, &bar0->tti_data1_mem); 1227 1226 1228 - val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1229 - TTI_DATA2_MEM_TX_UFC_B(0x20) | 1230 - TTI_DATA2_MEM_TX_UFC_C(0x40) | 1231 - TTI_DATA2_MEM_TX_UFC_D(0x80); 1227 + if (nic->config.intr_type == MSI_X) { 1228 + val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1229 + TTI_DATA2_MEM_TX_UFC_B(0x100) | 1230 + TTI_DATA2_MEM_TX_UFC_C(0x200) | 1231 + TTI_DATA2_MEM_TX_UFC_D(0x300); 1232 + } else { 1233 + if ((nic->config.tx_steering_type == 1234 + TX_DEFAULT_STEERING) && 1235 + (config->tx_fifo_num > 1) && 1236 + (i >= nic->udp_fifo_idx) && 1237 + (i < (nic->udp_fifo_idx + 1238 + nic->total_udp_fifos))) 1239 + val64 = TTI_DATA2_MEM_TX_UFC_A(0x50) | 1240 + TTI_DATA2_MEM_TX_UFC_B(0x80) | 1241 + TTI_DATA2_MEM_TX_UFC_C(0x100) | 1242 + TTI_DATA2_MEM_TX_UFC_D(0x120); 1243 + else 1244 + val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) | 1245 + TTI_DATA2_MEM_TX_UFC_B(0x20) | 1246 + TTI_DATA2_MEM_TX_UFC_C(0x40) | 1247 + TTI_DATA2_MEM_TX_UFC_D(0x80); 1248 + } 1232 1249 1233 1250 writeq(val64, &bar0->tti_data2_mem); 1234 1251 ··· 2832 2813 } 2833 2814 } 2834 2815 2816 + static int s2io_chk_rx_buffers(struct ring_info *ring) 2817 + { 2818 + if (fill_rx_buffers(ring) == -ENOMEM) { 2819 + DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); 2820 + DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); 2821 + } 2822 + return 0; 2823 + } 2824 + 2835 2825 /** 2836 2826 * s2io_poll - Rx interrupt handler for NAPI support 2837 2827 * @napi : pointer to the napi structure. ··· 2854 2826 * 0 on success and 1 if there are No Rx packets to be processed. 
2855 2827 */ 2856 2828 2857 - static int s2io_poll(struct napi_struct *napi, int budget) 2829 + static int s2io_poll_msix(struct napi_struct *napi, int budget) 2830 + { 2831 + struct ring_info *ring = container_of(napi, struct ring_info, napi); 2832 + struct net_device *dev = ring->dev; 2833 + struct config_param *config; 2834 + struct mac_info *mac_control; 2835 + int pkts_processed = 0; 2836 + u8 *addr = NULL, val8 = 0; 2837 + struct s2io_nic *nic = dev->priv; 2838 + struct XENA_dev_config __iomem *bar0 = nic->bar0; 2839 + int budget_org = budget; 2840 + 2841 + config = &nic->config; 2842 + mac_control = &nic->mac_control; 2843 + 2844 + if (unlikely(!is_s2io_card_up(nic))) 2845 + return 0; 2846 + 2847 + pkts_processed = rx_intr_handler(ring, budget); 2848 + s2io_chk_rx_buffers(ring); 2849 + 2850 + if (pkts_processed < budget_org) { 2851 + netif_rx_complete(dev, napi); 2852 + /*Re Enable MSI-Rx Vector*/ 2853 + addr = (u8 *)&bar0->xmsi_mask_reg; 2854 + addr += 7 - ring->ring_no; 2855 + val8 = (ring->ring_no == 0) ? 0x3f : 0xbf; 2856 + writeb(val8, addr); 2857 + val8 = readb(addr); 2858 + } 2859 + return pkts_processed; 2860 + } 2861 + static int s2io_poll_inta(struct napi_struct *napi, int budget) 2858 2862 { 2859 2863 struct s2io_nic *nic = container_of(napi, struct s2io_nic, napi); 2864 + struct ring_info *ring; 2860 2865 struct net_device *dev = nic->dev; 2861 - int pkt_cnt = 0, org_pkts_to_process; 2862 - struct mac_info *mac_control; 2863 2866 struct config_param *config; 2867 + struct mac_info *mac_control; 2868 + int pkts_processed = 0; 2869 + int ring_pkts_processed, i; 2864 2870 struct XENA_dev_config __iomem *bar0 = nic->bar0; 2865 - int i; 2871 + int budget_org = budget; 2866 2872 2867 - mac_control = &nic->mac_control; 2868 2873 config = &nic->config; 2874 + mac_control = &nic->mac_control; 2869 2875 2870 - nic->pkts_to_process = budget; 2871 - org_pkts_to_process = nic->pkts_to_process; 2872 - 2873 - writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 2874 - readl(&bar0->rx_traffic_int); 2876 + if (unlikely(!is_s2io_card_up(nic))) 2877 + return 0; 2875 2878 2876 2879 for (i = 0; i < config->rx_ring_num; i++) { 2877 - rx_intr_handler(&mac_control->rings[i]); 2878 - pkt_cnt = org_pkts_to_process - nic->pkts_to_process; 2879 - if (!nic->pkts_to_process) { 2880 - /* Quota for the current iteration has been met */ 2881 - goto no_rx; 2882 - } 2883 - } 2884 - 2885 - netif_rx_complete(dev, napi); 2886 - 2887 - for (i = 0; i < config->rx_ring_num; i++) { 2888 - if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2889 - DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2890 - DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2880 + ring = &mac_control->rings[i]; 2881 + ring_pkts_processed = rx_intr_handler(ring, budget); 2882 + s2io_chk_rx_buffers(ring); 2883 + pkts_processed += ring_pkts_processed; 2884 + budget -= ring_pkts_processed; 2885 + if (budget <= 0) 2891 2886 break; 2892 - } 2893 2887 } 2894 - /* Re enable the Rx interrupts. 
*/ 2895 - writeq(0x0, &bar0->rx_traffic_mask); 2896 - readl(&bar0->rx_traffic_mask); 2897 - return pkt_cnt; 2898 - 2899 - no_rx: 2900 - for (i = 0; i < config->rx_ring_num; i++) { 2901 - if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { 2902 - DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); 2903 - DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); 2904 - break; 2905 - } 2888 + if (pkts_processed < budget_org) { 2889 + netif_rx_complete(dev, napi); 2890 + /* Re enable the Rx interrupts for the ring */ 2891 + writeq(0, &bar0->rx_traffic_mask); 2892 + readl(&bar0->rx_traffic_mask); 2906 2893 } 2907 - return pkt_cnt; 2894 + return pkts_processed; 2908 2895 } 2909 2896 2910 2897 #ifdef CONFIG_NET_POLL_CONTROLLER ··· 2961 2918 2962 2919 /* check for received packet and indicate up to network */ 2963 2920 for (i = 0; i < config->rx_ring_num; i++) 2964 - rx_intr_handler(&mac_control->rings[i]); 2921 + rx_intr_handler(&mac_control->rings[i], 0); 2965 2922 2966 2923 for (i = 0; i < config->rx_ring_num; i++) { 2967 2924 if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { ··· 2977 2934 2978 2935 /** 2979 2936 * rx_intr_handler - Rx interrupt handler 2980 - * @nic: device private variable. 2937 + * @ring_info: per ring structure. 2938 + * @budget: budget for napi processing. 2981 2939 * Description: 2982 2940 * If the interrupt is because of a received frame or if the 2983 2941 * receive ring contains fresh as yet un-processed frames,this function is ··· 2986 2942 * stopped and sends the skb to the OSM's Rx handler and then increments 2987 2943 * the offset. 2988 2944 * Return Value: 2989 - * NONE. 2945 + * No. of napi packets processed. 2990 2946 */ 2991 - static void rx_intr_handler(struct ring_info *ring_data) 2947 + static int rx_intr_handler(struct ring_info *ring_data, int budget) 2992 2948 { 2993 2949 int get_block, put_block; 2994 2950 struct rx_curr_get_info get_info, put_info; 2995 2951 struct RxD_t *rxdp; 2996 2952 struct sk_buff *skb; 2997 - int pkt_cnt = 0; 2953 + int pkt_cnt = 0, napi_pkts = 0; 2998 2954 int i; 2999 2955 struct RxD1* rxdp1; 3000 2956 struct RxD3* rxdp3; ··· 3021 2977 DBG_PRINT(ERR_DBG, "%s: The skb is ", 3022 2978 ring_data->dev->name); 3023 2979 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); 3024 - return; 2980 + return 0; 3025 2981 } 3026 2982 if (ring_data->rxd_mode == RXD_MODE_1) { 3027 2983 rxdp1 = (struct RxD1*)rxdp; ··· 3058 3014 rxdp = ring_data->rx_blocks[get_block].block_virt_addr; 3059 3015 } 3060 3016 3061 - if(ring_data->nic->config.napi){ 3062 - ring_data->nic->pkts_to_process -= 1; 3063 - if (!ring_data->nic->pkts_to_process) 3017 + if (ring_data->nic->config.napi) { 3018 + budget--; 3019 + napi_pkts++; 3020 + if (!budget) 3064 3021 break; 3065 3022 } 3066 3023 pkt_cnt++; ··· 3079 3034 } 3080 3035 } 3081 3036 } 3037 + return(napi_pkts); 3082 3038 } 3083 3039 3084 3040 /** ··· 3776 3730 { 3777 3731 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3778 3732 u64 val64; 3779 - int i; 3733 + int i, msix_index; 3734 + 3735 + 3736 + if (nic->device_type == XFRAME_I_DEVICE) 3737 + return; 3780 3738 3781 3739 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3740 + msix_index = (i) ? 
((i-1) * 8 + 1): 0; 3782 3741 writeq(nic->msix_info[i].addr, &bar0->xmsi_address); 3783 3742 writeq(nic->msix_info[i].data, &bar0->xmsi_data); 3784 - val64 = (s2BIT(7) | s2BIT(15) | vBIT(i, 26, 6)); 3743 + val64 = (s2BIT(7) | s2BIT(15) | vBIT(msix_index, 26, 6)); 3785 3744 writeq(val64, &bar0->xmsi_access); 3786 - if (wait_for_msix_trans(nic, i)) { 3745 + if (wait_for_msix_trans(nic, msix_index)) { 3787 3746 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3788 3747 continue; 3789 3748 } ··· 3799 3748 { 3800 3749 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3801 3750 u64 val64, addr, data; 3802 - int i; 3751 + int i, msix_index; 3752 + 3753 + if (nic->device_type == XFRAME_I_DEVICE) 3754 + return; 3803 3755 3804 3756 /* Store and display */ 3805 3757 for (i=0; i < MAX_REQUESTED_MSI_X; i++) { 3806 - val64 = (s2BIT(15) | vBIT(i, 26, 6)); 3758 + msix_index = (i) ? ((i-1) * 8 + 1): 0; 3759 + val64 = (s2BIT(15) | vBIT(msix_index, 26, 6)); 3807 3760 writeq(val64, &bar0->xmsi_access); 3808 - if (wait_for_msix_trans(nic, i)) { 3761 + if (wait_for_msix_trans(nic, msix_index)) { 3809 3762 DBG_PRINT(ERR_DBG, "failed in %s\n", __FUNCTION__); 3810 3763 continue; 3811 3764 } ··· 3825 3770 static int s2io_enable_msi_x(struct s2io_nic *nic) 3826 3771 { 3827 3772 struct XENA_dev_config __iomem *bar0 = nic->bar0; 3828 - u64 tx_mat, rx_mat; 3773 + u64 rx_mat; 3829 3774 u16 msi_control; /* Temp variable */ 3830 3775 int ret, i, j, msix_indx = 1; 3831 3776 3832 - nic->entries = kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct msix_entry), 3777 + nic->entries = kmalloc(nic->num_entries * sizeof(struct msix_entry), 3833 3778 GFP_KERNEL); 3834 3779 if (!nic->entries) { 3835 3780 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", \ ··· 3838 3783 return -ENOMEM; 3839 3784 } 3840 3785 nic->mac_control.stats_info->sw_stat.mem_allocated 3841 - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3786 + += (nic->num_entries * sizeof(struct msix_entry)); 3787 + 3788 + memset(nic->entries, 0, nic->num_entries * sizeof(struct msix_entry)); 3842 3789 3843 3790 nic->s2io_entries = 3844 - kcalloc(MAX_REQUESTED_MSI_X, sizeof(struct s2io_msix_entry), 3791 + kmalloc(nic->num_entries * sizeof(struct s2io_msix_entry), 3845 3792 GFP_KERNEL); 3846 3793 if (!nic->s2io_entries) { 3847 3794 DBG_PRINT(INFO_DBG, "%s: Memory allocation failed\n", ··· 3851 3794 nic->mac_control.stats_info->sw_stat.mem_alloc_fail_cnt++; 3852 3795 kfree(nic->entries); 3853 3796 nic->mac_control.stats_info->sw_stat.mem_freed 3854 - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3797 + += (nic->num_entries * sizeof(struct msix_entry)); 3855 3798 return -ENOMEM; 3856 3799 } 3857 3800 nic->mac_control.stats_info->sw_stat.mem_allocated 3858 - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 3801 + += (nic->num_entries * sizeof(struct s2io_msix_entry)); 3802 + memset(nic->s2io_entries, 0, 3803 + nic->num_entries * sizeof(struct s2io_msix_entry)); 3859 3804 3860 - for (i=0; i< MAX_REQUESTED_MSI_X; i++) { 3861 - nic->entries[i].entry = i; 3862 - nic->s2io_entries[i].entry = i; 3805 + nic->entries[0].entry = 0; 3806 + nic->s2io_entries[0].entry = 0; 3807 + nic->s2io_entries[0].in_use = MSIX_FLG; 3808 + nic->s2io_entries[0].type = MSIX_ALARM_TYPE; 3809 + nic->s2io_entries[0].arg = &nic->mac_control.fifos; 3810 + 3811 + for (i = 1; i < nic->num_entries; i++) { 3812 + nic->entries[i].entry = ((i - 1) * 8) + 1; 3813 + nic->s2io_entries[i].entry = ((i - 1) * 8) + 1; 3863 3814 nic->s2io_entries[i].arg = NULL; 3864 3815 nic->s2io_entries[i].in_use = 
0; 3865 3816 } 3866 3817 3867 - tx_mat = readq(&bar0->tx_mat0_n[0]); 3868 - for (i=0; i<nic->config.tx_fifo_num; i++, msix_indx++) { 3869 - tx_mat |= TX_MAT_SET(i, msix_indx); 3870 - nic->s2io_entries[msix_indx].arg = &nic->mac_control.fifos[i]; 3871 - nic->s2io_entries[msix_indx].type = MSIX_FIFO_TYPE; 3872 - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; 3873 - } 3874 - writeq(tx_mat, &bar0->tx_mat0_n[0]); 3875 - 3876 3818 rx_mat = readq(&bar0->rx_mat); 3877 - for (j = 0; j < nic->config.rx_ring_num; j++, msix_indx++) { 3819 + for (j = 0; j < nic->config.rx_ring_num; j++) { 3878 3820 rx_mat |= RX_MAT_SET(j, msix_indx); 3879 - nic->s2io_entries[msix_indx].arg 3880 - = &nic->mac_control.rings[j]; 3881 - nic->s2io_entries[msix_indx].type = MSIX_RING_TYPE; 3882 - nic->s2io_entries[msix_indx].in_use = MSIX_FLG; 3821 + nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j]; 3822 + nic->s2io_entries[j+1].type = MSIX_RING_TYPE; 3823 + nic->s2io_entries[j+1].in_use = MSIX_FLG; 3824 + msix_indx += 8; 3883 3825 } 3884 3826 writeq(rx_mat, &bar0->rx_mat); 3827 + readq(&bar0->rx_mat); 3885 3828 3886 - nic->avail_msix_vectors = 0; 3887 - ret = pci_enable_msix(nic->pdev, nic->entries, MAX_REQUESTED_MSI_X); 3829 + ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries); 3888 3830 /* We fail init if error or we get less vectors than min required */ 3889 - if (ret >= (nic->config.tx_fifo_num + nic->config.rx_ring_num + 1)) { 3890 - nic->avail_msix_vectors = ret; 3891 - ret = pci_enable_msix(nic->pdev, nic->entries, ret); 3892 - } 3893 3831 if (ret) { 3894 3832 DBG_PRINT(ERR_DBG, "%s: Enabling MSIX failed\n", nic->dev->name); 3895 3833 kfree(nic->entries); 3896 3834 nic->mac_control.stats_info->sw_stat.mem_freed 3897 - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 3835 + += (nic->num_entries * sizeof(struct msix_entry)); 3898 3836 kfree(nic->s2io_entries); 3899 3837 nic->mac_control.stats_info->sw_stat.mem_freed 3900 - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 3838 + += (nic->num_entries * sizeof(struct s2io_msix_entry)); 3901 3839 nic->entries = NULL; 3902 3840 nic->s2io_entries = NULL; 3903 - nic->avail_msix_vectors = 0; 3904 3841 return -ENOMEM; 3905 3842 } 3906 - if (!nic->avail_msix_vectors) 3907 - nic->avail_msix_vectors = MAX_REQUESTED_MSI_X; 3908 3843 3909 3844 /* 3910 3845 * To enable MSI-X, MSI also needs to be enabled, due to a bug ··· 3968 3919 int i; 3969 3920 u16 msi_control; 3970 3921 3971 - for (i = 0; i < MAX_REQUESTED_MSI_X; i++) { 3922 + for (i = 0; i < sp->num_entries; i++) { 3972 3923 if (sp->s2io_entries[i].in_use == 3973 3924 MSIX_REGISTERED_SUCCESS) { 3974 3925 int vector = sp->entries[i].vector; ··· 4024 3975 netif_carrier_off(dev); 4025 3976 sp->last_link_state = 0; 4026 3977 4027 - if (sp->config.intr_type == MSI_X) { 4028 - int ret = s2io_enable_msi_x(sp); 4029 - 4030 - if (!ret) { 4031 - ret = s2io_test_msi(sp); 4032 - /* rollback MSI-X, will re-enable during add_isr() */ 4033 - remove_msix_isr(sp); 4034 - } 4035 - if (ret) { 4036 - 4037 - DBG_PRINT(ERR_DBG, 4038 - "%s: MSI-X requested but failed to enable\n", 4039 - dev->name); 4040 - sp->config.intr_type = INTA; 4041 - } 4042 - } 4043 - 4044 - /* NAPI doesn't work well with MSI(X) */ 4045 - if (sp->config.intr_type != INTA) { 4046 - if(sp->config.napi) 4047 - sp->config.napi = 0; 4048 - } 4049 - 4050 3978 /* Initialize H/W and enable interrupts */ 4051 3979 err = s2io_card_up(sp); 4052 3980 if (err) { ··· 4046 4020 if (sp->entries) { 4047 4021 kfree(sp->entries); 4048 4022 
sp->mac_control.stats_info->sw_stat.mem_freed 4049 - += (MAX_REQUESTED_MSI_X * sizeof(struct msix_entry)); 4023 + += (sp->num_entries * sizeof(struct msix_entry)); 4050 4024 } 4051 4025 if (sp->s2io_entries) { 4052 4026 kfree(sp->s2io_entries); 4053 4027 sp->mac_control.stats_info->sw_stat.mem_freed 4054 - += (MAX_REQUESTED_MSI_X * sizeof(struct s2io_msix_entry)); 4028 + += (sp->num_entries * sizeof(struct s2io_msix_entry)); 4055 4029 } 4056 4030 } 4057 4031 return err; ··· 4353 4327 mod_timer(&sp->alarm_timer, jiffies + HZ / 2); 4354 4328 } 4355 4329 4356 - static int s2io_chk_rx_buffers(struct ring_info *ring) 4357 - { 4358 - if (fill_rx_buffers(ring) == -ENOMEM) { 4359 - DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); 4360 - DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); 4361 - } 4362 - return 0; 4363 - } 4364 - 4365 4330 static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) 4366 4331 { 4367 4332 struct ring_info *ring = (struct ring_info *)dev_id; 4368 4333 struct s2io_nic *sp = ring->nic; 4334 + struct XENA_dev_config __iomem *bar0 = sp->bar0; 4335 + struct net_device *dev = sp->dev; 4369 4336 4370 - if (!is_s2io_card_up(sp)) 4337 + if (unlikely(!is_s2io_card_up(sp))) 4371 4338 return IRQ_HANDLED; 4372 4339 4373 - rx_intr_handler(ring); 4374 - s2io_chk_rx_buffers(ring); 4340 + if (sp->config.napi) { 4341 + u8 *addr = NULL, val8 = 0; 4342 + 4343 + addr = (u8 *)&bar0->xmsi_mask_reg; 4344 + addr += (7 - ring->ring_no); 4345 + val8 = (ring->ring_no == 0) ? 0x7f : 0xff; 4346 + writeb(val8, addr); 4347 + val8 = readb(addr); 4348 + netif_rx_schedule(dev, &ring->napi); 4349 + } else { 4350 + rx_intr_handler(ring, 0); 4351 + s2io_chk_rx_buffers(ring); 4352 + } 4375 4353 4376 4354 return IRQ_HANDLED; 4377 4355 } 4378 4356 4379 4357 static irqreturn_t s2io_msix_fifo_handle(int irq, void *dev_id) 4380 4358 { 4381 - struct fifo_info *fifo = (struct fifo_info *)dev_id; 4382 - struct s2io_nic *sp = fifo->nic; 4359 + int i; 4360 + struct fifo_info *fifos = (struct fifo_info *)dev_id; 4361 + struct s2io_nic *sp = fifos->nic; 4362 + struct XENA_dev_config __iomem *bar0 = sp->bar0; 4363 + struct config_param *config = &sp->config; 4364 + u64 reason; 4383 4365 4384 - if (!is_s2io_card_up(sp)) 4366 + if (unlikely(!is_s2io_card_up(sp))) 4367 + return IRQ_NONE; 4368 + 4369 + reason = readq(&bar0->general_int_status); 4370 + if (unlikely(reason == S2IO_MINUS_ONE)) 4371 + /* Nothing much can be done. 
Get out */ 4385 4372 return IRQ_HANDLED; 4386 4373 4387 - tx_intr_handler(fifo); 4374 + writeq(S2IO_MINUS_ONE, &bar0->general_int_mask); 4375 + 4376 + if (reason & GEN_INTR_TXTRAFFIC) 4377 + writeq(S2IO_MINUS_ONE, &bar0->tx_traffic_int); 4378 + 4379 + for (i = 0; i < config->tx_fifo_num; i++) 4380 + tx_intr_handler(&fifos[i]); 4381 + 4382 + writeq(sp->general_int_mask, &bar0->general_int_mask); 4383 + readl(&bar0->general_int_status); 4384 + 4388 4385 return IRQ_HANDLED; 4389 4386 } 4387 + 4390 4388 static void s2io_txpic_intr_handle(struct s2io_nic *sp) 4391 4389 { 4392 4390 struct XENA_dev_config __iomem *bar0 = sp->bar0; ··· 4812 4762 4813 4763 if (config->napi) { 4814 4764 if (reason & GEN_INTR_RXTRAFFIC) { 4815 - if (likely(netif_rx_schedule_prep(dev, 4816 - &sp->napi))) { 4817 - __netif_rx_schedule(dev, &sp->napi); 4818 - writeq(S2IO_MINUS_ONE, 4819 - &bar0->rx_traffic_mask); 4820 - } else 4821 - writeq(S2IO_MINUS_ONE, 4822 - &bar0->rx_traffic_int); 4765 + netif_rx_schedule(dev, &sp->napi); 4766 + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_mask); 4767 + writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4768 + readl(&bar0->rx_traffic_int); 4823 4769 } 4824 4770 } else { 4825 4771 /* ··· 4827 4781 writeq(S2IO_MINUS_ONE, &bar0->rx_traffic_int); 4828 4782 4829 4783 for (i = 0; i < config->rx_ring_num; i++) 4830 - rx_intr_handler(&mac_control->rings[i]); 4784 + rx_intr_handler(&mac_control->rings[i], 0); 4831 4785 } 4832 4786 4833 4787 /* ··· 7030 6984 7031 6985 /* After proper initialization of H/W, register ISR */ 7032 6986 if (sp->config.intr_type == MSI_X) { 7033 - int i, msix_tx_cnt=0,msix_rx_cnt=0; 6987 + int i, msix_rx_cnt = 0; 7034 6988 7035 - for (i=1; (sp->s2io_entries[i].in_use == MSIX_FLG); i++) { 7036 - if (sp->s2io_entries[i].type == MSIX_FIFO_TYPE) { 7037 - sprintf(sp->desc[i], "%s:MSI-X-%d-TX", 6989 + for (i = 0; i < sp->num_entries; i++) { 6990 + if (sp->s2io_entries[i].in_use == MSIX_FLG) { 6991 + if (sp->s2io_entries[i].type == 6992 + MSIX_RING_TYPE) { 6993 + sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 6994 + dev->name, i); 6995 + err = request_irq(sp->entries[i].vector, 6996 + s2io_msix_ring_handle, 0, 6997 + sp->desc[i], 6998 + sp->s2io_entries[i].arg); 6999 + } else if (sp->s2io_entries[i].type == 7000 + MSIX_ALARM_TYPE) { 7001 + sprintf(sp->desc[i], "%s:MSI-X-%d-TX", 7038 7002 dev->name, i); 7039 - err = request_irq(sp->entries[i].vector, 7040 - s2io_msix_fifo_handle, 0, sp->desc[i], 7041 - sp->s2io_entries[i].arg); 7042 - /* If either data or addr is zero print it */ 7043 - if(!(sp->msix_info[i].addr && 7044 - sp->msix_info[i].data)) { 7045 - DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7046 - "Data:0x%llx\n",sp->desc[i], 7047 - (unsigned long long) 7048 - sp->msix_info[i].addr, 7049 - (unsigned long long) 7050 - sp->msix_info[i].data); 7051 - } else { 7052 - msix_tx_cnt++; 7003 + err = request_irq(sp->entries[i].vector, 7004 + s2io_msix_fifo_handle, 0, 7005 + sp->desc[i], 7006 + sp->s2io_entries[i].arg); 7007 + 7053 7008 } 7054 - } else { 7055 - sprintf(sp->desc[i], "%s:MSI-X-%d-RX", 7056 - dev->name, i); 7057 - err = request_irq(sp->entries[i].vector, 7058 - s2io_msix_ring_handle, 0, sp->desc[i], 7059 - sp->s2io_entries[i].arg); 7060 - /* If either data or addr is zero print it */ 7061 - if(!(sp->msix_info[i].addr && 7009 + /* if either data or addr is zero print it. 
*/ 7010 + if (!(sp->msix_info[i].addr && 7062 7011 sp->msix_info[i].data)) { 7063 - DBG_PRINT(ERR_DBG, "%s @ Addr:0x%llx " 7064 - "Data:0x%llx\n",sp->desc[i], 7012 + DBG_PRINT(ERR_DBG, 7013 + "%s @Addr:0x%llx Data:0x%llx\n", 7014 + sp->desc[i], 7065 7015 (unsigned long long) 7066 7016 sp->msix_info[i].addr, 7067 7017 (unsigned long long) 7068 - sp->msix_info[i].data); 7069 - } else { 7018 + ntohl(sp->msix_info[i].data)); 7019 + } else 7070 7020 msix_rx_cnt++; 7021 + if (err) { 7022 + remove_msix_isr(sp); 7023 + 7024 + DBG_PRINT(ERR_DBG, 7025 + "%s:MSI-X-%d registration " 7026 + "failed\n", dev->name, i); 7027 + 7028 + DBG_PRINT(ERR_DBG, 7029 + "%s: Defaulting to INTA\n", 7030 + dev->name); 7031 + sp->config.intr_type = INTA; 7032 + break; 7071 7033 } 7034 + sp->s2io_entries[i].in_use = 7035 + MSIX_REGISTERED_SUCCESS; 7072 7036 } 7073 - if (err) { 7074 - remove_msix_isr(sp); 7075 - DBG_PRINT(ERR_DBG,"%s:MSI-X-%d registration " 7076 - "failed\n", dev->name, i); 7077 - DBG_PRINT(ERR_DBG, "%s: defaulting to INTA\n", 7078 - dev->name); 7079 - sp->config.intr_type = INTA; 7080 - break; 7081 - } 7082 - sp->s2io_entries[i].in_use = MSIX_REGISTERED_SUCCESS; 7083 7037 } 7084 7038 if (!err) { 7085 - printk(KERN_INFO "MSI-X-TX %d entries enabled\n", 7086 - msix_tx_cnt); 7087 7039 printk(KERN_INFO "MSI-X-RX %d entries enabled\n", 7088 - msix_rx_cnt); 7040 + --msix_rx_cnt); 7041 + DBG_PRINT(INFO_DBG, "MSI-X-TX entries enabled" 7042 + " through alarm vector\n"); 7089 7043 } 7090 7044 } 7091 7045 if (sp->config.intr_type == INTA) { ··· 7126 7080 clear_bit(__S2IO_STATE_CARD_UP, &sp->state); 7127 7081 7128 7082 /* Disable napi */ 7129 - if (config->napi) 7130 - napi_disable(&sp->napi); 7083 + if (sp->config.napi) { 7084 + int off = 0; 7085 + if (config->intr_type == MSI_X) { 7086 + for (; off < sp->config.rx_ring_num; off++) 7087 + napi_disable(&sp->mac_control.rings[off].napi); 7088 + } 7089 + else 7090 + napi_disable(&sp->napi); 7091 + } 7131 7092 7132 7093 /* disable Tx and Rx traffic on the NIC */ 7133 7094 if (do_io) ··· 7226 7173 } 7227 7174 7228 7175 /* Initialise napi */ 7229 - if (config->napi) 7230 - napi_enable(&sp->napi); 7176 + if (config->napi) { 7177 + int i; 7178 + if (config->intr_type == MSI_X) { 7179 + for (i = 0; i < sp->config.rx_ring_num; i++) 7180 + napi_enable(&sp->mac_control.rings[i].napi); 7181 + } else { 7182 + napi_enable(&sp->napi); 7183 + } 7184 + } 7231 7185 7232 7186 /* Maintain the state prior to the open */ 7233 7187 if (sp->promisc_flg) ··· 7277 7217 /* Enable select interrupts */ 7278 7218 en_dis_err_alarms(sp, ENA_ALL_INTRS, ENABLE_INTRS); 7279 7219 if (sp->config.intr_type != INTA) 7280 - en_dis_able_nic_intrs(sp, ENA_ALL_INTRS, DISABLE_INTRS); 7220 + en_dis_able_nic_intrs(sp, TX_TRAFFIC_INTR, ENABLE_INTRS); 7281 7221 else { 7282 7222 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR; 7283 7223 interruptible |= TX_PIC_INTR; ··· 7675 7615 rx_ring_num = MAX_RX_RINGS; 7676 7616 } 7677 7617 7678 - if (*dev_intr_type != INTA) 7679 - napi = 0; 7680 - 7681 7618 if ((*dev_intr_type != INTA) && (*dev_intr_type != MSI_X)) { 7682 7619 DBG_PRINT(ERR_DBG, "s2io: Wrong intr_type requested. 
" 7683 7620 "Defaulting to INTA\n"); ··· 7975 7918 * will use eth_mac_addr() for dev->set_mac_address 7976 7919 * mac address will be set every time dev->open() is called 7977 7920 */ 7978 - netif_napi_add(dev, &sp->napi, s2io_poll, 32); 7979 - 7980 7921 #ifdef CONFIG_NET_POLL_CONTROLLER 7981 7922 dev->poll_controller = s2io_netpoll; 7982 7923 #endif ··· 8016 7961 ret = -EBADSLT; 8017 7962 goto set_swap_failed; 8018 7963 } 7964 + } 7965 + 7966 + if (sp->config.intr_type == MSI_X) { 7967 + sp->num_entries = config->rx_ring_num + 1; 7968 + ret = s2io_enable_msi_x(sp); 7969 + 7970 + if (!ret) { 7971 + ret = s2io_test_msi(sp); 7972 + /* rollback MSI-X, will re-enable during add_isr() */ 7973 + remove_msix_isr(sp); 7974 + } 7975 + if (ret) { 7976 + 7977 + DBG_PRINT(ERR_DBG, 7978 + "%s: MSI-X requested but failed to enable\n", 7979 + dev->name); 7980 + sp->config.intr_type = INTA; 7981 + } 7982 + } 7983 + 7984 + if (config->intr_type == MSI_X) { 7985 + for (i = 0; i < config->rx_ring_num ; i++) 7986 + netif_napi_add(dev, &mac_control->rings[i].napi, 7987 + s2io_poll_msix, 64); 7988 + } else { 7989 + netif_napi_add(dev, &sp->napi, s2io_poll_inta, 64); 8019 7990 } 8020 7991 8021 7992 /* Not needed for Herc */ ··· 8093 8012 8094 8013 /* store mac addresses from CAM to s2io_nic structure */ 8095 8014 do_s2io_store_unicast_mc(sp); 8015 + 8016 + /* Configure MSIX vector for number of rings configured plus one */ 8017 + if ((sp->device_type == XFRAME_II_DEVICE) && 8018 + (config->intr_type == MSI_X)) 8019 + sp->num_entries = config->rx_ring_num + 1; 8096 8020 8097 8021 /* Store the values of the MSIX table in the s2io_nic structure */ 8098 8022 store_xmsi_data(sp); ··· 8164 8078 break; 8165 8079 } 8166 8080 8167 - if (napi) 8081 + switch (sp->config.napi) { 8082 + case 0: 8083 + DBG_PRINT(ERR_DBG, "%s: NAPI disabled\n", dev->name); 8084 + break; 8085 + case 1: 8168 8086 DBG_PRINT(ERR_DBG, "%s: NAPI enabled\n", dev->name); 8087 + break; 8088 + } 8169 8089 8170 8090 DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, 8171 8091 sp->config.tx_fifo_num);
+15 -7
drivers/net/s2io.h
··· 706 706 /* per-ring buffer counter */ 707 707 u32 rx_bufs_left; 708 708 709 - #define MAX_LRO_SESSIONS 32 709 + #define MAX_LRO_SESSIONS 32 710 710 struct lro lro0_n[MAX_LRO_SESSIONS]; 711 711 u8 lro; 712 712 ··· 724 724 725 725 /* copy of sp->pdev pointer */ 726 726 struct pci_dev *pdev; 727 + 728 + /* Per ring napi struct */ 729 + struct napi_struct napi; 730 + 731 + unsigned long interrupt_count; 727 732 728 733 /* 729 734 * Place holders for the virtual and physical addresses of ··· 846 841 * Structure to keep track of the MSI-X vectors and the corresponding 847 842 * argument registered against each vector 848 843 */ 849 - #define MAX_REQUESTED_MSI_X 17 844 + #define MAX_REQUESTED_MSI_X 9 850 845 struct s2io_msix_entry 851 846 { 852 847 u16 vector; ··· 854 849 void *arg; 855 850 856 851 u8 type; 857 - #define MSIX_FIFO_TYPE 1 858 - #define MSIX_RING_TYPE 2 852 + #define MSIX_ALARM_TYPE 1 853 + #define MSIX_RING_TYPE 2 859 854 860 855 u8 in_use; 861 856 #define MSIX_REGISTERED_SUCCESS 0xAA ··· 882 877 */ 883 878 int pkts_to_process; 884 879 struct net_device *dev; 885 - struct napi_struct napi; 886 880 struct mac_info mac_control; 887 881 struct config_param config; 888 882 struct pci_dev *pdev; ··· 952 948 */ 953 949 u8 other_fifo_idx; 954 950 951 + struct napi_struct napi; 955 952 /* after blink, the adapter must be restored with original 956 953 * values. 957 954 */ ··· 967 962 unsigned long long start_time; 968 963 struct vlan_group *vlgrp; 969 964 #define MSIX_FLG 0xA5 965 + int num_entries; 970 966 struct msix_entry *entries; 971 967 int msi_detected; 972 968 wait_queue_head_t msi_wait; ··· 988 982 u16 lro_max_aggr_per_sess; 989 983 volatile unsigned long state; 990 984 u64 general_int_mask; 985 + 991 986 #define VPD_STRING_LEN 80 992 987 u8 product_name[VPD_STRING_LEN]; 993 988 u8 serial_num[VPD_STRING_LEN]; ··· 1110 1103 static int init_shared_mem(struct s2io_nic *sp); 1111 1104 static void free_shared_mem(struct s2io_nic *sp); 1112 1105 static int init_nic(struct s2io_nic *nic); 1113 - static void rx_intr_handler(struct ring_info *ring_data); 1106 + static int rx_intr_handler(struct ring_info *ring_data, int budget); 1114 1107 static void tx_intr_handler(struct fifo_info *fifo_data); 1115 1108 static void s2io_handle_errors(void * dev_id); 1116 1109 ··· 1121 1114 static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp); 1122 1115 static void s2io_link(struct s2io_nic * sp, int link); 1123 1116 static void s2io_reset(struct s2io_nic * sp); 1124 - static int s2io_poll(struct napi_struct *napi, int budget); 1117 + static int s2io_poll_msix(struct napi_struct *napi, int budget); 1118 + static int s2io_poll_inta(struct napi_struct *napi, int budget); 1125 1119 static void s2io_init_pci(struct s2io_nic * sp); 1126 1120 static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr); 1127 1121 static void s2io_alarm_handle(unsigned long data);
+31 -36
drivers/net/sb1250-mac.c
··· 179 179 #define SBMAC_MAX_TXDESCR 256 180 180 #define SBMAC_MAX_RXDESCR 256 181 181 182 - #define ETHER_ALIGN 2 183 - #define ETHER_ADDR_LEN 6 182 + #define ETHER_ADDR_LEN 6 184 183 #define ENET_PACKET_SIZE 1518 185 184 /*#define ENET_PACKET_SIZE 9216 */ 186 185 ··· 261 262 spinlock_t sbm_lock; /* spin lock */ 262 263 int sbm_devflags; /* current device flags */ 263 264 264 - int sbm_buffersize; 265 - 266 265 /* 267 266 * Controller-specific things 268 267 */ ··· 302 305 static void sbdma_initctx(struct sbmacdma *d, struct sbmac_softc *s, int chan, 303 306 int txrx, int maxdescr); 304 307 static void sbdma_channel_start(struct sbmacdma *d, int rxtx); 305 - static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *m); 308 + static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, 309 + struct sk_buff *m); 306 310 static int sbdma_add_txbuffer(struct sbmacdma *d, struct sk_buff *m); 307 311 static void sbdma_emptyring(struct sbmacdma *d); 308 - static void sbdma_fillring(struct sbmacdma *d); 312 + static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d); 309 313 static int sbdma_rx_process(struct sbmac_softc *sc, struct sbmacdma *d, 310 314 int work_to_do, int poll); 311 315 static void sbdma_tx_process(struct sbmac_softc *sc, struct sbmacdma *d, ··· 775 777 d->sbdma_remptr = NULL; 776 778 } 777 779 778 - static void sbdma_align_skb(struct sk_buff *skb,int power2,int offset) 780 + static inline void sbdma_align_skb(struct sk_buff *skb, 781 + unsigned int power2, unsigned int offset) 779 782 { 780 - unsigned long addr; 781 - unsigned long newaddr; 783 + unsigned char *addr = skb->data; 784 + unsigned char *newaddr = PTR_ALIGN(addr, power2); 782 785 783 - addr = (unsigned long) skb->data; 784 - 785 - newaddr = (addr + power2 - 1) & ~(power2 - 1); 786 - 787 - skb_reserve(skb,newaddr-addr+offset); 786 + skb_reserve(skb, newaddr - addr + offset); 788 787 } 789 788 790 789 ··· 792 797 * this queues a buffer for inbound packets. 793 798 * 794 799 * Input parameters: 795 - * d - DMA channel descriptor 800 + * sc - softc structure 801 + * d - DMA channel descriptor 796 802 * sb - sk_buff to add, or NULL if we should allocate one 797 803 * 798 804 * Return value: ··· 802 806 ********************************************************************* */ 803 807 804 808 805 - static int sbdma_add_rcvbuffer(struct sbmacdma *d, struct sk_buff *sb) 809 + static int sbdma_add_rcvbuffer(struct sbmac_softc *sc, struct sbmacdma *d, 810 + struct sk_buff *sb) 806 811 { 812 + struct net_device *dev = sc->sbm_dev; 807 813 struct sbdmadscr *dsc; 808 814 struct sbdmadscr *nextdsc; 809 815 struct sk_buff *sb_new = NULL; ··· 846 848 */ 847 849 848 850 if (sb == NULL) { 849 - sb_new = dev_alloc_skb(ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN); 851 + sb_new = netdev_alloc_skb(dev, ENET_PACKET_SIZE + 852 + SMP_CACHE_BYTES * 2 + 853 + NET_IP_ALIGN); 850 854 if (sb_new == NULL) { 851 855 pr_info("%s: sk_buff allocation failed\n", 852 856 d->sbdma_eth->sbm_dev->name); 853 857 return -ENOBUFS; 854 858 } 855 859 856 - sbdma_align_skb(sb_new, SMP_CACHE_BYTES, ETHER_ALIGN); 860 + sbdma_align_skb(sb_new, SMP_CACHE_BYTES, NET_IP_ALIGN); 857 861 } 858 862 else { 859 863 sb_new = sb; ··· 874 874 * Do not interrupt per DMA transfer. 
875 875 */ 876 876 dsc->dscr_a = virt_to_phys(sb_new->data) | 877 - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 0; 877 + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 0; 878 878 #else 879 879 dsc->dscr_a = virt_to_phys(sb_new->data) | 880 - V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) | 880 + V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize + NET_IP_ALIGN)) | 881 881 M_DMA_DSCRA_INTERRUPT; 882 882 #endif 883 883 ··· 1032 1032 * with sk_buffs 1033 1033 * 1034 1034 * Input parameters: 1035 - * d - DMA channel 1035 + * sc - softc structure 1036 + * d - DMA channel 1036 1037 * 1037 1038 * Return value: 1038 1039 * nothing 1039 1040 ********************************************************************* */ 1040 1041 1041 - static void sbdma_fillring(struct sbmacdma *d) 1042 + static void sbdma_fillring(struct sbmac_softc *sc, struct sbmacdma *d) 1042 1043 { 1043 1044 int idx; 1044 1045 1045 - for (idx = 0; idx < SBMAC_MAX_RXDESCR-1; idx++) { 1046 - if (sbdma_add_rcvbuffer(d,NULL) != 0) 1046 + for (idx = 0; idx < SBMAC_MAX_RXDESCR - 1; idx++) { 1047 + if (sbdma_add_rcvbuffer(sc, d, NULL) != 0) 1047 1048 break; 1048 1049 } 1049 1050 } ··· 1160 1159 * packet and put it right back on the receive ring. 1161 1160 */ 1162 1161 1163 - if (unlikely (sbdma_add_rcvbuffer(d,NULL) == 1164 - -ENOBUFS)) { 1162 + if (unlikely(sbdma_add_rcvbuffer(sc, d, NULL) == 1163 + -ENOBUFS)) { 1165 1164 dev->stats.rx_dropped++; 1166 - sbdma_add_rcvbuffer(d,sb); /* re-add old buffer */ 1165 + /* Re-add old buffer */ 1166 + sbdma_add_rcvbuffer(sc, d, sb); 1167 1167 /* No point in continuing at the moment */ 1168 1168 printk(KERN_ERR "dropped packet (1)\n"); 1169 1169 d->sbdma_remptr = SBDMA_NEXTBUF(d,sbdma_remptr); ··· 1214 1212 * put it back on the receive ring. 1215 1213 */ 1216 1214 dev->stats.rx_errors++; 1217 - sbdma_add_rcvbuffer(d,sb); 1215 + sbdma_add_rcvbuffer(sc, d, sb); 1218 1216 } 1219 1217 1220 1218 ··· 1572 1570 * Fill the receive ring 1573 1571 */ 1574 1572 1575 - sbdma_fillring(&(s->sbm_rxdma)); 1573 + sbdma_fillring(s, &(s->sbm_rxdma)); 1576 1574 1577 1575 /* 1578 1576 * Turn on the rest of the bits in the enable register ··· 2313 2311 for (i = 0; i < 6; i++) { 2314 2312 dev->dev_addr[i] = eaddr[i]; 2315 2313 } 2316 - 2317 - 2318 - /* 2319 - * Init packet size 2320 - */ 2321 - 2322 - sc->sbm_buffersize = ENET_PACKET_SIZE + SMP_CACHE_BYTES * 2 + ETHER_ALIGN; 2323 2314 2324 2315 /* 2325 2316 * Initialize context (get pointers to registers and stuff), then
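sb1250-mac replaces open-coded pointer arithmetic with PTR_ALIGN() and the generic NET_IP_ALIGN constant when reserving headroom on freshly allocated RX buffers. Roughly, under the assumption of a hypothetical helper:

#include <linux/cache.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch: allocate an RX buffer, align the payload to a cache line and
 * then offset by NET_IP_ALIGN so the IP header is naturally aligned. */
static struct sk_buff *my_alloc_rx_skb(struct net_device *dev, unsigned int pktsize)
{
	struct sk_buff *skb;
	unsigned char *aligned;

	skb = netdev_alloc_skb(dev, pktsize + SMP_CACHE_BYTES * 2 + NET_IP_ALIGN);
	if (skb == NULL)
		return NULL;

	aligned = PTR_ALIGN(skb->data, SMP_CACHE_BYTES);
	skb_reserve(skb, aligned - skb->data + NET_IP_ALIGN);
	return skb;
}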
+5 -3
drivers/net/sc92031.c
··· 953 953 unsigned entry; 954 954 u32 tx_status; 955 955 956 - if (skb_padto(skb, ETH_ZLEN)) 957 - return NETDEV_TX_OK; 958 - 959 956 if (unlikely(skb->len > TX_BUF_SIZE)) { 960 957 dev->stats.tx_dropped++; 961 958 goto out; ··· 972 975 skb_copy_and_csum_dev(skb, priv->tx_bufs + entry * TX_BUF_SIZE); 973 976 974 977 len = skb->len; 978 + if (unlikely(len < ETH_ZLEN)) { 979 + memset(priv->tx_bufs + entry * TX_BUF_SIZE + len, 980 + 0, ETH_ZLEN - len); 981 + len = ETH_ZLEN; 982 + } 975 983 976 984 wmb(); 977 985
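Because sc92031 copies the frame into a private DMA buffer anyway, the fix drops skb_padto() (which may reallocate and fail) and simply zero-pads the copy in place up to ETH_ZLEN. In isolation the padding step is just the following; my_pad_short_frame is an illustrative helper.

#include <linux/if_ether.h>
#include <linux/string.h>
#include <linux/types.h>

/* Sketch: pad a short frame inside a TX buffer the driver has already
 * copied the skb into, instead of reallocating the skb with skb_padto(). */
static unsigned int my_pad_short_frame(u8 *tx_buf, unsigned int len)
{
	if (len < ETH_ZLEN) {		/* minimum on-wire frame, FCS excluded */
		memset(tx_buf + len, 0, ETH_ZLEN - len);
		len = ETH_ZLEN;
	}
	return len;
}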
+2 -5
drivers/net/sfc/bitfield.h
··· 483 483 #endif 484 484 485 485 #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ 486 - if (FALCON_REV(efx) >= FALCON_REV_B0) { \ 486 + if (falcon_rev(efx) >= FALCON_REV_B0) { \ 487 487 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ 488 488 } else { \ 489 489 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ ··· 491 491 } while (0) 492 492 493 493 #define EFX_QWORD_FIELD_VER(efx, qword, field) \ 494 - (FALCON_REV(efx) >= FALCON_REV_B0 ? \ 494 + (falcon_rev(efx) >= FALCON_REV_B0 ? \ 495 495 EFX_QWORD_FIELD((qword), field##_B0) : \ 496 496 EFX_QWORD_FIELD((qword), field##_A1)) 497 497 ··· 501 501 #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) 502 502 #define EFX_DMA_TYPE_WIDTH(width) \ 503 503 (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) 504 - #define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \ 505 - ~((u64) 0) : ~((u32) 0)) 506 - #define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK) 507 504 508 505 #endif /* EFX_BITFIELD_H */
+3 -6
drivers/net/sfc/boards.c
··· 27 27 struct efx_blinker *bl = &efx->board_info.blinker; 28 28 efx->board_info.set_fault_led(efx, bl->state); 29 29 bl->state = !bl->state; 30 - if (bl->resubmit) { 31 - bl->timer.expires = jiffies + BLINK_INTERVAL; 32 - add_timer(&bl->timer); 33 - } 30 + if (bl->resubmit) 31 + mod_timer(&bl->timer, jiffies + BLINK_INTERVAL); 34 32 } 35 33 36 34 static void board_blink(struct efx_nic *efx, int blink) ··· 42 44 blinker->state = 0; 43 45 setup_timer(&blinker->timer, blink_led_timer, 44 46 (unsigned long)efx); 45 - blinker->timer.expires = jiffies + BLINK_INTERVAL; 46 - add_timer(&blinker->timer); 47 + mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL); 47 48 } else { 48 49 blinker->resubmit = 0; 49 50 if (blinker->timer.function)
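The boards.c hunk replaces the open-coded "set timer.expires, then add_timer()" pattern with mod_timer(), which safely (re)arms a timer whether or not it is already pending. A small illustrative sketch of a periodic callback using that pattern (names and the interval are assumptions, not the driver's):

#include <linux/timer.h>
#include <linux/jiffies.h>

#define BLINK_PERIOD (HZ / 3)   /* illustrative interval */

static struct timer_list blink_timer;

static void blink_timer_fn(unsigned long data)
{
        /* ... toggle the LED here ... */

        /* Re-arm: mod_timer() handles both pending and expired timers. */
        mod_timer(&blink_timer, jiffies + BLINK_PERIOD);
}

static void blink_start(unsigned long data)
{
        setup_timer(&blink_timer, blink_timer_fn, data);
        mod_timer(&blink_timer, jiffies + BLINK_PERIOD);
}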
+40 -44
drivers/net/sfc/efx.c
··· 199 199 */ 200 200 static inline void efx_channel_processed(struct efx_channel *channel) 201 201 { 202 - /* Write to EVQ_RPTR_REG. If a new event arrived in a race 203 - * with finishing processing, a new interrupt will be raised. 204 - */ 202 + /* The interrupt handler for this channel may set work_pending 203 + * as soon as we acknowledge the events we've seen. Make sure 204 + * it's cleared before then. */ 205 205 channel->work_pending = 0; 206 - smp_wmb(); /* Ensure channel updated before any new interrupt. */ 206 + smp_wmb(); 207 + 207 208 falcon_eventq_read_ack(channel); 208 209 } 209 210 ··· 266 265 napi_disable(&channel->napi_str); 267 266 268 267 /* Poll the channel */ 269 - (void) efx_process_channel(channel, efx->type->evq_size); 268 + efx_process_channel(channel, efx->type->evq_size); 270 269 271 270 /* Ack the eventq. This may cause an interrupt to be generated 272 271 * when they are reenabled */ ··· 318 317 * 319 318 *************************************************************************/ 320 319 321 - /* Setup per-NIC RX buffer parameters. 322 - * Calculate the rx buffer allocation parameters required to support 323 - * the current MTU, including padding for header alignment and overruns. 324 - */ 325 - static void efx_calc_rx_buffer_params(struct efx_nic *efx) 326 - { 327 - unsigned int order, len; 328 - 329 - len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 330 - EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 331 - efx->type->rx_buffer_padding); 332 - 333 - /* Calculate page-order */ 334 - for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order) 335 - ; 336 - 337 - efx->rx_buffer_len = len; 338 - efx->rx_buffer_order = order; 339 - } 340 - 341 320 static int efx_probe_channel(struct efx_channel *channel) 342 321 { 343 322 struct efx_tx_queue *tx_queue; ··· 368 387 struct efx_channel *channel; 369 388 int rc = 0; 370 389 371 - efx_calc_rx_buffer_params(efx); 390 + /* Calculate the rx buffer allocation parameters required to 391 + * support the current MTU, including padding for header 392 + * alignment and overruns. 393 + */ 394 + efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + 395 + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + 396 + efx->type->rx_buffer_padding); 397 + efx->rx_buffer_order = get_order(efx->rx_buffer_len); 372 398 373 399 /* Initialise the channels */ 374 400 efx_for_each_channel(channel, efx) { ··· 428 440 netif_napi_add(channel->napi_dev, &channel->napi_str, 429 441 efx_poll, napi_weight); 430 442 443 + /* The interrupt handler for this channel may set work_pending 444 + * as soon as we enable it. Make sure it's cleared before 445 + * then. Similarly, make sure it sees the enabled flag set. 
*/ 431 446 channel->work_pending = 0; 432 447 channel->enabled = 1; 433 - smp_wmb(); /* ensure channel updated before first interrupt */ 448 + smp_wmb(); 434 449 435 450 napi_enable(&channel->napi_str); 436 451 ··· 695 704 mutex_unlock(&efx->mac_lock); 696 705 697 706 /* Serialise against efx_set_multicast_list() */ 698 - if (NET_DEV_REGISTERED(efx)) { 707 + if (efx_dev_registered(efx)) { 699 708 netif_tx_lock_bh(efx->net_dev); 700 709 netif_tx_unlock_bh(efx->net_dev); 701 710 } ··· 782 791 efx->membase = ioremap_nocache(efx->membase_phys, 783 792 efx->type->mem_map_size); 784 793 if (!efx->membase) { 785 - EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", 786 - efx->type->mem_bar, efx->membase_phys, 794 + EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", 795 + efx->type->mem_bar, 796 + (unsigned long long)efx->membase_phys, 787 797 efx->type->mem_map_size); 788 798 rc = -ENOMEM; 789 799 goto fail4; 790 800 } 791 - EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", 792 - efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, 793 - efx->membase); 801 + EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", 802 + efx->type->mem_bar, (unsigned long long)efx->membase_phys, 803 + efx->type->mem_map_size, efx->membase); 794 804 795 805 return 0; 796 806 797 807 fail4: 798 808 release_mem_region(efx->membase_phys, efx->type->mem_map_size); 799 809 fail3: 800 - efx->membase_phys = 0UL; 810 + efx->membase_phys = 0; 801 811 fail2: 802 812 pci_disable_device(efx->pci_dev); 803 813 fail1: ··· 816 824 817 825 if (efx->membase_phys) { 818 826 pci_release_region(efx->pci_dev, efx->type->mem_bar); 819 - efx->membase_phys = 0UL; 827 + efx->membase_phys = 0; 820 828 } 821 829 822 830 pci_disable_device(efx->pci_dev); ··· 1035 1043 return; 1036 1044 if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) 1037 1045 return; 1038 - if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) 1046 + if (efx_dev_registered(efx) && !netif_running(efx->net_dev)) 1039 1047 return; 1040 1048 1041 1049 /* Mark the port as enabled so port reconfigurations can start, then ··· 1065 1073 cancel_delayed_work_sync(&efx->monitor_work); 1066 1074 1067 1075 /* Ensure that all RX slow refills are complete. */ 1068 - efx_for_each_rx_queue(rx_queue, efx) { 1076 + efx_for_each_rx_queue(rx_queue, efx) 1069 1077 cancel_delayed_work_sync(&rx_queue->work); 1070 - } 1071 1078 1072 1079 /* Stop scheduled port reconfigurations */ 1073 1080 cancel_work_sync(&efx->reconfigure_work); ··· 1092 1101 falcon_disable_interrupts(efx); 1093 1102 if (efx->legacy_irq) 1094 1103 synchronize_irq(efx->legacy_irq); 1095 - efx_for_each_channel_with_interrupt(channel, efx) 1104 + efx_for_each_channel_with_interrupt(channel, efx) { 1096 1105 if (channel->irq) 1097 1106 synchronize_irq(channel->irq); 1107 + } 1098 1108 1099 1109 /* Stop all NAPI processing and synchronous rx refills */ 1100 1110 efx_for_each_channel(channel, efx) ··· 1117 1125 /* Stop the kernel transmit interface late, so the watchdog 1118 1126 * timer isn't ticking over the flush */ 1119 1127 efx_stop_queue(efx); 1120 - if (NET_DEV_REGISTERED(efx)) { 1128 + if (efx_dev_registered(efx)) { 1121 1129 netif_tx_lock_bh(efx->net_dev); 1122 1130 netif_tx_unlock_bh(efx->net_dev); 1123 1131 } ··· 1336 1344 return 0; 1337 1345 } 1338 1346 1339 - /* Context: process, dev_base_lock held, non-blocking. */ 1347 + /* Context: process, dev_base_lock or RTNL held, non-blocking. 
*/ 1340 1348 static struct net_device_stats *efx_net_stats(struct net_device *net_dev) 1341 1349 { 1342 1350 struct efx_nic *efx = net_dev->priv; 1343 1351 struct efx_mac_stats *mac_stats = &efx->mac_stats; 1344 1352 struct net_device_stats *stats = &net_dev->stats; 1345 1353 1354 + /* Update stats if possible, but do not wait if another thread 1355 + * is updating them (or resetting the NIC); slightly stale 1356 + * stats are acceptable. 1357 + */ 1346 1358 if (!spin_trylock(&efx->stats_lock)) 1347 1359 return stats; 1348 1360 if (efx->state == STATE_RUNNING) { ··· 1490 1494 static int efx_netdev_event(struct notifier_block *this, 1491 1495 unsigned long event, void *ptr) 1492 1496 { 1493 - struct net_device *net_dev = (struct net_device *)ptr; 1497 + struct net_device *net_dev = ptr; 1494 1498 1495 1499 if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { 1496 1500 struct efx_nic *efx = net_dev->priv; ··· 1559 1563 efx_for_each_tx_queue(tx_queue, efx) 1560 1564 efx_release_tx_buffers(tx_queue); 1561 1565 1562 - if (NET_DEV_REGISTERED(efx)) { 1566 + if (efx_dev_registered(efx)) { 1563 1567 strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); 1564 1568 unregister_netdev(efx->net_dev); 1565 1569 } ··· 1684 1688 if (method == RESET_TYPE_DISABLE) { 1685 1689 /* Reinitialise the device anyway so the driver unload sequence 1686 1690 * can talk to the external SRAM */ 1687 - (void) falcon_init_nic(efx); 1691 + falcon_init_nic(efx); 1688 1692 rc = -EIO; 1689 1693 goto fail4; 1690 1694 }
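Among the efx.c changes above, the RX buffer sizing is folded inline and now uses get_order() instead of an open-coded power-of-two loop. A simplified sketch of that calculation (frame_overhead stands in for the driver's alignment and overrun padding terms; the real driver uses its own macros):

#include <linux/mm.h>           /* get_order() */
#include <linux/if_ether.h>     /* ETH_HLEN */
#include <linux/skbuff.h>       /* NET_IP_ALIGN */

/* Size one RX buffer for a given MTU and work out the page order it
 * needs.  get_order() replaces the hand-rolled "while too small,
 * double the allocation" loop. */
static void calc_rx_buffer_params(unsigned int mtu, unsigned int frame_overhead,
                                  unsigned int *buf_len, unsigned int *order)
{
        *buf_len = NET_IP_ALIGN + ETH_HLEN + mtu + frame_overhead;
        *order = get_order(*buf_len);
}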
+38 -49
drivers/net/sfc/falcon.c
··· 116 116 ************************************************************************** 117 117 */ 118 118 119 - /* DMA address mask (up to 46-bit, avoiding compiler warnings) 120 - * 121 - * Note that it is possible to have a platform with 64-bit longs and 122 - * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the 123 - * platform DMA mask. 124 - */ 125 - #if BITS_PER_LONG == 64 126 - #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL) 127 - #else 128 - #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL) 129 - #endif 119 + /* DMA address mask */ 120 + #define FALCON_DMA_MASK DMA_BIT_MASK(46) 130 121 131 122 /* TX DMA length mask (13-bit) */ 132 123 #define FALCON_TX_DMA_MASK (4096 - 1) ··· 136 145 #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 137 146 138 147 #define FALCON_IS_DUAL_FUNC(efx) \ 139 - (FALCON_REV(efx) < FALCON_REV_B0) 148 + (falcon_rev(efx) < FALCON_REV_B0) 140 149 141 150 /************************************************************************** 142 151 * ··· 456 465 TX_DESCQ_TYPE, 0, 457 466 TX_NON_IP_DROP_DIS_B0, 1); 458 467 459 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 468 + if (falcon_rev(efx) >= FALCON_REV_B0) { 460 469 int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); 461 470 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); 462 471 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); ··· 465 474 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 466 475 tx_queue->queue); 467 476 468 - if (FALCON_REV(efx) < FALCON_REV_B0) { 477 + if (falcon_rev(efx) < FALCON_REV_B0) { 469 478 efx_oword_t reg; 470 479 471 480 BUG_ON(tx_queue->queue >= 128); /* HW limit */ ··· 626 635 efx_oword_t rx_desc_ptr; 627 636 struct efx_nic *efx = rx_queue->efx; 628 637 int rc; 629 - int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; 638 + int is_b0 = falcon_rev(efx) >= FALCON_REV_B0; 630 639 int iscsi_digest_en = is_b0; 631 640 632 641 EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", ··· 813 822 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 814 823 tx_queue = &efx->tx_queue[tx_ev_q_label]; 815 824 816 - if (NET_DEV_REGISTERED(efx)) 825 + if (efx_dev_registered(efx)) 817 826 netif_tx_lock(efx->net_dev); 818 827 falcon_notify_tx_desc(tx_queue); 819 - if (NET_DEV_REGISTERED(efx)) 828 + if (efx_dev_registered(efx)) 820 829 netif_tx_unlock(efx->net_dev); 821 830 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && 822 831 EFX_WORKAROUND_10727(efx)) { ··· 875 884 RX_EV_TCP_UDP_CHKSUM_ERR); 876 885 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); 877 886 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); 878 - rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? 887 + rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? 
879 888 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 880 889 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); 881 890 ··· 1056 1065 EFX_QWORD_FIELD(*event, XG_PHY_INTR)) 1057 1066 is_phy_event = 1; 1058 1067 1059 - if ((FALCON_REV(efx) >= FALCON_REV_B0) && 1068 + if ((falcon_rev(efx) >= FALCON_REV_B0) && 1060 1069 EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) 1061 1070 is_phy_event = 1; 1062 1071 ··· 1396 1405 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) 1397 1406 { 1398 1407 struct falcon_nic_data *nic_data = efx->nic_data; 1399 - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1408 + efx_oword_t *int_ker = efx->irq_status.addr; 1400 1409 efx_oword_t fatal_intr; 1401 1410 int error, mem_perr; 1402 1411 static int n_int_errors; ··· 1442 1451 */ 1443 1452 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) 1444 1453 { 1445 - struct efx_nic *efx = (struct efx_nic *)dev_id; 1446 - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1454 + struct efx_nic *efx = dev_id; 1455 + efx_oword_t *int_ker = efx->irq_status.addr; 1447 1456 struct efx_channel *channel; 1448 1457 efx_dword_t reg; 1449 1458 u32 queues; ··· 1480 1489 1481 1490 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) 1482 1491 { 1483 - struct efx_nic *efx = (struct efx_nic *)dev_id; 1484 - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1492 + struct efx_nic *efx = dev_id; 1493 + efx_oword_t *int_ker = efx->irq_status.addr; 1485 1494 struct efx_channel *channel; 1486 1495 int syserr; 1487 1496 int queues; ··· 1533 1542 */ 1534 1543 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) 1535 1544 { 1536 - struct efx_channel *channel = (struct efx_channel *)dev_id; 1545 + struct efx_channel *channel = dev_id; 1537 1546 struct efx_nic *efx = channel->efx; 1538 - efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; 1547 + efx_oword_t *int_ker = efx->irq_status.addr; 1539 1548 int syserr; 1540 1549 1541 1550 efx->last_irq_cpu = raw_smp_processor_id(); ··· 1563 1572 unsigned long offset; 1564 1573 efx_dword_t dword; 1565 1574 1566 - if (FALCON_REV(efx) < FALCON_REV_B0) 1575 + if (falcon_rev(efx) < FALCON_REV_B0) 1567 1576 return; 1568 1577 1569 1578 for (offset = RX_RSS_INDIR_TBL_B0; ··· 1586 1595 1587 1596 if (!EFX_INT_MODE_USE_MSI(efx)) { 1588 1597 irq_handler_t handler; 1589 - if (FALCON_REV(efx) >= FALCON_REV_B0) 1598 + if (falcon_rev(efx) >= FALCON_REV_B0) 1590 1599 handler = falcon_legacy_interrupt_b0; 1591 1600 else 1592 1601 handler = falcon_legacy_interrupt_a1; ··· 1627 1636 efx_oword_t reg; 1628 1637 1629 1638 /* Disable MSI/MSI-X interrupts */ 1630 - efx_for_each_channel_with_interrupt(channel, efx) 1639 + efx_for_each_channel_with_interrupt(channel, efx) { 1631 1640 if (channel->irq) 1632 1641 free_irq(channel->irq, channel); 1642 + } 1633 1643 1634 1644 /* ACK legacy interrupt */ 1635 - if (FALCON_REV(efx) >= FALCON_REV_B0) 1645 + if (falcon_rev(efx) >= FALCON_REV_B0) 1636 1646 falcon_read(efx, &reg, INT_ISR0_B0); 1637 1647 else 1638 1648 falcon_irq_ack_a1(efx); ··· 1724 1732 efx_oword_t temp; 1725 1733 int count; 1726 1734 1727 - if ((FALCON_REV(efx) < FALCON_REV_B0) || 1735 + if ((falcon_rev(efx) < FALCON_REV_B0) || 1728 1736 (efx->loopback_mode != LOOPBACK_NONE)) 1729 1737 return; 1730 1738 ··· 1777 1785 { 1778 1786 efx_oword_t temp; 1779 1787 1780 - if (FALCON_REV(efx) < FALCON_REV_B0) 1788 + if (falcon_rev(efx) < FALCON_REV_B0) 1781 1789 return; 1782 1790 1783 1791 /* Isolate the MAC -> RX */ ··· 1815 1823 
MAC_SPEED, link_speed); 1816 1824 /* On B0, MAC backpressure can be disabled and packets get 1817 1825 * discarded. */ 1818 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 1826 + if (falcon_rev(efx) >= FALCON_REV_B0) { 1819 1827 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1820 1828 !efx->link_up); 1821 1829 } ··· 1833 1841 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1834 1842 1835 1843 /* Unisolate the MAC -> RX */ 1836 - if (FALCON_REV(efx) >= FALCON_REV_B0) 1844 + if (falcon_rev(efx) >= FALCON_REV_B0) 1837 1845 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 1838 1846 falcon_write(efx, &reg, RX_CFG_REG_KER); 1839 1847 } ··· 1848 1856 return 0; 1849 1857 1850 1858 /* Statistics fetch will fail if the MAC is in TX drain */ 1851 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 1859 + if (falcon_rev(efx) >= FALCON_REV_B0) { 1852 1860 efx_oword_t temp; 1853 1861 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 1854 1862 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) ··· 1932 1940 static void falcon_mdio_write(struct net_device *net_dev, int phy_id, 1933 1941 int addr, int value) 1934 1942 { 1935 - struct efx_nic *efx = (struct efx_nic *)net_dev->priv; 1943 + struct efx_nic *efx = net_dev->priv; 1936 1944 unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; 1937 1945 efx_oword_t reg; 1938 1946 ··· 2000 2008 * could be read, -1 will be returned. */ 2001 2009 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) 2002 2010 { 2003 - struct efx_nic *efx = (struct efx_nic *)net_dev->priv; 2011 + struct efx_nic *efx = net_dev->priv; 2004 2012 unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; 2005 2013 efx_oword_t reg; 2006 2014 int value = -1; ··· 2105 2113 falcon_init_mdio(&efx->mii); 2106 2114 2107 2115 /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ 2108 - if (FALCON_REV(efx) >= FALCON_REV_B0) 2116 + if (falcon_rev(efx) >= FALCON_REV_B0) 2109 2117 efx->flow_control = EFX_FC_RX | EFX_FC_TX; 2110 2118 else 2111 2119 efx->flow_control = EFX_FC_RX; ··· 2365 2373 return -ENODEV; 2366 2374 } 2367 2375 2368 - switch (FALCON_REV(efx)) { 2376 + switch (falcon_rev(efx)) { 2369 2377 case FALCON_REV_A0: 2370 2378 case 0xff: 2371 2379 EFX_ERR(efx, "Falcon rev A0 not supported\n"); ··· 2391 2399 break; 2392 2400 2393 2401 default: 2394 - EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); 2402 + EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx)); 2395 2403 return -ENODEV; 2396 2404 } 2397 2405 ··· 2411 2419 2412 2420 /* Allocate storage for hardware specific data */ 2413 2421 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 2414 - efx->nic_data = (void *) nic_data; 2422 + efx->nic_data = nic_data; 2415 2423 2416 2424 /* Determine number of ports etc. */ 2417 2425 rc = falcon_probe_nic_variant(efx); ··· 2481 2489 */ 2482 2490 int falcon_init_nic(struct efx_nic *efx) 2483 2491 { 2484 - struct falcon_nic_data *data; 2485 2492 efx_oword_t temp; 2486 2493 unsigned thresh; 2487 2494 int rc; 2488 - 2489 - data = (struct falcon_nic_data *)efx->nic_data; 2490 2495 2491 2496 /* Set up the address region register. This is only needed 2492 2497 * for the B0 FPGA, but since we are just pushing in the ··· 2551 2562 2552 2563 /* Set number of RSS queues for receive path. 
*/ 2553 2564 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 2554 - if (FALCON_REV(efx) >= FALCON_REV_B0) 2565 + if (falcon_rev(efx) >= FALCON_REV_B0) 2555 2566 EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); 2556 2567 else 2557 2568 EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); ··· 2589 2600 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 2590 2601 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); 2591 2602 /* Squash TX of packets of 16 bytes or less */ 2592 - if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 2603 + if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 2593 2604 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); 2594 2605 falcon_write(efx, &temp, TX_CFG2_REG_KER); 2595 2606 ··· 2606 2617 if (EFX_WORKAROUND_7575(efx)) 2607 2618 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, 2608 2619 (3 * 4096) / 32); 2609 - if (FALCON_REV(efx) >= FALCON_REV_B0) 2620 + if (falcon_rev(efx) >= FALCON_REV_B0) 2610 2621 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); 2611 2622 2612 2623 /* RX FIFO flow control thresholds */ ··· 2622 2633 falcon_write(efx, &temp, RX_CFG_REG_KER); 2623 2634 2624 2635 /* Set destination of both TX and RX Flush events */ 2625 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 2636 + if (falcon_rev(efx) >= FALCON_REV_B0) { 2626 2637 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 2627 2638 falcon_write(efx, &temp, DP_CTRL_REG); 2628 2639 } ··· 2636 2647 2637 2648 falcon_free_buffer(efx, &efx->irq_status); 2638 2649 2639 - (void) falcon_reset_hw(efx, RESET_TYPE_ALL); 2650 + falcon_reset_hw(efx, RESET_TYPE_ALL); 2640 2651 2641 2652 /* Release the second function after the reset */ 2642 2653 if (nic_data->pci_dev2) {
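Among the falcon.c changes, the hand-written 46-bit DMA mask constants (with their BITS_PER_LONG special cases) give way to DMA_BIT_MASK(46), which expands to a u64 and needs no ifdefs. A minimal sketch of applying such a mask to a PCI device (the probe helper is illustrative; the 46-bit width is Falcon's):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

/* Tell the DMA API how many address bits the device can drive. */
static int example_set_dma_mask(struct pci_dev *pdev)
{
        int rc;

        rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(46));
        if (rc == 0)
                rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(46));
        return rc;
}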
+4 -1
drivers/net/sfc/falcon.h
··· 23 23 FALCON_REV_B0 = 2, 24 24 }; 25 25 26 - #define FALCON_REV(efx) ((efx)->pci_dev->revision) 26 + static inline int falcon_rev(struct efx_nic *efx) 27 + { 28 + return efx->pci_dev->revision; 29 + } 27 30 28 31 extern struct efx_nic_type falcon_a_nic_type; 29 32 extern struct efx_nic_type falcon_b_nic_type;
+2 -2
drivers/net/sfc/falcon_hwdefs.h
··· 1125 1125 u8 port1_phy_type; 1126 1126 __le16 asic_sub_revision; 1127 1127 __le16 board_revision; 1128 - } __attribute__ ((packed)); 1128 + } __packed; 1129 1129 1130 1130 #define NVCONFIG_BASE 0x300 1131 1131 #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C ··· 1144 1144 __le16 board_struct_ver; 1145 1145 __le16 board_checksum; 1146 1146 struct falcon_nvconfig_board_v2 board_v2; 1147 - } __attribute__ ((packed)); 1147 + } __packed; 1148 1148 1149 1149 #endif /* EFX_FALCON_HWDEFS_H */
+21 -8
drivers/net/sfc/falcon_io.h
··· 56 56 #define FALCON_USE_QWORD_IO 1 57 57 #endif 58 58 59 - #define _falcon_writeq(efx, value, reg) \ 60 - __raw_writeq((__force u64) (value), (efx)->membase + (reg)) 61 - #define _falcon_writel(efx, value, reg) \ 62 - __raw_writel((__force u32) (value), (efx)->membase + (reg)) 63 - #define _falcon_readq(efx, reg) \ 64 - ((__force __le64) __raw_readq((efx)->membase + (reg))) 65 - #define _falcon_readl(efx, reg) \ 66 - ((__force __le32) __raw_readl((efx)->membase + (reg))) 59 + #ifdef FALCON_USE_QWORD_IO 60 + static inline void _falcon_writeq(struct efx_nic *efx, __le64 value, 61 + unsigned int reg) 62 + { 63 + __raw_writeq((__force u64)value, efx->membase + reg); 64 + } 65 + static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg) 66 + { 67 + return (__force __le64)__raw_readq(efx->membase + reg); 68 + } 69 + #endif 70 + 71 + static inline void _falcon_writel(struct efx_nic *efx, __le32 value, 72 + unsigned int reg) 73 + { 74 + __raw_writel((__force u32)value, efx->membase + reg); 75 + } 76 + static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg) 77 + { 78 + return (__force __le32)__raw_readl(efx->membase + reg); 79 + } 67 80 68 81 /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ 69 82 static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
+5 -5
drivers/net/sfc/falcon_xmac.c
··· 221 221 { 222 222 efx_dword_t reg; 223 223 224 - if (FALCON_REV(efx) < FALCON_REV_B0) 224 + if (falcon_rev(efx) < FALCON_REV_B0) 225 225 return 1; 226 226 227 227 /* The ISR latches, so clear it and re-read */ ··· 241 241 { 242 242 efx_dword_t reg; 243 243 244 - if ((FALCON_REV(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 244 + if ((falcon_rev(efx) < FALCON_REV_B0) || LOOPBACK_INTERNAL(efx)) 245 245 return; 246 246 247 247 /* Flush the ISR */ ··· 454 454 455 455 EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", 456 456 __func__, tries); 457 - (void) falcon_reset_xaui(efx); 457 + falcon_reset_xaui(efx); 458 458 udelay(200); 459 459 tries--; 460 460 } ··· 572 572 xaui_link_ok = falcon_xaui_link_ok(efx); 573 573 574 574 if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) 575 - (void) falcon_reset_xaui(efx); 575 + falcon_reset_xaui(efx); 576 576 577 577 /* Call the PHY check_hw routine */ 578 578 rc = efx->phy_op->check_hw(efx); ··· 639 639 reset = ((flow_control & EFX_FC_TX) && 640 640 !(efx->flow_control & EFX_FC_TX)); 641 641 if (EFX_WORKAROUND_11482(efx) && reset) { 642 - if (FALCON_REV(efx) >= FALCON_REV_B0) { 642 + if (falcon_rev(efx) >= FALCON_REV_B0) { 643 643 /* Recover by resetting the EM block */ 644 644 if (efx->link_up) 645 645 falcon_drain_tx_fifo(efx);
+22 -22
drivers/net/sfc/net_driver.h
··· 42 42 #ifndef EFX_DRIVER_NAME 43 43 #define EFX_DRIVER_NAME "sfc" 44 44 #endif 45 - #define EFX_DRIVER_VERSION "2.2.0136" 45 + #define EFX_DRIVER_VERSION "2.2" 46 46 47 47 #ifdef EFX_ENABLE_DEBUG 48 48 #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) ··· 52 52 #define EFX_WARN_ON_PARANOID(x) do {} while (0) 53 53 #endif 54 54 55 - #define NET_DEV_REGISTERED(efx) \ 56 - ((efx)->net_dev->reg_state == NETREG_REGISTERED) 57 - 58 - /* Include net device name in log messages if it has been registered. 59 - * Use efx->name not efx->net_dev->name so that races with (un)registration 60 - * are harmless. 61 - */ 62 - #define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "") 63 - 64 55 /* Un-rate-limited logging */ 65 56 #define EFX_ERR(efx, fmt, args...) \ 66 - dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) 57 + dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, efx_dev_name(efx), ##args) 67 58 68 59 #define EFX_INFO(efx, fmt, args...) \ 69 - dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) 60 + dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, efx_dev_name(efx), ##args) 70 61 71 62 #ifdef EFX_ENABLE_DEBUG 72 63 #define EFX_LOG(efx, fmt, args...) \ 73 - dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) 64 + dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) 74 65 #else 75 66 #define EFX_LOG(efx, fmt, args...) \ 76 - dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) 67 + dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, efx_dev_name(efx), ##args) 77 68 #endif 78 69 79 70 #define EFX_TRACE(efx, fmt, args...) do {} while (0) ··· 80 89 81 90 #define EFX_LOG_RL(efx, fmt, args...) \ 82 91 do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) 83 - 84 - /* Kernel headers may redefine inline anyway */ 85 - #ifndef inline 86 - #define inline inline __attribute__ ((always_inline)) 87 - #endif 88 92 89 93 /************************************************************************** 90 94 * ··· 681 695 struct workqueue_struct *workqueue; 682 696 struct work_struct reset_work; 683 697 struct delayed_work monitor_work; 684 - unsigned long membase_phys; 698 + resource_size_t membase_phys; 685 699 void __iomem *membase; 686 700 spinlock_t biu_lock; 687 701 enum efx_int_mode interrupt_mode; ··· 705 719 706 720 unsigned n_rx_nodesc_drop_cnt; 707 721 708 - void *nic_data; 722 + struct falcon_nic_data *nic_data; 709 723 710 724 struct mutex mac_lock; 711 725 int port_enabled; ··· 746 760 void *loopback_selftest; 747 761 }; 748 762 763 + static inline int efx_dev_registered(struct efx_nic *efx) 764 + { 765 + return efx->net_dev->reg_state == NETREG_REGISTERED; 766 + } 767 + 768 + /* Net device name, for inclusion in log messages if it has been registered. 769 + * Use efx->name not efx->net_dev->name so that races with (un)registration 770 + * are harmless. 771 + */ 772 + static inline const char *efx_dev_name(struct efx_nic *efx) 773 + { 774 + return efx_dev_registered(efx) ? efx->name : ""; 775 + } 776 + 749 777 /** 750 778 * struct efx_nic_type - Efx device type definition 751 779 * @mem_bar: Memory BAR number ··· 795 795 unsigned int txd_ring_mask; 796 796 unsigned int rxd_ring_mask; 797 797 unsigned int evq_size; 798 - dma_addr_t max_dma_mask; 798 + u64 max_dma_mask; 799 799 unsigned int tx_dma_mask; 800 800 unsigned bug5391_mask; 801 801
+27 -21
drivers/net/sfc/rx.c
··· 86 86 */ 87 87 #define EFX_RXD_HEAD_ROOM 2 88 88 89 - /* Macros for zero-order pages (potentially) containing multiple RX buffers */ 90 - #define RX_DATA_OFFSET(_data) \ 91 - (((unsigned long) (_data)) & (PAGE_SIZE-1)) 92 - #define RX_BUF_OFFSET(_rx_buf) \ 93 - RX_DATA_OFFSET((_rx_buf)->data) 94 - 95 - #define RX_PAGE_SIZE(_efx) \ 96 - (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) 89 + static inline unsigned int efx_rx_buf_offset(struct efx_rx_buffer *buf) 90 + { 91 + /* Offset is always within one page, so we don't need to consider 92 + * the page order. 93 + */ 94 + return (__force unsigned long) buf->data & (PAGE_SIZE - 1); 95 + } 96 + static inline unsigned int efx_rx_buf_size(struct efx_nic *efx) 97 + { 98 + return PAGE_SIZE << efx->rx_buffer_order; 99 + } 97 100 98 101 99 102 /************************************************************************** ··· 109 106 static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, 110 107 void **tcpudp_hdr, u64 *hdr_flags, void *priv) 111 108 { 112 - struct efx_channel *channel = (struct efx_channel *)priv; 109 + struct efx_channel *channel = priv; 113 110 struct iphdr *iph; 114 111 struct tcphdr *th; 115 112 ··· 134 131 void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, 135 132 void *priv) 136 133 { 137 - struct efx_channel *channel = (struct efx_channel *)priv; 134 + struct efx_channel *channel = priv; 138 135 struct ethhdr *eh; 139 136 struct iphdr *iph; 140 137 141 138 /* We support EtherII and VLAN encapsulated IPv4 */ 142 - eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); 139 + eh = page_address(frag->page) + frag->page_offset; 143 140 *mac_hdr = eh; 144 141 145 142 if (eh->h_proto == htons(ETH_P_IP)) { ··· 272 269 return -ENOMEM; 273 270 274 271 dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, 275 - 0, RX_PAGE_SIZE(efx), 272 + 0, efx_rx_buf_size(efx), 276 273 PCI_DMA_FROMDEVICE); 277 274 278 275 if (unlikely(pci_dma_mapping_error(dma_addr))) { ··· 283 280 284 281 rx_queue->buf_page = rx_buf->page; 285 282 rx_queue->buf_dma_addr = dma_addr; 286 - rx_queue->buf_data = ((char *) page_address(rx_buf->page) + 283 + rx_queue->buf_data = (page_address(rx_buf->page) + 287 284 EFX_PAGE_IP_ALIGN); 288 285 } 289 286 290 - offset = RX_DATA_OFFSET(rx_queue->buf_data); 291 287 rx_buf->len = bytes; 292 - rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; 293 288 rx_buf->data = rx_queue->buf_data; 289 + offset = efx_rx_buf_offset(rx_buf); 290 + rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; 294 291 295 292 /* Try to pack multiple buffers per page */ 296 293 if (efx->rx_buffer_order == 0) { ··· 298 295 rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); 299 296 offset += ((bytes + 0x1ff) & ~0x1ff); 300 297 301 - space = RX_PAGE_SIZE(efx) - offset; 298 + space = efx_rx_buf_size(efx) - offset; 302 299 if (space >= bytes) { 303 300 /* Refs dropped on kernel releasing each skb */ 304 301 get_page(rx_queue->buf_page); ··· 347 344 EFX_BUG_ON_PARANOID(rx_buf->skb); 348 345 if (rx_buf->unmap_addr) { 349 346 pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, 350 - RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); 347 + efx_rx_buf_size(efx), 348 + PCI_DMA_FROMDEVICE); 351 349 rx_buf->unmap_addr = 0; 352 350 } 353 351 } else if (likely(rx_buf->skb)) { ··· 404 400 return 0; 405 401 406 402 /* Record minimum fill level */ 407 - if (unlikely(fill_level < rx_queue->min_fill)) 403 + if (unlikely(fill_level < rx_queue->min_fill)) { 408 404 if (fill_level) 409 405 rx_queue->min_fill = fill_level; 406 + } 410 407 411 408 /* Acquire RX add lock. 
If this lock is contended, then a fast 412 409 * fill must already be in progress (e.g. in the refill ··· 557 552 struct skb_frag_struct frags; 558 553 559 554 frags.page = rx_buf->page; 560 - frags.page_offset = RX_BUF_OFFSET(rx_buf); 555 + frags.page_offset = efx_rx_buf_offset(rx_buf); 561 556 frags.size = rx_buf->len; 562 557 563 558 lro_receive_frags(lro_mgr, &frags, rx_buf->len, ··· 602 597 if (unlikely(rx_buf->len > hdr_len)) { 603 598 struct skb_frag_struct *frag = skb_shinfo(skb)->frags; 604 599 frag->page = rx_buf->page; 605 - frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; 600 + frag->page_offset = efx_rx_buf_offset(rx_buf) + hdr_len; 606 601 frag->size = skb->len - hdr_len; 607 602 skb_shinfo(skb)->nr_frags = 1; 608 603 skb->data_len = frag->size; ··· 856 851 /* For a page that is part-way through splitting into RX buffers */ 857 852 if (rx_queue->buf_page != NULL) { 858 853 pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, 859 - RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); 854 + efx_rx_buf_size(rx_queue->efx), 855 + PCI_DMA_FROMDEVICE); 860 856 __free_pages(rx_queue->buf_page, 861 857 rx_queue->efx->rx_buffer_order); 862 858 rx_queue->buf_page = NULL;
+8 -6
drivers/net/sfc/selftest.c
··· 290 290 291 291 payload = &state->payload; 292 292 293 - received = (struct efx_loopback_payload *)(char *) buf_ptr; 293 + received = (struct efx_loopback_payload *) buf_ptr; 294 294 received->ip.saddr = payload->ip.saddr; 295 295 received->ip.check = payload->ip.check; 296 296 ··· 424 424 * interrupt handler. */ 425 425 smp_wmb(); 426 426 427 - if (NET_DEV_REGISTERED(efx)) 427 + if (efx_dev_registered(efx)) 428 428 netif_tx_lock_bh(efx->net_dev); 429 429 rc = efx_xmit(efx, tx_queue, skb); 430 - if (NET_DEV_REGISTERED(efx)) 430 + if (efx_dev_registered(efx)) 431 431 netif_tx_unlock_bh(efx->net_dev); 432 432 433 433 if (rc != NETDEV_TX_OK) { ··· 453 453 int tx_done = 0, rx_good, rx_bad; 454 454 int i, rc = 0; 455 455 456 - if (NET_DEV_REGISTERED(efx)) 456 + if (efx_dev_registered(efx)) 457 457 netif_tx_lock_bh(efx->net_dev); 458 458 459 459 /* Count the number of tx completions, and decrement the refcnt. Any ··· 465 465 dev_kfree_skb_any(skb); 466 466 } 467 467 468 - if (NET_DEV_REGISTERED(efx)) 468 + if (efx_dev_registered(efx)) 469 469 netif_tx_unlock_bh(efx->net_dev); 470 470 471 471 /* Check TX completion and received packet counts */ ··· 517 517 state->packet_count = min(1 << (i << 2), state->packet_count); 518 518 state->skbs = kzalloc(sizeof(state->skbs[0]) * 519 519 state->packet_count, GFP_KERNEL); 520 + if (!state->skbs) 521 + return -ENOMEM; 520 522 state->flush = 0; 521 523 522 524 EFX_LOG(efx, "TX queue %d testing %s loopback with %d " ··· 702 700 * "flushing" so all inflight packets are dropped */ 703 701 BUG_ON(efx->loopback_selftest); 704 702 state->flush = 1; 705 - efx->loopback_selftest = (void *)state; 703 + efx->loopback_selftest = state; 706 704 707 705 rc = efx_test_loopbacks(efx, tests, loopback_modes); 708 706
+7 -7
drivers/net/sfc/sfe4001.c
··· 116 116 117 117 /* Turn off all power rails */ 118 118 out = 0xff; 119 - (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 119 + efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 120 120 121 121 /* Disable port 1 outputs on IO expander */ 122 122 cfg = 0xff; 123 - (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); 123 + efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); 124 124 125 125 /* Disable port 0 outputs on IO expander */ 126 126 cfg = 0xff; 127 - (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); 127 + efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); 128 128 129 129 /* Clear any over-temperature alert */ 130 - (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 130 + efx_i2c_read(i2c, MAX6647, RSL, &in, 1); 131 131 } 132 132 133 133 /* The P0_EN_3V3X line on SFE4001 boards (from A2 onward) is connected ··· 253 253 fail3: 254 254 /* Turn off all power rails */ 255 255 out = 0xff; 256 - (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 256 + efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); 257 257 /* Disable port 1 outputs on IO expander */ 258 258 out = 0xff; 259 - (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); 259 + efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); 260 260 fail2: 261 261 /* Disable port 0 outputs on IO expander */ 262 262 out = 0xff; 263 - (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); 263 + efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); 264 264 fail1: 265 265 return rc; 266 266 }
+3 -1
drivers/net/sfc/tenxpress.c
··· 211 211 int rc = 0; 212 212 213 213 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 214 + if (!phy_data) 215 + return -ENOMEM; 214 216 efx->phy_data = phy_data; 215 217 216 218 tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); ··· 378 376 * perform a special software reset */ 379 377 if ((phy_data->tx_disabled && !efx->tx_disabled) || 380 378 loop_change) { 381 - (void) tenxpress_special_reset(efx); 379 + tenxpress_special_reset(efx); 382 380 falcon_reset_xaui(efx); 383 381 } 384 382
+7 -4
drivers/net/sfc/tx.c
··· 387 387 if (unlikely(tx_queue->stopped)) { 388 388 fill_level = tx_queue->insert_count - tx_queue->read_count; 389 389 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 390 - EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); 390 + EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 391 391 392 392 /* Do this under netif_tx_lock(), to avoid racing 393 393 * with efx_xmit(). */ ··· 639 639 base_dma = tsoh->dma_addr & PAGE_MASK; 640 640 641 641 p = &tx_queue->tso_headers_free; 642 - while (*p != NULL) 642 + while (*p != NULL) { 643 643 if (((unsigned long)*p & PAGE_MASK) == base_kva) 644 644 *p = (*p)->next; 645 645 else 646 646 p = &(*p)->next; 647 + } 647 648 648 649 pci_free_consistent(pci_dev, PAGE_SIZE, (void *)base_kva, base_dma); 649 650 } ··· 940 939 941 940 /* Allocate a DMA-mapped header buffer. */ 942 941 if (likely(TSOH_SIZE(st->p.header_length) <= TSOH_STD_SIZE)) { 943 - if (tx_queue->tso_headers_free == NULL) 942 + if (tx_queue->tso_headers_free == NULL) { 944 943 if (efx_tsoh_block_alloc(tx_queue)) 945 944 return -1; 945 + } 946 946 EFX_BUG_ON_PARANOID(!tx_queue->tso_headers_free); 947 947 tsoh = tx_queue->tso_headers_free; 948 948 tx_queue->tso_headers_free = tsoh->next; ··· 1108 1106 { 1109 1107 unsigned i; 1110 1108 1111 - if (tx_queue->buffer) 1109 + if (tx_queue->buffer) { 1112 1110 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1113 1111 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1112 + } 1114 1113 1115 1114 while (tx_queue->tso_headers_free != NULL) 1116 1115 efx_tsoh_block_free(tx_queue, tx_queue->tso_headers_free,
+1 -1
drivers/net/sfc/workarounds.h
··· 16 16 */ 17 17 18 18 #define EFX_WORKAROUND_ALWAYS(efx) 1 19 - #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) 19 + #define EFX_WORKAROUND_FALCON_A(efx) (falcon_rev(efx) <= FALCON_REV_A1) 20 20 21 21 /* XAUI resets if link not detected */ 22 22 #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS
+3 -1
drivers/net/sfc/xfp_phy.c
··· 85 85 int rc; 86 86 87 87 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 88 - efx->phy_data = (void *) phy_data; 88 + if (!phy_data) 89 + return -ENOMEM; 90 + efx->phy_data = phy_data; 89 91 90 92 EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" 91 93 " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid),
+19 -10
drivers/net/sky2.c
··· 1159 1159 } 1160 1160 1161 1161 #ifdef SKY2_VLAN_TAG_USED 1162 - static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 1162 + static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff) 1163 1163 { 1164 - struct sky2_port *sky2 = netdev_priv(dev); 1165 - struct sky2_hw *hw = sky2->hw; 1166 - u16 port = sky2->port; 1167 - 1168 - netif_tx_lock_bh(dev); 1169 - napi_disable(&hw->napi); 1170 - 1171 - sky2->vlgrp = grp; 1172 - if (grp) { 1164 + if (onoff) { 1173 1165 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 1174 1166 RX_VLAN_STRIP_ON); 1175 1167 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), ··· 1172 1180 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), 1173 1181 TX_VLAN_TAG_OFF); 1174 1182 } 1183 + } 1184 + 1185 + static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 1186 + { 1187 + struct sky2_port *sky2 = netdev_priv(dev); 1188 + struct sky2_hw *hw = sky2->hw; 1189 + u16 port = sky2->port; 1190 + 1191 + netif_tx_lock_bh(dev); 1192 + napi_disable(&hw->napi); 1193 + 1194 + sky2->vlgrp = grp; 1195 + sky2_set_vlan_mode(hw, port, grp != NULL); 1175 1196 1176 1197 sky2_read32(hw, B0_Y2_SP_LISR); 1177 1198 napi_enable(&hw->napi); ··· 1422 1417 1423 1418 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1424 1419 TX_RING_SIZE - 1); 1420 + 1421 + #ifdef SKY2_VLAN_TAG_USED 1422 + sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL); 1423 + #endif 1425 1424 1426 1425 err = sky2_rx_start(sky2); 1427 1426 if (err)
+1 -1
drivers/net/tokenring/3c359.h
··· 264 264 u16 asb; 265 265 266 266 u8 __iomem *xl_mmio; 267 - char *xl_card_name; 267 + const char *xl_card_name; 268 268 struct pci_dev *pdev ; 269 269 270 270 spinlock_t xl_lock ;
+1 -1
drivers/net/tokenring/olympic.h
··· 254 254 u8 __iomem *olympic_mmio; 255 255 u8 __iomem *olympic_lap; 256 256 struct pci_dev *pdev ; 257 - char *olympic_card_name ; 257 + const char *olympic_card_name; 258 258 259 259 spinlock_t olympic_lock ; 260 260
+15 -1
drivers/net/tulip/uli526x.c
··· 225 225 static const struct ethtool_ops netdev_ethtool_ops; 226 226 static u16 read_srom_word(long, int); 227 227 static irqreturn_t uli526x_interrupt(int, void *); 228 + #ifdef CONFIG_NET_POLL_CONTROLLER 229 + static void uli526x_poll(struct net_device *dev); 230 + #endif 228 231 static void uli526x_descriptor_init(struct uli526x_board_info *, unsigned long); 229 232 static void allocate_rx_buffer(struct uli526x_board_info *); 230 233 static void update_cr6(u32, unsigned long); ··· 342 339 dev->get_stats = &uli526x_get_stats; 343 340 dev->set_multicast_list = &uli526x_set_filter_mode; 344 341 dev->ethtool_ops = &netdev_ethtool_ops; 342 + #ifdef CONFIG_NET_POLL_CONTROLLER 343 + dev->poll_controller = &uli526x_poll; 344 + #endif 345 345 spin_lock_init(&db->lock); 346 346 347 347 ··· 687 681 db->cr5_data = inl(ioaddr + DCR5); 688 682 outl(db->cr5_data, ioaddr + DCR5); 689 683 if ( !(db->cr5_data & 0x180c1) ) { 690 - spin_unlock_irqrestore(&db->lock, flags); 684 + /* Restore CR7 to enable interrupt mask */ 691 685 outl(db->cr7_data, ioaddr + DCR7); 686 + spin_unlock_irqrestore(&db->lock, flags); 692 687 return IRQ_HANDLED; 693 688 } 694 689 ··· 722 715 return IRQ_HANDLED; 723 716 } 724 717 718 + #ifdef CONFIG_NET_POLL_CONTROLLER 719 + static void uli526x_poll(struct net_device *dev) 720 + { 721 + /* ISR grabs the irqsave lock, so this should be safe */ 722 + uli526x_interrupt(dev->irq, dev); 723 + } 724 + #endif 725 725 726 726 /* 727 727 * Free TX resource after TX complete
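The uli526x hunk wires up a netpoll hook that simply re-invokes the normal interrupt handler, which is safe here because that handler takes its lock with irqsave. A stripped-down sketch of the same wiring for an old-style net_device with a poll_controller member (handler and setup names are hypothetical):

#include <linux/netdevice.h>
#include <linux/interrupt.h>

static irqreturn_t example_interrupt(int irq, void *dev_id);

#ifdef CONFIG_NET_POLL_CONTROLLER
/* May be called with interrupts disabled, e.g. by netconsole. */
static void example_poll_controller(struct net_device *dev)
{
        example_interrupt(dev->irq, dev);
}
#endif

static void example_setup(struct net_device *dev)
{
#ifdef CONFIG_NET_POLL_CONTROLLER
        dev->poll_controller = example_poll_controller;
#endif
}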
+5 -4
drivers/net/ucc_geth.c
··· 237 237 skb->dev = ugeth->dev; 238 238 239 239 out_be32(&((struct qe_bd __iomem *)bd)->buf, 240 - dma_map_single(NULL, 240 + dma_map_single(&ugeth->dev->dev, 241 241 skb->data, 242 242 ugeth->ug_info->uf_info.max_rx_buf_length + 243 243 UCC_GETH_RX_DATA_BUF_ALIGNMENT, ··· 2158 2158 continue; 2159 2159 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) { 2160 2160 if (ugeth->tx_skbuff[i][j]) { 2161 - dma_unmap_single(NULL, 2161 + dma_unmap_single(&ugeth->dev->dev, 2162 2162 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2163 2163 (in_be32((u32 __iomem *)bd) & 2164 2164 BD_LENGTH_MASK), ··· 2186 2186 bd = ugeth->p_rx_bd_ring[i]; 2187 2187 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) { 2188 2188 if (ugeth->rx_skbuff[i][j]) { 2189 - dma_unmap_single(NULL, 2189 + dma_unmap_single(&ugeth->dev->dev, 2190 2190 in_be32(&((struct qe_bd __iomem *)bd)->buf), 2191 2191 ugeth->ug_info-> 2192 2192 uf_info.max_rx_buf_length + ··· 3406 3406 3407 3407 /* set up the buffer descriptor */ 3408 3408 out_be32(&((struct qe_bd __iomem *)bd)->buf, 3409 - dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE)); 3409 + dma_map_single(&ugeth->dev->dev, skb->data, 3410 + skb->len, DMA_TO_DEVICE)); 3410 3411 3411 3412 /* printk(KERN_DEBUG"skb->data is 0x%x\n",skb->data); */ 3412 3413
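The ucc_geth hunks pass a real struct device to dma_map_single()/dma_unmap_single() instead of NULL, so mappings are accounted against the correct device and work with per-device DMA ops. A sketch of a matched map/unmap pair for an RX buffer (helper names and the length parameter are assumptions):

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map an RX buffer for the device before handing it to the hardware. */
static dma_addr_t map_rx_buffer(struct device *dev, struct sk_buff *skb,
                                unsigned int buf_len)
{
        return dma_map_single(dev, skb->data, buf_len, DMA_FROM_DEVICE);
}

/* Unmap with the same device, length and direction once the hardware
 * has finished with the buffer. */
static void unmap_rx_buffer(struct device *dev, dma_addr_t addr,
                            unsigned int buf_len)
{
        dma_unmap_single(dev, addr, buf_len, DMA_FROM_DEVICE);
}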
+4
drivers/net/usb/asix.c
··· 1440 1440 // Belkin F5D5055 1441 1441 USB_DEVICE(0x050d, 0x5055), 1442 1442 .driver_info = (unsigned long) &ax88178_info, 1443 + }, { 1444 + // Apple USB Ethernet Adapter 1445 + USB_DEVICE(0x05ac, 0x1402), 1446 + .driver_info = (unsigned long) &ax88772_info, 1443 1447 }, 1444 1448 { }, // END 1445 1449 };
+1 -1
drivers/net/usb/rndis_host.c
··· 194 194 dev_dbg(&info->control->dev, 195 195 "rndis response error, code %d\n", retval); 196 196 } 197 - msleep(2); 197 + msleep(20); 198 198 } 199 199 dev_dbg(&info->control->dev, "rndis response timeout\n"); 200 200 return -ETIMEDOUT;
+1 -2
drivers/net/virtio_net.c
··· 470 470 kfree_skb(skb); 471 471 vi->num--; 472 472 } 473 - while ((skb = __skb_dequeue(&vi->send)) != NULL) 474 - kfree_skb(skb); 473 + __skb_queue_purge(&vi->send); 475 474 476 475 BUG_ON(vi->num != 0); 477 476
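This virtio_net hunk, like the xen-netfront one further down, replaces a manual dequeue-and-free loop with __skb_queue_purge(). A small sketch of the equivalence (the queue name is illustrative):

#include <linux/skbuff.h>

static void drop_pending_skbs(struct sk_buff_head *queue)
{
        /* Open-coded form this replaces:
         *
         *      struct sk_buff *skb;
         *      while ((skb = __skb_dequeue(queue)) != NULL)
         *              kfree_skb(skb);
         *
         * __skb_queue_purge() does exactly that; like __skb_dequeue()
         * it takes no queue lock, so the caller must already hold
         * whatever protects the list.
         */
        __skb_queue_purge(queue);
}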
+11 -8
drivers/net/wan/hdlc.c
··· 43 43 44 44 #undef DEBUG_LINK 45 45 46 - static struct hdlc_proto *first_proto = NULL; 47 - 46 + static struct hdlc_proto *first_proto; 48 47 49 48 static int hdlc_change_mtu(struct net_device *dev, int new_mtu) 50 49 { ··· 313 314 314 315 void register_hdlc_protocol(struct hdlc_proto *proto) 315 316 { 317 + rtnl_lock(); 316 318 proto->next = first_proto; 317 319 first_proto = proto; 320 + rtnl_unlock(); 318 321 } 319 322 320 323 321 324 void unregister_hdlc_protocol(struct hdlc_proto *proto) 322 325 { 323 - struct hdlc_proto **p = &first_proto; 324 - while (*p) { 325 - if (*p == proto) { 326 - *p = proto->next; 327 - return; 328 - } 326 + struct hdlc_proto **p; 327 + 328 + rtnl_lock(); 329 + p = &first_proto; 330 + while (*p != proto) { 331 + BUG_ON(!*p); 329 332 p = &((*p)->next); 330 333 } 334 + *p = proto->next; 335 + rtnl_unlock(); 331 336 } 332 337 333 338
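hdlc.c now serialises updates to the protocol list under the RTNL and traps an unregister of a protocol that was never registered. A condensed sketch of a singly linked registry protected the same way (struct and function names are hypothetical):

#include <linux/rtnetlink.h>
#include <linux/bug.h>

struct example_proto {
        struct example_proto *next;
        /* ... protocol ops ... */
};

static struct example_proto *proto_list;

static void example_register_proto(struct example_proto *proto)
{
        rtnl_lock();
        proto->next = proto_list;
        proto_list = proto;
        rtnl_unlock();
}

static void example_unregister_proto(struct example_proto *proto)
{
        struct example_proto **p;

        rtnl_lock();
        for (p = &proto_list; *p != proto; p = &(*p)->next)
                BUG_ON(!*p);    /* must have been registered */
        *p = proto->next;
        rtnl_unlock();
}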
+48 -32
drivers/net/wan/hdlc_cisco.c
··· 56 56 cisco_proto settings; 57 57 58 58 struct timer_list timer; 59 + spinlock_t lock; 59 60 unsigned long last_poll; 60 61 int up; 61 62 int request_sent; ··· 159 158 { 160 159 struct net_device *dev = skb->dev; 161 160 hdlc_device *hdlc = dev_to_hdlc(dev); 161 + struct cisco_state *st = state(hdlc); 162 162 struct hdlc_header *data = (struct hdlc_header*)skb->data; 163 163 struct cisco_packet *cisco_data; 164 164 struct in_device *in_dev; ··· 222 220 goto rx_error; 223 221 224 222 case CISCO_KEEPALIVE_REQ: 225 - state(hdlc)->rxseq = ntohl(cisco_data->par1); 226 - if (state(hdlc)->request_sent && 227 - ntohl(cisco_data->par2) == state(hdlc)->txseq) { 228 - state(hdlc)->last_poll = jiffies; 229 - if (!state(hdlc)->up) { 223 + spin_lock(&st->lock); 224 + st->rxseq = ntohl(cisco_data->par1); 225 + if (st->request_sent && 226 + ntohl(cisco_data->par2) == st->txseq) { 227 + st->last_poll = jiffies; 228 + if (!st->up) { 230 229 u32 sec, min, hrs, days; 231 230 sec = ntohl(cisco_data->time) / 1000; 232 231 min = sec / 60; sec -= min * 60; ··· 235 232 days = hrs / 24; hrs -= days * 24; 236 233 printk(KERN_INFO "%s: Link up (peer " 237 234 "uptime %ud%uh%um%us)\n", 238 - dev->name, days, hrs, 239 - min, sec); 235 + dev->name, days, hrs, min, sec); 240 236 netif_dormant_off(dev); 241 - state(hdlc)->up = 1; 237 + st->up = 1; 242 238 } 243 239 } 240 + spin_unlock(&st->lock); 244 241 245 242 dev_kfree_skb_any(skb); 246 243 return NET_RX_SUCCESS; ··· 264 261 { 265 262 struct net_device *dev = (struct net_device *)arg; 266 263 hdlc_device *hdlc = dev_to_hdlc(dev); 264 + struct cisco_state *st = state(hdlc); 267 265 268 - if (state(hdlc)->up && 269 - time_after(jiffies, state(hdlc)->last_poll + 270 - state(hdlc)->settings.timeout * HZ)) { 271 - state(hdlc)->up = 0; 266 + spin_lock(&st->lock); 267 + if (st->up && 268 + time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) { 269 + st->up = 0; 272 270 printk(KERN_INFO "%s: Link down\n", dev->name); 273 271 netif_dormant_on(dev); 274 272 } 275 273 276 - cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, 277 - htonl(++state(hdlc)->txseq), 278 - htonl(state(hdlc)->rxseq)); 279 - state(hdlc)->request_sent = 1; 280 - state(hdlc)->timer.expires = jiffies + 281 - state(hdlc)->settings.interval * HZ; 282 - state(hdlc)->timer.function = cisco_timer; 283 - state(hdlc)->timer.data = arg; 284 - add_timer(&state(hdlc)->timer); 274 + cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq), 275 + htonl(st->rxseq)); 276 + st->request_sent = 1; 277 + spin_unlock(&st->lock); 278 + 279 + st->timer.expires = jiffies + st->settings.interval * HZ; 280 + st->timer.function = cisco_timer; 281 + st->timer.data = arg; 282 + add_timer(&st->timer); 285 283 } 286 284 287 285 ··· 290 286 static void cisco_start(struct net_device *dev) 291 287 { 292 288 hdlc_device *hdlc = dev_to_hdlc(dev); 293 - state(hdlc)->up = 0; 294 - state(hdlc)->request_sent = 0; 295 - state(hdlc)->txseq = state(hdlc)->rxseq = 0; 289 + struct cisco_state *st = state(hdlc); 290 + unsigned long flags; 296 291 297 - init_timer(&state(hdlc)->timer); 298 - state(hdlc)->timer.expires = jiffies + HZ; /*First poll after 1s*/ 299 - state(hdlc)->timer.function = cisco_timer; 300 - state(hdlc)->timer.data = (unsigned long)dev; 301 - add_timer(&state(hdlc)->timer); 292 + spin_lock_irqsave(&st->lock, flags); 293 + st->up = 0; 294 + st->request_sent = 0; 295 + st->txseq = st->rxseq = 0; 296 + spin_unlock_irqrestore(&st->lock, flags); 297 + 298 + init_timer(&st->timer); 299 + st->timer.expires = jiffies + HZ; 
/* First poll after 1 s */ 300 + st->timer.function = cisco_timer; 301 + st->timer.data = (unsigned long)dev; 302 + add_timer(&st->timer); 302 303 } 303 304 304 305 ··· 311 302 static void cisco_stop(struct net_device *dev) 312 303 { 313 304 hdlc_device *hdlc = dev_to_hdlc(dev); 314 - del_timer_sync(&state(hdlc)->timer); 305 + struct cisco_state *st = state(hdlc); 306 + unsigned long flags; 307 + 308 + del_timer_sync(&st->timer); 309 + 310 + spin_lock_irqsave(&st->lock, flags); 315 311 netif_dormant_on(dev); 316 - state(hdlc)->up = 0; 317 - state(hdlc)->request_sent = 0; 312 + st->up = 0; 313 + st->request_sent = 0; 314 + spin_unlock_irqrestore(&st->lock, flags); 318 315 } 319 316 320 317 ··· 382 367 return result; 383 368 384 369 memcpy(&state(hdlc)->settings, &new_settings, size); 370 + spin_lock_init(&state(hdlc)->lock); 385 371 dev->hard_start_xmit = hdlc->xmit; 386 372 dev->header_ops = &cisco_header_ops; 387 373 dev->type = ARPHRD_CISCO;
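The cisco keepalive state (txseq/rxseq/up/last_poll) is now touched from both the receive path and the timer, so the patch adds a spinlock around it. A condensed sketch of that pattern with hypothetical field names and a fixed timeout, not the driver's exact flow:

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/types.h>

struct keepalive_state {
        spinlock_t lock;
        u32 txseq, rxseq;
        int up;
        unsigned long last_poll;
        struct timer_list timer;
};

/* Runs in softirq context from the RX path. */
static void keepalive_rx(struct keepalive_state *st, u32 peer_seq, u32 echoed_seq)
{
        spin_lock(&st->lock);
        st->rxseq = peer_seq;
        if (echoed_seq == st->txseq) {
                st->last_poll = jiffies;
                st->up = 1;
        }
        spin_unlock(&st->lock);
}

/* Timer callback: may run concurrently with keepalive_rx(). */
static void keepalive_timer(unsigned long arg)
{
        struct keepalive_state *st = (struct keepalive_state *)arg;

        spin_lock(&st->lock);
        if (st->up && time_after(jiffies, st->last_poll + 10 * HZ))
                st->up = 0;
        st->txseq++;
        spin_unlock(&st->lock);

        /* Send the keepalive request outside the lock, then re-arm. */
        mod_timer(&st->timer, jiffies + HZ);
}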
+2 -4
drivers/net/xen-netfront.c
··· 946 946 work_done++; 947 947 } 948 948 949 - while ((skb = __skb_dequeue(&errq))) 950 - kfree_skb(skb); 949 + __skb_queue_purge(&errq); 951 950 952 951 work_done -= handle_incoming_queue(dev, &rxq); 953 952 ··· 1078 1079 } 1079 1080 } 1080 1081 1081 - while ((skb = __skb_dequeue(&free_list)) != NULL) 1082 - dev_kfree_skb(skb); 1082 + __skb_queue_purge(&free_list); 1083 1083 1084 1084 spin_unlock_bh(&np->rx_lock); 1085 1085 }