···
        return status;
 }

+static int myri10ge_get_firmware_capabilities(struct myri10ge_priv *mgp)
 {
        struct myri10ge_cmd cmd;
        int status;
+3-1
drivers/net/pcmcia/fmvj18x_cs.c
···
                cardtype = CONTEC;
                break;
        case MANFID_FUJITSU:
-               if (link->card_id == PRODID_FUJITSU_MBH10302)
+               if (link->conf.ConfigBase == 0x0fe0)
+                       cardtype = MBH10302;
+               else if (link->card_id == PRODID_FUJITSU_MBH10302)
                        /* RATOC REX-5588/9822/4886's PRODID are 0004(=MBH10302),
                           but these are MBH10304 based card. */
                        cardtype = MBH10304;
+8-4
drivers/net/pcmcia/xirc2ps_cs.c
···
 set_multicast_list(struct net_device *dev)
 {
        unsigned int ioaddr = dev->base_addr;
+       unsigned value;

        SelectPage(0x42);
+       value = GetByte(XIRCREG42_SWC1) & 0xC0;
+
        if (dev->flags & IFF_PROMISC) { /* snoop */
-               PutByte(XIRCREG42_SWC1, 0x06); /* set MPE and PME */
+               PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
        } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
-               PutByte(XIRCREG42_SWC1, 0x02); /* set MPE */
+               PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
        } else if (dev->mc_count) {
                /* the chip can filter 9 addresses perfectly */
-               PutByte(XIRCREG42_SWC1, 0x01);
+               PutByte(XIRCREG42_SWC1, value | 0x01);
                SelectPage(0x40);
                PutByte(XIRCREG40_CMD0, Offline);
                set_addresses(dev);
                SelectPage(0x40);
                PutByte(XIRCREG40_CMD0, EnableRecv | Online);
        } else { /* standard usage */
-               PutByte(XIRCREG42_SWC1, 0x00);
+               PutByte(XIRCREG42_SWC1, value | 0x00);
        }
        SelectPage(0);
 }
···

        /* enable receiver and put the mac online */
        if (full) {
+               set_multicast_list(dev);
                SelectPage(0x40);
                PutByte(XIRCREG40_CMD0, EnableRecv | Online);
        }
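The old code wrote absolute values into SWC1, clobbering the register's upper two bits, and never reprogrammed the multicast mode after the chip was reset; the fix reads the register first and reruns set_multicast_list() when the MAC is brought back online. A minimal sketch of the read-modify-write idiom used here, with reg_read()/reg_write() as hypothetical stand-ins for the driver's GetByte()/PutByte() accessors:

        /* Hypothetical register accessors standing in for GetByte()/PutByte(). */
        extern unsigned reg_read(unsigned reg);
        extern void reg_write(unsigned reg, unsigned val);

        #define SWC1_KEEP_MASK 0xC0     /* upper bits that must survive the update */

        static void swc1_set_mode(unsigned reg, unsigned mode_bits)
        {
                /* read-modify-write: preserve the bits in the keep mask,
                 * replace only the mode bits (e.g. 0x06 = MPE | PME) */
                unsigned value = reg_read(reg) & SWC1_KEEP_MASK;

                reg_write(reg, value | mode_bits);
        }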
+2-2
drivers/net/pcnet32.c
···
 static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
                             void *ptr);
 static void pcnet32_purge_tx_ring(struct net_device *dev);
-static int pcnet32_alloc_ring(struct net_device *dev, char *name);
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name);
 static void pcnet32_free_ring(struct net_device *dev);
 static void pcnet32_check_media(struct net_device *dev, int verbose);
···
 }

 /* if any allocation fails, caller must also call pcnet32_free_ring */
-static int pcnet32_alloc_ring(struct net_device *dev, char *name)
+static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
 {
        struct pcnet32_private *lp = netdev_priv(dev);

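Const-qualifying the parameter documents that pcnet32_alloc_ring() only reads the name and lets callers pass string literals without warnings. A tiny standalone illustration of the distinction (not driver code):

        #include <stdio.h>

        /* const documents, and lets the compiler enforce, that the
         * callee never writes through name */
        static int alloc_ring(const char *name)
        {
                printf("allocating rings for %s\n", name);
                return 0;
        }

        int main(void)
        {
                /* string literals are read-only; passing one to a plain
                 * "char *" parameter compiles only for historical reasons
                 * and warns under -Wwrite-strings */
                return alloc_ring("pcnet32");
        }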
+1-1
drivers/net/phy/Kconfig
···
 menuconfig PHYLIB
        tristate "PHY Device support and infrastructure"
        depends on !S390
-       depends on NET_ETHERNET && (BROKEN || !S390)
+       depends on NET_ETHERNET
        help
          Ethernet controllers are usually attached to PHY
          devices.  This option provides infrastructure for
+1
drivers/net/phy/phy_device.c
···

        return 0;
 }
+EXPORT_SYMBOL(get_phy_id);

 /**
  * get_phy_device - reads the specified PHY device and returns its @phy_device struct
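Exporting get_phy_id() lets modular MDIO bus drivers read a PHY's identifier words without duplicating the register sequence. A hedged sketch of a caller, assuming the contemporary signature int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id); the probe helper itself is hypothetical:

        #include <linux/phy.h>

        static int example_identify_phy(struct mii_bus *bus, int addr)
        {
                u32 phy_id;
                int err;

                /* reads MII_PHYSID1/MII_PHYSID2 and combines them */
                err = get_phy_id(bus, addr, &phy_id);
                if (err)
                        return err;
                if (phy_id == 0xffffffff)       /* nothing at this address */
                        return -ENODEV;

                pr_info("PHY at address %d has ID %08x\n", addr, phy_id);
                return 0;
        }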
drivers/net/sfc/efx.c
···
  */
 static inline void efx_channel_processed(struct efx_channel *channel)
 {
-       /* Write to EVQ_RPTR_REG.  If a new event arrived in a race
-        * with finishing processing, a new interrupt will be raised.
-        */
+       /* The interrupt handler for this channel may set work_pending
+        * as soon as we acknowledge the events we've seen.  Make sure
+        * it's cleared before then. */
        channel->work_pending = 0;
-       smp_wmb(); /* Ensure channel updated before any new interrupt. */
+       smp_wmb();
+
        falcon_eventq_read_ack(channel);
 }
···
        napi_disable(&channel->napi_str);

        /* Poll the channel */
-       (void) efx_process_channel(channel, efx->type->evq_size);
+       efx_process_channel(channel, efx->type->evq_size);

        /* Ack the eventq.  This may cause an interrupt to be generated
         * when they are reenabled */
···
  *
  *************************************************************************/

-/* Setup per-NIC RX buffer parameters.
- * Calculate the rx buffer allocation parameters required to support
- * the current MTU, including padding for header alignment and overruns.
- */
-static void efx_calc_rx_buffer_params(struct efx_nic *efx)
-{
-       unsigned int order, len;
-
-       len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
-              EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
-              efx->type->rx_buffer_padding);
-
-       /* Calculate page-order */
-       for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
-               ;
-
-       efx->rx_buffer_len = len;
-       efx->rx_buffer_order = order;
-}
-
 static int efx_probe_channel(struct efx_channel *channel)
 {
        struct efx_tx_queue *tx_queue;
···
        struct efx_channel *channel;
        int rc = 0;

-       efx_calc_rx_buffer_params(efx);
+       /* Calculate the rx buffer allocation parameters required to
+        * support the current MTU, including padding for header
+        * alignment and overruns.
+        */
+       efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) +
+                             EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
+                             efx->type->rx_buffer_padding);
+       efx->rx_buffer_order = get_order(efx->rx_buffer_len);

        /* Initialise the channels */
        efx_for_each_channel(channel, efx) {
···
        netif_napi_add(channel->napi_dev, &channel->napi_str,
                       efx_poll, napi_weight);

+       /* The interrupt handler for this channel may set work_pending
+        * as soon as we enable it.  Make sure it's cleared before
+        * then.  Similarly, make sure it sees the enabled flag set. */
        channel->work_pending = 0;
        channel->enabled = 1;
-       smp_wmb(); /* ensure channel updated before first interrupt */
+       smp_wmb();

        napi_enable(&channel->napi_str);
···
        mutex_unlock(&efx->mac_lock);

        /* Serialise against efx_set_multicast_list() */
-       if (NET_DEV_REGISTERED(efx)) {
+       if (efx_dev_registered(efx)) {
                netif_tx_lock_bh(efx->net_dev);
                netif_tx_unlock_bh(efx->net_dev);
        }
···
        efx->membase = ioremap_nocache(efx->membase_phys,
                                       efx->type->mem_map_size);
        if (!efx->membase) {
-               EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n",
-                       efx->type->mem_bar, efx->membase_phys,
+               EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n",
+                       efx->type->mem_bar,
+                       (unsigned long long)efx->membase_phys,
                        efx->type->mem_map_size);
                rc = -ENOMEM;
                goto fail4;
        }
-       EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n",
-               efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size,
-               efx->membase);
+       EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n",
+               efx->type->mem_bar, (unsigned long long)efx->membase_phys,
+               efx->type->mem_map_size, efx->membase);

        return 0;

  fail4:
        release_mem_region(efx->membase_phys, efx->type->mem_map_size);
  fail3:
-       efx->membase_phys = 0UL;
+       efx->membase_phys = 0;
  fail2:
        pci_disable_device(efx->pci_dev);
  fail1:
···

        if (efx->membase_phys) {
                pci_release_region(efx->pci_dev, efx->type->mem_bar);
-               efx->membase_phys = 0UL;
+               efx->membase_phys = 0;
        }

        pci_disable_device(efx->pci_dev);
···
                return;
        if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT))
                return;
-       if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev))
+       if (efx_dev_registered(efx) && !netif_running(efx->net_dev))
                return;

        /* Mark the port as enabled so port reconfigurations can start, then
···
        cancel_delayed_work_sync(&efx->monitor_work);

        /* Ensure that all RX slow refills are complete. */
-       efx_for_each_rx_queue(rx_queue, efx) {
+       efx_for_each_rx_queue(rx_queue, efx)
                cancel_delayed_work_sync(&rx_queue->work);
-       }

        /* Stop scheduled port reconfigurations */
        cancel_work_sync(&efx->reconfigure_work);
···
        falcon_disable_interrupts(efx);
        if (efx->legacy_irq)
                synchronize_irq(efx->legacy_irq);
-       efx_for_each_channel_with_interrupt(channel, efx)
+       efx_for_each_channel_with_interrupt(channel, efx) {
                if (channel->irq)
                        synchronize_irq(channel->irq);
+       }

        /* Stop all NAPI processing and synchronous rx refills */
        efx_for_each_channel(channel, efx)
···
        /* Stop the kernel transmit interface late, so the watchdog
         * timer isn't ticking over the flush */
        efx_stop_queue(efx);
-       if (NET_DEV_REGISTERED(efx)) {
+       if (efx_dev_registered(efx)) {
                netif_tx_lock_bh(efx->net_dev);
                netif_tx_unlock_bh(efx->net_dev);
        }
···
        return 0;
 }

-/* Context: process, dev_base_lock held, non-blocking. */
+/* Context: process, dev_base_lock or RTNL held, non-blocking. */
 static struct net_device_stats *efx_net_stats(struct net_device *net_dev)
 {
        struct efx_nic *efx = net_dev->priv;
        struct efx_mac_stats *mac_stats = &efx->mac_stats;
        struct net_device_stats *stats = &net_dev->stats;

+       /* Update stats if possible, but do not wait if another thread
+        * is updating them (or resetting the NIC); slightly stale
+        * stats are acceptable.
+        */
        if (!spin_trylock(&efx->stats_lock))
                return stats;
        if (efx->state == STATE_RUNNING) {
···
 static int efx_netdev_event(struct notifier_block *this,
                            unsigned long event, void *ptr)
 {
-       struct net_device *net_dev = (struct net_device *)ptr;
+       struct net_device *net_dev = ptr;

        if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) {
                struct efx_nic *efx = net_dev->priv;
···
        efx_for_each_tx_queue(tx_queue, efx)
                efx_release_tx_buffers(tx_queue);

-       if (NET_DEV_REGISTERED(efx)) {
+       if (efx_dev_registered(efx)) {
                strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
                unregister_netdev(efx->net_dev);
        }
···
        if (method == RESET_TYPE_DISABLE) {
                /* Reinitialise the device anyway so the driver unload sequence
                 * can talk to the external SRAM */
-               (void) falcon_init_nic(efx);
+               falcon_init_nic(efx);
                rc = -EIO;
                goto fail4;
        }
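Among these cleanups, the deleted efx_calc_rx_buffer_params() hand-rolled its page-order loop; the replacement uses the kernel's get_order(). The two agree for any len >= 1, so the new efx->rx_buffer_order assignment preserves the old result while dropping the helper. A sketch of the equivalence, keeping the removed loop only for comparison:

        #include <asm/page.h>   /* PAGE_SIZE and get_order() */

        /* The deleted loop: find the smallest order such that
         * (1 << order) * PAGE_SIZE covers len bytes.  For len >= 1
         * this returns exactly get_order(len). */
        static unsigned int manual_page_order(unsigned int len)
        {
                unsigned int order;

                for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order)
                        ;
                return order;
        }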
+38-49
drivers/net/sfc/falcon.c
···
  **************************************************************************
  */

-/* DMA address mask (up to 46-bit, avoiding compiler warnings)
- *
- * Note that it is possible to have a platform with 64-bit longs and
- * 32-bit DMA addresses, or vice versa.  EFX_DMA_MASK takes care of the
- * platform DMA mask.
- */
-#if BITS_PER_LONG == 64
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL)
-#else
-#define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL)
-#endif
+/* DMA address mask */
+#define FALCON_DMA_MASK DMA_BIT_MASK(46)

 /* TX DMA length mask (13-bit) */
 #define FALCON_TX_DMA_MASK (4096 - 1)
···
 #define PCI_EXP_LNKSTA_LNK_WID_LBN     4

 #define FALCON_IS_DUAL_FUNC(efx)               \
-       (FALCON_REV(efx) < FALCON_REV_B0)
+       (falcon_rev(efx) < FALCON_REV_B0)

 /**************************************************************************
  *
···
                              TX_DESCQ_TYPE, 0,
                              TX_NON_IP_DROP_DIS_B0, 1);

-       if (FALCON_REV(efx) >= FALCON_REV_B0) {
+       if (falcon_rev(efx) >= FALCON_REV_B0) {
                int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM);
                EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum);
                EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum);
···
        falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
                           tx_queue->queue);

-       if (FALCON_REV(efx) < FALCON_REV_B0) {
+       if (falcon_rev(efx) < FALCON_REV_B0) {
                efx_oword_t reg;

                BUG_ON(tx_queue->queue >= 128); /* HW limit */
···
        efx_oword_t rx_desc_ptr;
        struct efx_nic *efx = rx_queue->efx;
        int rc;
-       int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0;
+       int is_b0 = falcon_rev(efx) >= FALCON_REV_B0;
        int iscsi_digest_en = is_b0;

        EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n",
···
                tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL);
                tx_queue = &efx->tx_queue[tx_ev_q_label];

-               if (NET_DEV_REGISTERED(efx))
+               if (efx_dev_registered(efx))
                        netif_tx_lock(efx->net_dev);
                falcon_notify_tx_desc(tx_queue);
-               if (NET_DEV_REGISTERED(efx))
+               if (efx_dev_registered(efx))
                        netif_tx_unlock(efx->net_dev);
        } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) &&
                   EFX_WORKAROUND_10727(efx)) {
···
                                                 RX_EV_TCP_UDP_CHKSUM_ERR);
        rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR);
        rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC);
-       rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ?
+       rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
                          0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB));
        rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR);
···
            EFX_QWORD_FIELD(*event, XG_PHY_INTR))
                is_phy_event = 1;

-       if ((FALCON_REV(efx) >= FALCON_REV_B0) &&
+       if ((falcon_rev(efx) >= FALCON_REV_B0) &&
            EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0))
                is_phy_event = 1;
···
 static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
 {
        struct falcon_nic_data *nic_data = efx->nic_data;
-       efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+       efx_oword_t *int_ker = efx->irq_status.addr;
        efx_oword_t fatal_intr;
        int error, mem_perr;
        static int n_int_errors;
···
  */
 static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
 {
-       struct efx_nic *efx = (struct efx_nic *)dev_id;
-       efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+       struct efx_nic *efx = dev_id;
+       efx_oword_t *int_ker = efx->irq_status.addr;
        struct efx_channel *channel;
        efx_dword_t reg;
        u32 queues;
···

 static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
 {
-       struct efx_nic *efx = (struct efx_nic *)dev_id;
-       efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+       struct efx_nic *efx = dev_id;
+       efx_oword_t *int_ker = efx->irq_status.addr;
        struct efx_channel *channel;
        int syserr;
        int queues;
···
  */
 static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id)
 {
-       struct efx_channel *channel = (struct efx_channel *)dev_id;
+       struct efx_channel *channel = dev_id;
        struct efx_nic *efx = channel->efx;
-       efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr;
+       efx_oword_t *int_ker = efx->irq_status.addr;
        int syserr;

        efx->last_irq_cpu = raw_smp_processor_id();
···
        unsigned long offset;
        efx_dword_t dword;

-       if (FALCON_REV(efx) < FALCON_REV_B0)
+       if (falcon_rev(efx) < FALCON_REV_B0)
                return;

        for (offset = RX_RSS_INDIR_TBL_B0;
···

        if (!EFX_INT_MODE_USE_MSI(efx)) {
                irq_handler_t handler;
-               if (FALCON_REV(efx) >= FALCON_REV_B0)
+               if (falcon_rev(efx) >= FALCON_REV_B0)
                        handler = falcon_legacy_interrupt_b0;
                else
                        handler = falcon_legacy_interrupt_a1;
···
        efx_oword_t reg;

        /* Disable MSI/MSI-X interrupts */
-       efx_for_each_channel_with_interrupt(channel, efx)
+       efx_for_each_channel_with_interrupt(channel, efx) {
                if (channel->irq)
                        free_irq(channel->irq, channel);
+       }

        /* ACK legacy interrupt */
-       if (FALCON_REV(efx) >= FALCON_REV_B0)
+       if (falcon_rev(efx) >= FALCON_REV_B0)
                falcon_read(efx, &reg, INT_ISR0_B0);
        else
                falcon_irq_ack_a1(efx);
···
        efx_oword_t temp;
        int count;

-       if ((FALCON_REV(efx) < FALCON_REV_B0) ||
+       if ((falcon_rev(efx) < FALCON_REV_B0) ||
            (efx->loopback_mode != LOOPBACK_NONE))
                return;
···
 {
        efx_oword_t temp;

-       if (FALCON_REV(efx) < FALCON_REV_B0)
+       if (falcon_rev(efx) < FALCON_REV_B0)
                return;

        /* Isolate the MAC -> RX */
···
                            MAC_SPEED, link_speed);
        /* On B0, MAC backpressure can be disabled and packets get
         * discarded. */
-       if (FALCON_REV(efx) >= FALCON_REV_B0) {
+       if (falcon_rev(efx) >= FALCON_REV_B0) {
                EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0,
                                    !efx->link_up);
        }
···
        EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc);

        /* Unisolate the MAC -> RX */
-       if (FALCON_REV(efx) >= FALCON_REV_B0)
+       if (falcon_rev(efx) >= FALCON_REV_B0)
                EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1);
        falcon_write(efx, &reg, RX_CFG_REG_KER);
 }
···
                return 0;

        /* Statistics fetch will fail if the MAC is in TX drain */
-       if (FALCON_REV(efx) >= FALCON_REV_B0) {
+       if (falcon_rev(efx) >= FALCON_REV_B0) {
                efx_oword_t temp;
                falcon_read(efx, &temp, MAC0_CTRL_REG_KER);
                if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0))
···
 static void falcon_mdio_write(struct net_device *net_dev, int phy_id,
                              int addr, int value)
 {
-       struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+       struct efx_nic *efx = net_dev->priv;
        unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK;
        efx_oword_t reg;
···
  * could be read, -1 will be returned. */
 static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr)
 {
-       struct efx_nic *efx = (struct efx_nic *)net_dev->priv;
+       struct efx_nic *efx = net_dev->priv;
        unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK;
        efx_oword_t reg;
        int value = -1;
···
        falcon_init_mdio(&efx->mii);

        /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */
-       if (FALCON_REV(efx) >= FALCON_REV_B0)
+       if (falcon_rev(efx) >= FALCON_REV_B0)
                efx->flow_control = EFX_FC_RX | EFX_FC_TX;
        else
                efx->flow_control = EFX_FC_RX;
···
                return -ENODEV;
        }

-       switch (FALCON_REV(efx)) {
+       switch (falcon_rev(efx)) {
        case FALCON_REV_A0:
        case 0xff:
                EFX_ERR(efx, "Falcon rev A0 not supported\n");
···
                break;

        default:
-               EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx));
+               EFX_ERR(efx, "Unknown Falcon rev %d\n", falcon_rev(efx));
                return -ENODEV;
        }
···

        /* Allocate storage for hardware specific data */
        nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
-       efx->nic_data = (void *) nic_data;
+       efx->nic_data = nic_data;

        /* Determine number of ports etc. */
        rc = falcon_probe_nic_variant(efx);
···
  */
 int falcon_init_nic(struct efx_nic *efx)
 {
-       struct falcon_nic_data *data;
        efx_oword_t temp;
        unsigned thresh;
        int rc;
-
-       data = (struct falcon_nic_data *)efx->nic_data;

        /* Set up the address region register. This is only needed
         * for the B0 FPGA, but since we are just pushing in the
···

        /* Set number of RSS queues for receive path. */
        falcon_read(efx, &temp, RX_FILTER_CTL_REG);
-       if (FALCON_REV(efx) >= FALCON_REV_B0)
+       if (falcon_rev(efx) >= FALCON_REV_B0)
                EFX_SET_OWORD_FIELD(temp, NUM_KER, 0);
        else
                EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1);
···
        /* Prefetch threshold 2 => fetch when descriptor cache half empty */
        EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2);
        /* Squash TX of packets of 16 bytes or less */
-       if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
+       if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
                EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1);
        falcon_write(efx, &temp, TX_CFG2_REG_KER);
···
        if (EFX_WORKAROUND_7575(efx))
                EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
                                        (3 * 4096) / 32);
-       if (FALCON_REV(efx) >= FALCON_REV_B0)
+       if (falcon_rev(efx) >= FALCON_REV_B0)
                EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);

        /* RX FIFO flow control thresholds */
···
        falcon_write(efx, &temp, RX_CFG_REG_KER);

        /* Set destination of both TX and RX Flush events */
-       if (FALCON_REV(efx) >= FALCON_REV_B0) {
+       if (falcon_rev(efx) >= FALCON_REV_B0) {
                EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0);
                falcon_write(efx, &temp, DP_CTRL_REG);
        }
···

        falcon_free_buffer(efx, &efx->irq_status);

-       (void) falcon_reset_hw(efx, RESET_TYPE_ALL);
+       falcon_reset_hw(efx, RESET_TYPE_ALL);

        /* Release the second function after the reset */
        if (nic_data->pci_dev2) {
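The FALCON_DMA_MASK change works because DMA masks are u64 regardless of BITS_PER_LONG, so no 32/64-bit special case is needed and DMA_BIT_MASK(46) yields the same constant the old definitions spelled out by hand. For reference, the generic helper, as defined in <linux/dma-mapping.h> of this era:

        /* all-ones mask of the low n bits, computed in 64-bit
         * arithmetic; DMA_BIT_MASK(46) == 0x00003fffffffffffULL */
        #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))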