Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
drivers/net/benet/be_cmds.h
include/linux/sysctl.h

+1110 -558
+1 -1
drivers/isdn/hardware/mISDN/hfcmulti.c
··· 3152 3152 hfcmulti_pcm(struct hfc_multi *hc, int ch, int slot_tx, int bank_tx, 3153 3153 int slot_rx, int bank_rx) 3154 3154 { 3155 - if (slot_rx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) { 3155 + if (slot_tx < 0 || slot_rx < 0 || bank_tx < 0 || bank_rx < 0) { 3156 3156 /* disable PCM */ 3157 3157 mode_hfcmulti(hc, ch, hc->chan[ch].protocol, -1, 0, -1, 0); 3158 3158 return;
+2 -2
drivers/net/3c507.c
··· 56 56 #include <linux/errno.h> 57 57 #include <linux/netdevice.h> 58 58 #include <linux/etherdevice.h> 59 + #include <linux/if_ether.h> 59 60 #include <linux/skbuff.h> 60 61 #include <linux/slab.h> 61 62 #include <linux/init.h> ··· 735 734 memcpy_toio(lp->base, init_words + 5, sizeof(init_words) - 10); 736 735 737 736 /* Fill in the station address. */ 738 - memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, 739 - sizeof(dev->dev_addr)); 737 + memcpy_toio(lp->base+SA_OFFSET, dev->dev_addr, ETH_ALEN); 740 738 741 739 /* The Tx-block list is written as needed. We just set up the values. */ 742 740 lp->tx_cmd_link = IDLELOOP + 4;
+1 -1
drivers/net/atarilance.c
··· 663 663 while (--i > 0) 664 664 if (DREG & CSR0_IDON) 665 665 break; 666 - if (i < 0 || (DREG & CSR0_ERR)) { 666 + if (i <= 0 || (DREG & CSR0_ERR)) { 667 667 DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n", 668 668 dev->name, i, DREG )); 669 669 DREG = CSR0_STOP;
+5 -2
drivers/net/atlx/atl2.c
··· 1959 1959 return -ENOMEM; 1960 1960 1961 1961 for (i = first_dword; i < last_dword; i++) { 1962 - if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) 1963 - return -EIO; 1962 + if (!atl2_read_eeprom(hw, i*4, &(eeprom_buff[i-first_dword]))) { 1963 + ret_val = -EIO; 1964 + goto free; 1965 + } 1964 1966 } 1965 1967 1966 1968 memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 3), 1967 1969 eeprom->len); 1970 + free: 1968 1971 kfree(eeprom_buff); 1969 1972 1970 1973 return ret_val;
+1
drivers/net/benet/be.h
··· 275 275 u32 tx_fc; /* Tx flow control */ 276 276 int link_speed; 277 277 u8 port_type; 278 + u8 transceiver; 278 279 }; 279 280 280 281 extern const struct ethtool_ops be_ethtool_ops;
+36
drivers/net/benet/be_cmds.c
··· 1479 1479 return status; 1480 1480 } 1481 1481 1482 + int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 1483 + u8 loopback_type, u8 enable) 1484 + { 1485 + struct be_mcc_wrb *wrb; 1486 + struct be_cmd_req_set_lmode *req; 1487 + int status; 1488 + 1489 + spin_lock_bh(&adapter->mcc_lock); 1490 + 1491 + wrb = wrb_from_mccq(adapter); 1492 + if (!wrb) { 1493 + status = -EBUSY; 1494 + goto err; 1495 + } 1496 + 1497 + req = embedded_payload(wrb); 1498 + 1499 + be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, 1500 + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE); 1501 + 1502 + be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 1503 + OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, 1504 + sizeof(*req)); 1505 + 1506 + req->src_port = port_num; 1507 + req->dest_port = port_num; 1508 + req->loopback_type = loopback_type; 1509 + req->loopback_state = enable; 1510 + 1511 + status = be_mcc_notify_wait(adapter); 1512 + err: 1513 + spin_unlock_bh(&adapter->mcc_lock); 1514 + return status; 1515 + } 1516 + 1482 1517 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 1483 1518 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) 1484 1519 { ··· 1536 1501 1537 1502 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 1538 1503 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req)); 1504 + req->hdr.timeout = 4; 1539 1505 1540 1506 req->pattern = cpu_to_le64(pattern); 1541 1507 req->src_port = cpu_to_le32(port_num);
+17
drivers/net/benet/be_cmds.h
··· 156 156 157 157 #define OPCODE_LOWLEVEL_HOST_DDR_DMA 17 158 158 #define OPCODE_LOWLEVEL_LOOPBACK_TEST 18 159 + #define OPCODE_LOWLEVEL_SET_LOOPBACK_MODE 19 159 160 160 161 struct be_cmd_req_hdr { 161 162 u8 opcode; /* dword 0 */ ··· 823 822 u32 ticks_compl; 824 823 }; 825 824 825 + struct be_cmd_req_set_lmode { 826 + struct be_cmd_req_hdr hdr; 827 + u8 src_port; 828 + u8 dest_port; 829 + u8 loopback_type; 830 + u8 loopback_state; 831 + }; 832 + 833 + struct be_cmd_resp_set_lmode { 834 + struct be_cmd_resp_hdr resp_hdr; 835 + u8 rsvd0[4]; 836 + }; 837 + 826 838 /********************** DDR DMA test *********************/ 827 839 struct be_cmd_req_ddrdma_test { 828 840 struct be_cmd_req_hdr hdr; ··· 942 928 u32 byte_cnt, struct be_dma_mem *cmd); 943 929 extern int be_cmd_get_seeprom_data(struct be_adapter *adapter, 944 930 struct be_dma_mem *nonemb_cmd); 931 + extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 932 + u8 loopback_type, u8 enable); 933 +
+54 -23
drivers/net/benet/be_ethtool.c
··· 118 118 #define BE_MAC_LOOPBACK 0x0 119 119 #define BE_PHY_LOOPBACK 0x1 120 120 #define BE_ONE_PORT_EXT_LOOPBACK 0x2 121 + #define BE_NO_LOOPBACK 0xff 121 122 122 123 static void 123 124 be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) ··· 340 339 341 340 status = be_cmd_read_port_type(adapter, adapter->port_num, 342 341 &connector); 343 - switch (connector) { 344 - case 7: 345 - ecmd->port = PORT_FIBRE; 346 - break; 347 - default: 348 - ecmd->port = PORT_TP; 349 - break; 342 + if (!status) { 343 + switch (connector) { 344 + case 7: 345 + ecmd->port = PORT_FIBRE; 346 + ecmd->transceiver = XCVR_EXTERNAL; 347 + break; 348 + case 0: 349 + ecmd->port = PORT_TP; 350 + ecmd->transceiver = XCVR_EXTERNAL; 351 + break; 352 + default: 353 + ecmd->port = PORT_TP; 354 + ecmd->transceiver = XCVR_INTERNAL; 355 + break; 356 + } 357 + } else { 358 + ecmd->port = PORT_AUI; 359 + ecmd->transceiver = XCVR_INTERNAL; 350 360 } 351 361 352 362 /* Save for future use */ 353 363 adapter->link_speed = ecmd->speed; 354 364 adapter->port_type = ecmd->port; 365 + adapter->transceiver = ecmd->transceiver; 355 366 } else { 356 367 ecmd->speed = adapter->link_speed; 357 368 ecmd->port = adapter->port_type; 369 + ecmd->transceiver = adapter->transceiver; 358 370 } 359 371 360 372 ecmd->duplex = DUPLEX_FULL; 361 373 ecmd->autoneg = AUTONEG_DISABLE; 362 - ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP); 363 374 ecmd->phy_address = adapter->port_num; 364 - ecmd->transceiver = XCVR_INTERNAL; 375 + switch (ecmd->port) { 376 + case PORT_FIBRE: 377 + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE); 378 + break; 379 + case PORT_TP: 380 + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP); 381 + break; 382 + case PORT_AUI: 383 + ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_AUI); 384 + break; 385 + } 365 386 366 387 return 0; 367 388 } ··· 512 489 return ret; 513 490 } 514 491 492 + static u64 be_loopback_test(struct 
be_adapter *adapter, u8 loopback_type, 493 + u64 *status) 494 + { 495 + be_cmd_set_loopback(adapter, adapter->port_num, 496 + loopback_type, 1); 497 + *status = be_cmd_loopback_test(adapter, adapter->port_num, 498 + loopback_type, 1500, 499 + 2, 0xabc); 500 + be_cmd_set_loopback(adapter, adapter->port_num, 501 + BE_NO_LOOPBACK, 1); 502 + return *status; 503 + } 504 + 515 505 static void 516 506 be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) 517 507 { ··· 533 497 memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); 534 498 535 499 if (test->flags & ETH_TEST_FL_OFFLINE) { 536 - data[0] = be_cmd_loopback_test(adapter, adapter->port_num, 537 - BE_MAC_LOOPBACK, 1500, 538 - 2, 0xabc); 539 - if (data[0] != 0) 500 + if (be_loopback_test(adapter, BE_MAC_LOOPBACK, 501 + &data[0]) != 0) { 540 502 test->flags |= ETH_TEST_FL_FAILED; 541 - 542 - data[1] = be_cmd_loopback_test(adapter, adapter->port_num, 543 - BE_PHY_LOOPBACK, 1500, 544 - 2, 0xabc); 545 - if (data[1] != 0) 503 + } 504 + if (be_loopback_test(adapter, BE_PHY_LOOPBACK, 505 + &data[1]) != 0) { 546 506 test->flags |= ETH_TEST_FL_FAILED; 547 - 548 - data[2] = be_cmd_loopback_test(adapter, adapter->port_num, 549 - BE_ONE_PORT_EXT_LOOPBACK, 550 - 1500, 2, 0xabc); 551 - if (data[2] != 0) 507 + } 508 + if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, 509 + &data[2]) != 0) { 552 510 test->flags |= ETH_TEST_FL_FAILED; 511 + } 553 512 554 513 data[3] = be_test_ddr_dma(adapter); 555 514 if (data[3] != 0)
+2
drivers/net/bnx2x_main.c
··· 7593 7593 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) { 7594 7594 bnx2x_set_iscsi_eth_mac_addr(bp, 1); 7595 7595 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET; 7596 + bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, 7597 + CNIC_SB_ID(bp)); 7596 7598 } 7597 7599 mutex_unlock(&bp->cnic_mutex); 7598 7600 #endif
+1 -1
drivers/net/bonding/bond_3ad.c
··· 1580 1580 // check if any partner replys 1581 1581 if (best->is_individual) { 1582 1582 pr_warning("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n", 1583 - best->slave->dev->master->name); 1583 + best->slave ? best->slave->dev->master->name : "NULL"); 1584 1584 } 1585 1585 1586 1586 best->is_active = 1;
+1 -1
drivers/net/can/mcp251x.c
··· 990 990 goto error_tx_buf; 991 991 } 992 992 priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL); 993 - if (!priv->spi_tx_buf) { 993 + if (!priv->spi_rx_buf) { 994 994 ret = -ENOMEM; 995 995 goto error_rx_buf; 996 996 }
+1 -2
drivers/net/cs89x0.c
··· 1325 1325 write_irq(dev, lp->chip_type, dev->irq); 1326 1326 ret = request_irq(dev->irq, net_interrupt, 0, dev->name, dev); 1327 1327 if (ret) { 1328 - if (net_debug) 1329 - printk(KERN_DEBUG "cs89x0: request_irq(%d) failed\n", dev->irq); 1328 + printk(KERN_ERR "cs89x0: request_irq(%d) failed\n", dev->irq); 1330 1329 goto bad_out; 1331 1330 } 1332 1331 }
+3 -1
drivers/net/davinci_emac.c
··· 2711 2711 SET_ETHTOOL_OPS(ndev, &ethtool_ops); 2712 2712 netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); 2713 2713 2714 + clk_enable(emac_clk); 2715 + 2714 2716 /* register the network device */ 2715 2717 SET_NETDEV_DEV(ndev, &pdev->dev); 2716 2718 rc = register_netdev(ndev); ··· 2722 2720 goto netdev_reg_err; 2723 2721 } 2724 2722 2725 - clk_enable(emac_clk); 2726 2723 2727 2724 /* MII/Phy intialisation, mdio bus registration */ 2728 2725 emac_mii = mdiobus_alloc(); ··· 2761 2760 2762 2761 netdev_reg_err: 2763 2762 mdio_alloc_err: 2763 + clk_disable(emac_clk); 2764 2764 no_irq_res: 2765 2765 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2766 2766 release_mem_region(res->start, res->end - res->start + 1);
+2
drivers/net/e1000e/82571.c
··· 237 237 /* Set if manageability features are enabled. */ 238 238 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) 239 239 ? true : false; 240 + /* Adaptive IFS supported */ 241 + mac->adaptive_ifs = true; 240 242 241 243 /* check for link */ 242 244 switch (hw->phy.media_type) {
+2
drivers/net/e1000e/es2lan.c
··· 224 224 /* Set if manageability features are enabled. */ 225 225 mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) 226 226 ? true : false; 227 + /* Adaptive IFS not supported */ 228 + mac->adaptive_ifs = false; 227 229 228 230 /* check for link */ 229 231 switch (hw->phy.media_type) {
+1
drivers/net/e1000e/hw.h
··· 818 818 819 819 u8 forced_speed_duplex; 820 820 821 + bool adaptive_ifs; 821 822 bool arc_subsystem_valid; 822 823 bool autoneg; 823 824 bool autoneg_failed;
+2
drivers/net/e1000e/ich8lan.c
··· 454 454 mac->rar_entry_count--; 455 455 /* Set if manageability features are enabled. */ 456 456 mac->arc_subsystem_valid = true; 457 + /* Adaptive IFS supported */ 458 + mac->adaptive_ifs = true; 457 459 458 460 /* LED operations */ 459 461 switch (mac->type) {
+38 -16
drivers/net/e1000e/lib.c
··· 125 125 void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count) 126 126 { 127 127 u32 i; 128 + u8 mac_addr[ETH_ALEN] = {0}; 128 129 129 130 /* Setup the receive address */ 130 131 e_dbg("Programming MAC Address into RAR[0]\n"); ··· 134 133 135 134 /* Zero out the other (rar_entry_count - 1) receive addresses */ 136 135 e_dbg("Clearing RAR[1-%u]\n", rar_count-1); 137 - for (i = 1; i < rar_count; i++) { 138 - E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0); 139 - e1e_flush(); 140 - E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0); 141 - e1e_flush(); 142 - } 136 + for (i = 1; i < rar_count; i++) 137 + e1000e_rar_set(hw, mac_addr, i); 143 138 } 144 139 145 140 /** ··· 161 164 162 165 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8)); 163 166 164 - rar_high |= E1000_RAH_AV; 167 + /* If MAC address zero, no need to set the AV bit */ 168 + if (rar_low || rar_high) 169 + rar_high |= E1000_RAH_AV; 165 170 166 - E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low); 167 - E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high); 171 + /* 172 + * Some bridges will combine consecutive 32-bit writes into 173 + * a single burst write, which will malfunction on some parts. 174 + * The flushes avoid this. 
175 + */ 176 + ew32(RAL(index), rar_low); 177 + e1e_flush(); 178 + ew32(RAH(index), rar_high); 179 + e1e_flush(); 168 180 } 169 181 170 182 /** ··· 1615 1609 { 1616 1610 struct e1000_mac_info *mac = &hw->mac; 1617 1611 1612 + if (!mac->adaptive_ifs) { 1613 + e_dbg("Not in Adaptive IFS mode!\n"); 1614 + goto out; 1615 + } 1616 + 1618 1617 mac->current_ifs_val = 0; 1619 1618 mac->ifs_min_val = IFS_MIN; 1620 1619 mac->ifs_max_val = IFS_MAX; ··· 1628 1617 1629 1618 mac->in_ifs_mode = false; 1630 1619 ew32(AIT, 0); 1620 + out: 1621 + return; 1631 1622 } 1632 1623 1633 1624 /** ··· 1642 1629 void e1000e_update_adaptive(struct e1000_hw *hw) 1643 1630 { 1644 1631 struct e1000_mac_info *mac = &hw->mac; 1632 + 1633 + if (!mac->adaptive_ifs) { 1634 + e_dbg("Not in Adaptive IFS mode!\n"); 1635 + goto out; 1636 + } 1645 1637 1646 1638 if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) { 1647 1639 if (mac->tx_packet_delta > MIN_NUM_XMITS) { ··· 1668 1650 ew32(AIT, 0); 1669 1651 } 1670 1652 } 1653 + out: 1654 + return; 1671 1655 } 1672 1656 1673 1657 /** ··· 2307 2287 s32 ret_val, hdr_csum, csum; 2308 2288 u8 i, len; 2309 2289 2290 + hw->mac.tx_pkt_filtering = true; 2291 + 2310 2292 /* No manageability, no filtering */ 2311 2293 if (!e1000e_check_mng_mode(hw)) { 2312 2294 hw->mac.tx_pkt_filtering = false; 2313 - return 0; 2295 + goto out; 2314 2296 } 2315 2297 2316 2298 /* ··· 2320 2298 * reason, disable filtering. 2321 2299 */ 2322 2300 ret_val = e1000_mng_enable_host_if(hw); 2323 - if (ret_val != 0) { 2301 + if (ret_val) { 2324 2302 hw->mac.tx_pkt_filtering = false; 2325 - return ret_val; 2303 + goto out; 2326 2304 } 2327 2305 2328 2306 /* Read in the header. Length and offset are in dwords. */ ··· 2341 2319 */ 2342 2320 if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) { 2343 2321 hw->mac.tx_pkt_filtering = true; 2344 - return 1; 2322 + goto out; 2345 2323 } 2346 2324 2347 2325 /* Cookie area is valid, make the final check for filtering. 
*/ 2348 2326 if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) { 2349 2327 hw->mac.tx_pkt_filtering = false; 2350 - return 0; 2328 + goto out; 2351 2329 } 2352 2330 2353 - hw->mac.tx_pkt_filtering = true; 2354 - return 1; 2331 + out: 2332 + return hw->mac.tx_pkt_filtering; 2355 2333 } 2356 2334 2357 2335 /**
+16 -14
drivers/net/e1000e/netdev.c
··· 3315 3315 if ((hw->phy.type == e1000_phy_82578) || 3316 3316 (hw->phy.type == e1000_phy_82577)) { 3317 3317 e1e_rphy(hw, HV_SCC_UPPER, &phy_data); 3318 - e1e_rphy(hw, HV_SCC_LOWER, &phy_data); 3319 - adapter->stats.scc += phy_data; 3318 + if (!e1e_rphy(hw, HV_SCC_LOWER, &phy_data)) 3319 + adapter->stats.scc += phy_data; 3320 3320 3321 3321 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data); 3322 - e1e_rphy(hw, HV_ECOL_LOWER, &phy_data); 3323 - adapter->stats.ecol += phy_data; 3322 + if (!e1e_rphy(hw, HV_ECOL_LOWER, &phy_data)) 3323 + adapter->stats.ecol += phy_data; 3324 3324 3325 3325 e1e_rphy(hw, HV_MCC_UPPER, &phy_data); 3326 - e1e_rphy(hw, HV_MCC_LOWER, &phy_data); 3327 - adapter->stats.mcc += phy_data; 3326 + if (!e1e_rphy(hw, HV_MCC_LOWER, &phy_data)) 3327 + adapter->stats.mcc += phy_data; 3328 3328 3329 3329 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data); 3330 - e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data); 3331 - adapter->stats.latecol += phy_data; 3330 + if (!e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data)) 3331 + adapter->stats.latecol += phy_data; 3332 3332 3333 3333 e1e_rphy(hw, HV_DC_UPPER, &phy_data); 3334 - e1e_rphy(hw, HV_DC_LOWER, &phy_data); 3335 - adapter->stats.dc += phy_data; 3334 + if (!e1e_rphy(hw, HV_DC_LOWER, &phy_data)) 3335 + adapter->stats.dc += phy_data; 3336 3336 } else { 3337 3337 adapter->stats.scc += er32(SCC); 3338 3338 adapter->stats.ecol += er32(ECOL); ··· 3360 3360 if ((hw->phy.type == e1000_phy_82578) || 3361 3361 (hw->phy.type == e1000_phy_82577)) { 3362 3362 e1e_rphy(hw, HV_COLC_UPPER, &phy_data); 3363 - e1e_rphy(hw, HV_COLC_LOWER, &phy_data); 3364 - hw->mac.collision_delta = phy_data; 3363 + if (!e1e_rphy(hw, HV_COLC_LOWER, &phy_data)) 3364 + hw->mac.collision_delta = phy_data; 3365 3365 } else { 3366 3366 hw->mac.collision_delta = er32(COLC); 3367 3367 } ··· 3372 3372 if ((hw->phy.type == e1000_phy_82578) || 3373 3373 (hw->phy.type == e1000_phy_82577)) { 3374 3374 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data); 3375 - e1e_rphy(hw, HV_TNCRS_LOWER, 
&phy_data); 3376 - adapter->stats.tncrs += phy_data; 3375 + if (!e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data)) 3376 + adapter->stats.tncrs += phy_data; 3377 3377 } else { 3378 3378 if ((hw->mac.type != e1000_82574) && 3379 3379 (hw->mac.type != e1000_82583)) ··· 4674 4674 4675 4675 pci_set_power_state(pdev, PCI_D0); 4676 4676 pci_restore_state(pdev); 4677 + pci_save_state(pdev); 4677 4678 e1000e_disable_l1aspm(pdev); 4678 4679 4679 4680 err = pci_enable_device_mem(pdev); ··· 4826 4825 } else { 4827 4826 pci_set_master(pdev); 4828 4827 pci_restore_state(pdev); 4828 + pci_save_state(pdev); 4829 4829 4830 4830 pci_enable_wake(pdev, PCI_D3hot, 0); 4831 4831 pci_enable_wake(pdev, PCI_D3cold, 0);
+23 -7
drivers/net/fsl_pq_mdio.c
··· 46 46 #include "gianfar.h" 47 47 #include "fsl_pq_mdio.h" 48 48 49 + struct fsl_pq_mdio_priv { 50 + void __iomem *map; 51 + struct fsl_pq_mdio __iomem *regs; 52 + }; 53 + 49 54 /* 50 55 * Write value to the PHY at mii_id at register regnum, 51 56 * on the bus attached to the local interface, which may be different from the ··· 110 105 111 106 static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus) 112 107 { 113 - return (void __iomem __force *)bus->priv; 108 + struct fsl_pq_mdio_priv *priv = bus->priv; 109 + 110 + return priv->regs; 114 111 } 115 112 116 113 /* ··· 273 266 { 274 267 struct device_node *np = ofdev->node; 275 268 struct device_node *tbi; 269 + struct fsl_pq_mdio_priv *priv; 276 270 struct fsl_pq_mdio __iomem *regs = NULL; 277 271 void __iomem *map; 278 272 u32 __iomem *tbipa; ··· 282 274 u64 addr = 0, size = 0; 283 275 int err = 0; 284 276 277 + priv = kzalloc(sizeof(*priv), GFP_KERNEL); 278 + if (!priv) 279 + return -ENOMEM; 280 + 285 281 new_bus = mdiobus_alloc(); 286 282 if (NULL == new_bus) 287 - return -ENOMEM; 283 + goto err_free_priv; 288 284 289 285 new_bus->name = "Freescale PowerQUICC MII Bus", 290 286 new_bus->read = &fsl_pq_mdio_read, 291 287 new_bus->write = &fsl_pq_mdio_write, 292 288 new_bus->reset = &fsl_pq_mdio_reset, 289 + new_bus->priv = priv; 293 290 fsl_pq_mdio_bus_name(new_bus->id, np); 294 291 295 292 /* Set the PHY base address */ ··· 304 291 err = -ENOMEM; 305 292 goto err_free_bus; 306 293 } 294 + priv->map = map; 307 295 308 296 if (of_device_is_compatible(np, "fsl,gianfar-mdio") || 309 297 of_device_is_compatible(np, "fsl,gianfar-tbi") || ··· 312 298 of_device_is_compatible(np, "ucc_geth_phy")) 313 299 map -= offsetof(struct fsl_pq_mdio, miimcfg); 314 300 regs = map; 315 - 316 - new_bus->priv = (void __force *)regs; 301 + priv->regs = regs; 317 302 318 303 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); 319 304 ··· 405 392 err_free_irqs: 406 393 kfree(new_bus->irq); 407 394 
err_unmap_regs: 408 - iounmap(regs); 395 + iounmap(priv->map); 409 396 err_free_bus: 410 397 kfree(new_bus); 411 - 398 + err_free_priv: 399 + kfree(priv); 412 400 return err; 413 401 } 414 402 ··· 418 404 { 419 405 struct device *device = &ofdev->dev; 420 406 struct mii_bus *bus = dev_get_drvdata(device); 407 + struct fsl_pq_mdio_priv *priv = bus->priv; 421 408 422 409 mdiobus_unregister(bus); 423 410 424 411 dev_set_drvdata(device, NULL); 425 412 426 - iounmap(fsl_pq_mdio_get_regs(bus)); 413 + iounmap(priv->map); 427 414 bus->priv = NULL; 428 415 mdiobus_free(bus); 416 + kfree(priv); 429 417 430 418 return 0; 431 419 }
+4 -9
drivers/net/gianfar.c
··· 143 143 static void gfar_clear_exact_match(struct net_device *dev); 144 144 static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 145 145 static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 146 - u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb); 147 146 148 147 MODULE_AUTHOR("Freescale Semiconductor, Inc"); 149 148 MODULE_DESCRIPTION("Gianfar Ethernet Driver"); ··· 454 455 .ndo_set_multicast_list = gfar_set_multi, 455 456 .ndo_tx_timeout = gfar_timeout, 456 457 .ndo_do_ioctl = gfar_ioctl, 457 - .ndo_select_queue = gfar_select_queue, 458 458 .ndo_get_stats = gfar_get_stats, 459 459 .ndo_vlan_rx_register = gfar_vlan_rx_register, 460 460 .ndo_set_mac_address = eth_mac_addr, ··· 504 506 return priv->vlgrp || priv->rx_csum_enable; 505 507 } 506 508 507 - u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb) 508 - { 509 - return skb_get_queue_mapping(skb); 510 - } 511 509 static void free_tx_pointers(struct gfar_private *priv) 512 510 { 513 511 int i = 0; ··· 2464 2470 fcb = (struct rxfcb *)skb->data; 2465 2471 2466 2472 /* Remove the FCB from the skb */ 2467 - skb_set_queue_mapping(skb, fcb->rq); 2468 2473 /* Remove the padded bytes, if there are any */ 2469 - if (amount_pull) 2474 + if (amount_pull) { 2475 + skb_record_rx_queue(skb, fcb->rq); 2470 2476 skb_pull(skb, amount_pull); 2477 + } 2471 2478 2472 2479 if (priv->rx_csum_enable) 2473 2480 gfar_rx_checksum(skb, fcb); ··· 2549 2554 /* Remove the FCS from the packet length */ 2550 2555 skb_put(skb, pkt_len); 2551 2556 rx_queue->stats.rx_bytes += pkt_len; 2552 - 2557 + skb_record_rx_queue(skb, rx_queue->qindex); 2553 2558 gfar_process_frame(dev, skb, amount_pull); 2554 2559 2555 2560 } else {
+3 -1
drivers/net/hamradio/bpqether.c
··· 248 248 { 249 249 unsigned char *ptr; 250 250 struct bpqdev *bpq; 251 + struct net_device *orig_dev; 251 252 int size; 252 253 253 254 /* ··· 283 282 284 283 bpq = netdev_priv(dev); 285 284 285 + orig_dev = dev; 286 286 if ((dev = bpq_get_ether_dev(dev)) == NULL) { 287 - dev->stats.tx_dropped++; 287 + orig_dev->stats.tx_dropped++; 288 288 kfree_skb(skb); 289 289 return NETDEV_TX_OK; 290 290 }
+2 -1
drivers/net/ibmlana.c
··· 87 87 #include <linux/module.h> 88 88 #include <linux/netdevice.h> 89 89 #include <linux/etherdevice.h> 90 + #include <linux/if_ether.h> 90 91 #include <linux/skbuff.h> 91 92 #include <linux/bitops.h> 92 93 ··· 989 988 990 989 /* copy out MAC address */ 991 990 992 - for (z = 0; z < sizeof(dev->dev_addr); z++) 991 + for (z = 0; z < ETH_ALEN; z++) 993 992 dev->dev_addr[z] = inb(dev->base_addr + MACADDRPROM + z); 994 993 995 994 /* print config */
+1 -3
drivers/net/igb/e1000_82575.c
··· 1096 1096 hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg); 1097 1097 } else { 1098 1098 /* Set PCS register for forced link */ 1099 - reg |= E1000_PCS_LCTL_FSD | /* Force Speed */ 1100 - E1000_PCS_LCTL_FORCE_LINK | /* Force Link */ 1101 - E1000_PCS_LCTL_FLV_LINK_UP; /* Force link value up */ 1099 + reg |= E1000_PCS_LCTL_FSD; /* Force Speed */ 1102 1100 1103 1101 hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg); 1104 1102 }
-9
drivers/net/igb/e1000_phy.c
··· 457 457 phy_data |= I82580_CFG_ENABLE_DOWNSHIFT; 458 458 459 459 ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data); 460 - if (ret_val) 461 - goto out; 462 - 463 - /* Set number of link attempts before downshift */ 464 - ret_val = phy->ops.read_reg(hw, I82580_CTRL_REG, &phy_data); 465 - if (ret_val) 466 - goto out; 467 - phy_data &= ~I82580_CTRL_DOWNSHIFT_MASK; 468 - ret_val = phy->ops.write_reg(hw, I82580_CTRL_REG, phy_data); 469 460 470 461 out: 471 462 return ret_val;
+1 -1
drivers/net/igb/igb_ethtool.c
··· 1795 1795 /* dual port cards only support WoL on port A from now on 1796 1796 * unless it was enabled in the eeprom for port B 1797 1797 * so exclude FUNC_1 ports from having WoL enabled */ 1798 - if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1 && 1798 + if ((rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) && 1799 1799 !adapter->eeprom_wol) { 1800 1800 wol->supported = 0; 1801 1801 break;
+2 -7
drivers/net/igb/igb_main.c
··· 1306 1306 hwm = min(((pba << 10) * 9 / 10), 1307 1307 ((pba << 10) - 2 * adapter->max_frame_size)); 1308 1308 1309 - if (mac->type < e1000_82576) { 1310 - fc->high_water = hwm & 0xFFF8; /* 8-byte granularity */ 1311 - fc->low_water = fc->high_water - 8; 1312 - } else { 1313 - fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ 1314 - fc->low_water = fc->high_water - 16; 1315 - } 1309 + fc->high_water = hwm & 0xFFF0; /* 16-byte granularity */ 1310 + fc->low_water = fc->high_water - 16; 1316 1311 fc->pause_time = 0xFFFF; 1317 1312 fc->send_xon = 1; 1318 1313 fc->current_mode = fc->requested_mode;
+2 -1
drivers/net/igbvf/netdev.c
··· 2759 2759 err = hw->mac.ops.reset_hw(hw); 2760 2760 if (err) { 2761 2761 dev_info(&pdev->dev, 2762 - "PF still in reset state, assigning new address\n"); 2762 + "PF still in reset state, assigning new address." 2763 + " Is the PF interface up?\n"); 2763 2764 random_ether_addr(hw->mac.addr); 2764 2765 } else { 2765 2766 err = hw->mac.ops.read_mac_addr(hw);
+12 -2
drivers/net/ixgbe/ixgbe_main.c
··· 305 305 int reg_idx = tx_ring->reg_idx; 306 306 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 307 307 308 - if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 308 + switch (adapter->hw.mac.type) { 309 + case ixgbe_mac_82598EB: 309 310 tc = reg_idx >> 2; 310 311 txoff = IXGBE_TFCS_TXOFF0; 311 - } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 312 + break; 313 + case ixgbe_mac_82599EB: 312 314 tc = 0; 313 315 txoff = IXGBE_TFCS_TXOFF; 314 316 if (dcb_i == 8) { ··· 329 327 tc += (reg_idx - 96) >> 4; 330 328 } 331 329 } 330 + break; 331 + default: 332 + tc = 0; 332 333 } 333 334 txoff <<= tc; 334 335 } ··· 4563 4558 4564 4559 pci_set_power_state(pdev, PCI_D0); 4565 4560 pci_restore_state(pdev); 4561 + /* 4562 + * pci_restore_state clears dev->state_saved so call 4563 + * pci_save_state to restore it. 4564 + */ 4565 + pci_save_state(pdev); 4566 4566 4567 4567 err = pci_enable_device_mem(pdev); 4568 4568 if (err) {
+1 -1
drivers/net/ll_temac_main.c
··· 134 134 struct sk_buff *skb; 135 135 int i; 136 136 137 - lp->rx_skb = kzalloc(sizeof(struct sk_buff)*RX_BD_NUM, GFP_KERNEL); 137 + lp->rx_skb = kzalloc(sizeof(*lp->rx_skb) * RX_BD_NUM, GFP_KERNEL); 138 138 /* allocate the tx and rx ring buffer descriptors. */ 139 139 /* returns a virtual addres and a physical address. */ 140 140 lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
+4 -2
drivers/net/mv643xx_eth.c
··· 656 656 struct sk_buff *skb; 657 657 int rx; 658 658 struct rx_desc *rx_desc; 659 + int size; 659 660 660 661 skb = __skb_dequeue(&mp->rx_recycle); 661 662 if (skb == NULL) ··· 679 678 680 679 rx_desc = rxq->rx_desc_area + rx; 681 680 681 + size = skb->end - skb->data; 682 682 rx_desc->buf_ptr = dma_map_single(mp->dev->dev.parent, 683 - skb->data, mp->skb_size, 683 + skb->data, size, 684 684 DMA_FROM_DEVICE); 685 - rx_desc->buf_size = mp->skb_size; 685 + rx_desc->buf_size = size; 686 686 rxq->rx_skb[rx] = skb; 687 687 wmb(); 688 688 rx_desc->cmd_sts = BUFFER_OWNED_BY_DMA | RX_ENABLE_INTERRUPT;
+2 -2
drivers/net/netxen/netxen_nic.h
··· 53 53 54 54 #define _NETXEN_NIC_LINUX_MAJOR 4 55 55 #define _NETXEN_NIC_LINUX_MINOR 0 56 - #define _NETXEN_NIC_LINUX_SUBVERSION 65 57 - #define NETXEN_NIC_LINUX_VERSIONID "4.0.65" 56 + #define _NETXEN_NIC_LINUX_SUBVERSION 72 57 + #define NETXEN_NIC_LINUX_VERSIONID "4.0.72" 58 58 59 59 #define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 60 60 #define _major(v) (((v) >> 24) & 0xff)
+65 -124
drivers/net/netxen/netxen_nic_ethtool.c
··· 66 66 67 67 #define NETXEN_NIC_TEST_LEN ARRAY_SIZE(netxen_nic_gstrings_test) 68 68 69 - #define NETXEN_NIC_REGS_COUNT 42 69 + #define NETXEN_NIC_REGS_COUNT 30 70 70 #define NETXEN_NIC_REGS_LEN (NETXEN_NIC_REGS_COUNT * sizeof(__le32)) 71 71 #define NETXEN_MAX_EEPROM_LEN 1024 72 72 ··· 312 312 return NETXEN_NIC_REGS_LEN; 313 313 } 314 314 315 - struct netxen_niu_regs { 316 - __u32 reg[NETXEN_NIC_REGS_COUNT]; 317 - }; 318 - 319 - static struct netxen_niu_regs niu_registers[] = { 320 - { 321 - /* GB Mode */ 322 - { 323 - NETXEN_NIU_GB_SERDES_RESET, 324 - NETXEN_NIU_GB0_MII_MODE, 325 - NETXEN_NIU_GB1_MII_MODE, 326 - NETXEN_NIU_GB2_MII_MODE, 327 - NETXEN_NIU_GB3_MII_MODE, 328 - NETXEN_NIU_GB0_GMII_MODE, 329 - NETXEN_NIU_GB1_GMII_MODE, 330 - NETXEN_NIU_GB2_GMII_MODE, 331 - NETXEN_NIU_GB3_GMII_MODE, 332 - NETXEN_NIU_REMOTE_LOOPBACK, 333 - NETXEN_NIU_GB0_HALF_DUPLEX, 334 - NETXEN_NIU_GB1_HALF_DUPLEX, 335 - NETXEN_NIU_RESET_SYS_FIFOS, 336 - NETXEN_NIU_GB_CRC_DROP, 337 - NETXEN_NIU_GB_DROP_WRONGADDR, 338 - NETXEN_NIU_TEST_MUX_CTL, 339 - 340 - NETXEN_NIU_GB_MAC_CONFIG_0(0), 341 - NETXEN_NIU_GB_MAC_CONFIG_1(0), 342 - NETXEN_NIU_GB_HALF_DUPLEX_CTRL(0), 343 - NETXEN_NIU_GB_MAX_FRAME_SIZE(0), 344 - NETXEN_NIU_GB_TEST_REG(0), 345 - NETXEN_NIU_GB_MII_MGMT_CONFIG(0), 346 - NETXEN_NIU_GB_MII_MGMT_COMMAND(0), 347 - NETXEN_NIU_GB_MII_MGMT_ADDR(0), 348 - NETXEN_NIU_GB_MII_MGMT_CTRL(0), 349 - NETXEN_NIU_GB_MII_MGMT_STATUS(0), 350 - NETXEN_NIU_GB_MII_MGMT_INDICATE(0), 351 - NETXEN_NIU_GB_INTERFACE_CTRL(0), 352 - NETXEN_NIU_GB_INTERFACE_STATUS(0), 353 - NETXEN_NIU_GB_STATION_ADDR_0(0), 354 - NETXEN_NIU_GB_STATION_ADDR_1(0), 355 - -1, 356 - } 357 - }, 358 - { 359 - /* XG Mode */ 360 - { 361 - NETXEN_NIU_XG_SINGLE_TERM, 362 - NETXEN_NIU_XG_DRIVE_HI, 363 - NETXEN_NIU_XG_DRIVE_LO, 364 - NETXEN_NIU_XG_DTX, 365 - NETXEN_NIU_XG_DEQ, 366 - NETXEN_NIU_XG_WORD_ALIGN, 367 - NETXEN_NIU_XG_RESET, 368 - NETXEN_NIU_XG_POWER_DOWN, 369 - NETXEN_NIU_XG_RESET_PLL, 370 - NETXEN_NIU_XG_SERDES_LOOPBACK, 371 
- NETXEN_NIU_XG_DO_BYTE_ALIGN, 372 - NETXEN_NIU_XG_TX_ENABLE, 373 - NETXEN_NIU_XG_RX_ENABLE, 374 - NETXEN_NIU_XG_STATUS, 375 - NETXEN_NIU_XG_PAUSE_THRESHOLD, 376 - NETXEN_NIU_XGE_CONFIG_0, 377 - NETXEN_NIU_XGE_CONFIG_1, 378 - NETXEN_NIU_XGE_IPG, 379 - NETXEN_NIU_XGE_STATION_ADDR_0_HI, 380 - NETXEN_NIU_XGE_STATION_ADDR_0_1, 381 - NETXEN_NIU_XGE_STATION_ADDR_1_LO, 382 - NETXEN_NIU_XGE_STATUS, 383 - NETXEN_NIU_XGE_MAX_FRAME_SIZE, 384 - NETXEN_NIU_XGE_PAUSE_FRAME_VALUE, 385 - NETXEN_NIU_XGE_TX_BYTE_CNT, 386 - NETXEN_NIU_XGE_TX_FRAME_CNT, 387 - NETXEN_NIU_XGE_RX_BYTE_CNT, 388 - NETXEN_NIU_XGE_RX_FRAME_CNT, 389 - NETXEN_NIU_XGE_AGGR_ERROR_CNT, 390 - NETXEN_NIU_XGE_MULTICAST_FRAME_CNT, 391 - NETXEN_NIU_XGE_UNICAST_FRAME_CNT, 392 - NETXEN_NIU_XGE_CRC_ERROR_CNT, 393 - NETXEN_NIU_XGE_OVERSIZE_FRAME_ERR, 394 - NETXEN_NIU_XGE_UNDERSIZE_FRAME_ERR, 395 - NETXEN_NIU_XGE_LOCAL_ERROR_CNT, 396 - NETXEN_NIU_XGE_REMOTE_ERROR_CNT, 397 - NETXEN_NIU_XGE_CONTROL_CHAR_CNT, 398 - NETXEN_NIU_XGE_PAUSE_FRAME_CNT, 399 - -1, 400 - } 401 - } 402 - }; 403 - 404 315 static void 405 316 netxen_nic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) 406 317 { 407 318 struct netxen_adapter *adapter = netdev_priv(dev); 408 - __u32 mode, *regs_buff = p; 409 - int i, window; 319 + struct netxen_recv_context *recv_ctx = &adapter->recv_ctx; 320 + struct nx_host_sds_ring *sds_ring; 321 + u32 *regs_buff = p; 322 + int ring, i = 0; 323 + int port = adapter->physical_port; 410 324 411 325 memset(p, 0, NETXEN_NIC_REGS_LEN); 326 + 412 327 regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) | 413 328 (adapter->pdev)->device; 414 - /* which mode */ 415 - regs_buff[0] = NXRD32(adapter, NETXEN_NIU_MODE); 416 - mode = regs_buff[0]; 417 329 418 - /* Common registers to all the modes */ 419 - regs_buff[2] = NXRD32(adapter, NETXEN_NIU_STRAP_VALUE_SAVE_HIGHER); 420 - /* GB/XGB Mode */ 421 - mode = (mode / 2) - 1; 422 - window = 0; 423 - if (mode <= 1) { 424 - for (i = 3; 
niu_registers[mode].reg[i - 3] != -1; i++) { 425 - /* GB: port specific registers */ 426 - if (mode == 0 && i >= 19) 427 - window = adapter->physical_port * 428 - NETXEN_NIC_PORT_WINDOW; 330 + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 331 + return; 429 332 430 - regs_buff[i] = NXRD32(adapter, 431 - niu_registers[mode].reg[i - 3] + window); 432 - } 333 + regs_buff[i++] = NXRD32(adapter, CRB_CMDPEG_STATE); 334 + regs_buff[i++] = NXRD32(adapter, CRB_RCVPEG_STATE); 335 + regs_buff[i++] = NXRD32(adapter, CRB_FW_CAPABILITIES_1); 336 + regs_buff[i++] = NXRDIO(adapter, adapter->crb_int_state_reg); 337 + regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_REF_COUNT); 338 + regs_buff[i++] = NXRD32(adapter, NX_CRB_DEV_STATE); 339 + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); 340 + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS1); 341 + regs_buff[i++] = NXRD32(adapter, NETXEN_PEG_HALT_STATUS2); 433 342 343 + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_0+0x3c); 344 + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_1+0x3c); 345 + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_2+0x3c); 346 + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_3+0x3c); 347 + 348 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 349 + 350 + regs_buff[i++] = NXRD32(adapter, NETXEN_CRB_PEG_NET_4+0x3c); 351 + i += 2; 352 + 353 + regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE_P3); 354 + regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer)); 355 + 356 + } else { 357 + i++; 358 + 359 + regs_buff[i++] = NXRD32(adapter, 360 + NETXEN_NIU_XGE_CONFIG_0+(0x10000*port)); 361 + regs_buff[i++] = NXRD32(adapter, 362 + NETXEN_NIU_XGE_CONFIG_1+(0x10000*port)); 363 + 364 + regs_buff[i++] = NXRD32(adapter, CRB_XG_STATE); 365 + regs_buff[i++] = NXRDIO(adapter, 366 + adapter->tx_ring->crb_cmd_consumer); 367 + } 368 + 369 + regs_buff[i++] = NXRDIO(adapter, adapter->tx_ring->crb_cmd_producer); 370 + 371 + regs_buff[i++] = NXRDIO(adapter, 372 + 
recv_ctx->rds_rings[0].crb_rcv_producer); 373 + regs_buff[i++] = NXRDIO(adapter, 374 + recv_ctx->rds_rings[1].crb_rcv_producer); 375 + 376 + regs_buff[i++] = adapter->max_sds_rings; 377 + 378 + for (ring = 0; ring < adapter->max_sds_rings; ring++) { 379 + sds_ring = &(recv_ctx->sds_rings[ring]); 380 + regs_buff[i++] = NXRDIO(adapter, 381 + sds_ring->crb_sts_consumer); 434 382 } 435 383 } 436 384 437 385 static u32 netxen_nic_test_link(struct net_device *dev) 438 386 { 439 387 struct netxen_adapter *adapter = netdev_priv(dev); 440 - __u32 status; 441 - int val; 388 + u32 val, port; 442 389 443 - /* read which mode */ 444 - if (adapter->ahw.port_type == NETXEN_NIC_GBE) { 445 - if (adapter->phy_read && 446 - adapter->phy_read(adapter, 447 - NETXEN_NIU_GB_MII_MGMT_ADDR_PHY_STATUS, 448 - &status) != 0) 449 - return -EIO; 450 - else { 451 - val = netxen_get_phy_link(status); 452 - return !val; 453 - } 454 - } else if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 390 + port = adapter->physical_port; 391 + if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 392 + val = NXRD32(adapter, CRB_XG_STATE_P3); 393 + val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val); 394 + return (val == XG_LINK_UP_P3) ? 0 : 1; 395 + } else { 455 396 val = NXRD32(adapter, CRB_XG_STATE); 397 + val = (val >> port*8) & 0xff; 456 398 return (val == XG_LINK_UP) ? 0 : 1; 457 399 } 458 - return -EIO; 459 400 } 460 401 461 402 static int
+4 -2
drivers/net/netxen/netxen_nic_hw.c
··· 345 345 void 346 346 netxen_pcie_sem_unlock(struct netxen_adapter *adapter, int sem) 347 347 { 348 - int val; 349 - val = NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); 348 + NXRD32(adapter, NETXEN_PCIE_REG(PCIE_SEM_UNLOCK(sem))); 350 349 } 351 350 352 351 int netxen_niu_xg_init_port(struct netxen_adapter *adapter, int port) ··· 689 690 LIST_HEAD(del_list); 690 691 struct list_head *head; 691 692 nx_mac_list_t *cur; 693 + 694 + if (adapter->is_up != NETXEN_ADAPTER_UP_MAGIC) 695 + return; 692 696 693 697 list_splice_tail_init(&adapter->mac_list, &del_list); 694 698
+3 -1
drivers/net/netxen/netxen_nic_init.c
··· 184 184 185 185 tx_ring = adapter->tx_ring; 186 186 vfree(tx_ring->cmd_buf_arr); 187 + kfree(tx_ring); 188 + adapter->tx_ring = NULL; 187 189 } 188 190 189 191 int netxen_alloc_sw_resources(struct netxen_adapter *adapter) ··· 787 785 if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED) 788 786 return 1; 789 787 790 - old_count = count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); 788 + old_count = NXRD32(adapter, NETXEN_PEG_ALIVE_COUNTER); 791 789 792 790 for (i = 0; i < 10; i++) { 793 791
+3 -7
drivers/net/netxen/netxen_nic_main.c
··· 342 342 if (!(first_boot & 0x4)) { 343 343 first_boot |= 0x4; 344 344 NXWR32(adapter, NETXEN_PCIE_REG(0x4), first_boot); 345 - first_boot = NXRD32(adapter, NETXEN_PCIE_REG(0x4)); 345 + NXRD32(adapter, NETXEN_PCIE_REG(0x4)); 346 346 } 347 347 348 348 /* This is the first boot after power up */ ··· 1952 1952 linkup = (val == XG_LINK_UP_P3); 1953 1953 } else { 1954 1954 val = NXRD32(adapter, CRB_XG_STATE); 1955 - if (adapter->ahw.port_type == NETXEN_NIC_GBE) 1956 - linkup = (val >> port) & 1; 1957 - else { 1958 - val = (val >> port*8) & 0xff; 1959 - linkup = (val == XG_LINK_UP); 1960 - } 1955 + val = (val >> port*8) & 0xff; 1956 + linkup = (val == XG_LINK_UP); 1961 1957 } 1962 1958 1963 1959 netxen_advert_link_change(adapter, linkup);
+1 -1
drivers/net/niu.c
··· 2844 2844 break; 2845 2845 udelay(1); 2846 2846 } 2847 - if (limit < 0) 2847 + if (limit <= 0) 2848 2848 return -ENODEV; 2849 2849 2850 2850 return 0;
-1
drivers/net/pcmcia/nmclan_cs.c
··· 1402 1402 for (i = 0; i < 8; i++) 1403 1403 printk(KERN_CONT " %02X", ladrf[i]); 1404 1404 printk(KERN_CONT "\n"); 1405 - } 1406 1405 #endif 1407 1406 } /* BuildLAF */ 1408 1407
+2 -2
drivers/net/pcmcia/pcnet_cs.c
··· 1741 1741 PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"), 1742 1742 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"), 1743 1743 PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"), 1744 - PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "PE520.cis"), 1744 + PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"), 1745 1745 PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"), 1746 1746 PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"), 1747 1747 PCMCIA_DEVICE_CIS_PROD_ID12("TAMARACK", "Ethernet", 0xcf434fba, 0x00b2e941, "cis/tamarack.cis"), ··· 1754 1754 MODULE_FIRMWARE("cis/PCMLM28.cis"); 1755 1755 MODULE_FIRMWARE("cis/DP83903.cis"); 1756 1756 MODULE_FIRMWARE("cis/LA-PCM.cis"); 1757 - MODULE_FIRMWARE("PE520.cis"); 1757 + MODULE_FIRMWARE("cis/PE520.cis"); 1758 1758 MODULE_FIRMWARE("cis/NE2K.cis"); 1759 1759 MODULE_FIRMWARE("cis/PE-200.cis"); 1760 1760 MODULE_FIRMWARE("cis/tamarack.cis");
+2 -1
drivers/net/pcnet32.c
··· 45 45 #include <linux/crc32.h> 46 46 #include <linux/netdevice.h> 47 47 #include <linux/etherdevice.h> 48 + #include <linux/if_ether.h> 48 49 #include <linux/skbuff.h> 49 50 #include <linux/spinlock.h> 50 51 #include <linux/moduleparam.h> ··· 1766 1765 1767 1766 /* if the ethernet address is not valid, force to 00:00:00:00:00:00 */ 1768 1767 if (!is_valid_ether_addr(dev->perm_addr)) 1769 - memset(dev->dev_addr, 0, sizeof(dev->dev_addr)); 1768 + memset(dev->dev_addr, 0, ETH_ALEN); 1770 1769 1771 1770 if (pcnet32_debug & NETIF_MSG_PROBE) { 1772 1771 printk(" %pM", dev->dev_addr);
+2 -2
drivers/net/phy/broadcom.c
··· 331 331 bool clk125en = true; 332 332 333 333 /* Abort if we are using an untested phy. */ 334 - if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 || 335 - BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 || 334 + if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 && 335 + BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 && 336 336 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M) 337 337 return; 338 338
+65 -9
drivers/net/phy/mdio_bus.c
··· 264 264 (phydev->phy_id & phydrv->phy_id_mask)); 265 265 } 266 266 267 + #ifdef CONFIG_PM 268 + 267 269 static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) 268 270 { 269 271 struct device_driver *drv = phydev->dev.driver; ··· 297 295 return true; 298 296 } 299 297 300 - /* Suspend and resume. Copied from platform_suspend and 301 - * platform_resume 302 - */ 303 - static int mdio_bus_suspend(struct device * dev, pm_message_t state) 298 + static int mdio_bus_suspend(struct device *dev) 304 299 { 305 300 struct phy_driver *phydrv = to_phy_driver(dev->driver); 306 301 struct phy_device *phydev = to_phy_device(dev); 307 302 303 + /* 304 + * We must stop the state machine manually, otherwise it stops out of 305 + * control, possibly with the phydev->lock held. Upon resume, netdev 306 + * may call phy routines that try to grab the same lock, and that may 307 + * lead to a deadlock. 308 + */ 309 + if (phydev->attached_dev) 310 + phy_stop_machine(phydev); 311 + 308 312 if (!mdio_bus_phy_may_suspend(phydev)) 309 313 return 0; 314 + 310 315 return phydrv->suspend(phydev); 311 316 } 312 317 313 - static int mdio_bus_resume(struct device * dev) 318 + static int mdio_bus_resume(struct device *dev) 314 319 { 315 320 struct phy_driver *phydrv = to_phy_driver(dev->driver); 316 321 struct phy_device *phydev = to_phy_device(dev); 322 + int ret; 317 323 318 324 if (!mdio_bus_phy_may_suspend(phydev)) 319 - return 0; 320 - return phydrv->resume(phydev); 325 + goto no_resume; 326 + 327 + ret = phydrv->resume(phydev); 328 + if (ret < 0) 329 + return ret; 330 + 331 + no_resume: 332 + if (phydev->attached_dev) 333 + phy_start_machine(phydev, NULL); 334 + 335 + return 0; 321 336 } 337 + 338 + static int mdio_bus_restore(struct device *dev) 339 + { 340 + struct phy_device *phydev = to_phy_device(dev); 341 + struct net_device *netdev = phydev->attached_dev; 342 + int ret; 343 + 344 + if (!netdev) 345 + return 0; 346 + 347 + ret = phy_init_hw(phydev); 348 + if (ret < 0) 349 + 
return ret; 350 + 351 + /* The PHY needs to renegotiate. */ 352 + phydev->link = 0; 353 + phydev->state = PHY_UP; 354 + 355 + phy_start_machine(phydev, NULL); 356 + 357 + return 0; 358 + } 359 + 360 + static struct dev_pm_ops mdio_bus_pm_ops = { 361 + .suspend = mdio_bus_suspend, 362 + .resume = mdio_bus_resume, 363 + .freeze = mdio_bus_suspend, 364 + .thaw = mdio_bus_resume, 365 + .restore = mdio_bus_restore, 366 + }; 367 + 368 + #define MDIO_BUS_PM_OPS (&mdio_bus_pm_ops) 369 + 370 + #else 371 + 372 + #define MDIO_BUS_PM_OPS NULL 373 + 374 + #endif /* CONFIG_PM */ 322 375 323 376 struct bus_type mdio_bus_type = { 324 377 .name = "mdio_bus", 325 378 .match = mdio_bus_match, 326 - .suspend = mdio_bus_suspend, 327 - .resume = mdio_bus_resume, 379 + .pm = MDIO_BUS_PM_OPS, 328 380 }; 329 381 EXPORT_SYMBOL(mdio_bus_type); 330 382
+15 -15
drivers/net/phy/phy_device.c
··· 378 378 } 379 379 EXPORT_SYMBOL(phy_disconnect); 380 380 381 + int phy_init_hw(struct phy_device *phydev) 382 + { 383 + int ret; 384 + 385 + if (!phydev->drv || !phydev->drv->config_init) 386 + return 0; 387 + 388 + ret = phy_scan_fixups(phydev); 389 + if (ret < 0) 390 + return ret; 391 + 392 + return phydev->drv->config_init(phydev); 393 + } 394 + 381 395 /** 382 396 * phy_attach_direct - attach a network device to a given PHY device pointer 383 397 * @dev: network device to attach ··· 439 425 /* Do initial configuration here, now that 440 426 * we have certain key parameters 441 427 * (dev_flags and interface) */ 442 - if (phydev->drv->config_init) { 443 - int err; 444 - 445 - err = phy_scan_fixups(phydev); 446 - 447 - if (err < 0) 448 - return err; 449 - 450 - err = phydev->drv->config_init(phydev); 451 - 452 - if (err < 0) 453 - return err; 454 - } 455 - 456 - return 0; 428 + return phy_init_hw(phydev); 457 429 } 458 430 EXPORT_SYMBOL(phy_attach_direct); 459 431
+1 -1
drivers/net/rrunner.c
··· 1293 1293 1294 1294 printk("Error code 0x%x\n", readl(&regs->Fail1)); 1295 1295 1296 - index = (((readl(&regs->EvtPrd) >> 8) & 0xff ) - 1) % EVT_RING_ENTRIES; 1296 + index = (((readl(&regs->EvtPrd) >> 8) & 0xff) - 1) % TX_RING_ENTRIES; 1297 1297 cons = rrpriv->dirty_tx; 1298 1298 printk("TX ring index %i, TX consumer %i\n", 1299 1299 index, cons);
+3 -3
drivers/net/sfc/efx.c
··· 741 741 742 742 EFX_LOG(efx, "create port\n"); 743 743 744 + if (phy_flash_cfg) 745 + efx->phy_mode = PHY_MODE_SPECIAL; 746 + 744 747 /* Connect up MAC/PHY operations table */ 745 748 rc = efx->type->probe_port(efx); 746 749 if (rc) 747 750 goto err; 748 - 749 - if (phy_flash_cfg) 750 - efx->phy_mode = PHY_MODE_SPECIAL; 751 751 752 752 /* Sanity check MAC address */ 753 753 if (is_valid_ether_addr(efx->mac_address)) {
+1
drivers/net/sfc/falcon.c
··· 925 925 926 926 static void falcon_remove_port(struct efx_nic *efx) 927 927 { 928 + efx->phy_op->remove(efx); 928 929 efx_nic_free_buffer(efx, &efx->stats_buffer); 929 930 } 930 931
+22 -16
drivers/net/sfc/falcon_xmac.c
··· 111 111 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK); 112 112 } 113 113 114 - /* Get status of XAUI link */ 115 - static bool falcon_xaui_link_ok(struct efx_nic *efx) 114 + static bool falcon_xgxs_link_ok(struct efx_nic *efx) 116 115 { 117 116 efx_oword_t reg; 118 117 bool align_done, link_ok = false; 119 118 int sync_status; 120 - 121 - if (LOOPBACK_INTERNAL(efx)) 122 - return true; 123 119 124 120 /* Read link status */ 125 121 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT); ··· 131 135 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES); 132 136 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT); 133 137 134 - /* If the link is up, then check the phy side of the xaui link */ 135 - if (efx->link_state.up && link_ok) 136 - if (efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) 137 - link_ok = efx_mdio_phyxgxs_lane_sync(efx); 138 - 139 138 return link_ok; 139 + } 140 + 141 + static bool falcon_xmac_link_ok(struct efx_nic *efx) 142 + { 143 + /* 144 + * Check MAC's XGXS link status except when using XGMII loopback 145 + * which bypasses the XGXS block. 146 + * If possible, check PHY's XGXS link status except when using 147 + * MAC loopback. 
148 + */ 149 + return (efx->loopback_mode == LOOPBACK_XGMII || 150 + falcon_xgxs_link_ok(efx)) && 151 + (!(efx->mdio.mmds & (1 << MDIO_MMD_PHYXS)) || 152 + LOOPBACK_INTERNAL(efx) || 153 + efx_mdio_phyxgxs_lane_sync(efx)); 140 154 } 141 155 142 156 void falcon_reconfigure_xmac_core(struct efx_nic *efx) ··· 251 245 252 246 253 247 /* Try to bring up the Falcon side of the Falcon-Phy XAUI link */ 254 - static bool falcon_check_xaui_link_up(struct efx_nic *efx, int tries) 248 + static bool falcon_xmac_link_ok_retry(struct efx_nic *efx, int tries) 255 249 { 256 - bool mac_up = falcon_xaui_link_ok(efx); 250 + bool mac_up = falcon_xmac_link_ok(efx); 257 251 258 252 if (LOOPBACK_MASK(efx) & LOOPBACKS_EXTERNAL(efx) & LOOPBACKS_WS || 259 253 efx_phy_mode_disabled(efx->phy_mode)) ··· 267 261 falcon_reset_xaui(efx); 268 262 udelay(200); 269 263 270 - mac_up = falcon_xaui_link_ok(efx); 264 + mac_up = falcon_xmac_link_ok(efx); 271 265 --tries; 272 266 } 273 267 ··· 278 272 279 273 static bool falcon_xmac_check_fault(struct efx_nic *efx) 280 274 { 281 - return !falcon_check_xaui_link_up(efx, 5); 275 + return !falcon_xmac_link_ok_retry(efx, 5); 282 276 } 283 277 284 278 static int falcon_reconfigure_xmac(struct efx_nic *efx) ··· 290 284 291 285 falcon_reconfigure_mac_wrapper(efx); 292 286 293 - efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 5); 287 + efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); 294 288 falcon_mask_status_intr(efx, true); 295 289 296 290 return 0; ··· 363 357 return; 364 358 365 359 falcon_mask_status_intr(efx, false); 366 - efx->xmac_poll_required = !falcon_check_xaui_link_up(efx, 1); 360 + efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 367 361 falcon_mask_status_intr(efx, true); 368 362 } 369 363
+35 -58
drivers/net/sfc/mcdi_phy.c
··· 304 304 305 305 static int efx_mcdi_phy_probe(struct efx_nic *efx) 306 306 { 307 - struct efx_mcdi_phy_cfg *phy_cfg; 307 + struct efx_mcdi_phy_cfg *phy_data; 308 + u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 309 + u32 caps; 308 310 int rc; 309 311 310 - /* TODO: Move phy_data initialisation to 311 - * phy_op->probe/remove, rather than init/fini */ 312 - phy_cfg = kzalloc(sizeof(*phy_cfg), GFP_KERNEL); 313 - if (phy_cfg == NULL) { 314 - rc = -ENOMEM; 315 - goto fail_alloc; 316 - } 317 - rc = efx_mcdi_get_phy_cfg(efx, phy_cfg); 312 + /* Initialise and populate phy_data */ 313 + phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 314 + if (phy_data == NULL) 315 + return -ENOMEM; 316 + 317 + rc = efx_mcdi_get_phy_cfg(efx, phy_data); 318 318 if (rc != 0) 319 319 goto fail; 320 320 321 - efx->phy_type = phy_cfg->type; 321 + /* Read initial link advertisement */ 322 + BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); 323 + rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, 324 + outbuf, sizeof(outbuf), NULL); 325 + if (rc) 326 + goto fail; 322 327 323 - efx->mdio_bus = phy_cfg->channel; 324 - efx->mdio.prtad = phy_cfg->port; 325 - efx->mdio.mmds = phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); 328 + /* Fill out nic state */ 329 + efx->phy_data = phy_data; 330 + efx->phy_type = phy_data->type; 331 + 332 + efx->mdio_bus = phy_data->channel; 333 + efx->mdio.prtad = phy_data->port; 334 + efx->mdio.mmds = phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22); 326 335 efx->mdio.mode_support = 0; 327 - if (phy_cfg->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) 336 + if (phy_data->mmd_mask & (1 << MC_CMD_MMD_CLAUSE22)) 328 337 efx->mdio.mode_support |= MDIO_SUPPORTS_C22; 329 - if (phy_cfg->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) 338 + if (phy_data->mmd_mask & ~(1 << MC_CMD_MMD_CLAUSE22)) 330 339 efx->mdio.mode_support |= MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 340 + 341 + caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); 342 + if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) 343 + efx->link_advertising = 344 + 
mcdi_to_ethtool_cap(phy_data->media, caps); 345 + else 346 + phy_data->forced_cap = caps; 331 347 332 348 /* Assert that we can map efx -> mcdi loopback modes */ 333 349 BUILD_BUG_ON(LOOPBACK_NONE != MC_CMD_LOOPBACK_NONE); ··· 380 364 /* The MC indicates that LOOPBACK_NONE is a valid loopback mode, 381 365 * but by convention we don't */ 382 366 efx->loopback_modes &= ~(1 << LOOPBACK_NONE); 383 - 384 - kfree(phy_cfg); 385 - 386 - return 0; 387 - 388 - fail: 389 - kfree(phy_cfg); 390 - fail_alloc: 391 - return rc; 392 - } 393 - 394 - static int efx_mcdi_phy_init(struct efx_nic *efx) 395 - { 396 - struct efx_mcdi_phy_cfg *phy_data; 397 - u8 outbuf[MC_CMD_GET_LINK_OUT_LEN]; 398 - u32 caps; 399 - int rc; 400 - 401 - phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 402 - if (phy_data == NULL) 403 - return -ENOMEM; 404 - 405 - rc = efx_mcdi_get_phy_cfg(efx, phy_data); 406 - if (rc != 0) 407 - goto fail; 408 - 409 - efx->phy_data = phy_data; 410 - 411 - BUILD_BUG_ON(MC_CMD_GET_LINK_IN_LEN != 0); 412 - rc = efx_mcdi_rpc(efx, MC_CMD_GET_LINK, NULL, 0, 413 - outbuf, sizeof(outbuf), NULL); 414 - if (rc) 415 - goto fail; 416 - 417 - caps = MCDI_DWORD(outbuf, GET_LINK_OUT_CAP); 418 - if (caps & (1 << MC_CMD_PHY_CAP_AN_LBN)) 419 - efx->link_advertising = 420 - mcdi_to_ethtool_cap(phy_data->media, caps); 421 - else 422 - phy_data->forced_cap = caps; 423 367 424 368 return 0; 425 369 ··· 480 504 return !efx_link_state_equal(&efx->link_state, &old_state); 481 505 } 482 506 483 - static void efx_mcdi_phy_fini(struct efx_nic *efx) 507 + static void efx_mcdi_phy_remove(struct efx_nic *efx) 484 508 { 485 509 struct efx_mcdi_phy_data *phy_data = efx->phy_data; 486 510 ··· 562 586 563 587 struct efx_phy_operations efx_mcdi_phy_ops = { 564 588 .probe = efx_mcdi_phy_probe, 565 - .init = efx_mcdi_phy_init, 589 + .init = efx_port_dummy_op_int, 566 590 .reconfigure = efx_mcdi_phy_reconfigure, 567 591 .poll = efx_mcdi_phy_poll, 568 - .fini = efx_mcdi_phy_fini, 592 + .fini = 
efx_port_dummy_op_void, 593 + .remove = efx_mcdi_phy_remove, 569 594 .get_settings = efx_mcdi_phy_get_settings, 570 595 .set_settings = efx_mcdi_phy_set_settings, 571 596 .run_tests = NULL,
+1
drivers/net/sfc/net_driver.h
··· 524 524 int (*probe) (struct efx_nic *efx); 525 525 int (*init) (struct efx_nic *efx); 526 526 void (*fini) (struct efx_nic *efx); 527 + void (*remove) (struct efx_nic *efx); 527 528 int (*reconfigure) (struct efx_nic *efx); 528 529 bool (*poll) (struct efx_nic *efx); 529 530 void (*get_settings) (struct efx_nic *efx,
+2
drivers/net/sfc/nic.c
··· 1576 1576 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1577 1577 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 1578 1578 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2); 1579 + /* Disable hardware watchdog which can misfire */ 1580 + EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff); 1579 1581 /* Squash TX of packets of 16 bytes or less */ 1580 1582 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) 1581 1583 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
+225 -13
drivers/net/sfc/qt202x_phy.c
··· 33 33 #define PCS_FW_HEARTBEAT_REG 0xd7ee 34 34 #define PCS_FW_HEARTB_LBN 0 35 35 #define PCS_FW_HEARTB_WIDTH 8 36 + #define PCS_FW_PRODUCT_CODE_1 0xd7f0 37 + #define PCS_FW_VERSION_1 0xd7f3 38 + #define PCS_FW_BUILD_1 0xd7f6 36 39 #define PCS_UC8051_STATUS_REG 0xd7fd 37 40 #define PCS_UC_STATUS_LBN 0 38 41 #define PCS_UC_STATUS_WIDTH 8 ··· 55 52 56 53 struct qt202x_phy_data { 57 54 enum efx_phy_mode phy_mode; 55 + bool bug17190_in_bad_state; 56 + unsigned long bug17190_timer; 57 + u32 firmware_ver; 58 58 }; 59 59 60 60 #define QT2022C2_MAX_RESET_TIME 500 61 61 #define QT2022C2_RESET_WAIT 10 62 62 63 - static int qt2025c_wait_reset(struct efx_nic *efx) 63 + #define QT2025C_MAX_HEARTB_TIME (5 * HZ) 64 + #define QT2025C_HEARTB_WAIT 100 65 + #define QT2025C_MAX_FWSTART_TIME (25 * HZ / 10) 66 + #define QT2025C_FWSTART_WAIT 100 67 + 68 + #define BUG17190_INTERVAL (2 * HZ) 69 + 70 + static int qt2025c_wait_heartbeat(struct efx_nic *efx) 64 71 { 65 - unsigned long timeout = jiffies + 10 * HZ; 72 + unsigned long timeout = jiffies + QT2025C_MAX_HEARTB_TIME; 66 73 int reg, old_counter = 0; 67 74 68 75 /* Wait for firmware heartbeat to start */ ··· 87 74 old_counter = counter; 88 75 else if (counter != old_counter) 89 76 break; 90 - if (time_after(jiffies, timeout)) 77 + if (time_after(jiffies, timeout)) { 78 + /* Some cables have EEPROMs that conflict with the 79 + * PHY's on-board EEPROM so it cannot load firmware */ 80 + EFX_ERR(efx, "If an SFP+ direct attach cable is" 81 + " connected, please check that it complies" 82 + " with the SFP+ specification\n"); 91 83 return -ETIMEDOUT; 92 - msleep(10); 84 + } 85 + msleep(QT2025C_HEARTB_WAIT); 93 86 } 87 + 88 + return 0; 89 + } 90 + 91 + static int qt2025c_wait_fw_status_good(struct efx_nic *efx) 92 + { 93 + unsigned long timeout = jiffies + QT2025C_MAX_FWSTART_TIME; 94 + int reg; 94 95 95 96 /* Wait for firmware status to look good */ 96 97 for (;;) { ··· 117 90 break; 118 91 if (time_after(jiffies, timeout)) 119 92 return 
-ETIMEDOUT; 93 + msleep(QT2025C_FWSTART_WAIT); 94 + } 95 + 96 + return 0; 97 + } 98 + 99 + static void qt2025c_restart_firmware(struct efx_nic *efx) 100 + { 101 + /* Restart microcontroller execution of firmware from RAM */ 102 + efx_mdio_write(efx, 3, 0xe854, 0x00c0); 103 + efx_mdio_write(efx, 3, 0xe854, 0x0040); 104 + msleep(50); 105 + } 106 + 107 + static int qt2025c_wait_reset(struct efx_nic *efx) 108 + { 109 + int rc; 110 + 111 + rc = qt2025c_wait_heartbeat(efx); 112 + if (rc != 0) 113 + return rc; 114 + 115 + rc = qt2025c_wait_fw_status_good(efx); 116 + if (rc == -ETIMEDOUT) { 117 + /* Bug 17689: occasionally heartbeat starts but firmware status 118 + * code never progresses beyond 0x00. Try again, once, after 119 + * restarting execution of the firmware image. */ 120 + EFX_LOG(efx, "bashing QT2025C microcontroller\n"); 121 + qt2025c_restart_firmware(efx); 122 + rc = qt2025c_wait_heartbeat(efx); 123 + if (rc != 0) 124 + return rc; 125 + rc = qt2025c_wait_fw_status_good(efx); 126 + } 127 + 128 + return rc; 129 + } 130 + 131 + static void qt2025c_firmware_id(struct efx_nic *efx) 132 + { 133 + struct qt202x_phy_data *phy_data = efx->phy_data; 134 + u8 firmware_id[9]; 135 + size_t i; 136 + 137 + for (i = 0; i < sizeof(firmware_id); i++) 138 + firmware_id[i] = efx_mdio_read(efx, MDIO_MMD_PCS, 139 + PCS_FW_PRODUCT_CODE_1 + i); 140 + EFX_INFO(efx, "QT2025C firmware %xr%d v%d.%d.%d.%d [20%02d-%02d-%02d]\n", 141 + (firmware_id[0] << 8) | firmware_id[1], firmware_id[2], 142 + firmware_id[3] >> 4, firmware_id[3] & 0xf, 143 + firmware_id[4], firmware_id[5], 144 + firmware_id[6], firmware_id[7], firmware_id[8]); 145 + phy_data->firmware_ver = ((firmware_id[3] & 0xf0) << 20) | 146 + ((firmware_id[3] & 0x0f) << 16) | 147 + (firmware_id[4] << 8) | firmware_id[5]; 148 + } 149 + 150 + static void qt2025c_bug17190_workaround(struct efx_nic *efx) 151 + { 152 + struct qt202x_phy_data *phy_data = efx->phy_data; 153 + 154 + /* The PHY can get stuck in a state where it reports 
PHY_XS and PMA/PMD 155 + * layers up, but PCS down (no block_lock). If we notice this state 156 + * persisting for a couple of seconds, we switch PMA/PMD loopback 157 + * briefly on and then off again, which is normally sufficient to 158 + * recover it. 159 + */ 160 + if (efx->link_state.up || 161 + !efx_mdio_links_ok(efx, MDIO_DEVS_PMAPMD | MDIO_DEVS_PHYXS)) { 162 + phy_data->bug17190_in_bad_state = false; 163 + return; 164 + } 165 + 166 + if (!phy_data->bug17190_in_bad_state) { 167 + phy_data->bug17190_in_bad_state = true; 168 + phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL; 169 + return; 170 + } 171 + 172 + if (time_after_eq(jiffies, phy_data->bug17190_timer)) { 173 + EFX_LOG(efx, "bashing QT2025C PMA/PMD\n"); 174 + efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, 175 + MDIO_PMA_CTRL1_LOOPBACK, true); 120 176 msleep(100); 177 + efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, 178 + MDIO_PMA_CTRL1_LOOPBACK, false); 179 + phy_data->bug17190_timer = jiffies + BUG17190_INTERVAL; 180 + } 181 + } 182 + 183 + static int qt2025c_select_phy_mode(struct efx_nic *efx) 184 + { 185 + struct qt202x_phy_data *phy_data = efx->phy_data; 186 + struct falcon_board *board = falcon_board(efx); 187 + int reg, rc, i; 188 + uint16_t phy_op_mode; 189 + 190 + /* Only 2.0.1.0+ PHY firmware supports the more optimal SFP+ 191 + * Self-Configure mode. Don't attempt any switching if we encounter 192 + * older firmware. */ 193 + if (phy_data->firmware_ver < 0x02000100) 194 + return 0; 195 + 196 + /* In general we will get optimal behaviour in "SFP+ Self-Configure" 197 + * mode; however, that powers down most of the PHY when no module is 198 + * present, so we must use a different mode (any fixed mode will do) 199 + * to be sure that loopbacks will work. */ 200 + phy_op_mode = (efx->loopback_mode == LOOPBACK_NONE) ? 
0x0038 : 0x0020; 201 + 202 + /* Only change mode if really necessary */ 203 + reg = efx_mdio_read(efx, 1, 0xc319); 204 + if ((reg & 0x0038) == phy_op_mode) 205 + return 0; 206 + EFX_LOG(efx, "Switching PHY to mode 0x%04x\n", phy_op_mode); 207 + 208 + /* This sequence replicates the register writes configured in the boot 209 + * EEPROM (including the differences between board revisions), except 210 + * that the operating mode is changed, and the PHY is prevented from 211 + * unnecessarily reloading the main firmware image again. */ 212 + efx_mdio_write(efx, 1, 0xc300, 0x0000); 213 + /* (Note: this portion of the boot EEPROM sequence, which bit-bashes 9 214 + * STOPs onto the firmware/module I2C bus to reset it, varies across 215 + * board revisions, as the bus is connected to different GPIO/LED 216 + * outputs on the PHY.) */ 217 + if (board->major == 0 && board->minor < 2) { 218 + efx_mdio_write(efx, 1, 0xc303, 0x4498); 219 + for (i = 0; i < 9; i++) { 220 + efx_mdio_write(efx, 1, 0xc303, 0x4488); 221 + efx_mdio_write(efx, 1, 0xc303, 0x4480); 222 + efx_mdio_write(efx, 1, 0xc303, 0x4490); 223 + efx_mdio_write(efx, 1, 0xc303, 0x4498); 224 + } 225 + } else { 226 + efx_mdio_write(efx, 1, 0xc303, 0x0920); 227 + efx_mdio_write(efx, 1, 0xd008, 0x0004); 228 + for (i = 0; i < 9; i++) { 229 + efx_mdio_write(efx, 1, 0xc303, 0x0900); 230 + efx_mdio_write(efx, 1, 0xd008, 0x0005); 231 + efx_mdio_write(efx, 1, 0xc303, 0x0920); 232 + efx_mdio_write(efx, 1, 0xd008, 0x0004); 233 + } 234 + efx_mdio_write(efx, 1, 0xc303, 0x4900); 235 + } 236 + efx_mdio_write(efx, 1, 0xc303, 0x4900); 237 + efx_mdio_write(efx, 1, 0xc302, 0x0004); 238 + efx_mdio_write(efx, 1, 0xc316, 0x0013); 239 + efx_mdio_write(efx, 1, 0xc318, 0x0054); 240 + efx_mdio_write(efx, 1, 0xc319, phy_op_mode); 241 + efx_mdio_write(efx, 1, 0xc31a, 0x0098); 242 + efx_mdio_write(efx, 3, 0x0026, 0x0e00); 243 + efx_mdio_write(efx, 3, 0x0027, 0x0013); 244 + efx_mdio_write(efx, 3, 0x0028, 0xa528); 245 + efx_mdio_write(efx, 1, 0xd006, 
0x000a); 246 + efx_mdio_write(efx, 1, 0xd007, 0x0009); 247 + efx_mdio_write(efx, 1, 0xd008, 0x0004); 248 + /* This additional write is not present in the boot EEPROM. It 249 + * prevents the PHY's internal boot ROM doing another pointless (and 250 + * slow) reload of the firmware image (the microcontroller's code 251 + * memory is not affected by the microcontroller reset). */ 252 + efx_mdio_write(efx, 1, 0xc317, 0x00ff); 253 + efx_mdio_write(efx, 1, 0xc300, 0x0002); 254 + msleep(20); 255 + 256 + /* Restart microcontroller execution of firmware from RAM */ 257 + qt2025c_restart_firmware(efx); 258 + 259 + /* Wait for the microcontroller to be ready again */ 260 + rc = qt2025c_wait_reset(efx); 261 + if (rc < 0) { 262 + EFX_ERR(efx, "PHY microcontroller reset during mode switch " 263 + "timed out\n"); 264 + return rc; 121 265 } 122 266 123 267 return 0; ··· 335 137 336 138 static int qt202x_phy_probe(struct efx_nic *efx) 337 139 { 140 + struct qt202x_phy_data *phy_data; 141 + 142 + phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL); 143 + if (!phy_data) 144 + return -ENOMEM; 145 + efx->phy_data = phy_data; 146 + phy_data->phy_mode = efx->phy_mode; 147 + phy_data->bug17190_in_bad_state = false; 148 + phy_data->bug17190_timer = 0; 149 + 338 150 efx->mdio.mmds = QT202X_REQUIRED_DEVS; 339 151 efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 340 152 efx->loopback_modes = QT202X_LOOPBACKS | FALCON_XMAC_LOOPBACKS; ··· 353 145 354 146 static int qt202x_phy_init(struct efx_nic *efx) 355 147 { 356 - struct qt202x_phy_data *phy_data; 357 148 u32 devid; 358 149 int rc; 359 150 ··· 362 155 return rc; 363 156 } 364 157 365 - phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL); 366 - if (!phy_data) 367 - return -ENOMEM; 368 - efx->phy_data = phy_data; 369 - 370 158 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); 371 159 EFX_INFO(efx, "PHY ID reg %x (OUI %06x model %02x revision %x)\n", 372 160 devid, efx_mdio_id_oui(devid), 
efx_mdio_id_model(devid), 373 161 efx_mdio_id_rev(devid)); 374 162 375 - phy_data->phy_mode = efx->phy_mode; 163 + if (efx->phy_type == PHY_TYPE_QT2025C) 164 + qt2025c_firmware_id(efx); 165 + 376 166 return 0; 377 167 } 378 168 ··· 387 183 efx->link_state.fd = true; 388 184 efx->link_state.fc = efx->wanted_fc; 389 185 186 + if (efx->phy_type == PHY_TYPE_QT2025C) 187 + qt2025c_bug17190_workaround(efx); 188 + 390 189 return efx->link_state.up != was_up; 391 190 } 392 191 ··· 398 191 struct qt202x_phy_data *phy_data = efx->phy_data; 399 192 400 193 if (efx->phy_type == PHY_TYPE_QT2025C) { 194 + int rc = qt2025c_select_phy_mode(efx); 195 + if (rc) 196 + return rc; 197 + 401 198 /* There are several different register bits which can 402 199 * disable TX (and save power) on direct-attach cables 403 200 * or optical transceivers, varying somewhat between ··· 435 224 mdio45_ethtool_gset(&efx->mdio, ecmd); 436 225 } 437 226 438 - static void qt202x_phy_fini(struct efx_nic *efx) 227 + static void qt202x_phy_remove(struct efx_nic *efx) 439 228 { 440 229 /* Free the context block */ 441 230 kfree(efx->phy_data); ··· 447 236 .init = qt202x_phy_init, 448 237 .reconfigure = qt202x_phy_reconfigure, 449 238 .poll = qt202x_phy_poll, 450 - .fini = qt202x_phy_fini, 239 + .fini = efx_port_dummy_op_void, 240 + .remove = qt202x_phy_remove, 451 241 .get_settings = qt202x_phy_get_settings, 452 242 .set_settings = efx_mdio_set_settings, 453 243 };
+1
drivers/net/sfc/siena.c
··· 133 133 134 134 void siena_remove_port(struct efx_nic *efx) 135 135 { 136 + efx->phy_op->remove(efx); 136 137 efx_nic_free_buffer(efx, &efx->stats_buffer); 137 138 } 138 139
+78 -62
drivers/net/sfc/tenxpress.c
··· 202 202 int rc; 203 203 204 204 rtnl_lock(); 205 - efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, 206 - MDIO_PMA_10GBT_TXPWR_SHORT, 207 - count != 0 && *buf != '0'); 208 - rc = efx_reconfigure_port(efx); 205 + if (efx->state != STATE_RUNNING) { 206 + rc = -EBUSY; 207 + } else { 208 + efx_mdio_set_flag(efx, MDIO_MMD_PMAPMD, MDIO_PMA_10GBT_TXPWR, 209 + MDIO_PMA_10GBT_TXPWR_SHORT, 210 + count != 0 && *buf != '0'); 211 + rc = efx_reconfigure_port(efx); 212 + } 209 213 rtnl_unlock(); 210 214 211 215 return rc < 0 ? rc : (ssize_t)count; ··· 302 298 return 0; 303 299 } 304 300 305 - static int sfx7101_phy_probe(struct efx_nic *efx) 306 - { 307 - efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; 308 - efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 309 - efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; 310 - return 0; 311 - } 312 - 313 - static int sft9001_phy_probe(struct efx_nic *efx) 314 - { 315 - efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; 316 - efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 317 - efx->loopback_modes = (SFT9001_LOOPBACKS | FALCON_XMAC_LOOPBACKS | 318 - FALCON_GMAC_LOOPBACKS); 319 - return 0; 320 - } 321 - 322 - static int tenxpress_phy_init(struct efx_nic *efx) 301 + static int tenxpress_phy_probe(struct efx_nic *efx) 323 302 { 324 303 struct tenxpress_phy_data *phy_data; 325 - int rc = 0; 304 + int rc; 326 305 327 - falcon_board(efx)->type->init_phy(efx); 328 - 306 + /* Allocate phy private storage */ 329 307 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 330 308 if (!phy_data) 331 309 return -ENOMEM; 332 310 efx->phy_data = phy_data; 333 311 phy_data->phy_mode = efx->phy_mode; 312 + 313 + /* Create any special files */ 314 + if (efx->phy_type == PHY_TYPE_SFT9001B) { 315 + rc = device_create_file(&efx->pci_dev->dev, 316 + &dev_attr_phy_short_reach); 317 + if (rc) 318 + goto fail; 319 + } 320 + 321 + if (efx->phy_type == PHY_TYPE_SFX7101) { 322 + efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; 323 + 
efx->mdio.mode_support = MDIO_SUPPORTS_C45; 324 + 325 + efx->loopback_modes = SFX7101_LOOPBACKS | FALCON_XMAC_LOOPBACKS; 326 + 327 + efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | 328 + ADVERTISED_10000baseT_Full); 329 + } else { 330 + efx->mdio.mmds = TENXPRESS_REQUIRED_DEVS; 331 + efx->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; 332 + 333 + efx->loopback_modes = (SFT9001_LOOPBACKS | 334 + FALCON_XMAC_LOOPBACKS | 335 + FALCON_GMAC_LOOPBACKS); 336 + 337 + efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | 338 + ADVERTISED_10000baseT_Full | 339 + ADVERTISED_1000baseT_Full | 340 + ADVERTISED_100baseT_Full); 341 + } 342 + 343 + return 0; 344 + 345 + fail: 346 + kfree(efx->phy_data); 347 + efx->phy_data = NULL; 348 + return rc; 349 + } 350 + 351 + static int tenxpress_phy_init(struct efx_nic *efx) 352 + { 353 + int rc; 354 + 355 + falcon_board(efx)->type->init_phy(efx); 334 356 335 357 if (!(efx->phy_mode & PHY_MODE_SPECIAL)) { 336 358 if (efx->phy_type == PHY_TYPE_SFT9001A) { ··· 371 341 372 342 rc = efx_mdio_wait_reset_mmds(efx, TENXPRESS_REQUIRED_DEVS); 373 343 if (rc < 0) 374 - goto fail; 344 + return rc; 375 345 376 346 rc = efx_mdio_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); 377 347 if (rc < 0) 378 - goto fail; 348 + return rc; 379 349 } 380 350 381 351 rc = tenxpress_init(efx); 382 352 if (rc < 0) 383 - goto fail; 353 + return rc; 384 354 385 - /* Initialise advertising flags */ 386 - efx->link_advertising = (ADVERTISED_TP | ADVERTISED_Autoneg | 387 - ADVERTISED_10000baseT_Full); 388 - if (efx->phy_type != PHY_TYPE_SFX7101) 389 - efx->link_advertising |= (ADVERTISED_1000baseT_Full | 390 - ADVERTISED_100baseT_Full); 355 + /* Reinitialise flow control settings */ 391 356 efx_link_set_wanted_fc(efx, efx->wanted_fc); 392 357 efx_mdio_an_reconfigure(efx); 393 - 394 - if (efx->phy_type == PHY_TYPE_SFT9001B) { 395 - rc = device_create_file(&efx->pci_dev->dev, 396 - &dev_attr_phy_short_reach); 397 - if (rc) 398 - goto fail; 399 
- } 400 358 401 359 schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ 402 360 ··· 392 374 falcon_reset_xaui(efx); 393 375 394 376 return 0; 395 - 396 - fail: 397 - kfree(efx->phy_data); 398 - efx->phy_data = NULL; 399 - return rc; 400 377 } 401 378 402 379 /* Perform a "special software reset" on the PHY. The caller is ··· 602 589 return !efx_link_state_equal(&efx->link_state, &old_state); 603 590 } 604 591 605 - static void tenxpress_phy_fini(struct efx_nic *efx) 592 + static void sfx7101_phy_fini(struct efx_nic *efx) 606 593 { 607 594 int reg; 608 595 596 + /* Power down the LNPGA */ 597 + reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); 598 + efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); 599 + 600 + /* Waiting here ensures that the board fini, which can turn 601 + * off the power to the PHY, won't get run until the LNPGA 602 + * powerdown has been given long enough to complete. */ 603 + schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ 604 + } 605 + 606 + static void tenxpress_phy_remove(struct efx_nic *efx) 607 + { 609 608 if (efx->phy_type == PHY_TYPE_SFT9001B) 610 609 device_remove_file(&efx->pci_dev->dev, 611 610 &dev_attr_phy_short_reach); 612 - 613 - if (efx->phy_type == PHY_TYPE_SFX7101) { 614 - /* Power down the LNPGA */ 615 - reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); 616 - efx_mdio_write(efx, MDIO_MMD_PMAPMD, PMA_PMD_XCONTROL_REG, reg); 617 - 618 - /* Waiting here ensures that the board fini, which can turn 619 - * off the power to the PHY, won't get run until the LNPGA 620 - * powerdown has been given long enough to complete. 
*/ 621 - schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ 622 - } 623 611 624 612 kfree(efx->phy_data); 625 613 efx->phy_data = NULL; ··· 833 819 } 834 820 835 821 struct efx_phy_operations falcon_sfx7101_phy_ops = { 836 - .probe = sfx7101_phy_probe, 822 + .probe = tenxpress_phy_probe, 837 823 .init = tenxpress_phy_init, 838 824 .reconfigure = tenxpress_phy_reconfigure, 839 825 .poll = tenxpress_phy_poll, 840 - .fini = tenxpress_phy_fini, 826 + .fini = sfx7101_phy_fini, 827 + .remove = tenxpress_phy_remove, 841 828 .get_settings = tenxpress_get_settings, 842 829 .set_settings = tenxpress_set_settings, 843 830 .set_npage_adv = sfx7101_set_npage_adv, ··· 847 832 }; 848 833 849 834 struct efx_phy_operations falcon_sft9001_phy_ops = { 850 - .probe = sft9001_phy_probe, 835 + .probe = tenxpress_phy_probe, 851 836 .init = tenxpress_phy_init, 852 837 .reconfigure = tenxpress_phy_reconfigure, 853 838 .poll = tenxpress_phy_poll, 854 - .fini = tenxpress_phy_fini, 839 + .fini = efx_port_dummy_op_void, 840 + .remove = tenxpress_phy_remove, 855 841 .get_settings = tenxpress_get_settings, 856 842 .set_settings = tenxpress_set_settings, 857 843 .set_npage_adv = sft9001_set_npage_adv,
+2 -2
drivers/net/sfc/tx.c
··· 821 821 EFX_TXQ_MASK]; 822 822 efx_tsoh_free(tx_queue, buffer); 823 823 EFX_BUG_ON_PARANOID(buffer->skb); 824 - buffer->len = 0; 825 - buffer->continuation = true; 826 824 if (buffer->unmap_len) { 827 825 unmap_addr = (buffer->dma_addr + buffer->len - 828 826 buffer->unmap_len); ··· 834 836 PCI_DMA_TODEVICE); 835 837 buffer->unmap_len = 0; 836 838 } 839 + buffer->len = 0; 840 + buffer->continuation = true; 837 841 } 838 842 } 839 843
+1 -1
drivers/net/sh_eth.c
··· 110 110 mdelay(1); 111 111 cnt--; 112 112 } 113 - if (cnt < 0) 113 + if (cnt == 0) 114 114 printk(KERN_ERR "Device reset fail\n"); 115 115 116 116 /* Table Init */
+4
drivers/net/tulip/Kconfig
··· 101 101 102 102 If in doubt, say Y. 103 103 104 + config TULIP_DM910X 105 + def_bool y 106 + depends on TULIP && SPARC 107 + 104 108 config DE4X5 105 109 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" 106 110 depends on PCI || EISA
+21
drivers/net/tulip/dmfe.c
··· 92 92 #include <asm/uaccess.h> 93 93 #include <asm/irq.h> 94 94 95 + #ifdef CONFIG_TULIP_DM910X 96 + #include <linux/of.h> 97 + #endif 98 + 95 99 96 100 /* Board/System/Debug information/definition ---------------- */ 97 101 #define PCI_DM9132_ID 0x91321282 /* Davicom DM9132 ID */ ··· 380 376 381 377 if (!printed_version++) 382 378 printk(version); 379 + 380 + /* 381 + * SPARC on-board DM910x chips should be handled by the main 382 + * tulip driver, except for early DM9100s. 383 + */ 384 + #ifdef CONFIG_TULIP_DM910X 385 + if ((ent->driver_data == PCI_DM9100_ID && pdev->revision >= 0x30) || 386 + ent->driver_data == PCI_DM9102_ID) { 387 + struct device_node *dp = pci_device_to_OF_node(pdev); 388 + 389 + if (dp && of_get_property(dp, "local-mac-address", NULL)) { 390 + printk(KERN_INFO DRV_NAME 391 + ": skipping on-board DM910x (use tulip)\n"); 392 + return -ENODEV; 393 + } 394 + } 395 + #endif 383 396 384 397 /* Init network device */ 385 398 dev = alloc_etherdev(sizeof(*db));
+25 -7
drivers/net/tulip/tulip_core.c
··· 196 196 | HAS_NWAY | HAS_PCI_MWI, tulip_timer, tulip_media_task }, 197 197 198 198 /* DM910X */ 199 + #ifdef CONFIG_TULIP_DM910X 199 200 { "Davicom DM9102/DM9102A", 128, 0x0001ebef, 200 201 HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI, 201 202 tulip_timer, tulip_media_task }, 203 + #else 204 + { NULL }, 205 + #endif 202 206 203 207 /* RS7112 */ 204 208 { "Conexant LANfinity", 256, 0x0001ebef, ··· 232 228 { 0x1259, 0xa120, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 233 229 { 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 }, 234 230 { 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 }, 231 + #ifdef CONFIG_TULIP_DM910X 235 232 { 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, 236 233 { 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X }, 234 + #endif 237 235 { 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, 238 236 { 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 }, 239 237 { 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET }, ··· 1305 1299 } 1306 1300 1307 1301 /* 1308 - * Early DM9100's need software CRC and the DMFE driver 1302 + * DM910x chips should be handled by the dmfe driver, except 1303 + * on-board chips on SPARC systems. Also, early DM9100s need 1304 + * software CRC which only the dmfe driver supports. 
1309 1305 */ 1310 1306 1311 - if (pdev->vendor == 0x1282 && pdev->device == 0x9100) 1312 - { 1313 - /* Read Chip revision */ 1314 - if (pdev->revision < 0x30) 1315 - { 1316 - printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n"); 1307 + #ifdef CONFIG_TULIP_DM910X 1308 + if (chip_idx == DM910X) { 1309 + struct device_node *dp; 1310 + 1311 + if (pdev->vendor == 0x1282 && pdev->device == 0x9100 && 1312 + pdev->revision < 0x30) { 1313 + printk(KERN_INFO PFX 1314 + "skipping early DM9100 with Crc bug (use dmfe)\n"); 1315 + return -ENODEV; 1316 + } 1317 + 1318 + dp = pci_device_to_OF_node(pdev); 1319 + if (!(dp && of_get_property(dp, "local-mac-address", NULL))) { 1320 + printk(KERN_INFO PFX 1321 + "skipping DM910x expansion card (use dmfe)\n"); 1317 1322 return -ENODEV; 1318 1323 } 1319 1324 } 1325 + #endif 1320 1326 1321 1327 /* 1322 1328 * Looks for early PCI chipsets where people report hangs
+3 -3
drivers/net/tun.c
··· 849 849 if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) 850 850 wake_up_interruptible_sync(sk->sk_sleep); 851 851 852 - tun = container_of(sk, struct tun_sock, sk)->tun; 852 + tun = tun_sk(sk)->tun; 853 853 kill_fasync(&tun->fasync, SIGIO, POLL_OUT); 854 854 } 855 855 856 856 static void tun_sock_destruct(struct sock *sk) 857 857 { 858 - free_netdev(container_of(sk, struct tun_sock, sk)->tun->dev); 858 + free_netdev(tun_sk(sk)->tun->dev); 859 859 } 860 860 861 861 static struct proto tun_proto = { ··· 990 990 sk->sk_write_space = tun_sock_write_space; 991 991 sk->sk_sndbuf = INT_MAX; 992 992 993 - container_of(sk, struct tun_sock, sk)->tun = tun; 993 + tun_sk(sk)->tun = tun; 994 994 995 995 security_tun_dev_post_create(sk); 996 996
+26 -19
drivers/net/ucc_geth.c
··· 1563 1563 1564 1564 static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1565 1565 { 1566 - /* Wait for and prevent any further xmits. */ 1566 + /* Prevent any further xmits, plus detach the device. */ 1567 + netif_device_detach(ugeth->ndev); 1568 + 1569 + /* Wait for any current xmits to finish. */ 1567 1570 netif_tx_disable(ugeth->ndev); 1568 1571 1569 1572 /* Disable the interrupt to avoid NAPI rescheduling. */ ··· 1580 1577 { 1581 1578 napi_enable(&ugeth->napi); 1582 1579 enable_irq(ugeth->ug_info->uf_info.irq); 1583 - netif_tx_wake_all_queues(ugeth->ndev); 1580 + netif_device_attach(ugeth->ndev); 1584 1581 } 1585 1582 1586 1583 /* Called every time the controller might need to be made ··· 1651 1648 ugeth->oldspeed = phydev->speed; 1652 1649 } 1653 1650 1654 - /* 1655 - * To change the MAC configuration we need to disable the 1656 - * controller. To do so, we have to either grab ugeth->lock, 1657 - * which is a bad idea since 'graceful stop' commands might 1658 - * take quite a while, or we can quiesce driver's activity. 1659 - */ 1660 - ugeth_quiesce(ugeth); 1661 - ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 1662 - 1663 - out_be32(&ug_regs->maccfg2, tempval); 1664 - out_be32(&uf_regs->upsmr, upsmr); 1665 - 1666 - ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 1667 - ugeth_activate(ugeth); 1668 - 1669 1651 if (!ugeth->oldlink) { 1670 1652 new_state = 1; 1671 1653 ugeth->oldlink = 1; 1654 + } 1655 + 1656 + if (new_state) { 1657 + /* 1658 + * To change the MAC configuration we need to disable 1659 + * the controller. To do so, we have to either grab 1660 + * ugeth->lock, which is a bad idea since 'graceful 1661 + * stop' commands might take quite a while, or we can 1662 + * quiesce driver's activity. 
1663 + */ 1664 + ugeth_quiesce(ugeth); 1665 + ugeth_disable(ugeth, COMM_DIR_RX_AND_TX); 1666 + 1667 + out_be32(&ug_regs->maccfg2, tempval); 1668 + out_be32(&uf_regs->upsmr, upsmr); 1669 + 1670 + ugeth_enable(ugeth, COMM_DIR_RX_AND_TX); 1671 + ugeth_activate(ugeth); 1672 1672 } 1673 1673 } else if (ugeth->oldlink) { 1674 1674 new_state = 1; ··· 3279 3273 /* Handle the transmitted buffer and release */ 3280 3274 /* the BD to be used with the current frame */ 3281 3275 3282 - if ((bd == ugeth->txBd[txQ]) && (netif_queue_stopped(dev) == 0)) 3276 + if (bd == ugeth->txBd[txQ]) /* queue empty? */ 3283 3277 break; 3284 3278 3285 3279 dev->stats.tx_packets++; ··· 3607 3601 if (!netif_running(ndev)) 3608 3602 return 0; 3609 3603 3604 + netif_device_detach(ndev); 3610 3605 napi_disable(&ugeth->napi); 3611 3606 3612 3607 /* ··· 3666 3659 phy_start(ugeth->phydev); 3667 3660 3668 3661 napi_enable(&ugeth->napi); 3669 - netif_start_queue(ndev); 3662 + netif_device_attach(ndev); 3670 3663 3671 3664 return 0; 3672 3665 }
+7 -6
drivers/net/ucc_geth.h
··· 838 838 using the maximum is 839 839 easier */ 840 840 #define UCC_GETH_SEND_QUEUE_QUEUE_DESCRIPTOR_ALIGNMENT 32 841 - #define UCC_GETH_SCHEDULER_ALIGNMENT 4 /* This is a guess */ 841 + #define UCC_GETH_SCHEDULER_ALIGNMENT 8 /* This is a guess */ 842 842 #define UCC_GETH_TX_STATISTICS_ALIGNMENT 4 /* This is a guess */ 843 843 #define UCC_GETH_RX_STATISTICS_ALIGNMENT 4 /* This is a guess */ 844 844 #define UCC_GETH_RX_INTERRUPT_COALESCING_ALIGNMENT 64 845 845 #define UCC_GETH_RX_BD_QUEUES_ALIGNMENT 8 /* This is a guess */ 846 846 #define UCC_GETH_RX_PREFETCHED_BDS_ALIGNMENT 128 /* This is a guess */ 847 - #define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 4 /* This 847 + #define UCC_GETH_RX_EXTENDED_FILTERING_GLOBAL_PARAMETERS_ALIGNMENT 8 /* This 848 848 is a 849 849 guess 850 850 */ ··· 899 899 #define UCC_GETH_UTFS_INIT 512 /* Tx virtual FIFO size 900 900 */ 901 901 #define UCC_GETH_UTFET_INIT 256 /* 1/2 utfs */ 902 - #define UCC_GETH_UTFTT_INIT 128 902 + #define UCC_GETH_UTFTT_INIT 512 903 903 /* Gigabit Ethernet (1000 Mbps) */ 904 904 #define UCC_GETH_URFS_GIGA_INIT 4096/*2048*/ /* Rx virtual 905 905 FIFO size */ 906 906 #define UCC_GETH_URFET_GIGA_INIT 2048/*1024*/ /* 1/2 urfs */ 907 907 #define UCC_GETH_URFSET_GIGA_INIT 3072/*1536*/ /* 3/4 urfs */ 908 - #define UCC_GETH_UTFS_GIGA_INIT 8192/*2048*/ /* Tx virtual 908 + #define UCC_GETH_UTFS_GIGA_INIT 4096/*2048*/ /* Tx virtual 909 909 FIFO size */ 910 - #define UCC_GETH_UTFET_GIGA_INIT 4096/*1024*/ /* 1/2 utfs */ 911 - #define UCC_GETH_UTFTT_GIGA_INIT 0x400/*0x40*/ /* */ 910 + #define UCC_GETH_UTFET_GIGA_INIT 2048/*1024*/ /* 1/2 utfs */ 911 + #define UCC_GETH_UTFTT_GIGA_INIT 4096/*0x40*/ /* Tx virtual 912 + FIFO size */ 912 913 913 914 #define UCC_GETH_REMODER_INIT 0 /* bits that must be 914 915 set */
+77 -28
drivers/net/usb/hso.c
··· 286 286 u8 usb_gone; 287 287 struct work_struct async_get_intf; 288 288 struct work_struct async_put_intf; 289 + struct work_struct reset_device; 289 290 290 291 struct usb_device *usb; 291 292 struct usb_interface *interface; ··· 333 332 /* Helper functions */ 334 333 static int hso_mux_submit_intr_urb(struct hso_shared_int *mux_int, 335 334 struct usb_device *usb, gfp_t gfp); 336 - static void log_usb_status(int status, const char *function); 335 + static void handle_usb_error(int status, const char *function, 336 + struct hso_device *hso_dev); 337 337 static struct usb_endpoint_descriptor *hso_get_ep(struct usb_interface *intf, 338 338 int type, int dir); 339 339 static int hso_get_mux_ports(struct usb_interface *intf, unsigned char *ports); ··· 352 350 static int hso_put_activity(struct hso_device *hso_dev); 353 351 static int hso_get_activity(struct hso_device *hso_dev); 354 352 static void tiocmget_intr_callback(struct urb *urb); 353 + static void reset_device(struct work_struct *data); 355 354 /*****************************************************************************/ 356 355 /* Helping functions */ 357 356 /*****************************************************************************/ ··· 464 461 {USB_DEVICE(0x0af0, 0x7501)}, /* GTM 382 */ 465 462 {USB_DEVICE(0x0af0, 0x7601)}, /* GE40x */ 466 463 {USB_DEVICE(0x0af0, 0x7701)}, 464 + {USB_DEVICE(0x0af0, 0x7706)}, 467 465 {USB_DEVICE(0x0af0, 0x7801)}, 468 466 {USB_DEVICE(0x0af0, 0x7901)}, 467 + {USB_DEVICE(0x0af0, 0x7A01)}, 468 + {USB_DEVICE(0x0af0, 0x7A05)}, 469 469 {USB_DEVICE(0x0af0, 0x8200)}, 470 470 {USB_DEVICE(0x0af0, 0x8201)}, 471 + {USB_DEVICE(0x0af0, 0x8300)}, 472 + {USB_DEVICE(0x0af0, 0x8302)}, 473 + {USB_DEVICE(0x0af0, 0x8304)}, 474 + {USB_DEVICE(0x0af0, 0x8400)}, 471 475 {USB_DEVICE(0x0af0, 0xd035)}, 472 476 {USB_DEVICE(0x0af0, 0xd055)}, 473 477 {USB_DEVICE(0x0af0, 0xd155)}, ··· 483 473 {USB_DEVICE(0x0af0, 0xd157)}, 484 474 {USB_DEVICE(0x0af0, 0xd257)}, 485 475 {USB_DEVICE(0x0af0, 0xd357)}, 
476 + {USB_DEVICE(0x0af0, 0xd058)}, 477 + {USB_DEVICE(0x0af0, 0xc100)}, 486 478 {} 487 479 }; 488 480 MODULE_DEVICE_TABLE(usb, hso_ids); ··· 667 655 spin_unlock_irqrestore(&serial_table_lock, flags); 668 656 } 669 657 670 - /* log a meaningful explanation of an USB status */ 671 - static void log_usb_status(int status, const char *function) 658 + static void handle_usb_error(int status, const char *function, 659 + struct hso_device *hso_dev) 672 660 { 673 661 char *explanation; 674 662 ··· 697 685 case -EMSGSIZE: 698 686 explanation = "internal error"; 699 687 break; 688 + case -EILSEQ: 689 + case -EPROTO: 690 + case -ETIME: 691 + case -ETIMEDOUT: 692 + explanation = "protocol error"; 693 + if (hso_dev) 694 + schedule_work(&hso_dev->reset_device); 695 + break; 700 696 default: 701 697 explanation = "unknown status"; 702 698 break; 703 699 } 700 + 701 + /* log a meaningful explanation of an USB status */ 704 702 D1("%s: received USB status - %s (%d)", function, explanation, status); 705 703 } 706 704 ··· 784 762 /* log status, but don't act on it, we don't need to resubmit anything 785 763 * anyhow */ 786 764 if (status) 787 - log_usb_status(status, __func__); 765 + handle_usb_error(status, __func__, odev->parent); 788 766 789 767 hso_put_activity(odev->parent); 790 768 ··· 828 806 result = usb_submit_urb(odev->mux_bulk_tx_urb, GFP_ATOMIC); 829 807 if (result) { 830 808 dev_warn(&odev->parent->interface->dev, 831 - "failed mux_bulk_tx_urb %d", result); 809 + "failed mux_bulk_tx_urb %d\n", result); 832 810 net->stats.tx_errors++; 833 811 netif_start_queue(net); 834 812 } else { ··· 1020 998 1021 999 /* is al ok? (Filip: Who's Al ?) 
*/ 1022 1000 if (status) { 1023 - log_usb_status(status, __func__); 1001 + handle_usb_error(status, __func__, odev->parent); 1024 1002 return; 1025 1003 } 1026 1004 ··· 1041 1019 if (odev->parent->port_spec & HSO_INFO_CRC_BUG) { 1042 1020 u32 rest; 1043 1021 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; 1044 - rest = urb->actual_length % odev->in_endp->wMaxPacketSize; 1022 + rest = urb->actual_length % 1023 + le16_to_cpu(odev->in_endp->wMaxPacketSize); 1045 1024 if (((rest == 5) || (rest == 6)) && 1046 1025 !memcmp(((u8 *) urb->transfer_buffer) + 1047 1026 urb->actual_length - 4, crc_check, 4)) { ··· 1076 1053 result = usb_submit_urb(urb, GFP_ATOMIC); 1077 1054 if (result) 1078 1055 dev_warn(&odev->parent->interface->dev, 1079 - "%s failed submit mux_bulk_rx_urb %d", __func__, 1056 + "%s failed submit mux_bulk_rx_urb %d\n", __func__, 1080 1057 result); 1081 1058 } 1082 1059 ··· 1230 1207 D1("serial == NULL"); 1231 1208 return; 1232 1209 } else if (status) { 1233 - log_usb_status(status, __func__); 1210 + handle_usb_error(status, __func__, serial->parent); 1234 1211 return; 1235 1212 } 1236 1213 ··· 1248 1225 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF }; 1249 1226 rest = 1250 1227 urb->actual_length % 1251 - serial->in_endp->wMaxPacketSize; 1228 + le16_to_cpu(serial->in_endp->wMaxPacketSize); 1252 1229 if (((rest == 5) || (rest == 6)) && 1253 1230 !memcmp(((u8 *) urb->transfer_buffer) + 1254 1231 urb->actual_length - 4, crc_check, 4)) { ··· 1536 1513 if (!serial) 1537 1514 return; 1538 1515 if (status) { 1539 - log_usb_status(status, __func__); 1516 + handle_usb_error(status, __func__, serial->parent); 1540 1517 return; 1541 1518 } 1542 1519 tiocmget = serial->tiocmget; ··· 1723 1700 D1("no tty structures"); 1724 1701 return -EINVAL; 1725 1702 } 1703 + 1704 + if ((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM) 1705 + return -EINVAL; 1706 + 1726 1707 if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber; 1727 1708 1728 1709 
spin_lock_irqsave(&serial->serial_lock, flags); ··· 1865 1838 result = usb_submit_urb(ctrl_urb, GFP_ATOMIC); 1866 1839 if (result) { 1867 1840 dev_err(&ctrl_urb->dev->dev, 1868 - "%s failed submit ctrl_urb %d type %d", __func__, 1841 + "%s failed submit ctrl_urb %d type %d\n", __func__, 1869 1842 result, type); 1870 1843 return result; 1871 1844 } ··· 1915 1888 1916 1889 /* status check */ 1917 1890 if (status) { 1918 - log_usb_status(status, __func__); 1891 + handle_usb_error(status, __func__, NULL); 1919 1892 return; 1920 1893 } 1921 1894 D4("\n--- Got intr callback 0x%02X ---", status); ··· 1932 1905 if (serial != NULL) { 1933 1906 D1("Pending read interrupt on port %d\n", i); 1934 1907 spin_lock(&serial->serial_lock); 1935 - if (serial->rx_state == RX_IDLE) { 1908 + if (serial->rx_state == RX_IDLE && 1909 + serial->open_count > 0) { 1936 1910 /* Setup and send a ctrl req read on 1937 1911 * port i */ 1938 - if (!serial->rx_urb_filled[0]) { 1912 + if (!serial->rx_urb_filled[0]) { 1939 1913 serial->rx_state = RX_SENT; 1940 1914 hso_mux_serial_read(serial); 1941 1915 } else 1942 1916 serial->rx_state = RX_PENDING; 1943 - 1944 1917 } else { 1945 - D1("Already pending a read on " 1946 - "port %d\n", i); 1918 + D1("Already a read pending on " 1919 + "port %d or port not open\n", i); 1947 1920 } 1948 1921 spin_unlock(&serial->serial_lock); 1949 1922 } ··· 1985 1958 tty = tty_kref_get(serial->tty); 1986 1959 spin_unlock(&serial->serial_lock); 1987 1960 if (status) { 1988 - log_usb_status(status, __func__); 1961 + handle_usb_error(status, __func__, serial->parent); 1989 1962 tty_kref_put(tty); 1990 1963 return; 1991 1964 } ··· 2041 2014 tty = tty_kref_get(serial->tty); 2042 2015 spin_unlock(&serial->serial_lock); 2043 2016 if (status) { 2044 - log_usb_status(status, __func__); 2017 + handle_usb_error(status, __func__, serial->parent); 2045 2018 tty_kref_put(tty); 2046 2019 return; 2047 2020 } ··· 2385 2358 serial->tx_data_length = tx_size; 2386 2359 serial->tx_data = 
kzalloc(serial->tx_data_length, GFP_KERNEL); 2387 2360 if (!serial->tx_data) { 2388 - dev_err(dev, "%s - Out of memory", __func__); 2361 + dev_err(dev, "%s - Out of memory\n", __func__); 2389 2362 goto exit; 2390 2363 } 2391 2364 serial->tx_buffer = kzalloc(serial->tx_data_length, GFP_KERNEL); 2392 2365 if (!serial->tx_buffer) { 2393 - dev_err(dev, "%s - Out of memory", __func__); 2366 + dev_err(dev, "%s - Out of memory\n", __func__); 2394 2367 goto exit; 2395 2368 } 2396 2369 ··· 2418 2391 2419 2392 INIT_WORK(&hso_dev->async_get_intf, async_get_intf); 2420 2393 INIT_WORK(&hso_dev->async_put_intf, async_put_intf); 2394 + INIT_WORK(&hso_dev->reset_device, reset_device); 2421 2395 2422 2396 return hso_dev; 2423 2397 } ··· 2859 2831 2860 2832 mux->shared_intr_urb = usb_alloc_urb(0, GFP_KERNEL); 2861 2833 if (!mux->shared_intr_urb) { 2862 - dev_err(&interface->dev, "Could not allocate intr urb?"); 2834 + dev_err(&interface->dev, "Could not allocate intr urb?\n"); 2863 2835 goto exit; 2864 2836 } 2865 - mux->shared_intr_buf = kzalloc(mux->intr_endp->wMaxPacketSize, 2866 - GFP_KERNEL); 2837 + mux->shared_intr_buf = 2838 + kzalloc(le16_to_cpu(mux->intr_endp->wMaxPacketSize), 2839 + GFP_KERNEL); 2867 2840 if (!mux->shared_intr_buf) { 2868 - dev_err(&interface->dev, "Could not allocate intr buf?"); 2841 + dev_err(&interface->dev, "Could not allocate intr buf?\n"); 2869 2842 goto exit; 2870 2843 } 2871 2844 ··· 3161 3132 return result; 3162 3133 } 3163 3134 3135 + static void reset_device(struct work_struct *data) 3136 + { 3137 + struct hso_device *hso_dev = 3138 + container_of(data, struct hso_device, reset_device); 3139 + struct usb_device *usb = hso_dev->usb; 3140 + int result; 3141 + 3142 + if (hso_dev->usb_gone) { 3143 + D1("No reset during disconnect\n"); 3144 + } else { 3145 + result = usb_lock_device_for_reset(usb, hso_dev->interface); 3146 + if (result < 0) 3147 + D1("unable to lock device for reset: %d\n", result); 3148 + else { 3149 + usb_reset_device(usb); 3150 + 
usb_unlock_device(usb); 3151 + } 3152 + } 3153 + } 3154 + 3164 3155 static void hso_serial_ref_free(struct kref *ref) 3165 3156 { 3166 3157 struct hso_device *hso_dev = container_of(ref, struct hso_device, ref); ··· 3281 3232 usb_rcvintpipe(usb, 3282 3233 shared_int->intr_endp->bEndpointAddress & 0x7F), 3283 3234 shared_int->shared_intr_buf, 3284 - shared_int->intr_endp->wMaxPacketSize, 3235 + 1, 3285 3236 intr_callback, shared_int, 3286 3237 shared_int->intr_endp->bInterval); 3287 3238 3288 3239 result = usb_submit_urb(shared_int->shared_intr_urb, gfp); 3289 3240 if (result) 3290 - dev_warn(&usb->dev, "%s failed mux_intr_urb %d", __func__, 3241 + dev_warn(&usb->dev, "%s failed mux_intr_urb %d\n", __func__, 3291 3242 result); 3292 3243 3293 3244 return result;
+2 -2
drivers/net/usb/rtl8150.c
··· 270 270 get_registers(dev, PHYCNT, 1, data); 271 271 } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); 272 272 273 - if (i < MII_TIMEOUT) { 273 + if (i <= MII_TIMEOUT) { 274 274 get_registers(dev, PHYDAT, 2, data); 275 275 *reg = data[0] | (data[1] << 8); 276 276 return 0; ··· 295 295 get_registers(dev, PHYCNT, 1, data); 296 296 } while ((data[0] & PHY_GO) && (i++ < MII_TIMEOUT)); 297 297 298 - if (i < MII_TIMEOUT) 298 + if (i <= MII_TIMEOUT) 299 299 return 0; 300 300 else 301 301 return 1;
+28 -13
drivers/net/via-rhine.c
··· 102 102 #include <linux/ethtool.h> 103 103 #include <linux/crc32.h> 104 104 #include <linux/bitops.h> 105 + #include <linux/workqueue.h> 105 106 #include <asm/processor.h> /* Processor type for cache alignment. */ 106 107 #include <asm/io.h> 107 108 #include <asm/irq.h> ··· 390 389 struct net_device *dev; 391 390 struct napi_struct napi; 392 391 spinlock_t lock; 392 + struct work_struct reset_task; 393 393 394 394 /* Frequently used values: keep some adjacent for cache effect. */ 395 395 u32 quirks; ··· 409 407 static int mdio_read(struct net_device *dev, int phy_id, int location); 410 408 static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 411 409 static int rhine_open(struct net_device *dev); 410 + static void rhine_reset_task(struct work_struct *work); 412 411 static void rhine_tx_timeout(struct net_device *dev); 413 412 static netdev_tx_t rhine_start_tx(struct sk_buff *skb, 414 413 struct net_device *dev); ··· 778 775 dev->irq = pdev->irq; 779 776 780 777 spin_lock_init(&rp->lock); 778 + INIT_WORK(&rp->reset_task, rhine_reset_task); 779 + 781 780 rp->mii_if.dev = dev; 782 781 rp->mii_if.mdio_read = mdio_read; 783 782 rp->mii_if.mdio_write = mdio_write; ··· 1184 1179 return 0; 1185 1180 } 1186 1181 1187 - static void rhine_tx_timeout(struct net_device *dev) 1182 + static void rhine_reset_task(struct work_struct *work) 1188 1183 { 1189 - struct rhine_private *rp = netdev_priv(dev); 1190 - void __iomem *ioaddr = rp->base; 1191 - 1192 - printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " 1193 - "%4.4x, resetting...\n", 1194 - dev->name, ioread16(ioaddr + IntrStatus), 1195 - mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1184 + struct rhine_private *rp = container_of(work, struct rhine_private, 1185 + reset_task); 1186 + struct net_device *dev = rp->dev; 1196 1187 1197 1188 /* protect against concurrent rx interrupts */ 1198 1189 disable_irq(rp->pdev->irq); 1199 1190 1200 1191 napi_disable(&rp->napi); 1201 1192 
1202 - spin_lock(&rp->lock); 1193 + spin_lock_bh(&rp->lock); 1203 1194 1204 1195 /* clear all descriptors */ 1205 1196 free_tbufs(dev); ··· 1207 1206 rhine_chip_reset(dev); 1208 1207 init_registers(dev); 1209 1208 1210 - spin_unlock(&rp->lock); 1209 + spin_unlock_bh(&rp->lock); 1211 1210 enable_irq(rp->pdev->irq); 1212 1211 1213 1212 dev->trans_start = jiffies; 1214 1213 dev->stats.tx_errors++; 1215 1214 netif_wake_queue(dev); 1215 + } 1216 + 1217 + static void rhine_tx_timeout(struct net_device *dev) 1218 + { 1219 + struct rhine_private *rp = netdev_priv(dev); 1220 + void __iomem *ioaddr = rp->base; 1221 + 1222 + printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " 1223 + "%4.4x, resetting...\n", 1224 + dev->name, ioread16(ioaddr + IntrStatus), 1225 + mdio_read(dev, rp->mii_if.phy_id, MII_BMSR)); 1226 + 1227 + schedule_work(&rp->reset_task); 1216 1228 } 1217 1229 1218 1230 static netdev_tx_t rhine_start_tx(struct sk_buff *skb, ··· 1844 1830 struct rhine_private *rp = netdev_priv(dev); 1845 1831 void __iomem *ioaddr = rp->base; 1846 1832 1847 - spin_lock_irq(&rp->lock); 1848 - 1849 - netif_stop_queue(dev); 1850 1833 napi_disable(&rp->napi); 1834 + cancel_work_sync(&rp->reset_task); 1835 + netif_stop_queue(dev); 1836 + 1837 + spin_lock_irq(&rp->lock); 1851 1838 1852 1839 if (debug > 1) 1853 1840 printk(KERN_DEBUG "%s: Shutting down ethercard, "
+4 -4
drivers/net/via-velocity.c
··· 2237 2237 /* Ensure chip is running */ 2238 2238 pci_set_power_state(vptr->pdev, PCI_D0); 2239 2239 2240 - velocity_give_many_rx_descs(vptr); 2241 - 2242 2240 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2243 2241 2244 2242 ret = request_irq(vptr->pdev->irq, velocity_intr, IRQF_SHARED, ··· 2247 2249 velocity_free_rings(vptr); 2248 2250 goto out; 2249 2251 } 2252 + 2253 + velocity_give_many_rx_descs(vptr); 2250 2254 2251 2255 mac_enable_int(vptr->mac_regs); 2252 2256 netif_start_queue(dev); ··· 2339 2339 2340 2340 dev->mtu = new_mtu; 2341 2341 2342 - velocity_give_many_rx_descs(vptr); 2343 - 2344 2342 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2343 + 2344 + velocity_give_many_rx_descs(vptr); 2345 2345 2346 2346 mac_enable_int(vptr->mac_regs); 2347 2347 netif_start_queue(dev);
+4 -4
drivers/net/vxge/vxge-main.c
··· 310 310 dma_addr = pci_map_single(ring->pdev, rx_priv->skb_data, 311 311 rx_priv->data_size, PCI_DMA_FROMDEVICE); 312 312 313 - if (dma_addr == 0) { 313 + if (unlikely(pci_dma_mapping_error(ring->pdev, dma_addr))) { 314 314 ring->stats.pci_map_fail++; 315 315 return -EIO; 316 316 } ··· 4087 4087 goto _exit0; 4088 4088 } 4089 4089 4090 - if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) { 4090 + if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { 4091 4091 vxge_debug_ll_config(VXGE_TRACE, 4092 4092 "%s : using 64bit DMA", __func__); 4093 4093 4094 4094 high_dma = 1; 4095 4095 4096 4096 if (pci_set_consistent_dma_mask(pdev, 4097 - 0xffffffffffffffffULL)) { 4097 + DMA_BIT_MASK(64))) { 4098 4098 vxge_debug_init(VXGE_ERR, 4099 4099 "%s : unable to obtain 64bit DMA for " 4100 4100 "consistent allocations", __func__); 4101 4101 ret = -ENOMEM; 4102 4102 goto _exit1; 4103 4103 } 4104 - } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) { 4104 + } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { 4105 4105 vxge_debug_ll_config(VXGE_TRACE, 4106 4106 "%s : using 32bit DMA", __func__); 4107 4107 } else {
+2 -2
drivers/net/wireless/libertas/mesh.c
··· 1 1 #include <linux/delay.h> 2 2 #include <linux/etherdevice.h> 3 3 #include <linux/netdevice.h> 4 + #include <linux/if_ether.h> 4 5 #include <linux/if_arp.h> 5 6 #include <linux/kthread.h> 6 7 #include <linux/kfifo.h> ··· 365 364 366 365 mesh_dev->netdev_ops = &mesh_netdev_ops; 367 366 mesh_dev->ethtool_ops = &lbs_ethtool_ops; 368 - memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, 369 - sizeof(priv->dev->dev_addr)); 367 + memcpy(mesh_dev->dev_addr, priv->dev->dev_addr, ETH_ALEN); 370 368 371 369 SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); 372 370
+1 -1
drivers/s390/net/claw.c
··· 3398 3398 goto out_err; 3399 3399 } 3400 3400 CLAW_DBF_TEXT(2, setup, "init_mod"); 3401 - claw_root_dev = root_device_register("qeth"); 3401 + claw_root_dev = root_device_register("claw"); 3402 3402 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0; 3403 3403 if (ret) 3404 3404 goto register_err;
+33 -1
drivers/scsi/cxgb3i/cxgb3i_offload.c
··· 1440 1440 static int is_cxgb3_dev(struct net_device *dev) 1441 1441 { 1442 1442 struct cxgb3i_sdev_data *cdata; 1443 + struct net_device *ndev = dev; 1444 + 1445 + if (dev->priv_flags & IFF_802_1Q_VLAN) 1446 + ndev = vlan_dev_real_dev(dev); 1443 1447 1444 1448 write_lock(&cdata_rwlock); 1445 1449 list_for_each_entry(cdata, &cdata_list, list) { ··· 1451 1447 int i; 1452 1448 1453 1449 for (i = 0; i < ports->nports; i++) 1454 - if (dev == ports->lldevs[i]) { 1450 + if (ndev == ports->lldevs[i]) { 1455 1451 write_unlock(&cdata_rwlock); 1456 1452 return 1; 1457 1453 } ··· 1570 1566 return -EINVAL; 1571 1567 } 1572 1568 1569 + /* * 1570 + * cxgb3i_find_dev - find the interface associated with the given address 1571 + * @ipaddr: ip address 1572 + */ 1573 + static struct net_device * 1574 + cxgb3i_find_dev(struct net_device *dev, __be32 ipaddr) 1575 + { 1576 + struct flowi fl; 1577 + int err; 1578 + struct rtable *rt; 1579 + 1580 + memset(&fl, 0, sizeof(fl)); 1581 + fl.nl_u.ip4_u.daddr = ipaddr; 1582 + 1583 + err = ip_route_output_key(dev ? dev_net(dev) : &init_net, &rt, &fl); 1584 + if (!err) 1585 + return (&rt->u.dst)->dev; 1586 + 1587 + return NULL; 1588 + } 1573 1589 1574 1590 /** 1575 1591 * cxgb3i_c3cn_connect - initiates an iscsi tcp connection to a given address ··· 1605 1581 struct cxgb3i_sdev_data *cdata; 1606 1582 struct t3cdev *cdev; 1607 1583 __be32 sipv4; 1584 + struct net_device *dstdev; 1608 1585 int err; 1609 1586 1610 1587 c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev); ··· 1615 1590 1616 1591 c3cn->daddr.sin_port = usin->sin_port; 1617 1592 c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr; 1593 + 1594 + dstdev = cxgb3i_find_dev(dev, usin->sin_addr.s_addr); 1595 + if (!dstdev || !is_cxgb3_dev(dstdev)) 1596 + return -ENETUNREACH; 1597 + 1598 + if (dstdev->priv_flags & IFF_802_1Q_VLAN) 1599 + dev = dstdev; 1618 1600 1619 1601 rt = find_route(dev, c3cn->saddr.sin_addr.s_addr, 1620 1602 c3cn->daddr.sin_addr.s_addr,
+14 -1
drivers/serial/serial_cs.c
··· 819 819 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x0035, "cis/3CXEM556.cis"), 820 820 PCMCIA_MFC_DEVICE_CIS_MANF_CARD(1, 0x0101, 0x003d, "cis/3CXEM556.cis"), 821 821 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC850", 0xd85f6206, 0x42a2c018, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC850 3G Network Adapter R1 */ 822 + PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC860", 0xd85f6206, 0x698f93db, "cis/SW_8xx_SER.cis"), /* Sierra Wireless AC860 3G Network Adapter R1 */ 822 823 PCMCIA_DEVICE_CIS_PROD_ID12("Sierra Wireless", "AC710/AC750", 0xd85f6206, 0x761b11e0, "cis/SW_7xx_SER.cis"), /* Sierra Wireless AC710/AC750 GPRS Network Adapter R1 */ 823 824 PCMCIA_DEVICE_CIS_MANF_CARD(0x0192, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- pre update */ 824 825 PCMCIA_DEVICE_CIS_MANF_CARD(0x013f, 0xa555, "cis/SW_555_SER.cis"), /* Sierra Aircard 555 CDMA 1xrtt Modem -- post update */ ··· 828 827 PCMCIA_DEVICE_CIS_PROD_ID12("ADVANTECH", "COMpad-32/85B-4", 0x96913a85, 0xcec8f102, "cis/COMpad4.cis"), 829 828 PCMCIA_DEVICE_CIS_PROD_ID123("ADVANTECH", "COMpad-32/85", "1.0", 0x96913a85, 0x8fbe92ae, 0x0877b627, "cis/COMpad2.cis"), 830 829 PCMCIA_DEVICE_CIS_PROD_ID2("RS-COM 2P", 0xad20b156, "cis/RS-COM-2P.cis"), 831 - PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "GLOBETROTTER.cis"), 830 + PCMCIA_DEVICE_CIS_MANF_CARD(0x0013, 0x0000, "cis/GLOBETROTTER.cis"), 832 831 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100 1.00.",0x19ca78af,0xf964f42b), 833 832 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL100",0x19ca78af,0x71d98e83), 834 833 PCMCIA_DEVICE_PROD_ID12("ELAN DIGITAL SYSTEMS LTD, c1997.","SERIAL CARD: SL232 1.00.",0x19ca78af,0x69fb7490), ··· 861 860 PCMCIA_DEVICE_NULL, 862 861 }; 863 862 MODULE_DEVICE_TABLE(pcmcia, serial_ids); 863 + 864 + MODULE_FIRMWARE("cis/PCMLM28.cis"); 865 + MODULE_FIRMWARE("cis/DP83903.cis"); 866 + MODULE_FIRMWARE("cis/3CCFEM556.cis"); 867 + MODULE_FIRMWARE("cis/3CXEM556.cis"); 868 + MODULE_FIRMWARE("cis/SW_8xx_SER.cis"); 869 + MODULE_FIRMWARE("cis/SW_7xx_SER.cis"); 870 + MODULE_FIRMWARE("cis/SW_555_SER.cis"); 871 + MODULE_FIRMWARE("cis/MT5634ZLX.cis"); 872 + MODULE_FIRMWARE("cis/COMpad2.cis"); 873 + MODULE_FIRMWARE("cis/COMpad4.cis"); 874 + MODULE_FIRMWARE("cis/RS-COM-2P.cis"); 864 875 865 876 static struct pcmcia_driver serial_cs_driver = { 866 877 .owner = THIS_MODULE,
+2 -1
firmware/Makefile
··· 69 69 fw-shipped-$(CONFIG_MYRI_SBUS) += myricom/lanai.bin 70 70 fw-shipped-$(CONFIG_PCMCIA_PCNET) += cis/LA-PCM.cis cis/PCMLM28.cis \ 71 71 cis/DP83903.cis cis/NE2K.cis \ 72 - cis/tamarack.cis cis/PE-200.cis 72 + cis/tamarack.cis cis/PE-200.cis \ 73 + cis/PE520.cis 73 74 fw-shipped-$(CONFIG_PCMCIA_3C589) += cis/3CXEM556.cis 74 75 fw-shipped-$(CONFIG_PCMCIA_3C574) += cis/3CCFEM556.cis 75 76 fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
+1
firmware/WHENCE
··· 601 601 cis/NE2K.cis 602 602 cis/tamarack.cis 603 603 cis/PE-200.cis 604 + cis/PE520.cis 604 605 605 606 Licence: GPL 606 607
+9
firmware/cis/PE520.cis.ihex
··· 1 + :1000000001030000FF152304014B544900504535FE 2 + :10001000323020504C55530050434D434941204508 3 + :10002000746865726E65740000FF20046101100041 4 + :10003000210206001A050101D00F0B1B09C101198D 5 + :0A00400001556530FFFF1400FF00BA 6 + :00000001FF 7 + # 8 + # Replacement CIS for PE520 ethernet card 9 + #
+1
include/linux/inetdevice.h
··· 81 81 #define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING) 82 82 #define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING) 83 83 #define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER) 84 + #define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK) 84 85 #define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \ 85 86 ACCEPT_SOURCE_ROUTE) 86 87 #define IN_DEV_ACCEPT_LOCAL(in_dev) IN_DEV_ORCONF((in_dev), ACCEPT_LOCAL)
+1
include/linux/phy.h
··· 447 447 int phy_device_register(struct phy_device *phy); 448 448 int phy_clear_interrupt(struct phy_device *phydev); 449 449 int phy_config_interrupt(struct phy_device *phydev, u32 interrupts); 450 + int phy_init_hw(struct phy_device *phydev); 450 451 int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, 451 452 u32 flags, phy_interface_t interface); 452 453 struct phy_device * phy_attach(struct net_device *dev,
+2 -1
include/linux/sysctl.h
··· 482 482 NET_IPV4_CONF_ARP_ACCEPT=21, 483 483 NET_IPV4_CONF_ARP_NOTIFY=22, 484 484 NET_IPV4_CONF_ACCEPT_LOCAL=23, 485 - NET_IPV4_CONF_PROXY_ARP_PVLAN=24, 485 + NET_IPV4_CONF_SRC_VMARK=24, 486 + NET_IPV4_CONF_PROXY_ARP_PVLAN=25, 486 487 __NET_IPV4_CONF_MAX 487 488 }; 488 489
+16
include/net/ip.h
··· 326 326 327 327 #endif 328 328 329 + static inline int sk_mc_loop(struct sock *sk) 330 + { 331 + if (!sk) 332 + return 1; 333 + switch (sk->sk_family) { 334 + case AF_INET: 335 + return inet_sk(sk)->mc_loop; 336 + #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 337 + case AF_INET6: 338 + return inet6_sk(sk)->mc_loop; 339 + #endif 340 + } 341 + __WARN(); 342 + return 1; 343 + } 344 + 329 345 extern int ip_call_ra_chain(struct sk_buff *skb); 330 346 331 347 /*
+6
net/bridge/netfilter/ebtables.c
··· 1406 1406 { 1407 1407 int ret; 1408 1408 1409 + if (!capable(CAP_NET_ADMIN)) 1410 + return -EPERM; 1411 + 1409 1412 switch(cmd) { 1410 1413 case EBT_SO_SET_ENTRIES: 1411 1414 ret = do_replace(sock_net(sk), user, len); ··· 1427 1424 int ret; 1428 1425 struct ebt_replace tmp; 1429 1426 struct ebt_table *t; 1427 + 1428 + if (!capable(CAP_NET_ADMIN)) 1429 + return -EPERM; 1430 1430 1431 1431 if (copy_from_user(&tmp, user, sizeof(tmp))) 1432 1432 return -EFAULT;
+7 -2
net/core/pktgen.c
··· 250 250 __u64 count; /* Default No packets to send */ 251 251 __u64 sofar; /* How many pkts we've sent so far */ 252 252 __u64 tx_bytes; /* How many bytes we've transmitted */ 253 - __u64 errors; /* Errors when trying to transmit, 254 - pkts will be re-sent */ 253 + __u64 errors; /* Errors when trying to transmit, */ 255 254 256 255 /* runtime counters relating to clone_skb */ 257 256 ··· 3463 3464 pkt_dev->sofar++; 3464 3465 pkt_dev->seq_num++; 3465 3466 pkt_dev->tx_bytes += pkt_dev->last_pkt_size; 3467 + break; 3468 + case NET_XMIT_DROP: 3469 + case NET_XMIT_CN: 3470 + case NET_XMIT_POLICED: 3471 + /* skb has been consumed */ 3472 + pkt_dev->errors++; 3466 3473 break; 3467 3474 default: /* Drivers are not supposed to return other values! */ 3468 3475 if (net_ratelimit())
+4
net/core/sock.c
··· 1205 1205 1206 1206 if (newsk->sk_prot->sockets_allocated) 1207 1207 percpu_counter_inc(newsk->sk_prot->sockets_allocated); 1208 + 1209 + if (sock_flag(newsk, SOCK_TIMESTAMP) || 1210 + sock_flag(newsk, SOCK_TIMESTAMPING_RX_SOFTWARE)) 1211 + net_enable_timestamp(); 1208 1212 } 1209 1213 out: 1210 1214 return newsk;
+1
net/ipv4/devinet.c
··· 1397 1397 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_SOURCE_ROUTE, 1398 1398 "accept_source_route"), 1399 1399 DEVINET_SYSCTL_RW_ENTRY(ACCEPT_LOCAL, "accept_local"), 1400 + DEVINET_SYSCTL_RW_ENTRY(SRC_VMARK, "src_valid_mark"), 1400 1401 DEVINET_SYSCTL_RW_ENTRY(PROXY_ARP, "proxy_arp"), 1401 1402 DEVINET_SYSCTL_RW_ENTRY(MEDIUM_ID, "medium_id"), 1402 1403 DEVINET_SYSCTL_RW_ENTRY(BOOTP_RELAY, "bootp_relay"),
+2
net/ipv4/fib_frontend.c
··· 252 252 no_addr = in_dev->ifa_list == NULL; 253 253 rpf = IN_DEV_RPFILTER(in_dev); 254 254 accept_local = IN_DEV_ACCEPT_LOCAL(in_dev); 255 + if (mark && !IN_DEV_SRC_VMARK(in_dev)) 256 + fl.mark = 0; 255 257 } 256 258 rcu_read_unlock(); 257 259
+1 -1
net/ipv4/ip_output.c
··· 254 254 */ 255 255 256 256 if (rt->rt_flags&RTCF_MULTICAST) { 257 - if ((!sk || inet_sk(sk)->mc_loop) 257 + if (sk_mc_loop(sk) 258 258 #ifdef CONFIG_IP_MROUTE 259 259 /* Small optimization: do not loopback not local frames, 260 260 which returned after forwarding; they will be dropped
+1 -2
net/ipv6/ip6_output.c
··· 121 121 skb->dev = dev; 122 122 123 123 if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) { 124 - struct ipv6_pinfo* np = skb->sk ? inet6_sk(skb->sk) : NULL; 125 124 struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb)); 126 125 127 - if (!(dev->flags & IFF_LOOPBACK) && (!np || np->mc_loop) && 126 + if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) && 128 127 ((mroute6_socket(dev_net(dev)) && 129 128 !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) || 130 129 ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
+2 -1
net/netfilter/ipvs/Kconfig
··· 112 112 module, choose M here. If unsure, say N. 113 113 114 114 config IP_VS_WRR 115 - tristate "weighted round-robin scheduling" 115 + tristate "weighted round-robin scheduling" 116 + select GCD 116 117 ---help--- 117 118 The weighted robin-robin scheduling algorithm directs network 118 119 connections to different real servers based on server weights
+13 -1
net/netfilter/ipvs/ip_vs_ctl.c
··· 2077 2077 if (!capable(CAP_NET_ADMIN)) 2078 2078 return -EPERM; 2079 2079 2080 + if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_SET_MAX) 2081 + return -EINVAL; 2082 + if (len < 0 || len > MAX_ARG_LEN) 2083 + return -EINVAL; 2080 2084 if (len != set_arglen[SET_CMDID(cmd)]) { 2081 2085 pr_err("set_ctl: len %u != %u\n", 2082 2086 len, set_arglen[SET_CMDID(cmd)]); ··· 2356 2352 { 2357 2353 unsigned char arg[128]; 2358 2354 int ret = 0; 2355 + unsigned int copylen; 2359 2356 2360 2357 if (!capable(CAP_NET_ADMIN)) 2361 2358 return -EPERM; 2359 + 2360 + if (cmd < IP_VS_BASE_CTL || cmd > IP_VS_SO_GET_MAX) 2361 + return -EINVAL; 2362 2362 2363 2363 if (*len < get_arglen[GET_CMDID(cmd)]) { 2364 2364 pr_err("get_ctl: len %u < %u\n", ··· 2370 2362 return -EINVAL; 2371 2363 } 2372 2364 2373 - if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0) 2365 + copylen = get_arglen[GET_CMDID(cmd)]; 2366 + if (copylen > 128) 2367 + return -EINVAL; 2368 + 2369 + if (copy_from_user(arg, user, copylen) != 0) 2374 2370 return -EFAULT; 2375 2371 2376 2372 if (mutex_lock_interruptible(&__ip_vs_mutex))
+1 -14
net/netfilter/ipvs/ip_vs_wrr.c
··· 24 24 #include <linux/module.h> 25 25 #include <linux/kernel.h> 26 26 #include <linux/net.h> 27 + #include <linux/gcd.h> 27 28 28 29 #include <net/ip_vs.h> 29 30 ··· 38 37 int di; /* decreasing interval */ 39 38 }; 40 39 41 - 42 - /* 43 - * Get the gcd of server weights 44 - */ 45 - static int gcd(int a, int b) 46 - { 47 - int c; 48 - 49 - while ((c = a % b)) { 50 - a = b; 51 - b = c; 52 - } 53 - return b; 54 - } 55 40 56 41 static int ip_vs_wrr_gcd_weight(struct ip_vs_service *svc) 57 42 {
+9 -9
net/netfilter/nf_conntrack_ftp.c
··· 323 323 struct nf_ct_ftp_master *info, int dir, 324 324 struct sk_buff *skb) 325 325 { 326 - unsigned int i, oldest = NUM_SEQ_TO_REMEMBER; 326 + unsigned int i, oldest; 327 327 328 328 /* Look for oldest: if we find exact match, we're done. */ 329 329 for (i = 0; i < info->seq_aft_nl_num[dir]; i++) { 330 330 if (info->seq_aft_nl[dir][i] == nl_seq) 331 331 return; 332 - 333 - if (oldest == info->seq_aft_nl_num[dir] || 334 - before(info->seq_aft_nl[dir][i], 335 - info->seq_aft_nl[dir][oldest])) 336 - oldest = i; 337 332 } 338 333 339 334 if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) { 340 335 info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq; 341 - } else if (oldest != NUM_SEQ_TO_REMEMBER && 342 - after(nl_seq, info->seq_aft_nl[dir][oldest])) { 343 - info->seq_aft_nl[dir][oldest] = nl_seq; 336 + } else { 337 + if (before(info->seq_aft_nl[dir][0], info->seq_aft_nl[dir][1])) 338 + oldest = 0; 339 + else 340 + oldest = 1; 341 + 342 + if (after(nl_seq, info->seq_aft_nl[dir][oldest])) 343 + info->seq_aft_nl[dir][oldest] = nl_seq; 344 344 } 345 345 } 346 346
+1 -1
net/rose/rose_loopback.c
··· 75 75 lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); 76 76 frametype = skb->data[2]; 77 77 dest = (rose_address *)(skb->data + 4); 78 - lci_o = 0xFFF - lci_i; 78 + lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; 79 79 80 80 skb_reset_transport_header(skb); 81 81
+1 -2
net/sctp/socket.c
··· 2087 2087 if (copy_from_user(&sp->autoclose, optval, optlen)) 2088 2088 return -EFAULT; 2089 2089 /* make sure it won't exceed MAX_SCHEDULE_TIMEOUT */ 2090 - if (sp->autoclose > (MAX_SCHEDULE_TIMEOUT / HZ) ) 2091 - sp->autoclose = (__u32)(MAX_SCHEDULE_TIMEOUT / HZ) ; 2090 + sp->autoclose = min_t(long, sp->autoclose, MAX_SCHEDULE_TIMEOUT / HZ); 2092 2091 2093 2092 return 0; 2094 2093 }
+1 -1
net/xfrm/xfrm_policy.c
··· 1445 1445 if (!dev) 1446 1446 goto free_dst; 1447 1447 1448 - /* Copy neighbout for reachability confirmation */ 1448 + /* Copy neighbour for reachability confirmation */ 1449 1449 dst0->neighbour = neigh_clone(dst->neighbour); 1450 1450 1451 1451 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len);