Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) mlx4 driver bug fixes (TX queue wakeups, csum complete indications)
from Ido Shamay, Eran Ben Elisha, and Or Gerlitz.

2) Missing unlock in error path of PTP support in renesas driver, from
Dan Carpenter.

3) Add Vitesse 8641 phy IDs to vitesse PHY driver, from Shaohui Xie.

4) Bnx2x driver bug fixes (linearization of encap packets, scratchpad
parity error notifications, flow-control and speed settings) from
Yuval Mintz, Manish Chopra, Shahed Shaikh, and Ariel Elior.

5) ipv6 extension header parsing in the igb chip has a HW errata,
disable it. From Todd Fujinaka.

6) Fix PCI link state locking issue in e1000e driver, from Yanir
Lubetkin.

7) Cure panics during MTU change in i40e, from Mitch Williams.

8) Don't leak promisc refs in DSA slave driver, from Gilad Ben-Yossef.

9) Add missing HAS_DMA dep to VIA Rhine driver, from Geert
Uytterhoeven.

10) Make sure DMA map/unmap calls are symmetric in bnx2x driver, from
Michal Schmidt.

11) Workaround for MDIO access problems in bcm7xxx devices, from Florian
Fainelli.

12) Fix races in SCTP protocol between OOTB responses and route
removals, from Alexander Sverdlin.

13) Fix jumbo frame checksum issue with some mvneta devices, from Simon
Guinot.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (58 commits)
sock_diag: don't broadcast kernel sockets
net: mvneta: disable IP checksum with jumbo frames for Armada 370
ARM: mvebu: update Ethernet compatible string for Armada XP
net: mvneta: introduce compatible string "marvell, armada-xp-neta"
api: fix compatibility of linux/in.h with netinet/in.h
net: icplus: fix typo in constant name
sis900: Trivial: Fix typos in enums
stmmac: Trivial: fix typo in constant name
sctp: Fix race between OOTB responce and route removal
net-Liquidio: Delete unnecessary checks before the function call "vfree"
vmxnet3: Bump up driver version number
amd-xgbe: Add the __GFP_NOWARN flag to Rx buffer allocation
net: phy: mdio-bcm-unimac: workaround initial read failures for integrated PHYs
net: bcmgenet: workaround initial read failures for integrated PHYs
net: phy: bcm7xxx: workaround MDIO management controller initial read
bnx2x: fix DMA API usage
net: via: VIA_RHINE and VIA_VELOCITY should depend on HAS_DMA
net/phy: tune get_phy_c45_ids to support more c45 phy
bnx2x: fix lockdep splat
net: fec: don't access RACC register when not available
...

+705 -380
+1 -1
Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
··· 1 1 * Marvell Armada 370 / Armada XP Ethernet Controller (NETA) 2 2 3 3 Required properties: 4 - - compatible: should be "marvell,armada-370-neta". 4 + - compatible: "marvell,armada-370-neta" or "marvell,armada-xp-neta". 5 5 - reg: address and length of the register set for the device. 6 6 - interrupts: interrupt for the device 7 7 - phy: See ethernet.txt file in the same directory.
-2
arch/arm/boot/dts/armada-370-xp.dtsi
··· 268 268 }; 269 269 270 270 eth0: ethernet@70000 { 271 - compatible = "marvell,armada-370-neta"; 272 271 reg = <0x70000 0x4000>; 273 272 interrupts = <8>; 274 273 clocks = <&gateclk 4>; ··· 283 284 }; 284 285 285 286 eth1: ethernet@74000 { 286 - compatible = "marvell,armada-370-neta"; 287 287 reg = <0x74000 0x4000>; 288 288 interrupts = <10>; 289 289 clocks = <&gateclk 3>;
+8
arch/arm/boot/dts/armada-370.dtsi
··· 311 311 dmacap,memset; 312 312 }; 313 313 }; 314 + 315 + ethernet@70000 { 316 + compatible = "marvell,armada-370-neta"; 317 + }; 318 + 319 + ethernet@74000 { 320 + compatible = "marvell,armada-370-neta"; 321 + }; 314 322 }; 315 323 }; 316 324 };
+1 -1
arch/arm/boot/dts/armada-xp-mv78260.dtsi
··· 318 318 }; 319 319 320 320 eth3: ethernet@34000 { 321 - compatible = "marvell,armada-370-neta"; 321 + compatible = "marvell,armada-xp-neta"; 322 322 reg = <0x34000 0x4000>; 323 323 interrupts = <14>; 324 324 clocks = <&gateclk 1>;
+1 -1
arch/arm/boot/dts/armada-xp-mv78460.dtsi
··· 356 356 }; 357 357 358 358 eth3: ethernet@34000 { 359 - compatible = "marvell,armada-370-neta"; 359 + compatible = "marvell,armada-xp-neta"; 360 360 reg = <0x34000 0x4000>; 361 361 interrupts = <14>; 362 362 clocks = <&gateclk 1>;
+9 -1
arch/arm/boot/dts/armada-xp.dtsi
··· 185 185 }; 186 186 187 187 eth2: ethernet@30000 { 188 - compatible = "marvell,armada-370-neta"; 188 + compatible = "marvell,armada-xp-neta"; 189 189 reg = <0x30000 0x4000>; 190 190 interrupts = <12>; 191 191 clocks = <&gateclk 2>; ··· 226 226 dmacap,xor; 227 227 dmacap,memset; 228 228 }; 229 + }; 230 + 231 + ethernet@70000 { 232 + compatible = "marvell,armada-xp-neta"; 233 + }; 234 + 235 + ethernet@74000 { 236 + compatible = "marvell,armada-xp-neta"; 229 237 }; 230 238 231 239 xor@f0900 {
+1 -1
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
··· 268 268 int ret; 269 269 270 270 /* Try to obtain pages, decreasing order if necessary */ 271 - gfp |= __GFP_COLD | __GFP_COMP; 271 + gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN; 272 272 while (order >= 0) { 273 273 pages = alloc_pages(gfp, order); 274 274 if (pages)
+1 -1
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 948 948 struct resource *res; 949 949 void __iomem *base_addr; 950 950 u32 offset; 951 - int ret; 951 + int ret = 0; 952 952 953 953 pdev = pdata->pdev; 954 954 dev = &pdev->dev;
+7 -5
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
··· 530 530 531 531 struct bnx2x_alloc_pool { 532 532 struct page *page; 533 - dma_addr_t dma; 534 533 unsigned int offset; 535 534 }; 536 535 ··· 2417 2418 AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR | \ 2418 2419 AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR) 2419 2420 2420 - #define HW_PRTY_ASSERT_SET_3 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ 2421 - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ 2422 - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \ 2423 - AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 2421 + #define HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD \ 2422 + (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \ 2423 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \ 2424 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY) 2425 + 2426 + #define HW_PRTY_ASSERT_SET_3 (HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD | \ 2427 + AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY) 2424 2428 2425 2429 #define HW_PRTY_ASSERT_SET_4 (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR | \ 2426 2430 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)
+17 -15
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 563 563 return -ENOMEM; 564 564 } 565 565 566 - pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0, 567 - PAGE_SIZE, DMA_FROM_DEVICE); 568 - if (unlikely(dma_mapping_error(&bp->pdev->dev, 569 - pool->dma))) { 570 - __free_pages(pool->page, PAGES_PER_SGE_SHIFT); 571 - pool->page = NULL; 572 - BNX2X_ERR("Can't map sge\n"); 573 - return -ENOMEM; 574 - } 575 566 pool->offset = 0; 567 + } 568 + 569 + mapping = dma_map_page(&bp->pdev->dev, pool->page, 570 + pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE); 571 + if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) { 572 + BNX2X_ERR("Can't map sge\n"); 573 + return -ENOMEM; 576 574 } 577 575 578 576 get_page(pool->page); 579 577 sw_buf->page = pool->page; 580 578 sw_buf->offset = pool->offset; 581 579 582 - mapping = pool->dma + sw_buf->offset; 583 580 dma_unmap_addr_set(sw_buf, mapping, mapping); 584 581 585 582 sge->addr_hi = cpu_to_le32(U64_HI(mapping)); ··· 645 648 return err; 646 649 } 647 650 648 - dma_unmap_single(&bp->pdev->dev, 649 - dma_unmap_addr(&old_rx_pg, mapping), 650 - SGE_PAGE_SIZE, DMA_FROM_DEVICE); 651 + dma_unmap_page(&bp->pdev->dev, 652 + dma_unmap_addr(&old_rx_pg, mapping), 653 + SGE_PAGE_SIZE, DMA_FROM_DEVICE); 651 654 /* Add one frag and update the appropriate fields in the skb */ 652 655 if (fp->mode == TPA_MODE_LRO) 653 656 skb_fill_page_desc(skb, j, old_rx_pg.page, ··· 3418 3421 u32 wnd_sum = 0; 3419 3422 3420 3423 /* Headers length */ 3421 - hlen = (int)(skb_transport_header(skb) - skb->data) + 3422 - tcp_hdrlen(skb); 3424 + if (xmit_type & XMIT_GSO_ENC) 3425 + hlen = (int)(skb_inner_transport_header(skb) - 3426 + skb->data) + 3427 + inner_tcp_hdrlen(skb); 3428 + else 3429 + hlen = (int)(skb_transport_header(skb) - 3430 + skb->data) + tcp_hdrlen(skb); 3423 3431 3424 3432 /* Amount of data (w/o headers) on linear part of SKB*/ 3425 3433 first_bd_sz = skb_headlen(skb) - hlen;
+2 -10
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
··· 807 807 /* Since many fragments can share the same page, make sure to 808 808 * only unmap and free the page once. 809 809 */ 810 - dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), 811 - SGE_PAGE_SIZE, DMA_FROM_DEVICE); 810 + dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping), 811 + SGE_PAGE_SIZE, DMA_FROM_DEVICE); 812 812 813 813 put_page(page); 814 814 ··· 973 973 { 974 974 if (!pool->page) 975 975 return; 976 - 977 - /* Page was not fully fragmented. Unmap unused space */ 978 - if (pool->offset < PAGE_SIZE) { 979 - dma_addr_t dma = pool->dma + pool->offset; 980 - int size = PAGE_SIZE - pool->offset; 981 - 982 - dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE); 983 - } 984 976 985 977 put_page(pool->page); 986 978
+43 -15
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
··· 257 257 { 258 258 struct bnx2x *bp = netdev_priv(dev); 259 259 int cfg_idx = bnx2x_get_link_cfg_idx(bp); 260 + u32 media_type; 260 261 261 262 /* Dual Media boards present all available port types */ 262 263 cmd->supported = bp->port.supported[cfg_idx] | 263 264 (bp->port.supported[cfg_idx ^ 1] & 264 265 (SUPPORTED_TP | SUPPORTED_FIBRE)); 265 266 cmd->advertising = bp->port.advertising[cfg_idx]; 266 - if (bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type == 267 - ETH_PHY_SFP_1G_FIBER) { 267 + media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type; 268 + if (media_type == ETH_PHY_SFP_1G_FIBER) { 268 269 cmd->supported &= ~(SUPPORTED_10000baseT_Full); 269 270 cmd->advertising &= ~(ADVERTISED_10000baseT_Full); 270 271 } ··· 313 312 cmd->lp_advertising |= ADVERTISED_100baseT_Full; 314 313 if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) 315 314 cmd->lp_advertising |= ADVERTISED_1000baseT_Half; 316 - if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) 317 - cmd->lp_advertising |= ADVERTISED_1000baseT_Full; 315 + if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) { 316 + if (media_type == ETH_PHY_KR) { 317 + cmd->lp_advertising |= 318 + ADVERTISED_1000baseKX_Full; 319 + } else { 320 + cmd->lp_advertising |= 321 + ADVERTISED_1000baseT_Full; 322 + } 323 + } 318 324 if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE) 319 325 cmd->lp_advertising |= ADVERTISED_2500baseX_Full; 320 - if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) 321 - cmd->lp_advertising |= ADVERTISED_10000baseT_Full; 326 + if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) { 327 + if (media_type == ETH_PHY_KR) { 328 + cmd->lp_advertising |= 329 + ADVERTISED_10000baseKR_Full; 330 + } else { 331 + cmd->lp_advertising |= 332 + ADVERTISED_10000baseT_Full; 333 + } 334 + } 322 335 if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE) 323 336 cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full; 324 337 } ··· 579 564 return -EINVAL; 580 565 } 581 566 582 - if 
(!(bp->port.supported[cfg_idx] & 583 - SUPPORTED_1000baseT_Full)) { 567 + if (bp->port.supported[cfg_idx] & 568 + SUPPORTED_1000baseT_Full) { 569 + advertising = (ADVERTISED_1000baseT_Full | 570 + ADVERTISED_TP); 571 + 572 + } else if (bp->port.supported[cfg_idx] & 573 + SUPPORTED_1000baseKX_Full) { 574 + advertising = ADVERTISED_1000baseKX_Full; 575 + } else { 584 576 DP(BNX2X_MSG_ETHTOOL, 585 577 "1G full not supported\n"); 586 578 return -EINVAL; 587 579 } 588 580 589 - advertising = (ADVERTISED_1000baseT_Full | 590 - ADVERTISED_TP); 591 581 break; 592 582 593 583 case SPEED_2500: ··· 620 600 return -EINVAL; 621 601 } 622 602 phy_idx = bnx2x_get_cur_phy_idx(bp); 623 - if (!(bp->port.supported[cfg_idx] 624 - & SUPPORTED_10000baseT_Full) || 625 - (bp->link_params.phy[phy_idx].media_type == 603 + if ((bp->port.supported[cfg_idx] & 604 + SUPPORTED_10000baseT_Full) && 605 + (bp->link_params.phy[phy_idx].media_type != 626 606 ETH_PHY_SFP_1G_FIBER)) { 607 + advertising = (ADVERTISED_10000baseT_Full | 608 + ADVERTISED_FIBRE); 609 + } else if (bp->port.supported[cfg_idx] & 610 + SUPPORTED_10000baseKR_Full) { 611 + advertising = (ADVERTISED_10000baseKR_Full | 612 + ADVERTISED_FIBRE); 613 + } else { 627 614 DP(BNX2X_MSG_ETHTOOL, 628 615 "10G full not supported\n"); 629 616 return -EINVAL; 630 617 } 631 618 632 - advertising = (ADVERTISED_10000baseT_Full | 633 - ADVERTISED_FIBRE); 634 619 break; 635 620 636 621 default: ··· 658 633 bp->link_params.multi_phy_config = new_multi_phy_config; 659 634 if (netif_running(dev)) { 660 635 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 636 + bnx2x_force_link_reset(bp); 661 637 bnx2x_link_set(bp); 662 638 } 663 639 ··· 1230 1204 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { 1231 1205 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM, 1232 1206 "cannot get access to nvram interface\n"); 1207 + bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_NVRAM); 1233 1208 return -EBUSY; 1234 1209 } 1235 1210 ··· 1971 1944 1972 1945 if (netif_running(dev)) { 1973 1946 
bnx2x_stats_handle(bp, STATS_EVENT_STOP); 1947 + bnx2x_force_link_reset(bp); 1974 1948 bnx2x_link_set(bp); 1975 1949 } 1976 1950
+32 -11
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
··· 3392 3392 case BNX2X_FLOW_CTRL_AUTO: 3393 3393 switch (params->req_fc_auto_adv) { 3394 3394 case BNX2X_FLOW_CTRL_BOTH: 3395 + case BNX2X_FLOW_CTRL_RX: 3395 3396 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH; 3396 3397 break; 3397 - case BNX2X_FLOW_CTRL_RX: 3398 3398 case BNX2X_FLOW_CTRL_TX: 3399 3399 *ieee_fc |= 3400 3400 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC; ··· 3488 3488 bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD, MDIO_AN_REG_ADV_PAUSE, val); 3489 3489 } 3490 3490 3491 - static void bnx2x_pause_resolve(struct link_vars *vars, u32 pause_result) 3492 - { /* LD LP */ 3491 + static void bnx2x_pause_resolve(struct bnx2x_phy *phy, 3492 + struct link_params *params, 3493 + struct link_vars *vars, 3494 + u32 pause_result) 3495 + { 3496 + struct bnx2x *bp = params->bp; 3497 + /* LD LP */ 3493 3498 switch (pause_result) { /* ASYM P ASYM P */ 3494 3499 case 0xb: /* 1 0 1 1 */ 3500 + DP(NETIF_MSG_LINK, "Flow Control: TX only\n"); 3495 3501 vars->flow_ctrl = BNX2X_FLOW_CTRL_TX; 3496 3502 break; 3497 3503 3498 3504 case 0xe: /* 1 1 1 0 */ 3505 + DP(NETIF_MSG_LINK, "Flow Control: RX only\n"); 3499 3506 vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; 3500 3507 break; 3501 3508 ··· 3510 3503 case 0x7: /* 0 1 1 1 */ 3511 3504 case 0xd: /* 1 1 0 1 */ 3512 3505 case 0xf: /* 1 1 1 1 */ 3513 - vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; 3506 + /* If the user selected to advertise RX ONLY, 3507 + * although we advertised both, need to enable 3508 + * RX only. 
3509 + */ 3510 + if (params->req_fc_auto_adv == BNX2X_FLOW_CTRL_BOTH) { 3511 + DP(NETIF_MSG_LINK, "Flow Control: RX & TX\n"); 3512 + vars->flow_ctrl = BNX2X_FLOW_CTRL_BOTH; 3513 + } else { 3514 + DP(NETIF_MSG_LINK, "Flow Control: RX only\n"); 3515 + vars->flow_ctrl = BNX2X_FLOW_CTRL_RX; 3516 + } 3514 3517 break; 3515 3518 3516 3519 default: 3520 + DP(NETIF_MSG_LINK, "Flow Control: None\n"); 3521 + vars->flow_ctrl = BNX2X_FLOW_CTRL_NONE; 3517 3522 break; 3518 3523 } 3519 3524 if (pause_result & (1<<0)) ··· 3586 3567 pause_result |= (lp_pause & 3587 3568 MDIO_AN_REG_ADV_PAUSE_MASK) >> 10; 3588 3569 DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n", pause_result); 3589 - bnx2x_pause_resolve(vars, pause_result); 3570 + bnx2x_pause_resolve(phy, params, vars, pause_result); 3590 3571 3591 3572 } 3592 3573 ··· 5415 5396 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 5416 5397 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n", pause_result); 5417 5398 } 5418 - bnx2x_pause_resolve(vars, pause_result); 5399 + bnx2x_pause_resolve(phy, params, vars, pause_result); 5419 5400 5420 5401 } 5421 5402 ··· 7148 7129 pause_result |= (lp_pause & 7149 7130 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH) >> 7; 7150 7131 7151 - bnx2x_pause_resolve(vars, pause_result); 7132 + bnx2x_pause_resolve(phy, params, vars, pause_result); 7152 7133 DP(NETIF_MSG_LINK, "Ext PHY CL37 pause result 0x%x\n", 7153 7134 pause_result); 7154 7135 } ··· 11493 11474 SUPPORTED_100baseT_Half | 11494 11475 SUPPORTED_100baseT_Full | 11495 11476 SUPPORTED_1000baseT_Full | 11477 + SUPPORTED_1000baseKX_Full | 11496 11478 SUPPORTED_10000baseT_Full | 11479 + SUPPORTED_10000baseKR_Full | 11497 11480 SUPPORTED_20000baseKR2_Full | 11498 11481 SUPPORTED_20000baseMLD2_Full | 11499 11482 SUPPORTED_FIBRE | ··· 12001 11980 break; 12002 11981 case PORT_HW_CFG_NET_SERDES_IF_KR: 12003 11982 phy->media_type = ETH_PHY_KR; 12004 - phy->supported &= (SUPPORTED_1000baseT_Full | 12005 - SUPPORTED_10000baseT_Full | 11983 + phy->supported &= 
(SUPPORTED_1000baseKX_Full | 11984 + SUPPORTED_10000baseKR_Full | 12006 11985 SUPPORTED_FIBRE | 12007 11986 SUPPORTED_Autoneg | 12008 11987 SUPPORTED_Pause | ··· 12020 11999 phy->media_type = ETH_PHY_KR; 12021 12000 phy->flags |= FLAGS_WC_DUAL_MODE; 12022 12001 phy->supported &= (SUPPORTED_20000baseKR2_Full | 12023 - SUPPORTED_10000baseT_Full | 12024 - SUPPORTED_1000baseT_Full | 12002 + SUPPORTED_10000baseKR_Full | 12003 + SUPPORTED_1000baseKX_Full | 12025 12004 SUPPORTED_Autoneg | 12026 12005 SUPPORTED_FIBRE | 12027 12006 SUPPORTED_Pause |
+41 -19
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
··· 2287 2287 void bnx2x_calc_fc_adv(struct bnx2x *bp) 2288 2288 { 2289 2289 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp); 2290 + 2291 + bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2292 + ADVERTISED_Pause); 2290 2293 switch (bp->link_vars.ieee_fc & 2291 2294 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) { 2292 - case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE: 2293 - bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2294 - ADVERTISED_Pause); 2295 - break; 2296 - 2297 2295 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH: 2298 2296 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause | 2299 2297 ADVERTISED_Pause); ··· 2302 2304 break; 2303 2305 2304 2306 default: 2305 - bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause | 2306 - ADVERTISED_Pause); 2307 2307 break; 2308 2308 } 2309 2309 } ··· 2347 2351 if (load_mode == LOAD_DIAG) { 2348 2352 struct link_params *lp = &bp->link_params; 2349 2353 lp->loopback_mode = LOOPBACK_XGXS; 2350 - /* do PHY loopback at 10G speed, if possible */ 2351 - if (lp->req_line_speed[cfx_idx] < SPEED_10000) { 2354 + /* Prefer doing PHY loopback at highest speed */ 2355 + if (lp->req_line_speed[cfx_idx] < SPEED_20000) { 2352 2356 if (lp->speed_cap_mask[cfx_idx] & 2353 - PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2357 + PORT_HW_CFG_SPEED_CAPABILITY_D0_20G) 2354 2358 lp->req_line_speed[cfx_idx] = 2355 - SPEED_10000; 2359 + SPEED_20000; 2360 + else if (lp->speed_cap_mask[cfx_idx] & 2361 + PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) 2362 + lp->req_line_speed[cfx_idx] = 2363 + SPEED_10000; 2356 2364 else 2357 2365 lp->req_line_speed[cfx_idx] = 2358 2366 SPEED_1000; ··· 4867 4867 res = true; 4868 4868 break; 4869 4869 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY: 4870 - if (print) 4871 - _print_next_block((*par_num)++, 4872 - "MCP SCPAD"); 4870 + (*par_num)++; 4873 4871 /* clear latched SCPAD PATIRY from MCP */ 4874 4872 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 4875 4873 1UL << 10); ··· 4929 4931 (sig[3] & HW_PRTY_ASSERT_SET_3) || 
4930 4932 (sig[4] & HW_PRTY_ASSERT_SET_4)) { 4931 4933 int par_num = 0; 4934 + 4932 4935 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n" 4933 4936 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n", 4934 4937 sig[0] & HW_PRTY_ASSERT_SET_0, ··· 4937 4938 sig[2] & HW_PRTY_ASSERT_SET_2, 4938 4939 sig[3] & HW_PRTY_ASSERT_SET_3, 4939 4940 sig[4] & HW_PRTY_ASSERT_SET_4); 4940 - if (print) 4941 - netdev_err(bp->dev, 4942 - "Parity errors detected in blocks: "); 4941 + if (print) { 4942 + if (((sig[0] & HW_PRTY_ASSERT_SET_0) || 4943 + (sig[1] & HW_PRTY_ASSERT_SET_1) || 4944 + (sig[2] & HW_PRTY_ASSERT_SET_2) || 4945 + (sig[4] & HW_PRTY_ASSERT_SET_4)) || 4946 + (sig[3] & HW_PRTY_ASSERT_SET_3_WITHOUT_SCPAD)) { 4947 + netdev_err(bp->dev, 4948 + "Parity errors detected in blocks: "); 4949 + } else { 4950 + print = false; 4951 + } 4952 + } 4943 4953 res |= bnx2x_check_blocks_with_parity0(bp, 4944 4954 sig[0] & HW_PRTY_ASSERT_SET_0, &par_num, print); 4945 4955 res |= bnx2x_check_blocks_with_parity1(bp, ··· 8439 8431 BNX2X_ETH_MAC, &ramrod_flags); 8440 8432 } else { /* vf */ 8441 8433 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, 8442 - bp->fp->index, true); 8434 + bp->fp->index, set); 8443 8435 } 8444 8436 } 8445 8437 ··· 9331 9323 * function stop ramrod is sent, since as part of this ramrod FW access 9332 9324 * PTP registers. 9333 9325 */ 9334 - bnx2x_stop_ptp(bp); 9326 + if (bp->flags & PTP_SUPPORTED) 9327 + bnx2x_stop_ptp(bp); 9335 9328 9336 9329 /* Disable HW interrupts, NAPI */ 9337 9330 bnx2x_netif_stop(bp, 1); ··· 11156 11147 bp->port.advertising[idx] |= 11157 11148 (ADVERTISED_1000baseT_Full | 11158 11149 ADVERTISED_TP); 11150 + } else if (bp->port.supported[idx] & 11151 + SUPPORTED_1000baseKX_Full) { 11152 + bp->link_params.req_line_speed[idx] = 11153 + SPEED_1000; 11154 + bp->port.advertising[idx] |= 11155 + ADVERTISED_1000baseKX_Full; 11159 11156 } else { 11160 11157 BNX2X_ERR("NVRAM config error. 
Invalid link_config 0x%x speed_cap_mask 0x%x\n", 11161 11158 link_config, ··· 11193 11178 SPEED_10000; 11194 11179 bp->port.advertising[idx] |= 11195 11180 (ADVERTISED_10000baseT_Full | 11181 + ADVERTISED_FIBRE); 11182 + } else if (bp->port.supported[idx] & 11183 + SUPPORTED_10000baseKR_Full) { 11184 + bp->link_params.req_line_speed[idx] = 11185 + SPEED_10000; 11186 + bp->port.advertising[idx] |= 11187 + (ADVERTISED_10000baseKR_Full | 11196 11188 ADVERTISED_FIBRE); 11197 11189 } else { 11198 11190 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
··· 424 424 o->head_exe_request = false; 425 425 o->saved_ramrod_flags = 0; 426 426 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, &ramrod_flags); 427 - if (rc != 0) { 427 + if ((rc != 0) && (rc != 1)) { 428 428 BNX2X_ERR("execution of pending commands failed with rc %d\n", 429 429 rc); 430 430 #ifdef BNX2X_STOP_ON_ERROR
+1
drivers/net/ethernet/broadcom/genet/bcmgenet.h
··· 594 594 wait_queue_head_t wq; 595 595 struct phy_device *phydev; 596 596 struct device_node *phy_dn; 597 + struct device_node *mdio_dn; 597 598 struct mii_bus *mii_bus; 598 599 u16 gphy_rev; 599 600 struct clk *clk_eee;
+50 -4
drivers/net/ethernet/broadcom/genet/bcmmii.c
··· 408 408 return 0; 409 409 } 410 410 411 + /* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with 412 + * their internal MDIO management controller making them fail to successfully 413 + * be read from or written to for the first transaction. We insert a dummy 414 + * BMSR read here to make sure that phy_get_device() and get_phy_id() can 415 + * correctly read the PHY MII_PHYSID1/2 registers and successfully register a 416 + * PHY device for this peripheral. 417 + * 418 + * Once the PHY driver is registered, we can workaround subsequent reads from 419 + * there (e.g: during system-wide power management). 420 + * 421 + * bus->reset is invoked before mdiobus_scan during mdiobus_register and is 422 + * therefore the right location to stick that workaround. Since we do not want 423 + * to read from non-existing PHYs, we either use bus->phy_mask or do a manual 424 + * Device Tree scan to limit the search area. 425 + */ 426 + static int bcmgenet_mii_bus_reset(struct mii_bus *bus) 427 + { 428 + struct net_device *dev = bus->priv; 429 + struct bcmgenet_priv *priv = netdev_priv(dev); 430 + struct device_node *np = priv->mdio_dn; 431 + struct device_node *child = NULL; 432 + u32 read_mask = 0; 433 + int addr = 0; 434 + 435 + if (!np) { 436 + read_mask = 1 << priv->phy_addr; 437 + } else { 438 + for_each_available_child_of_node(np, child) { 439 + addr = of_mdio_parse_addr(&dev->dev, child); 440 + if (addr < 0) 441 + continue; 442 + 443 + read_mask |= 1 << addr; 444 + } 445 + } 446 + 447 + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { 448 + if (read_mask & 1 << addr) { 449 + dev_dbg(&dev->dev, "Workaround for PHY @ %d\n", addr); 450 + mdiobus_read(bus, addr, MII_BMSR); 451 + } 452 + } 453 + 454 + return 0; 455 + } 456 + 411 457 static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv) 412 458 { 413 459 struct mii_bus *bus; ··· 473 427 bus->parent = &priv->pdev->dev; 474 428 bus->read = bcmgenet_mii_read; 475 429 bus->write = bcmgenet_mii_write; 430 + 
bus->reset = bcmgenet_mii_bus_reset; 476 431 snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", 477 432 priv->pdev->name, priv->pdev->id); 478 433 ··· 490 443 { 491 444 struct device_node *dn = priv->pdev->dev.of_node; 492 445 struct device *kdev = &priv->pdev->dev; 493 - struct device_node *mdio_dn; 494 446 char *compat; 495 447 int ret; 496 448 ··· 497 451 if (!compat) 498 452 return -ENOMEM; 499 453 500 - mdio_dn = of_find_compatible_node(dn, NULL, compat); 454 + priv->mdio_dn = of_find_compatible_node(dn, NULL, compat); 501 455 kfree(compat); 502 - if (!mdio_dn) { 456 + if (!priv->mdio_dn) { 503 457 dev_err(kdev, "unable to find MDIO bus node\n"); 504 458 return -ENODEV; 505 459 } 506 460 507 - ret = of_mdiobus_register(priv->mii_bus, mdio_dn); 461 + ret = of_mdiobus_register(priv->mii_bus, priv->mdio_dn); 508 462 if (ret) { 509 463 dev_err(kdev, "failed to register MDIO bus\n"); 510 464 return ret;
+3 -2
drivers/net/ethernet/cavium/liquidio/lio_ethtool.c
··· 434 434 if (ret) 435 435 return ret; 436 436 437 - octnet_mdio45_access(lio, 1, LIO68XX_LED_BEACON_ADDR, 438 - &lio->phy_beacon_val); 437 + ret = octnet_mdio45_access(lio, 1, 438 + LIO68XX_LED_BEACON_ADDR, 439 + &lio->phy_beacon_val); 439 440 if (ret) 440 441 return ret; 441 442
+3 -8
drivers/net/ethernet/cavium/liquidio/octeon_device.c
··· 650 650 651 651 for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES; i++) { 652 652 /* could check mask as well */ 653 - if (oct->droq[i]) 654 - vfree(oct->droq[i]); 653 + vfree(oct->droq[i]); 655 654 } 656 655 657 656 for (i = 0; i < MAX_OCTEON_INSTR_QUEUES; i++) { 658 657 /* could check mask as well */ 659 - if (oct->instr_queue[i]) 660 - vfree(oct->instr_queue[i]); 658 + vfree(oct->instr_queue[i]); 661 659 } 662 660 663 661 i = oct->octeon_id; ··· 1076 1078 oct->dispatch.count--; 1077 1079 1078 1080 spin_unlock_bh(&oct->dispatch.lock); 1079 - 1080 - if (dfree) 1081 - vfree(dfree); 1082 - 1081 + vfree(dfree); 1083 1082 return retval; 1084 1083 } 1085 1084
+1 -3
drivers/net/ethernet/cavium/liquidio/octeon_droq.c
··· 216 216 dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no); 217 217 218 218 octeon_droq_destroy_ring_buffers(oct, droq); 219 - 220 - if (droq->recv_buf_list) 221 - vfree(droq->recv_buf_list); 219 + vfree(droq->recv_buf_list); 222 220 223 221 if (droq->info_base_addr) 224 222 cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
+1 -2
drivers/net/ethernet/cavium/liquidio/request_manager.c
··· 175 175 desc_size = 176 176 CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf)); 177 177 178 - if (iq->request_list) 179 - vfree(iq->request_list); 178 + vfree(iq->request_list); 180 179 181 180 if (iq->base_addr) { 182 181 q_size = iq->max_count * desc_size;
+2 -2
drivers/net/ethernet/cisco/enic/enic_main.c
··· 1208 1208 napi_complete(napi); 1209 1209 vnic_intr_unmask(&enic->intr[intr]); 1210 1210 } 1211 - enic_poll_unlock_napi(&enic->rq[cq_rq]); 1211 + enic_poll_unlock_napi(&enic->rq[cq_rq], napi); 1212 1212 1213 1213 return rq_work_done; 1214 1214 } ··· 1414 1414 */ 1415 1415 enic_calc_int_moderation(enic, &enic->rq[rq]); 1416 1416 1417 - enic_poll_unlock_napi(&enic->rq[rq]); 1417 + enic_poll_unlock_napi(&enic->rq[rq], napi); 1418 1418 if (work_done < work_to_do) { 1419 1419 1420 1420 /* Some work done, but not enough to stay in polling,
+27 -64
drivers/net/ethernet/cisco/enic/vnic_rq.h
··· 21 21 #define _VNIC_RQ_H_ 22 22 23 23 #include <linux/pci.h> 24 + #include <linux/netdevice.h> 24 25 25 26 #include "vnic_dev.h" 26 27 #include "vnic_cq.h" ··· 76 75 uint64_t wr_id; 77 76 }; 78 77 78 + enum enic_poll_state { 79 + ENIC_POLL_STATE_IDLE, 80 + ENIC_POLL_STATE_NAPI, 81 + ENIC_POLL_STATE_POLL 82 + }; 83 + 79 84 struct vnic_rq { 80 85 unsigned int index; 81 86 struct vnic_dev *vdev; ··· 93 86 void *os_buf_head; 94 87 unsigned int pkts_outstanding; 95 88 #ifdef CONFIG_NET_RX_BUSY_POLL 96 - #define ENIC_POLL_STATE_IDLE 0 97 - #define ENIC_POLL_STATE_NAPI (1 << 0) /* NAPI owns this poll */ 98 - #define ENIC_POLL_STATE_POLL (1 << 1) /* poll owns this poll */ 99 - #define ENIC_POLL_STATE_NAPI_YIELD (1 << 2) /* NAPI yielded this poll */ 100 - #define ENIC_POLL_STATE_POLL_YIELD (1 << 3) /* poll yielded this poll */ 101 - #define ENIC_POLL_YIELD (ENIC_POLL_STATE_NAPI_YIELD | \ 102 - ENIC_POLL_STATE_POLL_YIELD) 103 - #define ENIC_POLL_LOCKED (ENIC_POLL_STATE_NAPI | \ 104 - ENIC_POLL_STATE_POLL) 105 - #define ENIC_POLL_USER_PEND (ENIC_POLL_STATE_POLL | \ 106 - ENIC_POLL_STATE_POLL_YIELD) 107 - unsigned int bpoll_state; 108 - spinlock_t bpoll_lock; 89 + atomic_t bpoll_state; 109 90 #endif /* CONFIG_NET_RX_BUSY_POLL */ 110 91 }; 111 92 ··· 210 215 #ifdef CONFIG_NET_RX_BUSY_POLL 211 216 static inline void enic_busy_poll_init_lock(struct vnic_rq *rq) 212 217 { 213 - spin_lock_init(&rq->bpoll_lock); 214 - rq->bpoll_state = ENIC_POLL_STATE_IDLE; 218 + atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); 215 219 } 216 220 217 221 static inline bool enic_poll_lock_napi(struct vnic_rq *rq) 218 222 { 219 - bool rc = true; 223 + int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, 224 + ENIC_POLL_STATE_NAPI); 220 225 221 - spin_lock(&rq->bpoll_lock); 222 - if (rq->bpoll_state & ENIC_POLL_LOCKED) { 223 - WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); 224 - rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD; 225 - rc = false; 226 - } else { 227 - rq->bpoll_state = 
ENIC_POLL_STATE_NAPI; 228 - } 229 - spin_unlock(&rq->bpoll_lock); 230 - 231 - return rc; 226 + return (rc == ENIC_POLL_STATE_IDLE); 232 227 } 233 228 234 - static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) 229 + static inline void enic_poll_unlock_napi(struct vnic_rq *rq, 230 + struct napi_struct *napi) 235 231 { 236 - bool rc = false; 237 - 238 - spin_lock(&rq->bpoll_lock); 239 - WARN_ON(rq->bpoll_state & 240 - (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD)); 241 - if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) 242 - rc = true; 243 - rq->bpoll_state = ENIC_POLL_STATE_IDLE; 244 - spin_unlock(&rq->bpoll_lock); 245 - 246 - return rc; 232 + WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_NAPI); 233 + napi_gro_flush(napi, false); 234 + atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); 247 235 } 248 236 249 237 static inline bool enic_poll_lock_poll(struct vnic_rq *rq) 250 238 { 251 - bool rc = true; 239 + int rc = atomic_cmpxchg(&rq->bpoll_state, ENIC_POLL_STATE_IDLE, 240 + ENIC_POLL_STATE_POLL); 252 241 253 - spin_lock_bh(&rq->bpoll_lock); 254 - if (rq->bpoll_state & ENIC_POLL_LOCKED) { 255 - rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD; 256 - rc = false; 257 - } else { 258 - rq->bpoll_state |= ENIC_POLL_STATE_POLL; 259 - } 260 - spin_unlock_bh(&rq->bpoll_lock); 261 - 262 - return rc; 242 + return (rc == ENIC_POLL_STATE_IDLE); 263 243 } 264 244 265 - static inline bool enic_poll_unlock_poll(struct vnic_rq *rq) 245 + 246 + static inline void enic_poll_unlock_poll(struct vnic_rq *rq) 266 247 { 267 - bool rc = false; 268 - 269 - spin_lock_bh(&rq->bpoll_lock); 270 - WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI); 271 - if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD) 272 - rc = true; 273 - rq->bpoll_state = ENIC_POLL_STATE_IDLE; 274 - spin_unlock_bh(&rq->bpoll_lock); 275 - 276 - return rc; 248 + WARN_ON(atomic_read(&rq->bpoll_state) != ENIC_POLL_STATE_POLL); 249 + atomic_set(&rq->bpoll_state, ENIC_POLL_STATE_IDLE); 277 250 } 278 251 279 
252 static inline bool enic_poll_busy_polling(struct vnic_rq *rq) 280 253 { 281 - WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED)); 282 - return rq->bpoll_state & ENIC_POLL_USER_PEND; 254 + return atomic_read(&rq->bpoll_state) & ENIC_POLL_STATE_POLL; 283 255 } 284 256 285 257 #else ··· 260 298 return true; 261 299 } 262 300 263 - static inline bool enic_poll_unlock_napi(struct vnic_rq *rq) 301 + static inline bool enic_poll_unlock_napi(struct vnic_rq *rq, 302 + struct napi_struct *napi) 264 303 { 265 304 return false; 266 305 }
+2 -2
drivers/net/ethernet/freescale/Kconfig
··· 83 83 84 84 config GIANFAR 85 85 tristate "Gianfar Ethernet" 86 - depends on FSL_SOC 87 86 select FSL_PQ_MDIO 88 87 select PHYLIB 89 88 select CRC32 90 89 ---help--- 91 90 This driver supports the Gigabit TSEC on the MPC83xx, MPC85xx, 92 - and MPC86xx family of chips, and the FEC on the 8540. 91 + and MPC86xx family of chips, the eTSEC on LS1021A and the FEC 92 + on the 8540. 93 93 94 94 endif # NET_VENDOR_FREESCALE
+2
drivers/net/ethernet/freescale/fec.h
··· 428 428 #define FEC_QUIRK_BUG_CAPTURE (1 << 10) 429 429 /* Controller has only one MDIO bus */ 430 430 #define FEC_QUIRK_SINGLE_MDIO (1 << 11) 431 + /* Controller supports RACC register */ 432 + #define FEC_QUIRK_HAS_RACC (1 << 12) 431 433 432 434 struct fec_enet_priv_tx_q { 433 435 int index;
+17 -13
drivers/net/ethernet/freescale/fec_main.c
··· 85 85 .driver_data = 0, 86 86 }, { 87 87 .name = "imx25-fec", 88 - .driver_data = FEC_QUIRK_USE_GASKET, 88 + .driver_data = FEC_QUIRK_USE_GASKET | FEC_QUIRK_HAS_RACC, 89 89 }, { 90 90 .name = "imx27-fec", 91 - .driver_data = 0, 91 + .driver_data = FEC_QUIRK_HAS_RACC, 92 92 }, { 93 93 .name = "imx28-fec", 94 94 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | 95 - FEC_QUIRK_SINGLE_MDIO, 95 + FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC, 96 96 }, { 97 97 .name = "imx6q-fec", 98 98 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 99 99 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 100 - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358, 100 + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 | 101 + FEC_QUIRK_HAS_RACC, 101 102 }, { 102 103 .name = "mvf600-fec", 103 - .driver_data = FEC_QUIRK_ENET_MAC, 104 + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC, 104 105 }, { 105 106 .name = "imx6sx-fec", 106 107 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | 107 108 FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | 108 109 FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | 109 - FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE, 110 + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | 111 + FEC_QUIRK_HAS_RACC, 110 112 }, { 111 113 /* sentinel */ 112 114 } ··· 972 970 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 973 971 974 972 #if !defined(CONFIG_M5272) 975 - /* set RX checksum */ 976 - val = readl(fep->hwp + FEC_RACC); 977 - if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 978 - val |= FEC_RACC_OPTIONS; 979 - else 980 - val &= ~FEC_RACC_OPTIONS; 981 - writel(val, fep->hwp + FEC_RACC); 973 + if (fep->quirks & FEC_QUIRK_HAS_RACC) { 974 + /* set RX checksum */ 975 + val = readl(fep->hwp + FEC_RACC); 976 + if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) 977 + val |= FEC_RACC_OPTIONS; 978 + else 979 + val &= ~FEC_RACC_OPTIONS; 980 + writel(val, fep->hwp + FEC_RACC); 981 + } 982 982 #endif 983 983 984 984 /*
+1 -1
drivers/net/ethernet/icplus/ipg.c
··· 1028 1028 1029 1029 /* detailed rx_errors */ 1030 1030 sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) + 1031 - ipg_r16(IPG_FRAMETOOLONGERRRORS); 1031 + ipg_r16(IPG_FRAMETOOLONGERRORS); 1032 1032 sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS); 1033 1033 1034 1034 /* Unutilized IPG statistic registers. */
+1 -1
drivers/net/ethernet/icplus/ipg.h
··· 102 102 #define IPG_MCSTFRAMESRCVDOK 0xB8 103 103 #define IPG_BCSTFRAMESRCVDOK 0xBE 104 104 #define IPG_MACCONTROLFRAMESRCVD 0xC6 105 - #define IPG_FRAMETOOLONGERRRORS 0xC8 105 + #define IPG_FRAMETOOLONGERRORS 0xC8 106 106 #define IPG_INRANGELENGTHERRORS 0xCA 107 107 #define IPG_FRAMECHECKSEQERRORS 0xCC 108 108 #define IPG_FRAMESLOSTRXERRORS 0xCE
+100 -30
drivers/net/ethernet/intel/e1000e/ich8lan.c
··· 237 237 if (ret_val) 238 238 return false; 239 239 out: 240 - if ((hw->mac.type == e1000_pch_lpt) || 241 - (hw->mac.type == e1000_pch_spt)) { 242 - /* Unforce SMBus mode in PHY */ 243 - e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); 244 - phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; 245 - e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); 240 + if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { 241 + /* Only unforce SMBus if ME is not active */ 242 + if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) { 243 + /* Unforce SMBus mode in PHY */ 244 + e1e_rphy_locked(hw, CV_SMB_CTRL, &phy_reg); 245 + phy_reg &= ~CV_SMB_CTRL_FORCE_SMBUS; 246 + e1e_wphy_locked(hw, CV_SMB_CTRL, phy_reg); 246 247 247 - /* Unforce SMBus mode in MAC */ 248 - mac_reg = er32(CTRL_EXT); 249 - mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; 250 - ew32(CTRL_EXT, mac_reg); 248 + /* Unforce SMBus mode in MAC */ 249 + mac_reg = er32(CTRL_EXT); 250 + mac_reg &= ~E1000_CTRL_EXT_FORCE_SMBUS; 251 + ew32(CTRL_EXT, mac_reg); 252 + } 251 253 } 252 254 253 255 return true; ··· 1089 1087 u32 mac_reg; 1090 1088 s32 ret_val = 0; 1091 1089 u16 phy_reg; 1090 + u16 oem_reg = 0; 1092 1091 1093 1092 if ((hw->mac.type < e1000_pch_lpt) || 1094 1093 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPT_I217_LM) || ··· 1131 1128 if (ret_val) 1132 1129 goto out; 1133 1130 1134 - /* Si workaround for ULP entry flow on i127/rev6 h/w. 
Enable 1135 - * LPLU and disable Gig speed when entering ULP 1136 - */ 1137 - if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { 1138 - ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, 1139 - &phy_reg); 1140 - if (ret_val) 1141 - goto release; 1142 - phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; 1143 - ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, 1144 - phy_reg); 1145 - if (ret_val) 1146 - goto release; 1147 - } 1148 - 1149 1131 /* Force SMBus mode in PHY */ 1150 1132 ret_val = e1000_read_phy_reg_hv_locked(hw, CV_SMB_CTRL, &phy_reg); 1151 1133 if (ret_val) ··· 1143 1155 mac_reg |= E1000_CTRL_EXT_FORCE_SMBUS; 1144 1156 ew32(CTRL_EXT, mac_reg); 1145 1157 1158 + /* Si workaround for ULP entry flow on i127/rev6 h/w. Enable 1159 + * LPLU and disable Gig speed when entering ULP 1160 + */ 1161 + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6)) { 1162 + ret_val = e1000_read_phy_reg_hv_locked(hw, HV_OEM_BITS, 1163 + &oem_reg); 1164 + if (ret_val) 1165 + goto release; 1166 + 1167 + phy_reg = oem_reg; 1168 + phy_reg |= HV_OEM_BITS_LPLU | HV_OEM_BITS_GBE_DIS; 1169 + 1170 + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, 1171 + phy_reg); 1172 + 1173 + if (ret_val) 1174 + goto release; 1175 + } 1176 + 1146 1177 /* Set Inband ULP Exit, Reset to SMBus mode and 1147 1178 * Disable SMBus Release on PERST# in PHY 1148 1179 */ ··· 1173 1166 if (to_sx) { 1174 1167 if (er32(WUFC) & E1000_WUFC_LNKC) 1175 1168 phy_reg |= I218_ULP_CONFIG1_WOL_HOST; 1169 + else 1170 + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; 1176 1171 1177 1172 phy_reg |= I218_ULP_CONFIG1_STICKY_ULP; 1173 + phy_reg &= ~I218_ULP_CONFIG1_INBAND_EXIT; 1178 1174 } else { 1179 1175 phy_reg |= I218_ULP_CONFIG1_INBAND_EXIT; 1176 + phy_reg &= ~I218_ULP_CONFIG1_STICKY_ULP; 1177 + phy_reg &= ~I218_ULP_CONFIG1_WOL_HOST; 1180 1178 } 1181 1179 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); 1182 1180 ··· 1193 1181 /* Commit ULP changes in PHY by starting auto 
ULP configuration */ 1194 1182 phy_reg |= I218_ULP_CONFIG1_START; 1195 1183 e1000_write_phy_reg_hv_locked(hw, I218_ULP_CONFIG1, phy_reg); 1184 + 1185 + if ((hw->phy.type == e1000_phy_i217) && (hw->phy.revision == 6) && 1186 + to_sx && (er32(STATUS) & E1000_STATUS_LU)) { 1187 + ret_val = e1000_write_phy_reg_hv_locked(hw, HV_OEM_BITS, 1188 + oem_reg); 1189 + if (ret_val) 1190 + goto release; 1191 + } 1192 + 1196 1193 release: 1197 1194 hw->phy.ops.release(hw); 1198 1195 out: ··· 1400 1379 if (((hw->mac.type == e1000_pch2lan) || 1401 1380 (hw->mac.type == e1000_pch_lpt) || 1402 1381 (hw->mac.type == e1000_pch_spt)) && link) { 1403 - u32 reg; 1382 + u16 speed, duplex; 1404 1383 1405 - reg = er32(STATUS); 1384 + e1000e_get_speed_and_duplex_copper(hw, &speed, &duplex); 1406 1385 tipg_reg = er32(TIPG); 1407 1386 tipg_reg &= ~E1000_TIPG_IPGT_MASK; 1408 1387 1409 - if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) { 1388 + if (duplex == HALF_DUPLEX && speed == SPEED_10) { 1410 1389 tipg_reg |= 0xFF; 1411 1390 /* Reduce Rx latency in analog PHY */ 1412 1391 emi_val = 0; 1392 + } else if (hw->mac.type == e1000_pch_spt && 1393 + duplex == FULL_DUPLEX && speed != SPEED_1000) { 1394 + tipg_reg |= 0xC; 1395 + emi_val = 1; 1413 1396 } else { 1414 1397 1415 1398 /* Roll back the default values */ ··· 1437 1412 1438 1413 if (ret_val) 1439 1414 return ret_val; 1415 + 1416 + if (hw->mac.type == e1000_pch_spt) { 1417 + u16 data; 1418 + u16 ptr_gap; 1419 + 1420 + if (speed == SPEED_1000) { 1421 + ret_val = hw->phy.ops.acquire(hw); 1422 + if (ret_val) 1423 + return ret_val; 1424 + 1425 + ret_val = e1e_rphy_locked(hw, 1426 + PHY_REG(776, 20), 1427 + &data); 1428 + if (ret_val) { 1429 + hw->phy.ops.release(hw); 1430 + return ret_val; 1431 + } 1432 + 1433 + ptr_gap = (data & (0x3FF << 2)) >> 2; 1434 + if (ptr_gap < 0x18) { 1435 + data &= ~(0x3FF << 2); 1436 + data |= (0x18 << 2); 1437 + ret_val = 1438 + e1e_wphy_locked(hw, 1439 + PHY_REG(776, 20), 1440 + data); 1441 + } 1442 + 
hw->phy.ops.release(hw); 1443 + if (ret_val) 1444 + return ret_val; 1445 + } 1446 + } 1447 + } 1448 + 1449 + /* I217 Packet Loss issue: 1450 + * ensure that FEXTNVM4 Beacon Duration is set correctly 1451 + * on power up. 1452 + * Set the Beacon Duration for I217 to 8 usec 1453 + */ 1454 + if ((hw->mac.type == e1000_pch_lpt) || (hw->mac.type == e1000_pch_spt)) { 1455 + u32 mac_reg; 1456 + 1457 + mac_reg = er32(FEXTNVM4); 1458 + mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK; 1459 + mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC; 1460 + ew32(FEXTNVM4, mac_reg); 1440 1461 } 1441 1462 1442 1463 /* Work-around I218 hang issue */ 1443 1464 if ((hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_LM) || 1444 1465 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_LPTLP_I218_V) || 1445 1466 (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_LM3) || 1446 - (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3) || 1447 - (hw->mac.type == e1000_pch_spt)) { 1467 + (hw->adapter->pdev->device == E1000_DEV_ID_PCH_I218_V3)) { 1448 1468 ret_val = e1000_k1_workaround_lpt_lp(hw, link); 1449 1469 if (ret_val) 1450 1470 return ret_val;
+34 -4
drivers/net/ethernet/intel/e1000e/netdev.c
··· 6354 6354 } 6355 6355 6356 6356 /** 6357 - * e1000e_disable_aspm - Disable ASPM states 6357 + * __e1000e_disable_aspm - Disable ASPM states 6358 6358 * @pdev: pointer to PCI device struct 6359 6359 * @state: bit-mask of ASPM states to disable 6360 + * @locked: indication if this context holds pci_bus_sem locked. 6360 6361 * 6361 6362 * Some devices *must* have certain ASPM states disabled per hardware errata. 6362 6363 **/ 6363 - static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 6364 + static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state, int locked) 6364 6365 { 6365 6366 struct pci_dev *parent = pdev->bus->self; 6366 6367 u16 aspm_dis_mask = 0; ··· 6400 6399 "L1" : ""); 6401 6400 6402 6401 #ifdef CONFIG_PCIEASPM 6403 - pci_disable_link_state_locked(pdev, state); 6402 + if (locked) 6403 + pci_disable_link_state_locked(pdev, state); 6404 + else 6405 + pci_disable_link_state(pdev, state); 6404 6406 6405 6407 /* Double-check ASPM control. If not disabled by the above, the 6406 6408 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is ··· 6426 6422 aspm_dis_mask); 6427 6423 } 6428 6424 6425 + /** 6426 + * e1000e_disable_aspm - Disable ASPM states. 6427 + * @pdev: pointer to PCI device struct 6428 + * @state: bit-mask of ASPM states to disable 6429 + * 6430 + * This function acquires the pci_bus_sem! 6431 + * Some devices *must* have certain ASPM states disabled per hardware errata. 6432 + **/ 6433 + static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state) 6434 + { 6435 + __e1000e_disable_aspm(pdev, state, 0); 6436 + } 6437 + 6438 + /** 6439 + * e1000e_disable_aspm_locked Disable ASPM states. 6440 + * @pdev: pointer to PCI device struct 6441 + * @state: bit-mask of ASPM states to disable 6442 + * 6443 + * This function must be called with pci_bus_sem acquired! 6444 + * Some devices *must* have certain ASPM states disabled per hardware errata. 
6445 + **/ 6446 + static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state) 6447 + { 6448 + __e1000e_disable_aspm(pdev, state, 1); 6449 + } 6450 + 6429 6451 #ifdef CONFIG_PM 6430 6452 static int __e1000_resume(struct pci_dev *pdev) 6431 6453 { ··· 6465 6435 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1) 6466 6436 aspm_disable_flag |= PCIE_LINK_STATE_L1; 6467 6437 if (aspm_disable_flag) 6468 - e1000e_disable_aspm(pdev, aspm_disable_flag); 6438 + e1000e_disable_aspm_locked(pdev, aspm_disable_flag); 6469 6439 6470 6440 pci_set_master(pdev); 6471 6441
+4
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
··· 484 484 if (!dev) 485 485 return -ENOMEM; 486 486 487 + /* warn if we are about to overwrite the pointer */ 488 + WARN_ON(tx_ring->tx_bi); 487 489 bi_size = sizeof(struct i40e_tx_buffer) * tx_ring->count; 488 490 tx_ring->tx_bi = kzalloc(bi_size, GFP_KERNEL); 489 491 if (!tx_ring->tx_bi) ··· 646 644 struct device *dev = rx_ring->dev; 647 645 int bi_size; 648 646 647 + /* warn if we are about to overwrite the pointer */ 648 + WARN_ON(rx_ring->rx_bi); 649 649 bi_size = sizeof(struct i40e_rx_buffer) * rx_ring->count; 650 650 rx_ring->rx_bi = kzalloc(bi_size, GFP_KERNEL); 651 651 if (!rx_ring->rx_bi)
-1
drivers/net/ethernet/intel/i40evf/i40evf.h
··· 264 264 265 265 int i40evf_up(struct i40evf_adapter *adapter); 266 266 void i40evf_down(struct i40evf_adapter *adapter); 267 - void i40evf_reinit_locked(struct i40evf_adapter *adapter); 268 267 void i40evf_reset(struct i40evf_adapter *adapter); 269 268 void i40evf_set_ethtool_ops(struct net_device *netdev); 270 269 void i40evf_update_stats(struct i40evf_adapter *adapter);
+4 -2
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
··· 267 267 adapter->tx_desc_count = new_tx_count; 268 268 adapter->rx_desc_count = new_rx_count; 269 269 270 - if (netif_running(netdev)) 271 - i40evf_reinit_locked(adapter); 270 + if (netif_running(netdev)) { 271 + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; 272 + schedule_work(&adapter->reset_task); 273 + } 272 274 273 275 return 0; 274 276 }
+49 -65
drivers/net/ethernet/intel/i40evf/i40evf_main.c
··· 170 170 struct i40evf_adapter *adapter = netdev_priv(netdev); 171 171 172 172 adapter->tx_timeout_count++; 173 - if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) { 173 + if (!(adapter->flags & (I40EVF_FLAG_RESET_PENDING | 174 + I40EVF_FLAG_RESET_NEEDED))) { 174 175 adapter->flags |= I40EVF_FLAG_RESET_NEEDED; 175 176 schedule_work(&adapter->reset_task); 176 177 } ··· 1461 1460 for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) { 1462 1461 lut = 0; 1463 1462 for (j = 0; j < 4; j++) { 1464 - if (cqueue == adapter->vsi_res->num_queue_pairs) 1463 + if (cqueue == adapter->num_active_queues) 1465 1464 cqueue = 0; 1466 1465 lut |= ((cqueue) << (8 * j)); 1467 1466 cqueue++; ··· 1471 1470 i40e_flush(hw); 1472 1471 } 1473 1472 1474 - #define I40EVF_RESET_WAIT_MS 100 1475 - #define I40EVF_RESET_WAIT_COUNT 200 1473 + #define I40EVF_RESET_WAIT_MS 10 1474 + #define I40EVF_RESET_WAIT_COUNT 500 1476 1475 /** 1477 1476 * i40evf_reset_task - Call-back task to handle hardware reset 1478 1477 * @work: pointer to work_struct ··· 1496 1495 &adapter->crit_section)) 1497 1496 usleep_range(500, 1000); 1498 1497 1498 + i40evf_misc_irq_disable(adapter); 1499 1499 if (adapter->flags & I40EVF_FLAG_RESET_NEEDED) { 1500 - dev_info(&adapter->pdev->dev, "Requesting reset from PF\n"); 1500 + adapter->flags &= ~I40EVF_FLAG_RESET_NEEDED; 1501 + /* Restart the AQ here. If we have been reset but didn't 1502 + * detect it, or if the PF had to reinit, our AQ will be hosed. 
1503 + */ 1504 + i40evf_shutdown_adminq(hw); 1505 + i40evf_init_adminq(hw); 1501 1506 i40evf_request_reset(adapter); 1502 1507 } 1508 + adapter->flags |= I40EVF_FLAG_RESET_PENDING; 1503 1509 1504 1510 /* poll until we see the reset actually happen */ 1505 1511 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { ··· 1515 1507 if ((rstat_val != I40E_VFR_VFACTIVE) && 1516 1508 (rstat_val != I40E_VFR_COMPLETED)) 1517 1509 break; 1518 - msleep(I40EVF_RESET_WAIT_MS); 1510 + usleep_range(500, 1000); 1519 1511 } 1520 1512 if (i == I40EVF_RESET_WAIT_COUNT) { 1521 - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; 1513 + dev_info(&adapter->pdev->dev, "Never saw reset\n"); 1522 1514 goto continue_reset; /* act like the reset happened */ 1523 1515 } 1524 1516 ··· 1526 1518 for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) { 1527 1519 rstat_val = rd32(hw, I40E_VFGEN_RSTAT) & 1528 1520 I40E_VFGEN_RSTAT_VFR_STATE_MASK; 1529 - if ((rstat_val == I40E_VFR_VFACTIVE) || 1530 - (rstat_val == I40E_VFR_COMPLETED)) 1521 + if (rstat_val == I40E_VFR_VFACTIVE) 1531 1522 break; 1532 1523 msleep(I40EVF_RESET_WAIT_MS); 1533 1524 } 1525 + /* extra wait to make sure minimum wait is met */ 1526 + msleep(I40EVF_RESET_WAIT_MS); 1534 1527 if (i == I40EVF_RESET_WAIT_COUNT) { 1535 1528 struct i40evf_mac_filter *f, *ftmp; 1536 1529 struct i40evf_vlan_filter *fv, *fvtmp; ··· 1543 1534 1544 1535 if (netif_running(adapter->netdev)) { 1545 1536 set_bit(__I40E_DOWN, &adapter->vsi.state); 1546 - i40evf_irq_disable(adapter); 1547 - i40evf_napi_disable_all(adapter); 1548 - netif_tx_disable(netdev); 1549 - netif_tx_stop_all_queues(netdev); 1550 1537 netif_carrier_off(netdev); 1538 + netif_tx_disable(netdev); 1539 + i40evf_napi_disable_all(adapter); 1540 + i40evf_irq_disable(adapter); 1551 1541 i40evf_free_traffic_irqs(adapter); 1552 1542 i40evf_free_all_tx_resources(adapter); 1553 1543 i40evf_free_all_rx_resources(adapter); ··· 1558 1550 list_del(&f->list); 1559 1551 kfree(f); 1560 1552 } 1553 + 1561 1554 
list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, 1562 1555 list) { 1563 1556 list_del(&fv->list); ··· 1573 1564 i40evf_shutdown_adminq(hw); 1574 1565 adapter->netdev->flags &= ~IFF_UP; 1575 1566 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 1567 + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; 1568 + dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n"); 1576 1569 return; /* Do not attempt to reinit. It's dead, Jim. */ 1577 1570 } 1578 1571 1579 1572 continue_reset: 1580 - adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; 1581 - 1573 + if (netif_running(adapter->netdev)) { 1574 + netif_carrier_off(netdev); 1575 + netif_tx_stop_all_queues(netdev); 1576 + i40evf_napi_disable_all(adapter); 1577 + } 1582 1578 i40evf_irq_disable(adapter); 1583 1579 1584 - if (netif_running(adapter->netdev)) { 1585 - i40evf_napi_disable_all(adapter); 1586 - netif_tx_disable(netdev); 1587 - netif_tx_stop_all_queues(netdev); 1588 - netif_carrier_off(netdev); 1589 - } 1590 - 1591 1580 adapter->state = __I40EVF_RESETTING; 1581 + adapter->flags &= ~I40EVF_FLAG_RESET_PENDING; 1582 + 1583 + /* free the Tx/Rx rings and descriptors, might be better to just 1584 + * re-use them sometime in the future 1585 + */ 1586 + i40evf_free_all_rx_resources(adapter); 1587 + i40evf_free_all_tx_resources(adapter); 1592 1588 1593 1589 /* kill and reinit the admin queue */ 1594 1590 if (i40evf_shutdown_adminq(hw)) ··· 1617 1603 adapter->aq_required = I40EVF_FLAG_AQ_ADD_MAC_FILTER; 1618 1604 adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER; 1619 1605 clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section); 1606 + i40evf_misc_irq_enable(adapter); 1620 1607 1621 1608 mod_timer(&adapter->watchdog_timer, jiffies + 2); 1622 1609 ··· 1639 1624 goto reset_err; 1640 1625 1641 1626 i40evf_irq_enable(adapter, true); 1627 + } else { 1628 + adapter->state = __I40EVF_DOWN; 1642 1629 } 1630 + 1643 1631 return; 1644 1632 reset_err: 1645 1633 dev_err(&adapter->pdev->dev, 
"failed to allocate resources during reinit\n"); ··· 1685 1667 memset(event.msg_buf, 0, I40EVF_MAX_AQ_BUF_SIZE); 1686 1668 } while (pending); 1687 1669 1670 + if ((adapter->flags & 1671 + (I40EVF_FLAG_RESET_PENDING | I40EVF_FLAG_RESET_NEEDED)) || 1672 + adapter->state == __I40EVF_RESETTING) 1673 + goto freedom; 1674 + 1688 1675 /* check for error indications */ 1689 1676 val = rd32(hw, hw->aq.arq.len); 1690 1677 oldval = val; ··· 1725 1702 if (oldval != val) 1726 1703 wr32(hw, hw->aq.asq.len, val); 1727 1704 1705 + freedom: 1728 1706 kfree(event.msg_buf); 1729 1707 out: 1730 1708 /* re-enable Admin queue interrupt cause */ ··· 1921 1897 } 1922 1898 1923 1899 /** 1924 - * i40evf_reinit_locked - Software reinit 1925 - * @adapter: board private structure 1926 - * 1927 - * Reinititalizes the ring structures in response to a software configuration 1928 - * change. Roughly the same as close followed by open, but skips releasing 1929 - * and reallocating the interrupts. 1930 - **/ 1931 - void i40evf_reinit_locked(struct i40evf_adapter *adapter) 1932 - { 1933 - struct net_device *netdev = adapter->netdev; 1934 - int err; 1935 - 1936 - WARN_ON(in_interrupt()); 1937 - 1938 - i40evf_down(adapter); 1939 - 1940 - /* allocate transmit descriptors */ 1941 - err = i40evf_setup_all_tx_resources(adapter); 1942 - if (err) 1943 - goto err_reinit; 1944 - 1945 - /* allocate receive descriptors */ 1946 - err = i40evf_setup_all_rx_resources(adapter); 1947 - if (err) 1948 - goto err_reinit; 1949 - 1950 - i40evf_configure(adapter); 1951 - 1952 - err = i40evf_up_complete(adapter); 1953 - if (err) 1954 - goto err_reinit; 1955 - 1956 - i40evf_irq_enable(adapter, true); 1957 - return; 1958 - 1959 - err_reinit: 1960 - dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); 1961 - i40evf_close(netdev); 1962 - } 1963 - 1964 - /** 1965 1900 * i40evf_change_mtu - Change the Maximum Transfer Unit 1966 1901 * @netdev: network interface device structure 1967 1902 * @new_mtu: new 
value for maximum frame size ··· 1935 1952 if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER)) 1936 1953 return -EINVAL; 1937 1954 1938 - /* must set new MTU before calling down or up */ 1939 1955 netdev->mtu = new_mtu; 1940 - i40evf_reinit_locked(adapter); 1956 + adapter->flags |= I40EVF_FLAG_RESET_NEEDED; 1957 + schedule_work(&adapter->reset_task); 1958 + 1941 1959 return 0; 1942 1960 } 1943 1961
+8 -4
drivers/net/ethernet/intel/igb/e1000_82575.c
··· 1 1 /* Intel(R) Gigabit Ethernet Linux driver 2 - * Copyright(c) 2007-2014 Intel Corporation. 2 + * Copyright(c) 2007-2015 Intel Corporation. 3 3 * 4 4 * This program is free software; you can redistribute it and/or modify it 5 5 * under the terms and conditions of the GNU General Public License, ··· 1900 1900 * igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable 1901 1901 * @hw: pointer to the HW structure 1902 1902 * 1903 - * After rx enable if managability is enabled then there is likely some 1904 - * bad data at the start of the fifo and possibly in the DMA fifo. This 1903 + * After rx enable if manageability is enabled then there is likely some 1904 + * bad data at the start of the fifo and possibly in the DMA fifo. This 1905 1905 * function clears the fifos and flushes any packets that came in as rx was 1906 1906 * being enabled. 1907 1907 **/ ··· 1909 1909 { 1910 1910 u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled; 1911 1911 int i, ms_wait; 1912 + 1913 + /* disable IPv6 options as per hardware errata */ 1914 + rfctl = rd32(E1000_RFCTL); 1915 + rfctl |= E1000_RFCTL_IPV6_EX_DIS; 1916 + wr32(E1000_RFCTL, rfctl); 1912 1917 1913 1918 if (hw->mac.type != e1000_82575 || 1914 1919 !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN)) ··· 1942 1937 * incoming packets are rejected. Set enable and wait 2ms so that 1943 1938 * any packet that was coming in as RCTL.EN was set is flushed 1944 1939 */ 1945 - rfctl = rd32(E1000_RFCTL); 1946 1940 wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF); 1947 1941 1948 1942 rlpml = rd32(E1000_RLPML);
+2 -1
drivers/net/ethernet/intel/igb/e1000_defines.h
··· 344 344 #define E1000_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ 345 345 346 346 /* Header split receive */ 347 - #define E1000_RFCTL_LEF 0x00040000 347 + #define E1000_RFCTL_IPV6_EX_DIS 0x00010000 348 + #define E1000_RFCTL_LEF 0x00040000 348 349 349 350 /* Collision related configuration parameters */ 350 351 #define E1000_COLLISION_THRESHOLD 15
+1 -1
drivers/net/ethernet/intel/igb/igb_main.c
··· 58 58 59 59 #define MAJ 5 60 60 #define MIN 2 61 - #define BUILD 15 61 + #define BUILD 18 62 62 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \ 63 63 __stringify(BUILD) "-k" 64 64 char igb_driver_name[] = "igb";
+26 -1
drivers/net/ethernet/marvell/mvneta.c
··· 310 310 unsigned int link; 311 311 unsigned int duplex; 312 312 unsigned int speed; 313 + unsigned int tx_csum_limit; 313 314 int use_inband_status:1; 314 315 }; 315 316 ··· 2509 2508 2510 2509 dev->mtu = mtu; 2511 2510 2512 - if (!netif_running(dev)) 2511 + if (!netif_running(dev)) { 2512 + netdev_update_features(dev); 2513 2513 return 0; 2514 + } 2514 2515 2515 2516 /* The interface is running, so we have to force a 2516 2517 * reallocation of the queues ··· 2541 2538 mvneta_start_dev(pp); 2542 2539 mvneta_port_up(pp); 2543 2540 2541 + netdev_update_features(dev); 2542 + 2544 2543 return 0; 2544 + } 2545 + 2546 + static netdev_features_t mvneta_fix_features(struct net_device *dev, 2547 + netdev_features_t features) 2548 + { 2549 + struct mvneta_port *pp = netdev_priv(dev); 2550 + 2551 + if (pp->tx_csum_limit && dev->mtu > pp->tx_csum_limit) { 2552 + features &= ~(NETIF_F_IP_CSUM | NETIF_F_TSO); 2553 + netdev_info(dev, 2554 + "Disable IP checksum for MTU greater than %dB\n", 2555 + pp->tx_csum_limit); 2556 + } 2557 + 2558 + return features; 2545 2559 } 2546 2560 2547 2561 /* Get mac address */ ··· 2882 2862 .ndo_set_rx_mode = mvneta_set_rx_mode, 2883 2863 .ndo_set_mac_address = mvneta_set_mac_addr, 2884 2864 .ndo_change_mtu = mvneta_change_mtu, 2865 + .ndo_fix_features = mvneta_fix_features, 2885 2866 .ndo_get_stats64 = mvneta_get_stats64, 2886 2867 .ndo_do_ioctl = mvneta_ioctl, 2887 2868 }; ··· 3128 3107 } 3129 3108 } 3130 3109 3110 + if (of_device_is_compatible(dn, "marvell,armada-370-neta")) 3111 + pp->tx_csum_limit = 1600; 3112 + 3131 3113 pp->tx_ring_size = MVNETA_MAX_TXD; 3132 3114 pp->rx_ring_size = MVNETA_MAX_RXD; 3133 3115 ··· 3209 3185 3210 3186 static const struct of_device_id mvneta_match[] = { 3211 3187 { .compatible = "marvell,armada-370-neta" }, 3188 + { .compatible = "marvell,armada-xp-neta" }, 3212 3189 { } 3213 3190 }; 3214 3191 MODULE_DEVICE_TABLE(of, mvneta_match);
-4
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 1977 1977 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1978 1978 } 1979 1979 1980 - if (priv->base_tx_qpn) { 1981 - mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn, priv->tx_ring_num); 1982 - priv->base_tx_qpn = 0; 1983 - } 1984 1980 } 1985 1981 1986 1982 int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
+6 -11
drivers/net/ethernet/mellanox/mlx4/en_rx.c
··· 718 718 } 719 719 #endif 720 720 static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va, 721 - int hwtstamp_rx_filter) 721 + netdev_features_t dev_features) 722 722 { 723 723 __wsum hw_checksum = 0; 724 724 ··· 726 726 727 727 hw_checksum = csum_unfold((__force __sum16)cqe->checksum); 728 728 729 - if (((struct ethhdr *)va)->h_proto == htons(ETH_P_8021Q) && 730 - hwtstamp_rx_filter != HWTSTAMP_FILTER_NONE) { 731 - /* next protocol non IPv4 or IPv6 */ 732 - if (((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto 733 - != htons(ETH_P_IP) && 734 - ((struct vlan_hdr *)hdr)->h_vlan_encapsulated_proto 735 - != htons(ETH_P_IPV6)) 736 - return -1; 729 + if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_VLAN_PRESENT_MASK) && 730 + !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) { 737 731 hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr); 738 732 hdr += sizeof(struct vlan_hdr); 739 733 } ··· 890 896 891 897 if (ip_summed == CHECKSUM_COMPLETE) { 892 898 void *va = skb_frag_address(skb_shinfo(gro_skb)->frags); 893 - if (check_csum(cqe, gro_skb, va, ring->hwtstamp_rx_filter)) { 899 + if (check_csum(cqe, gro_skb, va, 900 + dev->features)) { 894 901 ip_summed = CHECKSUM_NONE; 895 902 ring->csum_none++; 896 903 ring->csum_complete--; ··· 946 951 } 947 952 948 953 if (ip_summed == CHECKSUM_COMPLETE) { 949 - if (check_csum(cqe, skb, skb->data, ring->hwtstamp_rx_filter)) { 954 + if (check_csum(cqe, skb, skb->data, dev->features)) { 950 955 ip_summed = CHECKSUM_NONE; 951 956 ring->csum_complete--; 952 957 ring->csum_none++;
+12 -8
drivers/net/ethernet/mellanox/mlx4/en_tx.c
··· 66 66 ring->size = size; 67 67 ring->size_mask = size - 1; 68 68 ring->stride = stride; 69 + ring->full_size = ring->size - HEADROOM - MAX_DESC_TXBBS; 69 70 70 71 tmp = size * sizeof(struct mlx4_en_tx_info); 71 72 ring->tx_info = kmalloc_node(tmp, GFP_KERNEL | __GFP_NOWARN, node); ··· 181 180 mlx4_bf_free(mdev->dev, &ring->bf); 182 181 mlx4_qp_remove(mdev->dev, &ring->qp); 183 182 mlx4_qp_free(mdev->dev, &ring->qp); 183 + mlx4_qp_release_range(priv->mdev->dev, ring->qpn, 1); 184 184 mlx4_en_unmap_buffer(&ring->wqres.buf); 185 185 mlx4_free_hwq_res(mdev->dev, &ring->wqres, ring->buf_size); 186 186 kfree(ring->bounce_buf); ··· 231 229 232 230 mlx4_qp_modify(mdev->dev, NULL, ring->qp_state, 233 231 MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp); 232 + } 233 + 234 + static inline bool mlx4_en_is_tx_ring_full(struct mlx4_en_tx_ring *ring) 235 + { 236 + return ring->prod - ring->cons > ring->full_size; 234 237 } 235 238 236 239 static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv, ··· 480 473 481 474 netdev_tx_completed_queue(ring->tx_queue, packets, bytes); 482 475 483 - /* 484 - * Wakeup Tx queue if this stopped, and at least 1 packet 485 - * was completed 476 + /* Wakeup Tx queue if this stopped, and ring is not full. 
486 477 */ 487 - if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) { 478 + if (netif_tx_queue_stopped(ring->tx_queue) && 479 + !mlx4_en_is_tx_ring_full(ring)) { 488 480 netif_tx_wake_queue(ring->tx_queue); 489 481 ring->wake_queue++; 490 482 } ··· 927 921 skb_tx_timestamp(skb); 928 922 929 923 /* Check available TXBBs And 2K spare for prefetch */ 930 - stop_queue = (int)(ring->prod - ring_cons) > 931 - ring->size - HEADROOM - MAX_DESC_TXBBS; 924 + stop_queue = mlx4_en_is_tx_ring_full(ring); 932 925 if (unlikely(stop_queue)) { 933 926 netif_tx_stop_queue(ring->tx_queue); 934 927 ring->queue_stopped++; ··· 996 991 smp_rmb(); 997 992 998 993 ring_cons = ACCESS_ONCE(ring->cons); 999 - if (unlikely(((int)(ring->prod - ring_cons)) <= 1000 - ring->size - HEADROOM - MAX_DESC_TXBBS)) { 994 + if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { 1001 995 netif_tx_wake_queue(ring->tx_queue); 1002 996 ring->wake_queue++; 1003 997 }
+7 -1
drivers/net/ethernet/mellanox/mlx4/intf.c
··· 93 93 mutex_lock(&intf_mutex); 94 94 95 95 list_add_tail(&intf->list, &intf_list); 96 - list_for_each_entry(priv, &dev_list, dev_list) 96 + list_for_each_entry(priv, &dev_list, dev_list) { 97 + if (mlx4_is_mfunc(&priv->dev) && (intf->flags & MLX4_INTFF_BONDING)) { 98 + mlx4_dbg(&priv->dev, 99 + "SRIOV, disabling HA mode for intf proto %d\n", intf->protocol); 100 + intf->flags &= ~MLX4_INTFF_BONDING; 101 + } 97 102 mlx4_add_device(intf, priv); 103 + } 98 104 99 105 mutex_unlock(&intf_mutex); 100 106
+1 -1
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
··· 279 279 u32 size; /* number of TXBBs */ 280 280 u32 size_mask; 281 281 u16 stride; 282 + u32 full_size; 282 283 u16 cqn; /* index of port CQ associated with this ring */ 283 284 u32 buf_size; 284 285 __be32 doorbell_qpn; ··· 581 580 int vids[128]; 582 581 bool wol; 583 582 struct device *ddev; 584 - int base_tx_qpn; 585 583 struct hlist_head mac_hash[MLX4_EN_MAC_HASH_SIZE]; 586 584 struct hwtstamp_config hwtstamp_config; 587 585 u32 counter_index;
+3 -1
drivers/net/ethernet/renesas/ravb_ptp.c
··· 116 116 priv->ptp.current_addend = addend; 117 117 118 118 gccr = ravb_read(ndev, GCCR); 119 - if (gccr & GCCR_LTI) 119 + if (gccr & GCCR_LTI) { 120 + spin_unlock_irqrestore(&priv->lock, flags); 120 121 return -EBUSY; 122 + } 121 123 ravb_write(ndev, addend & GTI_TIV, GTI); 122 124 ravb_write(ndev, gccr | GCCR_LTI, GCCR); 123 125
+2 -2
drivers/net/ethernet/sis/sis900.h
··· 56 56 EDB_MASTER_EN = 0x00002000 57 57 }; 58 58 59 - enum sis900_eeprom_access_reigster_bits { 59 + enum sis900_eeprom_access_register_bits { 60 60 MDC = 0x00000040, MDDIR = 0x00000020, MDIO = 0x00000010, /* 7016 specific */ 61 61 EECS = 0x00000008, EECLK = 0x00000004, EEDO = 0x00000002, 62 62 EEDI = 0x00000001 ··· 73 73 RxERR = 0x00000004, RxDESC = 0x00000002, RxOK = 0x00000001 74 74 }; 75 75 76 - enum sis900_interrupt_enable_reigster_bits { 76 + enum sis900_interrupt_enable_register_bits { 77 77 IE = 0x00000001 78 78 }; 79 79
+2 -2
drivers/net/ethernet/stmicro/stmmac/mmc_core.c
··· 73 73 #define MMC_RX_OCTETCOUNT_G 0x00000188 74 74 #define MMC_RX_BROADCASTFRAME_G 0x0000018c 75 75 #define MMC_RX_MULTICASTFRAME_G 0x00000190 76 - #define MMC_RX_CRC_ERRROR 0x00000194 76 + #define MMC_RX_CRC_ERROR 0x00000194 77 77 #define MMC_RX_ALIGN_ERROR 0x00000198 78 78 #define MMC_RX_RUN_ERROR 0x0000019C 79 79 #define MMC_RX_JABBER_ERROR 0x000001A0 ··· 196 196 mmc->mmc_rx_octetcount_g += readl(ioaddr + MMC_RX_OCTETCOUNT_G); 197 197 mmc->mmc_rx_broadcastframe_g += readl(ioaddr + MMC_RX_BROADCASTFRAME_G); 198 198 mmc->mmc_rx_multicastframe_g += readl(ioaddr + MMC_RX_MULTICASTFRAME_G); 199 - mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERRROR); 199 + mmc->mmc_rx_crc_error += readl(ioaddr + MMC_RX_CRC_ERROR); 200 200 mmc->mmc_rx_align_error += readl(ioaddr + MMC_RX_ALIGN_ERROR); 201 201 mmc->mmc_rx_run_error += readl(ioaddr + MMC_RX_RUN_ERROR); 202 202 mmc->mmc_rx_jabber_error += readl(ioaddr + MMC_RX_JABBER_ERROR);
+2
drivers/net/ethernet/via/Kconfig
··· 18 18 config VIA_RHINE 19 19 tristate "VIA Rhine support" 20 20 depends on (PCI || OF_IRQ) 21 + depends on HAS_DMA 21 22 select CRC32 22 23 select MII 23 24 ---help--- ··· 43 42 config VIA_VELOCITY 44 43 tristate "VIA Velocity support" 45 44 depends on (PCI || (OF_ADDRESS && OF_IRQ)) 45 + depends on HAS_DMA 46 46 select CRC32 47 47 select CRC_CCITT 48 48 select MII
+7
drivers/net/phy/bcm7xxx.c
··· 246 246 pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n", 247 247 dev_name(&phydev->dev), phydev->drv->name, rev, patch); 248 248 249 + /* Dummy read to a register to workaround an issue upon reset where the 250 + * internal inverter may not allow the first MDIO transaction to pass 251 + * the MDIO management controller and make us return 0xffff for such 252 + * reads. 253 + */ 254 + phy_read(phydev, MII_BMSR); 255 + 249 256 switch (rev) { 250 257 case 0xb0: 251 258 ret = bcm7xxx_28nm_b0_afe_config_init(phydev);
+43
drivers/net/phy/mdio-bcm-unimac.c
··· 120 120 return 0; 121 121 } 122 122 123 + /* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with 124 + * their internal MDIO management controller making them fail to successfully 125 + * be read from or written to for the first transaction. We insert a dummy 126 + * BMSR read here to make sure that phy_get_device() and get_phy_id() can 127 + * correctly read the PHY MII_PHYSID1/2 registers and successfully register a 128 + * PHY device for this peripheral. 129 + * 130 + * Once the PHY driver is registered, we can workaround subsequent reads from 131 + * there (e.g: during system-wide power management). 132 + * 133 + * bus->reset is invoked before mdiobus_scan during mdiobus_register and is 134 + * therefore the right location to stick that workaround. Since we do not want 135 + * to read from non-existing PHYs, we either use bus->phy_mask or do a manual 136 + * Device Tree scan to limit the search area. 137 + */ 138 + static int unimac_mdio_reset(struct mii_bus *bus) 139 + { 140 + struct device_node *np = bus->dev.of_node; 141 + struct device_node *child; 142 + u32 read_mask = 0; 143 + int addr; 144 + 145 + if (!np) { 146 + read_mask = ~bus->phy_mask; 147 + } else { 148 + for_each_available_child_of_node(np, child) { 149 + addr = of_mdio_parse_addr(&bus->dev, child); 150 + if (addr < 0) 151 + continue; 152 + 153 + read_mask |= 1 << addr; 154 + } 155 + } 156 + 157 + for (addr = 0; addr < PHY_MAX_ADDR; addr++) { 158 + if (read_mask & 1 << addr) 159 + mdiobus_read(bus, addr, MII_BMSR); 160 + } 161 + 162 + return 0; 163 + } 164 + 123 165 static int unimac_mdio_probe(struct platform_device *pdev) 124 166 { 125 167 struct unimac_mdio_priv *priv; ··· 197 155 bus->parent = &pdev->dev; 198 156 bus->read = unimac_mdio_read; 199 157 bus->write = unimac_mdio_write; 158 + bus->reset = unimac_mdio_reset; 200 159 snprintf(bus->id, MII_BUS_ID_SIZE, "%s", pdev->name); 201 160 202 161 bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
+17 -8
drivers/net/phy/phy_device.c
··· 230 230 for (i = 1; 231 231 i < num_ids && c45_ids->devices_in_package == 0; 232 232 i++) { 233 - reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; 233 + retry: reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; 234 234 phy_reg = mdiobus_read(bus, addr, reg_addr); 235 235 if (phy_reg < 0) 236 236 return -EIO; ··· 242 242 return -EIO; 243 243 c45_ids->devices_in_package |= (phy_reg & 0xffff); 244 244 245 - /* If mostly Fs, there is no device there, 246 - * let's get out of here. 247 - */ 248 245 if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) { 249 - *phy_id = 0xffffffff; 250 - return 0; 246 + if (i) { 247 + /* If mostly Fs, there is no device there, 248 + * then let's continue to probe more, as some 249 + * 10G PHYs have zero Devices In package, 250 + * e.g. Cortina CS4315/CS4340 PHY. 251 + */ 252 + i = 0; 253 + goto retry; 254 + } else { 255 + /* no device there, let's get out of here */ 256 + *phy_id = 0xffffffff; 257 + return 0; 258 + } 251 259 } 252 260 } 253 261 ··· 804 796 if (phydev->supported & (SUPPORTED_1000baseT_Half | 805 797 SUPPORTED_1000baseT_Full)) { 806 798 adv |= ethtool_adv_to_mii_ctrl1000_t(advertise); 807 - if (adv != oldadv) 808 - changed = 1; 809 799 } 800 + 801 + if (adv != oldadv) 802 + changed = 1; 810 803 811 804 err = phy_write(phydev, MII_CTRL1000, adv); 812 805 if (err < 0)
+14
drivers/net/phy/vitesse.c
··· 66 66 #define PHY_ID_VSC8244 0x000fc6c0 67 67 #define PHY_ID_VSC8514 0x00070670 68 68 #define PHY_ID_VSC8574 0x000704a0 69 + #define PHY_ID_VSC8641 0x00070431 69 70 #define PHY_ID_VSC8662 0x00070660 70 71 #define PHY_ID_VSC8221 0x000fc550 71 72 #define PHY_ID_VSC8211 0x000fc4b0 ··· 273 272 .config_intr = &vsc82xx_config_intr, 274 273 .driver = { .owner = THIS_MODULE,}, 275 274 }, { 275 + .phy_id = PHY_ID_VSC8641, 276 + .name = "Vitesse VSC8641", 277 + .phy_id_mask = 0x000ffff0, 278 + .features = PHY_GBIT_FEATURES, 279 + .flags = PHY_HAS_INTERRUPT, 280 + .config_init = &vsc824x_config_init, 281 + .config_aneg = &vsc82x4_config_aneg, 282 + .read_status = &genphy_read_status, 283 + .ack_interrupt = &vsc824x_ack_interrupt, 284 + .config_intr = &vsc82xx_config_intr, 285 + .driver = { .owner = THIS_MODULE,}, 286 + }, { 276 287 .phy_id = PHY_ID_VSC8662, 277 288 .name = "Vitesse VSC8662", 278 289 .phy_id_mask = 0x000ffff0, ··· 331 318 { PHY_ID_VSC8244, 0x000fffc0 }, 332 319 { PHY_ID_VSC8514, 0x000ffff0 }, 333 320 { PHY_ID_VSC8574, 0x000ffff0 }, 321 + { PHY_ID_VSC8641, 0x000ffff0 }, 334 322 { PHY_ID_VSC8662, 0x000ffff0 }, 335 323 { PHY_ID_VSC8221, 0x000ffff0 }, 336 324 { PHY_ID_VSC8211, 0x000ffff0 },
+2 -2
drivers/net/vmxnet3/vmxnet3_int.h
··· 69 69 /* 70 70 * Version numbers 71 71 */ 72 - #define VMXNET3_DRIVER_VERSION_STRING "1.3.5.0-k" 72 + #define VMXNET3_DRIVER_VERSION_STRING "1.4.2.0-k" 73 73 74 74 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */ 75 - #define VMXNET3_DRIVER_VERSION_NUM 0x01030500 75 + #define VMXNET3_DRIVER_VERSION_NUM 0x01040200 76 76 77 77 #if defined(CONFIG_PCI_MSI) 78 78 /* RSS only makes sense if MSI-X is supported. */
-7
drivers/net/xen-netfront.c
··· 1244 1244 np = netdev_priv(netdev); 1245 1245 np->xbdev = dev; 1246 1246 1247 - /* No need to use rtnl_lock() before the call below as it 1248 - * happens before register_netdev(). 1249 - */ 1250 - netif_set_real_num_tx_queues(netdev, 0); 1251 1247 np->queues = NULL; 1252 1248 1253 1249 err = -ENOMEM; ··· 1895 1899 xennet_disconnect_backend(info); 1896 1900 kfree(info->queues); 1897 1901 info->queues = NULL; 1898 - rtnl_lock(); 1899 - netif_set_real_num_tx_queues(info->netdev, 0); 1900 - rtnl_unlock(); 1901 1902 out: 1902 1903 return err; 1903 1904 }
+15 -1
include/net/ax25.h
··· 13 13 #include <linux/slab.h> 14 14 #include <linux/atomic.h> 15 15 #include <net/neighbour.h> 16 + #include <net/sock.h> 16 17 17 18 #define AX25_T1CLAMPLO 1 18 19 #define AX25_T1CLAMPHI (30 * HZ) ··· 247 246 atomic_t refcount; 248 247 } ax25_cb; 249 248 250 - #define ax25_sk(__sk) ((ax25_cb *)(__sk)->sk_protinfo) 249 + struct ax25_sock { 250 + struct sock sk; 251 + struct ax25_cb *cb; 252 + }; 253 + 254 + static inline struct ax25_sock *ax25_sk(const struct sock *sk) 255 + { 256 + return (struct ax25_sock *) sk; 257 + } 258 + 259 + static inline struct ax25_cb *sk_to_ax25(const struct sock *sk) 260 + { 261 + return ax25_sk(sk)->cb; 262 + } 251 263 252 264 #define ax25_for_each(__ax25, list) \ 253 265 hlist_for_each_entry(__ax25, list, ax25_node)
-2
include/net/sock.h
··· 277 277 * @sk_incoming_cpu: record cpu processing incoming packets 278 278 * @sk_txhash: computed flow hash for use on transmit 279 279 * @sk_filter: socket filtering instructions 280 - * @sk_protinfo: private area, net family specific, when not using slab 281 280 * @sk_timer: sock cleanup timer 282 281 * @sk_stamp: time stamp of last packet received 283 282 * @sk_tsflags: SO_TIMESTAMPING socket options ··· 415 416 const struct cred *sk_peer_cred; 416 417 long sk_rcvtimeo; 417 418 long sk_sndtimeo; 418 - void *sk_protinfo; 419 419 struct timer_list sk_timer; 420 420 ktime_t sk_stamp; 421 421 u16 sk_tsflags;
+13 -3
include/uapi/linux/in.h
··· 19 19 #define _UAPI_LINUX_IN_H 20 20 21 21 #include <linux/types.h> 22 + #include <linux/libc-compat.h> 22 23 #include <linux/socket.h> 23 24 25 + #if __UAPI_DEF_IN_IPPROTO 24 26 /* Standard well-defined IP protocols. */ 25 27 enum { 26 28 IPPROTO_IP = 0, /* Dummy protocol for TCP */ ··· 77 75 #define IPPROTO_RAW IPPROTO_RAW 78 76 IPPROTO_MAX 79 77 }; 78 + #endif 80 79 81 - 80 + #if __UAPI_DEF_IN_ADDR 82 81 /* Internet address. */ 83 82 struct in_addr { 84 83 __be32 s_addr; 85 84 }; 85 + #endif 86 86 87 87 #define IP_TOS 1 88 88 #define IP_TTL 2 ··· 162 158 163 159 /* Request struct for multicast socket ops */ 164 160 161 + #if __UAPI_DEF_IP_MREQ 165 162 struct ip_mreq { 166 163 struct in_addr imr_multiaddr; /* IP multicast address of group */ 167 164 struct in_addr imr_interface; /* local IP address of interface */ ··· 214 209 #define GROUP_FILTER_SIZE(numsrc) \ 215 210 (sizeof(struct group_filter) - sizeof(struct __kernel_sockaddr_storage) \ 216 211 + (numsrc) * sizeof(struct __kernel_sockaddr_storage)) 212 + #endif 217 213 214 + #if __UAPI_DEF_IN_PKTINFO 218 215 struct in_pktinfo { 219 216 int ipi_ifindex; 220 217 struct in_addr ipi_spec_dst; 221 218 struct in_addr ipi_addr; 222 219 }; 220 + #endif 223 221 224 222 /* Structure describing an Internet (IP) socket address. */ 223 + #if __UAPI_DEF_SOCKADDR_IN 225 224 #define __SOCK_SIZE__ 16 /* sizeof(struct sockaddr) */ 226 225 struct sockaddr_in { 227 226 __kernel_sa_family_t sin_family; /* Address family */ ··· 237 228 sizeof(unsigned short int) - sizeof(struct in_addr)]; 238 229 }; 239 230 #define sin_zero __pad /* for BSD UNIX comp. -FvK */ 231 + #endif 240 232 241 - 233 + #if __UAPI_DEF_IN_CLASS 242 234 /* 243 235 * Definitions of the bits in an Internet address integer. 
244 236 * On subnets, host and network parts are found according ··· 290 280 #define INADDR_ALLHOSTS_GROUP 0xe0000001U /* 224.0.0.1 */ 291 281 #define INADDR_ALLRTRS_GROUP 0xe0000002U /* 224.0.0.2 */ 292 282 #define INADDR_MAX_LOCAL_GROUP 0xe00000ffU /* 224.0.0.255 */ 293 - 283 + #endif 294 284 295 285 /* <asm/byteorder.h> contains the htonl type stuff.. */ 296 286 #include <asm/byteorder.h>
+22
include/uapi/linux/libc-compat.h
··· 56 56 57 57 /* GLIBC headers included first so don't define anything 58 58 * that would already be defined. */ 59 + #define __UAPI_DEF_IN_ADDR 0 60 + #define __UAPI_DEF_IN_IPPROTO 0 61 + #define __UAPI_DEF_IN_PKTINFO 0 62 + #define __UAPI_DEF_IP_MREQ 0 63 + #define __UAPI_DEF_SOCKADDR_IN 0 64 + #define __UAPI_DEF_IN_CLASS 0 65 + 59 66 #define __UAPI_DEF_IN6_ADDR 0 60 67 /* The exception is the in6_addr macros which must be defined 61 68 * if the glibc code didn't define them. This guard matches ··· 85 78 /* Linux headers included first, and we must define everything 86 79 * we need. The expectation is that glibc will check the 87 80 * __UAPI_DEF_* defines and adjust appropriately. */ 81 + #define __UAPI_DEF_IN_ADDR 1 82 + #define __UAPI_DEF_IN_IPPROTO 1 83 + #define __UAPI_DEF_IN_PKTINFO 1 84 + #define __UAPI_DEF_IP_MREQ 1 85 + #define __UAPI_DEF_SOCKADDR_IN 1 86 + #define __UAPI_DEF_IN_CLASS 1 87 + 88 88 #define __UAPI_DEF_IN6_ADDR 1 89 89 /* We unconditionally define the in6_addr macros and glibc must 90 90 * coordinate. */ ··· 116 102 * or we are being included in the kernel, then define everything 117 103 * that we need. */ 118 104 #else /* !defined(__GLIBC__) */ 105 + 106 + /* Definitions for in.h */ 107 + #define __UAPI_DEF_IN_ADDR 1 108 + #define __UAPI_DEF_IN_IPPROTO 1 109 + #define __UAPI_DEF_IN_PKTINFO 1 110 + #define __UAPI_DEF_IP_MREQ 1 111 + #define __UAPI_DEF_SOCKADDR_IN 1 112 + #define __UAPI_DEF_IN_CLASS 1 119 113 120 114 /* Definitions for in6.h */ 121 115 #define __UAPI_DEF_IN6_ADDR 1
+15 -15
net/ax25/af_ax25.c
··· 57 57 58 58 static void ax25_free_sock(struct sock *sk) 59 59 { 60 - ax25_cb_put(ax25_sk(sk)); 60 + ax25_cb_put(sk_to_ax25(sk)); 61 61 } 62 62 63 63 /* ··· 306 306 while ((skb = skb_dequeue(&ax25->sk->sk_receive_queue)) != NULL) { 307 307 if (skb->sk != ax25->sk) { 308 308 /* A pending connection */ 309 - ax25_cb *sax25 = ax25_sk(skb->sk); 309 + ax25_cb *sax25 = sk_to_ax25(skb->sk); 310 310 311 311 /* Queue the unaccepted socket for death */ 312 312 sock_orphan(skb->sk); ··· 551 551 return -EFAULT; 552 552 553 553 lock_sock(sk); 554 - ax25 = ax25_sk(sk); 554 + ax25 = sk_to_ax25(sk); 555 555 556 556 switch (optname) { 557 557 case AX25_WINDOW: ··· 697 697 length = min_t(unsigned int, maxlen, sizeof(int)); 698 698 699 699 lock_sock(sk); 700 - ax25 = ax25_sk(sk); 700 + ax25 = sk_to_ax25(sk); 701 701 702 702 switch (optname) { 703 703 case AX25_WINDOW: ··· 796 796 static struct proto ax25_proto = { 797 797 .name = "AX25", 798 798 .owner = THIS_MODULE, 799 - .obj_size = sizeof(struct sock), 799 + .obj_size = sizeof(struct ax25_sock), 800 800 }; 801 801 802 802 static int ax25_create(struct net *net, struct socket *sock, int protocol, ··· 858 858 if (sk == NULL) 859 859 return -ENOMEM; 860 860 861 - ax25 = sk->sk_protinfo = ax25_create_cb(); 861 + ax25 = ax25_sk(sk)->cb = ax25_create_cb(); 862 862 if (!ax25) { 863 863 sk_free(sk); 864 864 return -ENOMEM; ··· 910 910 sk->sk_state = TCP_ESTABLISHED; 911 911 sock_copy_flags(sk, osk); 912 912 913 - oax25 = ax25_sk(osk); 913 + oax25 = sk_to_ax25(osk); 914 914 915 915 ax25->modulus = oax25->modulus; 916 916 ax25->backoff = oax25->backoff; ··· 938 938 } 939 939 } 940 940 941 - sk->sk_protinfo = ax25; 941 + ax25_sk(sk)->cb = ax25; 942 942 sk->sk_destruct = ax25_free_sock; 943 943 ax25->sk = sk; 944 944 ··· 956 956 sock_hold(sk); 957 957 sock_orphan(sk); 958 958 lock_sock(sk); 959 - ax25 = ax25_sk(sk); 959 + ax25 = sk_to_ax25(sk); 960 960 961 961 if (sk->sk_type == SOCK_SEQPACKET) { 962 962 switch (ax25->state) { ··· 1066 
1066 1067 1067 lock_sock(sk); 1068 1068 1069 - ax25 = ax25_sk(sk); 1069 + ax25 = sk_to_ax25(sk); 1070 1070 if (!sock_flag(sk, SOCK_ZAPPED)) { 1071 1071 err = -EINVAL; 1072 1072 goto out; ··· 1113 1113 struct sockaddr *uaddr, int addr_len, int flags) 1114 1114 { 1115 1115 struct sock *sk = sock->sk; 1116 - ax25_cb *ax25 = ax25_sk(sk), *ax25t; 1116 + ax25_cb *ax25 = sk_to_ax25(sk), *ax25t; 1117 1117 struct full_sockaddr_ax25 *fsa = (struct full_sockaddr_ax25 *)uaddr; 1118 1118 ax25_digi *digi = NULL; 1119 1119 int ct = 0, err = 0; ··· 1394 1394 1395 1395 memset(fsa, 0, sizeof(*fsa)); 1396 1396 lock_sock(sk); 1397 - ax25 = ax25_sk(sk); 1397 + ax25 = sk_to_ax25(sk); 1398 1398 1399 1399 if (peer != 0) { 1400 1400 if (sk->sk_state != TCP_ESTABLISHED) { ··· 1446 1446 return -EINVAL; 1447 1447 1448 1448 lock_sock(sk); 1449 - ax25 = ax25_sk(sk); 1449 + ax25 = sk_to_ax25(sk); 1450 1450 1451 1451 if (sock_flag(sk, SOCK_ZAPPED)) { 1452 1452 err = -EADDRNOTAVAIL; ··· 1621 1621 if (skb == NULL) 1622 1622 goto out; 1623 1623 1624 - if (!ax25_sk(sk)->pidincl) 1624 + if (!sk_to_ax25(sk)->pidincl) 1625 1625 skb_pull(skb, 1); /* Remove PID */ 1626 1626 1627 1627 skb_reset_transport_header(skb); ··· 1762 1762 1763 1763 case SIOCAX25GETINFO: 1764 1764 case SIOCAX25GETINFOOLD: { 1765 - ax25_cb *ax25 = ax25_sk(sk); 1765 + ax25_cb *ax25 = sk_to_ax25(sk); 1766 1766 struct ax25_info_struct ax25_info; 1767 1767 1768 1768 ax25_info.t1 = ax25->t1 / HZ;
+1 -1
net/ax25/ax25_in.c
··· 353 353 return 0; 354 354 } 355 355 356 - ax25 = ax25_sk(make); 356 + ax25 = sk_to_ax25(make); 357 357 skb_set_owner_r(skb, make); 358 358 skb_queue_head(&sk->sk_receive_queue, skb); 359 359
+1 -1
net/core/flow_dissector.c
··· 129 129 struct flow_dissector_key_ports *key_ports; 130 130 struct flow_dissector_key_tags *key_tags; 131 131 struct flow_dissector_key_keyid *key_keyid; 132 - u8 ip_proto; 132 + u8 ip_proto = 0; 133 133 134 134 if (!data) { 135 135 data = skb->data;
+1 -2
net/core/sock.c
··· 1454 1454 1455 1455 static void __sk_free(struct sock *sk) 1456 1456 { 1457 - if (unlikely(sock_diag_has_destroy_listeners(sk))) 1457 + if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt)) 1458 1458 sock_diag_broadcast_destroy(sk); 1459 1459 else 1460 1460 sk_destruct(sk); ··· 2269 2269 2270 2270 static void sock_def_destruct(struct sock *sk) 2271 2271 { 2272 - kfree(sk->sk_protinfo); 2273 2272 } 2274 2273 2275 2274 void sk_send_sigurg(struct sock *sk)
+1 -1
net/dsa/slave.c
··· 112 112 113 113 clear_promisc: 114 114 if (dev->flags & IFF_PROMISC) 115 - dev_set_promiscuity(master, 0); 115 + dev_set_promiscuity(master, -1); 116 116 clear_allmulti: 117 117 if (dev->flags & IFF_ALLMULTI) 118 118 dev_set_allmulti(master, -1);
+2 -2
net/ipv4/fib_semantics.c
··· 1045 1045 nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif)) 1046 1046 goto nla_put_failure; 1047 1047 if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) { 1048 - in_dev = __in_dev_get_rcu(fi->fib_nh->nh_dev); 1048 + in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev); 1049 1049 if (in_dev && 1050 1050 IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) 1051 1051 rtm->rtm_flags |= RTNH_F_DEAD; ··· 1074 1074 1075 1075 rtnh->rtnh_flags = nh->nh_flags & 0xFF; 1076 1076 if (nh->nh_flags & RTNH_F_LINKDOWN) { 1077 - in_dev = __in_dev_get_rcu(nh->nh_dev); 1077 + in_dev = __in_dev_get_rtnl(nh->nh_dev); 1078 1078 if (in_dev && 1079 1079 IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev)) 1080 1080 rtnh->rtnh_flags |= RTNH_F_DEAD;
+2 -2
net/sched/cls_flower.c
··· 216 216 [TCA_FLOWER_KEY_IPV6_DST_MASK] = { .len = sizeof(struct in6_addr) }, 217 217 [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, 218 218 [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, 219 - [TCA_FLOWER_KEY_TCP_SRC] = { .type = NLA_U16 }, 220 - [TCA_FLOWER_KEY_TCP_DST] = { .type = NLA_U16 }, 219 + [TCA_FLOWER_KEY_UDP_SRC] = { .type = NLA_U16 }, 220 + [TCA_FLOWER_KEY_UDP_DST] = { .type = NLA_U16 }, 221 221 }; 222 222 223 223 static void fl_set_key_val(struct nlattr **tb,
+3 -1
net/sctp/output.c
··· 599 599 return err; 600 600 no_route: 601 601 kfree_skb(nskb); 602 - IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); 602 + 603 + if (asoc) 604 + IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES); 603 605 604 606 /* FIXME: Returning the 'err' will effect all the associations 605 607 * associated with a socket, although only one of the paths of the
-6
net/sctp/socket.c
··· 2121 2121 if (sp->subscribe.sctp_data_io_event) 2122 2122 sctp_ulpevent_read_sndrcvinfo(event, msg); 2123 2123 2124 - #if 0 2125 - /* FIXME: we should be calling IP/IPv6 layers. */ 2126 - if (sk->sk_protinfo.af_inet.cmsg_flags) 2127 - ip_cmsg_recv(msg, skb); 2128 - #endif 2129 - 2130 2124 err = copied; 2131 2125 2132 2126 /* If skb's length exceeds the user's buffer, update the skb and
+5
net/tipc/bcast.c
··· 108 108 109 109 tipc_bclink_lock(net); 110 110 tipc_nmap_remove(&tn->bclink->bcast_nodes, addr); 111 + 112 + /* Last node? => reset backlog queue */ 113 + if (!tn->bclink->bcast_nodes.count) 114 + tipc_link_purge_backlog(&tn->bclink->link); 115 + 111 116 tipc_bclink_unlock(net); 112 117 } 113 118
+1 -1
net/tipc/link.c
··· 404 404 l_ptr->reasm_buf = NULL; 405 405 } 406 406 407 - static void tipc_link_purge_backlog(struct tipc_link *l) 407 + void tipc_link_purge_backlog(struct tipc_link *l) 408 408 { 409 409 __skb_queue_purge(&l->backlogq); 410 410 l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+1
net/tipc/link.h
··· 218 218 int tipc_link_is_up(struct tipc_link *l_ptr); 219 219 int tipc_link_is_active(struct tipc_link *l_ptr); 220 220 void tipc_link_purge_queues(struct tipc_link *l_ptr); 221 + void tipc_link_purge_backlog(struct tipc_link *l); 221 222 void tipc_link_reset_all(struct tipc_node *node); 222 223 void tipc_link_reset(struct tipc_link *l_ptr); 223 224 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,