Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking updates from David Miller:

1) Missing netlink attribute validation in nft_lookup, from Patrick
McHardy.

2) Restrict ipv6 partial checksum handling to UDP, since that's the
only case it works for. From Vlad Yasevich.

3) Clear out silly device table sentinel macros used by SSB and BCMA
drivers. From Joe Perches.

4) Make sure the remote checksum code never creates a situation where
the remote checksum is applied yet the tunneling metadata describing
the remote checksum transformation is still present. Otherwise an
external entity might see this and apply the checksum again. From
Tom Herbert.

5) Use msecs_to_jiffies() where applicable, from Nicholas Mc Guire.

6) Don't explicitly initialize timer struct fields, use setup_timer()
and mod_timer() instead. From Vaishali Thakkar.

7) Don't invoke tg3_halt() without the tp->lock held, from Jun'ichi
Nomura.

8) Missing __percpu annotation in ipvlan driver, from Eric Dumazet.

9) Don't potentially perform skb_get() on shared skbs, also from Eric
Dumazet.

10) Fix COW'ing of metrics for non-DST_HOST routes in ipv6, from Martin
KaFai Lau.

11) Fix merge resolution error between the iov_iter changes in vhost and
some bug fixes that occurred at the same time. From Jason Wang.

12) If rtnl_configure_link() fails we have to perform a call to
->dellink() before unregistering the device. From WANG Cong.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (39 commits)
net: dsa: Set valid phy interface type
rtnetlink: call ->dellink on failure when ->newlink exists
com20020-pci: add support for eae single card
vhost_net: fix wrong iter offset when setting number of buffers
net: spelling fixes
net/core: Fix warning while make xmldocs caused by dev.c
net: phy: micrel: disable NAND-tree for KSZ8021, KSZ8031, KSZ8051, KSZ8081
ipv6: fix ipv6_cow_metrics for non DST_HOST case
openvswitch: Fix key serialization.
r8152: restore hw settings
hso: fix rx parsing logic when skb allocation fails
tcp: make sure skb is not shared before using skb_get()
bridge: netfilter: Move sysctl-specific error code inside #ifdef
ipv6: fix possible deadlock in ip6_fl_purge / ip6_fl_gc
ipvlan: add a missing __percpu pcpu_stats
tg3: Hold tp->lock before calling tg3_halt() from tg3_init_one()
bgmac: fix device initialization on Northstar SoCs (condition typo)
qlcnic: Delete existing multicast MAC list before adding new
net/mlx5_core: Fix configuration of log_uar_page_sz
sunvnet: don't change gso data on clones
...

+483 -143
+18 -3
drivers/net/arcnet/com20020-pci.c
··· 214 214 .flags = ARC_CAN_10MBIT, 215 215 }; 216 216 217 - static struct com20020_pci_card_info card_info_eae = { 218 - .name = "EAE PLX-PCI", 217 + static struct com20020_pci_card_info card_info_eae_arc1 = { 218 + .name = "EAE PLX-PCI ARC1", 219 + .devcount = 1, 220 + .chan_map_tbl = { 221 + { 2, 0x00, 0x08 }, 222 + }, 223 + .flags = ARC_CAN_10MBIT, 224 + }; 225 + 226 + static struct com20020_pci_card_info card_info_eae_ma1 = { 227 + .name = "EAE PLX-PCI MA1", 219 228 .devcount = 2, 220 229 .chan_map_tbl = { 221 230 { 2, 0x00, 0x08 }, ··· 368 359 }, 369 360 { 370 361 0x10B5, 0x9050, 362 + 0x10B5, 0x3263, 363 + 0, 0, 364 + (kernel_ulong_t)&card_info_eae_arc1 365 + }, 366 + { 367 + 0x10B5, 0x9050, 371 368 0x10B5, 0x3292, 372 369 0, 0, 373 - (kernel_ulong_t)&card_info_eae 370 + (kernel_ulong_t)&card_info_eae_ma1 374 371 }, 375 372 { 376 373 0x14BA, 0x6000,
+2 -5
drivers/net/ethernet/3com/3c589_cs.c
··· 518 518 netif_start_queue(dev); 519 519 520 520 tc589_reset(dev); 521 - init_timer(&lp->media); 522 - lp->media.function = media_check; 523 - lp->media.data = (unsigned long) dev; 524 - lp->media.expires = jiffies + HZ; 525 - add_timer(&lp->media); 521 + setup_timer(&lp->media, media_check, (unsigned long)dev); 522 + mod_timer(&lp->media, jiffies + HZ); 526 523 527 524 dev_dbg(&link->dev, "%s: opened, status %4.4x.\n", 528 525 dev->name, inw(dev->base_addr + EL3_STATUS));
+4 -2
drivers/net/ethernet/agere/et131x.c
··· 3127 3127 } 3128 3128 3129 3129 /* This is a periodic timer, so reschedule */ 3130 - mod_timer(&adapter->error_timer, jiffies + TX_ERROR_PERIOD * HZ / 1000); 3130 + mod_timer(&adapter->error_timer, jiffies + 3131 + msecs_to_jiffies(TX_ERROR_PERIOD)); 3131 3132 } 3132 3133 3133 3134 static void et131x_adapter_memory_free(struct et131x_adapter *adapter) ··· 3648 3647 3649 3648 /* Start the timer to track NIC errors */ 3650 3649 init_timer(&adapter->error_timer); 3651 - adapter->error_timer.expires = jiffies + TX_ERROR_PERIOD * HZ / 1000; 3650 + adapter->error_timer.expires = jiffies + 3651 + msecs_to_jiffies(TX_ERROR_PERIOD); 3652 3652 adapter->error_timer.function = et131x_error_timer_handler; 3653 3653 adapter->error_timer.data = (unsigned long)adapter; 3654 3654 add_timer(&adapter->error_timer);
+2
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 1030 1030 MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); 1031 1031 #endif 1032 1032 1033 + #ifdef CONFIG_OF 1033 1034 static struct of_device_id xgene_enet_of_match[] = { 1034 1035 {.compatible = "apm,xgene-enet",}, 1035 1036 {}, 1036 1037 }; 1037 1038 1038 1039 MODULE_DEVICE_TABLE(of, xgene_enet_of_match); 1040 + #endif 1039 1041 1040 1042 static struct platform_driver xgene_enet_driver = { 1041 1043 .driver = {
+1 -1
drivers/net/ethernet/broadcom/b44.c
··· 121 121 122 122 static const struct ssb_device_id b44_ssb_tbl[] = { 123 123 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET, SSB_ANY_REV), 124 - SSB_DEVTABLE_END 124 + {}, 125 125 }; 126 126 MODULE_DEVICE_TABLE(ssb, b44_ssb_tbl); 127 127
+4 -3
drivers/net/ethernet/broadcom/bgmac.c
··· 21 21 static const struct bcma_device_id bgmac_bcma_tbl[] = { 22 22 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), 23 23 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS), 24 - BCMA_CORETABLE_END 24 + {}, 25 25 }; 26 26 MODULE_DEVICE_TABLE(bcma, bgmac_bcma_tbl); 27 27 ··· 1412 1412 /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ 1413 1413 static int bgmac_probe(struct bcma_device *core) 1414 1414 { 1415 + struct bcma_chipinfo *ci = &core->bus->chipinfo; 1415 1416 struct net_device *net_dev; 1416 1417 struct bgmac *bgmac; 1417 1418 struct ssb_sprom *sprom = &core->bus->sprom; ··· 1475 1474 bgmac_chip_reset(bgmac); 1476 1475 1477 1476 /* For Northstar, we have to take all GMAC core out of reset */ 1478 - if (core->id.id == BCMA_CHIP_ID_BCM4707 || 1479 - core->id.id == BCMA_CHIP_ID_BCM53018) { 1477 + if (ci->id == BCMA_CHIP_ID_BCM4707 || 1478 + ci->id == BCMA_CHIP_ID_BCM53018) { 1480 1479 struct bcma_device *ns_core; 1481 1480 int ns_gmac; 1482 1481
+2
drivers/net/ethernet/broadcom/tg3.c
··· 17855 17855 */ 17856 17856 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) || 17857 17857 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) { 17858 + tg3_full_lock(tp, 0); 17858 17859 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE); 17859 17860 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1); 17861 + tg3_full_unlock(tp); 17860 17862 } 17861 17863 17862 17864 err = tg3_test_dma(tp);
+1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 291 291 MLX5_SET(cmd_hca_cap, to, log_max_ra_req_dc, from->gen.log_max_ra_req_dc); 292 292 MLX5_SET(cmd_hca_cap, to, log_max_ra_res_dc, from->gen.log_max_ra_res_dc); 293 293 MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size)); 294 + MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12); 294 295 v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK; 295 296 *flags_off = cpu_to_be64(v64); 296 297 }
+10 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
··· 848 848 #define QLCNIC_MAC_VLAN_ADD 3 849 849 #define QLCNIC_MAC_VLAN_DEL 4 850 850 851 + enum qlcnic_mac_type { 852 + QLCNIC_UNICAST_MAC, 853 + QLCNIC_MULTICAST_MAC, 854 + QLCNIC_BROADCAST_MAC, 855 + }; 856 + 851 857 struct qlcnic_mac_vlan_list { 852 858 struct list_head list; 853 859 uint8_t mac_addr[ETH_ALEN+2]; 854 860 u16 vlan_id; 861 + enum qlcnic_mac_type mac_type; 855 862 }; 856 863 857 864 /* MAC Learn */ ··· 1622 1615 void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, 1623 1616 struct qlcnic_host_rds_ring *rds_ring, u8 ring_id); 1624 1617 void qlcnic_set_multi(struct net_device *netdev); 1625 - int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16); 1618 + void qlcnic_flush_mcast_mac(struct qlcnic_adapter *); 1619 + int qlcnic_nic_add_mac(struct qlcnic_adapter *, const u8 *, u16, 1620 + enum qlcnic_mac_type); 1626 1621 int qlcnic_nic_del_mac(struct qlcnic_adapter *, const u8 *); 1627 1622 void qlcnic_82xx_free_mac_list(struct qlcnic_adapter *adapter); 1628 1623 int qlcnic_82xx_read_phys_port_id(struct qlcnic_adapter *);
+29 -5
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
··· 487 487 return err; 488 488 } 489 489 490 - int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan) 490 + int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr, u16 vlan, 491 + enum qlcnic_mac_type mac_type) 491 492 { 492 493 struct qlcnic_mac_vlan_list *cur; 493 494 struct list_head *head; ··· 514 513 } 515 514 516 515 cur->vlan_id = vlan; 516 + cur->mac_type = mac_type; 517 + 517 518 list_add_tail(&cur->list, &adapter->mac_list); 518 519 return 0; 520 + } 521 + 522 + void qlcnic_flush_mcast_mac(struct qlcnic_adapter *adapter) 523 + { 524 + struct qlcnic_mac_vlan_list *cur; 525 + struct list_head *head, *tmp; 526 + 527 + list_for_each_safe(head, tmp, &adapter->mac_list) { 528 + cur = list_entry(head, struct qlcnic_mac_vlan_list, list); 529 + if (cur->mac_type != QLCNIC_MULTICAST_MAC) 530 + continue; 531 + 532 + qlcnic_sre_macaddr_change(adapter, cur->mac_addr, 533 + cur->vlan_id, QLCNIC_MAC_DEL); 534 + list_del(&cur->list); 535 + kfree(cur); 536 + } 519 537 } 520 538 521 539 static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan) ··· 550 530 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 551 531 return; 552 532 553 - qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan); 554 - qlcnic_nic_add_mac(adapter, bcast_addr, vlan); 533 + qlcnic_nic_add_mac(adapter, adapter->mac_addr, vlan, 534 + QLCNIC_UNICAST_MAC); 535 + qlcnic_nic_add_mac(adapter, bcast_addr, vlan, QLCNIC_BROADCAST_MAC); 555 536 556 537 if (netdev->flags & IFF_PROMISC) { 557 538 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED)) ··· 561 540 (netdev_mc_count(netdev) > ahw->max_mc_count)) { 562 541 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 563 542 } else if (!netdev_mc_empty(netdev)) { 543 + qlcnic_flush_mcast_mac(adapter); 564 544 netdev_for_each_mc_addr(ha, netdev) 565 - qlcnic_nic_add_mac(adapter, ha->addr, vlan); 545 + qlcnic_nic_add_mac(adapter, ha->addr, vlan, 546 + QLCNIC_MULTICAST_MAC); 566 547 } 567 548 568 549 /* configure unicast MAC 
address, if there is not sufficient space ··· 574 551 mode = VPORT_MISS_MODE_ACCEPT_ALL; 575 552 } else if (!netdev_uc_empty(netdev)) { 576 553 netdev_for_each_uc_addr(ha, netdev) 577 - qlcnic_nic_add_mac(adapter, ha->addr, vlan); 554 + qlcnic_nic_add_mac(adapter, ha->addr, vlan, 555 + QLCNIC_UNICAST_MAC); 578 556 } 579 557 580 558 if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+12 -7
drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
··· 1489 1489 return ret; 1490 1490 } 1491 1491 1492 - static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac) 1492 + static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac, 1493 + enum qlcnic_mac_type mac_type) 1493 1494 { 1494 1495 struct qlcnic_adapter *adapter = netdev_priv(netdev); 1495 1496 struct qlcnic_sriov *sriov = adapter->ahw->sriov; ··· 1501 1500 vf = &adapter->ahw->sriov->vf_info[0]; 1502 1501 1503 1502 if (!qlcnic_sriov_check_any_vlan(vf)) { 1504 - qlcnic_nic_add_mac(adapter, mac, 0); 1503 + qlcnic_nic_add_mac(adapter, mac, 0, mac_type); 1505 1504 } else { 1506 1505 spin_lock(&vf->vlan_list_lock); 1507 1506 for (i = 0; i < sriov->num_allowed_vlans; i++) { 1508 1507 vlan_id = vf->sriov_vlans[i]; 1509 1508 if (vlan_id) 1510 - qlcnic_nic_add_mac(adapter, mac, vlan_id); 1509 + qlcnic_nic_add_mac(adapter, mac, vlan_id, 1510 + mac_type); 1511 1511 } 1512 1512 spin_unlock(&vf->vlan_list_lock); 1513 1513 if (qlcnic_84xx_check(adapter)) 1514 - qlcnic_nic_add_mac(adapter, mac, 0); 1514 + qlcnic_nic_add_mac(adapter, mac, 0, mac_type); 1515 1515 } 1516 1516 } 1517 1517 ··· 1551 1549 (netdev_mc_count(netdev) > ahw->max_mc_count)) { 1552 1550 mode = VPORT_MISS_MODE_ACCEPT_MULTI; 1553 1551 } else { 1554 - qlcnic_vf_add_mc_list(netdev, bcast_addr); 1552 + qlcnic_vf_add_mc_list(netdev, bcast_addr, QLCNIC_BROADCAST_MAC); 1555 1553 if (!netdev_mc_empty(netdev)) { 1554 + qlcnic_flush_mcast_mac(adapter); 1556 1555 netdev_for_each_mc_addr(ha, netdev) 1557 - qlcnic_vf_add_mc_list(netdev, ha->addr); 1556 + qlcnic_vf_add_mc_list(netdev, ha->addr, 1557 + QLCNIC_MULTICAST_MAC); 1558 1558 } 1559 1559 } 1560 1560 ··· 1567 1563 mode = VPORT_MISS_MODE_ACCEPT_ALL; 1568 1564 } else if (!netdev_uc_empty(netdev)) { 1569 1565 netdev_for_each_uc_addr(ha, netdev) 1570 - qlcnic_vf_add_mc_list(netdev, ha->addr); 1566 + qlcnic_vf_add_mc_list(netdev, ha->addr, 1567 + QLCNIC_UNICAST_MAC); 1571 1568 } 1572 1569 1573 1570 if (adapter->pdev->is_virtfn) {
+10 -13
drivers/net/ethernet/sun/sunvnet.c
··· 1192 1192 skb_pull(skb, maclen); 1193 1193 1194 1194 if (port->tso && gso_size < datalen) { 1195 + if (skb_unclone(skb, GFP_ATOMIC)) 1196 + goto out_dropped; 1197 + 1195 1198 /* segment to TSO size */ 1196 1199 skb_shinfo(skb)->gso_size = datalen; 1197 1200 skb_shinfo(skb)->gso_segs = gso_segs; 1198 - 1199 - segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); 1200 - 1201 - /* restore gso_size & gso_segs */ 1202 - skb_shinfo(skb)->gso_size = gso_size; 1203 - skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len - hlen, 1204 - gso_size); 1205 - } else 1206 - segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); 1207 - if (IS_ERR(segs)) { 1208 - dev->stats.tx_dropped++; 1209 - dev_kfree_skb_any(skb); 1210 - return NETDEV_TX_OK; 1211 1201 } 1202 + segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO); 1203 + if (IS_ERR(segs)) 1204 + goto out_dropped; 1212 1205 1213 1206 skb_push(skb, maclen); 1214 1207 skb_reset_mac_header(skb); ··· 1239 1246 if (!(status & NETDEV_TX_MASK)) 1240 1247 dev_kfree_skb_any(skb); 1241 1248 return status; 1249 + out_dropped: 1250 + dev->stats.tx_dropped++; 1251 + dev_kfree_skb_any(skb); 1252 + return NETDEV_TX_OK; 1242 1253 } 1243 1254 1244 1255 static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
+1 -1
drivers/net/ipvlan/ipvlan.h
··· 67 67 struct list_head addrs; 68 68 int ipv4cnt; 69 69 int ipv6cnt; 70 - struct ipvl_pcpu_stats *pcpu_stats; 70 + struct ipvl_pcpu_stats __percpu *pcpu_stats; 71 71 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); 72 72 netdev_features_t sfeatures; 73 73 u32 msg_enable;
+28
drivers/net/phy/micrel.c
··· 32 32 /* Operation Mode Strap Override */ 33 33 #define MII_KSZPHY_OMSO 0x16 34 34 #define KSZPHY_OMSO_B_CAST_OFF BIT(9) 35 + #define KSZPHY_OMSO_NAND_TREE_ON BIT(5) 35 36 #define KSZPHY_OMSO_RMII_OVERRIDE BIT(1) 36 37 #define KSZPHY_OMSO_MII_OVERRIDE BIT(0) 37 38 ··· 77 76 u32 led_mode_reg; 78 77 u16 interrupt_level_mask; 79 78 bool has_broadcast_disable; 79 + bool has_nand_tree_disable; 80 80 bool has_rmii_ref_clk_sel; 81 81 }; 82 82 ··· 91 89 static const struct kszphy_type ksz8021_type = { 92 90 .led_mode_reg = MII_KSZPHY_CTRL_2, 93 91 .has_broadcast_disable = true, 92 + .has_nand_tree_disable = true, 94 93 .has_rmii_ref_clk_sel = true, 95 94 }; 96 95 ··· 101 98 102 99 static const struct kszphy_type ksz8051_type = { 103 100 .led_mode_reg = MII_KSZPHY_CTRL_2, 101 + .has_nand_tree_disable = true, 104 102 }; 105 103 106 104 static const struct kszphy_type ksz8081_type = { 107 105 .led_mode_reg = MII_KSZPHY_CTRL_2, 108 106 .has_broadcast_disable = true, 107 + .has_nand_tree_disable = true, 109 108 .has_rmii_ref_clk_sel = true, 110 109 }; 111 110 ··· 236 231 return ret; 237 232 } 238 233 234 + static int kszphy_nand_tree_disable(struct phy_device *phydev) 235 + { 236 + int ret; 237 + 238 + ret = phy_read(phydev, MII_KSZPHY_OMSO); 239 + if (ret < 0) 240 + goto out; 241 + 242 + if (!(ret & KSZPHY_OMSO_NAND_TREE_ON)) 243 + return 0; 244 + 245 + ret = phy_write(phydev, MII_KSZPHY_OMSO, 246 + ret & ~KSZPHY_OMSO_NAND_TREE_ON); 247 + out: 248 + if (ret) 249 + dev_err(&phydev->dev, "failed to disable NAND tree mode\n"); 250 + 251 + return ret; 252 + } 253 + 239 254 static int kszphy_config_init(struct phy_device *phydev) 240 255 { 241 256 struct kszphy_priv *priv = phydev->priv; ··· 269 244 270 245 if (type->has_broadcast_disable) 271 246 kszphy_broadcast_disable(phydev); 247 + 248 + if (type->has_nand_tree_disable) 249 + kszphy_nand_tree_disable(phydev); 272 250 273 251 if (priv->rmii_ref_clk_sel) { 274 252 ret = kszphy_rmii_clk_sel(phydev, priv->rmii_ref_clk_sel_val);
+1 -1
drivers/net/usb/hso.c
··· 914 914 /* We got no receive buffer. */ 915 915 D1("could not allocate memory"); 916 916 odev->rx_parse_state = WAIT_SYNC; 917 - return; 917 + continue; 918 918 } 919 919 920 920 /* Copy what we got so far. make room for iphdr
+57 -2
drivers/net/usb/r8152.c
··· 40 40 #define PLA_RXFIFO_CTRL0 0xc0a0 41 41 #define PLA_RXFIFO_CTRL1 0xc0a4 42 42 #define PLA_RXFIFO_CTRL2 0xc0a8 43 + #define PLA_DMY_REG0 0xc0b0 43 44 #define PLA_FMC 0xc0b4 44 45 #define PLA_CFG_WOL 0xc0b6 45 46 #define PLA_TEREDO_CFG 0xc0bc ··· 91 90 #define PLA_BP_7 0xfc36 92 91 #define PLA_BP_EN 0xfc38 93 92 93 + #define USB_USB2PHY 0xb41e 94 + #define USB_SSPHYLINK2 0xb428 94 95 #define USB_U2P3_CTRL 0xb460 96 + #define USB_CSR_DUMMY1 0xb464 97 + #define USB_CSR_DUMMY2 0xb466 95 98 #define USB_DEV_STAT 0xb808 99 + #define USB_CONNECT_TIMER 0xcbf8 100 + #define USB_BURST_SIZE 0xcfc0 96 101 #define USB_USB_CTRL 0xd406 97 102 #define USB_PHY_CTRL 0xd408 98 103 #define USB_TX_AGG 0xd40a ··· 176 169 /* PLA_TXFIFO_CTRL */ 177 170 #define TXFIFO_THR_NORMAL 0x00400008 178 171 #define TXFIFO_THR_NORMAL2 0x01000008 172 + 173 + /* PLA_DMY_REG0 */ 174 + #define ECM_ALDPS 0x0002 179 175 180 176 /* PLA_FMC */ 181 177 #define FMC_FCR_MCU_EN 0x0001 ··· 299 289 /* PLA_BOOT_CTRL */ 300 290 #define AUTOLOAD_DONE 0x0002 301 291 292 + /* USB_USB2PHY */ 293 + #define USB2PHY_SUSPEND 0x0001 294 + #define USB2PHY_L1 0x0002 295 + 296 + /* USB_SSPHYLINK2 */ 297 + #define pwd_dn_scale_mask 0x3ffe 298 + #define pwd_dn_scale(x) ((x) << 1) 299 + 300 + /* USB_CSR_DUMMY1 */ 301 + #define DYNAMIC_BURST 0x0001 302 + 303 + /* USB_CSR_DUMMY2 */ 304 + #define EP4_FULL_FC 0x0001 305 + 302 306 /* USB_DEV_STAT */ 303 307 #define STAT_SPEED_MASK 0x0006 304 308 #define STAT_SPEED_HIGH 0x0000 ··· 358 334 #define TIMER11_EN 0x0001 359 335 360 336 /* USB_LPM_CTRL */ 337 + /* bit 4 ~ 5: fifo empty boundary */ 338 + #define FIFO_EMPTY_1FB 0x30 /* 0x1fb * 64 = 32448 bytes */ 339 + /* bit 2 ~ 3: LMP timer */ 361 340 #define LPM_TIMER_MASK 0x0c 362 341 #define LPM_TIMER_500MS 0x04 /* 500 ms */ 363 342 #define LPM_TIMER_500US 0x0c /* 500 us */ 343 + #define ROK_EXIT_LPM 0x02 364 344 365 345 /* USB_AFE_CTRL2 */ 366 346 #define SEN_VAL_MASK 0xf800 ··· 3258 3230 3259 3231 r8153_u2p3en(tp, false); 3260 3232 
3233 + if (tp->version == RTL_VER_04) { 3234 + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2); 3235 + ocp_data &= ~pwd_dn_scale_mask; 3236 + ocp_data |= pwd_dn_scale(96); 3237 + ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK2, ocp_data); 3238 + 3239 + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_USB2PHY); 3240 + ocp_data |= USB2PHY_L1 | USB2PHY_SUSPEND; 3241 + ocp_write_byte(tp, MCU_TYPE_USB, USB_USB2PHY, ocp_data); 3242 + } else if (tp->version == RTL_VER_05) { 3243 + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0); 3244 + ocp_data &= ~ECM_ALDPS; 3245 + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_DMY_REG0, ocp_data); 3246 + 3247 + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1); 3248 + if (ocp_read_word(tp, MCU_TYPE_USB, USB_BURST_SIZE) == 0) 3249 + ocp_data &= ~DYNAMIC_BURST; 3250 + else 3251 + ocp_data |= DYNAMIC_BURST; 3252 + ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); 3253 + } 3254 + 3255 + ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); 3256 + ocp_data |= EP4_FULL_FC; 3257 + ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2, ocp_data); 3258 + 3261 3259 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL); 3262 3260 ocp_data &= ~TIMER11_EN; 3263 3261 ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data); ··· 3292 3238 ocp_data &= ~LED_MODE_MASK; 3293 3239 ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data); 3294 3240 3295 - ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_LPM_CTRL); 3296 - ocp_data &= ~LPM_TIMER_MASK; 3241 + ocp_data = FIFO_EMPTY_1FB | ROK_EXIT_LPM; 3297 3242 if (tp->version == RTL_VER_04 && tp->udev->speed != USB_SPEED_SUPER) 3298 3243 ocp_data |= LPM_TIMER_500MS; 3299 3244 else ··· 3303 3250 ocp_data &= ~SEN_VAL_MASK; 3304 3251 ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE; 3305 3252 ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data); 3253 + 3254 + ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); 3306 3255 3307 3256 r8153_power_cut_en(tp, false); 3308 3257 
r8153_u1u2en(tp, true);
+25 -13
drivers/net/vxlan.c
··· 555 555 static struct vxlanhdr *vxlan_gro_remcsum(struct sk_buff *skb, 556 556 unsigned int off, 557 557 struct vxlanhdr *vh, size_t hdrlen, 558 - u32 data) 558 + u32 data, struct gro_remcsum *grc, 559 + bool nopartial) 559 560 { 560 561 size_t start, offset, plen; 561 562 562 563 if (skb->remcsum_offload) 563 - return vh; 564 + return NULL; 564 565 565 566 if (!NAPI_GRO_CB(skb)->csum_valid) 566 567 return NULL; ··· 580 579 return NULL; 581 580 } 582 581 583 - skb_gro_remcsum_process(skb, (void *)vh + hdrlen, start, offset); 582 + skb_gro_remcsum_process(skb, (void *)vh + hdrlen, 583 + start, offset, grc, nopartial); 584 584 585 585 skb->remcsum_offload = 1; 586 586 ··· 599 597 struct vxlan_sock *vs = container_of(uoff, struct vxlan_sock, 600 598 udp_offloads); 601 599 u32 flags; 600 + struct gro_remcsum grc; 601 + 602 + skb_gro_remcsum_init(&grc); 602 603 603 604 off_vx = skb_gro_offset(skb); 604 605 hlen = off_vx + sizeof(*vh); ··· 619 614 620 615 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { 621 616 vh = vxlan_gro_remcsum(skb, off_vx, vh, sizeof(struct vxlanhdr), 622 - ntohl(vh->vx_vni)); 617 + ntohl(vh->vx_vni), &grc, 618 + !!(vs->flags & 619 + VXLAN_F_REMCSUM_NOPARTIAL)); 623 620 624 621 if (!vh) 625 622 goto out; ··· 644 637 pp = eth_gro_receive(head, skb); 645 638 646 639 out: 640 + skb_gro_remcsum_cleanup(skb, &grc); 647 641 NAPI_GRO_CB(skb)->flush |= flush; 648 642 649 643 return pp; ··· 1158 1150 } 1159 1151 1160 1152 static struct vxlanhdr *vxlan_remcsum(struct sk_buff *skb, struct vxlanhdr *vh, 1161 - size_t hdrlen, u32 data) 1153 + size_t hdrlen, u32 data, bool nopartial) 1162 1154 { 1163 1155 size_t start, offset, plen; 1164 - 1165 - if (skb->remcsum_offload) { 1166 - /* Already processed in GRO path */ 1167 - skb->remcsum_offload = 0; 1168 - return vh; 1169 - } 1170 1156 1171 1157 start = (data & VXLAN_RCO_MASK) << VXLAN_RCO_SHIFT; 1172 1158 offset = start + ((data & VXLAN_RCO_UDP) ? 
··· 1174 1172 1175 1173 vh = (struct vxlanhdr *)(udp_hdr(skb) + 1); 1176 1174 1177 - skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset); 1175 + skb_remcsum_process(skb, (void *)vh + hdrlen, start, offset, 1176 + nopartial); 1178 1177 1179 1178 return vh; 1180 1179 } ··· 1212 1209 goto drop; 1213 1210 1214 1211 if ((flags & VXLAN_HF_RCO) && (vs->flags & VXLAN_F_REMCSUM_RX)) { 1215 - vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni); 1212 + vxh = vxlan_remcsum(skb, vxh, sizeof(struct vxlanhdr), vni, 1213 + !!(vs->flags & VXLAN_F_REMCSUM_NOPARTIAL)); 1216 1214 if (!vxh) 1217 1215 goto drop; 1218 1216 ··· 2442 2438 [IFLA_VXLAN_REMCSUM_TX] = { .type = NLA_U8 }, 2443 2439 [IFLA_VXLAN_REMCSUM_RX] = { .type = NLA_U8 }, 2444 2440 [IFLA_VXLAN_GBP] = { .type = NLA_FLAG, }, 2441 + [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, 2445 2442 }; 2446 2443 2447 2444 static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[]) ··· 2766 2761 if (data[IFLA_VXLAN_GBP]) 2767 2762 vxlan->flags |= VXLAN_F_GBP; 2768 2763 2764 + if (data[IFLA_VXLAN_REMCSUM_NOPARTIAL]) 2765 + vxlan->flags |= VXLAN_F_REMCSUM_NOPARTIAL; 2766 + 2769 2767 if (vxlan_find_vni(src_net, vni, use_ipv6 ? AF_INET6 : AF_INET, 2770 2768 vxlan->dst_port, vxlan->flags)) { 2771 2769 pr_info("duplicate VNI %u\n", vni); ··· 2916 2908 2917 2909 if (vxlan->flags & VXLAN_F_GBP && 2918 2910 nla_put_flag(skb, IFLA_VXLAN_GBP)) 2911 + goto nla_put_failure; 2912 + 2913 + if (vxlan->flags & VXLAN_F_REMCSUM_NOPARTIAL && 2914 + nla_put_flag(skb, IFLA_VXLAN_REMCSUM_NOPARTIAL)) 2919 2915 goto nla_put_failure; 2920 2916 2921 2917 return 0;
+2 -2
drivers/net/wireless/b43/main.c
··· 127 127 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1E, BCMA_ANY_CLASS), 128 128 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x28, BCMA_ANY_CLASS), 129 129 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x2A, BCMA_ANY_CLASS), 130 - BCMA_CORETABLE_END 130 + {}, 131 131 }; 132 132 MODULE_DEVICE_TABLE(bcma, b43_bcma_tbl); 133 133 #endif ··· 144 144 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13), 145 145 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15), 146 146 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16), 147 - SSB_DEVTABLE_END 147 + {}, 148 148 }; 149 149 MODULE_DEVICE_TABLE(ssb, b43_ssb_tbl); 150 150 #endif
+1 -1
drivers/net/wireless/b43legacy/main.c
··· 86 86 static const struct ssb_device_id b43legacy_ssb_tbl[] = { 87 87 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 2), 88 88 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 4), 89 - SSB_DEVTABLE_END 89 + {}, 90 90 }; 91 91 MODULE_DEVICE_TABLE(ssb, b43legacy_ssb_tbl); 92 92
+1 -1
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
··· 99 99 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 17, BCMA_ANY_CLASS), 100 100 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 23, BCMA_ANY_CLASS), 101 101 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 24, BCMA_ANY_CLASS), 102 - BCMA_CORETABLE_END 102 + {}, 103 103 }; 104 104 MODULE_DEVICE_TABLE(bcma, brcms_coreid_table); 105 105
+1 -4
drivers/net/wireless/rtlwifi/pci.c
··· 822 822 823 823 /* get a new skb - if fail, old one will be reused */ 824 824 new_skb = dev_alloc_skb(rtlpci->rxbuffersize); 825 - if (unlikely(!new_skb)) { 826 - pr_err("Allocation of new skb failed in %s\n", 827 - __func__); 825 + if (unlikely(!new_skb)) 828 826 goto no_new; 829 - } 830 827 if (rtlpriv->use_new_trx_flow) { 831 828 buffer_desc = 832 829 &rtlpci->rx_ring[rxring_idx].buffer_desc
+1 -1
drivers/spi/spi-bcm53xx.c
··· 216 216 217 217 static const struct bcma_device_id bcm53xxspi_bcma_tbl[] = { 218 218 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_QSPI, BCMA_ANY_REV, BCMA_ANY_CLASS), 219 - BCMA_CORETABLE_END 219 + {}, 220 220 }; 221 221 MODULE_DEVICE_TABLE(bcma, bcm53xxspi_bcma_tbl); 222 222
+1 -1
drivers/ssb/driver_gige.c
··· 24 24 25 25 static const struct ssb_device_id ssb_gige_tbl[] = { 26 26 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_ETHERNET_GBIT, SSB_ANY_REV), 27 - SSB_DEVTABLE_END 27 + {}, 28 28 }; 29 29 /* MODULE_DEVICE_TABLE(ssb, ssb_gige_tbl); */ 30 30
+1 -1
drivers/usb/host/bcma-hcd.c
··· 306 306 307 307 static const struct bcma_device_id bcma_hcd_table[] = { 308 308 BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_USB20_HOST, BCMA_ANY_REV, BCMA_ANY_CLASS), 309 - BCMA_CORETABLE_END 309 + {}, 310 310 }; 311 311 MODULE_DEVICE_TABLE(bcma, bcma_hcd_table); 312 312
+1 -1
drivers/usb/host/ssb-hcd.c
··· 251 251 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOSTDEV, SSB_ANY_REV), 252 252 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB11_HOST, SSB_ANY_REV), 253 253 SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_USB20_HOST, SSB_ANY_REV), 254 - SSB_DEVTABLE_END 254 + {}, 255 255 }; 256 256 MODULE_DEVICE_TABLE(ssb, ssb_hcd_table); 257 257
+6 -5
drivers/vhost/net.c
··· 528 528 .msg_controllen = 0, 529 529 .msg_flags = MSG_DONTWAIT, 530 530 }; 531 - struct virtio_net_hdr_mrg_rxbuf hdr = { 532 - .hdr.flags = 0, 533 - .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE 531 + struct virtio_net_hdr hdr = { 532 + .flags = 0, 533 + .gso_type = VIRTIO_NET_HDR_GSO_NONE 534 534 }; 535 535 size_t total_len = 0; 536 536 int err, mergeable; ··· 539 539 size_t vhost_len, sock_len; 540 540 struct socket *sock; 541 541 struct iov_iter fixup; 542 + __virtio16 num_buffers; 542 543 543 544 mutex_lock(&vq->mutex); 544 545 sock = vq->private_data; ··· 617 616 } 618 617 /* TODO: Should check and handle checksum. */ 619 618 620 - hdr.num_buffers = cpu_to_vhost16(vq, headcount); 619 + num_buffers = cpu_to_vhost16(vq, headcount); 621 620 if (likely(mergeable) && 622 - copy_to_iter(&hdr.num_buffers, 2, &fixup) != 2) { 621 + copy_to_iter(&num_buffers, 2, &fixup) != 2) { 623 622 vq_err(vq, "Failed num_buffers write"); 624 623 vhost_discard_vq_desc(vq, headcount); 625 624 break;
-4
include/linux/mod_devicetable.h
··· 364 364 } __attribute__((packed, aligned(2))); 365 365 #define SSB_DEVICE(_vendor, _coreid, _revision) \ 366 366 { .vendor = _vendor, .coreid = _coreid, .revision = _revision, } 367 - #define SSB_DEVTABLE_END \ 368 - { 0, }, 369 367 370 368 #define SSB_ANY_VENDOR 0xFFFF 371 369 #define SSB_ANY_ID 0xFFFF ··· 378 380 } __attribute__((packed,aligned(2))); 379 381 #define BCMA_CORE(_manuf, _id, _rev, _class) \ 380 382 { .manuf = _manuf, .id = _id, .rev = _rev, .class = _class, } 381 - #define BCMA_CORETABLE_END \ 382 - { 0, }, 383 383 384 384 #define BCMA_ANY_MANUF 0xFFFF 385 385 #define BCMA_ANY_ID 0xFFFF
+52 -10
include/linux/netdevice.h
··· 1923 1923 /* Number of segments aggregated. */ 1924 1924 u16 count; 1925 1925 1926 - /* This is non-zero if the packet may be of the same flow. */ 1927 - u8 same_flow; 1928 - 1929 - /* Free the skb? */ 1930 - u8 free; 1931 - #define NAPI_GRO_FREE 1 1932 - #define NAPI_GRO_FREE_STOLEN_HEAD 2 1926 + /* Start offset for remote checksum offload */ 1927 + u16 gro_remcsum_start; 1933 1928 1934 1929 /* jiffies when first packet was created/queued */ 1935 1930 unsigned long age; 1936 1931 1937 1932 /* Used in ipv6_gro_receive() and foo-over-udp */ 1938 1933 u16 proto; 1934 + 1935 + /* This is non-zero if the packet may be of the same flow. */ 1936 + u8 same_flow:1; 1939 1937 1940 1938 /* Used in udp_gro_receive */ 1941 1939 u8 udp_mark:1; ··· 1944 1946 /* Number of checksums via CHECKSUM_UNNECESSARY */ 1945 1947 u8 csum_cnt:3; 1946 1948 1949 + /* Free the skb? */ 1950 + u8 free:2; 1951 + #define NAPI_GRO_FREE 1 1952 + #define NAPI_GRO_FREE_STOLEN_HEAD 2 1953 + 1947 1954 /* Used in foo-over-udp, set in udp[46]_gro_receive */ 1948 1955 u8 is_ipv6:1; 1956 + 1957 + /* 7 bit hole */ 1949 1958 1950 1959 /* used to support CHECKSUM_COMPLETE for tunneling protocols */ 1951 1960 __wsum csum; ··· 2247 2242 2248 2243 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb); 2249 2244 2245 + static inline bool skb_at_gro_remcsum_start(struct sk_buff *skb) 2246 + { 2247 + return (NAPI_GRO_CB(skb)->gro_remcsum_start - skb_headroom(skb) == 2248 + skb_gro_offset(skb)); 2249 + } 2250 + 2250 2251 static inline bool __skb_gro_checksum_validate_needed(struct sk_buff *skb, 2251 2252 bool zero_okay, 2252 2253 __sum16 check) 2253 2254 { 2254 - return (skb->ip_summed != CHECKSUM_PARTIAL && 2255 + return ((skb->ip_summed != CHECKSUM_PARTIAL || 2256 + skb_checksum_start_offset(skb) < 2257 + skb_gro_offset(skb)) && 2258 + !skb_at_gro_remcsum_start(skb) && 2255 2259 NAPI_GRO_CB(skb)->csum_cnt == 0 && 2256 2260 (!zero_okay || check)); 2257 2261 } ··· 2335 2321 compute_pseudo(skb, proto)); \ 2336 
2322 } while (0) 2337 2323 2324 + struct gro_remcsum { 2325 + int offset; 2326 + __wsum delta; 2327 + }; 2328 + 2329 + static inline void skb_gro_remcsum_init(struct gro_remcsum *grc) 2330 + { 2331 + grc->delta = 0; 2332 + } 2333 + 2338 2334 static inline void skb_gro_remcsum_process(struct sk_buff *skb, void *ptr, 2339 - int start, int offset) 2335 + int start, int offset, 2336 + struct gro_remcsum *grc, 2337 + bool nopartial) 2340 2338 { 2341 2339 __wsum delta; 2342 2340 2343 2341 BUG_ON(!NAPI_GRO_CB(skb)->csum_valid); 2344 2342 2343 + if (!nopartial) { 2344 + NAPI_GRO_CB(skb)->gro_remcsum_start = 2345 + ((unsigned char *)ptr + start) - skb->head; 2346 + return; 2347 + } 2348 + 2345 2349 delta = remcsum_adjust(ptr, NAPI_GRO_CB(skb)->csum, start, offset); 2346 2350 2347 2351 /* Adjust skb->csum since we changed the packet */ 2348 - skb->csum = csum_add(skb->csum, delta); 2349 2352 NAPI_GRO_CB(skb)->csum = csum_add(NAPI_GRO_CB(skb)->csum, delta); 2353 + 2354 + grc->offset = (ptr + offset) - (void *)skb->head; 2355 + grc->delta = delta; 2350 2356 } 2351 2357 2358 + static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb, 2359 + struct gro_remcsum *grc) 2360 + { 2361 + if (!grc->delta) 2362 + return; 2363 + 2364 + remcsum_unadjust((__sum16 *)(skb->head + grc->offset), grc->delta); 2365 + } 2352 2366 2353 2367 static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev, 2354 2368 unsigned short type,
+26 -6
include/linux/skbuff.h
··· 83 83 * 84 84 * CHECKSUM_PARTIAL: 85 85 * 86 - * This is identical to the case for output below. This may occur on a packet 86 + * A checksum is set up to be offloaded to a device as described in the 87 + * output description for CHECKSUM_PARTIAL. This may occur on a packet 87 88 * received directly from another Linux OS, e.g., a virtualized Linux kernel 88 - * on the same host. The packet can be treated in the same way as 89 - * CHECKSUM_UNNECESSARY, except that on output (i.e., forwarding) the 90 - * checksum must be filled in by the OS or the hardware. 89 + * on the same host, or it may be set in the input path in GRO or remote 90 + * checksum offload. For the purposes of checksum verification, the checksum 91 + * referred to by skb->csum_start + skb->csum_offset and any preceding 92 + * checksums in the packet are considered verified. Any checksums in the 93 + * packet that are after the checksum being offloaded are not considered to 94 + * be verified. 91 95 * 92 96 * B. Checksumming on output. 93 97 * ··· 2919 2915 2920 2916 static inline int skb_csum_unnecessary(const struct sk_buff *skb) 2921 2917 { 2922 - return ((skb->ip_summed & CHECKSUM_UNNECESSARY) || skb->csum_valid); 2918 + return ((skb->ip_summed == CHECKSUM_UNNECESSARY) || 2919 + skb->csum_valid || 2920 + (skb->ip_summed == CHECKSUM_PARTIAL && 2921 + skb_checksum_start_offset(skb) >= 0)); 2923 2922 } 2924 2923 2925 2924 /** ··· 3104 3097 compute_pseudo(skb, proto)); \ 3105 3098 } while (0) 3106 3099 3100 + static inline void skb_remcsum_adjust_partial(struct sk_buff *skb, void *ptr, 3101 + u16 start, u16 offset) 3102 + { 3103 + skb->ip_summed = CHECKSUM_PARTIAL; 3104 + skb->csum_start = ((unsigned char *)ptr + start) - skb->head; 3105 + skb->csum_offset = offset - start; 3106 + } 3107 + 3107 3108 /* Update skbuf and packet to reflect the remote checksum offload operation. 3108 3109 * When called, ptr indicates the starting point for skb->csum when 3109 3110 * ip_summed is CHECKSUM_COMPLETE. 
If we need create checksum complete 3110 3111 * here, skb_postpull_rcsum is done so skb->csum start is ptr. 3111 3112 */ 3112 3113 static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr, 3113 - int start, int offset) 3114 + int start, int offset, bool nopartial) 3114 3115 { 3115 3116 __wsum delta; 3117 + 3118 + if (!nopartial) { 3119 + skb_remcsum_adjust_partial(skb, ptr, start, offset); 3120 + return; 3121 + } 3116 3122 3117 3123 if (unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) { 3118 3124 __skb_checksum_complete(skb);
+5
include/net/checksum.h
··· 167 167 return delta; 168 168 } 169 169 170 + static inline void remcsum_unadjust(__sum16 *psum, __wsum delta) 171 + { 172 + *psum = csum_fold(csum_sub(delta, *psum)); 173 + } 174 + 170 175 #endif
+3 -1
include/net/vxlan.h
··· 128 128 #define VXLAN_F_REMCSUM_TX 0x200 129 129 #define VXLAN_F_REMCSUM_RX 0x400 130 130 #define VXLAN_F_GBP 0x800 131 + #define VXLAN_F_REMCSUM_NOPARTIAL 0x1000 131 132 132 133 /* Flags that are used in the receive patch. These flags must match in 133 134 * order for a socket to be shareable 134 135 */ 135 136 #define VXLAN_F_RCV_FLAGS (VXLAN_F_GBP | \ 136 137 VXLAN_F_UDP_ZERO_CSUM6_RX | \ 137 - VXLAN_F_REMCSUM_RX) 138 + VXLAN_F_REMCSUM_RX | \ 139 + VXLAN_F_REMCSUM_NOPARTIAL) 138 140 139 141 struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port, 140 142 vxlan_rcv_t *rcv, void *data,
+1
include/uapi/linux/fou.h
··· 14 14 FOU_ATTR_AF, /* u8 */ 15 15 FOU_ATTR_IPPROTO, /* u8 */ 16 16 FOU_ATTR_TYPE, /* u8 */ 17 + FOU_ATTR_REMCSUM_NOPARTIAL, /* flag */ 17 18 18 19 __FOU_ATTR_MAX, 19 20 };
+1
include/uapi/linux/if_link.h
··· 374 374 IFLA_VXLAN_REMCSUM_TX, 375 375 IFLA_VXLAN_REMCSUM_RX, 376 376 IFLA_VXLAN_GBP, 377 + IFLA_VXLAN_REMCSUM_NOPARTIAL, 377 378 __IFLA_VXLAN_MAX 378 379 }; 379 380 #define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
+2 -5
net/bridge/br_netfilter.c
··· 987 987 if (brnf_sysctl_header == NULL) { 988 988 printk(KERN_WARNING 989 989 "br_netfilter: can't register to sysctl.\n"); 990 - ret = -ENOMEM; 991 - goto err1; 990 + nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); 991 + return -ENOMEM; 992 992 } 993 993 #endif 994 994 printk(KERN_NOTICE "Bridge firewalling registered\n"); 995 995 return 0; 996 - err1: 997 - nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops)); 998 - return ret; 999 996 } 1000 997 1001 998 static void __exit br_netfilter_fini(void)
+2 -1
net/core/dev.c
··· 4024 4024 NAPI_GRO_CB(skb)->flush = 0; 4025 4025 NAPI_GRO_CB(skb)->free = 0; 4026 4026 NAPI_GRO_CB(skb)->udp_mark = 0; 4027 + NAPI_GRO_CB(skb)->gro_remcsum_start = 0; 4027 4028 4028 4029 /* Setup for GRO checksum validation */ 4029 4030 switch (skb->ip_summed) { ··· 5336 5335 /** 5337 5336 * netdev_bonding_info_change - Dispatch event about slave change 5338 5337 * @dev: device 5339 - * @netdev_bonding_info: info to dispatch 5338 + * @bonding_info: info to dispatch 5340 5339 * 5341 5340 * Send NETDEV_BONDING_INFO to netdev notifiers with info. 5342 5341 * The caller must hold the RTNL lock.
+1 -1
net/core/filter.c
··· 531 531 *insn = BPF_LDX_MEM(BPF_W, BPF_REG_A, BPF_REG_CTX, fp->k); 532 532 break; 533 533 534 - /* Unkown instruction. */ 534 + /* Unknown instruction. */ 535 535 default: 536 536 goto err; 537 537 }
+1 -1
net/core/pktgen.c
··· 97 97 * New xmit() return, do_div and misc clean up by Stephen Hemminger 98 98 * <shemminger@osdl.org> 040923 99 99 * 100 - * Randy Dunlap fixed u64 printk compiler waring 100 + * Randy Dunlap fixed u64 printk compiler warning 101 101 * 102 102 * Remove FCS from BW calculation. Lennert Buytenhek <buytenh@wantstofly.org> 103 103 * New time handling. Lennert Buytenhek <buytenh@wantstofly.org> 041213
+8 -1
net/core/rtnetlink.c
··· 2162 2162 } 2163 2163 err = rtnl_configure_link(dev, ifm); 2164 2164 if (err < 0) { 2165 - unregister_netdevice(dev); 2165 + if (ops->newlink) { 2166 + LIST_HEAD(list_kill); 2167 + 2168 + ops->dellink(dev, &list_kill); 2169 + unregister_netdevice_many(&list_kill); 2170 + } else { 2171 + unregister_netdevice(dev); 2172 + } 2166 2173 goto out; 2167 2174 } 2168 2175
+7 -2
net/dsa/slave.c
··· 521 521 struct device_node *phy_dn, *port_dn; 522 522 bool phy_is_fixed = false; 523 523 u32 phy_flags = 0; 524 - int ret; 524 + int mode, ret; 525 525 526 526 port_dn = cd->port_dn[p->port]; 527 - p->phy_interface = of_get_phy_mode(port_dn); 527 + mode = of_get_phy_mode(port_dn); 528 + if (mode < 0) 529 + mode = PHY_INTERFACE_MODE_NA; 530 + p->phy_interface = mode; 528 531 529 532 phy_dn = of_parse_phandle(port_dn, "phy-handle", 0); 530 533 if (of_phy_is_fixed_link(port_dn)) { ··· 562 559 if (!p->phy) 563 560 return -ENODEV; 564 561 562 + /* Use already configured phy mode */ 563 + p->phy_interface = p->phy->interface; 565 564 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, 566 565 p->phy_interface); 567 566 } else {
+1 -1
net/ipv4/devinet.c
··· 1186 1186 no_in_dev: 1187 1187 1188 1188 /* Not loopback addresses on loopback should be preferred 1189 - in this case. It is importnat that lo is the first interface 1189 + in this case. It is important that lo is the first interface 1190 1190 in dev_base list. 1191 1191 */ 1192 1192 for_each_netdev_rcu(net, dev) {
+29 -13
net/ipv4/fou.c
··· 22 22 struct fou { 23 23 struct socket *sock; 24 24 u8 protocol; 25 + u8 flags; 25 26 u16 port; 26 27 struct udp_offload udp_offloads; 27 28 struct list_head list; 28 29 }; 29 30 31 + #define FOU_F_REMCSUM_NOPARTIAL BIT(0) 32 + 30 33 struct fou_cfg { 31 34 u16 type; 32 35 u8 protocol; 36 + u8 flags; 33 37 struct udp_port_cfg udp_config; 34 38 }; 35 39 ··· 68 64 } 69 65 70 66 static struct guehdr *gue_remcsum(struct sk_buff *skb, struct guehdr *guehdr, 71 - void *data, size_t hdrlen, u8 ipproto) 67 + void *data, size_t hdrlen, u8 ipproto, 68 + bool nopartial) 72 69 { 73 70 __be16 *pd = data; 74 71 size_t start = ntohs(pd[0]); 75 72 size_t offset = ntohs(pd[1]); 76 73 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); 77 74 78 - if (skb->remcsum_offload) { 79 - /* Already processed in GRO path */ 80 - skb->remcsum_offload = 0; 81 - return guehdr; 82 - } 83 - 84 75 if (!pskb_may_pull(skb, plen)) 85 76 return NULL; 86 77 guehdr = (struct guehdr *)&udp_hdr(skb)[1]; 87 78 88 - skb_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset); 79 + skb_remcsum_process(skb, (void *)guehdr + hdrlen, 80 + start, offset, nopartial); 89 81 90 82 return guehdr; 91 83 } ··· 142 142 143 143 if (flags & GUE_PFLAG_REMCSUM) { 144 144 guehdr = gue_remcsum(skb, guehdr, data + doffset, 145 - hdrlen, guehdr->proto_ctype); 145 + hdrlen, guehdr->proto_ctype, 146 + !!(fou->flags & 147 + FOU_F_REMCSUM_NOPARTIAL)); 146 148 if (!guehdr) 147 149 goto drop; 148 150 ··· 216 214 217 215 static struct guehdr *gue_gro_remcsum(struct sk_buff *skb, unsigned int off, 218 216 struct guehdr *guehdr, void *data, 219 - size_t hdrlen, u8 ipproto) 217 + size_t hdrlen, u8 ipproto, 218 + struct gro_remcsum *grc, bool nopartial) 220 219 { 221 220 __be16 *pd = data; 222 221 size_t start = ntohs(pd[0]); ··· 225 222 size_t plen = hdrlen + max_t(size_t, offset + sizeof(u16), start); 226 223 227 224 if (skb->remcsum_offload) 228 - return guehdr; 225 + return NULL; 229 226 230 227 if 
(!NAPI_GRO_CB(skb)->csum_valid) 231 228 return NULL; ··· 237 234 return NULL; 238 235 } 239 236 240 - skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, start, offset); 237 + skb_gro_remcsum_process(skb, (void *)guehdr + hdrlen, 238 + start, offset, grc, nopartial); 241 239 242 240 skb->remcsum_offload = 1; 243 241 ··· 258 254 void *data; 259 255 u16 doffset = 0; 260 256 int flush = 1; 257 + struct fou *fou = container_of(uoff, struct fou, udp_offloads); 258 + struct gro_remcsum grc; 259 + 260 + skb_gro_remcsum_init(&grc); 261 261 262 262 off = skb_gro_offset(skb); 263 263 len = off + sizeof(*guehdr); ··· 303 295 if (flags & GUE_PFLAG_REMCSUM) { 304 296 guehdr = gue_gro_remcsum(skb, off, guehdr, 305 297 data + doffset, hdrlen, 306 - guehdr->proto_ctype); 298 + guehdr->proto_ctype, &grc, 299 + !!(fou->flags & 300 + FOU_F_REMCSUM_NOPARTIAL)); 307 301 if (!guehdr) 308 302 goto out; 309 303 ··· 355 345 rcu_read_unlock(); 356 346 out: 357 347 NAPI_GRO_CB(skb)->flush |= flush; 348 + skb_gro_remcsum_cleanup(skb, &grc); 358 349 359 350 return pp; 360 351 } ··· 466 455 467 456 sk = sock->sk; 468 457 458 + fou->flags = cfg->flags; 469 459 fou->port = cfg->udp_config.local_udp_port; 470 460 471 461 /* Initial for fou type */ ··· 553 541 [FOU_ATTR_AF] = { .type = NLA_U8, }, 554 542 [FOU_ATTR_IPPROTO] = { .type = NLA_U8, }, 555 543 [FOU_ATTR_TYPE] = { .type = NLA_U8, }, 544 + [FOU_ATTR_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG, }, 556 545 }; 557 546 558 547 static int parse_nl_config(struct genl_info *info, ··· 583 570 584 571 if (info->attrs[FOU_ATTR_TYPE]) 585 572 cfg->type = nla_get_u8(info->attrs[FOU_ATTR_TYPE]); 573 + 574 + if (info->attrs[FOU_ATTR_REMCSUM_NOPARTIAL]) 575 + cfg->flags |= FOU_F_REMCSUM_NOPARTIAL; 586 576 587 577 return 0; 588 578 }
+24 -8
net/ipv4/tcp_fastopen.c
··· 134 134 struct tcp_sock *tp; 135 135 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; 136 136 struct sock *child; 137 + u32 end_seq; 137 138 138 139 req->num_retrans = 0; 139 140 req->num_timeout = 0; ··· 186 185 187 186 /* Queue the data carried in the SYN packet. We need to first 188 187 * bump skb's refcnt because the caller will attempt to free it. 188 + * Note that IPv6 might also have used skb_get() trick 189 + * in tcp_v6_conn_request() to keep this SYN around (treq->pktopts) 190 + * So we need to eventually get a clone of the packet, 191 + * before inserting it in sk_receive_queue. 189 192 * 190 193 * XXX (TFO) - we honor a zero-payload TFO request for now, 191 194 * (any reason not to?) but no need to queue the skb since 192 195 * there is no data. How about SYN+FIN? 193 196 */ 194 - if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1) { 195 - skb = skb_get(skb); 196 - skb_dst_drop(skb); 197 - __skb_pull(skb, tcp_hdr(skb)->doff * 4); 198 - skb_set_owner_r(skb, child); 199 - __skb_queue_tail(&child->sk_receive_queue, skb); 200 - tp->syn_data_acked = 1; 197 + end_seq = TCP_SKB_CB(skb)->end_seq; 198 + if (end_seq != TCP_SKB_CB(skb)->seq + 1) { 199 + struct sk_buff *skb2; 200 + 201 + if (unlikely(skb_shared(skb))) 202 + skb2 = skb_clone(skb, GFP_ATOMIC); 203 + else 204 + skb2 = skb_get(skb); 205 + 206 + if (likely(skb2)) { 207 + skb_dst_drop(skb2); 208 + __skb_pull(skb2, tcp_hdrlen(skb)); 209 + skb_set_owner_r(skb2, child); 210 + __skb_queue_tail(&child->sk_receive_queue, skb2); 211 + tp->syn_data_acked = 1; 212 + } else { 213 + end_seq = TCP_SKB_CB(skb)->seq + 1; 214 + } 201 215 } 202 - tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq; 216 + tcp_rsk(req)->rcv_nxt = tp->rcv_nxt = end_seq; 203 217 sk->sk_data_ready(sk); 204 218 bh_unlock_sock(child); 205 219 sock_put(child);
+12 -1
net/ipv4/udp_offload.c
··· 402 402 } 403 403 404 404 rcu_read_unlock(); 405 + 406 + if (skb->remcsum_offload) 407 + skb_shinfo(skb)->gso_type |= SKB_GSO_TUNNEL_REMCSUM; 408 + 409 + skb->encapsulation = 1; 410 + skb_set_inner_mac_header(skb, nhoff + sizeof(struct udphdr)); 411 + 405 412 return err; 406 413 } 407 414 ··· 417 410 const struct iphdr *iph = ip_hdr(skb); 418 411 struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); 419 412 420 - if (uh->check) 413 + if (uh->check) { 414 + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 421 415 uh->check = ~udp_v4_check(skb->len - nhoff, iph->saddr, 422 416 iph->daddr, 0); 417 + } else { 418 + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 419 + } 423 420 424 421 return udp_gro_complete(skb, nhoff); 425 422 }
+2 -2
net/ipv6/ip6_flowlabel.c
··· 172 172 { 173 173 int i; 174 174 175 - spin_lock(&ip6_fl_lock); 175 + spin_lock_bh(&ip6_fl_lock); 176 176 for (i = 0; i <= FL_HASH_MASK; i++) { 177 177 struct ip6_flowlabel *fl; 178 178 struct ip6_flowlabel __rcu **flp; ··· 190 190 flp = &fl->next; 191 191 } 192 192 } 193 - spin_unlock(&ip6_fl_lock); 193 + spin_unlock_bh(&ip6_fl_lock); 194 194 } 195 195 196 196 static struct ip6_flowlabel *fl_intern(struct net *net,
+1 -1
net/ipv6/ip6_output.c
··· 1273 1273 /* If this is the first and only packet and device 1274 1274 * supports checksum offloading, let's use it. 1275 1275 */ 1276 - if (!skb && 1276 + if (!skb && sk->sk_protocol == IPPROTO_UDP && 1277 1277 length + fragheaderlen < mtu && 1278 1278 rt->dst.dev->features & NETIF_F_V6_CSUM && 1279 1279 !exthdrlen)
+1 -1
net/ipv6/route.c
··· 141 141 u32 *p = NULL; 142 142 143 143 if (!(rt->dst.flags & DST_HOST)) 144 - return NULL; 144 + return dst_cow_metrics_generic(dst, old); 145 145 146 146 peer = rt6_get_peer_create(rt); 147 147 if (peer) {
+5 -1
net/ipv6/udp_offload.c
··· 161 161 const struct ipv6hdr *ipv6h = ipv6_hdr(skb); 162 162 struct udphdr *uh = (struct udphdr *)(skb->data + nhoff); 163 163 164 - if (uh->check) 164 + if (uh->check) { 165 + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM; 165 166 uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr, 166 167 &ipv6h->daddr, 0); 168 + } else { 169 + skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL; 170 + } 167 171 168 172 return udp_gro_complete(skb, nhoff); 169 173 }
+57 -6
net/netfilter/nft_compat.c
··· 19 19 #include <linux/netfilter/x_tables.h> 20 20 #include <linux/netfilter_ipv4/ip_tables.h> 21 21 #include <linux/netfilter_ipv6/ip6_tables.h> 22 + #include <linux/netfilter_bridge/ebtables.h> 22 23 #include <net/netfilter/nf_tables.h> 23 24 24 25 static int nft_compat_chain_validate_dependency(const char *tablename, ··· 41 40 union nft_entry { 42 41 struct ipt_entry e4; 43 42 struct ip6t_entry e6; 43 + struct ebt_entry ebt; 44 44 }; 45 45 46 46 static inline void ··· 52 50 par->hotdrop = false; 53 51 } 54 52 55 - static void nft_target_eval(const struct nft_expr *expr, 56 - struct nft_data data[NFT_REG_MAX + 1], 57 - const struct nft_pktinfo *pkt) 53 + static void nft_target_eval_xt(const struct nft_expr *expr, 54 + struct nft_data data[NFT_REG_MAX + 1], 55 + const struct nft_pktinfo *pkt) 58 56 { 59 57 void *info = nft_expr_priv(expr); 60 58 struct xt_target *target = expr->ops->data; ··· 68 66 if (pkt->xt.hotdrop) 69 67 ret = NF_DROP; 70 68 71 - switch(ret) { 69 + switch (ret) { 72 70 case XT_CONTINUE: 73 71 data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; 74 72 break; ··· 76 74 data[NFT_REG_VERDICT].verdict = ret; 77 75 break; 78 76 } 79 - return; 77 + } 78 + 79 + static void nft_target_eval_bridge(const struct nft_expr *expr, 80 + struct nft_data data[NFT_REG_MAX + 1], 81 + const struct nft_pktinfo *pkt) 82 + { 83 + void *info = nft_expr_priv(expr); 84 + struct xt_target *target = expr->ops->data; 85 + struct sk_buff *skb = pkt->skb; 86 + int ret; 87 + 88 + nft_compat_set_par((struct xt_action_param *)&pkt->xt, target, info); 89 + 90 + ret = target->target(skb, &pkt->xt); 91 + 92 + if (pkt->xt.hotdrop) 93 + ret = NF_DROP; 94 + 95 + switch (ret) { 96 + case EBT_ACCEPT: 97 + data[NFT_REG_VERDICT].verdict = NF_ACCEPT; 98 + break; 99 + case EBT_DROP: 100 + data[NFT_REG_VERDICT].verdict = NF_DROP; 101 + break; 102 + case EBT_CONTINUE: 103 + data[NFT_REG_VERDICT].verdict = NFT_CONTINUE; 104 + break; 105 + case EBT_RETURN: 106 + data[NFT_REG_VERDICT].verdict = 
NFT_RETURN; 107 + break; 108 + default: 109 + data[NFT_REG_VERDICT].verdict = ret; 110 + break; 111 + } 80 112 } 81 113 82 114 static const struct nla_policy nft_target_policy[NFTA_TARGET_MAX + 1] = { ··· 135 99 case AF_INET6: 136 100 entry->e6.ipv6.proto = proto; 137 101 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; 102 + break; 103 + case NFPROTO_BRIDGE: 104 + entry->ebt.ethproto = proto; 105 + entry->ebt.invflags = inv ? EBT_IPROTO : 0; 138 106 break; 139 107 } 140 108 par->entryinfo = entry; ··· 347 307 entry->e6.ipv6.proto = proto; 348 308 entry->e6.ipv6.invflags = inv ? IP6T_INV_PROTO : 0; 349 309 break; 310 + case NFPROTO_BRIDGE: 311 + entry->ebt.ethproto = proto; 312 + entry->ebt.invflags = inv ? EBT_IPROTO : 0; 313 + break; 350 314 } 351 315 par->entryinfo = entry; 352 316 par->match = match; ··· 534 490 case AF_INET6: 535 491 fmt = "ip6t_%s"; 536 492 break; 493 + case NFPROTO_BRIDGE: 494 + fmt = "ebt_%s"; 495 + break; 537 496 default: 538 497 pr_err("nft_compat: unsupported protocol %d\n", 539 498 nfmsg->nfgen_family); ··· 710 663 711 664 nft_target->ops.type = &nft_target_type; 712 665 nft_target->ops.size = NFT_EXPR_SIZE(XT_ALIGN(target->targetsize)); 713 - nft_target->ops.eval = nft_target_eval; 714 666 nft_target->ops.init = nft_target_init; 715 667 nft_target->ops.destroy = nft_target_destroy; 716 668 nft_target->ops.dump = nft_target_dump; 717 669 nft_target->ops.validate = nft_target_validate; 718 670 nft_target->ops.data = target; 671 + 672 + if (family == NFPROTO_BRIDGE) 673 + nft_target->ops.eval = nft_target_eval_bridge; 674 + else 675 + nft_target->ops.eval = nft_target_eval_xt; 719 676 720 677 list_add(&nft_target->head, &nft_target_list); 721 678
+1
net/netfilter/nft_lookup.c
··· 39 39 40 40 static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = { 41 41 [NFTA_LOOKUP_SET] = { .type = NLA_STRING }, 42 + [NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 }, 42 43 [NFTA_LOOKUP_SREG] = { .type = NLA_U32 }, 43 44 [NFTA_LOOKUP_DREG] = { .type = NLA_U32 }, 44 45 };
+2
net/openvswitch/flow.c
··· 717 717 { 718 718 int err; 719 719 720 + memset(key, 0, OVS_SW_FLOW_KEY_METADATA_SIZE); 721 + 720 722 /* Extract metadata from netlink attributes. */ 721 723 err = ovs_nla_get_flow_metadata(attr, key, log); 722 724 if (err)
+2 -2
net/openvswitch/flow_netlink.c
··· 1516 1516 /* Called with ovs_mutex or RCU read lock. */ 1517 1517 int ovs_nla_put_masked_key(const struct sw_flow *flow, struct sk_buff *skb) 1518 1518 { 1519 - return ovs_nla_put_key(&flow->mask->key, &flow->key, 1519 + return ovs_nla_put_key(&flow->key, &flow->key, 1520 1520 OVS_FLOW_ATTR_KEY, false, skb); 1521 1521 } 1522 1522 ··· 1746 1746 struct sw_flow_key key; 1747 1747 struct ovs_tunnel_info *tun_info; 1748 1748 struct nlattr *a; 1749 - int err, start, opts_type; 1749 + int err = 0, start, opts_type; 1750 1750 1751 1751 ovs_match_init(&match, &key, NULL); 1752 1752 opts_type = ipv4_tun_from_nlattr(nla_data(attr), &match, false, log);
+15 -1
net/rds/cong.c
··· 221 221 list_for_each_entry(conn, &map->m_conn_list, c_map_item) { 222 222 if (!test_and_set_bit(0, &conn->c_map_queued)) { 223 223 rds_stats_inc(s_cong_update_queued); 224 - rds_send_xmit(conn); 224 + /* We cannot inline the call to rds_send_xmit() here 225 + * for two reasons (both pertaining to a TCP transport): 226 + * 1. When we get here from the receive path, we 227 + * are already holding the sock_lock (held by 228 + * tcp_v4_rcv()). So inlining calls to 229 + * tcp_setsockopt and/or tcp_sendmsg will deadlock 230 + * when it tries to get the sock_lock()) 231 + * 2. Interrupts are masked so that we can mark the 232 + * the port congested from both send and recv paths. 233 + * (See comment around declaration of rdc_cong_lock). 234 + * An attempt to get the sock_lock() here will 235 + * therefore trigger warnings. 236 + * Defer the xmit to rds_send_worker() instead. 237 + */ 238 + queue_delayed_work(rds_wq, &conn->c_send_w, 0); 225 239 } 226 240 } 227 241