Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Infinite loop in _decode_session6(), from Eric Dumazet.

2) Pass correct argument to nla_strlcpy() in netfilter, also from Eric
Dumazet.

3) Out of bounds memory access in ipv6 srh code, from Mathieu Xhonneux.

4) NULL deref in XDP_REDIRECT handling of tun driver, from Toshiaki
Makita.

5) Incorrect idr release in cls_flower, from Paul Blakey.

6) Probe error handling fix in davinci_emac, from Dan Carpenter.

7) Memory leak in XPS configuration, from Alexander Duyck.

8) Use after free with cloned sockets in kcm, from Kirill Tkhai.

9) MTU handling fixes for ip_tunnel and ip6_tunnel, from Nicolas
Dichtel.

10) Fix UAPI hole in bpf data structure for 32-bit compat applications,
from Daniel Borkmann.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (33 commits)
bpf: fix uapi hole for 32 bit compat applications
net: usb: cdc_mbim: add flag FLAG_SEND_ZLP
ip6_tunnel: remove magic mtu value 0xFFF8
ip_tunnel: restore binding to ifaces with a large mtu
net: dsa: b53: Add BCM5389 support
kcm: Fix use-after-free caused by cloned sockets
net-sysfs: Fix memory leak in XPS configuration
ixgbe: fix parsing of TC actions for HW offload
net: ethernet: davinci_emac: fix error handling in probe()
net/ncsi: Fix array size in dumpit handler
cls_flower: Fix incorrect idr release when failing to modify rule
net/sonic: Use dma_mapping_error()
xfrm: Fix potential error pointer dereference in xfrm_bundle_create.
vhost_net: flush batched heads before trying to busy polling
tun: Fix NULL pointer dereference in XDP redirect
be2net: Fix error detection logic for BE3
net: qmi_wwan: Add Netgear Aircard 779S
mlxsw: spectrum: Forbid creation of VLAN 1 over port/LAG
atm: zatm: fix memcmp casting
iwlwifi: pcie: compare with number of IRQs requested for, not number of CPUs
...

Changed files: +192 -115
+1
Documentation/devicetree/bindings/net/dsa/b53.txt
···
    "brcm,bcm53128"
    "brcm,bcm5365"
    "brcm,bcm5395"
+   "brcm,bcm5389"
    "brcm,bcm5397"
    "brcm,bcm5398"
+2 -2
drivers/atm/zatm.c
···
 }
 
 
-static unsigned char eprom_try_esi(struct atm_dev *dev, unsigned short cmd,
-				   int offset, int swap)
+static int eprom_try_esi(struct atm_dev *dev, unsigned short cmd, int offset,
+			 int swap)
 {
 	unsigned char buf[ZEPROM_SIZE];
 	struct zatm_dev *zatm_dev;
+13
drivers/net/dsa/b53/b53_common.c
···
 		.duplex_reg = B53_DUPLEX_STAT_FE,
 	},
 	{
+		.chip_id = BCM5389_DEVICE_ID,
+		.dev_name = "BCM5389",
+		.vlans = 4096,
+		.enabled_ports = 0x1f,
+		.arl_entries = 4,
+		.cpu_port = B53_CPU_PORT,
+		.vta_regs = B53_VTA_REGS,
+		.duplex_reg = B53_DUPLEX_STAT_GE,
+		.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+		.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+	},
+	{
 		.chip_id = BCM5395_DEVICE_ID,
 		.dev_name = "BCM5395",
 		.vlans = 4096,
···
 		else
 			dev->chip_id = BCM5365_DEVICE_ID;
 		break;
+	case BCM5389_DEVICE_ID:
 	case BCM5395_DEVICE_ID:
 	case BCM5397_DEVICE_ID:
 	case BCM5398_DEVICE_ID:
+4 -1
drivers/net/dsa/b53/b53_mdio.c
···
 #define B53_BRCM_OUI_1	0x0143bc00
 #define B53_BRCM_OUI_2	0x03625c00
 #define B53_BRCM_OUI_3	0x00406000
+#define B53_BRCM_OUI_4	0x01410c00
 
 static int b53_mdio_probe(struct mdio_device *mdiodev)
 {
···
 	 */
 	if ((phy_id & 0xfffffc00) != B53_BRCM_OUI_1 &&
 	    (phy_id & 0xfffffc00) != B53_BRCM_OUI_2 &&
-	    (phy_id & 0xfffffc00) != B53_BRCM_OUI_3) {
+	    (phy_id & 0xfffffc00) != B53_BRCM_OUI_3 &&
+	    (phy_id & 0xfffffc00) != B53_BRCM_OUI_4) {
 		dev_err(&mdiodev->dev, "Unsupported device: 0x%08x\n", phy_id);
 		return -ENODEV;
 	}
···
 	{ .compatible = "brcm,bcm53125" },
 	{ .compatible = "brcm,bcm53128" },
 	{ .compatible = "brcm,bcm5365" },
+	{ .compatible = "brcm,bcm5389" },
 	{ .compatible = "brcm,bcm5395" },
 	{ .compatible = "brcm,bcm5397" },
 	{ .compatible = "brcm,bcm5398" },
+1
drivers/net/dsa/b53/b53_priv.h
···
 enum {
 	BCM5325_DEVICE_ID = 0x25,
 	BCM5365_DEVICE_ID = 0x65,
+	BCM5389_DEVICE_ID = 0x89,
 	BCM5395_DEVICE_ID = 0x95,
 	BCM5397_DEVICE_ID = 0x97,
 	BCM5398_DEVICE_ID = 0x98,
+3 -1
drivers/net/ethernet/emulex/benet/be_main.c
···
 		if ((val & POST_STAGE_FAT_LOG_START)
 		    != POST_STAGE_FAT_LOG_START &&
 		    (val & POST_STAGE_ARMFW_UE)
-		    != POST_STAGE_ARMFW_UE)
+		    != POST_STAGE_ARMFW_UE &&
+		    (val & POST_STAGE_RECOVERABLE_ERR)
+		    != POST_STAGE_RECOVERABLE_ERR)
 			return;
 	}
 
+4 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
···
 {
 	const struct tc_action *a;
 	LIST_HEAD(actions);
-	int err;
 
···
 
 			if (!dev)
 				return -EINVAL;
-			err = handle_redirect_action(adapter, dev->ifindex, queue,
-						     action);
-			if (err == 0)
-				return err;
+			return handle_redirect_action(adapter, dev->ifindex,
+						      queue, action);
 		}
+
+		return -EINVAL;
 	}
 
 	return -EINVAL;
+5
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
···
 			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
 			return -EINVAL;
 		}
+		if (is_vlan_dev(upper_dev) &&
+		    vlan_dev_vlan_id(upper_dev) == 1) {
+			NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic");
+			return -EINVAL;
+		}
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;
+1 -1
drivers/net/ethernet/natsemi/sonic.c
···
 	for (i = 0; i < SONIC_NUM_RRS; i++) {
 		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
 						  SONIC_RBSIZE, DMA_FROM_DEVICE);
-		if (!laddr) {
+		if (dma_mapping_error(lp->device, laddr)) {
 			while(i > 0) { /* free any that were mapped successfully */
 				i--;
 				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
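The sonic fix reflects the general rule: a DMA handle of 0 can be a valid bus
address on some platforms, and mapping failures are encoded per-device, so
`!laddr` both misses real failures and can reject a good mapping. A minimal
sketch of the portable pattern, with an illustrative function name (not from
sonic.c):

	/* Map a receive buffer; dma_mapping_error() is the only
	 * portable way to detect a failed dma_map_single(). */
	static int example_map_rx_buf(struct device *dev, void *buf,
				      size_t len, dma_addr_t *out)
	{
		dma_addr_t addr = dma_map_single(dev, buf, len,
						 DMA_FROM_DEVICE);

		if (dma_mapping_error(dev, addr))
			return -ENOMEM;	/* nothing mapped, nothing to unmap */

		*out = addr;
		return 0;
	}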
+2 -2
drivers/net/ethernet/socionext/netsec.c
···
 	if (ret)
 		goto unreg_napi;
 
-	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
-		dev_warn(&pdev->dev, "Failed to enable 64-bit DMA\n");
+	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40)))
+		dev_warn(&pdev->dev, "Failed to set DMA mask\n");
 
 	ret = register_netdev(ndev);
 	if (ret) {
+12 -10
drivers/net/ethernet/ti/davinci_emac.c
···
 	if (IS_ERR(priv->txchan)) {
 		dev_err(&pdev->dev, "error initializing tx dma channel\n");
 		rc = PTR_ERR(priv->txchan);
-		goto no_cpdma_chan;
+		goto err_free_dma;
 	}
 
 	priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
···
 	if (IS_ERR(priv->rxchan)) {
 		dev_err(&pdev->dev, "error initializing rx dma channel\n");
 		rc = PTR_ERR(priv->rxchan);
-		goto no_cpdma_chan;
+		goto err_free_txchan;
 	}
 
 	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
 	if (!res) {
 		dev_err(&pdev->dev, "error getting irq res\n");
 		rc = -ENOENT;
-		goto no_cpdma_chan;
+		goto err_free_rxchan;
 	}
 	ndev->irq = res->start;
 
···
 		pm_runtime_put_noidle(&pdev->dev);
 		dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
 			__func__, rc);
-		goto no_cpdma_chan;
+		goto err_napi_del;
 	}
 
 	/* register the network device */
···
 		dev_err(&pdev->dev, "error in register_netdev\n");
 		rc = -ENODEV;
 		pm_runtime_put(&pdev->dev);
-		goto no_cpdma_chan;
+		goto err_napi_del;
 	}
 
 
···
 
 	return 0;
 
-no_cpdma_chan:
-	if (priv->txchan)
-		cpdma_chan_destroy(priv->txchan);
-	if (priv->rxchan)
-		cpdma_chan_destroy(priv->rxchan);
+err_napi_del:
+	netif_napi_del(&priv->napi);
+err_free_rxchan:
+	cpdma_chan_destroy(priv->rxchan);
+err_free_txchan:
+	cpdma_chan_destroy(priv->txchan);
+err_free_dma:
 	cpdma_ctlr_destroy(priv->dma);
 no_pdata:
 	if (of_phy_is_fixed_link(np))
+9 -6
drivers/net/tun.c
···
 	else
 		*skb_xdp = 0;
 
-	preempt_disable();
+	local_bh_disable();
 	rcu_read_lock();
 	xdp_prog = rcu_dereference(tun->xdp_prog);
 	if (xdp_prog && !*skb_xdp) {
···
 			if (err)
 				goto err_redirect;
 			rcu_read_unlock();
-			preempt_enable();
+			local_bh_enable();
 			return NULL;
 		case XDP_TX:
 			get_page(alloc_frag->page);
···
 				goto err_redirect;
 			tun_xdp_flush(tun->dev);
 			rcu_read_unlock();
-			preempt_enable();
+			local_bh_enable();
 			return NULL;
 		case XDP_PASS:
 			delta = orig_data - xdp.data;
···
 	skb = build_skb(buf, buflen);
 	if (!skb) {
 		rcu_read_unlock();
-		preempt_enable();
+		local_bh_enable();
 		return ERR_PTR(-ENOMEM);
 	}
 
···
 	alloc_frag->offset += buflen;
 
 	rcu_read_unlock();
-	preempt_enable();
+	local_bh_enable();
 
 	return skb;
 
···
 	put_page(alloc_frag->page);
 err_xdp:
 	rcu_read_unlock();
-	preempt_enable();
+	local_bh_enable();
 	this_cpu_inc(tun->pcpu_stats->rx_dropped);
 	return NULL;
 }
···
 		struct bpf_prog *xdp_prog;
 		int ret;
 
+		local_bh_disable();
 		rcu_read_lock();
 		xdp_prog = rcu_dereference(tun->xdp_prog);
 		if (xdp_prog) {
 			ret = do_xdp_generic(xdp_prog, skb);
 			if (ret != XDP_PASS) {
 				rcu_read_unlock();
+				local_bh_enable();
 				return total_len;
 			}
 		}
 		rcu_read_unlock();
+		local_bh_enable();
 	}
 
 	rcu_read_lock();
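The tun change swaps preempt_disable() for local_bh_disable() because
XDP_REDIRECT parks its redirect target in per-CPU state that NAPI (softirq)
code on the same CPU also touches; disabling preemption alone still lets
softirqs run and clobber that state between the program's verdict and the
flush. A sketch of the resulting invariant, assuming only what the diff
shows:

	local_bh_disable();	/* keep softirqs (NAPI) off this CPU */
	rcu_read_lock();
	/* run the XDP program; for XDP_REDIRECT/XDP_TX, flush while
	 * the per-CPU redirect state is still ours */
	rcu_read_unlock();
	local_bh_enable();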
+1 -1
drivers/net/usb/cdc_mbim.c
···
  */
 static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
 	.description = "CDC MBIM",
-	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
+	.flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
 	.bind = cdc_mbim_bind,
 	.unbind = cdc_mbim_unbind,
 	.manage_power = cdc_mbim_manage_power,
+1
drivers/net/usb/qmi_wwan.c
···
 	{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
 	{QMI_QUIRK_SET_DTR(0x05c6, 0x9625, 4)},	/* YUGA CLM920-NC5 */
 	{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
+	{QMI_FIXED_INTF(0x0846, 0x68d3, 8)},	/* Netgear Aircard 779S */
 	{QMI_FIXED_INTF(0x12d1, 0x140c, 1)},	/* Huawei E173 */
 	{QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},	/* Huawei E1820 */
 	{QMI_FIXED_INTF(0x1435, 0xd181, 3)},	/* Wistron NeWeb D18Q1 */
+5 -5
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
···
 				   struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int max_irqs, num_irqs, i, ret, nr_online_cpus;
+	int max_irqs, num_irqs, i, ret;
 	u16 pci_cmd;
 
 	if (!trans->cfg->mq_rx_supported)
 		goto enable_msi;
 
-	nr_online_cpus = num_online_cpus();
-	max_irqs = min_t(u32, nr_online_cpus + 2, IWL_MAX_RX_HW_QUEUES);
+	max_irqs = min_t(u32, num_online_cpus() + 2, IWL_MAX_RX_HW_QUEUES);
 	for (i = 0; i < max_irqs; i++)
 		trans_pcie->msix_entries[i].entry = i;
 
···
 	 * Two interrupts less: non rx causes shared with FBQ and RSS.
 	 * More than two interrupts: we will use fewer RSS queues.
 	 */
-	if (num_irqs <= nr_online_cpus) {
+	if (num_irqs <= max_irqs - 2) {
 		trans_pcie->trans->num_rx_queues = num_irqs + 1;
 		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
 			IWL_SHARED_IRQ_FIRST_RSS;
-	} else if (num_irqs == nr_online_cpus + 1) {
+	} else if (num_irqs == max_irqs - 1) {
 		trans_pcie->trans->num_rx_queues = num_irqs;
 		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
 	} else {
 		trans_pcie->trans->num_rx_queues = num_irqs - 1;
 	}
+	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);
 
 	trans_pcie->alloc_vecs = num_irqs;
 	trans_pcie->msix_enabled = true;
+3 -4
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
···
 
 	/*
 	 * Determine IFS values
-	 * - Use TXOP_BACKOFF for probe and management frames except beacons
+	 * - Use TXOP_BACKOFF for management frames except beacons
 	 * - Use TXOP_SIFS for fragment bursts
 	 * - Use TXOP_HTTXOP for everything else
 	 *
 	 * Note: rt2800 devices won't use CTS protection (if used)
 	 * for frames not transmitted with TXOP_HTTXOP
 	 */
-	if ((ieee80211_is_mgmt(hdr->frame_control) &&
-	     !ieee80211_is_beacon(hdr->frame_control)) ||
-	    (tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE))
+	if (ieee80211_is_mgmt(hdr->frame_control) &&
+	    !ieee80211_is_beacon(hdr->frame_control))
 		txdesc->u.ht.txop = TXOP_BACKOFF;
 	else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT))
 		txdesc->u.ht.txop = TXOP_SIFS;
+24 -13
drivers/vhost/net.c
···
 	/* vhost zerocopy support fields below: */
 	/* last used idx for outstanding DMA zerocopy buffers */
 	int upend_idx;
-	/* first used idx for DMA done zerocopy buffers */
+	/* For TX, first used idx for DMA done zerocopy buffers
+	 * For RX, number of batched heads
+	 */
 	int done_idx;
 	/* an array of userspace buffers info */
 	struct ubuf_info *ubuf_info;
···
 	return skb_queue_empty(&sk->sk_receive_queue);
 }
 
+static void vhost_rx_signal_used(struct vhost_net_virtqueue *nvq)
+{
+	struct vhost_virtqueue *vq = &nvq->vq;
+	struct vhost_dev *dev = vq->dev;
+
+	if (!nvq->done_idx)
+		return;
+
+	vhost_add_used_and_signal_n(dev, vq, vq->heads, nvq->done_idx);
+	nvq->done_idx = 0;
+}
+
 static int vhost_net_rx_peek_head_len(struct vhost_net *net, struct sock *sk)
 {
 	struct vhost_net_virtqueue *rvq = &net->vqs[VHOST_NET_VQ_RX];
···
 	int len = peek_head_len(rvq, sk);
 
 	if (!len && vq->busyloop_timeout) {
+		/* Flush batched heads first */
+		vhost_rx_signal_used(rvq);
 		/* Both tx vq and rx socket were polled here */
 		mutex_lock_nested(&vq->mutex, 1);
 		vhost_disable_notify(&net->dev, vq);
···
 	};
 	size_t total_len = 0;
 	int err, mergeable;
-	s16 headcount, nheads = 0;
+	s16 headcount;
 	size_t vhost_hlen, sock_hlen;
 	size_t vhost_len, sock_len;
 	struct socket *sock;
···
 	while ((sock_len = vhost_net_rx_peek_head_len(net, sock->sk))) {
 		sock_len += sock_hlen;
 		vhost_len = sock_len + vhost_hlen;
-		headcount = get_rx_bufs(vq, vq->heads + nheads, vhost_len,
-					&in, vq_log, &log,
+		headcount = get_rx_bufs(vq, vq->heads + nvq->done_idx,
+					vhost_len, &in, vq_log, &log,
 					likely(mergeable) ? UIO_MAXIOV : 1);
 		/* On error, stop handling until the next kick. */
 		if (unlikely(headcount < 0))
···
 			vhost_discard_vq_desc(vq, headcount);
 			goto out;
 		}
-		nheads += headcount;
-		if (nheads > VHOST_RX_BATCH) {
-			vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-						    nheads);
-			nheads = 0;
-		}
+		nvq->done_idx += headcount;
+		if (nvq->done_idx > VHOST_RX_BATCH)
+			vhost_rx_signal_used(nvq);
 		if (unlikely(vq_log))
 			vhost_log_write(vq, vq_log, log, vhost_len);
 		total_len += vhost_len;
···
 	}
 	vhost_net_enable_vq(net, vq);
 out:
-	if (nheads)
-		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
-					    nheads);
+	vhost_rx_signal_used(nvq);
 	mutex_unlock(&vq->mutex);
 }
+2
include/uapi/linux/bpf.h
···
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
+	__u32 :32;
 	__u64 netns_dev;
 	__u64 netns_ino;
 } __attribute__((aligned(8)));
···
 	__u32 map_flags;
 	char name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
+	__u32 :32;
 	__u64 netns_dev;
 	__u64 netns_ino;
 } __attribute__((aligned(8)));
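The anonymous `__u32 :32` bitfield makes the compiler's padding explicit (the
same change is mirrored in the tools/ copy of the header below). On 64-bit
ABIs a plain __u64 is 8-byte aligned, so a 4-byte hole follows `ifindex`; on
32-bit x86, u64 members are 4-byte aligned and there is no hole, so the same
UAPI struct had two different layouts. A standalone sketch of the effect,
with illustrative struct names (compile with and without -m32):

	#include <stdint.h>
	#include <stddef.h>
	#include <stdio.h>

	struct without_pad {
		uint32_t a;
		uint64_t b;	/* implicit hole before this on 64-bit only */
	};

	struct with_pad {
		uint32_t a;
		uint32_t :32;	/* explicit hole: identical layout on both ABIs */
		uint64_t b;
	};

	int main(void)
	{
		/* 64-bit: 8 and 8; 32-bit (-m32): 4 and 8 */
		printf("without: %zu, with: %zu\n",
		       offsetof(struct without_pad, b),
		       offsetof(struct with_pad, b));
		return 0;
	}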
+2 -1
net/bridge/netfilter/ebtables.c
···
 	int off, pad = 0;
 	unsigned int size_kern, match_size = mwt->match_size;
 
-	strlcpy(name, mwt->u.name, sizeof(name));
+	if (strscpy(name, mwt->u.name, sizeof(name)) < 0)
+		return -EINVAL;
 
 	if (state->buf_kern_start)
 		dst = state->buf_kern_start + state->buf_kern_offset;
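The switch from strlcpy() to strscpy() is what makes the length check
possible: strlcpy() returns strlen(source) and truncates silently, while
strscpy() returns the number of characters copied, or -E2BIG when the
NUL-terminated source does not fit. A short sketch of the calling pattern
(buffer size and source name are illustrative):

	char name[32];
	ssize_t n;

	n = strscpy(name, untrusted_name, sizeof(name));
	if (n < 0)		/* would have been truncated */
		return -EINVAL;	/* reject rather than match on a prefix */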
+3 -3
net/core/net-sysfs.c
···
 	cpumask_var_t mask;
 	unsigned long index;
 
-	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-		return -ENOMEM;
-
 	index = get_netdev_queue_index(queue);
 
 	if (dev->num_tc) {
···
 		if (tc < 0)
 			return -EINVAL;
 	}
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
 
 	rcu_read_lock();
 	dev_maps = rcu_dereference(dev->xps_maps);
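The XPS leak was the usual allocate-then-early-return shape: the cpumask was
allocated before the tc validation, and the -EINVAL path returned without
freeing it. Reordering so validation happens first means the error paths
hold nothing. A condensed sketch of before and after:

	/* before: mask leaks on the -EINVAL return */
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	if (tc < 0)
		return -EINVAL;		/* leak */

	/* after: validate first, allocate last */
	if (tc < 0)
		return -EINVAL;
	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;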
+4 -4
net/ipv4/ip_tunnel.c
···
 
 	if (tdev) {
 		hlen = tdev->hard_header_len + tdev->needed_headroom;
-		mtu = tdev->mtu;
+		mtu = min(tdev->mtu, IP_MAX_MTU);
 	}
 
 	dev->needed_headroom = t_hlen + hlen;
···
 	nt = netdev_priv(dev);
 	t_hlen = nt->hlen + sizeof(struct iphdr);
 	dev->min_mtu = ETH_MIN_MTU;
-	dev->max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
+	dev->max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
 	ip_tunnel_add(itn, nt);
 	return nt;
 
···
 {
 	struct ip_tunnel *tunnel = netdev_priv(dev);
 	int t_hlen = tunnel->hlen + sizeof(struct iphdr);
-	int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
+	int max_mtu = IP_MAX_MTU - dev->hard_header_len - t_hlen;
 
 	if (new_mtu < ETH_MIN_MTU)
 		return -EINVAL;
···
 
 	mtu = ip_tunnel_bind_dev(dev);
 	if (tb[IFLA_MTU]) {
-		unsigned int max = 0xfff8 - dev->hard_header_len - nt->hlen;
+		unsigned int max = IP_MAX_MTU - dev->hard_header_len - nt->hlen;
 
 		mtu = clamp(dev->mtu, (unsigned int)ETH_MIN_MTU,
 			    (unsigned int)(max - sizeof(struct iphdr)));
+8 -3
net/ipv6/ip6_tunnel.c
···
 		if (new_mtu < ETH_MIN_MTU)
 			return -EINVAL;
 	}
-	if (new_mtu > 0xFFF8 - dev->hard_header_len)
-		return -EINVAL;
+	if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
+		if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
+			return -EINVAL;
+	} else {
+		if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
+			return -EINVAL;
+	}
 	dev->mtu = new_mtu;
 	return 0;
 }
···
 	if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
 		dev->mtu -= 8;
 	dev->min_mtu = ETH_MIN_MTU;
-	dev->max_mtu = 0xFFF8 - dev->hard_header_len;
+	dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
 
 	return 0;
 
+2 -2
net/ipv6/seg6_iptunnel.c
···
 	hdrlen = (osrh->hdrlen + 1) << 3;
 	tot_len = hdrlen + sizeof(*hdr);
 
-	err = skb_cow_head(skb, tot_len);
+	err = skb_cow_head(skb, tot_len + skb->mac_len);
 	if (unlikely(err))
 		return err;
 
···
 
 	hdrlen = (osrh->hdrlen + 1) << 3;
 
-	err = skb_cow_head(skb, hdrlen);
+	err = skb_cow_head(skb, hdrlen + skb->mac_len);
 	if (unlikely(err))
 		return err;
 
+3 -2
net/ipv6/sit.c
···
 	dev->hard_header_len = LL_MAX_HEADER + t_hlen;
 	dev->mtu = ETH_DATA_LEN - t_hlen;
 	dev->min_mtu = IPV6_MIN_MTU;
-	dev->max_mtu = 0xFFF8 - t_hlen;
+	dev->max_mtu = IP6_MAX_MTU - t_hlen;
 	dev->flags = IFF_NOARP;
 	netif_keep_dst(dev);
 	dev->addr_len = 4;
···
 	if (tb[IFLA_MTU]) {
 		u32 mtu = nla_get_u32(tb[IFLA_MTU]);
 
-		if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
+		if (mtu >= IPV6_MIN_MTU &&
+		    mtu <= IP6_MAX_MTU - dev->hard_header_len)
 			dev->mtu = mtu;
 	}
 
+1 -1
net/ipv6/xfrm6_policy.c
···
 	struct flowi6 *fl6 = &fl->u.ip6;
 	int onlyproto = 0;
 	const struct ipv6hdr *hdr = ipv6_hdr(skb);
-	u16 offset = sizeof(*hdr);
+	u32 offset = sizeof(*hdr);
 	struct ipv6_opt_hdr *exthdr;
 	const unsigned char *nh = skb_network_header(skb);
 	u16 nhoff = IP6CB(skb)->nhoff;
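This one-type change is the fix for the _decode_session6() infinite loop in
item 1 above: a chain of IPv6 extension headers can legitimately push the
parse offset past 65535, and a u16 offset wraps around and revisits headers
it has already seen. A standalone illustration of the wraparound:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t off16 = 65532;	/* parse offset near the u16 limit */
		uint32_t off32 = 65532;

		off16 += 8;	/* wraps to 4: the parser moves backwards */
		off32 += 8;	/* 65540: strictly forward, loop terminates */

		printf("u16: %u, u32: %u\n", off16, off32);
		return 0;
	}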
+1 -1
net/kcm/kcmsock.c
···
 	__module_get(newsock->ops->owner);
 
 	newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
-			 &kcm_proto, true);
+			 &kcm_proto, false);
 	if (!newsk) {
 		sock_release(newsock);
 		return ERR_PTR(-ENOMEM);
+1 -1
net/ncsi/ncsi-netlink.c
···
 static int ncsi_pkg_info_all_nl(struct sk_buff *skb,
 				struct netlink_callback *cb)
 {
-	struct nlattr *attrs[NCSI_ATTR_MAX];
+	struct nlattr *attrs[NCSI_ATTR_MAX + 1];
 	struct ncsi_package *np, *package;
 	struct ncsi_dev_priv *ndp;
 	unsigned int package_id;
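Netlink attribute tables are indexed directly by attribute type, and types
run from 0 through the MAX constant inclusive, so the backing array needs
MAX + 1 slots; sized at MAX, parsing the highest attribute writes one
element out of bounds. The general shape of the declaration:

	struct nlattr *attrs[NCSI_ATTR_MAX + 1];	/* valid indices: 0..NCSI_ATTR_MAX */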
+15 -6
net/netfilter/ipvs/ip_vs_ctl.c
···
 		struct ipvs_sync_daemon_cfg cfg;
 
 		memset(&cfg, 0, sizeof(cfg));
-		strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
-			sizeof(cfg.mcast_ifn));
+		ret = -EINVAL;
+		if (strscpy(cfg.mcast_ifn, dm->mcast_ifn,
+			    sizeof(cfg.mcast_ifn)) <= 0)
+			goto out_dec;
 		cfg.syncid = dm->syncid;
 		ret = start_sync_thread(ipvs, &cfg, dm->state);
 	} else {
···
 		}
 	}
 
+	if ((cmd == IP_VS_SO_SET_ADD || cmd == IP_VS_SO_SET_EDIT) &&
+	    strnlen(usvc.sched_name, IP_VS_SCHEDNAME_MAXLEN) ==
+	    IP_VS_SCHEDNAME_MAXLEN) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	/* Check for valid protocol: TCP or UDP or SCTP, even for fwmark!=0 */
 	if (usvc.protocol != IPPROTO_TCP && usvc.protocol != IPPROTO_UDP &&
 	    usvc.protocol != IPPROTO_SCTP) {
-		pr_err("set_ctl: invalid protocol: %d %pI4:%d %s\n",
+		pr_err("set_ctl: invalid protocol: %d %pI4:%d\n",
 		       usvc.protocol, &usvc.addr.ip,
-		       ntohs(usvc.port), usvc.sched_name);
+		       ntohs(usvc.port));
 		ret = -EFAULT;
 		goto out_unlock;
 	}
···
 static const struct nla_policy ip_vs_daemon_policy[IPVS_DAEMON_ATTR_MAX + 1] = {
 	[IPVS_DAEMON_ATTR_STATE]	= { .type = NLA_U32 },
 	[IPVS_DAEMON_ATTR_MCAST_IFN]	= { .type = NLA_NUL_STRING,
-					    .len = IP_VS_IFNAME_MAXLEN },
+					    .len = IP_VS_IFNAME_MAXLEN - 1 },
 	[IPVS_DAEMON_ATTR_SYNC_ID]	= { .type = NLA_U32 },
 	[IPVS_DAEMON_ATTR_SYNC_MAXLEN]	= { .type = NLA_U16 },
 	[IPVS_DAEMON_ATTR_MCAST_GROUP]	= { .type = NLA_U32 },
···
 	[IPVS_SVC_ATTR_PORT]		= { .type = NLA_U16 },
 	[IPVS_SVC_ATTR_FWMARK]		= { .type = NLA_U32 },
 	[IPVS_SVC_ATTR_SCHED_NAME]	= { .type = NLA_NUL_STRING,
-					    .len = IP_VS_SCHEDNAME_MAXLEN },
+					    .len = IP_VS_SCHEDNAME_MAXLEN - 1 },
 	[IPVS_SVC_ATTR_PE_NAME]		= { .type = NLA_NUL_STRING,
 					    .len = IP_VS_PENAME_MAXLEN },
 	[IPVS_SVC_ATTR_FLAGS]		= { .type = NLA_BINARY,
+5 -3
net/netfilter/nf_tables_api.c
···
 		rcu_assign_pointer(chain->stats, newstats);
 		synchronize_rcu();
 		free_percpu(oldstats);
-	} else
+	} else {
 		rcu_assign_pointer(chain->stats, newstats);
+		static_branch_inc(&nft_counters_enabled);
+	}
 }
 
 static void nf_tables_chain_destroy(struct nft_ctx *ctx)
···
 		if (idx > s_idx)
 			memset(&cb->args[1], 0,
 			       sizeof(cb->args) - sizeof(cb->args[0]));
-		if (filter && filter->table[0] &&
+		if (filter && filter->table &&
 		    strcmp(filter->table, table->name))
 			goto cont;
 		if (filter &&
···
 		if (idx > s_idx)
 			memset(&cb->args[1], 0,
 			       sizeof(cb->args) - sizeof(cb->args[0]));
-		if (filter && filter->table[0] &&
+		if (filter && filter->table &&
 		    strcmp(filter->table, table->name))
 			goto cont;
 
+2 -2
net/netfilter/nf_tables_core.c
···
 	if (!base_chain->stats)
 		return;
 
+	local_bh_disable();
 	stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
 	if (stats) {
-		local_bh_disable();
 		u64_stats_update_begin(&stats->syncp);
 		stats->pkts++;
 		stats->bytes += pkt->skb->len;
 		u64_stats_update_end(&stats->syncp);
-		local_bh_enable();
 	}
+	local_bh_enable();
 }
 
 struct nft_jumpstack {
+12 -8
net/netfilter/nft_ct.c
···
 			      struct nft_object *obj, bool reset)
 {
 	const struct nft_ct_helper_obj *priv = nft_obj_data(obj);
-	const struct nf_conntrack_helper *helper = priv->helper4;
+	const struct nf_conntrack_helper *helper;
 	u16 family;
+
+	if (priv->helper4 && priv->helper6) {
+		family = NFPROTO_INET;
+		helper = priv->helper4;
+	} else if (priv->helper6) {
+		family = NFPROTO_IPV6;
+		helper = priv->helper6;
+	} else {
+		family = NFPROTO_IPV4;
+		helper = priv->helper4;
+	}
 
 	if (nla_put_string(skb, NFTA_CT_HELPER_NAME, helper->name))
 		return -1;
 
 	if (nla_put_u8(skb, NFTA_CT_HELPER_L4PROTO, priv->l4proto))
 		return -1;
-
-	if (priv->helper4 && priv->helper6)
-		family = NFPROTO_INET;
-	else if (priv->helper6)
-		family = NFPROTO_IPV6;
-	else
-		family = NFPROTO_IPV4;
 
 	if (nla_put_be16(skb, NFTA_CT_HELPER_L3PROTO, htons(family)))
 		return -1;
+24 -14
net/netfilter/nft_limit.c
···
 	return !limit->invert;
 }
 
+/* Use same default as in iptables. */
+#define NFT_LIMIT_PKT_BURST_DEFAULT	5
+
 static int nft_limit_init(struct nft_limit *limit,
-			  const struct nlattr * const tb[])
+			  const struct nlattr * const tb[], bool pkts)
 {
-	u64 unit;
+	u64 unit, tokens;
 
 	if (tb[NFTA_LIMIT_RATE] == NULL ||
 	    tb[NFTA_LIMIT_UNIT] == NULL)
···
 
 	if (tb[NFTA_LIMIT_BURST])
 		limit->burst = ntohl(nla_get_be32(tb[NFTA_LIMIT_BURST]));
-	else
-		limit->burst = 0;
+
+	if (pkts && limit->burst == 0)
+		limit->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
 
 	if (limit->rate + limit->burst < limit->rate)
 		return -EOVERFLOW;
 
-	/* The token bucket size limits the number of tokens can be
-	 * accumulated. tokens_max specifies the bucket size.
-	 * tokens_max = unit * (rate + burst) / rate.
-	 */
-	limit->tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
-				limit->rate);
+	if (pkts) {
+		tokens = div_u64(limit->nsecs, limit->rate) * limit->burst;
+	} else {
+		/* The token bucket size limits the number of tokens can be
+		 * accumulated. tokens_max specifies the bucket size.
+		 * tokens_max = unit * (rate + burst) / rate.
+		 */
+		tokens = div_u64(limit->nsecs * (limit->rate + limit->burst),
+				 limit->rate);
+	}
+
+	limit->tokens = tokens;
 	limit->tokens_max = limit->tokens;
 
 	if (tb[NFTA_LIMIT_FLAGS]) {
···
 	struct nft_limit_pkts *priv = nft_expr_priv(expr);
 	int err;
 
-	err = nft_limit_init(&priv->limit, tb);
+	err = nft_limit_init(&priv->limit, tb, true);
 	if (err < 0)
 		return err;
 
···
 {
 	struct nft_limit *priv = nft_expr_priv(expr);
 
-	return nft_limit_init(priv, tb);
+	return nft_limit_init(priv, tb, false);
 }
 
 static int nft_limit_bytes_dump(struct sk_buff *skb,
···
 	struct nft_limit_pkts *priv = nft_obj_data(obj);
 	int err;
 
-	err = nft_limit_init(&priv->limit, tb);
+	err = nft_limit_init(&priv->limit, tb, true);
 	if (err < 0)
 		return err;
 
···
 {
 	struct nft_limit *priv = nft_obj_data(obj);
 
-	return nft_limit_init(priv, tb);
+	return nft_limit_init(priv, tb, false);
}

static int nft_limit_obj_bytes_dump(struct sk_buff *skb,
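In packet mode the bucket is now sized as (nsecs / rate) * burst: each
packet costs nsecs/rate nanoseconds of credit, and the bucket holds a
burst's worth. A worked example with illustrative numbers (rate of 10
packets per 1-second unit, default burst of 5):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t nsecs = 1000000000ULL;	/* 1-second unit in ns */
		uint64_t rate  = 10;		/* 10 packets per unit */
		uint64_t burst = 5;		/* iptables-style default */

		/* one packet costs 1e8 ns of credit; the bucket holds
		 * five packets' worth = 5e8 ns */
		uint64_t tokens = nsecs / rate * burst;

		printf("bucket: %llu ns (%llu packets)\n",
		       (unsigned long long)tokens,
		       (unsigned long long)(tokens / (nsecs / rate)));
		return 0;
	}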
+8 -6
net/netfilter/nft_meta.c
···
 	struct sk_buff *skb = pkt->skb;
 	u32 *sreg = &regs->data[meta->sreg];
 	u32 value = *sreg;
-	u8 pkt_type;
+	u8 value8;
 
 	switch (meta->key) {
 	case NFT_META_MARK:
···
 		skb->priority = value;
 		break;
 	case NFT_META_PKTTYPE:
-		pkt_type = nft_reg_load8(sreg);
+		value8 = nft_reg_load8(sreg);
 
-		if (skb->pkt_type != pkt_type &&
-		    skb_pkt_type_ok(pkt_type) &&
+		if (skb->pkt_type != value8 &&
+		    skb_pkt_type_ok(value8) &&
 		    skb_pkt_type_ok(skb->pkt_type))
-			skb->pkt_type = pkt_type;
+			skb->pkt_type = value8;
 		break;
 	case NFT_META_NFTRACE:
-		skb->nf_trace = !!value;
+		value8 = nft_reg_load8(sreg);
+
+		skb->nf_trace = !!value8;
 		break;
 	default:
 		WARN_ON(1);
+1 -1
net/sched/cls_flower.c
···
 	return 0;
 
 errout_idr:
-	if (fnew->handle)
+	if (!fold)
 		idr_remove(&head->handle_idr, fnew->handle);
 errout:
 	tcf_exts_destroy(&fnew->exts);
+2 -3
net/xfrm/xfrm_policy.c
···
 		trailer_len -= xdst_prev->u.dst.xfrm->props.trailer_len;
 	}
 
-out:
 	return &xdst0->u.dst;
 
 put_states:
···
 free_dst:
 	if (xdst0)
 		dst_release_immediate(&xdst0->u.dst);
-	xdst0 = ERR_PTR(err);
-	goto out;
+
+	return ERR_PTR(err);
 }
 
 static int xfrm_expand_policies(const struct flowi *fl, u16 family,
+2
tools/include/uapi/linux/bpf.h
···
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
+	__u32 :32;
 	__u64 netns_dev;
 	__u64 netns_ino;
 } __attribute__((aligned(8)));
···
 	__u32 map_flags;
 	char name[BPF_OBJ_NAME_LEN];
 	__u32 ifindex;
+	__u32 :32;
 	__u64 netns_dev;
 	__u64 netns_ino;
 } __attribute__((aligned(8)));