Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) Validate tunnel options length in act_tunnel_key, from Xin Long.

2) Fix DMA sync bug in gve driver, from Adi Suresh.

3) TSO kills performance on some r8169 chips due to HW issues, so disable
   it by default on those chips, from Corinna Vinschen.

4) Fix clock disable mismatch in fec driver, from Chuhong Yuan.

5) Fix interrupt status bit mask definition in hns3 driver, from Huazhong Tan.

6) Fix workqueue deadlocks in qeth driver, from Julian Wiedmann.

7) Don't napi_disable() twice in r8152 driver, from Hayes Wang.

8) Fix SKB extension memory leak, from Florian Westphal.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (54 commits)
r8152: avoid to call napi_disable twice
MAINTAINERS: Add myself as maintainer of virtio-vsock
udp: drop skb extensions before marking skb stateless
net: rtnetlink: prevent underflows in do_setvfinfo()
can: m_can_platform: remove unnecessary m_can_class_resume() call
can: m_can_platform: set net_device structure as driver data
hv_netvsc: Fix send_table offset in case of a host bug
hv_netvsc: Fix offset usage in netvsc_send_table()
net-ipv6: IPV6_TRANSPARENT - check NET_RAW prior to NET_ADMIN
sfc: Only cancel the PPS workqueue if it exists
nfc: port100: handle command failure cleanly
net-sysfs: fix netdev_queue_add_kobject() breakage
r8152: Re-order napi_disable in rtl8152_close
net: qca_spi: Move reset_count to struct qcaspi
net: qca_spi: fix receive buffer size check
net/ibmvnic: Ignore H_FUNCTION return from H_EOI to tolerate XIVE mode
Revert "net/ibmvnic: Fix EOI when running in XIVE mode"
net/mlxfw: Verify FSM error code translation doesn't exceed array size
net/mlx5: Update the list of the PCI supported devices
net/mlx5: Fix auto group size calculation
...

+2 -1
MAINTAINERS
···
 
 FORCEDETH GIGABIT ETHERNET DRIVER
 M:	Rain River <rain.1986.08.12@gmail.com>
-M:	Zhu Yanjun <yanjun.zhu@oracle.com>
+M:	Zhu Yanjun <zyjzyj2000@gmail.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/nvidia/*
···
 
 VIRTIO AND VHOST VSOCK DRIVER
 M:	Stefan Hajnoczi <stefanha@redhat.com>
+M:	Stefano Garzarella <sgarzare@redhat.com>
 L:	kvm@vger.kernel.org
 L:	virtualization@lists.linux-foundation.org
 L:	netdev@vger.kernel.org
+1 -3
drivers/net/can/m_can/m_can_platform.c
···
 
 	mcan_class->is_peripheral = false;
 
-	platform_set_drvdata(pdev, mcan_class->dev);
+	platform_set_drvdata(pdev, mcan_class->net);
 
 	m_can_init_ram(mcan_class);
 
···
 	err = clk_prepare_enable(mcan_class->cclk);
 	if (err)
 		clk_disable_unprepare(mcan_class->hclk);
-
-	m_can_class_resume(dev);
 
 	return err;
 }
+11 -4
drivers/net/ethernet/freescale/fec_main.c
···
 	struct net_device *ndev = platform_get_drvdata(pdev);
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		return ret;
 
 	cancel_work_sync(&fep->tx_timeout_work);
 	fec_ptp_stop(pdev);
···
 	fec_enet_mii_remove(fep);
 	if (fep->reg_phy)
 		regulator_disable(fep->reg_phy);
-	pm_runtime_put(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
-	clk_disable_unprepare(fep->clk_ahb);
-	clk_disable_unprepare(fep->clk_ipg);
+
 	if (of_phy_is_fixed_link(np))
 		of_phy_deregister_fixed_link(np);
 	of_node_put(fep->phy_node);
 	free_netdev(ndev);
+
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
 
 	return 0;
 }
+5 -4
drivers/net/ethernet/google/gve/gve_tx.c
···
 static void gve_dma_sync_for_device(struct device *dev, dma_addr_t *page_buses,
 				    u64 iov_offset, u64 iov_len)
 {
+	u64 last_page = (iov_offset + iov_len - 1) / PAGE_SIZE;
+	u64 first_page = iov_offset / PAGE_SIZE;
 	dma_addr_t dma;
-	u64 addr;
+	u64 page;
 
-	for (addr = iov_offset; addr < iov_offset + iov_len;
-	     addr += PAGE_SIZE) {
-		dma = page_buses[addr / PAGE_SIZE];
+	for (page = first_page; page <= last_page; page++) {
+		dma = page_buses[page];
 		dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 	}
 }
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
···
 #define HCLGE_GLOBAL_RESET_BIT		0
 #define HCLGE_CORE_RESET_BIT		1
 #define HCLGE_IMP_RESET_BIT		2
-#define HCLGE_RESET_INT_M		GENMASK(2, 0)
+#define HCLGE_RESET_INT_M		GENMASK(7, 5)
 #define HCLGE_FUN_RST_ING		0x20C00
 #define HCLGE_FUN_RST_ING_B		0
 
+8 -3
drivers/net/ethernet/ibm/ibmvnic.c
···
 
 	if (test_bit(0, &adapter->resetting) &&
 	    adapter->reset_reason == VNIC_RESET_MOBILITY) {
-		struct irq_desc *desc = irq_to_desc(scrq->irq);
-		struct irq_chip *chip = irq_desc_get_chip(desc);
+		u64 val = (0xff000000) | scrq->hw_irq;
 
-		chip->irq_eoi(&desc->irq_data);
+		rc = plpar_hcall_norets(H_EOI, val);
+		/* H_EOI would fail with rc = H_FUNCTION when running
+		 * in XIVE mode which is expected, but not an error.
+		 */
+		if (rc && (rc != H_FUNCTION))
+			dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
+				val, rc);
 	}
 
 	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
+5 -4
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
···
 		err = mlx4_en_get_flow(dev, cmd, cmd->fs.location);
 		break;
 	case ETHTOOL_GRXCLSRLALL:
+		cmd->data = MAX_NUM_OF_FS_RULES;
 		while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) {
 			err = mlx4_en_get_flow(dev, cmd, i);
 			if (!err)
···
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile new_prof;
 	struct mlx4_en_priv *tmp;
+	int total_tx_count;
 	int port_up = 0;
 	int xdp_count;
 	int err = 0;
···
 
 	mutex_lock(&mdev->state_lock);
 	xdp_count = priv->tx_ring_num[TX_XDP] ? channel->rx_count : 0;
-	if (channel->tx_count * priv->prof->num_up + xdp_count >
-	    priv->mdev->profile.max_num_tx_rings_p_up * priv->prof->num_up) {
+	total_tx_count = channel->tx_count * priv->prof->num_up + xdp_count;
+	if (total_tx_count > MAX_TX_RINGS) {
 		err = -EINVAL;
 		en_err(priv,
 		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
-		       channel->tx_count * priv->prof->num_up + xdp_count,
-		       MAX_TX_RINGS);
+		       total_tx_count, MAX_TX_RINGS);
 		goto out;
 	}
 
+9
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
···
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_port_profile new_prof;
 	struct mlx4_en_priv *tmp;
+	int total_count;
 	int port_up = 0;
 	int err = 0;
 
···
 			MLX4_EN_NUM_UP_HIGH;
 	new_prof.tx_ring_num[TX] = new_prof.num_tx_rings_p_up *
 				   new_prof.num_up;
+	total_count = new_prof.tx_ring_num[TX] + new_prof.tx_ring_num[TX_XDP];
+	if (total_count > MAX_TX_RINGS) {
+		err = -EINVAL;
+		en_err(priv,
+		       "Total number of TX and XDP rings (%d) exceeds the maximum supported (%d)\n",
+		       total_count, MAX_TX_RINGS);
+		goto out;
+	}
 	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof, true);
 	if (err)
 		goto out;
+12 -6
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
···
 	if (max_encap_size < ipv4_encap_size) {
 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
 			       ipv4_encap_size, max_encap_size);
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto out;
 	}
 
 	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
-	if (!encap_header)
-		return -ENOMEM;
+	if (!encap_header) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
 	 * entry in the neigh hash table when a user deletes a rule
···
 	if (max_encap_size < ipv6_encap_size) {
 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
 			       ipv6_encap_size, max_encap_size);
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto out;
 	}
 
 	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
-	if (!encap_header)
-		return -ENOMEM;
+	if (!encap_header) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
 	 * entry in the neigh hash table when a user deletes a rule
+6 -6
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
···
 
 static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
 						   u32 eth_proto_cap,
-						   u8 connector_type)
+						   u8 connector_type, bool ext)
 {
-	if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+	if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
 		if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
 				   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
 				   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
···
 	[MLX5E_PORT_OTHER]		= PORT_OTHER,
 };
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type)
+static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
 {
-	if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+	if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
 		return ptys2connector_type[connector_type];
 
 	if (eth_proto &
···
 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
 
 	link_ksettings->base.port = get_connector_port(eth_proto_oper,
-						       connector_type);
+						       connector_type, ext);
 	ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-					       connector_type);
+					       connector_type, ext);
 	get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
 	if (an_status == MLX5_AN_COMPLETE)
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
···
 
 	switch (proto) {
 	case IPPROTO_GRE:
+		return features;
 	case IPPROTO_IPIP:
 	case IPPROTO_IPV6:
-		return features;
+		if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
+			return features;
+		break;
 	case IPPROTO_UDP:
 		udph = udp_hdr(skb);
 		port = be16_to_cpu(udph->dest);
+15 -16
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
···
 
 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
-			if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
+			if (encap) {
+				parse_attr->mirred_ifindex[attr->out_count] =
+					out_dev->ifindex;
+				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+				if (!parse_attr->tun_info[attr->out_count])
+					return -ENOMEM;
+				encap = false;
+				attr->dests[attr->out_count].flags |=
+					MLX5_ESW_DEST_ENCAP;
+				attr->out_count++;
+				/* attr->dests[].rep is resolved when we
+				 * handle encap
+				 */
+			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
 				struct net_device *uplink_upper;
···
 				attr->dests[attr->out_count].rep = rpriv->rep;
 				attr->dests[attr->out_count].mdev = out_priv->mdev;
 				attr->out_count++;
-			} else if (encap) {
-				parse_attr->mirred_ifindex[attr->out_count] =
-					out_dev->ifindex;
-				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
-				if (!parse_attr->tun_info[attr->out_count])
-					return -ENOMEM;
-				encap = false;
-				attr->dests[attr->out_count].flags |=
-					MLX5_ESW_DEST_ENCAP;
-				attr->out_count++;
-				/* attr->dests[].rep is resolved when we
-				 * handle encap
-				 */
 			} else if (parse_attr->filter_dev != priv->netdev) {
 				/* All mlx5 devices are called to configure
 				 * high level device filters. Therefore, the
···
 				  struct tc_cls_matchall_offload *ma)
 {
 	struct netlink_ext_ack *extack = ma->common.extack;
-	int prio = TC_H_MAJ(ma->common.prio) >> 16;
 
-	if (prio != 1) {
+	if (ma->common.prio != 1) {
 		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
 		return -EINVAL;
 	}
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
···
 
 unlock:
 	mutex_unlock(&esw->state_lock);
-	return 0;
+	return err;
 }
 
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
+6 -4
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
···
 
 	rhashtable_destroy(&fg->ftes_hash);
 	ida_destroy(&fg->fte_allocator);
-	if (ft->autogroup.active)
+	if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
 		ft->autogroup.num_groups--;
 	err = rhltable_remove(&ft->fgs_hash,
 			      &fg->hash,
···
 
 	ft->autogroup.active = true;
 	ft->autogroup.required_groups = max_num_groups;
+	/* We save place for flow groups in addition to max types */
+	ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
 
 	return ft;
 }
···
 		return ERR_PTR(-ENOENT);
 
 	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
-		/* We save place for flow groups in addition to max types */
-		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
+		group_size = ft->autogroup.group_size;
 
 	/* ft->max_fte == ft->autogroup.max_types */
 	if (group_size == 0)
···
 	if (IS_ERR(fg))
 		goto out;
 
-	ft->autogroup.num_groups++;
+	if (group_size == ft->autogroup.group_size)
+		ft->autogroup.num_groups++;
 
 out:
 	return fg;
+1
drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
···
 	struct {
 		bool			active;
 		unsigned int		required_groups;
+		unsigned int		group_size;
 		unsigned int		num_groups;
 	} autogroup;
 	/* Protect fwd_rules */
+1
drivers/net/ethernet/mellanox/mlx5/core/main.c
···
 	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
 	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
+	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
+15
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
···
 	}
 }
 
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+	u16 bits = 0;
+
+	while (byte_mask) {
+		byte_mask = byte_mask & (byte_mask - 1);
+		bits++;
+	}
+
+	return bits;
+}
+
 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
 				      struct mlx5dr_domain *dmn,
 				      struct mlx5dr_domain_rx_tx *nic_dmn)
···
 		return false;
 
 	if (!ctrl->may_grow)
+		return false;
+
+	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
 		return false;
 
 	if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
···
 	unsigned int irqn;
 	void *cqc, *in;
 	__be64 *pas;
+	int vector;
 	u32 i;
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
···
 	if (!in)
 		goto err_cqwq;
 
-	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+	vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+	err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
 	if (err) {
 		kvfree(in);
 		goto err_cqwq;
+1 -21
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
···
 	return !refcount_read(&ste->refcount);
 }
 
-static u16 get_bits_per_mask(u16 byte_mask)
-{
-	u16 bits = 0;
-
-	while (byte_mask) {
-		byte_mask = byte_mask & (byte_mask - 1);
-		bits++;
-	}
-
-	return bits;
-}
-
 /* Init one ste as a pattern for ste data array */
 void mlx5dr_ste_set_formatted_ste(u16 gvmi,
 				  struct mlx5dr_domain_rx_tx *nic_dmn,
···
 	struct mlx5dr_ste_htbl *next_htbl;
 
 	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
-		u32 bits_in_mask;
 		u8 next_lu_type;
 		u16 byte_mask;
 
 		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
 		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
-
-		/* Don't allocate table more than required,
-		 * the size of the table defined via the byte_mask, so no need
-		 * to allocate more than that.
-		 */
-		bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
-		log_table_size = min(log_table_size, bits_in_mask);
 
 		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
 						  log_table_size,
···
 
 	htbl->ctrl.may_grow = true;
 
-	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
 		htbl->ctrl.may_grow = false;
 
 	/* Threshold is 50%, one is added to table of size 1 */
+2
drivers/net/ethernet/mellanox/mlxfw/mlxfw_fsm.c
···
 		return err;
 
 	if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+		fsm_state_err = min_t(enum mlxfw_fsm_state_err,
+				      fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
 		pr_err("Firmware flash failed: %s\n",
 		       mlxfw_fsm_state_err_str[fsm_state_err]);
 		NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
+1 -18
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
···
 	if (d)
 		return l3mdev_fib_table(d) ? : RT_TABLE_MAIN;
 	else
-		return l3mdev_fib_table(ol_dev) ? : RT_TABLE_MAIN;
+		return RT_TABLE_MAIN;
 }
 
 static struct mlxsw_sp_rif *
···
 {
 	struct mlxsw_sp_ipip_entry *ipip_entry =
 		mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
-	enum mlxsw_sp_l3proto ul_proto;
-	union mlxsw_sp_l3addr saddr;
-	u32 ul_tb_id;
 
 	if (!ipip_entry)
 		return 0;
-
-	/* For flat configuration cases, moving overlay to a different VRF might
-	 * cause local address conflict, and the conflicting tunnels need to be
-	 * demoted.
-	 */
-	ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
-	ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
-	saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
-	if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
-						 saddr, ul_tb_id,
-						 ipip_entry)) {
-		mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
-		return 0;
-	}
 
 	return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
 						   true, false, false, extack);
+5 -6
drivers/net/ethernet/qualcomm/qca_spi.c
···
 	netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n",
 		   available);
 
-	if (available > QCASPI_HW_BUF_LEN) {
+	if (available > QCASPI_HW_BUF_LEN + QCASPI_HW_PKT_LEN) {
 		/* This could only happen by interferences on the SPI line.
 		 * So retry later ...
 		 */
···
 	u16 signature = 0;
 	u16 spi_config;
 	u16 wrbuf_space = 0;
-	static u16 reset_count;
 
 	if (event == QCASPI_EVENT_CPUON) {
 		/* Read signature twice, if not valid
···
 
 		qca->sync = QCASPI_SYNC_RESET;
 		qca->stats.trig_reset++;
-		reset_count = 0;
+		qca->reset_count = 0;
 		break;
 	case QCASPI_SYNC_RESET:
-		reset_count++;
+		qca->reset_count++;
 		netdev_dbg(qca->net_dev, "sync: waiting for CPU on, count %u.\n",
-			   reset_count);
-		if (reset_count >= QCASPI_RESET_TIMEOUT) {
+			   qca->reset_count);
+		if (qca->reset_count >= QCASPI_RESET_TIMEOUT) {
 			/* reset did not seem to take place, try again */
 			qca->sync = QCASPI_SYNC_UNKNOWN;
 			qca->stats.reset_timeout++;
+1
drivers/net/ethernet/qualcomm/qca_spi.h
···
 
 	unsigned int intr_req;
 	unsigned int intr_svc;
+	u16 reset_count;
 
 #ifdef CONFIG_DEBUG_FS
 	struct dentry *device_root;
+5 -2
drivers/net/ethernet/realtek/r8169_main.c
···
 		dev->gso_max_segs = RTL_GSO_MAX_SEGS_V1;
 	}
 
-	/* RTL8168e-vl has a HW issue with TSO */
-	if (tp->mac_version == RTL_GIGA_MAC_VER_34) {
+	/* RTL8168e-vl and one RTL8168c variant are known to have a
+	 * HW issue with TSO.
+	 */
+	if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
+	    tp->mac_version == RTL_GIGA_MAC_VER_22) {
 		dev->vlan_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
 		dev->hw_features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
 		dev->features &= ~(NETIF_F_ALL_TSO | NETIF_F_SG);
+2 -1
drivers/net/ethernet/sfc/ptp.c
···
 	(void)efx_ptp_disable(efx);
 
 	cancel_work_sync(&efx->ptp_data->work);
-	cancel_work_sync(&efx->ptp_data->pps_work);
+	if (efx->ptp_data->pps_workwq)
+		cancel_work_sync(&efx->ptp_data->pps_work);
 
 	skb_queue_purge(&efx->ptp_data->rxq);
 	skb_queue_purge(&efx->ptp_data->txq);
+2 -1
drivers/net/hyperv/hyperv_net.h
···
 	/* The number of entries in the send indirection table */
 	u32 count;
 
-	/* The offset of the send indirection table from top of this struct.
+	/* The offset of the send indirection table from the beginning of
+	 * struct nvsp_message.
 	 * The send indirection table tells which channel to put the send
 	 * traffic on. Each entry is a channel number.
 	 */
+30 -8
drivers/net/hyperv/netvsc.c
···
 }
 
 static void netvsc_send_table(struct net_device *ndev,
-			      const struct nvsp_message *nvmsg)
+			      struct netvsc_device *nvscdev,
+			      const struct nvsp_message *nvmsg,
+			      u32 msglen)
 {
 	struct net_device_context *net_device_ctx = netdev_priv(ndev);
-	u32 count, *tab;
+	u32 count, offset, *tab;
 	int i;
 
 	count = nvmsg->msg.v5_msg.send_table.count;
+	offset = nvmsg->msg.v5_msg.send_table.offset;
+
 	if (count != VRSS_SEND_TAB_SIZE) {
 		netdev_err(ndev, "Received wrong send-table size:%u\n", count);
 		return;
 	}
 
-	tab = (u32 *)((unsigned long)&nvmsg->msg.v5_msg.send_table +
-		      nvmsg->msg.v5_msg.send_table.offset);
+	/* If negotiated version <= NVSP_PROTOCOL_VERSION_6, the offset may be
+	 * wrong due to a host bug. So fix the offset here.
+	 */
+	if (nvscdev->nvsp_version <= NVSP_PROTOCOL_VERSION_6 &&
+	    msglen >= sizeof(struct nvsp_message_header) +
+	    sizeof(union nvsp_6_message_uber) + count * sizeof(u32))
+		offset = sizeof(struct nvsp_message_header) +
+			 sizeof(union nvsp_6_message_uber);
+
+	/* Boundary check for all versions */
+	if (offset > msglen - count * sizeof(u32)) {
+		netdev_err(ndev, "Received send-table offset too big:%u\n",
+			   offset);
+		return;
+	}
+
+	tab = (void *)nvmsg + offset;
 
 	for (i = 0; i < count; i++)
 		net_device_ctx->tx_table[i] = tab[i];
···
 		    net_device_ctx->vf_alloc ? "added" : "removed");
 }
 
-static void netvsc_receive_inband(struct net_device *ndev,
-				  const struct nvsp_message *nvmsg)
+static void netvsc_receive_inband(struct net_device *ndev,
+				  struct netvsc_device *nvscdev,
+				  const struct nvsp_message *nvmsg,
+				  u32 msglen)
 {
 	switch (nvmsg->hdr.msg_type) {
 	case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE:
-		netvsc_send_table(ndev, nvmsg);
+		netvsc_send_table(ndev, nvscdev, nvmsg, msglen);
 		break;
 
 	case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION:
···
 {
 	struct vmbus_channel *channel = nvchan->channel;
 	const struct nvsp_message *nvmsg = hv_pkt_data(desc);
+	u32 msglen = hv_pkt_datalen(desc);
 
 	trace_nvsp_recv(ndev, channel, nvmsg);
 
···
 		break;
 
 	case VM_PKT_DATA_INBAND:
-		netvsc_receive_inband(ndev, nvmsg);
+		netvsc_receive_inband(ndev, net_device, nvmsg, msglen);
 		break;
 
 	default:
+3
drivers/net/phy/mdio-sun4i.c
···
 static int sun4i_mdio_remove(struct platform_device *pdev)
 {
 	struct mii_bus *bus = platform_get_drvdata(pdev);
+	struct sun4i_mdio_data *data = bus->priv;
 
 	mdiobus_unregister(bus);
+	if (data->regulator)
+		regulator_disable(data->regulator);
 	mdiobus_free(bus);
 
 	return 0;
+20 -9
drivers/net/phy/phylink.c
···
 * Create a new phylink instance, and parse the link parameters found in @np.
 * This will parse in-band modes, fixed-link or SFP configuration.
 *
+ * Note: the rtnl lock must not be held when calling this function.
+ *
 * Returns a pointer to a &struct phylink, or an error-pointer value. Users
 * must use IS_ERR() to check for errors from this function.
 */
···
 *
 * Destroy a phylink instance. Any PHY that has been attached must have been
 * cleaned up via phylink_disconnect_phy() prior to calling this function.
+ *
+ * Note: the rtnl lock must not be held when calling this function.
 */
 void phylink_destroy(struct phylink *pl)
 {
···
 	pl->link_config.duplex = our_kset.base.duplex;
 	pl->link_config.an_enabled = our_kset.base.autoneg != AUTONEG_DISABLE;
 
-	if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+	/* If we have a PHY, phylib will call our link state function if the
+	 * mode has changed, which will trigger a resolve and update the MAC
+	 * configuration. For a fixed link, this isn't able to change any
+	 * parameters, which just leaves inband mode.
+	 */
+	if (pl->link_an_mode == MLO_AN_INBAND &&
+	    !test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
 		phylink_mac_config(pl, &pl->link_config);
 		phylink_mac_an_restart(pl);
 	}
···
 	if (pause->tx_pause)
 		config->pause |= MLO_PAUSE_TX;
 
-	if (!test_bit(PHYLINK_DISABLE_STOPPED, &pl->phylink_disable_state)) {
+	/* If we have a PHY, phylib will call our link state function if the
+	 * mode has changed, which will trigger a resolve and update the MAC
+	 * configuration.
+	 */
+	if (pl->phydev) {
+		phy_set_asym_pause(pl->phydev, pause->rx_pause,
+				   pause->tx_pause);
+	} else if (!test_bit(PHYLINK_DISABLE_STOPPED,
+			     &pl->phylink_disable_state)) {
 		switch (pl->link_an_mode) {
-		case MLO_AN_PHY:
-			/* Silently mark the carrier down, and then trigger a resolve */
-			if (pl->netdev)
-				netif_carrier_off(pl->netdev);
-			phylink_run_resolve(pl);
-			break;
-
 		case MLO_AN_FIXED:
 			/* Should we allow fixed links to change against the config? */
 			phylink_resolve_flow(pl, config);
+21 -9
drivers/net/usb/r8152.c
···
 	unregister_pm_notifier(&tp->pm_notifier);
 #endif
 	tasklet_disable(&tp->tx_tl);
-	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
+	napi_disable(&tp->napi);
 	netif_stop_queue(netdev);
 
 	res = usb_autopm_get_interface(tp->intf);
···
 
 	netif_stop_queue(netdev);
 	tasklet_disable(&tp->tx_tl);
-	napi_disable(&tp->napi);
 	clear_bit(WORK_ENABLE, &tp->flags);
 	usb_kill_urb(tp->intr_urb);
 	cancel_delayed_work_sync(&tp->schedule);
+	napi_disable(&tp->napi);
 	if (netif_carrier_ok(netdev)) {
 		mutex_lock(&tp->control);
 		tp->rtl_ops.disable(tp);
···
 
 	netif_device_attach(netdev);
 
-	if (netif_running(netdev) && netdev->flags & IFF_UP) {
+	if (netif_running(netdev) && (netdev->flags & IFF_UP)) {
 		tp->rtl_ops.up(tp);
 		netif_carrier_off(netdev);
 		set_bit(WORK_ENABLE, &tp->flags);
···
 		}
 
 		if (tp->rx_copybreak != val) {
-			napi_disable(&tp->napi);
-			tp->rx_copybreak = val;
-			napi_enable(&tp->napi);
+			if (netdev->flags & IFF_UP) {
+				mutex_lock(&tp->control);
+				napi_disable(&tp->napi);
+				tp->rx_copybreak = val;
+				napi_enable(&tp->napi);
+				mutex_unlock(&tp->control);
+			} else {
+				tp->rx_copybreak = val;
+			}
 		}
 		break;
 	default:
···
 		return -EINVAL;
 
 	if (tp->rx_pending != ring->rx_pending) {
-		napi_disable(&tp->napi);
-		tp->rx_pending = ring->rx_pending;
-		napi_enable(&tp->napi);
+		if (netdev->flags & IFF_UP) {
+			mutex_lock(&tp->control);
+			napi_disable(&tp->napi);
+			tp->rx_pending = ring->rx_pending;
+			napi_enable(&tp->napi);
+			mutex_unlock(&tp->control);
+		} else {
+			tp->rx_pending = ring->rx_pending;
+		}
 	}
 
 	return 0;
+1 -1
drivers/nfc/port100.c
···
 
 	rc = port100_submit_urb_for_ack(dev, GFP_KERNEL);
 	if (rc)
-		usb_unlink_urb(dev->out_urb);
+		usb_kill_urb(dev->out_urb);
 
 exit:
 	mutex_unlock(&dev->out_urb_lock);
+1
drivers/s390/net/qeth_core.h
···
 	struct service_level qeth_service_level;
 	struct qdio_ssqd_desc ssqd;
 	debug_info_t *debug;
+	struct mutex sbp_lock;
 	struct mutex conf_mutex;
 	struct mutex discipline_mutex;
 	struct napi_struct napi;
+5 -5
drivers/s390/net/qeth_core_main.c
···
 			 CCW_DEVID(cdev), dstat, cstat);
 		print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
 			       16, 1, irb, 64, 1);
-		return 1;
+		return -EIO;
 	}
 
 	if (dstat & DEV_STAT_UNIT_CHECK) {
 		if (sense[SENSE_RESETTING_EVENT_BYTE] &
 		    SENSE_RESETTING_EVENT_FLAG) {
 			QETH_CARD_TEXT(card, 2, "REVIND");
-			return 1;
+			return -EIO;
 		}
 		if (sense[SENSE_COMMAND_REJECT_BYTE] &
 		    SENSE_COMMAND_REJECT_FLAG) {
 			QETH_CARD_TEXT(card, 2, "CMDREJi");
-			return 1;
+			return -EIO;
 		}
 		if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
 			QETH_CARD_TEXT(card, 2, "AFFE");
-			return 1;
+			return -EIO;
 		}
 		if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
 			QETH_CARD_TEXT(card, 2, "ZEROSEN");
 			return 0;
 		}
 		QETH_CARD_TEXT(card, 2, "DGENCHK");
-		return 1;
+		return -EIO;
 	}
 	return 0;
 }
+14 -7
drivers/s390/net/qeth_l2_main.c
···
 	if (card->info.promisc_mode == enable)
 		return;
 
-	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+	if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE)) {
 		qeth_setadp_promisc_mode(card, enable);
-	else if (card->options.sbp.reflect_promisc)
-		qeth_l2_promisc_to_bridge(card, enable);
+	} else {
+		mutex_lock(&card->sbp_lock);
+		if (card->options.sbp.reflect_promisc)
+			qeth_l2_promisc_to_bridge(card, enable);
+		mutex_unlock(&card->sbp_lock);
+	}
 }
 
 /* New MAC address is added to the hash table and marked to be written on card
···
 	int rc;
 
 	qeth_l2_vnicc_set_defaults(card);
+	mutex_init(&card->sbp_lock);
 
 	if (gdev->dev.type == &qeth_generic_devtype) {
 		rc = qeth_l2_create_device_attributes(&gdev->dev);
···
 	} else
 		card->info.hwtrap = 0;
 
+	mutex_lock(&card->sbp_lock);
 	qeth_bridgeport_query_support(card);
 	if (card->options.sbp.supported_funcs)
 		dev_info(&card->gdev->dev,
 			 "The device represents a Bridge Capable Port\n");
+	mutex_unlock(&card->sbp_lock);
 
 	qeth_l2_register_dev_addr(card);
 
···
 
 	/* Role should not change by itself, but if it did, */
 	/* information from the hardware is authoritative. */
-	mutex_lock(&data->card->conf_mutex);
+	mutex_lock(&data->card->sbp_lock);
 	data->card->options.sbp.role = entry->role;
-	mutex_unlock(&data->card->conf_mutex);
+	mutex_unlock(&data->card->sbp_lock);
 
 	snprintf(env_locrem, sizeof(env_locrem), "BRIDGEPORT=statechange");
 	snprintf(env_role, sizeof(env_role), "ROLE=%s",
···
 			: (data->hostevs.lost_event_mask == 0x02)
 			? "Bridge port state change"
 			: "Unknown reason");
-		mutex_lock(&data->card->conf_mutex);
+		mutex_lock(&data->card->sbp_lock);
 		data->card->options.sbp.hostnotification = 0;
-		mutex_unlock(&data->card->conf_mutex);
+		mutex_unlock(&data->card->sbp_lock);
 		qeth_bridge_emit_host_event(data->card, anev_abort,
 					    0, NULL, NULL);
 	} else
+13 -1
drivers/s390/net/qeth_l2_sys.c
···
 	if (qeth_l2_vnicc_is_in_use(card))
 		return sprintf(buf, "n/a (VNIC characteristics)\n");
 
+	mutex_lock(&card->sbp_lock);
 	if (qeth_card_hw_is_reachable(card) &&
 	    card->options.sbp.supported_funcs)
 		rc = qeth_bridgeport_query_ports(card,
···
 		else
 			rc = sprintf(buf, "%s\n", word);
 	}
+	mutex_unlock(&card->sbp_lock);
 
 	return rc;
 }
···
 		return -EINVAL;
 
 	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->sbp_lock);
 
 	if (qeth_l2_vnicc_is_in_use(card))
 		rc = -EBUSY;
···
 	} else
 		card->options.sbp.role = role;
 
+	mutex_unlock(&card->sbp_lock);
 	mutex_unlock(&card->conf_mutex);
 
 	return rc ? rc : count;
···
 		return rc;
 
 	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->sbp_lock);
 
 	if (qeth_l2_vnicc_is_in_use(card))
 		rc = -EBUSY;
···
 	} else
 		card->options.sbp.hostnotification = enable;
 
+	mutex_unlock(&card->sbp_lock);
 	mutex_unlock(&card->conf_mutex);
 
 	return rc ? rc : count;
···
 		return -EINVAL;
 
 	mutex_lock(&card->conf_mutex);
+	mutex_lock(&card->sbp_lock);
 
 	if (qeth_l2_vnicc_is_in_use(card))
 		rc = -EBUSY;
···
 		rc = 0;
 	}
 
+	mutex_unlock(&card->sbp_lock);
 	mutex_unlock(&card->conf_mutex);
 
 	return rc ? rc : count;
···
 		return;
 	if (!card->options.sbp.supported_funcs)
 		return;
+
+	mutex_lock(&card->sbp_lock);
 	if (card->options.sbp.role != QETH_SBP_ROLE_NONE) {
 		/* Conditional to avoid spurious error messages */
 		qeth_bridgeport_setrole(card, card->options.sbp.role);
···
 		rc = qeth_bridgeport_an_set(card, 1);
 		if (rc)
 			card->options.sbp.hostnotification = 0;
-	} else
+	} else {
 		qeth_bridgeport_an_set(card, 0);
+	}
+	mutex_unlock(&card->sbp_lock);
 }
 
 /* VNIC CHARS support */
+6
include/linux/skbuff.h
···
 		skb->active_extensions = 0;
 	}
 }
+
+static inline bool skb_has_extensions(struct sk_buff *skb)
+{
+	return unlikely(skb->active_extensions);
+}
 #else
 static inline void skb_ext_put(struct sk_buff *skb) {}
 static inline void skb_ext_reset(struct sk_buff *skb) {}
 static inline void skb_ext_del(struct sk_buff *skb, int unused) {}
 static inline void __skb_ext_copy(struct sk_buff *d, const struct sk_buff *s) {}
 static inline void skb_ext_copy(struct sk_buff *dst, const struct sk_buff *s) {}
+static inline bool skb_has_extensions(struct sk_buff *skb) { return false; }
 #endif /* CONFIG_SKB_EXTENSIONS */
 
 static inline void nf_reset_ct(struct sk_buff *skb)
+2
include/net/tls.h
···
 void tls_sw_strparser_arm(struct sock *sk, struct tls_context *ctx);
 void tls_sw_strparser_done(struct tls_context *tls_ctx);
 int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+			   int offset, size_t size, int flags);
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags);
 void tls_sw_cancel_work_tx(struct tls_context *tls_ctx);
+3 -1
kernel/bpf/offload.c
···
 	down_write(&bpf_devs_lock);
 	if (!offdevs_inited) {
 		err = rhashtable_init(&offdevs, &offdevs_params);
-		if (err)
+		if (err) {
+			up_write(&bpf_devs_lock);
 			return ERR_PTR(err);
+		}
 		offdevs_inited = true;
 	}
 	up_write(&bpf_devs_lock);
+3 -3
net/atm/clip.c
···
 	struct clip_vcc **walk;
 
 	if (!entry) {
-		pr_crit("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
+		pr_err("!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
 		return;
 	}
 	netif_tx_lock_bh(entry->neigh->dev);	/* block clip_start_xmit() */
···
 		error = neigh_update(entry->neigh, NULL, NUD_NONE,
 				     NEIGH_UPDATE_F_ADMIN, 0);
 		if (error)
-			pr_crit("neigh_update failed with %d\n", error);
+			pr_err("neigh_update failed with %d\n", error);
 		goto out;
 	}
-	pr_crit("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
+	pr_err("ATMARP: failed (entry %p, vcc 0x%p)\n", entry, clip_vcc);
 out:
 	netif_tx_unlock_bh(entry->neigh->dev);
 }
+14 -11
net/core/net-sysfs.c
···
 	error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
 				     "rx-%u", index);
 	if (error)
-		return error;
+		goto err;
 
 	dev_hold(queue->dev);
 
 	if (dev->sysfs_rx_queue_group) {
 		error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
-		if (error) {
-			kobject_put(kobj);
-			return error;
-		}
+		if (error)
+			goto err;
 	}
 
 	kobject_uevent(kobj, KOBJ_ADD);
 
+	return error;
+
+err:
+	kobject_put(kobj);
 	return error;
 }
 #endif /* CONFIG_SYSFS */
···
 	error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
 				     "tx-%u", index);
 	if (error)
-		return error;
+		goto err;
 
 	dev_hold(queue->dev);
 
 #ifdef CONFIG_BQL
 	error = sysfs_create_group(kobj, &dql_group);
-	if (error) {
-		kobject_put(kobj);
-		return error;
-	}
+	if (error)
+		goto err;
 #endif
 
 	kobject_uevent(kobj, KOBJ_ADD);
-
 	return 0;
+
+err:
+	kobject_put(kobj);
+	return error;
 }
 #endif /* CONFIG_SYSFS */
 
+22 -1
net/core/rtnetlink.c
···
 	if (tb[IFLA_VF_MAC]) {
 		struct ifla_vf_mac *ivm = nla_data(tb[IFLA_VF_MAC]);
 
+		if (ivm->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_mac)
 			err = ops->ndo_set_vf_mac(dev, ivm->vf,
···
 	if (tb[IFLA_VF_VLAN]) {
 		struct ifla_vf_vlan *ivv = nla_data(tb[IFLA_VF_VLAN]);
 
+		if (ivv->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_vlan)
 			err = ops->ndo_set_vf_vlan(dev, ivv->vf, ivv->vlan,
···
 		if (len == 0)
 			return -EINVAL;
 
+		if (ivvl[0]->vf >= INT_MAX)
+			return -EINVAL;
 		err = ops->ndo_set_vf_vlan(dev, ivvl[0]->vf, ivvl[0]->vlan,
 					   ivvl[0]->qos, ivvl[0]->vlan_proto);
 		if (err < 0)
···
 		struct ifla_vf_tx_rate *ivt = nla_data(tb[IFLA_VF_TX_RATE]);
 		struct ifla_vf_info ivf;
 
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_get_vf_config)
 			err = ops->ndo_get_vf_config(dev, ivt->vf, &ivf);
···
 	if (tb[IFLA_VF_RATE]) {
 		struct ifla_vf_rate *ivt = nla_data(tb[IFLA_VF_RATE]);
 
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_rate)
 			err = ops->ndo_set_vf_rate(dev, ivt->vf,
···
 	if (tb[IFLA_VF_SPOOFCHK]) {
 		struct ifla_vf_spoofchk *ivs = nla_data(tb[IFLA_VF_SPOOFCHK]);
 
+		if (ivs->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_spoofchk)
 			err = ops->ndo_set_vf_spoofchk(dev, ivs->vf,
···
 	if (tb[IFLA_VF_LINK_STATE]) {
 		struct ifla_vf_link_state *ivl = nla_data(tb[IFLA_VF_LINK_STATE]);
 
+		if (ivl->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_link_state)
 			err = ops->ndo_set_vf_link_state(dev, ivl->vf,
···
 
 		err = -EOPNOTSUPP;
 		ivrssq_en = nla_data(tb[IFLA_VF_RSS_QUERY_EN]);
+		if (ivrssq_en->vf >= INT_MAX)
+			return -EINVAL;
 		if (ops->ndo_set_vf_rss_query_en)
 			err = ops->ndo_set_vf_rss_query_en(dev, ivrssq_en->vf,
 							   ivrssq_en->setting);
···
 	if (tb[IFLA_VF_TRUST]) {
 		struct ifla_vf_trust *ivt = nla_data(tb[IFLA_VF_TRUST]);
 
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		err = -EOPNOTSUPP;
 		if (ops->ndo_set_vf_trust)
 			err = ops->ndo_set_vf_trust(dev, ivt->vf, ivt->setting);
···
 	if (tb[IFLA_VF_IB_NODE_GUID]) {
 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_NODE_GUID]);
 
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		if (!ops->ndo_set_vf_guid)
 			return -EOPNOTSUPP;
-
 		return handle_vf_guid(dev, ivt, IFLA_VF_IB_NODE_GUID);
 	}
 
 	if (tb[IFLA_VF_IB_PORT_GUID]) {
 		struct ifla_vf_guid *ivt = nla_data(tb[IFLA_VF_IB_PORT_GUID]);
 
+		if (ivt->vf >= INT_MAX)
+			return -EINVAL;
 		if (!ops->ndo_set_vf_guid)
 			return -EOPNOTSUPP;
 
+1 -1
net/ipv4/sysctl_net_ipv4.c
···
 		.mode		= 0644,
 		.proc_handler	= proc_fib_multipath_hash_policy,
 		.extra1		= SYSCTL_ZERO,
-		.extra2		= SYSCTL_ONE,
+		.extra2		= &two,
 	},
 #endif
 	{
+22 -5
net/ipv4/udp.c
···
 
 #define UDP_SKB_IS_STATELESS 0x80000000
 
+/* all head states (dst, sk, nf conntrack) except skb extensions are
+ * cleared by udp_rcv().
+ *
+ * We need to preserve secpath, if present, to eventually process
+ * IP_CMSG_PASSSEC at recvmsg() time.
+ *
+ * Other extensions can be cleared.
+ */
+static bool udp_try_make_stateless(struct sk_buff *skb)
+{
+	if (!skb_has_extensions(skb))
+		return true;
+
+	if (!secpath_exists(skb)) {
+		skb_ext_reset(skb);
+		return true;
+	}
+
+	return false;
+}
+
 static void udp_set_dev_scratch(struct sk_buff *skb)
 {
 	struct udp_dev_scratch *scratch = udp_skb_scratch(skb);
···
 	scratch->csum_unnecessary = !!skb_csum_unnecessary(skb);
 	scratch->is_linear = !skb_is_nonlinear(skb);
 #endif
-	/* all head states execept sp (dst, sk, nf) are always cleared by
-	 * udp_rcv() and we need to preserve secpath, if present, to eventually
-	 * process IP_CMSG_PASSSEC at recvmsg() time
-	 */
-	if (likely(!skb_sec_path(skb)))
+	if (udp_try_make_stateless(skb))
 		scratch->_tsize_state |= UDP_SKB_IS_STATELESS;
 }
+2 -2
net/ipv6/ipv6_sockglue.c
···
 		break;
 
 	case IPV6_TRANSPARENT:
-		if (valbool && !ns_capable(net->user_ns, CAP_NET_ADMIN) &&
-		    !ns_capable(net->user_ns, CAP_NET_RAW)) {
+		if (valbool && !ns_capable(net->user_ns, CAP_NET_RAW) &&
+		    !ns_capable(net->user_ns, CAP_NET_ADMIN)) {
 			retv = -EPERM;
 			break;
 		}
+1 -1
net/ipv6/route.c
···
 	 * Router Reachability Probe MUST be rate-limited
 	 * to no more than one per minute.
 	 */
-	if (fib6_nh->fib_nh_gw_family)
+	if (!fib6_nh->fib_nh_gw_family)
 		return;
 
 	nh_gw = &fib6_nh->fib_nh_gw6;
+5 -7
net/sched/act_pedit.c
···
 	int err = -EINVAL;
 	int rem;
 
-	if (!nla || !n)
+	if (!nla)
 		return NULL;
 
 	keys_ex = kcalloc(n, sizeof(*k), GFP_KERNEL);
···
 	}
 
 	parm = nla_data(pattr);
+	if (!parm->nkeys) {
+		NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
+		return -EINVAL;
+	}
 	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
 	if (nla_len(pattr) < sizeof(*parm) + ksize) {
 		NL_SET_ERR_MSG_ATTR(extack, pattr, "Length of TCA_PEDIT_PARMS or TCA_PEDIT_PARMS_EX pedit attribute is invalid");
···
 	index = parm->index;
 	err = tcf_idr_check_alloc(tn, &index, a, bind);
 	if (!err) {
-		if (!parm->nkeys) {
-			tcf_idr_cleanup(tn, index);
-			NL_SET_ERR_MSG_MOD(extack, "Pedit requires keys to be passed");
-			ret = -EINVAL;
-			goto out_free;
-		}
 		ret = tcf_idr_create(tn, index, est, a,
 				     &act_pedit_ops, bind, false);
 		if (ret) {
+4
net/sched/act_tunnel_key.c
···
 		if (opt_len < 0)
 			return opt_len;
 		opts_len += opt_len;
+		if (opts_len > IP_TUNNEL_OPTS_MAX) {
+			NL_SET_ERR_MSG(extack, "Tunnel options exceeds max size");
+			return -EINVAL;
+		}
 		if (dst) {
 			dst_len -= opt_len;
 			dst += opt_len;
+26 -2
net/sched/sch_taprio.c
···
 	}
 
 	/* Verify priority mapping uses valid tcs */
-	for (i = 0; i < TC_BITMASK + 1; i++) {
+	for (i = 0; i <= TC_BITMASK; i++) {
 		if (qopt->prio_tc_map[i] >= qopt->num_tc) {
 			NL_SET_ERR_MSG(extack, "Invalid traffic class in priority to traffic class mapping");
 			return -EINVAL;
···
 	return err;
 }
 
+static int taprio_mqprio_cmp(const struct net_device *dev,
+			     const struct tc_mqprio_qopt *mqprio)
+{
+	int i;
+
+	if (!mqprio || mqprio->num_tc != dev->num_tc)
+		return -1;
+
+	for (i = 0; i < mqprio->num_tc; i++)
+		if (dev->tc_to_txq[i].count != mqprio->count[i] ||
+		    dev->tc_to_txq[i].offset != mqprio->offset[i])
+			return -1;
+
+	for (i = 0; i <= TC_BITMASK; i++)
+		if (dev->prio_tc_map[i] != mqprio->prio_tc_map[i])
+			return -1;
+
+	return 0;
+}
+
 static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
 			 struct netlink_ext_ack *extack)
 {
···
 	oper = rcu_dereference(q->oper_sched);
 	admin = rcu_dereference(q->admin_sched);
 	rcu_read_unlock();
+
+	/* no changes - no new mqprio settings */
+	if (!taprio_mqprio_cmp(dev, mqprio))
+		mqprio = NULL;
 
 	if (mqprio && (oper || admin)) {
 		NL_SET_ERR_MSG(extack, "Changing the traffic mapping of a running schedule is not supported");
···
 					    mqprio->offset[i]);
 
 		/* Always use supplied priority mappings */
-		for (i = 0; i < TC_BITMASK + 1; i++)
+		for (i = 0; i <= TC_BITMASK; i++)
 			netdev_set_prio_tc_map(dev, i,
 					       mqprio->prio_tc_map[i]);
 	}
+1
net/tls/tls_main.c
···
 {
 	tls_sw_proto_ops = inet_stream_ops;
 	tls_sw_proto_ops.splice_read = tls_sw_splice_read;
+	tls_sw_proto_ops.sendpage_locked   = tls_sw_sendpage_locked,
 
 	tls_device_init();
 	tcp_register_ulp(&tcp_tls_ulp_ops);
+11
net/tls/tls_sw.c
···
 	return copied ? copied : ret;
 }
 
+int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+			   int offset, size_t size, int flags)
+{
+	if (flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL |
+		      MSG_SENDPAGE_NOTLAST | MSG_SENDPAGE_NOPOLICY |
+		      MSG_NO_SHARED_FRAGS))
+		return -ENOTSUPP;
+
+	return tls_sw_do_sendpage(sk, page, offset, size, flags);
+}
+
 int tls_sw_sendpage(struct sock *sk, struct page *page,
 		    int offset, size_t size, int flags)
 {