Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Pull networking fixes from David Miller:

1) Fix 64-bit division in mlx5 IPSEC offload support, from Ilan Tayari
and Arnd Bergmann.

2) Fix race in statistics gathering in bnxt_en driver, from Michael
Chan.

3) Can't use a mutex in RCU reader protected section on tap driver, from
Cong Wang.

4) Fix mdb leak in bridging code, from Eduardo Valentin.

5) Fix free of wrong pointer variable in nfp driver, from Dan Carpenter.

6) Buffer overflow in brcmfmac driver, from Arend van Spriel.

7) ioremap_nocache() return value needs to be checked in smsc911x
driver, from Alexey Khoroshilov.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (34 commits)
net: stmmac: revert "support future possible different internal phy mode"
sfc: don't read beyond unicast address list
datagram: fix kernel-doc comments
socket: add documentation for missing elements
smsc911x: Add check for ioremap_nocache() return code
brcmfmac: fix possible buffer overflow in brcmf_cfg80211_mgmt_tx()
net: hns: Bugfix for Tx timeout handling in hns driver
net: ipmr: ipmr_get_table() returns NULL
nfp: freeing the wrong variable
mlxsw: spectrum_switchdev: Check status of memory allocation
mlxsw: spectrum_switchdev: Remove unused variable
mlxsw: spectrum_router: Fix use-after-free in route replace
mlxsw: spectrum_router: Add missing rollback
samples/bpf: fix a build issue
bridge: mdb: fix leak on complete_info ptr on fail path
tap: convert a mutex to a spinlock
cxgb4: fix BUG() on interrupt deallocating path of ULD
qed: Fix printk option passed when printing ipv6 addresses
net: Fix minor code bug in timestamping.txt
net: stmmac: Make 'alloc_dma_[rt]x_desc_resources()' look even closer
...

+172 -101
+2 -4
Documentation/networking/timestamping.txt
··· 44 44 Supports multiple types of timestamp requests. As a result, this 45 45 socket option takes a bitmap of flags, not a boolean. In 46 46 47 - err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, 48 - sizeof(val)); 47 + err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)); 49 48 50 49 val is an integer with any of the following bits set. Setting other 51 50 bit returns EINVAL and does not change the current state. ··· 248 249 249 250 __u32 val = SOF_TIMESTAMPING_SOFTWARE | 250 251 SOF_TIMESTAMPING_OPT_ID /* or any other flag */; 251 - err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, (void *) val, 252 - sizeof(val)); 252 + err = setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)); 253 253 254 254 255 255 1.4 Bytestream Timestamps
+29 -13
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 3458 3458 req.ver_upd = DRV_VER_UPD; 3459 3459 3460 3460 if (BNXT_PF(bp)) { 3461 - DECLARE_BITMAP(vf_req_snif_bmap, 256); 3462 - u32 *data = (u32 *)vf_req_snif_bmap; 3461 + u32 data[8]; 3463 3462 int i; 3464 3463 3465 - memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); 3466 - for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) 3467 - __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); 3464 + memset(data, 0, sizeof(data)); 3465 + for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { 3466 + u16 cmd = bnxt_vf_req_snif[i]; 3467 + unsigned int bit, idx; 3468 + 3469 + idx = cmd / 32; 3470 + bit = cmd % 32; 3471 + data[idx] |= 1 << bit; 3472 + } 3468 3473 3469 3474 for (i = 0; i < 8; i++) 3470 3475 req.vf_req_fwd[i] = cpu_to_le32(data[i]); ··· 6284 6279 return __bnxt_open_nic(bp, true, true); 6285 6280 } 6286 6281 6282 + static bool bnxt_drv_busy(struct bnxt *bp) 6283 + { 6284 + return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || 6285 + test_bit(BNXT_STATE_READ_STATS, &bp->state)); 6286 + } 6287 + 6287 6288 int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) 6288 6289 { 6289 6290 int rc = 0; ··· 6308 6297 6309 6298 clear_bit(BNXT_STATE_OPEN, &bp->state); 6310 6299 smp_mb__after_atomic(); 6311 - while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) 6300 + while (bnxt_drv_busy(bp)) 6312 6301 msleep(20); 6313 6302 6314 6303 /* Flush rings and and disable interrupts */ ··· 6369 6358 u32 i; 6370 6359 struct bnxt *bp = netdev_priv(dev); 6371 6360 6372 - if (!bp->bnapi) 6361 + set_bit(BNXT_STATE_READ_STATS, &bp->state); 6362 + /* Make sure bnxt_close_nic() sees that we are reading stats before 6363 + * we check the BNXT_STATE_OPEN flag. 
6364 + */ 6365 + smp_mb__after_atomic(); 6366 + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { 6367 + clear_bit(BNXT_STATE_READ_STATS, &bp->state); 6373 6368 return; 6369 + } 6374 6370 6375 6371 /* TODO check if we need to synchronize with bnxt_close path */ 6376 6372 for (i = 0; i < bp->cp_nr_rings; i++) { ··· 6424 6406 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); 6425 6407 stats->tx_errors = le64_to_cpu(tx->tx_err); 6426 6408 } 6409 + clear_bit(BNXT_STATE_READ_STATS, &bp->state); 6427 6410 } 6428 6411 6429 6412 static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) ··· 6923 6904 } 6924 6905 6925 6906 /* Under rtnl_lock */ 6926 - int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp) 6907 + int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 6908 + int tx_xdp) 6927 6909 { 6928 6910 int max_rx, max_tx, tx_sets = 1; 6929 6911 int tx_rings_needed; 6930 - bool sh = true; 6931 6912 int rc; 6932 - 6933 - if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) 6934 - sh = false; 6935 6913 6936 6914 if (tcs) 6937 6915 tx_sets = tcs; ··· 7137 7121 sh = true; 7138 7122 7139 7123 rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 7140 - tc, bp->tx_nr_rings_xdp); 7124 + sh, tc, bp->tx_nr_rings_xdp); 7141 7125 if (rc) 7142 7126 return rc; 7143 7127
+3 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 1117 1117 unsigned long state; 1118 1118 #define BNXT_STATE_OPEN 0 1119 1119 #define BNXT_STATE_IN_SP_TASK 1 1120 + #define BNXT_STATE_READ_STATS 2 1120 1121 1121 1122 struct bnxt_irq *irq_tbl; 1122 1123 int total_irqs; ··· 1301 1300 int bnxt_half_open_nic(struct bnxt *bp); 1302 1301 void bnxt_half_close_nic(struct bnxt *bp); 1303 1302 int bnxt_close_nic(struct bnxt *, bool, bool); 1304 - int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp); 1303 + int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, 1304 + int tx_xdp); 1305 1305 int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); 1306 1306 int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); 1307 1307 void bnxt_restore_pf_fw_resources(struct bnxt *bp);
+2 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 432 432 } 433 433 tx_xdp = req_rx_rings; 434 434 } 435 - rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp); 435 + rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, 436 + tx_xdp); 436 437 if (rc) { 437 438 netdev_warn(dev, "Unable to allocate the requested rings\n"); 438 439 return rc;
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
··· 170 170 if (!tc) 171 171 tc = 1; 172 172 rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, 173 - tc, tx_xdp); 173 + true, tc, tx_xdp); 174 174 if (rc) { 175 175 netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n"); 176 176 return rc;
+11 -5
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 2083 2083 2084 2084 mutex_lock(&uld_mutex); 2085 2085 list_del(&adap->list_node); 2086 + 2086 2087 for (i = 0; i < CXGB4_ULD_MAX; i++) 2087 - if (adap->uld && adap->uld[i].handle) { 2088 + if (adap->uld && adap->uld[i].handle) 2088 2089 adap->uld[i].state_change(adap->uld[i].handle, 2089 2090 CXGB4_STATE_DETACH); 2090 - adap->uld[i].handle = NULL; 2091 - } 2091 + 2092 2092 if (netevent_registered && list_empty(&adapter_list)) { 2093 2093 unregister_netevent_notifier(&cxgb4_netevent_nb); 2094 2094 netevent_registered = false; ··· 5303 5303 */ 5304 5304 destroy_workqueue(adapter->workq); 5305 5305 5306 - if (is_uld(adapter)) 5306 + if (is_uld(adapter)) { 5307 5307 detach_ulds(adapter); 5308 + t4_uld_clean_up(adapter); 5309 + } 5308 5310 5309 5311 disable_interrupts(adapter); 5310 5312 ··· 5387 5385 if (adapter->port[i]->reg_state == NETREG_REGISTERED) 5388 5386 cxgb_close(adapter->port[i]); 5389 5387 5390 - t4_uld_clean_up(adapter); 5388 + if (is_uld(adapter)) { 5389 + detach_ulds(adapter); 5390 + t4_uld_clean_up(adapter); 5391 + } 5392 + 5391 5393 disable_interrupts(adapter); 5392 5394 disable_msi(adapter); 5393 5395
+25 -17
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
··· 589 589 kfree(adap->uld); 590 590 } 591 591 592 + /* This function should be called with uld_mutex taken. */ 593 + static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type) 594 + { 595 + if (adap->uld[type].handle) { 596 + adap->uld[type].handle = NULL; 597 + adap->uld[type].add = NULL; 598 + release_sge_txq_uld(adap, type); 599 + 600 + if (adap->flags & FULL_INIT_DONE) 601 + quiesce_rx_uld(adap, type); 602 + 603 + if (adap->flags & USING_MSIX) 604 + free_msix_queue_irqs_uld(adap, type); 605 + 606 + free_sge_queues_uld(adap, type); 607 + free_queues_uld(adap, type); 608 + } 609 + } 610 + 592 611 void t4_uld_clean_up(struct adapter *adap) 593 612 { 594 613 unsigned int i; 595 614 596 - if (!adap->uld) 597 - return; 615 + mutex_lock(&uld_mutex); 598 616 for (i = 0; i < CXGB4_ULD_MAX; i++) { 599 617 if (!adap->uld[i].handle) 600 618 continue; 601 - if (adap->flags & FULL_INIT_DONE) 602 - quiesce_rx_uld(adap, i); 603 - if (adap->flags & USING_MSIX) 604 - free_msix_queue_irqs_uld(adap, i); 605 - free_sge_queues_uld(adap, i); 606 - free_queues_uld(adap, i); 619 + 620 + cxgb4_shutdown_uld_adapter(adap, i); 607 621 } 622 + mutex_unlock(&uld_mutex); 608 623 } 609 624 610 625 static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) ··· 798 783 continue; 799 784 if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) 800 785 continue; 801 - adap->uld[type].handle = NULL; 802 - adap->uld[type].add = NULL; 803 - release_sge_txq_uld(adap, type); 804 - if (adap->flags & FULL_INIT_DONE) 805 - quiesce_rx_uld(adap, type); 806 - if (adap->flags & USING_MSIX) 807 - free_msix_queue_irqs_uld(adap, type); 808 - free_sge_queues_uld(adap, type); 809 - free_queues_uld(adap, type); 786 + 787 + cxgb4_shutdown_uld_adapter(adap, type); 810 788 } 811 789 mutex_unlock(&uld_mutex); 812 790
+5 -4
drivers/net/ethernet/cisco/enic/vnic_dev.c
··· 402 402 fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); 403 403 if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ 404 404 vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n"); 405 - 406 - return -ENODEV; 405 + err = -ENODEV; 406 + goto err_free_wq; 407 407 } 408 408 409 409 enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, ··· 414 414 err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, 415 415 DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); 416 416 if (err) 417 - goto err_free_wq; 417 + goto err_disable_wq; 418 418 419 419 vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; 420 420 vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; ··· 433 433 434 434 err_free_desc_ring: 435 435 vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); 436 - err_free_wq: 436 + err_disable_wq: 437 437 vnic_wq_disable(&vdev->devcmd2->wq); 438 + err_free_wq: 438 439 vnic_wq_free(&vdev->devcmd2->wq); 439 440 err_free_devcmd2: 440 441 kfree(vdev->devcmd2);
+9 -7
drivers/net/ethernet/hisilicon/hns/hns_enet.c
··· 1378 1378 void hns_nic_net_reinit(struct net_device *netdev) 1379 1379 { 1380 1380 struct hns_nic_priv *priv = netdev_priv(netdev); 1381 + enum hnae_port_type type = priv->ae_handle->port_type; 1381 1382 1382 1383 netif_trans_update(priv->netdev); 1383 1384 while (test_and_set_bit(NIC_STATE_REINITING, &priv->state)) 1384 1385 usleep_range(1000, 2000); 1385 1386 1386 1387 hns_nic_net_down(netdev); 1387 - hns_nic_net_reset(netdev); 1388 + 1389 + /* Only do hns_nic_net_reset in debug mode 1390 + * because of hardware limitation. 1391 + */ 1392 + if (type == HNAE_PORT_DEBUG) 1393 + hns_nic_net_reset(netdev); 1394 + 1388 1395 (void)hns_nic_net_up(netdev); 1389 1396 clear_bit(NIC_STATE_REINITING, &priv->state); 1390 1397 } ··· 2004 1997 rtnl_lock(); 2005 1998 /* put off any impending NetWatchDogTimeout */ 2006 1999 netif_trans_update(priv->netdev); 2000 + hns_nic_net_reinit(priv->netdev); 2007 2001 2008 - if (type == HNAE_PORT_DEBUG) { 2009 - hns_nic_net_reinit(priv->netdev); 2010 - } else { 2011 - netif_carrier_off(priv->netdev); 2012 - netif_tx_disable(priv->netdev); 2013 - } 2014 2002 rtnl_unlock(); 2015 2003 } 2016 2004
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 4 4 mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ 5 5 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ 6 6 mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ 7 - fs_counters.o rl.o lag.o dev.o lib/gid.o 7 + fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o 8 8 9 9 mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o 10 10 11 11 mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ 12 12 fpga/ipsec.o 13 13 14 - mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \ 14 + mlx5_core-$(CONFIG_MLX5_CORE_EN) += eswitch.o eswitch_offloads.o \ 15 15 en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \ 16 16 en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \ 17 17 en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o
+1
drivers/net/ethernet/mellanox/mlx5/core/accel/Makefile
··· 1 + subdir-ccflags-y += -I$(src)/..
+1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/Makefile
··· 1 + subdir-ccflags-y += -I$(src)/..
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
··· 372 372 */ 373 373 mlx5e_ipsec_inverse_table[1] = htons(0xFFFF); 374 374 for (mss = 2; mss < MAX_LSO_MSS; mss++) { 375 - mss_inv = ((1ULL << 32) / mss) >> 16; 375 + mss_inv = div_u64(1ULL << 32, mss) >> 16; 376 376 mlx5e_ipsec_inverse_table[mss] = htons(mss_inv); 377 377 } 378 378 }
+2
drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
··· 464 464 if (!perm_addr) 465 465 return; 466 466 467 + memset(perm_addr, 0xff, MAX_ADDR_LEN); 468 + 467 469 mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr); 468 470 } 469 471
+1
drivers/net/ethernet/mellanox/mlx5/core/fpga/Makefile
··· 1 + subdir-ccflags-y += -I$(src)/..
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
··· 102 102 return 0; 103 103 } 104 104 105 - int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev) 105 + static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev) 106 106 { 107 107 int err; 108 108 struct mlx5_core_dev *mdev = fdev->mdev;
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
··· 275 275 { 276 276 struct mlx5_fpga_device *fdev = mdev->fpga; 277 277 unsigned int i; 278 - u32 *data; 278 + __be32 *data; 279 279 u32 count; 280 280 u64 addr; 281 281 int ret; ··· 290 290 291 291 count = mlx5_fpga_ipsec_counters_count(mdev); 292 292 293 - data = kzalloc(sizeof(u32) * count * 2, GFP_KERNEL); 293 + data = kzalloc(sizeof(*data) * count * 2, GFP_KERNEL); 294 294 if (!data) { 295 295 ret = -ENOMEM; 296 296 goto out;
+1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/Makefile
··· 1 + subdir-ccflags-y += -I$(src)/..
+1
drivers/net/ethernet/mellanox/mlx5/core/lib/Makefile
··· 1 + subdir-ccflags-y += -I$(src)/..
+1
drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c
··· 34 34 #include <linux/etherdevice.h> 35 35 #include <linux/idr.h> 36 36 #include "mlx5_core.h" 37 + #include "lib/mlx5.h" 37 38 38 39 void mlx5_init_reserved_gids(struct mlx5_core_dev *dev) 39 40 {
+4
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 1790 1790 return 0; 1791 1791 1792 1792 err_nexthop_neigh_init: 1793 + mlxsw_sp_nexthop_rif_fini(nh); 1793 1794 mlxsw_sp_nexthop_remove(mlxsw_sp, nh); 1794 1795 return err; 1795 1796 } ··· 1867 1866 nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; 1868 1867 nh_grp->count = fi->fib_nhs; 1869 1868 nh_grp->key.fi = fi; 1869 + fib_info_hold(fi); 1870 1870 for (i = 0; i < nh_grp->count; i++) { 1871 1871 nh = &nh_grp->nexthops[i]; 1872 1872 fib_nh = &fi->fib_nh[i]; ··· 1887 1885 nh = &nh_grp->nexthops[i]; 1888 1886 mlxsw_sp_nexthop_fini(mlxsw_sp, nh); 1889 1887 } 1888 + fib_info_put(nh_grp->key.fi); 1890 1889 kfree(nh_grp); 1891 1890 return ERR_PTR(err); 1892 1891 } ··· 1906 1903 } 1907 1904 mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); 1908 1905 WARN_ON_ONCE(nh_grp->adj_index_valid); 1906 + fib_info_put(nh_grp->key.fi); 1909 1907 kfree(nh_grp); 1910 1908 } 1911 1909
+6 -3
drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
··· 979 979 { 980 980 u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); 981 981 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; 982 - struct mlxsw_sp_bridge_vlan *bridge_vlan; 983 982 u16 old_pvid = mlxsw_sp_port->pvid; 984 983 int err; 985 984 ··· 998 999 err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port); 999 1000 if (err) 1000 1001 goto err_port_vlan_bridge_join; 1001 - 1002 - bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); 1003 1002 1004 1003 return 0; 1005 1004 ··· 1916 1919 memcpy(&switchdev_work->fdb_info, ptr, 1917 1920 sizeof(switchdev_work->fdb_info)); 1918 1921 switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); 1922 + if (!switchdev_work->fdb_info.addr) 1923 + goto err_addr_alloc; 1919 1924 ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, 1920 1925 fdb_info->addr); 1921 1926 /* Take a reference on the device. This can be either ··· 1934 1935 mlxsw_core_schedule_work(&switchdev_work->work); 1935 1936 1936 1937 return NOTIFY_DONE; 1938 + 1939 + err_addr_alloc: 1940 + kfree(switchdev_work); 1941 + return NOTIFY_BAD; 1937 1942 } 1938 1943 1939 1944 static struct notifier_block mlxsw_sp_switchdev_notifier = {
+1 -1
drivers/net/ethernet/netronome/nfp/flower/metadata.c
··· 419 419 return 0; 420 420 421 421 err_free_last_used: 422 - kfree(priv->stats_ids.free_list.buf); 422 + kfree(priv->mask_ids.last_used); 423 423 err_free_mask_id: 424 424 kfree(priv->mask_ids.mask_id_free_list.buf); 425 425 return -ENOMEM;
+3 -3
drivers/net/ethernet/qlogic/qed/qed_iwarp.c
··· 575 575 576 576 if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { 577 577 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 578 - "local_ip=%pI4h:%x, remote_ip=%pI4h%x, vlan=%x\n", 578 + "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n", 579 579 p_tcp_ramrod->tcp.local_ip, 580 580 p_tcp_ramrod->tcp.local_port, 581 581 p_tcp_ramrod->tcp.remote_ip, ··· 583 583 p_tcp_ramrod->tcp.vlan_id); 584 584 } else { 585 585 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 586 - "local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n", 586 + "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n", 587 587 p_tcp_ramrod->tcp.local_ip, 588 588 p_tcp_ramrod->tcp.local_port, 589 589 p_tcp_ramrod->tcp.remote_ip, ··· 1519 1519 cm_info->vlan); 1520 1520 else 1521 1521 DP_VERBOSE(p_hwfn, QED_MSG_RDMA, 1522 - "remote_ip %pI6h:%x, local_ip %pI6h:%x vlan=%x\n", 1522 + "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n", 1523 1523 cm_info->remote_ip, cm_info->remote_port, 1524 1524 cm_info->local_ip, cm_info->local_port, 1525 1525 cm_info->vlan);
+3 -5
drivers/net/ethernet/sfc/ef10.c
··· 5034 5034 struct efx_ef10_filter_table *table = efx->filter_state; 5035 5035 struct net_device *net_dev = efx->net_dev; 5036 5036 struct netdev_hw_addr *uc; 5037 - int addr_count; 5038 5037 unsigned int i; 5039 5038 5040 - addr_count = netdev_uc_count(net_dev); 5041 5039 table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); 5042 - table->dev_uc_count = 1 + addr_count; 5043 5040 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); 5044 5041 i = 1; 5045 5042 netdev_for_each_uc_addr(uc, net_dev) { ··· 5047 5050 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); 5048 5051 i++; 5049 5052 } 5053 + 5054 + table->dev_uc_count = i; 5050 5055 } 5051 5056 5052 5057 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) ··· 5056 5057 struct efx_ef10_filter_table *table = efx->filter_state; 5057 5058 struct net_device *net_dev = efx->net_dev; 5058 5059 struct netdev_hw_addr *mc; 5059 - unsigned int i, addr_count; 5060 + unsigned int i; 5060 5061 5061 5062 table->mc_overflow = false; 5062 5063 table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); 5063 5064 5064 - addr_count = netdev_mc_count(net_dev); 5065 5065 i = 0; 5066 5066 netdev_for_each_mc_addr(mc, net_dev) { 5067 5067 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
+5
drivers/net/ethernet/smsc/smsc911x.c
··· 2467 2467 pdata = netdev_priv(dev); 2468 2468 dev->irq = irq; 2469 2469 pdata->ioaddr = ioremap_nocache(res->start, res_size); 2470 + if (!pdata->ioaddr) { 2471 + retval = -ENOMEM; 2472 + goto out_ioremap_fail; 2473 + } 2470 2474 2471 2475 pdata->dev = dev; 2472 2476 pdata->msg_enable = ((1 << debug) - 1); ··· 2576 2572 smsc911x_free_resources(pdev); 2577 2573 out_request_resources_fail: 2578 2574 iounmap(pdata->ioaddr); 2575 + out_ioremap_fail: 2579 2576 free_netdev(dev); 2580 2577 out_release_io_1: 2581 2578 release_mem_region(res->start, resource_size(res));
+3 -7
drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
··· 638 638 { 639 639 struct sunxi_priv_data *gmac = priv->plat->bsp_priv; 640 640 struct device_node *node = priv->device->of_node; 641 - int ret, phy_interface; 641 + int ret; 642 642 u32 reg, val; 643 643 644 644 regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val); ··· 718 718 if (gmac->variant->support_rmii) 719 719 reg &= ~SYSCON_RMII_EN; 720 720 721 - phy_interface = priv->plat->interface; 722 - /* if PHY is internal, select the mode (xMII) used by the SoC */ 723 - if (gmac->use_internal_phy) 724 - phy_interface = gmac->variant->internal_phy; 725 - switch (phy_interface) { 721 + switch (priv->plat->interface) { 726 722 case PHY_INTERFACE_MODE_MII: 727 723 /* default */ 728 724 break; ··· 932 936 } 933 937 934 938 plat_dat->interface = of_get_phy_mode(dev->of_node); 935 - if (plat_dat->interface == PHY_INTERFACE_MODE_INTERNAL) { 939 + if (plat_dat->interface == gmac->variant->internal_phy) { 936 940 dev_info(&pdev->dev, "Will use internal PHY\n"); 937 941 gmac->use_internal_phy = true; 938 942 gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0);
+7 -7
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 1449 1449 static void free_dma_tx_desc_resources(struct stmmac_priv *priv) 1450 1450 { 1451 1451 u32 tx_count = priv->plat->tx_queues_to_use; 1452 - u32 queue = 0; 1452 + u32 queue; 1453 1453 1454 1454 /* Free TX queue resources */ 1455 1455 for (queue = 0; queue < tx_count; queue++) { ··· 1498 1498 sizeof(dma_addr_t), 1499 1499 GFP_KERNEL); 1500 1500 if (!rx_q->rx_skbuff_dma) 1501 - return -ENOMEM; 1501 + goto err_dma; 1502 1502 1503 1503 rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, 1504 1504 sizeof(struct sk_buff *), ··· 1561 1561 sizeof(*tx_q->tx_skbuff_dma), 1562 1562 GFP_KERNEL); 1563 1563 if (!tx_q->tx_skbuff_dma) 1564 - return -ENOMEM; 1564 + goto err_dma; 1565 1565 1566 1566 tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, 1567 1567 sizeof(struct sk_buff *), 1568 1568 GFP_KERNEL); 1569 1569 if (!tx_q->tx_skbuff) 1570 - goto err_dma_buffers; 1570 + goto err_dma; 1571 1571 1572 1572 if (priv->extend_desc) { 1573 1573 tx_q->dma_etx = dma_zalloc_coherent(priv->device, ··· 1577 1577 &tx_q->dma_tx_phy, 1578 1578 GFP_KERNEL); 1579 1579 if (!tx_q->dma_etx) 1580 - goto err_dma_buffers; 1580 + goto err_dma; 1581 1581 } else { 1582 1582 tx_q->dma_tx = dma_zalloc_coherent(priv->device, 1583 1583 DMA_TX_SIZE * ··· 1586 1586 &tx_q->dma_tx_phy, 1587 1587 GFP_KERNEL); 1588 1588 if (!tx_q->dma_tx) 1589 - goto err_dma_buffers; 1589 + goto err_dma; 1590 1590 } 1591 1591 } 1592 1592 1593 1593 return 0; 1594 1594 1595 - err_dma_buffers: 1595 + err_dma: 1596 1596 free_dma_tx_desc_resources(priv); 1597 1597 1598 1598 return ret;
+9 -9
drivers/net/tap.c
··· 106 106 struct rcu_head rcu; 107 107 dev_t major; 108 108 struct idr minor_idr; 109 - struct mutex minor_lock; 109 + spinlock_t minor_lock; 110 110 const char *device_name; 111 111 struct list_head next; 112 112 }; ··· 416 416 goto unlock; 417 417 } 418 418 419 - mutex_lock(&tap_major->minor_lock); 420 - retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL); 419 + spin_lock(&tap_major->minor_lock); 420 + retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC); 421 421 if (retval >= 0) { 422 422 tap->minor = retval; 423 423 } else if (retval == -ENOSPC) { 424 424 netdev_err(tap->dev, "Too many tap devices\n"); 425 425 retval = -EINVAL; 426 426 } 427 - mutex_unlock(&tap_major->minor_lock); 427 + spin_unlock(&tap_major->minor_lock); 428 428 429 429 unlock: 430 430 rcu_read_unlock(); ··· 442 442 goto unlock; 443 443 } 444 444 445 - mutex_lock(&tap_major->minor_lock); 445 + spin_lock(&tap_major->minor_lock); 446 446 if (tap->minor) { 447 447 idr_remove(&tap_major->minor_idr, tap->minor); 448 448 tap->minor = 0; 449 449 } 450 - mutex_unlock(&tap_major->minor_lock); 450 + spin_unlock(&tap_major->minor_lock); 451 451 452 452 unlock: 453 453 rcu_read_unlock(); ··· 467 467 goto unlock; 468 468 } 469 469 470 - mutex_lock(&tap_major->minor_lock); 470 + spin_lock(&tap_major->minor_lock); 471 471 tap = idr_find(&tap_major->minor_idr, minor); 472 472 if (tap) { 473 473 dev = tap->dev; 474 474 dev_hold(dev); 475 475 } 476 - mutex_unlock(&tap_major->minor_lock); 476 + spin_unlock(&tap_major->minor_lock); 477 477 478 478 unlock: 479 479 rcu_read_unlock(); ··· 1244 1244 tap_major->major = MAJOR(major); 1245 1245 1246 1246 idr_init(&tap_major->minor_idr); 1247 - mutex_init(&tap_major->minor_lock); 1247 + spin_lock_init(&tap_major->minor_lock); 1248 1248 1249 1249 tap_major->device_name = device_name; 1250 1250
+5
drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
··· 4934 4934 cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, 4935 4935 GFP_KERNEL); 4936 4936 } else if (ieee80211_is_action(mgmt->frame_control)) { 4937 + if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) { 4938 + brcmf_err("invalid action frame length\n"); 4939 + err = -EINVAL; 4940 + goto exit; 4941 + } 4937 4942 af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); 4938 4943 if (af_params == NULL) { 4939 4944 brcmf_err("unable to allocate frame\n");
+3
include/net/sock.h
··· 246 246 * @sk_policy: flow policy 247 247 * @sk_receive_queue: incoming packets 248 248 * @sk_wmem_alloc: transmit queue bytes committed 249 + * @sk_tsq_flags: TCP Small Queues flags 249 250 * @sk_write_queue: Packet sending queue 250 251 * @sk_omem_alloc: "o" is "option" or "other" 251 252 * @sk_wmem_queued: persistent queue size ··· 258 257 * @sk_pacing_status: Pacing status (requested, handled by sch_fq) 259 258 * @sk_max_pacing_rate: Maximum pacing rate (%SO_MAX_PACING_RATE) 260 259 * @sk_sndbuf: size of send buffer in bytes 260 + * @__sk_flags_offset: empty field used to determine location of bitfield 261 261 * @sk_padding: unused element for alignment 262 262 * @sk_no_check_tx: %SO_NO_CHECK setting, set checksum in TX packets 263 263 * @sk_no_check_rx: allow zero checksum in RX packets ··· 279 277 * @sk_drops: raw/udp drops counter 280 278 * @sk_ack_backlog: current listen backlog 281 279 * @sk_max_ack_backlog: listen backlog set in listen() 280 + * @sk_uid: user id of owner 282 281 * @sk_priority: %SO_PRIORITY setting 283 282 * @sk_type: socket type (%SOCK_STREAM, etc) 284 283 * @sk_protocol: which protocol this socket belongs in this network family
+2 -1
net/bridge/br_mdb.c
··· 323 323 __mdb_entry_to_br_ip(entry, &complete_info->ip); 324 324 mdb.obj.complete_priv = complete_info; 325 325 mdb.obj.complete = br_mdb_complete; 326 - switchdev_port_obj_add(port_dev, &mdb.obj); 326 + if (switchdev_port_obj_add(port_dev, &mdb.obj)) 327 + kfree(complete_info); 327 328 } 328 329 } else if (port_dev && type == RTM_DELMDB) { 329 330 switchdev_port_obj_del(port_dev, &mdb.obj);
+3 -3
net/core/datagram.c
··· 203 203 /** 204 204 * __skb_try_recv_datagram - Receive a datagram skbuff 205 205 * @sk: socket 206 - * @flags: MSG_ flags 206 + * @flags: MSG\_ flags 207 207 * @destructor: invoked under the receive lock on successful dequeue 208 208 * @peeked: returns non-zero if this packet has been seen before 209 209 * @off: an offset in bytes to peek skb from. Returns an offset ··· 375 375 * skb_kill_datagram - Free a datagram skbuff forcibly 376 376 * @sk: socket 377 377 * @skb: datagram skbuff 378 - * @flags: MSG_ flags 378 + * @flags: MSG\_ flags 379 379 * 380 380 * This function frees a datagram skbuff that was received by 381 381 * skb_recv_datagram. The flags argument must match the one ··· 809 809 * sequenced packet sockets providing the socket receive queue 810 810 * is only ever holding data ready to receive. 811 811 * 812 - * Note: when you _don't_ use this routine for this protocol, 812 + * Note: when you *don't* use this routine for this protocol, 813 813 * and you use a different write policy from sock_writeable() 814 814 * then please supply your own write_space callback. 815 815 */
+2 -2
net/ipv4/ipmr.c
··· 2431 2431 tableid = tb[RTA_TABLE] ? nla_get_u32(tb[RTA_TABLE]) : 0; 2432 2432 2433 2433 mrt = ipmr_get_table(net, tableid ? tableid : RT_TABLE_DEFAULT); 2434 - if (IS_ERR(mrt)) { 2435 - err = PTR_ERR(mrt); 2434 + if (!mrt) { 2435 + err = -ENOENT; 2436 2436 goto errout_free; 2437 2437 } 2438 2438
+1
samples/bpf/Makefile
··· 207 207 # useless for BPF samples. 208 208 $(obj)/%.o: $(src)/%.c 209 209 $(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) -I$(obj) \ 210 + -I$(srctree)/tools/testing/selftests/bpf/ \ 210 211 -D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \ 211 212 -Wno-compare-distinct-pointer-types \ 212 213 -Wno-gnu-variable-sized-type-not-at-end \
samples/bpf/bpf_helpers.h tools/testing/selftests/bpf/bpf_helpers.h
-1
tools/testing/selftests/bpf/Makefile
··· 37 37 38 38 %.o: %.c 39 39 $(CLANG) -I. -I./include/uapi -I../../../include/uapi \ 40 - -I../../../../samples/bpf/ \ 41 40 -Wno-compare-distinct-pointer-types \ 42 41 -O2 -target bpf -c $< -o $@
+14
tools/testing/selftests/bpf/bpf_endian.h
··· 23 23 # define __bpf_htons(x) __builtin_bswap16(x) 24 24 # define __bpf_constant_ntohs(x) ___constant_swab16(x) 25 25 # define __bpf_constant_htons(x) ___constant_swab16(x) 26 + # define __bpf_ntohl(x) __builtin_bswap32(x) 27 + # define __bpf_htonl(x) __builtin_bswap32(x) 28 + # define __bpf_constant_ntohl(x) ___constant_swab32(x) 29 + # define __bpf_constant_htonl(x) ___constant_swab32(x) 26 30 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ 27 31 # define __bpf_ntohs(x) (x) 28 32 # define __bpf_htons(x) (x) 29 33 # define __bpf_constant_ntohs(x) (x) 30 34 # define __bpf_constant_htons(x) (x) 35 + # define __bpf_ntohl(x) (x) 36 + # define __bpf_htonl(x) (x) 37 + # define __bpf_constant_ntohl(x) (x) 38 + # define __bpf_constant_htonl(x) (x) 31 39 #else 32 40 # error "Fix your compiler's __BYTE_ORDER__?!" 33 41 #endif ··· 46 38 #define bpf_ntohs(x) \ 47 39 (__builtin_constant_p(x) ? \ 48 40 __bpf_constant_ntohs(x) : __bpf_ntohs(x)) 41 + #define bpf_htonl(x) \ 42 + (__builtin_constant_p(x) ? \ 43 + __bpf_constant_htonl(x) : __bpf_htonl(x)) 44 + #define bpf_ntohl(x) \ 45 + (__builtin_constant_p(x) ? \ 46 + __bpf_constant_ntohl(x) : __bpf_ntohl(x)) 49 47 50 48 #endif /* __BPF_ENDIAN__ */