Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: annotate writes on dev->mtu from ndo_change_mtu()

Simon reported that ndo_change_mtu() methods were never
updated to use WRITE_ONCE(dev->mtu, new_mtu) as hinted
in commit 501a90c94510 ("inet: protect against too small
mtu values.")

We read dev->mtu without holding RTNL in many places,
with READ_ONCE() annotations.

It is time to take care of ndo_change_mtu() methods
to use corresponding WRITE_ONCE()

Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Simon Horman <horms@kernel.org>
Closes: https://lore.kernel.org/netdev/20240505144608.GB67882@kernel.org/
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
Reviewed-by: Simon Horman <horms@kernel.org>
Acked-by: Shannon Nelson <shannon.nelson@amd.com>
Link: https://lore.kernel.org/r/20240506102812.3025432-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

Authored by Eric Dumazet and committed by Jakub Kicinski.
1eb2cded feb8c2b7

+174 -173
+2 -2
drivers/infiniband/ulp/ipoib/ipoib_main.c
··· 238 238 ipoib_warn(priv, "mtu > %d will cause multicast packet drops.\n", 239 239 priv->mcast_mtu); 240 240 241 - dev->mtu = new_mtu; 241 + WRITE_ONCE(dev->mtu, new_mtu); 242 242 return 0; 243 243 } 244 244 ··· 265 265 if (carrier_status) 266 266 netif_carrier_on(dev); 267 267 } else { 268 - dev->mtu = new_mtu; 268 + WRITE_ONCE(dev->mtu, new_mtu); 269 269 } 270 270 271 271 return ret;
+1 -1
drivers/net/bonding/bond_main.c
··· 4710 4710 } 4711 4711 } 4712 4712 4713 - bond_dev->mtu = new_mtu; 4713 + WRITE_ONCE(bond_dev->mtu, new_mtu); 4714 4714 4715 4715 return 0; 4716 4716
+1 -1
drivers/net/can/dev/dev.c
··· 338 338 return -EINVAL; 339 339 } 340 340 341 - dev->mtu = new_mtu; 341 + WRITE_ONCE(dev->mtu, new_mtu); 342 342 return 0; 343 343 } 344 344 EXPORT_SYMBOL_GPL(can_change_mtu);
+1 -1
drivers/net/can/vcan.c
··· 140 140 !can_is_canxl_dev_mtu(new_mtu)) 141 141 return -EINVAL; 142 142 143 - dev->mtu = new_mtu; 143 + WRITE_ONCE(dev->mtu, new_mtu); 144 144 return 0; 145 145 } 146 146
+1 -1
drivers/net/can/vxcan.c
··· 135 135 !can_is_canxl_dev_mtu(new_mtu)) 136 136 return -EINVAL; 137 137 138 - dev->mtu = new_mtu; 138 + WRITE_ONCE(dev->mtu, new_mtu); 139 139 return 0; 140 140 } 141 141
+1 -1
drivers/net/ethernet/agere/et131x.c
··· 3852 3852 3853 3853 et131x_disable_txrx(netdev); 3854 3854 3855 - netdev->mtu = new_mtu; 3855 + WRITE_ONCE(netdev->mtu, new_mtu); 3856 3856 3857 3857 et131x_adapter_memory_free(adapter); 3858 3858
+1 -1
drivers/net/ethernet/alteon/acenic.c
··· 2539 2539 struct ace_regs __iomem *regs = ap->regs; 2540 2540 2541 2541 writel(new_mtu + ETH_HLEN + 4, &regs->IfMtu); 2542 - dev->mtu = new_mtu; 2542 + WRITE_ONCE(dev->mtu, new_mtu); 2543 2543 2544 2544 if (new_mtu > ACE_STD_MTU) { 2545 2545 if (!(ap->jumbo)) {
+1 -1
drivers/net/ethernet/altera/altera_tse_main.c
··· 788 788 return -EBUSY; 789 789 } 790 790 791 - dev->mtu = new_mtu; 791 + WRITE_ONCE(dev->mtu, new_mtu); 792 792 netdev_update_features(dev); 793 793 794 794 return 0;
+1 -1
drivers/net/ethernet/amazon/ena/ena_netdev.c
··· 104 104 if (!ret) { 105 105 netif_dbg(adapter, drv, dev, "Set MTU to %d\n", new_mtu); 106 106 update_rx_ring_mtu(adapter, new_mtu); 107 - dev->mtu = new_mtu; 107 + WRITE_ONCE(dev->mtu, new_mtu); 108 108 } else { 109 109 netif_err(adapter, drv, dev, "Failed to set MTU to %d\n", 110 110 new_mtu);
+3 -3
drivers/net/ethernet/amd/amd8111e.c
··· 1520 1520 1521 1521 if (!netif_running(dev)) { 1522 1522 /* new_mtu will be used 1523 - * when device starts netxt time 1523 + * when device starts next time 1524 1524 */ 1525 - dev->mtu = new_mtu; 1525 + WRITE_ONCE(dev->mtu, new_mtu); 1526 1526 return 0; 1527 1527 } 1528 1528 ··· 1531 1531 /* stop the chip */ 1532 1532 writel(RUN, lp->mmio + CMD0); 1533 1533 1534 - dev->mtu = new_mtu; 1534 + WRITE_ONCE(dev->mtu, new_mtu); 1535 1535 1536 1536 err = amd8111e_restart(dev); 1537 1537 spin_unlock_irq(&lp->lock);
+1 -1
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
··· 2070 2070 return ret; 2071 2071 2072 2072 pdata->rx_buf_size = ret; 2073 - netdev->mtu = mtu; 2073 + WRITE_ONCE(netdev->mtu, mtu); 2074 2074 2075 2075 xgbe_restart_dev(pdata); 2076 2076
+1 -1
drivers/net/ethernet/apm/xgene/xgene_enet_main.c
··· 1530 1530 frame_size = (new_mtu > ETH_DATA_LEN) ? (new_mtu + 18) : 0x600; 1531 1531 1532 1532 xgene_enet_close(ndev); 1533 - ndev->mtu = new_mtu; 1533 + WRITE_ONCE(ndev->mtu, new_mtu); 1534 1534 pdata->mac_ops->set_framesize(pdata, frame_size); 1535 1535 xgene_enet_open(ndev); 1536 1536
+1 -1
drivers/net/ethernet/aquantia/atlantic/aq_main.c
··· 146 146 147 147 if (err < 0) 148 148 goto err_exit; 149 - ndev->mtu = new_mtu; 149 + WRITE_ONCE(ndev->mtu, new_mtu); 150 150 151 151 err_exit: 152 152 return err;
+1 -1
drivers/net/ethernet/atheros/ag71xx.c
··· 1788 1788 { 1789 1789 struct ag71xx *ag = netdev_priv(ndev); 1790 1790 1791 - ndev->mtu = new_mtu; 1791 + WRITE_ONCE(ndev->mtu, new_mtu); 1792 1792 ag71xx_wr(ag, AG71XX_REG_MAC_MFL, 1793 1793 ag71xx_max_frame_len(ndev->mtu)); 1794 1794
+1 -1
drivers/net/ethernet/atheros/alx/main.c
··· 1176 1176 struct alx_priv *alx = netdev_priv(netdev); 1177 1177 int max_frame = ALX_MAX_FRAME_LEN(mtu); 1178 1178 1179 - netdev->mtu = mtu; 1179 + WRITE_ONCE(netdev->mtu, mtu); 1180 1180 alx->hw.mtu = mtu; 1181 1181 alx->rxbuf_size = max(max_frame, ALX_DEF_RXBUF_SIZE); 1182 1182 netdev_update_features(netdev);
+1 -1
drivers/net/ethernet/atheros/atl1c/atl1c_main.c
··· 561 561 if (netif_running(netdev)) { 562 562 while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) 563 563 msleep(1); 564 - netdev->mtu = new_mtu; 564 + WRITE_ONCE(netdev->mtu, new_mtu); 565 565 adapter->hw.max_frame_size = new_mtu; 566 566 atl1c_set_rxbufsize(adapter, netdev); 567 567 atl1c_down(adapter);
+1 -1
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
··· 428 428 if (netif_running(netdev)) { 429 429 while (test_and_set_bit(__AT_RESETTING, &adapter->flags)) 430 430 msleep(1); 431 - netdev->mtu = new_mtu; 431 + WRITE_ONCE(netdev->mtu, new_mtu); 432 432 adapter->hw.max_frame_size = new_mtu; 433 433 adapter->hw.rx_jumbo_th = (max_frame + 7) >> 3; 434 434 atl1e_down(adapter);
+1 -1
drivers/net/ethernet/atheros/atlx/atl1.c
··· 2687 2687 adapter->rx_buffer_len = (max_frame + 7) & ~7; 2688 2688 adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8; 2689 2689 2690 - netdev->mtu = new_mtu; 2690 + WRITE_ONCE(netdev->mtu, new_mtu); 2691 2691 if (netif_running(netdev)) { 2692 2692 atl1_down(adapter); 2693 2693 atl1_up(adapter);
+1 -1
drivers/net/ethernet/atheros/atlx/atl2.c
··· 905 905 struct atl2_hw *hw = &adapter->hw; 906 906 907 907 /* set MTU */ 908 - netdev->mtu = new_mtu; 908 + WRITE_ONCE(netdev->mtu, new_mtu); 909 909 hw->max_frame_size = new_mtu; 910 910 ATL2_WRITE_REG(hw, REG_MTU, new_mtu + ETH_HLEN + 911 911 VLAN_HLEN + ETH_FCS_LEN);
+2 -2
drivers/net/ethernet/broadcom/b44.c
··· 1042 1042 /* We'll just catch it later when the 1043 1043 * device is up'd. 1044 1044 */ 1045 - dev->mtu = new_mtu; 1045 + WRITE_ONCE(dev->mtu, new_mtu); 1046 1046 return 0; 1047 1047 } 1048 1048 1049 1049 spin_lock_irq(&bp->lock); 1050 1050 b44_halt(bp); 1051 - dev->mtu = new_mtu; 1051 + WRITE_ONCE(dev->mtu, new_mtu); 1052 1052 b44_init_rings(bp); 1053 1053 b44_init_hw(bp, B44_FULL_RESET); 1054 1054 spin_unlock_irq(&bp->lock);
+1 -1
drivers/net/ethernet/broadcom/bcm63xx_enet.c
··· 1652 1652 priv->rx_frag_size = SKB_DATA_ALIGN(priv->rx_buf_offset + priv->rx_buf_size) + 1653 1653 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 1654 1654 1655 - dev->mtu = new_mtu; 1655 + WRITE_ONCE(dev->mtu, new_mtu); 1656 1656 return 0; 1657 1657 } 1658 1658
+1 -1
drivers/net/ethernet/broadcom/bnx2.c
··· 7912 7912 { 7913 7913 struct bnx2 *bp = netdev_priv(dev); 7914 7914 7915 - dev->mtu = new_mtu; 7915 + WRITE_ONCE(dev->mtu, new_mtu); 7916 7916 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size, 7917 7917 false); 7918 7918 }
+1 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
··· 4902 4902 * because the actual alloc size is 4903 4903 * only updated as part of load 4904 4904 */ 4905 - dev->mtu = new_mtu; 4905 + WRITE_ONCE(dev->mtu, new_mtu); 4906 4906 4907 4907 if (!bnx2x_mtu_allows_gro(new_mtu)) 4908 4908 dev->features &= ~NETIF_F_GRO_HW;
+1 -1
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 14280 14280 if (netif_running(dev)) 14281 14281 bnxt_close_nic(bp, true, false); 14282 14282 14283 - dev->mtu = new_mtu; 14283 + WRITE_ONCE(dev->mtu, new_mtu); 14284 14284 bnxt_set_ring_params(bp); 14285 14285 14286 14286 if (netif_running(dev))
+1 -1
drivers/net/ethernet/broadcom/tg3.c
··· 14295 14295 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp, 14296 14296 int new_mtu) 14297 14297 { 14298 - dev->mtu = new_mtu; 14298 + WRITE_ONCE(dev->mtu, new_mtu); 14299 14299 14300 14300 if (new_mtu > ETH_DATA_LEN) { 14301 14301 if (tg3_flag(tp, 5780_CLASS)) {
+1 -1
drivers/net/ethernet/brocade/bna/bnad.c
··· 3276 3276 mutex_lock(&bnad->conf_mutex); 3277 3277 3278 3278 mtu = netdev->mtu; 3279 - netdev->mtu = new_mtu; 3279 + WRITE_ONCE(netdev->mtu, new_mtu); 3280 3280 3281 3281 frame = BNAD_FRAME_SIZE(mtu); 3282 3282 new_frame = BNAD_FRAME_SIZE(new_mtu);
+1 -1
drivers/net/ethernet/cadence/macb_main.c
··· 3022 3022 if (netif_running(dev)) 3023 3023 return -EBUSY; 3024 3024 3025 - dev->mtu = new_mtu; 3025 + WRITE_ONCE(dev->mtu, new_mtu); 3026 3026 3027 3027 return 0; 3028 3028 }
+1 -1
drivers/net/ethernet/calxeda/xgmac.c
··· 1358 1358 1359 1359 /* Bring interface down, change mtu and bring interface back up */ 1360 1360 xgmac_stop(dev); 1361 - dev->mtu = new_mtu; 1361 + WRITE_ONCE(dev->mtu, new_mtu); 1362 1362 return xgmac_open(dev); 1363 1363 } 1364 1364
+1 -1
drivers/net/ethernet/cavium/liquidio/lio_core.c
··· 1262 1262 return -EINVAL; 1263 1263 } 1264 1264 1265 - netdev->mtu = new_mtu; 1265 + WRITE_ONCE(netdev->mtu, new_mtu); 1266 1266 lio->mtu = new_mtu; 1267 1267 1268 1268 WRITE_ONCE(sc->caller_is_done, true);
+1 -1
drivers/net/ethernet/cavium/liquidio/lio_vf_rep.c
··· 218 218 return -EIO; 219 219 } 220 220 221 - ndev->mtu = new_mtu; 221 + WRITE_ONCE(ndev->mtu, new_mtu); 222 222 223 223 return 0; 224 224 }
+1 -1
drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
··· 649 649 struct octeon_mgmt *p = netdev_priv(netdev); 650 650 int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN; 651 651 652 - netdev->mtu = new_mtu; 652 + WRITE_ONCE(netdev->mtu, new_mtu); 653 653 654 654 /* HW lifts the limit if the frame is VLAN tagged 655 655 * (+4 bytes per each tag, up to two tags)
+1 -1
drivers/net/ethernet/cavium/thunder/nicvf_main.c
··· 1589 1589 return -EINVAL; 1590 1590 } 1591 1591 1592 - netdev->mtu = new_mtu; 1592 + WRITE_ONCE(netdev->mtu, new_mtu); 1593 1593 1594 1594 if (!netif_running(netdev)) 1595 1595 return 0;
+1 -1
drivers/net/ethernet/chelsio/cxgb/cxgb2.c
··· 844 844 return -EOPNOTSUPP; 845 845 if ((ret = mac->ops->set_mtu(mac, new_mtu))) 846 846 return ret; 847 - dev->mtu = new_mtu; 847 + WRITE_ONCE(dev->mtu, new_mtu); 848 848 return 0; 849 849 } 850 850
+1 -1
drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
··· 2559 2559 2560 2560 if ((ret = t3_mac_set_mtu(&pi->mac, new_mtu))) 2561 2561 return ret; 2562 - dev->mtu = new_mtu; 2562 + WRITE_ONCE(dev->mtu, new_mtu); 2563 2563 init_port_mtus(adapter); 2564 2564 if (adapter->params.rev == 0 && offload_running(adapter)) 2565 2565 t3_load_mtus(adapter, adapter->params.mtus,
+1 -1
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
··· 3180 3180 ret = t4_set_rxmode(pi->adapter, pi->adapter->mbox, pi->viid, 3181 3181 pi->viid_mirror, new_mtu, -1, -1, -1, -1, true); 3182 3182 if (!ret) 3183 - dev->mtu = new_mtu; 3183 + WRITE_ONCE(dev->mtu, new_mtu); 3184 3184 return ret; 3185 3185 } 3186 3186
+1 -1
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
··· 1169 1169 ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu, 1170 1170 -1, -1, -1, -1, true); 1171 1171 if (!ret) 1172 - dev->mtu = new_mtu; 1172 + WRITE_ONCE(dev->mtu, new_mtu); 1173 1173 return ret; 1174 1174 } 1175 1175
+1 -1
drivers/net/ethernet/cisco/enic/enic_main.c
··· 2027 2027 return err; 2028 2028 } 2029 2029 2030 - netdev->mtu = new_mtu; 2030 + WRITE_ONCE(netdev->mtu, new_mtu); 2031 2031 2032 2032 if (running) { 2033 2033 err = enic_open(netdev);
+1 -1
drivers/net/ethernet/cortina/gemini.c
··· 1978 1978 1979 1979 gmac_disable_tx_rx(netdev); 1980 1980 1981 - netdev->mtu = new_mtu; 1981 + WRITE_ONCE(netdev->mtu, new_mtu); 1982 1982 gmac_update_config0_reg(netdev, max_len << CONFIG0_MAXLEN_SHIFT, 1983 1983 CONFIG0_MAXLEN_MASK); 1984 1984
+1 -1
drivers/net/ethernet/dlink/sundance.c
··· 708 708 { 709 709 if (netif_running(dev)) 710 710 return -EBUSY; 711 - dev->mtu = new_mtu; 711 + WRITE_ONCE(dev->mtu, new_mtu); 712 712 return 0; 713 713 } 714 714
+1 -1
drivers/net/ethernet/faraday/ftmac100.c
··· 1092 1092 } 1093 1093 iowrite32(maccr, priv->base + FTMAC100_OFFSET_MACCR); 1094 1094 1095 - netdev->mtu = mtu; 1095 + WRITE_ONCE(netdev->mtu, mtu); 1096 1096 1097 1097 return 0; 1098 1098 }
+1 -1
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2995 2995 if (priv->xdp_prog && !xdp_validate_mtu(priv, new_mtu)) 2996 2996 return -EINVAL; 2997 2997 2998 - net_dev->mtu = new_mtu; 2998 + WRITE_ONCE(net_dev->mtu, new_mtu); 2999 2999 return 0; 3000 3000 } 3001 3001
+1 -1
drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c
··· 2698 2698 return err; 2699 2699 2700 2700 out: 2701 - dev->mtu = new_mtu; 2701 + WRITE_ONCE(dev->mtu, new_mtu); 2702 2702 return 0; 2703 2703 } 2704 2704
+1 -1
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
··· 590 590 return err; 591 591 } 592 592 593 - netdev->mtu = mtu; 593 + WRITE_ONCE(netdev->mtu, mtu); 594 594 return 0; 595 595 } 596 596
+1 -1
drivers/net/ethernet/freescale/gianfar.c
··· 2026 2026 if (dev->flags & IFF_UP) 2027 2027 stop_gfar(dev); 2028 2028 2029 - dev->mtu = new_mtu; 2029 + WRITE_ONCE(dev->mtu, new_mtu); 2030 2030 2031 2031 if (dev->flags & IFF_UP) 2032 2032 startup_gfar(dev);
+1 -1
drivers/net/ethernet/fungible/funeth/funeth_main.c
··· 927 927 928 928 rc = fun_port_write_cmd(fp, FUN_ADMIN_PORT_KEY_MTU, new_mtu); 929 929 if (!rc) 930 - netdev->mtu = new_mtu; 930 + WRITE_ONCE(netdev->mtu, new_mtu); 931 931 return rc; 932 932 } 933 933
+1 -1
drivers/net/ethernet/hisilicon/hns/hns_enet.c
··· 1777 1777 } 1778 1778 1779 1779 /* finally, set new mtu to netdevice */ 1780 - ndev->mtu = new_mtu; 1780 + WRITE_ONCE(ndev->mtu, new_mtu); 1781 1781 1782 1782 out: 1783 1783 if (if_running) {
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
··· 2761 2761 netdev_err(netdev, "failed to change MTU in hardware %d\n", 2762 2762 ret); 2763 2763 else 2764 - netdev->mtu = new_mtu; 2764 + WRITE_ONCE(netdev->mtu, new_mtu); 2765 2765 2766 2766 return ret; 2767 2767 }
+1 -1
drivers/net/ethernet/huawei/hinic/hinic_main.c
··· 581 581 if (err) 582 582 netif_err(nic_dev, drv, netdev, "Failed to set port mtu\n"); 583 583 else 584 - netdev->mtu = new_mtu; 584 + WRITE_ONCE(netdev->mtu, new_mtu); 585 585 586 586 return err; 587 587 }
+2 -2
drivers/net/ethernet/ibm/emac/core.c
··· 1098 1098 /* This is to prevent starting RX channel in emac_rx_enable() */ 1099 1099 set_bit(MAL_COMMAC_RX_STOPPED, &dev->commac.flags); 1100 1100 1101 - dev->ndev->mtu = new_mtu; 1101 + WRITE_ONCE(dev->ndev->mtu, new_mtu); 1102 1102 emac_full_tx_reset(dev); 1103 1103 } 1104 1104 ··· 1130 1130 } 1131 1131 1132 1132 if (!ret) { 1133 - ndev->mtu = new_mtu; 1133 + WRITE_ONCE(ndev->mtu, new_mtu); 1134 1134 dev->rx_skb_size = emac_rx_skb_size(new_mtu); 1135 1135 dev->rx_sync_size = emac_rx_sync_size(new_mtu); 1136 1136 }
+1 -1
drivers/net/ethernet/ibm/ibmveth.c
··· 1537 1537 adapter->rx_buff_pool[i].active = 1; 1538 1538 1539 1539 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) { 1540 - dev->mtu = new_mtu; 1540 + WRITE_ONCE(dev->mtu, new_mtu); 1541 1541 vio_cmo_set_dev_desired(viodev, 1542 1542 ibmveth_get_desired_dma 1543 1543 (viodev));
+1 -1
drivers/net/ethernet/intel/e1000/e1000_main.c
··· 3569 3569 3570 3570 netdev_dbg(netdev, "changing MTU from %d to %d\n", 3571 3571 netdev->mtu, new_mtu); 3572 - netdev->mtu = new_mtu; 3572 + WRITE_ONCE(netdev->mtu, new_mtu); 3573 3573 3574 3574 if (netif_running(netdev)) 3575 3575 e1000_up(adapter);
+1 -1
drivers/net/ethernet/intel/e1000e/netdev.c
··· 6038 6038 adapter->max_frame_size = max_frame; 6039 6039 netdev_dbg(netdev, "changing MTU from %d to %d\n", 6040 6040 netdev->mtu, new_mtu); 6041 - netdev->mtu = new_mtu; 6041 + WRITE_ONCE(netdev->mtu, new_mtu); 6042 6042 6043 6043 pm_runtime_get_sync(netdev->dev.parent); 6044 6044
+1 -1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 2961 2961 2962 2962 netdev_dbg(netdev, "changing MTU from %d to %d\n", 2963 2963 netdev->mtu, new_mtu); 2964 - netdev->mtu = new_mtu; 2964 + WRITE_ONCE(netdev->mtu, new_mtu); 2965 2965 if (netif_running(netdev)) 2966 2966 i40e_vsi_reinit_locked(vsi); 2967 2967 set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state);
+1 -1
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 4296 4296 4297 4297 netdev_dbg(netdev, "changing MTU from %d to %d\n", 4298 4298 netdev->mtu, new_mtu); 4299 - netdev->mtu = new_mtu; 4299 + WRITE_ONCE(netdev->mtu, new_mtu); 4300 4300 4301 4301 if (netif_running(netdev)) { 4302 4302 iavf_schedule_reset(adapter, IAVF_FLAG_RESET_NEEDED);
+1 -1
drivers/net/ethernet/intel/ice/ice_main.c
··· 7770 7770 return -EBUSY; 7771 7771 } 7772 7772 7773 - netdev->mtu = (unsigned int)new_mtu; 7773 + WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); 7774 7774 err = ice_down_up(vsi); 7775 7775 if (err) 7776 7776 return err;
+1 -1
drivers/net/ethernet/intel/idpf/idpf_lib.c
··· 2234 2234 idpf_vport_ctrl_lock(netdev); 2235 2235 vport = idpf_netdev_to_vport(netdev); 2236 2236 2237 - netdev->mtu = new_mtu; 2237 + WRITE_ONCE(netdev->mtu, new_mtu); 2238 2238 2239 2239 err = idpf_initiate_soft_reset(vport, IDPF_SR_MTU_CHANGE); 2240 2240
+1 -1
drivers/net/ethernet/intel/igb/igb_main.c
··· 6641 6641 6642 6642 netdev_dbg(netdev, "changing MTU from %d to %d\n", 6643 6643 netdev->mtu, new_mtu); 6644 - netdev->mtu = new_mtu; 6644 + WRITE_ONCE(netdev->mtu, new_mtu); 6645 6645 6646 6646 if (netif_running(netdev)) 6647 6647 igb_up(adapter);
+1 -1
drivers/net/ethernet/intel/igbvf/netdev.c
··· 2434 2434 2435 2435 netdev_dbg(netdev, "changing MTU from %d to %d\n", 2436 2436 netdev->mtu, new_mtu); 2437 - netdev->mtu = new_mtu; 2437 + WRITE_ONCE(netdev->mtu, new_mtu); 2438 2438 2439 2439 if (netif_running(netdev)) 2440 2440 igbvf_up(adapter);
+1 -1
drivers/net/ethernet/intel/igc/igc_main.c
··· 5275 5275 igc_down(adapter); 5276 5276 5277 5277 netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); 5278 - netdev->mtu = new_mtu; 5278 + WRITE_ONCE(netdev->mtu, new_mtu); 5279 5279 5280 5280 if (netif_running(netdev)) 5281 5281 igc_up(adapter);
+1 -1
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 6847 6847 netdev->mtu, new_mtu); 6848 6848 6849 6849 /* must set new MTU before calling down or up */ 6850 - netdev->mtu = new_mtu; 6850 + WRITE_ONCE(netdev->mtu, new_mtu); 6851 6851 6852 6852 if (netif_running(netdev)) 6853 6853 ixgbe_reinit_locked(adapter);
+1 -1
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
··· 4292 4292 netdev->mtu, new_mtu); 4293 4293 4294 4294 /* must set new MTU before calling down or up */ 4295 - netdev->mtu = new_mtu; 4295 + WRITE_ONCE(netdev->mtu, new_mtu); 4296 4296 4297 4297 if (netif_running(netdev)) 4298 4298 ixgbevf_reinit_locked(adapter);
+1 -1
drivers/net/ethernet/jme.c
··· 2301 2301 { 2302 2302 struct jme_adapter *jme = netdev_priv(netdev); 2303 2303 2304 - netdev->mtu = new_mtu; 2304 + WRITE_ONCE(netdev->mtu, new_mtu); 2305 2305 netdev_update_features(netdev); 2306 2306 2307 2307 jme_restart_rx_engine(jme);
+1 -1
drivers/net/ethernet/lantiq_etop.c
··· 519 519 struct ltq_etop_priv *priv = netdev_priv(dev); 520 520 unsigned long flags; 521 521 522 - dev->mtu = new_mtu; 522 + WRITE_ONCE(dev->mtu, new_mtu); 523 523 524 524 spin_lock_irqsave(&priv->lock, flags); 525 525 ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
+2 -2
drivers/net/ethernet/lantiq_xrx200.c
··· 419 419 int curr_desc; 420 420 int ret = 0; 421 421 422 - net_dev->mtu = new_mtu; 422 + WRITE_ONCE(net_dev->mtu, new_mtu); 423 423 priv->rx_buf_size = xrx200_buffer_size(new_mtu); 424 424 priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size); 425 425 ··· 440 440 buff = ch_rx->rx_buff[ch_rx->dma.desc]; 441 441 ret = xrx200_alloc_buf(ch_rx, netdev_alloc_frag); 442 442 if (ret) { 443 - net_dev->mtu = old_mtu; 443 + WRITE_ONCE(net_dev->mtu, old_mtu); 444 444 priv->rx_buf_size = xrx200_buffer_size(old_mtu); 445 445 priv->rx_skb_size = xrx200_skb_size(priv->rx_buf_size); 446 446 break;
+1 -1
drivers/net/ethernet/marvell/mv643xx_eth.c
··· 2562 2562 { 2563 2563 struct mv643xx_eth_private *mp = netdev_priv(dev); 2564 2564 2565 - dev->mtu = new_mtu; 2565 + WRITE_ONCE(dev->mtu, new_mtu); 2566 2566 mv643xx_eth_recalc_skb_size(mp); 2567 2567 tx_set_rate(mp, 1000000000, 16777216); 2568 2568
+1 -1
drivers/net/ethernet/marvell/mvneta.c
··· 3861 3861 return -EINVAL; 3862 3862 } 3863 3863 3864 - dev->mtu = mtu; 3864 + WRITE_ONCE(dev->mtu, mtu); 3865 3865 3866 3866 if (!netif_running(dev)) { 3867 3867 if (pp->bm_priv)
+1 -1
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
··· 1375 1375 } 1376 1376 1377 1377 out_set: 1378 - dev->mtu = mtu; 1378 + WRITE_ONCE(dev->mtu, mtu); 1379 1379 dev->wanted_features = dev->features; 1380 1380 1381 1381 netdev_update_features(dev);
+1 -1
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
··· 1096 1096 true); 1097 1097 if (!err) { 1098 1098 oct->link_info.mtu = new_mtu; 1099 - netdev->mtu = new_mtu; 1099 + WRITE_ONCE(netdev->mtu, new_mtu); 1100 1100 } 1101 1101 1102 1102 return err;
+1 -1
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
··· 881 881 err = octep_vf_mbox_set_mtu(oct, new_mtu); 882 882 if (!err) { 883 883 oct->link_info.mtu = new_mtu; 884 - netdev->mtu = new_mtu; 884 + WRITE_ONCE(netdev->mtu, new_mtu); 885 885 } 886 886 return err; 887 887 }
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
··· 67 67 68 68 netdev_info(netdev, "Changing MTU from %d to %d\n", 69 69 netdev->mtu, new_mtu); 70 - netdev->mtu = new_mtu; 70 + WRITE_ONCE(netdev->mtu, new_mtu); 71 71 72 72 if (if_up) 73 73 err = otx2_open(netdev);
+1 -1
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
··· 456 456 457 457 netdev_info(netdev, "Changing MTU from %d to %d\n", 458 458 netdev->mtu, new_mtu); 459 - netdev->mtu = new_mtu; 459 + WRITE_ONCE(netdev->mtu, new_mtu); 460 460 461 461 if (if_up) 462 462 err = otx2vf_open(netdev);
+1 -1
drivers/net/ethernet/marvell/prestera/prestera_main.c
··· 489 489 if (err) 490 490 return err; 491 491 492 - dev->mtu = mtu; 492 + WRITE_ONCE(dev->mtu, mtu); 493 493 494 494 return 0; 495 495 }
+1 -1
drivers/net/ethernet/marvell/pxa168_eth.c
··· 1188 1188 { 1189 1189 struct pxa168_eth_private *pep = netdev_priv(dev); 1190 1190 1191 - dev->mtu = mtu; 1191 + WRITE_ONCE(dev->mtu, mtu); 1192 1192 set_port_config_ext(pep); 1193 1193 1194 1194 if (!netif_running(dev))
+2 -2
drivers/net/ethernet/marvell/skge.c
··· 2905 2905 int err; 2906 2906 2907 2907 if (!netif_running(dev)) { 2908 - dev->mtu = new_mtu; 2908 + WRITE_ONCE(dev->mtu, new_mtu); 2909 2909 return 0; 2910 2910 } 2911 2911 2912 2912 skge_down(dev); 2913 2913 2914 - dev->mtu = new_mtu; 2914 + WRITE_ONCE(dev->mtu, new_mtu); 2915 2915 2916 2916 err = skge_up(dev); 2917 2917 if (err)
+2 -2
drivers/net/ethernet/marvell/sky2.c
··· 2384 2384 u32 imask; 2385 2385 2386 2386 if (!netif_running(dev)) { 2387 - dev->mtu = new_mtu; 2387 + WRITE_ONCE(dev->mtu, new_mtu); 2388 2388 netdev_update_features(dev); 2389 2389 return 0; 2390 2390 } ··· 2407 2407 sky2_rx_stop(sky2); 2408 2408 sky2_rx_clean(sky2); 2409 2409 2410 - dev->mtu = new_mtu; 2410 + WRITE_ONCE(dev->mtu, new_mtu); 2411 2411 netdev_update_features(dev); 2412 2412 2413 2413 mode = DATA_BLIND_VAL(DATA_BLIND_DEF) | GM_SMOD_VLAN_ENA;
+1 -1
drivers/net/ethernet/mediatek/mtk_eth_soc.c
··· 4055 4055 } 4056 4056 4057 4057 mtk_set_mcr_max_rx(mac, length); 4058 - dev->mtu = new_mtu; 4058 + WRITE_ONCE(dev->mtu, new_mtu); 4059 4059 4060 4060 return 0; 4061 4061 }
+2 -2
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
··· 1649 1649 sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES); 1650 1650 1651 1651 /* Calculate Rx buf size */ 1652 - dev->mtu = min(dev->mtu, priv->max_mtu); 1652 + WRITE_ONCE(dev->mtu, min(dev->mtu, priv->max_mtu)); 1653 1653 mlx4_en_calc_rx_buf(dev); 1654 1654 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size); 1655 1655 ··· 2394 2394 !mlx4_en_check_xdp_mtu(dev, new_mtu)) 2395 2395 return -EOPNOTSUPP; 2396 2396 2397 - dev->mtu = new_mtu; 2397 + WRITE_ONCE(dev->mtu, new_mtu); 2398 2398 2399 2399 if (netif_running(dev)) { 2400 2400 mutex_lock(&mdev->state_lock);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4525 4525 err = mlx5e_safe_switch_params(priv, &new_params, preactivate, NULL, reset); 4526 4526 4527 4527 out: 4528 - netdev->mtu = params->sw_mtu; 4528 + WRITE_ONCE(netdev->mtu, params->sw_mtu); 4529 4529 mutex_unlock(&priv->state_lock); 4530 4530 return err; 4531 4531 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 531 531 if (err) 532 532 goto out; 533 533 534 - netdev->mtu = new_params.sw_mtu; 534 + WRITE_ONCE(netdev->mtu, new_params.sw_mtu); 535 535 536 536 out: 537 537 mutex_unlock(&priv->state_lock);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
··· 280 280 struct mlx5e_priv *priv = mlx5i_epriv(netdev); 281 281 282 282 mutex_lock(&priv->state_lock); 283 - netdev->mtu = new_mtu; 283 + WRITE_ONCE(netdev->mtu, new_mtu); 284 284 mutex_unlock(&priv->state_lock); 285 285 286 286 return 0;
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 825 825 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); 826 826 if (err) 827 827 goto err_port_mtu_set; 828 - dev->mtu = mtu; 828 + WRITE_ONCE(dev->mtu, mtu); 829 829 return 0; 830 830 831 831 err_port_mtu_set:
+1 -1
drivers/net/ethernet/micrel/ksz884x.c
··· 5427 5427 } 5428 5428 hw_mtu = (hw_mtu + 3) & ~3; 5429 5429 hw_priv->mtu = hw_mtu; 5430 - dev->mtu = new_mtu; 5430 + WRITE_ONCE(dev->mtu, new_mtu); 5431 5431 5432 5432 return 0; 5433 5433 }
+1 -1
drivers/net/ethernet/microchip/lan743x_main.c
··· 3184 3184 3185 3185 ret = lan743x_mac_set_mtu(adapter, new_mtu); 3186 3186 if (!ret) 3187 - netdev->mtu = new_mtu; 3187 + WRITE_ONCE(netdev->mtu, new_mtu); 3188 3188 return ret; 3189 3189 } 3190 3190
+1 -1
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
··· 402 402 403 403 lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)), 404 404 lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); 405 - dev->mtu = new_mtu; 405 + WRITE_ONCE(dev->mtu, new_mtu); 406 406 407 407 if (!lan966x->fdma) 408 408 return 0;
+2 -2
drivers/net/ethernet/microsoft/mana/mana_en.c
··· 690 690 goto out; 691 691 } 692 692 693 - ndev->mtu = new_mtu; 693 + WRITE_ONCE(ndev->mtu, new_mtu); 694 694 695 695 err = mana_attach(ndev); 696 696 if (err) { 697 697 netdev_err(ndev, "mana_attach failed: %d\n", err); 698 - ndev->mtu = old_mtu; 698 + WRITE_ONCE(ndev->mtu, old_mtu); 699 699 } 700 700 701 701 out:
+4 -4
drivers/net/ethernet/myricom/myri10ge/myri10ge.c
··· 3036 3036 /* if we change the mtu on an active device, we must 3037 3037 * reset the device so the firmware sees the change */ 3038 3038 myri10ge_close(dev); 3039 - dev->mtu = new_mtu; 3039 + WRITE_ONCE(dev->mtu, new_mtu); 3040 3040 myri10ge_open(dev); 3041 - } else 3042 - dev->mtu = new_mtu; 3043 - 3041 + } else { 3042 + WRITE_ONCE(dev->mtu, new_mtu); 3043 + } 3044 3044 return 0; 3045 3045 } 3046 3046
+1 -1
drivers/net/ethernet/natsemi/natsemi.c
··· 2526 2526 2527 2527 static int natsemi_change_mtu(struct net_device *dev, int new_mtu) 2528 2528 { 2529 - dev->mtu = new_mtu; 2529 + WRITE_ONCE(dev->mtu, new_mtu); 2530 2530 2531 2531 /* synchronized against open : rtnl_lock() held by caller */ 2532 2532 if (netif_running(dev)) {
+1 -1
drivers/net/ethernet/neterion/s2io.c
··· 6637 6637 struct s2io_nic *sp = netdev_priv(dev); 6638 6638 int ret = 0; 6639 6639 6640 - dev->mtu = new_mtu; 6640 + WRITE_ONCE(dev->mtu, new_mtu); 6641 6641 if (netif_running(dev)) { 6642 6642 s2io_stop_all_tx_queue(sp); 6643 6643 s2io_card_down(sp);
+1 -1
drivers/net/ethernet/netronome/nfp/nfp_net_common.c
··· 1526 1526 *dp = nn->dp; 1527 1527 nn->dp = new_dp; 1528 1528 1529 - nn->dp.netdev->mtu = new_dp.mtu; 1529 + WRITE_ONCE(nn->dp.netdev->mtu, new_dp.mtu); 1530 1530 1531 1531 if (!netif_is_rxfh_configured(nn->dp.netdev)) 1532 1532 nfp_net_rss_init_itbl(nn);
+1 -1
drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
··· 177 177 if (err) 178 178 return err; 179 179 180 - netdev->mtu = new_mtu; 180 + WRITE_ONCE(netdev->mtu, new_mtu); 181 181 182 182 return 0; 183 183 }
+1 -1
drivers/net/ethernet/ni/nixge.c
··· 946 946 NIXGE_MAX_JUMBO_FRAME_SIZE) 947 947 return -EINVAL; 948 948 949 - ndev->mtu = new_mtu; 949 + WRITE_ONCE(ndev->mtu, new_mtu); 950 950 951 951 return 0; 952 952 }
+1 -1
drivers/net/ethernet/nvidia/forcedeth.c
··· 3098 3098 int old_mtu; 3099 3099 3100 3100 old_mtu = dev->mtu; 3101 - dev->mtu = new_mtu; 3101 + WRITE_ONCE(dev->mtu, new_mtu); 3102 3102 3103 3103 /* return early if the buffer sizes will not change */ 3104 3104 if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN)
+1 -1
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
··· 2184 2184 } 2185 2185 } else { 2186 2186 pch_gbe_reset(adapter); 2187 - netdev->mtu = new_mtu; 2187 + WRITE_ONCE(netdev->mtu, new_mtu); 2188 2188 adapter->hw.mac.max_frame_size = max_frame; 2189 2189 } 2190 2190
+1 -1
drivers/net/ethernet/pasemi/pasemi_mac.c
··· 1639 1639 reg |= PAS_MAC_CFG_MACCFG_MAXF(new_mtu + ETH_HLEN + 4); 1640 1640 write_mac_reg(mac, PAS_MAC_CFG_MACCFG, reg); 1641 1641 1642 - dev->mtu = new_mtu; 1642 + WRITE_ONCE(dev->mtu, new_mtu); 1643 1643 /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ 1644 1644 mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; 1645 1645
+2 -2
drivers/net/ethernet/pensando/ionic/ionic_lif.c
··· 1761 1761 1762 1762 /* if we're not running, nothing more to do */ 1763 1763 if (!netif_running(netdev)) { 1764 - netdev->mtu = new_mtu; 1764 + WRITE_ONCE(netdev->mtu, new_mtu); 1765 1765 return 0; 1766 1766 } 1767 1767 1768 1768 mutex_lock(&lif->queue_lock); 1769 1769 ionic_stop_queues_reconfig(lif); 1770 - netdev->mtu = new_mtu; 1770 + WRITE_ONCE(netdev->mtu, new_mtu); 1771 1771 err = ionic_start_queues_reconfig(lif); 1772 1772 mutex_unlock(&lif->queue_lock); 1773 1773
+1 -1
drivers/net/ethernet/qlogic/netxen/netxen_nic_hw.c
··· 960 960 rc = adapter->set_mtu(adapter, mtu); 961 961 962 962 if (!rc) 963 - netdev->mtu = mtu; 963 + WRITE_ONCE(netdev->mtu, mtu); 964 964 965 965 return rc; 966 966 }
+1 -1
drivers/net/ethernet/qlogic/qede/qede_ethtool.c
··· 1026 1026 static void qede_update_mtu(struct qede_dev *edev, 1027 1027 struct qede_reload_args *args) 1028 1028 { 1029 - edev->ndev->mtu = args->u.mtu; 1029 + WRITE_ONCE(edev->ndev->mtu, args->u.mtu); 1030 1030 } 1031 1031 1032 1032 /* Netdevice NDOs */
+1 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
··· 1015 1015 rc = qlcnic_fw_cmd_set_mtu(adapter, mtu); 1016 1016 1017 1017 if (!rc) 1018 - netdev->mtu = mtu; 1018 + WRITE_ONCE(netdev->mtu, mtu); 1019 1019 1020 1020 return rc; 1021 1021 }
+1 -1
drivers/net/ethernet/qualcomm/emac/emac.c
··· 216 216 netif_dbg(adpt, hw, adpt->netdev, 217 217 "changing MTU from %d to %d\n", netdev->mtu, 218 218 new_mtu); 219 - netdev->mtu = new_mtu; 219 + WRITE_ONCE(netdev->mtu, new_mtu); 220 220 221 221 if (netif_running(netdev)) 222 222 return emac_reinit_locked(adpt);
+1 -1
drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
··· 90 90 new_mtu > (priv->real_dev->mtu - headroom)) 91 91 return -EINVAL; 92 92 93 - rmnet_dev->mtu = new_mtu; 93 + WRITE_ONCE(rmnet_dev->mtu, new_mtu); 94 94 return 0; 95 95 } 96 96
+2 -2
drivers/net/ethernet/realtek/8139cp.c
··· 1277 1277 1278 1278 /* if network interface not up, no need for complexity */ 1279 1279 if (!netif_running(dev)) { 1280 - dev->mtu = new_mtu; 1280 + WRITE_ONCE(dev->mtu, new_mtu); 1281 1281 cp_set_rxbufsize(cp); /* set new rx buf size */ 1282 1282 return 0; 1283 1283 } 1284 1284 1285 1285 /* network IS up, close it, reset MTU, and come up again. */ 1286 1286 cp_close(dev); 1287 - dev->mtu = new_mtu; 1287 + WRITE_ONCE(dev->mtu, new_mtu); 1288 1288 cp_set_rxbufsize(cp); 1289 1289 return cp_open(dev); 1290 1290 }
+1 -1
drivers/net/ethernet/realtek/r8169_main.c
··· 3924 3924 { 3925 3925 struct rtl8169_private *tp = netdev_priv(dev); 3926 3926 3927 - dev->mtu = new_mtu; 3927 + WRITE_ONCE(dev->mtu, new_mtu); 3928 3928 netdev_update_features(dev); 3929 3929 rtl_jumbo_config(tp); 3930 3930 rtl_set_eee_txidle_timer(tp);
+1 -1
drivers/net/ethernet/renesas/ravb_main.c
··· 2423 2423 { 2424 2424 struct ravb_private *priv = netdev_priv(ndev); 2425 2425 2426 - ndev->mtu = new_mtu; 2426 + WRITE_ONCE(ndev->mtu, new_mtu); 2427 2427 2428 2428 if (netif_running(ndev)) { 2429 2429 synchronize_irq(priv->emac_irq);
+1 -1
drivers/net/ethernet/renesas/sh_eth.c
··· 2624 2624 if (netif_running(ndev)) 2625 2625 return -EBUSY; 2626 2626 2627 - ndev->mtu = new_mtu; 2627 + WRITE_ONCE(ndev->mtu, new_mtu); 2628 2628 netdev_update_features(ndev); 2629 2629 2630 2630 return 0;
+1 -1
drivers/net/ethernet/rocker/rocker_main.c
··· 1967 1967 rocker_port_stop(dev); 1968 1968 1969 1969 netdev_info(dev, "MTU change from %d to %d\n", dev->mtu, new_mtu); 1970 - dev->mtu = new_mtu; 1970 + WRITE_ONCE(dev->mtu, new_mtu); 1971 1971 1972 1972 err = rocker_cmd_set_port_settings_mtu(rocker_port, new_mtu); 1973 1973 if (err)
+1 -1
drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
··· 1804 1804 */ 1805 1805 static int sxgbe_change_mtu(struct net_device *dev, int new_mtu) 1806 1806 { 1807 - dev->mtu = new_mtu; 1807 + WRITE_ONCE(dev->mtu, new_mtu); 1808 1808 1809 1809 if (!netif_running(dev)) 1810 1810 return 0;
+1 -1
drivers/net/ethernet/sfc/efx_common.c
··· 302 302 efx_stop_all(efx); 303 303 304 304 mutex_lock(&efx->mac_lock); 305 - net_dev->mtu = new_mtu; 305 + WRITE_ONCE(net_dev->mtu, new_mtu); 306 306 efx_mac_reconfigure(efx, true); 307 307 mutex_unlock(&efx->mac_lock); 308 308
+1 -1
drivers/net/ethernet/sfc/falcon/efx.c
··· 2125 2125 ef4_stop_all(efx); 2126 2126 2127 2127 mutex_lock(&efx->mac_lock); 2128 - net_dev->mtu = new_mtu; 2128 + WRITE_ONCE(net_dev->mtu, new_mtu); 2129 2129 ef4_mac_reconfigure(efx); 2130 2130 mutex_unlock(&efx->mac_lock); 2131 2131
+1 -1
drivers/net/ethernet/sfc/siena/efx_common.c
··· 306 306 efx_siena_stop_all(efx); 307 307 308 308 mutex_lock(&efx->mac_lock); 309 - net_dev->mtu = new_mtu; 309 + WRITE_ONCE(net_dev->mtu, new_mtu); 310 310 efx_siena_mac_reconfigure(efx, true); 311 311 mutex_unlock(&efx->mac_lock); 312 312
+1 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 5909 5909 stmmac_set_rx_mode(dev); 5910 5910 } 5911 5911 5912 - dev->mtu = mtu; 5912 + WRITE_ONCE(dev->mtu, mtu); 5913 5913 netdev_update_features(dev); 5914 5914 5915 5915 return 0;
+1 -1
drivers/net/ethernet/sun/cassini.c
··· 3804 3804 { 3805 3805 struct cas *cp = netdev_priv(dev); 3806 3806 3807 - dev->mtu = new_mtu; 3807 + WRITE_ONCE(dev->mtu, new_mtu); 3808 3808 if (!netif_running(dev) || !netif_device_present(dev)) 3809 3809 return 0; 3810 3810
+1 -1
drivers/net/ethernet/sun/niu.c
··· 6751 6751 orig_jumbo = (dev->mtu > ETH_DATA_LEN); 6752 6752 new_jumbo = (new_mtu > ETH_DATA_LEN); 6753 6753 6754 - dev->mtu = new_mtu; 6754 + WRITE_ONCE(dev->mtu, new_mtu); 6755 6755 6756 6756 if (!netif_running(dev) || 6757 6757 (orig_jumbo == new_jumbo))
+1 -1
drivers/net/ethernet/sun/sungem.c
··· 2499 2499 { 2500 2500 struct gem *gp = netdev_priv(dev); 2501 2501 2502 - dev->mtu = new_mtu; 2502 + WRITE_ONCE(dev->mtu, new_mtu); 2503 2503 2504 2504 /* We'll just catch it later when the device is up'd or resumed */ 2505 2505 if (!netif_running(dev) || !netif_device_present(dev))
+1 -1
drivers/net/ethernet/synopsys/dwc-xlgmac-net.c
··· 823 823 return ret; 824 824 825 825 pdata->rx_buf_size = ret; 826 - netdev->mtu = mtu; 826 + WRITE_ONCE(netdev->mtu, mtu); 827 827 828 828 xlgmac_restart_dev(pdata); 829 829
+1 -1
drivers/net/ethernet/tehuti/tehuti.c
··· 756 756 { 757 757 ENTER; 758 758 759 - ndev->mtu = new_mtu; 759 + WRITE_ONCE(ndev->mtu, new_mtu); 760 760 if (netif_running(ndev)) { 761 761 bdx_close(ndev); 762 762 bdx_open(ndev);
+2 -2
drivers/net/ethernet/via/via-velocity.c
··· 2294 2294 int ret = 0; 2295 2295 2296 2296 if (!netif_running(dev)) { 2297 - dev->mtu = new_mtu; 2297 + WRITE_ONCE(dev->mtu, new_mtu); 2298 2298 goto out_0; 2299 2299 } 2300 2300 ··· 2336 2336 tmp_vptr->rx = rx; 2337 2337 tmp_vptr->tx = tx; 2338 2338 2339 - dev->mtu = new_mtu; 2339 + WRITE_ONCE(dev->mtu, new_mtu); 2340 2340 2341 2341 velocity_init_registers(vptr, VELOCITY_INIT_COLD); 2342 2342
+1 -1
drivers/net/ethernet/wangxun/libwx/wx_hw.c
··· 1408 1408 { 1409 1409 struct wx *wx = netdev_priv(netdev); 1410 1410 1411 - netdev->mtu = new_mtu; 1411 + WRITE_ONCE(netdev->mtu, new_mtu); 1412 1412 wx_set_rx_buffer_len(wx); 1413 1413 1414 1414 return 0;
+1 -1
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 1641 1641 XAE_TRL_SIZE) > lp->rxmem) 1642 1642 return -EINVAL; 1643 1643 1644 - ndev->mtu = new_mtu; 1644 + WRITE_ONCE(ndev->mtu, new_mtu); 1645 1645 1646 1646 return 0; 1647 1647 }
+1 -1
drivers/net/ethernet/xscale/ixp4xx_eth.c
··· 1233 1233 return ret; 1234 1234 } 1235 1235 1236 - dev->mtu = new_mtu; 1236 + WRITE_ONCE(dev->mtu, new_mtu); 1237 1237 1238 1238 return 0; 1239 1239 }
+1 -1
drivers/net/fjes/fjes_main.c
··· 811 811 netif_tx_stop_all_queues(netdev); 812 812 } 813 813 814 - netdev->mtu = new_mtu; 814 + WRITE_ONCE(netdev->mtu, new_mtu); 815 815 816 816 if (running) { 817 817 for (epidx = 0; epidx < hw->max_epid; epidx++) {
+1 -1
drivers/net/geneve.c
··· 1059 1059 else if (new_mtu < dev->min_mtu) 1060 1060 new_mtu = dev->min_mtu; 1061 1061 1062 - dev->mtu = new_mtu; 1062 + WRITE_ONCE(dev->mtu, new_mtu); 1063 1063 return 0; 1064 1064 } 1065 1065
+2 -2
drivers/net/hyperv/netvsc_drv.c
··· 1233 1233 if (ret) 1234 1234 goto rollback_vf; 1235 1235 1236 - ndev->mtu = mtu; 1236 + WRITE_ONCE(ndev->mtu, mtu); 1237 1237 1238 1238 ret = netvsc_attach(ndev, device_info); 1239 1239 if (!ret) 1240 1240 goto out; 1241 1241 1242 1242 /* Attempt rollback to original MTU */ 1243 - ndev->mtu = orig_mtu; 1243 + WRITE_ONCE(ndev->mtu, orig_mtu); 1244 1244 1245 1245 if (netvsc_attach(ndev, device_info)) 1246 1246 netdev_err(ndev, "restoring mtu failed\n");
+1 -1
drivers/net/macsec.c
··· 3753 3753 if (macsec->real_dev->mtu - extra < new_mtu) 3754 3754 return -ERANGE; 3755 3755 3756 - dev->mtu = new_mtu; 3756 + WRITE_ONCE(dev->mtu, new_mtu); 3757 3757 3758 3758 return 0; 3759 3759 }
+1 -1
drivers/net/macvlan.c
··· 865 865 866 866 if (vlan->lowerdev->mtu < new_mtu) 867 867 return -EINVAL; 868 - dev->mtu = new_mtu; 868 + WRITE_ONCE(dev->mtu, new_mtu); 869 869 return 0; 870 870 } 871 871
+1 -1
drivers/net/net_failover.c
··· 231 231 } 232 232 } 233 233 234 - dev->mtu = new_mtu; 234 + WRITE_ONCE(dev->mtu, new_mtu); 235 235 236 236 return 0; 237 237 }
+1 -1
drivers/net/netdevsim/netdev.c
··· 74 74 if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU) 75 75 return -EBUSY; 76 76 77 - dev->mtu = new_mtu; 77 + WRITE_ONCE(dev->mtu, new_mtu); 78 78 79 79 return 0; 80 80 }
+2 -2
drivers/net/ntb_netdev.c
··· 306 306 return -EINVAL; 307 307 308 308 if (!netif_running(ndev)) { 309 - ndev->mtu = new_mtu; 309 + WRITE_ONCE(ndev->mtu, new_mtu); 310 310 return 0; 311 311 } 312 312 ··· 335 335 } 336 336 } 337 337 338 - ndev->mtu = new_mtu; 338 + WRITE_ONCE(ndev->mtu, new_mtu); 339 339 340 340 ntb_transport_link_up(dev->qp); 341 341
+1 -1
drivers/net/slip/slip.c
··· 286 286 } 287 287 } 288 288 sl->mtu = mtu; 289 - dev->mtu = mtu; 289 + WRITE_ONCE(dev->mtu, mtu); 290 290 sl->buffsize = len; 291 291 err = 0; 292 292
+1 -1
drivers/net/team/team_core.c
··· 1831 1831 team->port_mtu_change_allowed = false; 1832 1832 mutex_unlock(&team->lock); 1833 1833 1834 - dev->mtu = new_mtu; 1834 + WRITE_ONCE(dev->mtu, new_mtu); 1835 1835 1836 1836 return 0; 1837 1837
+1 -1
drivers/net/usb/aqc111.c
··· 424 424 u16 reg16 = 0; 425 425 u8 buf[5]; 426 426 427 - net->mtu = new_mtu; 427 + WRITE_ONCE(net->mtu, new_mtu); 428 428 dev->hard_mtu = net->mtu + net->hard_header_len; 429 429 430 430 aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE,
+1 -1
drivers/net/usb/asix_devices.c
··· 1230 1230 if ((ll_mtu % dev->maxpacket) == 0) 1231 1231 return -EDOM; 1232 1232 1233 - net->mtu = new_mtu; 1233 + WRITE_ONCE(net->mtu, new_mtu); 1234 1234 dev->hard_mtu = net->mtu + net->hard_header_len; 1235 1235 ax88178_set_mfb(dev); 1236 1236
+1 -1
drivers/net/usb/ax88179_178a.c
··· 943 943 struct usbnet *dev = netdev_priv(net); 944 944 u16 tmp16; 945 945 946 - net->mtu = new_mtu; 946 + WRITE_ONCE(net->mtu, new_mtu); 947 947 dev->hard_mtu = net->mtu + net->hard_header_len; 948 948 949 949 if (net->mtu > 1500) {
+1 -1
drivers/net/usb/cdc_ncm.c
··· 798 798 { 799 799 struct usbnet *dev = netdev_priv(net); 800 800 801 - net->mtu = new_mtu; 801 + WRITE_ONCE(net->mtu, new_mtu); 802 802 cdc_ncm_set_dgram_size(dev, new_mtu + cdc_ncm_eth_hlen(dev)); 803 803 804 804 return 0;
+1 -1
drivers/net/usb/lan78xx.c
··· 2528 2528 2529 2529 ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len); 2530 2530 if (!ret) 2531 - netdev->mtu = new_mtu; 2531 + WRITE_ONCE(netdev->mtu, new_mtu); 2532 2532 2533 2533 usb_autopm_put_interface(dev->intf); 2534 2534
+2 -2
drivers/net/usb/r8152.c
··· 9365 9365 case RTL_VER_01: 9366 9366 case RTL_VER_02: 9367 9367 case RTL_VER_07: 9368 - dev->mtu = new_mtu; 9368 + WRITE_ONCE(dev->mtu, new_mtu); 9369 9369 return 0; 9370 9370 default: 9371 9371 break; ··· 9377 9377 9378 9378 mutex_lock(&tp->control); 9379 9379 9380 - dev->mtu = new_mtu; 9380 + WRITE_ONCE(dev->mtu, new_mtu); 9381 9381 9382 9382 if (netif_running(dev)) { 9383 9383 if (tp->rtl_ops.change_mtu)
+1 -1
drivers/net/usb/usbnet.c
··· 398 398 // no second zero-length packet read wanted after mtu-sized packets 399 399 if ((ll_mtu % dev->maxpacket) == 0) 400 400 return -EDOM; 401 - net->mtu = new_mtu; 401 + WRITE_ONCE(net->mtu, new_mtu); 402 402 403 403 dev->hard_mtu = net->mtu + net->hard_header_len; 404 404 if (dev->rx_urb_size == old_hard_mtu) {
+1 -1
drivers/net/vmxnet3/vmxnet3_drv.c
··· 3457 3457 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3458 3458 int err = 0; 3459 3459 3460 - netdev->mtu = new_mtu; 3460 + WRITE_ONCE(netdev->mtu, new_mtu); 3461 3461 3462 3462 /* 3463 3463 * Reset_work may be in the middle of resetting the device, wait for its
+1 -1
drivers/net/vsockmon.c
··· 58 58 if (!vsockmon_is_valid_mtu(new_mtu)) 59 59 return -EINVAL; 60 60 61 - dev->mtu = new_mtu; 61 + WRITE_ONCE(dev->mtu, new_mtu); 62 62 return 0; 63 63 } 64 64
+1 -1
drivers/net/vxlan/vxlan_core.c
··· 3181 3181 return -EINVAL; 3182 3182 } 3183 3183 3184 - dev->mtu = new_mtu; 3184 + WRITE_ONCE(dev->mtu, new_mtu); 3185 3185 return 0; 3186 3186 } 3187 3187
+1 -1
drivers/net/xen-netback/interface.c
··· 358 358 359 359 if (mtu > max) 360 360 return -EINVAL; 361 - dev->mtu = mtu; 361 + WRITE_ONCE(dev->mtu, mtu); 362 362 return 0; 363 363 } 364 364
+1 -1
drivers/net/xen-netfront.c
··· 1376 1376 1377 1377 if (mtu > max) 1378 1378 return -EINVAL; 1379 - dev->mtu = mtu; 1379 + WRITE_ONCE(dev->mtu, mtu); 1380 1380 return 0; 1381 1381 } 1382 1382
+1 -1
drivers/s390/net/ctcm_main.c
··· 996 996 return -EINVAL; 997 997 dev->hard_header_len = LL_HEADER_LENGTH + 2; 998 998 } 999 - dev->mtu = new_mtu; 999 + WRITE_ONCE(dev->mtu, new_mtu); 1000 1000 return 0; 1001 1001 } 1002 1002
+1 -1
net/8021q/vlan_dev.c
··· 149 149 if (max_mtu < new_mtu) 150 150 return -ERANGE; 151 151 152 - dev->mtu = new_mtu; 152 + WRITE_ONCE(dev->mtu, new_mtu); 153 153 154 154 return 0; 155 155 }
+1 -1
net/batman-adv/soft-interface.c
··· 159 159 if (new_mtu < ETH_MIN_MTU || new_mtu > batadv_hardif_min_mtu(dev)) 160 160 return -EINVAL; 161 161 162 - dev->mtu = new_mtu; 162 + WRITE_ONCE(dev->mtu, new_mtu); 163 163 bat_priv->mtu_set_by_user = new_mtu; 164 164 165 165 return 0;
+1 -1
net/bridge/br_device.c
··· 197 197 { 198 198 struct net_bridge *br = netdev_priv(dev); 199 199 200 - dev->mtu = new_mtu; 200 + WRITE_ONCE(dev->mtu, new_mtu); 201 201 202 202 /* this flag will be cleared if the MTU was automatically adjusted */ 203 203 br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
+1 -1
net/dsa/user.c
··· 2120 2120 if (err) 2121 2121 goto out_port_failed; 2122 2122 2123 - dev->mtu = new_mtu; 2123 + WRITE_ONCE(dev->mtu, new_mtu); 2124 2124 2125 2125 dsa_bridge_mtu_normalization(dp); 2126 2126
+1 -1
net/hsr/hsr_device.c
··· 123 123 return -EINVAL; 124 124 } 125 125 126 - dev->mtu = new_mtu; 126 + WRITE_ONCE(dev->mtu, new_mtu); 127 127 128 128 return 0; 129 129 }
+1 -1
net/hsr/hsr_main.c
··· 96 96 break; /* Handled in ndo_change_mtu() */ 97 97 mtu_max = hsr_get_max_mtu(port->hsr); 98 98 master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER); 99 - master->dev->mtu = mtu_max; 99 + WRITE_ONCE(master->dev->mtu, mtu_max); 100 100 break; 101 101 case NETDEV_UNREGISTER: 102 102 if (!is_hsr_master(dev)) {
+1 -1
net/ipv4/ip_gre.c
··· 793 793 dev->needed_headroom += len; 794 794 795 795 if (set_mtu) 796 - dev->mtu = max_t(int, dev->mtu - len, 68); 796 + WRITE_ONCE(dev->mtu, max_t(int, dev->mtu - len, 68)); 797 797 798 798 if (test_bit(IP_TUNNEL_SEQ_BIT, tunnel->parms.o_flags) || 799 799 (test_bit(IP_TUNNEL_CSUM_BIT, tunnel->parms.o_flags) &&
+2 -2
net/ipv4/ip_tunnel.c
··· 897 897 t->fwmark = fwmark; 898 898 mtu = ip_tunnel_bind_dev(dev); 899 899 if (set_mtu) 900 - dev->mtu = mtu; 900 + WRITE_ONCE(dev->mtu, mtu); 901 901 } 902 902 dst_cache_reset(&t->dst_cache); 903 903 netdev_state_change(dev); ··· 1082 1082 new_mtu = max_mtu; 1083 1083 } 1084 1084 1085 - dev->mtu = new_mtu; 1085 + WRITE_ONCE(dev->mtu, new_mtu); 1086 1086 return 0; 1087 1087 } 1088 1088 EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
+1 -1
net/ipv6/ip6_tunnel.c
··· 1746 1746 if (new_mtu > IP_MAX_MTU - dev->hard_header_len) 1747 1747 return -EINVAL; 1748 1748 } 1749 - dev->mtu = new_mtu; 1749 + WRITE_ONCE(dev->mtu, new_mtu); 1750 1750 return 0; 1751 1751 } 1752 1752 EXPORT_SYMBOL(ip6_tnl_change_mtu);
+2 -1
net/ipv6/ip6_vti.c
··· 666 666 dev->flags &= ~IFF_POINTOPOINT; 667 667 668 668 if (keep_mtu && dev->mtu) { 669 - dev->mtu = clamp(dev->mtu, dev->min_mtu, dev->max_mtu); 669 + WRITE_ONCE(dev->mtu, 670 + clamp(dev->mtu, dev->min_mtu, dev->max_mtu)); 670 671 return; 671 672 } 672 673
+1 -1
net/sched/sch_teql.c
··· 424 424 } while ((q = NEXT_SLAVE(q)) != m->slaves); 425 425 } 426 426 427 - dev->mtu = new_mtu; 427 + WRITE_ONCE(dev->mtu, new_mtu); 428 428 return 0; 429 429 } 430 430