Merge tag 'net-6.19-rc9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
"Including fixes from wireless and Netfilter.

Previous releases - regressions:

- eth: stmmac: fix stm32 (and potentially others) resume regression

- nf_tables: fix inverted genmask check in nft_map_catchall_activate()

- usb: r8152: fix resume reset deadlock

- fix reporting RXH_XFRM_NO_CHANGE as input_xfrm for RSS contexts

Previous releases - always broken:

- sched: cls_u32: use skb_header_pointer_careful() to avoid OOB reads
with malicious u32 rules

- eth: ice: timestamping-related fixes"

* tag 'net-6.19-rc9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (38 commits)
ipv6: Fix ECMP sibling count mismatch when clearing RTF_ADDRCONF
netfilter: nf_tables: fix inverted genmask check in nft_map_catchall_activate()
net: cpsw: Execute ndo_set_rx_mode callback in a work queue
net: cpsw_new: Execute ndo_set_rx_mode callback in a work queue
gve: Correct ethtool rx_dropped calculation
gve: Fix stats report corruption on queue count change
selftest: net: add a test-case for encap segmentation after GRO
net: gro: fix outer network offset
net: add proper RCU protection to /proc/net/ptype
net: ethernet: adi: adin1110: Check return value of devm_gpiod_get_optional() in adin1110_check_spi()
wifi: iwlwifi: mvm: pause TCM on fast resume
wifi: iwlwifi: mld: cancel mlo_scan_start_wk
net: spacemit: k1-emac: fix jumbo frame support
net: enetc: Convert 16-bit register reads to 32-bit for ENETC v4
net: enetc: Convert 16-bit register writes to 32-bit for ENETC v4
net: enetc: Remove CBDR cacheability AXI settings for ENETC v4
net: enetc: Remove SI/BDR cacheability AXI settings for ENETC v4
tipc: use kfree_sensitive() for session key material
net: stmmac: fix stm32 (and potentially others) resume regression
net: rss: fix reporting RXH_XFRM_NO_CHANGE as input_xfrm for contexts
...

+513 -234
+12
MAINTAINERS
··· 20978 20978 F: drivers/net/pse-pd/ 20979 20979 F: net/ethtool/pse-pd.c 20980 20980 20981 + PSP SECURITY PROTOCOL 20982 + M: Daniel Zahka <daniel.zahka@gmail.com> 20983 + M: Jakub Kicinski <kuba@kernel.org> 20984 + M: Willem de Bruijn <willemdebruijn.kernel@gmail.com> 20985 + F: Documentation/netlink/specs/psp.yaml 20986 + F: Documentation/networking/psp.rst 20987 + F: include/net/psp/ 20988 + F: include/net/psp.h 20989 + F: include/uapi/linux/psp.h 20990 + F: net/psp/ 20991 + K: struct\ psp(_assoc|_dev|hdr)\b 20992 + 20981 20993 PSTORE FILESYSTEM 20982 20994 M: Kees Cook <kees@kernel.org> 20983 20995 R: Tony Luck <tony.luck@intel.com>
+3
drivers/net/ethernet/adi/adin1110.c
··· 1089 1089 1090 1090 reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset", 1091 1091 GPIOD_OUT_LOW); 1092 + if (IS_ERR(reset_gpio)) 1093 + return dev_err_probe(&priv->spidev->dev, PTR_ERR(reset_gpio), 1094 + "failed to get reset gpio\n"); 1092 1095 if (reset_gpio) { 1093 1096 /* MISO pin is used for internal configuration, can't have 1094 1097 * anyone else disturbing the SDO line.
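
For context, this follows the standard optional-GPIO idiom: devm_gpiod_get_optional() returns NULL when no "reset" line is described, but an ERR_PTR() on real failures such as -EPROBE_DEFER, and that error pointer must never be dereferenced. A minimal probe-time sketch of the same pattern (hedged; the names are illustrative, not the adin1110 code):

    struct gpio_desc *reset_gpio;

    reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
    if (IS_ERR(reset_gpio))
        /* Propagates -EPROBE_DEFER and logs any other error. */
        return dev_err_probe(dev, PTR_ERR(reset_gpio),
                             "failed to get reset gpio\n");
    if (reset_gpio) {
        /* GPIO really present: pulse the reset line. */
        gpiod_set_value_cansleep(reset_gpio, 1);
        usleep_range(1000, 2000);
        gpiod_set_value_cansleep(reset_gpio, 0);
    }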
+20 -19
drivers/net/ethernet/cavium/liquidio/lio_main.c
··· 3505 3505 */ 3506 3506 netdev->netdev_ops = &lionetdevops; 3507 3507 3508 + lio = GET_LIO(netdev); 3509 + 3510 + memset(lio, 0, sizeof(struct lio)); 3511 + 3512 + lio->ifidx = ifidx_or_pfnum; 3513 + 3514 + props = &octeon_dev->props[i]; 3515 + props->gmxport = resp->cfg_info.linfo.gmxport; 3516 + props->netdev = netdev; 3517 + 3518 + /* Point to the properties for octeon device to which this 3519 + * interface belongs. 3520 + */ 3521 + lio->oct_dev = octeon_dev; 3522 + lio->octprops = props; 3523 + lio->netdev = netdev; 3524 + 3508 3525 retval = netif_set_real_num_rx_queues(netdev, num_oqueues); 3509 3526 if (retval) { 3510 3527 dev_err(&octeon_dev->pci_dev->dev, ··· 3537 3520 WRITE_ONCE(sc->caller_is_done, true); 3538 3521 goto setup_nic_dev_free; 3539 3522 } 3540 - 3541 - lio = GET_LIO(netdev); 3542 - 3543 - memset(lio, 0, sizeof(struct lio)); 3544 - 3545 - lio->ifidx = ifidx_or_pfnum; 3546 - 3547 - props = &octeon_dev->props[i]; 3548 - props->gmxport = resp->cfg_info.linfo.gmxport; 3549 - props->netdev = netdev; 3550 3523 3551 3524 lio->linfo.num_rxpciq = num_oqueues; 3552 3525 lio->linfo.num_txpciq = num_iqueues; ··· 3602 3595 /* MTU range: 68 - 16000 */ 3603 3596 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3604 3597 netdev->max_mtu = LIO_MAX_MTU_SIZE; 3605 - 3606 - /* Point to the properties for octeon device to which this 3607 - * interface belongs. 3608 - */ 3609 - lio->oct_dev = octeon_dev; 3610 - lio->octprops = props; 3611 - lio->netdev = netdev; 3612 3598 3613 3599 dev_dbg(&octeon_dev->pci_dev->dev, 3614 3600 "if%d gmx: %d hw_addr: 0x%llx\n", i, ··· 3750 3750 if (!devlink) { 3751 3751 device_unlock(&octeon_dev->pci_dev->dev); 3752 3752 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); 3753 + i--; 3753 3754 goto setup_nic_dev_free; 3754 3755 } 3755 3756 ··· 3766 3765 3767 3766 setup_nic_dev_free: 3768 3767 3769 - while (i--) { 3768 + do { 3770 3769 dev_err(&octeon_dev->pci_dev->dev, 3771 3770 "NIC ifidx:%d Setup failed\n", i); 3772 3771 liquidio_destroy_nic_device(octeon_dev, i); 3773 - } 3772 + } while (i--); 3774 3773 3775 3774 setup_nic_dev_done: 3776 3775
+2 -2
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
··· 2212 2212 2213 2213 setup_nic_dev_free: 2214 2214 2215 - while (i--) { 2215 + do { 2216 2216 dev_err(&octeon_dev->pci_dev->dev, 2217 2217 "NIC ifidx:%d Setup failed\n", i); 2218 2218 liquidio_destroy_nic_device(octeon_dev, i); 2219 - } 2219 + } while (i--); 2220 2220 2221 2221 setup_nic_dev_done: 2222 2222
+10
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
··· 1531 1531 } 1532 1532 1533 1533 if_id = (status & 0xFFFF0000) >> 16; 1534 + if (if_id >= ethsw->sw_attr.num_ifs) { 1535 + dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id); 1536 + goto out; 1537 + } 1534 1538 port_priv = ethsw->ports[if_id]; 1535 1539 1536 1540 if (status & DPSW_IRQ_EVENT_LINK_CHANGED) ··· 3025 3021 &ethsw->sw_attr); 3026 3022 if (err) { 3027 3023 dev_err(dev, "dpsw_get_attributes err %d\n", err); 3024 + goto err_close; 3025 + } 3026 + 3027 + if (!ethsw->sw_attr.num_ifs) { 3028 + dev_err(dev, "DPSW device has no interfaces\n"); 3029 + err = -ENODEV; 3028 3030 goto err_close; 3029 3031 } 3030 3032
+7 -4
drivers/net/ethernet/freescale/enetc/enetc.c
··· 2512 2512 struct enetc_hw *hw = &si->hw; 2513 2513 int err; 2514 2514 2515 - /* set SI cache attributes */ 2516 - enetc_wr(hw, ENETC_SICAR0, 2517 - ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 2518 - enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); 2515 + if (is_enetc_rev1(si)) { 2516 + /* set SI cache attributes */ 2517 + enetc_wr(hw, ENETC_SICAR0, 2518 + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 2519 + enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); 2520 + } 2521 + 2519 2522 /* enable SI */ 2520 2523 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); 2521 2524
+3 -3
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
··· 59 59 60 60 if (si != 0) { 61 61 __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si)); 62 - __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si)); 62 + __raw_writel(lower, hw->port + ENETC4_PSIPMAR1(si)); 63 63 } else { 64 64 __raw_writel(upper, hw->port + ENETC4_PMAR0); 65 - __raw_writew(lower, hw->port + ENETC4_PMAR1); 65 + __raw_writel(lower, hw->port + ENETC4_PMAR1); 66 66 } 67 67 } 68 68 ··· 73 73 u16 lower; 74 74 75 75 upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si)); 76 - lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si)); 76 + lower = __raw_readl(hw->port + ENETC4_PSIPMAR1(si)); 77 77 78 78 put_unaligned_le32(upper, addr); 79 79 put_unaligned_le16(lower, addr + 4);
-4
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
··· 74 74 if (!user->ring) 75 75 return -ENOMEM; 76 76 77 - /* set CBDR cache attributes */ 78 - enetc_wr(hw, ENETC_SICAR2, 79 - ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 80 - 81 77 regs.pir = hw->reg + ENETC_SICBDRPIR; 82 78 regs.cir = hw->reg + ENETC_SICBDRCIR; 83 79 regs.mr = hw->reg + ENETC_SICBDRMR;
+14 -3
drivers/net/ethernet/freescale/enetc/enetc_hw.h
··· 708 708 #define ENETC_RFSE_EN BIT(15) 709 709 #define ENETC_RFSE_MODE_BD 2 710 710 711 + static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr) 712 + { 713 + u32 upper; 714 + u16 lower; 715 + 716 + upper = __raw_readl(hw->reg + ENETC_SIPMAR0); 717 + lower = __raw_readl(hw->reg + ENETC_SIPMAR1); 718 + 719 + put_unaligned_le32(upper, addr); 720 + put_unaligned_le16(lower, addr + 4); 721 + } 722 + 711 723 static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw, 712 724 struct net_device *ndev) 713 725 { 714 - u8 addr[ETH_ALEN] __aligned(4); 726 + u8 addr[ETH_ALEN]; 715 727 716 - *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0); 717 - *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1); 728 + enetc_get_primary_mac_addr(hw, addr); 718 729 eth_hw_addr_set(ndev, addr); 719 730 } 720 731
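
The helper above pairs with the 32-bit register conversions earlier in the series: the six MAC octets arrive as one 32-bit word plus a 16-bit remainder, and the destination buffer is no longer guaranteed 4-byte aligned, hence the put_unaligned_le32()/put_unaligned_le16() helpers instead of casting the buffer to u32/u16. A small standalone model of the byte assembly (plain C; the register values are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical SIPMAR0/SIPMAR1 contents for 00:04:9f:11:22:33 */
        uint32_t upper = 0x119f0400;
        uint16_t lower = 0x3322;
        uint8_t addr[6];
        int i;

        /* What put_unaligned_le32()/put_unaligned_le16() do, spelled
         * out byte by byte so it is correct on any host endianness:
         */
        for (i = 0; i < 4; i++)
            addr[i] = upper >> (8 * i);
        addr[4] = lower & 0xff;
        addr[5] = lower >> 8;

        for (i = 0; i < 6; i++)
            printf("%02x%c", addr[i], i < 5 ? ':' : '\n');
        return 0;
    }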
+51 -26
drivers/net/ethernet/google/gve/gve_ethtool.c
··· 152 152 u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes, 153 153 tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail, 154 154 tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt, 155 - tmp_tx_pkts, tmp_tx_bytes; 155 + tmp_tx_pkts, tmp_tx_bytes, 156 + tmp_xdp_tx_errors, tmp_xdp_redirect_errors; 156 157 u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt, 157 158 rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, 158 - tx_dropped; 159 - int stats_idx, base_stats_idx, max_stats_idx; 159 + tx_dropped, xdp_tx_errors, xdp_redirect_errors; 160 + int rx_base_stats_idx, max_rx_stats_idx, max_tx_stats_idx; 161 + int stats_idx, stats_region_len, nic_stats_len; 160 162 struct stats *report_stats; 161 163 int *rx_qid_to_stats_idx; 162 164 int *tx_qid_to_stats_idx; ··· 200 198 for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0, 201 199 rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0, 202 200 rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0, 201 + xdp_tx_errors = 0, xdp_redirect_errors = 0, 203 202 ring = 0; 204 203 ring < priv->rx_cfg.num_queues; ring++) { 205 204 if (priv->rx) { ··· 218 215 rx->rx_desc_err_dropped_pkt; 219 216 tmp_rx_hsplit_unsplit_pkt = 220 217 rx->rx_hsplit_unsplit_pkt; 218 + tmp_xdp_tx_errors = rx->xdp_tx_errors; 219 + tmp_xdp_redirect_errors = 220 + rx->xdp_redirect_errors; 221 221 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 222 222 start)); 223 223 rx_pkts += tmp_rx_pkts; ··· 230 224 rx_buf_alloc_fail += tmp_rx_buf_alloc_fail; 231 225 rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt; 232 226 rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt; 227 + xdp_tx_errors += tmp_xdp_tx_errors; 228 + xdp_redirect_errors += tmp_xdp_redirect_errors; 233 229 } 234 230 } 235 231 for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; ··· 257 249 data[i++] = rx_bytes; 258 250 data[i++] = tx_bytes; 259 251 /* total rx dropped packets */ 260 - data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail + 261 - rx_desc_err_dropped_pkt; 252 + data[i++] = rx_skb_alloc_fail + rx_desc_err_dropped_pkt + 253 + xdp_tx_errors + xdp_redirect_errors; 262 254 data[i++] = tx_dropped; 263 255 data[i++] = priv->tx_timeo_cnt; 264 256 data[i++] = rx_skb_alloc_fail; ··· 273 265 data[i++] = priv->stats_report_trigger_cnt; 274 266 i = GVE_MAIN_STATS_LEN; 275 267 276 - /* For rx cross-reporting stats, start from nic rx stats in report */ 277 - base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues + 278 - GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; 279 - /* The boundary between driver stats and NIC stats shifts if there are 280 - * stopped queues. 281 - */ 282 - base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs + 283 - NIC_TX_STATS_REPORT_NUM * num_stopped_txqs; 284 - max_stats_idx = NIC_RX_STATS_REPORT_NUM * 285 - (priv->rx_cfg.num_queues - num_stopped_rxqs) + 286 - base_stats_idx; 268 + rx_base_stats_idx = 0; 269 + max_rx_stats_idx = 0; 270 + max_tx_stats_idx = 0; 271 + stats_region_len = priv->stats_report_len - 272 + sizeof(struct gve_stats_report); 273 + nic_stats_len = (NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues + 274 + NIC_TX_STATS_REPORT_NUM * num_tx_queues) * sizeof(struct stats); 275 + if (unlikely((stats_region_len - 276 + nic_stats_len) % sizeof(struct stats))) { 277 + net_err_ratelimited("Starting index of NIC stats should be multiple of stats size"); 278 + } else { 279 + /* For rx cross-reporting stats, 280 + * start from nic rx stats in report 281 + */ 282 + rx_base_stats_idx = (stats_region_len - nic_stats_len) / 283 + sizeof(struct stats); 284 + /* The boundary between driver stats and NIC stats 285 + * shifts if there are stopped queues 286 + */ 287 + rx_base_stats_idx += NIC_RX_STATS_REPORT_NUM * 288 + num_stopped_rxqs + NIC_TX_STATS_REPORT_NUM * 289 + num_stopped_txqs; 290 + max_rx_stats_idx = NIC_RX_STATS_REPORT_NUM * 291 + (priv->rx_cfg.num_queues - num_stopped_rxqs) + 292 + rx_base_stats_idx; 293 + max_tx_stats_idx = NIC_TX_STATS_REPORT_NUM * 294 + (num_tx_queues - num_stopped_txqs) + 295 + max_rx_stats_idx; 296 + } 287 297 /* Preprocess the stats report for rx, map queue id to start index */ 288 298 skip_nic_stats = false; 289 - for (stats_idx = base_stats_idx; stats_idx < max_stats_idx; 299 + for (stats_idx = rx_base_stats_idx; stats_idx < max_rx_stats_idx; 290 300 stats_idx += NIC_RX_STATS_REPORT_NUM) { 291 301 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); 292 302 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id); ··· 337 311 tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; 338 312 tmp_rx_desc_err_dropped_pkt = 339 313 rx->rx_desc_err_dropped_pkt; 314 + tmp_xdp_tx_errors = rx->xdp_tx_errors; 315 + tmp_xdp_redirect_errors = 316 + rx->xdp_redirect_errors; 340 317 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 341 318 start)); 342 319 data[i++] = tmp_rx_bytes; ··· 350 321 data[i++] = rx->rx_frag_alloc_cnt; 351 322 /* rx dropped packets */ 352 323 data[i++] = tmp_rx_skb_alloc_fail + 353 - tmp_rx_buf_alloc_fail + 354 - tmp_rx_desc_err_dropped_pkt; 324 + tmp_rx_desc_err_dropped_pkt + 325 + tmp_xdp_tx_errors + 326 + tmp_xdp_redirect_errors; 355 327 data[i++] = rx->rx_copybreak_pkt; 356 328 data[i++] = rx->rx_copied_pkt; 357 329 /* stats from NIC */ ··· 384 354 i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; 385 355 } 386 356 387 - /* For tx cross-reporting stats, start from nic tx stats in report */ 388 - base_stats_idx = max_stats_idx; 389 - max_stats_idx = NIC_TX_STATS_REPORT_NUM * 390 - (num_tx_queues - num_stopped_txqs) + 391 - max_stats_idx; 392 - /* Preprocess the stats report for tx, map queue id to start index */ 393 357 skip_nic_stats = false; 394 - for (stats_idx = base_stats_idx; stats_idx < max_stats_idx; 358 + /* NIC TX stats start right after NIC RX stats */ 359 + for (stats_idx = max_rx_stats_idx; stats_idx < max_tx_stats_idx; 395 360 stats_idx += NIC_TX_STATS_REPORT_NUM) { 396 361 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); 397 362 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+2 -2
drivers/net/ethernet/google/gve/gve_main.c
··· 283 283 int tx_stats_num, rx_stats_num; 284 284 285 285 tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * 286 - gve_num_tx_queues(priv); 286 + priv->tx_cfg.max_queues; 287 287 rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * 288 - priv->rx_cfg.num_queues; 288 + priv->rx_cfg.max_queues; 289 289 priv->stats_report_len = struct_size(priv->stats_report, stats, 290 290 size_add(tx_stats_num, rx_stats_num)); 291 291 priv->stats_report =
-1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 9030 9030 TCP_FLAG_FIN | 9031 9031 TCP_FLAG_CWR) >> 16); 9032 9032 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 9033 - udp_tunnel_get_rx_info(netdev); 9034 9033 9035 9034 return 0; 9036 9035 }
+14 -12
drivers/net/ethernet/intel/ice/ice_main.c
··· 3314 3314 if (ice_is_reset_in_progress(pf->state)) 3315 3315 goto skip_irq; 3316 3316 3317 - if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { 3318 - /* Process outstanding Tx timestamps. If there is more work, 3319 - * re-arm the interrupt to trigger again. 3320 - */ 3321 - if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { 3322 - wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 3323 - ice_flush(hw); 3324 - } 3325 - } 3317 + if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) 3318 + ice_ptp_process_ts(pf); 3326 3319 3327 3320 skip_irq: 3328 3321 ice_irq_dynamic_ena(hw, NULL, NULL); 3322 + ice_flush(hw); 3323 + 3324 + if (ice_ptp_tx_tstamps_pending(pf)) { 3325 + /* If any new Tx timestamps happened while in interrupt, 3326 + * re-arm the interrupt to trigger it again. 3327 + */ 3328 + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 3329 + ice_flush(hw); 3330 + } 3329 3331 3330 3332 return IRQ_HANDLED; 3331 3333 } ··· 7809 7807 7810 7808 /* Restore timestamp mode settings after VSI rebuild */ 7811 7809 ice_ptp_restore_timestamp_mode(pf); 7810 + 7811 + /* Start PTP periodic work after VSI is fully rebuilt */ 7812 + ice_ptp_queue_work(pf); 7812 7813 return; 7813 7814 7814 7815 err_vsi_rebuild: ··· 9661 9656 if (err) 9662 9657 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 9663 9658 vsi->vsi_num, vsi->vsw->sw_id); 9664 - 9665 - /* Update existing tunnels information */ 9666 - udp_tunnel_get_rx_info(netdev); 9667 9659 9668 9660 return err; 9669 9661 }
+109 -70
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 573 573 pf = ptp_port_to_pf(ptp_port); 574 574 hw = &pf->hw; 575 575 576 + if (!tx->init) 577 + return; 578 + 576 579 /* Read the Tx ready status first */ 577 580 if (tx->has_ready_bitmap) { 578 581 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); ··· 677 674 pf->ptp.tx_hwtstamp_good += tstamp_good; 678 675 } 679 676 680 - /** 681 - * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device 682 - * @pf: Board private structure 683 - */ 684 - static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) 677 + static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf) 685 678 { 686 679 struct ice_ptp_port *port; 687 - unsigned int i; 688 680 689 681 mutex_lock(&pf->adapter->ports.lock); 690 682 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) { ··· 691 693 ice_ptp_process_tx_tstamp(tx); 692 694 } 693 695 mutex_unlock(&pf->adapter->ports.lock); 694 - 695 - for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { 696 - u64 tstamp_ready; 697 - int err; 698 - 699 - /* Read the Tx ready status first */ 700 - err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 701 - if (err) 702 - break; 703 - else if (tstamp_ready) 704 - return ICE_TX_TSTAMP_WORK_PENDING; 705 - } 706 - 707 - return ICE_TX_TSTAMP_WORK_DONE; 708 - } 709 - 710 - /** 711 - * ice_ptp_tx_tstamp - Process Tx timestamps for this function. 712 - * @tx: Tx tracking structure to initialize 713 - * 714 - * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete 715 - * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise. 716 - */ 717 - static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) 718 - { 719 - bool more_timestamps; 720 - unsigned long flags; 721 - 722 - if (!tx->init) 723 - return ICE_TX_TSTAMP_WORK_DONE; 724 - 725 - /* Process the Tx timestamp tracker */ 726 - ice_ptp_process_tx_tstamp(tx); 727 - 728 - /* Check if there are outstanding Tx timestamps */ 729 - spin_lock_irqsave(&tx->lock, flags); 730 - more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); 731 - spin_unlock_irqrestore(&tx->lock, flags); 732 - 733 - if (more_timestamps) 734 - return ICE_TX_TSTAMP_WORK_PENDING; 735 - 736 - return ICE_TX_TSTAMP_WORK_DONE; 737 696 } 738 697 739 698 /** ··· 1302 1347 /* Do not reconfigure E810 or E830 PHY */ 1303 1348 return; 1304 1349 case ICE_MAC_GENERIC: 1305 - case ICE_MAC_GENERIC_3K_E825: 1306 1350 ice_ptp_port_phy_restart(ptp_port); 1351 + return; 1352 + case ICE_MAC_GENERIC_3K_E825: 1353 + if (linkup) 1354 + ice_ptp_port_phy_restart(ptp_port); 1307 1355 return; 1308 1356 default: 1309 1357 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); ··· 2621 2663 return idx + tx->offset; 2622 2664 } 2623 2665 2624 - /** 2625 - * ice_ptp_process_ts - Process the PTP Tx timestamps 2626 - * @pf: Board private structure 2627 - * 2628 - * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx 2629 - * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 2630 - */ 2631 - enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) 2666 + void ice_ptp_process_ts(struct ice_pf *pf) 2632 2667 { 2633 2668 switch (pf->ptp.tx_interrupt_mode) { 2634 2669 case ICE_PTP_TX_INTERRUPT_NONE: 2635 2670 /* This device has the clock owner handle timestamps for it */ 2636 - return ICE_TX_TSTAMP_WORK_DONE; 2671 + return; 2637 2672 case ICE_PTP_TX_INTERRUPT_SELF: 2638 2673 /* This device handles its own timestamps */ 2639 - return ice_ptp_tx_tstamp(&pf->ptp.port.tx); 2674 + ice_ptp_process_tx_tstamp(&pf->ptp.port.tx); 2675 + return; 2640 2676 case ICE_PTP_TX_INTERRUPT_ALL: 2641 2677 /* This device handles timestamps for all ports */ 2642 - return ice_ptp_tx_tstamp_owner(pf); 2678 + ice_ptp_tx_tstamp_owner(pf); 2679 + return; 2643 2680 default: 2644 2681 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2645 2682 pf->ptp.tx_interrupt_mode); 2646 - return ICE_TX_TSTAMP_WORK_DONE; 2683 + return; 2647 2684 } 2685 + } 2686 + 2687 + static bool ice_port_has_timestamps(struct ice_ptp_tx *tx) 2688 + { 2689 + bool more_timestamps; 2690 + 2691 + scoped_guard(spinlock_irqsave, &tx->lock) { 2692 + if (!tx->init) 2693 + return false; 2694 + 2695 + more_timestamps = !bitmap_empty(tx->in_use, tx->len); 2696 + } 2697 + 2698 + return more_timestamps; 2699 + } 2700 + 2701 + static bool ice_any_port_has_timestamps(struct ice_pf *pf) 2702 + { 2703 + struct ice_ptp_port *port; 2704 + 2705 + scoped_guard(mutex, &pf->adapter->ports.lock) { 2706 + list_for_each_entry(port, &pf->adapter->ports.ports, 2707 + list_node) { 2708 + struct ice_ptp_tx *tx = &port->tx; 2709 + 2710 + if (ice_port_has_timestamps(tx)) 2711 + return true; 2712 + } 2713 + } 2714 + 2715 + return false; 2716 + } 2717 + 2718 + bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf) 2719 + { 2720 + struct ice_hw *hw = &pf->hw; 2721 + unsigned int i; 2722 + 2723 + /* Check software indicator */ 2724 + switch (pf->ptp.tx_interrupt_mode) { 2725 + case ICE_PTP_TX_INTERRUPT_NONE: 2726 + return false; 2727 + case ICE_PTP_TX_INTERRUPT_SELF: 2728 + if (ice_port_has_timestamps(&pf->ptp.port.tx)) 2729 + return true; 2730 + break; 2731 + case ICE_PTP_TX_INTERRUPT_ALL: 2732 + if (ice_any_port_has_timestamps(pf)) 2733 + return true; 2734 + break; 2735 + default: 2736 + WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2737 + pf->ptp.tx_interrupt_mode); 2738 + break; 2739 + } 2740 + 2741 + /* Check hardware indicator */ 2742 + for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { 2743 + u64 tstamp_ready = 0; 2744 + int err; 2745 + 2746 + err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 2747 + if (err || tstamp_ready) 2748 + return true; 2749 + } 2750 + 2751 + return false; 2648 2752 } 2649 2753 2650 2754 /** ··· 2758 2738 return IRQ_WAKE_THREAD; 2759 2739 case ICE_MAC_E830: 2760 2740 /* E830 can read timestamps in the top half using rd32() */ 2761 - if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { 2741 + ice_ptp_process_ts(pf); 2742 + 2743 + if (ice_ptp_tx_tstamps_pending(pf)) { 2762 2744 /* Process outstanding Tx timestamps. If there 2763 2745 * is more work, re-arm the interrupt to trigger again. 2764 2746 */ ··· 2840 2818 } 2841 2819 2842 2820 /** 2821 + * ice_ptp_queue_work - Queue PTP periodic work for a PF 2822 + * @pf: Board private structure 2823 + * 2824 + * Helper function to queue PTP periodic work after VSI rebuild completes. 2825 + * This ensures that PTP work only runs when VSI structures are ready. 2826 + */ 2827 + void ice_ptp_queue_work(struct ice_pf *pf) 2828 + { 2829 + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) && 2830 + pf->ptp.state == ICE_PTP_READY) 2831 + kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0); 2832 + } 2833 + 2834 + /** 2843 2835 * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild 2844 2836 * @pf: Board private structure 2845 2837 * @rebuild: rebuild if true, prepare if false ··· 2871 2835 struct ice_pf *peer_pf = ptp_port_to_pf(port); 2872 2836 2873 2837 if (!ice_is_primary(&peer_pf->hw)) { 2874 - if (rebuild) 2838 + if (rebuild) { 2839 + /* TODO: When implementing rebuild=true: 2840 + * 1. Ensure secondary PFs' VSIs are rebuilt 2841 + * 2. Call ice_ptp_queue_work(peer_pf) after VSI rebuild 2842 + */ 2875 2843 ice_ptp_rebuild(peer_pf, reset_type); 2876 - else 2844 + } else { 2877 2845 ice_ptp_prepare_for_reset(peer_pf, reset_type); 2846 + } 2878 2847 } 2879 2848 } ··· 3024 2983 } 3025 2984 3026 2985 ptp->state = ICE_PTP_READY; 3027 - 3028 - /* Start periodic work going */ 3029 - kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3030 2986 3031 2987 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 3032 2988 return; ··· 3229 3191 { 3230 3192 switch (pf->hw.mac_type) { 3231 3193 case ICE_MAC_GENERIC: 3232 - /* E822 based PHY has the clock owner process the interrupt 3233 - * for all ports. 3194 + case ICE_MAC_GENERIC_3K_E825: 3195 + /* E82x hardware has the clock owner process timestamps for 3196 + * all ports. 3234 3197 */ 3235 3198 if (ice_pf_src_tmr_owned(pf)) 3236 3199 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
+13 -5
drivers/net/ethernet/intel/ice/ice_ptp.h
··· 304 304 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); 305 305 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx); 306 306 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx); 307 - enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); 307 + void ice_ptp_process_ts(struct ice_pf *pf); 308 308 irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf); 309 + bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf); 309 310 u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, 310 311 struct ptp_system_timestamp *sts); 311 312 ··· 318 317 void ice_ptp_init(struct ice_pf *pf); 319 318 void ice_ptp_release(struct ice_pf *pf); 320 319 void ice_ptp_link_change(struct ice_pf *pf, bool linkup); 320 + void ice_ptp_queue_work(struct ice_pf *pf); 321 321 #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ 322 322 323 323 static inline int ice_ptp_hwtstamp_get(struct net_device *netdev, ··· 347 345 348 346 static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { } 349 347 350 - static inline bool ice_ptp_process_ts(struct ice_pf *pf) 351 - { 352 - return true; 353 - } 348 + static inline void ice_ptp_process_ts(struct ice_pf *pf) { } 354 349 355 350 static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) 356 351 { 357 352 return IRQ_HANDLED; 353 + } 354 + 355 + static inline bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf) 356 + { 357 + return false; 358 358 } 359 359 360 360 static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, ··· 384 380 static inline void ice_ptp_init(struct ice_pf *pf) { } 385 381 static inline void ice_ptp_release(struct ice_pf *pf) { } 386 382 static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup) 383 + { 384 + } 385 + 386 + static inline void ice_ptp_queue_work(struct ice_pf *pf) 387 387 { 388 388 } 389 389
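
Taken together, the ice_ptp changes implement the lost-interrupt-safe ordering: drain the ready timestamps, re-enable the interrupt, then re-check for work that raced in and self-inject the interrupt if any is found. A condensed sketch of the resulting ISR shape, using only names from the diffs above (simplified; not the complete driver logic):

    if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread))
        ice_ptp_process_ts(pf);          /* drain what is ready now */

    ice_irq_dynamic_ena(hw, NULL, NULL); /* re-enable first */
    ice_flush(hw);

    if (ice_ptp_tx_tstamps_pending(pf)) {
        /* A timestamp completed after the drain: fire the interrupt
         * again so it is handled on the next pass instead of lost.
         */
        wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M);
        ice_flush(hw);
    }
    return IRQ_HANDLED;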
+15 -6
drivers/net/ethernet/spacemit/k1_emac.c
··· 12 12 #include <linux/dma-mapping.h> 13 13 #include <linux/etherdevice.h> 14 14 #include <linux/ethtool.h> 15 + #include <linux/if_vlan.h> 15 16 #include <linux/interrupt.h> 16 17 #include <linux/io.h> 17 18 #include <linux/iopoll.h> ··· 39 38 40 39 #define EMAC_DEFAULT_BUFSIZE 1536 41 40 #define EMAC_RX_BUF_2K 2048 42 - #define EMAC_RX_BUF_4K 4096 41 + #define EMAC_RX_BUF_MAX FIELD_MAX(RX_DESC_1_BUFFER_SIZE_1_MASK) 43 42 44 43 /* Tuning parameters from SpacemiT */ 45 44 #define EMAC_TX_FRAMES 64 ··· 203 202 { 204 203 /* Destination address for 802.3x Ethernet flow control */ 205 204 u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 }; 206 - 207 - u32 rxirq = 0, dma = 0; 205 + u32 rxirq = 0, dma = 0, frame_sz; 208 206 209 207 regmap_set_bits(priv->regmap_apmu, 210 208 priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG, ··· 227 227 emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, 228 228 DEFAULT_TX_THRESHOLD); 229 229 emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD); 230 + 231 + /* Set maximum frame size and jabber size based on configured MTU, 232 + * accounting for Ethernet header, double VLAN tags, and FCS. 233 + */ 234 + frame_sz = priv->ndev->mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN; 235 + 236 + emac_wr(priv, MAC_MAXIMUM_FRAME_SIZE, frame_sz); 237 + emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz); 238 + emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz); 230 239 231 240 /* Configure flow control (enabled in emac_adjust_link() later) */ 232 241 emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH); ··· 933 924 return -EBUSY; 934 925 } 935 926 936 - frame_len = mtu + ETH_HLEN + ETH_FCS_LEN; 927 + frame_len = mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN; 937 928 938 929 if (frame_len <= EMAC_DEFAULT_BUFSIZE) 939 930 priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE; 940 931 else if (frame_len <= EMAC_RX_BUF_2K) 941 932 priv->dma_buf_sz = EMAC_RX_BUF_2K; 942 933 else 943 - priv->dma_buf_sz = EMAC_RX_BUF_4K; 934 + priv->dma_buf_sz = EMAC_RX_BUF_MAX; 944 935 945 936 ndev->mtu = mtu; 946 937 ··· 2034 2025 ndev->hw_features = NETIF_F_SG; 2035 2026 ndev->features |= ndev->hw_features; 2036 2027 2037 - ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN); 2028 + ndev->max_mtu = EMAC_RX_BUF_MAX - (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN); 2038 2029 ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; 2039 2030 2040 2031 priv = netdev_priv(ndev);
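
To make the new frame budget concrete: with the standard sizes (ETH_HLEN = 14, VLAN_HLEN = 4, ETH_FCS_LEN = 4) the driver now reserves room for a double-tagged frame, so a 1500-byte MTU gives 1500 + 14 + 2*4 + 4 = 1526 bytes, and max_mtu is the same sum inverted against the largest buffer the RX descriptor size field can express. A tiny standalone check (plain C):

    #include <stdio.h>

    #define ETH_HLEN    14 /* dst MAC + src MAC + ethertype */
    #define VLAN_HLEN    4 /* one 802.1Q tag */
    #define ETH_FCS_LEN  4 /* frame check sequence */

    int main(void)
    {
        unsigned int mtu = 1500;
        unsigned int frame_sz = mtu + ETH_HLEN + 2 * VLAN_HLEN +
                                ETH_FCS_LEN;

        printf("frame_sz for MTU %u: %u\n", mtu, frame_sz); /* 1526 */
        return 0;
    }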
+2 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 8042 8042 u32 chan; 8043 8043 8044 8044 if (!ndev || !netif_running(ndev)) 8045 - return 0; 8045 + goto suspend_bsp; 8046 8046 8047 8047 mutex_lock(&priv->lock); 8048 8048 ··· 8082 8082 if (stmmac_fpe_supported(priv)) 8083 8083 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv); 8084 8084 8085 + suspend_bsp: 8085 8086 if (priv->plat->suspend) 8086 8087 return priv->plat->suspend(dev, priv->plat->bsp_priv); 8087 8088
+35 -6
drivers/net/ethernet/ti/cpsw.c
··· 305 305 return 0; 306 306 } 307 307 308 - static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 308 + static void cpsw_ndo_set_rx_mode_work(struct work_struct *work) 309 309 { 310 - struct cpsw_priv *priv = netdev_priv(ndev); 310 + struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work); 311 311 struct cpsw_common *cpsw = priv->cpsw; 312 + struct net_device *ndev = priv->ndev; 312 313 int slave_port = -1; 314 + 315 + rtnl_lock(); 316 + if (!netif_running(ndev)) 317 + goto unlock_rtnl; 318 + 319 + netif_addr_lock_bh(ndev); 313 320 314 321 if (cpsw->data.dual_emac) 315 322 slave_port = priv->emac_port + 1; ··· 325 318 /* Enable promiscuous mode */ 326 319 cpsw_set_promiscious(ndev, true); 327 320 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port); 328 - return; 321 + goto unlock_addr; 329 322 } else { 330 323 /* Disable promiscuous mode */ 331 324 cpsw_set_promiscious(ndev, false); ··· 338 331 /* add/remove mcast address either for real netdev or for vlan */ 339 332 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, 340 333 cpsw_del_mc_addr); 334 + 335 + unlock_addr: 336 + netif_addr_unlock_bh(ndev); 337 + unlock_rtnl: 338 + rtnl_unlock(); 339 + } 340 + 341 + static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 342 + { 343 + struct cpsw_priv *priv = netdev_priv(ndev); 344 + 345 + schedule_work(&priv->rx_mode_work); 341 346 } 342 347 343 348 static unsigned int cpsw_rxbuf_total_len(unsigned int len) ··· 1491 1472 priv_sl2->ndev = ndev; 1492 1473 priv_sl2->dev = &ndev->dev; 1493 1474 priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1475 + INIT_WORK(&priv_sl2->rx_mode_work, cpsw_ndo_set_rx_mode_work); 1494 1476 1495 1477 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { 1496 1478 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, ··· 1673 1653 priv->dev = dev; 1674 1654 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1675 1655 priv->emac_port = 0; 1656 + INIT_WORK(&priv->rx_mode_work, cpsw_ndo_set_rx_mode_work); 1676 1657 1677 1658 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 1678 1659 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); ··· 1779 1758 static void cpsw_remove(struct platform_device *pdev) 1780 1759 { 1781 1760 struct cpsw_common *cpsw = platform_get_drvdata(pdev); 1761 + struct net_device *ndev; 1762 + struct cpsw_priv *priv; 1782 1763 int i, ret; 1783 1764 1784 1765 ret = pm_runtime_resume_and_get(&pdev->dev); ··· 1793 1770 return; 1794 1771 } 1795 1772 1796 - for (i = 0; i < cpsw->data.slaves; i++) 1797 - if (cpsw->slaves[i].ndev) 1798 - unregister_netdev(cpsw->slaves[i].ndev); 1773 + for (i = 0; i < cpsw->data.slaves; i++) { 1774 + ndev = cpsw->slaves[i].ndev; 1775 + if (!ndev) 1776 + continue; 1777 + 1778 + priv = netdev_priv(ndev); 1779 + unregister_netdev(ndev); 1780 + disable_work_sync(&priv->rx_mode_work); 1781 + } 1799 1782 1800 1783 cpts_release(cpsw->cpts); 1801 1784 cpdma_ctlr_destroy(cpsw->dma);
+29 -5
drivers/net/ethernet/ti/cpsw_new.c
··· 248 248 return 0; 249 249 } 250 250 251 - static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 251 + static void cpsw_ndo_set_rx_mode_work(struct work_struct *work) 252 252 { 253 - struct cpsw_priv *priv = netdev_priv(ndev); 253 + struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work); 254 254 struct cpsw_common *cpsw = priv->cpsw; 255 + struct net_device *ndev = priv->ndev; 255 256 257 + rtnl_lock(); 258 + if (!netif_running(ndev)) 259 + goto unlock_rtnl; 260 + 261 + netif_addr_lock_bh(ndev); 256 262 if (ndev->flags & IFF_PROMISC) { 257 263 /* Enable promiscuous mode */ 258 264 cpsw_set_promiscious(ndev, true); 259 265 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port); 260 - return; 266 + goto unlock_addr; 261 267 } 262 268 263 269 /* Disable promiscuous mode */ ··· 276 270 /* add/remove mcast address either for real netdev or for vlan */ 277 271 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, 278 272 cpsw_del_mc_addr); 273 + 274 + unlock_addr: 275 + netif_addr_unlock_bh(ndev); 276 + unlock_rtnl: 277 + rtnl_unlock(); 278 + } 279 + 280 + static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 281 + { 282 + struct cpsw_priv *priv = netdev_priv(ndev); 283 + 284 + schedule_work(&priv->rx_mode_work); 279 285 } 280 286 281 287 static unsigned int cpsw_rxbuf_total_len(unsigned int len) ··· 1416 1398 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1417 1399 priv->emac_port = i + 1; 1418 1400 priv->tx_packet_min = CPSW_MIN_PACKET_SIZE; 1401 + INIT_WORK(&priv->rx_mode_work, cpsw_ndo_set_rx_mode_work); 1419 1402 1420 1403 if (is_valid_ether_addr(slave_data->mac_addr)) { 1421 1404 ether_addr_copy(priv->mac_addr, slave_data->mac_addr); ··· 1466 1447 1467 1448 static void cpsw_unregister_ports(struct cpsw_common *cpsw) 1468 1449 { 1450 + struct net_device *ndev; 1451 + struct cpsw_priv *priv; 1469 1452 int i = 0; 1470 1453 1471 1454 for (i = 0; i < cpsw->data.slaves; i++) { 1472 - if (!cpsw->slaves[i].ndev) 1455 + ndev = cpsw->slaves[i].ndev; 1456 + if (!ndev) 1473 1457 continue; 1474 1458 1475 - unregister_netdev(cpsw->slaves[i].ndev); 1459 + priv = netdev_priv(ndev); 1460 + unregister_netdev(ndev); 1461 + disable_work_sync(&priv->rx_mode_work); 1476 1462 } 1477 1463 } 1478 1464
+1
drivers/net/ethernet/ti/cpsw_priv.h
··· 391 391 u32 tx_packet_min; 392 392 struct cpsw_ale_ratelimit ale_bc_ratelimit; 393 393 struct cpsw_ale_ratelimit ale_mc_ratelimit; 394 + struct work_struct rx_mode_work; 394 395 }; 395 396 396 397 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
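
Both cpsw drivers apply the same escape from atomic context: ndo_set_rx_mode() runs under netif_addr_lock_bh() and must not sleep, so the callback now only schedules a work item, and the worker retakes the locks it needs before touching the hardware. A minimal kernel-style sketch of the pattern (hedged; struct my_priv and the function names are illustrative, not the cpsw code):

    struct my_priv {
        struct net_device *ndev;
        struct work_struct rx_mode_work; /* INIT_WORK()ed at probe */
    };

    static void my_rx_mode_work(struct work_struct *work)
    {
        struct my_priv *priv = container_of(work, struct my_priv,
                                            rx_mode_work);
        struct net_device *ndev = priv->ndev;

        rtnl_lock();
        if (netif_running(ndev)) {
            netif_addr_lock_bh(ndev);
            /* ... apply IFF_PROMISC / walk ndev->mc here ... */
            netif_addr_unlock_bh(ndev);
        }
        rtnl_unlock();
    }

    static void my_ndo_set_rx_mode(struct net_device *ndev)
    {
        struct my_priv *priv = netdev_priv(ndev);

        schedule_work(&priv->rx_mode_work); /* atomic context: defer */
    }

The matching teardown is visible in both diffs: unregister_netdev() first, then disable_work_sync(), so a late-scheduled work item cannot run against a vanished netdev.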
+3 -2
drivers/net/macvlan.c
··· 1567 1567 /* the macvlan port may be freed by macvlan_uninit when fail to register. 1568 1568 * so we destroy the macvlan port only when it's valid. 1569 1569 */ 1570 - if (create && macvlan_port_get_rtnl(lowerdev)) { 1570 + if (macvlan_port_get_rtnl(lowerdev)) { 1571 1571 macvlan_flush_sources(port, vlan); 1572 - macvlan_port_destroy(port->dev); 1572 + if (create) 1573 + macvlan_port_destroy(port->dev); 1573 1574 } 1574 1575 return err; 1575 1576 }
+2
drivers/net/phy/sfp.c
··· 479 479 linkmode_zero(caps->link_modes); 480 480 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 481 481 caps->link_modes); 482 + phy_interface_zero(caps->interfaces); 483 + __set_bit(PHY_INTERFACE_MODE_1000BASEX, caps->interfaces); 482 484 } 483 485 484 486 #define SFP_QUIRK(_v, _p, _s, _f) \
+15 -14
drivers/net/usb/r8152.c
··· 8535 8535 usb_submit_urb(tp->intr_urb, GFP_NOIO); 8536 8536 } 8537 8537 8538 - /* If the device is RTL8152_INACCESSIBLE here then we should do a 8539 - * reset. This is important because the usb_lock_device_for_reset() 8540 - * that happens as a result of usb_queue_reset_device() will silently 8541 - * fail if the device was suspended or if too much time passed. 8542 - * 8543 - * NOTE: The device is locked here so we can directly do the reset. 8544 - * We don't need usb_lock_device_for_reset() because that's just a 8545 - * wrapper over device_lock() and device_resume() (which calls us) 8546 - * does that for us. 8547 - */ 8548 - if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) 8549 - usb_reset_device(tp->udev); 8550 - 8551 8538 return 0; 8552 8539 } 8553 8540 ··· 8645 8658 static int rtl8152_resume(struct usb_interface *intf) 8646 8659 { 8647 8660 struct r8152 *tp = usb_get_intfdata(intf); 8661 + bool runtime_resume = test_bit(SELECTIVE_SUSPEND, &tp->flags); 8648 8662 int ret; 8649 8663 8650 8664 mutex_lock(&tp->control); 8651 8665 8652 8666 rtl_reset_ocp_base(tp); 8653 8667 8654 - if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) 8668 + if (runtime_resume) 8655 8669 ret = rtl8152_runtime_resume(tp); 8656 8670 else 8657 8671 ret = rtl8152_system_resume(tp); 8658 8672 8659 8673 mutex_unlock(&tp->control); 8674 + 8675 + /* If the device is RTL8152_INACCESSIBLE here then we should do a 8676 + * reset. This is important because the usb_lock_device_for_reset() 8677 + * that happens as a result of usb_queue_reset_device() will silently 8678 + * fail if the device was suspended or if too much time passed. 8679 + * 8680 + * NOTE: The device is locked here so we can directly do the reset. 8681 + * We don't need usb_lock_device_for_reset() because that's just a 8682 + * wrapper over device_lock() and device_resume() (which calls us) 8683 + * does that for us. 8684 + */ 8685 + if (!runtime_resume && test_bit(RTL8152_INACCESSIBLE, &tp->flags)) 8686 + usb_reset_device(tp->udev); 8660 8687 8661 8688 return ret; 8662 8689 }
-2
drivers/net/wireless/intel/iwlwifi/mld/iface.c
··· 55 55 56 56 ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL); 57 57 58 - wiphy_delayed_work_cancel(mld->wiphy, &mld_vif->mlo_scan_start_wk); 59 - 60 58 CLEANUP_STRUCT(mld_vif); 61 59 } 62 60
+2
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
··· 1759 1759 wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk); 1760 1760 wiphy_delayed_work_cancel(mld->wiphy, 1761 1761 &mld_vif->emlsr.check_tpt_wk); 1762 + wiphy_delayed_work_cancel(mld->wiphy, 1763 + &mld_vif->mlo_scan_start_wk); 1762 1764 1763 1765 iwl_mld_reset_cca_40mhz_workaround(mld, vif); 1764 1766 iwl_mld_smps_workaround(mld, vif, true);
+5 -1
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 2 /* 3 - * Copyright (C) 2012-2014, 2018-2025 Intel Corporation 3 + * Copyright (C) 2012-2014, 2018-2026 Intel Corporation 4 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 6 */ ··· 3239 3239 3240 3240 IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n"); 3241 3241 3242 + iwl_mvm_pause_tcm(mvm, true); 3243 + 3242 3244 mvm->fast_resume = true; 3243 3245 set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 3244 3246 ··· 3296 3294 IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); 3297 3295 mvm->trans->state = IWL_TRANS_NO_FW; 3298 3296 } 3297 + 3298 + iwl_mvm_resume_tcm(mvm); 3299 3299 3300 3300 out: 3301 3301 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+12
include/linux/skbuff.h
··· 4301 4301 skb_headlen(skb), buffer); 4302 4302 } 4303 4303 4304 + /* Variant of skb_header_pointer() where @offset is user-controlled 4305 + * and potentially negative. 4306 + */ 4307 + static inline void * __must_check 4308 + skb_header_pointer_careful(const struct sk_buff *skb, int offset, 4309 + int len, void *buffer) 4310 + { 4311 + if (unlikely(offset < 0 && -offset > skb_headroom(skb))) 4312 + return NULL; 4313 + return skb_header_pointer(skb, offset, len, buffer); 4314 + } 4315 + 4304 4316 static inline void * __must_check 4305 4317 skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len) 4306 4318 {
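
A usage sketch mirroring the cls_u32 call sites converted later in this series: the rule-controlled offset arithmetic can go negative, and plain skb_header_pointer() does not reject an offset that reaches above the headroom, so the careful variant returns NULL for it (kernel-style; the wrapper function here is illustrative):

    /* Return true if the 32-bit word at a possibly negative,
     * rule-controlled offset matches val under mask; out-of-range
     * offsets simply fail to match.
     */
    static bool u32_key_matches(const struct sk_buff *skb, int toff,
                                __be32 val, __be32 mask)
    {
        __be32 *data, hdata;

        data = skb_header_pointer_careful(skb, toff, sizeof(hdata),
                                          &hdata);
        if (!data)
            return false;
        return !((*data ^ val) & mask);
    }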
+4 -4
net/core/filter.c
··· 2289 2289 2290 2290 err = bpf_out_neigh_v6(net, skb, dev, nh); 2291 2291 if (unlikely(net_xmit_eval(err))) 2292 - DEV_STATS_INC(dev, tx_errors); 2292 + dev_core_stats_tx_dropped_inc(dev); 2293 2293 else 2294 2294 ret = NET_XMIT_SUCCESS; 2295 2295 goto out_xmit; 2296 2296 out_drop: 2297 - DEV_STATS_INC(dev, tx_errors); 2297 + dev_core_stats_tx_dropped_inc(dev); 2298 2298 kfree_skb(skb); 2299 2299 out_xmit: 2300 2300 return ret; ··· 2396 2396 2397 2397 err = bpf_out_neigh_v4(net, skb, dev, nh); 2398 2398 if (unlikely(net_xmit_eval(err))) 2399 - DEV_STATS_INC(dev, tx_errors); 2399 + dev_core_stats_tx_dropped_inc(dev); 2400 2400 else 2401 2401 ret = NET_XMIT_SUCCESS; 2402 2402 goto out_xmit; 2403 2403 out_drop: 2404 - DEV_STATS_INC(dev, tx_errors); 2404 + dev_core_stats_tx_dropped_inc(dev); 2405 2405 kfree_skb(skb); 2406 2406 out_xmit: 2407 2407 return ret;
+2
net/core/gro.c
··· 265 265 goto out; 266 266 } 267 267 268 + /* NICs can feed encapsulated packets into GRO */ 269 + skb->encapsulation = 0; 268 270 rcu_read_lock(); 269 271 list_for_each_entry_rcu(ptype, head, list) { 270 272 if (ptype->type != type || !ptype->callbacks.gro_complete)
+34 -16
net/core/net-procfs.c
··· 170 170 .show = softnet_seq_show, 171 171 }; 172 172 173 + struct ptype_iter_state { 174 + struct seq_net_private p; 175 + struct net_device *dev; 176 + }; 177 + 173 178 static void *ptype_get_idx(struct seq_file *seq, loff_t pos) 174 179 { 180 + struct ptype_iter_state *iter = seq->private; 175 181 struct list_head *ptype_list = NULL; 176 182 struct packet_type *pt = NULL; 177 183 struct net_device *dev; ··· 187 181 for_each_netdev_rcu(seq_file_net(seq), dev) { 188 182 ptype_list = &dev->ptype_all; 189 183 list_for_each_entry_rcu(pt, ptype_list, list) { 190 - if (i == pos) 184 + if (i == pos) { 185 + iter->dev = dev; 191 186 return pt; 187 + } 192 188 ++i; 193 189 } 194 190 } 191 + 192 + iter->dev = NULL; 195 193 196 194 list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) { 197 195 if (i == pos) ··· 228 218 229 219 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) 230 220 { 221 + struct ptype_iter_state *iter = seq->private; 231 222 struct net *net = seq_file_net(seq); 232 223 struct net_device *dev; 233 224 struct packet_type *pt; ··· 240 229 return ptype_get_idx(seq, 0); 241 230 242 231 pt = v; 243 - nxt = pt->list.next; 244 - if (pt->dev) { 245 - if (nxt != &pt->dev->ptype_all) 232 + nxt = READ_ONCE(pt->list.next); 233 + dev = iter->dev; 234 + if (dev) { 235 + if (nxt != &dev->ptype_all) 246 236 goto found; 247 237 248 - dev = pt->dev; 249 238 for_each_netdev_continue_rcu(seq_file_net(seq), dev) { 250 - if (!list_empty(&dev->ptype_all)) { 251 - nxt = dev->ptype_all.next; 239 + nxt = READ_ONCE(dev->ptype_all.next); 240 + if (nxt != &dev->ptype_all) { 241 + iter->dev = dev; 252 242 goto found; 253 243 } 254 244 } 255 - nxt = net->ptype_all.next; 245 + iter->dev = NULL; 246 + nxt = READ_ONCE(net->ptype_all.next); 256 247 goto net_ptype_all; 257 248 } 258 249 ··· 265 252 266 253 if (nxt == &net->ptype_all) { 267 254 /* continue with ->ptype_specific if it's not empty */ 268 - nxt = net->ptype_specific.next; 255 + nxt = READ_ONCE(net->ptype_specific.next); 269 256 if (nxt != &net->ptype_specific) 270 257 goto found; 271 258 } 272 259 273 260 hash = 0; 274 - nxt = ptype_base[0].next; 261 + nxt = READ_ONCE(ptype_base[0].next); 275 262 } else 276 263 hash = ntohs(pt->type) & PTYPE_HASH_MASK; 277 264 278 265 while (nxt == &ptype_base[hash]) { 279 266 if (++hash >= PTYPE_HASH_SIZE) 280 267 return NULL; 281 - nxt = ptype_base[hash].next; 268 + nxt = READ_ONCE(ptype_base[hash].next); 282 269 } 283 270 found: 284 271 return list_entry(nxt, struct packet_type, list); ··· 292 279 293 280 static int ptype_seq_show(struct seq_file *seq, void *v) 294 281 { 282 + struct ptype_iter_state *iter = seq->private; 295 283 struct packet_type *pt = v; 284 + struct net_device *dev; 296 285 297 - if (v == SEQ_START_TOKEN) 286 + if (v == SEQ_START_TOKEN) { 298 287 seq_puts(seq, "Type Device Function\n"); 299 - else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && 300 - (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { 288 + return 0; 289 + } 290 + dev = iter->dev; 291 + if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && 292 + (!dev || net_eq(dev_net(dev), seq_file_net(seq)))) { 301 293 if (pt->type == htons(ETH_P_ALL)) 302 294 seq_puts(seq, "ALL "); 303 295 else 304 296 seq_printf(seq, "%04x", ntohs(pt->type)); 305 297 306 298 seq_printf(seq, " %-8s %ps\n", 307 - pt->dev ? pt->dev->name : "", pt->func); 299 + dev ? dev->name : "", pt->func); 308 300 } 309 301 310 302 return 0; ··· 333 315 &softnet_seq_ops)) 334 316 goto out_dev; 335 317 if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops, 336 - sizeof(struct seq_net_private))) 318 + sizeof(struct ptype_iter_state))) 337 319 goto out_softnet; 338 320 339 321 if (wext_proc_init(net))
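
The /proc/net/ptype fix is an instance of a general seq_file rule: iteration state that must survive between ->next calls cannot be derived from the RCU-protected element itself (pt->dev may be gone by the next read()), so it moves into per-open private state, sized by the last argument of proc_create_net(). The shape, with names from the diff above:

    struct ptype_iter_state {
        struct seq_net_private p; /* must stay first for seq_file_net() */
        struct net_device *dev;   /* iteration cursor, valid under RCU */
    };

    /* seq->private now points at a ptype_iter_state, allocated here: */
    proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
                    sizeof(struct ptype_iter_state));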
-3
net/ethtool/common.c
··· 862 862 ctx->key_off = key_off; 863 863 ctx->priv_size = ops->rxfh_priv_size; 864 864 865 - ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; 866 - ctx->input_xfrm = RXH_XFRM_NO_CHANGE; 867 - 868 865 return ctx; 869 866 } 870 867
+2 -7
net/ethtool/rss.c
··· 824 824 static int 825 825 ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info) 826 826 { 827 - bool indir_reset = false, indir_mod, xfrm_sym = false; 828 827 struct rss_req_info *request = RSS_REQINFO(req_info); 828 + bool indir_reset = false, indir_mod, xfrm_sym; 829 829 struct ethtool_rxfh_context *ctx = NULL; 830 830 struct net_device *dev = req_info->dev; 831 831 bool mod = false, fields_mod = false; ··· 860 860 861 861 rxfh.input_xfrm = data.input_xfrm; 862 862 ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod); 863 - /* For drivers which don't support input_xfrm it will be set to 0xff 864 - * in the RSS context info. In all other case input_xfrm != 0 means 865 - * symmetric hashing is requested. 866 - */ 867 - if (!request->rss_context || ops->rxfh_per_ctx_key) 868 - xfrm_sym = rxfh.input_xfrm || data.input_xfrm; 863 + xfrm_sym = rxfh.input_xfrm || data.input_xfrm; 869 864 if (rxfh.input_xfrm == data.input_xfrm) 870 865 rxfh.input_xfrm = RXH_XFRM_NO_CHANGE; 871 866
+2 -1
net/ipv6/ip6_fib.c
··· 1138 1138 fib6_set_expires(iter, rt->expires); 1139 1139 fib6_add_gc_list(iter); 1140 1140 } 1141 - if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT))) { 1141 + if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT)) && 1142 + !iter->fib6_nh->fib_nh_gw_family) { 1142 1143 iter->fib6_flags &= ~RTF_ADDRCONF; 1143 1144 iter->fib6_flags &= ~RTF_PREFIX_RT; 1144 1145 }
+1 -1
net/netfilter/nf_tables_api.c
··· 5914 5914 5915 5915 list_for_each_entry(catchall, &set->catchall_list, list) { 5916 5916 ext = nft_set_elem_ext(set, catchall->elem); 5917 - if (!nft_set_elem_active(ext, genmask)) 5917 + if (nft_set_elem_active(ext, genmask)) 5918 5918 continue; 5919 5919 5920 5920 nft_clear(ctx->net, ext);
+6 -7
net/sched/cls_u32.c
··· 161 161 int toff = off + key->off + (off2 & key->offmask); 162 162 __be32 *data, hdata; 163 163 164 - if (skb_headroom(skb) + toff > INT_MAX) 165 - goto out; 166 - 167 - data = skb_header_pointer(skb, toff, 4, &hdata); 164 + data = skb_header_pointer_careful(skb, toff, 4, 165 + &hdata); 168 166 if (!data) 169 167 goto out; 170 168 if ((*data ^ key->val) & key->mask) { ··· 212 214 if (ht->divisor) { 213 215 __be32 *data, hdata; 214 216 215 - data = skb_header_pointer(skb, off + n->sel.hoff, 4, 216 - &hdata); 217 + data = skb_header_pointer_careful(skb, 218 + off + n->sel.hoff, 219 + 4, &hdata); 217 220 if (!data) 218 221 goto out; 219 222 sel = ht->divisor & u32_hash_fold(*data, &n->sel, ··· 228 229 if (n->sel.flags & TC_U32_VAROFFSET) { 229 230 __be16 *data, hdata; 230 231 231 - data = skb_header_pointer(skb, 232 + data = skb_header_pointer_careful(skb, 232 233 off + n->sel.offoff, 233 234 2, &hdata); 234 235 if (!data)
+2 -2
net/tipc/crypto.c
··· 1219 1219 rx = c; 1220 1220 tx = tipc_net(rx->net)->crypto_tx; 1221 1221 if (cancel_delayed_work(&rx->work)) { 1222 - kfree(rx->skey); 1222 + kfree_sensitive(rx->skey); 1223 1223 rx->skey = NULL; 1224 1224 atomic_xchg(&rx->key_distr, 0); 1225 1225 tipc_node_put(rx->node); ··· 2394 2394 break; 2395 2395 default: 2396 2396 synchronize_rcu(); 2397 - kfree(rx->skey); 2397 + kfree_sensitive(rx->skey); 2398 2398 rx->skey = NULL; 2399 2399 break; 2400 2400 }
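
kfree_sensitive() differs from plain kfree() in that it zeroes the allocation (internally via memzero_explicit()) before returning it to the allocator, so session key material cannot be recovered from freed slab memory. The idiom in miniature (kernel-style; the surrounding names are illustrative):

    u8 *skey = kmemdup(key_material, key_len, GFP_KERNEL);

    if (!skey)
        return -ENOMEM;
    /* ... configure the crypto transform from skey ... */
    kfree_sensitive(skey); /* zeroize, then free */
    skey = NULL;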
+64
tools/testing/selftests/net/udpgro_fwd.sh
··· 162 162 echo " ok" 163 163 } 164 164 165 + run_test_csum() { 166 + local -r msg="$1" 167 + local -r dst="$2" 168 + local csum_error_filter=UdpInCsumErrors 169 + local csum_errors 170 + 171 + printf "%-40s" "$msg" 172 + 173 + is_ipv6 "$dst" && csum_error_filter=Udp6InCsumErrors 174 + 175 + ip netns exec "$NS_DST" iperf3 -s -1 >/dev/null & 176 + wait_local_port_listen "$NS_DST" 5201 tcp 177 + local spid="$!" 178 + ip netns exec "$NS_SRC" iperf3 -c "$dst" -t 2 >/dev/null 179 + local retc="$?" 180 + wait "$spid" 181 + local rets="$?" 182 + if [ "$rets" -ne 0 ] || [ "$retc" -ne 0 ]; then 183 + echo " fail client exit code $retc, server $rets" 184 + ret=1 185 + return 186 + fi 187 + 188 + csum_errors=$(ip netns exec "$NS_DST" nstat -as "$csum_error_filter" | 189 + grep "$csum_error_filter" | awk '{print $2}') 190 + if [ -n "$csum_errors" ] && [ "$csum_errors" -gt 0 ]; then 191 + echo " fail - csum error on receive $csum_errors, expected 0" 192 + ret=1 193 + return 194 + fi 195 + echo " ok" 196 + } 197 + 165 198 run_bench() { 166 199 local -r msg=$1 167 200 local -r dst=$2 ··· 292 259 # stray traffic on top of the UDP tunnel 293 260 ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null 294 261 run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST 262 + cleanup 263 + 264 + # force segmentation and re-aggregation 265 + create_vxlan_pair 266 + ip netns exec "$NS_DST" ethtool -K veth"$DST" generic-receive-offload on 267 + ip netns exec "$NS_SRC" ethtool -K veth"$SRC" tso off 268 + ip -n "$NS_SRC" link set dev veth"$SRC" mtu 1430 269 + 270 + # forward to a 2nd veth pair 271 + ip -n "$NS_DST" link add br0 type bridge 272 + ip -n "$NS_DST" link set dev veth"$DST" master br0 273 + 274 + # segment the aggregated TSO packet, without csum offload 275 + ip -n "$NS_DST" link add veth_segment type veth peer veth_rx 276 + for FEATURE in tso tx-udp-segmentation tx-checksumming; do 277 + ip netns exec "$NS_DST" ethtool -K veth_segment "$FEATURE" off 278 + done 279 + ip -n "$NS_DST" link set dev veth_segment master br0 up 280 + ip -n "$NS_DST" link set dev br0 up 281 + ip -n "$NS_DST" link set dev veth_rx up 282 + 283 + # move the lower layer IP in the last added veth 284 + for ADDR in "$BM_NET_V4$DST/24" "$BM_NET_V6$DST/64"; do 285 + # the dad argument will let iproute emit a unharmful warning 286 + # with ipv4 addresses 287 + ip -n "$NS_DST" addr del dev veth"$DST" "$ADDR" 288 + ip -n "$NS_DST" addr add dev veth_rx "$ADDR" \ 289 + nodad 2>/dev/null 290 + done 291 + 292 + run_test_csum "GSO after GRO" "$OL_NET$DST" 295 293 cleanup 296 294 done 297 295