Merge tag 'net-6.19-rc9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
"Including fixes from wireless and Netfilter.

Previous releases - regressions:

- eth: stmmac: fix stm32 (and potentially others) resume regression

- nf_tables: fix inverted genmask check in nft_map_catchall_activate()

- usb: r8152: fix resume reset deadlock

- fix reporting RXH_XFRM_NO_CHANGE as input_xfrm for RSS contexts

Previous releases - always broken:

- sched: cls_u32: use skb_header_pointer_careful() to avoid OOB reads
with malicious u32 rules

- eth: ice: timestamping related fixes"

* tag 'net-6.19-rc9' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (38 commits)
ipv6: Fix ECMP sibling count mismatch when clearing RTF_ADDRCONF
netfilter: nf_tables: fix inverted genmask check in nft_map_catchall_activate()
net: cpsw: Execute ndo_set_rx_mode callback in a work queue
net: cpsw_new: Execute ndo_set_rx_mode callback in a work queue
gve: Correct ethtool rx_dropped calculation
gve: Fix stats report corruption on queue count change
selftest: net: add a test-case for encap segmentation after GRO
net: gro: fix outer network offset
net: add proper RCU protection to /proc/net/ptype
net: ethernet: adi: adin1110: Check return value of devm_gpiod_get_optional() in adin1110_check_spi()
wifi: iwlwifi: mvm: pause TCM on fast resume
wifi: iwlwifi: mld: cancel mlo_scan_start_wk
net: spacemit: k1-emac: fix jumbo frame support
net: enetc: Convert 16-bit register reads to 32-bit for ENETC v4
net: enetc: Convert 16-bit register writes to 32-bit for ENETC v4
net: enetc: Remove CBDR cacheability AXI settings for ENETC v4
net: enetc: Remove SI/BDR cacheability AXI settings for ENETC v4
tipc: use kfree_sensitive() for session key material
net: stmmac: fix stm32 (and potentially others) resume regression
net: rss: fix reporting RXH_XFRM_NO_CHANGE as input_xfrm for contexts
...

+513 -234
+12
MAINTAINERS
··· 20978 F: drivers/net/pse-pd/ 20979 F: net/ethtool/pse-pd.c 20980 20981 PSTORE FILESYSTEM 20982 M: Kees Cook <kees@kernel.org> 20983 R: Tony Luck <tony.luck@intel.com>
··· 20978 F: drivers/net/pse-pd/ 20979 F: net/ethtool/pse-pd.c 20980 20981 + PSP SECURITY PROTOCOL 20982 + M: Daniel Zahka <daniel.zahka@gmail.com> 20983 + M: Jakub Kicinski <kuba@kernel.org> 20984 + M: Willem de Bruijn <willemdebruijn.kernel@gmail.com> 20985 + F: Documentation/netlink/specs/psp.yaml 20986 + F: Documentation/networking/psp.rst 20987 + F: include/net/psp/ 20988 + F: include/net/psp.h 20989 + F: include/uapi/linux/psp.h 20990 + F: net/psp/ 20991 + K: struct\ psp(_assoc|_dev|hdr)\b 20992 + 20993 PSTORE FILESYSTEM 20994 M: Kees Cook <kees@kernel.org> 20995 R: Tony Luck <tony.luck@intel.com>
+3
drivers/net/ethernet/adi/adin1110.c
··· 1089 1090 reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset", 1091 GPIOD_OUT_LOW); 1092 if (reset_gpio) { 1093 /* MISO pin is used for internal configuration, can't have 1094 * anyone else disturbing the SDO line.
··· 1089 1090 reset_gpio = devm_gpiod_get_optional(&priv->spidev->dev, "reset", 1091 GPIOD_OUT_LOW); 1092 + if (IS_ERR(reset_gpio)) 1093 + return dev_err_probe(&priv->spidev->dev, PTR_ERR(reset_gpio), 1094 + "failed to get reset gpio\n"); 1095 if (reset_gpio) { 1096 /* MISO pin is used for internal configuration, can't have 1097 * anyone else disturbing the SDO line.
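The adin1110 fix above hinges on the devm_gpiod_get_optional() contract: it returns NULL when the GPIO is simply not described, and an ERR_PTR() when the lookup itself failed, so only the former may be treated as "no reset line". A minimal probe-time sketch of that pattern (hypothetical device, not the adin1110 code):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_reset_gpio(struct device *dev)
{
        struct gpio_desc *reset;

        /* NULL: the "reset" line is simply absent.
         * ERR_PTR(): the lookup failed and the value must not be
         * dereferenced or passed to gpiod_*() helpers.
         */
        reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset))
                return dev_err_probe(dev, PTR_ERR(reset),
                                     "failed to get reset gpio\n");

        if (reset) {
                /* the line exists, drive it as the hardware needs */
                gpiod_set_value_cansleep(reset, 1);
        }

        return 0;
}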
+20 -19
drivers/net/ethernet/cavium/liquidio/lio_main.c
··· 3505 */ 3506 netdev->netdev_ops = &lionetdevops; 3507 3508 retval = netif_set_real_num_rx_queues(netdev, num_oqueues); 3509 if (retval) { 3510 dev_err(&octeon_dev->pci_dev->dev, ··· 3537 WRITE_ONCE(sc->caller_is_done, true); 3538 goto setup_nic_dev_free; 3539 } 3540 - 3541 - lio = GET_LIO(netdev); 3542 - 3543 - memset(lio, 0, sizeof(struct lio)); 3544 - 3545 - lio->ifidx = ifidx_or_pfnum; 3546 - 3547 - props = &octeon_dev->props[i]; 3548 - props->gmxport = resp->cfg_info.linfo.gmxport; 3549 - props->netdev = netdev; 3550 3551 lio->linfo.num_rxpciq = num_oqueues; 3552 lio->linfo.num_txpciq = num_iqueues; ··· 3602 /* MTU range: 68 - 16000 */ 3603 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3604 netdev->max_mtu = LIO_MAX_MTU_SIZE; 3605 - 3606 - /* Point to the properties for octeon device to which this 3607 - * interface belongs. 3608 - */ 3609 - lio->oct_dev = octeon_dev; 3610 - lio->octprops = props; 3611 - lio->netdev = netdev; 3612 3613 dev_dbg(&octeon_dev->pci_dev->dev, 3614 "if%d gmx: %d hw_addr: 0x%llx\n", i, ··· 3750 if (!devlink) { 3751 device_unlock(&octeon_dev->pci_dev->dev); 3752 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); 3753 goto setup_nic_dev_free; 3754 } 3755 ··· 3766 3767 setup_nic_dev_free: 3768 3769 - while (i--) { 3770 dev_err(&octeon_dev->pci_dev->dev, 3771 "NIC ifidx:%d Setup failed\n", i); 3772 liquidio_destroy_nic_device(octeon_dev, i); 3773 - } 3774 3775 setup_nic_dev_done: 3776
··· 3505 */ 3506 netdev->netdev_ops = &lionetdevops; 3507 3508 + lio = GET_LIO(netdev); 3509 + 3510 + memset(lio, 0, sizeof(struct lio)); 3511 + 3512 + lio->ifidx = ifidx_or_pfnum; 3513 + 3514 + props = &octeon_dev->props[i]; 3515 + props->gmxport = resp->cfg_info.linfo.gmxport; 3516 + props->netdev = netdev; 3517 + 3518 + /* Point to the properties for octeon device to which this 3519 + * interface belongs. 3520 + */ 3521 + lio->oct_dev = octeon_dev; 3522 + lio->octprops = props; 3523 + lio->netdev = netdev; 3524 + 3525 retval = netif_set_real_num_rx_queues(netdev, num_oqueues); 3526 if (retval) { 3527 dev_err(&octeon_dev->pci_dev->dev, ··· 3520 WRITE_ONCE(sc->caller_is_done, true); 3521 goto setup_nic_dev_free; 3522 } 3523 3524 lio->linfo.num_rxpciq = num_oqueues; 3525 lio->linfo.num_txpciq = num_iqueues; ··· 3595 /* MTU range: 68 - 16000 */ 3596 netdev->min_mtu = LIO_MIN_MTU_SIZE; 3597 netdev->max_mtu = LIO_MAX_MTU_SIZE; 3598 3599 dev_dbg(&octeon_dev->pci_dev->dev, 3600 "if%d gmx: %d hw_addr: 0x%llx\n", i, ··· 3750 if (!devlink) { 3751 device_unlock(&octeon_dev->pci_dev->dev); 3752 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); 3753 + i--; 3754 goto setup_nic_dev_free; 3755 } 3756 ··· 3765 3766 setup_nic_dev_free: 3767 3768 + do { 3769 dev_err(&octeon_dev->pci_dev->dev, 3770 "NIC ifidx:%d Setup failed\n", i); 3771 liquidio_destroy_nic_device(octeon_dev, i); 3772 + } while (i--); 3773 3774 setup_nic_dev_done: 3775
+2 -2
drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
··· 2212 2213 setup_nic_dev_free: 2214 2215 - while (i--) { 2216 dev_err(&octeon_dev->pci_dev->dev, 2217 "NIC ifidx:%d Setup failed\n", i); 2218 liquidio_destroy_nic_device(octeon_dev, i); 2219 - } 2220 2221 setup_nic_dev_done: 2222
··· 2212 2213 setup_nic_dev_free: 2214 2215 + do { 2216 dev_err(&octeon_dev->pci_dev->dev, 2217 "NIC ifidx:%d Setup failed\n", i); 2218 liquidio_destroy_nic_device(octeon_dev, i); 2219 + } while (i--); 2220 2221 setup_nic_dev_done: 2222
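Both liquidio hunks swap a "while (i--)" unwind loop for "do { ... } while (i--)" so that the interface at the current index is also destroyed when setup fails part-way (and lio_main.c now decrements i before jumping when the loop had already completed). A standalone sketch of the difference, using hypothetical setup/teardown helpers rather than the liquidio ones:

#include <linux/errno.h>
#include <linux/printk.h>

/* Hypothetical stand-ins for the per-interface helpers. */
static int setup_nic_device(int i)
{
        return i == 3 ? -ENODEV : 0;    /* pretend interface 3 fails */
}

static void destroy_nic_device(int i)
{
        pr_info("tearing down if%d\n", i);
}

static int setup_all(int num_ifs)
{
        int i;

        for (i = 0; i < num_ifs; i++) {
                if (setup_nic_device(i))
                        goto unwind;
        }
        return 0;

unwind:
        /* "while (i--)" would skip index i itself, leaking whatever the
         * failed interface had already allocated; "do { } while (i--)"
         * also destroys index i before walking back down to 0.
         */
        do {
                destroy_nic_device(i);
        } while (i--);

        return -ENODEV;
}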
+10
drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
··· 1531 } 1532 1533 if_id = (status & 0xFFFF0000) >> 16; 1534 port_priv = ethsw->ports[if_id]; 1535 1536 if (status & DPSW_IRQ_EVENT_LINK_CHANGED) ··· 3025 &ethsw->sw_attr); 3026 if (err) { 3027 dev_err(dev, "dpsw_get_attributes err %d\n", err); 3028 goto err_close; 3029 } 3030
··· 1531 } 1532 1533 if_id = (status & 0xFFFF0000) >> 16; 1534 + if (if_id >= ethsw->sw_attr.num_ifs) { 1535 + dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id); 1536 + goto out; 1537 + } 1538 port_priv = ethsw->ports[if_id]; 1539 1540 if (status & DPSW_IRQ_EVENT_LINK_CHANGED) ··· 3021 &ethsw->sw_attr); 3022 if (err) { 3023 dev_err(dev, "dpsw_get_attributes err %d\n", err); 3024 + goto err_close; 3025 + } 3026 + 3027 + if (!ethsw->sw_attr.num_ifs) { 3028 + dev_err(dev, "DPSW device has no interfaces\n"); 3029 + err = -ENODEV; 3030 goto err_close; 3031 } 3032
+7 -4
drivers/net/ethernet/freescale/enetc/enetc.c
··· 2512 struct enetc_hw *hw = &si->hw; 2513 int err; 2514 2515 - /* set SI cache attributes */ 2516 - enetc_wr(hw, ENETC_SICAR0, 2517 - ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 2518 - enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); 2519 /* enable SI */ 2520 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); 2521
··· 2512 struct enetc_hw *hw = &si->hw; 2513 int err; 2514 2515 + if (is_enetc_rev1(si)) { 2516 + /* set SI cache attributes */ 2517 + enetc_wr(hw, ENETC_SICAR0, 2518 + ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 2519 + enetc_wr(hw, ENETC_SICAR1, ENETC_SICAR_MSI); 2520 + } 2521 + 2522 /* enable SI */ 2523 enetc_wr(hw, ENETC_SIMR, ENETC_SIMR_EN); 2524
+3 -3
drivers/net/ethernet/freescale/enetc/enetc4_pf.c
··· 59 60 if (si != 0) { 61 __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si)); 62 - __raw_writew(lower, hw->port + ENETC4_PSIPMAR1(si)); 63 } else { 64 __raw_writel(upper, hw->port + ENETC4_PMAR0); 65 - __raw_writew(lower, hw->port + ENETC4_PMAR1); 66 } 67 } 68 ··· 73 u16 lower; 74 75 upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si)); 76 - lower = __raw_readw(hw->port + ENETC4_PSIPMAR1(si)); 77 78 put_unaligned_le32(upper, addr); 79 put_unaligned_le16(lower, addr + 4);
··· 59 60 if (si != 0) { 61 __raw_writel(upper, hw->port + ENETC4_PSIPMAR0(si)); 62 + __raw_writel(lower, hw->port + ENETC4_PSIPMAR1(si)); 63 } else { 64 __raw_writel(upper, hw->port + ENETC4_PMAR0); 65 + __raw_writel(lower, hw->port + ENETC4_PMAR1); 66 } 67 } 68 ··· 73 u16 lower; 74 75 upper = __raw_readl(hw->port + ENETC4_PSIPMAR0(si)); 76 + lower = __raw_readl(hw->port + ENETC4_PSIPMAR1(si)); 77 78 put_unaligned_le32(upper, addr); 79 put_unaligned_le16(lower, addr + 4);
-4
drivers/net/ethernet/freescale/enetc/enetc_cbdr.c
··· 74 if (!user->ring) 75 return -ENOMEM; 76 77 - /* set CBDR cache attributes */ 78 - enetc_wr(hw, ENETC_SICAR2, 79 - ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); 80 - 81 regs.pir = hw->reg + ENETC_SICBDRPIR; 82 regs.cir = hw->reg + ENETC_SICBDRCIR; 83 regs.mr = hw->reg + ENETC_SICBDRMR;
··· 74 if (!user->ring) 75 return -ENOMEM; 76 77 regs.pir = hw->reg + ENETC_SICBDRPIR; 78 regs.cir = hw->reg + ENETC_SICBDRCIR; 79 regs.mr = hw->reg + ENETC_SICBDRMR;
+14 -3
drivers/net/ethernet/freescale/enetc/enetc_hw.h
··· 708 #define ENETC_RFSE_EN BIT(15) 709 #define ENETC_RFSE_MODE_BD 2 710 711 static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw, 712 struct net_device *ndev) 713 { 714 - u8 addr[ETH_ALEN] __aligned(4); 715 716 - *(u32 *)addr = __raw_readl(hw->reg + ENETC_SIPMAR0); 717 - *(u16 *)(addr + 4) = __raw_readw(hw->reg + ENETC_SIPMAR1); 718 eth_hw_addr_set(ndev, addr); 719 } 720
··· 708 #define ENETC_RFSE_EN BIT(15) 709 #define ENETC_RFSE_MODE_BD 2 710 711 + static inline void enetc_get_primary_mac_addr(struct enetc_hw *hw, u8 *addr) 712 + { 713 + u32 upper; 714 + u16 lower; 715 + 716 + upper = __raw_readl(hw->reg + ENETC_SIPMAR0); 717 + lower = __raw_readl(hw->reg + ENETC_SIPMAR1); 718 + 719 + put_unaligned_le32(upper, addr); 720 + put_unaligned_le16(lower, addr + 4); 721 + } 722 + 723 static inline void enetc_load_primary_mac_addr(struct enetc_hw *hw, 724 struct net_device *ndev) 725 { 726 + u8 addr[ETH_ALEN]; 727 728 + enetc_get_primary_mac_addr(hw, addr); 729 eth_hw_addr_set(ndev, addr); 730 } 731
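The new enetc_get_primary_mac_addr() assembles the station MAC from one 32-bit read of SIPMAR0 plus the low 16 bits of SIPMAR1. A quick worked example with illustrative register values (not real hardware defaults):

/* Worked example for enetc_get_primary_mac_addr():
 *
 *   SIPMAR0 = 0x04030201, SIPMAR1 = 0x0605
 *
 *   put_unaligned_le32(0x04030201, addr)   -> addr[0..3] = 01 02 03 04
 *   put_unaligned_le16(0x0605, addr + 4)   -> addr[4..5] = 05 06
 *
 * i.e. the MAC address 01:02:03:04:05:06, regardless of how the local
 * "addr" buffer happens to be aligned.
 */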
+51 -26
drivers/net/ethernet/google/gve/gve_ethtool.c
··· 152 u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes, 153 tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail, 154 tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt, 155 - tmp_tx_pkts, tmp_tx_bytes; 156 u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt, 157 rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, 158 - tx_dropped; 159 - int stats_idx, base_stats_idx, max_stats_idx; 160 struct stats *report_stats; 161 int *rx_qid_to_stats_idx; 162 int *tx_qid_to_stats_idx; ··· 200 for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0, 201 rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0, 202 rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0, 203 ring = 0; 204 ring < priv->rx_cfg.num_queues; ring++) { 205 if (priv->rx) { ··· 218 rx->rx_desc_err_dropped_pkt; 219 tmp_rx_hsplit_unsplit_pkt = 220 rx->rx_hsplit_unsplit_pkt; 221 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 222 start)); 223 rx_pkts += tmp_rx_pkts; ··· 230 rx_buf_alloc_fail += tmp_rx_buf_alloc_fail; 231 rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt; 232 rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt; 233 } 234 } 235 for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; ··· 257 data[i++] = rx_bytes; 258 data[i++] = tx_bytes; 259 /* total rx dropped packets */ 260 - data[i++] = rx_skb_alloc_fail + rx_buf_alloc_fail + 261 - rx_desc_err_dropped_pkt; 262 data[i++] = tx_dropped; 263 data[i++] = priv->tx_timeo_cnt; 264 data[i++] = rx_skb_alloc_fail; ··· 273 data[i++] = priv->stats_report_trigger_cnt; 274 i = GVE_MAIN_STATS_LEN; 275 276 - /* For rx cross-reporting stats, start from nic rx stats in report */ 277 - base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues + 278 - GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues; 279 - /* The boundary between driver stats and NIC stats shifts if there are 280 - * stopped queues. 
281 - */ 282 - base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs + 283 - NIC_TX_STATS_REPORT_NUM * num_stopped_txqs; 284 - max_stats_idx = NIC_RX_STATS_REPORT_NUM * 285 - (priv->rx_cfg.num_queues - num_stopped_rxqs) + 286 - base_stats_idx; 287 /* Preprocess the stats report for rx, map queue id to start index */ 288 skip_nic_stats = false; 289 - for (stats_idx = base_stats_idx; stats_idx < max_stats_idx; 290 stats_idx += NIC_RX_STATS_REPORT_NUM) { 291 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); 292 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id); ··· 337 tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; 338 tmp_rx_desc_err_dropped_pkt = 339 rx->rx_desc_err_dropped_pkt; 340 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 341 start)); 342 data[i++] = tmp_rx_bytes; ··· 350 data[i++] = rx->rx_frag_alloc_cnt; 351 /* rx dropped packets */ 352 data[i++] = tmp_rx_skb_alloc_fail + 353 - tmp_rx_buf_alloc_fail + 354 - tmp_rx_desc_err_dropped_pkt; 355 data[i++] = rx->rx_copybreak_pkt; 356 data[i++] = rx->rx_copied_pkt; 357 /* stats from NIC */ ··· 384 i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; 385 } 386 387 - /* For tx cross-reporting stats, start from nic tx stats in report */ 388 - base_stats_idx = max_stats_idx; 389 - max_stats_idx = NIC_TX_STATS_REPORT_NUM * 390 - (num_tx_queues - num_stopped_txqs) + 391 - max_stats_idx; 392 - /* Preprocess the stats report for tx, map queue id to start index */ 393 skip_nic_stats = false; 394 - for (stats_idx = base_stats_idx; stats_idx < max_stats_idx; 395 stats_idx += NIC_TX_STATS_REPORT_NUM) { 396 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); 397 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
··· 152 u64 tmp_rx_pkts, tmp_rx_hsplit_pkt, tmp_rx_bytes, tmp_rx_hsplit_bytes, 153 tmp_rx_skb_alloc_fail, tmp_rx_buf_alloc_fail, 154 tmp_rx_desc_err_dropped_pkt, tmp_rx_hsplit_unsplit_pkt, 155 + tmp_tx_pkts, tmp_tx_bytes, 156 + tmp_xdp_tx_errors, tmp_xdp_redirect_errors; 157 u64 rx_buf_alloc_fail, rx_desc_err_dropped_pkt, rx_hsplit_unsplit_pkt, 158 rx_pkts, rx_hsplit_pkt, rx_skb_alloc_fail, rx_bytes, tx_pkts, tx_bytes, 159 + tx_dropped, xdp_tx_errors, xdp_redirect_errors; 160 + int rx_base_stats_idx, max_rx_stats_idx, max_tx_stats_idx; 161 + int stats_idx, stats_region_len, nic_stats_len; 162 struct stats *report_stats; 163 int *rx_qid_to_stats_idx; 164 int *tx_qid_to_stats_idx; ··· 198 for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0, 199 rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0, 200 rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0, 201 + xdp_tx_errors = 0, xdp_redirect_errors = 0, 202 ring = 0; 203 ring < priv->rx_cfg.num_queues; ring++) { 204 if (priv->rx) { ··· 215 rx->rx_desc_err_dropped_pkt; 216 tmp_rx_hsplit_unsplit_pkt = 217 rx->rx_hsplit_unsplit_pkt; 218 + tmp_xdp_tx_errors = rx->xdp_tx_errors; 219 + tmp_xdp_redirect_errors = 220 + rx->xdp_redirect_errors; 221 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 222 start)); 223 rx_pkts += tmp_rx_pkts; ··· 224 rx_buf_alloc_fail += tmp_rx_buf_alloc_fail; 225 rx_desc_err_dropped_pkt += tmp_rx_desc_err_dropped_pkt; 226 rx_hsplit_unsplit_pkt += tmp_rx_hsplit_unsplit_pkt; 227 + xdp_tx_errors += tmp_xdp_tx_errors; 228 + xdp_redirect_errors += tmp_xdp_redirect_errors; 229 } 230 } 231 for (tx_pkts = 0, tx_bytes = 0, tx_dropped = 0, ring = 0; ··· 249 data[i++] = rx_bytes; 250 data[i++] = tx_bytes; 251 /* total rx dropped packets */ 252 + data[i++] = rx_skb_alloc_fail + rx_desc_err_dropped_pkt + 253 + xdp_tx_errors + xdp_redirect_errors; 254 data[i++] = tx_dropped; 255 data[i++] = priv->tx_timeo_cnt; 256 data[i++] = rx_skb_alloc_fail; ··· 265 data[i++] = priv->stats_report_trigger_cnt; 266 i = GVE_MAIN_STATS_LEN; 267 268 + rx_base_stats_idx = 0; 269 + max_rx_stats_idx = 0; 270 + max_tx_stats_idx = 0; 271 + stats_region_len = priv->stats_report_len - 272 + sizeof(struct gve_stats_report); 273 + nic_stats_len = (NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues + 274 + NIC_TX_STATS_REPORT_NUM * num_tx_queues) * sizeof(struct stats); 275 + if (unlikely((stats_region_len - 276 + nic_stats_len) % sizeof(struct stats))) { 277 + net_err_ratelimited("Starting index of NIC stats should be multiple of stats size"); 278 + } else { 279 + /* For rx cross-reporting stats, 280 + * start from nic rx stats in report 281 + */ 282 + rx_base_stats_idx = (stats_region_len - nic_stats_len) / 283 + sizeof(struct stats); 284 + /* The boundary between driver stats and NIC stats 285 + * shifts if there are stopped queues 286 + */ 287 + rx_base_stats_idx += NIC_RX_STATS_REPORT_NUM * 288 + num_stopped_rxqs + NIC_TX_STATS_REPORT_NUM * 289 + num_stopped_txqs; 290 + max_rx_stats_idx = NIC_RX_STATS_REPORT_NUM * 291 + (priv->rx_cfg.num_queues - num_stopped_rxqs) + 292 + rx_base_stats_idx; 293 + max_tx_stats_idx = NIC_TX_STATS_REPORT_NUM * 294 + (num_tx_queues - num_stopped_txqs) + 295 + max_rx_stats_idx; 296 + } 297 /* Preprocess the stats report for rx, map queue id to start index */ 298 skip_nic_stats = false; 299 + for (stats_idx = rx_base_stats_idx; stats_idx < max_rx_stats_idx; 300 stats_idx += NIC_RX_STATS_REPORT_NUM) { 301 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); 302 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id); 
··· 311 tmp_rx_buf_alloc_fail = rx->rx_buf_alloc_fail; 312 tmp_rx_desc_err_dropped_pkt = 313 rx->rx_desc_err_dropped_pkt; 314 + tmp_xdp_tx_errors = rx->xdp_tx_errors; 315 + tmp_xdp_redirect_errors = 316 + rx->xdp_redirect_errors; 317 } while (u64_stats_fetch_retry(&priv->rx[ring].statss, 318 start)); 319 data[i++] = tmp_rx_bytes; ··· 321 data[i++] = rx->rx_frag_alloc_cnt; 322 /* rx dropped packets */ 323 data[i++] = tmp_rx_skb_alloc_fail + 324 + tmp_rx_desc_err_dropped_pkt + 325 + tmp_xdp_tx_errors + 326 + tmp_xdp_redirect_errors; 327 data[i++] = rx->rx_copybreak_pkt; 328 data[i++] = rx->rx_copied_pkt; 329 /* stats from NIC */ ··· 354 i += priv->rx_cfg.num_queues * NUM_GVE_RX_CNTS; 355 } 356 357 skip_nic_stats = false; 358 + /* NIC TX stats start right after NIC RX stats */ 359 + for (stats_idx = max_rx_stats_idx; stats_idx < max_tx_stats_idx; 360 stats_idx += NIC_TX_STATS_REPORT_NUM) { 361 u32 stat_name = be32_to_cpu(report_stats[stats_idx].stat_name); 362 u32 queue_id = be32_to_cpu(report_stats[stats_idx].queue_id);
+2 -2
drivers/net/ethernet/google/gve/gve_main.c
··· 283 int tx_stats_num, rx_stats_num; 284 285 tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * 286 - gve_num_tx_queues(priv); 287 rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * 288 - priv->rx_cfg.num_queues; 289 priv->stats_report_len = struct_size(priv->stats_report, stats, 290 size_add(tx_stats_num, rx_stats_num)); 291 priv->stats_report =
··· 283 int tx_stats_num, rx_stats_num; 284 285 tx_stats_num = (GVE_TX_STATS_REPORT_NUM + NIC_TX_STATS_REPORT_NUM) * 286 + priv->tx_cfg.max_queues; 287 rx_stats_num = (GVE_RX_STATS_REPORT_NUM + NIC_RX_STATS_REPORT_NUM) * 288 + priv->rx_cfg.max_queues; 289 priv->stats_report_len = struct_size(priv->stats_report, stats, 290 size_add(tx_stats_num, rx_stats_num)); 291 priv->stats_report =
-1
drivers/net/ethernet/intel/i40e/i40e_main.c
··· 9030 TCP_FLAG_FIN | 9031 TCP_FLAG_CWR) >> 16); 9032 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 9033 - udp_tunnel_get_rx_info(netdev); 9034 9035 return 0; 9036 }
··· 9030 TCP_FLAG_FIN | 9031 TCP_FLAG_CWR) >> 16); 9032 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); 9033 9034 return 0; 9035 }
+14 -12
drivers/net/ethernet/intel/ice/ice_main.c
··· 3314 if (ice_is_reset_in_progress(pf->state)) 3315 goto skip_irq; 3316 3317 - if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) { 3318 - /* Process outstanding Tx timestamps. If there is more work, 3319 - * re-arm the interrupt to trigger again. 3320 - */ 3321 - if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { 3322 - wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 3323 - ice_flush(hw); 3324 - } 3325 - } 3326 3327 skip_irq: 3328 ice_irq_dynamic_ena(hw, NULL, NULL); 3329 3330 return IRQ_HANDLED; 3331 } ··· 7809 7810 /* Restore timestamp mode settings after VSI rebuild */ 7811 ice_ptp_restore_timestamp_mode(pf); 7812 return; 7813 7814 err_vsi_rebuild: ··· 9661 if (err) 9662 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 9663 vsi->vsi_num, vsi->vsw->sw_id); 9664 - 9665 - /* Update existing tunnels information */ 9666 - udp_tunnel_get_rx_info(netdev); 9667 9668 return err; 9669 }
··· 3314 if (ice_is_reset_in_progress(pf->state)) 3315 goto skip_irq; 3316 3317 + if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) 3318 + ice_ptp_process_ts(pf); 3319 3320 skip_irq: 3321 ice_irq_dynamic_ena(hw, NULL, NULL); 3322 + ice_flush(hw); 3323 + 3324 + if (ice_ptp_tx_tstamps_pending(pf)) { 3325 + /* If any new Tx timestamps happened while in interrupt, 3326 + * re-arm the interrupt to trigger it again. 3327 + */ 3328 + wr32(hw, PFINT_OICR, PFINT_OICR_TSYN_TX_M); 3329 + ice_flush(hw); 3330 + } 3331 3332 return IRQ_HANDLED; 3333 } ··· 7807 7808 /* Restore timestamp mode settings after VSI rebuild */ 7809 ice_ptp_restore_timestamp_mode(pf); 7810 + 7811 + /* Start PTP periodic work after VSI is fully rebuilt */ 7812 + ice_ptp_queue_work(pf); 7813 return; 7814 7815 err_vsi_rebuild: ··· 9656 if (err) 9657 netdev_err(netdev, "Failed to open VSI 0x%04X on switch 0x%04X\n", 9658 vsi->vsi_num, vsi->vsw->sw_id); 9659 9660 return err; 9661 }
+109 -70
drivers/net/ethernet/intel/ice/ice_ptp.c
··· 573 pf = ptp_port_to_pf(ptp_port); 574 hw = &pf->hw; 575 576 /* Read the Tx ready status first */ 577 if (tx->has_ready_bitmap) { 578 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); ··· 677 pf->ptp.tx_hwtstamp_good += tstamp_good; 678 } 679 680 - /** 681 - * ice_ptp_tx_tstamp_owner - Process Tx timestamps for all ports on the device 682 - * @pf: Board private structure 683 - */ 684 - static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) 685 { 686 struct ice_ptp_port *port; 687 - unsigned int i; 688 689 mutex_lock(&pf->adapter->ports.lock); 690 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) { ··· 691 ice_ptp_process_tx_tstamp(tx); 692 } 693 mutex_unlock(&pf->adapter->ports.lock); 694 - 695 - for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { 696 - u64 tstamp_ready; 697 - int err; 698 - 699 - /* Read the Tx ready status first */ 700 - err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 701 - if (err) 702 - break; 703 - else if (tstamp_ready) 704 - return ICE_TX_TSTAMP_WORK_PENDING; 705 - } 706 - 707 - return ICE_TX_TSTAMP_WORK_DONE; 708 - } 709 - 710 - /** 711 - * ice_ptp_tx_tstamp - Process Tx timestamps for this function. 712 - * @tx: Tx tracking structure to initialize 713 - * 714 - * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding incomplete 715 - * Tx timestamps, or ICE_TX_TSTAMP_WORK_DONE otherwise. 716 - */ 717 - static enum ice_tx_tstamp_work ice_ptp_tx_tstamp(struct ice_ptp_tx *tx) 718 - { 719 - bool more_timestamps; 720 - unsigned long flags; 721 - 722 - if (!tx->init) 723 - return ICE_TX_TSTAMP_WORK_DONE; 724 - 725 - /* Process the Tx timestamp tracker */ 726 - ice_ptp_process_tx_tstamp(tx); 727 - 728 - /* Check if there are outstanding Tx timestamps */ 729 - spin_lock_irqsave(&tx->lock, flags); 730 - more_timestamps = tx->init && !bitmap_empty(tx->in_use, tx->len); 731 - spin_unlock_irqrestore(&tx->lock, flags); 732 - 733 - if (more_timestamps) 734 - return ICE_TX_TSTAMP_WORK_PENDING; 735 - 736 - return ICE_TX_TSTAMP_WORK_DONE; 737 } 738 739 /** ··· 1302 /* Do not reconfigure E810 or E830 PHY */ 1303 return; 1304 case ICE_MAC_GENERIC: 1305 - case ICE_MAC_GENERIC_3K_E825: 1306 ice_ptp_port_phy_restart(ptp_port); 1307 return; 1308 default: 1309 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); ··· 2621 return idx + tx->offset; 2622 } 2623 2624 - /** 2625 - * ice_ptp_process_ts - Process the PTP Tx timestamps 2626 - * @pf: Board private structure 2627 - * 2628 - * Returns: ICE_TX_TSTAMP_WORK_PENDING if there are any outstanding Tx 2629 - * timestamps that need processing, and ICE_TX_TSTAMP_WORK_DONE otherwise. 
2630 - */ 2631 - enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf) 2632 { 2633 switch (pf->ptp.tx_interrupt_mode) { 2634 case ICE_PTP_TX_INTERRUPT_NONE: 2635 /* This device has the clock owner handle timestamps for it */ 2636 - return ICE_TX_TSTAMP_WORK_DONE; 2637 case ICE_PTP_TX_INTERRUPT_SELF: 2638 /* This device handles its own timestamps */ 2639 - return ice_ptp_tx_tstamp(&pf->ptp.port.tx); 2640 case ICE_PTP_TX_INTERRUPT_ALL: 2641 /* This device handles timestamps for all ports */ 2642 - return ice_ptp_tx_tstamp_owner(pf); 2643 default: 2644 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2645 pf->ptp.tx_interrupt_mode); 2646 - return ICE_TX_TSTAMP_WORK_DONE; 2647 } 2648 } 2649 2650 /** ··· 2758 return IRQ_WAKE_THREAD; 2759 case ICE_MAC_E830: 2760 /* E830 can read timestamps in the top half using rd32() */ 2761 - if (ice_ptp_process_ts(pf) == ICE_TX_TSTAMP_WORK_PENDING) { 2762 /* Process outstanding Tx timestamps. If there 2763 * is more work, re-arm the interrupt to trigger again. 2764 */ ··· 2840 } 2841 2842 /** 2843 * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild 2844 * @pf: Board private structure 2845 * @rebuild: rebuild if true, prepare if false ··· 2871 struct ice_pf *peer_pf = ptp_port_to_pf(port); 2872 2873 if (!ice_is_primary(&peer_pf->hw)) { 2874 - if (rebuild) 2875 ice_ptp_rebuild(peer_pf, reset_type); 2876 - else 2877 ice_ptp_prepare_for_reset(peer_pf, reset_type); 2878 } 2879 } 2880 } ··· 3024 } 3025 3026 ptp->state = ICE_PTP_READY; 3027 - 3028 - /* Start periodic work going */ 3029 - kthread_queue_delayed_work(ptp->kworker, &ptp->work, 0); 3030 3031 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 3032 return; ··· 3229 { 3230 switch (pf->hw.mac_type) { 3231 case ICE_MAC_GENERIC: 3232 - /* E822 based PHY has the clock owner process the interrupt 3233 - * for all ports. 3234 */ 3235 if (ice_pf_src_tmr_owned(pf)) 3236 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
··· 573 pf = ptp_port_to_pf(ptp_port); 574 hw = &pf->hw; 575 576 + if (!tx->init) 577 + return; 578 + 579 /* Read the Tx ready status first */ 580 if (tx->has_ready_bitmap) { 581 err = ice_get_phy_tx_tstamp_ready(hw, tx->block, &tstamp_ready); ··· 674 pf->ptp.tx_hwtstamp_good += tstamp_good; 675 } 676 677 + static void ice_ptp_tx_tstamp_owner(struct ice_pf *pf) 678 { 679 struct ice_ptp_port *port; 680 681 mutex_lock(&pf->adapter->ports.lock); 682 list_for_each_entry(port, &pf->adapter->ports.ports, list_node) { ··· 693 ice_ptp_process_tx_tstamp(tx); 694 } 695 mutex_unlock(&pf->adapter->ports.lock); 696 } 697 698 /** ··· 1347 /* Do not reconfigure E810 or E830 PHY */ 1348 return; 1349 case ICE_MAC_GENERIC: 1350 ice_ptp_port_phy_restart(ptp_port); 1351 + return; 1352 + case ICE_MAC_GENERIC_3K_E825: 1353 + if (linkup) 1354 + ice_ptp_port_phy_restart(ptp_port); 1355 return; 1356 default: 1357 dev_warn(ice_pf_to_dev(pf), "%s: Unknown PHY type\n", __func__); ··· 2663 return idx + tx->offset; 2664 } 2665 2666 + void ice_ptp_process_ts(struct ice_pf *pf) 2667 { 2668 switch (pf->ptp.tx_interrupt_mode) { 2669 case ICE_PTP_TX_INTERRUPT_NONE: 2670 /* This device has the clock owner handle timestamps for it */ 2671 + return; 2672 case ICE_PTP_TX_INTERRUPT_SELF: 2673 /* This device handles its own timestamps */ 2674 + ice_ptp_process_tx_tstamp(&pf->ptp.port.tx); 2675 + return; 2676 case ICE_PTP_TX_INTERRUPT_ALL: 2677 /* This device handles timestamps for all ports */ 2678 + ice_ptp_tx_tstamp_owner(pf); 2679 + return; 2680 default: 2681 WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2682 pf->ptp.tx_interrupt_mode); 2683 + return; 2684 } 2685 + } 2686 + 2687 + static bool ice_port_has_timestamps(struct ice_ptp_tx *tx) 2688 + { 2689 + bool more_timestamps; 2690 + 2691 + scoped_guard(spinlock_irqsave, &tx->lock) { 2692 + if (!tx->init) 2693 + return false; 2694 + 2695 + more_timestamps = !bitmap_empty(tx->in_use, tx->len); 2696 + } 2697 + 2698 + return more_timestamps; 2699 + } 2700 + 2701 + static bool ice_any_port_has_timestamps(struct ice_pf *pf) 2702 + { 2703 + struct ice_ptp_port *port; 2704 + 2705 + scoped_guard(mutex, &pf->adapter->ports.lock) { 2706 + list_for_each_entry(port, &pf->adapter->ports.ports, 2707 + list_node) { 2708 + struct ice_ptp_tx *tx = &port->tx; 2709 + 2710 + if (ice_port_has_timestamps(tx)) 2711 + return true; 2712 + } 2713 + } 2714 + 2715 + return false; 2716 + } 2717 + 2718 + bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf) 2719 + { 2720 + struct ice_hw *hw = &pf->hw; 2721 + unsigned int i; 2722 + 2723 + /* Check software indicator */ 2724 + switch (pf->ptp.tx_interrupt_mode) { 2725 + case ICE_PTP_TX_INTERRUPT_NONE: 2726 + return false; 2727 + case ICE_PTP_TX_INTERRUPT_SELF: 2728 + if (ice_port_has_timestamps(&pf->ptp.port.tx)) 2729 + return true; 2730 + break; 2731 + case ICE_PTP_TX_INTERRUPT_ALL: 2732 + if (ice_any_port_has_timestamps(pf)) 2733 + return true; 2734 + break; 2735 + default: 2736 + WARN_ONCE(1, "Unexpected Tx timestamp interrupt mode %u\n", 2737 + pf->ptp.tx_interrupt_mode); 2738 + break; 2739 + } 2740 + 2741 + /* Check hardware indicator */ 2742 + for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { 2743 + u64 tstamp_ready = 0; 2744 + int err; 2745 + 2746 + err = ice_get_phy_tx_tstamp_ready(&pf->hw, i, &tstamp_ready); 2747 + if (err || tstamp_ready) 2748 + return true; 2749 + } 2750 + 2751 + return false; 2752 } 2753 2754 /** ··· 2738 return IRQ_WAKE_THREAD; 2739 case ICE_MAC_E830: 2740 /* E830 can read timestamps in the top half using 
rd32() */ 2741 + ice_ptp_process_ts(pf); 2742 + 2743 + if (ice_ptp_tx_tstamps_pending(pf)) { 2744 /* Process outstanding Tx timestamps. If there 2745 * is more work, re-arm the interrupt to trigger again. 2746 */ ··· 2818 } 2819 2820 /** 2821 + * ice_ptp_queue_work - Queue PTP periodic work for a PF 2822 + * @pf: Board private structure 2823 + * 2824 + * Helper function to queue PTP periodic work after VSI rebuild completes. 2825 + * This ensures that PTP work only runs when VSI structures are ready. 2826 + */ 2827 + void ice_ptp_queue_work(struct ice_pf *pf) 2828 + { 2829 + if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags) && 2830 + pf->ptp.state == ICE_PTP_READY) 2831 + kthread_queue_delayed_work(pf->ptp.kworker, &pf->ptp.work, 0); 2832 + } 2833 + 2834 + /** 2835 * ice_ptp_prepare_rebuild_sec - Prepare second NAC for PTP reset or rebuild 2836 * @pf: Board private structure 2837 * @rebuild: rebuild if true, prepare if false ··· 2835 struct ice_pf *peer_pf = ptp_port_to_pf(port); 2836 2837 if (!ice_is_primary(&peer_pf->hw)) { 2838 + if (rebuild) { 2839 + /* TODO: When implementing rebuild=true: 2840 + * 1. Ensure secondary PFs' VSIs are rebuilt 2841 + * 2. Call ice_ptp_queue_work(peer_pf) after VSI rebuild 2842 + */ 2843 ice_ptp_rebuild(peer_pf, reset_type); 2844 + } else { 2845 ice_ptp_prepare_for_reset(peer_pf, reset_type); 2846 + } 2847 } 2848 } 2849 } ··· 2983 } 2984 2985 ptp->state = ICE_PTP_READY; 2986 2987 dev_info(ice_pf_to_dev(pf), "PTP reset successful\n"); 2988 return; ··· 3191 { 3192 switch (pf->hw.mac_type) { 3193 case ICE_MAC_GENERIC: 3194 + case ICE_MAC_GENERIC_3K_E825: 3195 + /* E82x hardware has the clock owner process timestamps for 3196 + * all ports. 3197 */ 3198 if (ice_pf_src_tmr_owned(pf)) 3199 pf->ptp.tx_interrupt_mode = ICE_PTP_TX_INTERRUPT_ALL;
+13 -5
drivers/net/ethernet/intel/ice/ice_ptp.h
··· 304 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); 305 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx); 306 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx); 307 - enum ice_tx_tstamp_work ice_ptp_process_ts(struct ice_pf *pf); 308 irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf); 309 u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, 310 struct ptp_system_timestamp *sts); 311 ··· 318 void ice_ptp_init(struct ice_pf *pf); 319 void ice_ptp_release(struct ice_pf *pf); 320 void ice_ptp_link_change(struct ice_pf *pf, bool linkup); 321 #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ 322 323 static inline int ice_ptp_hwtstamp_get(struct net_device *netdev, ··· 347 348 static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { } 349 350 - static inline bool ice_ptp_process_ts(struct ice_pf *pf) 351 - { 352 - return true; 353 - } 354 355 static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) 356 { 357 return IRQ_HANDLED; 358 } 359 360 static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, ··· 384 static inline void ice_ptp_init(struct ice_pf *pf) { } 385 static inline void ice_ptp_release(struct ice_pf *pf) { } 386 static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup) 387 { 388 } 389
··· 304 s8 ice_ptp_request_ts(struct ice_ptp_tx *tx, struct sk_buff *skb); 305 void ice_ptp_req_tx_single_tstamp(struct ice_ptp_tx *tx, u8 idx); 306 void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx); 307 + void ice_ptp_process_ts(struct ice_pf *pf); 308 irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf); 309 + bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf); 310 u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, 311 struct ptp_system_timestamp *sts); 312 ··· 317 void ice_ptp_init(struct ice_pf *pf); 318 void ice_ptp_release(struct ice_pf *pf); 319 void ice_ptp_link_change(struct ice_pf *pf, bool linkup); 320 + void ice_ptp_queue_work(struct ice_pf *pf); 321 #else /* IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ 322 323 static inline int ice_ptp_hwtstamp_get(struct net_device *netdev, ··· 345 346 static inline void ice_ptp_complete_tx_single_tstamp(struct ice_ptp_tx *tx) { } 347 348 + static inline void ice_ptp_process_ts(struct ice_pf *pf) { } 349 350 static inline irqreturn_t ice_ptp_ts_irq(struct ice_pf *pf) 351 { 352 return IRQ_HANDLED; 353 + } 354 + 355 + static inline bool ice_ptp_tx_tstamps_pending(struct ice_pf *pf) 356 + { 357 + return false; 358 } 359 360 static inline u64 ice_ptp_read_src_clk_reg(struct ice_pf *pf, ··· 380 static inline void ice_ptp_init(struct ice_pf *pf) { } 381 static inline void ice_ptp_release(struct ice_pf *pf) { } 382 static inline void ice_ptp_link_change(struct ice_pf *pf, bool linkup) 383 + { 384 + } 385 + 386 + static inline void ice_ptp_queue_work(struct ice_pf *pf) 387 { 388 } 389
+15 -6
drivers/net/ethernet/spacemit/k1_emac.c
··· 12 #include <linux/dma-mapping.h> 13 #include <linux/etherdevice.h> 14 #include <linux/ethtool.h> 15 #include <linux/interrupt.h> 16 #include <linux/io.h> 17 #include <linux/iopoll.h> ··· 39 40 #define EMAC_DEFAULT_BUFSIZE 1536 41 #define EMAC_RX_BUF_2K 2048 42 - #define EMAC_RX_BUF_4K 4096 43 44 /* Tuning parameters from SpacemiT */ 45 #define EMAC_TX_FRAMES 64 ··· 203 { 204 /* Destination address for 802.3x Ethernet flow control */ 205 u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 }; 206 - 207 - u32 rxirq = 0, dma = 0; 208 209 regmap_set_bits(priv->regmap_apmu, 210 priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG, ··· 227 emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, 228 DEFAULT_TX_THRESHOLD); 229 emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD); 230 231 /* Configure flow control (enabled in emac_adjust_link() later) */ 232 emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH); ··· 933 return -EBUSY; 934 } 935 936 - frame_len = mtu + ETH_HLEN + ETH_FCS_LEN; 937 938 if (frame_len <= EMAC_DEFAULT_BUFSIZE) 939 priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE; 940 else if (frame_len <= EMAC_RX_BUF_2K) 941 priv->dma_buf_sz = EMAC_RX_BUF_2K; 942 else 943 - priv->dma_buf_sz = EMAC_RX_BUF_4K; 944 945 ndev->mtu = mtu; 946 ··· 2034 ndev->hw_features = NETIF_F_SG; 2035 ndev->features |= ndev->hw_features; 2036 2037 - ndev->max_mtu = EMAC_RX_BUF_4K - (ETH_HLEN + ETH_FCS_LEN); 2038 ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; 2039 2040 priv = netdev_priv(ndev);
··· 12 #include <linux/dma-mapping.h> 13 #include <linux/etherdevice.h> 14 #include <linux/ethtool.h> 15 + #include <linux/if_vlan.h> 16 #include <linux/interrupt.h> 17 #include <linux/io.h> 18 #include <linux/iopoll.h> ··· 38 39 #define EMAC_DEFAULT_BUFSIZE 1536 40 #define EMAC_RX_BUF_2K 2048 41 + #define EMAC_RX_BUF_MAX FIELD_MAX(RX_DESC_1_BUFFER_SIZE_1_MASK) 42 43 /* Tuning parameters from SpacemiT */ 44 #define EMAC_TX_FRAMES 64 ··· 202 { 203 /* Destination address for 802.3x Ethernet flow control */ 204 u8 fc_dest_addr[ETH_ALEN] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 }; 205 + u32 rxirq = 0, dma = 0, frame_sz; 206 207 regmap_set_bits(priv->regmap_apmu, 208 priv->regmap_apmu_offset + APMU_EMAC_CTRL_REG, ··· 227 emac_wr(priv, MAC_TRANSMIT_PACKET_START_THRESHOLD, 228 DEFAULT_TX_THRESHOLD); 229 emac_wr(priv, MAC_RECEIVE_PACKET_START_THRESHOLD, DEFAULT_RX_THRESHOLD); 230 + 231 + /* Set maximum frame size and jabber size based on configured MTU, 232 + * accounting for Ethernet header, double VLAN tags, and FCS. 233 + */ 234 + frame_sz = priv->ndev->mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN; 235 + 236 + emac_wr(priv, MAC_MAXIMUM_FRAME_SIZE, frame_sz); 237 + emac_wr(priv, MAC_TRANSMIT_JABBER_SIZE, frame_sz); 238 + emac_wr(priv, MAC_RECEIVE_JABBER_SIZE, frame_sz); 239 240 /* Configure flow control (enabled in emac_adjust_link() later) */ 241 emac_set_mac_addr_reg(priv, fc_dest_addr, MAC_FC_SOURCE_ADDRESS_HIGH); ··· 924 return -EBUSY; 925 } 926 927 + frame_len = mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN; 928 929 if (frame_len <= EMAC_DEFAULT_BUFSIZE) 930 priv->dma_buf_sz = EMAC_DEFAULT_BUFSIZE; 931 else if (frame_len <= EMAC_RX_BUF_2K) 932 priv->dma_buf_sz = EMAC_RX_BUF_2K; 933 else 934 + priv->dma_buf_sz = EMAC_RX_BUF_MAX; 935 936 ndev->mtu = mtu; 937 ··· 2025 ndev->hw_features = NETIF_F_SG; 2026 ndev->features |= ndev->hw_features; 2027 2028 + ndev->max_mtu = EMAC_RX_BUF_MAX - (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN); 2029 ndev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS; 2030 2031 priv = netdev_priv(ndev);
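The k1-emac change sizes frames as MTU plus Ethernet header, two VLAN tags and FCS before picking an RX buffer size and programming the frame/jabber limits. A rough worked example using the standard header lengths (assumed generic constants, not driver-specific values):

#include <linux/if_ether.h>     /* ETH_HLEN (14), ETH_FCS_LEN (4) */
#include <linux/if_vlan.h>      /* VLAN_HLEN (4) */

/* On-wire frame length for a given MTU, allowing for a double
 * (QinQ) VLAN tag, as the driver now computes it.
 */
static unsigned int example_frame_len(unsigned int mtu)
{
        return mtu + ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN;
}

/*
 * example_frame_len(1500) == 1526 -> fits EMAC_DEFAULT_BUFSIZE (1536)
 * example_frame_len(9000) == 9026 -> needs the larger descriptor limit
 */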
+2 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 8042 u32 chan; 8043 8044 if (!ndev || !netif_running(ndev)) 8045 - return 0; 8046 8047 mutex_lock(&priv->lock); 8048 ··· 8082 if (stmmac_fpe_supported(priv)) 8083 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv); 8084 8085 if (priv->plat->suspend) 8086 return priv->plat->suspend(dev, priv->plat->bsp_priv); 8087
··· 8042 u32 chan; 8043 8044 if (!ndev || !netif_running(ndev)) 8045 + goto suspend_bsp; 8046 8047 mutex_lock(&priv->lock); 8048 ··· 8082 if (stmmac_fpe_supported(priv)) 8083 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv); 8084 8085 + suspend_bsp: 8086 if (priv->plat->suspend) 8087 return priv->plat->suspend(dev, priv->plat->bsp_priv); 8088
+35 -6
drivers/net/ethernet/ti/cpsw.c
··· 305 return 0; 306 } 307 308 - static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 309 { 310 - struct cpsw_priv *priv = netdev_priv(ndev); 311 struct cpsw_common *cpsw = priv->cpsw; 312 int slave_port = -1; 313 314 if (cpsw->data.dual_emac) 315 slave_port = priv->emac_port + 1; ··· 325 /* Enable promiscuous mode */ 326 cpsw_set_promiscious(ndev, true); 327 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port); 328 - return; 329 } else { 330 /* Disable promiscuous mode */ 331 cpsw_set_promiscious(ndev, false); ··· 338 /* add/remove mcast address either for real netdev or for vlan */ 339 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, 340 cpsw_del_mc_addr); 341 } 342 343 static unsigned int cpsw_rxbuf_total_len(unsigned int len) ··· 1491 priv_sl2->ndev = ndev; 1492 priv_sl2->dev = &ndev->dev; 1493 priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1494 1495 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { 1496 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, ··· 1673 priv->dev = dev; 1674 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1675 priv->emac_port = 0; 1676 1677 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 1678 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); ··· 1779 static void cpsw_remove(struct platform_device *pdev) 1780 { 1781 struct cpsw_common *cpsw = platform_get_drvdata(pdev); 1782 int i, ret; 1783 1784 ret = pm_runtime_resume_and_get(&pdev->dev); ··· 1793 return; 1794 } 1795 1796 - for (i = 0; i < cpsw->data.slaves; i++) 1797 - if (cpsw->slaves[i].ndev) 1798 - unregister_netdev(cpsw->slaves[i].ndev); 1799 1800 cpts_release(cpsw->cpts); 1801 cpdma_ctlr_destroy(cpsw->dma);
··· 305 return 0; 306 } 307 308 + static void cpsw_ndo_set_rx_mode_work(struct work_struct *work) 309 { 310 + struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work); 311 struct cpsw_common *cpsw = priv->cpsw; 312 + struct net_device *ndev = priv->ndev; 313 int slave_port = -1; 314 + 315 + rtnl_lock(); 316 + if (!netif_running(ndev)) 317 + goto unlock_rtnl; 318 + 319 + netif_addr_lock_bh(ndev); 320 321 if (cpsw->data.dual_emac) 322 slave_port = priv->emac_port + 1; ··· 318 /* Enable promiscuous mode */ 319 cpsw_set_promiscious(ndev, true); 320 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port); 321 + goto unlock_addr; 322 } else { 323 /* Disable promiscuous mode */ 324 cpsw_set_promiscious(ndev, false); ··· 331 /* add/remove mcast address either for real netdev or for vlan */ 332 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, 333 cpsw_del_mc_addr); 334 + 335 + unlock_addr: 336 + netif_addr_unlock_bh(ndev); 337 + unlock_rtnl: 338 + rtnl_unlock(); 339 + } 340 + 341 + static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 342 + { 343 + struct cpsw_priv *priv = netdev_priv(ndev); 344 + 345 + schedule_work(&priv->rx_mode_work); 346 } 347 348 static unsigned int cpsw_rxbuf_total_len(unsigned int len) ··· 1472 priv_sl2->ndev = ndev; 1473 priv_sl2->dev = &ndev->dev; 1474 priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1475 + INIT_WORK(&priv_sl2->rx_mode_work, cpsw_ndo_set_rx_mode_work); 1476 1477 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) { 1478 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr, ··· 1653 priv->dev = dev; 1654 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1655 priv->emac_port = 0; 1656 + INIT_WORK(&priv->rx_mode_work, cpsw_ndo_set_rx_mode_work); 1657 1658 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) { 1659 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN); ··· 1758 static void cpsw_remove(struct platform_device *pdev) 1759 { 1760 struct cpsw_common *cpsw = platform_get_drvdata(pdev); 1761 + struct net_device *ndev; 1762 + struct cpsw_priv *priv; 1763 int i, ret; 1764 1765 ret = pm_runtime_resume_and_get(&pdev->dev); ··· 1770 return; 1771 } 1772 1773 + for (i = 0; i < cpsw->data.slaves; i++) { 1774 + ndev = cpsw->slaves[i].ndev; 1775 + if (!ndev) 1776 + continue; 1777 + 1778 + priv = netdev_priv(ndev); 1779 + unregister_netdev(ndev); 1780 + disable_work_sync(&priv->rx_mode_work); 1781 + } 1782 1783 cpts_release(cpsw->cpts); 1784 cpdma_ctlr_destroy(cpsw->dma);
+29 -5
drivers/net/ethernet/ti/cpsw_new.c
··· 248 return 0; 249 } 250 251 - static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 252 { 253 - struct cpsw_priv *priv = netdev_priv(ndev); 254 struct cpsw_common *cpsw = priv->cpsw; 255 256 if (ndev->flags & IFF_PROMISC) { 257 /* Enable promiscuous mode */ 258 cpsw_set_promiscious(ndev, true); 259 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port); 260 - return; 261 } 262 263 /* Disable promiscuous mode */ ··· 276 /* add/remove mcast address either for real netdev or for vlan */ 277 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, 278 cpsw_del_mc_addr); 279 } 280 281 static unsigned int cpsw_rxbuf_total_len(unsigned int len) ··· 1416 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1417 priv->emac_port = i + 1; 1418 priv->tx_packet_min = CPSW_MIN_PACKET_SIZE; 1419 1420 if (is_valid_ether_addr(slave_data->mac_addr)) { 1421 ether_addr_copy(priv->mac_addr, slave_data->mac_addr); ··· 1466 1467 static void cpsw_unregister_ports(struct cpsw_common *cpsw) 1468 { 1469 int i = 0; 1470 1471 for (i = 0; i < cpsw->data.slaves; i++) { 1472 - if (!cpsw->slaves[i].ndev) 1473 continue; 1474 1475 - unregister_netdev(cpsw->slaves[i].ndev); 1476 } 1477 } 1478
··· 248 return 0; 249 } 250 251 + static void cpsw_ndo_set_rx_mode_work(struct work_struct *work) 252 { 253 + struct cpsw_priv *priv = container_of(work, struct cpsw_priv, rx_mode_work); 254 struct cpsw_common *cpsw = priv->cpsw; 255 + struct net_device *ndev = priv->ndev; 256 257 + rtnl_lock(); 258 + if (!netif_running(ndev)) 259 + goto unlock_rtnl; 260 + 261 + netif_addr_lock_bh(ndev); 262 if (ndev->flags & IFF_PROMISC) { 263 /* Enable promiscuous mode */ 264 cpsw_set_promiscious(ndev, true); 265 cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port); 266 + goto unlock_addr; 267 } 268 269 /* Disable promiscuous mode */ ··· 270 /* add/remove mcast address either for real netdev or for vlan */ 271 __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, 272 cpsw_del_mc_addr); 273 + 274 + unlock_addr: 275 + netif_addr_unlock_bh(ndev); 276 + unlock_rtnl: 277 + rtnl_unlock(); 278 + } 279 + 280 + static void cpsw_ndo_set_rx_mode(struct net_device *ndev) 281 + { 282 + struct cpsw_priv *priv = netdev_priv(ndev); 283 + 284 + schedule_work(&priv->rx_mode_work); 285 } 286 287 static unsigned int cpsw_rxbuf_total_len(unsigned int len) ··· 1398 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG); 1399 priv->emac_port = i + 1; 1400 priv->tx_packet_min = CPSW_MIN_PACKET_SIZE; 1401 + INIT_WORK(&priv->rx_mode_work, cpsw_ndo_set_rx_mode_work); 1402 1403 if (is_valid_ether_addr(slave_data->mac_addr)) { 1404 ether_addr_copy(priv->mac_addr, slave_data->mac_addr); ··· 1447 1448 static void cpsw_unregister_ports(struct cpsw_common *cpsw) 1449 { 1450 + struct net_device *ndev; 1451 + struct cpsw_priv *priv; 1452 int i = 0; 1453 1454 for (i = 0; i < cpsw->data.slaves; i++) { 1455 + ndev = cpsw->slaves[i].ndev; 1456 + if (!ndev) 1457 continue; 1458 1459 + priv = netdev_priv(ndev); 1460 + unregister_netdev(ndev); 1461 + disable_work_sync(&priv->rx_mode_work); 1462 } 1463 } 1464
+1
drivers/net/ethernet/ti/cpsw_priv.h
··· 391 u32 tx_packet_min; 392 struct cpsw_ale_ratelimit ale_bc_ratelimit; 393 struct cpsw_ale_ratelimit ale_mc_ratelimit; 394 }; 395 396 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
··· 391 u32 tx_packet_min; 392 struct cpsw_ale_ratelimit ale_bc_ratelimit; 393 struct cpsw_ale_ratelimit ale_mc_ratelimit; 394 + struct work_struct rx_mode_work; 395 }; 396 397 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
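Both cpsw drivers now defer ndo_set_rx_mode handling to a work item; that callback is invoked under the address-list lock with bottom halves disabled, so anything that may sleep has to be pushed out of it. A compact sketch of the same deferral pattern, with a hypothetical driver private struct rather than the cpsw one:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct example_priv {
        struct net_device *ndev;
        struct work_struct rx_mode_work;        /* INIT_WORK()ed at probe */
};

static void example_rx_mode_work(struct work_struct *work)
{
        struct example_priv *priv =
                container_of(work, struct example_priv, rx_mode_work);
        struct net_device *ndev = priv->ndev;

        rtnl_lock();
        if (netif_running(ndev)) {
                netif_addr_lock_bh(ndev);
                /* program promisc/allmulti and the multicast filter here */
                netif_addr_unlock_bh(ndev);
        }
        rtnl_unlock();
}

/* ndo_set_rx_mode runs in atomic context, so only queue the work here. */
static void example_set_rx_mode(struct net_device *ndev)
{
        struct example_priv *priv = netdev_priv(ndev);

        schedule_work(&priv->rx_mode_work);
}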
+3 -2
drivers/net/macvlan.c
··· 1567 /* the macvlan port may be freed by macvlan_uninit when fail to register. 1568 * so we destroy the macvlan port only when it's valid. 1569 */ 1570 - if (create && macvlan_port_get_rtnl(lowerdev)) { 1571 macvlan_flush_sources(port, vlan); 1572 - macvlan_port_destroy(port->dev); 1573 } 1574 return err; 1575 }
··· 1567 /* the macvlan port may be freed by macvlan_uninit when fail to register. 1568 * so we destroy the macvlan port only when it's valid. 1569 */ 1570 + if (macvlan_port_get_rtnl(lowerdev)) { 1571 macvlan_flush_sources(port, vlan); 1572 + if (create) 1573 + macvlan_port_destroy(port->dev); 1574 } 1575 return err; 1576 }
+2
drivers/net/phy/sfp.c
··· 479 linkmode_zero(caps->link_modes); 480 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 481 caps->link_modes); 482 } 483 484 #define SFP_QUIRK(_v, _p, _s, _f) \
··· 479 linkmode_zero(caps->link_modes); 480 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, 481 caps->link_modes); 482 + phy_interface_zero(caps->interfaces); 483 + __set_bit(PHY_INTERFACE_MODE_1000BASEX, caps->interfaces); 484 } 485 486 #define SFP_QUIRK(_v, _p, _s, _f) \
+15 -14
drivers/net/usb/r8152.c
··· 8535 usb_submit_urb(tp->intr_urb, GFP_NOIO); 8536 } 8537 8538 - /* If the device is RTL8152_INACCESSIBLE here then we should do a 8539 - * reset. This is important because the usb_lock_device_for_reset() 8540 - * that happens as a result of usb_queue_reset_device() will silently 8541 - * fail if the device was suspended or if too much time passed. 8542 - * 8543 - * NOTE: The device is locked here so we can directly do the reset. 8544 - * We don't need usb_lock_device_for_reset() because that's just a 8545 - * wrapper over device_lock() and device_resume() (which calls us) 8546 - * does that for us. 8547 - */ 8548 - if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) 8549 - usb_reset_device(tp->udev); 8550 - 8551 return 0; 8552 } 8553 ··· 8645 static int rtl8152_resume(struct usb_interface *intf) 8646 { 8647 struct r8152 *tp = usb_get_intfdata(intf); 8648 int ret; 8649 8650 mutex_lock(&tp->control); 8651 8652 rtl_reset_ocp_base(tp); 8653 8654 - if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) 8655 ret = rtl8152_runtime_resume(tp); 8656 else 8657 ret = rtl8152_system_resume(tp); 8658 8659 mutex_unlock(&tp->control); 8660 8661 return ret; 8662 }
··· 8535 usb_submit_urb(tp->intr_urb, GFP_NOIO); 8536 } 8537 8538 return 0; 8539 } 8540 ··· 8658 static int rtl8152_resume(struct usb_interface *intf) 8659 { 8660 struct r8152 *tp = usb_get_intfdata(intf); 8661 + bool runtime_resume = test_bit(SELECTIVE_SUSPEND, &tp->flags); 8662 int ret; 8663 8664 mutex_lock(&tp->control); 8665 8666 rtl_reset_ocp_base(tp); 8667 8668 + if (runtime_resume) 8669 ret = rtl8152_runtime_resume(tp); 8670 else 8671 ret = rtl8152_system_resume(tp); 8672 8673 mutex_unlock(&tp->control); 8674 + 8675 + /* If the device is RTL8152_INACCESSIBLE here then we should do a 8676 + * reset. This is important because the usb_lock_device_for_reset() 8677 + * that happens as a result of usb_queue_reset_device() will silently 8678 + * fail if the device was suspended or if too much time passed. 8679 + * 8680 + * NOTE: The device is locked here so we can directly do the reset. 8681 + * We don't need usb_lock_device_for_reset() because that's just a 8682 + * wrapper over device_lock() and device_resume() (which calls us) 8683 + * does that for us. 8684 + */ 8685 + if (!runtime_resume && test_bit(RTL8152_INACCESSIBLE, &tp->flags)) 8686 + usb_reset_device(tp->udev); 8687 8688 return ret; 8689 }
-2
drivers/net/wireless/intel/iwlwifi/mld/iface.c
··· 55 56 ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL); 57 58 - wiphy_delayed_work_cancel(mld->wiphy, &mld_vif->mlo_scan_start_wk); 59 - 60 CLEANUP_STRUCT(mld_vif); 61 } 62
··· 55 56 ieee80211_iter_keys(mld->hw, vif, iwl_mld_cleanup_keys_iter, NULL); 57 58 CLEANUP_STRUCT(mld_vif); 59 } 60
+2
drivers/net/wireless/intel/iwlwifi/mld/mac80211.c
··· 1759 wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk); 1760 wiphy_delayed_work_cancel(mld->wiphy, 1761 &mld_vif->emlsr.check_tpt_wk); 1762 1763 iwl_mld_reset_cca_40mhz_workaround(mld, vif); 1764 iwl_mld_smps_workaround(mld, vif, true);
··· 1759 wiphy_work_cancel(mld->wiphy, &mld_vif->emlsr.unblock_tpt_wk); 1760 wiphy_delayed_work_cancel(mld->wiphy, 1761 &mld_vif->emlsr.check_tpt_wk); 1762 + wiphy_delayed_work_cancel(mld->wiphy, 1763 + &mld_vif->mlo_scan_start_wk); 1764 1765 iwl_mld_reset_cca_40mhz_workaround(mld, vif); 1766 iwl_mld_smps_workaround(mld, vif, true);
+5 -1
drivers/net/wireless/intel/iwlwifi/mvm/d3.c
··· 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 - * Copyright (C) 2012-2014, 2018-2025 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ ··· 3239 3240 IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n"); 3241 3242 mvm->fast_resume = true; 3243 set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 3244 ··· 3296 IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); 3297 mvm->trans->state = IWL_TRANS_NO_FW; 3298 } 3299 3300 out: 3301 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
··· 1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 + * Copyright (C) 2012-2014, 2018-2026 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ ··· 3239 3240 IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n"); 3241 3242 + iwl_mvm_pause_tcm(mvm, true); 3243 + 3244 mvm->fast_resume = true; 3245 set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); 3246 ··· 3294 IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); 3295 mvm->trans->state = IWL_TRANS_NO_FW; 3296 } 3297 + 3298 + iwl_mvm_resume_tcm(mvm); 3299 3300 out: 3301 clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status);
+12
include/linux/skbuff.h
··· 4301 skb_headlen(skb), buffer); 4302 } 4303 4304 static inline void * __must_check 4305 skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len) 4306 {
··· 4301 skb_headlen(skb), buffer); 4302 } 4303 4304 + /* Variant of skb_header_pointer() where @offset is user-controlled 4305 + * and potentially negative. 4306 + */ 4307 + static inline void * __must_check 4308 + skb_header_pointer_careful(const struct sk_buff *skb, int offset, 4309 + int len, void *buffer) 4310 + { 4311 + if (unlikely(offset < 0 && -offset > skb_headroom(skb))) 4312 + return NULL; 4313 + return skb_header_pointer(skb, offset, len, buffer); 4314 + } 4315 + 4316 static inline void * __must_check 4317 skb_pointer_if_linear(const struct sk_buff *skb, int offset, int len) 4318 {
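skb_header_pointer_careful() is the helper the cls_u32 fix in the summary relies on: it rejects a negative, user-controlled offset that would reach before the skb headroom instead of letting skb_header_pointer() copy from out-of-bounds memory. A minimal usage sketch (hypothetical match function, not the actual cls_u32 code):

#include <linux/skbuff.h>

/* Hypothetical match helper where "off" comes from a user-supplied rule
 * and may be negative (relative to skb->data), as with u32 classifier keys.
 */
static bool example_match_u32(const struct sk_buff *skb, int off, u32 want)
{
        u32 buf;
        const u32 *val;

        val = skb_header_pointer_careful(skb, off, sizeof(buf), &buf);
        if (!val)
                return false;   /* offset points outside the packet */

        return *val == want;
}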
+4 -4
net/core/filter.c
··· 2289 2290 err = bpf_out_neigh_v6(net, skb, dev, nh); 2291 if (unlikely(net_xmit_eval(err))) 2292 - DEV_STATS_INC(dev, tx_errors); 2293 else 2294 ret = NET_XMIT_SUCCESS; 2295 goto out_xmit; 2296 out_drop: 2297 - DEV_STATS_INC(dev, tx_errors); 2298 kfree_skb(skb); 2299 out_xmit: 2300 return ret; ··· 2396 2397 err = bpf_out_neigh_v4(net, skb, dev, nh); 2398 if (unlikely(net_xmit_eval(err))) 2399 - DEV_STATS_INC(dev, tx_errors); 2400 else 2401 ret = NET_XMIT_SUCCESS; 2402 goto out_xmit; 2403 out_drop: 2404 - DEV_STATS_INC(dev, tx_errors); 2405 kfree_skb(skb); 2406 out_xmit: 2407 return ret;
··· 2289 2290 err = bpf_out_neigh_v6(net, skb, dev, nh); 2291 if (unlikely(net_xmit_eval(err))) 2292 + dev_core_stats_tx_dropped_inc(dev); 2293 else 2294 ret = NET_XMIT_SUCCESS; 2295 goto out_xmit; 2296 out_drop: 2297 + dev_core_stats_tx_dropped_inc(dev); 2298 kfree_skb(skb); 2299 out_xmit: 2300 return ret; ··· 2396 2397 err = bpf_out_neigh_v4(net, skb, dev, nh); 2398 if (unlikely(net_xmit_eval(err))) 2399 + dev_core_stats_tx_dropped_inc(dev); 2400 else 2401 ret = NET_XMIT_SUCCESS; 2402 goto out_xmit; 2403 out_drop: 2404 + dev_core_stats_tx_dropped_inc(dev); 2405 kfree_skb(skb); 2406 out_xmit: 2407 return ret;
+2
net/core/gro.c
··· 265 goto out; 266 } 267 268 rcu_read_lock(); 269 list_for_each_entry_rcu(ptype, head, list) { 270 if (ptype->type != type || !ptype->callbacks.gro_complete)
··· 265 goto out; 266 } 267 268 + /* NICs can feed encapsulated packets into GRO */ 269 + skb->encapsulation = 0; 270 rcu_read_lock(); 271 list_for_each_entry_rcu(ptype, head, list) { 272 if (ptype->type != type || !ptype->callbacks.gro_complete)
+34 -16
net/core/net-procfs.c
··· 170 .show = softnet_seq_show, 171 }; 172 173 static void *ptype_get_idx(struct seq_file *seq, loff_t pos) 174 { 175 struct list_head *ptype_list = NULL; 176 struct packet_type *pt = NULL; 177 struct net_device *dev; ··· 187 for_each_netdev_rcu(seq_file_net(seq), dev) { 188 ptype_list = &dev->ptype_all; 189 list_for_each_entry_rcu(pt, ptype_list, list) { 190 - if (i == pos) 191 return pt; 192 ++i; 193 } 194 } 195 196 list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) { 197 if (i == pos) ··· 228 229 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) 230 { 231 struct net *net = seq_file_net(seq); 232 struct net_device *dev; 233 struct packet_type *pt; ··· 240 return ptype_get_idx(seq, 0); 241 242 pt = v; 243 - nxt = pt->list.next; 244 - if (pt->dev) { 245 - if (nxt != &pt->dev->ptype_all) 246 goto found; 247 248 - dev = pt->dev; 249 for_each_netdev_continue_rcu(seq_file_net(seq), dev) { 250 - if (!list_empty(&dev->ptype_all)) { 251 - nxt = dev->ptype_all.next; 252 goto found; 253 } 254 } 255 - nxt = net->ptype_all.next; 256 goto net_ptype_all; 257 } 258 ··· 265 266 if (nxt == &net->ptype_all) { 267 /* continue with ->ptype_specific if it's not empty */ 268 - nxt = net->ptype_specific.next; 269 if (nxt != &net->ptype_specific) 270 goto found; 271 } 272 273 hash = 0; 274 - nxt = ptype_base[0].next; 275 } else 276 hash = ntohs(pt->type) & PTYPE_HASH_MASK; 277 278 while (nxt == &ptype_base[hash]) { 279 if (++hash >= PTYPE_HASH_SIZE) 280 return NULL; 281 - nxt = ptype_base[hash].next; 282 } 283 found: 284 return list_entry(nxt, struct packet_type, list); ··· 292 293 static int ptype_seq_show(struct seq_file *seq, void *v) 294 { 295 struct packet_type *pt = v; 296 297 - if (v == SEQ_START_TOKEN) 298 seq_puts(seq, "Type Device Function\n"); 299 - else if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && 300 - (!pt->dev || net_eq(dev_net(pt->dev), seq_file_net(seq)))) { 301 if (pt->type == htons(ETH_P_ALL)) 302 seq_puts(seq, "ALL "); 303 else 304 seq_printf(seq, "%04x", ntohs(pt->type)); 305 306 seq_printf(seq, " %-8s %ps\n", 307 - pt->dev ? pt->dev->name : "", pt->func); 308 } 309 310 return 0; ··· 333 &softnet_seq_ops)) 334 goto out_dev; 335 if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops, 336 - sizeof(struct seq_net_private))) 337 goto out_softnet; 338 339 if (wext_proc_init(net))
··· 170 .show = softnet_seq_show, 171 }; 172 173 + struct ptype_iter_state { 174 + struct seq_net_private p; 175 + struct net_device *dev; 176 + }; 177 + 178 static void *ptype_get_idx(struct seq_file *seq, loff_t pos) 179 { 180 + struct ptype_iter_state *iter = seq->private; 181 struct list_head *ptype_list = NULL; 182 struct packet_type *pt = NULL; 183 struct net_device *dev; ··· 181 for_each_netdev_rcu(seq_file_net(seq), dev) { 182 ptype_list = &dev->ptype_all; 183 list_for_each_entry_rcu(pt, ptype_list, list) { 184 + if (i == pos) { 185 + iter->dev = dev; 186 return pt; 187 + } 188 ++i; 189 } 190 } 191 + 192 + iter->dev = NULL; 193 194 list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) { 195 if (i == pos) ··· 218 219 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos) 220 { 221 + struct ptype_iter_state *iter = seq->private; 222 struct net *net = seq_file_net(seq); 223 struct net_device *dev; 224 struct packet_type *pt; ··· 229 return ptype_get_idx(seq, 0); 230 231 pt = v; 232 + nxt = READ_ONCE(pt->list.next); 233 + dev = iter->dev; 234 + if (dev) { 235 + if (nxt != &dev->ptype_all) 236 goto found; 237 238 for_each_netdev_continue_rcu(seq_file_net(seq), dev) { 239 + nxt = READ_ONCE(dev->ptype_all.next); 240 + if (nxt != &dev->ptype_all) { 241 + iter->dev = dev; 242 goto found; 243 } 244 } 245 + iter->dev = NULL; 246 + nxt = READ_ONCE(net->ptype_all.next); 247 goto net_ptype_all; 248 } 249 ··· 252 253 if (nxt == &net->ptype_all) { 254 /* continue with ->ptype_specific if it's not empty */ 255 + nxt = READ_ONCE(net->ptype_specific.next); 256 if (nxt != &net->ptype_specific) 257 goto found; 258 } 259 260 hash = 0; 261 + nxt = READ_ONCE(ptype_base[0].next); 262 } else 263 hash = ntohs(pt->type) & PTYPE_HASH_MASK; 264 265 while (nxt == &ptype_base[hash]) { 266 if (++hash >= PTYPE_HASH_SIZE) 267 return NULL; 268 + nxt = READ_ONCE(ptype_base[hash].next); 269 } 270 found: 271 return list_entry(nxt, struct packet_type, list); ··· 279 280 static int ptype_seq_show(struct seq_file *seq, void *v) 281 { 282 + struct ptype_iter_state *iter = seq->private; 283 struct packet_type *pt = v; 284 + struct net_device *dev; 285 286 + if (v == SEQ_START_TOKEN) { 287 seq_puts(seq, "Type Device Function\n"); 288 + return 0; 289 + } 290 + dev = iter->dev; 291 + if ((!pt->af_packet_net || net_eq(pt->af_packet_net, seq_file_net(seq))) && 292 + (!dev || net_eq(dev_net(dev), seq_file_net(seq)))) { 293 if (pt->type == htons(ETH_P_ALL)) 294 seq_puts(seq, "ALL "); 295 else 296 seq_printf(seq, "%04x", ntohs(pt->type)); 297 298 seq_printf(seq, " %-8s %ps\n", 299 + dev ? dev->name : "", pt->func); 300 } 301 302 return 0; ··· 315 &softnet_seq_ops)) 316 goto out_dev; 317 if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops, 318 + sizeof(struct ptype_iter_state))) 319 goto out_softnet; 320 321 if (wext_proc_init(net))
-3
net/ethtool/common.c
··· 862 ctx->key_off = key_off; 863 ctx->priv_size = ops->rxfh_priv_size; 864 865 - ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; 866 - ctx->input_xfrm = RXH_XFRM_NO_CHANGE; 867 - 868 return ctx; 869 } 870
··· 862 ctx->key_off = key_off; 863 ctx->priv_size = ops->rxfh_priv_size; 864 865 return ctx; 866 } 867
+2 -7
net/ethtool/rss.c
··· 824 static int 825 ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info) 826 { 827 - bool indir_reset = false, indir_mod, xfrm_sym = false; 828 struct rss_req_info *request = RSS_REQINFO(req_info); 829 struct ethtool_rxfh_context *ctx = NULL; 830 struct net_device *dev = req_info->dev; 831 bool mod = false, fields_mod = false; ··· 860 861 rxfh.input_xfrm = data.input_xfrm; 862 ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod); 863 - /* For drivers which don't support input_xfrm it will be set to 0xff 864 - * in the RSS context info. In all other case input_xfrm != 0 means 865 - * symmetric hashing is requested. 866 - */ 867 - if (!request->rss_context || ops->rxfh_per_ctx_key) 868 - xfrm_sym = rxfh.input_xfrm || data.input_xfrm; 869 if (rxfh.input_xfrm == data.input_xfrm) 870 rxfh.input_xfrm = RXH_XFRM_NO_CHANGE; 871
··· 824 static int 825 ethnl_rss_set(struct ethnl_req_info *req_info, struct genl_info *info) 826 { 827 struct rss_req_info *request = RSS_REQINFO(req_info); 828 + bool indir_reset = false, indir_mod, xfrm_sym; 829 struct ethtool_rxfh_context *ctx = NULL; 830 struct net_device *dev = req_info->dev; 831 bool mod = false, fields_mod = false; ··· 860 861 rxfh.input_xfrm = data.input_xfrm; 862 ethnl_update_u8(&rxfh.input_xfrm, tb[ETHTOOL_A_RSS_INPUT_XFRM], &mod); 863 + xfrm_sym = rxfh.input_xfrm || data.input_xfrm; 864 if (rxfh.input_xfrm == data.input_xfrm) 865 rxfh.input_xfrm = RXH_XFRM_NO_CHANGE; 866
+2 -1
net/ipv6/ip6_fib.c
··· 1138 fib6_set_expires(iter, rt->expires); 1139 fib6_add_gc_list(iter); 1140 } 1141 - if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT))) { 1142 iter->fib6_flags &= ~RTF_ADDRCONF; 1143 iter->fib6_flags &= ~RTF_PREFIX_RT; 1144 }
··· 1138 fib6_set_expires(iter, rt->expires); 1139 fib6_add_gc_list(iter); 1140 } 1141 + if (!(rt->fib6_flags & (RTF_ADDRCONF | RTF_PREFIX_RT)) && 1142 + !iter->fib6_nh->fib_nh_gw_family) { 1143 iter->fib6_flags &= ~RTF_ADDRCONF; 1144 iter->fib6_flags &= ~RTF_PREFIX_RT; 1145 }
+1 -1
net/netfilter/nf_tables_api.c
··· 5914 5915 list_for_each_entry(catchall, &set->catchall_list, list) { 5916 ext = nft_set_elem_ext(set, catchall->elem); 5917 - if (!nft_set_elem_active(ext, genmask)) 5918 continue; 5919 5920 nft_clear(ctx->net, ext);
··· 5914 5915 list_for_each_entry(catchall, &set->catchall_list, list) { 5916 ext = nft_set_elem_ext(set, catchall->elem); 5917 + if (nft_set_elem_active(ext, genmask)) 5918 continue; 5919 5920 nft_clear(ctx->net, ext);
+6 -7
net/sched/cls_u32.c
··· 161 int toff = off + key->off + (off2 & key->offmask); 162 __be32 *data, hdata; 163 164 - if (skb_headroom(skb) + toff > INT_MAX) 165 - goto out; 166 - 167 - data = skb_header_pointer(skb, toff, 4, &hdata); 168 if (!data) 169 goto out; 170 if ((*data ^ key->val) & key->mask) { ··· 212 if (ht->divisor) { 213 __be32 *data, hdata; 214 215 - data = skb_header_pointer(skb, off + n->sel.hoff, 4, 216 - &hdata); 217 if (!data) 218 goto out; 219 sel = ht->divisor & u32_hash_fold(*data, &n->sel, ··· 228 if (n->sel.flags & TC_U32_VAROFFSET) { 229 __be16 *data, hdata; 230 231 - data = skb_header_pointer(skb, 232 off + n->sel.offoff, 233 2, &hdata); 234 if (!data)
··· 161 int toff = off + key->off + (off2 & key->offmask); 162 __be32 *data, hdata; 163 164 + data = skb_header_pointer_careful(skb, toff, 4, 165 + &hdata); 166 if (!data) 167 goto out; 168 if ((*data ^ key->val) & key->mask) { ··· 214 if (ht->divisor) { 215 __be32 *data, hdata; 216 217 + data = skb_header_pointer_careful(skb, 218 + off + n->sel.hoff, 219 + 4, &hdata); 220 if (!data) 221 goto out; 222 sel = ht->divisor & u32_hash_fold(*data, &n->sel, ··· 229 if (n->sel.flags & TC_U32_VAROFFSET) { 230 __be16 *data, hdata; 231 232 + data = skb_header_pointer_careful(skb, 233 off + n->sel.offoff, 234 2, &hdata); 235 if (!data)
+2 -2
net/tipc/crypto.c
··· 1219 rx = c; 1220 tx = tipc_net(rx->net)->crypto_tx; 1221 if (cancel_delayed_work(&rx->work)) { 1222 - kfree(rx->skey); 1223 rx->skey = NULL; 1224 atomic_xchg(&rx->key_distr, 0); 1225 tipc_node_put(rx->node); ··· 2394 break; 2395 default: 2396 synchronize_rcu(); 2397 - kfree(rx->skey); 2398 rx->skey = NULL; 2399 break; 2400 }
··· 1219 rx = c; 1220 tx = tipc_net(rx->net)->crypto_tx; 1221 if (cancel_delayed_work(&rx->work)) { 1222 + kfree_sensitive(rx->skey); 1223 rx->skey = NULL; 1224 atomic_xchg(&rx->key_distr, 0); 1225 tipc_node_put(rx->node); ··· 2394 break; 2395 default: 2396 synchronize_rcu(); 2397 + kfree_sensitive(rx->skey); 2398 rx->skey = NULL; 2399 break; 2400 }
+64
tools/testing/selftests/net/udpgro_fwd.sh
··· 162 echo " ok" 163 } 164 165 run_bench() { 166 local -r msg=$1 167 local -r dst=$2 ··· 292 # stray traffic on top of the UDP tunnel 293 ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null 294 run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST 295 cleanup 296 done 297
··· 162 	echo " ok"
 163 }
 164 
 165 + run_test_csum() {
 166 + 	local -r msg="$1"
 167 + 	local -r dst="$2"
 168 + 	local csum_error_filter=UdpInCsumErrors
 169 + 	local csum_errors
 170 + 
 171 + 	printf "%-40s" "$msg"
 172 + 
 173 + 	is_ipv6 "$dst" && csum_error_filter=Udp6InCsumErrors
 174 + 
 175 + 	ip netns exec "$NS_DST" iperf3 -s -1 >/dev/null &
 176 + 	wait_local_port_listen "$NS_DST" 5201 tcp
 177 + 	local spid="$!"
 178 + 	ip netns exec "$NS_SRC" iperf3 -c "$dst" -t 2 >/dev/null
 179 + 	local retc="$?"
 180 + 	wait "$spid"
 181 + 	local rets="$?"
 182 + 	if [ "$rets" -ne 0 ] || [ "$retc" -ne 0 ]; then
 183 + 		echo " fail client exit code $retc, server $rets"
 184 + 		ret=1
 185 + 		return
 186 + 	fi
 187 + 
 188 + 	csum_errors=$(ip netns exec "$NS_DST" nstat -as "$csum_error_filter" |
 189 + 		      grep "$csum_error_filter" | awk '{print $2}')
 190 + 	if [ -n "$csum_errors" ] && [ "$csum_errors" -gt 0 ]; then
 191 + 		echo " fail - csum error on receive $csum_errors, expected 0"
 192 + 		ret=1
 193 + 		return
 194 + 	fi
 195 + 	echo " ok"
 196 + }
 197 + 
 198 run_bench() {
 199 	local -r msg=$1
 200 	local -r dst=$2
··· 259
 260 	# stray traffic on top of the UDP tunnel
 261 	ip netns exec $NS_SRC $PING -q -c 1 $OL_NET$DST_NAT >/dev/null
 262 	run_test "GRO fwd over UDP tunnel" $OL_NET$DST_NAT 10 10 $OL_NET$DST
 263 + 	cleanup
 264 + 
 265 + 	# force segmentation and re-aggregation
 266 + 	create_vxlan_pair
 267 + 	ip netns exec "$NS_DST" ethtool -K veth"$DST" generic-receive-offload on
 268 + 	ip netns exec "$NS_SRC" ethtool -K veth"$SRC" tso off
 269 + 	ip -n "$NS_SRC" link set dev veth"$SRC" mtu 1430
 270 + 
 271 + 	# forward to a 2nd veth pair
 272 + 	ip -n "$NS_DST" link add br0 type bridge
 273 + 	ip -n "$NS_DST" link set dev veth"$DST" master br0
 274 + 
 275 + 	# segment the aggregated TSO packet, without csum offload
 276 + 	ip -n "$NS_DST" link add veth_segment type veth peer veth_rx
 277 + 	for FEATURE in tso tx-udp-segmentation tx-checksumming; do
 278 + 		ip netns exec "$NS_DST" ethtool -K veth_segment "$FEATURE" off
 279 + 	done
 280 + 	ip -n "$NS_DST" link set dev veth_segment master br0 up
 281 + 	ip -n "$NS_DST" link set dev br0 up
 282 + 	ip -n "$NS_DST" link set dev veth_rx up
 283 + 
 284 + 	# move the lower layer IP to the last added veth
 285 + 	for ADDR in "$BM_NET_V4$DST/24" "$BM_NET_V6$DST/64"; do
 286 + 		# the nodad argument will make iproute emit a harmless warning
 287 + 		# with ipv4 addresses
 288 + 		ip -n "$NS_DST" addr del dev veth"$DST" "$ADDR"
 289 + 		ip -n "$NS_DST" addr add dev veth_rx "$ADDR" \
 290 + 			nodad 2>/dev/null
 291 + 	done
 292 + 
 293 + 	run_test_csum "GSO after GRO" "$OL_NET$DST"
 294 	cleanup
 295 done