Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'net-6.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from bluetooth, CAN and wireless.

There are no known regressions currently under investigation.

Current release - fix to a fix:

- can: gs_usb_receive_bulk_callback(): fix error message

Current release - regressions:

- eth: gve: fix probe failure if clock read fails

Previous releases - regressions:

- ipv6: use the right ifindex when replying to icmpv6 from localhost

- mptcp: fix race in mptcp_pm_nl_flush_addrs_doit()

- bluetooth: fix null-ptr-deref in hci_uart_write_work

- eth:
- sfc: fix deadlock in RSS config read
- ice: fix NULL pointer dereference in ice_vsi_set_napi_queues
- mlx5: fix memory leak in esw_acl_ingress_lgcy_setup()

Previous releases - always broken:

- core: fix segmentation of forwarding fraglist GRO

- wifi: mac80211: correctly decode TTLM with default link map

- mptcp: avoid dup SUB_CLOSED events after disconnect

- nfc: fix memleak in nfc_llcp_send_ui_frame().

- eth:
- bonding: fix use-after-free due to enslave fail
- mlx5e:
- TC, delete flows only for existing peers
- fix inverted cap check in tx flow table root disconnect"

* tag 'net-6.19-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (43 commits)
net: fix segmentation of forwarding fraglist GRO
wifi: mac80211: correctly decode TTLM with default link map
selftests: mptcp: join: fix local endp not being tracked
selftests: mptcp: check subflow errors in close events
mptcp: only reset subflow errors when propagated
selftests: mptcp: check no dup close events after error
mptcp: avoid dup SUB_CLOSED events after disconnect
net/mlx5e: Skip ESN replay window setup for IPsec crypto offload
net/mlx5: Fix vhca_id access call trace use before alloc
net/mlx5: fs, Fix inverted cap check in tx flow table root disconnect
net: phy: micrel: fix clk warning when removing the driver
net/mlx5e: don't assume psp tx skbs are ipv6 csum handling
net: bridge: fix static key check
nfc: nci: Fix race between rfkill and nci_unregister_device().
gve: fix probe failure if clock read fails
net/mlx5e: Account for netdev stats in ndo_get_stats64
net/mlx5e: TC, delete flows only for existing peers
net/mlx5: Fix Unbinding uplink-netdev in switchdev mode
ice: stop counting UDP csum mismatch as rx_errors
ice: Fix NULL pointer dereference in ice_vsi_set_napi_queues
...

+402 -163
-1
MAINTAINERS
··· 9260 9260 EMULEX 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net) 9261 9261 M: Ajit Khaparde <ajit.khaparde@broadcom.com> 9262 9262 M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com> 9263 - M: Somnath Kotur <somnath.kotur@broadcom.com> 9264 9263 L: netdev@vger.kernel.org 9265 9264 S: Maintained 9266 9265 W: http://www.emulex.com
+2 -2
drivers/bluetooth/hci_ldisc.c
··· 685 685 return err; 686 686 } 687 687 688 + set_bit(HCI_UART_PROTO_INIT, &hu->flags); 689 + 688 690 if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) 689 691 return 0; 690 692 ··· 713 711 return -EPROTONOSUPPORT; 714 712 715 713 hu->proto = p; 716 - 717 - set_bit(HCI_UART_PROTO_INIT, &hu->flags); 718 714 719 715 err = hci_uart_register_dev(hu); 720 716 if (err) {
+15 -13
drivers/net/bonding/bond_main.c
··· 2202 2202 unblock_netpoll_tx(); 2203 2203 } 2204 2204 2205 - /* broadcast mode uses the all_slaves to loop through slaves. */ 2206 - if (bond_mode_can_use_xmit_hash(bond) || 2207 - BOND_MODE(bond) == BOND_MODE_BROADCAST) 2208 - bond_update_slave_arr(bond, NULL); 2209 - 2210 2205 if (!slave_dev->netdev_ops->ndo_bpf || 2211 2206 !slave_dev->netdev_ops->ndo_xdp_xmit) { 2212 2207 if (bond->xdp_prog) { ··· 2234 2239 if (bond->xdp_prog) 2235 2240 bpf_prog_inc(bond->xdp_prog); 2236 2241 } 2242 + 2243 + /* broadcast mode uses the all_slaves to loop through slaves. */ 2244 + if (bond_mode_can_use_xmit_hash(bond) || 2245 + BOND_MODE(bond) == BOND_MODE_BROADCAST) 2246 + bond_update_slave_arr(bond, NULL); 2237 2247 2238 2248 bond_xdp_set_features(bond_dev); 2239 2249 ··· 3047 3047 __func__, &sip); 3048 3048 return; 3049 3049 } 3050 - slave->last_rx = jiffies; 3051 - slave->target_last_arp_rx[i] = jiffies; 3050 + WRITE_ONCE(slave->last_rx, jiffies); 3051 + WRITE_ONCE(slave->target_last_arp_rx[i], jiffies); 3052 3052 } 3053 3053 3054 3054 static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, ··· 3267 3267 __func__, saddr); 3268 3268 return; 3269 3269 } 3270 - slave->last_rx = jiffies; 3271 - slave->target_last_arp_rx[i] = jiffies; 3270 + WRITE_ONCE(slave->last_rx, jiffies); 3271 + WRITE_ONCE(slave->target_last_arp_rx[i], jiffies); 3272 3272 } 3273 3273 3274 3274 static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, ··· 3338 3338 (slave_do_arp_validate_only(bond) && is_ipv6) || 3339 3339 #endif 3340 3340 !slave_do_arp_validate_only(bond)) 3341 - slave->last_rx = jiffies; 3341 + WRITE_ONCE(slave->last_rx, jiffies); 3342 3342 return RX_HANDLER_ANOTHER; 3343 3343 } else if (is_arp) { 3344 3344 return bond_arp_rcv(skb, bond, slave); ··· 3406 3406 3407 3407 if (slave->link != BOND_LINK_UP) { 3408 3408 if (bond_time_in_interval(bond, last_tx, 1) && 3409 - bond_time_in_interval(bond, slave->last_rx, 1)) { 3409 + bond_time_in_interval(bond, 
READ_ONCE(slave->last_rx), 1)) { 3410 3410 3411 3411 bond_propose_link_state(slave, BOND_LINK_UP); 3412 3412 slave_state_changed = 1; ··· 3430 3430 * when the source ip is 0, so don't take the link down 3431 3431 * if we don't know our ip yet 3432 3432 */ 3433 - if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || 3434 - !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) { 3433 + if (!bond_time_in_interval(bond, last_tx, 3434 + bond->params.missed_max) || 3435 + !bond_time_in_interval(bond, READ_ONCE(slave->last_rx), 3436 + bond->params.missed_max)) { 3435 3437 3436 3438 bond_propose_link_state(slave, BOND_LINK_DOWN); 3437 3439 slave_state_changed = 1;
+4 -4
drivers/net/bonding/bond_options.c
··· 1152 1152 1153 1153 if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) { 1154 1154 bond_for_each_slave(bond, slave, iter) 1155 - slave->target_last_arp_rx[slot] = last_rx; 1155 + WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx); 1156 1156 targets[slot] = target; 1157 1157 } 1158 1158 } ··· 1221 1221 bond_for_each_slave(bond, slave, iter) { 1222 1222 targets_rx = slave->target_last_arp_rx; 1223 1223 for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++) 1224 - targets_rx[i] = targets_rx[i+1]; 1225 - targets_rx[i] = 0; 1224 + WRITE_ONCE(targets_rx[i], READ_ONCE(targets_rx[i+1])); 1225 + WRITE_ONCE(targets_rx[i], 0); 1226 1226 } 1227 1227 for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++) 1228 1228 targets[i] = targets[i+1]; ··· 1377 1377 1378 1378 if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) { 1379 1379 bond_for_each_slave(bond, slave, iter) { 1380 - slave->target_last_arp_rx[slot] = last_rx; 1380 + WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx); 1381 1381 slave_set_ns_maddr(bond, slave, target, &targets[slot]); 1382 1382 } 1383 1383 targets[slot] = *target;
+1 -1
drivers/net/can/at91_can.c
··· 1099 1099 if (IS_ERR(transceiver)) { 1100 1100 err = PTR_ERR(transceiver); 1101 1101 dev_err_probe(&pdev->dev, err, "failed to get phy\n"); 1102 - goto exit_iounmap; 1102 + goto exit_free; 1103 1103 } 1104 1104 1105 1105 dev->netdev_ops = &at91_netdev_ops;
+2 -2
drivers/net/can/usb/gs_usb.c
··· 610 610 { 611 611 struct gs_usb *parent = urb->context; 612 612 struct gs_can *dev; 613 - struct net_device *netdev; 613 + struct net_device *netdev = NULL; 614 614 int rc; 615 615 struct net_device_stats *stats; 616 616 struct gs_host_frame *hf = urb->transfer_buffer; ··· 768 768 } 769 769 } else if (rc != -ESHUTDOWN && net_ratelimit()) { 770 770 netdev_info(netdev, "failed to re-submit IN URB: %pe\n", 771 - ERR_PTR(urb->status)); 771 + ERR_PTR(rc)); 772 772 } 773 773 } 774 774
+8 -7
drivers/net/dsa/yt921x.c
··· 682 682 const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; 683 683 u32 reg = YT921X_MIBn_DATA0(port) + desc->offset; 684 684 u64 *valp = &((u64 *)mib)[i]; 685 - u64 val = *valp; 686 685 u32 val0; 687 - u32 val1; 686 + u64 val; 688 687 689 688 res = yt921x_reg_read(priv, reg, &val0); 690 689 if (res) 691 690 break; 692 691 693 692 if (desc->size <= 1) { 694 - if (val < (u32)val) 695 - /* overflow */ 696 - val += (u64)U32_MAX + 1; 697 - val &= ~U32_MAX; 698 - val |= val0; 693 + u64 old_val = *valp; 694 + 695 + val = (old_val & ~(u64)U32_MAX) | val0; 696 + if (val < old_val) 697 + val += 1ull << 32; 699 698 } else { 699 + u32 val1; 700 + 700 701 res = yt921x_reg_read(priv, reg + 4, &val1); 701 702 if (res) 702 703 break;
+4 -1
drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c
··· 1261 1261 netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n", 1262 1262 phy_modes(intf->phy_interface), intf->port); 1263 1263 ret = -EINVAL; 1264 - goto err_free_netdev; 1264 + goto err_deregister_fixed_link; 1265 1265 } 1266 1266 1267 1267 ret = of_get_ethdev_address(ndev_dn, ndev); ··· 1286 1286 1287 1287 return intf; 1288 1288 1289 + err_deregister_fixed_link: 1290 + if (of_phy_is_fixed_link(ndev_dn)) 1291 + of_phy_deregister_fixed_link(ndev_dn); 1289 1292 err_free_netdev: 1290 1293 free_netdev(ndev); 1291 1294 err:
+5
drivers/net/ethernet/google/gve/gve.h
··· 1206 1206 } 1207 1207 } 1208 1208 1209 + static inline bool gve_is_clock_enabled(struct gve_priv *priv) 1210 + { 1211 + return priv->nic_ts_report; 1212 + } 1213 + 1209 1214 /* gqi napi handler defined in gve_main.c */ 1210 1215 int gve_napi_poll(struct napi_struct *napi, int budget); 1211 1216
+1 -1
drivers/net/ethernet/google/gve/gve_ethtool.c
··· 938 938 939 939 ethtool_op_get_ts_info(netdev, info); 940 940 941 - if (priv->nic_timestamp_supported) { 941 + if (gve_is_clock_enabled(priv)) { 942 942 info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE | 943 943 SOF_TIMESTAMPING_RAW_HARDWARE; 944 944
+7 -5
drivers/net/ethernet/google/gve/gve_main.c
··· 680 680 } 681 681 } 682 682 683 - err = gve_init_clock(priv); 684 - if (err) { 685 - dev_err(&priv->pdev->dev, "Failed to init clock"); 686 - goto abort_with_ptype_lut; 683 + if (priv->nic_timestamp_supported) { 684 + err = gve_init_clock(priv); 685 + if (err) { 686 + dev_warn(&priv->pdev->dev, "Failed to init clock, continuing without PTP support"); 687 + err = 0; 688 + } 687 689 } 688 690 689 691 err = gve_init_rss_config(priv, priv->rx_cfg.num_queues); ··· 2185 2183 } 2186 2184 2187 2185 if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) { 2188 - if (!priv->nic_ts_report) { 2186 + if (!gve_is_clock_enabled(priv)) { 2189 2187 NL_SET_ERR_MSG_MOD(extack, 2190 2188 "RX timestamping is not supported"); 2191 2189 kernel_config->rx_filter = HWTSTAMP_FILTER_NONE;
-8
drivers/net/ethernet/google/gve/gve_ptp.c
··· 70 70 struct gve_ptp *ptp; 71 71 int err; 72 72 73 - if (!priv->nic_timestamp_supported) { 74 - dev_dbg(&priv->pdev->dev, "Device does not support PTP\n"); 75 - return -EOPNOTSUPP; 76 - } 77 - 78 73 priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL); 79 74 if (!priv->ptp) 80 75 return -ENOMEM; ··· 110 115 int gve_init_clock(struct gve_priv *priv) 111 116 { 112 117 int err; 113 - 114 - if (!priv->nic_timestamp_supported) 115 - return 0; 116 118 117 119 err = gve_ptp_init(priv); 118 120 if (err)
+1 -1
drivers/net/ethernet/google/gve/gve_rx_dqo.c
··· 484 484 { 485 485 const struct gve_xdp_buff *ctx = (void *)_ctx; 486 486 487 - if (!ctx->gve->nic_ts_report) 487 + if (!gve_is_clock_enabled(ctx->gve)) 488 488 return -ENODATA; 489 489 490 490 if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID))
+6 -4
drivers/net/ethernet/intel/ice/ice_lib.c
··· 2783 2783 2784 2784 ASSERT_RTNL(); 2785 2785 ice_for_each_rxq(vsi, q_idx) 2786 - netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, 2787 - &vsi->rx_rings[q_idx]->q_vector->napi); 2786 + if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector) 2787 + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, 2788 + &vsi->rx_rings[q_idx]->q_vector->napi); 2788 2789 2789 2790 ice_for_each_txq(vsi, q_idx) 2790 - netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, 2791 - &vsi->tx_rings[q_idx]->q_vector->napi); 2791 + if (vsi->tx_rings[q_idx] && vsi->tx_rings[q_idx]->q_vector) 2792 + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, 2793 + &vsi->tx_rings[q_idx]->q_vector->napi); 2792 2794 /* Also set the interrupt number for the NAPI */ 2793 2795 ice_for_each_q_vector(vsi, v_idx) { 2794 2796 struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];
-1
drivers/net/ethernet/intel/ice/ice_main.c
··· 6982 6982 cur_ns->rx_errors = pf->stats.crc_errors + 6983 6983 pf->stats.illegal_bytes + 6984 6984 pf->stats.rx_undersize + 6985 - pf->hw_csum_rx_error + 6986 6985 pf->stats.rx_jabber + 6987 6986 pf->stats.rx_fragments + 6988 6987 pf->stats.rx_oversize;
+10 -16
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
··· 11468 11468 */ 11469 11469 static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter) 11470 11470 { 11471 - struct net_device *netdev = adapter->netdev; 11472 11471 struct pci_dev *pdev = adapter->pdev; 11473 11472 struct ixgbe_hw *hw = &adapter->hw; 11474 - bool disable_dev; 11475 11473 int err = -EIO; 11476 11474 11477 11475 if (hw->mac.type != ixgbe_mac_e610) 11478 - goto clean_up_probe; 11476 + return err; 11479 11477 11480 11478 ixgbe_get_hw_control(adapter); 11481 - mutex_init(&hw->aci.lock); 11482 11479 err = ixgbe_get_flash_data(&adapter->hw); 11483 11480 if (err) 11484 - goto shutdown_aci; 11481 + goto err_release_hw_control; 11485 11482 11486 11483 timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); 11487 11484 INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task); ··· 11501 11504 devl_unlock(adapter->devlink); 11502 11505 11503 11506 return 0; 11504 - shutdown_aci: 11505 - mutex_destroy(&adapter->hw.aci.lock); 11507 + err_release_hw_control: 11506 11508 ixgbe_release_hw_control(adapter); 11507 - clean_up_probe: 11508 - disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); 11509 - free_netdev(netdev); 11510 - devlink_free(adapter->devlink); 11511 - pci_release_mem_regions(pdev); 11512 - if (disable_dev) 11513 - pci_disable_device(pdev); 11514 11509 return err; 11515 11510 } 11516 11511 ··· 11644 11655 if (err) 11645 11656 goto err_sw_init; 11646 11657 11647 - if (ixgbe_check_fw_error(adapter)) 11648 - return ixgbe_recovery_probe(adapter); 11658 + if (ixgbe_check_fw_error(adapter)) { 11659 + err = ixgbe_recovery_probe(adapter); 11660 + if (err) 11661 + goto err_sw_init; 11662 + 11663 + return 0; 11664 + } 11649 11665 11650 11666 if (adapter->hw.mac.type == ixgbe_mac_e610) { 11651 11667 err = ixgbe_get_caps(&adapter->hw);
+1 -1
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
··· 1389 1389 efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type); 1390 1390 if (efs->rule.flow_type < 0) { 1391 1391 ret = efs->rule.flow_type; 1392 - goto clean_rule; 1392 + goto clean_eth_rule; 1393 1393 } 1394 1394 1395 1395 ret = mvpp2_cls_rfs_parse_rule(&efs->rule);
+1 -1
drivers/net/ethernet/marvell/octeon_ep/octep_main.c
··· 1338 1338 1339 1339 ret = octep_ctrl_net_init(oct); 1340 1340 if (ret) 1341 - return ret; 1341 + goto unsupported_dev; 1342 1342 1343 1343 INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task); 1344 1344 INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task);
+16
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
··· 613 613 cq->dbg = NULL; 614 614 } 615 615 } 616 + 617 + static int vhca_id_show(struct seq_file *file, void *priv) 618 + { 619 + struct mlx5_core_dev *dev = file->private; 620 + 621 + seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id)); 622 + return 0; 623 + } 624 + 625 + DEFINE_SHOW_ATTRIBUTE(vhca_id); 626 + 627 + void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev) 628 + { 629 + debugfs_create_file("vhca_id", 0400, dev->priv.dbg.dbg_root, dev, 630 + &vhca_id_fops); 631 + }
+14
drivers/net/ethernet/mellanox/mlx5/core/dev.c
··· 575 575 return plen && flen && flen == plen && 576 576 !memcmp(fsystem_guid, psystem_guid, flen); 577 577 } 578 + 579 + void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev) 580 + { 581 + struct mlx5_priv *priv = &dev->priv; 582 + 583 + if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH]) 584 + device_lock_assert(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH]->adev.dev); 585 + else 586 + mlx5_core_err(dev, "ETH driver already removed\n"); 587 + if (priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP]) 588 + del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP]->adev); 589 + if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP]) 590 + del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP]->adev); 591 + }
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
··· 430 430 attrs->replay_esn.esn = sa_entry->esn_state.esn; 431 431 attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb; 432 432 attrs->replay_esn.overlap = sa_entry->esn_state.overlap; 433 - if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) 433 + if (attrs->dir == XFRM_DEV_OFFLOAD_OUT || 434 + x->xso.type != XFRM_DEV_OFFLOAD_PACKET) 434 435 goto skip_replay_window; 435 436 436 437 switch (x->replay_esn->replay_window) {
+11 -6
drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c
··· 177 177 { 178 178 struct mlx5e_priv *priv = netdev_priv(netdev); 179 179 struct net *net = sock_net(skb->sk); 180 - const struct ipv6hdr *ip6; 181 - struct tcphdr *th; 182 180 183 181 if (!mlx5e_psp_set_state(priv, skb, psp_st)) 184 182 return true; ··· 188 190 return false; 189 191 } 190 192 if (skb_is_gso(skb)) { 191 - ip6 = ipv6_hdr(skb); 192 - th = inner_tcp_hdr(skb); 193 + int len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb); 194 + struct tcphdr *th = inner_tcp_hdr(skb); 193 195 194 - th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr, 195 - &ip6->daddr, 0); 196 + if (skb->protocol == htons(ETH_P_IP)) { 197 + const struct iphdr *ip = ip_hdr(skb); 198 + 199 + th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0); 200 + } else { 201 + const struct ipv6hdr *ip6 = ipv6_hdr(skb); 202 + 203 + th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0); 204 + } 196 205 } 197 206 198 207 return true;
+12 -9
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4052 4052 mlx5e_queue_update_stats(priv); 4053 4053 } 4054 4054 4055 + netdev_stats_to_stats64(stats, &dev->stats); 4056 + 4055 4057 if (mlx5e_is_uplink_rep(priv)) { 4056 4058 struct mlx5e_vport_stats *vstats = &priv->stats.vport; 4057 4059 ··· 4070 4068 mlx5e_fold_sw_stats64(priv, stats); 4071 4069 } 4072 4070 4073 - stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer; 4074 - stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards); 4071 + stats->rx_missed_errors += priv->stats.qcnt.rx_out_of_buffer; 4072 + stats->rx_dropped += PPORT_2863_GET(pstats, if_in_discards); 4075 4073 4076 - stats->rx_length_errors = 4074 + stats->rx_length_errors += 4077 4075 PPORT_802_3_GET(pstats, a_in_range_length_errors) + 4078 4076 PPORT_802_3_GET(pstats, a_out_of_range_length_field) + 4079 4077 PPORT_802_3_GET(pstats, a_frame_too_long_errors) + 4080 4078 VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small); 4081 - stats->rx_crc_errors = 4079 + stats->rx_crc_errors += 4082 4080 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors); 4083 - stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors); 4084 - stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards); 4085 - stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + 4086 - stats->rx_frame_errors; 4087 - stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; 4081 + stats->rx_frame_errors += PPORT_802_3_GET(pstats, a_alignment_errors); 4082 + stats->tx_aborted_errors += PPORT_2863_GET(pstats, if_out_discards); 4083 + stats->rx_errors += stats->rx_length_errors + stats->rx_crc_errors + 4084 + stats->rx_frame_errors; 4085 + stats->tx_errors += stats->tx_aborted_errors + stats->tx_carrier_errors; 4088 4086 } 4089 4087 4090 4088 static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv) ··· 6844 6842 struct mlx5e_priv *priv = netdev_priv(netdev); 6845 6843 struct mlx5_core_dev *mdev = edev->mdev; 6846 6844 6845 + mlx5_eswitch_safe_aux_devs_remove(mdev); 6847 6846 
mlx5_core_uplink_netdev_set(mdev, NULL); 6848 6847 6849 6848 if (priv->profile)
+13 -6
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 2147 2147 2148 2148 static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow) 2149 2149 { 2150 + struct mlx5_devcom_comp_dev *devcom; 2151 + struct mlx5_devcom_comp_dev *pos; 2152 + struct mlx5_eswitch *peer_esw; 2150 2153 int i; 2151 2154 2152 - for (i = 0; i < MLX5_MAX_PORTS; i++) { 2153 - if (i == mlx5_get_dev_index(flow->priv->mdev)) 2154 - continue; 2155 + devcom = flow->priv->mdev->priv.eswitch->devcom; 2156 + mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) { 2157 + i = mlx5_get_dev_index(peer_esw->dev); 2155 2158 mlx5e_tc_del_fdb_peer_flow(flow, i); 2156 2159 } 2157 2160 } ··· 5516 5513 5517 5514 void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) 5518 5515 { 5516 + struct mlx5_devcom_comp_dev *devcom; 5517 + struct mlx5_devcom_comp_dev *pos; 5519 5518 struct mlx5e_tc_flow *flow, *tmp; 5519 + struct mlx5_eswitch *peer_esw; 5520 5520 int i; 5521 5521 5522 - for (i = 0; i < MLX5_MAX_PORTS; i++) { 5523 - if (i == mlx5_get_dev_index(esw->dev)) 5524 - continue; 5522 + devcom = esw->devcom; 5523 + 5524 + mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) { 5525 + i = mlx5_get_dev_index(peer_esw->dev); 5525 5526 list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i]) 5526 5527 mlx5e_tc_del_fdb_peers_flow(flow); 5527 5528 }
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c
··· 188 188 if (IS_ERR(vport->ingress.acl)) { 189 189 err = PTR_ERR(vport->ingress.acl); 190 190 vport->ingress.acl = NULL; 191 - return err; 191 + goto out; 192 192 } 193 193 194 194 err = esw_acl_ingress_lgcy_groups_create(esw, vport);
+5 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 929 929 int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev, 930 930 u16 vport_num); 931 931 bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev); 932 + void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev); 932 933 #else /* CONFIG_MLX5_ESWITCH */ 933 934 /* eswitch API stubs */ 934 935 static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } ··· 1010 1009 static inline bool 1011 1010 mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id) 1012 1011 { 1013 - return -EOPNOTSUPP; 1012 + return false; 1014 1013 } 1014 + 1015 + static inline void 1016 + mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) {} 1015 1017 1016 1018 #endif /* CONFIG_MLX5_ESWITCH */ 1017 1019
+26
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 3981 3981 return true; 3982 3982 } 3983 3983 3984 + #define MLX5_ESW_HOLD_TIMEOUT_MS 7000 3985 + #define MLX5_ESW_HOLD_RETRY_DELAY_MS 500 3986 + 3987 + void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) 3988 + { 3989 + unsigned long timeout; 3990 + bool hold_esw = true; 3991 + 3992 + /* Wait for any concurrent eswitch mode transition to complete. */ 3993 + if (!mlx5_esw_hold(dev)) { 3994 + timeout = jiffies + msecs_to_jiffies(MLX5_ESW_HOLD_TIMEOUT_MS); 3995 + while (!mlx5_esw_hold(dev)) { 3996 + if (!time_before(jiffies, timeout)) { 3997 + hold_esw = false; 3998 + break; 3999 + } 4000 + msleep(MLX5_ESW_HOLD_RETRY_DELAY_MS); 4001 + } 4002 + } 4003 + if (hold_esw) { 4004 + if (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) 4005 + mlx5_core_reps_aux_devs_remove(dev); 4006 + mlx5_esw_release(dev); 4007 + } 4008 + } 4009 + 3984 4010 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, 3985 4011 struct netlink_ext_ack *extack) 3986 4012 {
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 1198 1198 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; 1199 1199 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; 1200 1200 1201 - if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) 1201 + if (disconnect && 1202 + !MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) 1202 1203 return -EOPNOTSUPP; 1203 1204 1204 1205 MLX5_SET(set_flow_table_root_in, in, opcode,
+3 -11
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1806 1806 return -ENOMEM; 1807 1807 } 1808 1808 1809 - static int vhca_id_show(struct seq_file *file, void *priv) 1810 - { 1811 - struct mlx5_core_dev *dev = file->private; 1812 - 1813 - seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id)); 1814 - return 0; 1815 - } 1816 - 1817 - DEFINE_SHOW_ATTRIBUTE(vhca_id); 1818 - 1819 1809 static int mlx5_notifiers_init(struct mlx5_core_dev *dev) 1820 1810 { 1821 1811 int err; ··· 1874 1884 priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); 1875 1885 priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device), 1876 1886 mlx5_debugfs_root); 1877 - debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops); 1887 + 1878 1888 INIT_LIST_HEAD(&priv->traps); 1879 1889 1880 1890 err = mlx5_cmd_init(dev); ··· 2011 2021 err); 2012 2022 goto err_init_one; 2013 2023 } 2024 + 2025 + mlx5_vhca_debugfs_init(dev); 2014 2026 2015 2027 pci_save_state(pdev); 2016 2028 return 0;
+2
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 258 258 void mlx5_cmd_flush(struct mlx5_core_dev *dev); 259 259 void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); 260 260 void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); 261 + void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev); 261 262 262 263 int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, 263 264 u8 access_reg_group); ··· 291 290 void mlx5_unregister_device(struct mlx5_core_dev *dev); 292 291 void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev); 293 292 bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev); 293 + void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev); 294 294 295 295 void mlx5_fw_reporters_create(struct mlx5_core_dev *dev); 296 296 int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
+1
drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
··· 76 76 goto init_one_err; 77 77 } 78 78 79 + mlx5_vhca_debugfs_init(mdev); 79 80 return 0; 80 81 81 82 init_one_err:
+2 -3
drivers/net/ethernet/rocker/rocker_main.c
··· 1524 1524 { 1525 1525 struct rocker_world_ops *wops = rocker_port->rocker->wops; 1526 1526 1527 - if (!wops->port_post_fini) 1528 - return; 1529 - wops->port_post_fini(rocker_port); 1527 + if (wops->port_post_fini) 1528 + wops->port_post_fini(rocker_port); 1530 1529 kfree(rocker_port->wpriv); 1531 1530 } 1532 1531
+1 -6
drivers/net/ethernet/sfc/mcdi_filters.c
··· 2182 2182 2183 2183 int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx) 2184 2184 { 2185 - int rc; 2186 - 2187 - mutex_lock(&efx->net_dev->ethtool->rss_lock); 2188 - rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); 2189 - mutex_unlock(&efx->net_dev->ethtool->rss_lock); 2190 - return rc; 2185 + return efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); 2191 2186 } 2192 2187 2193 2188 void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx)
+27 -7
drivers/net/ethernet/spacemit/k1_emac.c
··· 1099 1099 100, 10000); 1100 1100 1101 1101 if (ret) { 1102 - netdev_err(priv->ndev, "Read stat timeout\n"); 1102 + /* 1103 + * This could be caused by the PHY stopping its refclk even when 1104 + * the link is up, for power saving. See also comments in 1105 + * emac_stats_update(). 1106 + */ 1107 + dev_err_ratelimited(&priv->ndev->dev, 1108 + "Read stat timeout. PHY clock stopped?\n"); 1103 1109 return ret; 1104 1110 } 1105 1111 ··· 1153 1147 1154 1148 assert_spin_locked(&priv->stats_lock); 1155 1149 1156 - if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) { 1157 - /* Not up, don't try to update */ 1150 + /* 1151 + * We can't read statistics if the interface is not up. Also, some PHYs 1152 + * stop their reference clocks for link down power saving, which also 1153 + * causes reading statistics to time out. Don't update and don't 1154 + * reschedule in these cases. 1155 + */ 1156 + if (!netif_running(priv->ndev) || 1157 + !netif_carrier_ok(priv->ndev) || 1158 + !netif_device_present(priv->ndev)) { 1158 1159 return; 1159 1160 } 1160 1161 1161 1162 for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) { 1162 1163 /* 1163 - * If reading stats times out, everything is broken and there's 1164 - * nothing we can do. Reading statistics also can't return an 1165 - * error, so just return without updating and without 1166 - * rescheduling. 1164 + * If reading stats times out anyway, the stat registers will be 1165 + * stuck, and we can't really recover from that. 1166 + * 1167 + * Reading statistics also can't return an error, so just return 1168 + * without updating and without rescheduling. 1167 1169 */ 1168 1170 if (emac_tx_read_stat_cnt(priv, i, &res)) 1169 1171 return; ··· 1650 1636 emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl); 1651 1637 1652 1638 emac_set_fc_autoneg(priv); 1639 + 1640 + /* 1641 + * Reschedule stats updates now that link is up. See comments in 1642 + * emac_stats_update(). 
1643 + */ 1644 + mod_timer(&priv->stats_timer, jiffies); 1653 1645 } 1654 1646 1655 1647 phy_print_status(phydev);
+13 -4
drivers/net/phy/micrel.c
··· 2643 2643 2644 2644 kszphy_parse_led_mode(phydev); 2645 2645 2646 - clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, "rmii-ref"); 2646 + clk = devm_clk_get_optional(&phydev->mdio.dev, "rmii-ref"); 2647 2647 /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */ 2648 2648 if (!IS_ERR_OR_NULL(clk)) { 2649 - unsigned long rate = clk_get_rate(clk); 2650 2649 bool rmii_ref_clk_sel_25_mhz; 2650 + unsigned long rate; 2651 + int err; 2652 + 2653 + err = clk_prepare_enable(clk); 2654 + if (err) { 2655 + phydev_err(phydev, "Failed to enable rmii-ref clock\n"); 2656 + return err; 2657 + } 2658 + 2659 + rate = clk_get_rate(clk); 2660 + clk_disable_unprepare(clk); 2651 2661 2652 2662 if (type) 2653 2663 priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel; ··· 2675 2665 } 2676 2666 } else if (!clk) { 2677 2667 /* unnamed clock from the generic ethernet-phy binding */ 2678 - clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL); 2668 + clk = devm_clk_get_optional(&phydev->mdio.dev, NULL); 2679 2669 } 2680 2670 2681 2671 if (IS_ERR(clk)) 2682 2672 return PTR_ERR(clk); 2683 2673 2684 - clk_disable_unprepare(clk); 2685 2674 priv->clk = clk; 2686 2675 2687 2676 if (ksz8041_fiber_mode(phydev))
+7 -2
drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
··· 395 395 struct sk_buff *skb) 396 396 { 397 397 unsigned long long data_bus_addr, data_base_addr; 398 + struct skb_shared_info *shinfo = skb_shinfo(skb); 398 399 struct device *dev = rxq->dpmaif_ctrl->dev; 399 400 struct dpmaif_bat_page *page_info; 400 401 unsigned int data_len; ··· 403 402 404 403 page_info = rxq->bat_frag->bat_skb; 405 404 page_info += t7xx_normal_pit_bid(pkt_info); 406 - dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); 407 405 408 406 if (!page_info->page) 409 407 return -EINVAL; 408 + 409 + if (shinfo->nr_frags >= MAX_SKB_FRAGS) 410 + return -EINVAL; 411 + 412 + dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); 410 413 411 414 data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); 412 415 data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); ··· 418 413 data_offset = data_bus_addr - data_base_addr; 419 414 data_offset += page_info->offset; 420 415 data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); 421 - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page, 416 + skb_add_rx_frag(skb, shinfo->nr_frags, page_info->page, 422 417 data_offset, data_len, page_info->data_len); 423 418 424 419 page_info->page = NULL;
+7 -6
include/net/bonding.h
··· 521 521 static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond, 522 522 struct slave *slave) 523 523 { 524 + unsigned long tmp, ret = READ_ONCE(slave->target_last_arp_rx[0]); 524 525 int i = 1; 525 - unsigned long ret = slave->target_last_arp_rx[0]; 526 526 527 - for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) 528 - if (time_before(slave->target_last_arp_rx[i], ret)) 529 - ret = slave->target_last_arp_rx[i]; 530 - 527 + for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) { 528 + tmp = READ_ONCE(slave->target_last_arp_rx[i]); 529 + if (time_before(tmp, ret)) 530 + ret = tmp; 531 + } 531 532 return ret; 532 533 } 533 534 ··· 538 537 if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL) 539 538 return slave_oldest_target_arp_rx(bond, slave); 540 539 541 - return slave->last_rx; 540 + return READ_ONCE(slave->last_rx); 542 541 } 543 542 544 543 static inline void slave_update_last_tx(struct slave *slave)
+2
include/net/nfc/nfc.h
··· 219 219 220 220 int nfc_register_device(struct nfc_dev *dev); 221 221 222 + void nfc_unregister_rfkill(struct nfc_dev *dev); 223 + void nfc_remove_device(struct nfc_dev *dev); 222 224 void nfc_unregister_device(struct nfc_dev *dev); 223 225 224 226 /**
+3
net/bluetooth/mgmt.c
··· 1966 1966 } 1967 1967 1968 1968 mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); 1969 + mgmt_pending_free(cmd); 1969 1970 return; 1970 1971 } 1971 1972 ··· 1985 1984 sock_put(match.sk); 1986 1985 1987 1986 hci_update_eir_sync(hdev); 1987 + mgmt_pending_free(cmd); 1988 1988 } 1989 1989 1990 1990 static int set_ssp_sync(struct hci_dev *hdev, void *data) ··· 6440 6438 hci_dev_clear_flag(hdev, HCI_ADVERTISING); 6441 6439 6442 6440 settings_rsp(cmd, &match); 6441 + mgmt_pending_free(cmd); 6443 6442 6444 6443 new_settings(hdev, match.sk); 6445 6444
+1 -1
net/bridge/br_input.c
··· 274 274 int ret; 275 275 276 276 net = dev_net(skb->dev); 277 - #ifdef HAVE_JUMP_LABEL 277 + #ifdef CONFIG_JUMP_LABEL 278 278 if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING])) 279 279 goto frame_finish; 280 280 #endif
+2
net/core/filter.c
··· 3353 3353 shinfo->gso_type &= ~SKB_GSO_TCPV4; 3354 3354 shinfo->gso_type |= SKB_GSO_TCPV6; 3355 3355 } 3356 + shinfo->gso_type |= SKB_GSO_DODGY; 3356 3357 } 3357 3358 3358 3359 bpf_skb_change_protocol(skb, ETH_P_IPV6); ··· 3384 3383 shinfo->gso_type &= ~SKB_GSO_TCPV6; 3385 3384 shinfo->gso_type |= SKB_GSO_TCPV4; 3386 3385 } 3386 + shinfo->gso_type |= SKB_GSO_DODGY; 3387 3387 } 3388 3388 3389 3389 bpf_skb_change_protocol(skb, ETH_P_IP);
+2 -1
net/ipv4/tcp_offload.c
··· 107 107 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { 108 108 struct tcphdr *th = tcp_hdr(skb); 109 109 110 - if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) 110 + if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) && 111 + !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) 111 112 return __tcp4_gso_segment_list(skb, features); 112 113 113 114 skb->ip_summed = CHECKSUM_NONE;
+2 -1
net/ipv4/udp_offload.c
··· 514 514 515 515 if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) { 516 516 /* Detect modified geometry and pass those to skb_segment. */ 517 - if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) 517 + if ((skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) && 518 + !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_DODGY)) 518 519 return __udp_gso_segment_list(gso_skb, features, is_ipv6); 519 520 520 521 ret = __skb_linearize(gso_skb);
+3 -1
net/ipv6/icmp.c
··· 965 965 fl6.daddr = ipv6_hdr(skb)->saddr; 966 966 if (saddr) 967 967 fl6.saddr = *saddr; 968 - fl6.flowi6_oif = icmp6_iif(skb); 968 + fl6.flowi6_oif = ipv6_addr_loopback(&fl6.daddr) ? 969 + skb->dev->ifindex : 970 + icmp6_iif(skb); 969 971 fl6.fl6_icmp_type = type; 970 972 fl6.flowi6_mark = mark; 971 973 fl6.flowi6_uid = sock_net_uid(net, NULL);
+2 -1
net/ipv6/tcpv6_offload.c
··· 170 170 if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { 171 171 struct tcphdr *th = tcp_hdr(skb); 172 172 173 - if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) 173 + if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) && 174 + !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) 174 175 return __tcp6_gso_segment_list(skb, features); 175 176 176 177 skb->ip_summed = CHECKSUM_NONE;
+5 -3
net/mac80211/mlme.c
··· 8 8 * Copyright 2007, Michael Wu <flamingice@sourmilk.net> 9 9 * Copyright 2013-2014 Intel Mobile Communications GmbH 10 10 * Copyright (C) 2015 - 2017 Intel Deutschland GmbH 11 - * Copyright (C) 2018 - 2025 Intel Corporation 11 + * Copyright (C) 2018 - 2026 Intel Corporation 12 12 */ 13 13 14 14 #include <linux/delay.h> ··· 6190 6190 return -EINVAL; 6191 6191 } 6192 6192 6193 - link_map_presence = *pos; 6194 - pos++; 6193 + if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) { 6194 + link_map_presence = *pos; 6195 + pos++; 6196 + } 6195 6197 6196 6198 if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT) { 6197 6199 ttlm_info->switch_time = get_unaligned_le16(pos);
+13 -3
net/mptcp/pm_kernel.c
··· 1294 1294 int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) 1295 1295 { 1296 1296 struct pm_nl_pernet *pernet = genl_info_pm_nl(info); 1297 - LIST_HEAD(free_list); 1297 + struct list_head free_list; 1298 1298 1299 1299 spin_lock_bh(&pernet->lock); 1300 - list_splice_init(&pernet->endp_list, &free_list); 1300 + free_list = pernet->endp_list; 1301 + INIT_LIST_HEAD_RCU(&pernet->endp_list); 1301 1302 __reset_counters(pernet); 1302 1303 pernet->next_id = 1; 1303 1304 bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); 1304 1305 spin_unlock_bh(&pernet->lock); 1305 - mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); 1306 + 1307 + if (free_list.next == &pernet->endp_list) 1308 + return 0; 1309 + 1306 1310 synchronize_rcu(); 1311 + 1312 + /* Adjust the pointers to free_list instead of pernet->endp_list */ 1313 + free_list.prev->next = &free_list; 1314 + free_list.next->prev = &free_list; 1315 + 1316 + mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); 1307 1317 __flush_addrs(&free_list); 1308 1318 return 0; 1309 1319 }
+7 -6
net/mptcp/protocol.c
··· 821 821 822 822 static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) 823 823 { 824 - int err = sock_error(ssk); 825 824 int ssk_state; 826 - 827 - if (!err) 828 - return false; 825 + int err; 829 826 830 827 /* only propagate errors on fallen-back sockets or 831 828 * on MPC connect 832 829 */ 833 830 if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk))) 831 + return false; 832 + 833 + err = sock_error(ssk); 834 + if (!err) 834 835 return false; 835 836 836 837 /* We need to propagate only transition to CLOSE state. ··· 2599 2598 struct mptcp_sock *msk = mptcp_sk(sk); 2600 2599 struct sk_buff *skb; 2601 2600 2602 - /* The first subflow can already be closed and still in the list */ 2603 - if (subflow->close_event_done) 2601 + /* The first subflow can already be closed or disconnected */ 2602 + if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0) 2604 2603 return; 2605 2604 2606 2605 subflow->close_event_done = true;
+24 -3
net/nfc/core.c
··· 1147 1147 EXPORT_SYMBOL(nfc_register_device); 1148 1148 1149 1149 /** 1150 - * nfc_unregister_device - unregister a nfc device in the nfc subsystem 1150 + * nfc_unregister_rfkill - unregister a nfc device in the rfkill subsystem 1151 1151 * 1152 1152 * @dev: The nfc device to unregister 1153 1153 */ 1154 - void nfc_unregister_device(struct nfc_dev *dev) 1154 + void nfc_unregister_rfkill(struct nfc_dev *dev) 1155 1155 { 1156 - int rc; 1157 1156 struct rfkill *rfk = NULL; 1157 + int rc; 1158 1158 1159 1159 pr_debug("dev_name=%s\n", dev_name(&dev->dev)); 1160 1160 ··· 1175 1175 rfkill_unregister(rfk); 1176 1176 rfkill_destroy(rfk); 1177 1177 } 1178 + } 1179 + EXPORT_SYMBOL(nfc_unregister_rfkill); 1178 1180 1181 + /** 1182 + * nfc_remove_device - remove a nfc device in the nfc subsystem 1183 + * 1184 + * @dev: The nfc device to remove 1185 + */ 1186 + void nfc_remove_device(struct nfc_dev *dev) 1187 + { 1179 1188 if (dev->ops->check_presence) { 1180 1189 timer_delete_sync(&dev->check_pres_timer); 1181 1190 cancel_work_sync(&dev->check_pres_work); ··· 1196 1187 nfc_devlist_generation++; 1197 1188 device_del(&dev->dev); 1198 1189 mutex_unlock(&nfc_devlist_mutex); 1190 + } 1191 + EXPORT_SYMBOL(nfc_remove_device); 1192 + 1193 + /** 1194 + * nfc_unregister_device - unregister a nfc device in the nfc subsystem 1195 + * 1196 + * @dev: The nfc device to unregister 1197 + */ 1198 + void nfc_unregister_device(struct nfc_dev *dev) 1199 + { 1200 + nfc_unregister_rfkill(dev); 1201 + nfc_remove_device(dev); 1199 1202 } 1200 1203 EXPORT_SYMBOL(nfc_unregister_device); 1201 1204
+16 -1
net/nfc/llcp_commands.c
··· 778 778 if (likely(frag_len > 0)) 779 779 skb_put_data(pdu, msg_ptr, frag_len); 780 780 781 + spin_lock(&local->tx_queue.lock); 782 + 783 + if (list_empty(&local->list)) { 784 + spin_unlock(&local->tx_queue.lock); 785 + 786 + kfree_skb(pdu); 787 + 788 + len -= remaining_len; 789 + if (len == 0) 790 + len = -ENXIO; 791 + break; 792 + } 793 + 781 794 /* No need to check for the peer RW for UI frames */ 782 - skb_queue_tail(&local->tx_queue, pdu); 795 + __skb_queue_tail(&local->tx_queue, pdu); 796 + 797 + spin_unlock(&local->tx_queue.lock); 783 798 784 799 remaining_len -= frag_len; 785 800 msg_ptr += frag_len;
+3 -1
net/nfc/llcp_core.c
··· 316 316 spin_lock(&llcp_devices_lock); 317 317 list_for_each_entry_safe(local, tmp, &llcp_devices, list) 318 318 if (local->dev == dev) { 319 - list_del(&local->list); 319 + spin_lock(&local->tx_queue.lock); 320 + list_del_init(&local->list); 321 + spin_unlock(&local->tx_queue.lock); 320 322 spin_unlock(&llcp_devices_lock); 321 323 return local; 322 324 }
+3 -1
net/nfc/nci/core.c
··· 1303 1303 { 1304 1304 struct nci_conn_info *conn_info, *n; 1305 1305 1306 + nfc_unregister_rfkill(ndev->nfc_dev); 1307 + 1306 1308 /* This set_bit is not protected with specialized barrier, 1307 1309 * However, it is fine because the mutex_lock(&ndev->req_lock); 1308 1310 * in nci_close_device() will help to emit one. ··· 1322 1320 /* conn_info is allocated with devm_kzalloc */ 1323 1321 } 1324 1322 1325 - nfc_unregister_device(ndev->nfc_dev); 1323 + nfc_remove_device(ndev->nfc_dev); 1326 1324 } 1327 1325 EXPORT_SYMBOL(nci_unregister_device); 1328 1326
+7
tools/testing/selftests/net/fcnal-test.sh
··· 2327 2327 log_test_addr ${a} $? 2 "ping local, device bind" 2328 2328 done 2329 2329 2330 + for a in ${NSA_LO_IP6} ${NSA_LINKIP6}%${NSA_DEV} ${NSA_IP6} 2331 + do 2332 + log_start 2333 + run_cmd ${ping6} -c1 -w1 -I ::1 ${a} 2334 + log_test_addr ${a} $? 0 "ping local, from localhost" 2335 + done 2336 + 2330 2337 # 2331 2338 # ip rule blocks address 2332 2339 #
+74 -7
tools/testing/selftests/net/mptcp/mptcp_join.sh
··· 2329 2329 ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1 2330 2330 speed=slow \ 2331 2331 run_tests $ns1 $ns2 10.0.1.1 2332 + chk_join_nr 3 3 3 2332 2333 2333 2334 # It is not directly linked to the commit introducing this 2334 2335 # symbol but for the parent one which is linked anyway. 2335 - if ! mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then 2336 - chk_join_nr 3 3 2 2337 - chk_add_nr 4 4 2338 - else 2339 - chk_join_nr 3 3 3 2336 + if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then 2340 2337 # the server will not signal the address terminating 2341 2338 # the MPC subflow 2342 2339 chk_add_nr 3 3 2340 + else 2341 + chk_add_nr 4 4 2343 2342 fi 2344 2343 fi 2345 2344 } ··· 3846 3847 fi 3847 3848 } 3848 3849 3849 - # $1: ns ; $2: event type ; $3: count 3850 + # $1: ns ; $2: event type ; $3: count ; [ $4: attr ; $5: attr count ] 3850 3851 chk_evt_nr() 3851 3852 { 3852 3853 local ns=${1} 3853 3854 local evt_name="${2}" 3854 3855 local exp="${3}" 3856 + local attr="${4}" 3857 + local attr_exp="${5}" 3855 3858 3856 3859 local evts="${evts_ns1}" 3857 3860 local evt="${!evt_name}" 3861 + local attr_name 3858 3862 local count 3863 + 3864 + if [ -n "${attr}" ]; then 3865 + attr_name=", ${attr}: ${attr_exp}" 3866 + fi 3859 3867 3860 3868 evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_ 3861 3869 [ "${ns}" == "ns2" ] && evts="${evts_ns2}" 3862 3870 3863 - print_check "event ${ns} ${evt_name} (${exp})" 3871 + print_check "event ${ns} ${evt_name} (${exp}${attr_name})" 3864 3872 3865 3873 if [[ "${evt_name}" = "LISTENER_"* ]] && 3866 3874 ! 
mptcp_lib_kallsyms_has "mptcp_event_pm_listener$"; then ··· 3878 3872 count=$(grep -cw "type:${evt}" "${evts}") 3879 3873 if [ "${count}" != "${exp}" ]; then 3880 3874 fail_test "got ${count} events, expected ${exp}" 3875 + cat "${evts}" 3876 + return 3877 + elif [ -z "${attr}" ]; then 3878 + print_ok 3879 + return 3880 + fi 3881 + 3882 + count=$(grep -w "type:${evt}" "${evts}" | grep -c ",${attr}:") 3883 + if [ "${count}" != "${attr_exp}" ]; then 3884 + fail_test "got ${count} event attributes, expected ${attr_exp}" 3885 + grep -w "type:${evt}" "${evts}" 3881 3886 else 3882 3887 print_ok 3883 3888 fi 3889 + } 3890 + 3891 + # $1: ns ; $2: event type ; $3: expected count 3892 + wait_event() 3893 + { 3894 + local ns="${1}" 3895 + local evt_name="${2}" 3896 + local exp="${3}" 3897 + 3898 + local evt="${!evt_name}" 3899 + local evts="${evts_ns1}" 3900 + local count 3901 + 3902 + [ "${ns}" == "ns2" ] && evts="${evts_ns2}" 3903 + 3904 + for _ in $(seq 100); do 3905 + count=$(grep -cw "type:${evt}" "${evts}") 3906 + [ "${count}" -ge "${exp}" ] && break 3907 + sleep 0.1 3908 + done 3884 3909 } 3885 3910 3886 3911 userspace_tests() ··· 4119 4082 chk_rst_nr 0 0 invert 4120 4083 chk_mptcp_info subflows 1 subflows 1 4121 4084 chk_subflows_total 1 1 4085 + kill_events_pids 4086 + mptcp_lib_kill_group_wait $tests_pid 4087 + fi 4088 + 4089 + # userspace pm no duplicated spurious close events after an error 4090 + if reset_with_events "userspace pm no dup close events after error" && 4091 + continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then 4092 + set_userspace_pm $ns2 4093 + pm_nl_set_limits $ns1 0 2 4094 + { timeout_test=120 test_linkfail=128 speed=slow \ 4095 + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null 4096 + local tests_pid=$! 
4097 + wait_event ns2 MPTCP_LIB_EVENT_ESTABLISHED 1 4098 + userspace_pm_add_sf $ns2 10.0.3.2 20 4099 + chk_mptcp_info subflows 1 subflows 1 4100 + chk_subflows_total 2 2 4101 + 4102 + # force quick loss 4103 + ip netns exec $ns2 sysctl -q net.ipv4.tcp_syn_retries=1 4104 + if ip netns exec "${ns1}" ${iptables} -A INPUT -s "10.0.1.2" \ 4105 + -p tcp --tcp-option 30 -j REJECT --reject-with tcp-reset && 4106 + ip netns exec "${ns2}" ${iptables} -A INPUT -d "10.0.1.2" \ 4107 + -p tcp --tcp-option 30 -j REJECT --reject-with tcp-reset; then 4108 + wait_event ns2 MPTCP_LIB_EVENT_SUB_CLOSED 1 4109 + wait_event ns1 MPTCP_LIB_EVENT_SUB_CLOSED 1 4110 + chk_subflows_total 1 1 4111 + userspace_pm_add_sf $ns2 10.0.1.2 0 4112 + wait_event ns2 MPTCP_LIB_EVENT_SUB_CLOSED 2 4113 + chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 2 error 2 4114 + fi 4122 4115 kill_events_pids 4123 4116 mptcp_lib_kill_group_wait $tests_pid 4124 4117 fi