Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge in late fixes to prepare for the 6.16 net-next PR.

No conflicts or adjacent changes.

Signed-off-by: Paolo Abeni <pabeni@redhat.com>

Paolo Abeni f6bd8fae acea6b13

+210 -61
+9 -1
drivers/net/ethernet/airoha/airoha_eth.c
··· 2883 2883 if (err) 2884 2884 return err; 2885 2885 2886 - return register_netdev(dev); 2886 + err = register_netdev(dev); 2887 + if (err) 2888 + goto free_metadata_dst; 2889 + 2890 + return 0; 2891 + 2892 + free_metadata_dst: 2893 + airoha_metadata_dst_free(port); 2894 + return err; 2887 2895 } 2888 2896 2889 2897 static int airoha_probe(struct platform_device *pdev)
+5 -1
drivers/net/ethernet/cadence/macb_main.c
··· 5283 5283 5284 5284 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT 5285 5285 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) { 5286 - dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); 5286 + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); 5287 + if (err) { 5288 + dev_err(&pdev->dev, "failed to set DMA mask\n"); 5289 + goto err_out_free_netdev; 5290 + } 5287 5291 bp->hw_dma_cap |= HW_DMA_CAP_64B; 5288 5292 } 5289 5293 #endif
+2
drivers/net/ethernet/marvell/octeontx2/af/mcs_rvu_if.c
··· 143 143 144 144 otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); 145 145 146 + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); 147 + 146 148 mutex_unlock(&rvu->mbox_lock); 147 149 148 150 return 0;
+2
drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
··· 272 272 273 273 otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pfid); 274 274 275 + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); 276 + 275 277 mutex_unlock(&rvu->mbox_lock); 276 278 } while (pfmap); 277 279 }
+2
drivers/net/ethernet/marvell/octeontx2/af/rvu_rep.c
··· 60 60 61 61 otx2_mbox_msg_send_up(&rvu->afpf_wq_info.mbox_up, pf); 62 62 63 + otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pf); 64 + 63 65 mutex_unlock(&rvu->mbox_lock); 64 66 return 0; 65 67 }
+1 -3
drivers/net/ethernet/marvell/octeontx2/nic/qos.c
··· 1638 1638 if (!node->is_static) 1639 1639 dwrr_del_node = true; 1640 1640 1641 + WRITE_ONCE(node->qid, OTX2_QOS_QID_INNER); 1641 1642 /* destroy the leaf node */ 1642 1643 otx2_qos_disable_sq(pfvf, qid); 1643 1644 otx2_qos_destroy_node(pfvf, node); ··· 1682 1681 return err; 1683 1682 } 1684 1683 kfree(new_cfg); 1685 - 1686 - /* update tx_real_queues */ 1687 - otx2_qos_update_tx_netdev_queues(pfvf); 1688 1684 1689 1685 return 0; 1690 1686 }
+22
drivers/net/ethernet/marvell/octeontx2/nic/qos_sq.c
··· 256 256 return err; 257 257 } 258 258 259 + static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf) 260 + { 261 + struct ndc_sync_op *req; 262 + int rc; 263 + 264 + mutex_lock(&pfvf->mbox.lock); 265 + 266 + req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox); 267 + if (!req) { 268 + mutex_unlock(&pfvf->mbox.lock); 269 + return -ENOMEM; 270 + } 271 + 272 + req->nix_lf_tx_sync = true; 273 + req->npa_lf_sync = true; 274 + rc = otx2_sync_mbox_msg(&pfvf->mbox); 275 + mutex_unlock(&pfvf->mbox.lock); 276 + return rc; 277 + } 278 + 259 279 void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx) 260 280 { 261 281 struct otx2_qset *qset = &pfvf->qset; ··· 305 285 306 286 otx2_qos_sqb_flush(pfvf, sq_idx); 307 287 otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx)); 288 + /* NIX/NPA NDC sync */ 289 + otx2_qos_nix_npa_ndc_sync(pfvf); 308 290 otx2_cleanup_tx_cqes(pfvf, cq); 309 291 310 292 mutex_lock(&pfvf->mbox.lock);
+12 -6
drivers/net/ethernet/mellanox/mlx5/core/vport.c
··· 465 465 { 466 466 u32 *out; 467 467 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); 468 + int err; 468 469 469 470 out = kvzalloc(outlen, GFP_KERNEL); 470 471 if (!out) 471 472 return -ENOMEM; 472 473 473 - mlx5_query_nic_vport_context(mdev, 0, out); 474 + err = mlx5_query_nic_vport_context(mdev, 0, out); 475 + if (err) 476 + goto out; 474 477 475 478 *node_guid = MLX5_GET64(query_nic_vport_context_out, out, 476 479 nic_vport_context.node_guid); 477 - 480 + out: 478 481 kvfree(out); 479 482 480 - return 0; 483 + return err; 481 484 } 482 485 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_node_guid); 483 486 ··· 522 519 { 523 520 u32 *out; 524 521 int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out); 522 + int err; 525 523 526 524 out = kvzalloc(outlen, GFP_KERNEL); 527 525 if (!out) 528 526 return -ENOMEM; 529 527 530 - mlx5_query_nic_vport_context(mdev, 0, out); 528 + err = mlx5_query_nic_vport_context(mdev, 0, out); 529 + if (err) 530 + goto out; 531 531 532 532 *qkey_viol_cntr = MLX5_GET(query_nic_vport_context_out, out, 533 533 nic_vport_context.qkey_violation_counter); 534 - 534 + out: 535 535 kvfree(out); 536 536 537 - return 0; 537 + return err; 538 538 } 539 539 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_qkey_viol_cntr); 540 540
+5 -10
drivers/net/ethernet/microchip/lan743x_main.c
··· 1330 1330 } 1331 1331 1332 1332 /* PHY */ 1333 - static int lan743x_phy_reset(struct lan743x_adapter *adapter) 1333 + static int lan743x_hw_reset_phy(struct lan743x_adapter *adapter) 1334 1334 { 1335 1335 u32 data; 1336 1336 ··· 1344 1344 (!(data & PMT_CTL_ETH_PHY_RST_) && 1345 1345 (data & PMT_CTL_READY_)), 1346 1346 50000, 1000000); 1347 - } 1348 - 1349 - static int lan743x_phy_init(struct lan743x_adapter *adapter) 1350 - { 1351 - return lan743x_phy_reset(adapter); 1352 1347 } 1353 1348 1354 1349 static void lan743x_phy_interface_select(struct lan743x_adapter *adapter) ··· 3529 3534 if (ret) 3530 3535 return ret; 3531 3536 3532 - ret = lan743x_phy_init(adapter); 3533 - if (ret) 3534 - return ret; 3535 - 3536 3537 ret = lan743x_ptp_init(adapter); 3537 3538 if (ret) 3538 3539 return ret; ··· 3662 3671 goto return_error; 3663 3672 3664 3673 ret = lan743x_csr_init(adapter); 3674 + if (ret) 3675 + goto cleanup_pci; 3676 + 3677 + ret = lan743x_hw_reset_phy(adapter); 3665 3678 if (ret) 3666 3679 goto cleanup_pci; 3667 3680
+6
drivers/net/ethernet/microchip/lan966x/lan966x_main.c
··· 353 353 lan966x_ifh_set(ifh, rew_op, IFH_POS_REW_CMD, IFH_WID_REW_CMD); 354 354 } 355 355 356 + static void lan966x_ifh_set_oam_type(void *ifh, u64 oam_type) 357 + { 358 + lan966x_ifh_set(ifh, oam_type, IFH_POS_PDU_TYPE, IFH_WID_PDU_TYPE); 359 + } 360 + 356 361 static void lan966x_ifh_set_timestamp(void *ifh, u64 timestamp) 357 362 { 358 363 lan966x_ifh_set(ifh, timestamp, IFH_POS_TIMESTAMP, IFH_WID_TIMESTAMP); ··· 385 380 return err; 386 381 387 382 lan966x_ifh_set_rew_op(ifh, LAN966X_SKB_CB(skb)->rew_op); 383 + lan966x_ifh_set_oam_type(ifh, LAN966X_SKB_CB(skb)->pdu_type); 388 384 lan966x_ifh_set_timestamp(ifh, LAN966X_SKB_CB(skb)->ts_id); 389 385 } 390 386
+5
drivers/net/ethernet/microchip/lan966x/lan966x_main.h
··· 75 75 #define IFH_REW_OP_ONE_STEP_PTP 0x3 76 76 #define IFH_REW_OP_TWO_STEP_PTP 0x4 77 77 78 + #define IFH_PDU_TYPE_NONE 0 79 + #define IFH_PDU_TYPE_IPV4 7 80 + #define IFH_PDU_TYPE_IPV6 8 81 + 78 82 #define FDMA_RX_DCB_MAX_DBS 1 79 83 #define FDMA_TX_DCB_MAX_DBS 1 80 84 ··· 258 254 259 255 struct lan966x_skb_cb { 260 256 u8 rew_op; 257 + u8 pdu_type; 261 258 u16 ts_id; 262 259 unsigned long jiffies; 263 260 };
+36 -13
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
··· 322 322 *cfg = phc->hwtstamp_config; 323 323 } 324 324 325 - static int lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb) 325 + static void lan966x_ptp_classify(struct lan966x_port *port, struct sk_buff *skb, 326 + u8 *rew_op, u8 *pdu_type) 326 327 { 327 328 struct ptp_header *header; 328 329 u8 msgtype; 329 330 int type; 330 331 331 - if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) 332 - return IFH_REW_OP_NOOP; 332 + if (port->ptp_tx_cmd == IFH_REW_OP_NOOP) { 333 + *rew_op = IFH_REW_OP_NOOP; 334 + *pdu_type = IFH_PDU_TYPE_NONE; 335 + return; 336 + } 333 337 334 338 type = ptp_classify_raw(skb); 335 - if (type == PTP_CLASS_NONE) 336 - return IFH_REW_OP_NOOP; 339 + if (type == PTP_CLASS_NONE) { 340 + *rew_op = IFH_REW_OP_NOOP; 341 + *pdu_type = IFH_PDU_TYPE_NONE; 342 + return; 343 + } 337 344 338 345 header = ptp_parse_header(skb, type); 339 - if (!header) 340 - return IFH_REW_OP_NOOP; 346 + if (!header) { 347 + *rew_op = IFH_REW_OP_NOOP; 348 + *pdu_type = IFH_PDU_TYPE_NONE; 349 + return; 350 + } 341 351 342 - if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) 343 - return IFH_REW_OP_TWO_STEP_PTP; 352 + if (type & PTP_CLASS_L2) 353 + *pdu_type = IFH_PDU_TYPE_NONE; 354 + if (type & PTP_CLASS_IPV4) 355 + *pdu_type = IFH_PDU_TYPE_IPV4; 356 + if (type & PTP_CLASS_IPV6) 357 + *pdu_type = IFH_PDU_TYPE_IPV6; 358 + 359 + if (port->ptp_tx_cmd == IFH_REW_OP_TWO_STEP_PTP) { 360 + *rew_op = IFH_REW_OP_TWO_STEP_PTP; 361 + return; 362 + } 344 363 345 364 /* If it is sync and run 1 step then set the correct operation, 346 365 * otherwise run as 2 step 347 366 */ 348 367 msgtype = ptp_get_msgtype(header, type); 349 - if ((msgtype & 0xf) == 0) 350 - return IFH_REW_OP_ONE_STEP_PTP; 368 + if ((msgtype & 0xf) == 0) { 369 + *rew_op = IFH_REW_OP_ONE_STEP_PTP; 370 + return; 371 + } 351 372 352 - return IFH_REW_OP_TWO_STEP_PTP; 373 + *rew_op = IFH_REW_OP_TWO_STEP_PTP; 353 374 } 354 375 355 376 static void lan966x_ptp_txtstamp_old_release(struct lan966x_port *port) ··· 395 374 
{ 396 375 struct lan966x *lan966x = port->lan966x; 397 376 unsigned long flags; 377 + u8 pdu_type; 398 378 u8 rew_op; 399 379 400 - rew_op = lan966x_ptp_classify(port, skb); 380 + lan966x_ptp_classify(port, skb, &rew_op, &pdu_type); 401 381 LAN966X_SKB_CB(skb)->rew_op = rew_op; 382 + LAN966X_SKB_CB(skb)->pdu_type = pdu_type; 402 383 403 384 if (rew_op != IFH_REW_OP_TWO_STEP_PTP) 404 385 return 0;
+3 -3
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
··· 880 880 dev_consume_skb_any(skbuf_dma->skb); 881 881 netif_txq_completed_wake(txq, 1, len, 882 882 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), 883 - 2 * MAX_SKB_FRAGS); 883 + 2); 884 884 } 885 885 886 886 /** ··· 914 914 915 915 dma_dev = lp->tx_chan->device; 916 916 sg_len = skb_shinfo(skb)->nr_frags + 1; 917 - if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) { 917 + if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) { 918 918 netif_stop_queue(ndev); 919 919 if (net_ratelimit()) 920 920 netdev_warn(ndev, "TX ring unexpectedly full\n"); ··· 964 964 txq = skb_get_tx_queue(lp->ndev, skb); 965 965 netdev_tx_sent_queue(txq, skb->len); 966 966 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), 967 - MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS); 967 + 1, 2); 968 968 969 969 dmaengine_submit(dma_tx_desc); 970 970 dma_async_issue_pending(lp->tx_chan);
+2
drivers/net/mctp/mctp-usb.c
··· 257 257 258 258 WRITE_ONCE(mctp_usb->stopped, false); 259 259 260 + netif_start_queue(dev); 261 + 260 262 return mctp_usb_rx_queue(mctp_usb, GFP_KERNEL); 261 263 } 262 264
+14 -6
drivers/net/phy/mscc/mscc_ptp.c
··· 946 946 /* UDP checksum offset in IPv4 packet 947 947 * according to: https://tools.ietf.org/html/rfc768 948 948 */ 949 - val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26) | IP1_NXT_PROT_UDP_CHKSUM_CLEAR; 949 + val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26); 950 + if (enable) 951 + val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR; 950 952 vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM, 951 953 val); 952 954 ··· 1168 1166 container_of(mii_ts, struct vsc8531_private, mii_ts); 1169 1167 1170 1168 if (!vsc8531->ptp->configured) 1171 - return; 1169 + goto out; 1172 1170 1173 - if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) { 1174 - kfree_skb(skb); 1175 - return; 1176 - } 1171 + if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF) 1172 + goto out; 1173 + 1174 + if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC) 1175 + if (ptp_msg_is_sync(skb, type)) 1176 + goto out; 1177 1177 1178 1178 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; 1179 1179 1180 1180 mutex_lock(&vsc8531->ts_lock); 1181 1181 __skb_queue_tail(&vsc8531->ptp->tx_queue, skb); 1182 1182 mutex_unlock(&vsc8531->ts_lock); 1183 + return; 1184 + 1185 + out: 1186 + kfree_skb(skb); 1183 1187 } 1184 1188 1185 1189 static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
+3 -1
drivers/net/phy/phy_device.c
··· 1749 1749 struct module *ndev_owner = NULL; 1750 1750 struct mii_bus *bus; 1751 1751 1752 - if (phydev->devlink) 1752 + if (phydev->devlink) { 1753 1753 device_link_del(phydev->devlink); 1754 + phydev->devlink = NULL; 1755 + } 1754 1756 1755 1757 if (phydev->sysfs_links) { 1756 1758 if (dev)
+1
include/linux/virtio_vsock.h
··· 140 140 u32 last_fwd_cnt; 141 141 u32 rx_bytes; 142 142 u32 buf_alloc; 143 + u32 buf_used; 143 144 struct sk_buff_head rx_queue; 144 145 u32 msg_count; 145 146 };
+3
net/netlabel/netlabel_kapi.c
··· 1165 1165 break; 1166 1166 #if IS_ENABLED(CONFIG_IPV6) 1167 1167 case AF_INET6: 1168 + if (sk->sk_family != AF_INET6) 1169 + return -EAFNOSUPPORT; 1170 + 1168 1171 addr6 = (struct sockaddr_in6 *)addr; 1169 1172 entry = netlbl_domhsh_getentry_af6(secattr->domain, 1170 1173 &addr6->sin6_addr);
+1 -1
net/openvswitch/flow.c
··· 788 788 memset(&key->ipv4, 0, sizeof(key->ipv4)); 789 789 } 790 790 } else if (eth_p_mpls(key->eth.type)) { 791 - u8 label_count = 1; 791 + size_t label_count = 1; 792 792 793 793 memset(&key->mpls, 0, sizeof(key->mpls)); 794 794 skb_set_inner_network_header(skb, skb->mac_len);
+16 -5
net/packet/af_packet.c
··· 3713 3713 } 3714 3714 3715 3715 static void packet_dev_mclist_delete(struct net_device *dev, 3716 - struct packet_mclist **mlp) 3716 + struct packet_mclist **mlp, 3717 + struct list_head *list) 3717 3718 { 3718 3719 struct packet_mclist *ml; 3719 3720 3720 3721 while ((ml = *mlp) != NULL) { 3721 3722 if (ml->ifindex == dev->ifindex) { 3722 - packet_dev_mc(dev, ml, -1); 3723 + list_add(&ml->remove_list, list); 3723 3724 *mlp = ml->next; 3724 - kfree(ml); 3725 3725 } else 3726 3726 mlp = &ml->next; 3727 3727 } ··· 3769 3769 memcpy(i->addr, mreq->mr_address, i->alen); 3770 3770 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen); 3771 3771 i->count = 1; 3772 + INIT_LIST_HEAD(&i->remove_list); 3772 3773 i->next = po->mclist; 3773 3774 po->mclist = i; 3774 3775 err = packet_dev_mc(dev, i, 1); ··· 4234 4233 static int packet_notifier(struct notifier_block *this, 4235 4234 unsigned long msg, void *ptr) 4236 4235 { 4237 - struct sock *sk; 4238 4236 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 4239 4237 struct net *net = dev_net(dev); 4238 + struct packet_mclist *ml, *tmp; 4239 + LIST_HEAD(mclist); 4240 + struct sock *sk; 4240 4241 4241 4242 rcu_read_lock(); 4242 4243 sk_for_each_rcu(sk, &net->packet.sklist) { ··· 4247 4244 switch (msg) { 4248 4245 case NETDEV_UNREGISTER: 4249 4246 if (po->mclist) 4250 - packet_dev_mclist_delete(dev, &po->mclist); 4247 + packet_dev_mclist_delete(dev, &po->mclist, 4248 + &mclist); 4251 4249 fallthrough; 4252 4250 4253 4251 case NETDEV_DOWN: ··· 4281 4277 } 4282 4278 } 4283 4279 rcu_read_unlock(); 4280 + 4281 + /* packet_dev_mc might grab instance locks so can't run under rcu */ 4282 + list_for_each_entry_safe(ml, tmp, &mclist, remove_list) { 4283 + packet_dev_mc(dev, ml, -1); 4284 + kfree(ml); 4285 + } 4286 + 4284 4287 return NOTIFY_DONE; 4285 4288 } 4286 4289
+1
net/packet/internal.h
··· 11 11 unsigned short type; 12 12 unsigned short alen; 13 13 unsigned char addr[MAX_ADDR_LEN]; 14 + struct list_head remove_list; 14 15 }; 15 16 16 17 /* kbdq - kernel block descriptor queue */
+8 -1
net/sched/sch_hfsc.c
··· 175 175 176 176 #define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */ 177 177 178 + static bool cl_in_el_or_vttree(struct hfsc_class *cl) 179 + { 180 + return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) || 181 + ((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node)); 182 + } 178 183 179 184 /* 180 185 * eligible tree holds backlogged classes being sorted by their eligible times. ··· 1045 1040 if (cl == NULL) 1046 1041 return -ENOBUFS; 1047 1042 1043 + RB_CLEAR_NODE(&cl->el_node); 1044 + 1048 1045 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); 1049 1046 if (err) { 1050 1047 kfree(cl); ··· 1579 1572 sch->qstats.backlog += len; 1580 1573 sch->q.qlen++; 1581 1574 1582 - if (first && !cl->cl_nactive) { 1575 + if (first && !cl_in_el_or_vttree(cl)) { 1583 1576 if (cl->cl_flags & HFSC_RSC) 1584 1577 init_ed(cl, len); 1585 1578 if (cl->cl_flags & HFSC_FSC)
+16 -10
net/vmw_vsock/virtio_transport_common.c
··· 440 440 static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs, 441 441 u32 len) 442 442 { 443 - if (vvs->rx_bytes + len > vvs->buf_alloc) 443 + if (vvs->buf_used + len > vvs->buf_alloc) 444 444 return false; 445 445 446 446 vvs->rx_bytes += len; 447 + vvs->buf_used += len; 447 448 return true; 448 449 } 449 450 450 451 static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs, 451 - u32 len) 452 + u32 bytes_read, u32 bytes_dequeued) 452 453 { 453 - vvs->rx_bytes -= len; 454 - vvs->fwd_cnt += len; 454 + vvs->rx_bytes -= bytes_read; 455 + vvs->buf_used -= bytes_dequeued; 456 + vvs->fwd_cnt += bytes_dequeued; 455 457 } 456 458 457 459 void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct sk_buff *skb) ··· 582 580 size_t len) 583 581 { 584 582 struct virtio_vsock_sock *vvs = vsk->trans; 585 - size_t bytes, total = 0; 586 583 struct sk_buff *skb; 587 584 u32 fwd_cnt_delta; 588 585 bool low_rx_bytes; 589 586 int err = -EFAULT; 587 + size_t total = 0; 590 588 u32 free_space; 591 589 592 590 spin_lock_bh(&vvs->rx_lock); ··· 598 596 } 599 597 600 598 while (total < len && !skb_queue_empty(&vvs->rx_queue)) { 599 + size_t bytes, dequeued = 0; 600 + 601 601 skb = skb_peek(&vvs->rx_queue); 602 602 603 603 bytes = min_t(size_t, len - total, ··· 623 619 VIRTIO_VSOCK_SKB_CB(skb)->offset += bytes; 624 620 625 621 if (skb->len == VIRTIO_VSOCK_SKB_CB(skb)->offset) { 626 - u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len); 627 - 628 - virtio_transport_dec_rx_pkt(vvs, pkt_len); 622 + dequeued = le32_to_cpu(virtio_vsock_hdr(skb)->len); 629 623 __skb_unlink(skb, &vvs->rx_queue); 630 624 consume_skb(skb); 631 625 } 626 + 627 + virtio_transport_dec_rx_pkt(vvs, bytes, dequeued); 632 628 } 633 629 634 630 fwd_cnt_delta = vvs->fwd_cnt - vvs->last_fwd_cnt; ··· 784 780 msg->msg_flags |= MSG_EOR; 785 781 } 786 782 787 - virtio_transport_dec_rx_pkt(vvs, pkt_len); 783 + virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len); 788 784 kfree_skb(skb); 
789 785 } 790 786 ··· 1721 1717 struct sock *sk = sk_vsock(vsk); 1722 1718 struct virtio_vsock_hdr *hdr; 1723 1719 struct sk_buff *skb; 1720 + u32 pkt_len; 1724 1721 int off = 0; 1725 1722 int err; 1726 1723 ··· 1739 1734 if (le32_to_cpu(hdr->flags) & VIRTIO_VSOCK_SEQ_EOM) 1740 1735 vvs->msg_count--; 1741 1736 1742 - virtio_transport_dec_rx_pkt(vvs, le32_to_cpu(hdr->len)); 1737 + pkt_len = le32_to_cpu(hdr->len); 1738 + virtio_transport_dec_rx_pkt(vvs, pkt_len, pkt_len); 1743 1739 spin_unlock_bh(&vvs->rx_lock); 1744 1740 1745 1741 virtio_transport_send_credit_update(vsk);
+35
tools/testing/selftests/tc-testing/tc-tests/infra/qdiscs.json
··· 600 600 "matchPattern": "qdisc hfsc", 601 601 "matchCount": "1", 602 602 "teardown": ["$TC qdisc del dev $DEV1 root handle 1: drr"] 603 + }, 604 + { 605 + "id": "309e", 606 + "name": "Test HFSC eltree double add with reentrant enqueue behaviour on netem", 607 + "category": [ 608 + "qdisc", 609 + "hfsc" 610 + ], 611 + "plugins": { 612 + "requires": "nsPlugin" 613 + }, 614 + "setup": [ 615 + "$IP link set dev $DUMMY up || true", 616 + "$IP addr add 10.10.11.10/24 dev $DUMMY || true", 617 + "$TC qdisc add dev $DUMMY root handle 1: tbf rate 8bit burst 100b latency 1s", 618 + "$TC qdisc add dev $DUMMY parent 1:0 handle 2:0 hfsc", 619 + "ping -I $DUMMY -f -c10 -s48 -W0.001 10.10.11.1 || true", 620 + "$TC class add dev $DUMMY parent 2:0 classid 2:1 hfsc rt m2 20Kbit", 621 + "$TC qdisc add dev $DUMMY parent 2:1 handle 3:0 netem duplicate 100%", 622 + "$TC class add dev $DUMMY parent 2:0 classid 2:2 hfsc rt m2 20Kbit", 623 + "$TC filter add dev $DUMMY parent 2:0 protocol ip prio 1 u32 match ip dst 10.10.11.2/32 flowid 2:1", 624 + "$TC filter add dev $DUMMY parent 2:0 protocol ip prio 2 u32 match ip dst 10.10.11.3/32 flowid 2:2", 625 + "ping -c 1 10.10.11.2 -I$DUMMY > /dev/null || true", 626 + "$TC filter del dev $DUMMY parent 2:0 protocol ip prio 1", 627 + "$TC class del dev $DUMMY classid 2:1", 628 + "ping -c 1 10.10.11.3 -I$DUMMY > /dev/null || true" 629 + ], 630 + "cmdUnderTest": "$TC class change dev $DUMMY parent 2:0 classid 2:2 hfsc sc m2 20Kbit", 631 + "expExitCode": "0", 632 + "verifyCmd": "$TC -j class ls dev $DUMMY classid 2:1", 633 + "matchJSON": [], 634 + "teardown": [ 635 + "$TC qdisc del dev $DUMMY handle 1:0 root", 636 + "$IP addr del 10.10.10.10/24 dev $DUMMY || true" 637 + ] 603 638 } 604 639 ]