
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Merge in late fixes in preparation for the net-next PR.

Conflicts:

include/net/sch_generic.h
a6bd339dbb351 ("net_sched: fix skb memory leak in deferred qdisc drops")
ff2998f29f390 ("net: sched: introduce qdisc-specific drop reason tracing")
https://lore.kernel.org/adz0iX85FHMz0HdO@sirena.org.uk

drivers/net/ethernet/airoha/airoha_eth.c
1acdfbdb516b ("net: airoha: Fix VIP configuration for AN7583 SoC")
bf3471e6e6c0 ("net: airoha: Make flow control source port mapping dependent on nbq parameter")

Adjacent changes:

drivers/net/ethernet/airoha/airoha_ppe.c
f44218cd5e6a ("net: airoha: Reset PPE cpu port configuration in airoha_ppe_hw_init()")
7da62262ec96 ("inet: add ip_local_port_step_width sysctl to improve port usage distribution")

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+1204 -250
+10
CREDITS
···
3592 3592 E: wsalamon@nai.com
3593 3593 D: portions of the Linux Security Module (LSM) framework and security modules
3594 3594
3595 + N: Salil Mehta
3596 + E: salil.mehta@opnsrc.net
3597 + D: Co-authored Huawei/HiSilicon Kunpeng 920 SoC HNS3 PF and VF 100G
3598 + D: Ethernet driver
3599 + D: Co-authored Huawei/HiSilicon Kunpeng 916 SoC HNS 10G Ethernet
3600 + D: driver enhancements
3601 + D: Maintained Huawei/HiSilicon HNS and HNS3 10G/100G Ethernet drivers
3602 + D: for Kunpeng 916 family, 920 family of SoCs
3603 + S: Cambridge, Cambridgeshire, United Kingdom
3604 +
3595 3605 N: Robert Sanders
3596 3606 E: gt8134b@prism.gatech.edu
3597 3607 D: Dosemu
-2
MAINTAINERS
···
11534 11534
11535 11535 HISILICON NETWORK SUBSYSTEM 3 DRIVER (HNS3)
11536 11536 M: Jian Shen <shenjian15@huawei.com>
11537 - M: Salil Mehta <salil.mehta@huawei.com>
11538 11537 M: Jijie Shao <shaojijie@huawei.com>
11539 11538 L: netdev@vger.kernel.org
11540 11539 S: Maintained
···
11548 11549
11549 11550 HISILICON NETWORK SUBSYSTEM DRIVER
11550 11551 M: Jian Shen <shenjian15@huawei.com>
11551 - M: Salil Mehta <salil.mehta@huawei.com>
11552 11552 L: netdev@vger.kernel.org
11553 11553 S: Maintained
11554 11554 W: http://www.hisilicon.com
+1 -1
drivers/net/can/usb/ucan.c
···
1379 1379 */
1380 1380
1381 1381 /* Prepare Memory for control transfers */
1382 - ctl_msg_buffer = devm_kzalloc(&udev->dev,
1382 + ctl_msg_buffer = devm_kzalloc(&intf->dev,
1383 1383 sizeof(union ucan_ctl_payload),
1384 1384 GFP_KERNEL);
1385 1385 if (!ctl_msg_buffer) {
+70 -27
drivers/net/ethernet/airoha/airoha_eth.c
···
107 107 struct airoha_eth *eth = port->qdma->eth;
108 108 u32 vip_port;
109 109
110 - switch (port->id) {
111 - case AIROHA_GDM3_IDX:
112 - /* FIXME: handle XSI_PCIE1_PORT */
113 - vip_port = XSI_PCIE0_VIP_PORT_MASK;
114 - break;
115 - case AIROHA_GDM4_IDX:
116 - /* FIXME: handle XSI_USB_PORT */
117 - vip_port = XSI_ETH_VIP_PORT_MASK;
118 - break;
119 - default:
120 - return 0;
121 - }
122 -
110 + vip_port = eth->soc->ops.get_vip_port(port, port->nbq);
123 111 if (enable) {
124 112 airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port);
125 113 airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port);
···
281 293 [FE_PSE_PORT_GDM4] = 2,
282 294 [FE_PSE_PORT_CDM5] = 2,
283 295 };
284 - u32 all_rsv;
285 296 int q;
286 297
287 - all_rsv = airoha_fe_get_pse_all_rsv(eth);
288 298 if (airoha_ppe_is_enabled(eth, 1)) {
299 + u32 all_rsv;
300 +
289 301 /* hw misses PPE2 oq rsv */
302 + all_rsv = airoha_fe_get_pse_all_rsv(eth);
290 303 all_rsv += PSE_RSV_PAGES *
291 304 pse_port_num_queues[FE_PSE_PORT_PPE2];
305 + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK,
306 + FIELD_PREP(PSE_ALLRSV_MASK, all_rsv));
292 307 }
293 - airoha_fe_set(eth, REG_FE_PSE_BUF_SET, all_rsv);
294 308
295 309 /* CMD1 */
296 310 for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++)
···
574 584 static int airoha_qdma_get_gdm_port(struct airoha_eth *eth,
575 585 struct airoha_qdma_desc *desc)
576 586 {
577 - u32 port, sport, msg1 = le32_to_cpu(desc->msg1);
587 + u32 port, sport, msg1 = le32_to_cpu(READ_ONCE(desc->msg1));
578 588
579 589 sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1);
580 590 switch (sport) {
···
602 612 while (done < budget) {
603 613 struct airoha_queue_entry *e = &q->entry[q->tail];
604 614 struct airoha_qdma_desc *desc = &q->desc[q->tail];
605 - u32 hash, reason, msg1 = le32_to_cpu(desc->msg1);
606 - struct page *page = virt_to_head_page(e->buf);
607 - u32 desc_ctrl = le32_to_cpu(desc->ctrl);
615 + u32 hash, reason, msg1, desc_ctrl;
608 616 struct airoha_gdm_port *port;
609 617 int data_len, len, p;
618 + struct page *page;
610 619
620 + desc_ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
611 621 if (!(desc_ctrl & QDMA_DESC_DONE_MASK))
612 622 break;
623 +
624 + dma_rmb();
613 625
614 626 q->tail = (q->tail + 1) % q->ndesc;
615 627 q->queued--;
···
619 627 dma_sync_single_for_cpu(eth->dev, e->dma_addr,
620 628 SKB_WITH_OVERHEAD(q->buf_size), dir);
621 629
630 + page = virt_to_head_page(e->buf);
622 631 len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl);
623 632 data_len = q->skb ? q->buf_size
624 633 : SKB_WITH_OVERHEAD(q->buf_size);
···
663 670 * DMA descriptor. Report DSA tag to the DSA stack
664 671 * via skb dst info.
665 672 */
666 - u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG,
667 - le32_to_cpu(desc->msg0));
673 + u32 msg0 = le32_to_cpu(READ_ONCE(desc->msg0));
674 + u32 sptag = FIELD_GET(QDMA_ETH_RXMSG_SPTAG, msg0);
668 675
669 676 if (sptag < ARRAY_SIZE(port->dsa_meta) &&
670 677 port->dsa_meta[sptag])
···
672 679 &port->dsa_meta[sptag]->dst);
673 680 }
674 681
682 + msg1 = le32_to_cpu(READ_ONCE(desc->msg1));
675 683 hash = FIELD_GET(AIROHA_RXD4_FOE_ENTRY, msg1);
676 684 if (hash != AIROHA_RXD4_FOE_ENTRY)
677 685 skb_set_hash(q->skb, jhash_1word(hash, 0),
···
813 819 }
814 820
815 821 q->head = q->tail;
822 + /* Set RX_DMA_IDX to RX_CPU_IDX to notify the hw the QDMA RX ring is
823 + * empty.
824 + */
825 + airoha_qdma_rmw(qdma, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK,
826 + FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head));
816 827 airoha_qdma_rmw(qdma, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK,
817 828 FIELD_PREP(RX_RING_DMA_IDX_MASK, q->tail));
818 829 }
···
1726 1727 static int airoha_set_gdm2_loopback(struct airoha_gdm_port *port)
1727 1728 {
1728 1729 struct airoha_eth *eth = port->qdma->eth;
1729 - u32 val, pse_port, chan, nbq;
1730 + u32 val, pse_port, chan;
1730 1731 int src_port;
1731 1732
1732 1733 /* Forward the traffic to the proper GDM port */
···
1756 1757 airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, BIT(AIROHA_GDM2_IDX));
1757 1758 airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, BIT(AIROHA_GDM2_IDX));
1758 1759
1759 - /* XXX: handle XSI_USB_PORT and XSI_PCE1_PORT */
1760 - nbq = port->id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
1761 - src_port = eth->soc->ops.get_src_port_id(port, nbq);
1760 + src_port = eth->soc->ops.get_src_port_id(port, port->nbq);
1762 1761 if (src_port < 0)
1763 1762 return src_port;
1764 1763
···
1770 1773 __field_prep(SP_CPORT_MASK(val), FE_PSE_PORT_CDM2));
1771 1774
1772 1775 if (port->id == AIROHA_GDM4_IDX && airoha_is_7581(eth)) {
1773 - u32 mask = FC_ID_OF_SRC_PORT_MASK(nbq);
1776 + u32 mask = FC_ID_OF_SRC_PORT_MASK(port->nbq);
1774 1777
1775 1778 airoha_fe_rmw(eth, REG_SRC_PORT_FC_MAP6, mask,
1776 1779 __field_prep(mask, AIROHA_GDM2_IDX));
···
2949 2952 port->eth = eth;
2950 2953 port->dev = dev;
2951 2954 port->id = id;
2955 + /* XXX: Read nbq from DTS */
2956 + port->nbq = id == AIROHA_GDM3_IDX && airoha_is_7581(eth) ? 4 : 0;
2952 2957 eth->ports[p] = port;
2953 2958
2954 2959 return airoha_metadata_dst_alloc(port);
···
3147 3148 return -EINVAL;
3148 3149 }
3149 3150
3151 + static u32 airoha_en7581_get_vip_port(struct airoha_gdm_port *port, int nbq)
3152 + {
3153 + switch (port->id) {
3154 + case AIROHA_GDM3_IDX:
3155 + if (nbq == 4)
3156 + return XSI_PCIE0_VIP_PORT_MASK;
3157 + if (nbq == 5)
3158 + return XSI_PCIE1_VIP_PORT_MASK;
3159 + break;
3160 + case AIROHA_GDM4_IDX:
3161 + if (!nbq)
3162 + return XSI_ETH_VIP_PORT_MASK;
3163 + if (nbq == 1)
3164 + return XSI_USB_VIP_PORT_MASK;
3165 + break;
3166 + default:
3167 + break;
3168 + }
3169 +
3170 + return 0;
3171 + }
3172 +
3150 3173 static const char * const an7583_xsi_rsts_names[] = {
3151 3174 "xsi-mac",
3152 3175 "hsi0-mac",
···
3198 3177 return -EINVAL;
3199 3178 }
3200 3179
3180 + static u32 airoha_an7583_get_vip_port(struct airoha_gdm_port *port, int nbq)
3181 + {
3182 + switch (port->id) {
3183 + case AIROHA_GDM3_IDX:
3184 + if (!nbq)
3185 + return XSI_ETH_VIP_PORT_MASK;
3186 + break;
3187 + case AIROHA_GDM4_IDX:
3188 + if (!nbq)
3189 + return XSI_PCIE0_VIP_PORT_MASK;
3190 + if (nbq == 1)
3191 + return XSI_USB_VIP_PORT_MASK;
3192 + break;
3193 + default:
3194 + break;
3195 + }
3196 +
3197 + return 0;
3198 + }
3199 +
3201 3200 static const struct airoha_eth_soc_data en7581_soc_data = {
3202 3201 .version = 0x7581,
3203 3202 .xsi_rsts_names = en7581_xsi_rsts_names,
···
3225 3184 .num_ppe = 2,
3226 3185 .ops = {
3227 3186 .get_src_port_id = airoha_en7581_get_src_port_id,
3187 + .get_vip_port = airoha_en7581_get_vip_port,
3228 3188 },
3229 3189 };
···
3236 3194 .num_ppe = 1,
3237 3195 .ops = {
3238 3196 .get_src_port_id = airoha_an7583_get_src_port_id,
3197 + .get_vip_port = airoha_an7583_get_vip_port,
3239 3198 },
3240 3199 };
3241 3200
+2
drivers/net/ethernet/airoha/airoha_eth.h
···
537 537 struct airoha_eth *eth;
538 538 struct net_device *dev;
539 539 int id;
540 + int nbq;
540 541
541 542 struct airoha_hw_stats stats;
542 543
···
578 577 int num_ppe;
579 578 struct {
580 579 int (*get_src_port_id)(struct airoha_gdm_port *port, int nbq);
580 + u32 (*get_vip_port)(struct airoha_gdm_port *port, int nbq);
581 581 } ops;
582 582 };
583 583
+11 -3
drivers/net/ethernet/airoha/airoha_ppe.c
···
125 125 airoha_fe_rmw(eth, REG_PPE_BND_AGE0(i),
126 126 PPE_BIND_AGE0_DELTA_NON_L4 |
127 127 PPE_BIND_AGE0_DELTA_UDP,
128 - FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 1) |
129 - FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 12));
128 + FIELD_PREP(PPE_BIND_AGE0_DELTA_NON_L4, 60) |
129 + FIELD_PREP(PPE_BIND_AGE0_DELTA_UDP, 60));
130 130 airoha_fe_rmw(eth, REG_PPE_BND_AGE1(i),
131 131 PPE_BIND_AGE1_DELTA_TCP_FIN |
132 132 PPE_BIND_AGE1_DELTA_TCP,
133 133 FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
134 - FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 7));
134 + FIELD_PREP(PPE_BIND_AGE1_DELTA_TCP, 60));
135 135
136 136 airoha_fe_rmw(eth, REG_PPE_TB_HASH_CFG(i),
137 137 PPE_SRAM_TABLE_EN_MASK |
···
159 159 FIELD_PREP(PPE_DRAM_TB_NUM_ENTRY_MASK,
160 160 dram_num_entries));
161 161
162 + airoha_fe_rmw(eth, REG_PPE_BIND_RATE(i),
163 + PPE_BIND_RATE_L2B_BIND_MASK |
164 + PPE_BIND_RATE_BIND_MASK,
165 + FIELD_PREP(PPE_BIND_RATE_L2B_BIND_MASK, 0x1e) |
166 + FIELD_PREP(PPE_BIND_RATE_BIND_MASK, 0x1e));
167 +
162 168 airoha_fe_wr(eth, REG_PPE_HASH_SEED(i), PPE_HASH_SEED);
169 + airoha_fe_clear(eth, REG_PPE_PPE_FLOW_CFG(i),
170 + PPE_FLOW_CFG_IP6_6RD_MASK);
163 171
164 172 for (p = 0; p < ARRAY_SIZE(eth->ports); p++) {
165 173 struct airoha_gdm_port *port = eth->ports[p];
+1
drivers/net/ethernet/broadcom/bnge/bnge_auxr.c
···
194 194 dev_warn(bd->dev, "Failed to add auxiliary device for ROCE\n");
195 195 auxiliary_device_uninit(aux_dev);
196 196 bd->flags &= ~BNGE_EN_ROCE;
197 + return;
197 198 }
198 199
199 200 bd->auxr_dev->net = bd->netdev;
+14 -16
drivers/net/ethernet/broadcom/genet/bcmgenet.c
···
1819 1819 {
1820 1820 struct enet_cb *tx_cb_ptr;
1821 1821
1822 - tx_cb_ptr = ring->cbs;
1823 - tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1824 -
1825 1822 /* Rewinding local write pointer */
1826 1823 if (ring->write_ptr == ring->cb_ptr)
1827 1824 ring->write_ptr = ring->end_ptr;
1828 1825 else
1829 1826 ring->write_ptr--;
1827 +
1828 + tx_cb_ptr = ring->cbs;
1829 + tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
1830 1830
1831 1831 return tx_cb_ptr;
1832 1832 }
···
1985 1985 drop = (ring->prod_index - ring->c_index) & DMA_C_INDEX_MASK;
1986 1986 released += drop;
1987 1987 ring->prod_index = ring->c_index & DMA_C_INDEX_MASK;
1988 + ring->free_bds += drop;
1988 1989 while (drop--) {
1989 1990 cb_ptr = bcmgenet_put_txcb(priv, ring);
1990 1991 skb = cb_ptr->skb;
···
1997 1996 }
1998 1997 if (skb)
1999 1998 dev_consume_skb_any(skb);
1999 + netdev_tx_reset_queue(netdev_get_tx_queue(dev, ring->index));
2000 2000 bcmgenet_tdma_ring_writel(priv, ring->index,
2001 2001 ring->prod_index, TDMA_PROD_INDEX);
2002 2002 wr_ptr = ring->write_ptr * WORDS_PER_BD(priv);
···
3477 3475 static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
3478 3476 {
3479 3477 struct bcmgenet_priv *priv = netdev_priv(dev);
3480 - u32 int1_enable = 0;
3481 - unsigned int q;
3478 + struct bcmgenet_tx_ring *ring = &priv->tx_rings[txqueue];
3479 + struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);
3482 3480
3483 3481 netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
3484 3482
3485 - for (q = 0; q <= priv->hw_params->tx_queues; q++)
3486 - bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
3483 + bcmgenet_dump_tx_queue(ring);
3487 3484
3488 - bcmgenet_tx_reclaim_all(dev);
3485 + bcmgenet_tx_reclaim(dev, ring, true);
3489 3486
3490 - for (q = 0; q <= priv->hw_params->tx_queues; q++)
3491 - int1_enable |= (1 << q);
3487 + /* Re-enable the TX interrupt for this ring */
3488 + bcmgenet_intrl2_1_writel(priv, 1 << txqueue, INTRL2_CPU_MASK_CLEAR);
3492 3489
3493 - /* Re-enable TX interrupts if disabled */
3494 - bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);
3490 + txq_trans_cond_update(txq);
3495 3491
3496 - netif_trans_update(dev);
3492 + BCMGENET_STATS64_INC((&ring->stats64), errors);
3497 3493
3498 - BCMGENET_STATS64_INC((&priv->tx_rings[txqueue].stats64), errors);
3499 -
3500 - netif_tx_wake_all_queues(dev);
3494 + netif_tx_wake_queue(txq);
3501 3495 }
3502 3496
3503 3497 #define MAX_MDF_FILTER 17
+36 -11
drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
···
352 352 return new_pkts;
353 353 }
354 354
355 + static inline u32 octep_vf_oq_next_idx(struct octep_vf_oq *oq, u32 idx)
356 + {
357 + return (idx + 1 == oq->max_count) ? 0 : idx + 1;
358 + }
359 +
355 360 /**
356 361 * __octep_vf_oq_process_rx() - Process hardware Rx queue and push to stack.
357 362 *
···
414 409 data_offset = OCTEP_VF_OQ_RESP_HW_SIZE;
415 410 rx_ol_flags = 0;
416 411 }
417 - rx_bytes += buff_info->len;
418 -
419 412 if (buff_info->len <= oq->max_single_buffer_size) {
420 413 skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
414 + if (!skb) {
415 + oq->stats->alloc_failures++;
416 + desc_used++;
417 + read_idx = octep_vf_oq_next_idx(oq, read_idx);
418 + continue;
419 + }
420 + rx_bytes += buff_info->len;
421 421 skb_reserve(skb, data_offset);
422 422 skb_put(skb, buff_info->len);
423 - read_idx++;
424 423 desc_used++;
425 - if (read_idx == oq->max_count)
426 - read_idx = 0;
424 + read_idx = octep_vf_oq_next_idx(oq, read_idx);
427 425 } else {
428 426 struct skb_shared_info *shinfo;
429 427 u16 data_len;
430 428
431 429 skb = napi_build_skb((void *)resp_hw, PAGE_SIZE);
430 + if (!skb) {
431 + oq->stats->alloc_failures++;
432 + desc_used++;
433 + read_idx = octep_vf_oq_next_idx(oq, read_idx);
434 + data_len = buff_info->len - oq->max_single_buffer_size;
435 + while (data_len) {
436 + dma_unmap_page(oq->dev, oq->desc_ring[read_idx].buffer_ptr,
437 + PAGE_SIZE, DMA_FROM_DEVICE);
438 + buff_info = (struct octep_vf_rx_buffer *)
439 + &oq->buff_info[read_idx];
440 + buff_info->page = NULL;
441 + if (data_len < oq->buffer_size)
442 + data_len = 0;
443 + else
444 + data_len -= oq->buffer_size;
445 + desc_used++;
446 + read_idx = octep_vf_oq_next_idx(oq, read_idx);
447 + }
448 + continue;
449 + }
450 + rx_bytes += buff_info->len;
432 451 skb_reserve(skb, data_offset);
433 452 /* Head fragment includes response header(s);
434 453 * subsequent fragments contains only data.
435 454 */
436 455 skb_put(skb, oq->max_single_buffer_size);
437 - read_idx++;
438 456 desc_used++;
439 - if (read_idx == oq->max_count)
440 - read_idx = 0;
457 + read_idx = octep_vf_oq_next_idx(oq, read_idx);
441 458
442 459 shinfo = skb_shinfo(skb);
443 460 data_len = buff_info->len - oq->max_single_buffer_size;
···
481 454 buff_info->len,
482 455 buff_info->len);
483 456 buff_info->page = NULL;
484 - read_idx++;
485 457 desc_used++;
486 - if (read_idx == oq->max_count)
487 - read_idx = 0;
458 + read_idx = octep_vf_oq_next_idx(oq, read_idx);
488 459 }
489 460 }
490 461
+21 -1
drivers/net/ethernet/mediatek/mtk_eth_soc.c
···
3588 3588 return NOTIFY_DONE;
3589 3589 }
3590 3590
3591 + static int mtk_max_gmac_mtu(struct mtk_eth *eth)
3592 + {
3593 + int i, max_mtu = ETH_DATA_LEN;
3594 +
3595 + for (i = 0; i < ARRAY_SIZE(eth->netdev); i++)
3596 + if (eth->netdev[i] && eth->netdev[i]->mtu > max_mtu)
3597 + max_mtu = eth->netdev[i]->mtu;
3598 +
3599 + return max_mtu;
3600 + }
3601 +
3591 3602 static int mtk_open(struct net_device *dev)
3592 3603 {
3593 3604 struct mtk_mac *mac = netdev_priv(dev);
3594 3605 struct mtk_eth *eth = mac->hw;
3595 3606 struct mtk_mac *target_mac;
3596 - int i, err, ppe_num;
3607 + int i, err, ppe_num, mtu;
3597 3608
3598 3609 ppe_num = eth->soc->ppe_num;
3599 3610
···
3650 3639 }
3651 3640 mtk_gdm_config(eth, target_mac->id, gdm_config);
3652 3641 }
3642 +
3643 + mtu = mtk_max_gmac_mtu(eth);
3644 + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
3645 + mtk_ppe_update_mtu(eth->ppe[i], mtu);
3653 3646
3654 3647 napi_enable(&eth->tx_napi);
3655 3648 napi_enable(&eth->rx_napi);
···
4348 4333 int length = new_mtu + MTK_RX_ETH_HLEN;
4349 4334 struct mtk_mac *mac = netdev_priv(dev);
4350 4335 struct mtk_eth *eth = mac->hw;
4336 + int max_mtu, i;
4351 4337
4352 4338 if (rcu_access_pointer(eth->prog) &&
4353 4339 length > MTK_PP_MAX_BUF_SIZE) {
···
4358 4342
4359 4343 mtk_set_mcr_max_rx(mac, length);
4360 4344 WRITE_ONCE(dev->mtu, new_mtu);
4345 +
4346 + max_mtu = mtk_max_gmac_mtu(eth);
4347 + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
4348 + mtk_ppe_update_mtu(eth->ppe[i], max_mtu);
4361 4349
4362 4350 return 0;
4363 4351 }
+30
drivers/net/ethernet/mediatek/mtk_ppe.c
···
973 973 }
974 974 }
975 975
976 + void mtk_ppe_update_mtu(struct mtk_ppe *ppe, int mtu)
977 + {
978 + int base;
979 + u32 val;
980 +
981 + if (!ppe)
982 + return;
983 +
984 + /* The PPE checks output frame size against per-tag-layer MTU limits,
985 + * treating PPPoE and DSA tags just like 802.1Q VLAN tags. The Linux
986 + * device MTU already accounts for PPPoE (PPPOE_SES_HLEN) and DSA tag
987 + * overhead, but 802.1Q VLAN tags are handled transparently without
988 + * being reflected by the lower device MTU being increased by 4.
989 + * Use the maximum MTU across all GMAC interfaces so that PPE output
990 + * frame limits are sufficiently high regardless of which port a flow
991 + * egresses through.
992 + */
993 + base = ETH_HLEN + mtu;
994 +
995 + val = FIELD_PREP(MTK_PPE_VLAN_MTU0_NONE, base) |
996 + FIELD_PREP(MTK_PPE_VLAN_MTU0_1TAG, base + VLAN_HLEN);
997 + ppe_w32(ppe, MTK_PPE_VLAN_MTU0, val);
998 +
999 + val = FIELD_PREP(MTK_PPE_VLAN_MTU1_2TAG,
1000 + base + 2 * VLAN_HLEN) |
1001 + FIELD_PREP(MTK_PPE_VLAN_MTU1_3TAG,
1002 + base + 3 * VLAN_HLEN);
1003 + ppe_w32(ppe, MTK_PPE_VLAN_MTU1, val);
1004 + }
1005 +
976 1006 void mtk_ppe_start(struct mtk_ppe *ppe)
977 1007 {
978 1008 u32 val;
+1
drivers/net/ethernet/mediatek/mtk_ppe.h
···
346 346 struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index);
347 347
348 348 void mtk_ppe_deinit(struct mtk_eth *eth);
349 + void mtk_ppe_update_mtu(struct mtk_ppe *ppe, int mtu);
349 350 void mtk_ppe_start(struct mtk_ppe *ppe);
350 351 int mtk_ppe_stop(struct mtk_ppe *ppe);
351 352 int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
+4 -8
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_offload.c
···
1 1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 2 /* Copyright (c) 2017, Mellanox Technologies inc. All rights reserved. */
3 3
4 + #include <linux/iopoll.h>
5 +
4 6 #include "mlx5_core.h"
5 7 #include "en.h"
6 8 #include "ipsec.h"
···
594 592 struct mlx5_wqe_aso_ctrl_seg *ctrl;
595 593 struct mlx5e_hw_objs *res;
596 594 struct mlx5_aso_wqe *wqe;
597 - unsigned long expires;
598 595 u8 ds_cnt;
599 596 int ret;
600 597
···
615 614 mlx5e_ipsec_aso_copy(ctrl, data);
616 615
617 616 mlx5_aso_post_wqe(aso->aso, false, &wqe->ctrl);
618 - expires = jiffies + msecs_to_jiffies(10);
619 - do {
620 - ret = mlx5_aso_poll_cq(aso->aso, false);
621 - if (ret)
622 - /* We are in atomic context */
623 - udelay(10);
624 - } while (ret && time_is_after_jiffies(expires));
617 + read_poll_timeout_atomic(mlx5_aso_poll_cq, ret, !ret, 10,
618 + 10 * USEC_PER_MSEC, false, aso->aso, false);
625 619 if (!ret)
626 620 memcpy(sa_entry->ctx, aso->ctx, MLX5_ST_SZ_BYTES(ipsec_aso));
627 621 spin_unlock_bh(&aso->lock);
+8
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
···
6868 6868 goto err_resume;
6869 6869 }
6870 6870
6871 + /* mlx5e_fix_features() returns early when the device is not present
6872 + * to avoid dereferencing cleared priv during profile changes.
6873 + * This also causes it to be a no-op during register_netdev(), where
6874 + * the device is not yet present.
6875 + * Trigger an additional features update that will actually work.
6876 + */
6877 + mlx5e_update_features(netdev);
6878 +
6871 6879 mlx5e_dcbnl_init_app(priv);
6872 6880 mlx5_core_uplink_netdev_set(mdev, netdev);
6873 6881 mlx5e_params_print_info(mdev, &priv->channels.params);
+1 -1
drivers/net/ethernet/meta/fbnic/fbnic_pci.c
···
139 139
140 140 /* Enable Tx/Rx processing */
141 141 fbnic_napi_enable(fbn);
142 - netif_tx_start_all_queues(fbn->netdev);
142 + netif_tx_wake_all_queues(fbn->netdev);
143 143
144 144 fbnic_service_task_start(fbn);
145 145
+2 -5
drivers/net/ethernet/microsoft/mana/gdma_main.c
···
2065 2065 gc->dev = &pdev->dev;
2066 2066 xa_init(&gc->irq_contexts);
2067 2067
2068 - if (gc->is_pf)
2069 - gc->mana_pci_debugfs = debugfs_create_dir("0", mana_debugfs_root);
2070 - else
2071 - gc->mana_pci_debugfs = debugfs_create_dir(pci_slot_name(pdev->slot),
2072 - mana_debugfs_root);
2068 + gc->mana_pci_debugfs = debugfs_create_dir(pci_name(pdev),
2069 + mana_debugfs_root);
2073 2070
2074 2071 err = mana_gd_setup(pdev);
2075 2072 if (err)
+2 -2
drivers/net/ethernet/microsoft/mana/mana_en.c
···
3154 3154 eth_hw_addr_set(ndev, apc->mac_addr);
3155 3155 sprintf(vport, "vport%d", port_idx);
3156 3156 apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
3157 + debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs,
3158 + &apc->speed);
3157 3159 return 0;
3158 3160
3159 3161 reset_apc:
···
3433 3431 }
3434 3432
3435 3433 netif_carrier_on(ndev);
3436 -
3437 - debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs, &apc->speed);
3438 3434
3439 3435 return 0;
3440 3436
+2
drivers/net/ethernet/wangxun/txgbe/txgbe_phy.c
···
657 657 return;
658 658 case wx_mac_sp:
659 659 if (txgbe->wx->media_type == wx_media_copper) {
660 + rtnl_lock();
660 661 phylink_disconnect_phy(txgbe->wx->phylink);
662 + rtnl_unlock();
661 663 phylink_destroy(txgbe->wx->phylink);
662 664 return;
663 665 }
+4 -5
drivers/net/hamradio/6pack.c
···
391 391 const u8 *fp, size_t count)
392 392 {
393 393 struct sixpack *sp;
394 - size_t count1;
395 394
396 395 if (!count)
397 396 return;
···
400 401 return;
401 402
402 403 /* Read the characters out of the buffer */
403 - count1 = count;
404 - while (count) {
405 - count--;
404 + while (count--) {
406 405 if (fp && *fp++) {
407 406 if (!test_and_set_bit(SIXPF_ERROR, &sp->flags))
408 407 sp->dev->stats.rx_errors++;
408 + cp++;
409 409 continue;
410 410 }
411 + sixpack_decode(sp, cp, 1);
412 + cp++;
411 413 }
412 - sixpack_decode(sp, cp, count1);
413 414
414 415 tty_unthrottle(tty);
415 416 }
+3
drivers/net/hamradio/bpqether.c
···
187 187
188 188 len = skb->data[0] + skb->data[1] * 256 - 5;
189 189
190 + if (len < 0 || len > skb->len - 2)
191 + goto drop_unlock;
192 +
190 193 skb_pull(skb, 2); /* Remove the length bytes */
191 194 skb_trim(skb, len); /* Set the length of the data */
192 195
+2
drivers/net/hamradio/scc.c
···
1909 1909 if (!capable(CAP_SYS_RAWIO)) return -EPERM;
1910 1910 if (!arg || copy_from_user(&memcfg, arg, sizeof(memcfg)))
1911 1911 return -EINVAL;
1912 + if (memcfg.bufsize < 16)
1913 + return -EINVAL;
1912 1914 scc->stat.bufsize = memcfg.bufsize;
1913 1915 return 0;
1914 1916
+1
drivers/net/ipa/gsi.c
···
2044 2044 count = reg_decode(reg, NUM_EV_PER_EE, val);
2045 2045 } else {
2046 2046 reg = gsi_reg(gsi, HW_PARAM_4);
2047 + val = ioread32(gsi->virt + reg_offset(reg));
2047 2048 count = reg_decode(reg, EV_PER_EE, val);
2048 2049 }
2049 2050 if (!count) {
+3 -3
drivers/net/ipa/ipa_main.c
···
361 361 {
362 362 const struct reg *reg;
363 363 u32 offset;
364 - u32 val;
364 + u32 val = 0;
365 365
366 366 /* Timer clock divider must be disabled when we change the rate */
367 367 reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
···
374 374 val |= reg_bit(reg, DPL_TIMESTAMP_SEL);
375 375 }
376 376 /* Configure tag and NAT Qtime timestamp resolution as well */
377 - val = reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
378 - val = reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
377 + val |= reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
378 + val |= reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
379 379
380 380 iowrite32(val, ipa->reg_virt + reg_offset(reg));
381 381
+63 -8
drivers/net/macsec.c
···
2584 2584 netif_inherit_tso_max(dev, macsec->real_dev);
2585 2585 }
2586 2586
2587 - static int macsec_update_offload(struct net_device *dev, enum macsec_offload offload)
2587 + static int macsec_update_offload(struct net_device *dev,
2588 + enum macsec_offload offload,
2589 + struct netlink_ext_ack *extack)
2588 2590 {
2589 2591 enum macsec_offload prev_offload;
2590 2592 const struct macsec_ops *ops;
···
2618 2616 if (!ops)
2619 2617 return -EOPNOTSUPP;
2620 2618
2621 - macsec->offload = offload;
2622 -
2623 2619 ctx.secy = &macsec->secy;
2624 2620 ret = offload == MACSEC_OFFLOAD_OFF ? macsec_offload(ops->mdo_del_secy, &ctx)
2625 2621 : macsec_offload(ops->mdo_add_secy, &ctx);
2626 - if (ret) {
2627 - macsec->offload = prev_offload;
2622 + if (ret)
2628 2623 return ret;
2624 +
2625 + /* Remove VLAN filters when disabling offload. */
2626 + if (offload == MACSEC_OFFLOAD_OFF) {
2627 + vlan_drop_rx_ctag_filter_info(dev);
2628 + vlan_drop_rx_stag_filter_info(dev);
2629 + }
2630 + macsec->offload = offload;
2631 + /* Add VLAN filters when enabling offload. */
2632 + if (prev_offload == MACSEC_OFFLOAD_OFF) {
2633 + ret = vlan_get_rx_ctag_filter_info(dev);
2634 + if (ret) {
2635 + NL_SET_ERR_MSG_FMT(extack,
2636 + "adding ctag VLAN filters failed, err %d",
2637 + ret);
2638 + goto rollback_offload;
2639 + }
2640 + ret = vlan_get_rx_stag_filter_info(dev);
2641 + if (ret) {
2642 + NL_SET_ERR_MSG_FMT(extack,
2643 + "adding stag VLAN filters failed, err %d",
2644 + ret);
2645 + vlan_drop_rx_ctag_filter_info(dev);
2646 + goto rollback_offload;
2647 + }
2629 2648 }
2630 2649
2631 2650 macsec_set_head_tail_room(dev);
···
2655 2632 macsec_inherit_tso_max(dev);
2656 2633
2657 2634 netdev_update_features(dev);
2635 +
2636 + return 0;
2637 +
2638 + rollback_offload:
2639 + macsec->offload = prev_offload;
2640 + macsec_offload(ops->mdo_del_secy, &ctx);
2658 2641
2659 2642 return ret;
2660 2643 }
···
2702 2673 offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2703 2674
2704 2675 if (macsec->offload != offload)
2705 - ret = macsec_update_offload(dev, offload);
2676 + ret = macsec_update_offload(dev, offload, info->extack);
2706 2677 out:
2707 2678 rtnl_unlock();
2708 2679 return ret;
···
3515 3486 }
3516 3487
3517 3488 #define MACSEC_FEATURES \
3518 - (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3489 + (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
3490 + NETIF_F_HW_VLAN_STAG_FILTER | NETIF_F_HW_VLAN_CTAG_FILTER)
3519 3491
3520 3492 #define MACSEC_OFFLOAD_FEATURES \
3521 3493 (MACSEC_FEATURES | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES | \
···
3737 3707 return err;
3738 3708 }
3739 3709
3710 + static int macsec_vlan_rx_add_vid(struct net_device *dev,
3711 + __be16 proto, u16 vid)
3712 + {
3713 + struct macsec_dev *macsec = netdev_priv(dev);
3714 +
3715 + if (!macsec_is_offloaded(macsec))
3716 + return 0;
3717 +
3718 + return vlan_vid_add(macsec->real_dev, proto, vid);
3719 + }
3720 +
3721 + static int macsec_vlan_rx_kill_vid(struct net_device *dev,
3722 + __be16 proto, u16 vid)
3723 + {
3724 + struct macsec_dev *macsec = netdev_priv(dev);
3725 +
3726 + if (!macsec_is_offloaded(macsec))
3727 + return 0;
3728 +
3729 + vlan_vid_del(macsec->real_dev, proto, vid);
3730 + return 0;
3731 + }
3732 +
3740 3733 static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3741 3734 {
3742 3735 struct macsec_dev *macsec = macsec_priv(dev);
···
3801 3748 .ndo_set_rx_mode = macsec_dev_set_rx_mode,
3802 3749 .ndo_change_rx_flags = macsec_dev_change_rx_flags,
3803 3750 .ndo_set_mac_address = macsec_set_mac_address,
3751 + .ndo_vlan_rx_add_vid = macsec_vlan_rx_add_vid,
3752 + .ndo_vlan_rx_kill_vid = macsec_vlan_rx_kill_vid,
3804 3753 .ndo_start_xmit = macsec_start_xmit,
3805 3754 .ndo_get_stats64 = macsec_get_stats64,
3806 3755 .ndo_get_iflink = macsec_get_iflink,
···
3967 3912 offload = nla_get_u8(data[IFLA_MACSEC_OFFLOAD]);
3968 3913 if (macsec->offload != offload) {
3969 3914 macsec_offload_state_change = true;
3970 - ret = macsec_update_offload(dev, offload);
3915 + ret = macsec_update_offload(dev, offload, extack);
3971 3916 if (ret)
3972 3917 goto cleanup;
3973 3918 }
+63 -2
drivers/net/netdevsim/netdev.c
···
554 554 return 0;
555 555 }
556 556
557 + static int nsim_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
558 + {
559 + struct netdevsim *ns = netdev_priv(dev);
560 +
561 + if (vid >= VLAN_N_VID)
562 + return -EINVAL;
563 +
564 + if (proto == htons(ETH_P_8021Q))
565 + WARN_ON_ONCE(test_and_set_bit(vid, ns->vlan.ctag));
566 + else if (proto == htons(ETH_P_8021AD))
567 + WARN_ON_ONCE(test_and_set_bit(vid, ns->vlan.stag));
568 +
569 + return 0;
570 + }
571 +
572 + static int nsim_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
573 + {
574 + struct netdevsim *ns = netdev_priv(dev);
575 +
576 + if (vid >= VLAN_N_VID)
577 + return -EINVAL;
578 +
579 + if (proto == htons(ETH_P_8021Q))
580 + WARN_ON_ONCE(!test_and_clear_bit(vid, ns->vlan.ctag));
581 + else if (proto == htons(ETH_P_8021AD))
582 + WARN_ON_ONCE(!test_and_clear_bit(vid, ns->vlan.stag));
583 +
584 + return 0;
585 + }
586 +
557 587 static int nsim_shaper_set(struct net_shaper_binding *binding,
558 588 const struct net_shaper *shaper,
559 589 struct netlink_ext_ack *extack)
···
641 611 .ndo_bpf = nsim_bpf,
642 612 .ndo_open = nsim_open,
643 613 .ndo_stop = nsim_stop,
614 + .ndo_vlan_rx_add_vid = nsim_vlan_rx_add_vid,
615 + .ndo_vlan_rx_kill_vid = nsim_vlan_rx_kill_vid,
644 616 .net_shaper_ops = &nsim_shaper_ops,
645 617 };
···
654 622 .ndo_change_mtu = nsim_change_mtu,
655 623 .ndo_setup_tc = nsim_setup_tc,
656 624 .ndo_set_features = nsim_set_features,
625 + .ndo_vlan_rx_add_vid = nsim_vlan_rx_add_vid,
626 + .ndo_vlan_rx_kill_vid = nsim_vlan_rx_kill_vid,
657 627 };
658 628
659 629 /* We don't have true per-queue stats, yet, so do some random fakery here.
···
953 919 .owner = THIS_MODULE,
954 920 };
955 921
922 + static int nsim_vlan_show(struct seq_file *s, void *data)
923 + {
924 + struct netdevsim *ns = s->private;
925 + int vid;
926 +
927 + for_each_set_bit(vid, ns->vlan.ctag, VLAN_N_VID)
928 + seq_printf(s, "ctag %d\n", vid);
929 + for_each_set_bit(vid, ns->vlan.stag, VLAN_N_VID)
930 + seq_printf(s, "stag %d\n", vid);
931 +
932 + return 0;
933 + }
934 + DEFINE_SHOW_ATTRIBUTE(nsim_vlan);
935 +
956 936 static void nsim_setup(struct net_device *dev)
957 937 {
958 938 ether_setup(dev);
···
979 931 NETIF_F_FRAGLIST |
980 932 NETIF_F_HW_CSUM |
981 933 NETIF_F_LRO |
982 - NETIF_F_TSO;
934 + NETIF_F_TSO |
935 + NETIF_F_HW_VLAN_CTAG_FILTER |
936 + NETIF_F_HW_VLAN_STAG_FILTER;
983 937 dev->hw_features |= NETIF_F_HW_TC |
984 938 NETIF_F_SG |
985 939 NETIF_F_FRAGLIST |
986 940 NETIF_F_HW_CSUM |
987 941 NETIF_F_LRO |
988 942 NETIF_F_TSO |
989 - NETIF_F_LOOPBACK;
943 + NETIF_F_LOOPBACK |
944 + NETIF_F_HW_VLAN_CTAG_FILTER |
945 + NETIF_F_HW_VLAN_STAG_FILTER;
990 946 dev->pcpu_stat_type = NETDEV_PCPU_STAT_DSTATS;
991 947 dev->max_mtu = ETH_MAX_MTU;
992 948 dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_HW_OFFLOAD;
···
1157 1105 ns->qr_dfs = debugfs_create_file("queue_reset", 0200,
1158 1106 nsim_dev_port->ddir, ns,
1159 1107 &nsim_qreset_fops);
1108 + ns->vlan_dfs = debugfs_create_file("vlan", 0400, nsim_dev_port->ddir,
1109 + ns, &nsim_vlan_fops);
1160 1110 return ns;
1161 1111
1162 1112 err_free_netdev:
···
1170 1116 {
1171 1117 struct net_device *dev = ns->netdev;
1172 1118 struct netdevsim *peer;
1119 + u16 vid;
1173 1120
1121 + debugfs_remove(ns->vlan_dfs);
1174 1122 debugfs_remove(ns->qr_dfs);
1175 1123 debugfs_remove(ns->pp_dfs);
1176 1124
···
1197 1141 rtnl_unlock();
1198 1142 if (nsim_dev_port_is_pf(ns->nsim_dev_port))
1199 1143 nsim_exit_netdevsim(ns);
1144 +
1145 + for_each_set_bit(vid, ns->vlan.ctag, VLAN_N_VID)
1146 + WARN_ON_ONCE(1);
1147 + for_each_set_bit(vid, ns->vlan.stag, VLAN_N_VID)
1148 + WARN_ON_ONCE(1);
1200 1149
1201 1150 /* Put this intentionally late to exercise the orphaning path */
1202 1151 if (ns->page) {
+8
drivers/net/netdevsim/netdevsim.h
···
18 18 #include <linux/ethtool.h>
19 19 #include <linux/ethtool_netlink.h>
20 20 #include <linux/kernel.h>
21 + #include <linux/if_vlan.h>
21 22 #include <linux/list.h>
22 23 #include <linux/netdevice.h>
23 24 #include <linux/ptp_mock.h>
···
74 73 struct nsim_macsec {
75 74 struct nsim_secy nsim_secy[NSIM_MACSEC_MAX_SECY_COUNT];
76 75 u8 nsim_secy_count;
76 + };
77 +
78 + struct nsim_vlan {
79 + DECLARE_BITMAP(ctag, VLAN_N_VID);
80 + DECLARE_BITMAP(stag, VLAN_N_VID);
77 81 };
···
141 135 bool bpf_map_accept;
142 136 struct nsim_ipsec ipsec;
143 137 struct nsim_macsec macsec;
138 + struct nsim_vlan vlan;
144 139 struct {
145 140 u32 inject_error;
146 141 u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
···
153 146 struct page *page;
154 147 struct dentry *pp_dfs;
155 148 struct dentry *qr_dfs;
149 + struct dentry *vlan_dfs;
156 150
157 151 struct nsim_ethtool ethtool;
158 152 struct netdevsim __rcu *peer;
+2 -2
drivers/net/phy/phy_device.c
···
927 927 /* returning -ENODEV doesn't stop bus
928 928 * scanning
929 929 */
930 - return (phy_reg == -EIO ||
931 - phy_reg == -ENODEV) ? -ENODEV : -EIO;
930 + return (ret == -EIO ||
931 + ret == -ENODEV) ? -ENODEV : -EIO;
932 932
933 933 if (!ret)
934 934 continue;
+1 -1
drivers/net/phy/qcom/at803x.c
···
524 524 * behaviour but we still need to accommodate it. XNP is only needed
525 525 * for 10Gbps support, so disable XNP.
526 526 */
527 - return phy_modify(phydev, MII_ADVERTISE, MDIO_AN_CTRL1_XNP, 0);
527 + return phy_modify(phydev, MII_ADVERTISE, ADVERTISE_XNP, 0);
528 528 }
529 529
530 530 static void at803x_link_change_notify(struct phy_device *phydev)
+3
drivers/net/ppp/ppp_generic.c
···
1048 1048 struct ppp_net *pn;
1049 1049 int __user *p = (int __user *)arg;
1050 1050
1051 + if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
1052 + return -EPERM;
1053 +
1051 1054 switch (cmd) {
1052 1055 case PPPIOCNEWUNIT:
1053 1056 /* Create a new ppp unit */
+6 -1
drivers/net/usb/cdc-phonet.c
···
157 157 PAGE_SIZE);
158 158 page = NULL;
159 159 }
160 - } else {
160 + } else if (skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS) {
161 161 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
162 162 page, 0, req->actual_length,
163 163 PAGE_SIZE);
164 164 page = NULL;
165 + } else {
166 + dev_kfree_skb_any(skb);
167 + pnd->rx_skb = NULL;
168 + skb = NULL;
169 + dev->stats.rx_length_errors++;
165 170 }
166 171 if (req->actual_length < PAGE_SIZE)
167 172 pnd->rx_skb = NULL; /* Last fragment */
+13 -3
include/net/sch_generic.h
···
1187 1187 static inline void qdisc_dequeue_drop(struct Qdisc *q, struct sk_buff *skb,
1188 1188 enum qdisc_drop_reason reason)
1189 1189 {
1190 + struct Qdisc *root;
1191 +
1190 1192 DEBUG_NET_WARN_ON_ONCE(!(q->flags & TCQ_F_DEQUEUE_DROPS));
1191 1193 DEBUG_NET_WARN_ON_ONCE(q->flags & TCQ_F_NOLOCK);
1192 1194
1193 - tcf_set_qdisc_drop_reason(skb, reason);
1194 - skb->next = q->to_free;
1195 - q->to_free = skb;
1195 + rcu_read_lock();
1196 + root = qdisc_root_sleeping(q);
1197 +
1198 + if (root->flags & TCQ_F_DEQUEUE_DROPS) {
1199 + tcf_set_qdisc_drop_reason(skb, reason);
1200 + skb->next = root->to_free;
1201 + root->to_free = skb;
1202 + } else {
1203 + kfree_skb_reason(skb, (enum skb_drop_reason)reason);
1204 + }
1205 + rcu_read_unlock();
1196 1206 }
1197 1207
1198 1208 /* Instead of calling kfree_skb() while root qdisc lock is held,
+2 -1
include/uapi/linux/mii.h
···
82 82 #define ADVERTISE_100BASE4 0x0200 /* Try for 100mbps 4k packets */
83 83 #define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */
84 84 #define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymetric pause */
85 - #define ADVERTISE_RESV 0x1000 /* Unused... */
85 + #define ADVERTISE_XNP 0x1000 /* Extended Next Page */
86 + #define ADVERTISE_RESV ADVERTISE_XNP /* Used to be reserved */
86 87 #define ADVERTISE_RFAULT 0x2000 /* Say we can detect faults */
87 88 #define ADVERTISE_LPACK 0x4000 /* Ack link partners response */
88 89 #define ADVERTISE_NPAGE 0x8000 /* Next page bit */
+12 -2
net/caif/cfsrvl.c
···
191 191
192 192 void caif_free_client(struct cflayer *adap_layer)
193 193 {
194 + struct cflayer *serv_layer;
194 195 struct cfsrvl *servl;
195 - if (adap_layer == NULL || adap_layer->dn == NULL)
196 +
197 + if (!adap_layer)
196 198 return;
197 199
198 - servl = container_obj(adap_layer->dn);
199 +
200 + serv_layer = adap_layer->dn;
201 + if (!serv_layer)
202 + return;
203 +
204 + layer_set_dn(adap_layer, NULL);
205 + layer_set_up(serv_layer, NULL);
206 +
207 + servl = container_obj(serv_layer);
198 208 servl->release(&servl->layer);
199 209 }
200 210 EXPORT_SYMBOL(caif_free_client);
+10 -1
net/can/raw.c
···
362 362 return NOTIFY_DONE;
363 363 }
364 364
365 + static void raw_sock_destruct(struct sock *sk)
366 + {
367 + struct raw_sock *ro = raw_sk(sk);
368 +
369 + free_percpu(ro->uniq);
370 + can_sock_destruct(sk);
371 + }
372 +
365 373 static int raw_init(struct sock *sk)
366 374 {
367 375 struct raw_sock *ro = raw_sk(sk);
···
395 387 ro->uniq = alloc_percpu(struct uniqframe);
396 388 if (unlikely(!ro->uniq))
397 389 return -ENOMEM;
390 +
391 + sk->sk_destruct = raw_sock_destruct;
398 392
399 393 /* set notifier */
400 394 spin_lock(&raw_notifier_lock);
···
447 437 ro->bound = 0;
448 438 ro->dev = NULL;
449 439 ro->count = 0;
450 - free_percpu(ro->uniq);
451 440
452 441 sock_orphan(sk);
453 442 sock->sk = NULL;
+6 -2
net/core/filter.c
···
4394 4394 struct net_device *master, *slave;
4395 4395
4396 4396 master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev);
4397 + if (unlikely(!(master->flags & IFF_UP)))
4398 + return XDP_ABORTED;
4397 4399 slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp);
4398 4400 if (slave && slave != xdp->rxq->dev) {
4399 4401 /* The target device is different from the receiving device, so
···
10572 10570 si->dst_reg, si->dst_reg, \
10573 10571 offsetof(OBJ, OBJ_FIELD)); \
10574 10572 if (si->dst_reg == si->src_reg) { \
10575 - *insn++ = BPF_JMP_A(1); \
10573 + *insn++ = BPF_JMP_A(2); \
10576 10574 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
10577 10575 offsetof(struct bpf_sock_ops_kern, \
10578 10576 temp)); \
10577 + *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); \
10579 10578 } \
10580 10579 } while (0)
···
10610 10607 si->dst_reg, si->src_reg, \
10611 10608 offsetof(struct bpf_sock_ops_kern, sk));\
10612 10609 if (si->dst_reg == si->src_reg) { \
10613 - *insn++ = BPF_JMP_A(1); \
10610 + *insn++ = BPF_JMP_A(2); \
10614 10611 *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
10615 10612 offsetof(struct bpf_sock_ops_kern, \
10616 10613 temp)); \
10614 + *insn++ = BPF_MOV64_IMM(si->dst_reg, 0); \
10617 10615 } \
10618 10616 } while (0)
+1 -1
net/ipv4/syncookies.c
···
284 284 treq->rcv_isn = ntohl(th->seq) - 1;
285 285 treq->snt_isn = ntohl(th->ack_seq) - 1;
286 286 treq->syn_tos = TCP_SKB_CB(skb)->ip_dsfield;
287 - treq->req_usec_ts = false;
288 287
289 288 #if IS_ENABLED(CONFIG_MPTCP)
290 289 treq->is_mptcp = sk_is_mptcp(sk);
···
345 346 ireq->wscale_ok = tcp_opt->wscale_ok;
346 347 ireq->ecn_ok = !!(tcp_opt->rcv_tsecr & TS_OPT_ECN);
347 348
349 + treq->req_usec_ts = false;
348 350 treq->ts_off = tsoff;
349 351
350 352 return req;
+6
net/nfc/digital_technology.c
···
424 424 size = 4;
425 425 }
426 426
427 + if (target->nfcid1_len + size > NFC_NFCID1_MAXSIZE) {
428 + PROTOCOL_ERR("4.7.2.1");
429 + rc = -EPROTO;
430 + goto exit;
431 + }
432 +
427 433 memcpy(target->nfcid1 + target->nfcid1_len, sdd_res->nfcid1 + offset,
428 434 size);
429 435 target->nfcid1_len += size;
+2
net/nfc/llcp_core.c
···
1091 1091 if (sk->sk_state == LLCP_CLOSED) {
1092 1092 release_sock(sk);
1093 1093 nfc_llcp_sock_put(llcp_sock);
1094 + return;
1094 1095 }
1095 1096
1096 1097 /* Pass the payload upstream */
···
1183 1182 if (sk->sk_state == LLCP_CLOSED) {
1184 1183 release_sock(sk);
1185 1184 nfc_llcp_sock_put(llcp_sock);
1185 + return;
1186 1186 }
1187 1187
1188 1188 if (sk->sk_state == LLCP_CONNECTED) {
+69 -10
net/qrtr/ns.c
···
22 22 struct socket *sock;
23 23 struct sockaddr_qrtr bcast_sq;
24 24 struct list_head lookups;
25 + u32 lookup_count;
25 26 struct workqueue_struct *workqueue;
26 27 struct work_struct work;
28 + void (*saved_data_ready)(struct sock *sk);
27 29 int local_node;
28 30 } qrtr_ns;
29 31
···
69 67 struct qrtr_node {
70 68 unsigned int id;
71 69 struct xarray servers;
70 + u32 server_count;
72 71 };
72 +
73 + /* Max nodes, server, lookup limits are chosen based on the current platform
74 + * requirements. If the requirement changes in the future, these values can be
75 + * increased.
76 + */
77 + #define QRTR_NS_MAX_NODES 64
78 + #define QRTR_NS_MAX_SERVERS 256
79 + #define QRTR_NS_MAX_LOOKUPS 64
80 +
81 + static u8 node_count;
73 82
74 83 static struct qrtr_node *node_get(unsigned int node_id)
75 84 {
···
89 76 node = xa_load(&nodes, node_id);
90 77 if (node)
91 78 return node;
79 +
80 + if (node_count >= QRTR_NS_MAX_NODES) {
81 + pr_err_ratelimited("QRTR clients exceed max node limit!\n");
82 + return NULL;
83 + }
92 84
93 85 /* If node didn't exist, allocate and insert it to the tree */
94 86 node = kzalloc_obj(*node);
···
107 89 kfree(node);
108 90 return NULL;
109 91 }
92 +
93 + node_count++;
110 94
111 95 return node;
112 96 }
···
249 229 if (!service || !port)
250 230 return NULL;
251 231
232 + node = node_get(node_id);
233 + if (!node)
234 + return NULL;
235 +
236 + /* Make sure the new servers per port are capped at the maximum value */
237 + old = xa_load(&node->servers, port);
238 + if (!old && node->server_count >= QRTR_NS_MAX_SERVERS) {
239 + pr_err_ratelimited("QRTR client node %u exceeds max server limit!\n", node_id);
240 + return NULL;
241 + }
242 +
252 243 srv = kzalloc_obj(*srv);
253 244 if (!srv)
254 245 return NULL;
···
268 237 srv->instance = instance;
269 238 srv->node = node_id;
270 239 srv->port = port;
271 -
272 - node = node_get(node_id);
273 - if (!node)
274 - goto err;
275 240
···
279 252 } else {
280 253 kfree(old);
281 254 }
255 + } else {
256 + node->server_count++;
282 257 }
283 258
284 259 trace_qrtr_ns_server_add(srv->service, srv->instance,
···
321 292 }
322 293
323 294 kfree(srv);
295 + node->server_count--;
324 296
325 297 return 0;
326 298 }
···
371 341 struct qrtr_node *node;
372 342 unsigned long index;
373 343 struct kvec iv;
374 - int ret;
344 + int ret = 0;
375 345
376 346 iv.iov_base = &pkt;
377 347 iv.iov_len = sizeof(pkt);
···
386 356
387 357 /* Advertise the removal of this client to all local servers */
388 358 local_node = node_get(qrtr_ns.local_node);
389 - if (!local_node)
390 - return 0;
359 + if (!local_node) {
360 + ret = 0;
361 + goto delete_node;
362 + }
391 363
392 364 memset(&pkt, 0, sizeof(pkt));
393 365 pkt.cmd = cpu_to_le32(QRTR_TYPE_BYE);
···
406 374 ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt));
407 375 if (ret < 0 && ret != -ENODEV) {
408 376 pr_err("failed to send bye cmd\n");
409 - return ret;
377 + goto delete_node;
410 378 }
411 379 }
412 - return 0;
380 +
381 + /* Ignore -ENODEV */
382 + ret = 0;
383 +
384 + delete_node:
385 + xa_erase(&nodes, from->sq_node);
386 + kfree(node);
387 + node_count--;
388 +
389 + return ret;
413 390 }
414 391
415 392 static int ctrl_cmd_del_client(struct sockaddr_qrtr *from,
···
458 417
459 418 list_del(&lookup->li);
460 419 kfree(lookup);
420 + qrtr_ns.lookup_count--;
461 421
462 422 /* Remove the server belonging to this port but don't broadcast
···
576 534 if (from->sq_node != qrtr_ns.local_node)
577 535 return -EINVAL;
578 536
537 + if (qrtr_ns.lookup_count >= QRTR_NS_MAX_LOOKUPS) {
538 + pr_err_ratelimited("QRTR client node exceeds max lookup limit!\n");
539 + return -ENOSPC;
540 + }
541 +
579 542 lookup = kzalloc_obj(*lookup);
580 543 if (!lookup)
581 544 return -ENOMEM;
···
589 542 lookup->service = service;
590 543 lookup->instance = instance;
591 544 list_add_tail(&lookup->li, &qrtr_ns.lookups);
545 + qrtr_ns.lookup_count++;
592 546
593 547 memset(&filter, 0, sizeof(filter));
594 548 filter.service = service;
···
630 582
631 583 list_del(&lookup->li);
632 584 kfree(lookup);
585 + qrtr_ns.lookup_count--;
633 586 }
634 587 }
···
719 670 }
720 671
721 672 if (ret < 0)
722 - pr_err("failed while handling packet from %d:%d",
673 + pr_err_ratelimited("failed while handling packet from %d:%d",
723 674 sq.sq_node, sq.sq_port);
724 675 }
···
758 709 goto err_sock;
759 710 }
760 711
712 + qrtr_ns.saved_data_ready = qrtr_ns.sock->sk->sk_data_ready;
761 713 qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready;
762 714
763 715 sq.sq_port = QRTR_PORT_CTRL;
···
799 749 return 0;
800 750
801 751 err_wq:
752 + write_lock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
753 + qrtr_ns.sock->sk->sk_data_ready = qrtr_ns.saved_data_ready;
754 + write_unlock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
755 +
802 756 destroy_workqueue(qrtr_ns.workqueue);
803 757 err_sock:
804 758 sock_release(qrtr_ns.sock);
···
812 758
813 759 void qrtr_ns_remove(void)
814 760 {
761 + write_lock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
762 + qrtr_ns.sock->sk->sk_data_ready = qrtr_ns.saved_data_ready;
763 + write_unlock_bh(&qrtr_ns.sock->sk->sk_callback_lock);
764 +
815 765 cancel_work_sync(&qrtr_ns.work);
766 + synchronize_net();
816 767 destroy_workqueue(qrtr_ns.workqueue);
817 768
818 769 /* sock_release() expects the two references that were put during
+8 -2
net/rds/af_rds.c
···
357 357 return ret;
358 358 }
359 359
360 - static int rds_set_transport(struct rds_sock *rs, sockptr_t optval, int optlen)
360 + static int rds_set_transport(struct net *net, struct rds_sock *rs,
361 + sockptr_t optval, int optlen)
361 362 {
362 363 int t_type;
363 364
···
373 372
374 373 if (t_type < 0 || t_type >= RDS_TRANS_COUNT)
375 374 return -EINVAL;
375 +
376 + /* RDS/IB is restricted to the initial network namespace */
377 + if (t_type != RDS_TRANS_TCP && !net_eq(net, &init_net))
378 + return -EPROTOTYPE;
376 379
377 380 rs->rs_transport = rds_trans_get(t_type);
378 381
···
438 433 sockptr_t optval, unsigned int optlen)
439 434 {
440 435 struct rds_sock *rs = rds_sk_to_rs(sock->sk);
436 + struct net *net = sock_net(sock->sk);
441 437 int ret;
442 438
443 439 if (level != SOL_RDS) {
···
467 461 break;
468 462 case SO_RDS_TRANSPORT:
469 463 lock_sock(sock->sk);
470 - ret = rds_set_transport(rs, optval, optlen);
464 + ret = rds_set_transport(net, rs, optval, optlen);
471 465 release_sock(sock->sk);
472 466 break;
473 467 case SO_TIMESTAMP_OLD:
+22 -2
net/rds/ib.c
···
401 401 * allowed to influence which paths have priority. We could call userspace
402 402 * asserting this policy "routing".
403 403 */
404 - static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr,
405 - __u32 scope_id)
404 + static int rds_ib_laddr_check_cm(struct net *net, const struct in6_addr *addr,
405 + __u32 scope_id)
406 406 {
407 407 int ret;
408 408 struct rdma_cm_id *cm_id;
···
485 485 rdma_destroy_id(cm_id);
486 486
487 487 return ret;
488 + }
489 +
490 + static int rds_ib_laddr_check(struct net *net, const struct in6_addr *addr,
491 + __u32 scope_id)
492 + {
493 + struct rds_ib_device *rds_ibdev = NULL;
494 +
495 + /* RDS/IB is restricted to the initial network namespace */
496 + if (!net_eq(net, &init_net))
497 + return -EPROTOTYPE;
498 +
499 + if (ipv6_addr_v4mapped(addr)) {
500 + rds_ibdev = rds_ib_get_device(addr->s6_addr32[3]);
501 + if (rds_ibdev) {
502 + rds_ib_dev_put(rds_ibdev);
503 + return 0;
504 + }
505 + }
506 +
507 + return rds_ib_laddr_check_cm(net, addr, scope_id);
488 508 }
489 509
490 510 static void rds_ib_unregister_client(void)
+1
net/rds/ib.h
···
381 381 __rds_ib_conn_error(conn, KERN_WARNING "RDS/IB: " fmt)
382 382
383 383 /* ib_rdma.c */
384 + struct rds_ib_device *rds_ib_get_device(__be32 ipaddr);
384 385 int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev,
385 386 struct in6_addr *ipaddr);
386 387 void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn);
+1 -1
net/rds/ib_rdma.c
···
43 43
44 44 static void rds_ib_odp_mr_worker(struct work_struct *work);
45 45
46 - static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
46 + struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
47 47 {
48 48 struct rds_ib_device *rds_ibdev;
49 49 struct rds_ib_ipaddr *i_ipaddr;
+7
net/rose/rose_in.c
···
270 270
271 271 frametype = rose_decode(skb, &ns, &nr, &q, &d, &m);
272 272
273 + /*
274 + * ROSE_CLEAR_REQUEST carries cause and diagnostic in bytes 3..4.
275 + * Reject a malformed frame that is too short to contain them.
276 + */
277 + if (frametype == ROSE_CLEAR_REQUEST && skb->len < 5)
278 + return 0;
279 +
273 280 switch (rose->state) {
274 281 case ROSE_STATE_1:
275 282 queued = rose_state1_machine(sk, skb, frametype);
+6 -2
net/sched/act_ct.c
···
328 328 int err = -ENOMEM;
329 329
330 330 mutex_lock(&zones_mutex);
331 - ct_ft = rhashtable_lookup_fast(&zones_ht, &key, zones_params);
332 - if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
331 + rcu_read_lock();
332 + ct_ft = rhashtable_lookup(&zones_ht, &key, zones_params);
333 + if (ct_ft && refcount_inc_not_zero(&ct_ft->ref)) {
334 + rcu_read_unlock();
333 335 goto out_unlock;
336 + }
337 + rcu_read_unlock();
334 338
335 339 ct_ft = kzalloc_obj(*ct_ft);
336 340 if (!ct_ft)
+5 -1
net/sched/cls_fw.c
···
74 74 }
75 75 }
76 76 } else {
77 - struct Qdisc *q = tcf_block_q(tp->chain->block);
77 + struct Qdisc *q;
78 78
79 79 /* Old method: classify the packet using its skb mark. */
80 + if (tcf_block_shared(tp->chain->block))
81 + return -1;
82 +
83 + q = tcf_block_q(tp->chain->block);
80 84 if (id && (TC_H_MAJ(id) == 0 ||
81 85 !(TC_H_MAJ(id ^ q->handle)))) {
82 86 res->classid = id;
+1
net/sctp/inqueue.c
···
201 201
202 202 cb->chunk = head_cb->chunk;
203 203 cb->af = head_cb->af;
204 + cb->encap_port = head_cb->encap_port;
204 205 }
205 206 }
206 207
+2
net/sctp/ipv6.c
···
261 261 skb_set_inner_ipproto(skb, IPPROTO_SCTP);
262 262 label = ip6_make_flowlabel(sock_net(sk), skb, fl6->flowlabel, true, fl6);
263 263
264 + local_bh_disable();
264 265 udp_tunnel6_xmit_skb(dst, sk, skb, NULL, &fl6->saddr, &fl6->daddr,
265 266 tclass, ip6_dst_hoplimit(dst), label,
266 267 sctp_sk(sk)->udp_port, t->encap_port, false, 0);
268 + local_bh_enable();
267 269 return 0;
268 270 }
269 271
+2
net/sctp/protocol.c
···
1070 1070 skb_reset_inner_mac_header(skb);
1071 1071 skb_reset_inner_transport_header(skb);
1072 1072 skb_set_inner_ipproto(skb, IPPROTO_SCTP);
1073 + local_bh_disable();
1073 1074 udp_tunnel_xmit_skb(dst_rtable(dst), sk, skb, fl4->saddr,
1074 1075 fl4->daddr, dscp, ip4_dst_hoplimit(dst), df,
1075 1076 sctp_sk(sk)->udp_port, t->encap_port, false, false,
1076 1077 0);
1078 + local_bh_enable();
1077 1079 return 0;
1078 1080 }
1079 1081
+8
net/strparser/strparser.c
···
45 45
46 46 strp->stopped = 1;
47 47
48 + if (strp->skb_head) {
49 + kfree_skb(strp->skb_head);
50 + strp->skb_head = NULL;
51 + }
52 +
53 + strp->skb_nextp = NULL;
54 + strp->need_bytes = 0;
55 +
48 56 if (strp->sk) {
49 57 struct sock *sk = strp->sk;
50 58
+3 -3
net/vmw_vsock/af_vsock.c
···
1962 1962 const struct vsock_transport *transport,
1963 1963 u64 val)
1964 1964 {
1965 - if (val > vsk->buffer_max_size)
1966 - val = vsk->buffer_max_size;
1967 -
1968 1965 if (val < vsk->buffer_min_size)
1969 1966 val = vsk->buffer_min_size;
1967 +
1968 + if (val > vsk->buffer_max_size)
1969 + val = vsk->buffer_max_size;
1970 1970
1971 1971 if (val != vsk->buffer_size &&
1972 1972 transport && transport->notify_buffer_size)
+76
tools/testing/selftests/bpf/prog_tests/sock_ops_get_sk.c
···
1 + // SPDX-License-Identifier: GPL-2.0
2 +
3 + #include <test_progs.h>
4 + #include "cgroup_helpers.h"
5 + #include "network_helpers.h"
6 + #include "sock_ops_get_sk.skel.h"
7 +
8 + /* See progs/sock_ops_get_sk.c for the bug description. */
9 + static void run_sock_ops_test(int cgroup_fd, int prog_fd)
10 + {
11 + int server_fd, client_fd, err;
12 +
13 + err = bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_SOCK_OPS, 0);
14 + if (!ASSERT_OK(err, "prog_attach"))
15 + return;
16 +
17 + server_fd = start_server(AF_INET, SOCK_STREAM, NULL, 0, 0);
18 + if (!ASSERT_OK_FD(server_fd, "start_server"))
19 + goto detach;
20 +
21 + /* Trigger TCP handshake which causes TCP_NEW_SYN_RECV state where
22 + * is_fullsock == 0 and is_locked_tcp_sock == 0.
23 + */
24 + client_fd = connect_to_fd(server_fd, 0);
25 + if (!ASSERT_OK_FD(client_fd, "connect_to_fd"))
26 + goto close_server;
27 +
28 + close(client_fd);
29 +
30 + close_server:
31 + close(server_fd);
32 + detach:
33 + bpf_prog_detach(cgroup_fd, BPF_CGROUP_SOCK_OPS);
34 + }
35 +
36 + void test_ns_sock_ops_get_sk(void)
37 + {
38 + struct sock_ops_get_sk *skel;
39 + int cgroup_fd;
40 +
41 + cgroup_fd = test__join_cgroup("/sock_ops_get_sk");
42 + if (!ASSERT_OK_FD(cgroup_fd, "join_cgroup"))
43 + return;
44 +
45 + skel = sock_ops_get_sk__open_and_load();
46 + if (!ASSERT_OK_PTR(skel, "skel_open_load"))
47 + goto close_cgroup;
48 +
49 + /* Test SOCK_OPS_GET_SK with same src/dst register */
50 + if (test__start_subtest("get_sk")) {
51 + run_sock_ops_test(cgroup_fd,
52 + bpf_program__fd(skel->progs.sock_ops_get_sk_same_reg));
53 + ASSERT_EQ(skel->bss->null_seen, 1, "null_seen");
54 + ASSERT_EQ(skel->bss->bug_detected, 0, "bug_not_detected");
55 + }
56 +
57 + /* Test SOCK_OPS_GET_FIELD with same src/dst register */
58 + if (test__start_subtest("get_field")) {
59 + run_sock_ops_test(cgroup_fd,
60 + bpf_program__fd(skel->progs.sock_ops_get_field_same_reg));
61 + ASSERT_EQ(skel->bss->field_null_seen, 1, "field_null_seen");
62 + ASSERT_EQ(skel->bss->field_bug_detected, 0, "field_bug_not_detected");
63 + }
64 +
65 + /* Test SOCK_OPS_GET_SK with different src/dst register */
66 + if (test__start_subtest("get_sk_diff_reg")) {
67 + run_sock_ops_test(cgroup_fd,
68 + bpf_program__fd(skel->progs.sock_ops_get_sk_diff_reg));
69 + ASSERT_EQ(skel->bss->diff_reg_null_seen, 1, "diff_reg_null_seen");
70 + ASSERT_EQ(skel->bss->diff_reg_bug_detected, 0, "diff_reg_bug_not_detected");
71 + }
72 +
73 + sock_ops_get_sk__destroy(skel);
74 + close_cgroup:
75 + close(cgroup_fd);
76 + }
+94 -2
tools/testing/selftests/bpf/prog_tests/xdp_bonding.c
··· 191 191 return -1; 192 192 } 193 193 194 - static void bonding_cleanup(struct skeletons *skeletons) 194 + static void link_cleanup(struct skeletons *skeletons) 195 195 { 196 - restore_root_netns(); 197 196 while (skeletons->nlinks) { 198 197 skeletons->nlinks--; 199 198 bpf_link__destroy(skeletons->links[skeletons->nlinks]); 200 199 } 200 + } 201 + 202 + static void bonding_cleanup(struct skeletons *skeletons) 203 + { 204 + restore_root_netns(); 205 + link_cleanup(skeletons); 201 206 ASSERT_OK(system("ip link delete bond1"), "delete bond1"); 202 207 ASSERT_OK(system("ip link delete veth1_1"), "delete veth1_1"); 203 208 ASSERT_OK(system("ip link delete veth1_2"), "delete veth1_2"); ··· 498 493 system("ip link del bond_nest2"); 499 494 } 500 495 496 + /* 497 + * Test that XDP redirect via xdp_master_redirect() does not crash when 498 + * the bond master device is not up. When bond is in round-robin mode but 499 + * never opened, rr_tx_counter is NULL. 500 + */ 501 + static void test_xdp_bonding_redirect_no_up(struct skeletons *skeletons) 502 + { 503 + struct nstoken *nstoken = NULL; 504 + int xdp_pass_fd; 505 + int veth1_ifindex; 506 + int err; 507 + char pkt[ETH_HLEN + 1]; 508 + struct xdp_md ctx_in = {}; 509 + 510 + DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts, 511 + .data_in = &pkt, 512 + .data_size_in = sizeof(pkt), 513 + .ctx_in = &ctx_in, 514 + .ctx_size_in = sizeof(ctx_in), 515 + .flags = BPF_F_TEST_XDP_LIVE_FRAMES, 516 + .repeat = 1, 517 + .batch_size = 1, 518 + ); 519 + 520 + /* We can't use bonding_setup() because bond will be active */ 521 + SYS(out, "ip netns add ns_rr_no_up"); 522 + nstoken = open_netns("ns_rr_no_up"); 523 + if (!ASSERT_OK_PTR(nstoken, "open ns_rr_no_up")) 524 + goto out; 525 + 526 + /* bond0: active-backup, UP with slave veth0. 527 + * Attaching native XDP to bond0 enables bpf_master_redirect_enabled_key 528 + * globally. 529 + */ 530 + SYS(out, "ip link add bond0 type bond mode active-backup"); 531 + SYS(out, "ip link add veth0 type veth peer name veth0p"); 532 + SYS(out, "ip link set veth0 master bond0"); 533 + SYS(out, "ip link set bond0 up"); 534 + SYS(out, "ip link set veth0p up"); 535 + 536 + /* bond1: round-robin, never UP -> rr_tx_counter stays NULL */ 537 + SYS(out, "ip link add bond1 type bond mode balance-rr"); 538 + SYS(out, "ip link add veth1 type veth peer name veth1p"); 539 + SYS(out, "ip link set veth1 master bond1"); 540 + 541 + veth1_ifindex = if_nametoindex("veth1"); 542 + if (!ASSERT_GT(veth1_ifindex, 0, "veth1_ifindex")) 543 + goto out; 544 + 545 + /* Attach native XDP to bond0 -> enables global redirect key */ 546 + if (xdp_attach(skeletons, skeletons->xdp_tx->progs.xdp_tx, "bond0")) 547 + goto out; 548 + 549 + /* Attach generic XDP (XDP_TX) to veth1. 550 + * When packets arrive at veth1 via netif_receive_skb, do_xdp_generic() 551 + * runs this program. XDP_TX + bond slave triggers xdp_master_redirect(). 552 + */ 553 + err = bpf_xdp_attach(veth1_ifindex, 554 + bpf_program__fd(skeletons->xdp_tx->progs.xdp_tx), 555 + XDP_FLAGS_SKB_MODE, NULL); 556 + if (!ASSERT_OK(err, "attach generic XDP to veth1")) 557 + goto out; 558 + 559 + /* Run BPF_PROG_TEST_RUN with XDP_PASS live frames on veth1. 560 + * XDP_PASS frames become SKBs with skb->dev = veth1, entering 561 + * netif_receive_skb -> do_xdp_generic -> xdp_master_redirect. 562 + * Without the fix, bond_rr_gen_slave_id() dereferences NULL 563 + * rr_tx_counter and crashes. 
564 + */ 565 + xdp_pass_fd = bpf_program__fd(skeletons->xdp_dummy->progs.xdp_dummy_prog); 566 + 567 + memset(pkt, 0, sizeof(pkt)); 568 + ctx_in.data_end = sizeof(pkt); 569 + ctx_in.ingress_ifindex = veth1_ifindex; 570 + 571 + err = bpf_prog_test_run_opts(xdp_pass_fd, &opts); 572 + ASSERT_OK(err, "xdp_pass test_run should not crash"); 573 + 574 + out: 575 + link_cleanup(skeletons); 576 + close_netns(nstoken); 577 + SYS_NOFAIL("ip netns del ns_rr_no_up"); 578 + } 579 + 501 580 static void test_xdp_bonding_features(struct skeletons *skeletons) 502 581 { 503 582 LIBBPF_OPTS(bpf_xdp_query_opts, query_opts); ··· 826 737 827 738 if (test__start_subtest("xdp_bonding_redirect_multi")) 828 739 test_xdp_bonding_redirect_multi(&skeletons); 740 + 741 + if (test__start_subtest("xdp_bonding_redirect_no_up")) 742 + test_xdp_bonding_redirect_no_up(&skeletons); 829 743 830 744 out: 831 745 xdp_dummy__destroy(skeletons.xdp_dummy);
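The crash mode this subtest guards against can be modeled outside the kernel. A minimal sketch of the data flow, using illustrative names (rr_bond, rr_pick_slave); this is not the bonding driver's code:

    #include <stdint.h>

    /* In balance-rr mode the slave index comes from a per-CPU tx counter
     * that the real driver only allocates when the device is opened. A
     * bond that was never brought up still has a NULL counter pointer. */
    struct rr_bond {
            uint32_t *rr_tx_counter; /* NULL until open */
            unsigned int slave_cnt;
    };

    static int rr_pick_slave(struct rr_bond *bond)
    {
            /* guarded version: bail out instead of dereferencing NULL
             * when a redirect reaches a never-opened bond */
            if (!bond->rr_tx_counter || !bond->slave_cnt)
                    return -1;
            return (int)((*bond->rr_tx_counter)++ % bond->slave_cnt);
    }

    int main(void)
    {
            struct rr_bond never_up = { 0 };

            return rr_pick_slave(&never_up) == -1 ? 0 : 1;
    }

Without the NULL check, the counter increment corresponds to the dereference that test_xdp_bonding_redirect_no_up() provokes through generic XDP.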
+117
tools/testing/selftests/bpf/progs/sock_ops_get_sk.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 2 + 3 + #include "vmlinux.h" 4 + #include <bpf/bpf_helpers.h> 5 + #include "bpf_misc.h" 6 + 7 + /* 8 + * Test the SOCK_OPS_GET_SK() and SOCK_OPS_GET_FIELD() macros in 9 + * sock_ops_convert_ctx_access() when dst_reg == src_reg. 10 + * 11 + * When dst_reg == src_reg, the macros borrow a temporary register to load 12 + * is_fullsock / is_locked_tcp_sock, because dst_reg holds the ctx pointer 13 + * and cannot be clobbered before ctx->sk / ctx->field is read. If 14 + * is_fullsock == 0 (e.g., TCP_NEW_SYN_RECV with a request_sock), the macro 15 + * must still zero dst_reg so the verifier's PTR_TO_SOCKET_OR_NULL / 16 + * SCALAR_VALUE type is correct at runtime. A missing clear leaves a stale 17 + * ctx pointer in dst_reg that passes NULL checks (GET_SK) or leaks a kernel 18 + * address as a scalar (GET_FIELD). 19 + * 20 + * When dst_reg != src_reg, dst_reg itself is used to load is_fullsock, so 21 + * the JEQ (dst_reg == 0) naturally leaves it zeroed on the !fullsock path. 22 + */ 23 + 24 + int bug_detected; 25 + int null_seen; 26 + 27 + SEC("sockops") 28 + __naked void sock_ops_get_sk_same_reg(void) 29 + { 30 + asm volatile ( 31 + "r7 = *(u32 *)(r1 + %[is_fullsock_off]);" 32 + "r1 = *(u64 *)(r1 + %[sk_off]);" 33 + "if r7 != 0 goto 2f;" 34 + "if r1 == 0 goto 1f;" 35 + "r1 = %[bug_detected] ll;" 36 + "r2 = 1;" 37 + "*(u32 *)(r1 + 0) = r2;" 38 + "goto 2f;" 39 + "1:" 40 + "r1 = %[null_seen] ll;" 41 + "r2 = 1;" 42 + "*(u32 *)(r1 + 0) = r2;" 43 + "2:" 44 + "r0 = 1;" 45 + "exit;" 46 + : 47 + : __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)), 48 + __imm_const(sk_off, offsetof(struct bpf_sock_ops, sk)), 49 + __imm_addr(bug_detected), 50 + __imm_addr(null_seen) 51 + : __clobber_all); 52 + } 53 + 54 + /* SOCK_OPS_GET_FIELD: same-register, is_locked_tcp_sock == 0 path. */ 55 + int field_bug_detected; 56 + int field_null_seen; 57 + 58 + SEC("sockops") 59 + __naked void sock_ops_get_field_same_reg(void) 60 + { 61 + asm volatile ( 62 + "r7 = *(u32 *)(r1 + %[is_fullsock_off]);" 63 + "r1 = *(u32 *)(r1 + %[snd_cwnd_off]);" 64 + "if r7 != 0 goto 2f;" 65 + "if r1 == 0 goto 1f;" 66 + "r1 = %[field_bug_detected] ll;" 67 + "r2 = 1;" 68 + "*(u32 *)(r1 + 0) = r2;" 69 + "goto 2f;" 70 + "1:" 71 + "r1 = %[field_null_seen] ll;" 72 + "r2 = 1;" 73 + "*(u32 *)(r1 + 0) = r2;" 74 + "2:" 75 + "r0 = 1;" 76 + "exit;" 77 + : 78 + : __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)), 79 + __imm_const(snd_cwnd_off, offsetof(struct bpf_sock_ops, snd_cwnd)), 80 + __imm_addr(field_bug_detected), 81 + __imm_addr(field_null_seen) 82 + : __clobber_all); 83 + } 84 + 85 + /* SOCK_OPS_GET_SK: different-register, is_fullsock == 0 path. 
*/ 86 + int diff_reg_bug_detected; 87 + int diff_reg_null_seen; 88 + 89 + SEC("sockops") 90 + __naked void sock_ops_get_sk_diff_reg(void) 91 + { 92 + asm volatile ( 93 + "r7 = r1;" 94 + "r6 = *(u32 *)(r7 + %[is_fullsock_off]);" 95 + "r2 = *(u64 *)(r7 + %[sk_off]);" 96 + "if r6 != 0 goto 2f;" 97 + "if r2 == 0 goto 1f;" 98 + "r1 = %[diff_reg_bug_detected] ll;" 99 + "r3 = 1;" 100 + "*(u32 *)(r1 + 0) = r3;" 101 + "goto 2f;" 102 + "1:" 103 + "r1 = %[diff_reg_null_seen] ll;" 104 + "r3 = 1;" 105 + "*(u32 *)(r1 + 0) = r3;" 106 + "2:" 107 + "r0 = 1;" 108 + "exit;" 109 + : 110 + : __imm_const(is_fullsock_off, offsetof(struct bpf_sock_ops, is_fullsock)), 111 + __imm_const(sk_off, offsetof(struct bpf_sock_ops, sk)), 112 + __imm_addr(diff_reg_bug_detected), 113 + __imm_addr(diff_reg_null_seen) 114 + : __clobber_all); 115 + } 116 + 117 + char _license[] SEC("license") = "GPL";
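The register-reuse hazard the three naked programs pin down can be paraphrased in plain C. This is a sketch of the rewrite's data flow only, with illustrative names (sock_ops_ctx, load_sk); it is not the kernel macro:

    #include <stdint.h>
    #include <stdio.h>

    struct sock_ops_ctx {
            uint32_t is_fullsock;
            uintptr_t sk; /* stands in for the kernel pointer */
    };

    /* dst_reg == src_reg: 'dst' starts out holding the ctx pointer, so a
     * borrowed temporary carries is_fullsock. On the !fullsock path dst
     * must be cleared explicitly, otherwise the stale ctx pointer
     * survives and passes a later NULL check. */
    static uintptr_t load_sk(struct sock_ops_ctx *ctx, int buggy)
    {
            uintptr_t dst = (uintptr_t)ctx;  /* dst_reg == src_reg */
            uint32_t tmp = ctx->is_fullsock; /* borrowed temp register */

            if (!tmp) {
                    if (!buggy)
                            dst = 0; /* the required clear */
                    return dst;      /* buggy: still the ctx pointer */
            }
            return ctx->sk;
    }

    int main(void)
    {
            struct sock_ops_ctx req = { .is_fullsock = 0, .sk = 0 };

            printf("fixed: %#lx\n", (unsigned long)load_sk(&req, 0));
            printf("buggy: %#lx\n", (unsigned long)load_sk(&req, 1));
            return 0;
    }

In the dst_reg != src_reg case, is_fullsock is loaded into dst itself, so the branch that tests it for zero leaves dst zeroed for free; that is what sock_ops_get_sk_diff_reg() checks.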
+1
tools/testing/selftests/drivers/net/Makefile
··· 12 12 TEST_PROGS := \ 13 13 gro.py \ 14 14 hds.py \ 15 + macsec.py \ 15 16 napi_id.py \ 16 17 napi_threaded.py \ 17 18 netpoll_basic.py \
+2
tools/testing/selftests/drivers/net/config
··· 3 3 CONFIG_DEBUG_INFO_BTF_MODULES=n 4 4 CONFIG_INET_PSP=y 5 5 CONFIG_IPV6=y 6 + CONFIG_MACSEC=m 6 7 CONFIG_NETCONSOLE=m 7 8 CONFIG_NETCONSOLE_DYNAMIC=y 8 9 CONFIG_NETCONSOLE_EXTENDED_LOG=y 9 10 CONFIG_NETDEVSIM=m 11 + CONFIG_VLAN_8021Q=m 10 12 CONFIG_XDP_SOCKETS=y
+9
tools/testing/selftests/drivers/net/lib/py/env.py
··· 258 258 if nsim_test is False and self._ns is not None: 259 259 raise KsftXfailEx("Test does not work on netdevsim") 260 260 261 + def get_local_nsim_dev(self): 262 + """Returns the local netdevsim device or None. 263 + Using this method is discouraged, as it makes tests nsim-specific. 264 + Standard interfaces available on all HW should ideally be used. 265 + This method is intended for the few cases where nsim-specific 266 + behavior must be asserted and cannot be checked any other way. 267 + """ 268 + return self._ns 269 + 261 270 def _require_cmd(self, comm, key, host=None): 262 271 cached = self._required_cmd.get(comm, {}) 263 272 if cached.get(key) is None:
+343
tools/testing/selftests/drivers/net/macsec.py
··· 1 + #!/usr/bin/env python3 2 + # SPDX-License-Identifier: GPL-2.0 3 + 4 + """MACsec tests.""" 5 + 6 + import os 7 + 8 + from lib.py import ksft_run, ksft_exit, ksft_eq, ksft_raises 9 + from lib.py import ksft_variants, KsftNamedVariant 10 + from lib.py import CmdExitFailure, KsftSkipEx 11 + from lib.py import NetDrvEpEnv 12 + from lib.py import cmd, ip, defer, ethtool 13 + 14 + MACSEC_KEY = "12345678901234567890123456789012" 15 + MACSEC_VLAN_VID = 10 16 + 17 + # Unique prefix per run to avoid collisions in the shared netns. 18 + # Keep it short: IFNAMSIZ is 16 (incl. NUL), and VLAN names append ".<vid>". 19 + MACSEC_PFX = f"ms{os.getpid()}_" 20 + 21 + 22 + def _macsec_name(idx=0): 23 + return f"{MACSEC_PFX}{idx}" 24 + 25 + 26 + def _get_macsec_offload(dev): 27 + """Returns macsec offload mode string from ip -d link show.""" 28 + info = ip(f"-d link show dev {dev}", json=True)[0] 29 + return info.get("linkinfo", {}).get("info_data", {}).get("offload") 30 + 31 + 32 + def _get_features(dev): 33 + """Returns ethtool features dict for a device.""" 34 + return ethtool(f"-k {dev}", json=True)[0] 35 + 36 + 37 + def _require_ip_macsec(cfg): 38 + """SKIP if iproute2 on local or remote lacks 'ip macsec' support.""" 39 + for host in [None, cfg.remote]: 40 + out = cmd("ip macsec help", fail=False, host=host) 41 + if "Usage" not in out.stdout + out.stderr: 42 + where = "remote" if host else "local" 43 + raise KsftSkipEx(f"iproute2 too old on {where}," 44 + " missing macsec support") 45 + 46 + 47 + def _require_ip_macsec_offload(): 48 + """SKIP if local iproute2 doesn't understand 'ip macsec offload'.""" 49 + out = cmd("ip macsec help", fail=False) 50 + if "offload" not in out.stdout + out.stderr: 51 + raise KsftSkipEx("iproute2 too old, missing macsec offload") 52 + 53 + 54 + def _require_macsec_offload(cfg): 55 + """SKIP if local device doesn't support macsec-hw-offload.""" 56 + _require_ip_macsec_offload() 57 + try: 58 + feat = ethtool(f"-k {cfg.ifname}", json=True)[0] 59 + except (CmdExitFailure, IndexError) as e: 60 + raise KsftSkipEx( 61 + f"can't query features: {e}") from e 62 + if not feat.get("macsec-hw-offload", {}).get("active"): 63 + raise KsftSkipEx("macsec-hw-offload not supported") 64 + 65 + 66 + def _get_mac(ifname, host=None): 67 + """Gets MAC address of an interface.""" 68 + dev = ip(f"link show dev {ifname}", json=True, host=host) 69 + return dev[0]["address"] 70 + 71 + 72 + def _setup_macsec_sa(cfg, name): 73 + """Adds matching TX/RX SAs on both ends.""" 74 + local_mac = _get_mac(name) 75 + remote_mac = _get_mac(name, host=cfg.remote) 76 + 77 + ip(f"macsec add {name} tx sa 0 pn 1 on key 01 {MACSEC_KEY}") 78 + ip(f"macsec add {name} rx port 1 address {remote_mac}") 79 + ip(f"macsec add {name} rx port 1 address {remote_mac} " 80 + f"sa 0 pn 1 on key 02 {MACSEC_KEY}") 81 + 82 + ip(f"macsec add {name} tx sa 0 pn 1 on key 02 {MACSEC_KEY}", 83 + host=cfg.remote) 84 + ip(f"macsec add {name} rx port 1 address {local_mac}", host=cfg.remote) 85 + ip(f"macsec add {name} rx port 1 address {local_mac} " 86 + f"sa 0 pn 1 on key 01 {MACSEC_KEY}", host=cfg.remote) 87 + 88 + 89 + def _setup_macsec_devs(cfg, name, offload): 90 + """Creates macsec devices on both ends. 91 + 92 + Only the local device gets HW offload; the remote always uses software 93 + MACsec since it may not support offload at all. 
94 + """ 95 + offload_arg = "mac" if offload else "off" 96 + 97 + ip(f"link add link {cfg.ifname} {name} " 98 + f"type macsec encrypt on offload {offload_arg}") 99 + defer(ip, f"link del {name}") 100 + ip(f"link add link {cfg.remote_ifname} {name} " 101 + f"type macsec encrypt on", host=cfg.remote) 102 + defer(ip, f"link del {name}", host=cfg.remote) 103 + 104 + 105 + def _set_offload(name, offload): 106 + """Sets offload on the local macsec device only.""" 107 + offload_arg = "mac" if offload else "off" 108 + 109 + ip(f"link set {name} type macsec encrypt on offload {offload_arg}") 110 + 111 + 112 + def _setup_vlans(cfg, name, vid): 113 + """Adds VLANs on top of existing macsec devs.""" 114 + vlan_name = f"{name}.{vid}" 115 + 116 + ip(f"link add link {name} {vlan_name} type vlan id {vid}") 117 + defer(ip, f"link del {vlan_name}") 118 + ip(f"link add link {name} {vlan_name} type vlan id {vid}", host=cfg.remote) 119 + defer(ip, f"link del {vlan_name}", host=cfg.remote) 120 + 121 + 122 + def _setup_vlan_ips(cfg, name, vid): 123 + """Adds VLANs and IPs and brings up the macsec + VLAN devices.""" 124 + local_ip = "198.51.100.1" 125 + remote_ip = "198.51.100.2" 126 + vlan_name = f"{name}.{vid}" 127 + 128 + ip(f"addr add {local_ip}/24 dev {vlan_name}") 129 + ip(f"addr add {remote_ip}/24 dev {vlan_name}", host=cfg.remote) 130 + ip(f"link set {name} up") 131 + ip(f"link set {name} up", host=cfg.remote) 132 + ip(f"link set {vlan_name} up") 133 + ip(f"link set {vlan_name} up", host=cfg.remote) 134 + 135 + return vlan_name, remote_ip 136 + 137 + 138 + def test_offload_api(cfg) -> None: 139 + """MACsec offload API: create SecY, add SA/rx, toggle offload.""" 140 + 141 + _require_macsec_offload(cfg) 142 + ms0 = _macsec_name(0) 143 + ms1 = _macsec_name(1) 144 + ms2 = _macsec_name(2) 145 + 146 + # Create 3 SecY with offload 147 + ip(f"link add link {cfg.ifname} {ms0} type macsec " 148 + f"port 4 encrypt on offload mac") 149 + defer(ip, f"link del {ms0}") 150 + 151 + ip(f"link add link {cfg.ifname} {ms1} type macsec " 152 + f"address aa:bb:cc:dd:ee:ff port 5 encrypt on offload mac") 153 + defer(ip, f"link del {ms1}") 154 + 155 + ip(f"link add link {cfg.ifname} {ms2} type macsec " 156 + f"sci abbacdde01020304 encrypt on offload mac") 157 + defer(ip, f"link del {ms2}") 158 + 159 + # Add TX SA 160 + ip(f"macsec add {ms0} tx sa 0 pn 1024 on " 161 + "key 01 12345678901234567890123456789012") 162 + 163 + # Add RX SC + SA 164 + ip(f"macsec add {ms0} rx port 1234 address 1c:ed:de:ad:be:ef") 165 + ip(f"macsec add {ms0} rx port 1234 address 1c:ed:de:ad:be:ef " 166 + "sa 0 pn 1 on key 00 0123456789abcdef0123456789abcdef") 167 + 168 + # Can't disable offload when SAs are configured 169 + with ksft_raises(CmdExitFailure): 170 + ip(f"link set {ms0} type macsec offload off") 171 + with ksft_raises(CmdExitFailure): 172 + ip(f"macsec offload {ms0} off") 173 + 174 + # Toggle offload via rtnetlink on SA-free device 175 + ip(f"link set {ms2} type macsec offload off") 176 + ip(f"link set {ms2} type macsec encrypt on offload mac") 177 + 178 + # Toggle offload via genetlink 179 + ip(f"macsec offload {ms2} off") 180 + ip(f"macsec offload {ms2} mac") 181 + 182 + 183 + def test_max_secy(cfg) -> None: 184 + """nsim-only test for max number of SecYs.""" 185 + 186 + cfg.require_nsim() 187 + _require_ip_macsec_offload() 188 + ms0 = _macsec_name(0) 189 + ms1 = _macsec_name(1) 190 + ms2 = _macsec_name(2) 191 + ms3 = _macsec_name(3) 192 + 193 + ip(f"link add link {cfg.ifname} {ms0} type macsec " 194 + f"port 4 encrypt on offload mac") 195 
+ defer(ip, f"link del {ms0}") 196 + 197 + ip(f"link add link {cfg.ifname} {ms1} type macsec " 198 + f"address aa:bb:cc:dd:ee:ff port 5 encrypt on offload mac") 199 + defer(ip, f"link del {ms1}") 200 + 201 + ip(f"link add link {cfg.ifname} {ms2} type macsec " 202 + f"sci abbacdde01020304 encrypt on offload mac") 203 + defer(ip, f"link del {ms2}") 204 + with ksft_raises(CmdExitFailure): 205 + ip(f"link add link {cfg.ifname} {ms3} " 206 + f"type macsec port 8 encrypt on offload mac") 207 + 208 + 209 + def test_max_sc(cfg) -> None: 210 + """nsim-only test for max number of SCs.""" 211 + 212 + cfg.require_nsim() 213 + _require_ip_macsec_offload() 214 + ms0 = _macsec_name(0) 215 + 216 + ip(f"link add link {cfg.ifname} {ms0} type macsec " 217 + f"port 4 encrypt on offload mac") 218 + defer(ip, f"link del {ms0}") 219 + ip(f"macsec add {ms0} rx port 1234 address 1c:ed:de:ad:be:ef") 220 + with ksft_raises(CmdExitFailure): 221 + ip(f"macsec add {ms0} rx port 1235 address 1c:ed:de:ad:be:ef") 222 + 223 + 224 + def test_offload_state(cfg) -> None: 225 + """Offload state reflects configuration changes.""" 226 + 227 + _require_macsec_offload(cfg) 228 + ms0 = _macsec_name(0) 229 + 230 + # Create with offload on 231 + ip(f"link add link {cfg.ifname} {ms0} type macsec " 232 + f"encrypt on offload mac") 233 + cleanup = defer(ip, f"link del {ms0}") 234 + 235 + ksft_eq(_get_macsec_offload(ms0), "mac", 236 + "created with offload: should be mac") 237 + feats_on_1 = _get_features(ms0) 238 + 239 + ip(f"link set {ms0} type macsec offload off") 240 + ksft_eq(_get_macsec_offload(ms0), "off", 241 + "offload disabled: should be off") 242 + feats_off_1 = _get_features(ms0) 243 + 244 + ip(f"link set {ms0} type macsec encrypt on offload mac") 245 + ksft_eq(_get_macsec_offload(ms0), "mac", 246 + "offload re-enabled: should be mac") 247 + ksft_eq(_get_features(ms0), feats_on_1, 248 + "features should match first offload-on snapshot") 249 + 250 + # Delete and recreate without offload 251 + cleanup.exec() 252 + ip(f"link add link {cfg.ifname} {ms0} type macsec") 253 + defer(ip, f"link del {ms0}") 254 + ksft_eq(_get_macsec_offload(ms0), "off", 255 + "created without offload: should be off") 256 + ksft_eq(_get_features(ms0), feats_off_1, 257 + "features should match first offload-off snapshot") 258 + 259 + ip(f"link set {ms0} type macsec encrypt on offload mac") 260 + ksft_eq(_get_macsec_offload(ms0), "mac", 261 + "offload enabled after create: should be mac") 262 + ksft_eq(_get_features(ms0), feats_on_1, 263 + "features should match first offload-on snapshot") 264 + 265 + 266 + def _check_nsim_vid(cfg, vid, expected) -> None: 267 + """Checks if a VLAN is present. 
Only works on netdevsim.""" 268 + 269 + nsim = cfg.get_local_nsim_dev() 270 + if not nsim: 271 + return 272 + 273 + vlan_path = os.path.join(nsim.nsims[0].dfs_dir, "vlan") 274 + with open(vlan_path, encoding="utf-8") as f: 275 + vids = f.read() 276 + found = f"ctag {vid}\n" in vids 277 + ksft_eq(found, expected, 278 + f"VLAN {vid} {'expected' if expected else 'not expected'}" 279 + f" in debugfs") 280 + 281 + 282 + @ksft_variants([ 283 + KsftNamedVariant("offloaded", True), 284 + KsftNamedVariant("software", False), 285 + ]) 286 + def test_vlan(cfg, offload) -> None: 287 + """Ping through VLAN-over-macsec.""" 288 + 289 + _require_ip_macsec(cfg) 290 + if offload: 291 + _require_macsec_offload(cfg) 292 + else: 293 + _require_ip_macsec_offload() 294 + name = _macsec_name() 295 + _setup_macsec_devs(cfg, name, offload=offload) 296 + _setup_macsec_sa(cfg, name) 297 + _setup_vlans(cfg, name, MACSEC_VLAN_VID) 298 + vlan_name, remote_ip = _setup_vlan_ips(cfg, name, MACSEC_VLAN_VID) 299 + _check_nsim_vid(cfg, MACSEC_VLAN_VID, offload) 300 + # nsim doesn't handle the data path for offloaded macsec, so skip 301 + # the ping when offloaded on nsim. 302 + if not offload or not cfg.get_local_nsim_dev(): 303 + cmd(f"ping -I {vlan_name} -c 1 -W 5 {remote_ip}") 304 + 305 + 306 + @ksft_variants([ 307 + KsftNamedVariant("on_to_off", True), 308 + KsftNamedVariant("off_to_on", False), 309 + ]) 310 + def test_vlan_toggle(cfg, offload) -> None: 311 + """Toggle offload: VLAN filters propagate/remove correctly.""" 312 + 313 + _require_ip_macsec(cfg) 314 + _require_macsec_offload(cfg) 315 + name = _macsec_name() 316 + _setup_macsec_devs(cfg, name, offload=offload) 317 + _setup_vlans(cfg, name, MACSEC_VLAN_VID) 318 + _check_nsim_vid(cfg, MACSEC_VLAN_VID, offload) 319 + _set_offload(name, offload=not offload) 320 + _check_nsim_vid(cfg, MACSEC_VLAN_VID, not offload) 321 + vlan_name, remote_ip = _setup_vlan_ips(cfg, name, MACSEC_VLAN_VID) 322 + _setup_macsec_sa(cfg, name) 323 + # nsim doesn't handle the data path for offloaded macsec, so skip 324 + # the ping when the final state is offloaded on nsim. 325 + if offload or not cfg.get_local_nsim_dev(): 326 + cmd(f"ping -I {vlan_name} -c 1 -W 5 {remote_ip}") 327 + 328 + 329 + def main() -> None: 330 + """Main program.""" 331 + with NetDrvEpEnv(__file__) as cfg: 332 + ksft_run([test_offload_api, 333 + test_max_secy, 334 + test_max_sc, 335 + test_offload_state, 336 + test_vlan, 337 + test_vlan_toggle, 338 + ], args=(cfg,)) 339 + ksft_exit() 340 + 341 + 342 + if __name__ == "__main__": 343 + main()
-1
tools/testing/selftests/drivers/net/netdevsim/Makefile
··· 11 11 fib.sh \ 12 12 fib_notifications.sh \ 13 13 hw_stats_l3.sh \ 14 - macsec-offload.sh \ 15 14 nexthop.sh \ 16 15 peer.sh \ 17 16 psample.sh \
-117
tools/testing/selftests/drivers/net/netdevsim/macsec-offload.sh
··· 1 - #!/bin/bash 2 - # SPDX-License-Identifier: GPL-2.0-only 3 - 4 - source ethtool-common.sh 5 - 6 - NSIM_NETDEV=$(make_netdev) 7 - MACSEC_NETDEV=macsec_nsim 8 - 9 - set -o pipefail 10 - 11 - if ! ethtool -k $NSIM_NETDEV | grep -q 'macsec-hw-offload: on'; then 12 - echo "SKIP: netdevsim doesn't support MACsec offload" 13 - exit 4 14 - fi 15 - 16 - if ! ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec offload mac 2>/dev/null; then 17 - echo "SKIP: couldn't create macsec device" 18 - exit 4 19 - fi 20 - ip link del $MACSEC_NETDEV 21 - 22 - # 23 - # test macsec offload API 24 - # 25 - 26 - ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}" type macsec port 4 offload mac 27 - check $? 28 - 29 - ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}2" type macsec address "aa:bb:cc:dd:ee:ff" port 5 offload mac 30 - check $? 31 - 32 - ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}3" type macsec sci abbacdde01020304 offload mac 33 - check $? 34 - 35 - ip link add link $NSIM_NETDEV "${MACSEC_NETDEV}4" type macsec port 8 offload mac 2> /dev/null 36 - check $? '' '' 1 37 - 38 - ip macsec add "${MACSEC_NETDEV}" tx sa 0 pn 1024 on key 01 12345678901234567890123456789012 39 - check $? 40 - 41 - ip macsec add "${MACSEC_NETDEV}" rx port 1234 address "1c:ed:de:ad:be:ef" 42 - check $? 43 - 44 - ip macsec add "${MACSEC_NETDEV}" rx port 1234 address "1c:ed:de:ad:be:ef" sa 0 pn 1 on \ 45 - key 00 0123456789abcdef0123456789abcdef 46 - check $? 47 - 48 - ip macsec add "${MACSEC_NETDEV}" rx port 1235 address "1c:ed:de:ad:be:ef" 2> /dev/null 49 - check $? '' '' 1 50 - 51 - # can't disable macsec offload when SAs are configured 52 - ip link set "${MACSEC_NETDEV}" type macsec offload off 2> /dev/null 53 - check $? '' '' 1 54 - 55 - ip macsec offload "${MACSEC_NETDEV}" off 2> /dev/null 56 - check $? '' '' 1 57 - 58 - # toggle macsec offload via rtnetlink 59 - ip link set "${MACSEC_NETDEV}2" type macsec offload off 60 - check $? 61 - 62 - ip link set "${MACSEC_NETDEV}2" type macsec offload mac 63 - check $? 64 - 65 - # toggle macsec offload via genetlink 66 - ip macsec offload "${MACSEC_NETDEV}2" off 67 - check $? 68 - 69 - ip macsec offload "${MACSEC_NETDEV}2" mac 70 - check $? 71 - 72 - for dev in ${MACSEC_NETDEV}{,2,3} ; do 73 - ip link del $dev 74 - check $? 75 - done 76 - 77 - 78 - # 79 - # test ethtool features when toggling offload 80 - # 81 - 82 - ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec offload mac 83 - TMP_FEATS_ON_1="$(ethtool -k $MACSEC_NETDEV)" 84 - 85 - ip link set $MACSEC_NETDEV type macsec offload off 86 - TMP_FEATS_OFF_1="$(ethtool -k $MACSEC_NETDEV)" 87 - 88 - ip link set $MACSEC_NETDEV type macsec offload mac 89 - TMP_FEATS_ON_2="$(ethtool -k $MACSEC_NETDEV)" 90 - 91 - [ "$TMP_FEATS_ON_1" = "$TMP_FEATS_ON_2" ] 92 - check $? 93 - 94 - ip link del $MACSEC_NETDEV 95 - 96 - ip link add link $NSIM_NETDEV $MACSEC_NETDEV type macsec 97 - check $? 98 - 99 - TMP_FEATS_OFF_2="$(ethtool -k $MACSEC_NETDEV)" 100 - [ "$TMP_FEATS_OFF_1" = "$TMP_FEATS_OFF_2" ] 101 - check $? 102 - 103 - ip link set $MACSEC_NETDEV type macsec offload mac 104 - check $? 105 - 106 - TMP_FEATS_ON_3="$(ethtool -k $MACSEC_NETDEV)" 107 - [ "$TMP_FEATS_ON_1" = "$TMP_FEATS_ON_3" ] 108 - check $? 109 - 110 - 111 - if [ $num_errors -eq 0 ]; then 112 - echo "PASSED all $((num_passes)) checks" 113 - exit 0 114 - else 115 - echo "FAILED $num_errors/$((num_errors+num_passes)) checks" 116 - exit 1 117 - fi