Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Trivial conflict in net/netfilter/nf_tables_api.c.

Duplicate fix in tools/testing/selftests/net/devlink_port_split.py
- take the net-next version.

skmsg, and L4 bpf - keep the bpf code but remove the flags
and err params.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+704 -387
+1
MAINTAINERS
···
 W:  http://www.iptables.org/
 W:  http://www.nftables.org/
 Q:  http://patchwork.ozlabs.org/project/netfilter-devel/list/
+C:  irc://irc.libera.chat/netfilter
 T:  git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf.git
 T:  git git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next.git
 F:  include/linux/netfilter*
+13 -11
drivers/atm/nicstar.c
···
     /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */
     writel(0x00000000, card->membase + VPM);
 
+    card->intcnt = 0;
+    if (request_irq
+        (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
+        pr_err("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
+        error = 9;
+        ns_init_card_error(card, error);
+        return error;
+    }
+
     /* Initialize TSQ */
     card->tsq.org = dma_alloc_coherent(&card->pcidev->dev,
                                        NS_TSQSIZE + NS_TSQ_ALIGNMENT,
···
 
     card->efbie = 1;
 
-    card->intcnt = 0;
-    if (request_irq
-        (pcidev->irq, &ns_irq_handler, IRQF_SHARED, "nicstar", card) != 0) {
-        printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq);
-        error = 9;
-        ns_init_card_error(card, error);
-        return error;
-    }
-
     /* Register device */
     card->atmdev = atm_dev_register("nicstar", &card->pcidev->dev, &atm_ops,
                                     -1, NULL);
···
             dev_kfree_skb_any(hb);
     }
     if (error >= 12) {
-        kfree(card->rsq.org);
+        dma_free_coherent(&card->pcidev->dev, NS_RSQSIZE + NS_RSQ_ALIGNMENT,
+                          card->rsq.org, card->rsq.dma);
     }
     if (error >= 11) {
-        kfree(card->tsq.org);
+        dma_free_coherent(&card->pcidev->dev, NS_TSQSIZE + NS_TSQ_ALIGNMENT,
+                          card->tsq.org, card->tsq.dma);
     }
     if (error >= 10) {
         free_irq(card->pcidev->irq, card);
+8
drivers/net/bonding/bond_main.c
···
     int link_reporting;
     int res = 0, i;
 
+    if (slave_dev->flags & IFF_MASTER &&
+        !netif_is_bond_master(slave_dev)) {
+        NL_SET_ERR_MSG(extack, "Device with IFF_MASTER cannot be enslaved");
+        netdev_err(bond_dev,
+                   "Error: Device with IFF_MASTER cannot be enslaved\n");
+        return -EPERM;
+    }
+
     if (!bond->params.use_carrier &&
         slave_dev->ethtool_ops->get_link == NULL &&
         slave_ops->ndo_do_ioctl == NULL) {
+2 -2
drivers/net/can/peak_canfd/peak_canfd.c
···
         return err;
     }
 
-    /* start network queue (echo_skb array is empty) */
-    netif_start_queue(ndev);
+    /* wake network queue up (echo_skb array is empty) */
+    netif_wake_queue(ndev);
 
     return 0;
 }
+2 -1
drivers/net/can/usb/ems_usb.c
···
 
     if (dev) {
         unregister_netdev(dev->netdev);
-        free_candev(dev->netdev);
 
         unlink_all_urbs(dev);
 
···
 
         kfree(dev->intr_in_buffer);
         kfree(dev->tx_msg_buffer);
+
+        free_candev(dev->netdev);
     }
 }
 
+3 -3
drivers/net/dsa/mv88e6xxx/chip.c
···
     struct mv88e6xxx_vtu_entry vlan;
     int i, err;
 
-    if (!vid)
-        return -EOPNOTSUPP;
-
     /* DSA and CPU ports have to be members of multiple vlans */
     if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port))
         return 0;
···
     bool warn;
     u8 member;
     int err;
+
+    if (!vlan->vid)
+        return 0;
 
     err = mv88e6xxx_port_vlan_prepare(ds, port, vlan);
     if (err)
+6
drivers/net/dsa/sja1105/sja1105_main.c
···
 {
     int rc = 0, i;
 
+    /* The credit based shapers are only allocated if
+     * CONFIG_NET_SCH_CBS is enabled.
+     */
+    if (!priv->cbs)
+        return 0;
+
     for (i = 0; i < priv->info->num_cbs_shapers; i++) {
         struct sja1105_cbs_entry *cbs = &priv->cbs[i];
 
+2 -1
drivers/net/ethernet/aeroflex/greth.c
···
     mdiobus_unregister(greth->mdio);
 
     unregister_netdev(ndev);
-    free_netdev(ndev);
 
     of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
+
+    free_netdev(ndev);
 
     return 0;
 }
+2 -2
drivers/net/ethernet/aquantia/atlantic/aq_macsec.h
···
     u32 hw_sc_idx;
     unsigned long tx_sa_idx_busy;
     const struct macsec_secy *sw_secy;
-    u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+    u8 tx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
     struct aq_macsec_tx_sc_stats stats;
     struct aq_macsec_tx_sa_stats tx_sa_stats[MACSEC_NUM_AN];
 };
···
     unsigned long rx_sa_idx_busy;
     const struct macsec_secy *sw_secy;
     const struct macsec_rx_sc *sw_rxsc;
-    u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_KEYID_LEN];
+    u8 rx_sa_key[MACSEC_NUM_AN][MACSEC_MAX_KEY_LEN];
     struct aq_macsec_rx_sa_stats rx_sa_stats[MACSEC_NUM_AN];
 };
 
+3 -3
drivers/net/ethernet/broadcom/bcm4908_enet.c
···
     if (!ring->slots)
         goto err_free_buf_descs;
 
-    ring->read_idx = 0;
-    ring->write_idx = 0;
-
     return 0;
 
 err_free_buf_descs:
···
 
     enet_write(enet, ring->st_ram_block + ENET_DMA_CH_STATE_RAM_BASE_DESC_PTR,
                (uint32_t)ring->dma_addr);
+
+    ring->read_idx = 0;
+    ring->write_idx = 0;
 }
 
 static void bcm4908_enet_dma_uninit(struct bcm4908_enet *enet)
+4 -2
drivers/net/ethernet/emulex/benet/be_cmds.c
···
     int num = 0, status = 0;
     struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
 
-    spin_lock_bh(&adapter->mcc_cq_lock);
+    spin_lock(&adapter->mcc_cq_lock);
 
     while ((compl = be_mcc_compl_get(adapter))) {
         if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
···
     if (num)
         be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
 
-    spin_unlock_bh(&adapter->mcc_cq_lock);
+    spin_unlock(&adapter->mcc_cq_lock);
     return status;
 }
 
···
     if (be_check_error(adapter, BE_ERROR_ANY))
         return -EIO;
 
+    local_bh_disable();
     status = be_process_mcc(adapter);
+    local_bh_enable();
 
     if (atomic_read(&mcc_obj->q.used) == 0)
         break;
+2
drivers/net/ethernet/emulex/benet/be_main.c
···
      * mcc completions
      */
     if (!netif_running(adapter->netdev)) {
+        local_bh_disable();
         be_process_mcc(adapter);
+        local_bh_enable();
         goto reschedule;
     }
 
+3 -4
drivers/net/ethernet/ezchip/nps_enet.c
···
 
     /* Get IRQ number */
     priv->irq = platform_get_irq(pdev, 0);
-    if (!priv->irq) {
+    if (priv->irq < 0) {
         dev_err(dev, "failed to retrieve <irq Rx-Tx> value from device tree\n");
         err = -ENODEV;
         goto out_netdev;
···
 out_netif_api:
     netif_napi_del(&priv->napi);
 out_netdev:
-    if (err)
-        free_netdev(ndev);
+    free_netdev(ndev);
 
     return err;
 }
···
     struct nps_enet_priv *priv = netdev_priv(ndev);
 
     unregister_netdev(ndev);
-    free_netdev(ndev);
     netif_napi_del(&priv->napi);
+    free_netdev(ndev);
 
     return 0;
 }
+2 -2
drivers/net/ethernet/google/gve/gve_main.c
···
 
     gve_write_version(&reg_bar->driver_version);
     /* Get max queues to alloc etherdev */
-    max_rx_queues = ioread32be(&reg_bar->max_tx_queues);
-    max_tx_queues = ioread32be(&reg_bar->max_rx_queues);
+    max_tx_queues = ioread32be(&reg_bar->max_tx_queues);
+    max_rx_queues = ioread32be(&reg_bar->max_rx_queues);
     /* Alloc and setup the netdev and priv */
     dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
     if (!dev) {
+77 -24
drivers/net/ethernet/ibm/ibmvnic.c
···
 static int __ibmvnic_set_mac(struct net_device *, u8 *);
 static int init_crq_queue(struct ibmvnic_adapter *adapter);
 static int send_query_phys_parms(struct ibmvnic_adapter *adapter);
+static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
+                                         struct ibmvnic_sub_crq_queue *tx_scrq);
 
 struct ibmvnic_stat {
     char name[ETH_GSTRING_LEN];
···
     mutex_lock(&adapter->fw_lock);
     adapter->fw_done_rc = 0;
     reinit_completion(&adapter->fw_done);
-    rc = send_request_map(adapter, ltb->addr,
-                          ltb->size, ltb->map_id);
+
+    rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
     if (rc) {
-        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-        mutex_unlock(&adapter->fw_lock);
-        return rc;
+        dev_err(dev, "send_request_map failed, rc = %d\n", rc);
+        goto out;
     }
 
     rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
···
         dev_err(dev,
                 "Long term map request aborted or timed out,rc = %d\n",
                 rc);
-        dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-        mutex_unlock(&adapter->fw_lock);
-        return rc;
+        goto out;
     }
 
     if (adapter->fw_done_rc) {
         dev_err(dev, "Couldn't map long term buffer,rc = %d\n",
                 adapter->fw_done_rc);
+        rc = -1;
+        goto out;
+    }
+    rc = 0;
+out:
+    if (rc) {
         dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
-        mutex_unlock(&adapter->fw_lock);
-        return -1;
+        ltb->buff = NULL;
     }
     mutex_unlock(&adapter->fw_lock);
-    return 0;
+    return rc;
 }
 
 static void free_long_term_buff(struct ibmvnic_adapter *adapter,
···
         adapter->reset_reason != VNIC_RESET_TIMEOUT)
         send_request_unmap(adapter, ltb->map_id);
     dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
+    ltb->buff = NULL;
+    ltb->map_id = 0;
 }
 
-static int reset_long_term_buff(struct ibmvnic_long_term_buff *ltb)
+static int reset_long_term_buff(struct ibmvnic_adapter *adapter,
+                                struct ibmvnic_long_term_buff *ltb)
 {
-    if (!ltb->buff)
-        return -EINVAL;
+    struct device *dev = &adapter->vdev->dev;
+    int rc;
 
     memset(ltb->buff, 0, ltb->size);
+
+    mutex_lock(&adapter->fw_lock);
+    adapter->fw_done_rc = 0;
+
+    reinit_completion(&adapter->fw_done);
+    rc = send_request_map(adapter, ltb->addr, ltb->size, ltb->map_id);
+    if (rc) {
+        mutex_unlock(&adapter->fw_lock);
+        return rc;
+    }
+
+    rc = ibmvnic_wait_for_completion(adapter, &adapter->fw_done, 10000);
+    if (rc) {
+        dev_info(dev,
+                 "Reset failed, long term map request timed out or aborted\n");
+        mutex_unlock(&adapter->fw_lock);
+        return rc;
+    }
+
+    if (adapter->fw_done_rc) {
+        dev_info(dev,
+                 "Reset failed, attempting to free and reallocate buffer\n");
+        free_long_term_buff(adapter, ltb);
+        mutex_unlock(&adapter->fw_lock);
+        return alloc_long_term_buff(adapter, ltb, ltb->size);
+    }
+    mutex_unlock(&adapter->fw_lock);
     return 0;
 }
 
···
 
     rx_scrq = adapter->rx_scrq[pool->index];
     ind_bufp = &rx_scrq->ind_buf;
-    for (i = 0; i < count; ++i) {
+
+    /* netdev_skb_alloc() could have failed after we saved a few skbs
+     * in the indir_buf and we would not have sent them to VIOS yet.
+     * To account for them, start the loop at ind_bufp->index rather
+     * than 0. If we pushed all the skbs to VIOS, ind_bufp->index will
+     * be 0.
+     */
+    for (i = ind_bufp->index; i < count; ++i) {
         skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
         if (!skb) {
             dev_err(dev, "Couldn't replenish rx buff\n");
···
                                            rx_pool->size *
                                            rx_pool->buff_size);
     } else {
-        rc = reset_long_term_buff(&rx_pool->long_term_buff);
+        rc = reset_long_term_buff(adapter,
+                                  &rx_pool->long_term_buff);
     }
 
     if (rc)
···
     return 0;
 }
 
-static int reset_one_tx_pool(struct ibmvnic_tx_pool *tx_pool)
+static int reset_one_tx_pool(struct ibmvnic_adapter *adapter,
+                             struct ibmvnic_tx_pool *tx_pool)
 {
     int rc, i;
 
-    rc = reset_long_term_buff(&tx_pool->long_term_buff);
+    rc = reset_long_term_buff(adapter, &tx_pool->long_term_buff);
     if (rc)
         return rc;
 
···
 
     tx_scrqs = adapter->num_active_tx_pools;
     for (i = 0; i < tx_scrqs; i++) {
-        rc = reset_one_tx_pool(&adapter->tso_pool[i]);
+        ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
+        rc = reset_one_tx_pool(adapter, &adapter->tso_pool[i]);
         if (rc)
             return rc;
-        rc = reset_one_tx_pool(&adapter->tx_pool[i]);
+        rc = reset_one_tx_pool(adapter, &adapter->tx_pool[i]);
         if (rc)
             return rc;
     }
···
 
     adapter->tso_pool = kcalloc(tx_subcrqs,
                                 sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
-    if (!adapter->tso_pool)
+    if (!adapter->tso_pool) {
+        kfree(adapter->tx_pool);
+        adapter->tx_pool = NULL;
         return -1;
+    }
 
     adapter->num_active_tx_pools = tx_subcrqs;
 
···
 
     netif_tx_start_all_queues(netdev);
 
+    if (prev_state == VNIC_CLOSED) {
+        for (i = 0; i < adapter->req_rx_queues; i++)
+            napi_schedule(&adapter->napi[i]);
+    }
+
     adapter->state = VNIC_OPEN;
     return rc;
 }
···
     ind_bufp->index = 0;
     if (atomic_sub_return(entries, &tx_scrq->used) <=
         (adapter->req_tx_entries_per_subcrq / 2) &&
-        __netif_subqueue_stopped(adapter->netdev, queue_num)) {
+        __netif_subqueue_stopped(adapter->netdev, queue_num) &&
+        !test_bit(0, &adapter->resetting)) {
         netif_wake_subqueue(adapter->netdev, queue_num);
         netdev_dbg(adapter->netdev, "Started queue %d\n",
                    queue_num);
···
         tx_send_failed++;
         tx_dropped++;
         ret = NETDEV_TX_OK;
-        ibmvnic_tx_scrq_flush(adapter, tx_scrq);
         goto out;
     }
 
···
 
             netdev_dbg(adapter->netdev, "Releasing tx_scrq[%d]\n",
                        i);
+            ibmvnic_tx_scrq_clean_buffer(adapter, adapter->tx_scrq[i]);
             if (adapter->tx_scrq[i]->irq) {
                 free_irq(adapter->tx_scrq[i]->irq,
                          adapter->tx_scrq[i]);
···
         /* H_EOI would fail with rc = H_FUNCTION when running
          * in XIVE mode which is expected, but not an error.
          */
-        if (rc && rc != H_FUNCTION)
+        if (rc && (rc != H_FUNCTION))
             dev_err(dev, "H_EOI FAILED irq 0x%llx. rc=%ld\n",
                     val, rc);
     }
+12 -10
drivers/net/ethernet/intel/e1000e/netdev.c
···
         pm_runtime_resume(netdev->dev.parent);
 
     /* Checking if MAC is in DMoff state*/
-    pcim_state = er32(STATUS);
-    while (pcim_state & E1000_STATUS_PCIM_STATE) {
-        if (tries++ == dmoff_exit_timeout) {
-            e_dbg("Error in exiting dmoff\n");
-            break;
-        }
-        usleep_range(10000, 20000);
+    if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) {
         pcim_state = er32(STATUS);
+        while (pcim_state & E1000_STATUS_PCIM_STATE) {
+            if (tries++ == dmoff_exit_timeout) {
+                e_dbg("Error in exiting dmoff\n");
+                break;
+            }
+            usleep_range(10000, 20000);
+            pcim_state = er32(STATUS);
 
-        /* Checking if MAC exited DMoff state */
-        if (!(pcim_state & E1000_STATUS_PCIM_STATE))
-            e1000_phy_hw_reset(&adapter->hw);
+            /* Checking if MAC exited DMoff state */
+            if (!(pcim_state & E1000_STATUS_PCIM_STATE))
+                e1000_phy_hw_reset(&adapter->hw);
+        }
     }
 
     /* update snapshot of PHY registers on LSC */
+1 -2
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
···
     if (ethtool_link_ksettings_test_link_mode(&safe_ks,
                                               supported,
                                               Autoneg) &&
-        hw->phy.link_info.phy_type !=
-        I40E_PHY_TYPE_10GBASE_T) {
+        hw->phy.media_type != I40E_MEDIA_TYPE_BASET) {
         netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
         err = -EINVAL;
         goto done;
+13 -4
drivers/net/ethernet/intel/i40e/i40e_main.c
···
 static void i40e_handle_reset_warning(struct i40e_pf *pf, bool lock_acquired);
 static int i40e_add_vsi(struct i40e_vsi *vsi);
 static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
···
              dev_driver_string(&pf->pdev->dev),
              dev_name(&pf->pdev->dev));
         err = i40e_vsi_request_irq(vsi, int_name);
+        if (err)
+            goto err_setup_rx;
 
     } else {
         err = -EINVAL;
···
 #endif /* CONFIG_I40E_DCB */
     if (!lock_acquired)
         rtnl_lock();
-    ret = i40e_setup_pf_switch(pf, reinit);
+    ret = i40e_setup_pf_switch(pf, reinit, true);
     if (ret)
         goto end_unlock;
 
···
  * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
  * @pf: board private structure
  * @reinit: if the Main VSI needs to re-initialized.
+ * @lock_acquired: indicates whether or not the lock has been acquired
  *
  * Returns 0 on success, negative value on failure
  **/
-static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
+static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired)
 {
     u16 flags = 0;
     int ret;
···
 
     i40e_ptp_init(pf);
 
+    if (!lock_acquired)
+        rtnl_lock();
+
     /* repopulate tunnel port filters */
     udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev);
+
+    if (!lock_acquired)
+        rtnl_unlock();
 
     return ret;
 }
···
         pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
     }
 #endif
-    err = i40e_setup_pf_switch(pf, false);
+    err = i40e_setup_pf_switch(pf, false, false);
     if (err) {
         dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
         goto err_vsis;
+6 -2
drivers/net/ethernet/intel/i40e/i40e_ptp.c
···
  * operate with the nanosecond field directly without fear of overflow.
  *
  * Much like the 82599, the update period is dependent upon the link speed:
- * At 40Gb link or no link, the period is 1.6ns.
- * At 10Gb link, the period is multiplied by 2. (3.2ns)
+ * At 40Gb, 25Gb, or no link, the period is 1.6ns.
+ * At 10Gb or 5Gb link, the period is multiplied by 2. (3.2ns)
  * At 1Gb link, the period is multiplied by 20. (32ns)
  * 1588 functionality is not supported at 100Mbps.
  */
 #define I40E_PTP_40GB_INCVAL        0x0199999999ULL
 #define I40E_PTP_10GB_INCVAL_MULT   2
+#define I40E_PTP_5GB_INCVAL_MULT    2
 #define I40E_PTP_1GB_INCVAL_MULT    20
 
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
···
     switch (hw_link_info->link_speed) {
     case I40E_LINK_SPEED_10GB:
         mult = I40E_PTP_10GB_INCVAL_MULT;
+        break;
+    case I40E_LINK_SPEED_5GB:
+        mult = I40E_PTP_5GB_INCVAL_MULT;
         break;
     case I40E_LINK_SPEED_1GB:
         mult = I40E_PTP_1GB_INCVAL_MULT;
+4 -2
drivers/net/ethernet/microsoft/mana/mana_en.c
···
 
     cq->gdma_id = cq->gdma_cq->id;
 
-    if (WARN_ON(cq->gdma_id >= gc->max_num_cqs))
-        return -EINVAL;
+    if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
+        err = -EINVAL;
+        goto out;
+    }
 
     gc->cq_table[cq->gdma_id] = cq->gdma_cq;
 
+2 -1
drivers/net/ethernet/sfc/ef10.c
···
 
     /* If the parent PF has no VF data structure, it doesn't know about this
      * VF so fail probe. The VF needs to be re-created. This can happen
-     * if the PF driver is unloaded while the VF is assigned to a guest.
+     * if the PF driver was unloaded while any VF was assigned to a guest
+     * (using Xen, only).
      */
     pci_dev_pf = efx->pci_dev->physfn;
     if (pci_dev_pf) {
+17 -19
drivers/net/ethernet/sfc/ef10_sriov.c
···
         struct ef10_vf *vf = nic_data->vf + i;
 
         /* If VF is assigned, do not free the vport */
-        if (vf->pci_dev &&
-            vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+        if (vf->pci_dev && pci_is_dev_assigned(vf->pci_dev))
             continue;
 
         if (vf->vport_assigned) {
···
 
     return 0;
 fail:
-    efx_ef10_sriov_free_vf_vports(efx);
-    kfree(nic_data->vf);
-    nic_data->vf = NULL;
+    efx_ef10_sriov_free_vf_vswitching(efx);
     return rc;
 }
 
···
     return rc;
 }
 
+/* Disable SRIOV and remove VFs
+ * If some VFs are attached to a guest (using Xen, only) nothing is
+ * done if force=false, and vports are freed if force=true (for the non
+ * attachedc ones, only) but SRIOV is not disabled and VFs are not
+ * removed in either case.
+ */
 static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
 {
     struct pci_dev *dev = efx->pci_dev;
-    unsigned int vfs_assigned = 0;
-
-    vfs_assigned = pci_vfs_assigned(dev);
+    unsigned int vfs_assigned = pci_vfs_assigned(dev);
+    int rc = 0;
 
     if (vfs_assigned && !force) {
         netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
···
 
     if (!vfs_assigned)
         pci_disable_sriov(dev);
+    else
+        rc = -EBUSY;
 
     efx_ef10_sriov_free_vf_vswitching(efx);
     efx->vf_count = 0;
-    return 0;
+    return rc;
 }
 
 int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
···
 void efx_ef10_sriov_fini(struct efx_nic *efx)
 {
     struct efx_ef10_nic_data *nic_data = efx->nic_data;
-    unsigned int i;
     int rc;
 
     if (!nic_data->vf) {
-        /* Remove any un-assigned orphaned VFs */
+        /* Remove any un-assigned orphaned VFs. This can happen if the PF driver
+         * was unloaded while any VF was assigned to a guest (using Xen, only).
+         */
         if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
             pci_disable_sriov(efx->pci_dev);
         return;
     }
 
-    /* Remove any VFs in the host */
-    for (i = 0; i < efx->vf_count; ++i) {
-        struct efx_nic *vf_efx = nic_data->vf[i].efx;
-
-        if (vf_efx)
-            vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
-    }
-
+    /* Disable SRIOV and remove any VFs in the host */
     rc = efx_ef10_pci_sriov_disable(efx, true);
     if (rc)
         netif_dbg(efx, drv, efx->net_dev,
+9 -9
drivers/net/ethernet/ti/am65-cpsw-nuss.c
···
     for (i = 0; i < common->tx_ch_num; i++) {
         struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
 
-        if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
-            k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
         if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
             k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+        if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+            k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
 
         memset(tx_chn, 0, sizeof(*tx_chn));
     }
···
 
         netif_napi_del(&tx_chn->napi_tx);
 
-        if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
-            k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
-
         if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
             k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+        if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+            k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
 
         memset(tx_chn, 0, sizeof(*tx_chn));
     }
···
 
     rx_chn = &common->rx_chns;
 
-    if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
-        k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
-
     if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
         k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+    if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+        k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
 }
 
 static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+5
drivers/net/hyperv/netvsc_drv.c
···
     dev_hold(vf_netdev);
     rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
 
+    if (ndev->needed_headroom < vf_netdev->needed_headroom)
+        ndev->needed_headroom = vf_netdev->needed_headroom;
+
     vf_netdev->wanted_features = ndev->features;
     netdev_update_features(vf_netdev);
 
···
     netdev_upper_dev_unlink(vf_netdev, ndev);
     RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
     dev_put(vf_netdev);
+
+    ndev->needed_headroom = RNDIS_AND_PPI_SIZE;
 
     return NOTIFY_OK;
 }
+8 -3
drivers/net/ieee802154/mac802154_hwsim.c
···
     struct hwsim_edge *e;
     u32 v0, v1;
 
-    if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] &&
+    if (!info->attrs[MAC802154_HWSIM_ATTR_RADIO_ID] ||
         !info->attrs[MAC802154_HWSIM_ATTR_RADIO_EDGE])
         return -EINVAL;
 
···
 
     return 0;
 
+sub_fail:
+    hwsim_edge_unsubscribe_me(phy);
 me_fail:
     rcu_read_lock();
     list_for_each_entry_rcu(e, &phy->edges, list) {
···
         hwsim_free_edge(e);
     }
     rcu_read_unlock();
-sub_fail:
-    hwsim_edge_unsubscribe_me(phy);
     return -ENOMEM;
 }
 
···
 static void hwsim_del(struct hwsim_phy *phy)
 {
     struct hwsim_pib *pib;
+    struct hwsim_edge *e;
 
     hwsim_edge_unsubscribe_me(phy);
 
     list_del(&phy->list);
 
     rcu_read_lock();
+    list_for_each_entry_rcu(e, &phy->edges, list) {
+        list_del_rcu(&e->list);
+        hwsim_free_edge(e);
+    }
     pib = rcu_dereference(phy->pib);
     rcu_read_unlock();
 
+2 -2
drivers/net/macsec.c
···
         ctx.sa.rx_sa = rx_sa;
         ctx.secy = secy;
         memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
-               MACSEC_KEYID_LEN);
+               secy->key_len);
 
         err = macsec_offload(ops->mdo_add_rxsa, &ctx);
         if (err)
···
         ctx.sa.tx_sa = tx_sa;
         ctx.secy = secy;
         memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
-               MACSEC_KEYID_LEN);
+               secy->key_len);
 
         err = macsec_offload(ops->mdo_add_txsa, &ctx);
         if (err)
+29 -1
drivers/net/phy/at803x.c
···
     regulator_disable(priv->vddio);
 }
 
+static int at803x_get_features(struct phy_device *phydev)
+{
+    int err;
+
+    err = genphy_read_abilities(phydev);
+    if (err)
+        return err;
+
+    if (!at803x_match_phy_id(phydev, ATH8031_PHY_ID))
+        return 0;
+
+    /* AR8031/AR8033 have different status registers
+     * for copper and fiber operation. However, the
+     * extended status register is the same for both
+     * operation modes.
+     *
+     * As a result of that, ESTATUS_1000_XFULL is set
+     * to 1 even when operating in copper TP mode.
+     *
+     * Remove this mode from the supported link modes,
+     * as this driver currently only supports copper
+     * operation.
+     */
+    linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+                       phydev->supported);
+    return 0;
+}
+
 static int at803x_smarteee_config(struct phy_device *phydev)
 {
     struct at803x_priv *priv = phydev->priv;
···
     .resume = at803x_resume,
     .read_page = at803x_read_page,
     .write_page = at803x_write_page,
-    /* PHY_GBIT_FEATURES */
+    .get_features = at803x_get_features,
     .read_status = at803x_read_status,
     .config_intr = &at803x_config_intr,
     .handle_interrupt = at803x_handle_interrupt,
+1 -1
drivers/net/phy/mscc/mscc_macsec.c
···
 }
 
 /* Derive the AES key to get a key for the hash autentication */
-static int vsc8584_macsec_derive_key(const u8 key[MACSEC_KEYID_LEN],
+static int vsc8584_macsec_derive_key(const u8 key[MACSEC_MAX_KEY_LEN],
                                      u16 key_len, u8 hkey[16])
 {
     const u8 input[AES_BLOCK_SIZE] = {0};
+1 -1
drivers/net/phy/mscc/mscc_macsec.h
···
     /* Highest takes precedence [0..15] */
     u8 priority;
 
-    u8 key[MACSEC_KEYID_LEN];
+    u8 key[MACSEC_MAX_KEY_LEN];
 
     union {
         struct macsec_rx_sa *rx_sa;
+7 -7
drivers/net/vrf.c
···
     int orig_iif = skb->skb_iif;
     bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
     bool is_ndisc = ipv6_ndisc_frame(skb);
-    bool is_ll_src;
 
     /* loopback, multicast & non-ND link-local traffic; do not push through
      * packet taps again. Reset pkt_type for upper layers to process skb.
-     * for packets with lladdr src, however, skip so that the dst can be
-     * determine at input using original ifindex in the case that daddr
-     * needs strict
+     * For strict packets with a source LLA, determine the dst using the
+     * original ifindex.
      */
-    is_ll_src = ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL;
-    if (skb->pkt_type == PACKET_LOOPBACK ||
-        (need_strict && !is_ndisc && !is_ll_src)) {
+    if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
         skb->dev = vrf_dev;
         skb->skb_iif = vrf_dev->ifindex;
         IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
+
         if (skb->pkt_type == PACKET_LOOPBACK)
             skb->pkt_type = PACKET_HOST;
+        else if (ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)
+            vrf_ip6_input_dst(skb, vrf_dev, orig_iif);
+
         goto out;
     }
 
+2
drivers/net/vxlan.c
···
     struct neighbour *n;
     struct nd_msg *msg;
 
+    rcu_read_lock();
     in6_dev = __in6_dev_get(dev);
     if (!in6_dev)
         goto out;
···
     }
 
 out:
+    rcu_read_unlock();
     consume_skb(skb);
     return NETDEV_TX_OK;
 }
+10 -3
drivers/net/wireless/marvell/mwifiex/main.c
···
         if (!priv)
             continue;
         rtnl_lock();
-        wiphy_lock(adapter->wiphy);
         if (priv->netdev &&
-            priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED)
+            priv->wdev.iftype != NL80211_IFTYPE_UNSPECIFIED) {
+            /*
+             * Close the netdev now, because if we do it later, the
+             * netdev notifiers will need to acquire the wiphy lock
+             * again --> deadlock.
+             */
+            dev_close(priv->wdev.netdev);
+            wiphy_lock(adapter->wiphy);
             mwifiex_del_virtual_intf(adapter->wiphy, &priv->wdev);
-        wiphy_unlock(adapter->wiphy);
+            wiphy_unlock(adapter->wiphy);
+        }
         rtnl_unlock();
     }
 
+1 -1
include/linux/netdevice.h
···
         return NET_RX_DROP;
     }
 
-    skb_scrub_packet(skb, true);
+    skb_scrub_packet(skb, !net_eq(dev_net(dev), dev_net(skb->dev)));
     skb->priority = 0;
     return 0;
 }
-1
include/linux/skmsg.h
···
                               struct sk_msg *msg, u32 bytes);
 int sk_msg_memcopy_from_iter(struct sock *sk, struct iov_iter *from,
                              struct sk_msg *msg, u32 bytes);
-int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo);
 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                    int len, int flags);
 
+7 -5
include/net/flow_offload.h
···
     if (flow_offload_has_one_action(action))
         return true;
 
-    flow_action_for_each(i, action_entry, action) {
-        if (i && action_entry->hw_stats != last_hw_stats) {
-            NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
-            return false;
+    if (action) {
+        flow_action_for_each(i, action_entry, action) {
+            if (i && action_entry->hw_stats != last_hw_stats) {
+                NL_SET_ERR_MSG_MOD(extack, "Mixing HW stats types for actions is not supported");
+                return false;
+            }
+            last_hw_stats = action_entry->hw_stats;
         }
-        last_hw_stats = action_entry->hw_stats;
     }
     return true;
 }
+8 -4
include/net/ip.h
···
 #include <net/flow.h>
 #include <net/flow_dissector.h>
 #include <net/netns/hash.h>
+#include <net/lwtunnel.h>
 
 #define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
 #define IPV4_MIN_MTU  68     /* RFC 791 */
···
 
     /* 'forwarding = true' case should always honour route mtu */
     mtu = dst_metric_raw(dst, RTAX_MTU);
-    if (mtu)
-        return mtu;
+    if (!mtu)
+        mtu = min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
 
-    return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+    return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
 }
 
 static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
                                           const struct sk_buff *skb)
 {
+    unsigned int mtu;
+
     if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
         bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
 
         return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
     }
 
-    return min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+    mtu = min(READ_ONCE(skb_dst(skb)->dev->mtu), IP_MAX_MTU);
+    return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
 }
 
 struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx,
+12 -4
include/net/ip6_route.h
···
 
 static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
 {
+    int mtu;
+
     struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
                             inet6_sk(skb->sk) : NULL;
 
-    return (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) ?
-           skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
+    if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
+        mtu = READ_ONCE(skb_dst(skb)->dev->mtu);
+        mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
+    } else
+        mtu = dst_mtu(skb_dst(skb));
+
+    return mtu;
 }
 
 static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
···
     if (dst_metric_locked(dst, RTAX_MTU)) {
         mtu = dst_metric_raw(dst, RTAX_MTU);
         if (mtu)
-            return mtu;
+            goto out;
     }
 
     mtu = IPV6_MIN_MTU;
···
         mtu = idev->cnf.mtu6;
     rcu_read_unlock();
 
-    return mtu;
+out:
+    return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
 }
 
 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
+1 -1
include/net/macsec.h
···
     struct macsec_rx_sc *rx_sc;
     struct {
         unsigned char assoc_num;
-        u8 key[MACSEC_KEYID_LEN];
+        u8 key[MACSEC_MAX_KEY_LEN];
         union {
             struct macsec_rx_sa *rx_sa;
             struct macsec_tx_sa *tx_sa;
+12
include/net/sch_generic.h
···
     if (spin_trylock(&qdisc->seqlock))
         return true;
 
+    /* Paired with smp_mb__after_atomic() to make sure
+     * STATE_MISSED checking is synchronized with clearing
+     * in pfifo_fast_dequeue().
+     */
+    smp_mb__before_atomic();
+
     /* If the MISSED flag is set, it means other thread has
      * set the MISSED flag before second spin_trylock(), so
      * we can return false here to avoid multi cpus doing
···
      * dequeuing.
      */
     set_bit(__QDISC_STATE_MISSED, &qdisc->state);
+
+    /* spin_trylock() only has load-acquire semantic, so use
+     * smp_mb__after_atomic() to ensure STATE_MISSED is set
+     * before doing the second spin_trylock().
+     */
+    smp_mb__after_atomic();
 
     /* Retry again in case other CPU may not see the new flag
      * after it releases the lock at the end of qdisc_run_end().
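Note: the two comments added above describe a flag-then-retry handshake around a trylock. As a rough userspace analogue (a minimal C11 sketch with illustrative names, not the kernel's qdisc code), the explicit fences play the role of smp_mb__before_atomic()/smp_mb__after_atomic():

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag seqlock = ATOMIC_FLAG_INIT; /* stands in for qdisc->seqlock */
static atomic_bool missed;                     /* stands in for __QDISC_STATE_MISSED */

static bool run_begin_sketch(void)
{
    if (!atomic_flag_test_and_set_explicit(&seqlock, memory_order_acquire))
        return true;                           /* got the lock on the fast path */

    /* Full fence so the MISSED check below is ordered against the failed
     * trylock above (the kernel's smp_mb__before_atomic()). */
    atomic_thread_fence(memory_order_seq_cst);
    if (atomic_load(&missed))
        return false;                          /* current owner will re-run for us */

    atomic_store(&missed, true);
    /* Full fence so MISSED is visible before the retry (the kernel's
     * smp_mb__after_atomic()); trylock alone is only load-acquire. */
    atomic_thread_fence(memory_order_seq_cst);
    return !atomic_flag_test_and_set_explicit(&seqlock, memory_order_acquire);
}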
+1 -1
include/net/sctp/structs.h
···
                      int saddr);
     void (*from_sk) (union sctp_addr *,
                      struct sock *sk);
-    void (*from_addr_param) (union sctp_addr *,
+    bool (*from_addr_param) (union sctp_addr *,
                      union sctp_addr_param *,
                      __be16 port, int iif);
     int (*to_addr_param) (const union sctp_addr *,
+1 -1
include/net/xfrm.h
···
 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
 int xfrm_init_replay(struct xfrm_state *x);
+u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu);
 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
 int xfrm_init_state(struct xfrm_state *x);
···
             int encap_type);
 int xfrm4_transport_finish(struct sk_buff *skb, int async);
 int xfrm4_rcv(struct sk_buff *skb);
-int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
 
 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
 {
+7 -2
include/net/xsk_buff_pool.h
···
 {
     bool cross_pg = (addr & (PAGE_SIZE - 1)) + len > PAGE_SIZE;
 
-    if (pool->dma_pages_cnt && cross_pg) {
+    if (likely(!cross_pg))
+        return false;
+
+    if (pool->dma_pages_cnt) {
         return !(pool->dma_pages[addr >> PAGE_SHIFT] &
                  XSK_NEXT_PG_CONTIG_MASK);
     }
-    return false;
+
+    /* skb path */
+    return addr + len > pool->addrs_cnt;
 }
 
 static inline u64 xp_aligned_extract_addr(struct xsk_buff_pool *pool, u64 addr)
+2 -2
kernel/bpf/devmap.c
···
     int i;
     struct hlist_head *hash;
 
-    hash = bpf_map_area_alloc(entries * sizeof(*hash), numa_node);
+    hash = bpf_map_area_alloc((u64) entries * sizeof(*hash), numa_node);
     if (hash != NULL)
         for (i = 0; i < entries; i++)
             INIT_HLIST_HEAD(&hash[i]);
···
 
         spin_lock_init(&dtab->index_lock);
     } else {
-        dtab->netdev_map = bpf_map_area_alloc(dtab->map.max_entries *
+        dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries *
                                               sizeof(struct bpf_dtab_netdev *),
                                               dtab->map.numa_node);
         if (!dtab->netdev_map)
+1 -1
kernel/bpf/inode.c
···
         return PTR_ERR(raw);
 
     if (type == BPF_TYPE_PROG)
-        ret = (f_flags != O_RDWR) ? -EINVAL : bpf_prog_new_fd(raw);
+        ret = bpf_prog_new_fd(raw);
     else if (type == BPF_TYPE_MAP)
         ret = bpf_map_new_fd(raw, f_flags);
     else if (type == BPF_TYPE_LINK)
+4 -2
kernel/bpf/verifier.c
···
     }
 }
 
-static void adjust_poke_descs(struct bpf_prog *prog, u32 len)
+static void adjust_poke_descs(struct bpf_prog *prog, u32 off, u32 len)
 {
     struct bpf_jit_poke_descriptor *tab = prog->aux->poke_tab;
     int i, sz = prog->aux->size_poke_tab;
···
 
     for (i = 0; i < sz; i++) {
         desc = &tab[i];
+        if (desc->insn_idx <= off)
+            continue;
         desc->insn_idx += len - 1;
     }
 }
···
     if (adjust_insn_aux_data(env, new_prog, off, len))
         return NULL;
     adjust_subprog_starts(env, off, len);
-    adjust_poke_descs(new_prog, len);
+    adjust_poke_descs(new_prog, off, len);
     return new_prog;
 }
 
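Note: the fix above only shifts poke descriptors that sit past the patched instruction; indices at or before the patch site must stay put. A toy standalone illustration of that adjustment (hypothetical helper, not verifier code):

#include <stdio.h>

/* After replacing the single instruction at 'off' with 'len' instructions,
 * every recorded index beyond 'off' moves by len - 1; earlier ones are
 * untouched, which is exactly what the hunk adds.
 */
static void adjust_descs(int *insn_idx, int n, int off, int len)
{
    for (int i = 0; i < n; i++) {
        if (insn_idx[i] <= off)
            continue;            /* before/at the patch site: unchanged */
        insn_idx[i] += len - 1;
    }
}

int main(void)
{
    int idx[] = { 3, 10, 42 };

    adjust_descs(idx, 3, 10, 4);     /* patch at 10 expands 1 -> 4 insns */
    printf("%d %d %d\n", idx[0], idx[1], idx[2]);   /* prints: 3 10 45 */
    return 0;
}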
+6 -1
net/can/bcm.c
···
                           bcm_rx_handler, op);
 
         list_del(&op->list);
+        synchronize_rcu();
         bcm_remove_op(op);
         return 1; /* done */
     }
···
                           REGMASK(op->can_id),
                           bcm_rx_handler, op);
 
-        bcm_remove_op(op);
     }
+
+    synchronize_rcu();
+
+    list_for_each_entry_safe(op, next, &bo->rx_ops, list)
+        bcm_remove_op(op);
 
 #if IS_ENABLED(CONFIG_PROC_FS)
     /* remove procfs entry */
+3
net/can/gw.c
···
     if (gwj->src.dev == dev || gwj->dst.dev == dev) {
         hlist_del(&gwj->list);
         cgw_unregister_filter(net, gwj);
+        synchronize_rcu();
         kmem_cache_free(cgw_cache, gwj);
     }
 }
···
     hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
         hlist_del(&gwj->list);
         cgw_unregister_filter(net, gwj);
+        synchronize_rcu();
         kmem_cache_free(cgw_cache, gwj);
     }
 }
···
 
         hlist_del(&gwj->list);
         cgw_unregister_filter(net, gwj);
+        synchronize_rcu();
         kmem_cache_free(cgw_cache, gwj);
         err = 0;
         break;
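Note: all three hunks enforce the same ordering: unlink and unregister first, wait one RCU grace period, only then free the object. A minimal userspace sketch of that ordering with liburcu (an analogue under assumed names, not the kernel API):

#include <urcu.h>           /* liburcu: rcu_read_lock(), synchronize_rcu() */
#include <urcu/rculist.h>   /* cds_list_del_rcu() */
#include <stdlib.h>

struct gw_job {
    struct cds_list_head list;
    int filter_id;
};

/* Readers walking the list under rcu_read_lock() may still hold a pointer
 * to gwj after the unlink, so the free must wait for a grace period --
 * the same reason the hunks insert synchronize_rcu() before
 * kmem_cache_free().
 */
static void remove_job(struct gw_job *gwj)
{
    cds_list_del_rcu(&gwj->list);
    synchronize_rcu();
    free(gwj);
}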
+4 -3
net/can/isotp.c
···
 
     lock_sock(sk);
 
-    hrtimer_cancel(&so->txtimer);
-    hrtimer_cancel(&so->rxtimer);
-
     /* remove current filters & unregister */
     if (so->bound && (!(so->opt.flags & CAN_ISOTP_SF_BROADCAST))) {
         if (so->ifindex) {
···
                               SINGLE_MASK(so->rxid),
                               isotp_rcv, sk);
             dev_put(dev);
+            synchronize_rcu();
         }
     }
 
+    hrtimer_cancel(&so->txtimer);
+    hrtimer_cancel(&so->rxtimer);
+
     so->ifindex = 0;
     so->bound = 0;
+4
net/can/j1939/main.c
···
     can_rx_unregister(dev_net(ndev), ndev, J1939_CAN_ID, J1939_CAN_MASK,
                       j1939_can_recv, priv);
 
+    /* The last reference of priv is dropped by the RCU deferred
+     * j1939_sk_sock_destruct() of the last socket, so we can
+     * safely drop this reference here.
+     */
     j1939_priv_put(priv);
 }
 
+4 -1
net/can/j1939/socket.c
···
     atomic_set(&jsk->skb_pending, 0);
     spin_lock_init(&jsk->sk_session_queue_lock);
     INIT_LIST_HEAD(&jsk->sk_session_queue);
+
+    /* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
+    sock_set_flag(sk, SOCK_RCU_FREE);
     sk->sk_destruct = j1939_sk_sock_destruct;
     sk->sk_protocol = CAN_J1939;
 
···
 
     switch (optname) {
     case SO_J1939_FILTER:
-        if (!sockptr_is_null(optval)) {
+        if (!sockptr_is_null(optval) && optlen != 0) {
             struct j1939_filter *f;
             int c;
 
+2 -2
net/core/dev.c
···
     if (static_branch_unlikely(&generic_xdp_needed_key)) {
         int ret2;
 
-        preempt_disable();
+        migrate_disable();
         ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb);
-        preempt_enable();
+        migrate_enable();
 
         if (ret2 != XDP_PASS) {
             ret = NET_RX_DROP;
+39 -42
net/core/skmsg.c
···
 }
 EXPORT_SYMBOL_GPL(sk_msg_memcopy_from_iter);
 
-int sk_msg_wait_data(struct sock *sk, struct sk_psock *psock, long timeo)
-{
-    DEFINE_WAIT_FUNC(wait, woken_wake_function);
-    int ret = 0;
-
-    if (sk->sk_shutdown & RCV_SHUTDOWN)
-        return 1;
-
-    if (!timeo)
-        return ret;
-
-    add_wait_queue(sk_sleep(sk), &wait);
-    sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-    ret = sk_wait_event(sk, &timeo,
-                        !list_empty(&psock->ingress_msg) ||
-                        !skb_queue_empty(&sk->sk_receive_queue), &wait);
-    sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
-    remove_wait_queue(sk_sleep(sk), &wait);
-    return ret;
-}
-EXPORT_SYMBOL_GPL(sk_msg_wait_data);
-
 /* Receive sk_msg from psock->ingress_msg to @msg. */
 int sk_msg_recvmsg(struct sock *sk, struct sk_psock *psock, struct msghdr *msg,
                    int len, int flags)
···
         return sk_psock_skb_ingress(psock, skb);
 }
 
+static void sock_drop(struct sock *sk, struct sk_buff *skb)
+{
+    sk_drops_add(sk, skb);
+    kfree_skb(skb);
+}
+
 static void sk_psock_backlog(struct work_struct *work)
 {
     struct sk_psock *psock = container_of(work, struct sk_psock, work);
···
             /* Hard errors break pipe and stop xmit. */
             sk_psock_report_error(psock, ret ? -ret : EPIPE);
             sk_psock_clear_state(psock, SK_PSOCK_TX_ENABLED);
-            kfree_skb(skb);
+            sock_drop(psock->sk, skb);
             goto end;
         }
         off += ret;
···
 
     while ((skb = skb_dequeue(&psock->ingress_skb)) != NULL) {
         skb_bpf_redirect_clear(skb);
-        kfree_skb(skb);
+        sock_drop(psock->sk, skb);
     }
     __sk_psock_purge_ingress_msg(psock);
 }
···
 }
 EXPORT_SYMBOL_GPL(sk_psock_msg_verdict);
 
-static void sk_psock_skb_redirect(struct sk_buff *skb)
+static int sk_psock_skb_redirect(struct sk_psock *from, struct sk_buff *skb)
 {
     struct sk_psock *psock_other;
     struct sock *sk_other;
···
      * return code, but then didn't set a redirect interface.
      */
     if (unlikely(!sk_other)) {
-        kfree_skb(skb);
-        return;
+        sock_drop(from->sk, skb);
+        return -EIO;
     }
     psock_other = sk_psock(sk_other);
     /* This error indicates the socket is being torn down or had another
      * error that caused the pipe to break. We can't send a packet on
      * a socket that is in this state so we drop the skb.
      */
     if (!psock_other || sock_flag(sk_other, SOCK_DEAD)) {
-        kfree_skb(skb);
-        return;
+        skb_bpf_redirect_clear(skb);
+        sock_drop(from->sk, skb);
+        return -EIO;
     }
     spin_lock_bh(&psock_other->ingress_lock);
     if (!sk_psock_test_state(psock_other, SK_PSOCK_TX_ENABLED)) {
         spin_unlock_bh(&psock_other->ingress_lock);
-        kfree_skb(skb);
-        return;
+        skb_bpf_redirect_clear(skb);
+        sock_drop(from->sk, skb);
+        return -EIO;
     }
 
     skb_queue_tail(&psock_other->ingress_skb, skb);
     schedule_work(&psock_other->work);
     spin_unlock_bh(&psock_other->ingress_lock);
+    return 0;
 }
 
-static void sk_psock_tls_verdict_apply(struct sk_buff *skb, struct sock *sk, int verdict)
+static void sk_psock_tls_verdict_apply(struct sk_buff *skb,
+                                       struct sk_psock *from, int verdict)
 {
     switch (verdict) {
     case __SK_REDIRECT:
-        sk_psock_skb_redirect(skb);
+        sk_psock_skb_redirect(from, skb);
         break;
     case __SK_PASS:
     case __SK_DROP:
···
         ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
         skb->sk = NULL;
     }
-    sk_psock_tls_verdict_apply(skb, psock->sk, ret);
+    sk_psock_tls_verdict_apply(skb, psock, ret);
     rcu_read_unlock();
     return ret;
 }
 EXPORT_SYMBOL_GPL(sk_psock_tls_strp_read);
 
-static void sk_psock_verdict_apply(struct sk_psock *psock,
-                                   struct sk_buff *skb, int verdict)
+static int sk_psock_verdict_apply(struct sk_psock *psock, struct sk_buff *skb,
+                                  int verdict)
 {
     struct sock *sk_other;
-    int err = -EIO;
+    int err = 0;
 
     switch (verdict) {
     case __SK_PASS:
+        err = -EIO;
         sk_other = psock->sk;
         if (sock_flag(sk_other, SOCK_DEAD) ||
             !sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
···
         if (sk_psock_test_state(psock, SK_PSOCK_TX_ENABLED)) {
             skb_queue_tail(&psock->ingress_skb, skb);
             schedule_work(&psock->work);
+            err = 0;
         }
         spin_unlock_bh(&psock->ingress_lock);
+        if (err < 0) {
+            skb_bpf_redirect_clear(skb);
+            goto out_free;
+        }
         }
         break;
     case __SK_REDIRECT:
-        sk_psock_skb_redirect(skb);
+        err = sk_psock_skb_redirect(psock, skb);
         break;
     case __SK_DROP:
     default:
 out_free:
-        kfree_skb(skb);
+        sock_drop(psock->sk, skb);
     }
+
+    return err;
 }
 
 static void sk_psock_write_space(struct sock *sk)
···
     sk = strp->sk;
     psock = sk_psock(sk);
     if (unlikely(!psock)) {
-        kfree_skb(skb);
+        sock_drop(sk, skb);
         goto out;
     }
     prog = READ_ONCE(psock->progs.stream_verdict);
···
     psock = sk_psock(sk);
     if (unlikely(!psock)) {
         len = 0;
-        kfree_skb(skb);
+        sock_drop(sk, skb);
         goto out;
     }
     prog = READ_ONCE(psock->progs.stream_verdict);
···
         ret = sk_psock_map_verd(ret, skb_bpf_redirect_fetch(skb));
         skb->sk = NULL;
     }
-    sk_psock_verdict_apply(psock, skb, ret);
+    if (sk_psock_verdict_apply(psock, skb, ret) < 0)
+        len = 0;
 out:
     rcu_read_unlock();
     return len;
+1 -1
net/core/sock_map.c
···
     bpf_map_init_from_attr(&stab->map, attr);
     raw_spin_lock_init(&stab->lock);
 
-    stab->sks = bpf_map_area_alloc(stab->map.max_entries *
+    stab->sks = bpf_map_area_alloc((u64) stab->map.max_entries *
                                    sizeof(struct sock *),
                                    stab->map.numa_node);
     if (!stab->sks) {
+1 -1
net/ipv4/esp4.c
···
     struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
     u32 padto;
 
-    padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
+    padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
     if (skb->len < padto)
         esp.tfclen = padto - skb->len;
 }
+2
net/ipv4/fib_frontend.c
···
         fl4.flowi4_proto = 0;
         fl4.fl4_sport = 0;
         fl4.fl4_dport = 0;
+    } else {
+        swap(fl4.fl4_sport, fl4.fl4_dport);
     }
 
     if (fib_lookup(net, &fl4, &res, 0))
+2 -1
net/ipv4/route.c
···
     mtu = dst_metric_raw(dst, RTAX_MTU);
 
     if (mtu)
-        return mtu;
+        goto out;
 
     mtu = READ_ONCE(dst->dev->mtu);
 
···
         mtu = 576;
     }
 
+out:
     mtu = min_t(unsigned int, mtu, IP_MAX_MTU);
 
     return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
+23 -1
net/ipv4/tcp_bpf.c
···
     return !empty;
 }
 
+static int tcp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+                             long timeo)
+{
+    DEFINE_WAIT_FUNC(wait, woken_wake_function);
+    int ret = 0;
+
+    if (sk->sk_shutdown & RCV_SHUTDOWN)
+        return 1;
+
+    if (!timeo)
+        return ret;
+
+    add_wait_queue(sk_sleep(sk), &wait);
+    sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+    ret = sk_wait_event(sk, &timeo,
+                        !list_empty(&psock->ingress_msg) ||
+                        !skb_queue_empty(&sk->sk_receive_queue), &wait);
+    sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+    remove_wait_queue(sk_sleep(sk), &wait);
+    return ret;
+}
+
 static int tcp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                            int nonblock, int flags, int *addr_len)
 {
···
     int data;
 
     timeo = sock_rcvtimeo(sk, nonblock);
-    data = sk_msg_wait_data(sk, psock, timeo);
+    data = tcp_msg_wait_data(sk, psock, timeo);
     if (data) {
         if (!sk_psock_queue_empty(psock))
             goto msg_bytes_ready;
+2
net/ipv4/udp.c
···
         if (used <= 0) {
             if (!copied)
                 copied = used;
+            kfree_skb(skb);
             break;
         } else if (used <= skb->len) {
             copied += used;
         }
 
+        kfree_skb(skb);
         if (!desc->count)
             break;
+42 -5
net/ipv4/udp_bpf.c
···
     return udp_prot.recvmsg(sk, msg, len, noblock, flags, addr_len);
 }
 
+static bool udp_sk_has_data(struct sock *sk)
+{
+    return !skb_queue_empty(&udp_sk(sk)->reader_queue) ||
+           !skb_queue_empty(&sk->sk_receive_queue);
+}
+
+static bool psock_has_data(struct sk_psock *psock)
+{
+    return !skb_queue_empty(&psock->ingress_skb) ||
+           !sk_psock_queue_empty(psock);
+}
+
+#define udp_msg_has_data(__sk, __psock) \
+    ({ udp_sk_has_data(__sk) || psock_has_data(__psock); })
+
+static int udp_msg_wait_data(struct sock *sk, struct sk_psock *psock,
+                             long timeo)
+{
+    DEFINE_WAIT_FUNC(wait, woken_wake_function);
+    int ret = 0;
+
+    if (sk->sk_shutdown & RCV_SHUTDOWN)
+        return 1;
+
+    if (!timeo)
+        return ret;
+
+    add_wait_queue(sk_sleep(sk), &wait);
+    sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+    ret = udp_msg_has_data(sk, psock);
+    if (!ret) {
+        wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
+        ret = udp_msg_has_data(sk, psock);
+    }
+    sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
+    remove_wait_queue(sk_sleep(sk), &wait);
+    return ret;
+}
+
 static int udp_bpf_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                            int nonblock, int flags, int *addr_len)
 {
···
     if (unlikely(!psock))
         return sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
 
-    lock_sock(sk);
-    if (sk_psock_queue_empty(psock)) {
+    if (!psock_has_data(psock)) {
         ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
         goto out;
     }
···
     int data;
 
     timeo = sock_rcvtimeo(sk, nonblock);
-    data = sk_msg_wait_data(sk, psock, timeo);
+    data = udp_msg_wait_data(sk, psock, timeo);
     if (data) {
-        if (!sk_psock_queue_empty(psock))
+        if (psock_has_data(psock))
             goto msg_bytes_ready;
         ret = sk_udp_recvmsg(sk, msg, len, nonblock, flags, addr_len);
         goto out;
···
     }
     ret = copied;
 out:
-    release_sock(sk);
     sk_psock_put(sk, psock);
     return ret;
 }
+1 -1
net/ipv6/esp6.c
···
     struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
     u32 padto;
 
-    padto = min(x->tfcpad, xfrm_state_mtu(x, dst->child_mtu_cached));
+    padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
     if (skb->len < padto)
         esp.tfclen = padto - skb->len;
 }
+15 -16
net/ipv6/exthdrs.c
···
     len -= 2;
 
     while (len > 0) {
-        int optlen = nh[off + 1] + 2;
-        int i;
+        int optlen, i;
 
-        switch (nh[off]) {
-        case IPV6_TLV_PAD1:
-            optlen = 1;
+        if (nh[off] == IPV6_TLV_PAD1) {
             padlen++;
             if (padlen > 7)
                 goto bad;
-            break;
+            off++;
+            len--;
+            continue;
+        }
+        if (len < 2)
+            goto bad;
+        optlen = nh[off + 1] + 2;
+        if (optlen > len)
+            goto bad;
 
-        case IPV6_TLV_PADN:
+        if (nh[off] == IPV6_TLV_PADN) {
             /* RFC 2460 states that the purpose of PadN is
              * to align the containing header to multiples
              * of 8. 7 is therefore the highest valid value.
···
                 if (nh[off + i] != 0)
                     goto bad;
             }
-            break;
-
-        default: /* Other TLV code so scan list */
-            if (optlen > len)
-                goto bad;
-
+        } else {
             tlv_count++;
             if (tlv_count > max_count)
                 goto bad;
···
                 return false;
 
             padlen = 0;
-            break;
         }
         off += optlen;
         len -= optlen;
···
 #endif
 
     if (ip6_parse_tlv(tlvprocdestopt_lst, skb,
-                      init_net.ipv6.sysctl.max_dst_opts_cnt)) {
+                      net->ipv6.sysctl.max_dst_opts_cnt)) {
         skb->transport_header += extlen;
         opt = IP6CB(skb);
 #if IS_ENABLED(CONFIG_IPV6_MIP6)
···
 
     opt->flags |= IP6SKB_HOPBYHOP;
     if (ip6_parse_tlv(tlvprochopopt_lst, skb,
-                      init_net.ipv6.sysctl.max_hbh_opts_cnt)) {
+                      net->ipv6.sysctl.max_hbh_opts_cnt)) {
         skb->transport_header += extlen;
         opt = IP6CB(skb);
         opt->nhoff = sizeof(struct ipv6hdr);
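Note: the rewritten loop handles Pad1 (a one-byte option with no length field) before ever touching nh[off + 1], then validates both the 2-byte TLV header and the full option body against the remaining length. A self-contained sketch of that bounds-checked walk (hypothetical helper, simplified from the hunk above):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define TLV_PAD1 0

static bool tlv_walk_ok(const uint8_t *nh, size_t off, size_t len)
{
    while (len > 0) {
        size_t optlen;

        if (nh[off] == TLV_PAD1) {  /* no length byte at all */
            off++;
            len--;
            continue;
        }
        if (len < 2)                /* length byte would be out of bounds */
            return false;
        optlen = (size_t)nh[off + 1] + 2;
        if (optlen > len)           /* option body would be out of bounds */
            return false;
        off += optlen;
        len -= optlen;
    }
    return true;
}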
+2 -2
net/ipv6/ip6_tunnel.c
···
     if (max_headroom > dev->needed_headroom)
         dev->needed_headroom = max_headroom;
 
-    skb_set_inner_ipproto(skb, proto);
-
     err = ip6_tnl_encap(skb, t, &proto, fl6);
     if (err)
         return err;
···
 
     if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
         return -1;
+
+    skb_set_inner_ipproto(skb, protocol);
 
     err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu,
                        protocol);
+13 -12
net/mptcp/options.c
··· 942 942 return false; 943 943 } 944 944 945 - static u64 expand_ack(u64 old_ack, u64 cur_ack, bool use_64bit) 945 + u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq) 946 946 { 947 - u32 old_ack32, cur_ack32; 947 + u32 old_seq32, cur_seq32; 948 948 949 - if (use_64bit) 950 - return cur_ack; 949 + old_seq32 = (u32)old_seq; 950 + cur_seq32 = (u32)cur_seq; 951 + cur_seq = (old_seq & GENMASK_ULL(63, 32)) + cur_seq32; 952 + if (unlikely(cur_seq32 < old_seq32 && before(old_seq32, cur_seq32))) 953 + return cur_seq + (1LL << 32); 951 954 952 - old_ack32 = (u32)old_ack; 953 - cur_ack32 = (u32)cur_ack; 954 - cur_ack = (old_ack & GENMASK_ULL(63, 32)) + cur_ack32; 955 - if (unlikely(before(cur_ack32, old_ack32))) 956 - return cur_ack + (1LL << 32); 957 - return cur_ack; 955 + /* reverse wrap could happen, too */ 956 + if (unlikely(cur_seq32 > old_seq32 && after(old_seq32, cur_seq32))) 957 + return cur_seq - (1LL << 32); 958 + return cur_seq; 958 959 } 959 960 960 961 static void ack_update_msk(struct mptcp_sock *msk, ··· 973 972 * more dangerous than missing an ack 974 973 */ 975 974 old_snd_una = msk->snd_una; 976 - new_snd_una = expand_ack(old_snd_una, mp_opt->data_ack, mp_opt->ack64); 975 + new_snd_una = mptcp_expand_seq(old_snd_una, mp_opt->data_ack, mp_opt->ack64); 977 976 978 977 /* ACK for data not even sent yet? Ignore. */ 979 978 if (after64(new_snd_una, snd_nxt)) ··· 1010 1009 return false; 1011 1010 1012 1011 WRITE_ONCE(msk->rcv_data_fin_seq, 1013 - expand_ack(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit)); 1012 + mptcp_expand_seq(READ_ONCE(msk->ack_seq), data_fin_seq, use_64bit)); 1014 1013 WRITE_ONCE(msk->rcv_data_fin, 1); 1015 1014 1016 1015 return true;
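__mptcp_expand_seq() reconstructs a 64-bit sequence number from its 32-bit wire form relative to the last value seen, and unlike the old expand_ack() it also handles the reverse case, where a value from before a 32-bit wrap arrives after the counter has already moved past the wrap. Both directions, demonstrated standalone (before32/after32 mimic the kernel's serial-number macros):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int before32(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
    static int after32(uint32_t a, uint32_t b)  { return before32(b, a); }

    static uint64_t expand_seq(uint64_t old_seq, uint64_t cur_seq)
    {
        uint32_t old32 = (uint32_t)old_seq, cur32 = (uint32_t)cur_seq;
        uint64_t cur = (old_seq & 0xffffffff00000000ULL) + cur32;

        if (cur32 < old32 && before32(old32, cur32))
            return cur + (1ULL << 32);  /* counter wrapped forward */
        if (cur32 > old32 && after32(old32, cur32))
            return cur - (1ULL << 32);  /* old data from before the wrap */
        return cur;
    }

    int main(void)
    {
        /* forward wrap: last seq 0x1fffffff0, peer sends low bits 0x10 */
        printf("%" PRIx64 "\n", expand_seq(0x1fffffff0ULL, 0x10));
        /* prints 200000010 */

        /* reverse wrap: retransmit of 0xfffffff0 after 0x200000010 */
        printf("%" PRIx64 "\n", expand_seq(0x200000010ULL, 0xfffffff0ULL));
        /* prints 1fffffff0 */
        return 0;
    }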
+5
net/mptcp/protocol.c
··· 2896 2896 spin_lock_bh(&sk->sk_lock.slock); 2897 2897 } 2898 2898 2899 + /* be sure to set the current sk state before taking actions 2900 + * depending on sk_state 2901 + */ 2902 + if (test_and_clear_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags)) 2903 + __mptcp_set_connected(sk); 2899 2904 if (test_and_clear_bit(MPTCP_CLEAN_UNA, &mptcp_sk(sk)->flags)) 2900 2905 __mptcp_clean_una_wakeup(sk); 2901 2906 if (test_and_clear_bit(MPTCP_ERROR_REPORT, &mptcp_sk(sk)->flags))
+10 -3
net/mptcp/protocol.h
··· 112 112 #define MPTCP_ERROR_REPORT 8 113 113 #define MPTCP_RETRANSMIT 9 114 114 #define MPTCP_WORK_SYNC_SETSOCKOPT 10 115 + #define MPTCP_CONNECTED 11 115 116 116 117 static inline bool before64(__u64 seq1, __u64 seq2) 117 118 { ··· 601 600 struct mptcp_options_received *mp_opt); 602 601 603 602 void mptcp_finish_connect(struct sock *sk); 603 + void __mptcp_set_connected(struct sock *sk); 604 604 static inline bool mptcp_is_fully_established(struct sock *sk) 605 605 { 606 606 return inet_sk_state_load(sk) == TCP_ESTABLISHED && ··· 616 614 int mptcp_getsockopt(struct sock *sk, int level, int optname, 617 615 char __user *optval, int __user *option); 618 616 617 + u64 __mptcp_expand_seq(u64 old_seq, u64 cur_seq); 618 + static inline u64 mptcp_expand_seq(u64 old_seq, u64 cur_seq, bool use_64bit) 619 + { 620 + if (use_64bit) 621 + return cur_seq; 622 + 623 + return __mptcp_expand_seq(old_seq, cur_seq); 624 + } 619 625 void __mptcp_check_push(struct sock *sk, struct sock *ssk); 620 626 void __mptcp_data_acked(struct sock *sk); 621 627 void __mptcp_error_report(struct sock *sk); ··· 784 774 unsigned int mptcp_pm_get_add_addr_accept_max(struct mptcp_sock *msk); 785 775 unsigned int mptcp_pm_get_subflows_max(struct mptcp_sock *msk); 786 776 unsigned int mptcp_pm_get_local_addr_max(struct mptcp_sock *msk); 787 - 788 - int mptcp_setsockopt(struct sock *sk, int level, int optname, 789 - sockptr_t optval, unsigned int optlen); 790 777 791 778 void mptcp_sockopt_sync(struct mptcp_sock *msk, struct sock *ssk); 792 779 void mptcp_sockopt_sync_all(struct mptcp_sock *msk);
+23 -24
net/mptcp/subflow.c
··· 373 373 return inet_sk(sk)->inet_dport != inet_sk((struct sock *)msk)->inet_dport; 374 374 } 375 375 376 + void __mptcp_set_connected(struct sock *sk) 377 + { 378 + if (sk->sk_state == TCP_SYN_SENT) { 379 + inet_sk_state_store(sk, TCP_ESTABLISHED); 380 + sk->sk_state_change(sk); 381 + } 382 + } 383 + 384 + static void mptcp_set_connected(struct sock *sk) 385 + { 386 + mptcp_data_lock(sk); 387 + if (!sock_owned_by_user(sk)) 388 + __mptcp_set_connected(sk); 389 + else 390 + set_bit(MPTCP_CONNECTED, &mptcp_sk(sk)->flags); 391 + mptcp_data_unlock(sk); 392 + } 393 + 376 394 static void subflow_finish_connect(struct sock *sk, const struct sk_buff *skb) 377 395 { 378 396 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); ··· 399 381 400 382 subflow->icsk_af_ops->sk_rx_dst_set(sk, skb); 401 383 402 - if (inet_sk_state_load(parent) == TCP_SYN_SENT) { 403 - inet_sk_state_store(parent, TCP_ESTABLISHED); 404 - parent->sk_state_change(parent); 405 - } 406 384 407 385 /* be sure no special action on any packet other than syn-ack */ 408 386 if (subflow->conn_finished) ··· 431 417 subflow->remote_key); 432 418 MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEACTIVEACK); 433 419 mptcp_finish_connect(sk); 420 + mptcp_set_connected(parent); 434 421 } else if (subflow->request_join) { 435 422 u8 hmac[SHA256_DIGEST_SIZE]; 436 423 ··· 472 457 } else if (mptcp_check_fallback(sk)) { 473 458 fallback: 474 459 mptcp_rcv_space_init(mptcp_sk(parent), sk); 460 + mptcp_set_connected(parent); 475 461 } 476 462 return; 477 463 ··· 580 564 581 565 static void mptcp_force_close(struct sock *sk) 582 566 { 567 + /* the msk is not yet exposed to user-space */ 583 568 inet_sk_state_store(sk, TCP_CLOSE); 584 569 sk_common_release(sk); 585 570 } ··· 798 781 MAPPING_DUMMY 799 782 }; 800 783 801 - static u64 expand_seq(u64 old_seq, u16 old_data_len, u64 seq) 802 - { 803 - if ((u32)seq == (u32)old_seq) 804 - return old_seq; 805 - 806 - /* Assume map covers data not mapped yet. */ 807 - return seq | ((old_seq + old_data_len + 1) & GENMASK_ULL(63, 32)); 808 - } 809 - 810 784 static void dbg_bad_map(struct mptcp_subflow_context *subflow, u32 ssn) 811 785 { 812 786 pr_debug("Bad mapping: ssn=%d map_seq=%d map_data_len=%d", ··· 1003 995 data_len--; 1004 996 } 1005 997 1006 - if (!mpext->dsn64) { 1007 - map_seq = expand_seq(subflow->map_seq, subflow->map_data_len, 1008 - mpext->data_seq); 1009 - pr_debug("expanded seq=%llu", subflow->map_seq); 1010 - } else { 1011 - map_seq = mpext->data_seq; 1012 - } 998 + map_seq = mptcp_expand_seq(READ_ONCE(msk->ack_seq), mpext->data_seq, mpext->dsn64); 1013 999 WRITE_ONCE(mptcp_sk(subflow->conn)->use_64bit_ack, !!mpext->dsn64); 1014 1000 1015 1001 if (subflow->map_valid) { ··· 1594 1592 mptcp_rcv_space_init(mptcp_sk(parent), sk); 1595 1593 pr_fallback(mptcp_sk(parent)); 1596 1594 subflow->conn_finished = 1; 1597 - if (inet_sk_state_load(parent) == TCP_SYN_SENT) { 1598 - inet_sk_state_store(parent, TCP_ESTABLISHED); 1599 - parent->sk_state_change(parent); 1600 - } 1595 + mptcp_set_connected(parent); 1601 1596 } 1602 1597 1603 1598 /* as recvmsg() does not acquire the subflow socket for ssk selection
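mptcp_set_connected() follows the usual MPTCP split: apply the state change directly when nobody owns the msk socket lock, otherwise record MPTCP_CONNECTED and let mptcp_release_cb() replay it at release time, ahead of the other deferred bits that inspect sk_state. A stripped-down model of that pattern (illustrative names only, no real locking or atomics):

    #include <stdbool.h>

    enum { DEFER_CONNECTED = 1 << 0 };

    struct sock_model {
        bool owned_by_user;     /* user context holds the socket lock */
        unsigned long flags;    /* work deferred to lock release */
        bool established;
    };

    static void set_connected_now(struct sock_model *sk)
    {
        sk->established = true;
    }

    static void set_connected(struct sock_model *sk)
    {
        if (!sk->owned_by_user)
            set_connected_now(sk);          /* safe to touch state */
        else
            sk->flags |= DEFER_CONNECTED;   /* replay on release */
    }

    static void release_cb(struct sock_model *sk)
    {
        /* the state update runs first: later deferred work keys off it */
        if (sk->flags & DEFER_CONNECTED) {
            sk->flags &= ~DEFER_CONNECTED;
            set_connected_now(sk);
        }
        /* ... other deferred work would run here ... */
        sk->owned_by_user = false;
    }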
+41 -24
net/netfilter/nf_tables_api.c
··· 571 571 table->family == family && 572 572 nft_active_genmask(table, genmask)) { 573 573 if (nft_table_has_owner(table) && 574 - table->nlpid != nlpid) 574 + nlpid && table->nlpid != nlpid) 575 575 return ERR_PTR(-EPERM); 576 576 577 577 return table; ··· 583 583 584 584 static struct nft_table *nft_table_lookup_byhandle(const struct net *net, 585 585 const struct nlattr *nla, 586 - u8 genmask) 586 + u8 genmask, u32 nlpid) 587 587 { 588 588 struct nftables_pernet *nft_net; 589 589 struct nft_table *table; ··· 591 591 nft_net = nft_pernet(net); 592 592 list_for_each_entry(table, &nft_net->tables, list) { 593 593 if (be64_to_cpu(nla_get_be64(nla)) == table->handle && 594 - nft_active_genmask(table, genmask)) 594 + nft_active_genmask(table, genmask)) { 595 + if (nft_table_has_owner(table) && 596 + nlpid && table->nlpid != nlpid) 597 + return ERR_PTR(-EPERM); 598 + 595 599 return table; 600 + } 596 601 } 597 602 598 603 return ERR_PTR(-ENOENT); ··· 1281 1276 1282 1277 if (nla[NFTA_TABLE_HANDLE]) { 1283 1278 attr = nla[NFTA_TABLE_HANDLE]; 1284 - table = nft_table_lookup_byhandle(net, attr, genmask); 1279 + table = nft_table_lookup_byhandle(net, attr, genmask, 1280 + NETLINK_CB(skb).portid); 1285 1281 } else { 1286 1282 attr = nla[NFTA_TABLE_NAME]; 1287 1283 table = nft_table_lookup(net, attr, family, genmask, ··· 3243 3237 struct nft_rule *rule, *old_rule = NULL; 3244 3238 struct nft_expr_info *expr_info = NULL; 3245 3239 u8 family = info->nfmsg->nfgen_family; 3240 + struct nft_flow_rule *flow = NULL; 3246 3241 struct net *net = info->net; 3247 - struct nft_flow_rule *flow; 3248 3242 struct nft_userdata *udata; 3249 3243 struct nft_table *table; 3250 3244 struct nft_chain *chain; ··· 3339 3333 nla_for_each_nested(tmp, nla[NFTA_RULE_EXPRESSIONS], rem) { 3340 3334 err = -EINVAL; 3341 3335 if (nla_type(tmp) != NFTA_LIST_ELEM) 3342 - goto err1; 3336 + goto err_release_expr; 3343 3337 if (n == NFT_RULE_MAXEXPRS) 3344 - goto err1; 3338 + goto err_release_expr; 3345 3339 err = nf_tables_expr_parse(&ctx, tmp, &expr_info[n]); 3346 3340 if (err < 0) { 3347 3341 NL_SET_BAD_ATTR(extack, tmp); 3348 - goto err1; 3342 + goto err_release_expr; 3349 3343 } 3350 3344 size += expr_info[n].ops->size; 3351 3345 n++; ··· 3354 3348 /* Check for overflow of dlen field */ 3355 3349 err = -EFBIG; 3356 3350 if (size >= 1 << 12) 3357 - goto err1; 3351 + goto err_release_expr; 3358 3352 3359 3353 if (nla[NFTA_RULE_USERDATA]) { 3360 3354 ulen = nla_len(nla[NFTA_RULE_USERDATA]); ··· 3365 3359 err = -ENOMEM; 3366 3360 rule = kzalloc(sizeof(*rule) + size + usize, GFP_KERNEL); 3367 3361 if (rule == NULL) 3368 - goto err1; 3362 + goto err_release_expr; 3369 3363 3370 3364 nft_activate_next(net, rule); 3371 3365 ··· 3384 3378 err = nf_tables_newexpr(&ctx, &expr_info[i], expr); 3385 3379 if (err < 0) { 3386 3380 NL_SET_BAD_ATTR(extack, expr_info[i].attr); 3387 - goto err2; 3381 + goto err_release_rule; 3388 3382 } 3389 3383 3390 3384 if (expr_info[i].ops->validate) ··· 3394 3388 expr = nft_expr_next(expr); 3395 3389 } 3396 3390 3391 + if (chain->flags & NFT_CHAIN_HW_OFFLOAD) { 3392 + flow = nft_flow_rule_create(net, rule); 3393 + if (IS_ERR(flow)) { 3394 + err = PTR_ERR(flow); 3395 + goto err_release_rule; 3396 + } 3397 + } 3398 + 3397 3399 if (info->nlh->nlmsg_flags & NLM_F_REPLACE) { 3398 3400 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule); 3399 3401 if (trans == NULL) { 3400 3402 err = -ENOMEM; 3401 3403 goto err_destroy_flow_rule; 3402 3404 } 3403 3405 err = nft_delrule(&ctx, old_rule); 3404 3406 if (err < 0) { 3405 3407 nft_trans_destroy(trans); 3406 - goto err2; 3408 + goto err_destroy_flow_rule; 3407 3409 } 3408 3410 3409 3411 list_add_tail_rcu(&rule->list, &old_rule->list); ··· 3419 3405 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule); 3420 3406 if (!trans) { 3421 3407 err = -ENOMEM; 3422 3408 goto err_destroy_flow_rule; 3423 3409 } 3424 3410 3425 3411 if (info->nlh->nlmsg_flags & NLM_F_APPEND) { ··· 3437 3423 kvfree(expr_info); 3438 3424 chain->use++; 3439 3425 3426 + if (flow) 3427 + nft_trans_flow_rule(trans) = flow; 3428 + 3440 3429 if (nft_net->validate_state == NFT_VALIDATE_DO) 3441 3430 return nft_table_validate(net, table); 3442 3431 3443 - if (chain->flags & NFT_CHAIN_HW_OFFLOAD) { 3444 - flow = nft_flow_rule_create(net, rule); 3445 - if (IS_ERR(flow)) 3446 - return PTR_ERR(flow); 3447 - 3448 - nft_trans_flow_rule(trans) = flow; 3449 - } 3450 - 3451 3432 return 0; 3452 - err2: 3433 + 3434 + err_destroy_flow_rule: 3435 + nft_flow_rule_destroy(flow); 3436 + err_release_rule: 3453 3437 nf_tables_rule_release(&ctx, rule); 3454 - err1: 3438 + err_release_expr: 3455 3439 for (i = 0; i < n; i++) { 3456 3440 if (expr_info[i].ops) { 3457 3441 module_put(expr_info[i].ops->type->owner); ··· 8817 8805 nft_rule_expr_deactivate(&trans->ctx, 8818 8806 nft_trans_rule(trans), 8819 8807 NFT_TRANS_ABORT); 8808 + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) 8809 + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); 8820 8810 break; 8821 8811 case NFT_MSG_DELRULE: 8822 8812 trans->ctx.chain->use++; 8823 8813 nft_clear(trans->ctx.net, nft_trans_rule(trans)); 8824 8814 nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans)); 8815 + if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) 8816 + nft_flow_rule_destroy(nft_trans_flow_rule(trans)); 8817 + 8825 8818 nft_trans_destroy(trans); 8826 8819 break; 8827 8820 case NFT_MSG_NEWSET:
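Two things happen in the nf_tables hunk: the flow rule is now created before the transaction is queued, so a failing nft_flow_rule_create() can unwind through the shared error path rather than returning with a transaction already committed, and the numbered err1/err2 labels become descriptive ones. The label convention is that each label releases exactly what was live at the point of the jump, falling through in reverse setup order, so a new allocation only needs one new label above the existing ones. The same structure as a toy (hypothetical resources; in the real code ownership moves to the transaction on success):

    #include <stdlib.h>

    struct res { int dummy; };

    static int setup(void)
    {
        struct res *expr, *rule, *flow;
        int err = -1;

        expr = malloc(sizeof(*expr));
        if (!expr)
            goto err_out;
        rule = malloc(sizeof(*rule));
        if (!rule)
            goto err_release_expr;
        flow = malloc(sizeof(*flow));
        if (!flow)
            goto err_release_rule;      /* frees rule, then expr */

        /* success: the toy frees here, the real code hands off */
        free(flow);
        free(rule);
        free(expr);
        return 0;

    err_release_rule:
        free(rule);
    err_release_expr:
        free(expr);
    err_out:
        return err;
    }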
+7 -27
net/netfilter/nf_tables_offload.c
··· 54 54 struct nft_flow_rule *flow) 55 55 { 56 56 struct nft_flow_match *match = &flow->match; 57 - struct nft_offload_ethertype ethertype; 58 - 59 - if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL) && 60 - match->key.basic.n_proto != htons(ETH_P_8021Q) && 61 - match->key.basic.n_proto != htons(ETH_P_8021AD)) 62 - return; 63 - 64 - ethertype.value = match->key.basic.n_proto; 65 - ethertype.mask = match->mask.basic.n_proto; 57 + struct nft_offload_ethertype ethertype = { 58 + .value = match->key.basic.n_proto, 59 + .mask = match->mask.basic.n_proto, 60 + }; 66 61 67 62 if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_VLAN) && 68 63 (match->key.vlan.vlan_tpid == htons(ETH_P_8021Q) || ··· 71 76 match->dissector.offset[FLOW_DISSECTOR_KEY_CVLAN] = 72 77 offsetof(struct nft_flow_key, cvlan); 73 78 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_CVLAN); 74 - } else { 79 + } else if (match->dissector.used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC) && 80 + (match->key.basic.n_proto == htons(ETH_P_8021Q) || 81 + match->key.basic.n_proto == htons(ETH_P_8021AD))) { 75 82 match->key.basic.n_proto = match->key.vlan.vlan_tpid; 76 83 match->mask.basic.n_proto = match->mask.vlan.vlan_tpid; 77 84 match->key.vlan.vlan_tpid = ethertype.value; ··· 587 590 588 591 if (err) { 589 592 nft_flow_rule_offload_abort(net, trans); 590 - break; 591 - } 592 - } 593 - 594 - list_for_each_entry(trans, &nft_net->commit_list, list) { 595 - if (trans->ctx.family != NFPROTO_NETDEV) 596 - continue; 597 - 598 - switch (trans->msg_type) { 599 - case NFT_MSG_NEWRULE: 600 - case NFT_MSG_DELRULE: 601 - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) 602 - continue; 603 - 604 - nft_flow_rule_destroy(nft_trans_flow_rule(trans)); 605 - break; 606 - default: 607 593 break; 608 594 } 609 595 }
+3
net/netfilter/nft_exthdr.c
··· 44 44 unsigned int offset = 0; 45 45 int err; 46 46 47 + if (pkt->skb->protocol != htons(ETH_P_IPV6)) 48 + goto err; 49 + 47 50 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL); 48 51 if (priv->flags & NFT_EXTHDR_F_PRESENT) { 49 52 nft_reg_store8(dest, err >= 0);
+5
net/netfilter/nft_osf.c
··· 28 28 struct nf_osf_data data; 29 29 struct tcphdr _tcph; 30 30 31 + if (pkt->tprot != IPPROTO_TCP) { 32 + regs->verdict.code = NFT_BREAK; 33 + return; 34 + } 35 + 31 36 tcp = skb_header_pointer(skb, ip_hdrlen(skb), 32 37 sizeof(struct tcphdr), &_tcph); 33 38 if (!tcp) {
+8 -1
net/netfilter/nft_tproxy.c
··· 30 30 __be16 tport = 0; 31 31 struct sock *sk; 32 32 33 + if (pkt->tprot != IPPROTO_TCP && 34 + pkt->tprot != IPPROTO_UDP) { 35 + regs->verdict.code = NFT_BREAK; 36 + return; 37 + } 38 + 33 39 hp = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr); 34 40 if (!hp) { 35 41 regs->verdict.code = NFT_BREAK; ··· 97 91 98 92 memset(&taddr, 0, sizeof(taddr)); 99 93 100 - if (!pkt->tprot_set) { 94 + if (pkt->tprot != IPPROTO_TCP && 95 + pkt->tprot != IPPROTO_UDP) { 101 96 regs->verdict.code = NFT_BREAK; 102 97 return; 103 98 }
+1 -1
net/sched/cls_tcindex.c
··· 304 304 int i, err = 0; 305 305 306 306 cp->perfect = kcalloc(cp->hash, sizeof(struct tcindex_filter_result), 307 - GFP_KERNEL); 307 + GFP_KERNEL | __GFP_NOWARN); 308 308 if (!cp->perfect) 309 309 return -ENOMEM; 310 310
+3 -5
net/sched/sch_qfq.c
··· 485 485 486 486 if (cl->qdisc != &noop_qdisc) 487 487 qdisc_hash_add(cl->qdisc, true); 488 - sch_tree_lock(sch); 489 - qdisc_class_hash_insert(&q->clhash, &cl->common); 490 - sch_tree_unlock(sch); 491 - 492 - qdisc_class_hash_grow(sch, &q->clhash); 493 488 494 489 set_change_agg: 495 490 sch_tree_lock(sch); ··· 502 507 } 503 508 if (existing) 504 509 qfq_deact_rm_from_agg(q, cl); 510 + else 511 + qdisc_class_hash_insert(&q->clhash, &cl->common); 505 512 qfq_add_to_agg(q, new_agg, cl); 506 513 sch_tree_unlock(sch); 514 + qdisc_class_hash_grow(sch, &q->clhash); 507 515 508 516 *arg = (unsigned long)cl; 509 517 return 0;
+11 -8
net/sctp/bind_addr.c
··· 270 270 rawaddr = (union sctp_addr_param *)raw_addr_list; 271 271 272 272 af = sctp_get_af_specific(param_type2af(param->type)); 273 - if (unlikely(!af)) { 273 + if (unlikely(!af) || 274 + !af->from_addr_param(&addr, rawaddr, htons(port), 0)) { 274 275 retval = -EINVAL; 275 - sctp_bind_addr_clean(bp); 276 - break; 276 + goto out_err; 277 277 } 278 278 279 - af->from_addr_param(&addr, rawaddr, htons(port), 0); 280 279 if (sctp_bind_addr_state(bp, &addr) != -1) 281 280 goto next; 282 281 retval = sctp_add_bind_addr(bp, &addr, sizeof(addr), 283 282 SCTP_ADDR_SRC, gfp); 284 - if (retval) { 283 + if (retval) 285 284 /* Can't finish building the list, clean up. */ 286 - sctp_bind_addr_clean(bp); 287 - break; 288 - } 285 + goto out_err; 289 286 290 287 next: 291 288 len = ntohs(param->length); 292 289 addrs_len -= len; 293 290 raw_addr_list += len; 294 291 } 292 + 293 + return retval; 294 + 295 + out_err: 296 + if (retval) 297 + sctp_bind_addr_clean(bp); 295 298 296 299 return retval; 297 300 }
+8 -3
net/sctp/input.c
··· 1155 1155 if (!af) 1156 1156 continue; 1157 1157 1158 - af->from_addr_param(paddr, params.addr, sh->source, 0); 1158 + if (!af->from_addr_param(paddr, params.addr, sh->source, 0)) 1159 + continue; 1159 1160 1160 1161 asoc = __sctp_lookup_association(net, laddr, paddr, transportp); 1161 1162 if (asoc) ··· 1192 1191 union sctp_addr_param *param; 1193 1192 union sctp_addr paddr; 1194 1193 1194 + if (ntohs(ch->length) < sizeof(*asconf) + sizeof(struct sctp_paramhdr)) 1195 + return NULL; 1196 + 1195 1197 /* Skip over the ADDIP header and find the Address parameter */ 1196 1198 param = (union sctp_addr_param *)(asconf + 1); 1197 1199 ··· 1202 1198 if (unlikely(!af)) 1203 1199 return NULL; 1204 1200 1205 - af->from_addr_param(&paddr, param, peer_port, 0); 1201 + if (!af->from_addr_param(&paddr, param, peer_port, 0)) 1202 + return NULL; 1206 1203 1207 1204 return __sctp_lookup_association(net, laddr, &paddr, transportp); 1208 1205 } ··· 1274 1269 1275 1270 ch = (struct sctp_chunkhdr *)ch_end; 1276 1271 chunk_num++; 1277 - } while (ch_end < skb_tail_pointer(skb)); 1272 + } while (ch_end + sizeof(*ch) < skb_tail_pointer(skb)); 1278 1273 1279 1274 return asoc; 1280 1275 }
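The do/while bound is the subtle part of the input.c change: continuing while ch_end is merely below the tail pointer lets the next iteration read a chunk header that only partially fits in the buffer. Requiring room for a whole struct sctp_chunkhdr before the tail keeps every header dereference in bounds. The same walk in isolation (simplified header; 4-byte chunk padding as in RFC 4960):

    #include <arpa/inet.h>
    #include <stddef.h>
    #include <stdint.h>

    struct chunk_hdr {
        uint8_t type;
        uint8_t flags;
        uint16_t length;        /* network byte order on the wire */
    };

    static int count_chunks(const uint8_t *p, const uint8_t *tail)
    {
        int n = 0;

        /* stop as soon as a complete header no longer fits: reading
         * type/flags/length of a truncated trailing chunk would be an
         * out-of-bounds access */
        while (p + sizeof(struct chunk_hdr) < tail) {
            const struct chunk_hdr *ch = (const struct chunk_hdr *)p;
            size_t len = ntohs(ch->length);

            if (len < sizeof(*ch))
                break;              /* malformed length */
            p += (len + 3) & ~3UL;  /* chunks are 4-byte padded */
            n++;
        }
        return n;
    }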
+6 -1
net/sctp/ipv6.c
··· 577 577 } 578 578 579 579 /* Initialize a sctp_addr from an address parameter. */ 580 - static void sctp_v6_from_addr_param(union sctp_addr *addr, 580 + static bool sctp_v6_from_addr_param(union sctp_addr *addr, 581 581 union sctp_addr_param *param, 582 582 __be16 port, int iif) 583 583 { 584 + if (ntohs(param->v6.param_hdr.length) < sizeof(struct sctp_ipv6addr_param)) 585 + return false; 586 + 584 587 addr->v6.sin6_family = AF_INET6; 585 588 addr->v6.sin6_port = port; 586 589 addr->v6.sin6_flowinfo = 0; /* BUG */ 587 590 addr->v6.sin6_addr = param->v6.addr; 588 591 addr->v6.sin6_scope_id = iif; 592 + 593 + return true; 589 594 } 590 595 591 596 /* Initialize an address parameter from a sctp_addr and return the length
+6 -1
net/sctp/protocol.c
··· 254 254 } 255 255 256 256 /* Initialize a sctp_addr from an address parameter. */ 257 - static void sctp_v4_from_addr_param(union sctp_addr *addr, 257 + static bool sctp_v4_from_addr_param(union sctp_addr *addr, 258 258 union sctp_addr_param *param, 259 259 __be16 port, int iif) 260 260 { 261 + if (ntohs(param->v4.param_hdr.length) < sizeof(struct sctp_ipv4addr_param)) 262 + return false; 263 + 261 264 addr->v4.sin_family = AF_INET; 262 265 addr->v4.sin_port = port; 263 266 addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; 264 267 memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero)); 268 + 269 + return true; 265 270 } 266 271 267 272 /* Initialize an address parameter from a sctp_addr and return the length
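With from_addr_param() returning bool, every caller can reject an address parameter whose wire-declared length is shorter than the structure about to be copied out of it; the IPv4 and IPv6 variants differ only in the size they require. A minimal userspace model of the v4 check (simplified layout, not the kernel's):

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct param_hdr   { uint16_t type, length; }; /* length: network order */
    struct v4addr_param { struct param_hdr hdr; uint32_t addr; };

    static bool v4_from_addr_param(uint32_t *out, const struct v4addr_param *p)
    {
        /* a truncated parameter must not be read past its real size */
        if (ntohs(p->hdr.length) < sizeof(struct v4addr_param))
            return false;
        *out = p->addr;
        return true;
    }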
+26 -16
net/sctp/sm_make_chunk.c
··· 2195 2195 break; 2196 2196 2197 2197 case SCTP_PARAM_SET_PRIMARY: 2198 - if (ep->asconf_enable) 2199 - break; 2200 - goto unhandled; 2198 + if (!ep->asconf_enable) 2199 + goto unhandled; 2200 + 2201 + if (ntohs(param.p->length) < sizeof(struct sctp_addip_param) + 2202 + sizeof(struct sctp_paramhdr)) { 2203 + sctp_process_inv_paramlength(asoc, param.p, 2204 + chunk, err_chunk); 2205 + retval = SCTP_IERROR_ABORT; 2206 + } 2207 + break; 2201 2208 2202 2209 case SCTP_PARAM_HOST_NAME_ADDRESS: 2203 2210 /* Tell the peer, we won't support this param. */ ··· 2382 2375 2383 2376 /* Process the initialization parameters. */ 2384 2377 sctp_walk_params(param, peer_init, init_hdr.params) { 2385 - if (!src_match && (param.p->type == SCTP_PARAM_IPV4_ADDRESS || 2386 - param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { 2378 + if (!src_match && 2379 + (param.p->type == SCTP_PARAM_IPV4_ADDRESS || 2380 + param.p->type == SCTP_PARAM_IPV6_ADDRESS)) { 2387 2381 af = sctp_get_af_specific(param_type2af(param.p->type)); 2388 - af->from_addr_param(&addr, param.addr, 2389 - chunk->sctp_hdr->source, 0); 2382 + if (!af->from_addr_param(&addr, param.addr, 2383 + chunk->sctp_hdr->source, 0)) 2384 + continue; 2390 2385 if (sctp_cmp_addr_exact(sctp_source(chunk), &addr)) 2391 2386 src_match = 1; 2392 2387 } ··· 2569 2560 break; 2570 2561 do_addr_param: 2571 2562 af = sctp_get_af_specific(param_type2af(param.p->type)); 2572 - af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0); 2563 + if (!af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0)) 2564 + break; 2573 2565 scope = sctp_scope(peer_addr); 2574 2566 if (sctp_in_scope(net, &addr, scope)) 2575 2567 if (!sctp_assoc_add_peer(asoc, &addr, gfp, SCTP_UNCONFIRMED)) ··· 2671 2661 addr_param = param.v + sizeof(struct sctp_addip_param); 2672 2662 2673 2663 af = sctp_get_af_specific(param_type2af(addr_param->p.type)); 2674 - if (af == NULL) 2664 + if (!af) 2675 2665 break; 2676 2666 2677 - af->from_addr_param(&addr, addr_param, 2678 - htons(asoc->peer.port), 0); 2667 + if (!af->from_addr_param(&addr, addr_param, 2668 + htons(asoc->peer.port), 0)) 2669 + break; 2679 2670 2680 - /* if the address is invalid, we can't process it. 2681 - * XXX: see spec for what to do. 2682 - */ 2683 2671 if (!af->addr_valid(&addr, NULL, NULL)) 2684 2672 break; 2685 2673 ··· 3091 3083 if (unlikely(!af)) 3092 3084 return SCTP_ERROR_DNS_FAILED; 3093 3085 3094 - af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0); 3086 + if (!af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0)) 3087 + return SCTP_ERROR_DNS_FAILED; 3095 3088 3096 3089 /* ADDIP 4.2.1 This parameter MUST NOT contain a broadcast 3097 3090 * or multicast address. ··· 3369 3360 3370 3361 /* We have checked the packet before, so we do not check again. */ 3371 3362 af = sctp_get_af_specific(param_type2af(addr_param->p.type)); 3372 - af->from_addr_param(&addr, addr_param, htons(bp->port), 0); 3363 + if (!af->from_addr_param(&addr, addr_param, htons(bp->port), 0)) 3364 + return; 3373 3365 3374 3366 switch (asconf_param->param_hdr.type) { 3375 3367 case SCTP_PARAM_ADD_IP:
+1 -1
net/tls/tls_sw.c
··· 1153 1153 int ret = 0; 1154 1154 bool eor; 1155 1155 1156 - eor = !(flags & (MSG_MORE | MSG_SENDPAGE_NOTLAST)); 1156 + eor = !(flags & MSG_SENDPAGE_NOTLAST); 1157 1157 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 1158 1158 1159 1159 /* Call the sk_stream functions to manage the sndbuf mem. */
+1 -1
net/vmw_vsock/af_vsock.c
··· 1395 1395 1396 1396 if (signal_pending(current)) { 1397 1397 err = sock_intr_errno(timeout); 1398 - sk->sk_state = TCP_CLOSE; 1398 + sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE; 1399 1399 sock->state = SS_UNCONNECTED; 1400 1400 vsock_transport_cancel_pkt(vsk); 1401 1401 goto out_wait;
+7 -4
net/xdp/xsk_queue.h
··· 128 128 static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool, 129 129 struct xdp_desc *desc) 130 130 { 131 - u64 chunk; 132 - 133 - if (desc->len > pool->chunk_size) 134 - return false; 131 + u64 chunk, chunk_end; 135 132 136 133 chunk = xp_aligned_extract_addr(pool, desc->addr); 134 + if (likely(desc->len)) { 135 + chunk_end = xp_aligned_extract_addr(pool, desc->addr + desc->len - 1); 136 + if (chunk != chunk_end) 137 + return false; 138 + } 139 + 137 140 if (chunk >= pool->addrs_cnt) 138 141 return false; 139 142
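The old test, desc->len > pool->chunk_size, accepts descriptors that are short enough overall but still straddle a chunk boundary; the fix instead compares the chunk of the first byte with the chunk of the last byte. With aligned, power-of-two chunks that is plain masking (the addrs_cnt range check from the real function is omitted here):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t chunk_of(uint64_t addr, uint64_t chunk_size)
    {
        return addr & ~(chunk_size - 1);    /* chunk_size: power of two */
    }

    static int desc_ok(uint64_t addr, uint32_t len, uint64_t chunk_size)
    {
        if (!len)
            return 1;
        /* first and last byte must live in the same chunk */
        return chunk_of(addr, chunk_size) ==
               chunk_of(addr + len - 1, chunk_size);
    }

    int main(void)
    {
        /* len 100 <= chunk_size 2048, yet bytes 2000..2099 cross 2048 */
        printf("%d\n", desc_ok(2000, 100, 2048));   /* 0: rejected */
        printf("%d\n", desc_ok(1900, 100, 2048));   /* 1: fits */
        return 0;
    }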
+1
net/xfrm/xfrm_device.c
··· 268 268 xso->num_exthdrs = 0; 269 269 xso->flags = 0; 270 270 xso->dev = NULL; 271 + xso->real_dev = NULL; 271 272 dev_put(dev); 272 273 273 274 if (err != -EOPNOTSUPP)
-7
net/xfrm/xfrm_output.c
··· 827 827 static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb) 828 828 { 829 829 #if IS_ENABLED(CONFIG_IPV6) 830 - unsigned int ptr = 0; 831 830 int err; 832 - 833 - if (x->outer_mode.encap == XFRM_MODE_BEET && 834 - ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) { 835 - net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n"); 836 - return -EAFNOSUPPORT; 837 - } 838 831 839 832 err = xfrm6_tunnel_check_size(skb); 840 833 if (err)
+14 -7
net/xfrm/xfrm_policy.c
··· 2092 2092 if (unlikely(!daddr || !saddr)) 2093 2093 return NULL; 2094 2094 2095 - rcu_read_lock(); 2096 2095 retry: 2097 - do { 2098 - sequence = read_seqcount_begin(&xfrm_policy_hash_generation); 2099 - chain = policy_hash_direct(net, daddr, saddr, family, dir); 2100 - } while (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)); 2096 + sequence = read_seqcount_begin(&xfrm_policy_hash_generation); 2097 + rcu_read_lock(); 2098 + 2099 + chain = policy_hash_direct(net, daddr, saddr, family, dir); 2100 + if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) { 2101 + rcu_read_unlock(); 2102 + goto retry; 2103 + } 2101 2104 2102 2105 ret = NULL; 2103 2106 hlist_for_each_entry_rcu(pol, chain, bydst) { ··· 2131 2128 } 2132 2129 2133 2130 skip_inexact: 2134 - if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) 2131 + if (read_seqcount_retry(&xfrm_policy_hash_generation, sequence)) { 2132 + rcu_read_unlock(); 2135 2133 goto retry; 2134 + } 2136 2135 2137 - if (ret && !xfrm_pol_hold_rcu(ret)) 2136 + if (ret && !xfrm_pol_hold_rcu(ret)) { 2137 + rcu_read_unlock(); 2138 2138 goto retry; 2139 + } 2139 2140 fail: 2140 2141 rcu_read_unlock(); 2141 2142
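The lookup previously spun on read_seqcount_begin() from inside the RCU read side; the rework samples the sequence first and drops rcu_read_lock() before every retry, presumably so a slow writer cannot pin the read-side critical section for the whole wait. The reader half of the pattern, shrunk to a toy C11 seqcount (no RCU and no memory-ordering subtleties, purely illustrative):

    #include <stdatomic.h>

    static _Atomic unsigned int seq;    /* odd while a writer is active */
    static int value;

    static void write_value(int v)
    {
        atomic_fetch_add(&seq, 1);      /* odd: writer in progress */
        value = v;
        atomic_fetch_add(&seq, 1);      /* even again: publish */
    }

    static int read_value(void)
    {
        unsigned int s;
        int v;

        do {
            while ((s = atomic_load(&seq)) & 1)
                ;                       /* wait out the writer */
            /* rcu_read_lock() would be taken here, per attempt... */
            v = value;                  /* speculative read */
            /* ...and rcu_read_unlock() dropped before any retry */
        } while (atomic_load(&seq) != s);   /* raced a writer: retry */

        return v;
    }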
+12 -2
net/xfrm/xfrm_state.c
··· 2550 2550 } 2551 2551 EXPORT_SYMBOL(xfrm_state_delete_tunnel); 2552 2552 2553 - u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) 2553 + u32 __xfrm_state_mtu(struct xfrm_state *x, int mtu) 2554 2554 { 2555 2555 const struct xfrm_type *type = READ_ONCE(x->type); 2556 2556 struct crypto_aead *aead; ··· 2581 2581 return ((mtu - x->props.header_len - crypto_aead_authsize(aead) - 2582 2582 net_adj) & ~(blksize - 1)) + net_adj - 2; 2583 2583 } 2584 - EXPORT_SYMBOL_GPL(xfrm_state_mtu); 2584 + EXPORT_SYMBOL_GPL(__xfrm_state_mtu); 2585 + 2586 + u32 xfrm_state_mtu(struct xfrm_state *x, int mtu) 2587 + { 2588 + mtu = __xfrm_state_mtu(x, mtu); 2589 + 2590 + if (x->props.family == AF_INET6 && mtu < IPV6_MIN_MTU) 2591 + return IPV6_MIN_MTU; 2592 + 2593 + return mtu; 2594 + } 2585 2595 2586 2596 int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload) 2587 2597 {
+14 -14
net/xfrm/xfrm_user.c
··· 580 580 581 581 copy_from_user_state(x, p); 582 582 583 + if (attrs[XFRMA_ENCAP]) { 584 + x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), 585 + sizeof(*x->encap), GFP_KERNEL); 586 + if (x->encap == NULL) 587 + goto error; 588 + } 589 + 590 + if (attrs[XFRMA_COADDR]) { 591 + x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), 592 + sizeof(*x->coaddr), GFP_KERNEL); 593 + if (x->coaddr == NULL) 594 + goto error; 595 + } 596 + 583 597 if (attrs[XFRMA_SA_EXTRA_FLAGS]) 584 598 x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]); 585 599 ··· 614 600 attrs[XFRMA_ALG_COMP]))) 615 601 goto error; 616 602 617 - if (attrs[XFRMA_ENCAP]) { 618 - x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]), 619 - sizeof(*x->encap), GFP_KERNEL); 620 - if (x->encap == NULL) 621 - goto error; 622 - } 623 - 624 603 if (attrs[XFRMA_TFCPAD]) 625 604 x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]); 626 - 627 - if (attrs[XFRMA_COADDR]) { 628 - x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]), 629 - sizeof(*x->coaddr), GFP_KERNEL); 630 - if (x->coaddr == NULL) 631 - goto error; 632 - } 633 605 634 606 xfrm_mark_get(attrs, &x->mark); 635 607
+3
tools/bpf/resolve_btfids/main.c
··· 655 655 if (sets_patch(obj)) 656 656 return -1; 657 657 658 + /* Set type to ensure endian translation occurs. */ 659 + obj->efile.idlist->d_type = ELF_T_WORD; 660 + 658 661 elf_flagdata(obj->efile.idlist, ELF_C_SET, ELF_F_DIRTY); 659 662 660 663 err = elf_update(obj->efile.elf, ELF_C_WRITE);
+6 -1
tools/testing/selftests/bpf/prog_tests/sockmap_listen.c
··· 1610 1610 struct sockaddr_storage addr; 1611 1611 int c0, c1, p0, p1; 1612 1612 unsigned int pass; 1613 + int retries = 100; 1613 1614 socklen_t len; 1614 1615 int err, n; 1615 1616 u64 value; ··· 1687 1686 if (pass != 1) 1688 1687 FAIL("%s: want pass count 1, have %d", log_prefix, pass); 1689 1688 1689 + again: 1690 1690 n = read(mode == REDIR_INGRESS ? p0 : c0, &b, 1); 1691 - if (n < 0) 1691 + if (n < 0) { 1692 + if (errno == EAGAIN && retries--) 1693 + goto again; 1692 1694 FAIL_ERRNO("%s: read", log_prefix); 1695 + } 1693 1696 if (n == 0) 1694 1697 FAIL("%s: incomplete read", log_prefix); 1695 1698
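The verdict-program redirect is not synchronous with the send, so the first read() on the non-blocking peer may legitimately return EAGAIN before the forwarded byte shows up; the test now retries a bounded number of times instead of failing on the first miss. The same loop in isolation:

    #include <errno.h>
    #include <unistd.h>

    /* retry a non-blocking read until data arrives or retries run out */
    static ssize_t read_with_retries(int fd, void *buf, size_t len, int retries)
    {
        ssize_t n;

        do {
            n = read(fd, buf, len);
        } while (n < 0 && errno == EAGAIN && retries-- > 0);

        return n;
    }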