Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) Fix some length checks during OGM processing in batman-adv, from
Sven Eckelmann.

2) Fix a regression that caused netfilter conntrack sysctls to no longer
be per-netns. From Florian Westphal.

3) Fix a use-after-free in netpoll, from Feng Sun.

4) Guard destruction of pfifo_fast per-cpu qdisc stats with
qdisc_is_percpu_stats(), from Davide Caratti. A similar bug is fixed
in pfifo_fast_enqueue().

5) Fix memory leak in mld_del_delrec(), from Eric Dumazet.

6) Handle neigh events on internal ports correctly in nfp, from John
Hurley.

7) Clear the SKB timestamp in the NF flow table code so that it does
not confuse the fq scheduler. From Florian Westphal.

8) taprio destroy can crash if it is invoked in a failure path of
taprio_init(), because the list head isn't set up properly yet and
the list_del() is unconditional. Perform the list_add() earlier to
address this; a minimal sketch of the hazard follows this list. From
Vladimir Oltean.

9) Make sure to reapply vlan filters on device up in the aquantia
driver. From Dmitry Bogdanov.

10) The sgiseeq driver was releasing DMA memory using free_page()
instead of dma_free_attrs(). From Christophe JAILLET.
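
For item 8, here is a minimal user-space sketch of the ordering hazard and
of the "link before any failure return" pattern that fixes it. This is an
illustration only, not the actual net/sched code: sched_priv, sched_init()
and sched_destroy() are hypothetical stand-ins for taprio's structures, and
the list helpers are simplified copies of the kernel's list_head semantics.

    /*
     * Assumption: the destroy path does an unconditional list_del(), as in
     * the taprio report above.  That is only safe if the entry was linked
     * before any failure return in the init path.
     */
    #include <stdio.h>

    struct list_head {
            struct list_head *next, *prev;
    };

    static void INIT_LIST_HEAD(struct list_head *h)
    {
            h->next = h;
            h->prev = h;
    }

    static void list_add(struct list_head *new, struct list_head *head)
    {
            new->next = head->next;
            new->prev = head;
            head->next->prev = new;
            head->next = new;
    }

    static void list_del(struct list_head *entry)
    {
            /* Dereferences garbage if entry was never linked or initialised. */
            entry->prev->next = entry->next;
            entry->next->prev = entry->prev;
    }

    static struct list_head sched_list;     /* stand-in for the global taprio list */

    struct sched_priv {                     /* stand-in for the per-qdisc private data */
            struct list_head node;
    };

    /* Destroy path: the list_del() is unconditional, mirroring the bug report. */
    static void sched_destroy(struct sched_priv *q)
    {
            list_del(&q->node);
    }

    static int sched_init(struct sched_priv *q, int fail_early)
    {
            /* Fix pattern: link into the list before any failure return, so
             * the unconditional list_del() in sched_destroy() stays valid.
             */
            list_add(&q->node, &sched_list);

            if (fail_early)
                    return -1;              /* destroy is still called on this path */

            return 0;
    }

    int main(void)
    {
            struct sched_priv q = { { NULL, NULL } };

            INIT_LIST_HEAD(&sched_list);

            if (sched_init(&q, 1) < 0)
                    sched_destroy(&q);      /* safe only because init linked q first */

            printf("destroy after failed init completed without a crash\n");
            return 0;
    }

Had sched_init() deferred the list_add() until after the failure return, the
same sched_destroy() call would walk uninitialised pointers, which is the
crash the taprio fix avoids.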

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (58 commits)
net: seeq: Fix the function used to release some memory in an error handling path
enetc: Add missing call to 'pci_free_irq_vectors()' in probe and remove functions
net: bcmgenet: use ethtool_op_get_ts_info()
tc-testing: don't hardcode 'ip' in nsPlugin.py
net: dsa: microchip: add KSZ8563 compatibility string
dt-bindings: net: dsa: document additional Microchip KSZ8563 switch
net: aquantia: fix out of memory condition on rx side
net: aquantia: linkstate irq should be oneshot
net: aquantia: reapply vlan filters on up
net: aquantia: fix limit of vlan filters
net: aquantia: fix removal of vlan 0
net/sched: cbs: Set default link speed to 10 Mbps in cbs_set_port_rate
taprio: Set default link speed to 10 Mbps in taprio_set_picos_per_byte
taprio: Fix kernel panic in taprio_destroy
net: dsa: microchip: fill regmap_config name
rxrpc: Fix lack of conn cleanup when local endpoint is cleaned up [ver #2]
net: stmmac: dwmac-rk: Don't fail if phy regulator is absent
amd-xgbe: Fix error path in xgbe_mod_init()
netfilter: nft_meta_bridge: Fix get NFT_META_BRI_IIFVPROTO in network byteorder
mac80211: Correctly set noencrypt for PAE frames
...

+1
Documentation/devicetree/bindings/net/dsa/ksz.txt
··· 12 12 - "microchip,ksz8565" 13 13 - "microchip,ksz9893" 14 14 - "microchip,ksz9563" 15 + - "microchip,ksz8563" 15 16 16 17 Optional properties: 17 18
+2 -2
Documentation/devicetree/bindings/net/macb.txt
··· 15 15 Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs. 16 16 Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC. 17 17 Use "cdns,zynqmp-gem" for Zynq Ultrascale+ MPSoC. 18 - Use "sifive,fu540-macb" for SiFive FU540-C000 SoC. 18 + Use "sifive,fu540-c000-gem" for SiFive FU540-C000 SoC. 19 19 Or the generic form: "cdns,emac". 20 20 - reg: Address and length of the register set for the device 21 - For "sifive,fu540-macb", second range is required to specify the 21 + For "sifive,fu540-c000-gem", second range is required to specify the 22 22 address and length of the registers for GEMGXL Management block. 23 23 - interrupts: Should contain macb interrupt 24 24 - phy-mode: See ethernet.txt file in the same directory.
+1
drivers/net/dsa/microchip/ksz9477_spi.c
··· 81 81 { .compatible = "microchip,ksz9897" }, 82 82 { .compatible = "microchip,ksz9893" }, 83 83 { .compatible = "microchip,ksz9563" }, 84 + { .compatible = "microchip,ksz8563" }, 84 85 {}, 85 86 }; 86 87 MODULE_DEVICE_TABLE(of, ksz9477_dt_ids);
+1
drivers/net/dsa/microchip/ksz_common.h
··· 128 128 129 129 #define KSZ_REGMAP_ENTRY(width, swp, regbits, regpad, regalign) \ 130 130 { \ 131 + .name = #width, \ 131 132 .val_bits = (width), \ 132 133 .reg_stride = (width) / 8, \ 133 134 .reg_bits = (regbits) + (regalign), \
+8 -2
drivers/net/ethernet/amd/xgbe/xgbe-main.c
··· 469 469 470 470 ret = xgbe_platform_init(); 471 471 if (ret) 472 - return ret; 472 + goto err_platform_init; 473 473 474 474 ret = xgbe_pci_init(); 475 475 if (ret) 476 - return ret; 476 + goto err_pci_init; 477 477 478 478 return 0; 479 + 480 + err_pci_init: 481 + xgbe_platform_exit(); 482 + err_platform_init: 483 + unregister_netdevice_notifier(&xgbe_netdev_notifier); 484 + return ret; 479 485 } 480 486 481 487 static void __exit xgbe_mod_exit(void)
+3 -2
drivers/net/ethernet/aquantia/atlantic/aq_filters.c
··· 431 431 if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) 432 432 break; 433 433 } 434 - if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { 434 + if (rule && rule->type == aq_rx_filter_vlan && 435 + be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { 435 436 struct ethtool_rxnfc cmd; 436 437 437 438 cmd.fs.location = rule->aq_fsp.location; ··· 844 843 return err; 845 844 846 845 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { 847 - if (hweight < AQ_VLAN_MAX_FILTERS && hweight > 0) { 846 + if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) { 848 847 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, 849 848 !(aq_nic->packet_filter & IFF_PROMISC)); 850 849 aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
+4
drivers/net/ethernet/aquantia/atlantic/aq_main.c
··· 61 61 if (err < 0) 62 62 goto err_exit; 63 63 64 + err = aq_filters_vlans_update(aq_nic); 65 + if (err < 0) 66 + goto err_exit; 67 + 64 68 err = aq_nic_start(aq_nic); 65 69 if (err < 0) 66 70 goto err_exit;
+1 -1
drivers/net/ethernet/aquantia/atlantic/aq_nic.c
··· 393 393 self->aq_nic_cfg.link_irq_vec); 394 394 err = request_threaded_irq(irqvec, NULL, 395 395 aq_linkstate_threaded_isr, 396 - IRQF_SHARED, 396 + IRQF_SHARED | IRQF_ONESHOT, 397 397 self->ndev->name, self); 398 398 if (err < 0) 399 399 goto err_exit;
+2 -1
drivers/net/ethernet/aquantia/atlantic/aq_vec.c
··· 86 86 } 87 87 } 88 88 89 + err_exit: 89 90 if (!was_tx_cleaned) 90 91 work_done = budget; 91 92 ··· 96 95 1U << self->aq_ring_param.vec_idx); 97 96 } 98 97 } 99 - err_exit: 98 + 100 99 return work_done; 101 100 } 102 101
+1
drivers/net/ethernet/broadcom/genet/bcmgenet.c
··· 1124 1124 .set_coalesce = bcmgenet_set_coalesce, 1125 1125 .get_link_ksettings = bcmgenet_get_link_ksettings, 1126 1126 .set_link_ksettings = bcmgenet_set_link_ksettings, 1127 + .get_ts_info = ethtool_op_get_ts_info, 1127 1128 }; 1128 1129 1129 1130 /* Power down the unimac, based on mode. */
+1 -1
drivers/net/ethernet/cadence/macb_main.c
··· 4154 4154 { .compatible = "cdns,emac", .data = &emac_config }, 4155 4155 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config}, 4156 4156 { .compatible = "cdns,zynq-gem", .data = &zynq_config }, 4157 - { .compatible = "sifive,fu540-macb", .data = &fu540_c000_config }, 4157 + { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config }, 4158 4158 { /* sentinel */ } 4159 4159 }; 4160 4160 MODULE_DEVICE_TABLE(of, macb_dt_ids);
+4 -1
drivers/net/ethernet/freescale/enetc/enetc_ptp.c
··· 82 82 n = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSIX); 83 83 if (n != 1) { 84 84 err = -EPERM; 85 - goto err_irq; 85 + goto err_irq_vectors; 86 86 } 87 87 88 88 ptp_qoriq->irq = pci_irq_vector(pdev, 0); ··· 107 107 err_no_clock: 108 108 free_irq(ptp_qoriq->irq, ptp_qoriq); 109 109 err_irq: 110 + pci_free_irq_vectors(pdev); 111 + err_irq_vectors: 110 112 iounmap(base); 111 113 err_ioremap: 112 114 kfree(ptp_qoriq); ··· 127 125 128 126 enetc_phc_index = -1; 129 127 ptp_qoriq_free(ptp_qoriq); 128 + pci_free_irq_vectors(pdev); 130 129 kfree(ptp_qoriq); 131 130 132 131 pci_release_mem_regions(pdev);
+5 -1
drivers/net/ethernet/ibm/ibmvnic.c
··· 1983 1983 1984 1984 rwi = get_next_rwi(adapter); 1985 1985 while (rwi) { 1986 + if (adapter->state == VNIC_REMOVING || 1987 + adapter->state == VNIC_REMOVED) 1988 + goto out; 1989 + 1986 1990 if (adapter->force_reset_recovery) { 1987 1991 adapter->force_reset_recovery = false; 1988 1992 rc = do_hard_reset(adapter, rwi, reset_state); ··· 2011 2007 netdev_dbg(adapter->netdev, "Reset failed\n"); 2012 2008 free_all_rwi(adapter); 2013 2009 } 2014 - 2010 + out: 2015 2011 adapter->resetting = false; 2016 2012 if (we_lock_rtnl) 2017 2013 rtnl_unlock();
+7
drivers/net/ethernet/marvell/sky2.c
··· 4931 4931 DMI_MATCH(DMI_BOARD_NAME, "P6T"), 4932 4932 }, 4933 4933 }, 4934 + { 4935 + .ident = "ASUS P6X", 4936 + .matches = { 4937 + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), 4938 + DMI_MATCH(DMI_BOARD_NAME, "P6X"), 4939 + }, 4940 + }, 4934 4941 {} 4935 4942 }; 4936 4943
+13 -4
drivers/net/ethernet/netronome/nfp/bpf/jit.c
··· 1163 1163 bool clr_gpr, lmem_step step) 1164 1164 { 1165 1165 s32 off = nfp_prog->stack_frame_depth + meta->insn.off + ptr_off; 1166 - bool first = true, last; 1166 + bool first = true, narrow_ld, last; 1167 1167 bool needs_inc = false; 1168 1168 swreg stack_off_reg; 1169 1169 u8 prev_gpr = 255; ··· 1209 1209 1210 1210 needs_inc = true; 1211 1211 } 1212 + 1213 + narrow_ld = clr_gpr && size < 8; 1214 + 1212 1215 if (lm3) { 1216 + unsigned int nop_cnt; 1217 + 1213 1218 emit_csr_wr(nfp_prog, imm_b(nfp_prog), NFP_CSR_ACT_LM_ADDR3); 1214 - /* For size < 4 one slot will be filled by zeroing of upper. */ 1215 - wrp_nops(nfp_prog, clr_gpr && size < 8 ? 2 : 3); 1219 + /* For size < 4 one slot will be filled by zeroing of upper, 1220 + * but be careful, that zeroing could be eliminated by zext 1221 + * optimization. 1222 + */ 1223 + nop_cnt = narrow_ld && meta->flags & FLAG_INSN_DO_ZEXT ? 2 : 3; 1224 + wrp_nops(nfp_prog, nop_cnt); 1216 1225 } 1217 1226 1218 - if (clr_gpr && size < 8) 1227 + if (narrow_ld) 1219 1228 wrp_zext(nfp_prog, meta, gpr); 1220 1229 1221 1230 while (size) {
+4 -3
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1409 1409 struct nfp_flower_priv *priv = app->priv; 1410 1410 struct flow_block_cb *block_cb; 1411 1411 1412 - if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && 1413 - !(f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && 1414 - nfp_flower_internal_port_can_offload(app, netdev))) 1412 + if ((f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS && 1413 + !nfp_flower_internal_port_can_offload(app, netdev)) || 1414 + (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS && 1415 + nfp_flower_internal_port_can_offload(app, netdev))) 1415 1416 return -EOPNOTSUPP; 1416 1417 1417 1418 switch (f->command) {
+4 -4
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
··· 328 328 329 329 flow.daddr = *(__be32 *)n->primary_key; 330 330 331 - /* Only concerned with route changes for representors. */ 332 - if (!nfp_netdev_is_nfp_repr(n->dev)) 333 - return NOTIFY_DONE; 334 - 335 331 app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb); 336 332 app = app_priv->app; 333 + 334 + if (!nfp_netdev_is_nfp_repr(n->dev) && 335 + !nfp_flower_internal_port_can_offload(app, n->dev)) 336 + return NOTIFY_DONE; 337 337 338 338 /* Only concerned with changes to routes already added to NFP. */ 339 339 if (!nfp_tun_has_route(app, flow.daddr))
+4 -3
drivers/net/ethernet/seeq/sgiseeq.c
··· 794 794 printk(KERN_ERR "Sgiseeq: Cannot register net device, " 795 795 "aborting.\n"); 796 796 err = -ENODEV; 797 - goto err_out_free_page; 797 + goto err_out_free_attrs; 798 798 } 799 799 800 800 printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr); 801 801 802 802 return 0; 803 803 804 - err_out_free_page: 805 - free_page((unsigned long) sp->srings); 804 + err_out_free_attrs: 805 + dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings, 806 + sp->srings_dma, DMA_ATTR_NON_CONSISTENT); 806 807 err_out_free_dev: 807 808 free_netdev(dev); 808 809
+2 -4
drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
··· 1194 1194 int ret; 1195 1195 struct device *dev = &bsp_priv->pdev->dev; 1196 1196 1197 - if (!ldo) { 1198 - dev_err(dev, "no regulator found\n"); 1199 - return -1; 1200 - } 1197 + if (!ldo) 1198 + return 0; 1201 1199 1202 1200 if (enable) { 1203 1201 ret = regulator_enable(ldo);
+26
drivers/net/phy/phy-c45.c
··· 523 523 } 524 524 EXPORT_SYMBOL_GPL(genphy_c45_read_status); 525 525 526 + /** 527 + * genphy_c45_config_aneg - restart auto-negotiation or forced setup 528 + * @phydev: target phy_device struct 529 + * 530 + * Description: If auto-negotiation is enabled, we configure the 531 + * advertising, and then restart auto-negotiation. If it is not 532 + * enabled, then we force a configuration. 533 + */ 534 + int genphy_c45_config_aneg(struct phy_device *phydev) 535 + { 536 + bool changed = false; 537 + int ret; 538 + 539 + if (phydev->autoneg == AUTONEG_DISABLE) 540 + return genphy_c45_pma_setup_forced(phydev); 541 + 542 + ret = genphy_c45_an_config_aneg(phydev); 543 + if (ret < 0) 544 + return ret; 545 + if (ret > 0) 546 + changed = true; 547 + 548 + return genphy_c45_check_and_restart_aneg(phydev, changed); 549 + } 550 + EXPORT_SYMBOL_GPL(genphy_c45_config_aneg); 551 + 526 552 /* The gen10g_* functions are the old Clause 45 stub */ 527 553 528 554 int gen10g_config_aneg(struct phy_device *phydev)
+1 -1
drivers/net/phy/phy.c
··· 507 507 * allowed to call genphy_config_aneg() 508 508 */ 509 509 if (phydev->is_c45 && !(phydev->c45_ids.devices_in_package & BIT(0))) 510 - return -EOPNOTSUPP; 510 + return genphy_c45_config_aneg(phydev); 511 511 512 512 return genphy_config_aneg(phydev); 513 513 }
+1 -4
drivers/net/usb/r8152.c
··· 4021 4021 #ifdef CONFIG_PM_SLEEP 4022 4022 unregister_pm_notifier(&tp->pm_notifier); 4023 4023 #endif 4024 - if (!test_bit(RTL8152_UNPLUG, &tp->flags)) 4025 - napi_disable(&tp->napi); 4024 + napi_disable(&tp->napi); 4026 4025 clear_bit(WORK_ENABLE, &tp->flags); 4027 4026 usb_kill_urb(tp->intr_urb); 4028 4027 cancel_delayed_work_sync(&tp->schedule); ··· 5352 5353 return 0; 5353 5354 5354 5355 out1: 5355 - netif_napi_del(&tp->napi); 5356 5356 usb_set_intfdata(intf, NULL); 5357 5357 out: 5358 5358 free_netdev(netdev); ··· 5366 5368 if (tp) { 5367 5369 rtl_set_unplug(tp); 5368 5370 5369 - netif_napi_del(&tp->napi); 5370 5371 unregister_netdev(tp->netdev); 5371 5372 cancel_delayed_work_sync(&tp->hw_phy_work); 5372 5373 tp->rtl_ops.unload(tp);
+24
drivers/net/wireless/intel/iwlwifi/cfg/22000.c
··· 556 556 .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, 557 557 }; 558 558 559 + const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0 = { 560 + .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)", 561 + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, 562 + IWL_DEVICE_22500, 563 + /* 564 + * This device doesn't support receiving BlockAck with a large bitmap 565 + * so we need to restrict the size of transmitted aggregation to the 566 + * HT size; mac80211 would otherwise pick the HE max (256) by default. 567 + */ 568 + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, 569 + }; 570 + 571 + const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0 = { 572 + .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)", 573 + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, 574 + IWL_DEVICE_22500, 575 + /* 576 + * This device doesn't support receiving BlockAck with a large bitmap 577 + * so we need to restrict the size of transmitted aggregation to the 578 + * HT size; mac80211 would otherwise pick the HE max (256) by default. 579 + */ 580 + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, 581 + }; 582 + 559 583 const struct iwl_cfg iwl22000_2ax_cfg_jf = { 560 584 .name = "Intel(R) Dual Band Wireless AX 22000", 561 585 .fw_name_pre = IWL_QU_B_JF_B_FW_PRE,
+2
drivers/net/wireless/intel/iwlwifi/iwl-config.h
··· 577 577 extern const struct iwl_cfg iwl_ax1650s_cfg_quz_hr; 578 578 extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0; 579 579 extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0; 580 + extern const struct iwl_cfg killer1650s_2ax_cfg_qu_c0_hr_b0; 581 + extern const struct iwl_cfg killer1650i_2ax_cfg_qu_c0_hr_b0; 580 582 extern const struct iwl_cfg killer1650x_2ax_cfg; 581 583 extern const struct iwl_cfg killer1650w_2ax_cfg; 582 584 extern const struct iwl_cfg iwl9461_2ac_cfg_qu_b0_jf_b0;
+4
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 1062 1062 iwl_trans->cfg = &iwl9560_2ac_cfg_qu_c0_jf_b0; 1063 1063 else if (iwl_trans->cfg == &iwl9560_2ac_160_cfg_qu_b0_jf_b0) 1064 1064 iwl_trans->cfg = &iwl9560_2ac_160_cfg_qu_c0_jf_b0; 1065 + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) 1066 + iwl_trans->cfg = &killer1650s_2ax_cfg_qu_c0_hr_b0; 1067 + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) 1068 + iwl_trans->cfg = &killer1650i_2ax_cfg_qu_c0_hr_b0; 1065 1069 } 1066 1070 1067 1071 /* same thing for QuZ... */
+1 -6
drivers/net/wireless/intel/iwlwifi/pcie/trans.c
··· 3602 3602 } 3603 3603 } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == 3604 3604 CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) && 3605 - ((trans->cfg != &iwl_ax200_cfg_cc && 3606 - trans->cfg != &iwl_ax201_cfg_qu_hr && 3607 - trans->cfg != &killer1650x_2ax_cfg && 3608 - trans->cfg != &killer1650w_2ax_cfg && 3609 - trans->cfg != &iwl_ax201_cfg_quz_hr) || 3610 - trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) { 3605 + trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) { 3611 3606 u32 hw_status; 3612 3607 3613 3608 hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
+1
include/linux/phy.h
··· 1107 1107 int genphy_c45_read_mdix(struct phy_device *phydev); 1108 1108 int genphy_c45_pma_read_abilities(struct phy_device *phydev); 1109 1109 int genphy_c45_read_status(struct phy_device *phydev); 1110 + int genphy_c45_config_aneg(struct phy_device *phydev); 1110 1111 1111 1112 /* The gen10g_* functions are the old Clause 45 stub */ 1112 1113 int gen10g_config_aneg(struct phy_device *phydev);
+3 -1
include/net/act_api.h
··· 15 15 struct tcf_idrinfo { 16 16 struct mutex lock; 17 17 struct idr action_idr; 18 + struct net *net; 18 19 }; 19 20 20 21 struct tc_action_ops; ··· 109 108 }; 110 109 111 110 static inline 112 - int tc_action_net_init(struct tc_action_net *tn, 111 + int tc_action_net_init(struct net *net, struct tc_action_net *tn, 113 112 const struct tc_action_ops *ops) 114 113 { 115 114 int err = 0; ··· 118 117 if (!tn->idrinfo) 119 118 return -ENOMEM; 120 119 tn->ops = ops; 120 + tn->idrinfo->net = net; 121 121 mutex_init(&tn->idrinfo->lock); 122 122 idr_init(&tn->idrinfo->action_idr); 123 123 return err;
+1
include/net/psample.h
··· 11 11 u32 group_num; 12 12 u32 refcount; 13 13 u32 seq; 14 + struct rcu_head rcu; 14 15 }; 15 16 16 17 struct psample_group *psample_group_get(struct net *net, u32 group_num);
+28 -31
include/trace/events/rxrpc.h
··· 23 23 #define __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY 24 24 25 25 enum rxrpc_skb_trace { 26 - rxrpc_skb_rx_cleaned, 27 - rxrpc_skb_rx_freed, 28 - rxrpc_skb_rx_got, 29 - rxrpc_skb_rx_lost, 30 - rxrpc_skb_rx_purged, 31 - rxrpc_skb_rx_received, 32 - rxrpc_skb_rx_rotated, 33 - rxrpc_skb_rx_seen, 34 - rxrpc_skb_tx_cleaned, 35 - rxrpc_skb_tx_freed, 36 - rxrpc_skb_tx_got, 37 - rxrpc_skb_tx_new, 38 - rxrpc_skb_tx_rotated, 39 - rxrpc_skb_tx_seen, 26 + rxrpc_skb_cleaned, 27 + rxrpc_skb_freed, 28 + rxrpc_skb_got, 29 + rxrpc_skb_lost, 30 + rxrpc_skb_new, 31 + rxrpc_skb_purged, 32 + rxrpc_skb_received, 33 + rxrpc_skb_rotated, 34 + rxrpc_skb_seen, 35 + rxrpc_skb_unshared, 36 + rxrpc_skb_unshared_nomem, 40 37 }; 41 38 42 39 enum rxrpc_local_trace { ··· 225 228 * Declare tracing information enums and their string mappings for display. 226 229 */ 227 230 #define rxrpc_skb_traces \ 228 - EM(rxrpc_skb_rx_cleaned, "Rx CLN") \ 229 - EM(rxrpc_skb_rx_freed, "Rx FRE") \ 230 - EM(rxrpc_skb_rx_got, "Rx GOT") \ 231 - EM(rxrpc_skb_rx_lost, "Rx *L*") \ 232 - EM(rxrpc_skb_rx_purged, "Rx PUR") \ 233 - EM(rxrpc_skb_rx_received, "Rx RCV") \ 234 - EM(rxrpc_skb_rx_rotated, "Rx ROT") \ 235 - EM(rxrpc_skb_rx_seen, "Rx SEE") \ 236 - EM(rxrpc_skb_tx_cleaned, "Tx CLN") \ 237 - EM(rxrpc_skb_tx_freed, "Tx FRE") \ 238 - EM(rxrpc_skb_tx_got, "Tx GOT") \ 239 - EM(rxrpc_skb_tx_new, "Tx NEW") \ 240 - EM(rxrpc_skb_tx_rotated, "Tx ROT") \ 241 - E_(rxrpc_skb_tx_seen, "Tx SEE") 231 + EM(rxrpc_skb_cleaned, "CLN") \ 232 + EM(rxrpc_skb_freed, "FRE") \ 233 + EM(rxrpc_skb_got, "GOT") \ 234 + EM(rxrpc_skb_lost, "*L*") \ 235 + EM(rxrpc_skb_new, "NEW") \ 236 + EM(rxrpc_skb_purged, "PUR") \ 237 + EM(rxrpc_skb_received, "RCV") \ 238 + EM(rxrpc_skb_rotated, "ROT") \ 239 + EM(rxrpc_skb_seen, "SEE") \ 240 + EM(rxrpc_skb_unshared, "UNS") \ 241 + E_(rxrpc_skb_unshared_nomem, "US0") 242 242 243 243 #define rxrpc_local_traces \ 244 244 EM(rxrpc_local_got, "GOT") \ ··· 637 643 638 644 TRACE_EVENT(rxrpc_skb, 639 645 TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op, 640 - int usage, int mod_count, const void *where), 646 + int usage, int mod_count, u8 flags, const void *where), 641 647 642 - TP_ARGS(skb, op, usage, mod_count, where), 648 + TP_ARGS(skb, op, usage, mod_count, flags, where), 643 649 644 650 TP_STRUCT__entry( 645 651 __field(struct sk_buff *, skb ) 646 652 __field(enum rxrpc_skb_trace, op ) 653 + __field(u8, flags ) 647 654 __field(int, usage ) 648 655 __field(int, mod_count ) 649 656 __field(const void *, where ) ··· 652 657 653 658 TP_fast_assign( 654 659 __entry->skb = skb; 660 + __entry->flags = flags; 655 661 __entry->op = op; 656 662 __entry->usage = usage; 657 663 __entry->mod_count = mod_count; 658 664 __entry->where = where; 659 665 ), 660 666 661 - TP_printk("s=%p %s u=%d m=%d p=%pSR", 667 + TP_printk("s=%p %cx %s u=%d m=%d p=%pSR", 662 668 __entry->skb, 669 + __entry->flags & RXRPC_SKB_TX_BUFFER ? 'T' : 'R', 663 670 __print_symbolic(__entry->op, rxrpc_skb_traces), 664 671 __entry->usage, 665 672 __entry->mod_count,
+6 -2
kernel/bpf/core.c
··· 890 890 891 891 static int bpf_jit_blind_insn(const struct bpf_insn *from, 892 892 const struct bpf_insn *aux, 893 - struct bpf_insn *to_buff) 893 + struct bpf_insn *to_buff, 894 + bool emit_zext) 894 895 { 895 896 struct bpf_insn *to = to_buff; 896 897 u32 imm_rnd = get_random_int(); ··· 1006 1005 case 0: /* Part 2 of BPF_LD | BPF_IMM | BPF_DW. */ 1007 1006 *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ aux[0].imm); 1008 1007 *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd); 1008 + if (emit_zext) 1009 + *to++ = BPF_ZEXT_REG(BPF_REG_AX); 1009 1010 *to++ = BPF_ALU64_REG(BPF_OR, aux[0].dst_reg, BPF_REG_AX); 1010 1011 break; 1011 1012 ··· 1091 1088 insn[1].code == 0) 1092 1089 memcpy(aux, insn, sizeof(aux)); 1093 1090 1094 - rewritten = bpf_jit_blind_insn(insn, aux, insn_buff); 1091 + rewritten = bpf_jit_blind_insn(insn, aux, insn_buff, 1092 + clone->aux->verifier_zext); 1095 1093 if (!rewritten) 1096 1094 continue; 1097 1095
+13 -7
net/batman-adv/bat_iv_ogm.c
··· 277 277 * batadv_iv_ogm_aggr_packet() - checks if there is another OGM attached 278 278 * @buff_pos: current position in the skb 279 279 * @packet_len: total length of the skb 280 - * @tvlv_len: tvlv length of the previously considered OGM 280 + * @ogm_packet: potential OGM in buffer 281 281 * 282 282 * Return: true if there is enough space for another OGM, false otherwise. 283 283 */ 284 - static bool batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, 285 - __be16 tvlv_len) 284 + static bool 285 + batadv_iv_ogm_aggr_packet(int buff_pos, int packet_len, 286 + const struct batadv_ogm_packet *ogm_packet) 286 287 { 287 288 int next_buff_pos = 0; 288 289 289 - next_buff_pos += buff_pos + BATADV_OGM_HLEN; 290 - next_buff_pos += ntohs(tvlv_len); 290 + /* check if there is enough space for the header */ 291 + next_buff_pos += buff_pos + sizeof(*ogm_packet); 292 + if (next_buff_pos > packet_len) 293 + return false; 294 + 295 + /* check if there is enough space for the optional TVLV */ 296 + next_buff_pos += ntohs(ogm_packet->tvlv_len); 291 297 292 298 return (next_buff_pos <= packet_len) && 293 299 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); ··· 321 315 322 316 /* adjust all flags and log packets */ 323 317 while (batadv_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len, 324 - batadv_ogm_packet->tvlv_len)) { 318 + batadv_ogm_packet)) { 325 319 /* we might have aggregated direct link packets with an 326 320 * ordinary base packet 327 321 */ ··· 1710 1704 1711 1705 /* unpack the aggregated packets and process them one by one */ 1712 1706 while (batadv_iv_ogm_aggr_packet(ogm_offset, skb_headlen(skb), 1713 - ogm_packet->tvlv_len)) { 1707 + ogm_packet)) { 1714 1708 batadv_iv_ogm_process(skb, ogm_offset, if_incoming); 1715 1709 1716 1710 ogm_offset += BATADV_OGM_HLEN;
+12 -6
net/batman-adv/bat_v_ogm.c
··· 631 631 * batadv_v_ogm_aggr_packet() - checks if there is another OGM aggregated 632 632 * @buff_pos: current position in the skb 633 633 * @packet_len: total length of the skb 634 - * @tvlv_len: tvlv length of the previously considered OGM 634 + * @ogm2_packet: potential OGM2 in buffer 635 635 * 636 636 * Return: true if there is enough space for another OGM, false otherwise. 637 637 */ 638 - static bool batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, 639 - __be16 tvlv_len) 638 + static bool 639 + batadv_v_ogm_aggr_packet(int buff_pos, int packet_len, 640 + const struct batadv_ogm2_packet *ogm2_packet) 640 641 { 641 642 int next_buff_pos = 0; 642 643 643 - next_buff_pos += buff_pos + BATADV_OGM2_HLEN; 644 - next_buff_pos += ntohs(tvlv_len); 644 + /* check if there is enough space for the header */ 645 + next_buff_pos += buff_pos + sizeof(*ogm2_packet); 646 + if (next_buff_pos > packet_len) 647 + return false; 648 + 649 + /* check if there is enough space for the optional TVLV */ 650 + next_buff_pos += ntohs(ogm2_packet->tvlv_len); 645 651 646 652 return (next_buff_pos <= packet_len) && 647 653 (next_buff_pos <= BATADV_MAX_AGGREGATION_BYTES); ··· 824 818 ogm_packet = (struct batadv_ogm2_packet *)skb->data; 825 819 826 820 while (batadv_v_ogm_aggr_packet(ogm_offset, skb_headlen(skb), 827 - ogm_packet->tvlv_len)) { 821 + ogm_packet)) { 828 822 batadv_v_ogm_process(skb, ogm_offset, if_incoming); 829 823 830 824 ogm_offset += BATADV_OGM2_HLEN;
+1 -1
net/bridge/netfilter/nft_meta_bridge.c
··· 53 53 goto err; 54 54 55 55 br_vlan_get_proto(br_dev, &p_proto); 56 - nft_reg_store16(dest, p_proto); 56 + nft_reg_store16(dest, htons(p_proto)); 57 57 return; 58 58 } 59 59 default:
+3 -3
net/core/netpoll.c
··· 122 122 txq = netdev_get_tx_queue(dev, q_index); 123 123 HARD_TX_LOCK(dev, txq, smp_processor_id()); 124 124 if (netif_xmit_frozen_or_stopped(txq) || 125 - netpoll_start_xmit(skb, dev, txq) != NETDEV_TX_OK) { 125 + !dev_xmit_complete(netpoll_start_xmit(skb, dev, txq))) { 126 126 skb_queue_head(&npinfo->txq, skb); 127 127 HARD_TX_UNLOCK(dev, txq); 128 128 local_irq_restore(flags); ··· 335 335 336 336 HARD_TX_UNLOCK(dev, txq); 337 337 338 - if (status == NETDEV_TX_OK) 338 + if (dev_xmit_complete(status)) 339 339 break; 340 340 341 341 } ··· 352 352 353 353 } 354 354 355 - if (status != NETDEV_TX_OK) { 355 + if (!dev_xmit_complete(status)) { 356 356 skb_queue_tail(&npinfo->txq, skb); 357 357 schedule_delayed_work(&npinfo->tx_work,0); 358 358 }
+2
net/dsa/tag_8021q.c
··· 28 28 * 29 29 * RSV - VID[9]: 30 30 * To be used for further expansion of SWITCH_ID or for other purposes. 31 + * Must be transmitted as zero and ignored on receive. 31 32 * 32 33 * SWITCH_ID - VID[8:6]: 33 34 * Index of switch within DSA tree. Must be between 0 and ··· 36 35 * 37 36 * RSV - VID[5:4]: 38 37 * To be used for further expansion of PORT or for other purposes. 38 + * Must be transmitted as zero and ignored on receive. 39 39 * 40 40 * PORT - VID[3:0]: 41 41 * Index of switch port. Must be between 0 and DSA_MAX_PORTS - 1.
+21 -11
net/ipv4/tcp.c
··· 935 935 return mss_now; 936 936 } 937 937 938 + /* In some cases, both sendpage() and sendmsg() could have added 939 + * an skb to the write queue, but failed adding payload on it. 940 + * We need to remove it to consume less memory, but more 941 + * importantly be able to generate EPOLLOUT for Edge Trigger epoll() 942 + * users. 943 + */ 944 + static void tcp_remove_empty_skb(struct sock *sk, struct sk_buff *skb) 945 + { 946 + if (skb && !skb->len) { 947 + tcp_unlink_write_queue(skb, sk); 948 + if (tcp_write_queue_empty(sk)) 949 + tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 950 + sk_wmem_free_skb(sk, skb); 951 + } 952 + } 953 + 938 954 ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, 939 955 size_t size, int flags) 940 956 { ··· 1080 1064 return copied; 1081 1065 1082 1066 do_error: 1067 + tcp_remove_empty_skb(sk, tcp_write_queue_tail(sk)); 1083 1068 if (copied) 1084 1069 goto out; 1085 1070 out_err: ··· 1405 1388 sock_zerocopy_put(uarg); 1406 1389 return copied + copied_syn; 1407 1390 1408 - do_fault: 1409 - if (!skb->len) { 1410 - tcp_unlink_write_queue(skb, sk); 1411 - /* It is the one place in all of TCP, except connection 1412 - * reset, where we can be unlinking the send_head. 1413 - */ 1414 - if (tcp_write_queue_empty(sk)) 1415 - tcp_chrono_stop(sk, TCP_CHRONO_BUSY); 1416 - sk_wmem_free_skb(sk, skb); 1417 - } 1418 - 1419 1391 do_error: 1392 + skb = tcp_write_queue_tail(sk); 1393 + do_fault: 1394 + tcp_remove_empty_skb(sk, skb); 1395 + 1420 1396 if (copied + copied_syn) 1421 1397 goto out; 1422 1398 out_err:
+2 -1
net/ipv4/tcp_output.c
··· 2053 2053 if (len <= skb->len) 2054 2054 break; 2055 2055 2056 - if (unlikely(TCP_SKB_CB(skb)->eor)) 2056 + if (unlikely(TCP_SKB_CB(skb)->eor) || tcp_has_tx_tstamp(skb)) 2057 2057 return false; 2058 2058 2059 2059 len -= skb->len; ··· 2170 2170 * we need to propagate it to the new skb. 2171 2171 */ 2172 2172 TCP_SKB_CB(nskb)->eor = TCP_SKB_CB(skb)->eor; 2173 + tcp_skb_collapse_tstamp(nskb, skb); 2173 2174 tcp_unlink_write_queue(skb, sk); 2174 2175 sk_wmem_free_skb(sk, skb); 2175 2176 } else {
+3 -2
net/ipv6/mcast.c
··· 787 787 if (pmc) { 788 788 im->idev = pmc->idev; 789 789 if (im->mca_sfmode == MCAST_INCLUDE) { 790 - im->mca_tomb = pmc->mca_tomb; 791 - im->mca_sources = pmc->mca_sources; 790 + swap(im->mca_tomb, pmc->mca_tomb); 791 + swap(im->mca_sources, pmc->mca_sources); 792 792 for (psf = im->mca_sources; psf; psf = psf->sf_next) 793 793 psf->sf_crcount = idev->mc_qrv; 794 794 } else { 795 795 im->mca_crcount = idev->mc_qrv; 796 796 } 797 797 in6_dev_put(pmc->idev); 798 + ip6_mc_clear_src(pmc); 798 799 kfree(pmc); 799 800 } 800 801 spin_unlock_bh(&im->mca_lock);
+3 -3
net/mac80211/rx.c
··· 2447 2447 skb->protocol == cpu_to_be16(ETH_P_PREAUTH)) && 2448 2448 sdata->control_port_over_nl80211)) { 2449 2449 struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); 2450 - bool noencrypt = status->flag & RX_FLAG_DECRYPTED; 2450 + bool noencrypt = !(status->flag & RX_FLAG_DECRYPTED); 2451 2451 2452 2452 cfg80211_rx_control_port(dev, skb, noencrypt); 2453 2453 dev_kfree_skb(skb); 2454 2454 } else { 2455 + memset(skb->cb, 0, sizeof(skb->cb)); 2456 + 2455 2457 /* deliver to local stack */ 2456 2458 if (rx->napi) 2457 2459 napi_gro_receive(rx->napi, skb); ··· 2548 2546 2549 2547 if (skb) { 2550 2548 skb->protocol = eth_type_trans(skb, dev); 2551 - memset(skb->cb, 0, sizeof(skb->cb)); 2552 - 2553 2549 ieee80211_deliver_skb_to_local_stack(skb, rx); 2554 2550 } 2555 2551
+1 -1
net/netfilter/nf_conntrack_ftp.c
··· 322 322 i++; 323 323 } 324 324 325 - pr_debug("Skipped up to `%c'!\n", skip); 325 + pr_debug("Skipped up to 0x%hhx delimiter!\n", skip); 326 326 327 327 *numoff = i; 328 328 *numlen = getnum(data + i, dlen - i, cmd, term, numoff);
+5
net/netfilter/nf_conntrack_standalone.c
··· 1037 1037 table[NF_SYSCTL_CT_COUNT].data = &net->ct.count; 1038 1038 table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum; 1039 1039 table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid; 1040 + table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct; 1041 + table[NF_SYSCTL_CT_HELPER].data = &net->ct.sysctl_auto_assign_helper; 1040 1042 #ifdef CONFIG_NF_CONNTRACK_EVENTS 1041 1043 table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events; 1044 + #endif 1045 + #ifdef CONFIG_NF_CONNTRACK_TIMESTAMP 1046 + table[NF_SYSCTL_CT_TIMESTAMP].data = &net->ct.sysctl_tstamp; 1042 1047 #endif 1043 1048 table[NF_SYSCTL_CT_PROTO_TIMEOUT_GENERIC].data = &nf_generic_pernet(net)->timeout; 1044 1049 table[NF_SYSCTL_CT_PROTO_TIMEOUT_ICMP].data = &nf_icmp_pernet(net)->timeout;
+2 -1
net/netfilter/nf_flow_table_ip.c
··· 228 228 { 229 229 skb_orphan(skb); 230 230 skb_dst_set_noref(skb, dst); 231 - skb->tstamp = 0; 232 231 dst_output(state->net, state->sk, skb); 233 232 return NF_STOLEN; 234 233 } ··· 283 284 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 284 285 iph = ip_hdr(skb); 285 286 ip_decrease_ttl(iph); 287 + skb->tstamp = 0; 286 288 287 289 if (unlikely(dst_xfrm(&rt->dst))) { 288 290 memset(skb->cb, 0, sizeof(struct inet_skb_parm)); ··· 512 512 flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT; 513 513 ip6h = ipv6_hdr(skb); 514 514 ip6h->hop_limit--; 515 + skb->tstamp = 0; 515 516 516 517 if (unlikely(dst_xfrm(&rt->dst))) { 517 518 memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
+2 -4
net/netfilter/xt_physdev.c
··· 101 101 if (info->bitmask & (XT_PHYSDEV_OP_OUT | XT_PHYSDEV_OP_ISOUT) && 102 102 (!(info->bitmask & XT_PHYSDEV_OP_BRIDGED) || 103 103 info->invert & XT_PHYSDEV_OP_BRIDGED) && 104 - par->hook_mask & ((1 << NF_INET_LOCAL_OUT) | 105 - (1 << NF_INET_FORWARD) | (1 << NF_INET_POST_ROUTING))) { 104 + par->hook_mask & (1 << NF_INET_LOCAL_OUT)) { 106 105 pr_info_ratelimited("--physdev-out and --physdev-is-out only supported in the FORWARD and POSTROUTING chains with bridged traffic\n"); 107 - if (par->hook_mask & (1 << NF_INET_LOCAL_OUT)) 108 - return -EINVAL; 106 + return -EINVAL; 109 107 } 110 108 111 109 if (!brnf_probed) {
+5
net/openvswitch/conntrack.c
··· 525 525 return -EPFNOSUPPORT; 526 526 } 527 527 528 + /* The key extracted from the fragment that completed this datagram 529 + * likely didn't have an L4 header, so regenerate it. 530 + */ 531 + ovs_flow_key_update_l3l4(skb, key); 532 + 528 533 key->ip.frag = OVS_FRAG_TYPE_NONE; 529 534 skb_clear_hash(skb); 530 535 skb->ignore_df = 1;
+93 -67
net/openvswitch/flow.c
··· 523 523 } 524 524 525 525 /** 526 - * key_extract - extracts a flow key from an Ethernet frame. 526 + * key_extract_l3l4 - extracts L3/L4 header information. 527 527 * @skb: sk_buff that contains the frame, with skb->data pointing to the 528 - * Ethernet header 528 + * L3 header 529 529 * @key: output flow key 530 530 * 531 - * The caller must ensure that skb->len >= ETH_HLEN. 532 - * 533 - * Returns 0 if successful, otherwise a negative errno value. 534 - * 535 - * Initializes @skb header fields as follows: 536 - * 537 - * - skb->mac_header: the L2 header. 538 - * 539 - * - skb->network_header: just past the L2 header, or just past the 540 - * VLAN header, to the first byte of the L2 payload. 541 - * 542 - * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6 543 - * on output, then just past the IP header, if one is present and 544 - * of a correct length, otherwise the same as skb->network_header. 545 - * For other key->eth.type values it is left untouched. 546 - * 547 - * - skb->protocol: the type of the data starting at skb->network_header. 548 - * Equals to key->eth.type. 549 531 */ 550 - static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) 532 + static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) 551 533 { 552 534 int error; 553 - struct ethhdr *eth; 554 - 555 - /* Flags are always used as part of stats */ 556 - key->tp.flags = 0; 557 - 558 - skb_reset_mac_header(skb); 559 - 560 - /* Link layer. */ 561 - clear_vlan(key); 562 - if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { 563 - if (unlikely(eth_type_vlan(skb->protocol))) 564 - return -EINVAL; 565 - 566 - skb_reset_network_header(skb); 567 - key->eth.type = skb->protocol; 568 - } else { 569 - eth = eth_hdr(skb); 570 - ether_addr_copy(key->eth.src, eth->h_source); 571 - ether_addr_copy(key->eth.dst, eth->h_dest); 572 - 573 - __skb_pull(skb, 2 * ETH_ALEN); 574 - /* We are going to push all headers that we pull, so no need to 575 - * update skb->csum here. 576 - */ 577 - 578 - if (unlikely(parse_vlan(skb, key))) 579 - return -ENOMEM; 580 - 581 - key->eth.type = parse_ethertype(skb); 582 - if (unlikely(key->eth.type == htons(0))) 583 - return -ENOMEM; 584 - 585 - /* Multiple tagged packets need to retain TPID to satisfy 586 - * skb_vlan_pop(), which will later shift the ethertype into 587 - * skb->protocol. 588 - */ 589 - if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK)) 590 - skb->protocol = key->eth.cvlan.tpid; 591 - else 592 - skb->protocol = key->eth.type; 593 - 594 - skb_reset_network_header(skb); 595 - __skb_push(skb, skb->data - skb_mac_header(skb)); 596 - } 597 - skb_reset_mac_len(skb); 598 535 599 536 /* Network layer. */ 600 537 if (key->eth.type == htons(ETH_P_IP)) { ··· 560 623 offset = nh->frag_off & htons(IP_OFFSET); 561 624 if (offset) { 562 625 key->ip.frag = OVS_FRAG_TYPE_LATER; 626 + memset(&key->tp, 0, sizeof(key->tp)); 563 627 return 0; 564 628 } 565 629 if (nh->frag_off & htons(IP_MF) || ··· 678 740 return error; 679 741 } 680 742 681 - if (key->ip.frag == OVS_FRAG_TYPE_LATER) 743 + if (key->ip.frag == OVS_FRAG_TYPE_LATER) { 744 + memset(&key->tp, 0, sizeof(key->tp)); 682 745 return 0; 746 + } 683 747 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) 684 748 key->ip.frag = OVS_FRAG_TYPE_FIRST; 685 749 ··· 726 786 return error; 727 787 } 728 788 return 0; 789 + } 790 + 791 + /** 792 + * key_extract - extracts a flow key from an Ethernet frame. 
793 + * @skb: sk_buff that contains the frame, with skb->data pointing to the 794 + * Ethernet header 795 + * @key: output flow key 796 + * 797 + * The caller must ensure that skb->len >= ETH_HLEN. 798 + * 799 + * Returns 0 if successful, otherwise a negative errno value. 800 + * 801 + * Initializes @skb header fields as follows: 802 + * 803 + * - skb->mac_header: the L2 header. 804 + * 805 + * - skb->network_header: just past the L2 header, or just past the 806 + * VLAN header, to the first byte of the L2 payload. 807 + * 808 + * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6 809 + * on output, then just past the IP header, if one is present and 810 + * of a correct length, otherwise the same as skb->network_header. 811 + * For other key->eth.type values it is left untouched. 812 + * 813 + * - skb->protocol: the type of the data starting at skb->network_header. 814 + * Equals to key->eth.type. 815 + */ 816 + static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) 817 + { 818 + struct ethhdr *eth; 819 + 820 + /* Flags are always used as part of stats */ 821 + key->tp.flags = 0; 822 + 823 + skb_reset_mac_header(skb); 824 + 825 + /* Link layer. */ 826 + clear_vlan(key); 827 + if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { 828 + if (unlikely(eth_type_vlan(skb->protocol))) 829 + return -EINVAL; 830 + 831 + skb_reset_network_header(skb); 832 + key->eth.type = skb->protocol; 833 + } else { 834 + eth = eth_hdr(skb); 835 + ether_addr_copy(key->eth.src, eth->h_source); 836 + ether_addr_copy(key->eth.dst, eth->h_dest); 837 + 838 + __skb_pull(skb, 2 * ETH_ALEN); 839 + /* We are going to push all headers that we pull, so no need to 840 + * update skb->csum here. 841 + */ 842 + 843 + if (unlikely(parse_vlan(skb, key))) 844 + return -ENOMEM; 845 + 846 + key->eth.type = parse_ethertype(skb); 847 + if (unlikely(key->eth.type == htons(0))) 848 + return -ENOMEM; 849 + 850 + /* Multiple tagged packets need to retain TPID to satisfy 851 + * skb_vlan_pop(), which will later shift the ethertype into 852 + * skb->protocol. 853 + */ 854 + if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK)) 855 + skb->protocol = key->eth.cvlan.tpid; 856 + else 857 + skb->protocol = key->eth.type; 858 + 859 + skb_reset_network_header(skb); 860 + __skb_push(skb, skb->data - skb_mac_header(skb)); 861 + } 862 + 863 + skb_reset_mac_len(skb); 864 + 865 + /* Fill out L3/L4 key info, if any */ 866 + return key_extract_l3l4(skb, key); 867 + } 868 + 869 + /* In the case of conntrack fragment handling it expects L3 headers, 870 + * add a helper. 871 + */ 872 + int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key) 873 + { 874 + return key_extract_l3l4(skb, key); 729 875 } 730 876 731 877 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key)
+1
net/openvswitch/flow.h
··· 270 270 u64 ovs_flow_used_time(unsigned long flow_jiffies); 271 271 272 272 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key); 273 + int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key); 273 274 int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info, 274 275 struct sk_buff *skb, 275 276 struct sw_flow_key *key);
+1 -1
net/psample/psample.c
··· 154 154 { 155 155 psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP); 156 156 list_del(&group->list); 157 - kfree(group); 157 + kfree_rcu(group, rcu); 158 158 } 159 159 160 160 static struct psample_group *
+4 -1
net/rds/recv.c
··· 1 1 /* 2 - * Copyright (c) 2006, 2018 Oracle and/or its affiliates. All rights reserved. 2 + * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. 3 3 * 4 4 * This software is available to you under a choice of one of two 5 5 * licenses. You may choose to be licensed under the terms of the GNU ··· 811 811 812 812 minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); 813 813 minfo6.len = be32_to_cpu(inc->i_hdr.h_len); 814 + minfo6.tos = inc->i_conn->c_tos; 814 815 815 816 if (flip) { 816 817 minfo6.laddr = *daddr; ··· 824 823 minfo6.lport = inc->i_hdr.h_sport; 825 824 minfo6.fport = inc->i_hdr.h_dport; 826 825 } 826 + 827 + minfo6.flags = 0; 827 828 828 829 rds_info_copy(iter, &minfo6, sizeof(minfo6)); 829 830 }
-3
net/rxrpc/af_rxrpc.c
··· 862 862 static int rxrpc_release_sock(struct sock *sk) 863 863 { 864 864 struct rxrpc_sock *rx = rxrpc_sk(sk); 865 - struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); 866 865 867 866 _enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); 868 867 ··· 897 898 rxrpc_release_calls_on_socket(rx); 898 899 flush_workqueue(rxrpc_workqueue); 899 900 rxrpc_purge_queue(&sk->sk_receive_queue); 900 - rxrpc_queue_work(&rxnet->service_conn_reaper); 901 - rxrpc_queue_work(&rxnet->client_conn_reaper); 902 901 903 902 rxrpc_unuse_local(rx->local); 904 903 rx->local = NULL;
+12 -5
net/rxrpc/ar-internal.h
··· 185 185 * - max 48 bytes (struct sk_buff::cb) 186 186 */ 187 187 struct rxrpc_skb_priv { 188 - union { 189 - u8 nr_jumbo; /* Number of jumbo subpackets */ 190 - }; 188 + atomic_t nr_ring_pins; /* Number of rxtx ring pins */ 189 + u8 nr_subpackets; /* Number of subpackets */ 190 + u8 rx_flags; /* Received packet flags */ 191 + #define RXRPC_SKB_INCL_LAST 0x01 /* - Includes last packet */ 192 + #define RXRPC_SKB_TX_BUFFER 0x02 /* - Is transmit buffer */ 191 193 union { 192 194 int remain; /* amount of space remaining for next write */ 195 + 196 + /* List of requested ACKs on subpackets */ 197 + unsigned long rx_req_ack[(RXRPC_MAX_NR_JUMBO + BITS_PER_LONG - 1) / 198 + BITS_PER_LONG]; 193 199 }; 194 200 195 201 struct rxrpc_host_header hdr; /* RxRPC packet header from this packet */ ··· 619 613 #define RXRPC_TX_ANNO_LAST 0x04 620 614 #define RXRPC_TX_ANNO_RESENT 0x08 621 615 622 - #define RXRPC_RX_ANNO_JUMBO 0x3f /* Jumbo subpacket number + 1 if not zero */ 623 - #define RXRPC_RX_ANNO_JLAST 0x40 /* Set if last element of a jumbo packet */ 616 + #define RXRPC_RX_ANNO_SUBPACKET 0x3f /* Subpacket number in jumbogram */ 624 617 #define RXRPC_RX_ANNO_VERIFIED 0x80 /* Set if verified and decrypted */ 625 618 rxrpc_seq_t tx_hard_ack; /* Dead slot in buffer; the first transmitted but 626 619 * not hard-ACK'd packet follows this. ··· 910 905 void rxrpc_put_client_conn(struct rxrpc_connection *); 911 906 void rxrpc_discard_expired_client_conns(struct work_struct *); 912 907 void rxrpc_destroy_all_client_connections(struct rxrpc_net *); 908 + void rxrpc_clean_up_local_conns(struct rxrpc_local *); 913 909 914 910 /* 915 911 * conn_event.c ··· 1111 1105 void rxrpc_packet_destructor(struct sk_buff *); 1112 1106 void rxrpc_new_skb(struct sk_buff *, enum rxrpc_skb_trace); 1113 1107 void rxrpc_see_skb(struct sk_buff *, enum rxrpc_skb_trace); 1108 + void rxrpc_eaten_skb(struct sk_buff *, enum rxrpc_skb_trace); 1114 1109 void rxrpc_get_skb(struct sk_buff *, enum rxrpc_skb_trace); 1115 1110 void rxrpc_free_skb(struct sk_buff *, enum rxrpc_skb_trace); 1116 1111 void rxrpc_purge_queue(struct sk_buff_head *);
+4 -4
net/rxrpc/call_event.c
··· 199 199 continue; 200 200 201 201 skb = call->rxtx_buffer[ix]; 202 - rxrpc_see_skb(skb, rxrpc_skb_tx_seen); 202 + rxrpc_see_skb(skb, rxrpc_skb_seen); 203 203 204 204 if (anno_type == RXRPC_TX_ANNO_UNACK) { 205 205 if (ktime_after(skb->tstamp, max_age)) { ··· 255 255 continue; 256 256 257 257 skb = call->rxtx_buffer[ix]; 258 - rxrpc_get_skb(skb, rxrpc_skb_tx_got); 258 + rxrpc_get_skb(skb, rxrpc_skb_got); 259 259 spin_unlock_bh(&call->lock); 260 260 261 261 if (rxrpc_send_data_packet(call, skb, true) < 0) { 262 - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 262 + rxrpc_free_skb(skb, rxrpc_skb_freed); 263 263 return; 264 264 } 265 265 266 266 if (rxrpc_is_client_call(call)) 267 267 rxrpc_expose_client_call(call); 268 268 269 - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 269 + rxrpc_free_skb(skb, rxrpc_skb_freed); 270 270 spin_lock_bh(&call->lock); 271 271 272 272 /* We need to clear the retransmit state, but there are two
+16 -17
net/rxrpc/call_object.c
··· 422 422 } 423 423 424 424 /* 425 + * Clean up the RxTx skb ring. 426 + */ 427 + static void rxrpc_cleanup_ring(struct rxrpc_call *call) 428 + { 429 + int i; 430 + 431 + for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { 432 + rxrpc_free_skb(call->rxtx_buffer[i], rxrpc_skb_cleaned); 433 + call->rxtx_buffer[i] = NULL; 434 + } 435 + } 436 + 437 + /* 425 438 * Detach a call from its owning socket. 426 439 */ 427 440 void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) ··· 442 429 const void *here = __builtin_return_address(0); 443 430 struct rxrpc_connection *conn = call->conn; 444 431 bool put = false; 445 - int i; 446 432 447 433 _enter("{%d,%d}", call->debug_id, atomic_read(&call->usage)); 448 434 ··· 491 479 if (conn) 492 480 rxrpc_disconnect_call(call); 493 481 494 - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) { 495 - rxrpc_free_skb(call->rxtx_buffer[i], 496 - (call->tx_phase ? rxrpc_skb_tx_cleaned : 497 - rxrpc_skb_rx_cleaned)); 498 - call->rxtx_buffer[i] = NULL; 499 - } 500 - 482 + rxrpc_cleanup_ring(call); 501 483 _leave(""); 502 484 } 503 485 ··· 574 568 */ 575 569 void rxrpc_cleanup_call(struct rxrpc_call *call) 576 570 { 577 - int i; 578 - 579 571 _net("DESTROY CALL %d", call->debug_id); 580 572 581 573 memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); ··· 584 580 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); 585 581 ASSERTCMP(call->conn, ==, NULL); 586 582 587 - /* Clean up the Rx/Tx buffer */ 588 - for (i = 0; i < RXRPC_RXTX_BUFF_SIZE; i++) 589 - rxrpc_free_skb(call->rxtx_buffer[i], 590 - (call->tx_phase ? rxrpc_skb_tx_cleaned : 591 - rxrpc_skb_rx_cleaned)); 592 - 593 - rxrpc_free_skb(call->tx_pending, rxrpc_skb_tx_cleaned); 583 + rxrpc_cleanup_ring(call); 584 + rxrpc_free_skb(call->tx_pending, rxrpc_skb_cleaned); 594 585 595 586 call_rcu(&call->rcu, rxrpc_rcu_destroy_call); 596 587 }
+44
net/rxrpc/conn_client.c
··· 1162 1162 1163 1163 _leave(""); 1164 1164 } 1165 + 1166 + /* 1167 + * Clean up the client connections on a local endpoint. 1168 + */ 1169 + void rxrpc_clean_up_local_conns(struct rxrpc_local *local) 1170 + { 1171 + struct rxrpc_connection *conn, *tmp; 1172 + struct rxrpc_net *rxnet = local->rxnet; 1173 + unsigned int nr_active; 1174 + LIST_HEAD(graveyard); 1175 + 1176 + _enter(""); 1177 + 1178 + spin_lock(&rxnet->client_conn_cache_lock); 1179 + nr_active = rxnet->nr_active_client_conns; 1180 + 1181 + list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns, 1182 + cache_link) { 1183 + if (conn->params.local == local) { 1184 + ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE); 1185 + 1186 + trace_rxrpc_client(conn, -1, rxrpc_client_discard); 1187 + if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags)) 1188 + BUG(); 1189 + conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE; 1190 + list_move(&conn->cache_link, &graveyard); 1191 + nr_active--; 1192 + } 1193 + } 1194 + 1195 + rxnet->nr_active_client_conns = nr_active; 1196 + spin_unlock(&rxnet->client_conn_cache_lock); 1197 + ASSERTCMP(nr_active, >=, 0); 1198 + 1199 + while (!list_empty(&graveyard)) { 1200 + conn = list_entry(graveyard.next, 1201 + struct rxrpc_connection, cache_link); 1202 + list_del_init(&conn->cache_link); 1203 + 1204 + rxrpc_put_connection(conn); 1205 + } 1206 + 1207 + _leave(" [culled]"); 1208 + }
+3 -3
net/rxrpc/conn_event.c
··· 472 472 /* go through the conn-level event packets, releasing the ref on this 473 473 * connection that each one has when we've finished with it */ 474 474 while ((skb = skb_dequeue(&conn->rx_queue))) { 475 - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 475 + rxrpc_see_skb(skb, rxrpc_skb_seen); 476 476 ret = rxrpc_process_event(conn, skb, &abort_code); 477 477 switch (ret) { 478 478 case -EPROTO: ··· 484 484 goto requeue_and_leave; 485 485 case -ECONNABORTED: 486 486 default: 487 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 487 + rxrpc_free_skb(skb, rxrpc_skb_freed); 488 488 break; 489 489 } 490 490 } ··· 501 501 protocol_error: 502 502 if (rxrpc_abort_connection(conn, ret, abort_code) < 0) 503 503 goto requeue_and_leave; 504 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 504 + rxrpc_free_skb(skb, rxrpc_skb_freed); 505 505 goto out; 506 506 }
+1 -1
net/rxrpc/conn_object.c
··· 398 398 if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) 399 399 continue; 400 400 401 - if (rxnet->live) { 401 + if (rxnet->live && !conn->params.local->dead) { 402 402 idle_timestamp = READ_ONCE(conn->idle_timestamp); 403 403 expire_at = idle_timestamp + rxrpc_connection_expiry * HZ; 404 404 if (conn->params.local->service_closed)
+166 -136
net/rxrpc/input.c
··· 233 233 ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK; 234 234 skb = call->rxtx_buffer[ix]; 235 235 annotation = call->rxtx_annotations[ix]; 236 - rxrpc_see_skb(skb, rxrpc_skb_tx_rotated); 236 + rxrpc_see_skb(skb, rxrpc_skb_rotated); 237 237 call->rxtx_buffer[ix] = NULL; 238 238 call->rxtx_annotations[ix] = 0; 239 239 skb->next = list; ··· 258 258 skb = list; 259 259 list = skb->next; 260 260 skb_mark_not_on_list(skb); 261 - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 261 + rxrpc_free_skb(skb, rxrpc_skb_freed); 262 262 } 263 263 264 264 return rot_last; ··· 347 347 } 348 348 349 349 /* 350 - * Scan a jumbo packet to validate its structure and to work out how many 350 + * Scan a data packet to validate its structure and to work out how many 351 351 * subpackets it contains. 352 352 * 353 353 * A jumbo packet is a collection of consecutive packets glued together with ··· 358 358 * the last are RXRPC_JUMBO_DATALEN in size. The last subpacket may be of any 359 359 * size. 360 360 */ 361 - static bool rxrpc_validate_jumbo(struct sk_buff *skb) 361 + static bool rxrpc_validate_data(struct sk_buff *skb) 362 362 { 363 363 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 364 364 unsigned int offset = sizeof(struct rxrpc_wire_header); 365 365 unsigned int len = skb->len; 366 - int nr_jumbo = 1; 367 366 u8 flags = sp->hdr.flags; 368 367 369 - do { 370 - nr_jumbo++; 368 + for (;;) { 369 + if (flags & RXRPC_REQUEST_ACK) 370 + __set_bit(sp->nr_subpackets, sp->rx_req_ack); 371 + sp->nr_subpackets++; 372 + 373 + if (!(flags & RXRPC_JUMBO_PACKET)) 374 + break; 375 + 371 376 if (len - offset < RXRPC_JUMBO_SUBPKTLEN) 372 377 goto protocol_error; 373 378 if (flags & RXRPC_LAST_PACKET) ··· 381 376 if (skb_copy_bits(skb, offset, &flags, 1) < 0) 382 377 goto protocol_error; 383 378 offset += sizeof(struct rxrpc_jumbo_header); 384 - } while (flags & RXRPC_JUMBO_PACKET); 379 + } 385 380 386 - sp->nr_jumbo = nr_jumbo; 381 + if (flags & RXRPC_LAST_PACKET) 382 + sp->rx_flags |= RXRPC_SKB_INCL_LAST; 387 383 return true; 388 384 389 385 protocol_error: ··· 405 399 * (that information is encoded in the ACK packet). 406 400 */ 407 401 static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq, 408 - u8 annotation, bool *_jumbo_bad) 402 + bool is_jumbo, bool *_jumbo_bad) 409 403 { 410 404 /* Discard normal packets that are duplicates. */ 411 - if (annotation == 0) 405 + if (is_jumbo) 412 406 return; 413 407 414 408 /* Skip jumbo subpackets that are duplicates. When we've had three or ··· 422 416 } 423 417 424 418 /* 425 - * Process a DATA packet, adding the packet to the Rx ring. 419 + * Process a DATA packet, adding the packet to the Rx ring. The caller's 420 + * packet ref must be passed on or discarded. 
426 421 */ 427 422 static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) 428 423 { 429 424 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 430 425 enum rxrpc_call_state state; 431 - unsigned int offset = sizeof(struct rxrpc_wire_header); 432 - unsigned int ix; 426 + unsigned int j; 433 427 rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0; 434 - rxrpc_seq_t seq = sp->hdr.seq, hard_ack; 435 - bool immediate_ack = false, jumbo_bad = false, queued; 436 - u16 len; 437 - u8 ack = 0, flags, annotation = 0; 428 + rxrpc_seq_t seq0 = sp->hdr.seq, hard_ack; 429 + bool immediate_ack = false, jumbo_bad = false; 430 + u8 ack = 0; 438 431 439 432 _enter("{%u,%u},{%u,%u}", 440 - call->rx_hard_ack, call->rx_top, skb->len, seq); 433 + call->rx_hard_ack, call->rx_top, skb->len, seq0); 441 434 442 - _proto("Rx DATA %%%u { #%u f=%02x }", 443 - sp->hdr.serial, seq, sp->hdr.flags); 435 + _proto("Rx DATA %%%u { #%u f=%02x n=%u }", 436 + sp->hdr.serial, seq0, sp->hdr.flags, sp->nr_subpackets); 444 437 445 438 state = READ_ONCE(call->state); 446 - if (state >= RXRPC_CALL_COMPLETE) 439 + if (state >= RXRPC_CALL_COMPLETE) { 440 + rxrpc_free_skb(skb, rxrpc_skb_freed); 447 441 return; 442 + } 448 443 449 444 if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) { 450 445 unsigned long timo = READ_ONCE(call->next_req_timo); ··· 470 463 !rxrpc_receiving_reply(call)) 471 464 goto unlock; 472 465 473 - call->ackr_prev_seq = seq; 474 - 466 + call->ackr_prev_seq = seq0; 475 467 hard_ack = READ_ONCE(call->rx_hard_ack); 476 - if (after(seq, hard_ack + call->rx_winsize)) { 477 - ack = RXRPC_ACK_EXCEEDS_WINDOW; 478 - ack_serial = serial; 479 - goto ack; 480 - } 481 468 482 - flags = sp->hdr.flags; 483 - if (flags & RXRPC_JUMBO_PACKET) { 469 + if (sp->nr_subpackets > 1) { 484 470 if (call->nr_jumbo_bad > 3) { 485 471 ack = RXRPC_ACK_NOSPACE; 486 472 ack_serial = serial; 487 473 goto ack; 488 474 } 489 - annotation = 1; 490 475 } 491 476 492 - next_subpacket: 493 - queued = false; 494 - ix = seq & RXRPC_RXTX_BUFF_MASK; 495 - len = skb->len; 496 - if (flags & RXRPC_JUMBO_PACKET) 497 - len = RXRPC_JUMBO_DATALEN; 477 + for (j = 0; j < sp->nr_subpackets; j++) { 478 + rxrpc_serial_t serial = sp->hdr.serial + j; 479 + rxrpc_seq_t seq = seq0 + j; 480 + unsigned int ix = seq & RXRPC_RXTX_BUFF_MASK; 481 + bool terminal = (j == sp->nr_subpackets - 1); 482 + bool last = terminal && (sp->rx_flags & RXRPC_SKB_INCL_LAST); 483 + u8 flags, annotation = j; 498 484 499 - if (flags & RXRPC_LAST_PACKET) { 500 - if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 501 - seq != call->rx_top) { 502 - rxrpc_proto_abort("LSN", call, seq); 503 - goto unlock; 485 + _proto("Rx DATA+%u %%%u { #%x t=%u l=%u }", 486 + j, serial, seq, terminal, last); 487 + 488 + if (last) { 489 + if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 490 + seq != call->rx_top) { 491 + rxrpc_proto_abort("LSN", call, seq); 492 + goto unlock; 493 + } 494 + } else { 495 + if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 496 + after_eq(seq, call->rx_top)) { 497 + rxrpc_proto_abort("LSA", call, seq); 498 + goto unlock; 499 + } 504 500 } 505 - } else { 506 - if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 507 - after_eq(seq, call->rx_top)) { 508 - rxrpc_proto_abort("LSA", call, seq); 509 - goto unlock; 510 - } 511 - } 512 501 513 - trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); 514 - if (before_eq(seq, hard_ack)) { 515 - ack = RXRPC_ACK_DUPLICATE; 516 - ack_serial = serial; 517 - goto skip; 518 - } 502 + flags = 0; 503 + if (last) 504 + flags |= 
RXRPC_LAST_PACKET; 505 + if (!terminal) 506 + flags |= RXRPC_JUMBO_PACKET; 507 + if (test_bit(j, sp->rx_req_ack)) 508 + flags |= RXRPC_REQUEST_ACK; 509 + trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation); 519 510 520 - if (flags & RXRPC_REQUEST_ACK && !ack) { 521 - ack = RXRPC_ACK_REQUESTED; 522 - ack_serial = serial; 523 - } 524 - 525 - if (call->rxtx_buffer[ix]) { 526 - rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad); 527 - if (ack != RXRPC_ACK_DUPLICATE) { 511 + if (before_eq(seq, hard_ack)) { 528 512 ack = RXRPC_ACK_DUPLICATE; 529 513 ack_serial = serial; 514 + continue; 530 515 } 531 - immediate_ack = true; 532 - goto skip; 533 - } 534 516 535 - /* Queue the packet. We use a couple of memory barriers here as need 536 - * to make sure that rx_top is perceived to be set after the buffer 537 - * pointer and that the buffer pointer is set after the annotation and 538 - * the skb data. 539 - * 540 - * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window() 541 - * and also rxrpc_fill_out_ack(). 542 - */ 543 - rxrpc_get_skb(skb, rxrpc_skb_rx_got); 544 - call->rxtx_annotations[ix] = annotation; 545 - smp_wmb(); 546 - call->rxtx_buffer[ix] = skb; 547 - if (after(seq, call->rx_top)) { 548 - smp_store_release(&call->rx_top, seq); 549 - } else if (before(seq, call->rx_top)) { 550 - /* Send an immediate ACK if we fill in a hole */ 551 - if (!ack) { 552 - ack = RXRPC_ACK_DELAY; 553 - ack_serial = serial; 517 + if (call->rxtx_buffer[ix]) { 518 + rxrpc_input_dup_data(call, seq, sp->nr_subpackets > 1, 519 + &jumbo_bad); 520 + if (ack != RXRPC_ACK_DUPLICATE) { 521 + ack = RXRPC_ACK_DUPLICATE; 522 + ack_serial = serial; 523 + } 524 + immediate_ack = true; 525 + continue; 554 526 } 555 - immediate_ack = true; 556 - } 557 - if (flags & RXRPC_LAST_PACKET) { 558 - set_bit(RXRPC_CALL_RX_LAST, &call->flags); 559 - trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq); 560 - } else { 561 - trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq); 562 - } 563 - queued = true; 564 527 565 - if (after_eq(seq, call->rx_expect_next)) { 566 - if (after(seq, call->rx_expect_next)) { 567 - _net("OOS %u > %u", seq, call->rx_expect_next); 568 - ack = RXRPC_ACK_OUT_OF_SEQUENCE; 569 - ack_serial = serial; 570 - } 571 - call->rx_expect_next = seq + 1; 572 - } 573 - 574 - skip: 575 - offset += len; 576 - if (flags & RXRPC_JUMBO_PACKET) { 577 - if (skb_copy_bits(skb, offset, &flags, 1) < 0) { 578 - rxrpc_proto_abort("XJF", call, seq); 579 - goto unlock; 580 - } 581 - offset += sizeof(struct rxrpc_jumbo_header); 582 - seq++; 583 - serial++; 584 - annotation++; 585 - if (flags & RXRPC_JUMBO_PACKET) 586 - annotation |= RXRPC_RX_ANNO_JLAST; 587 528 if (after(seq, hard_ack + call->rx_winsize)) { 588 529 ack = RXRPC_ACK_EXCEEDS_WINDOW; 589 530 ack_serial = serial; 590 - if (!jumbo_bad) { 591 - call->nr_jumbo_bad++; 592 - jumbo_bad = true; 531 + if (flags & RXRPC_JUMBO_PACKET) { 532 + if (!jumbo_bad) { 533 + call->nr_jumbo_bad++; 534 + jumbo_bad = true; 535 + } 593 536 } 537 + 594 538 goto ack; 595 539 } 596 540 597 - _proto("Rx DATA Jumbo %%%u", serial); 598 - goto next_subpacket; 599 - } 541 + if (flags & RXRPC_REQUEST_ACK && !ack) { 542 + ack = RXRPC_ACK_REQUESTED; 543 + ack_serial = serial; 544 + } 600 545 601 - if (queued && flags & RXRPC_LAST_PACKET && !ack) { 602 - ack = RXRPC_ACK_DELAY; 603 - ack_serial = serial; 546 + /* Queue the packet. 
We use a couple of memory barriers here as need 547 + * to make sure that rx_top is perceived to be set after the buffer 548 + * pointer and that the buffer pointer is set after the annotation and 549 + * the skb data. 550 + * 551 + * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window() 552 + * and also rxrpc_fill_out_ack(). 553 + */ 554 + if (!terminal) 555 + rxrpc_get_skb(skb, rxrpc_skb_got); 556 + call->rxtx_annotations[ix] = annotation; 557 + smp_wmb(); 558 + call->rxtx_buffer[ix] = skb; 559 + if (after(seq, call->rx_top)) { 560 + smp_store_release(&call->rx_top, seq); 561 + } else if (before(seq, call->rx_top)) { 562 + /* Send an immediate ACK if we fill in a hole */ 563 + if (!ack) { 564 + ack = RXRPC_ACK_DELAY; 565 + ack_serial = serial; 566 + } 567 + immediate_ack = true; 568 + } 569 + 570 + if (terminal) { 571 + /* From this point on, we're not allowed to touch the 572 + * packet any longer as its ref now belongs to the Rx 573 + * ring. 574 + */ 575 + skb = NULL; 576 + } 577 + 578 + if (last) { 579 + set_bit(RXRPC_CALL_RX_LAST, &call->flags); 580 + if (!ack) { 581 + ack = RXRPC_ACK_DELAY; 582 + ack_serial = serial; 583 + } 584 + trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq); 585 + } else { 586 + trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq); 587 + } 588 + 589 + if (after_eq(seq, call->rx_expect_next)) { 590 + if (after(seq, call->rx_expect_next)) { 591 + _net("OOS %u > %u", seq, call->rx_expect_next); 592 + ack = RXRPC_ACK_OUT_OF_SEQUENCE; 593 + ack_serial = serial; 594 + } 595 + call->rx_expect_next = seq + 1; 596 + } 604 597 } 605 598 606 599 ack: ··· 613 606 false, true, 614 607 rxrpc_propose_ack_input_data); 615 608 616 - if (sp->hdr.seq == READ_ONCE(call->rx_hard_ack) + 1) { 609 + if (seq0 == READ_ONCE(call->rx_hard_ack) + 1) { 617 610 trace_rxrpc_notify_socket(call->debug_id, serial); 618 611 rxrpc_notify_socket(call); 619 612 } 620 613 621 614 unlock: 622 615 spin_unlock(&call->input_lock); 616 + rxrpc_free_skb(skb, rxrpc_skb_freed); 623 617 _leave(" [queued]"); 624 618 } 625 619 ··· 1029 1021 switch (sp->hdr.type) { 1030 1022 case RXRPC_PACKET_TYPE_DATA: 1031 1023 rxrpc_input_data(call, skb); 1032 - break; 1024 + goto no_free; 1033 1025 1034 1026 case RXRPC_PACKET_TYPE_ACK: 1035 1027 rxrpc_input_ack(call, skb); ··· 1056 1048 break; 1057 1049 } 1058 1050 1051 + rxrpc_free_skb(skb, rxrpc_skb_freed); 1052 + no_free: 1059 1053 _leave(""); 1060 1054 } 1061 1055 ··· 1119 1109 skb_queue_tail(&local->event_queue, skb); 1120 1110 rxrpc_queue_local(local); 1121 1111 } else { 1122 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1112 + rxrpc_free_skb(skb, rxrpc_skb_freed); 1123 1113 } 1124 1114 } 1125 1115 ··· 1134 1124 skb_queue_tail(&local->reject_queue, skb); 1135 1125 rxrpc_queue_local(local); 1136 1126 } else { 1137 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1127 + rxrpc_free_skb(skb, rxrpc_skb_freed); 1138 1128 } 1139 1129 } 1140 1130 ··· 1198 1188 if (skb->tstamp == 0) 1199 1189 skb->tstamp = ktime_get_real(); 1200 1190 1201 - rxrpc_new_skb(skb, rxrpc_skb_rx_received); 1191 + rxrpc_new_skb(skb, rxrpc_skb_received); 1202 1192 1203 1193 skb_pull(skb, sizeof(struct udphdr)); 1204 1194 ··· 1215 1205 static int lose; 1216 1206 if ((lose++ & 7) == 7) { 1217 1207 trace_rxrpc_rx_lose(sp); 1218 - rxrpc_free_skb(skb, rxrpc_skb_rx_lost); 1208 + rxrpc_free_skb(skb, rxrpc_skb_lost); 1219 1209 return 0; 1220 1210 } 1221 1211 } ··· 1247 1237 if (sp->hdr.callNumber == 0 || 1248 1238 sp->hdr.seq == 0) 1249 1239 goto bad_message; 1250 - if (sp->hdr.flags 
& RXRPC_JUMBO_PACKET && 1251 - !rxrpc_validate_jumbo(skb)) 1240 + if (!rxrpc_validate_data(skb)) 1252 1241 goto bad_message; 1242 + 1243 + /* Unshare the packet so that it can be modified for in-place 1244 + * decryption. 1245 + */ 1246 + if (sp->hdr.securityIndex != 0) { 1247 + struct sk_buff *nskb = skb_unshare(skb, GFP_ATOMIC); 1248 + if (!nskb) { 1249 + rxrpc_eaten_skb(skb, rxrpc_skb_unshared_nomem); 1250 + goto out; 1251 + } 1252 + 1253 + if (nskb != skb) { 1254 + rxrpc_eaten_skb(skb, rxrpc_skb_received); 1255 + rxrpc_new_skb(skb, rxrpc_skb_unshared); 1256 + skb = nskb; 1257 + sp = rxrpc_skb(skb); 1258 + } 1259 + } 1253 1260 break; 1254 1261 1255 1262 case RXRPC_PACKET_TYPE_CHALLENGE: ··· 1400 1373 mutex_unlock(&call->user_mutex); 1401 1374 } 1402 1375 1376 + /* Process a call packet; this either discards or passes on the ref 1377 + * elsewhere. 1378 + */ 1403 1379 rxrpc_input_call_packet(call, skb); 1404 - goto discard; 1380 + goto out; 1405 1381 1406 1382 discard: 1407 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 1383 + rxrpc_free_skb(skb, rxrpc_skb_freed); 1408 1384 out: 1409 1385 trace_rxrpc_rx_done(0, 0); 1410 1386 return 0;
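
The reworked rxrpc_input_data() above iterates over sp->nr_subpackets and derives each subpacket's wire serial, sequence number and Rx-ring slot from the first subpacket's values (serial + j, seq0 + j, seq & RXRPC_RXTX_BUFF_MASK) instead of re-parsing jumbo headers as it goes. The following is a minimal userspace sketch of that indexing arithmetic only, not of the patch itself; the ring size of 64 and the wrap-aware comparison helper are assumptions modelled on rxrpc's ar-internal.h and do not appear in this hunk.

/* Standalone model of the subpacket -> ring-slot mapping used above.
 * RING_SIZE and the wrap-aware comparison are assumptions based on
 * rxrpc's ar-internal.h; they are not part of this patch. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 64			/* assumed RXRPC_RXTX_BUFF_SIZE */
#define RING_MASK (RING_SIZE - 1)

static bool seq_after(uint32_t a, uint32_t b)	/* wrap-aware "a > b" */
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	uint32_t seq0 = 0xfffffffe;	/* first subpacket's sequence number */
	uint32_t serial0 = 1000;	/* first subpacket's wire serial */
	uint32_t hard_ack = 0xfffffffd, rx_winsize = 32;
	unsigned int nr_subpackets = 3;

	for (unsigned int j = 0; j < nr_subpackets; j++) {
		uint32_t seq = seq0 + j;	/* wraps through zero safely */
		uint32_t serial = serial0 + j;
		unsigned int ix = seq & RING_MASK;
		bool oow = seq_after(seq, hard_ack + rx_winsize);

		printf("subpacket %u: seq=%u serial=%u slot=%u%s\n",
		       j, (unsigned)seq, (unsigned)serial, ix,
		       oow ? " (exceeds window)" : "");
	}
	return 0;
}
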
+2 -2
net/rxrpc/local_event.c
··· 90 90 if (skb) { 91 91 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 92 92 93 - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 93 + rxrpc_see_skb(skb, rxrpc_skb_seen); 94 94 _debug("{%d},{%u}", local->debug_id, sp->hdr.type); 95 95 96 96 switch (sp->hdr.type) { ··· 108 108 break; 109 109 } 110 110 111 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 111 + rxrpc_free_skb(skb, rxrpc_skb_freed); 112 112 } 113 113 114 114 _leave("");
+4 -1
net/rxrpc/local_object.c
··· 426 426 427 427 _enter("%d", local->debug_id); 428 428 429 + local->dead = true; 430 + 429 431 mutex_lock(&rxnet->local_mutex); 430 432 list_del_init(&local->link); 431 433 mutex_unlock(&rxnet->local_mutex); 432 434 433 - ASSERT(RB_EMPTY_ROOT(&local->client_conns)); 435 + rxrpc_clean_up_local_conns(local); 436 + rxrpc_service_connection_reaper(&rxnet->service_conn_reaper); 434 437 ASSERT(!local->service); 435 438 436 439 if (socket) {
+3 -3
net/rxrpc/output.c
··· 565 565 memset(&whdr, 0, sizeof(whdr)); 566 566 567 567 while ((skb = skb_dequeue(&local->reject_queue))) { 568 - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 568 + rxrpc_see_skb(skb, rxrpc_skb_seen); 569 569 sp = rxrpc_skb(skb); 570 570 571 571 switch (skb->mark) { ··· 581 581 ioc = 2; 582 582 break; 583 583 default: 584 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 584 + rxrpc_free_skb(skb, rxrpc_skb_freed); 585 585 continue; 586 586 } 587 587 ··· 606 606 rxrpc_tx_point_reject); 607 607 } 608 608 609 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 609 + rxrpc_free_skb(skb, rxrpc_skb_freed); 610 610 } 611 611 612 612 _leave("");
+5 -5
net/rxrpc/peer_event.c
··· 163 163 _leave("UDP socket errqueue empty"); 164 164 return; 165 165 } 166 - rxrpc_new_skb(skb, rxrpc_skb_rx_received); 166 + rxrpc_new_skb(skb, rxrpc_skb_received); 167 167 serr = SKB_EXT_ERR(skb); 168 168 if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) { 169 169 _leave("UDP empty message"); 170 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 170 + rxrpc_free_skb(skb, rxrpc_skb_freed); 171 171 return; 172 172 } 173 173 ··· 177 177 peer = NULL; 178 178 if (!peer) { 179 179 rcu_read_unlock(); 180 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 180 + rxrpc_free_skb(skb, rxrpc_skb_freed); 181 181 _leave(" [no peer]"); 182 182 return; 183 183 } ··· 189 189 serr->ee.ee_code == ICMP_FRAG_NEEDED)) { 190 190 rxrpc_adjust_mtu(peer, serr); 191 191 rcu_read_unlock(); 192 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 192 + rxrpc_free_skb(skb, rxrpc_skb_freed); 193 193 rxrpc_put_peer(peer); 194 194 _leave(" [MTU update]"); 195 195 return; ··· 197 197 198 198 rxrpc_store_error(peer, serr); 199 199 rcu_read_unlock(); 200 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 200 + rxrpc_free_skb(skb, rxrpc_skb_freed); 201 201 rxrpc_put_peer(peer); 202 202 203 203 _leave("");
+9
net/rxrpc/protocol.h
··· 89 89 #define RXRPC_JUMBO_DATALEN 1412 /* non-terminal jumbo packet data length */ 90 90 #define RXRPC_JUMBO_SUBPKTLEN (RXRPC_JUMBO_DATALEN + sizeof(struct rxrpc_jumbo_header)) 91 91 92 + /* 93 + * The maximum number of subpackets that can possibly fit in a UDP packet is: 94 + * 95 + * ((max_IP - IP_hdr - UDP_hdr) / RXRPC_JUMBO_SUBPKTLEN) + 1 96 + * = ((65535 - 28 - 28) / 1416) + 1 97 + * = 46 non-terminal packets and 1 terminal packet. 98 + */ 99 + #define RXRPC_MAX_NR_JUMBO 47 100 + 92 101 /*****************************************************************************/ 93 102 /* 94 103 * on-the-wire Rx ACK packet data payload
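
The RXRPC_MAX_NR_JUMBO comment above is plain arithmetic on the maximum UDP payload. A quick sketch that reproduces the figure, taking the 28-byte IP/UDP header sizes and the 1416-byte subpacket length straight from that comment; the 4-byte jumbo header size used below is inferred from 1416 - 1412 and is an assumption, not something shown in this hunk.

/* Reproduce the RXRPC_MAX_NR_JUMBO calculation from the comment above. */
#include <stdio.h>

#define RXRPC_JUMBO_DATALEN	1412		/* from protocol.h */
#define RXRPC_JUMBO_HDRLEN	4		/* assumed: 1416 - 1412 */
#define RXRPC_JUMBO_SUBPKTLEN	(RXRPC_JUMBO_DATALEN + RXRPC_JUMBO_HDRLEN)

int main(void)
{
	unsigned int max_ip = 65535, ip_hdr = 28, udp_hdr = 28;
	unsigned int nr = (max_ip - ip_hdr - udp_hdr) / RXRPC_JUMBO_SUBPKTLEN + 1;

	/* Prints 47: 46 non-terminal subpackets plus 1 terminal one. */
	printf("RXRPC_MAX_NR_JUMBO = %u\n", nr);
	return 0;
}
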
+27 -20
net/rxrpc/recvmsg.c
··· 177 177 struct sk_buff *skb; 178 178 rxrpc_serial_t serial; 179 179 rxrpc_seq_t hard_ack, top; 180 - u8 flags; 180 + bool last = false; 181 + u8 subpacket; 181 182 int ix; 182 183 183 184 _enter("%d", call->debug_id); ··· 190 189 hard_ack++; 191 190 ix = hard_ack & RXRPC_RXTX_BUFF_MASK; 192 191 skb = call->rxtx_buffer[ix]; 193 - rxrpc_see_skb(skb, rxrpc_skb_rx_rotated); 192 + rxrpc_see_skb(skb, rxrpc_skb_rotated); 194 193 sp = rxrpc_skb(skb); 195 - flags = sp->hdr.flags; 196 - serial = sp->hdr.serial; 197 - if (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) 198 - serial += (call->rxtx_annotations[ix] & RXRPC_RX_ANNO_JUMBO) - 1; 194 + 195 + subpacket = call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; 196 + serial = sp->hdr.serial + subpacket; 197 + 198 + if (subpacket == sp->nr_subpackets - 1 && 199 + sp->rx_flags & RXRPC_SKB_INCL_LAST) 200 + last = true; 199 201 200 202 call->rxtx_buffer[ix] = NULL; 201 203 call->rxtx_annotations[ix] = 0; 202 204 /* Barrier against rxrpc_input_data(). */ 203 205 smp_store_release(&call->rx_hard_ack, hard_ack); 204 206 205 - rxrpc_free_skb(skb, rxrpc_skb_rx_freed); 207 + rxrpc_free_skb(skb, rxrpc_skb_freed); 206 208 207 - _debug("%u,%u,%02x", hard_ack, top, flags); 208 209 trace_rxrpc_receive(call, rxrpc_receive_rotate, serial, hard_ack); 209 - if (flags & RXRPC_LAST_PACKET) { 210 + if (last) { 210 211 rxrpc_end_rx_phase(call, serial); 211 212 } else { 212 213 /* Check to see if there's an ACK that needs sending. */ ··· 236 233 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 237 234 rxrpc_seq_t seq = sp->hdr.seq; 238 235 u16 cksum = sp->hdr.cksum; 236 + u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET; 239 237 240 238 _enter(""); 241 239 242 240 /* For all but the head jumbo subpacket, the security checksum is in a 243 241 * jumbo header immediately prior to the data. 244 242 */ 245 - if ((annotation & RXRPC_RX_ANNO_JUMBO) > 1) { 243 + if (subpacket > 0) { 246 244 __be16 tmp; 247 245 if (skb_copy_bits(skb, offset - 2, &tmp, 2) < 0) 248 246 BUG(); 249 247 cksum = ntohs(tmp); 250 - seq += (annotation & RXRPC_RX_ANNO_JUMBO) - 1; 248 + seq += subpacket; 251 249 } 252 250 253 251 return call->conn->security->verify_packet(call, skb, offset, len, ··· 269 265 u8 *_annotation, 270 266 unsigned int *_offset, unsigned int *_len) 271 267 { 268 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 272 269 unsigned int offset = sizeof(struct rxrpc_wire_header); 273 270 unsigned int len; 274 271 int ret; 275 272 u8 annotation = *_annotation; 273 + u8 subpacket = annotation & RXRPC_RX_ANNO_SUBPACKET; 276 274 277 275 /* Locate the subpacket */ 276 + offset += subpacket * RXRPC_JUMBO_SUBPKTLEN; 278 277 len = skb->len - offset; 279 - if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) { 280 - offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) * 281 - RXRPC_JUMBO_SUBPKTLEN); 282 - len = (annotation & RXRPC_RX_ANNO_JLAST) ? 
283 - skb->len - offset : RXRPC_JUMBO_SUBPKTLEN; 284 - } 278 + if (subpacket < sp->nr_subpackets - 1) 279 + len = RXRPC_JUMBO_DATALEN; 285 280 286 281 if (!(annotation & RXRPC_RX_ANNO_VERIFIED)) { 287 282 ret = rxrpc_verify_packet(call, skb, annotation, offset, len); ··· 306 303 { 307 304 struct rxrpc_skb_priv *sp; 308 305 struct sk_buff *skb; 306 + rxrpc_serial_t serial; 309 307 rxrpc_seq_t hard_ack, top, seq; 310 308 size_t remain; 311 309 bool last; ··· 340 336 break; 341 337 } 342 338 smp_rmb(); 343 - rxrpc_see_skb(skb, rxrpc_skb_rx_seen); 339 + rxrpc_see_skb(skb, rxrpc_skb_seen); 344 340 sp = rxrpc_skb(skb); 345 341 346 - if (!(flags & MSG_PEEK)) 342 + if (!(flags & MSG_PEEK)) { 343 + serial = sp->hdr.serial; 344 + serial += call->rxtx_annotations[ix] & RXRPC_RX_ANNO_SUBPACKET; 347 345 trace_rxrpc_receive(call, rxrpc_receive_front, 348 - sp->hdr.serial, seq); 346 + serial, seq); 347 + } 349 348 350 349 if (msg) 351 350 sock_recv_timestamp(msg, sock->sk, skb);
+9 -23
net/rxrpc/rxkad.c
··· 187 187 struct rxrpc_skb_priv *sp; 188 188 struct rxrpc_crypt iv; 189 189 struct scatterlist sg[16]; 190 - struct sk_buff *trailer; 191 190 unsigned int len; 192 191 u16 check; 193 - int nsg; 194 192 int err; 195 193 196 194 sp = rxrpc_skb(skb); ··· 212 214 crypto_skcipher_encrypt(req); 213 215 214 216 /* we want to encrypt the skbuff in-place */ 215 - nsg = skb_cow_data(skb, 0, &trailer); 216 - err = -ENOMEM; 217 - if (nsg < 0 || nsg > 16) 217 + err = -EMSGSIZE; 218 + if (skb_shinfo(skb)->nr_frags > 16) 218 219 goto out; 219 220 220 221 len = data_size + call->conn->size_align - 1; 221 222 len &= ~(call->conn->size_align - 1); 222 223 223 - sg_init_table(sg, nsg); 224 + sg_init_table(sg, ARRAY_SIZE(sg)); 224 225 err = skb_to_sgvec(skb, sg, 0, len); 225 226 if (unlikely(err < 0)) 226 227 goto out; ··· 316 319 struct rxkad_level1_hdr sechdr; 317 320 struct rxrpc_crypt iv; 318 321 struct scatterlist sg[16]; 319 - struct sk_buff *trailer; 320 322 bool aborted; 321 323 u32 data_size, buf; 322 324 u16 check; 323 - int nsg, ret; 325 + int ret; 324 326 325 327 _enter(""); 326 328 ··· 332 336 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 333 337 * directly into the target buffer. 334 338 */ 335 - nsg = skb_cow_data(skb, 0, &trailer); 336 - if (nsg < 0 || nsg > 16) 337 - goto nomem; 338 - 339 - sg_init_table(sg, nsg); 339 + sg_init_table(sg, ARRAY_SIZE(sg)); 340 340 ret = skb_to_sgvec(skb, sg, offset, 8); 341 341 if (unlikely(ret < 0)) 342 342 return ret; ··· 380 388 if (aborted) 381 389 rxrpc_send_abort_packet(call); 382 390 return -EPROTO; 383 - 384 - nomem: 385 - _leave(" = -ENOMEM"); 386 - return -ENOMEM; 387 391 } 388 392 389 393 /* ··· 394 406 struct rxkad_level2_hdr sechdr; 395 407 struct rxrpc_crypt iv; 396 408 struct scatterlist _sg[4], *sg; 397 - struct sk_buff *trailer; 398 409 bool aborted; 399 410 u32 data_size, buf; 400 411 u16 check; ··· 410 423 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 411 424 * directly into the target buffer. 412 425 */ 413 - nsg = skb_cow_data(skb, 0, &trailer); 414 - if (nsg < 0) 415 - goto nomem; 416 - 417 426 sg = _sg; 418 - if (unlikely(nsg > 4)) { 427 + nsg = skb_shinfo(skb)->nr_frags; 428 + if (nsg <= 4) { 429 + nsg = 4; 430 + } else { 419 431 sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); 420 432 if (!sg) 421 433 goto nomem;
+7 -6
net/rxrpc/sendmsg.c
··· 176 176 skb->tstamp = ktime_get_real(); 177 177 178 178 ix = seq & RXRPC_RXTX_BUFF_MASK; 179 - rxrpc_get_skb(skb, rxrpc_skb_tx_got); 179 + rxrpc_get_skb(skb, rxrpc_skb_got); 180 180 call->rxtx_annotations[ix] = annotation; 181 181 smp_wmb(); 182 182 call->rxtx_buffer[ix] = skb; ··· 248 248 } 249 249 250 250 out: 251 - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 251 + rxrpc_free_skb(skb, rxrpc_skb_freed); 252 252 _leave(" = %d", ret); 253 253 return ret; 254 254 } ··· 289 289 290 290 skb = call->tx_pending; 291 291 call->tx_pending = NULL; 292 - rxrpc_see_skb(skb, rxrpc_skb_tx_seen); 292 + rxrpc_see_skb(skb, rxrpc_skb_seen); 293 293 294 294 copied = 0; 295 295 do { ··· 336 336 if (!skb) 337 337 goto maybe_error; 338 338 339 - rxrpc_new_skb(skb, rxrpc_skb_tx_new); 339 + sp = rxrpc_skb(skb); 340 + sp->rx_flags |= RXRPC_SKB_TX_BUFFER; 341 + rxrpc_new_skb(skb, rxrpc_skb_new); 340 342 341 343 _debug("ALLOC SEND %p", skb); 342 344 ··· 348 346 skb_reserve(skb, call->conn->security_size); 349 347 skb->len += call->conn->security_size; 350 348 351 - sp = rxrpc_skb(skb); 352 349 sp->remain = chunk; 353 350 if (sp->remain > skb_tailroom(skb)) 354 351 sp->remain = skb_tailroom(skb); ··· 440 439 return ret; 441 440 442 441 call_terminated: 443 - rxrpc_free_skb(skb, rxrpc_skb_tx_freed); 442 + rxrpc_free_skb(skb, rxrpc_skb_freed); 444 443 _leave(" = %d", call->error); 445 444 return call->error; 446 445
+28 -12
net/rxrpc/skbuff.c
··· 14 14 #include <net/af_rxrpc.h> 15 15 #include "ar-internal.h" 16 16 17 - #define select_skb_count(op) (op >= rxrpc_skb_tx_cleaned ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) 17 + #define is_tx_skb(skb) (rxrpc_skb(skb)->rx_flags & RXRPC_SKB_TX_BUFFER) 18 + #define select_skb_count(skb) (is_tx_skb(skb) ? &rxrpc_n_tx_skbs : &rxrpc_n_rx_skbs) 18 19 19 20 /* 20 21 * Note the allocation or reception of a socket buffer. ··· 23 22 void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 24 23 { 25 24 const void *here = __builtin_return_address(0); 26 - int n = atomic_inc_return(select_skb_count(op)); 27 - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 25 + int n = atomic_inc_return(select_skb_count(skb)); 26 + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, 27 + rxrpc_skb(skb)->rx_flags, here); 28 28 } 29 29 30 30 /* ··· 35 33 { 36 34 const void *here = __builtin_return_address(0); 37 35 if (skb) { 38 - int n = atomic_read(select_skb_count(op)); 39 - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 36 + int n = atomic_read(select_skb_count(skb)); 37 + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, 38 + rxrpc_skb(skb)->rx_flags, here); 40 39 } 41 40 } 42 41 ··· 47 44 void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 48 45 { 49 46 const void *here = __builtin_return_address(0); 50 - int n = atomic_inc_return(select_skb_count(op)); 51 - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 47 + int n = atomic_inc_return(select_skb_count(skb)); 48 + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, 49 + rxrpc_skb(skb)->rx_flags, here); 52 50 skb_get(skb); 51 + } 52 + 53 + /* 54 + * Note the dropping of a ref on a socket buffer by the core. 55 + */ 56 + void rxrpc_eaten_skb(struct sk_buff *skb, enum rxrpc_skb_trace op) 57 + { 58 + const void *here = __builtin_return_address(0); 59 + int n = atomic_inc_return(&rxrpc_n_rx_skbs); 60 + trace_rxrpc_skb(skb, op, 0, n, 0, here); 53 61 } 54 62 55 63 /* ··· 72 58 if (skb) { 73 59 int n; 74 60 CHECK_SLAB_OKAY(&skb->users); 75 - n = atomic_dec_return(select_skb_count(op)); 76 - trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here); 61 + n = atomic_dec_return(select_skb_count(skb)); 62 + trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, 63 + rxrpc_skb(skb)->rx_flags, here); 77 64 kfree_skb(skb); 78 65 } 79 66 } ··· 87 72 const void *here = __builtin_return_address(0); 88 73 struct sk_buff *skb; 89 74 while ((skb = skb_dequeue((list))) != NULL) { 90 - int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged)); 91 - trace_rxrpc_skb(skb, rxrpc_skb_rx_purged, 92 - refcount_read(&skb->users), n, here); 75 + int n = atomic_dec_return(select_skb_count(skb)); 76 + trace_rxrpc_skb(skb, rxrpc_skb_purged, 77 + refcount_read(&skb->users), n, 78 + rxrpc_skb(skb)->rx_flags, here); 93 79 kfree_skb(skb); 94 80 } 95 81 }
+1 -1
net/sched/act_bpf.c
··· 422 422 { 423 423 struct tc_action_net *tn = net_generic(net, bpf_net_id); 424 424 425 - return tc_action_net_init(tn, &act_bpf_ops); 425 + return tc_action_net_init(net, tn, &act_bpf_ops); 426 426 } 427 427 428 428 static void __net_exit bpf_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_connmark.c
··· 231 231 { 232 232 struct tc_action_net *tn = net_generic(net, connmark_net_id); 233 233 234 - return tc_action_net_init(tn, &act_connmark_ops); 234 + return tc_action_net_init(net, tn, &act_connmark_ops); 235 235 } 236 236 237 237 static void __net_exit connmark_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_csum.c
··· 714 714 { 715 715 struct tc_action_net *tn = net_generic(net, csum_net_id); 716 716 717 - return tc_action_net_init(tn, &act_csum_ops); 717 + return tc_action_net_init(net, tn, &act_csum_ops); 718 718 } 719 719 720 720 static void __net_exit csum_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_ct.c
··· 939 939 tn->labels = true; 940 940 } 941 941 942 - return tc_action_net_init(&tn->tn, &act_ct_ops); 942 + return tc_action_net_init(net, &tn->tn, &act_ct_ops); 943 943 } 944 944 945 945 static void __net_exit ct_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_ctinfo.c
··· 376 376 { 377 377 struct tc_action_net *tn = net_generic(net, ctinfo_net_id); 378 378 379 - return tc_action_net_init(tn, &act_ctinfo_ops); 379 + return tc_action_net_init(net, tn, &act_ctinfo_ops); 380 380 } 381 381 382 382 static void __net_exit ctinfo_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_gact.c
··· 278 278 { 279 279 struct tc_action_net *tn = net_generic(net, gact_net_id); 280 280 281 - return tc_action_net_init(tn, &act_gact_ops); 281 + return tc_action_net_init(net, tn, &act_gact_ops); 282 282 } 283 283 284 284 static void __net_exit gact_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_ife.c
··· 890 890 { 891 891 struct tc_action_net *tn = net_generic(net, ife_net_id); 892 892 893 - return tc_action_net_init(tn, &act_ife_ops); 893 + return tc_action_net_init(net, tn, &act_ife_ops); 894 894 } 895 895 896 896 static void __net_exit ife_exit_net(struct list_head *net_list)
+6 -5
net/sched/act_ipt.c
··· 61 61 return 0; 62 62 } 63 63 64 - static void ipt_destroy_target(struct xt_entry_target *t) 64 + static void ipt_destroy_target(struct xt_entry_target *t, struct net *net) 65 65 { 66 66 struct xt_tgdtor_param par = { 67 67 .target = t->u.kernel.target, 68 68 .targinfo = t->data, 69 69 .family = NFPROTO_IPV4, 70 + .net = net, 70 71 }; 71 72 if (par.target->destroy != NULL) 72 73 par.target->destroy(&par); ··· 79 78 struct tcf_ipt *ipt = to_ipt(a); 80 79 81 80 if (ipt->tcfi_t) { 82 - ipt_destroy_target(ipt->tcfi_t); 81 + ipt_destroy_target(ipt->tcfi_t, a->idrinfo->net); 83 82 kfree(ipt->tcfi_t); 84 83 } 85 84 kfree(ipt->tcfi_tname); ··· 181 180 182 181 spin_lock_bh(&ipt->tcf_lock); 183 182 if (ret != ACT_P_CREATED) { 184 - ipt_destroy_target(ipt->tcfi_t); 183 + ipt_destroy_target(ipt->tcfi_t, net); 185 184 kfree(ipt->tcfi_tname); 186 185 kfree(ipt->tcfi_t); 187 186 } ··· 351 350 { 352 351 struct tc_action_net *tn = net_generic(net, ipt_net_id); 353 352 354 - return tc_action_net_init(tn, &act_ipt_ops); 353 + return tc_action_net_init(net, tn, &act_ipt_ops); 355 354 } 356 355 357 356 static void __net_exit ipt_exit_net(struct list_head *net_list) ··· 400 399 { 401 400 struct tc_action_net *tn = net_generic(net, xt_net_id); 402 401 403 - return tc_action_net_init(tn, &act_xt_ops); 402 + return tc_action_net_init(net, tn, &act_xt_ops); 404 403 } 405 404 406 405 static void __net_exit xt_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_mirred.c
··· 453 453 { 454 454 struct tc_action_net *tn = net_generic(net, mirred_net_id); 455 455 456 - return tc_action_net_init(tn, &act_mirred_ops); 456 + return tc_action_net_init(net, tn, &act_mirred_ops); 457 457 } 458 458 459 459 static void __net_exit mirred_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_mpls.c
··· 375 375 { 376 376 struct tc_action_net *tn = net_generic(net, mpls_net_id); 377 377 378 - return tc_action_net_init(tn, &act_mpls_ops); 378 + return tc_action_net_init(net, tn, &act_mpls_ops); 379 379 } 380 380 381 381 static void __net_exit mpls_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_nat.c
··· 327 327 { 328 328 struct tc_action_net *tn = net_generic(net, nat_net_id); 329 329 330 - return tc_action_net_init(tn, &act_nat_ops); 330 + return tc_action_net_init(net, tn, &act_nat_ops); 331 331 } 332 332 333 333 static void __net_exit nat_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_pedit.c
··· 498 498 { 499 499 struct tc_action_net *tn = net_generic(net, pedit_net_id); 500 500 501 - return tc_action_net_init(tn, &act_pedit_ops); 501 + return tc_action_net_init(net, tn, &act_pedit_ops); 502 502 } 503 503 504 504 static void __net_exit pedit_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_police.c
··· 371 371 { 372 372 struct tc_action_net *tn = net_generic(net, police_net_id); 373 373 374 - return tc_action_net_init(tn, &act_police_ops); 374 + return tc_action_net_init(net, tn, &act_police_ops); 375 375 } 376 376 377 377 static void __net_exit police_exit_net(struct list_head *net_list)
+6 -2
net/sched/act_sample.c
··· 102 102 goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); 103 103 s->rate = rate; 104 104 s->psample_group_num = psample_group_num; 105 - RCU_INIT_POINTER(s->psample_group, psample_group); 105 + rcu_swap_protected(s->psample_group, psample_group, 106 + lockdep_is_held(&s->tcf_lock)); 106 107 107 108 if (tb[TCA_SAMPLE_TRUNC_SIZE]) { 108 109 s->truncate = true; 109 110 s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]); 110 111 } 111 112 spin_unlock_bh(&s->tcf_lock); 113 + 114 + if (psample_group) 115 + psample_group_put(psample_group); 112 116 if (goto_ch) 113 117 tcf_chain_put_by_act(goto_ch); 114 118 ··· 269 265 { 270 266 struct tc_action_net *tn = net_generic(net, sample_net_id); 271 267 272 - return tc_action_net_init(tn, &act_sample_ops); 268 + return tc_action_net_init(net, tn, &act_sample_ops); 273 269 } 274 270 275 271 static void __net_exit sample_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_simple.c
··· 232 232 { 233 233 struct tc_action_net *tn = net_generic(net, simp_net_id); 234 234 235 - return tc_action_net_init(tn, &act_simp_ops); 235 + return tc_action_net_init(net, tn, &act_simp_ops); 236 236 } 237 237 238 238 static void __net_exit simp_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_skbedit.c
··· 336 336 { 337 337 struct tc_action_net *tn = net_generic(net, skbedit_net_id); 338 338 339 - return tc_action_net_init(tn, &act_skbedit_ops); 339 + return tc_action_net_init(net, tn, &act_skbedit_ops); 340 340 } 341 341 342 342 static void __net_exit skbedit_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_skbmod.c
··· 287 287 { 288 288 struct tc_action_net *tn = net_generic(net, skbmod_net_id); 289 289 290 - return tc_action_net_init(tn, &act_skbmod_ops); 290 + return tc_action_net_init(net, tn, &act_skbmod_ops); 291 291 } 292 292 293 293 static void __net_exit skbmod_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_tunnel_key.c
··· 600 600 { 601 601 struct tc_action_net *tn = net_generic(net, tunnel_key_net_id); 602 602 603 - return tc_action_net_init(tn, &act_tunnel_key_ops); 603 + return tc_action_net_init(net, tn, &act_tunnel_key_ops); 604 604 } 605 605 606 606 static void __net_exit tunnel_key_exit_net(struct list_head *net_list)
+1 -1
net/sched/act_vlan.c
··· 334 334 { 335 335 struct tc_action_net *tn = net_generic(net, vlan_net_id); 336 336 337 - return tc_action_net_init(tn, &act_vlan_ops); 337 + return tc_action_net_init(net, tn, &act_vlan_ops); 338 338 } 339 339 340 340 static void __net_exit vlan_exit_net(struct list_head *net_list)
+11 -8
net/sched/sch_cbs.c
··· 181 181 s64 credits; 182 182 int len; 183 183 184 - if (atomic64_read(&q->port_rate) == -1) { 185 - WARN_ONCE(1, "cbs: dequeue() called with unknown port rate."); 186 - return NULL; 187 - } 188 - 189 184 if (q->credits < 0) { 190 185 credits = timediff_to_credits(now - q->last, q->idleslope); 191 186 ··· 298 303 static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q) 299 304 { 300 305 struct ethtool_link_ksettings ecmd; 306 + int speed = SPEED_10; 301 307 int port_rate = -1; 308 + int err; 302 309 303 - if (!__ethtool_get_link_ksettings(dev, &ecmd) && 304 - ecmd.base.speed != SPEED_UNKNOWN) 305 - port_rate = ecmd.base.speed * 1000 * BYTES_PER_KBIT; 310 + err = __ethtool_get_link_ksettings(dev, &ecmd); 311 + if (err < 0) 312 + goto skip; 313 + 314 + if (ecmd.base.speed != SPEED_UNKNOWN) 315 + speed = ecmd.base.speed; 316 + 317 + skip: 318 + port_rate = speed * 1000 * BYTES_PER_KBIT; 306 319 307 320 atomic64_set(&q->port_rate, port_rate); 308 321 netdev_dbg(dev, "cbs: set %s's port_rate to: %lld, linkspeed: %d\n",
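
With the change above, cbs_set_port_rate() no longer leaves port_rate at -1 when the link speed cannot be read; it falls back to SPEED_10, and the WARN_ONCE in dequeue goes away. The resulting value is simply speed * 1000 * BYTES_PER_KBIT. A small sketch of that fallback computation follows; BYTES_PER_KBIT = 1000 / 8 is an assumption about its usual kernel definition and is not part of this hunk.

/* Model of the cbs port_rate fallback after this patch.
 * BYTES_PER_KBIT = 1000 / 8 is an assumption, not shown in the hunk. */
#include <stdio.h>

#define SPEED_UNKNOWN	-1
#define SPEED_10	10
#define BYTES_PER_KBIT	(1000LL / 8)

static long long cbs_port_rate(int link_speed_mbit)
{
	int speed = SPEED_10;			/* default when speed is unknown */

	if (link_speed_mbit != SPEED_UNKNOWN)
		speed = link_speed_mbit;

	return speed * 1000 * BYTES_PER_KBIT;	/* bytes per second */
}

int main(void)
{
	printf("unknown link: %lld B/s\n", cbs_port_rate(SPEED_UNKNOWN)); /* 1250000 */
	printf("1000 Mbit/s:  %lld B/s\n", cbs_port_rate(1000));          /* 125000000 */
	return 0;
}
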
+13 -6
net/sched/sch_generic.c
··· 624 624 625 625 err = skb_array_produce(q, skb); 626 626 627 - if (unlikely(err)) 628 - return qdisc_drop_cpu(skb, qdisc, to_free); 627 + if (unlikely(err)) { 628 + if (qdisc_is_percpu_stats(qdisc)) 629 + return qdisc_drop_cpu(skb, qdisc, to_free); 630 + else 631 + return qdisc_drop(skb, qdisc, to_free); 632 + } 629 633 630 634 qdisc_update_stats_at_enqueue(qdisc, pkt_len); 631 635 return NET_XMIT_SUCCESS; ··· 692 688 kfree_skb(skb); 693 689 } 694 690 695 - for_each_possible_cpu(i) { 696 - struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i); 691 + if (qdisc_is_percpu_stats(qdisc)) { 692 + for_each_possible_cpu(i) { 693 + struct gnet_stats_queue *q; 697 694 698 - q->backlog = 0; 699 - q->qlen = 0; 695 + q = per_cpu_ptr(qdisc->cpu_qstats, i); 696 + q->backlog = 0; 697 + q->qlen = 0; 698 + } 700 699 } 701 700 } 702 701
+17 -14
net/sched/sch_taprio.c
··· 477 477 u32 gate_mask; 478 478 int i; 479 479 480 - if (atomic64_read(&q->picos_per_byte) == -1) { 481 - WARN_ONCE(1, "taprio: dequeue() called with unknown picos per byte."); 482 - return NULL; 483 - } 484 - 485 480 rcu_read_lock(); 486 481 entry = rcu_dereference(q->current_entry); 487 482 /* if there's no entry, it means that the schedule didn't ··· 953 958 struct taprio_sched *q) 954 959 { 955 960 struct ethtool_link_ksettings ecmd; 956 - int picos_per_byte = -1; 961 + int speed = SPEED_10; 962 + int picos_per_byte; 963 + int err; 957 964 958 - if (!__ethtool_get_link_ksettings(dev, &ecmd) && 959 - ecmd.base.speed != SPEED_UNKNOWN) 960 - picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, 961 - ecmd.base.speed * 1000 * 1000); 965 + err = __ethtool_get_link_ksettings(dev, &ecmd); 966 + if (err < 0) 967 + goto skip; 968 + 969 + if (ecmd.base.speed != SPEED_UNKNOWN) 970 + speed = ecmd.base.speed; 971 + 972 + skip: 973 + picos_per_byte = div64_s64(NSEC_PER_SEC * 1000LL * 8, 974 + speed * 1000 * 1000); 962 975 963 976 atomic64_set(&q->picos_per_byte, picos_per_byte); 964 977 netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n", ··· 1252 1249 */ 1253 1250 q->clockid = -1; 1254 1251 1252 + spin_lock(&taprio_list_lock); 1253 + list_add(&q->taprio_list, &taprio_list); 1254 + spin_unlock(&taprio_list_lock); 1255 + 1255 1256 if (sch->parent != TC_H_ROOT) 1256 1257 return -EOPNOTSUPP; 1257 1258 ··· 1272 1265 1273 1266 if (!opt) 1274 1267 return -EINVAL; 1275 - 1276 - spin_lock(&taprio_list_lock); 1277 - list_add(&q->taprio_list, &taprio_list); 1278 - spin_unlock(&taprio_list_lock); 1279 1268 1280 1269 for (i = 0; i < dev->num_tx_queues; i++) { 1281 1270 struct netdev_queue *dev_queue;
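
taprio_set_picos_per_byte() gets the same treatment: a 10 Mbit/s default instead of the old -1 sentinel, and the WARN_ONCE in dequeue is dropped. The picos_per_byte figure follows directly from the link speed via the formula in the hunk above; the sketch below just checks that arithmetic in userspace (plain division stands in for div64_s64).

/* Reproduce taprio's picos_per_byte computation with the new 10 Mbit/s default. */
#include <stdio.h>

#define NSEC_PER_SEC	1000000000LL
#define SPEED_UNKNOWN	-1
#define SPEED_10	10

static long long picos_per_byte(int link_speed_mbit)
{
	int speed = SPEED_10;			/* default when speed is unknown */

	if (link_speed_mbit != SPEED_UNKNOWN)
		speed = link_speed_mbit;

	return NSEC_PER_SEC * 1000LL * 8 / (speed * 1000LL * 1000);
}

int main(void)
{
	printf("unknown link: %lld ps/byte\n", picos_per_byte(SPEED_UNKNOWN)); /* 800000 */
	printf("1000 Mbit/s:  %lld ps/byte\n", picos_per_byte(1000));          /* 8000 */
	return 0;
}
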
+11 -11
tools/testing/selftests/tc-testing/plugin-lib/nsPlugin.py
··· 64 64 cmdlist.insert(0, self.args.NAMES['NS']) 65 65 cmdlist.insert(0, 'exec') 66 66 cmdlist.insert(0, 'netns') 67 - cmdlist.insert(0, 'ip') 67 + cmdlist.insert(0, self.args.NAMES['IP']) 68 68 else: 69 69 pass 70 70 ··· 78 78 return command 79 79 80 80 def _ports_create(self): 81 - cmd = 'ip link add $DEV0 type veth peer name $DEV1' 81 + cmd = '$IP link add $DEV0 type veth peer name $DEV1' 82 82 self._exec_cmd('pre', cmd) 83 - cmd = 'ip link set $DEV0 up' 83 + cmd = '$IP link set $DEV0 up' 84 84 self._exec_cmd('pre', cmd) 85 85 if not self.args.namespace: 86 - cmd = 'ip link set $DEV1 up' 86 + cmd = '$IP link set $DEV1 up' 87 87 self._exec_cmd('pre', cmd) 88 88 89 89 def _ports_destroy(self): 90 - cmd = 'ip link del $DEV0' 90 + cmd = '$IP link del $DEV0' 91 91 self._exec_cmd('post', cmd) 92 92 93 93 def _ns_create(self): ··· 97 97 ''' 98 98 self._ports_create() 99 99 if self.args.namespace: 100 - cmd = 'ip netns add {}'.format(self.args.NAMES['NS']) 100 + cmd = '$IP netns add {}'.format(self.args.NAMES['NS']) 101 101 self._exec_cmd('pre', cmd) 102 - cmd = 'ip link set $DEV1 netns {}'.format(self.args.NAMES['NS']) 102 + cmd = '$IP link set $DEV1 netns {}'.format(self.args.NAMES['NS']) 103 103 self._exec_cmd('pre', cmd) 104 - cmd = 'ip -n {} link set $DEV1 up'.format(self.args.NAMES['NS']) 104 + cmd = '$IP -n {} link set $DEV1 up'.format(self.args.NAMES['NS']) 105 105 self._exec_cmd('pre', cmd) 106 106 if self.args.device: 107 - cmd = 'ip link set $DEV2 netns {}'.format(self.args.NAMES['NS']) 107 + cmd = '$IP link set $DEV2 netns {}'.format(self.args.NAMES['NS']) 108 108 self._exec_cmd('pre', cmd) 109 - cmd = 'ip -n {} link set $DEV2 up'.format(self.args.NAMES['NS']) 109 + cmd = '$IP -n {} link set $DEV2 up'.format(self.args.NAMES['NS']) 110 110 self._exec_cmd('pre', cmd) 111 111 112 112 def _ns_destroy(self): ··· 115 115 devices as well) 116 116 ''' 117 117 if self.args.namespace: 118 - cmd = 'ip netns delete {}'.format(self.args.NAMES['NS']) 118 + cmd = '$IP netns delete {}'.format(self.args.NAMES['NS']) 119 119 self._exec_cmd('post', cmd) 120 120 121 121 def _exec_cmd(self, stage, command):