Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) Fix RCU warnings in ipv6 multicast router code, from Madhuparna
Bhowmik.

2) Nexthop attributes aren't being checked properly because of a
mis-initialized iterator, from David Ahern.

3) Revert ip_idents_reserve() change as it caused performance
regressions and was just working around what is really a UBSAN bug
in the compiler. From Yuqi Jin.

4) Read MAC address properly from ROM in bmac driver (the doubled
iteration count runs past the end of the address array), from Jeremy
Kerr.
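
The ROM hands the address back as 16-bit words, so three reads already
cover all six bytes; iterating six times walks past the end of the
array. A minimal sketch of the corrected loop, reusing the helper names
visible in the bmac diff below (the byte extraction is illustrative,
not the exact driver code):

    for (i = 0; i < 3; i++) {
            reset_and_select_srom(dev);
            /* each SROM word supplies two bytes of the 6-byte MAC */
            data = read_srom(dev, i + EnetAddressOffset / 2, SROMAddressBits);
            addr[2 * i]     = data & 0xff;
            addr[2 * i + 1] = (data >> 8) & 0xff;
    }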

5) Add Microsoft Surface device IDs to r8152, from Marc Payne.

6) Prevent reference to freed SKB in __netif_receive_skb_core(), from
Boris Sukholitko.

7) Fix ACK discard behavior in rxrpc, from David Howells.

8) Preserve flow hash across packet scrubbing in wireguard, from Jason
A. Donenfeld.
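
The idea is to remember the flow hash before the skb's metadata is
wiped and reapply it afterwards, so decrypted packets keep steering to
the same queue/CPU. A hedged sketch of the general technique using core
skb helpers, not the actual wireguard code:

    u32 hash = skb->hash;
    bool l4 = skb->l4_hash;

    /* ... scrub/clear the skb metadata here ... */

    skb_set_hash(skb, hash, l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);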

9) Cap option length properly for SO_BINDTODEVICE in AX25, from Eric
Dumazet.
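
The handler copies a user-supplied interface name into a fixed
IFNAMSIZ buffer, so the option length must be clamped before the copy.
A sketch of the general pattern (illustrative, not the exact ax25
code):

    char devname[IFNAMSIZ];

    if (optlen > sizeof(devname) - 1)
            optlen = sizeof(devname) - 1;

    memset(devname, 0, sizeof(devname));
    if (copy_from_user(devname, optval, optlen))
            return -EFAULT;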

10) Fix encryption error checking in kTLS code, from Vadim Fedorenko.

11) Missing BPF prog ref release in flow dissector, from Jakub Sitnicki.

12) dst_cache must be used with BH disabled in tipc, from Eric Dumazet.
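
dst_cache keeps per-CPU state that is also touched from softirq
context, so lookups from process context must run with bottom halves
off. A minimal sketch of the pattern (illustrative, not the exact tipc
change):

    local_bh_disable();
    dst = dst_cache_get(cache);
    if (!dst) {
            /* ... resolve a fresh route, then dst_cache_set_ip4/ip6() ... */
    }
    local_bh_enable();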

13) Fix use after free in mlxsw driver, from Jiri Pirko.

14) Order kTLS key destruction properly in mlx5 driver, from Tariq
Toukan.

15) Check devm_platform_ioremap_resource() return value properly in
several drivers, from Tiezhu Yang.
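
devm_platform_ioremap_resource() reports failure through an ERR_PTR(),
never NULL, so callers must test the result with IS_ERR() and propagate
the encoded error. A minimal probe-path sketch of the pattern applied
across the drivers below (identifiers illustrative):

    base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(base))
            return PTR_ERR(base);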

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (71 commits)
net: smsc911x: Fix runtime PM imbalance on error
net/mlx4_core: fix a memory leak bug.
net: ethernet: ti: cpsw: fix ASSERT_RTNL() warning during suspend
net: phy: mscc: fix initialization of the MACsec protocol mode
net: stmmac: don't attach interface until resume finishes
net: Fix return value about devm_platform_ioremap_resource()
net/mlx5: Fix error flow in case of function_setup failure
net/mlx5e: CT: Correctly get flow rule
net/mlx5e: Update netdev txq on completions during closure
net/mlx5: Annotate mutex destroy for root ns
net/mlx5: Don't maintain a case of del_sw_func being null
net/mlx5: Fix cleaning unmanaged flow tables
net/mlx5: Fix memory leak in mlx5_events_init
net/mlx5e: Fix inner tirs handling
net/mlx5e: kTLS, Destroy key object after destroying the TIS
net/mlx5e: Fix allowed tc redirect merged eswitch offload cases
net/mlx5: Avoid processing commands before cmdif is ready
net/mlx5: Fix a race when moving command interface to events mode
net/mlx5: Add command entry handling completion
rxrpc: Fix a memory leak in rxkad_verify_response()
...

+940 -454
+3
Documentation/devicetree/bindings/net/dsa/b53.txt
··· 110 #size-cells = <0>; 111 112 ports { 113 port0@0 { 114 reg = <0>; 115 label = "lan1";
··· 110 #size-cells = <0>; 111 112 ports { 113 + #address-cells = <1>; 114 + #size-cells = <0>; 115 + 116 port0@0 { 117 reg = <0>; 118 label = "lan1";
+4 -1
drivers/net/can/ifi_canfd/ifi_canfd.c
··· 947 u32 id, rev; 948 949 addr = devm_platform_ioremap_resource(pdev, 0); 950 irq = platform_get_irq(pdev, 0); 951 - if (IS_ERR(addr) || irq < 0) 952 return -EINVAL; 953 954 id = readl(addr + IFI_CANFD_IP_ID);
··· 947 u32 id, rev; 948 949 addr = devm_platform_ioremap_resource(pdev, 0); 950 + if (IS_ERR(addr)) 951 + return PTR_ERR(addr); 952 + 953 irq = platform_get_irq(pdev, 0); 954 + if (irq < 0) 955 return -EINVAL; 956 957 id = readl(addr + IFI_CANFD_IP_ID);
+1 -1
drivers/net/can/sun4i_can.c
··· 792 793 addr = devm_platform_ioremap_resource(pdev, 0); 794 if (IS_ERR(addr)) { 795 - err = -EBUSY; 796 goto exit; 797 } 798
··· 792 793 addr = devm_platform_ioremap_resource(pdev, 0); 794 if (IS_ERR(addr)) { 795 + err = PTR_ERR(addr); 796 goto exit; 797 } 798
+1 -1
drivers/net/dsa/b53/b53_srab.c
··· 609 610 priv->regs = devm_platform_ioremap_resource(pdev, 0); 611 if (IS_ERR(priv->regs)) 612 - return -ENOMEM; 613 614 dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv); 615 if (!dev)
··· 609 610 priv->regs = devm_platform_ioremap_resource(pdev, 0); 611 if (IS_ERR(priv->regs)) 612 + return PTR_ERR(priv->regs); 613 614 dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv); 615 if (!dev)
+2 -7
drivers/net/dsa/mt7530.c
··· 628 mt7530_write(priv, MT7530_PVC_P(port), 629 PORT_SPEC_TAG); 630 631 - /* Disable auto learning on the cpu port */ 632 - mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); 633 - 634 - /* Unknown unicast frame fordwarding to the cpu port */ 635 - mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port))); 636 637 /* Set CPU port number */ 638 if (priv->id == ID_MT7621) ··· 1290 1291 /* Enable and reset MIB counters */ 1292 mt7530_mib_reset(ds); 1293 - 1294 - mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK); 1295 1296 for (i = 0; i < MT7530_NUM_PORTS; i++) { 1297 /* Disable forwarding by default on all ports */
··· 628 mt7530_write(priv, MT7530_PVC_P(port), 629 PORT_SPEC_TAG); 630 631 + /* Unknown multicast frame forwarding to the cpu port */ 632 + mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port))); 633 634 /* Set CPU port number */ 635 if (priv->id == ID_MT7621) ··· 1293 1294 /* Enable and reset MIB counters */ 1295 mt7530_mib_reset(ds); 1296 1297 for (i = 0; i < MT7530_NUM_PORTS; i++) { 1298 /* Disable forwarding by default on all ports */
+1
drivers/net/dsa/mt7530.h
··· 31 #define MT7530_MFC 0x10 32 #define BC_FFP(x) (((x) & 0xff) << 24) 33 #define UNM_FFP(x) (((x) & 0xff) << 16) 34 #define UNU_FFP(x) (((x) & 0xff) << 8) 35 #define UNU_FFP_MASK UNU_FFP(~0) 36 #define CPU_EN BIT(7)
··· 31 #define MT7530_MFC 0x10 32 #define BC_FFP(x) (((x) & 0xff) << 24) 33 #define UNM_FFP(x) (((x) & 0xff) << 16) 34 + #define UNM_FFP_MASK UNM_FFP(~0) 35 #define UNU_FFP(x) (((x) & 0xff) << 8) 36 #define UNU_FFP_MASK UNU_FFP(~0) 37 #define CPU_EN BIT(7)
+11 -12
drivers/net/dsa/ocelot/felix.c
··· 388 struct ocelot *ocelot = &felix->ocelot; 389 phy_interface_t *port_phy_modes; 390 resource_size_t switch_base; 391 int port, i, err; 392 393 ocelot->num_phys_ports = num_phys_ports; ··· 423 424 for (i = 0; i < TARGET_MAX; i++) { 425 struct regmap *target; 426 - struct resource *res; 427 428 if (!felix->info->target_io_res[i].name) 429 continue; 430 431 - res = &felix->info->target_io_res[i]; 432 - res->flags = IORESOURCE_MEM; 433 - res->start += switch_base; 434 - res->end += switch_base; 435 436 - target = ocelot_regmap_init(ocelot, res); 437 if (IS_ERR(target)) { 438 dev_err(ocelot->dev, 439 "Failed to map device memory space\n"); ··· 453 for (port = 0; port < num_phys_ports; port++) { 454 struct ocelot_port *ocelot_port; 455 void __iomem *port_regs; 456 - struct resource *res; 457 458 ocelot_port = devm_kzalloc(ocelot->dev, 459 sizeof(struct ocelot_port), ··· 464 return -ENOMEM; 465 } 466 467 - res = &felix->info->port_io_res[port]; 468 - res->flags = IORESOURCE_MEM; 469 - res->start += switch_base; 470 - res->end += switch_base; 471 472 - port_regs = devm_ioremap_resource(ocelot->dev, res); 473 if (IS_ERR(port_regs)) { 474 dev_err(ocelot->dev, 475 "failed to map registers for port %d\n", port);
··· 388 struct ocelot *ocelot = &felix->ocelot; 389 phy_interface_t *port_phy_modes; 390 resource_size_t switch_base; 391 + struct resource res; 392 int port, i, err; 393 394 ocelot->num_phys_ports = num_phys_ports; ··· 422 423 for (i = 0; i < TARGET_MAX; i++) { 424 struct regmap *target; 425 426 if (!felix->info->target_io_res[i].name) 427 continue; 428 429 + memcpy(&res, &felix->info->target_io_res[i], sizeof(res)); 430 + res.flags = IORESOURCE_MEM; 431 + res.start += switch_base; 432 + res.end += switch_base; 433 434 + target = ocelot_regmap_init(ocelot, &res); 435 if (IS_ERR(target)) { 436 dev_err(ocelot->dev, 437 "Failed to map device memory space\n"); ··· 453 for (port = 0; port < num_phys_ports; port++) { 454 struct ocelot_port *ocelot_port; 455 void __iomem *port_regs; 456 457 ocelot_port = devm_kzalloc(ocelot->dev, 458 sizeof(struct ocelot_port), ··· 465 return -ENOMEM; 466 } 467 468 + memcpy(&res, &felix->info->port_io_res[port], sizeof(res)); 469 + res.flags = IORESOURCE_MEM; 470 + res.start += switch_base; 471 + res.end += switch_base; 472 473 + port_regs = devm_ioremap_resource(ocelot->dev, &res); 474 if (IS_ERR(port_regs)) { 475 dev_err(ocelot->dev, 476 "failed to map registers for port %d\n", port);
+3 -3
drivers/net/dsa/ocelot/felix.h
··· 8 9 /* Platform-specific information */ 10 struct felix_info { 11 - struct resource *target_io_res; 12 - struct resource *port_io_res; 13 - struct resource *imdio_res; 14 const struct reg_field *regfields; 15 const u32 *const *map; 16 const struct ocelot_ops *ops;
··· 8 9 /* Platform-specific information */ 10 struct felix_info { 11 + const struct resource *target_io_res; 12 + const struct resource *port_io_res; 13 + const struct resource *imdio_res; 14 const struct reg_field *regfields; 15 const u32 *const *map; 16 const struct ocelot_ops *ops;
+10 -12
drivers/net/dsa/ocelot/felix_vsc9959.c
··· 333 [GCB] = vsc9959_gcb_regmap, 334 }; 335 336 - /* Addresses are relative to the PCI device's base address and 337 - * will be fixed up at ioremap time. 338 - */ 339 - static struct resource vsc9959_target_io_res[] = { 340 [ANA] = { 341 .start = 0x0280000, 342 .end = 0x028ffff, ··· 377 }, 378 }; 379 380 - static struct resource vsc9959_port_io_res[] = { 381 { 382 .start = 0x0100000, 383 .end = 0x010ffff, ··· 413 /* Port MAC 0 Internal MDIO bus through which the SerDes acting as an 414 * SGMII/QSGMII MAC PCS can be found. 415 */ 416 - static struct resource vsc9959_imdio_res = { 417 .start = 0x8030, 418 .end = 0x8040, 419 .name = "imdio", ··· 1109 struct device *dev = ocelot->dev; 1110 resource_size_t imdio_base; 1111 void __iomem *imdio_regs; 1112 - struct resource *res; 1113 struct enetc_hw *hw; 1114 struct mii_bus *bus; 1115 int port; ··· 1126 imdio_base = pci_resource_start(felix->pdev, 1127 felix->info->imdio_pci_bar); 1128 1129 - res = felix->info->imdio_res; 1130 - res->flags = IORESOURCE_MEM; 1131 - res->start += imdio_base; 1132 - res->end += imdio_base; 1133 1134 - imdio_regs = devm_ioremap_resource(dev, res); 1135 if (IS_ERR(imdio_regs)) { 1136 dev_err(dev, "failed to map internal MDIO registers\n"); 1137 return PTR_ERR(imdio_regs);
··· 333 [GCB] = vsc9959_gcb_regmap, 334 }; 335 336 + /* Addresses are relative to the PCI device's base address */ 337 + static const struct resource vsc9959_target_io_res[] = { 338 [ANA] = { 339 .start = 0x0280000, 340 .end = 0x028ffff, ··· 379 }, 380 }; 381 382 + static const struct resource vsc9959_port_io_res[] = { 383 { 384 .start = 0x0100000, 385 .end = 0x010ffff, ··· 415 /* Port MAC 0 Internal MDIO bus through which the SerDes acting as an 416 * SGMII/QSGMII MAC PCS can be found. 417 */ 418 + static const struct resource vsc9959_imdio_res = { 419 .start = 0x8030, 420 .end = 0x8040, 421 .name = "imdio", ··· 1111 struct device *dev = ocelot->dev; 1112 resource_size_t imdio_base; 1113 void __iomem *imdio_regs; 1114 + struct resource res; 1115 struct enetc_hw *hw; 1116 struct mii_bus *bus; 1117 int port; ··· 1128 imdio_base = pci_resource_start(felix->pdev, 1129 felix->info->imdio_pci_bar); 1130 1131 + memcpy(&res, felix->info->imdio_res, sizeof(res)); 1132 + res.flags = IORESOURCE_MEM; 1133 + res.start += imdio_base; 1134 + res.end += imdio_base; 1135 1136 + imdio_regs = devm_ioremap_resource(dev, &res); 1137 if (IS_ERR(imdio_regs)) { 1138 dev_err(dev, "failed to map internal MDIO registers\n"); 1139 return PTR_ERR(imdio_regs);
+1 -1
drivers/net/ethernet/apple/bmac.c
··· 1182 int i; 1183 unsigned short data; 1184 1185 - for (i = 0; i < 6; i++) 1186 { 1187 reset_and_select_srom(dev); 1188 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
··· 1182 int i; 1183 unsigned short data; 1184 1185 + for (i = 0; i < 3; i++) 1186 { 1187 reset_and_select_srom(dev); 1188 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
+7 -6
drivers/net/ethernet/freescale/ucc_geth.c
··· 42 #include <soc/fsl/qe/ucc.h> 43 #include <soc/fsl/qe/ucc_fast.h> 44 #include <asm/machdep.h> 45 46 #include "ucc_geth.h" 47 ··· 1549 1550 static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1551 { 1552 - /* Prevent any further xmits, plus detach the device. */ 1553 - netif_device_detach(ugeth->ndev); 1554 - 1555 - /* Wait for any current xmits to finish. */ 1556 - netif_tx_disable(ugeth->ndev); 1557 1558 /* Disable the interrupt to avoid NAPI rescheduling. */ 1559 disable_irq(ugeth->ug_info->uf_info.irq); ··· 1563 { 1564 napi_enable(&ugeth->napi); 1565 enable_irq(ugeth->ug_info->uf_info.irq); 1566 - netif_device_attach(ugeth->ndev); 1567 } 1568 1569 /* Called every time the controller might need to be made
··· 42 #include <soc/fsl/qe/ucc.h> 43 #include <soc/fsl/qe/ucc_fast.h> 44 #include <asm/machdep.h> 45 + #include <net/sch_generic.h> 46 47 #include "ucc_geth.h" 48 ··· 1548 1549 static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1550 { 1551 + /* Prevent any further xmits */ 1552 + netif_tx_stop_all_queues(ugeth->ndev); 1553 1554 /* Disable the interrupt to avoid NAPI rescheduling. */ 1555 disable_irq(ugeth->ug_info->uf_info.irq); ··· 1565 { 1566 napi_enable(&ugeth->napi); 1567 enable_irq(ugeth->ug_info->uf_info.irq); 1568 + 1569 + /* allow to xmit again */ 1570 + netif_tx_wake_all_queues(ugeth->ndev); 1571 + __netdev_watchdog_up(ugeth->ndev); 1572 } 1573 1574 /* Called every time the controller might need to be made
+1 -1
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
··· 1070 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); 1071 1072 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); 1073 - val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 1074 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); 1075 } 1076
··· 1070 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); 1071 1072 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); 1073 + val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 1074 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); 1075 } 1076
+1 -1
drivers/net/ethernet/marvell/pxa168_eth.c
··· 1418 1419 pep->base = devm_platform_ioremap_resource(pdev, 0); 1420 if (IS_ERR(pep->base)) { 1421 - err = -ENOMEM; 1422 goto err_netdev; 1423 } 1424
··· 1418 1419 pep->base = devm_platform_ioremap_resource(pdev, 0); 1420 if (IS_ERR(pep->base)) { 1421 + err = PTR_ERR(pep->base); 1422 goto err_netdev; 1423 } 1424
+1 -1
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 2734 if (err) { 2735 mlx4_err(dev, "Failed to retrieve required operation: %d\n", 2736 err); 2737 - return; 2738 } 2739 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 2740 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
··· 2734 if (err) { 2735 mlx4_err(dev, "Failed to retrieve required operation: %d\n", 2736 err); 2737 + goto out; 2738 } 2739 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 2740 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
+55 -4
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 848 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 849 struct mlx5_cmd_msg *msg); 850 851 static void cmd_work_handler(struct work_struct *work) 852 { 853 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); ··· 869 int alloc_ret; 870 int cmd_mode; 871 872 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 873 down(sem); 874 if (!ent->page_queue) { ··· 922 923 /* Skip sending command to fw if internal error */ 924 if (pci_channel_offline(dev->pdev) || 925 - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 926 u8 status = 0; 927 u32 drv_synd; 928 ··· 989 struct mlx5_cmd *cmd = &dev->cmd; 990 int err; 991 992 if (cmd->mode == CMD_MODE_POLLING || ent->polling) { 993 wait_for_completion(&ent->done); 994 } else if (!wait_for_completion_timeout(&ent->done, timeout)) { ··· 1001 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 1002 } 1003 1004 err = ent->ret; 1005 1006 if (err == -ETIMEDOUT) { 1007 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 1008 mlx5_command_str(msg_to_opcode(ent->in)), 1009 msg_to_opcode(ent->in)); 1010 } ··· 1047 ent->token = token; 1048 ent->polling = force_polling; 1049 1050 if (!callback) 1051 init_completion(&ent->done); 1052 ··· 1067 err = wait_func(dev, ent); 1068 if (err == -ETIMEDOUT) 1069 goto out; 1070 1071 ds = ent->ts2 - ent->ts1; 1072 op = MLX5_GET(mbox_in, in->first.data, opcode); ··· 1415 mlx5_cmdif_debugfs_init(dev); 1416 } 1417 1418 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1419 { 1420 struct mlx5_cmd *cmd = &dev->cmd; ··· 1707 int err; 1708 u8 status = 0; 1709 u32 drv_synd; 1710 u8 token; 1711 1712 if (pci_channel_offline(dev->pdev) || 1713 - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1714 - u16 opcode = MLX5_GET(mbox_in, in, opcode); 1715 - 1716 err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); 1717 MLX5_SET(mbox_out, out, status, status); 1718 MLX5_SET(mbox_out, out, syndrome, drv_synd); ··· 1979 goto err_free_page; 1980 } 1981 1982 cmd->checksum_disabled = 1; 1983 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1984 cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; ··· 2017 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 2018 2019 cmd->mode = CMD_MODE_POLLING; 2020 2021 create_msg_cache(dev); 2022 ··· 2057 dma_pool_destroy(cmd->pool); 2058 } 2059 EXPORT_SYMBOL(mlx5_cmd_cleanup);
··· 848 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 849 struct mlx5_cmd_msg *msg); 850 851 + static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) 852 + { 853 + if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL) 854 + return true; 855 + 856 + return cmd->allowed_opcode == opcode; 857 + } 858 + 859 static void cmd_work_handler(struct work_struct *work) 860 { 861 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); ··· 861 int alloc_ret; 862 int cmd_mode; 863 864 + complete(&ent->handling); 865 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 866 down(sem); 867 if (!ent->page_queue) { ··· 913 914 /* Skip sending command to fw if internal error */ 915 if (pci_channel_offline(dev->pdev) || 916 + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || 917 + cmd->state != MLX5_CMDIF_STATE_UP || 918 + !opcode_allowed(&dev->cmd, ent->op)) { 919 u8 status = 0; 920 u32 drv_synd; 921 ··· 978 struct mlx5_cmd *cmd = &dev->cmd; 979 int err; 980 981 + if (!wait_for_completion_timeout(&ent->handling, timeout) && 982 + cancel_work_sync(&ent->work)) { 983 + ent->ret = -ECANCELED; 984 + goto out_err; 985 + } 986 if (cmd->mode == CMD_MODE_POLLING || ent->polling) { 987 wait_for_completion(&ent->done); 988 } else if (!wait_for_completion_timeout(&ent->done, timeout)) { ··· 985 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 986 } 987 988 + out_err: 989 err = ent->ret; 990 991 if (err == -ETIMEDOUT) { 992 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 993 + mlx5_command_str(msg_to_opcode(ent->in)), 994 + msg_to_opcode(ent->in)); 995 + } else if (err == -ECANCELED) { 996 + mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", 997 mlx5_command_str(msg_to_opcode(ent->in)), 998 msg_to_opcode(ent->in)); 999 } ··· 1026 ent->token = token; 1027 ent->polling = force_polling; 1028 1029 + init_completion(&ent->handling); 1030 if (!callback) 1031 init_completion(&ent->done); 1032 ··· 1045 err = wait_func(dev, ent); 1046 if (err == -ETIMEDOUT) 1047 goto out; 1048 + if (err == -ECANCELED) 1049 + goto out_free; 1050 1051 ds = ent->ts2 - ent->ts1; 1052 op = MLX5_GET(mbox_in, in->first.data, opcode); ··· 1391 mlx5_cmdif_debugfs_init(dev); 1392 } 1393 1394 + void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) 1395 + { 1396 + struct mlx5_cmd *cmd = &dev->cmd; 1397 + int i; 1398 + 1399 + for (i = 0; i < cmd->max_reg_cmds; i++) 1400 + down(&cmd->sem); 1401 + down(&cmd->pages_sem); 1402 + 1403 + cmd->allowed_opcode = opcode; 1404 + 1405 + up(&cmd->pages_sem); 1406 + for (i = 0; i < cmd->max_reg_cmds; i++) 1407 + up(&cmd->sem); 1408 + } 1409 + 1410 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1411 { 1412 struct mlx5_cmd *cmd = &dev->cmd; ··· 1667 int err; 1668 u8 status = 0; 1669 u32 drv_synd; 1670 + u16 opcode; 1671 u8 token; 1672 1673 + opcode = MLX5_GET(mbox_in, in, opcode); 1674 if (pci_channel_offline(dev->pdev) || 1675 + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || 1676 + dev->cmd.state != MLX5_CMDIF_STATE_UP || 1677 + !opcode_allowed(&dev->cmd, opcode)) { 1678 err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); 1679 MLX5_SET(mbox_out, out, status, status); 1680 MLX5_SET(mbox_out, out, syndrome, drv_synd); ··· 1937 goto err_free_page; 1938 } 1939 1940 + cmd->state = MLX5_CMDIF_STATE_DOWN; 1941 cmd->checksum_disabled = 1; 1942 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1943 cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; ··· 1974 mlx5_core_dbg(dev, "descriptor at dma 
0x%llx\n", (unsigned long long)(cmd->dma)); 1975 1976 cmd->mode = CMD_MODE_POLLING; 1977 + cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; 1978 1979 create_msg_cache(dev); 1980 ··· 2013 dma_pool_destroy(cmd->pool); 2014 } 2015 EXPORT_SYMBOL(mlx5_cmd_cleanup); 2016 + 2017 + void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 2018 + enum mlx5_cmdif_state cmdif_state) 2019 + { 2020 + dev->cmd.state = cmdif_state; 2021 + } 2022 + EXPORT_SYMBOL(mlx5_cmd_set_state);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 1121 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); 1122 1123 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); 1124 - void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); 1125 1126 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); 1127 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
··· 1121 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); 1122 1123 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); 1124 + void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); 1125 1126 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); 1127 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 699 struct netlink_ext_ack *extack) 700 { 701 struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); 702 struct flow_dissector_key_ct *mask, *key; 703 bool trk, est, untrk, unest, new; 704 u32 ctstate = 0, ctstate_mask = 0; ··· 707 u16 ct_state, ct_state_mask; 708 struct flow_match_ct match; 709 710 - if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) 711 return 0; 712 713 if (!ct_priv) { ··· 716 return -EOPNOTSUPP; 717 } 718 719 - flow_rule_match_ct(f->rule, &match); 720 721 key = match.key; 722 mask = match.mask;
··· 699 struct netlink_ext_ack *extack) 700 { 701 struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); 702 + struct flow_rule *rule = flow_cls_offload_flow_rule(f); 703 struct flow_dissector_key_ct *mask, *key; 704 bool trk, est, untrk, unest, new; 705 u32 ctstate = 0, ctstate_mask = 0; ··· 706 u16 ct_state, ct_state_mask; 707 struct flow_match_ct match; 708 709 + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) 710 return 0; 711 712 if (!ct_priv) { ··· 715 return -EOPNOTSUPP; 716 } 717 718 + flow_rule_match_ct(rule, &match); 719 720 key = match.key; 721 mask = match.mask;
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
··· 130 struct flow_cls_offload *f, 131 struct netlink_ext_ack *extack) 132 { 133 - if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) 134 return 0; 135 136 NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
··· 130 struct flow_cls_offload *f, 131 struct netlink_ext_ack *extack) 132 { 133 + struct flow_rule *rule = flow_cls_offload_flow_rule(f); 134 + 135 + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) 136 return 0; 137 138 NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
··· 69 struct mlx5e_ktls_offload_context_tx *tx_priv = 70 mlx5e_get_ktls_tx_priv_ctx(tls_ctx); 71 72 - mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); 73 mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); 74 kvfree(tx_priv); 75 } 76
··· 69 struct mlx5e_ktls_offload_context_tx *tx_priv = 70 mlx5e_get_ktls_tx_priv_ctx(tls_ctx); 71 72 mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); 73 + mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); 74 kvfree(tx_priv); 75 } 76
+7 -5
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 2717 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); 2718 } 2719 2720 - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) 2721 return; 2722 2723 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { ··· 3409 return err; 3410 } 3411 3412 - void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) 3413 { 3414 int i; 3415 3416 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 3417 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); 3418 3419 - if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) 3420 return; 3421 3422 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) ··· 5125 err_destroy_direct_tirs: 5126 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 5127 err_destroy_indirect_tirs: 5128 - mlx5e_destroy_indirect_tirs(priv, true); 5129 err_destroy_direct_rqts: 5130 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 5131 err_destroy_indirect_rqts: ··· 5144 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); 5145 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); 5146 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 5147 - mlx5e_destroy_indirect_tirs(priv, true); 5148 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 5149 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 5150 mlx5e_close_drop_rq(&priv->drop_rq);
··· 2717 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); 2718 } 2719 2720 + /* Verify inner tirs resources allocated */ 2721 + if (!priv->inner_indir_tir[0].tirn) 2722 return; 2723 2724 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { ··· 3408 return err; 3409 } 3410 3411 + void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) 3412 { 3413 int i; 3414 3415 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 3416 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); 3417 3418 + /* Verify inner tirs resources allocated */ 3419 + if (!priv->inner_indir_tir[0].tirn) 3420 return; 3421 3422 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) ··· 5123 err_destroy_direct_tirs: 5124 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 5125 err_destroy_indirect_tirs: 5126 + mlx5e_destroy_indirect_tirs(priv); 5127 err_destroy_direct_rqts: 5128 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 5129 err_destroy_indirect_rqts: ··· 5142 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); 5143 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); 5144 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 5145 + mlx5e_destroy_indirect_tirs(priv); 5146 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 5147 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 5148 mlx5e_close_drop_rq(&priv->drop_rq);
+4 -8
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 1484 return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep; 1485 } 1486 1487 - bool mlx5e_eswitch_rep(struct net_device *netdev) 1488 { 1489 - if (netdev->netdev_ops == &mlx5e_netdev_ops_rep || 1490 - netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep) 1491 - return true; 1492 - 1493 - return false; 1494 } 1495 1496 static void mlx5e_build_rep_params(struct net_device *netdev) ··· 1743 err_destroy_direct_tirs: 1744 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 1745 err_destroy_indirect_tirs: 1746 - mlx5e_destroy_indirect_tirs(priv, false); 1747 err_destroy_direct_rqts: 1748 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 1749 err_destroy_indirect_rqts: ··· 1761 mlx5e_destroy_rep_root_ft(priv); 1762 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); 1763 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 1764 - mlx5e_destroy_indirect_tirs(priv, false); 1765 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 1766 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 1767 mlx5e_close_drop_rq(&priv->drop_rq);
··· 1484 return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep; 1485 } 1486 1487 + bool mlx5e_eswitch_vf_rep(struct net_device *netdev) 1488 { 1489 + return netdev->netdev_ops == &mlx5e_netdev_ops_rep; 1490 } 1491 1492 static void mlx5e_build_rep_params(struct net_device *netdev) ··· 1747 err_destroy_direct_tirs: 1748 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 1749 err_destroy_indirect_tirs: 1750 + mlx5e_destroy_indirect_tirs(priv); 1751 err_destroy_direct_rqts: 1752 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 1753 err_destroy_indirect_rqts: ··· 1765 mlx5e_destroy_rep_root_ft(priv); 1766 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); 1767 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 1768 + mlx5e_destroy_indirect_tirs(priv); 1769 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 1770 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 1771 mlx5e_close_drop_rq(&priv->drop_rq);
+6 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
··· 210 211 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); 212 213 - bool mlx5e_eswitch_rep(struct net_device *netdev); 214 bool mlx5e_eswitch_uplink_rep(struct net_device *netdev); 215 216 #else /* CONFIG_MLX5_ESWITCH */ 217 static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
··· 210 211 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); 212 213 + bool mlx5e_eswitch_vf_rep(struct net_device *netdev); 214 bool mlx5e_eswitch_uplink_rep(struct net_device *netdev); 215 + static inline bool mlx5e_eswitch_rep(struct net_device *netdev) 216 + { 217 + return mlx5e_eswitch_vf_rep(netdev) || 218 + mlx5e_eswitch_uplink_rep(netdev); 219 + } 220 221 #else /* CONFIG_MLX5_ESWITCH */ 222 static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
+33 -7
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 3073 return true; 3074 } 3075 3076 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3077 { 3078 struct mlx5_core_dev *fmdev, *pmdev; ··· 3296 } 3297 3298 3299 - static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, 3300 struct net_device *peer_netdev) 3301 { 3302 struct mlx5e_priv *peer_priv; ··· 3304 peer_priv = netdev_priv(peer_netdev); 3305 3306 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && 3307 - mlx5e_eswitch_rep(priv->netdev) && 3308 - mlx5e_eswitch_rep(peer_netdev) && 3309 same_hw_devs(priv, peer_priv)); 3310 } 3311 - 3312 - 3313 3314 bool mlx5e_encap_take(struct mlx5e_encap_entry *e) 3315 { ··· 3578 return err; 3579 } 3580 3581 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, 3582 struct net_device *out_dev) 3583 { 3584 - if (is_merged_eswitch_dev(priv, out_dev)) 3585 return true; 3586 3587 return mlx5e_eswitch_rep(out_dev) && 3588 - same_hw_devs(priv, netdev_priv(out_dev)); 3589 } 3590 3591 static bool is_duplicated_output_device(struct net_device *dev,
··· 3073 return true; 3074 } 3075 3076 + static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3077 + { 3078 + return priv->mdev == peer_priv->mdev; 3079 + } 3080 + 3081 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3082 { 3083 struct mlx5_core_dev *fmdev, *pmdev; ··· 3291 } 3292 3293 3294 + static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, 3295 struct net_device *peer_netdev) 3296 { 3297 struct mlx5e_priv *peer_priv; ··· 3299 peer_priv = netdev_priv(peer_netdev); 3300 3301 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && 3302 + mlx5e_eswitch_vf_rep(priv->netdev) && 3303 + mlx5e_eswitch_vf_rep(peer_netdev) && 3304 same_hw_devs(priv, peer_priv)); 3305 } 3306 3307 bool mlx5e_encap_take(struct mlx5e_encap_entry *e) 3308 { ··· 3575 return err; 3576 } 3577 3578 + static bool same_hw_reps(struct mlx5e_priv *priv, 3579 + struct net_device *peer_netdev) 3580 + { 3581 + struct mlx5e_priv *peer_priv; 3582 + 3583 + peer_priv = netdev_priv(peer_netdev); 3584 + 3585 + return mlx5e_eswitch_rep(priv->netdev) && 3586 + mlx5e_eswitch_rep(peer_netdev) && 3587 + same_hw_devs(priv, peer_priv); 3588 + } 3589 + 3590 + static bool is_lag_dev(struct mlx5e_priv *priv, 3591 + struct net_device *peer_netdev) 3592 + { 3593 + return ((mlx5_lag_is_sriov(priv->mdev) || 3594 + mlx5_lag_is_multipath(priv->mdev)) && 3595 + same_hw_reps(priv, peer_netdev)); 3596 + } 3597 + 3598 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, 3599 struct net_device *out_dev) 3600 { 3601 + if (is_merged_eswitch_vfs(priv, out_dev)) 3602 + return true; 3603 + 3604 + if (is_lag_dev(priv, out_dev)) 3605 return true; 3606 3607 return mlx5e_eswitch_rep(out_dev) && 3608 + same_port_devs(priv, netdev_priv(out_dev)); 3609 } 3610 3611 static bool is_duplicated_output_device(struct net_device *dev,
+6 -3
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 537 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) 538 { 539 struct mlx5e_tx_wqe_info *wi; 540 struct sk_buff *skb; 541 - u32 dma_fifo_cc; 542 - u16 sqcc; 543 - u16 ci; 544 int i; 545 546 sqcc = sq->cc; ··· 564 } 565 566 dev_kfree_skb_any(skb); 567 sqcc += wi->num_wqebbs; 568 } 569 570 sq->dma_fifo_cc = dma_fifo_cc; 571 sq->cc = sqcc; 572 } 573 574 #ifdef CONFIG_MLX5_CORE_IPOIB
··· 537 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) 538 { 539 struct mlx5e_tx_wqe_info *wi; 540 + u32 dma_fifo_cc, nbytes = 0; 541 + u16 ci, sqcc, npkts = 0; 542 struct sk_buff *skb; 543 int i; 544 545 sqcc = sq->cc; ··· 565 } 566 567 dev_kfree_skb_any(skb); 568 + npkts++; 569 + nbytes += wi->num_bytes; 570 sqcc += wi->num_wqebbs; 571 } 572 573 sq->dma_fifo_cc = dma_fifo_cc; 574 sq->cc = sqcc; 575 + 576 + netdev_tx_completed_queue(sq->txq, npkts, nbytes); 577 } 578 579 #ifdef CONFIG_MLX5_CORE_IPOIB
+3
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 611 .nent = MLX5_NUM_CMD_EQE, 612 .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, 613 }; 614 err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd"); 615 if (err) 616 goto err1; 617 618 mlx5_cmd_use_events(dev); 619 620 param = (struct mlx5_eq_param) { 621 .irq_index = 0, ··· 647 mlx5_cmd_use_polling(dev); 648 cleanup_async_eq(dev, &table->cmd_eq, "cmd"); 649 err1: 650 mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); 651 return err; 652 }
··· 611 .nent = MLX5_NUM_CMD_EQE, 612 .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, 613 }; 614 + mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ); 615 err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd"); 616 if (err) 617 goto err1; 618 619 mlx5_cmd_use_events(dev); 620 + mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); 621 622 param = (struct mlx5_eq_param) { 623 .irq_index = 0, ··· 645 mlx5_cmd_use_polling(dev); 646 cleanup_async_eq(dev, &table->cmd_eq, "cmd"); 647 err1: 648 + mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); 649 mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); 650 return err; 651 }
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/events.c
··· 346 events->dev = dev; 347 dev->priv.events = events; 348 events->wq = create_singlethread_workqueue("mlx5_events"); 349 - if (!events->wq) 350 return -ENOMEM; 351 INIT_WORK(&events->pcie_core_work, mlx5_pcie_event); 352 353 return 0;
··· 346 events->dev = dev; 347 dev->priv.events = events; 348 events->wq = create_singlethread_workqueue("mlx5_events"); 349 + if (!events->wq) { 350 + kfree(events); 351 return -ENOMEM; 352 + } 353 INIT_WORK(&events->pcie_core_work, mlx5_pcie_event); 354 355 return 0;
+19 -11
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 344 if (node->del_hw_func) 345 node->del_hw_func(node); 346 if (parent_node) { 347 - /* Only root namespace doesn't have parent and we just 348 - * need to free its node. 349 - */ 350 down_write_ref_node(parent_node, locked); 351 list_del_init(&node->list); 352 - if (node->del_sw_func) 353 - node->del_sw_func(node); 354 - up_write_ref_node(parent_node, locked); 355 - } else { 356 - kfree(node); 357 } 358 node = NULL; 359 } 360 if (!node && parent_node) ··· 463 fs_get_obj(ft, node); 464 465 rhltable_destroy(&ft->fgs_hash); 466 - fs_get_obj(prio, ft->node.parent); 467 - prio->num_ft--; 468 kfree(ft); 469 } 470 ··· 2348 return 0; 2349 } 2350 2351 static struct mlx5_flow_root_namespace 2352 *create_root_ns(struct mlx5_flow_steering *steering, 2353 enum fs_flow_table_type table_type) ··· 2385 ns = &root_ns->ns; 2386 fs_init_namespace(ns); 2387 mutex_init(&root_ns->chain_lock); 2388 - tree_init_node(&ns->node, NULL, NULL); 2389 tree_add_node(&ns->node, NULL); 2390 2391 return root_ns;
··· 344 if (node->del_hw_func) 345 node->del_hw_func(node); 346 if (parent_node) { 347 down_write_ref_node(parent_node, locked); 348 list_del_init(&node->list); 349 } 350 + node->del_sw_func(node); 351 + if (parent_node) 352 + up_write_ref_node(parent_node, locked); 353 node = NULL; 354 } 355 if (!node && parent_node) ··· 468 fs_get_obj(ft, node); 469 470 rhltable_destroy(&ft->fgs_hash); 471 + if (ft->node.parent) { 472 + fs_get_obj(prio, ft->node.parent); 473 + prio->num_ft--; 474 + } 475 kfree(ft); 476 } 477 ··· 2351 return 0; 2352 } 2353 2354 + static void del_sw_root_ns(struct fs_node *node) 2355 + { 2356 + struct mlx5_flow_root_namespace *root_ns; 2357 + struct mlx5_flow_namespace *ns; 2358 + 2359 + fs_get_obj(ns, node); 2360 + root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns); 2361 + mutex_destroy(&root_ns->chain_lock); 2362 + kfree(node); 2363 + } 2364 + 2365 static struct mlx5_flow_root_namespace 2366 *create_root_ns(struct mlx5_flow_steering *steering, 2367 enum fs_flow_table_type table_type) ··· 2377 ns = &root_ns->ns; 2378 fs_init_namespace(ns); 2379 mutex_init(&root_ns->chain_lock); 2380 + tree_init_node(&ns->node, NULL, del_sw_root_ns); 2381 tree_add_node(&ns->node, NULL); 2382 2383 return root_ns;
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 396 err_destroy_direct_tirs: 397 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 398 err_destroy_indirect_tirs: 399 - mlx5e_destroy_indirect_tirs(priv, true); 400 err_destroy_direct_rqts: 401 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 402 err_destroy_indirect_rqts: ··· 412 { 413 mlx5i_destroy_flow_steering(priv); 414 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 415 - mlx5e_destroy_indirect_tirs(priv, true); 416 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 417 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 418 mlx5e_close_drop_rq(&priv->drop_rq);
··· 396 err_destroy_direct_tirs: 397 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 398 err_destroy_indirect_tirs: 399 + mlx5e_destroy_indirect_tirs(priv); 400 err_destroy_direct_rqts: 401 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 402 err_destroy_indirect_rqts: ··· 412 { 413 mlx5i_destroy_flow_steering(priv); 414 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 415 + mlx5e_destroy_indirect_tirs(priv); 416 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 417 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 418 mlx5e_close_drop_rq(&priv->drop_rq);
+6 -1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 965 goto err_cmd_cleanup; 966 } 967 968 err = mlx5_core_enable_hca(dev, 0); 969 if (err) { 970 mlx5_core_err(dev, "enable hca failed\n"); ··· 1028 err_disable_hca: 1029 mlx5_core_disable_hca(dev, 0); 1030 err_cmd_cleanup: 1031 mlx5_cmd_cleanup(dev); 1032 1033 return err; ··· 1046 } 1047 mlx5_reclaim_startup_pages(dev); 1048 mlx5_core_disable_hca(dev, 0); 1049 mlx5_cmd_cleanup(dev); 1050 1051 return 0; ··· 1195 1196 err = mlx5_function_setup(dev, boot); 1197 if (err) 1198 - goto out; 1199 1200 if (boot) { 1201 err = mlx5_init_once(dev); ··· 1233 mlx5_cleanup_once(dev); 1234 function_teardown: 1235 mlx5_function_teardown(dev, boot); 1236 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 1237 mutex_unlock(&dev->intf_state_mutex); 1238
··· 965 goto err_cmd_cleanup; 966 } 967 968 + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); 969 + 970 err = mlx5_core_enable_hca(dev, 0); 971 if (err) { 972 mlx5_core_err(dev, "enable hca failed\n"); ··· 1026 err_disable_hca: 1027 mlx5_core_disable_hca(dev, 0); 1028 err_cmd_cleanup: 1029 + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); 1030 mlx5_cmd_cleanup(dev); 1031 1032 return err; ··· 1043 } 1044 mlx5_reclaim_startup_pages(dev); 1045 mlx5_core_disable_hca(dev, 0); 1046 + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); 1047 mlx5_cmd_cleanup(dev); 1048 1049 return 0; ··· 1191 1192 err = mlx5_function_setup(dev, boot); 1193 if (err) 1194 + goto err_function; 1195 1196 if (boot) { 1197 err = mlx5_init_once(dev); ··· 1229 mlx5_cleanup_once(dev); 1230 function_teardown: 1231 mlx5_function_teardown(dev, boot); 1232 + err_function: 1233 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 1234 mutex_unlock(&dev->intf_state_mutex); 1235
+12 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 3986 mlxsw_sp_port_remove(mlxsw_sp, i); 3987 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3988 kfree(mlxsw_sp->ports); 3989 } 3990 3991 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) ··· 4023 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4024 err_cpu_port_create: 4025 kfree(mlxsw_sp->ports); 4026 return err; 4027 } 4028 ··· 4145 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 4146 } 4147 4148 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 4149 unsigned int count, 4150 struct netlink_ext_ack *extack) ··· 4166 int i; 4167 int err; 4168 4169 - mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4170 if (!mlxsw_sp_port) { 4171 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4172 local_port); ··· 4261 int offset; 4262 int i; 4263 4264 - mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4265 if (!mlxsw_sp_port) { 4266 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4267 local_port);
··· 3986 mlxsw_sp_port_remove(mlxsw_sp, i); 3987 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3988 kfree(mlxsw_sp->ports); 3989 + mlxsw_sp->ports = NULL; 3990 } 3991 3992 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) ··· 4022 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4023 err_cpu_port_create: 4024 kfree(mlxsw_sp->ports); 4025 + mlxsw_sp->ports = NULL; 4026 return err; 4027 } 4028 ··· 4143 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 4144 } 4145 4146 + static struct mlxsw_sp_port * 4147 + mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) 4148 + { 4149 + if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 4150 + return mlxsw_sp->ports[local_port]; 4151 + return NULL; 4152 + } 4153 + 4154 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 4155 unsigned int count, 4156 struct netlink_ext_ack *extack) ··· 4156 int i; 4157 int err; 4158 4159 + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 4160 if (!mlxsw_sp_port) { 4161 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4162 local_port); ··· 4251 int offset; 4252 int i; 4253 4254 + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 4255 if (!mlxsw_sp_port) { 4256 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4257 local_port);
+8
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
··· 1259 if (mlxsw_sx_port_created(mlxsw_sx, i)) 1260 mlxsw_sx_port_remove(mlxsw_sx, i); 1261 kfree(mlxsw_sx->ports); 1262 } 1263 1264 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) ··· 1294 if (mlxsw_sx_port_created(mlxsw_sx, i)) 1295 mlxsw_sx_port_remove(mlxsw_sx, i); 1296 kfree(mlxsw_sx->ports); 1297 return err; 1298 } 1299 ··· 1377 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); 1378 u8 module, width; 1379 int err; 1380 1381 if (new_type == DEVLINK_PORT_TYPE_AUTO) 1382 return -EOPNOTSUPP;
··· 1259 if (mlxsw_sx_port_created(mlxsw_sx, i)) 1260 mlxsw_sx_port_remove(mlxsw_sx, i); 1261 kfree(mlxsw_sx->ports); 1262 + mlxsw_sx->ports = NULL; 1263 } 1264 1265 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) ··· 1293 if (mlxsw_sx_port_created(mlxsw_sx, i)) 1294 mlxsw_sx_port_remove(mlxsw_sx, i); 1295 kfree(mlxsw_sx->ports); 1296 + mlxsw_sx->ports = NULL; 1297 return err; 1298 } 1299 ··· 1375 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); 1376 u8 module, width; 1377 int err; 1378 + 1379 + if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) { 1380 + dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n", 1381 + local_port); 1382 + return -EINVAL; 1383 + } 1384 1385 if (new_type == DEVLINK_PORT_TYPE_AUTO) 1386 return -EOPNOTSUPP;
+1 -1
drivers/net/ethernet/mscc/ocelot.c
··· 1467 unsigned long ageing_clock_t) 1468 { 1469 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); 1470 - u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; 1471 1472 ocelot_set_ageing_time(ocelot, ageing_time); 1473 }
··· 1467 unsigned long ageing_clock_t) 1468 { 1469 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); 1470 + u32 ageing_time = jiffies_to_msecs(ageing_jiffies); 1471 1472 ocelot_set_ageing_time(ocelot, ageing_time); 1473 }
+15 -2
drivers/net/ethernet/realtek/r8169_main.c
··· 1050 RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; 1051 } 1052 1053 DECLARE_RTL_COND(rtl_eriar_cond) 1054 { 1055 return RTL_R32(tp, ERIAR) & ERIAR_FLAG; ··· 1065 static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, 1066 u32 val, int type) 1067 { 1068 BUG_ON((addr & 3) || (mask == 0)); 1069 RTL_W32(tp, ERIDR, val); 1070 - RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr); 1071 1072 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); 1073 } ··· 1083 1084 static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) 1085 { 1086 - RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); 1087 1088 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? 1089 RTL_R32(tp, ERIDR) : ~0;
··· 1050 RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; 1051 } 1052 1053 + static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) 1054 + { 1055 + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ 1056 + if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB) 1057 + *cmd |= 0x7f0 << 18; 1058 + } 1059 + 1060 DECLARE_RTL_COND(rtl_eriar_cond) 1061 { 1062 return RTL_R32(tp, ERIAR) & ERIAR_FLAG; ··· 1058 static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, 1059 u32 val, int type) 1060 { 1061 + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; 1062 + 1063 BUG_ON((addr & 3) || (mask == 0)); 1064 RTL_W32(tp, ERIDR, val); 1065 + r8168fp_adjust_ocp_cmd(tp, &cmd, type); 1066 + RTL_W32(tp, ERIAR, cmd); 1067 1068 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); 1069 } ··· 1073 1074 static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) 1075 { 1076 + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; 1077 + 1078 + r8168fp_adjust_ocp_cmd(tp, &cmd, type); 1079 + RTL_W32(tp, ERIAR, cmd); 1080 1081 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? 1082 RTL_R32(tp, ERIDR) : ~0;
+4 -4
drivers/net/ethernet/sgi/ioc3-eth.c
··· 848 ip = netdev_priv(dev); 849 ip->dma_dev = pdev->dev.parent; 850 ip->regs = devm_platform_ioremap_resource(pdev, 0); 851 - if (!ip->regs) { 852 - err = -ENOMEM; 853 goto out_free; 854 } 855 856 ip->ssram = devm_platform_ioremap_resource(pdev, 1); 857 - if (!ip->ssram) { 858 - err = -ENOMEM; 859 goto out_free; 860 } 861
··· 848 ip = netdev_priv(dev); 849 ip->dma_dev = pdev->dev.parent; 850 ip->regs = devm_platform_ioremap_resource(pdev, 0); 851 + if (IS_ERR(ip->regs)) { 852 + err = PTR_ERR(ip->regs); 853 goto out_free; 854 } 855 856 ip->ssram = devm_platform_ioremap_resource(pdev, 1); 857 + if (IS_ERR(ip->ssram)) { 858 + err = PTR_ERR(ip->ssram); 859 goto out_free; 860 } 861
+5 -4
drivers/net/ethernet/smsc/smsc911x.c
··· 2493 2494 retval = smsc911x_init(dev); 2495 if (retval < 0) 2496 - goto out_disable_resources; 2497 2498 netif_carrier_off(dev); 2499 2500 retval = smsc911x_mii_init(pdev, dev); 2501 if (retval) { 2502 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); 2503 - goto out_disable_resources; 2504 } 2505 2506 retval = register_netdev(dev); 2507 if (retval) { 2508 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2509 - goto out_disable_resources; 2510 } else { 2511 SMSC_TRACE(pdata, probe, 2512 "Network interface: \"%s\"", dev->name); ··· 2547 2548 return 0; 2549 2550 - out_disable_resources: 2551 pm_runtime_put(&pdev->dev); 2552 pm_runtime_disable(&pdev->dev); 2553 (void)smsc911x_disable_resources(pdev); 2554 out_enable_resources_fail: 2555 smsc911x_free_resources(pdev);
··· 2493 2494 retval = smsc911x_init(dev); 2495 if (retval < 0) 2496 + goto out_init_fail; 2497 2498 netif_carrier_off(dev); 2499 2500 retval = smsc911x_mii_init(pdev, dev); 2501 if (retval) { 2502 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); 2503 + goto out_init_fail; 2504 } 2505 2506 retval = register_netdev(dev); 2507 if (retval) { 2508 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2509 + goto out_init_fail; 2510 } else { 2511 SMSC_TRACE(pdata, probe, 2512 "Network interface: \"%s\"", dev->name); ··· 2547 2548 return 0; 2549 2550 + out_init_fail: 2551 pm_runtime_put(&pdev->dev); 2552 pm_runtime_disable(&pdev->dev); 2553 + out_disable_resources: 2554 (void)smsc911x_disable_resources(pdev); 2555 out_enable_resources_fail: 2556 smsc911x_free_resources(pdev);
+13
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
··· 319 /* Enable PTP clock */ 320 regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); 321 val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); 322 regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); 323 324 if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
··· 319 /* Enable PTP clock */ 320 regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); 321 val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); 322 + switch (gmac->phy_mode) { 323 + case PHY_INTERFACE_MODE_RGMII: 324 + val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | 325 + NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); 326 + break; 327 + case PHY_INTERFACE_MODE_SGMII: 328 + val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | 329 + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); 330 + break; 331 + default: 332 + /* We don't get here; the switch above will have errored out */ 333 + unreachable(); 334 + } 335 regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); 336 337 if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+2 -2
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 5190 return ret; 5191 } 5192 5193 - netif_device_attach(ndev); 5194 - 5195 mutex_lock(&priv->lock); 5196 5197 stmmac_reset_queues_param(priv); ··· 5215 } 5216 5217 phylink_mac_change(priv->phylink, true); 5218 5219 return 0; 5220 }
··· 5190 return ret; 5191 } 5192 5193 mutex_lock(&priv->lock); 5194 5195 stmmac_reset_queues_param(priv); ··· 5217 } 5218 5219 phylink_mac_change(priv->phylink, true); 5220 + 5221 + netif_device_attach(ndev); 5222 5223 return 0; 5224 }
+1 -2
drivers/net/ethernet/sun/cassini.c
··· 4963 cas_cacheline_size)) { 4964 dev_err(&pdev->dev, "Could not set PCI cache " 4965 "line size\n"); 4966 - goto err_write_cacheline; 4967 } 4968 } 4969 #endif ··· 5136 err_out_free_res: 5137 pci_release_regions(pdev); 5138 5139 - err_write_cacheline: 5140 /* Try to restore it in case the error occurred after we 5141 * set it. 5142 */
··· 4963 cas_cacheline_size)) { 4964 dev_err(&pdev->dev, "Could not set PCI cache " 4965 "line size\n"); 4966 + goto err_out_free_res; 4967 } 4968 } 4969 #endif ··· 5136 err_out_free_res: 5137 pci_release_regions(pdev); 5138 5139 /* Try to restore it in case the error occurred after we 5140 * set it. 5141 */
+2 -1
drivers/net/ethernet/ti/am65-cpsw-nuss.c
··· 1895 ale_params.nu_switch_ale = true; 1896 1897 common->ale = cpsw_ale_create(&ale_params); 1898 - if (!common->ale) { 1899 dev_err(dev, "error initializing ale engine\n"); 1900 goto err_of_clear; 1901 } 1902
··· 1895 ale_params.nu_switch_ale = true; 1896 1897 common->ale = cpsw_ale_create(&ale_params); 1898 + if (IS_ERR(common->ale)) { 1899 dev_err(dev, "error initializing ale engine\n"); 1900 + ret = PTR_ERR(common->ale); 1901 goto err_of_clear; 1902 } 1903
+4
drivers/net/ethernet/ti/cpsw.c
··· 1753 struct cpsw_common *cpsw = dev_get_drvdata(dev); 1754 int i; 1755 1756 for (i = 0; i < cpsw->data.slaves; i++) 1757 if (cpsw->slaves[i].ndev) 1758 if (netif_running(cpsw->slaves[i].ndev)) 1759 cpsw_ndo_stop(cpsw->slaves[i].ndev); 1760 1761 /* Select sleep pin state */ 1762 pinctrl_pm_select_sleep_state(dev);
··· 1753 struct cpsw_common *cpsw = dev_get_drvdata(dev); 1754 int i; 1755 1756 + rtnl_lock(); 1757 + 1758 for (i = 0; i < cpsw->data.slaves; i++) 1759 if (cpsw->slaves[i].ndev) 1760 if (netif_running(cpsw->slaves[i].ndev)) 1761 cpsw_ndo_stop(cpsw->slaves[i].ndev); 1762 + 1763 + rtnl_unlock(); 1764 1765 /* Select sleep pin state */ 1766 pinctrl_pm_select_sleep_state(dev);
+1 -1
drivers/net/ethernet/ti/cpsw_ale.c
··· 955 956 ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL); 957 if (!ale) 958 - return NULL; 959 960 ale->p0_untag_vid_mask = 961 devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
··· 955 956 ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL); 957 if (!ale) 958 + return ERR_PTR(-ENOMEM); 959 960 ale->p0_untag_vid_mask = 961 devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
+2 -2
drivers/net/ethernet/ti/cpsw_priv.c
··· 490 ale_params.ale_ports = CPSW_ALE_PORTS_NUM; 491 492 cpsw->ale = cpsw_ale_create(&ale_params); 493 - if (!cpsw->ale) { 494 dev_err(dev, "error initializing ale engine\n"); 495 - return -ENODEV; 496 } 497 498 dma_params.dev = dev;
··· 490 ale_params.ale_ports = CPSW_ALE_PORTS_NUM; 491 492 cpsw->ale = cpsw_ale_create(&ale_params); 493 + if (IS_ERR(cpsw->ale)) { 494 dev_err(dev, "error initializing ale engine\n"); 495 + return PTR_ERR(cpsw->ale); 496 } 497 498 dma_params.dev = dev;
+2 -2
drivers/net/ethernet/ti/netcp_ethss.c
··· 3704 ale_params.nu_switch_ale = true; 3705 } 3706 gbe_dev->ale = cpsw_ale_create(&ale_params); 3707 - if (!gbe_dev->ale) { 3708 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 3709 - ret = -ENODEV; 3710 goto free_sec_ports; 3711 } else { 3712 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
··· 3704 ale_params.nu_switch_ale = true; 3705 } 3706 gbe_dev->ale = cpsw_ale_create(&ale_params); 3707 + if (IS_ERR(gbe_dev->ale)) { 3708 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 3709 + ret = PTR_ERR(gbe_dev->ale); 3710 goto free_sec_ports; 3711 } else { 3712 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
+1
drivers/net/ipa/gsi.c
··· 1392 while (count < budget) { 1393 struct gsi_trans *trans; 1394 1395 trans = gsi_channel_poll_one(channel); 1396 if (!trans) 1397 break;
··· 1392 while (count < budget) { 1393 struct gsi_trans *trans; 1394 1395 + count++; 1396 trans = gsi_channel_poll_one(channel); 1397 if (!trans) 1398 break;
+1 -2
drivers/net/netdevsim/dev.c
··· 858 return -EINVAL; 859 860 cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1]; 861 - *p_drops = *cnt; 862 - *cnt += jiffies % 64; 863 864 return 0; 865 }
··· 858 return -EINVAL; 859 860 cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1]; 861 + *p_drops = (*cnt)++; 862 863 return 0; 864 }
+2
drivers/net/phy/mscc/mscc.h
··· 354 u64 *stats; 355 int nstats; 356 bool pkg_init; 357 /* For multiple port PHYs; the MDIO address of the base PHY in the 358 * package. 359 */
··· 354 u64 *stats; 355 int nstats; 356 bool pkg_init; 357 + /* PHY address within the package. */ 358 + u8 addr; 359 /* For multiple port PHYs; the MDIO address of the base PHY in the 360 * package. 361 */
+3 -3
drivers/net/phy/mscc/mscc_mac.h
··· 152 #define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0) 153 #define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4) 154 155 - #define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2 156 - #define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x) 157 - #define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0) 158 159 #endif /* _MSCC_PHY_LINE_MAC_H_ */
··· 152 #define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0) 153 #define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4) 154 155 + #define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2 156 + #define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x) 157 + #define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0) 158 159 #endif /* _MSCC_PHY_LINE_MAC_H_ */
+10 -6
drivers/net/phy/mscc/mscc_macsec.c
··· 316 /* Must be called with mdio_lock taken */ 317 static int __vsc8584_macsec_init(struct phy_device *phydev) 318 { 319 u32 val; 320 321 vsc8584_macsec_block_init(phydev, MACSEC_INGR); ··· 353 val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA; 354 vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val); 355 356 - val = vsc8584_macsec_phy_read(phydev, IP_1588, 357 - MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL); 358 - val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M; 359 - val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4); 360 - vsc8584_macsec_phy_write(phydev, IP_1588, 361 - MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val); 362 363 return 0; 364 }
··· 316 /* Must be called with mdio_lock taken */ 317 static int __vsc8584_macsec_init(struct phy_device *phydev) 318 { 319 + struct vsc8531_private *priv = phydev->priv; 320 + enum macsec_bank proc_bank; 321 u32 val; 322 323 vsc8584_macsec_block_init(phydev, MACSEC_INGR); ··· 351 val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA; 352 vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val); 353 354 + proc_bank = (priv->addr < 2) ? PROC_0 : PROC_2; 355 + 356 + val = vsc8584_macsec_phy_read(phydev, proc_bank, 357 + MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL); 358 + val &= ~MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M; 359 + val |= MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4); 360 + vsc8584_macsec_phy_write(phydev, proc_bank, 361 + MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL, val); 362 363 return 0; 364 }
+2 -1
drivers/net/phy/mscc/mscc_macsec.h
··· 64 FC_BUFFER = 0x04, 65 HOST_MAC = 0x05, 66 LINE_MAC = 0x06, 67 - IP_1588 = 0x0e, 68 MACSEC_INGR = 0x38, 69 MACSEC_EGR = 0x3c, 70 };
··· 64 FC_BUFFER = 0x04, 65 HOST_MAC = 0x05, 66 LINE_MAC = 0x06, 67 + PROC_0 = 0x0e, 68 + PROC_2 = 0x0f, 69 MACSEC_INGR = 0x38, 70 MACSEC_EGR = 0x3c, 71 };
+4
drivers/net/phy/mscc/mscc_main.c
··· 1347 else 1348 vsc8531->base_addr = phydev->mdio.addr - addr; 1349 1350 /* Some parts of the init sequence are identical for every PHY in the 1351 * package. Some parts are modifying the GPIO register bank which is a 1352 * set of registers that are affecting all PHYs, a few resetting the ··· 1772 vsc8531->base_addr = phydev->mdio.addr + addr; 1773 else 1774 vsc8531->base_addr = phydev->mdio.addr - addr; 1775 1776 /* Some parts of the init sequence are identical for every PHY in the 1777 * package. Some parts are modifying the GPIO register bank which is a
··· 1347 else 1348 vsc8531->base_addr = phydev->mdio.addr - addr; 1349 1350 + vsc8531->addr = addr; 1351 + 1352 /* Some parts of the init sequence are identical for every PHY in the 1353 * package. Some parts are modifying the GPIO register bank which is a 1354 * set of registers that are affecting all PHYs, a few resetting the ··· 1770 vsc8531->base_addr = phydev->mdio.addr + addr; 1771 else 1772 vsc8531->base_addr = phydev->mdio.addr - addr; 1773 + 1774 + vsc8531->addr = addr; 1775 1776 /* Some parts of the init sequence are identical for every PHY in the 1777 * package. Some parts are modifying the GPIO register bank which is a
+2 -2
drivers/net/phy/phy_device.c
··· 1233 const struct sfp_upstream_ops *ops) 1234 { 1235 struct sfp_bus *bus; 1236 - int ret; 1237 1238 if (phydev->mdio.dev.fwnode) { 1239 bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode); ··· 1245 ret = sfp_bus_add_upstream(bus, phydev, ops); 1246 sfp_bus_put(bus); 1247 } 1248 - return 0; 1249 } 1250 EXPORT_SYMBOL(phy_sfp_probe); 1251
··· 1233 const struct sfp_upstream_ops *ops) 1234 { 1235 struct sfp_bus *bus; 1236 + int ret = 0; 1237 1238 if (phydev->mdio.dev.fwnode) { 1239 bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode); ··· 1245 ret = sfp_bus_add_upstream(bus, phydev, ops); 1246 sfp_bus_put(bus); 1247 } 1248 + return ret; 1249 } 1250 EXPORT_SYMBOL(phy_sfp_probe); 1251
+9 -2
drivers/net/usb/cdc_ether.c
··· 815 .driver_info = 0, 816 }, 817 818 - /* Microsoft Surface 3 dock (based on Realtek RTL8153) */ 819 { 820 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, 821 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 822 .driver_info = 0, 823 }, 824 825 - /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ 826 { 827 USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, 828 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
··· 815 .driver_info = 0, 816 }, 817 818 + /* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */ 819 { 820 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, 821 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 822 .driver_info = 0, 823 }, 824 825 + /* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */ 826 + { 827 + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM, 828 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 829 + .driver_info = 0, 830 + }, 831 + 832 + /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ 833 { 834 USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, 835 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+1
drivers/net/usb/r8152.c
··· 6880 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, 6881 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, 6882 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, 6883 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 6884 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 6885 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
··· 6880 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, 6881 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, 6882 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, 6883 + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, 6884 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 6885 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 6886 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
+1 -1
drivers/net/wireguard/messages.h
··· 32 }; 33 34 enum counter_values { 35 - COUNTER_BITS_TOTAL = 2048, 36 COUNTER_REDUNDANT_BITS = BITS_PER_LONG, 37 COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS 38 };
··· 32 }; 33 34 enum counter_values { 35 + COUNTER_BITS_TOTAL = 8192, 36 COUNTER_REDUNDANT_BITS = BITS_PER_LONG, 37 COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS 38 };
+9 -13
drivers/net/wireguard/noise.c
··· 104 105 if (unlikely(!keypair)) 106 return NULL; 107 keypair->internal_id = atomic64_inc_return(&keypair_counter); 108 keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; 109 keypair->entry.peer = peer; ··· 359 memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); 360 } 361 362 - static void symmetric_key_init(struct noise_symmetric_key *key) 363 - { 364 - spin_lock_init(&key->counter.receive.lock); 365 - atomic64_set(&key->counter.counter, 0); 366 - memset(key->counter.receive.backtrack, 0, 367 - sizeof(key->counter.receive.backtrack)); 368 - key->birthdate = ktime_get_coarse_boottime_ns(); 369 - key->is_valid = true; 370 - } 371 - 372 static void derive_keys(struct noise_symmetric_key *first_dst, 373 struct noise_symmetric_key *second_dst, 374 const u8 chaining_key[NOISE_HASH_LEN]) 375 { 376 kdf(first_dst->key, second_dst->key, NULL, NULL, 377 NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, 378 chaining_key); 379 - symmetric_key_init(first_dst); 380 - symmetric_key_init(second_dst); 381 } 382 383 static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], ··· 707 u8 e[NOISE_PUBLIC_KEY_LEN]; 708 u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; 709 u8 static_private[NOISE_PUBLIC_KEY_LEN]; 710 711 down_read(&wg->static_identity.lock); 712 ··· 726 memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); 727 memcpy(ephemeral_private, handshake->ephemeral_private, 728 NOISE_PUBLIC_KEY_LEN); 729 up_read(&handshake->lock); 730 731 if (state != HANDSHAKE_CREATED_INITIATION) ··· 745 goto fail; 746 747 /* psk */ 748 - mix_psk(chaining_key, hash, key, handshake->preshared_key); 749 750 /* {} */ 751 if (!message_decrypt(NULL, src->encrypted_nothing, ··· 778 memzero_explicit(chaining_key, NOISE_HASH_LEN); 779 memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); 780 memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); 781 up_read(&wg->static_identity.lock); 782 return ret_peer; 783 }
··· 104 105 if (unlikely(!keypair)) 106 return NULL; 107 + spin_lock_init(&keypair->receiving_counter.lock); 108 keypair->internal_id = atomic64_inc_return(&keypair_counter); 109 keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; 110 keypair->entry.peer = peer; ··· 358 memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); 359 } 360 361 static void derive_keys(struct noise_symmetric_key *first_dst, 362 struct noise_symmetric_key *second_dst, 363 const u8 chaining_key[NOISE_HASH_LEN]) 364 { 365 + u64 birthdate = ktime_get_coarse_boottime_ns(); 366 kdf(first_dst->key, second_dst->key, NULL, NULL, 367 NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, 368 chaining_key); 369 + first_dst->birthdate = second_dst->birthdate = birthdate; 370 + first_dst->is_valid = second_dst->is_valid = true; 371 } 372 373 static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], ··· 715 u8 e[NOISE_PUBLIC_KEY_LEN]; 716 u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; 717 u8 static_private[NOISE_PUBLIC_KEY_LEN]; 718 + u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; 719 720 down_read(&wg->static_identity.lock); 721 ··· 733 memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); 734 memcpy(ephemeral_private, handshake->ephemeral_private, 735 NOISE_PUBLIC_KEY_LEN); 736 + memcpy(preshared_key, handshake->preshared_key, 737 + NOISE_SYMMETRIC_KEY_LEN); 738 up_read(&handshake->lock); 739 740 if (state != HANDSHAKE_CREATED_INITIATION) ··· 750 goto fail; 751 752 /* psk */ 753 + mix_psk(chaining_key, hash, key, preshared_key); 754 755 /* {} */ 756 if (!message_decrypt(NULL, src->encrypted_nothing, ··· 783 memzero_explicit(chaining_key, NOISE_HASH_LEN); 784 memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); 785 memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); 786 + memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN); 787 up_read(&wg->static_identity.lock); 788 return ret_peer; 789 }
+6 -8
drivers/net/wireguard/noise.h
··· 15 #include <linux/mutex.h> 16 #include <linux/kref.h> 17 18 - union noise_counter { 19 - struct { 20 - u64 counter; 21 - unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; 22 - spinlock_t lock; 23 - } receive; 24 - atomic64_t counter; 25 }; 26 27 struct noise_symmetric_key { 28 u8 key[NOISE_SYMMETRIC_KEY_LEN]; 29 - union noise_counter counter; 30 u64 birthdate; 31 bool is_valid; 32 }; ··· 30 struct noise_keypair { 31 struct index_hashtable_entry entry; 32 struct noise_symmetric_key sending; 33 struct noise_symmetric_key receiving; 34 __le32 remote_index; 35 bool i_am_the_initiator; 36 struct kref refcount;
··· 15 #include <linux/mutex.h> 16 #include <linux/kref.h> 17 18 + struct noise_replay_counter { 19 + u64 counter; 20 + spinlock_t lock; 21 + unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; 22 }; 23 24 struct noise_symmetric_key { 25 u8 key[NOISE_SYMMETRIC_KEY_LEN]; 26 u64 birthdate; 27 bool is_valid; 28 }; ··· 34 struct noise_keypair { 35 struct index_hashtable_entry entry; 36 struct noise_symmetric_key sending; 37 + atomic64_t sending_counter; 38 struct noise_symmetric_key receiving; 39 + struct noise_replay_counter receiving_counter; 40 __le32 remote_index; 41 bool i_am_the_initiator; 42 struct kref refcount;
+9 -1
drivers/net/wireguard/queueing.h
··· 87 return real_protocol && skb->protocol == real_protocol; 88 } 89 90 - static inline void wg_reset_packet(struct sk_buff *skb) 91 { 92 skb_scrub_packet(skb, true); 93 memset(&skb->headers_start, 0, 94 offsetof(struct sk_buff, headers_end) - 95 offsetof(struct sk_buff, headers_start)); 96 skb->queue_mapping = 0; 97 skb->nohdr = 0; 98 skb->peeked = 0;
··· 87 return real_protocol && skb->protocol == real_protocol; 88 } 89 90 + static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) 91 { 92 + u8 l4_hash = skb->l4_hash; 93 + u8 sw_hash = skb->sw_hash; 94 + u32 hash = skb->hash; 95 skb_scrub_packet(skb, true); 96 memset(&skb->headers_start, 0, 97 offsetof(struct sk_buff, headers_end) - 98 offsetof(struct sk_buff, headers_start)); 99 + if (encapsulating) { 100 + skb->l4_hash = l4_hash; 101 + skb->sw_hash = sw_hash; 102 + skb->hash = hash; 103 + } 104 skb->queue_mapping = 0; 105 skb->nohdr = 0; 106 skb->peeked = 0;
+22 -22
drivers/net/wireguard/receive.c
··· 245 } 246 } 247 248 - static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) 249 { 250 struct scatterlist sg[MAX_SKB_FRAGS + 8]; 251 struct sk_buff *trailer; 252 unsigned int offset; 253 int num_frags; 254 255 - if (unlikely(!key)) 256 return false; 257 258 - if (unlikely(!READ_ONCE(key->is_valid) || 259 - wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || 260 - key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { 261 - WRITE_ONCE(key->is_valid, false); 262 return false; 263 } 264 ··· 283 284 if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, 285 PACKET_CB(skb)->nonce, 286 - key->key)) 287 return false; 288 289 /* Another ugly situation of pushing and pulling the header so as to ··· 298 } 299 300 /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ 301 - static bool counter_validate(union noise_counter *counter, u64 their_counter) 302 { 303 unsigned long index, index_current, top, i; 304 bool ret = false; 305 306 - spin_lock_bh(&counter->receive.lock); 307 308 - if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || 309 their_counter >= REJECT_AFTER_MESSAGES)) 310 goto out; 311 312 ++their_counter; 313 314 if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < 315 - counter->receive.counter)) 316 goto out; 317 318 index = their_counter >> ilog2(BITS_PER_LONG); 319 320 - if (likely(their_counter > counter->receive.counter)) { 321 - index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); 322 top = min_t(unsigned long, index - index_current, 323 COUNTER_BITS_TOTAL / BITS_PER_LONG); 324 for (i = 1; i <= top; ++i) 325 - counter->receive.backtrack[(i + index_current) & 326 ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; 327 - counter->receive.counter = their_counter; 328 } 329 330 index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; 331 ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), 332 - &counter->receive.backtrack[index]); 333 334 out: 335 - spin_unlock_bh(&counter->receive.lock); 336 return ret; 337 } 338 ··· 472 if (unlikely(state != PACKET_STATE_CRYPTED)) 473 goto next; 474 475 - if (unlikely(!counter_validate(&keypair->receiving.counter, 476 PACKET_CB(skb)->nonce))) { 477 net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", 478 peer->device->dev->name, 479 PACKET_CB(skb)->nonce, 480 - keypair->receiving.counter.receive.counter); 481 goto next; 482 } 483 484 if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) 485 goto next; 486 487 - wg_reset_packet(skb); 488 wg_packet_consume_data_done(peer, skb, &endpoint); 489 free = false; 490 ··· 511 struct sk_buff *skb; 512 513 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { 514 - enum packet_state state = likely(decrypt_packet(skb, 515 - &PACKET_CB(skb)->keypair->receiving)) ? 516 PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; 517 wg_queue_enqueue_per_peer_napi(skb, state); 518 if (need_resched())
··· 245 } 246 } 247 248 + static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) 249 { 250 struct scatterlist sg[MAX_SKB_FRAGS + 8]; 251 struct sk_buff *trailer; 252 unsigned int offset; 253 int num_frags; 254 255 + if (unlikely(!keypair)) 256 return false; 257 258 + if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || 259 + wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || 260 + keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { 261 + WRITE_ONCE(keypair->receiving.is_valid, false); 262 return false; 263 } 264 ··· 283 284 if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, 285 PACKET_CB(skb)->nonce, 286 + keypair->receiving.key)) 287 return false; 288 289 /* Another ugly situation of pushing and pulling the header so as to ··· 298 } 299 300 /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ 301 + static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) 302 { 303 unsigned long index, index_current, top, i; 304 bool ret = false; 305 306 + spin_lock_bh(&counter->lock); 307 308 + if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || 309 their_counter >= REJECT_AFTER_MESSAGES)) 310 goto out; 311 312 ++their_counter; 313 314 if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < 315 + counter->counter)) 316 goto out; 317 318 index = their_counter >> ilog2(BITS_PER_LONG); 319 320 + if (likely(their_counter > counter->counter)) { 321 + index_current = counter->counter >> ilog2(BITS_PER_LONG); 322 top = min_t(unsigned long, index - index_current, 323 COUNTER_BITS_TOTAL / BITS_PER_LONG); 324 for (i = 1; i <= top; ++i) 325 + counter->backtrack[(i + index_current) & 326 ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; 327 + counter->counter = their_counter; 328 } 329 330 index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; 331 ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), 332 + &counter->backtrack[index]); 333 334 out: 335 + spin_unlock_bh(&counter->lock); 336 return ret; 337 } 338 ··· 472 if (unlikely(state != PACKET_STATE_CRYPTED)) 473 goto next; 474 475 + if (unlikely(!counter_validate(&keypair->receiving_counter, 476 PACKET_CB(skb)->nonce))) { 477 net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", 478 peer->device->dev->name, 479 PACKET_CB(skb)->nonce, 480 + keypair->receiving_counter.counter); 481 goto next; 482 } 483 484 if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) 485 goto next; 486 487 + wg_reset_packet(skb, false); 488 wg_packet_consume_data_done(peer, skb, &endpoint); 489 free = false; 490 ··· 511 struct sk_buff *skb; 512 513 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { 514 + enum packet_state state = 515 + likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 516 PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; 517 wg_queue_enqueue_per_peer_napi(skb, state); 518 if (need_resched())
+12 -5
drivers/net/wireguard/selftest/counter.c
··· 6 #ifdef DEBUG 7 bool __init wg_packet_counter_selftest(void) 8 { 9 unsigned int test_num = 0, i; 10 - union noise_counter counter; 11 bool success = true; 12 13 - #define T_INIT do { \ 14 - memset(&counter, 0, sizeof(union noise_counter)); \ 15 - spin_lock_init(&counter.receive.lock); \ 16 } while (0) 17 #define T_LIM (COUNTER_WINDOW_SIZE + 1) 18 #define T(n, v) do { \ 19 ++test_num; \ 20 - if (counter_validate(&counter, n) != (v)) { \ 21 pr_err("nonce counter self-test %u: FAIL\n", \ 22 test_num); \ 23 success = false; \ ··· 105 106 if (success) 107 pr_info("nonce counter self-tests: pass\n"); 108 return success; 109 } 110 #endif
··· 6 #ifdef DEBUG 7 bool __init wg_packet_counter_selftest(void) 8 { 9 + struct noise_replay_counter *counter; 10 unsigned int test_num = 0, i; 11 bool success = true; 12 13 + counter = kmalloc(sizeof(*counter), GFP_KERNEL); 14 + if (unlikely(!counter)) { 15 + pr_err("nonce counter self-test malloc: FAIL\n"); 16 + return false; 17 + } 18 + 19 + #define T_INIT do { \ 20 + memset(counter, 0, sizeof(*counter)); \ 21 + spin_lock_init(&counter->lock); \ 22 } while (0) 23 #define T_LIM (COUNTER_WINDOW_SIZE + 1) 24 #define T(n, v) do { \ 25 ++test_num; \ 26 + if (counter_validate(counter, n) != (v)) { \ 27 pr_err("nonce counter self-test %u: FAIL\n", \ 28 test_num); \ 29 success = false; \ ··· 99 100 if (success) 101 pr_info("nonce counter self-tests: pass\n"); 102 + kfree(counter); 103 return success; 104 } 105 #endif
+11 -8
drivers/net/wireguard/send.c
··· 129 rcu_read_lock_bh(); 130 keypair = rcu_dereference_bh(peer->keypairs.current_keypair); 131 send = keypair && READ_ONCE(keypair->sending.is_valid) && 132 - (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || 133 (keypair->i_am_the_initiator && 134 wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); 135 rcu_read_unlock_bh(); ··· 166 struct message_data *header; 167 struct sk_buff *trailer; 168 int num_frags; 169 170 /* Calculate lengths. */ 171 padding_len = calculate_skb_padding(skb); ··· 300 skb_list_walk_safe(first, skb, next) { 301 if (likely(encrypt_packet(skb, 302 PACKET_CB(first)->keypair))) { 303 - wg_reset_packet(skb); 304 } else { 305 state = PACKET_STATE_DEAD; 306 break; ··· 349 350 void wg_packet_send_staged_packets(struct wg_peer *peer) 351 { 352 - struct noise_symmetric_key *key; 353 struct noise_keypair *keypair; 354 struct sk_buff_head packets; 355 struct sk_buff *skb; ··· 368 rcu_read_unlock_bh(); 369 if (unlikely(!keypair)) 370 goto out_nokey; 371 - key = &keypair->sending; 372 - if (unlikely(!READ_ONCE(key->is_valid))) 373 goto out_nokey; 374 - if (unlikely(wg_birthdate_has_expired(key->birthdate, 375 REJECT_AFTER_TIME))) 376 goto out_invalid; 377 ··· 385 */ 386 PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); 387 PACKET_CB(skb)->nonce = 388 - atomic64_inc_return(&key->counter.counter) - 1; 389 if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) 390 goto out_invalid; 391 } ··· 397 return; 398 399 out_invalid: 400 - WRITE_ONCE(key->is_valid, false); 401 out_nokey: 402 wg_noise_keypair_put(keypair, false); 403
··· 129 rcu_read_lock_bh(); 130 keypair = rcu_dereference_bh(peer->keypairs.current_keypair); 131 send = keypair && READ_ONCE(keypair->sending.is_valid) && 132 + (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES || 133 (keypair->i_am_the_initiator && 134 wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); 135 rcu_read_unlock_bh(); ··· 166 struct message_data *header; 167 struct sk_buff *trailer; 168 int num_frags; 169 + 170 + /* Force hash calculation before encryption so that flow analysis is 171 + * consistent over the inner packet. 172 + */ 173 + skb_get_hash(skb); 174 175 /* Calculate lengths. */ 176 padding_len = calculate_skb_padding(skb); ··· 295 skb_list_walk_safe(first, skb, next) { 296 if (likely(encrypt_packet(skb, 297 PACKET_CB(first)->keypair))) { 298 + wg_reset_packet(skb, true); 299 } else { 300 state = PACKET_STATE_DEAD; 301 break; ··· 344 345 void wg_packet_send_staged_packets(struct wg_peer *peer) 346 { 347 struct noise_keypair *keypair; 348 struct sk_buff_head packets; 349 struct sk_buff *skb; ··· 364 rcu_read_unlock_bh(); 365 if (unlikely(!keypair)) 366 goto out_nokey; 367 + if (unlikely(!READ_ONCE(keypair->sending.is_valid))) 368 goto out_nokey; 369 + if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, 370 REJECT_AFTER_TIME))) 371 goto out_invalid; 372 ··· 382 */ 383 PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); 384 PACKET_CB(skb)->nonce = 385 + atomic64_inc_return(&keypair->sending_counter) - 1; 386 if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) 387 goto out_invalid; 388 } ··· 394 return; 395 396 out_invalid: 397 + WRITE_ONCE(keypair->sending.is_valid, false); 398 out_nokey: 399 wg_noise_keypair_put(keypair, false); 400
+4
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 1092 iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; 1093 else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) 1094 iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; 1095 } 1096 1097 #endif
··· 1092 iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; 1093 else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) 1094 iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; 1095 + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) 1096 + iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr; 1097 + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) 1098 + iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr; 1099 } 1100 1101 #endif
+5 -13
fs/afs/fs_probe.c
··· 32 struct afs_server *server = call->server; 33 unsigned int server_index = call->server_index; 34 unsigned int index = call->addr_ix; 35 - unsigned int rtt = UINT_MAX; 36 bool have_result = false; 37 - u64 _rtt; 38 int ret = call->error; 39 40 _enter("%pU,%u", &server->uuid, index); ··· 92 } 93 } 94 95 - /* Get the RTT and scale it to fit into a 32-bit value that represents 96 - * over a minute of time so that we can access it with one instruction 97 - * on a 32-bit system. 98 - */ 99 - _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); 100 - _rtt /= 64; 101 - rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; 102 - if (rtt < server->probe.rtt) { 103 - server->probe.rtt = rtt; 104 alist->preferred = index; 105 have_result = true; 106 } ··· 106 spin_unlock(&server->probe_lock); 107 108 _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", 109 - server_index, index, &alist->addrs[index].transport, 110 - (unsigned int)rtt, ret); 111 112 have_result |= afs_fs_probe_done(server); 113 if (have_result)
··· 32 struct afs_server *server = call->server; 33 unsigned int server_index = call->server_index; 34 unsigned int index = call->addr_ix; 35 + unsigned int rtt_us = 0; 36 bool have_result = false; 37 int ret = call->error; 38 39 _enter("%pU,%u", &server->uuid, index); ··· 93 } 94 } 95 96 + rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); 97 + if (rtt_us < server->probe.rtt) { 98 + server->probe.rtt = rtt_us; 99 alist->preferred = index; 100 have_result = true; 101 } ··· 113 spin_unlock(&server->probe_lock); 114 115 _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", 116 + server_index, index, &alist->addrs[index].transport, rtt_us, ret); 117 118 have_result |= afs_fs_probe_done(server); 119 if (have_result)
+5 -13
fs/afs/vl_probe.c
··· 31 struct afs_addr_list *alist = call->alist; 32 struct afs_vlserver *server = call->vlserver; 33 unsigned int server_index = call->server_index; 34 unsigned int index = call->addr_ix; 35 - unsigned int rtt = UINT_MAX; 36 bool have_result = false; 37 - u64 _rtt; 38 int ret = call->error; 39 40 _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code); ··· 92 } 93 } 94 95 - /* Get the RTT and scale it to fit into a 32-bit value that represents 96 - * over a minute of time so that we can access it with one instruction 97 - * on a 32-bit system. 98 - */ 99 - _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); 100 - _rtt /= 64; 101 - rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; 102 - if (rtt < server->probe.rtt) { 103 - server->probe.rtt = rtt; 104 alist->preferred = index; 105 have_result = true; 106 } ··· 106 spin_unlock(&server->probe_lock); 107 108 _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", 109 - server_index, index, &alist->addrs[index].transport, 110 - (unsigned int)rtt, ret); 111 112 have_result |= afs_vl_probe_done(server); 113 if (have_result) {
··· 31 struct afs_addr_list *alist = call->alist; 32 struct afs_vlserver *server = call->vlserver; 33 unsigned int server_index = call->server_index; 34 + unsigned int rtt_us = 0; 35 unsigned int index = call->addr_ix; 36 bool have_result = false; 37 int ret = call->error; 38 39 _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code); ··· 93 } 94 } 95 96 + rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); 97 + if (rtt_us < server->probe.rtt) { 98 + server->probe.rtt = rtt_us; 99 alist->preferred = index; 100 have_result = true; 101 } ··· 113 spin_unlock(&server->probe_lock); 114 115 _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", 116 + server_index, index, &alist->addrs[index].transport, rtt_us, ret); 117 118 have_result |= afs_vl_probe_done(server); 119 if (have_result) {
+16
include/linux/mlx5/driver.h
··· 213 MLX5_PORT_DOWN = 2, 214 }; 215 216 struct mlx5_cmd_first { 217 __be32 data[4]; 218 }; ··· 264 struct mlx5_cmd { 265 struct mlx5_nb nb; 266 267 void *cmd_alloc_buf; 268 dma_addr_t alloc_dma; 269 int alloc_size; ··· 291 struct semaphore sem; 292 struct semaphore pages_sem; 293 int mode; 294 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; 295 struct dma_pool *pool; 296 struct mlx5_cmd_debug dbg; ··· 751 struct delayed_work cb_timeout_work; 752 void *context; 753 int idx; 754 struct completion done; 755 struct mlx5_cmd *cmd; 756 struct work_struct work; ··· 883 return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); 884 } 885 886 int mlx5_cmd_init(struct mlx5_core_dev *dev); 887 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 888 void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 889 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 890 891 struct mlx5_async_ctx { 892 struct mlx5_core_dev *dev;
··· 213 MLX5_PORT_DOWN = 2, 214 }; 215 216 + enum mlx5_cmdif_state { 217 + MLX5_CMDIF_STATE_UNINITIALIZED, 218 + MLX5_CMDIF_STATE_UP, 219 + MLX5_CMDIF_STATE_DOWN, 220 + }; 221 + 222 struct mlx5_cmd_first { 223 __be32 data[4]; 224 }; ··· 258 struct mlx5_cmd { 259 struct mlx5_nb nb; 260 261 + enum mlx5_cmdif_state state; 262 void *cmd_alloc_buf; 263 dma_addr_t alloc_dma; 264 int alloc_size; ··· 284 struct semaphore sem; 285 struct semaphore pages_sem; 286 int mode; 287 + u16 allowed_opcode; 288 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; 289 struct dma_pool *pool; 290 struct mlx5_cmd_debug dbg; ··· 743 struct delayed_work cb_timeout_work; 744 void *context; 745 int idx; 746 + struct completion handling; 747 struct completion done; 748 struct mlx5_cmd *cmd; 749 struct work_struct work; ··· 874 return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); 875 } 876 877 + enum { 878 + CMD_ALLOWED_OPCODE_ALL, 879 + }; 880 + 881 int mlx5_cmd_init(struct mlx5_core_dev *dev); 882 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 883 + void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 884 + enum mlx5_cmdif_state cmdif_state); 885 void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 886 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 887 + void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); 888 889 struct mlx5_async_ctx { 890 struct mlx5_core_dev *dev;
+2 -1
include/net/act_api.h
··· 75 { 76 dtm->install = jiffies_to_clock_t(jiffies - stm->install); 77 dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse); 78 - dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse); 79 dtm->expires = jiffies_to_clock_t(stm->expires); 80 } 81
··· 75 { 76 dtm->install = jiffies_to_clock_t(jiffies - stm->install); 77 dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse); 78 + dtm->firstuse = stm->firstuse ? 79 + jiffies_to_clock_t(jiffies - stm->firstuse) : 0; 80 dtm->expires = jiffies_to_clock_t(stm->expires); 81 } 82
+1 -1
include/net/af_rxrpc.h
··· 59 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); 60 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, 61 struct sockaddr_rxrpc *); 62 - u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *); 63 int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, 64 rxrpc_user_attach_call_t, unsigned long, gfp_t, 65 unsigned int);
··· 59 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); 60 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, 61 struct sockaddr_rxrpc *); 62 + u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *); 63 int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, 64 rxrpc_user_attach_call_t, unsigned long, gfp_t, 65 unsigned int);
-1
include/net/ip_fib.h
··· 257 u32 table_id; 258 /* filter_set is an optimization that an entry is set */ 259 bool filter_set; 260 - bool dump_all_families; 261 bool dump_routes; 262 bool dump_exceptions; 263 unsigned char protocol;
··· 257 u32 table_id; 258 /* filter_set is an optimization that an entry is set */ 259 bool filter_set; 260 bool dump_routes; 261 bool dump_exceptions; 262 unsigned char protocol;
+42 -10
include/trace/events/rxrpc.h
··· 1112 TRACE_EVENT(rxrpc_rtt_rx, 1113 TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 1114 rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 1115 - s64 rtt, u8 nr, s64 avg), 1116 1117 - TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg), 1118 1119 TP_STRUCT__entry( 1120 __field(unsigned int, call ) 1121 __field(enum rxrpc_rtt_rx_trace, why ) 1122 - __field(u8, nr ) 1123 __field(rxrpc_serial_t, send_serial ) 1124 __field(rxrpc_serial_t, resp_serial ) 1125 - __field(s64, rtt ) 1126 - __field(u64, avg ) 1127 ), 1128 1129 TP_fast_assign( ··· 1131 __entry->send_serial = send_serial; 1132 __entry->resp_serial = resp_serial; 1133 __entry->rtt = rtt; 1134 - __entry->nr = nr; 1135 - __entry->avg = avg; 1136 ), 1137 1138 - TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", 1139 __entry->call, 1140 __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), 1141 __entry->send_serial, 1142 __entry->resp_serial, 1143 __entry->rtt, 1144 - __entry->nr, 1145 - __entry->avg) 1146 ); 1147 1148 TRACE_EVENT(rxrpc_timer, ··· 1539 TP_printk("c=%08x r=%08x", 1540 __entry->debug_id, 1541 __entry->serial) 1542 ); 1543 1544 #endif /* _TRACE_RXRPC_H */
··· 1112 TRACE_EVENT(rxrpc_rtt_rx, 1113 TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 1114 rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 1115 + u32 rtt, u32 rto), 1116 1117 + TP_ARGS(call, why, send_serial, resp_serial, rtt, rto), 1118 1119 TP_STRUCT__entry( 1120 __field(unsigned int, call ) 1121 __field(enum rxrpc_rtt_rx_trace, why ) 1122 __field(rxrpc_serial_t, send_serial ) 1123 __field(rxrpc_serial_t, resp_serial ) 1124 + __field(u32, rtt ) 1125 + __field(u32, rto ) 1126 ), 1127 1128 TP_fast_assign( ··· 1132 __entry->send_serial = send_serial; 1133 __entry->resp_serial = resp_serial; 1134 __entry->rtt = rtt; 1135 + __entry->rto = rto; 1136 ), 1137 1138 + TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u", 1139 __entry->call, 1140 __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), 1141 __entry->send_serial, 1142 __entry->resp_serial, 1143 __entry->rtt, 1144 + __entry->rto) 1145 ); 1146 1147 TRACE_EVENT(rxrpc_timer, ··· 1542 TP_printk("c=%08x r=%08x", 1543 __entry->debug_id, 1544 __entry->serial) 1545 + ); 1546 + 1547 + TRACE_EVENT(rxrpc_rx_discard_ack, 1548 + TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, 1549 + rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first, 1550 + rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev), 1551 + 1552 + TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first, 1553 + prev_pkt, call_ackr_prev), 1554 + 1555 + TP_STRUCT__entry( 1556 + __field(unsigned int, debug_id ) 1557 + __field(rxrpc_serial_t, serial ) 1558 + __field(rxrpc_seq_t, first_soft_ack) 1559 + __field(rxrpc_seq_t, call_ackr_first) 1560 + __field(rxrpc_seq_t, prev_pkt) 1561 + __field(rxrpc_seq_t, call_ackr_prev) 1562 + ), 1563 + 1564 + TP_fast_assign( 1565 + __entry->debug_id = debug_id; 1566 + __entry->serial = serial; 1567 + __entry->first_soft_ack = first_soft_ack; 1568 + __entry->call_ackr_first = call_ackr_first; 1569 + __entry->prev_pkt = prev_pkt; 1570 + __entry->call_ackr_prev = call_ackr_prev; 1571 + ), 1572 + 1573 + TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x", 1574 + __entry->debug_id, 1575 + __entry->serial, 1576 + __entry->first_soft_ack, 1577 + __entry->call_ackr_first, 1578 + __entry->prev_pkt, 1579 + __entry->call_ackr_prev) 1580 ); 1581 1582 #endif /* _TRACE_RXRPC_H */
+14 -3
kernel/bpf/syscall.c
··· 623 624 mutex_lock(&map->freeze_mutex); 625 626 - if ((vma->vm_flags & VM_WRITE) && map->frozen) { 627 - err = -EPERM; 628 - goto out; 629 } 630 631 /* set default open/close callbacks */
··· 623 624 mutex_lock(&map->freeze_mutex); 625 626 + if (vma->vm_flags & VM_WRITE) { 627 + if (map->frozen) { 628 + err = -EPERM; 629 + goto out; 630 + } 631 + /* map is meant to be read-only, so do not allow mapping as 632 + * writable, because it's possible to leak a writable page 633 + * reference and allows user-space to still modify it after 634 + * freezing, while verifier will assume contents do not change 635 + */ 636 + if (map->map_flags & BPF_F_RDONLY_PROG) { 637 + err = -EACCES; 638 + goto out; 639 + } 640 } 641 642 /* set default open/close callbacks */
+4 -2
net/ax25/af_ax25.c
··· 635 break; 636 637 case SO_BINDTODEVICE: 638 - if (optlen > IFNAMSIZ) 639 - optlen = IFNAMSIZ; 640 641 if (copy_from_user(devname, optval, optlen)) { 642 res = -EFAULT;
··· 635 break; 636 637 case SO_BINDTODEVICE: 638 + if (optlen > IFNAMSIZ - 1) 639 + optlen = IFNAMSIZ - 1; 640 + 641 + memset(devname, 0, sizeof(devname)); 642 643 if (copy_from_user(devname, optval, optlen)) { 644 res = -EFAULT;
+15 -5
net/core/dev.c
··· 4988 return 0; 4989 } 4990 4991 - static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, 4992 struct packet_type **ppt_prev) 4993 { 4994 struct packet_type *ptype, *pt_prev; 4995 rx_handler_func_t *rx_handler; 4996 struct net_device *orig_dev; 4997 bool deliver_exact = false; 4998 int ret = NET_RX_DROP; ··· 5024 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5025 preempt_enable(); 5026 5027 - if (ret2 != XDP_PASS) 5028 - return NET_RX_DROP; 5029 skb_reset_mac_len(skb); 5030 } 5031 ··· 5177 } 5178 5179 out: 5180 return ret; 5181 } 5182 ··· 5193 struct packet_type *pt_prev = NULL; 5194 int ret; 5195 5196 - ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5197 if (pt_prev) 5198 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5199 skb->dev, pt_prev, orig_dev); ··· 5271 struct packet_type *pt_prev = NULL; 5272 5273 skb_list_del_init(skb); 5274 - __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5275 if (!pt_prev) 5276 continue; 5277 if (pt_curr != pt_prev || od_curr != orig_dev) {
··· 4988 return 0; 4989 } 4990 4991 + static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 4992 struct packet_type **ppt_prev) 4993 { 4994 struct packet_type *ptype, *pt_prev; 4995 rx_handler_func_t *rx_handler; 4996 + struct sk_buff *skb = *pskb; 4997 struct net_device *orig_dev; 4998 bool deliver_exact = false; 4999 int ret = NET_RX_DROP; ··· 5023 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5024 preempt_enable(); 5025 5026 + if (ret2 != XDP_PASS) { 5027 + ret = NET_RX_DROP; 5028 + goto out; 5029 + } 5030 skb_reset_mac_len(skb); 5031 } 5032 ··· 5174 } 5175 5176 out: 5177 + /* The invariant here is that if *ppt_prev is not NULL 5178 + * then skb should also be non-NULL. 5179 + * 5180 + * Apparently *ppt_prev assignment above holds this invariant due to 5181 + * skb dereferencing near it. 5182 + */ 5183 + *pskb = skb; 5184 return ret; 5185 } 5186 ··· 5183 struct packet_type *pt_prev = NULL; 5184 int ret; 5185 5186 + ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5187 if (pt_prev) 5188 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5189 skb->dev, pt_prev, orig_dev); ··· 5261 struct packet_type *pt_prev = NULL; 5262 5263 skb_list_del_init(skb); 5264 + __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5265 if (!pt_prev) 5266 continue; 5267 if (pt_curr != pt_prev || od_curr != orig_dev) {
+21 -5
net/core/flow_dissector.c
··· 160 return ret; 161 } 162 163 - int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) 164 { 165 struct bpf_prog *attached; 166 - struct net *net; 167 168 - net = current->nsproxy->net_ns; 169 mutex_lock(&flow_dissector_mutex); 170 attached = rcu_dereference_protected(net->flow_dissector_prog, 171 lockdep_is_held(&flow_dissector_mutex)); ··· 176 mutex_unlock(&flow_dissector_mutex); 177 return 0; 178 } 179 180 /** 181 * __skb_flow_get_ports - extract the upper layer ports and return them ··· 1852 skb_flow_dissector_init(&flow_keys_basic_dissector, 1853 flow_keys_basic_dissector_keys, 1854 ARRAY_SIZE(flow_keys_basic_dissector_keys)); 1855 - return 0; 1856 - } 1857 1858 core_initcall(init_default_flow_dissectors);
··· 160 return ret; 161 } 162 163 + static int flow_dissector_bpf_prog_detach(struct net *net) 164 { 165 struct bpf_prog *attached; 166 167 mutex_lock(&flow_dissector_mutex); 168 attached = rcu_dereference_protected(net->flow_dissector_prog, 169 lockdep_is_held(&flow_dissector_mutex)); ··· 178 mutex_unlock(&flow_dissector_mutex); 179 return 0; 180 } 181 + 182 + int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) 183 + { 184 + return flow_dissector_bpf_prog_detach(current->nsproxy->net_ns); 185 + } 186 + 187 + static void __net_exit flow_dissector_pernet_pre_exit(struct net *net) 188 + { 189 + /* We're not racing with attach/detach because there are no 190 + * references to netns left when pre_exit gets called. 191 + */ 192 + if (rcu_access_pointer(net->flow_dissector_prog)) 193 + flow_dissector_bpf_prog_detach(net); 194 + } 195 + 196 + static struct pernet_operations flow_dissector_pernet_ops __net_initdata = { 197 + .pre_exit = flow_dissector_pernet_pre_exit, 198 + }; 199 200 /** 201 * __skb_flow_get_ports - extract the upper layer ports and return them ··· 1836 skb_flow_dissector_init(&flow_keys_basic_dissector, 1837 flow_keys_basic_dissector_keys, 1838 ARRAY_SIZE(flow_keys_basic_dissector_keys)); 1839 1840 + return register_pernet_subsys(&flow_dissector_pernet_ops); 1841 + } 1842 core_initcall(init_default_flow_dissectors);
+15
net/dsa/tag_mtk.c
··· 15 #define MTK_HDR_XMIT_TAGGED_TPID_8100 1 16 #define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) 17 #define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0) 18 19 static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, 20 struct net_device *dev) ··· 23 struct dsa_port *dp = dsa_slave_to_port(dev); 24 u8 *mtk_tag; 25 bool is_vlan_skb = true; 26 27 /* Build the special tag after the MAC Source Address. If VLAN header 28 * is present, it's required that VLAN header and special tag is ··· 51 MTK_HDR_XMIT_UNTAGGED; 52 mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK; 53 54 /* Tag control information is kept for 802.1Q */ 55 if (!is_vlan_skb) { 56 mtk_tag[2] = 0; ··· 69 { 70 int port; 71 __be16 *phdr, hdr; 72 73 if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) 74 return NULL; ··· 96 skb->dev = dsa_master_find_slave(dev, 0, port); 97 if (!skb->dev) 98 return NULL; 99 100 return skb; 101 }
··· 15 #define MTK_HDR_XMIT_TAGGED_TPID_8100 1 16 #define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) 17 #define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0) 18 + #define MTK_HDR_XMIT_SA_DIS BIT(6) 19 20 static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, 21 struct net_device *dev) ··· 22 struct dsa_port *dp = dsa_slave_to_port(dev); 23 u8 *mtk_tag; 24 bool is_vlan_skb = true; 25 + unsigned char *dest = eth_hdr(skb)->h_dest; 26 + bool is_multicast_skb = is_multicast_ether_addr(dest) && 27 + !is_broadcast_ether_addr(dest); 28 29 /* Build the special tag after the MAC Source Address. If VLAN header 30 * is present, it's required that VLAN header and special tag is ··· 47 MTK_HDR_XMIT_UNTAGGED; 48 mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK; 49 50 + /* Disable SA learning for multicast frames */ 51 + if (unlikely(is_multicast_skb)) 52 + mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS; 53 + 54 /* Tag control information is kept for 802.1Q */ 55 if (!is_vlan_skb) { 56 mtk_tag[2] = 0; ··· 61 { 62 int port; 63 __be16 *phdr, hdr; 64 + unsigned char *dest = eth_hdr(skb)->h_dest; 65 + bool is_multicast_skb = is_multicast_ether_addr(dest) && 66 + !is_broadcast_ether_addr(dest); 67 68 if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) 69 return NULL; ··· 85 skb->dev = dsa_master_find_slave(dev, 0, port); 86 if (!skb->dev) 87 return NULL; 88 + 89 + /* Only unicast or broadcast frames are offloaded */ 90 + if (likely(!is_multicast_skb)) 91 + skb->offload_fwd_mark = 1; 92 93 return skb; 94 }
+2 -2
net/ethtool/netlink.c
··· 342 ret = ops->reply_size(req_info, reply_data); 343 if (ret < 0) 344 goto err_cleanup; 345 - reply_len = ret; 346 ret = -ENOMEM; 347 rskb = ethnl_reply_init(reply_len, req_info->dev, ops->reply_cmd, 348 ops->hdr_attr, info, &reply_payload); ··· 588 ret = ops->reply_size(req_info, reply_data); 589 if (ret < 0) 590 goto err_cleanup; 591 - reply_len = ret; 592 ret = -ENOMEM; 593 skb = genlmsg_new(reply_len, GFP_KERNEL); 594 if (!skb)
··· 342 ret = ops->reply_size(req_info, reply_data); 343 if (ret < 0) 344 goto err_cleanup; 345 + reply_len = ret + ethnl_reply_header_size(); 346 ret = -ENOMEM; 347 rskb = ethnl_reply_init(reply_len, req_info->dev, ops->reply_cmd, 348 ops->hdr_attr, info, &reply_payload); ··· 588 ret = ops->reply_size(req_info, reply_data); 589 if (ret < 0) 590 goto err_cleanup; 591 + reply_len = ret + ethnl_reply_header_size(); 592 ret = -ENOMEM; 593 skb = genlmsg_new(reply_len, GFP_KERNEL); 594 if (!skb)
-1
net/ethtool/strset.c
··· 324 int len = 0; 325 int ret; 326 327 - len += ethnl_reply_header_size(); 328 for (i = 0; i < ETH_SS_COUNT; i++) { 329 const struct strset_info *set_info = &data->sets[i]; 330
··· 324 int len = 0; 325 int ret; 326 327 for (i = 0; i < ETH_SS_COUNT; i++) { 328 const struct strset_info *set_info = &data->sets[i]; 329
+1 -2
net/ipv4/fib_frontend.c
··· 918 else 919 filter->dump_exceptions = false; 920 921 - filter->dump_all_families = (rtm->rtm_family == AF_UNSPEC); 922 filter->flags = rtm->rtm_flags; 923 filter->protocol = rtm->rtm_protocol; 924 filter->rt_type = rtm->rtm_type; ··· 989 if (filter.table_id) { 990 tb = fib_get_table(net, filter.table_id); 991 if (!tb) { 992 - if (filter.dump_all_families) 993 return skb->len; 994 995 NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
··· 918 else 919 filter->dump_exceptions = false; 920 921 filter->flags = rtm->rtm_flags; 922 filter->protocol = rtm->rtm_protocol; 923 filter->rt_type = rtm->rtm_type; ··· 990 if (filter.table_id) { 991 tb = fib_get_table(net, filter.table_id); 992 if (!tb) { 993 + if (rtnl_msg_family(cb->nlh) != PF_INET) 994 return skb->len; 995 996 NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
+24 -19
net/ipv4/inet_connection_sock.c
··· 24 #include <net/addrconf.h> 25 26 #if IS_ENABLED(CONFIG_IPV6) 27 - /* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6 28 - * only, and any IPv4 addresses if not IPv6 only 29 - * match_wildcard == false: addresses must be exactly the same, i.e. 30 - * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, 31 - * and 0.0.0.0 equals to 0.0.0.0 only 32 */ 33 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, 34 const struct in6_addr *sk2_rcv_saddr6, 35 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, 36 bool sk1_ipv6only, bool sk2_ipv6only, 37 - bool match_wildcard) 38 { 39 int addr_type = ipv6_addr_type(sk1_rcv_saddr6); 40 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; ··· 46 if (!sk2_ipv6only) { 47 if (sk1_rcv_saddr == sk2_rcv_saddr) 48 return true; 49 - if (!sk1_rcv_saddr || !sk2_rcv_saddr) 50 - return match_wildcard; 51 } 52 return false; 53 } ··· 55 if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY) 56 return true; 57 58 - if (addr_type2 == IPV6_ADDR_ANY && match_wildcard && 59 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) 60 return true; 61 62 - if (addr_type == IPV6_ADDR_ANY && match_wildcard && 63 !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) 64 return true; 65 ··· 71 } 72 #endif 73 74 - /* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses 75 - * match_wildcard == false: addresses must be exactly the same, i.e. 76 - * 0.0.0.0 only equals to 0.0.0.0 77 */ 78 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, 79 - bool sk2_ipv6only, bool match_wildcard) 80 { 81 if (!sk2_ipv6only) { 82 if (sk1_rcv_saddr == sk2_rcv_saddr) 83 return true; 84 - if (!sk1_rcv_saddr || !sk2_rcv_saddr) 85 - return match_wildcard; 86 } 87 return false; 88 } ··· 99 sk2->sk_rcv_saddr, 100 ipv6_only_sock(sk), 101 ipv6_only_sock(sk2), 102 match_wildcard); 103 #endif 104 return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr, 105 - ipv6_only_sock(sk2), match_wildcard); 106 } 107 EXPORT_SYMBOL(inet_rcv_saddr_equal); 108 ··· 290 tb->fast_rcv_saddr, 291 sk->sk_rcv_saddr, 292 tb->fast_ipv6_only, 293 - ipv6_only_sock(sk), true); 294 #endif 295 return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr, 296 - ipv6_only_sock(sk), true); 297 } 298 299 /* Obtain a reference to a local port for the given sock,
··· 24 #include <net/addrconf.h> 25 26 #if IS_ENABLED(CONFIG_IPV6) 27 + /* match_sk*_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses 28 + * if IPv6 only, and any IPv4 addresses 29 + * if not IPv6 only 30 + * match_sk*_wildcard == false: addresses must be exactly the same, i.e. 31 + * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY, 32 + * and 0.0.0.0 equals to 0.0.0.0 only 33 */ 34 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6, 35 const struct in6_addr *sk2_rcv_saddr6, 36 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, 37 bool sk1_ipv6only, bool sk2_ipv6only, 38 + bool match_sk1_wildcard, 39 + bool match_sk2_wildcard) 40 { 41 int addr_type = ipv6_addr_type(sk1_rcv_saddr6); 42 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED; ··· 44 if (!sk2_ipv6only) { 45 if (sk1_rcv_saddr == sk2_rcv_saddr) 46 return true; 47 + return (match_sk1_wildcard && !sk1_rcv_saddr) || 48 + (match_sk2_wildcard && !sk2_rcv_saddr); 49 } 50 return false; 51 } ··· 53 if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY) 54 return true; 55 56 + if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard && 57 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED)) 58 return true; 59 60 + if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard && 61 !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED)) 62 return true; 63 ··· 69 } 70 #endif 71 72 + /* match_sk*_wildcard == true: 0.0.0.0 equals to any IPv4 addresses 73 + * match_sk*_wildcard == false: addresses must be exactly the same, i.e. 74 + * 0.0.0.0 only equals to 0.0.0.0 75 */ 76 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, 77 + bool sk2_ipv6only, bool match_sk1_wildcard, 78 + bool match_sk2_wildcard) 79 { 80 if (!sk2_ipv6only) { 81 if (sk1_rcv_saddr == sk2_rcv_saddr) 82 return true; 83 + return (match_sk1_wildcard && !sk1_rcv_saddr) || 84 + (match_sk2_wildcard && !sk2_rcv_saddr); 85 } 86 return false; 87 } ··· 96 sk2->sk_rcv_saddr, 97 ipv6_only_sock(sk), 98 ipv6_only_sock(sk2), 99 + match_wildcard, 100 match_wildcard); 101 #endif 102 return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr, 103 + ipv6_only_sock(sk2), match_wildcard, 104 + match_wildcard); 105 } 106 EXPORT_SYMBOL(inet_rcv_saddr_equal); 107 ··· 285 tb->fast_rcv_saddr, 286 sk->sk_rcv_saddr, 287 tb->fast_ipv6_only, 288 + ipv6_only_sock(sk), true, false); 289 #endif 290 return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr, 291 + ipv6_only_sock(sk), true, false); 292 } 293 294 /* Obtain a reference to a local port for the given sock,
+1 -1
net/ipv4/ipip.c
··· 698 699 rtnl_link_failed: 700 #if IS_ENABLED(CONFIG_MPLS) 701 - xfrm4_tunnel_deregister(&mplsip_handler, AF_INET); 702 xfrm_tunnel_mplsip_failed: 703 704 #endif
··· 698 699 rtnl_link_failed: 700 #if IS_ENABLED(CONFIG_MPLS) 701 + xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS); 702 xfrm_tunnel_mplsip_failed: 703 704 #endif
+1 -1
net/ipv4/ipmr.c
··· 2613 2614 mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id); 2615 if (!mrt) { 2616 - if (filter.dump_all_families) 2617 return skb->len; 2618 2619 NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
··· 2613 2614 mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id); 2615 if (!mrt) { 2616 + if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) 2617 return skb->len; 2618 2619 NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
+2 -1
net/ipv4/nexthop.c
··· 276 return 0; 277 278 nla_put_failure: 279 return -EMSGSIZE; 280 } 281 ··· 434 if (!valid_group_nh(nh, len, extack)) 435 return -EINVAL; 436 } 437 - for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) { 438 if (!tb[i]) 439 continue; 440
··· 276 return 0; 277 278 nla_put_failure: 279 + nlmsg_cancel(skb, nlh); 280 return -EMSGSIZE; 281 } 282 ··· 433 if (!valid_group_nh(nh, len, extack)) 434 return -EINVAL; 435 } 436 + for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) { 437 if (!tb[i]) 438 continue; 439
+6 -8
net/ipv4/route.c
··· 491 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; 492 u32 old = READ_ONCE(*p_tstamp); 493 u32 now = (u32)jiffies; 494 - u32 new, delta = 0; 495 496 if (old != now && cmpxchg(p_tstamp, old, now) == old) 497 delta = prandom_u32_max(now - old); 498 499 - /* Do not use atomic_add_return() as it makes UBSAN unhappy */ 500 - do { 501 - old = (u32)atomic_read(p_id); 502 - new = old + delta + segs; 503 - } while (atomic_cmpxchg(p_id, old, new) != old); 504 - 505 - return new - segs; 506 } 507 EXPORT_SYMBOL(ip_idents_reserve); 508
··· 491 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; 492 u32 old = READ_ONCE(*p_tstamp); 493 u32 now = (u32)jiffies; 494 + u32 delta = 0; 495 496 if (old != now && cmpxchg(p_tstamp, old, now) == old) 497 delta = prandom_u32_max(now - old); 498 499 + /* If UBSAN reports an error there, please make sure your compiler 500 + * supports -fno-strict-overflow before reporting it that was a bug 501 + * in UBSAN, and it has been fixed in GCC-8. 502 + */ 503 + return atomic_add_return(segs + delta, p_id) - segs; 504 } 505 EXPORT_SYMBOL(ip_idents_reserve); 506
+1 -1
net/ipv6/ip6_fib.c
··· 664 if (arg.filter.table_id) { 665 tb = fib6_get_table(net, arg.filter.table_id); 666 if (!tb) { 667 - if (arg.filter.dump_all_families) 668 goto out; 669 670 NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
··· 664 if (arg.filter.table_id) { 665 tb = fib6_get_table(net, arg.filter.table_id); 666 if (!tb) { 667 + if (rtnl_msg_family(cb->nlh) != PF_INET6) 668 goto out; 669 670 NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
+3 -2
net/ipv6/ip6mr.c
··· 98 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES 99 #define ip6mr_for_each_table(mrt, net) \ 100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \ 101 - lockdep_rtnl_is_held()) 102 103 static struct mr_table *ip6mr_mr_table_iter(struct net *net, 104 struct mr_table *mrt) ··· 2503 2504 mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id); 2505 if (!mrt) { 2506 - if (filter.dump_all_families) 2507 return skb->len; 2508 2509 NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
··· 98 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES 99 #define ip6mr_for_each_table(mrt, net) \ 100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \ 101 + lockdep_rtnl_is_held() || \ 102 + list_empty(&net->ipv6.mr6_tables)) 103 104 static struct mr_table *ip6mr_mr_table_iter(struct net *net, 105 struct mr_table *mrt) ··· 2502 2503 mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id); 2504 if (!mrt) { 2505 + if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR) 2506 return skb->len; 2507 2508 NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
+9 -15
net/mptcp/crypto.c
··· 47 void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) 48 { 49 u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE]; 50 - __be32 mptcp_hashed_key[SHA256_DIGEST_WORDS]; 51 - __be32 *hash_out = (__force __be32 *)hmac; 52 struct sha256_state state; 53 u8 key1be[8]; 54 u8 key2be[8]; ··· 84 85 sha256_init(&state); 86 sha256_update(&state, input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE); 87 - sha256_final(&state, (u8 *)mptcp_hashed_key); 88 - 89 - /* takes only first 160 bits */ 90 - for (i = 0; i < 5; i++) 91 - hash_out[i] = mptcp_hashed_key[i]; 92 } 93 94 #ifdef CONFIG_MPTCP_HMAC_TEST ··· 95 }; 96 97 /* we can't reuse RFC 4231 test vectors, as we have constraint on the 98 - * input and key size, and we truncate the output. 99 */ 100 static struct test_cast tests[] = { 101 { 102 .key = "0b0b0b0b0b0b0b0b", 103 .msg = "48692054", 104 - .result = "8385e24fb4235ac37556b6b886db106284a1da67", 105 }, 106 { 107 .key = "aaaaaaaaaaaaaaaa", 108 .msg = "dddddddd", 109 - .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492", 110 }, 111 { 112 .key = "0102030405060708", 113 .msg = "cdcdcdcd", 114 - .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6", 115 }, 116 }; 117 118 static int __init test_mptcp_crypto(void) 119 { 120 - char hmac[20], hmac_hex[41]; 121 u32 nonce1, nonce2; 122 u64 key1, key2; 123 u8 msg[8]; ··· 134 put_unaligned_be32(nonce2, &msg[4]); 135 136 mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); 137 - for (j = 0; j < 20; ++j) 138 sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff); 139 - hmac_hex[40] = 0; 140 141 - if (memcmp(hmac_hex, tests[i].result, 40)) 142 pr_err("test %d failed, got %s expected %s", i, 143 hmac_hex, tests[i].result); 144 else
··· 47 void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) 48 { 49 u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE]; 50 struct sha256_state state; 51 u8 key1be[8]; 52 u8 key2be[8]; ··· 86 87 sha256_init(&state); 88 sha256_update(&state, input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE); 89 + sha256_final(&state, (u8 *)hmac); 90 } 91 92 #ifdef CONFIG_MPTCP_HMAC_TEST ··· 101 }; 102 103 /* we can't reuse RFC 4231 test vectors, as we have constraint on the 104 + * input and key size. 105 */ 106 static struct test_cast tests[] = { 107 { 108 .key = "0b0b0b0b0b0b0b0b", 109 .msg = "48692054", 110 + .result = "8385e24fb4235ac37556b6b886db106284a1da671699f46db1f235ec622dcafa", 111 }, 112 { 113 .key = "aaaaaaaaaaaaaaaa", 114 .msg = "dddddddd", 115 + .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492984e1eb71aff9022f71046e9", 116 }, 117 { 118 .key = "0102030405060708", 119 .msg = "cdcdcdcd", 120 + .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6f23b4d8c4da736a5dbbc6e7d", 121 }, 122 }; 123 124 static int __init test_mptcp_crypto(void) 125 { 126 + char hmac[32], hmac_hex[65]; 127 u32 nonce1, nonce2; 128 u64 key1, key2; 129 u8 msg[8]; ··· 140 put_unaligned_be32(nonce2, &msg[4]); 141 142 mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); 143 + for (j = 0; j < 32; ++j) 144 sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff); 145 + hmac_hex[64] = 0; 146 147 + if (memcmp(hmac_hex, tests[i].result, 64)) 148 pr_err("test %d failed, got %s expected %s", i, 149 hmac_hex, tests[i].result); 150 else
+5 -4
net/mptcp/options.c
··· 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/kernel.h> 10 #include <net/tcp.h> 11 #include <net/mptcp.h> 12 #include "protocol.h" ··· 536 static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id, 537 struct in_addr *addr) 538 { 539 - u8 hmac[MPTCP_ADDR_HMAC_LEN]; 540 u8 msg[7]; 541 542 msg[0] = addr_id; ··· 546 547 mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac); 548 549 - return get_unaligned_be64(hmac); 550 } 551 552 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 553 static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id, 554 struct in6_addr *addr) 555 { 556 - u8 hmac[MPTCP_ADDR_HMAC_LEN]; 557 u8 msg[19]; 558 559 msg[0] = addr_id; ··· 563 564 mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac); 565 566 - return get_unaligned_be64(hmac); 567 } 568 #endif 569
··· 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 9 #include <linux/kernel.h> 10 + #include <crypto/sha.h> 11 #include <net/tcp.h> 12 #include <net/mptcp.h> 13 #include "protocol.h" ··· 535 static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id, 536 struct in_addr *addr) 537 { 538 + u8 hmac[SHA256_DIGEST_SIZE]; 539 u8 msg[7]; 540 541 msg[0] = addr_id; ··· 545 546 mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac); 547 548 + return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]); 549 } 550 551 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 552 static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id, 553 struct in6_addr *addr) 554 { 555 + u8 hmac[SHA256_DIGEST_SIZE]; 556 u8 msg[19]; 557 558 msg[0] = addr_id; ··· 562 563 mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac); 564 565 + return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]); 566 } 567 #endif 568
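With mptcp_crypto_hmac_sha() now producing the full 32-byte SHA-256 digest, truncation moves into the callers: the ADD_ADDR code above keeps only the rightmost 64 bits of the digest, read big-endian. A minimal user-space sketch of that truncation step (illustrative only; the helper name is hypothetical, not a kernel function):

#include <stdint.h>
#include <stddef.h>

#define SHA256_DIGEST_SIZE 32

/* Return the trailing 8 bytes of a full SHA-256 HMAC as a big-endian
 * 64-bit value, mirroring get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE -
 * sizeof(u64)]) in the hunk above.
 */
static uint64_t addr_hmac_truncate(const uint8_t hmac[SHA256_DIGEST_SIZE])
{
	const uint8_t *p = hmac + SHA256_DIGEST_SIZE - sizeof(uint64_t);
	uint64_t v = 0;
	size_t i;

	for (i = 0; i < sizeof(uint64_t); i++)
		v = (v << 8) | p[i];
	return v;
}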
-1
net/mptcp/protocol.h
··· 81 82 /* MPTCP ADD_ADDR flags */ 83 #define MPTCP_ADDR_ECHO BIT(0) 84 - #define MPTCP_ADDR_HMAC_LEN 20 85 #define MPTCP_ADDR_IPVERSION_4 4 86 #define MPTCP_ADDR_IPVERSION_6 6 87
··· 81 82 /* MPTCP ADD_ADDR flags */ 83 #define MPTCP_ADDR_ECHO BIT(0) 84 #define MPTCP_ADDR_IPVERSION_4 4 85 #define MPTCP_ADDR_IPVERSION_6 6 86
+10 -5
net/mptcp/subflow.c
··· 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <crypto/algapi.h> 13 #include <net/sock.h> 14 #include <net/inet_common.h> 15 #include <net/inet_hashtables.h> ··· 90 const struct sk_buff *skb) 91 { 92 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 93 - u8 hmac[MPTCPOPT_HMAC_LEN]; 94 struct mptcp_sock *msk; 95 int local_id; 96 ··· 202 /* validate received truncated hmac and create hmac for third ACK */ 203 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) 204 { 205 - u8 hmac[MPTCPOPT_HMAC_LEN]; 206 u64 thmac; 207 208 subflow_generate_hmac(subflow->remote_key, subflow->local_key, ··· 268 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; 269 } 270 } else if (subflow->mp_join) { 271 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", 272 subflow, subflow->thmac, 273 subflow->remote_nonce); ··· 282 subflow_generate_hmac(subflow->local_key, subflow->remote_key, 283 subflow->local_nonce, 284 subflow->remote_nonce, 285 - subflow->hmac); 286 287 if (skb) 288 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; ··· 352 const struct mptcp_options_received *mp_opt) 353 { 354 const struct mptcp_subflow_request_sock *subflow_req; 355 - u8 hmac[MPTCPOPT_HMAC_LEN]; 356 struct mptcp_sock *msk; 357 bool ret; 358 ··· 366 subflow_req->local_nonce, hmac); 367 368 ret = true; 369 - if (crypto_memneq(hmac, mp_opt->hmac, sizeof(hmac))) 370 ret = false; 371 372 sock_put((struct sock *)msk);
··· 10 #include <linux/module.h> 11 #include <linux/netdevice.h> 12 #include <crypto/algapi.h> 13 + #include <crypto/sha.h> 14 #include <net/sock.h> 15 #include <net/inet_common.h> 16 #include <net/inet_hashtables.h> ··· 89 const struct sk_buff *skb) 90 { 91 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 92 + u8 hmac[SHA256_DIGEST_SIZE]; 93 struct mptcp_sock *msk; 94 int local_id; 95 ··· 201 /* validate received truncated hmac and create hmac for third ACK */ 202 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) 203 { 204 + u8 hmac[SHA256_DIGEST_SIZE]; 205 u64 thmac; 206 207 subflow_generate_hmac(subflow->remote_key, subflow->local_key, ··· 267 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; 268 } 269 } else if (subflow->mp_join) { 270 + u8 hmac[SHA256_DIGEST_SIZE]; 271 + 272 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", 273 subflow, subflow->thmac, 274 subflow->remote_nonce); ··· 279 subflow_generate_hmac(subflow->local_key, subflow->remote_key, 280 subflow->local_nonce, 281 subflow->remote_nonce, 282 + hmac); 283 + 284 + memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); 285 286 if (skb) 287 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; ··· 347 const struct mptcp_options_received *mp_opt) 348 { 349 const struct mptcp_subflow_request_sock *subflow_req; 350 + u8 hmac[SHA256_DIGEST_SIZE]; 351 struct mptcp_sock *msk; 352 bool ret; 353 ··· 361 subflow_req->local_nonce, hmac); 362 363 ret = true; 364 + if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN)) 365 ret = false; 366 367 sock_put((struct sock *)msk);
+1 -1
net/qrtr/qrtr.c
··· 854 } 855 mutex_unlock(&qrtr_node_lock); 856 857 - qrtr_local_enqueue(node, skb, type, from, to); 858 859 return 0; 860 }
··· 854 } 855 mutex_unlock(&qrtr_node_lock); 856 857 + qrtr_local_enqueue(NULL, skb, type, from, to); 858 859 return 0; 860 }
+1
net/rxrpc/Makefile
··· 25 peer_event.o \ 26 peer_object.o \ 27 recvmsg.o \ 28 security.o \ 29 sendmsg.o \ 30 skbuff.o \
··· 25 peer_event.o \ 26 peer_object.o \ 27 recvmsg.o \ 28 + rtt.o \ 29 security.o \ 30 sendmsg.o \ 31 skbuff.o \
+17 -8
net/rxrpc/ar-internal.h
··· 7 8 #include <linux/atomic.h> 9 #include <linux/seqlock.h> 10 #include <net/net_namespace.h> 11 #include <net/netns/generic.h> 12 #include <net/sock.h> ··· 312 #define RXRPC_RTT_CACHE_SIZE 32 313 spinlock_t rtt_input_lock; /* RTT lock for input routine */ 314 ktime_t rtt_last_req; /* Time of last RTT request */ 315 - u64 rtt; /* Current RTT estimate (in nS) */ 316 - u64 rtt_sum; /* Sum of cache contents */ 317 - u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */ 318 - u8 rtt_cursor; /* next entry at which to insert */ 319 - u8 rtt_usage; /* amount of cache actually used */ 320 321 u8 cong_cwnd; /* Congestion window size */ 322 }; ··· 1045 extern unsigned int rxrpc_rx_window_size; 1046 extern unsigned int rxrpc_rx_mtu; 1047 extern unsigned int rxrpc_rx_jumbo_max; 1048 - extern unsigned long rxrpc_resend_timeout; 1049 1050 extern const s8 rxrpc_ack_priority[]; 1051 ··· 1072 * peer_event.c 1073 */ 1074 void rxrpc_error_report(struct sock *); 1075 - void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, 1076 - rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); 1077 void rxrpc_peer_keepalive_worker(struct work_struct *); 1078 1079 /* ··· 1102 */ 1103 void rxrpc_notify_socket(struct rxrpc_call *); 1104 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); 1105 1106 /* 1107 * rxkad.c
··· 7 8 #include <linux/atomic.h> 9 #include <linux/seqlock.h> 10 + #include <linux/win_minmax.h> 11 #include <net/net_namespace.h> 12 #include <net/netns/generic.h> 13 #include <net/sock.h> ··· 311 #define RXRPC_RTT_CACHE_SIZE 32 312 spinlock_t rtt_input_lock; /* RTT lock for input routine */ 313 ktime_t rtt_last_req; /* Time of last RTT request */ 314 + unsigned int rtt_count; /* Number of samples we've got */ 315 + 316 + u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 317 + u32 mdev_us; /* medium deviation */ 318 + u32 mdev_max_us; /* maximal mdev for the last rtt period */ 319 + u32 rttvar_us; /* smoothed mdev_max */ 320 + u32 rto_j; /* Retransmission timeout in jiffies */ 321 + u8 backoff; /* Backoff timeout */ 322 323 u8 cong_cwnd; /* Congestion window size */ 324 }; ··· 1041 extern unsigned int rxrpc_rx_window_size; 1042 extern unsigned int rxrpc_rx_mtu; 1043 extern unsigned int rxrpc_rx_jumbo_max; 1044 1045 extern const s8 rxrpc_ack_priority[]; 1046 ··· 1069 * peer_event.c 1070 */ 1071 void rxrpc_error_report(struct sock *); 1072 void rxrpc_peer_keepalive_worker(struct work_struct *); 1073 1074 /* ··· 1101 */ 1102 void rxrpc_notify_socket(struct rxrpc_call *); 1103 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); 1104 + 1105 + /* 1106 + * rtt.c 1107 + */ 1108 + void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, 1109 + rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); 1110 + unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool); 1111 + void rxrpc_peer_init_rtt(struct rxrpc_peer *); 1112 1113 /* 1114 * rxkad.c
+1 -1
net/rxrpc/call_accept.c
··· 248 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 249 ktime_t now = skb->tstamp; 250 251 - if (call->peer->rtt_usage < 3 || 252 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) 253 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, 254 true, true,
··· 248 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 249 ktime_t now = skb->tstamp; 250 251 + if (call->peer->rtt_count < 3 || 252 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) 253 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, 254 true, true,
+8 -14
net/rxrpc/call_event.c
··· 111 } else { 112 unsigned long now = jiffies, ack_at; 113 114 - if (call->peer->rtt_usage > 0) 115 - ack_at = nsecs_to_jiffies(call->peer->rtt); 116 else 117 ack_at = expiry; 118 ··· 157 static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) 158 { 159 struct sk_buff *skb; 160 - unsigned long resend_at; 161 rxrpc_seq_t cursor, seq, top; 162 - ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo; 163 int ix; 164 u8 annotation, anno_type, retrans = 0, unacked = 0; 165 166 _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); 167 168 - if (call->peer->rtt_usage > 1) 169 - timeout = ns_to_ktime(call->peer->rtt * 3 / 2); 170 - else 171 - timeout = ms_to_ktime(rxrpc_resend_timeout); 172 - min_timeo = ns_to_ktime((1000000000 / HZ) * 4); 173 - if (ktime_before(timeout, min_timeo)) 174 - timeout = min_timeo; 175 176 now = ktime_get_real(); 177 - max_age = ktime_sub(now, timeout); 178 179 spin_lock_bh(&call->lock); 180 ··· 213 } 214 215 resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest))); 216 - resend_at += jiffies + rxrpc_resend_timeout; 217 WRITE_ONCE(call->resend_at, resend_at); 218 219 if (unacked) ··· 228 rxrpc_timer_set_for_resend); 229 spin_unlock_bh(&call->lock); 230 ack_ts = ktime_sub(now, call->acks_latest_ts); 231 - if (ktime_to_ns(ack_ts) < call->peer->rtt) 232 goto out; 233 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, 234 rxrpc_propose_ack_ping_for_lost_ack);
··· 111 } else { 112 unsigned long now = jiffies, ack_at; 113 114 + if (call->peer->srtt_us != 0) 115 + ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3); 116 else 117 ack_at = expiry; 118 ··· 157 static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) 158 { 159 struct sk_buff *skb; 160 + unsigned long resend_at, rto_j; 161 rxrpc_seq_t cursor, seq, top; 162 + ktime_t now, max_age, oldest, ack_ts; 163 int ix; 164 u8 annotation, anno_type, retrans = 0, unacked = 0; 165 166 _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); 167 168 + rto_j = call->peer->rto_j; 169 170 now = ktime_get_real(); 171 + max_age = ktime_sub(now, jiffies_to_usecs(rto_j)); 172 173 spin_lock_bh(&call->lock); 174 ··· 219 } 220 221 resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest))); 222 + resend_at += jiffies + rto_j; 223 WRITE_ONCE(call->resend_at, resend_at); 224 225 if (unacked) ··· 234 rxrpc_timer_set_for_resend); 235 spin_unlock_bh(&call->lock); 236 ack_ts = ktime_sub(now, call->acks_latest_ts); 237 + if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3)) 238 goto out; 239 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, 240 rxrpc_propose_ack_ping_for_lost_ack);
+37 -7
net/rxrpc/input.c
··· 91 /* We analyse the number of packets that get ACK'd per RTT 92 * period and increase the window if we managed to fill it. 93 */ 94 - if (call->peer->rtt_usage == 0) 95 goto out; 96 if (ktime_before(skb->tstamp, 97 - ktime_add_ns(call->cong_tstamp, 98 - call->peer->rtt))) 99 goto out_no_clear_ca; 100 change = rxrpc_cong_rtt_window_end; 101 call->cong_tstamp = skb->tstamp; ··· 803 } 804 805 /* 806 * Process an ACK packet. 807 * 808 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet ··· 889 } 890 891 /* Discard any out-of-order or duplicate ACKs (outside lock). */ 892 - if (before(first_soft_ack, call->ackr_first_seq) || 893 - before(prev_pkt, call->ackr_prev_seq)) 894 return; 895 896 buf.info.rxMTU = 0; 897 ioffset = offset + nr_acks + 3; ··· 905 spin_lock(&call->input_lock); 906 907 /* Discard any out-of-order or duplicate ACKs (inside lock). */ 908 - if (before(first_soft_ack, call->ackr_first_seq) || 909 - before(prev_pkt, call->ackr_prev_seq)) 910 goto out; 911 call->acks_latest_ts = skb->tstamp; 912 913 call->ackr_first_seq = first_soft_ack;
··· 91 /* We analyse the number of packets that get ACK'd per RTT 92 * period and increase the window if we managed to fill it. 93 */ 94 + if (call->peer->rtt_count == 0) 95 goto out; 96 if (ktime_before(skb->tstamp, 97 + ktime_add_us(call->cong_tstamp, 98 + call->peer->srtt_us >> 3))) 99 goto out_no_clear_ca; 100 change = rxrpc_cong_rtt_window_end; 101 call->cong_tstamp = skb->tstamp; ··· 803 } 804 805 /* 806 + * Return true if the ACK is valid - ie. it doesn't appear to have regressed 807 + * with respect to the ack state conveyed by preceding ACKs. 808 + */ 809 + static bool rxrpc_is_ack_valid(struct rxrpc_call *call, 810 + rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt) 811 + { 812 + rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq); 813 + 814 + if (after(first_pkt, base)) 815 + return true; /* The window advanced */ 816 + 817 + if (before(first_pkt, base)) 818 + return false; /* firstPacket regressed */ 819 + 820 + if (after_eq(prev_pkt, call->ackr_prev_seq)) 821 + return true; /* previousPacket hasn't regressed. */ 822 + 823 + /* Some rx implementations put a serial number in previousPacket. */ 824 + if (after_eq(prev_pkt, base + call->tx_winsize)) 825 + return false; 826 + return true; 827 + } 828 + 829 + /* 830 * Process an ACK packet. 831 * 832 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet ··· 865 } 866 867 /* Discard any out-of-order or duplicate ACKs (outside lock). */ 868 + if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { 869 + trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, 870 + first_soft_ack, call->ackr_first_seq, 871 + prev_pkt, call->ackr_prev_seq); 872 return; 873 + } 874 875 buf.info.rxMTU = 0; 876 ioffset = offset + nr_acks + 3; ··· 878 spin_lock(&call->input_lock); 879 880 /* Discard any out-of-order or duplicate ACKs (inside lock). */ 881 + if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { 882 + trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, 883 + first_soft_ack, call->ackr_first_seq, 884 + prev_pkt, call->ackr_prev_seq); 885 goto out; 886 + } 887 call->acks_latest_ts = skb->tstamp; 888 889 call->ackr_first_seq = first_soft_ack;
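The regression test above relies on wraparound-safe sequence comparison; the kernel's before()/after() helpers reduce to a signed interpretation of the 32-bit difference. A stand-alone sketch of that comparison (user-space reimplementation for illustration, not the kernel macros):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t rxrpc_seq_t;

/* True if a precedes b in 32-bit sequence space, tolerating wraparound. */
static bool seq_before(rxrpc_seq_t a, rxrpc_seq_t b)
{
	return (int32_t)(a - b) < 0;
}

static bool seq_after(rxrpc_seq_t a, rxrpc_seq_t b)
{
	return seq_before(b, a);
}

/* For example, seq_after(2, 0xfffffffe) is true: sequence 2 follows
 * 0xfffffffe once the counter wraps, which is why a plain "<" cannot be
 * used when deciding whether firstPacket has regressed.
 */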
-5
net/rxrpc/misc.c
··· 63 */ 64 unsigned int rxrpc_rx_jumbo_max = 4; 65 66 - /* 67 - * Time till packet resend (in milliseconds). 68 - */ 69 - unsigned long rxrpc_resend_timeout = 4 * HZ; 70 - 71 const s8 rxrpc_ack_priority[] = { 72 [0] = 0, 73 [RXRPC_ACK_DELAY] = 1,
··· 63 */ 64 unsigned int rxrpc_rx_jumbo_max = 4; 65 66 const s8 rxrpc_ack_priority[] = { 67 [0] = 0, 68 [RXRPC_ACK_DELAY] = 1,
+3 -6
net/rxrpc/output.c
··· 369 (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) || 370 retrans || 371 call->cong_mode == RXRPC_CALL_SLOW_START || 372 - (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || 373 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), 374 ktime_get_real()))) 375 whdr.flags |= RXRPC_REQUEST_ACK; ··· 423 if (whdr.flags & RXRPC_REQUEST_ACK) { 424 call->peer->rtt_last_req = skb->tstamp; 425 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); 426 - if (call->peer->rtt_usage > 1) { 427 unsigned long nowj = jiffies, ack_lost_at; 428 429 - ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt); 430 - if (ack_lost_at < 1) 431 - ack_lost_at = 1; 432 - 433 ack_lost_at += nowj; 434 WRITE_ONCE(call->ack_lost_at, ack_lost_at); 435 rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
··· 369 (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) || 370 retrans || 371 call->cong_mode == RXRPC_CALL_SLOW_START || 372 + (call->peer->rtt_count < 3 && sp->hdr.seq & 1) || 373 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), 374 ktime_get_real()))) 375 whdr.flags |= RXRPC_REQUEST_ACK; ··· 423 if (whdr.flags & RXRPC_REQUEST_ACK) { 424 call->peer->rtt_last_req = skb->tstamp; 425 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); 426 + if (call->peer->rtt_count > 1) { 427 unsigned long nowj = jiffies, ack_lost_at; 428 429 + ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans); 430 ack_lost_at += nowj; 431 WRITE_ONCE(call->ack_lost_at, ack_lost_at); 432 rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
-46
net/rxrpc/peer_event.c
··· 296 } 297 298 /* 299 - * Add RTT information to cache. This is called in softirq mode and has 300 - * exclusive access to the peer RTT data. 301 - */ 302 - void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 303 - rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 304 - ktime_t send_time, ktime_t resp_time) 305 - { 306 - struct rxrpc_peer *peer = call->peer; 307 - s64 rtt; 308 - u64 sum = peer->rtt_sum, avg; 309 - u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage; 310 - 311 - rtt = ktime_to_ns(ktime_sub(resp_time, send_time)); 312 - if (rtt < 0) 313 - return; 314 - 315 - spin_lock(&peer->rtt_input_lock); 316 - 317 - /* Replace the oldest datum in the RTT buffer */ 318 - sum -= peer->rtt_cache[cursor]; 319 - sum += rtt; 320 - peer->rtt_cache[cursor] = rtt; 321 - peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1); 322 - peer->rtt_sum = sum; 323 - if (usage < RXRPC_RTT_CACHE_SIZE) { 324 - usage++; 325 - peer->rtt_usage = usage; 326 - } 327 - 328 - spin_unlock(&peer->rtt_input_lock); 329 - 330 - /* Now recalculate the average */ 331 - if (usage == RXRPC_RTT_CACHE_SIZE) { 332 - avg = sum / RXRPC_RTT_CACHE_SIZE; 333 - } else { 334 - avg = sum; 335 - do_div(avg, usage); 336 - } 337 - 338 - /* Don't need to update this under lock */ 339 - peer->rtt = avg; 340 - trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, 341 - usage, avg); 342 - } 343 - 344 - /* 345 * Perform keep-alive pings. 346 */ 347 static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
··· 296 } 297 298 /* 299 * Perform keep-alive pings. 300 */ 301 static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+7 -5
net/rxrpc/peer_object.c
··· 225 spin_lock_init(&peer->rtt_input_lock); 226 peer->debug_id = atomic_inc_return(&rxrpc_debug_id); 227 228 if (RXRPC_TX_SMSS > 2190) 229 peer->cong_cwnd = 2; 230 else if (RXRPC_TX_SMSS > 1095) ··· 499 EXPORT_SYMBOL(rxrpc_kernel_get_peer); 500 501 /** 502 - * rxrpc_kernel_get_rtt - Get a call's peer RTT 503 * @sock: The socket on which the call is in progress. 504 * @call: The call to query 505 * 506 - * Get the call's peer RTT. 507 */ 508 - u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call) 509 { 510 - return call->peer->rtt; 511 } 512 - EXPORT_SYMBOL(rxrpc_kernel_get_rtt);
··· 225 spin_lock_init(&peer->rtt_input_lock); 226 peer->debug_id = atomic_inc_return(&rxrpc_debug_id); 227 228 + rxrpc_peer_init_rtt(peer); 229 + 230 if (RXRPC_TX_SMSS > 2190) 231 peer->cong_cwnd = 2; 232 else if (RXRPC_TX_SMSS > 1095) ··· 497 EXPORT_SYMBOL(rxrpc_kernel_get_peer); 498 499 /** 500 + * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT 501 * @sock: The socket on which the call is in progress. 502 * @call: The call to query 503 * 504 + * Get the call's peer smoothed RTT. 505 */ 506 + u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call) 507 { 508 + return call->peer->srtt_us >> 3; 509 } 510 + EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
+4 -4
net/rxrpc/proc.c
··· 222 seq_puts(seq, 223 "Proto Local " 224 " Remote " 225 - " Use CW MTU LastUse RTT Rc\n" 226 ); 227 return 0; 228 } ··· 236 now = ktime_get_seconds(); 237 seq_printf(seq, 238 "UDP %-47.47s %-47.47s %3u" 239 - " %3u %5u %6llus %12llu %2u\n", 240 lbuff, 241 rbuff, 242 atomic_read(&peer->usage), 243 peer->cong_cwnd, 244 peer->mtu, 245 now - peer->last_tx_at, 246 - peer->rtt, 247 - peer->rtt_cursor); 248 249 return 0; 250 }
··· 222 seq_puts(seq, 223 "Proto Local " 224 " Remote " 225 + " Use CW MTU LastUse RTT RTO\n" 226 ); 227 return 0; 228 } ··· 236 now = ktime_get_seconds(); 237 seq_printf(seq, 238 "UDP %-47.47s %-47.47s %3u" 239 + " %3u %5u %6llus %8u %8u\n", 240 lbuff, 241 rbuff, 242 atomic_read(&peer->usage), 243 peer->cong_cwnd, 244 peer->mtu, 245 now - peer->last_tx_at, 246 + peer->srtt_us >> 3, 247 + jiffies_to_usecs(peer->rto_j)); 248 249 return 0; 250 }
+195
net/rxrpc/rtt.c
···
··· 1 + // SPDX-License-Identifier: GPL-2.0
2 + /* RTT/RTO calculation.
3 + *
4 + * Adapted from TCP for AF_RXRPC by David Howells (dhowells@redhat.com)
5 + *
6 + * https://tools.ietf.org/html/rfc6298
7 + * https://tools.ietf.org/html/rfc1122#section-4.2.3.1
8 + * http://ccr.sigcomm.org/archive/1995/jan95/ccr-9501-partridge87.pdf
9 + */
10 +
11 + #include <linux/net.h>
12 + #include "ar-internal.h"
13 +
14 + #define RXRPC_RTO_MAX ((unsigned)(120 * HZ))
15 + #define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
16 + #define rxrpc_jiffies32 ((u32)jiffies) /* As rxrpc_jiffies32 */
17 + #define rxrpc_min_rtt_wlen 300 /* As sysctl_tcp_min_rtt_wlen */
18 +
19 + static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
20 + {
21 + return 200;
22 + }
23 +
24 + static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
25 + {
26 + return _usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
27 + }
28 +
29 + static u32 rxrpc_bound_rto(u32 rto)
30 + {
31 + return min(rto, RXRPC_RTO_MAX);
32 + }
33 +
34 + /*
35 + * Called to compute a smoothed rtt estimate. The data fed to this
36 + * routine either comes from timestamps, or from segments that were
37 + * known _not_ to have been retransmitted [see Karn/Partridge
38 + * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
39 + * piece by Van Jacobson.
40 + * NOTE: the next three routines used to be one big routine.
41 + * To save cycles in the RFC 1323 implementation it was better to break
42 + * it up into three procedures. -- erics
43 + */
44 + static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
45 + {
46 + long m = sample_rtt_us; /* RTT */
47 + u32 srtt = peer->srtt_us;
48 +
49 + /* The following amusing code comes from Jacobson's
50 + * article in SIGCOMM '88. Note that rtt and mdev
51 + * are scaled versions of rtt and mean deviation.
52 + * This is designed to be as fast as possible
53 + * m stands for "measurement".
54 + *
55 + * On a 1990 paper the rto value is changed to:
56 + * RTO = rtt + 4 * mdev
57 + *
58 + * Funny. This algorithm seems to be very broken.
59 + * These formulae increase RTO, when it should be decreased, increase
60 + * too slowly, when it should be increased quickly, decrease too quickly
61 + * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
62 + * does not matter how to _calculate_ it. Seems, it was trap
63 + * that VJ failed to avoid. 8)
64 + */
65 + if (srtt != 0) {
66 + m -= (srtt >> 3); /* m is now error in rtt est */
67 + srtt += m; /* rtt = 7/8 rtt + 1/8 new */
68 + if (m < 0) {
69 + m = -m; /* m is now abs(error) */
70 + m -= (peer->mdev_us >> 2); /* similar update on mdev */
71 + /* This is similar to one of Eifel findings.
72 + * Eifel blocks mdev updates when rtt decreases.
73 + * This solution is a bit different: we use finer gain
74 + * for mdev in this case (alpha*beta).
75 + * Like Eifel it also prevents growth of rto,
76 + * but also it limits too fast rto decreases,
77 + * happening in pure Eifel.
78 + */
79 + if (m > 0)
80 + m >>= 3;
81 + } else {
82 + m -= (peer->mdev_us >> 2); /* similar update on mdev */
83 + }
84 +
85 + peer->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */
86 + if (peer->mdev_us > peer->mdev_max_us) {
87 + peer->mdev_max_us = peer->mdev_us;
88 + if (peer->mdev_max_us > peer->rttvar_us)
89 + peer->rttvar_us = peer->mdev_max_us;
90 + }
91 + } else {
92 + /* no previous measure.
*/ 93 + srtt = m << 3; /* take the measured time to be rtt */ 94 + peer->mdev_us = m << 1; /* make sure rto = 3*rtt */ 95 + peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer)); 96 + peer->mdev_max_us = peer->rttvar_us; 97 + } 98 + 99 + peer->srtt_us = max(1U, srtt); 100 + } 101 + 102 + /* 103 + * Calculate rto without backoff. This is the second half of Van Jacobson's 104 + * routine referred to above. 105 + */ 106 + static void rxrpc_set_rto(struct rxrpc_peer *peer) 107 + { 108 + u32 rto; 109 + 110 + /* 1. If rtt variance happened to be less 50msec, it is hallucination. 111 + * It cannot be less due to utterly erratic ACK generation made 112 + * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ 113 + * to do with delayed acks, because at cwnd>2 true delack timeout 114 + * is invisible. Actually, Linux-2.4 also generates erratic 115 + * ACKs in some circumstances. 116 + */ 117 + rto = __rxrpc_set_rto(peer); 118 + 119 + /* 2. Fixups made earlier cannot be right. 120 + * If we do not estimate RTO correctly without them, 121 + * all the algo is pure shit and should be replaced 122 + * with correct one. It is exactly, which we pretend to do. 123 + */ 124 + 125 + /* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo 126 + * guarantees that rto is higher. 127 + */ 128 + peer->rto_j = rxrpc_bound_rto(rto); 129 + } 130 + 131 + static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us) 132 + { 133 + if (rtt_us < 0) 134 + return; 135 + 136 + //rxrpc_update_rtt_min(peer, rtt_us); 137 + rxrpc_rtt_estimator(peer, rtt_us); 138 + rxrpc_set_rto(peer); 139 + 140 + /* RFC6298: only reset backoff on valid RTT measurement. */ 141 + peer->backoff = 0; 142 + } 143 + 144 + /* 145 + * Add RTT information to cache. This is called in softirq mode and has 146 + * exclusive access to the peer RTT data. 147 + */ 148 + void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 149 + rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 150 + ktime_t send_time, ktime_t resp_time) 151 + { 152 + struct rxrpc_peer *peer = call->peer; 153 + s64 rtt_us; 154 + 155 + rtt_us = ktime_to_us(ktime_sub(resp_time, send_time)); 156 + if (rtt_us < 0) 157 + return; 158 + 159 + spin_lock(&peer->rtt_input_lock); 160 + rxrpc_ack_update_rtt(peer, rtt_us); 161 + if (peer->rtt_count < 3) 162 + peer->rtt_count++; 163 + spin_unlock(&peer->rtt_input_lock); 164 + 165 + trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, 166 + peer->srtt_us >> 3, peer->rto_j); 167 + } 168 + 169 + /* 170 + * Get the retransmission timeout to set in jiffies, backing it off each time 171 + * we retransmit. 172 + */ 173 + unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans) 174 + { 175 + u64 timo_j; 176 + u8 backoff = READ_ONCE(peer->backoff); 177 + 178 + timo_j = peer->rto_j; 179 + timo_j <<= backoff; 180 + if (retrans && timo_j * 2 <= RXRPC_RTO_MAX) 181 + WRITE_ONCE(peer->backoff, backoff + 1); 182 + 183 + if (timo_j < 1) 184 + timo_j = 1; 185 + 186 + return timo_j; 187 + } 188 + 189 + void rxrpc_peer_init_rtt(struct rxrpc_peer *peer) 190 + { 191 + peer->rto_j = RXRPC_TIMEOUT_INIT; 192 + peer->mdev_us = jiffies_to_usecs(RXRPC_TIMEOUT_INIT); 193 + peer->backoff = 0; 194 + //minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U); 195 + }
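The estimator above is a fixed-point port of TCP's Jacobson/RFC 6298 code, with srtt_us held left-shifted by 3 and the deviation terms scaled to match. A plain-arithmetic user-space model of the same update, handy for checking the numbers (illustrative sketch only, not the kernel implementation):

#include <stdint.h>

struct rtt_model {
	uint32_t srtt_us;	/* smoothed RTT */
	uint32_t rttvar_us;	/* RTT variance */
	uint32_t rto_us;	/* retransmission timeout */
};

/* Feed one RTT sample (in microseconds) through the RFC 6298 update. */
static void rtt_model_sample(struct rtt_model *s, uint32_t m_us)
{
	if (s->srtt_us == 0) {
		/* First measurement: SRTT = R, RTTVAR = R/2 (RFC 6298 2.2) */
		s->srtt_us = m_us;
		s->rttvar_us = m_us / 2;
	} else {
		uint32_t err = m_us > s->srtt_us ? m_us - s->srtt_us
						 : s->srtt_us - m_us;

		/* RTTVAR = 3/4 RTTVAR + 1/4 |SRTT - R'| */
		s->rttvar_us = (3 * s->rttvar_us + err) / 4;
		/* SRTT = 7/8 SRTT + 1/8 R' */
		s->srtt_us = (7 * s->srtt_us + m_us) / 8;
	}

	/* RTO = SRTT + 4 * RTTVAR; the kernel additionally clamps to
	 * RXRPC_RTO_MAX and applies the per-peer backoff on retransmission.
	 */
	s->rto_us = s->srtt_us + 4 * s->rttvar_us;
}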
+1 -2
net/rxrpc/rxkad.c
··· 1148 ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key, 1149 &expiry, _abort_code); 1150 if (ret < 0) 1151 - goto temporary_error_free_resp; 1152 1153 /* use the session key from inside the ticket to decrypt the 1154 * response */ ··· 1230 1231 temporary_error_free_ticket: 1232 kfree(ticket); 1233 - temporary_error_free_resp: 1234 kfree(response); 1235 temporary_error: 1236 /* Ignore the response packet if we got a temporary error such as
··· 1148 ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key, 1149 &expiry, _abort_code); 1150 if (ret < 0) 1151 + goto temporary_error_free_ticket; 1152 1153 /* use the session key from inside the ticket to decrypt the 1154 * response */ ··· 1230 1231 temporary_error_free_ticket: 1232 kfree(ticket); 1233 kfree(response); 1234 temporary_error: 1235 /* Ignore the response packet if we got a temporary error such as
+9 -17
net/rxrpc/sendmsg.c
··· 66 struct rxrpc_call *call) 67 { 68 rxrpc_seq_t tx_start, tx_win; 69 - signed long rtt2, timeout; 70 - u64 rtt; 71 72 - rtt = READ_ONCE(call->peer->rtt); 73 - rtt2 = nsecs_to_jiffies64(rtt) * 2; 74 - if (rtt2 < 2) 75 - rtt2 = 2; 76 77 - timeout = rtt2; 78 tx_start = READ_ONCE(call->tx_hard_ack); 79 80 for (;;) { ··· 91 return -EINTR; 92 93 if (tx_win != tx_start) { 94 - timeout = rtt2; 95 tx_start = tx_win; 96 } 97 ··· 270 _debug("need instant resend %d", ret); 271 rxrpc_instant_resend(call, ix); 272 } else { 273 - unsigned long now = jiffies, resend_at; 274 275 - if (call->peer->rtt_usage > 1) 276 - resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2); 277 - else 278 - resend_at = rxrpc_resend_timeout; 279 - if (resend_at < 1) 280 - resend_at = 1; 281 - 282 - resend_at += now; 283 WRITE_ONCE(call->resend_at, resend_at); 284 rxrpc_reduce_call_timer(call, resend_at, now, 285 rxrpc_timer_set_for_send);
··· 66 struct rxrpc_call *call) 67 { 68 rxrpc_seq_t tx_start, tx_win; 69 + signed long rtt, timeout; 70 71 + rtt = READ_ONCE(call->peer->srtt_us) >> 3; 72 + rtt = usecs_to_jiffies(rtt) * 2; 73 + if (rtt < 2) 74 + rtt = 2; 75 76 + timeout = rtt; 77 tx_start = READ_ONCE(call->tx_hard_ack); 78 79 for (;;) { ··· 92 return -EINTR; 93 94 if (tx_win != tx_start) { 95 + timeout = rtt; 96 tx_start = tx_win; 97 } 98 ··· 271 _debug("need instant resend %d", ret); 272 rxrpc_instant_resend(call, ix); 273 } else { 274 + unsigned long now = jiffies; 275 + unsigned long resend_at = now + call->peer->rto_j; 276 277 WRITE_ONCE(call->resend_at, resend_at); 278 rxrpc_reduce_call_timer(call, resend_at, now, 279 rxrpc_timer_set_for_send);
-9
net/rxrpc/sysctl.c
··· 71 .extra1 = (void *)&one_jiffy, 72 .extra2 = (void *)&max_jiffies, 73 }, 74 - { 75 - .procname = "resend_timeout", 76 - .data = &rxrpc_resend_timeout, 77 - .maxlen = sizeof(unsigned long), 78 - .mode = 0644, 79 - .proc_handler = proc_doulongvec_ms_jiffies_minmax, 80 - .extra1 = (void *)&one_jiffy, 81 - .extra2 = (void *)&max_jiffies, 82 - }, 83 84 /* Non-time values */ 85 {
··· 71 .extra1 = (void *)&one_jiffy, 72 .extra2 = (void *)&max_jiffies, 73 }, 74 75 /* Non-time values */ 76 {
+11 -3
net/sctp/sm_sideeffect.c
··· 1523 timeout = asoc->timeouts[cmd->obj.to]; 1524 BUG_ON(!timeout); 1525 1526 - timer->expires = jiffies + timeout; 1527 - sctp_association_hold(asoc); 1528 - add_timer(timer); 1529 break; 1530 1531 case SCTP_CMD_TIMER_RESTART:
··· 1523 timeout = asoc->timeouts[cmd->obj.to]; 1524 BUG_ON(!timeout); 1525 1526 + /* 1527 + * SCTP has a hard time with timer starts. Because we process 1528 + * timer starts as side effects, it can be hard to tell if we 1529 + * have already started a timer or not, which leads to BUG 1530 + * halts when we call add_timer. So here, instead of just starting 1531 + * a timer, if the timer is already started, and just mod 1532 + * the timer with the shorter of the two expiration times 1533 + */ 1534 + if (!timer_pending(timer)) 1535 + sctp_association_hold(asoc); 1536 + timer_reduce(timer, jiffies + timeout); 1537 break; 1538 1539 case SCTP_CMD_TIMER_RESTART:
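The fix above replaces an unconditional add_timer(), which BUGs if the timer is already pending, with timer_reduce(), and takes the association reference only when the timer is newly armed. A user-space analogue of that arm-or-shorten pattern (illustrative sketch, not the kernel timer API):

#include <stdbool.h>
#include <stdint.h>

struct simple_timer {
	bool pending;
	uint64_t expires;	/* absolute deadline, e.g. in jiffies */
};

/* Arm the timer, or pull an already-armed timer earlier; never push the
 * deadline later. Returns true when the timer was newly armed so the
 * caller knows to take its reference exactly once.
 */
static bool timer_start_or_reduce(struct simple_timer *t, uint64_t deadline)
{
	bool was_pending = t->pending;

	if (!was_pending || deadline < t->expires)
		t->expires = deadline;
	t->pending = true;

	return !was_pending;
}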
+5 -4
net/sctp/sm_statefuns.c
··· 1856 /* Update the content of current association. */ 1857 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1858 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 1859 - if (sctp_state(asoc, SHUTDOWN_PENDING) && 1860 (sctp_sstate(asoc->base.sk, CLOSING) || 1861 sock_flag(asoc->base.sk, SOCK_DEAD))) { 1862 - /* if were currently in SHUTDOWN_PENDING, but the socket 1863 - * has been closed by user, don't transition to ESTABLISHED. 1864 - * Instead trigger SHUTDOWN bundled with COOKIE_ACK. 1865 */ 1866 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 1867 return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
··· 1856 /* Update the content of current association. */ 1857 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1858 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 1859 + if ((sctp_state(asoc, SHUTDOWN_PENDING) || 1860 + sctp_state(asoc, SHUTDOWN_SENT)) && 1861 (sctp_sstate(asoc->base.sk, CLOSING) || 1862 sock_flag(asoc->base.sk, SOCK_DEAD))) { 1863 + /* If the socket has been closed by user, don't 1864 + * transition to ESTABLISHED. Instead trigger SHUTDOWN 1865 + * bundled with COOKIE_ACK. 1866 */ 1867 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 1868 return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
+5 -1
net/tipc/udp_media.c
··· 161 struct udp_bearer *ub, struct udp_media_addr *src, 162 struct udp_media_addr *dst, struct dst_cache *cache) 163 { 164 - struct dst_entry *ndst = dst_cache_get(cache); 165 int ttl, err = 0; 166 167 if (dst->proto == htons(ETH_P_IP)) { 168 struct rtable *rt = (struct rtable *)ndst; 169 ··· 212 src->port, dst->port, false); 213 #endif 214 } 215 return err; 216 217 tx_error: 218 kfree_skb(skb); 219 return err; 220 }
··· 161 struct udp_bearer *ub, struct udp_media_addr *src, 162 struct udp_media_addr *dst, struct dst_cache *cache) 163 { 164 + struct dst_entry *ndst; 165 int ttl, err = 0; 166 167 + local_bh_disable(); 168 + ndst = dst_cache_get(cache); 169 if (dst->proto == htons(ETH_P_IP)) { 170 struct rtable *rt = (struct rtable *)ndst; 171 ··· 210 src->port, dst->port, false); 211 #endif 212 } 213 + local_bh_enable(); 214 return err; 215 216 tx_error: 217 + local_bh_enable(); 218 kfree_skb(skb); 219 return err; 220 }
+10 -7
net/tls/tls_sw.c
··· 780 781 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, 782 bool full_record, u8 record_type, 783 - size_t *copied, int flags) 784 { 785 struct tls_context *tls_ctx = tls_get_ctx(sk); 786 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); ··· 796 psock = sk_psock_get(sk); 797 if (!psock || !policy) { 798 err = tls_push_record(sk, flags, record_type); 799 - if (err && err != -EINPROGRESS) { 800 *copied -= sk_msg_free(sk, msg); 801 tls_free_open_rec(sk); 802 } 803 if (psock) 804 sk_psock_put(sk, psock); ··· 825 switch (psock->eval) { 826 case __SK_PASS: 827 err = tls_push_record(sk, flags, record_type); 828 - if (err && err != -EINPROGRESS) { 829 *copied -= sk_msg_free(sk, msg); 830 tls_free_open_rec(sk); 831 goto out_err; 832 } 833 break; ··· 918 unsigned char record_type = TLS_RECORD_TYPE_DATA; 919 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 920 bool eor = !(msg->msg_flags & MSG_MORE); 921 - size_t try_to_copy, copied = 0; 922 struct sk_msg *msg_pl, *msg_en; 923 struct tls_rec *rec; 924 int required_size; ··· 1121 1122 release_sock(sk); 1123 mutex_unlock(&tls_ctx->tx_lock); 1124 - return copied ? copied : ret; 1125 } 1126 1127 static int tls_sw_do_sendpage(struct sock *sk, struct page *page, ··· 1135 struct sk_msg *msg_pl; 1136 struct tls_rec *rec; 1137 int num_async = 0; 1138 - size_t copied = 0; 1139 bool full_record; 1140 int record_room; 1141 int ret = 0; ··· 1237 } 1238 sendpage_end: 1239 ret = sk_stream_error(sk, flags, ret); 1240 - return copied ? copied : ret; 1241 } 1242 1243 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
··· 780 781 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, 782 bool full_record, u8 record_type, 783 + ssize_t *copied, int flags) 784 { 785 struct tls_context *tls_ctx = tls_get_ctx(sk); 786 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); ··· 796 psock = sk_psock_get(sk); 797 if (!psock || !policy) { 798 err = tls_push_record(sk, flags, record_type); 799 + if (err && sk->sk_err == EBADMSG) { 800 *copied -= sk_msg_free(sk, msg); 801 tls_free_open_rec(sk); 802 + err = -sk->sk_err; 803 } 804 if (psock) 805 sk_psock_put(sk, psock); ··· 824 switch (psock->eval) { 825 case __SK_PASS: 826 err = tls_push_record(sk, flags, record_type); 827 + if (err && sk->sk_err == EBADMSG) { 828 *copied -= sk_msg_free(sk, msg); 829 tls_free_open_rec(sk); 830 + err = -sk->sk_err; 831 goto out_err; 832 } 833 break; ··· 916 unsigned char record_type = TLS_RECORD_TYPE_DATA; 917 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 918 bool eor = !(msg->msg_flags & MSG_MORE); 919 + size_t try_to_copy; 920 + ssize_t copied = 0; 921 struct sk_msg *msg_pl, *msg_en; 922 struct tls_rec *rec; 923 int required_size; ··· 1118 1119 release_sock(sk); 1120 mutex_unlock(&tls_ctx->tx_lock); 1121 + return copied > 0 ? copied : ret; 1122 } 1123 1124 static int tls_sw_do_sendpage(struct sock *sk, struct page *page, ··· 1132 struct sk_msg *msg_pl; 1133 struct tls_rec *rec; 1134 int num_async = 0; 1135 + ssize_t copied = 0; 1136 bool full_record; 1137 int record_room; 1138 int ret = 0; ··· 1234 } 1235 sendpage_end: 1236 ret = sk_stream_error(sk, flags, ret); 1237 + return copied > 0 ? copied : ret; 1238 } 1239 1240 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
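The move from size_t to ssize_t matters because the error paths above subtract freed bytes from *copied; with an unsigned counter the subtraction wraps and the old "copied ? copied : ret" returned a huge positive byte count instead of the error. A small stand-alone illustration of the pitfall (hypothetical example, not the kernel code):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>

/* Mirrors the fixed return: only a positive byte count wins over the error. */
static ssize_t finish_send(ssize_t copied, ssize_t err)
{
	return copied > 0 ? copied : err;
}

int main(void)
{
	size_t ucopied = 100;	/* old: unsigned running count */
	ssize_t scopied = 100;	/* new: signed running count */

	/* An error path frees 200 queued bytes and subtracts them. */
	ucopied -= 200;		/* wraps to a huge positive value */
	scopied -= 200;		/* goes properly negative */

	printf("old logic returns %zu (looks like success)\n",
	       ucopied ? ucopied : (size_t)-EBADMSG);
	printf("new logic returns %zd (the error)\n",
	       finish_send(scopied, (ssize_t)-EBADMSG));
	return 0;
}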
+14 -2
security/security.c
··· 1965 1966 int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 1967 { 1968 - return call_int_hook(secid_to_secctx, -EOPNOTSUPP, secid, secdata, 1969 - seclen); 1970 } 1971 EXPORT_SYMBOL(security_secid_to_secctx); 1972
··· 1965 1966 int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 1967 { 1968 + struct security_hook_list *hp; 1969 + int rc; 1970 + 1971 + /* 1972 + * Currently, only one LSM can implement secid_to_secctx (i.e this 1973 + * LSM hook is not "stackable"). 1974 + */ 1975 + hlist_for_each_entry(hp, &security_hook_heads.secid_to_secctx, list) { 1976 + rc = hp->hook.secid_to_secctx(secid, secdata, seclen); 1977 + if (rc != LSM_RET_DEFAULT(secid_to_secctx)) 1978 + return rc; 1979 + } 1980 + 1981 + return LSM_RET_DEFAULT(secid_to_secctx); 1982 } 1983 EXPORT_SYMBOL(security_secid_to_secctx); 1984
+12 -1
tools/testing/selftests/bpf/prog_tests/mmap.c
··· 19 const size_t map_sz = roundup_page(sizeof(struct map_data)); 20 const int zero = 0, one = 1, two = 2, far = 1500; 21 const long page_size = sysconf(_SC_PAGE_SIZE); 22 - int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd; 23 struct bpf_map *data_map, *bss_map; 24 void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; 25 struct test_mmap__bss *bss_data; ··· 36 bss_map = skel->maps.bss; 37 data_map = skel->maps.data_map; 38 data_map_fd = bpf_map__fd(data_map); 39 40 /* get map's ID */ 41 memset(&map_info, 0, map_info_sz);
··· 19 const size_t map_sz = roundup_page(sizeof(struct map_data)); 20 const int zero = 0, one = 1, two = 2, far = 1500; 21 const long page_size = sysconf(_SC_PAGE_SIZE); 22 + int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd; 23 struct bpf_map *data_map, *bss_map; 24 void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; 25 struct test_mmap__bss *bss_data; ··· 36 bss_map = skel->maps.bss; 37 data_map = skel->maps.data_map; 38 data_map_fd = bpf_map__fd(data_map); 39 + 40 + rdmap_fd = bpf_map__fd(skel->maps.rdonly_map); 41 + tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0); 42 + if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) { 43 + munmap(tmp1, 4096); 44 + goto cleanup; 45 + } 46 + /* now double-check if it's mmap()'able at all */ 47 + tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0); 48 + if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno)) 49 + goto cleanup; 50 51 /* get map's ID */ 52 memset(&map_info, 0, map_info_sz);
+8
tools/testing/selftests/bpf/progs/test_mmap.c
··· 9 10 struct { 11 __uint(type, BPF_MAP_TYPE_ARRAY); 12 __uint(max_entries, 512 * 4); /* at least 4 pages of data */ 13 __uint(map_flags, BPF_F_MMAPABLE); 14 __type(key, __u32);
··· 9 10 struct { 11 __uint(type, BPF_MAP_TYPE_ARRAY); 12 + __uint(max_entries, 4096); 13 + __uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG); 14 + __type(key, __u32); 15 + __type(value, char); 16 + } rdonly_map SEC(".maps"); 17 + 18 + struct { 19 + __uint(type, BPF_MAP_TYPE_ARRAY); 20 __uint(max_entries, 512 * 4); /* at least 4 pages of data */ 21 __uint(map_flags, BPF_F_MMAPABLE); 22 __type(key, __u32);
+1 -1
tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
··· 300 local i 301 302 for ((i = 0; i < attempts; ++i)); do 303 - if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then 304 ((passes++)) 305 fi 306
··· 300 local i 301 302 for ((i = 0; i < attempts; ++i)); do 303 + if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 1; then 304 ((passes++)) 305 fi 306
+1 -1
tools/testing/selftests/wireguard/qemu/Makefile
··· 44 $(eval $(call tar_download,MUSL,musl,1.2.0,.tar.gz,https://musl.libc.org/releases/,c6de7b191139142d3f9a7b5b702c9cae1b5ee6e7f57e582da9328629408fd4e8)) 45 $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) 46 $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) 47 - $(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) 48 $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) 49 $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) 50 $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a))
··· 44 $(eval $(call tar_download,MUSL,musl,1.2.0,.tar.gz,https://musl.libc.org/releases/,c6de7b191139142d3f9a7b5b702c9cae1b5ee6e7f57e582da9328629408fd4e8)) 45 $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) 46 $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) 47 + $(eval $(call tar_download,IPROUTE2,iproute2,5.6.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,1b5b0e25ce6e23da7526ea1da044e814ad85ba761b10dd29c2b027c056b04692)) 48 $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) 49 $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) 50 $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a))