Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) Fix RCU warnings in ipv6 multicast router code, from Madhuparna
Bhowmik.
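
   A generic sketch of this class of fix (not the literal patch, and the
   identifiers are only illustrative): RCU-protected lists that are also
   walked while holding the writer's lock need a lockdep condition on
   the iterator so the checker accepts both contexts.

       /* Traversal may run under RTNL instead of rcu_read_lock(). */
       list_for_each_entry_rcu(mrt, &mr_table_list, list,
                               lockdep_rtnl_is_held()) {
               /* ... per-table work ... */
       }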

2) Nexthop attributes aren't being checked properly because of a
   mis-initialized iterator, from David Ahern.

3) Revert the ip_idents_reserve() change as it caused performance
   regressions and was just working around what is really a UBSAN bug
   in the compiler. From Yuqi Jin.

4) Read MAC address properly from ROM in bmac driver (the loop read
   twice as many SROM words as needed and ran past the end of the
   address array), from Jeremy Kerr.
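
   Each SROM read returns a 16-bit word, so the 6-byte address needs
   three reads; iterating six times walked off the end of the buffer.
   A sketch of the corrected loop using the helpers visible in the bmac
   hunk below (addr[] stands in for the destination MAC buffer; the
   per-byte bit reversal follows the surrounding driver code):

       unsigned short data;
       int i;

       for (i = 0; i < 3; i++) {        /* 3 x 16 bits = 6-byte MAC */
               reset_and_select_srom(dev);
               data = read_srom(dev, i + EnetAddressOffset / 2,
                                SROMAddressBits);
               addr[2 * i]     = bitrev8(data & 0xff);
               addr[2 * i + 1] = bitrev8((data >> 8) & 0xff);
       }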

5) Add Microsoft Surface device IDs to r8152, from Marc Payne.
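
   Concretely (both entries appear in the diffs below): the RTL8153B
   based Surface Ethernet Adapter gets a new USB ID in the r8152 device
   table, plus a matching "ignore" entry in cdc_ether so the generic
   CDC driver does not claim it first.

       {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)},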

6) Prevent reference to a freed SKB in __netif_receive_skb_core(),
   from Boris Sukholitko.

7) Fix ACK discard behavior in rxrpc, from David Howells.

8) Preserve flow hash across packet scrubbing in wireguard, from Jason
A. Donenfeld.
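
   skb_scrub_packet() clears skb->hash, so encrypted packets lost the
   flow-steering information of the inner packets they carry. The fix
   (shown in the wireguard hunks below) forces the hash calculation
   before encryption and restores it across the scrub when
   encapsulating:

       static inline void wg_reset_packet(struct sk_buff *skb,
                                          bool encapsulating)
       {
               u8 l4_hash = skb->l4_hash;
               u8 sw_hash = skb->sw_hash;
               u32 hash = skb->hash;

               skb_scrub_packet(skb, true);
               /* ... header/state reset elided ... */
               if (encapsulating) {
                       skb->l4_hash = l4_hash;
                       skb->sw_hash = sw_hash;
                       skb->hash = hash;
               }
       }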

9) Cap option length properly for SO_BINDTODEVICE in AX25, from Eric
Dumazet.
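
   A sketch of the usual pattern for this class of bug (not necessarily
   the literal ax25 hunk): clamp the user-supplied optlen to the
   destination buffer before copying the interface name in.

       char devname[IFNAMSIZ];

       if (optlen > sizeof(devname) - 1)
               optlen = sizeof(devname) - 1;
       memset(devname, 0, sizeof(devname));
       if (copy_from_user(devname, optval, optlen))
               return -EFAULT;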

10) Fix encryption error checking in kTLS code, from Vadim Fedorenko.

11) Missing BPF prog ref release in flow dissector, from Jakub Sitnicki.

12) dst_cache must be used with BH disabled in tipc, from Eric Dumazet.
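
    dst_cache keeps per-CPU state that is also touched from softirq
    context, so process-context users must disable bottom halves around
    the lookup. A minimal sketch of the pattern (not the literal tipc
    hunk):

       local_bh_disable();
       dst = dst_cache_get(cache);
       if (!dst) {
               /* ... route lookup, then dst_cache_set_ip4()/_ip6() ... */
       }
       local_bh_enable();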

13) Fix use after free in mlxsw driver, from Jiri Pirko.
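
    As the mlxsw hunks below show, the ports array is now cleared after
    it is freed and later lookups go through a NULL-checking helper, so
    a late port split/type request can no longer dereference freed
    memory:

       /* teardown */
       kfree(mlxsw_sp->ports);
       mlxsw_sp->ports = NULL;

       /* lookup path */
       if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
               return mlxsw_sp->ports[local_port];
       return NULL;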

14) Order kTLS key destruction properly in mlx5 driver, from Tariq
Toukan.
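
    The TIS still refers to the key object, so teardown must destroy
    the TIS first and only then the key; the fix is the one-line
    reorder visible in the mlx5e kTLS hunk below:

       mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
       mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);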

15) Check devm_platform_ioremap_resource() return value properly in
several drivers, from Tiezhu Yang.
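
    devm_platform_ioremap_resource() reports failure with an ERR_PTR(),
    never NULL, so callers must test IS_ERR() and propagate PTR_ERR()
    rather than returning a made-up -ENOMEM/-EBUSY (see the ifi_canfd,
    sun4i_can, b53_srab, pxa168_eth and ioc3-eth hunks below):

       base = devm_platform_ioremap_resource(pdev, 0);
       if (IS_ERR(base))
               return PTR_ERR(base);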

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (71 commits)
net: smsc911x: Fix runtime PM imbalance on error
net/mlx4_core: fix a memory leak bug.
net: ethernet: ti: cpsw: fix ASSERT_RTNL() warning during suspend
net: phy: mscc: fix initialization of the MACsec protocol mode
net: stmmac: don't attach interface until resume finishes
net: Fix return value about devm_platform_ioremap_resource()
net/mlx5: Fix error flow in case of function_setup failure
net/mlx5e: CT: Correctly get flow rule
net/mlx5e: Update netdev txq on completions during closure
net/mlx5: Annotate mutex destroy for root ns
net/mlx5: Don't maintain a case of del_sw_func being null
net/mlx5: Fix cleaning unmanaged flow tables
net/mlx5: Fix memory leak in mlx5_events_init
net/mlx5e: Fix inner tirs handling
net/mlx5e: kTLS, Destroy key object after destroying the TIS
net/mlx5e: Fix allowed tc redirect merged eswitch offload cases
net/mlx5: Avoid processing commands before cmdif is ready
net/mlx5: Fix a race when moving command interface to events mode
net/mlx5: Add command entry handling completion
rxrpc: Fix a memory leak in rxkad_verify_response()
...

Changed files: +940 -454
+3
Documentation/devicetree/bindings/net/dsa/b53.txt
··· 110 110 #size-cells = <0>; 111 111 112 112 ports { 113 + #address-cells = <1>; 114 + #size-cells = <0>; 115 + 113 116 port0@0 { 114 117 reg = <0>; 115 118 label = "lan1";
+4 -1
drivers/net/can/ifi_canfd/ifi_canfd.c
··· 947 947 u32 id, rev; 948 948 949 949 addr = devm_platform_ioremap_resource(pdev, 0); 950 + if (IS_ERR(addr)) 951 + return PTR_ERR(addr); 952 + 950 953 irq = platform_get_irq(pdev, 0); 951 - if (IS_ERR(addr) || irq < 0) 954 + if (irq < 0) 952 955 return -EINVAL; 953 956 954 957 id = readl(addr + IFI_CANFD_IP_ID);
+1 -1
drivers/net/can/sun4i_can.c
··· 792 792 793 793 addr = devm_platform_ioremap_resource(pdev, 0); 794 794 if (IS_ERR(addr)) { 795 - err = -EBUSY; 795 + err = PTR_ERR(addr); 796 796 goto exit; 797 797 } 798 798
+1 -1
drivers/net/dsa/b53/b53_srab.c
··· 609 609 610 610 priv->regs = devm_platform_ioremap_resource(pdev, 0); 611 611 if (IS_ERR(priv->regs)) 612 - return -ENOMEM; 612 + return PTR_ERR(priv->regs); 613 613 614 614 dev = b53_switch_alloc(&pdev->dev, &b53_srab_ops, priv); 615 615 if (!dev)
+2 -7
drivers/net/dsa/mt7530.c
··· 628 628 mt7530_write(priv, MT7530_PVC_P(port), 629 629 PORT_SPEC_TAG); 630 630 631 - /* Disable auto learning on the cpu port */ 632 - mt7530_set(priv, MT7530_PSC_P(port), SA_DIS); 633 - 634 - /* Unknown unicast frame fordwarding to the cpu port */ 635 - mt7530_set(priv, MT7530_MFC, UNU_FFP(BIT(port))); 631 + /* Unknown multicast frame forwarding to the cpu port */ 632 + mt7530_rmw(priv, MT7530_MFC, UNM_FFP_MASK, UNM_FFP(BIT(port))); 636 633 637 634 /* Set CPU port number */ 638 635 if (priv->id == ID_MT7621) ··· 1290 1293 1291 1294 /* Enable and reset MIB counters */ 1292 1295 mt7530_mib_reset(ds); 1293 - 1294 - mt7530_clear(priv, MT7530_MFC, UNU_FFP_MASK); 1295 1296 1296 1297 for (i = 0; i < MT7530_NUM_PORTS; i++) { 1297 1298 /* Disable forwarding by default on all ports */
+1
drivers/net/dsa/mt7530.h
··· 31 31 #define MT7530_MFC 0x10 32 32 #define BC_FFP(x) (((x) & 0xff) << 24) 33 33 #define UNM_FFP(x) (((x) & 0xff) << 16) 34 + #define UNM_FFP_MASK UNM_FFP(~0) 34 35 #define UNU_FFP(x) (((x) & 0xff) << 8) 35 36 #define UNU_FFP_MASK UNU_FFP(~0) 36 37 #define CPU_EN BIT(7)
+11 -12
drivers/net/dsa/ocelot/felix.c
··· 388 388 struct ocelot *ocelot = &felix->ocelot; 389 389 phy_interface_t *port_phy_modes; 390 390 resource_size_t switch_base; 391 + struct resource res; 391 392 int port, i, err; 392 393 393 394 ocelot->num_phys_ports = num_phys_ports; ··· 423 422 424 423 for (i = 0; i < TARGET_MAX; i++) { 425 424 struct regmap *target; 426 - struct resource *res; 427 425 428 426 if (!felix->info->target_io_res[i].name) 429 427 continue; 430 428 431 - res = &felix->info->target_io_res[i]; 432 - res->flags = IORESOURCE_MEM; 433 - res->start += switch_base; 434 - res->end += switch_base; 429 + memcpy(&res, &felix->info->target_io_res[i], sizeof(res)); 430 + res.flags = IORESOURCE_MEM; 431 + res.start += switch_base; 432 + res.end += switch_base; 435 433 436 - target = ocelot_regmap_init(ocelot, res); 434 + target = ocelot_regmap_init(ocelot, &res); 437 435 if (IS_ERR(target)) { 438 436 dev_err(ocelot->dev, 439 437 "Failed to map device memory space\n"); ··· 453 453 for (port = 0; port < num_phys_ports; port++) { 454 454 struct ocelot_port *ocelot_port; 455 455 void __iomem *port_regs; 456 - struct resource *res; 457 456 458 457 ocelot_port = devm_kzalloc(ocelot->dev, 459 458 sizeof(struct ocelot_port), ··· 464 465 return -ENOMEM; 465 466 } 466 467 467 - res = &felix->info->port_io_res[port]; 468 - res->flags = IORESOURCE_MEM; 469 - res->start += switch_base; 470 - res->end += switch_base; 468 + memcpy(&res, &felix->info->port_io_res[port], sizeof(res)); 469 + res.flags = IORESOURCE_MEM; 470 + res.start += switch_base; 471 + res.end += switch_base; 471 472 472 - port_regs = devm_ioremap_resource(ocelot->dev, res); 473 + port_regs = devm_ioremap_resource(ocelot->dev, &res); 473 474 if (IS_ERR(port_regs)) { 474 475 dev_err(ocelot->dev, 475 476 "failed to map registers for port %d\n", port);
+3 -3
drivers/net/dsa/ocelot/felix.h
··· 8 8 9 9 /* Platform-specific information */ 10 10 struct felix_info { 11 - struct resource *target_io_res; 12 - struct resource *port_io_res; 13 - struct resource *imdio_res; 11 + const struct resource *target_io_res; 12 + const struct resource *port_io_res; 13 + const struct resource *imdio_res; 14 14 const struct reg_field *regfields; 15 15 const u32 *const *map; 16 16 const struct ocelot_ops *ops;
+10 -12
drivers/net/dsa/ocelot/felix_vsc9959.c
··· 333 333 [GCB] = vsc9959_gcb_regmap, 334 334 }; 335 335 336 - /* Addresses are relative to the PCI device's base address and 337 - * will be fixed up at ioremap time. 338 - */ 339 - static struct resource vsc9959_target_io_res[] = { 336 + /* Addresses are relative to the PCI device's base address */ 337 + static const struct resource vsc9959_target_io_res[] = { 340 338 [ANA] = { 341 339 .start = 0x0280000, 342 340 .end = 0x028ffff, ··· 377 379 }, 378 380 }; 379 381 380 - static struct resource vsc9959_port_io_res[] = { 382 + static const struct resource vsc9959_port_io_res[] = { 381 383 { 382 384 .start = 0x0100000, 383 385 .end = 0x010ffff, ··· 413 415 /* Port MAC 0 Internal MDIO bus through which the SerDes acting as an 414 416 * SGMII/QSGMII MAC PCS can be found. 415 417 */ 416 - static struct resource vsc9959_imdio_res = { 418 + static const struct resource vsc9959_imdio_res = { 417 419 .start = 0x8030, 418 420 .end = 0x8040, 419 421 .name = "imdio", ··· 1109 1111 struct device *dev = ocelot->dev; 1110 1112 resource_size_t imdio_base; 1111 1113 void __iomem *imdio_regs; 1112 - struct resource *res; 1114 + struct resource res; 1113 1115 struct enetc_hw *hw; 1114 1116 struct mii_bus *bus; 1115 1117 int port; ··· 1126 1128 imdio_base = pci_resource_start(felix->pdev, 1127 1129 felix->info->imdio_pci_bar); 1128 1130 1129 - res = felix->info->imdio_res; 1130 - res->flags = IORESOURCE_MEM; 1131 - res->start += imdio_base; 1132 - res->end += imdio_base; 1131 + memcpy(&res, felix->info->imdio_res, sizeof(res)); 1132 + res.flags = IORESOURCE_MEM; 1133 + res.start += imdio_base; 1134 + res.end += imdio_base; 1133 1135 1134 - imdio_regs = devm_ioremap_resource(dev, res); 1136 + imdio_regs = devm_ioremap_resource(dev, &res); 1135 1137 if (IS_ERR(imdio_regs)) { 1136 1138 dev_err(dev, "failed to map internal MDIO registers\n"); 1137 1139 return PTR_ERR(imdio_regs);
+1 -1
drivers/net/ethernet/apple/bmac.c
··· 1182 1182 int i; 1183 1183 unsigned short data; 1184 1184 1185 - for (i = 0; i < 6; i++) 1185 + for (i = 0; i < 3; i++) 1186 1186 { 1187 1187 reset_and_select_srom(dev); 1188 1188 data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits);
+7 -6
drivers/net/ethernet/freescale/ucc_geth.c
··· 42 42 #include <soc/fsl/qe/ucc.h> 43 43 #include <soc/fsl/qe/ucc_fast.h> 44 44 #include <asm/machdep.h> 45 + #include <net/sch_generic.h> 45 46 46 47 #include "ucc_geth.h" 47 48 ··· 1549 1548 1550 1549 static void ugeth_quiesce(struct ucc_geth_private *ugeth) 1551 1550 { 1552 - /* Prevent any further xmits, plus detach the device. */ 1553 - netif_device_detach(ugeth->ndev); 1554 - 1555 - /* Wait for any current xmits to finish. */ 1556 - netif_tx_disable(ugeth->ndev); 1551 + /* Prevent any further xmits */ 1552 + netif_tx_stop_all_queues(ugeth->ndev); 1557 1553 1558 1554 /* Disable the interrupt to avoid NAPI rescheduling. */ 1559 1555 disable_irq(ugeth->ug_info->uf_info.irq); ··· 1563 1565 { 1564 1566 napi_enable(&ugeth->napi); 1565 1567 enable_irq(ugeth->ug_info->uf_info.irq); 1566 - netif_device_attach(ugeth->ndev); 1568 + 1569 + /* allow to xmit again */ 1570 + netif_tx_wake_all_queues(ugeth->ndev); 1571 + __netdev_watchdog_up(ugeth->ndev); 1567 1572 } 1568 1573 1569 1574 /* Called every time the controller might need to be made
+1 -1
drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c
··· 1070 1070 (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); 1071 1071 1072 1072 val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); 1073 - val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 1073 + val &= ~MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); 1074 1074 mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); 1075 1075 } 1076 1076
+1 -1
drivers/net/ethernet/marvell/pxa168_eth.c
··· 1418 1418 1419 1419 pep->base = devm_platform_ioremap_resource(pdev, 0); 1420 1420 if (IS_ERR(pep->base)) { 1421 - err = -ENOMEM; 1421 + err = PTR_ERR(pep->base); 1422 1422 goto err_netdev; 1423 1423 } 1424 1424
+1 -1
drivers/net/ethernet/mellanox/mlx4/fw.c
··· 2734 2734 if (err) { 2735 2735 mlx4_err(dev, "Failed to retrieve required operation: %d\n", 2736 2736 err); 2737 - return; 2737 + goto out; 2738 2738 } 2739 2739 MLX4_GET(modifier, outbox, GET_OP_REQ_MODIFIER_OFFSET); 2740 2740 MLX4_GET(token, outbox, GET_OP_REQ_TOKEN_OFFSET);
+55 -4
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 848 848 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev, 849 849 struct mlx5_cmd_msg *msg); 850 850 851 + static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode) 852 + { 853 + if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL) 854 + return true; 855 + 856 + return cmd->allowed_opcode == opcode; 857 + } 858 + 851 859 static void cmd_work_handler(struct work_struct *work) 852 860 { 853 861 struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work); ··· 869 861 int alloc_ret; 870 862 int cmd_mode; 871 863 864 + complete(&ent->handling); 872 865 sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem; 873 866 down(sem); 874 867 if (!ent->page_queue) { ··· 922 913 923 914 /* Skip sending command to fw if internal error */ 924 915 if (pci_channel_offline(dev->pdev) || 925 - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 916 + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || 917 + cmd->state != MLX5_CMDIF_STATE_UP || 918 + !opcode_allowed(&dev->cmd, ent->op)) { 926 919 u8 status = 0; 927 920 u32 drv_synd; 928 921 ··· 989 978 struct mlx5_cmd *cmd = &dev->cmd; 990 979 int err; 991 980 981 + if (!wait_for_completion_timeout(&ent->handling, timeout) && 982 + cancel_work_sync(&ent->work)) { 983 + ent->ret = -ECANCELED; 984 + goto out_err; 985 + } 992 986 if (cmd->mode == CMD_MODE_POLLING || ent->polling) { 993 987 wait_for_completion(&ent->done); 994 988 } else if (!wait_for_completion_timeout(&ent->done, timeout)) { ··· 1001 985 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); 1002 986 } 1003 987 988 + out_err: 1004 989 err = ent->ret; 1005 990 1006 991 if (err == -ETIMEDOUT) { 1007 992 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n", 993 + mlx5_command_str(msg_to_opcode(ent->in)), 994 + msg_to_opcode(ent->in)); 995 + } else if (err == -ECANCELED) { 996 + mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", 1008 997 mlx5_command_str(msg_to_opcode(ent->in)), 1009 998 msg_to_opcode(ent->in)); 1010 999 } ··· 1047 1026 ent->token = token; 1048 1027 ent->polling = force_polling; 1049 1028 1029 + init_completion(&ent->handling); 1050 1030 if (!callback) 1051 1031 init_completion(&ent->done); 1052 1032 ··· 1067 1045 err = wait_func(dev, ent); 1068 1046 if (err == -ETIMEDOUT) 1069 1047 goto out; 1048 + if (err == -ECANCELED) 1049 + goto out_free; 1070 1050 1071 1051 ds = ent->ts2 - ent->ts1; 1072 1052 op = MLX5_GET(mbox_in, in->first.data, opcode); ··· 1415 1391 mlx5_cmdif_debugfs_init(dev); 1416 1392 } 1417 1393 1394 + void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode) 1395 + { 1396 + struct mlx5_cmd *cmd = &dev->cmd; 1397 + int i; 1398 + 1399 + for (i = 0; i < cmd->max_reg_cmds; i++) 1400 + down(&cmd->sem); 1401 + down(&cmd->pages_sem); 1402 + 1403 + cmd->allowed_opcode = opcode; 1404 + 1405 + up(&cmd->pages_sem); 1406 + for (i = 0; i < cmd->max_reg_cmds; i++) 1407 + up(&cmd->sem); 1408 + } 1409 + 1418 1410 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) 1419 1411 { 1420 1412 struct mlx5_cmd *cmd = &dev->cmd; ··· 1707 1667 int err; 1708 1668 u8 status = 0; 1709 1669 u32 drv_synd; 1670 + u16 opcode; 1710 1671 u8 token; 1711 1672 1673 + opcode = MLX5_GET(mbox_in, in, opcode); 1712 1674 if (pci_channel_offline(dev->pdev) || 1713 - dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) { 1714 - u16 opcode = MLX5_GET(mbox_in, in, opcode); 1715 - 1675 + dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || 1676 + dev->cmd.state != MLX5_CMDIF_STATE_UP || 1677 + !opcode_allowed(&dev->cmd, opcode)) 
{ 1716 1678 err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status); 1717 1679 MLX5_SET(mbox_out, out, status, status); 1718 1680 MLX5_SET(mbox_out, out, syndrome, drv_synd); ··· 1979 1937 goto err_free_page; 1980 1938 } 1981 1939 1940 + cmd->state = MLX5_CMDIF_STATE_DOWN; 1982 1941 cmd->checksum_disabled = 1; 1983 1942 cmd->max_reg_cmds = (1 << cmd->log_sz) - 1; 1984 1943 cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1; ··· 2017 1974 mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma)); 2018 1975 2019 1976 cmd->mode = CMD_MODE_POLLING; 1977 + cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL; 2020 1978 2021 1979 create_msg_cache(dev); 2022 1980 ··· 2057 2013 dma_pool_destroy(cmd->pool); 2058 2014 } 2059 2015 EXPORT_SYMBOL(mlx5_cmd_cleanup); 2016 + 2017 + void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 2018 + enum mlx5_cmdif_state cmdif_state) 2019 + { 2020 + dev->cmd.state = cmdif_state; 2021 + } 2022 + EXPORT_SYMBOL(mlx5_cmd_set_state);
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 1121 1121 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); 1122 1122 1123 1123 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); 1124 - void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); 1124 + void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); 1125 1125 1126 1126 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs); 1127 1127 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c
··· 699 699 struct netlink_ext_ack *extack) 700 700 { 701 701 struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); 702 + struct flow_rule *rule = flow_cls_offload_flow_rule(f); 702 703 struct flow_dissector_key_ct *mask, *key; 703 704 bool trk, est, untrk, unest, new; 704 705 u32 ctstate = 0, ctstate_mask = 0; ··· 707 706 u16 ct_state, ct_state_mask; 708 707 struct flow_match_ct match; 709 708 710 - if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) 709 + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) 711 710 return 0; 712 711 713 712 if (!ct_priv) { ··· 716 715 return -EOPNOTSUPP; 717 716 } 718 717 719 - flow_rule_match_ct(f->rule, &match); 718 + flow_rule_match_ct(rule, &match); 720 719 721 720 key = match.key; 722 721 mask = match.mask;
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h
··· 130 130 struct flow_cls_offload *f, 131 131 struct netlink_ext_ack *extack) 132 132 { 133 - if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT)) 133 + struct flow_rule *rule = flow_cls_offload_flow_rule(f); 134 + 135 + if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT)) 134 136 return 0; 135 137 136 138 NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
··· 69 69 struct mlx5e_ktls_offload_context_tx *tx_priv = 70 70 mlx5e_get_ktls_tx_priv_ctx(tls_ctx); 71 71 72 - mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); 73 72 mlx5e_destroy_tis(priv->mdev, tx_priv->tisn); 73 + mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id); 74 74 kvfree(tx_priv); 75 75 } 76 76
+7 -5
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 2717 2717 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); 2718 2718 } 2719 2719 2720 - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) 2720 + /* Verify inner tirs resources allocated */ 2721 + if (!priv->inner_indir_tir[0].tirn) 2721 2722 return; 2722 2723 2723 2724 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { ··· 3409 3408 return err; 3410 3409 } 3411 3410 3412 - void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) 3411 + void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) 3413 3412 { 3414 3413 int i; 3415 3414 3416 3415 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) 3417 3416 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); 3418 3417 3419 - if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) 3418 + /* Verify inner tirs resources allocated */ 3419 + if (!priv->inner_indir_tir[0].tirn) 3420 3420 return; 3421 3421 3422 3422 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) ··· 5125 5123 err_destroy_direct_tirs: 5126 5124 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 5127 5125 err_destroy_indirect_tirs: 5128 - mlx5e_destroy_indirect_tirs(priv, true); 5126 + mlx5e_destroy_indirect_tirs(priv); 5129 5127 err_destroy_direct_rqts: 5130 5128 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 5131 5129 err_destroy_indirect_rqts: ··· 5144 5142 mlx5e_destroy_direct_tirs(priv, priv->xsk_tir); 5145 5143 mlx5e_destroy_direct_rqts(priv, priv->xsk_tir); 5146 5144 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 5147 - mlx5e_destroy_indirect_tirs(priv, true); 5145 + mlx5e_destroy_indirect_tirs(priv); 5148 5146 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 5149 5147 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 5150 5148 mlx5e_close_drop_rq(&priv->drop_rq);
+4 -8
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 1484 1484 return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep; 1485 1485 } 1486 1486 1487 - bool mlx5e_eswitch_rep(struct net_device *netdev) 1487 + bool mlx5e_eswitch_vf_rep(struct net_device *netdev) 1488 1488 { 1489 - if (netdev->netdev_ops == &mlx5e_netdev_ops_rep || 1490 - netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep) 1491 - return true; 1492 - 1493 - return false; 1489 + return netdev->netdev_ops == &mlx5e_netdev_ops_rep; 1494 1490 } 1495 1491 1496 1492 static void mlx5e_build_rep_params(struct net_device *netdev) ··· 1743 1747 err_destroy_direct_tirs: 1744 1748 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 1745 1749 err_destroy_indirect_tirs: 1746 - mlx5e_destroy_indirect_tirs(priv, false); 1750 + mlx5e_destroy_indirect_tirs(priv); 1747 1751 err_destroy_direct_rqts: 1748 1752 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 1749 1753 err_destroy_indirect_rqts: ··· 1761 1765 mlx5e_destroy_rep_root_ft(priv); 1762 1766 mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); 1763 1767 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 1764 - mlx5e_destroy_indirect_tirs(priv, false); 1768 + mlx5e_destroy_indirect_tirs(priv); 1765 1769 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 1766 1770 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 1767 1771 mlx5e_close_drop_rq(&priv->drop_rq);
+6 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
··· 210 210 211 211 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); 212 212 213 - bool mlx5e_eswitch_rep(struct net_device *netdev); 213 + bool mlx5e_eswitch_vf_rep(struct net_device *netdev); 214 214 bool mlx5e_eswitch_uplink_rep(struct net_device *netdev); 215 + static inline bool mlx5e_eswitch_rep(struct net_device *netdev) 216 + { 217 + return mlx5e_eswitch_vf_rep(netdev) || 218 + mlx5e_eswitch_uplink_rep(netdev); 219 + } 215 220 216 221 #else /* CONFIG_MLX5_ESWITCH */ 217 222 static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }
+33 -7
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 3073 3073 return true; 3074 3074 } 3075 3075 3076 + static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3077 + { 3078 + return priv->mdev == peer_priv->mdev; 3079 + } 3080 + 3076 3081 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv) 3077 3082 { 3078 3083 struct mlx5_core_dev *fmdev, *pmdev; ··· 3296 3291 } 3297 3292 3298 3293 3299 - static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, 3294 + static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv, 3300 3295 struct net_device *peer_netdev) 3301 3296 { 3302 3297 struct mlx5e_priv *peer_priv; ··· 3304 3299 peer_priv = netdev_priv(peer_netdev); 3305 3300 3306 3301 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) && 3307 - mlx5e_eswitch_rep(priv->netdev) && 3308 - mlx5e_eswitch_rep(peer_netdev) && 3302 + mlx5e_eswitch_vf_rep(priv->netdev) && 3303 + mlx5e_eswitch_vf_rep(peer_netdev) && 3309 3304 same_hw_devs(priv, peer_priv)); 3310 3305 } 3311 - 3312 - 3313 3306 3314 3307 bool mlx5e_encap_take(struct mlx5e_encap_entry *e) 3315 3308 { ··· 3578 3575 return err; 3579 3576 } 3580 3577 3578 + static bool same_hw_reps(struct mlx5e_priv *priv, 3579 + struct net_device *peer_netdev) 3580 + { 3581 + struct mlx5e_priv *peer_priv; 3582 + 3583 + peer_priv = netdev_priv(peer_netdev); 3584 + 3585 + return mlx5e_eswitch_rep(priv->netdev) && 3586 + mlx5e_eswitch_rep(peer_netdev) && 3587 + same_hw_devs(priv, peer_priv); 3588 + } 3589 + 3590 + static bool is_lag_dev(struct mlx5e_priv *priv, 3591 + struct net_device *peer_netdev) 3592 + { 3593 + return ((mlx5_lag_is_sriov(priv->mdev) || 3594 + mlx5_lag_is_multipath(priv->mdev)) && 3595 + same_hw_reps(priv, peer_netdev)); 3596 + } 3597 + 3581 3598 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv, 3582 3599 struct net_device *out_dev) 3583 3600 { 3584 - if (is_merged_eswitch_dev(priv, out_dev)) 3601 + if (is_merged_eswitch_vfs(priv, out_dev)) 3602 + return true; 3603 + 3604 + if (is_lag_dev(priv, out_dev)) 3585 3605 return true; 3586 3606 3587 3607 return mlx5e_eswitch_rep(out_dev) && 3588 - same_hw_devs(priv, netdev_priv(out_dev)); 3608 + same_port_devs(priv, netdev_priv(out_dev)); 3589 3609 } 3590 3610 3591 3611 static bool is_duplicated_output_device(struct net_device *dev,
+6 -3
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 537 537 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq) 538 538 { 539 539 struct mlx5e_tx_wqe_info *wi; 540 + u32 dma_fifo_cc, nbytes = 0; 541 + u16 ci, sqcc, npkts = 0; 540 542 struct sk_buff *skb; 541 - u32 dma_fifo_cc; 542 - u16 sqcc; 543 - u16 ci; 544 543 int i; 545 544 546 545 sqcc = sq->cc; ··· 564 565 } 565 566 566 567 dev_kfree_skb_any(skb); 568 + npkts++; 569 + nbytes += wi->num_bytes; 567 570 sqcc += wi->num_wqebbs; 568 571 } 569 572 570 573 sq->dma_fifo_cc = dma_fifo_cc; 571 574 sq->cc = sqcc; 575 + 576 + netdev_tx_completed_queue(sq->txq, npkts, nbytes); 572 577 } 573 578 574 579 #ifdef CONFIG_MLX5_CORE_IPOIB
+3
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 611 611 .nent = MLX5_NUM_CMD_EQE, 612 612 .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD, 613 613 }; 614 + mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ); 614 615 err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd"); 615 616 if (err) 616 617 goto err1; 617 618 618 619 mlx5_cmd_use_events(dev); 620 + mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); 619 621 620 622 param = (struct mlx5_eq_param) { 621 623 .irq_index = 0, ··· 647 645 mlx5_cmd_use_polling(dev); 648 646 cleanup_async_eq(dev, &table->cmd_eq, "cmd"); 649 647 err1: 648 + mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); 650 649 mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); 651 650 return err; 652 651 }
+3 -1
drivers/net/ethernet/mellanox/mlx5/core/events.c
··· 346 346 events->dev = dev; 347 347 dev->priv.events = events; 348 348 events->wq = create_singlethread_workqueue("mlx5_events"); 349 - if (!events->wq) 349 + if (!events->wq) { 350 + kfree(events); 350 351 return -ENOMEM; 352 + } 351 353 INIT_WORK(&events->pcie_core_work, mlx5_pcie_event); 352 354 353 355 return 0;
+19 -11
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 344 344 if (node->del_hw_func) 345 345 node->del_hw_func(node); 346 346 if (parent_node) { 347 - /* Only root namespace doesn't have parent and we just 348 - * need to free its node. 349 - */ 350 347 down_write_ref_node(parent_node, locked); 351 348 list_del_init(&node->list); 352 - if (node->del_sw_func) 353 - node->del_sw_func(node); 354 - up_write_ref_node(parent_node, locked); 355 - } else { 356 - kfree(node); 357 349 } 350 + node->del_sw_func(node); 351 + if (parent_node) 352 + up_write_ref_node(parent_node, locked); 358 353 node = NULL; 359 354 } 360 355 if (!node && parent_node) ··· 463 468 fs_get_obj(ft, node); 464 469 465 470 rhltable_destroy(&ft->fgs_hash); 466 - fs_get_obj(prio, ft->node.parent); 467 - prio->num_ft--; 471 + if (ft->node.parent) { 472 + fs_get_obj(prio, ft->node.parent); 473 + prio->num_ft--; 474 + } 468 475 kfree(ft); 469 476 } 470 477 ··· 2348 2351 return 0; 2349 2352 } 2350 2353 2354 + static void del_sw_root_ns(struct fs_node *node) 2355 + { 2356 + struct mlx5_flow_root_namespace *root_ns; 2357 + struct mlx5_flow_namespace *ns; 2358 + 2359 + fs_get_obj(ns, node); 2360 + root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns); 2361 + mutex_destroy(&root_ns->chain_lock); 2362 + kfree(node); 2363 + } 2364 + 2351 2365 static struct mlx5_flow_root_namespace 2352 2366 *create_root_ns(struct mlx5_flow_steering *steering, 2353 2367 enum fs_flow_table_type table_type) ··· 2385 2377 ns = &root_ns->ns; 2386 2378 fs_init_namespace(ns); 2387 2379 mutex_init(&root_ns->chain_lock); 2388 - tree_init_node(&ns->node, NULL, NULL); 2380 + tree_init_node(&ns->node, NULL, del_sw_root_ns); 2389 2381 tree_add_node(&ns->node, NULL); 2390 2382 2391 2383 return root_ns;
+2 -2
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 396 396 err_destroy_direct_tirs: 397 397 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 398 398 err_destroy_indirect_tirs: 399 - mlx5e_destroy_indirect_tirs(priv, true); 399 + mlx5e_destroy_indirect_tirs(priv); 400 400 err_destroy_direct_rqts: 401 401 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 402 402 err_destroy_indirect_rqts: ··· 412 412 { 413 413 mlx5i_destroy_flow_steering(priv); 414 414 mlx5e_destroy_direct_tirs(priv, priv->direct_tir); 415 - mlx5e_destroy_indirect_tirs(priv, true); 415 + mlx5e_destroy_indirect_tirs(priv); 416 416 mlx5e_destroy_direct_rqts(priv, priv->direct_tir); 417 417 mlx5e_destroy_rqt(priv, &priv->indir_rqt); 418 418 mlx5e_close_drop_rq(&priv->drop_rq);
+6 -1
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 965 965 goto err_cmd_cleanup; 966 966 } 967 967 968 + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP); 969 + 968 970 err = mlx5_core_enable_hca(dev, 0); 969 971 if (err) { 970 972 mlx5_core_err(dev, "enable hca failed\n"); ··· 1028 1026 err_disable_hca: 1029 1027 mlx5_core_disable_hca(dev, 0); 1030 1028 err_cmd_cleanup: 1029 + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); 1031 1030 mlx5_cmd_cleanup(dev); 1032 1031 1033 1032 return err; ··· 1046 1043 } 1047 1044 mlx5_reclaim_startup_pages(dev); 1048 1045 mlx5_core_disable_hca(dev, 0); 1046 + mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN); 1049 1047 mlx5_cmd_cleanup(dev); 1050 1048 1051 1049 return 0; ··· 1195 1191 1196 1192 err = mlx5_function_setup(dev, boot); 1197 1193 if (err) 1198 - goto out; 1194 + goto err_function; 1199 1195 1200 1196 if (boot) { 1201 1197 err = mlx5_init_once(dev); ··· 1233 1229 mlx5_cleanup_once(dev); 1234 1230 function_teardown: 1235 1231 mlx5_function_teardown(dev, boot); 1232 + err_function: 1236 1233 dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; 1237 1234 mutex_unlock(&dev->intf_state_mutex); 1238 1235
+12 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 3986 3986 mlxsw_sp_port_remove(mlxsw_sp, i); 3987 3987 mlxsw_sp_cpu_port_remove(mlxsw_sp); 3988 3988 kfree(mlxsw_sp->ports); 3989 + mlxsw_sp->ports = NULL; 3989 3990 } 3990 3991 3991 3992 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp) ··· 4023 4022 mlxsw_sp_cpu_port_remove(mlxsw_sp); 4024 4023 err_cpu_port_create: 4025 4024 kfree(mlxsw_sp->ports); 4025 + mlxsw_sp->ports = NULL; 4026 4026 return err; 4027 4027 } 4028 4028 ··· 4145 4143 return mlxsw_core_res_get(mlxsw_core, local_ports_in_x_res_id); 4146 4144 } 4147 4145 4146 + static struct mlxsw_sp_port * 4147 + mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u8 local_port) 4148 + { 4149 + if (mlxsw_sp->ports && mlxsw_sp->ports[local_port]) 4150 + return mlxsw_sp->ports[local_port]; 4151 + return NULL; 4152 + } 4153 + 4148 4154 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port, 4149 4155 unsigned int count, 4150 4156 struct netlink_ext_ack *extack) ··· 4166 4156 int i; 4167 4157 int err; 4168 4158 4169 - mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4159 + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 4170 4160 if (!mlxsw_sp_port) { 4171 4161 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4172 4162 local_port); ··· 4261 4251 int offset; 4262 4252 int i; 4263 4253 4264 - mlxsw_sp_port = mlxsw_sp->ports[local_port]; 4254 + mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port); 4265 4255 if (!mlxsw_sp_port) { 4266 4256 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n", 4267 4257 local_port);
+8
drivers/net/ethernet/mellanox/mlxsw/switchx2.c
··· 1259 1259 if (mlxsw_sx_port_created(mlxsw_sx, i)) 1260 1260 mlxsw_sx_port_remove(mlxsw_sx, i); 1261 1261 kfree(mlxsw_sx->ports); 1262 + mlxsw_sx->ports = NULL; 1262 1263 } 1263 1264 1264 1265 static int mlxsw_sx_ports_create(struct mlxsw_sx *mlxsw_sx) ··· 1294 1293 if (mlxsw_sx_port_created(mlxsw_sx, i)) 1295 1294 mlxsw_sx_port_remove(mlxsw_sx, i); 1296 1295 kfree(mlxsw_sx->ports); 1296 + mlxsw_sx->ports = NULL; 1297 1297 return err; 1298 1298 } 1299 1299 ··· 1377 1375 struct mlxsw_sx *mlxsw_sx = mlxsw_core_driver_priv(mlxsw_core); 1378 1376 u8 module, width; 1379 1377 int err; 1378 + 1379 + if (!mlxsw_sx->ports || !mlxsw_sx->ports[local_port]) { 1380 + dev_err(mlxsw_sx->bus_info->dev, "Port number \"%d\" does not exist\n", 1381 + local_port); 1382 + return -EINVAL; 1383 + } 1380 1384 1381 1385 if (new_type == DEVLINK_PORT_TYPE_AUTO) 1382 1386 return -EOPNOTSUPP;
+1 -1
drivers/net/ethernet/mscc/ocelot.c
··· 1467 1467 unsigned long ageing_clock_t) 1468 1468 { 1469 1469 unsigned long ageing_jiffies = clock_t_to_jiffies(ageing_clock_t); 1470 - u32 ageing_time = jiffies_to_msecs(ageing_jiffies) / 1000; 1470 + u32 ageing_time = jiffies_to_msecs(ageing_jiffies); 1471 1471 1472 1472 ocelot_set_ageing_time(ocelot, ageing_time); 1473 1473 }
+15 -2
drivers/net/ethernet/realtek/r8169_main.c
··· 1050 1050 RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0; 1051 1051 } 1052 1052 1053 + static void r8168fp_adjust_ocp_cmd(struct rtl8169_private *tp, u32 *cmd, int type) 1054 + { 1055 + /* based on RTL8168FP_OOBMAC_BASE in vendor driver */ 1056 + if (tp->mac_version == RTL_GIGA_MAC_VER_52 && type == ERIAR_OOB) 1057 + *cmd |= 0x7f0 << 18; 1058 + } 1059 + 1053 1060 DECLARE_RTL_COND(rtl_eriar_cond) 1054 1061 { 1055 1062 return RTL_R32(tp, ERIAR) & ERIAR_FLAG; ··· 1065 1058 static void _rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask, 1066 1059 u32 val, int type) 1067 1060 { 1061 + u32 cmd = ERIAR_WRITE_CMD | type | mask | addr; 1062 + 1068 1063 BUG_ON((addr & 3) || (mask == 0)); 1069 1064 RTL_W32(tp, ERIDR, val); 1070 - RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr); 1065 + r8168fp_adjust_ocp_cmd(tp, &cmd, type); 1066 + RTL_W32(tp, ERIAR, cmd); 1071 1067 1072 1068 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100); 1073 1069 } ··· 1083 1073 1084 1074 static u32 _rtl_eri_read(struct rtl8169_private *tp, int addr, int type) 1085 1075 { 1086 - RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr); 1076 + u32 cmd = ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr; 1077 + 1078 + r8168fp_adjust_ocp_cmd(tp, &cmd, type); 1079 + RTL_W32(tp, ERIAR, cmd); 1087 1080 1088 1081 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ? 1089 1082 RTL_R32(tp, ERIDR) : ~0;
+4 -4
drivers/net/ethernet/sgi/ioc3-eth.c
··· 848 848 ip = netdev_priv(dev); 849 849 ip->dma_dev = pdev->dev.parent; 850 850 ip->regs = devm_platform_ioremap_resource(pdev, 0); 851 - if (!ip->regs) { 852 - err = -ENOMEM; 851 + if (IS_ERR(ip->regs)) { 852 + err = PTR_ERR(ip->regs); 853 853 goto out_free; 854 854 } 855 855 856 856 ip->ssram = devm_platform_ioremap_resource(pdev, 1); 857 - if (!ip->ssram) { 858 - err = -ENOMEM; 857 + if (IS_ERR(ip->ssram)) { 858 + err = PTR_ERR(ip->ssram); 859 859 goto out_free; 860 860 } 861 861
+5 -4
drivers/net/ethernet/smsc/smsc911x.c
··· 2493 2493 2494 2494 retval = smsc911x_init(dev); 2495 2495 if (retval < 0) 2496 - goto out_disable_resources; 2496 + goto out_init_fail; 2497 2497 2498 2498 netif_carrier_off(dev); 2499 2499 2500 2500 retval = smsc911x_mii_init(pdev, dev); 2501 2501 if (retval) { 2502 2502 SMSC_WARN(pdata, probe, "Error %i initialising mii", retval); 2503 - goto out_disable_resources; 2503 + goto out_init_fail; 2504 2504 } 2505 2505 2506 2506 retval = register_netdev(dev); 2507 2507 if (retval) { 2508 2508 SMSC_WARN(pdata, probe, "Error %i registering device", retval); 2509 - goto out_disable_resources; 2509 + goto out_init_fail; 2510 2510 } else { 2511 2511 SMSC_TRACE(pdata, probe, 2512 2512 "Network interface: \"%s\"", dev->name); ··· 2547 2547 2548 2548 return 0; 2549 2549 2550 - out_disable_resources: 2550 + out_init_fail: 2551 2551 pm_runtime_put(&pdev->dev); 2552 2552 pm_runtime_disable(&pdev->dev); 2553 + out_disable_resources: 2553 2554 (void)smsc911x_disable_resources(pdev); 2554 2555 out_enable_resources_fail: 2555 2556 smsc911x_free_resources(pdev);
+13
drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
··· 319 319 /* Enable PTP clock */ 320 320 regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val); 321 321 val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id); 322 + switch (gmac->phy_mode) { 323 + case PHY_INTERFACE_MODE_RGMII: 324 + val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) | 325 + NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id); 326 + break; 327 + case PHY_INTERFACE_MODE_SGMII: 328 + val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) | 329 + NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); 330 + break; 331 + default: 332 + /* We don't get here; the switch above will have errored out */ 333 + unreachable(); 334 + } 322 335 regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); 323 336 324 337 if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+2 -2
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 5190 5190 return ret; 5191 5191 } 5192 5192 5193 - netif_device_attach(ndev); 5194 - 5195 5193 mutex_lock(&priv->lock); 5196 5194 5197 5195 stmmac_reset_queues_param(priv); ··· 5215 5217 } 5216 5218 5217 5219 phylink_mac_change(priv->phylink, true); 5220 + 5221 + netif_device_attach(ndev); 5218 5222 5219 5223 return 0; 5220 5224 }
+1 -2
drivers/net/ethernet/sun/cassini.c
··· 4963 4963 cas_cacheline_size)) { 4964 4964 dev_err(&pdev->dev, "Could not set PCI cache " 4965 4965 "line size\n"); 4966 - goto err_write_cacheline; 4966 + goto err_out_free_res; 4967 4967 } 4968 4968 } 4969 4969 #endif ··· 5136 5136 err_out_free_res: 5137 5137 pci_release_regions(pdev); 5138 5138 5139 - err_write_cacheline: 5140 5139 /* Try to restore it in case the error occurred after we 5141 5140 * set it. 5142 5141 */
+2 -1
drivers/net/ethernet/ti/am65-cpsw-nuss.c
··· 1895 1895 ale_params.nu_switch_ale = true; 1896 1896 1897 1897 common->ale = cpsw_ale_create(&ale_params); 1898 - if (!common->ale) { 1898 + if (IS_ERR(common->ale)) { 1899 1899 dev_err(dev, "error initializing ale engine\n"); 1900 + ret = PTR_ERR(common->ale); 1900 1901 goto err_of_clear; 1901 1902 } 1902 1903
+4
drivers/net/ethernet/ti/cpsw.c
··· 1753 1753 struct cpsw_common *cpsw = dev_get_drvdata(dev); 1754 1754 int i; 1755 1755 1756 + rtnl_lock(); 1757 + 1756 1758 for (i = 0; i < cpsw->data.slaves; i++) 1757 1759 if (cpsw->slaves[i].ndev) 1758 1760 if (netif_running(cpsw->slaves[i].ndev)) 1759 1761 cpsw_ndo_stop(cpsw->slaves[i].ndev); 1762 + 1763 + rtnl_unlock(); 1760 1764 1761 1765 /* Select sleep pin state */ 1762 1766 pinctrl_pm_select_sleep_state(dev);
+1 -1
drivers/net/ethernet/ti/cpsw_ale.c
··· 955 955 956 956 ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL); 957 957 if (!ale) 958 - return NULL; 958 + return ERR_PTR(-ENOMEM); 959 959 960 960 ale->p0_untag_vid_mask = 961 961 devm_kmalloc_array(params->dev, BITS_TO_LONGS(VLAN_N_VID),
+2 -2
drivers/net/ethernet/ti/cpsw_priv.c
··· 490 490 ale_params.ale_ports = CPSW_ALE_PORTS_NUM; 491 491 492 492 cpsw->ale = cpsw_ale_create(&ale_params); 493 - if (!cpsw->ale) { 493 + if (IS_ERR(cpsw->ale)) { 494 494 dev_err(dev, "error initializing ale engine\n"); 495 - return -ENODEV; 495 + return PTR_ERR(cpsw->ale); 496 496 } 497 497 498 498 dma_params.dev = dev;
+2 -2
drivers/net/ethernet/ti/netcp_ethss.c
··· 3704 3704 ale_params.nu_switch_ale = true; 3705 3705 } 3706 3706 gbe_dev->ale = cpsw_ale_create(&ale_params); 3707 - if (!gbe_dev->ale) { 3707 + if (IS_ERR(gbe_dev->ale)) { 3708 3708 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 3709 - ret = -ENODEV; 3709 + ret = PTR_ERR(gbe_dev->ale); 3710 3710 goto free_sec_ports; 3711 3711 } else { 3712 3712 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
+1
drivers/net/ipa/gsi.c
··· 1392 1392 while (count < budget) { 1393 1393 struct gsi_trans *trans; 1394 1394 1395 + count++; 1395 1396 trans = gsi_channel_poll_one(channel); 1396 1397 if (!trans) 1397 1398 break;
+1 -2
drivers/net/netdevsim/dev.c
··· 858 858 return -EINVAL; 859 859 860 860 cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1]; 861 - *p_drops = *cnt; 862 - *cnt += jiffies % 64; 861 + *p_drops = (*cnt)++; 863 862 864 863 return 0; 865 864 }
+2
drivers/net/phy/mscc/mscc.h
··· 354 354 u64 *stats; 355 355 int nstats; 356 356 bool pkg_init; 357 + /* PHY address within the package. */ 358 + u8 addr; 357 359 /* For multiple port PHYs; the MDIO address of the base PHY in the 358 360 * package. 359 361 */
+3 -3
drivers/net/phy/mscc/mscc_mac.h
··· 152 152 #define MSCC_MAC_PAUSE_CFG_STATE_PAUSE_STATE BIT(0) 153 153 #define MSCC_MAC_PAUSE_CFG_STATE_MAC_TX_PAUSE_GEN BIT(4) 154 154 155 - #define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2 156 - #define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x) 157 - #define MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0) 155 + #define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL 0x2 156 + #define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(x) (x) 157 + #define MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M GENMASK(2, 0) 158 158 159 159 #endif /* _MSCC_PHY_LINE_MAC_H_ */
+10 -6
drivers/net/phy/mscc/mscc_macsec.c
··· 316 316 /* Must be called with mdio_lock taken */ 317 317 static int __vsc8584_macsec_init(struct phy_device *phydev) 318 318 { 319 + struct vsc8531_private *priv = phydev->priv; 320 + enum macsec_bank proc_bank; 319 321 u32 val; 320 322 321 323 vsc8584_macsec_block_init(phydev, MACSEC_INGR); ··· 353 351 val |= MSCC_FCBUF_ENA_CFG_TX_ENA | MSCC_FCBUF_ENA_CFG_RX_ENA; 354 352 vsc8584_macsec_phy_write(phydev, FC_BUFFER, MSCC_FCBUF_ENA_CFG, val); 355 353 356 - val = vsc8584_macsec_phy_read(phydev, IP_1588, 357 - MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL); 358 - val &= ~MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M; 359 - val |= MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4); 360 - vsc8584_macsec_phy_write(phydev, IP_1588, 361 - MSCC_PROC_0_IP_1588_TOP_CFG_STAT_MODE_CTL, val); 354 + proc_bank = (priv->addr < 2) ? PROC_0 : PROC_2; 355 + 356 + val = vsc8584_macsec_phy_read(phydev, proc_bank, 357 + MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL); 358 + val &= ~MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE_M; 359 + val |= MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL_PROTOCOL_MODE(4); 360 + vsc8584_macsec_phy_write(phydev, proc_bank, 361 + MSCC_PROC_IP_1588_TOP_CFG_STAT_MODE_CTL, val); 362 362 363 363 return 0; 364 364 }
+2 -1
drivers/net/phy/mscc/mscc_macsec.h
··· 64 64 FC_BUFFER = 0x04, 65 65 HOST_MAC = 0x05, 66 66 LINE_MAC = 0x06, 67 - IP_1588 = 0x0e, 67 + PROC_0 = 0x0e, 68 + PROC_2 = 0x0f, 68 69 MACSEC_INGR = 0x38, 69 70 MACSEC_EGR = 0x3c, 70 71 };
+4
drivers/net/phy/mscc/mscc_main.c
··· 1347 1347 else 1348 1348 vsc8531->base_addr = phydev->mdio.addr - addr; 1349 1349 1350 + vsc8531->addr = addr; 1351 + 1350 1352 /* Some parts of the init sequence are identical for every PHY in the 1351 1353 * package. Some parts are modifying the GPIO register bank which is a 1352 1354 * set of registers that are affecting all PHYs, a few resetting the ··· 1772 1770 vsc8531->base_addr = phydev->mdio.addr + addr; 1773 1771 else 1774 1772 vsc8531->base_addr = phydev->mdio.addr - addr; 1773 + 1774 + vsc8531->addr = addr; 1775 1775 1776 1776 /* Some parts of the init sequence are identical for every PHY in the 1777 1777 * package. Some parts are modifying the GPIO register bank which is a
+2 -2
drivers/net/phy/phy_device.c
··· 1233 1233 const struct sfp_upstream_ops *ops) 1234 1234 { 1235 1235 struct sfp_bus *bus; 1236 - int ret; 1236 + int ret = 0; 1237 1237 1238 1238 if (phydev->mdio.dev.fwnode) { 1239 1239 bus = sfp_bus_find_fwnode(phydev->mdio.dev.fwnode); ··· 1245 1245 ret = sfp_bus_add_upstream(bus, phydev, ops); 1246 1246 sfp_bus_put(bus); 1247 1247 } 1248 - return 0; 1248 + return ret; 1249 1249 } 1250 1250 EXPORT_SYMBOL(phy_sfp_probe); 1251 1251
+9 -2
drivers/net/usb/cdc_ether.c
··· 815 815 .driver_info = 0, 816 816 }, 817 817 818 - /* Microsoft Surface 3 dock (based on Realtek RTL8153) */ 818 + /* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153) */ 819 819 { 820 820 USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x07c6, USB_CLASS_COMM, 821 821 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 822 822 .driver_info = 0, 823 823 }, 824 824 825 - /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ 825 + /* Microsoft Surface Ethernet Adapter (based on Realtek RTL8153B) */ 826 + { 827 + USB_DEVICE_AND_INTERFACE_INFO(MICROSOFT_VENDOR_ID, 0x0927, USB_CLASS_COMM, 828 + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), 829 + .driver_info = 0, 830 + }, 831 + 832 + /* TP-LINK UE300 USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */ 826 833 { 827 834 USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, 0x0601, USB_CLASS_COMM, 828 835 USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+1
drivers/net/usb/r8152.c
··· 6880 6880 {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)}, 6881 6881 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab)}, 6882 6882 {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6)}, 6883 + {REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927)}, 6883 6884 {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)}, 6884 6885 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)}, 6885 6886 {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3062)},
+1 -1
drivers/net/wireguard/messages.h
··· 32 32 }; 33 33 34 34 enum counter_values { 35 - COUNTER_BITS_TOTAL = 2048, 35 + COUNTER_BITS_TOTAL = 8192, 36 36 COUNTER_REDUNDANT_BITS = BITS_PER_LONG, 37 37 COUNTER_WINDOW_SIZE = COUNTER_BITS_TOTAL - COUNTER_REDUNDANT_BITS 38 38 };
+9 -13
drivers/net/wireguard/noise.c
··· 104 104 105 105 if (unlikely(!keypair)) 106 106 return NULL; 107 + spin_lock_init(&keypair->receiving_counter.lock); 107 108 keypair->internal_id = atomic64_inc_return(&keypair_counter); 108 109 keypair->entry.type = INDEX_HASHTABLE_KEYPAIR; 109 110 keypair->entry.peer = peer; ··· 359 358 memzero_explicit(output, BLAKE2S_HASH_SIZE + 1); 360 359 } 361 360 362 - static void symmetric_key_init(struct noise_symmetric_key *key) 363 - { 364 - spin_lock_init(&key->counter.receive.lock); 365 - atomic64_set(&key->counter.counter, 0); 366 - memset(key->counter.receive.backtrack, 0, 367 - sizeof(key->counter.receive.backtrack)); 368 - key->birthdate = ktime_get_coarse_boottime_ns(); 369 - key->is_valid = true; 370 - } 371 - 372 361 static void derive_keys(struct noise_symmetric_key *first_dst, 373 362 struct noise_symmetric_key *second_dst, 374 363 const u8 chaining_key[NOISE_HASH_LEN]) 375 364 { 365 + u64 birthdate = ktime_get_coarse_boottime_ns(); 376 366 kdf(first_dst->key, second_dst->key, NULL, NULL, 377 367 NOISE_SYMMETRIC_KEY_LEN, NOISE_SYMMETRIC_KEY_LEN, 0, 0, 378 368 chaining_key); 379 - symmetric_key_init(first_dst); 380 - symmetric_key_init(second_dst); 369 + first_dst->birthdate = second_dst->birthdate = birthdate; 370 + first_dst->is_valid = second_dst->is_valid = true; 381 371 } 382 372 383 373 static bool __must_check mix_dh(u8 chaining_key[NOISE_HASH_LEN], ··· 707 715 u8 e[NOISE_PUBLIC_KEY_LEN]; 708 716 u8 ephemeral_private[NOISE_PUBLIC_KEY_LEN]; 709 717 u8 static_private[NOISE_PUBLIC_KEY_LEN]; 718 + u8 preshared_key[NOISE_SYMMETRIC_KEY_LEN]; 710 719 711 720 down_read(&wg->static_identity.lock); 712 721 ··· 726 733 memcpy(chaining_key, handshake->chaining_key, NOISE_HASH_LEN); 727 734 memcpy(ephemeral_private, handshake->ephemeral_private, 728 735 NOISE_PUBLIC_KEY_LEN); 736 + memcpy(preshared_key, handshake->preshared_key, 737 + NOISE_SYMMETRIC_KEY_LEN); 729 738 up_read(&handshake->lock); 730 739 731 740 if (state != HANDSHAKE_CREATED_INITIATION) ··· 745 750 goto fail; 746 751 747 752 /* psk */ 748 - mix_psk(chaining_key, hash, key, handshake->preshared_key); 753 + mix_psk(chaining_key, hash, key, preshared_key); 749 754 750 755 /* {} */ 751 756 if (!message_decrypt(NULL, src->encrypted_nothing, ··· 778 783 memzero_explicit(chaining_key, NOISE_HASH_LEN); 779 784 memzero_explicit(ephemeral_private, NOISE_PUBLIC_KEY_LEN); 780 785 memzero_explicit(static_private, NOISE_PUBLIC_KEY_LEN); 786 + memzero_explicit(preshared_key, NOISE_SYMMETRIC_KEY_LEN); 781 787 up_read(&wg->static_identity.lock); 782 788 return ret_peer; 783 789 }
+6 -8
drivers/net/wireguard/noise.h
··· 15 15 #include <linux/mutex.h> 16 16 #include <linux/kref.h> 17 17 18 - union noise_counter { 19 - struct { 20 - u64 counter; 21 - unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; 22 - spinlock_t lock; 23 - } receive; 24 - atomic64_t counter; 18 + struct noise_replay_counter { 19 + u64 counter; 20 + spinlock_t lock; 21 + unsigned long backtrack[COUNTER_BITS_TOTAL / BITS_PER_LONG]; 25 22 }; 26 23 27 24 struct noise_symmetric_key { 28 25 u8 key[NOISE_SYMMETRIC_KEY_LEN]; 29 - union noise_counter counter; 30 26 u64 birthdate; 31 27 bool is_valid; 32 28 }; ··· 30 34 struct noise_keypair { 31 35 struct index_hashtable_entry entry; 32 36 struct noise_symmetric_key sending; 37 + atomic64_t sending_counter; 33 38 struct noise_symmetric_key receiving; 39 + struct noise_replay_counter receiving_counter; 34 40 __le32 remote_index; 35 41 bool i_am_the_initiator; 36 42 struct kref refcount;
+9 -1
drivers/net/wireguard/queueing.h
··· 87 87 return real_protocol && skb->protocol == real_protocol; 88 88 } 89 89 90 - static inline void wg_reset_packet(struct sk_buff *skb) 90 + static inline void wg_reset_packet(struct sk_buff *skb, bool encapsulating) 91 91 { 92 + u8 l4_hash = skb->l4_hash; 93 + u8 sw_hash = skb->sw_hash; 94 + u32 hash = skb->hash; 92 95 skb_scrub_packet(skb, true); 93 96 memset(&skb->headers_start, 0, 94 97 offsetof(struct sk_buff, headers_end) - 95 98 offsetof(struct sk_buff, headers_start)); 99 + if (encapsulating) { 100 + skb->l4_hash = l4_hash; 101 + skb->sw_hash = sw_hash; 102 + skb->hash = hash; 103 + } 96 104 skb->queue_mapping = 0; 97 105 skb->nohdr = 0; 98 106 skb->peeked = 0;
+22 -22
drivers/net/wireguard/receive.c
··· 245 245 } 246 246 } 247 247 248 - static bool decrypt_packet(struct sk_buff *skb, struct noise_symmetric_key *key) 248 + static bool decrypt_packet(struct sk_buff *skb, struct noise_keypair *keypair) 249 249 { 250 250 struct scatterlist sg[MAX_SKB_FRAGS + 8]; 251 251 struct sk_buff *trailer; 252 252 unsigned int offset; 253 253 int num_frags; 254 254 255 - if (unlikely(!key)) 255 + if (unlikely(!keypair)) 256 256 return false; 257 257 258 - if (unlikely(!READ_ONCE(key->is_valid) || 259 - wg_birthdate_has_expired(key->birthdate, REJECT_AFTER_TIME) || 260 - key->counter.receive.counter >= REJECT_AFTER_MESSAGES)) { 261 - WRITE_ONCE(key->is_valid, false); 258 + if (unlikely(!READ_ONCE(keypair->receiving.is_valid) || 259 + wg_birthdate_has_expired(keypair->receiving.birthdate, REJECT_AFTER_TIME) || 260 + keypair->receiving_counter.counter >= REJECT_AFTER_MESSAGES)) { 261 + WRITE_ONCE(keypair->receiving.is_valid, false); 262 262 return false; 263 263 } 264 264 ··· 283 283 284 284 if (!chacha20poly1305_decrypt_sg_inplace(sg, skb->len, NULL, 0, 285 285 PACKET_CB(skb)->nonce, 286 - key->key)) 286 + keypair->receiving.key)) 287 287 return false; 288 288 289 289 /* Another ugly situation of pushing and pulling the header so as to ··· 298 298 } 299 299 300 300 /* This is RFC6479, a replay detection bitmap algorithm that avoids bitshifts */ 301 - static bool counter_validate(union noise_counter *counter, u64 their_counter) 301 + static bool counter_validate(struct noise_replay_counter *counter, u64 their_counter) 302 302 { 303 303 unsigned long index, index_current, top, i; 304 304 bool ret = false; 305 305 306 - spin_lock_bh(&counter->receive.lock); 306 + spin_lock_bh(&counter->lock); 307 307 308 - if (unlikely(counter->receive.counter >= REJECT_AFTER_MESSAGES + 1 || 308 + if (unlikely(counter->counter >= REJECT_AFTER_MESSAGES + 1 || 309 309 their_counter >= REJECT_AFTER_MESSAGES)) 310 310 goto out; 311 311 312 312 ++their_counter; 313 313 314 314 if (unlikely((COUNTER_WINDOW_SIZE + their_counter) < 315 - counter->receive.counter)) 315 + counter->counter)) 316 316 goto out; 317 317 318 318 index = their_counter >> ilog2(BITS_PER_LONG); 319 319 320 - if (likely(their_counter > counter->receive.counter)) { 321 - index_current = counter->receive.counter >> ilog2(BITS_PER_LONG); 320 + if (likely(their_counter > counter->counter)) { 321 + index_current = counter->counter >> ilog2(BITS_PER_LONG); 322 322 top = min_t(unsigned long, index - index_current, 323 323 COUNTER_BITS_TOTAL / BITS_PER_LONG); 324 324 for (i = 1; i <= top; ++i) 325 - counter->receive.backtrack[(i + index_current) & 325 + counter->backtrack[(i + index_current) & 326 326 ((COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1)] = 0; 327 - counter->receive.counter = their_counter; 327 + counter->counter = their_counter; 328 328 } 329 329 330 330 index &= (COUNTER_BITS_TOTAL / BITS_PER_LONG) - 1; 331 331 ret = !test_and_set_bit(their_counter & (BITS_PER_LONG - 1), 332 - &counter->receive.backtrack[index]); 332 + &counter->backtrack[index]); 333 333 334 334 out: 335 - spin_unlock_bh(&counter->receive.lock); 335 + spin_unlock_bh(&counter->lock); 336 336 return ret; 337 337 } 338 338 ··· 472 472 if (unlikely(state != PACKET_STATE_CRYPTED)) 473 473 goto next; 474 474 475 - if (unlikely(!counter_validate(&keypair->receiving.counter, 475 + if (unlikely(!counter_validate(&keypair->receiving_counter, 476 476 PACKET_CB(skb)->nonce))) { 477 477 net_dbg_ratelimited("%s: Packet has invalid nonce %llu (max %llu)\n", 478 478 peer->device->dev->name, 479 479 
PACKET_CB(skb)->nonce, 480 - keypair->receiving.counter.receive.counter); 480 + keypair->receiving_counter.counter); 481 481 goto next; 482 482 } 483 483 484 484 if (unlikely(wg_socket_endpoint_from_skb(&endpoint, skb))) 485 485 goto next; 486 486 487 - wg_reset_packet(skb); 487 + wg_reset_packet(skb, false); 488 488 wg_packet_consume_data_done(peer, skb, &endpoint); 489 489 free = false; 490 490 ··· 511 511 struct sk_buff *skb; 512 512 513 513 while ((skb = ptr_ring_consume_bh(&queue->ring)) != NULL) { 514 - enum packet_state state = likely(decrypt_packet(skb, 515 - &PACKET_CB(skb)->keypair->receiving)) ? 514 + enum packet_state state = 515 + likely(decrypt_packet(skb, PACKET_CB(skb)->keypair)) ? 516 516 PACKET_STATE_CRYPTED : PACKET_STATE_DEAD; 517 517 wg_queue_enqueue_per_peer_napi(skb, state); 518 518 if (need_resched())
+12 -5
drivers/net/wireguard/selftest/counter.c
··· 6 6 #ifdef DEBUG 7 7 bool __init wg_packet_counter_selftest(void) 8 8 { 9 + struct noise_replay_counter *counter; 9 10 unsigned int test_num = 0, i; 10 - union noise_counter counter; 11 11 bool success = true; 12 12 13 - #define T_INIT do { \ 14 - memset(&counter, 0, sizeof(union noise_counter)); \ 15 - spin_lock_init(&counter.receive.lock); \ 13 + counter = kmalloc(sizeof(*counter), GFP_KERNEL); 14 + if (unlikely(!counter)) { 15 + pr_err("nonce counter self-test malloc: FAIL\n"); 16 + return false; 17 + } 18 + 19 + #define T_INIT do { \ 20 + memset(counter, 0, sizeof(*counter)); \ 21 + spin_lock_init(&counter->lock); \ 16 22 } while (0) 17 23 #define T_LIM (COUNTER_WINDOW_SIZE + 1) 18 24 #define T(n, v) do { \ 19 25 ++test_num; \ 20 - if (counter_validate(&counter, n) != (v)) { \ 26 + if (counter_validate(counter, n) != (v)) { \ 21 27 pr_err("nonce counter self-test %u: FAIL\n", \ 22 28 test_num); \ 23 29 success = false; \ ··· 105 99 106 100 if (success) 107 101 pr_info("nonce counter self-tests: pass\n"); 102 + kfree(counter); 108 103 return success; 109 104 } 110 105 #endif
+11 -8
drivers/net/wireguard/send.c
··· 129 129 rcu_read_lock_bh(); 130 130 keypair = rcu_dereference_bh(peer->keypairs.current_keypair); 131 131 send = keypair && READ_ONCE(keypair->sending.is_valid) && 132 - (atomic64_read(&keypair->sending.counter.counter) > REKEY_AFTER_MESSAGES || 132 + (atomic64_read(&keypair->sending_counter) > REKEY_AFTER_MESSAGES || 133 133 (keypair->i_am_the_initiator && 134 134 wg_birthdate_has_expired(keypair->sending.birthdate, REKEY_AFTER_TIME))); 135 135 rcu_read_unlock_bh(); ··· 166 166 struct message_data *header; 167 167 struct sk_buff *trailer; 168 168 int num_frags; 169 + 170 + /* Force hash calculation before encryption so that flow analysis is 171 + * consistent over the inner packet. 172 + */ 173 + skb_get_hash(skb); 169 174 170 175 /* Calculate lengths. */ 171 176 padding_len = calculate_skb_padding(skb); ··· 300 295 skb_list_walk_safe(first, skb, next) { 301 296 if (likely(encrypt_packet(skb, 302 297 PACKET_CB(first)->keypair))) { 303 - wg_reset_packet(skb); 298 + wg_reset_packet(skb, true); 304 299 } else { 305 300 state = PACKET_STATE_DEAD; 306 301 break; ··· 349 344 350 345 void wg_packet_send_staged_packets(struct wg_peer *peer) 351 346 { 352 - struct noise_symmetric_key *key; 353 347 struct noise_keypair *keypair; 354 348 struct sk_buff_head packets; 355 349 struct sk_buff *skb; ··· 368 364 rcu_read_unlock_bh(); 369 365 if (unlikely(!keypair)) 370 366 goto out_nokey; 371 - key = &keypair->sending; 372 - if (unlikely(!READ_ONCE(key->is_valid))) 367 + if (unlikely(!READ_ONCE(keypair->sending.is_valid))) 373 368 goto out_nokey; 374 - if (unlikely(wg_birthdate_has_expired(key->birthdate, 369 + if (unlikely(wg_birthdate_has_expired(keypair->sending.birthdate, 375 370 REJECT_AFTER_TIME))) 376 371 goto out_invalid; 377 372 ··· 385 382 */ 386 383 PACKET_CB(skb)->ds = ip_tunnel_ecn_encap(0, ip_hdr(skb), skb); 387 384 PACKET_CB(skb)->nonce = 388 - atomic64_inc_return(&key->counter.counter) - 1; 385 + atomic64_inc_return(&keypair->sending_counter) - 1; 389 386 if (unlikely(PACKET_CB(skb)->nonce >= REJECT_AFTER_MESSAGES)) 390 387 goto out_invalid; 391 388 } ··· 397 394 return; 398 395 399 396 out_invalid: 400 - WRITE_ONCE(key->is_valid, false); 397 + WRITE_ONCE(keypair->sending.is_valid, false); 401 398 out_nokey: 402 399 wg_noise_keypair_put(keypair, false); 403 400
+4
drivers/net/wireless/intel/iwlwifi/pcie/drv.c
··· 1092 1092 iwl_trans->cfg = &iwl_ax101_cfg_quz_hr; 1093 1093 else if (iwl_trans->cfg == &iwl_ax201_cfg_qu_hr) 1094 1094 iwl_trans->cfg = &iwl_ax201_cfg_quz_hr; 1095 + else if (iwl_trans->cfg == &killer1650s_2ax_cfg_qu_b0_hr_b0) 1096 + iwl_trans->cfg = &iwl_ax1650s_cfg_quz_hr; 1097 + else if (iwl_trans->cfg == &killer1650i_2ax_cfg_qu_b0_hr_b0) 1098 + iwl_trans->cfg = &iwl_ax1650i_cfg_quz_hr; 1095 1099 } 1096 1100 1097 1101 #endif
+5 -13
fs/afs/fs_probe.c
··· 32 32 struct afs_server *server = call->server; 33 33 unsigned int server_index = call->server_index; 34 34 unsigned int index = call->addr_ix; 35 - unsigned int rtt = UINT_MAX; 35 + unsigned int rtt_us = 0; 36 36 bool have_result = false; 37 - u64 _rtt; 38 37 int ret = call->error; 39 38 40 39 _enter("%pU,%u", &server->uuid, index); ··· 92 93 } 93 94 } 94 95 95 - /* Get the RTT and scale it to fit into a 32-bit value that represents 96 - * over a minute of time so that we can access it with one instruction 97 - * on a 32-bit system. 98 - */ 99 - _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); 100 - _rtt /= 64; 101 - rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; 102 - if (rtt < server->probe.rtt) { 103 - server->probe.rtt = rtt; 96 + rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); 97 + if (rtt_us < server->probe.rtt) { 98 + server->probe.rtt = rtt_us; 104 99 alist->preferred = index; 105 100 have_result = true; 106 101 } ··· 106 113 spin_unlock(&server->probe_lock); 107 114 108 115 _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", 109 - server_index, index, &alist->addrs[index].transport, 110 - (unsigned int)rtt, ret); 116 + server_index, index, &alist->addrs[index].transport, rtt_us, ret); 111 117 112 118 have_result |= afs_fs_probe_done(server); 113 119 if (have_result)
+5 -13
fs/afs/vl_probe.c
··· 31 31 struct afs_addr_list *alist = call->alist; 32 32 struct afs_vlserver *server = call->vlserver; 33 33 unsigned int server_index = call->server_index; 34 + unsigned int rtt_us = 0; 34 35 unsigned int index = call->addr_ix; 35 - unsigned int rtt = UINT_MAX; 36 36 bool have_result = false; 37 - u64 _rtt; 38 37 int ret = call->error; 39 38 40 39 _enter("%s,%u,%u,%d,%d", server->name, server_index, index, ret, call->abort_code); ··· 92 93 } 93 94 } 94 95 95 - /* Get the RTT and scale it to fit into a 32-bit value that represents 96 - * over a minute of time so that we can access it with one instruction 97 - * on a 32-bit system. 98 - */ 99 - _rtt = rxrpc_kernel_get_rtt(call->net->socket, call->rxcall); 100 - _rtt /= 64; 101 - rtt = (_rtt > UINT_MAX) ? UINT_MAX : _rtt; 102 - if (rtt < server->probe.rtt) { 103 - server->probe.rtt = rtt; 96 + rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall); 97 + if (rtt_us < server->probe.rtt) { 98 + server->probe.rtt = rtt_us; 104 99 alist->preferred = index; 105 100 have_result = true; 106 101 } ··· 106 113 spin_unlock(&server->probe_lock); 107 114 108 115 _debug("probe [%u][%u] %pISpc rtt=%u ret=%d", 109 - server_index, index, &alist->addrs[index].transport, 110 - (unsigned int)rtt, ret); 116 + server_index, index, &alist->addrs[index].transport, rtt_us, ret); 111 117 112 118 have_result |= afs_vl_probe_done(server); 113 119 if (have_result) {
+16
include/linux/mlx5/driver.h
··· 213 213 MLX5_PORT_DOWN = 2, 214 214 }; 215 215 216 + enum mlx5_cmdif_state { 217 + MLX5_CMDIF_STATE_UNINITIALIZED, 218 + MLX5_CMDIF_STATE_UP, 219 + MLX5_CMDIF_STATE_DOWN, 220 + }; 221 + 216 222 struct mlx5_cmd_first { 217 223 __be32 data[4]; 218 224 }; ··· 264 258 struct mlx5_cmd { 265 259 struct mlx5_nb nb; 266 260 261 + enum mlx5_cmdif_state state; 267 262 void *cmd_alloc_buf; 268 263 dma_addr_t alloc_dma; 269 264 int alloc_size; ··· 291 284 struct semaphore sem; 292 285 struct semaphore pages_sem; 293 286 int mode; 287 + u16 allowed_opcode; 294 288 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; 295 289 struct dma_pool *pool; 296 290 struct mlx5_cmd_debug dbg; ··· 751 743 struct delayed_work cb_timeout_work; 752 744 void *context; 753 745 int idx; 746 + struct completion handling; 754 747 struct completion done; 755 748 struct mlx5_cmd *cmd; 756 749 struct work_struct work; ··· 883 874 return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1); 884 875 } 885 876 877 + enum { 878 + CMD_ALLOWED_OPCODE_ALL, 879 + }; 880 + 886 881 int mlx5_cmd_init(struct mlx5_core_dev *dev); 887 882 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); 883 + void mlx5_cmd_set_state(struct mlx5_core_dev *dev, 884 + enum mlx5_cmdif_state cmdif_state); 888 885 void mlx5_cmd_use_events(struct mlx5_core_dev *dev); 889 886 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); 887 + void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode); 890 888 891 889 struct mlx5_async_ctx { 892 890 struct mlx5_core_dev *dev;
+2 -1
include/net/act_api.h
··· 75 75 { 76 76 dtm->install = jiffies_to_clock_t(jiffies - stm->install); 77 77 dtm->lastuse = jiffies_to_clock_t(jiffies - stm->lastuse); 78 - dtm->firstuse = jiffies_to_clock_t(jiffies - stm->firstuse); 78 + dtm->firstuse = stm->firstuse ? 79 + jiffies_to_clock_t(jiffies - stm->firstuse) : 0; 79 80 dtm->expires = jiffies_to_clock_t(stm->expires); 80 81 } 81 82
+1 -1
include/net/af_rxrpc.h
··· 59 59 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); 60 60 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, 61 61 struct sockaddr_rxrpc *); 62 - u64 rxrpc_kernel_get_rtt(struct socket *, struct rxrpc_call *); 62 + u32 rxrpc_kernel_get_srtt(struct socket *, struct rxrpc_call *); 63 63 int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t, 64 64 rxrpc_user_attach_call_t, unsigned long, gfp_t, 65 65 unsigned int);
-1
include/net/ip_fib.h
··· 257 257 u32 table_id; 258 258 /* filter_set is an optimization that an entry is set */ 259 259 bool filter_set; 260 - bool dump_all_families; 261 260 bool dump_routes; 262 261 bool dump_exceptions; 263 262 unsigned char protocol;
+42 -10
include/trace/events/rxrpc.h
··· 1112 1112 TRACE_EVENT(rxrpc_rtt_rx, 1113 1113 TP_PROTO(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 1114 1114 rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 1115 - s64 rtt, u8 nr, s64 avg), 1115 + u32 rtt, u32 rto), 1116 1116 1117 - TP_ARGS(call, why, send_serial, resp_serial, rtt, nr, avg), 1117 + TP_ARGS(call, why, send_serial, resp_serial, rtt, rto), 1118 1118 1119 1119 TP_STRUCT__entry( 1120 1120 __field(unsigned int, call ) 1121 1121 __field(enum rxrpc_rtt_rx_trace, why ) 1122 - __field(u8, nr ) 1123 1122 __field(rxrpc_serial_t, send_serial ) 1124 1123 __field(rxrpc_serial_t, resp_serial ) 1125 - __field(s64, rtt ) 1126 - __field(u64, avg ) 1124 + __field(u32, rtt ) 1125 + __field(u32, rto ) 1127 1126 ), 1128 1127 1129 1128 TP_fast_assign( ··· 1131 1132 __entry->send_serial = send_serial; 1132 1133 __entry->resp_serial = resp_serial; 1133 1134 __entry->rtt = rtt; 1134 - __entry->nr = nr; 1135 - __entry->avg = avg; 1135 + __entry->rto = rto; 1136 1136 ), 1137 1137 1138 - TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%lld nr=%u avg=%lld", 1138 + TP_printk("c=%08x %s sr=%08x rr=%08x rtt=%u rto=%u", 1139 1139 __entry->call, 1140 1140 __print_symbolic(__entry->why, rxrpc_rtt_rx_traces), 1141 1141 __entry->send_serial, 1142 1142 __entry->resp_serial, 1143 1143 __entry->rtt, 1144 - __entry->nr, 1145 - __entry->avg) 1144 + __entry->rto) 1146 1145 ); 1147 1146 1148 1147 TRACE_EVENT(rxrpc_timer, ··· 1539 1542 TP_printk("c=%08x r=%08x", 1540 1543 __entry->debug_id, 1541 1544 __entry->serial) 1545 + ); 1546 + 1547 + TRACE_EVENT(rxrpc_rx_discard_ack, 1548 + TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, 1549 + rxrpc_seq_t first_soft_ack, rxrpc_seq_t call_ackr_first, 1550 + rxrpc_seq_t prev_pkt, rxrpc_seq_t call_ackr_prev), 1551 + 1552 + TP_ARGS(debug_id, serial, first_soft_ack, call_ackr_first, 1553 + prev_pkt, call_ackr_prev), 1554 + 1555 + TP_STRUCT__entry( 1556 + __field(unsigned int, debug_id ) 1557 + __field(rxrpc_serial_t, serial ) 1558 + __field(rxrpc_seq_t, first_soft_ack) 1559 + __field(rxrpc_seq_t, call_ackr_first) 1560 + __field(rxrpc_seq_t, prev_pkt) 1561 + __field(rxrpc_seq_t, call_ackr_prev) 1562 + ), 1563 + 1564 + TP_fast_assign( 1565 + __entry->debug_id = debug_id; 1566 + __entry->serial = serial; 1567 + __entry->first_soft_ack = first_soft_ack; 1568 + __entry->call_ackr_first = call_ackr_first; 1569 + __entry->prev_pkt = prev_pkt; 1570 + __entry->call_ackr_prev = call_ackr_prev; 1571 + ), 1572 + 1573 + TP_printk("c=%08x r=%08x %08x<%08x %08x<%08x", 1574 + __entry->debug_id, 1575 + __entry->serial, 1576 + __entry->first_soft_ack, 1577 + __entry->call_ackr_first, 1578 + __entry->prev_pkt, 1579 + __entry->call_ackr_prev) 1542 1580 ); 1543 1581 1544 1582 #endif /* _TRACE_RXRPC_H */
+14 -3
kernel/bpf/syscall.c
··· 623 623 624 624 mutex_lock(&map->freeze_mutex); 625 625 626 - if ((vma->vm_flags & VM_WRITE) && map->frozen) { 627 - err = -EPERM; 628 - goto out; 626 + if (vma->vm_flags & VM_WRITE) { 627 + if (map->frozen) { 628 + err = -EPERM; 629 + goto out; 630 + } 631 + /* map is meant to be read-only, so do not allow mapping as 632 + * writable, because it's possible to leak a writable page 633 + * reference and allows user-space to still modify it after 634 + * freezing, while verifier will assume contents do not change 635 + */ 636 + if (map->map_flags & BPF_F_RDONLY_PROG) { 637 + err = -EACCES; 638 + goto out; 639 + } 629 640 } 630 641 631 642 /* set default open/close callbacks */
+4 -2
net/ax25/af_ax25.c
··· 635 635 break; 636 636 637 637 case SO_BINDTODEVICE: 638 - if (optlen > IFNAMSIZ) 639 - optlen = IFNAMSIZ; 638 + if (optlen > IFNAMSIZ - 1) 639 + optlen = IFNAMSIZ - 1; 640 + 641 + memset(devname, 0, sizeof(devname)); 640 642 641 643 if (copy_from_user(devname, optval, optlen)) { 642 644 res = -EFAULT;
+15 -5
net/core/dev.c
··· 4988 4988 return 0; 4989 4989 } 4990 4990 4991 - static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc, 4991 + static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc, 4992 4992 struct packet_type **ppt_prev) 4993 4993 { 4994 4994 struct packet_type *ptype, *pt_prev; 4995 4995 rx_handler_func_t *rx_handler; 4996 + struct sk_buff *skb = *pskb; 4996 4997 struct net_device *orig_dev; 4997 4998 bool deliver_exact = false; 4998 4999 int ret = NET_RX_DROP; ··· 5024 5023 ret2 = do_xdp_generic(rcu_dereference(skb->dev->xdp_prog), skb); 5025 5024 preempt_enable(); 5026 5025 5027 - if (ret2 != XDP_PASS) 5028 - return NET_RX_DROP; 5026 + if (ret2 != XDP_PASS) { 5027 + ret = NET_RX_DROP; 5028 + goto out; 5029 + } 5029 5030 skb_reset_mac_len(skb); 5030 5031 } 5031 5032 ··· 5177 5174 } 5178 5175 5179 5176 out: 5177 + /* The invariant here is that if *ppt_prev is not NULL 5178 + * then skb should also be non-NULL. 5179 + * 5180 + * Apparently *ppt_prev assignment above holds this invariant due to 5181 + * skb dereferencing near it. 5182 + */ 5183 + *pskb = skb; 5180 5184 return ret; 5181 5185 } 5182 5186 ··· 5193 5183 struct packet_type *pt_prev = NULL; 5194 5184 int ret; 5195 5185 5196 - ret = __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5186 + ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5197 5187 if (pt_prev) 5198 5188 ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb, 5199 5189 skb->dev, pt_prev, orig_dev); ··· 5271 5261 struct packet_type *pt_prev = NULL; 5272 5262 5273 5263 skb_list_del_init(skb); 5274 - __netif_receive_skb_core(skb, pfmemalloc, &pt_prev); 5264 + __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev); 5275 5265 if (!pt_prev) 5276 5266 continue; 5277 5267 if (pt_curr != pt_prev || od_curr != orig_dev) {
+21 -5
net/core/flow_dissector.c
··· 160 160 return ret; 161 161 } 162 162 163 - int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) 163 + static int flow_dissector_bpf_prog_detach(struct net *net) 164 164 { 165 165 struct bpf_prog *attached; 166 - struct net *net; 167 166 168 - net = current->nsproxy->net_ns; 169 167 mutex_lock(&flow_dissector_mutex); 170 168 attached = rcu_dereference_protected(net->flow_dissector_prog, 171 169 lockdep_is_held(&flow_dissector_mutex)); ··· 176 178 mutex_unlock(&flow_dissector_mutex); 177 179 return 0; 178 180 } 181 + 182 + int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) 183 + { 184 + return flow_dissector_bpf_prog_detach(current->nsproxy->net_ns); 185 + } 186 + 187 + static void __net_exit flow_dissector_pernet_pre_exit(struct net *net) 188 + { 189 + /* We're not racing with attach/detach because there are no 190 + * references to netns left when pre_exit gets called. 191 + */ 192 + if (rcu_access_pointer(net->flow_dissector_prog)) 193 + flow_dissector_bpf_prog_detach(net); 194 + } 195 + 196 + static struct pernet_operations flow_dissector_pernet_ops __net_initdata = { 197 + .pre_exit = flow_dissector_pernet_pre_exit, 198 + }; 179 199 180 200 /** 181 201 * __skb_flow_get_ports - extract the upper layer ports and return them ··· 1852 1836 skb_flow_dissector_init(&flow_keys_basic_dissector, 1853 1837 flow_keys_basic_dissector_keys, 1854 1838 ARRAY_SIZE(flow_keys_basic_dissector_keys)); 1855 - return 0; 1856 - } 1857 1839 1840 + return register_pernet_subsys(&flow_dissector_pernet_ops); 1841 + } 1858 1842 core_initcall(init_default_flow_dissectors);
+15
net/dsa/tag_mtk.c
··· 15 15 #define MTK_HDR_XMIT_TAGGED_TPID_8100 1 16 16 #define MTK_HDR_RECV_SOURCE_PORT_MASK GENMASK(2, 0) 17 17 #define MTK_HDR_XMIT_DP_BIT_MASK GENMASK(5, 0) 18 + #define MTK_HDR_XMIT_SA_DIS BIT(6) 18 19 19 20 static struct sk_buff *mtk_tag_xmit(struct sk_buff *skb, 20 21 struct net_device *dev) ··· 23 22 struct dsa_port *dp = dsa_slave_to_port(dev); 24 23 u8 *mtk_tag; 25 24 bool is_vlan_skb = true; 25 + unsigned char *dest = eth_hdr(skb)->h_dest; 26 + bool is_multicast_skb = is_multicast_ether_addr(dest) && 27 + !is_broadcast_ether_addr(dest); 26 28 27 29 /* Build the special tag after the MAC Source Address. If VLAN header 28 30 * is present, it's required that VLAN header and special tag is ··· 51 47 MTK_HDR_XMIT_UNTAGGED; 52 48 mtk_tag[1] = (1 << dp->index) & MTK_HDR_XMIT_DP_BIT_MASK; 53 49 50 + /* Disable SA learning for multicast frames */ 51 + if (unlikely(is_multicast_skb)) 52 + mtk_tag[1] |= MTK_HDR_XMIT_SA_DIS; 53 + 54 54 /* Tag control information is kept for 802.1Q */ 55 55 if (!is_vlan_skb) { 56 56 mtk_tag[2] = 0; ··· 69 61 { 70 62 int port; 71 63 __be16 *phdr, hdr; 64 + unsigned char *dest = eth_hdr(skb)->h_dest; 65 + bool is_multicast_skb = is_multicast_ether_addr(dest) && 66 + !is_broadcast_ether_addr(dest); 72 67 73 68 if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN))) 74 69 return NULL; ··· 96 85 skb->dev = dsa_master_find_slave(dev, 0, port); 97 86 if (!skb->dev) 98 87 return NULL; 88 + 89 + /* Only unicast or broadcast frames are offloaded */ 90 + if (likely(!is_multicast_skb)) 91 + skb->offload_fwd_mark = 1; 99 92 100 93 return skb; 101 94 }
+2 -2
net/ethtool/netlink.c
··· 342 342 ret = ops->reply_size(req_info, reply_data); 343 343 if (ret < 0) 344 344 goto err_cleanup; 345 - reply_len = ret; 345 + reply_len = ret + ethnl_reply_header_size(); 346 346 ret = -ENOMEM; 347 347 rskb = ethnl_reply_init(reply_len, req_info->dev, ops->reply_cmd, 348 348 ops->hdr_attr, info, &reply_payload); ··· 588 588 ret = ops->reply_size(req_info, reply_data); 589 589 if (ret < 0) 590 590 goto err_cleanup; 591 - reply_len = ret; 591 + reply_len = ret + ethnl_reply_header_size(); 592 592 ret = -ENOMEM; 593 593 skb = genlmsg_new(reply_len, GFP_KERNEL); 594 594 if (!skb)
-1
net/ethtool/strset.c
··· 324 324 int len = 0; 325 325 int ret; 326 326 327 - len += ethnl_reply_header_size(); 328 327 for (i = 0; i < ETH_SS_COUNT; i++) { 329 328 const struct strset_info *set_info = &data->sets[i]; 330 329
+1 -2
net/ipv4/fib_frontend.c
··· 918 918 else 919 919 filter->dump_exceptions = false; 920 920 921 - filter->dump_all_families = (rtm->rtm_family == AF_UNSPEC); 922 921 filter->flags = rtm->rtm_flags; 923 922 filter->protocol = rtm->rtm_protocol; 924 923 filter->rt_type = rtm->rtm_type; ··· 989 990 if (filter.table_id) { 990 991 tb = fib_get_table(net, filter.table_id); 991 992 if (!tb) { 992 - if (filter.dump_all_families) 993 + if (rtnl_msg_family(cb->nlh) != PF_INET) 993 994 return skb->len; 994 995 995 996 NL_SET_ERR_MSG(cb->extack, "ipv4: FIB table does not exist");
+24 -19
net/ipv4/inet_connection_sock.c
··· 24 24 #include <net/addrconf.h>
 25 25 
 26 26 #if IS_ENABLED(CONFIG_IPV6)
 27 - /* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
 28 - * only, and any IPv4 addresses if not IPv6 only
 29 - * match_wildcard == false: addresses must be exactly the same, i.e.
 30 - * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 31 - * and 0.0.0.0 equals to 0.0.0.0 only
 27 + /* match_sk*_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses
 28 + * if IPv6 only, and any IPv4 addresses
 29 + * if not IPv6 only
 30 + * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
 31 + * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 32 + * and 0.0.0.0 equals to 0.0.0.0 only
 32 33 */
 33 34 static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
 34 35 const struct in6_addr *sk2_rcv_saddr6,
 35 36 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
 36 37 bool sk1_ipv6only, bool sk2_ipv6only,
 37 - bool match_wildcard)
 38 + bool match_sk1_wildcard,
 39 + bool match_sk2_wildcard)
 38 40 {
 39 41 int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
 40 42 int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
 ··· 46 44 if (!sk2_ipv6only) {
 47 45 if (sk1_rcv_saddr == sk2_rcv_saddr)
 48 46 return true;
 49 - if (!sk1_rcv_saddr || !sk2_rcv_saddr)
 50 - return match_wildcard;
 47 + return (match_sk1_wildcard && !sk1_rcv_saddr) ||
 48 + (match_sk2_wildcard && !sk2_rcv_saddr);
 51 49 }
 52 50 return false;
 53 51 }
 ··· 55 53 if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
 56 54 return true;
 57 55 
 58 - if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
 56 + if (addr_type2 == IPV6_ADDR_ANY && match_sk2_wildcard &&
 59 57 !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
 60 58 return true;
 61 59 
 62 - if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
 60 + if (addr_type == IPV6_ADDR_ANY && match_sk1_wildcard &&
 63 61 !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
 64 62 return true;
 65 63 
 ··· 71 69 }
 72 70 #endif
 73 71 
 74 - /* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
 75 - * match_wildcard == false: addresses must be exactly the same, i.e.
 76 - * 0.0.0.0 only equals to 0.0.0.0
 72 + /* match_sk*_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
 73 + * match_sk*_wildcard == false: addresses must be exactly the same, i.e.
74 + * 0.0.0.0 only equals to 0.0.0.0 77 75 */ 78 76 static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr, 79 - bool sk2_ipv6only, bool match_wildcard) 77 + bool sk2_ipv6only, bool match_sk1_wildcard, 78 + bool match_sk2_wildcard) 80 79 { 81 80 if (!sk2_ipv6only) { 82 81 if (sk1_rcv_saddr == sk2_rcv_saddr) 83 82 return true; 84 - if (!sk1_rcv_saddr || !sk2_rcv_saddr) 85 - return match_wildcard; 83 + return (match_sk1_wildcard && !sk1_rcv_saddr) || 84 + (match_sk2_wildcard && !sk2_rcv_saddr); 86 85 } 87 86 return false; 88 87 } ··· 99 96 sk2->sk_rcv_saddr, 100 97 ipv6_only_sock(sk), 101 98 ipv6_only_sock(sk2), 99 + match_wildcard, 102 100 match_wildcard); 103 101 #endif 104 102 return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr, 105 - ipv6_only_sock(sk2), match_wildcard); 103 + ipv6_only_sock(sk2), match_wildcard, 104 + match_wildcard); 106 105 } 107 106 EXPORT_SYMBOL(inet_rcv_saddr_equal); 108 107 ··· 290 285 tb->fast_rcv_saddr, 291 286 sk->sk_rcv_saddr, 292 287 tb->fast_ipv6_only, 293 - ipv6_only_sock(sk), true); 288 + ipv6_only_sock(sk), true, false); 294 289 #endif 295 290 return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr, 296 - ipv6_only_sock(sk), true); 291 + ipv6_only_sock(sk), true, false); 297 292 } 298 293 299 294 /* Obtain a reference to a local port for the given sock,
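For reference, the reworked comparison above boils down to one rule: an address of 0.0.0.0 is treated as a wildcard only for the side whose match_sk*_wildcard flag is set. The following is a minimal userspace sketch of that rule, not the kernel helper itself; the names rcv_saddr_equal, any and host are made up here for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same shape as the reworked ipv4_rcv_saddr_equal(): a saddr of 0
 * (INADDR_ANY) only counts as a match when the wildcard flag for that
 * particular side is set.
 */
static bool rcv_saddr_equal(uint32_t a1, uint32_t a2,
			    bool match1_wildcard, bool match2_wildcard)
{
	if (a1 == a2)
		return true;
	return (match1_wildcard && !a1) || (match2_wildcard && !a2);
}

int main(void)
{
	uint32_t any = 0;		/* 0.0.0.0 */
	uint32_t host = 0x7f000001;	/* 127.0.0.1 */

	/* inet_rcv_saddr_equal() keeps passing the same flag for both sides */
	printf("%d\n", rcv_saddr_equal(any, host, true, true));	/* 1 */
	printf("%d\n", rcv_saddr_equal(host, any, true, true));	/* 1 */

	/* the bind-bucket fast path above passes (true, false): only the
	 * cached bucket address (first argument) is treated as a wildcard
	 */
	printf("%d\n", rcv_saddr_equal(any, host, true, false));	/* 1 */
	printf("%d\n", rcv_saddr_equal(host, any, true, false));	/* 0 */
	return 0;
}

With the asymmetric (true, false) call on the fast path, a cached wildcard address still matches a specific one, but a specific cached address no longer matches a new wildcard bind.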
+1 -1
net/ipv4/ipip.c
··· 698 698 699 699 rtnl_link_failed: 700 700 #if IS_ENABLED(CONFIG_MPLS) 701 - xfrm4_tunnel_deregister(&mplsip_handler, AF_INET); 701 + xfrm4_tunnel_deregister(&mplsip_handler, AF_MPLS); 702 702 xfrm_tunnel_mplsip_failed: 703 703 704 704 #endif
+1 -1
net/ipv4/ipmr.c
··· 2613 2613 2614 2614 mrt = ipmr_get_table(sock_net(skb->sk), filter.table_id); 2615 2615 if (!mrt) { 2616 - if (filter.dump_all_families) 2616 + if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IPMR) 2617 2617 return skb->len; 2618 2618 2619 2619 NL_SET_ERR_MSG(cb->extack, "ipv4: MR table does not exist");
+2 -1
net/ipv4/nexthop.c
··· 276 276 return 0; 277 277 278 278 nla_put_failure: 279 + nlmsg_cancel(skb, nlh); 279 280 return -EMSGSIZE; 280 281 } 281 282 ··· 434 433 if (!valid_group_nh(nh, len, extack)) 435 434 return -EINVAL; 436 435 } 437 - for (i = NHA_GROUP + 1; i < __NHA_MAX; ++i) { 436 + for (i = NHA_GROUP_TYPE + 1; i < __NHA_MAX; ++i) { 438 437 if (!tb[i]) 439 438 continue; 440 439
+6 -8
net/ipv4/route.c
··· 491 491 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; 492 492 u32 old = READ_ONCE(*p_tstamp); 493 493 u32 now = (u32)jiffies; 494 - u32 new, delta = 0; 494 + u32 delta = 0; 495 495 496 496 if (old != now && cmpxchg(p_tstamp, old, now) == old) 497 497 delta = prandom_u32_max(now - old); 498 498 499 - /* Do not use atomic_add_return() as it makes UBSAN unhappy */ 500 - do { 501 - old = (u32)atomic_read(p_id); 502 - new = old + delta + segs; 503 - } while (atomic_cmpxchg(p_id, old, new) != old); 504 - 505 - return new - segs; 499 + /* If UBSAN reports an error there, please make sure your compiler 500 + * supports -fno-strict-overflow before reporting it that was a bug 501 + * in UBSAN, and it has been fixed in GCC-8. 502 + */ 503 + return atomic_add_return(segs + delta, p_id) - segs; 506 504 } 507 505 EXPORT_SYMBOL(ip_idents_reserve); 508 506
+1 -1
net/ipv6/ip6_fib.c
··· 664 664 if (arg.filter.table_id) { 665 665 tb = fib6_get_table(net, arg.filter.table_id); 666 666 if (!tb) { 667 - if (arg.filter.dump_all_families) 667 + if (rtnl_msg_family(cb->nlh) != PF_INET6) 668 668 goto out; 669 669 670 670 NL_SET_ERR_MSG_MOD(cb->extack, "FIB table does not exist");
+3 -2
net/ipv6/ip6mr.c
··· 98 98 #ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES 99 99 #define ip6mr_for_each_table(mrt, net) \ 100 100 list_for_each_entry_rcu(mrt, &net->ipv6.mr6_tables, list, \ 101 - lockdep_rtnl_is_held()) 101 + lockdep_rtnl_is_held() || \ 102 + list_empty(&net->ipv6.mr6_tables)) 102 103 103 104 static struct mr_table *ip6mr_mr_table_iter(struct net *net, 104 105 struct mr_table *mrt) ··· 2503 2502 2504 2503 mrt = ip6mr_get_table(sock_net(skb->sk), filter.table_id); 2505 2504 if (!mrt) { 2506 - if (filter.dump_all_families) 2505 + if (rtnl_msg_family(cb->nlh) != RTNL_FAMILY_IP6MR) 2507 2506 return skb->len; 2508 2507 2509 2508 NL_SET_ERR_MSG_MOD(cb->extack, "MR table does not exist");
+9 -15
net/mptcp/crypto.c
··· 47 47 void mptcp_crypto_hmac_sha(u64 key1, u64 key2, u8 *msg, int len, void *hmac) 48 48 { 49 49 u8 input[SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE]; 50 - __be32 mptcp_hashed_key[SHA256_DIGEST_WORDS]; 51 - __be32 *hash_out = (__force __be32 *)hmac; 52 50 struct sha256_state state; 53 51 u8 key1be[8]; 54 52 u8 key2be[8]; ··· 84 86 85 87 sha256_init(&state); 86 88 sha256_update(&state, input, SHA256_BLOCK_SIZE + SHA256_DIGEST_SIZE); 87 - sha256_final(&state, (u8 *)mptcp_hashed_key); 88 - 89 - /* takes only first 160 bits */ 90 - for (i = 0; i < 5; i++) 91 - hash_out[i] = mptcp_hashed_key[i]; 89 + sha256_final(&state, (u8 *)hmac); 92 90 } 93 91 94 92 #ifdef CONFIG_MPTCP_HMAC_TEST ··· 95 101 }; 96 102 97 103 /* we can't reuse RFC 4231 test vectors, as we have constraint on the 98 - * input and key size, and we truncate the output. 104 + * input and key size. 99 105 */ 100 106 static struct test_cast tests[] = { 101 107 { 102 108 .key = "0b0b0b0b0b0b0b0b", 103 109 .msg = "48692054", 104 - .result = "8385e24fb4235ac37556b6b886db106284a1da67", 110 + .result = "8385e24fb4235ac37556b6b886db106284a1da671699f46db1f235ec622dcafa", 105 111 }, 106 112 { 107 113 .key = "aaaaaaaaaaaaaaaa", 108 114 .msg = "dddddddd", 109 - .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492", 115 + .result = "2c5e219164ff1dca1c4a92318d847bb6b9d44492984e1eb71aff9022f71046e9", 110 116 }, 111 117 { 112 118 .key = "0102030405060708", 113 119 .msg = "cdcdcdcd", 114 - .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6", 120 + .result = "e73b9ba9969969cefb04aa0d6df18ec2fcc075b6f23b4d8c4da736a5dbbc6e7d", 115 121 }, 116 122 }; 117 123 118 124 static int __init test_mptcp_crypto(void) 119 125 { 120 - char hmac[20], hmac_hex[41]; 126 + char hmac[32], hmac_hex[65]; 121 127 u32 nonce1, nonce2; 122 128 u64 key1, key2; 123 129 u8 msg[8]; ··· 134 140 put_unaligned_be32(nonce2, &msg[4]); 135 141 136 142 mptcp_crypto_hmac_sha(key1, key2, msg, 8, hmac); 137 - for (j = 0; j < 20; ++j) 143 + for (j = 0; j < 32; ++j) 138 144 sprintf(&hmac_hex[j << 1], "%02x", hmac[j] & 0xff); 139 - hmac_hex[40] = 0; 145 + hmac_hex[64] = 0; 140 146 141 - if (memcmp(hmac_hex, tests[i].result, 40)) 147 + if (memcmp(hmac_hex, tests[i].result, 64)) 142 148 pr_err("test %d failed, got %s expected %s", i, 143 149 hmac_hex, tests[i].result); 144 150 else
+5 -4
net/mptcp/options.c
··· 7 7 #define pr_fmt(fmt) "MPTCP: " fmt 8 8 9 9 #include <linux/kernel.h> 10 + #include <crypto/sha.h> 10 11 #include <net/tcp.h> 11 12 #include <net/mptcp.h> 12 13 #include "protocol.h" ··· 536 535 static u64 add_addr_generate_hmac(u64 key1, u64 key2, u8 addr_id, 537 536 struct in_addr *addr) 538 537 { 539 - u8 hmac[MPTCP_ADDR_HMAC_LEN]; 538 + u8 hmac[SHA256_DIGEST_SIZE]; 540 539 u8 msg[7]; 541 540 542 541 msg[0] = addr_id; ··· 546 545 547 546 mptcp_crypto_hmac_sha(key1, key2, msg, 7, hmac); 548 547 549 - return get_unaligned_be64(hmac); 548 + return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]); 550 549 } 551 550 552 551 #if IS_ENABLED(CONFIG_MPTCP_IPV6) 553 552 static u64 add_addr6_generate_hmac(u64 key1, u64 key2, u8 addr_id, 554 553 struct in6_addr *addr) 555 554 { 556 - u8 hmac[MPTCP_ADDR_HMAC_LEN]; 555 + u8 hmac[SHA256_DIGEST_SIZE]; 557 556 u8 msg[19]; 558 557 559 558 msg[0] = addr_id; ··· 563 562 564 563 mptcp_crypto_hmac_sha(key1, key2, msg, 19, hmac); 565 564 566 - return get_unaligned_be64(hmac); 565 + return get_unaligned_be64(&hmac[SHA256_DIGEST_SIZE - sizeof(u64)]); 567 566 } 568 567 #endif 569 568
-1
net/mptcp/protocol.h
··· 81 81 82 82 /* MPTCP ADD_ADDR flags */ 83 83 #define MPTCP_ADDR_ECHO BIT(0) 84 - #define MPTCP_ADDR_HMAC_LEN 20 85 84 #define MPTCP_ADDR_IPVERSION_4 4 86 85 #define MPTCP_ADDR_IPVERSION_6 6 87 86
+10 -5
net/mptcp/subflow.c
··· 10 10 #include <linux/module.h> 11 11 #include <linux/netdevice.h> 12 12 #include <crypto/algapi.h> 13 + #include <crypto/sha.h> 13 14 #include <net/sock.h> 14 15 #include <net/inet_common.h> 15 16 #include <net/inet_hashtables.h> ··· 90 89 const struct sk_buff *skb) 91 90 { 92 91 struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); 93 - u8 hmac[MPTCPOPT_HMAC_LEN]; 92 + u8 hmac[SHA256_DIGEST_SIZE]; 94 93 struct mptcp_sock *msk; 95 94 int local_id; 96 95 ··· 202 201 /* validate received truncated hmac and create hmac for third ACK */ 203 202 static bool subflow_thmac_valid(struct mptcp_subflow_context *subflow) 204 203 { 205 - u8 hmac[MPTCPOPT_HMAC_LEN]; 204 + u8 hmac[SHA256_DIGEST_SIZE]; 206 205 u64 thmac; 207 206 208 207 subflow_generate_hmac(subflow->remote_key, subflow->local_key, ··· 268 267 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; 269 268 } 270 269 } else if (subflow->mp_join) { 270 + u8 hmac[SHA256_DIGEST_SIZE]; 271 + 271 272 pr_debug("subflow=%p, thmac=%llu, remote_nonce=%u", 272 273 subflow, subflow->thmac, 273 274 subflow->remote_nonce); ··· 282 279 subflow_generate_hmac(subflow->local_key, subflow->remote_key, 283 280 subflow->local_nonce, 284 281 subflow->remote_nonce, 285 - subflow->hmac); 282 + hmac); 283 + 284 + memcpy(subflow->hmac, hmac, MPTCPOPT_HMAC_LEN); 286 285 287 286 if (skb) 288 287 subflow->ssn_offset = TCP_SKB_CB(skb)->seq; ··· 352 347 const struct mptcp_options_received *mp_opt) 353 348 { 354 349 const struct mptcp_subflow_request_sock *subflow_req; 355 - u8 hmac[MPTCPOPT_HMAC_LEN]; 350 + u8 hmac[SHA256_DIGEST_SIZE]; 356 351 struct mptcp_sock *msk; 357 352 bool ret; 358 353 ··· 366 361 subflow_req->local_nonce, hmac); 367 362 368 363 ret = true; 369 - if (crypto_memneq(hmac, mp_opt->hmac, sizeof(hmac))) 364 + if (crypto_memneq(hmac, mp_opt->hmac, MPTCPOPT_HMAC_LEN)) 370 365 ret = false; 371 366 372 367 sock_put((struct sock *)msk);
+1 -1
net/qrtr/qrtr.c
··· 854 854 } 855 855 mutex_unlock(&qrtr_node_lock); 856 856 857 - qrtr_local_enqueue(node, skb, type, from, to); 857 + qrtr_local_enqueue(NULL, skb, type, from, to); 858 858 859 859 return 0; 860 860 }
+1
net/rxrpc/Makefile
··· 25 25 peer_event.o \ 26 26 peer_object.o \ 27 27 recvmsg.o \ 28 + rtt.o \ 28 29 security.o \ 29 30 sendmsg.o \ 30 31 skbuff.o \
+17 -8
net/rxrpc/ar-internal.h
··· 7 7 8 8 #include <linux/atomic.h> 9 9 #include <linux/seqlock.h> 10 + #include <linux/win_minmax.h> 10 11 #include <net/net_namespace.h> 11 12 #include <net/netns/generic.h> 12 13 #include <net/sock.h> ··· 312 311 #define RXRPC_RTT_CACHE_SIZE 32 313 312 spinlock_t rtt_input_lock; /* RTT lock for input routine */ 314 313 ktime_t rtt_last_req; /* Time of last RTT request */ 315 - u64 rtt; /* Current RTT estimate (in nS) */ 316 - u64 rtt_sum; /* Sum of cache contents */ 317 - u64 rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* Determined RTT cache */ 318 - u8 rtt_cursor; /* next entry at which to insert */ 319 - u8 rtt_usage; /* amount of cache actually used */ 314 + unsigned int rtt_count; /* Number of samples we've got */ 315 + 316 + u32 srtt_us; /* smoothed round trip time << 3 in usecs */ 317 + u32 mdev_us; /* medium deviation */ 318 + u32 mdev_max_us; /* maximal mdev for the last rtt period */ 319 + u32 rttvar_us; /* smoothed mdev_max */ 320 + u32 rto_j; /* Retransmission timeout in jiffies */ 321 + u8 backoff; /* Backoff timeout */ 320 322 321 323 u8 cong_cwnd; /* Congestion window size */ 322 324 }; ··· 1045 1041 extern unsigned int rxrpc_rx_window_size; 1046 1042 extern unsigned int rxrpc_rx_mtu; 1047 1043 extern unsigned int rxrpc_rx_jumbo_max; 1048 - extern unsigned long rxrpc_resend_timeout; 1049 1044 1050 1045 extern const s8 rxrpc_ack_priority[]; 1051 1046 ··· 1072 1069 * peer_event.c 1073 1070 */ 1074 1071 void rxrpc_error_report(struct sock *); 1075 - void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, 1076 - rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); 1077 1072 void rxrpc_peer_keepalive_worker(struct work_struct *); 1078 1073 1079 1074 /* ··· 1102 1101 */ 1103 1102 void rxrpc_notify_socket(struct rxrpc_call *); 1104 1103 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); 1104 + 1105 + /* 1106 + * rtt.c 1107 + */ 1108 + void rxrpc_peer_add_rtt(struct rxrpc_call *, enum rxrpc_rtt_rx_trace, 1109 + rxrpc_serial_t, rxrpc_serial_t, ktime_t, ktime_t); 1110 + unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *, bool); 1111 + void rxrpc_peer_init_rtt(struct rxrpc_peer *); 1105 1112 1106 1113 /* 1107 1114 * rxkad.c
+1 -1
net/rxrpc/call_accept.c
··· 248 248 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 249 249 ktime_t now = skb->tstamp; 250 250 251 - if (call->peer->rtt_usage < 3 || 251 + if (call->peer->rtt_count < 3 || 252 252 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) 253 253 rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, 254 254 true, true,
+8 -14
net/rxrpc/call_event.c
··· 111 111 } else { 112 112 unsigned long now = jiffies, ack_at; 113 113 114 - if (call->peer->rtt_usage > 0) 115 - ack_at = nsecs_to_jiffies(call->peer->rtt); 114 + if (call->peer->srtt_us != 0) 115 + ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3); 116 116 else 117 117 ack_at = expiry; 118 118 ··· 157 157 static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j) 158 158 { 159 159 struct sk_buff *skb; 160 - unsigned long resend_at; 160 + unsigned long resend_at, rto_j; 161 161 rxrpc_seq_t cursor, seq, top; 162 - ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo; 162 + ktime_t now, max_age, oldest, ack_ts; 163 163 int ix; 164 164 u8 annotation, anno_type, retrans = 0, unacked = 0; 165 165 166 166 _enter("{%d,%d}", call->tx_hard_ack, call->tx_top); 167 167 168 - if (call->peer->rtt_usage > 1) 169 - timeout = ns_to_ktime(call->peer->rtt * 3 / 2); 170 - else 171 - timeout = ms_to_ktime(rxrpc_resend_timeout); 172 - min_timeo = ns_to_ktime((1000000000 / HZ) * 4); 173 - if (ktime_before(timeout, min_timeo)) 174 - timeout = min_timeo; 168 + rto_j = call->peer->rto_j; 175 169 176 170 now = ktime_get_real(); 177 - max_age = ktime_sub(now, timeout); 171 + max_age = ktime_sub(now, jiffies_to_usecs(rto_j)); 178 172 179 173 spin_lock_bh(&call->lock); 180 174 ··· 213 219 } 214 220 215 221 resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest))); 216 - resend_at += jiffies + rxrpc_resend_timeout; 222 + resend_at += jiffies + rto_j; 217 223 WRITE_ONCE(call->resend_at, resend_at); 218 224 219 225 if (unacked) ··· 228 234 rxrpc_timer_set_for_resend); 229 235 spin_unlock_bh(&call->lock); 230 236 ack_ts = ktime_sub(now, call->acks_latest_ts); 231 - if (ktime_to_ns(ack_ts) < call->peer->rtt) 237 + if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3)) 232 238 goto out; 233 239 rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false, 234 240 rxrpc_propose_ack_ping_for_lost_ack);
+37 -7
net/rxrpc/input.c
··· 91 91 /* We analyse the number of packets that get ACK'd per RTT 92 92 * period and increase the window if we managed to fill it. 93 93 */ 94 - if (call->peer->rtt_usage == 0) 94 + if (call->peer->rtt_count == 0) 95 95 goto out; 96 96 if (ktime_before(skb->tstamp, 97 - ktime_add_ns(call->cong_tstamp, 98 - call->peer->rtt))) 97 + ktime_add_us(call->cong_tstamp, 98 + call->peer->srtt_us >> 3))) 99 99 goto out_no_clear_ca; 100 100 change = rxrpc_cong_rtt_window_end; 101 101 call->cong_tstamp = skb->tstamp; ··· 803 803 } 804 804 805 805 /* 806 + * Return true if the ACK is valid - ie. it doesn't appear to have regressed 807 + * with respect to the ack state conveyed by preceding ACKs. 808 + */ 809 + static bool rxrpc_is_ack_valid(struct rxrpc_call *call, 810 + rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt) 811 + { 812 + rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq); 813 + 814 + if (after(first_pkt, base)) 815 + return true; /* The window advanced */ 816 + 817 + if (before(first_pkt, base)) 818 + return false; /* firstPacket regressed */ 819 + 820 + if (after_eq(prev_pkt, call->ackr_prev_seq)) 821 + return true; /* previousPacket hasn't regressed. */ 822 + 823 + /* Some rx implementations put a serial number in previousPacket. */ 824 + if (after_eq(prev_pkt, base + call->tx_winsize)) 825 + return false; 826 + return true; 827 + } 828 + 829 + /* 806 830 * Process an ACK packet. 807 831 * 808 832 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet ··· 889 865 } 890 866 891 867 /* Discard any out-of-order or duplicate ACKs (outside lock). */ 892 - if (before(first_soft_ack, call->ackr_first_seq) || 893 - before(prev_pkt, call->ackr_prev_seq)) 868 + if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { 869 + trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, 870 + first_soft_ack, call->ackr_first_seq, 871 + prev_pkt, call->ackr_prev_seq); 894 872 return; 873 + } 895 874 896 875 buf.info.rxMTU = 0; 897 876 ioffset = offset + nr_acks + 3; ··· 905 878 spin_lock(&call->input_lock); 906 879 907 880 /* Discard any out-of-order or duplicate ACKs (inside lock). */ 908 - if (before(first_soft_ack, call->ackr_first_seq) || 909 - before(prev_pkt, call->ackr_prev_seq)) 881 + if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) { 882 + trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial, 883 + first_soft_ack, call->ackr_first_seq, 884 + prev_pkt, call->ackr_prev_seq); 910 885 goto out; 886 + } 911 887 call->acks_latest_ts = skb->tstamp; 912 888 913 889 call->ackr_first_seq = first_soft_ack;
-5
net/rxrpc/misc.c
··· 63 63 */ 64 64 unsigned int rxrpc_rx_jumbo_max = 4; 65 65 66 - /* 67 - * Time till packet resend (in milliseconds). 68 - */ 69 - unsigned long rxrpc_resend_timeout = 4 * HZ; 70 - 71 66 const s8 rxrpc_ack_priority[] = { 72 67 [0] = 0, 73 68 [RXRPC_ACK_DELAY] = 1,
+3 -6
net/rxrpc/output.c
··· 369 369 (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) || 370 370 retrans || 371 371 call->cong_mode == RXRPC_CALL_SLOW_START || 372 - (call->peer->rtt_usage < 3 && sp->hdr.seq & 1) || 372 + (call->peer->rtt_count < 3 && sp->hdr.seq & 1) || 373 373 ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), 374 374 ktime_get_real()))) 375 375 whdr.flags |= RXRPC_REQUEST_ACK; ··· 423 423 if (whdr.flags & RXRPC_REQUEST_ACK) { 424 424 call->peer->rtt_last_req = skb->tstamp; 425 425 trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial); 426 - if (call->peer->rtt_usage > 1) { 426 + if (call->peer->rtt_count > 1) { 427 427 unsigned long nowj = jiffies, ack_lost_at; 428 428 429 - ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt); 430 - if (ack_lost_at < 1) 431 - ack_lost_at = 1; 432 - 429 + ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans); 433 430 ack_lost_at += nowj; 434 431 WRITE_ONCE(call->ack_lost_at, ack_lost_at); 435 432 rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
-46
net/rxrpc/peer_event.c
··· 296 296 } 297 297 298 298 /* 299 - * Add RTT information to cache. This is called in softirq mode and has 300 - * exclusive access to the peer RTT data. 301 - */ 302 - void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 303 - rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 304 - ktime_t send_time, ktime_t resp_time) 305 - { 306 - struct rxrpc_peer *peer = call->peer; 307 - s64 rtt; 308 - u64 sum = peer->rtt_sum, avg; 309 - u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage; 310 - 311 - rtt = ktime_to_ns(ktime_sub(resp_time, send_time)); 312 - if (rtt < 0) 313 - return; 314 - 315 - spin_lock(&peer->rtt_input_lock); 316 - 317 - /* Replace the oldest datum in the RTT buffer */ 318 - sum -= peer->rtt_cache[cursor]; 319 - sum += rtt; 320 - peer->rtt_cache[cursor] = rtt; 321 - peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1); 322 - peer->rtt_sum = sum; 323 - if (usage < RXRPC_RTT_CACHE_SIZE) { 324 - usage++; 325 - peer->rtt_usage = usage; 326 - } 327 - 328 - spin_unlock(&peer->rtt_input_lock); 329 - 330 - /* Now recalculate the average */ 331 - if (usage == RXRPC_RTT_CACHE_SIZE) { 332 - avg = sum / RXRPC_RTT_CACHE_SIZE; 333 - } else { 334 - avg = sum; 335 - do_div(avg, usage); 336 - } 337 - 338 - /* Don't need to update this under lock */ 339 - peer->rtt = avg; 340 - trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt, 341 - usage, avg); 342 - } 343 - 344 - /* 345 299 * Perform keep-alive pings. 346 300 */ 347 301 static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
+7 -5
net/rxrpc/peer_object.c
··· 225 225 spin_lock_init(&peer->rtt_input_lock); 226 226 peer->debug_id = atomic_inc_return(&rxrpc_debug_id); 227 227 228 + rxrpc_peer_init_rtt(peer); 229 + 228 230 if (RXRPC_TX_SMSS > 2190) 229 231 peer->cong_cwnd = 2; 230 232 else if (RXRPC_TX_SMSS > 1095) ··· 499 497 EXPORT_SYMBOL(rxrpc_kernel_get_peer); 500 498 501 499 /** 502 - * rxrpc_kernel_get_rtt - Get a call's peer RTT 500 + * rxrpc_kernel_get_srtt - Get a call's peer smoothed RTT 503 501 * @sock: The socket on which the call is in progress. 504 502 * @call: The call to query 505 503 * 506 - * Get the call's peer RTT. 504 + * Get the call's peer smoothed RTT. 507 505 */ 508 - u64 rxrpc_kernel_get_rtt(struct socket *sock, struct rxrpc_call *call) 506 + u32 rxrpc_kernel_get_srtt(struct socket *sock, struct rxrpc_call *call) 509 507 { 510 - return call->peer->rtt; 508 + return call->peer->srtt_us >> 3; 511 509 } 512 - EXPORT_SYMBOL(rxrpc_kernel_get_rtt); 510 + EXPORT_SYMBOL(rxrpc_kernel_get_srtt);
+4 -4
net/rxrpc/proc.c
··· 222 222 seq_puts(seq, 223 223 "Proto Local " 224 224 " Remote " 225 - " Use CW MTU LastUse RTT Rc\n" 225 + " Use CW MTU LastUse RTT RTO\n" 226 226 ); 227 227 return 0; 228 228 } ··· 236 236 now = ktime_get_seconds(); 237 237 seq_printf(seq, 238 238 "UDP %-47.47s %-47.47s %3u" 239 - " %3u %5u %6llus %12llu %2u\n", 239 + " %3u %5u %6llus %8u %8u\n", 240 240 lbuff, 241 241 rbuff, 242 242 atomic_read(&peer->usage), 243 243 peer->cong_cwnd, 244 244 peer->mtu, 245 245 now - peer->last_tx_at, 246 - peer->rtt, 247 - peer->rtt_cursor); 246 + peer->srtt_us >> 3, 247 + jiffies_to_usecs(peer->rto_j)); 248 248 249 249 return 0; 250 250 }
+195
net/rxrpc/rtt.c
··· 1 + // SPDX-License-Identifier: GPL-2.0
 2 + /* RTT/RTO calculation.
 3 + *
 4 + * Adapted from TCP for AF_RXRPC by David Howells (dhowells@redhat.com)
 5 + *
 6 + * https://tools.ietf.org/html/rfc6298
 7 + * https://tools.ietf.org/html/rfc1122#section-4.2.3.1
 8 + * http://ccr.sigcomm.org/archive/1995/jan95/ccr-9501-partridge87.pdf
 9 + */
 10 + 
 11 + #include <linux/net.h>
 12 + #include "ar-internal.h"
 13 + 
 14 + #define RXRPC_RTO_MAX ((unsigned)(120 * HZ))
 15 + #define RXRPC_TIMEOUT_INIT ((unsigned)(1*HZ)) /* RFC6298 2.1 initial RTO value */
 16 + #define rxrpc_jiffies32 ((u32)jiffies) /* As rxrpc_jiffies32 */
 17 + #define rxrpc_min_rtt_wlen 300 /* As sysctl_tcp_min_rtt_wlen */
 18 + 
 19 + static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
 20 + {
 21 + return 200;
 22 + }
 23 + 
 24 + static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
 25 + {
 26 + return _usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
 27 + }
 28 + 
 29 + static u32 rxrpc_bound_rto(u32 rto)
 30 + {
 31 + return min(rto, RXRPC_RTO_MAX);
 32 + }
 33 + 
 34 + /*
 35 + * Called to compute a smoothed rtt estimate. The data fed to this
 36 + * routine either comes from timestamps, or from segments that were
 37 + * known _not_ to have been retransmitted [see Karn/Partridge
 38 + * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88
 39 + * piece by Van Jacobson.
 40 + * NOTE: the next three routines used to be one big routine.
 41 + * To save cycles in the RFC 1323 implementation it was better to break
 42 + * it up into three procedures. -- erics
 43 + */
 44 + static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
 45 + {
 46 + long m = sample_rtt_us; /* RTT */
 47 + u32 srtt = peer->srtt_us;
 48 + 
 49 + /* The following amusing code comes from Jacobson's
 50 + * article in SIGCOMM '88. Note that rtt and mdev
 51 + * are scaled versions of rtt and mean deviation.
 52 + * This is designed to be as fast as possible
 53 + * m stands for "measurement".
 54 + *
 55 + * On a 1990 paper the rto value is changed to:
 56 + * RTO = rtt + 4 * mdev
 57 + *
 58 + * Funny. This algorithm seems to be very broken.
 59 + * These formulae increase RTO, when it should be decreased, increase
 60 + * too slowly, when it should be increased quickly, decrease too quickly
 61 + * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
 62 + * does not matter how to _calculate_ it. Seems, it was trap
 63 + * that VJ failed to avoid. 8)
 64 + */
 65 + if (srtt != 0) {
 66 + m -= (srtt >> 3); /* m is now error in rtt est */
 67 + srtt += m; /* rtt = 7/8 rtt + 1/8 new */
 68 + if (m < 0) {
 69 + m = -m; /* m is now abs(error) */
 70 + m -= (peer->mdev_us >> 2); /* similar update on mdev */
 71 + /* This is similar to one of Eifel findings.
 72 + * Eifel blocks mdev updates when rtt decreases.
 73 + * This solution is a bit different: we use finer gain
 74 + * for mdev in this case (alpha*beta).
 75 + * Like Eifel it also prevents growth of rto,
 76 + * but also it limits too fast rto decreases,
 77 + * happening in pure Eifel.
 78 + */
 79 + if (m > 0)
 80 + m >>= 3;
 81 + } else {
 82 + m -= (peer->mdev_us >> 2); /* similar update on mdev */
 83 + }
 84 + 
 85 + peer->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */
 86 + if (peer->mdev_us > peer->mdev_max_us) {
 87 + peer->mdev_max_us = peer->mdev_us;
 88 + if (peer->mdev_max_us > peer->rttvar_us)
 89 + peer->rttvar_us = peer->mdev_max_us;
 90 + }
 91 + } else {
 92 + /* no previous measure.
*/ 93 + srtt = m << 3; /* take the measured time to be rtt */ 94 + peer->mdev_us = m << 1; /* make sure rto = 3*rtt */ 95 + peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer)); 96 + peer->mdev_max_us = peer->rttvar_us; 97 + } 98 + 99 + peer->srtt_us = max(1U, srtt); 100 + } 101 + 102 + /* 103 + * Calculate rto without backoff. This is the second half of Van Jacobson's 104 + * routine referred to above. 105 + */ 106 + static void rxrpc_set_rto(struct rxrpc_peer *peer) 107 + { 108 + u32 rto; 109 + 110 + /* 1. If rtt variance happened to be less 50msec, it is hallucination. 111 + * It cannot be less due to utterly erratic ACK generation made 112 + * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ 113 + * to do with delayed acks, because at cwnd>2 true delack timeout 114 + * is invisible. Actually, Linux-2.4 also generates erratic 115 + * ACKs in some circumstances. 116 + */ 117 + rto = __rxrpc_set_rto(peer); 118 + 119 + /* 2. Fixups made earlier cannot be right. 120 + * If we do not estimate RTO correctly without them, 121 + * all the algo is pure shit and should be replaced 122 + * with correct one. It is exactly, which we pretend to do. 123 + */ 124 + 125 + /* NOTE: clamping at RXRPC_RTO_MIN is not required, current algo 126 + * guarantees that rto is higher. 127 + */ 128 + peer->rto_j = rxrpc_bound_rto(rto); 129 + } 130 + 131 + static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us) 132 + { 133 + if (rtt_us < 0) 134 + return; 135 + 136 + //rxrpc_update_rtt_min(peer, rtt_us); 137 + rxrpc_rtt_estimator(peer, rtt_us); 138 + rxrpc_set_rto(peer); 139 + 140 + /* RFC6298: only reset backoff on valid RTT measurement. */ 141 + peer->backoff = 0; 142 + } 143 + 144 + /* 145 + * Add RTT information to cache. This is called in softirq mode and has 146 + * exclusive access to the peer RTT data. 147 + */ 148 + void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why, 149 + rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial, 150 + ktime_t send_time, ktime_t resp_time) 151 + { 152 + struct rxrpc_peer *peer = call->peer; 153 + s64 rtt_us; 154 + 155 + rtt_us = ktime_to_us(ktime_sub(resp_time, send_time)); 156 + if (rtt_us < 0) 157 + return; 158 + 159 + spin_lock(&peer->rtt_input_lock); 160 + rxrpc_ack_update_rtt(peer, rtt_us); 161 + if (peer->rtt_count < 3) 162 + peer->rtt_count++; 163 + spin_unlock(&peer->rtt_input_lock); 164 + 165 + trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, 166 + peer->srtt_us >> 3, peer->rto_j); 167 + } 168 + 169 + /* 170 + * Get the retransmission timeout to set in jiffies, backing it off each time 171 + * we retransmit. 172 + */ 173 + unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans) 174 + { 175 + u64 timo_j; 176 + u8 backoff = READ_ONCE(peer->backoff); 177 + 178 + timo_j = peer->rto_j; 179 + timo_j <<= backoff; 180 + if (retrans && timo_j * 2 <= RXRPC_RTO_MAX) 181 + WRITE_ONCE(peer->backoff, backoff + 1); 182 + 183 + if (timo_j < 1) 184 + timo_j = 1; 185 + 186 + return timo_j; 187 + } 188 + 189 + void rxrpc_peer_init_rtt(struct rxrpc_peer *peer) 190 + { 191 + peer->rto_j = RXRPC_TIMEOUT_INIT; 192 + peer->mdev_us = jiffies_to_usecs(RXRPC_TIMEOUT_INIT); 193 + peer->backoff = 0; 194 + //minmax_reset(&peer->rtt_min, rxrpc_jiffies32, ~0U); 195 + }
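To see what the new estimator above is computing, here is a small userspace sketch of the underlying RFC 6298 arithmetic in plain floating point. This is an illustrative assumption for readability, not the kernel code: rtt.c keeps srtt_us scaled by 8 and tracks mdev/mdev_max/rttvar in fixed point, with the RFC's 4x factor effectively folded into rttvar_us.

#include <math.h>
#include <stdio.h>

#define RTO_MAX_US (120.0 * 1e6)	/* mirrors RXRPC_RTO_MAX (120 seconds) */

struct rtt_est {
	double srtt_us;		/* smoothed round-trip time */
	double rttvar_us;	/* round-trip time variation */
	double rto_us;		/* retransmission timeout */
	int samples;
};

static void rtt_sample(struct rtt_est *e, double m_us)
{
	if (e->samples++ == 0) {
		/* First sample: SRTT = R, RTTVAR = R/2 (RFC 6298 section 2.2) */
		e->srtt_us = m_us;
		e->rttvar_us = m_us / 2;
	} else {
		/* RTTVAR = 3/4 RTTVAR + 1/4 |SRTT - R'|, SRTT = 7/8 SRTT + 1/8 R' */
		e->rttvar_us = 0.75 * e->rttvar_us + 0.25 * fabs(e->srtt_us - m_us);
		e->srtt_us = 0.875 * e->srtt_us + 0.125 * m_us;
	}
	/* RTO = SRTT + 4 * RTTVAR, capped the way rxrpc_bound_rto() caps at RXRPC_RTO_MAX */
	e->rto_us = e->srtt_us + 4 * e->rttvar_us;
	if (e->rto_us > RTO_MAX_US)
		e->rto_us = RTO_MAX_US;
}

int main(void)
{
	struct rtt_est e = { 0 };
	double samples[] = { 30000, 32000, 28000, 90000 };	/* microseconds */
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_sample(&e, samples[i]);
		printf("sample=%.0fus srtt=%.0fus rto=%.0fus\n",
		       samples[i], e.srtt_us, e.rto_us);
	}
	return 0;
}

With these inputs the first RTO comes out at three times the first sample (90 ms for a 30 ms RTT), matching the "make sure rto = 3*rtt" initialisation above, and the 90 ms outlier in the last sample widens RTTVAR and therefore the RTO; the per-peer rto_j and backoff fields then feed that value into rxrpc_get_rto_backoff().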
+1 -2
net/rxrpc/rxkad.c
··· 1148 1148 ret = rxkad_decrypt_ticket(conn, skb, ticket, ticket_len, &session_key, 1149 1149 &expiry, _abort_code); 1150 1150 if (ret < 0) 1151 - goto temporary_error_free_resp; 1151 + goto temporary_error_free_ticket; 1152 1152 1153 1153 /* use the session key from inside the ticket to decrypt the 1154 1154 * response */ ··· 1230 1230 1231 1231 temporary_error_free_ticket: 1232 1232 kfree(ticket); 1233 - temporary_error_free_resp: 1234 1233 kfree(response); 1235 1234 temporary_error: 1236 1235 /* Ignore the response packet if we got a temporary error such as
+9 -17
net/rxrpc/sendmsg.c
··· 66 66 struct rxrpc_call *call) 67 67 { 68 68 rxrpc_seq_t tx_start, tx_win; 69 - signed long rtt2, timeout; 70 - u64 rtt; 69 + signed long rtt, timeout; 71 70 72 - rtt = READ_ONCE(call->peer->rtt); 73 - rtt2 = nsecs_to_jiffies64(rtt) * 2; 74 - if (rtt2 < 2) 75 - rtt2 = 2; 71 + rtt = READ_ONCE(call->peer->srtt_us) >> 3; 72 + rtt = usecs_to_jiffies(rtt) * 2; 73 + if (rtt < 2) 74 + rtt = 2; 76 75 77 - timeout = rtt2; 76 + timeout = rtt; 78 77 tx_start = READ_ONCE(call->tx_hard_ack); 79 78 80 79 for (;;) { ··· 91 92 return -EINTR; 92 93 93 94 if (tx_win != tx_start) { 94 - timeout = rtt2; 95 + timeout = rtt; 95 96 tx_start = tx_win; 96 97 } 97 98 ··· 270 271 _debug("need instant resend %d", ret); 271 272 rxrpc_instant_resend(call, ix); 272 273 } else { 273 - unsigned long now = jiffies, resend_at; 274 + unsigned long now = jiffies; 275 + unsigned long resend_at = now + call->peer->rto_j; 274 276 275 - if (call->peer->rtt_usage > 1) 276 - resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2); 277 - else 278 - resend_at = rxrpc_resend_timeout; 279 - if (resend_at < 1) 280 - resend_at = 1; 281 - 282 - resend_at += now; 283 277 WRITE_ONCE(call->resend_at, resend_at); 284 278 rxrpc_reduce_call_timer(call, resend_at, now, 285 279 rxrpc_timer_set_for_send);
-9
net/rxrpc/sysctl.c
··· 71 71 .extra1 = (void *)&one_jiffy, 72 72 .extra2 = (void *)&max_jiffies, 73 73 }, 74 - { 75 - .procname = "resend_timeout", 76 - .data = &rxrpc_resend_timeout, 77 - .maxlen = sizeof(unsigned long), 78 - .mode = 0644, 79 - .proc_handler = proc_doulongvec_ms_jiffies_minmax, 80 - .extra1 = (void *)&one_jiffy, 81 - .extra2 = (void *)&max_jiffies, 82 - }, 83 74 84 75 /* Non-time values */ 85 76 {
+11 -3
net/sctp/sm_sideeffect.c
··· 1523 1523 timeout = asoc->timeouts[cmd->obj.to]; 1524 1524 BUG_ON(!timeout); 1525 1525 1526 - timer->expires = jiffies + timeout; 1527 - sctp_association_hold(asoc); 1528 - add_timer(timer); 1526 + /* 1527 + * SCTP has a hard time with timer starts. Because we process 1528 + * timer starts as side effects, it can be hard to tell if we 1529 + * have already started a timer or not, which leads to BUG 1530 + * halts when we call add_timer. So here, instead of just starting 1531 + * a timer, if the timer is already started, and just mod 1532 + * the timer with the shorter of the two expiration times 1533 + */ 1534 + if (!timer_pending(timer)) 1535 + sctp_association_hold(asoc); 1536 + timer_reduce(timer, jiffies + timeout); 1529 1537 break; 1530 1538 1531 1539 case SCTP_CMD_TIMER_RESTART:
+5 -4
net/sctp/sm_statefuns.c
··· 1856 1856 /* Update the content of current association. */ 1857 1857 sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); 1858 1858 sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); 1859 - if (sctp_state(asoc, SHUTDOWN_PENDING) && 1859 + if ((sctp_state(asoc, SHUTDOWN_PENDING) || 1860 + sctp_state(asoc, SHUTDOWN_SENT)) && 1860 1861 (sctp_sstate(asoc->base.sk, CLOSING) || 1861 1862 sock_flag(asoc->base.sk, SOCK_DEAD))) { 1862 - /* if were currently in SHUTDOWN_PENDING, but the socket 1863 - * has been closed by user, don't transition to ESTABLISHED. 1864 - * Instead trigger SHUTDOWN bundled with COOKIE_ACK. 1863 + /* If the socket has been closed by user, don't 1864 + * transition to ESTABLISHED. Instead trigger SHUTDOWN 1865 + * bundled with COOKIE_ACK. 1865 1866 */ 1866 1867 sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); 1867 1868 return sctp_sf_do_9_2_start_shutdown(net, ep, asoc,
+5 -1
net/tipc/udp_media.c
··· 161 161 struct udp_bearer *ub, struct udp_media_addr *src, 162 162 struct udp_media_addr *dst, struct dst_cache *cache) 163 163 { 164 - struct dst_entry *ndst = dst_cache_get(cache); 164 + struct dst_entry *ndst; 165 165 int ttl, err = 0; 166 166 167 + local_bh_disable(); 168 + ndst = dst_cache_get(cache); 167 169 if (dst->proto == htons(ETH_P_IP)) { 168 170 struct rtable *rt = (struct rtable *)ndst; 169 171 ··· 212 210 src->port, dst->port, false); 213 211 #endif 214 212 } 213 + local_bh_enable(); 215 214 return err; 216 215 217 216 tx_error: 217 + local_bh_enable(); 218 218 kfree_skb(skb); 219 219 return err; 220 220 }
+10 -7
net/tls/tls_sw.c
··· 780 780 781 781 static int bpf_exec_tx_verdict(struct sk_msg *msg, struct sock *sk, 782 782 bool full_record, u8 record_type, 783 - size_t *copied, int flags) 783 + ssize_t *copied, int flags) 784 784 { 785 785 struct tls_context *tls_ctx = tls_get_ctx(sk); 786 786 struct tls_sw_context_tx *ctx = tls_sw_ctx_tx(tls_ctx); ··· 796 796 psock = sk_psock_get(sk); 797 797 if (!psock || !policy) { 798 798 err = tls_push_record(sk, flags, record_type); 799 - if (err && err != -EINPROGRESS) { 799 + if (err && sk->sk_err == EBADMSG) { 800 800 *copied -= sk_msg_free(sk, msg); 801 801 tls_free_open_rec(sk); 802 + err = -sk->sk_err; 802 803 } 803 804 if (psock) 804 805 sk_psock_put(sk, psock); ··· 825 824 switch (psock->eval) { 826 825 case __SK_PASS: 827 826 err = tls_push_record(sk, flags, record_type); 828 - if (err && err != -EINPROGRESS) { 827 + if (err && sk->sk_err == EBADMSG) { 829 828 *copied -= sk_msg_free(sk, msg); 830 829 tls_free_open_rec(sk); 830 + err = -sk->sk_err; 831 831 goto out_err; 832 832 } 833 833 break; ··· 918 916 unsigned char record_type = TLS_RECORD_TYPE_DATA; 919 917 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 920 918 bool eor = !(msg->msg_flags & MSG_MORE); 921 - size_t try_to_copy, copied = 0; 919 + size_t try_to_copy; 920 + ssize_t copied = 0; 922 921 struct sk_msg *msg_pl, *msg_en; 923 922 struct tls_rec *rec; 924 923 int required_size; ··· 1121 1118 1122 1119 release_sock(sk); 1123 1120 mutex_unlock(&tls_ctx->tx_lock); 1124 - return copied ? copied : ret; 1121 + return copied > 0 ? copied : ret; 1125 1122 } 1126 1123 1127 1124 static int tls_sw_do_sendpage(struct sock *sk, struct page *page, ··· 1135 1132 struct sk_msg *msg_pl; 1136 1133 struct tls_rec *rec; 1137 1134 int num_async = 0; 1138 - size_t copied = 0; 1135 + ssize_t copied = 0; 1139 1136 bool full_record; 1140 1137 int record_room; 1141 1138 int ret = 0; ··· 1237 1234 } 1238 1235 sendpage_end: 1239 1236 ret = sk_stream_error(sk, flags, ret); 1240 - return copied ? copied : ret; 1237 + return copied > 0 ? copied : ret; 1241 1238 } 1242 1239 1243 1240 int tls_sw_sendpage_locked(struct sock *sk, struct page *page,
+14 -2
security/security.c
··· 1965 1965 1966 1966 int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) 1967 1967 { 1968 - return call_int_hook(secid_to_secctx, -EOPNOTSUPP, secid, secdata, 1969 - seclen); 1968 + struct security_hook_list *hp; 1969 + int rc; 1970 + 1971 + /* 1972 + * Currently, only one LSM can implement secid_to_secctx (i.e this 1973 + * LSM hook is not "stackable"). 1974 + */ 1975 + hlist_for_each_entry(hp, &security_hook_heads.secid_to_secctx, list) { 1976 + rc = hp->hook.secid_to_secctx(secid, secdata, seclen); 1977 + if (rc != LSM_RET_DEFAULT(secid_to_secctx)) 1978 + return rc; 1979 + } 1980 + 1981 + return LSM_RET_DEFAULT(secid_to_secctx); 1970 1982 } 1971 1983 EXPORT_SYMBOL(security_secid_to_secctx); 1972 1984
+12 -1
tools/testing/selftests/bpf/prog_tests/mmap.c
··· 19 19 const size_t map_sz = roundup_page(sizeof(struct map_data)); 20 20 const int zero = 0, one = 1, two = 2, far = 1500; 21 21 const long page_size = sysconf(_SC_PAGE_SIZE); 22 - int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd; 22 + int err, duration = 0, i, data_map_fd, data_map_id, tmp_fd, rdmap_fd; 23 23 struct bpf_map *data_map, *bss_map; 24 24 void *bss_mmaped = NULL, *map_mmaped = NULL, *tmp1, *tmp2; 25 25 struct test_mmap__bss *bss_data; ··· 36 36 bss_map = skel->maps.bss; 37 37 data_map = skel->maps.data_map; 38 38 data_map_fd = bpf_map__fd(data_map); 39 + 40 + rdmap_fd = bpf_map__fd(skel->maps.rdonly_map); 41 + tmp1 = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, rdmap_fd, 0); 42 + if (CHECK(tmp1 != MAP_FAILED, "rdonly_write_mmap", "unexpected success\n")) { 43 + munmap(tmp1, 4096); 44 + goto cleanup; 45 + } 46 + /* now double-check if it's mmap()'able at all */ 47 + tmp1 = mmap(NULL, 4096, PROT_READ, MAP_SHARED, rdmap_fd, 0); 48 + if (CHECK(tmp1 == MAP_FAILED, "rdonly_read_mmap", "failed: %d\n", errno)) 49 + goto cleanup; 39 50 40 51 /* get map's ID */ 41 52 memset(&map_info, 0, map_info_sz);
+8
tools/testing/selftests/bpf/progs/test_mmap.c
··· 9 9 10 10 struct { 11 11 __uint(type, BPF_MAP_TYPE_ARRAY); 12 + __uint(max_entries, 4096); 13 + __uint(map_flags, BPF_F_MMAPABLE | BPF_F_RDONLY_PROG); 14 + __type(key, __u32); 15 + __type(value, char); 16 + } rdonly_map SEC(".maps"); 17 + 18 + struct { 19 + __uint(type, BPF_MAP_TYPE_ARRAY); 12 20 __uint(max_entries, 512 * 4); /* at least 4 pages of data */ 13 21 __uint(map_flags, BPF_F_MMAPABLE); 14 22 __type(key, __u32);
+1 -1
tools/testing/selftests/drivers/net/mlxsw/qos_mc_aware.sh
··· 300 300 local i 301 301 302 302 for ((i = 0; i < attempts; ++i)); do 303 - if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 0.1; then 303 + if $ARPING -c 1 -I $h1 -b 192.0.2.66 -q -w 1; then 304 304 ((passes++)) 305 305 fi 306 306
+1 -1
tools/testing/selftests/wireguard/qemu/Makefile
··· 44 44 $(eval $(call tar_download,MUSL,musl,1.2.0,.tar.gz,https://musl.libc.org/releases/,c6de7b191139142d3f9a7b5b702c9cae1b5ee6e7f57e582da9328629408fd4e8)) 45 45 $(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c)) 46 46 $(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d)) 47 - $(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae)) 47 + $(eval $(call tar_download,IPROUTE2,iproute2,5.6.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,1b5b0e25ce6e23da7526ea1da044e814ad85ba761b10dd29c2b027c056b04692)) 48 48 $(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c)) 49 49 $(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa)) 50 50 $(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a))