Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:
"Another week, another set of bug fixes:

1) Fix pskb_pull length in __xfrm_transport_prep(), from Xin Long.

2) Fix double xfrm_state put in esp{4,6}_gro_receive(), also from Xin
Long.

3) Re-arm discovery timer properly in mac80211 mesh code, from Linus
Lüssing.

4) Prevent buffer overflows in nf_conntrack_pptp debug code, from
Pablo Neira Ayuso.

5) Fix race in ktls code between tls_sw_recvmsg() and
tls_decrypt_done(), from Vinay Kumar Yadav.

6) Fix crashes on TCP fallback in MPTCP code, from Paolo Abeni.

7) More validation is necessary for untrusted GSO packets coming from
virtualization devices, from Willem de Bruijn.

8) Fix endianness of bnxt_en firmware message length accesses, from
Edwin Peer.

9) Fix infinite loop in sch_fq_pie, from Davide Caratti.

10) Fix lockdep splat in DSA by setting lockless TX in netdev features
for slave ports, from Vladimir Oltean.

11) Fix suspend/resume crashes in mlx5, from Mark Bloch.

12) Fix use after free in bpf fmod_ret, from Alexei Starovoitov.

13) ARP retransmit timer guard uses the wrong offset, from Hongbin Liu.

14) Fix leak in inetdev_init(), from Yang Yingliang.

15) Don't try to use inet_hash()/inet_unhash() in l2tp code, as it
results in crashes. From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (77 commits)
l2tp: add sk_family checks to l2tp_validate_socket
l2tp: do not use inet_hash()/inet_unhash()
net: qrtr: Allocate workqueue before kernel_bind
mptcp: remove msk from the token container at destruction time.
mptcp: fix race between MP_JOIN and close
mptcp: fix unblocking connect()
net/sched: act_ct: add nat mangle action only for NAT-conntrack
devinet: fix memleak in inetdev_init()
virtio_vsock: Fix race condition in virtio_transport_recv_pkt
drivers/net/ibmvnic: Update VNIC protocol version reporting
NFC: st21nfca: add missed kfree_skb() in an error path
neigh: fix ARP retransmit timer guard
bpf, selftests: Add a verifier test for assigning 32bit reg states to 64bit ones
bpf, selftests: Verifier bounds tests need to be updated
bpf: Fix a verifier issue when assigning 32bit reg states to 64bit ones
bpf: Fix use-after-free in fmod_ret check
net/mlx5e: replace EINVAL in mlx5e_flower_parse_meta()
net/mlx5e: Fix MLX5_TC_CT dependencies
net/mlx5e: Properly set default values when disabling adaptive moderation
net/mlx5e: Fix arch depending casting issue in FEC
...

Changed files
+806 -337
arch/powerpc
drivers/crypto/chelsio/chtls
drivers/net/bonding
drivers/net/dsa/ocelot
drivers/net/ethernet/broadcom
drivers/net/ethernet/freescale/dpaa
drivers/net/ethernet/ibm
drivers/net/ethernet/mellanox
drivers/net/ethernet/netronome/nfp/flower
drivers/net/ethernet/qlogic
drivers/net/ethernet/stmicro/stmmac
drivers/net/usb
drivers/nfc/st21nfca
include
kernel
net
tools/testing/selftests/bpf/verifier
tools/testing/selftests/tc-testing/tc-tests/qdiscs
+1
arch/powerpc/Kconfig
··· 126 126 select ARCH_HAS_MMIOWB if PPC64 127 127 select ARCH_HAS_PHYS_TO_DMA 128 128 select ARCH_HAS_PMEM_API 129 + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 129 130 select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 130 131 select ARCH_HAS_PTE_SPECIAL 131 132 select ARCH_HAS_MEMBARRIER_CALLBACKS
+1 -1
drivers/crypto/chelsio/chtls/chtls_io.c
··· 682 682 make_tx_data_wr(sk, skb, immdlen, len, 683 683 credits_needed, completion); 684 684 tp->snd_nxt += len; 685 - tp->lsndtime = tcp_time_stamp(tp); 685 + tp->lsndtime = tcp_jiffies32; 686 686 if (completion) 687 687 ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; 688 688 } else {
+3 -1
drivers/net/bonding/bond_sysfs_slave.c
··· 149 149 150 150 err = kobject_init_and_add(&slave->kobj, &slave_ktype, 151 151 &(slave->dev->dev.kobj), "bonding_slave"); 152 - if (err) 152 + if (err) { 153 + kobject_put(&slave->kobj); 153 154 return err; 155 + } 154 156 155 157 for (a = slave_attrs; *a; ++a) { 156 158 err = sysfs_create_file(&slave->kobj, &((*a)->attr));
+6 -2
drivers/net/dsa/ocelot/felix.c
··· 102 102 const struct switchdev_obj_port_vlan *vlan) 103 103 { 104 104 struct ocelot *ocelot = ds->priv; 105 + u16 flags = vlan->flags; 105 106 u16 vid; 106 107 int err; 107 108 109 + if (dsa_is_cpu_port(ds, port)) 110 + flags &= ~BRIDGE_VLAN_INFO_UNTAGGED; 111 + 108 112 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 109 113 err = ocelot_vlan_add(ocelot, port, vid, 110 - vlan->flags & BRIDGE_VLAN_INFO_PVID, 111 - vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); 114 + flags & BRIDGE_VLAN_INFO_PVID, 115 + flags & BRIDGE_VLAN_INFO_UNTAGGED); 112 116 if (err) { 113 117 dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", 114 118 vid, port, err);
+5 -11
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4176 4176 int i, intr_process, rc, tmo_count; 4177 4177 struct input *req = msg; 4178 4178 u32 *data = msg; 4179 - __le32 *resp_len; 4180 4179 u8 *valid; 4181 4180 u16 cp_ring_id, len = 0; 4182 4181 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4183 4182 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4184 4183 struct hwrm_short_input short_input = {0}; 4185 4184 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4186 - u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; 4187 4185 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4188 4186 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4189 4187 ··· 4199 4201 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4200 4202 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4201 4203 resp = bp->hwrm_cmd_kong_resp_addr; 4202 - resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; 4203 4204 } 4204 4205 4205 4206 memset(resp, 0, PAGE_SIZE); ··· 4267 4270 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4268 4271 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4269 4272 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4270 - resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); 4271 4273 4272 4274 if (intr_process) { 4273 4275 u16 seq_id = bp->hwrm_intr_seq_id; ··· 4294 4298 le16_to_cpu(req->req_type)); 4295 4299 return -EBUSY; 4296 4300 } 4297 - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4298 - HWRM_RESP_LEN_SFT; 4299 - valid = resp_addr + len - 1; 4301 + len = le16_to_cpu(resp->resp_len); 4302 + valid = ((u8 *)resp) + len - 1; 4300 4303 } else { 4301 4304 int j; 4302 4305 ··· 4306 4311 */ 4307 4312 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4308 4313 return -EBUSY; 4309 - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4310 - HWRM_RESP_LEN_SFT; 4314 + len = le16_to_cpu(resp->resp_len); 4311 4315 if (len) 4312 4316 break; 4313 4317 /* on first few passes, just barely sleep */ ··· 4328 4334 } 4329 4335 4330 4336 /* Last byte of resp contains valid bit */ 4331 - valid = resp_addr + len - 1; 4337 + valid = ((u8 *)resp) + len - 1; 4332 4338 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4333 4339 /* make sure we read from updated DMA memory */ 4334 4340 dma_rmb(); ··· 9304 9310 bnxt_free_skbs(bp); 9305 9311 9306 9312 /* Save ring stats before shutdown */ 9307 - if (bp->bnapi) 9313 + if (bp->bnapi && irq_re_init) 9308 9314 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 9309 9315 if (irq_re_init) { 9310 9316 bnxt_free_irq(bp);
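The bnxt_en hunk above reads the firmware response length via le16_to_cpu(resp->resp_len) instead of masking and shifting a 32-bit word at a hard-coded offset, so the access matches the field's real width and byte order. As a minimal userspace sketch of the same idea (the buffer layout and offset below are invented, and get_le16() merely stands in for the kernel's le16_to_cpu() on a wire-format field):

#include <stdint.h>
#include <stdio.h>

/* read a little-endian u16 from a byte buffer regardless of host endianness */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* hypothetical firmware response buffer; resp_len at byte offset 6 */
	uint8_t resp[8] = { 0x00, 0x00, 0x90, 0x00, 0x12, 0x00, 0x20, 0x00 };
	uint16_t resp_len = get_le16(resp + 6);

	/* prints 32 on little- and big-endian hosts alike */
	printf("resp_len = %u bytes\n", (unsigned int)resp_len);
	return 0;
}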
-5
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 656 656 #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) 657 657 #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) 658 658 #define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) 659 - #define HWRM_RESP_ERR_CODE_MASK 0xffff 660 - #define HWRM_RESP_LEN_OFFSET 4 661 - #define HWRM_RESP_LEN_MASK 0xffff0000 662 - #define HWRM_RESP_LEN_SFT 16 663 - #define HWRM_RESP_VALID_MASK 0xff000000 664 659 #define BNXT_HWRM_REQ_MAX_SIZE 128 665 660 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ 666 661 BNXT_HWRM_REQ_MAX_SIZE)
+5 -4
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 2012 2012 2013 2013 bnxt_hwrm_fw_set_time(bp); 2014 2014 2015 - if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2016 - BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2017 - &index, &item_len, NULL) != 0) { 2015 + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2016 + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2017 + &index, &item_len, NULL); 2018 + if (rc) { 2018 2019 netdev_err(dev, "PKG update area not created in nvram\n"); 2019 - return -ENOBUFS; 2020 + return rc; 2020 2021 } 2021 2022 2022 2023 rc = request_firmware(&fw, filename, &dev->dev);
+1 -1
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2914 2914 } 2915 2915 2916 2916 /* Do this here, so we can be verbose early */ 2917 - SET_NETDEV_DEV(net_dev, dev); 2917 + SET_NETDEV_DEV(net_dev, dev->parent); 2918 2918 dev_set_drvdata(dev, net_dev); 2919 2919 2920 2920 priv = netdev_priv(net_dev);
+3 -5
drivers/net/ethernet/ibm/ibmvnic.c
··· 4678 4678 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 4679 4679 break; 4680 4680 } 4681 - dev_info(dev, "Partner protocol version is %d\n", 4682 - crq->version_exchange_rsp.version); 4683 - if (be16_to_cpu(crq->version_exchange_rsp.version) < 4684 - ibmvnic_version) 4685 - ibmvnic_version = 4681 + ibmvnic_version = 4686 4682 be16_to_cpu(crq->version_exchange_rsp.version); 4683 + dev_info(dev, "Partner protocol version is %d\n", 4684 + ibmvnic_version); 4687 4685 send_cap_queries(adapter); 4688 4686 break; 4689 4687 case QUERY_CAPABILITY_RSP:
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 80 80 81 81 config MLX5_TC_CT 82 82 bool "MLX5 TC connection tracking offload support" 83 - depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT 83 + depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT 84 84 default y 85 85 help 86 86 Say Y here if you want to support offloading connection tracking rules
+6 -4
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 1068 1068 1069 1069 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, 1070 1070 int num_channels); 1071 - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, 1072 - u8 cq_period_mode); 1073 - void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, 1074 - u8 cq_period_mode); 1071 + 1072 + void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); 1073 + void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); 1074 + void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); 1075 + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); 1076 + 1075 1077 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); 1076 1078 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, 1077 1079 struct mlx5e_params *params);
+13 -11
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
··· 369 369 *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ 370 370 } while (0) 371 371 372 - #define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ 373 - do { \ 374 - u16 *__policy = &(policy); \ 375 - bool _write = (write); \ 376 - \ 377 - if (_write && *__policy) \ 378 - *__policy = find_first_bit((u_long *)__policy, \ 379 - sizeof(u16) * BITS_PER_BYTE);\ 380 - MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ 381 - if (!_write && *__policy) \ 382 - *__policy = 1 << *__policy; \ 372 + #define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ 373 + do { \ 374 + unsigned long policy_long; \ 375 + u16 *__policy = &(policy); \ 376 + bool _write = (write); \ 377 + \ 378 + policy_long = *__policy; \ 379 + if (_write && *__policy) \ 380 + *__policy = find_first_bit(&policy_long, \ 381 + sizeof(policy_long) * BITS_PER_BYTE);\ 382 + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ 383 + if (!_write && *__policy) \ 384 + *__policy = 1 << *__policy; \ 383 385 } while (0) 384 386 385 387 /* get/set FEC admin field for a given speed */
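The FEC macro rewrite above copies the 16-bit policy value into a local unsigned long before handing it to find_first_bit(), instead of casting a u16 pointer to an unsigned long pointer, which made the bit search read past the 2-byte object and misbehave on big-endian hosts. A minimal userspace sketch of the safe pattern, assuming __builtin_ctzl() (a GCC/Clang builtin) as a stand-in for find_first_bit() on a single word:

#include <stdint.h>
#include <stdio.h>

/* first set bit of one word; word must be non-zero (GCC/Clang builtin) */
static unsigned int first_set_bit(unsigned long word)
{
	return (unsigned int)__builtin_ctzl(word);
}

int main(void)
{
	uint16_t policy = 0x0010;		/* bit 4 set */
	unsigned long policy_long = policy;	/* widen by value, not by pointer cast */

	if (policy)
		printf("first set bit: %u\n", first_set_bit(policy_long));

	/*
	 * The buggy pattern was roughly:
	 *     find_first_bit((unsigned long *)&policy, 16);
	 * which dereferences a full unsigned long while only two bytes of
	 * storage exist, and reads the wrong bytes on big-endian machines.
	 */
	return 0;
}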
+28 -13
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 527 527 struct dim_cq_moder *rx_moder, *tx_moder; 528 528 struct mlx5_core_dev *mdev = priv->mdev; 529 529 struct mlx5e_channels new_channels = {}; 530 + bool reset_rx, reset_tx; 530 531 int err = 0; 531 - bool reset; 532 532 533 533 if (!MLX5_CAP_GEN(mdev, cq_moderation)) 534 534 return -EOPNOTSUPP; ··· 566 566 } 567 567 /* we are opened */ 568 568 569 - reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) || 570 - (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled); 569 + reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; 570 + reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; 571 571 572 - if (!reset) { 572 + if (!reset_rx && !reset_tx) { 573 573 mlx5e_set_priv_channels_coalesce(priv, coal); 574 574 priv->channels.params = new_channels.params; 575 575 goto out; 576 + } 577 + 578 + if (reset_rx) { 579 + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, 580 + MLX5E_PFLAG_RX_CQE_BASED_MODER); 581 + 582 + mlx5e_reset_rx_moderation(&new_channels.params, mode); 583 + } 584 + if (reset_tx) { 585 + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, 586 + MLX5E_PFLAG_TX_CQE_BASED_MODER); 587 + 588 + mlx5e_reset_tx_moderation(&new_channels.params, mode); 576 589 } 577 590 578 591 err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); ··· 678 665 static int get_fec_supported_advertised(struct mlx5_core_dev *dev, 679 666 struct ethtool_link_ksettings *link_ksettings) 680 667 { 681 - u_long active_fec = 0; 668 + unsigned long active_fec_long; 669 + u32 active_fec; 682 670 u32 bitn; 683 671 int err; 684 672 685 - err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL); 673 + err = mlx5e_get_fec_mode(dev, &active_fec, NULL); 686 674 if (err) 687 675 return (err == -EOPNOTSUPP) ? 0 : err; 688 676 ··· 696 682 MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1, 697 683 ETHTOOL_LINK_MODE_FEC_LLRS_BIT); 698 684 685 + active_fec_long = active_fec; 699 686 /* active fec is a bit set, find out which bit is set and 700 687 * advertise the corresponding ethtool bit 701 688 */ 702 - bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE); 689 + bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE); 703 690 if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes)) 704 691 __set_bit(pplm_fec_2_ethtool_linkmodes[bitn], 705 692 link_ksettings->link_modes.advertising); ··· 1532 1517 { 1533 1518 struct mlx5e_priv *priv = netdev_priv(netdev); 1534 1519 struct mlx5_core_dev *mdev = priv->mdev; 1535 - u16 fec_configured = 0; 1536 - u32 fec_active = 0; 1520 + u16 fec_configured; 1521 + u32 fec_active; 1537 1522 int err; 1538 1523 1539 1524 err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured); ··· 1541 1526 if (err) 1542 1527 return err; 1543 1528 1544 - fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active, 1545 - sizeof(u32) * BITS_PER_BYTE); 1529 + fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active, 1530 + sizeof(unsigned long) * BITS_PER_BYTE); 1546 1531 1547 1532 if (!fecparam->active_fec) 1548 1533 return -EOPNOTSUPP; 1549 1534 1550 - fecparam->fec = pplm2ethtool_fec((u_long)fec_configured, 1551 - sizeof(u16) * BITS_PER_BYTE); 1535 + fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured, 1536 + sizeof(unsigned long) * BITS_PER_BYTE); 1552 1537 1553 1538 return 0; 1554 1539 }
+14 -6
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4716 4716 DIM_CQ_PERIOD_MODE_START_FROM_EQE; 4717 4717 } 4718 4718 4719 - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4719 + void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 4720 4720 { 4721 4721 if (params->tx_dim_enabled) { 4722 4722 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); ··· 4725 4725 } else { 4726 4726 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); 4727 4727 } 4728 - 4729 - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 4730 - params->tx_cq_moderation.cq_period_mode == 4731 - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 4732 4728 } 4733 4729 4734 - void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4730 + void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 4735 4731 { 4736 4732 if (params->rx_dim_enabled) { 4737 4733 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); ··· 4736 4740 } else { 4737 4741 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); 4738 4742 } 4743 + } 4739 4744 4745 + void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4746 + { 4747 + mlx5e_reset_tx_moderation(params, cq_period_mode); 4748 + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 4749 + params->tx_cq_moderation.cq_period_mode == 4750 + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 4751 + } 4752 + 4753 + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4754 + { 4755 + mlx5e_reset_rx_moderation(params, cq_period_mode); 4740 4756 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, 4741 4757 params->rx_cq_moderation.cq_period_mode == 4742 4758 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+4 -8
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 2068 2068 flow_rule_match_meta(rule, &match); 2069 2069 if (match.mask->ingress_ifindex != 0xFFFFFFFF) { 2070 2070 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); 2071 - return -EINVAL; 2071 + return -EOPNOTSUPP; 2072 2072 } 2073 2073 2074 2074 ingress_dev = __dev_get_by_index(dev_net(filter_dev), ··· 2076 2076 if (!ingress_dev) { 2077 2077 NL_SET_ERR_MSG_MOD(extack, 2078 2078 "Can't find the ingress port to match on"); 2079 - return -EINVAL; 2079 + return -ENOENT; 2080 2080 } 2081 2081 2082 2082 if (ingress_dev != filter_dev) { 2083 2083 NL_SET_ERR_MSG_MOD(extack, 2084 2084 "Can't match on the ingress filter port"); 2085 - return -EINVAL; 2085 + return -EOPNOTSUPP; 2086 2086 } 2087 2087 2088 2088 return 0; ··· 3849 3849 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { 3850 3850 NL_SET_ERR_MSG_MOD(extack, 3851 3851 "devices are not on same switch HW, can't offload forwarding"); 3852 - netdev_warn(priv->netdev, 3853 - "devices %s %s not on same switch HW, can't offload forwarding\n", 3854 - priv->netdev->name, 3855 - out_dev->name); 3856 3852 return -EOPNOTSUPP; 3857 3853 } 3858 3854 ··· 4610 4614 dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; 4611 4615 dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; 4612 4616 rpriv->prev_vf_vport_stats = cur_stats; 4613 - flow_stats_update(&ma->stats, dpkts, dbytes, jiffies, 4617 + flow_stats_update(&ma->stats, dbytes, dpkts, jiffies, 4614 4618 FLOW_ACTION_HW_STATS_DELAYED); 4615 4619 } 4616 4620
+18
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1549 1549 mlx5_pci_disable_device(dev); 1550 1550 } 1551 1551 1552 + static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) 1553 + { 1554 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1555 + 1556 + mlx5_unload_one(dev, false); 1557 + 1558 + return 0; 1559 + } 1560 + 1561 + static int mlx5_resume(struct pci_dev *pdev) 1562 + { 1563 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1564 + 1565 + return mlx5_load_one(dev, false); 1566 + } 1567 + 1552 1568 static const struct pci_device_id mlx5_core_pci_table[] = { 1553 1569 { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, 1554 1570 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ ··· 1608 1592 .id_table = mlx5_core_pci_table, 1609 1593 .probe = init_one, 1610 1594 .remove = remove_one, 1595 + .suspend = mlx5_suspend, 1596 + .resume = mlx5_resume, 1611 1597 .shutdown = shutdown, 1612 1598 .err_handler = &mlx5_err_handler, 1613 1599 .sriov_configure = mlx5_core_sriov_configure,
+2 -1
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1440 1440 ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); 1441 1441 priv->stats[ctx_id].pkts += pkts; 1442 1442 priv->stats[ctx_id].bytes += bytes; 1443 - max_t(u64, priv->stats[ctx_id].used, used); 1443 + priv->stats[ctx_id].used = max_t(u64, used, 1444 + priv->stats[ctx_id].used); 1444 1445 } 1445 1446 } 1446 1447
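The nfp hunk above fixes a statistics update that invoked max_t() but threw the result away, so the per-flow "used" timestamp never advanced. A tiny sketch of the bug class in plain C (MAX_OF is just an illustrative macro, not the kernel's max_t):

#include <stdint.h>
#include <stdio.h>

#define MAX_OF(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint64_t last_used = 100;	/* e.g. a "last seen" timestamp */
	uint64_t now = 250;

	MAX_OF(last_used, now);			/* bug: value computed, then discarded
						 * (compilers often warn right here) */
	printf("after the no-op: %llu\n", (unsigned long long)last_used);

	last_used = MAX_OF(last_used, now);	/* fix: store the maximum back */
	printf("after the fix:   %llu\n", (unsigned long long)last_used);
	return 0;
}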
+3 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
··· 3651 3651 ahw->diag_cnt = 0; 3652 3652 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); 3653 3653 if (ret) 3654 - goto fail_diag_irq; 3654 + goto fail_mbx_args; 3655 3655 3656 3656 if (adapter->flags & QLCNIC_MSIX_ENABLED) 3657 3657 intrpt_id = ahw->intr_tbl[0].id; ··· 3681 3681 3682 3682 done: 3683 3683 qlcnic_free_mbx_args(&cmd); 3684 + 3685 + fail_mbx_args: 3684 3686 qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); 3685 3687 3686 3688 fail_diag_irq:
+2 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 630 630 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 631 631 ptp_v2 = PTP_TCR_TSVER2ENA; 632 632 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 633 - ts_event_en = PTP_TCR_TSEVNTENA; 633 + if (priv->synopsys_id != DWMAC_CORE_5_10) 634 + ts_event_en = PTP_TCR_TSEVNTENA; 634 635 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 635 636 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 636 637 ptp_over_ethernet = PTP_TCR_TSIPENA;
+1
drivers/net/usb/qmi_wwan.c
··· 1324 1324 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1325 1325 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1326 1326 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1327 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ 1327 1328 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1328 1329 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ 1329 1330 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+3 -1
drivers/nfc/st21nfca/dep.c
··· 173 173 memcpy(atr_res->gbi, atr_req->gbi, gb_len); 174 174 r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, 175 175 gb_len); 176 - if (r < 0) 176 + if (r < 0) { 177 + kfree_skb(skb); 177 178 return r; 179 + } 178 180 } 179 181 180 182 info->dep_info.curr_nfc_dep_pni = 0;
+1 -1
include/linux/ieee80211.h
··· 2047 2047 } 2048 2048 2049 2049 /* HE Operation defines */ 2050 - #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 2050 + #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 2051 2051 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 2052 2052 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 2053 2053 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+1 -1
include/linux/netfilter/nf_conntrack_pptp.h
··· 10 10 #include <net/netfilter/nf_conntrack_expect.h> 11 11 #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> 12 12 13 - extern const char *const pptp_msg_name[]; 13 + const char *pptp_msg_name(u_int16_t msg); 14 14 15 15 /* state of the control session */ 16 16 enum pptp_ctrlsess_state {
+18 -7
include/linux/virtio_net.h
··· 31 31 { 32 32 unsigned int gso_type = 0; 33 33 unsigned int thlen = 0; 34 + unsigned int p_off = 0; 34 35 unsigned int ip_proto; 35 36 36 37 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { ··· 69 68 if (!skb_partial_csum_set(skb, start, off)) 70 69 return -EINVAL; 71 70 72 - if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) 71 + p_off = skb_transport_offset(skb) + thlen; 72 + if (p_off > skb_headlen(skb)) 73 73 return -EINVAL; 74 74 } else { 75 75 /* gso packets without NEEDS_CSUM do not set transport_offset. ··· 94 92 return -EINVAL; 95 93 } 96 94 97 - if (keys.control.thoff + thlen > skb_headlen(skb) || 95 + p_off = keys.control.thoff + thlen; 96 + if (p_off > skb_headlen(skb) || 98 97 keys.basic.ip_proto != ip_proto) 99 98 return -EINVAL; 100 99 101 100 skb_set_transport_header(skb, keys.control.thoff); 101 + } else if (gso_type) { 102 + p_off = thlen; 103 + if (p_off > skb_headlen(skb)) 104 + return -EINVAL; 102 105 } 103 106 } 104 107 105 108 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 106 109 u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); 110 + struct skb_shared_info *shinfo = skb_shinfo(skb); 107 111 108 - skb_shinfo(skb)->gso_size = gso_size; 109 - skb_shinfo(skb)->gso_type = gso_type; 112 + /* Too small packets are not really GSO ones. */ 113 + if (skb->len - p_off > gso_size) { 114 + shinfo->gso_size = gso_size; 115 + shinfo->gso_type = gso_type; 110 116 111 - /* Header must be checked, and gso_segs computed. */ 112 - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 113 - skb_shinfo(skb)->gso_segs = 0; 117 + /* Header must be checked, and gso_segs computed. */ 118 + shinfo->gso_type |= SKB_GSO_DODGY; 119 + shinfo->gso_segs = 0; 120 + } 114 121 } 115 122 116 123 return 0;
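The virtio_net header hunk above computes the payload offset (p_off) for every GSO case and only copies gso_size/gso_type into the skb when the payload past that offset is actually larger than one segment, so undersized packets are no longer marked as GSO. A small userspace sketch of that decision; the struct and field names below are invented for illustration and assume the caller has already validated that p_off does not exceed the packet length:

#include <stdbool.h>
#include <stdio.h>

struct pkt {
	unsigned int len;	/* total packet length */
	unsigned int p_off;	/* end of the transport header */
	unsigned int gso_size;	/* advertised payload bytes per segment */
};

/* only treat the packet as GSO if its payload really exceeds one segment */
static bool should_apply_gso(const struct pkt *p)
{
	return p->len - p->p_off > p->gso_size;
}

int main(void)
{
	struct pkt big   = { .len = 65000, .p_off = 54, .gso_size = 1448 };
	struct pkt small = { .len = 100,   .p_off = 54, .gso_size = 1448 };

	printf("big: %s, small: %s\n",
	       should_apply_gso(&big) ? "mark as GSO" : "leave alone",
	       should_apply_gso(&small) ? "mark as GSO" : "leave alone");
	return 0;
}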
+1
include/net/espintcp.h
··· 25 25 struct espintcp_msg partial; 26 26 void (*saved_data_ready)(struct sock *sk); 27 27 void (*saved_write_space)(struct sock *sk); 28 + void (*saved_destruct)(struct sock *sk); 28 29 struct work_struct work; 29 30 bool tx_running; 30 31 };
+12
include/net/ip_fib.h
··· 447 447 #endif 448 448 int fib_unmerge(struct net *net); 449 449 450 + static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc, 451 + const struct net_device *dev) 452 + { 453 + if (nhc->nhc_dev == dev || 454 + l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) 455 + return true; 456 + 457 + return false; 458 + } 459 + 450 460 /* Exported by fib_semantics.c */ 451 461 int ip_fib_check_default(__be32 gw, struct net_device *dev); 452 462 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); ··· 489 479 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri); 490 480 void fib_trie_init(void); 491 481 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias); 482 + bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, 483 + const struct flowi4 *flp); 492 484 493 485 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) 494 486 {
+84 -16
include/net/nexthop.h
··· 70 70 }; 71 71 72 72 struct nh_group { 73 + struct nh_group *spare; /* spare group for removals */ 73 74 u16 num_nh; 74 75 bool mpath; 75 76 bool has_v4; ··· 137 136 { 138 137 unsigned int rc = 1; 139 138 140 - if (nexthop_is_multipath(nh)) { 139 + if (nh->is_group) { 141 140 struct nh_group *nh_grp; 142 141 143 142 nh_grp = rcu_dereference_rtnl(nh->nh_grp); 144 - rc = nh_grp->num_nh; 143 + if (nh_grp->mpath) 144 + rc = nh_grp->num_nh; 145 145 } 146 146 147 147 return rc; 148 148 } 149 149 150 150 static inline 151 - struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel) 151 + struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel) 152 152 { 153 - const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); 154 - 155 153 /* for_nexthops macros in fib_semantics.c grabs a pointer to 156 154 * the nexthop before checking nhsel 157 155 */ ··· 185 185 { 186 186 const struct nh_info *nhi; 187 187 188 - if (nexthop_is_multipath(nh)) { 189 - if (nexthop_num_path(nh) > 1) 188 + if (nh->is_group) { 189 + struct nh_group *nh_grp; 190 + 191 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 192 + if (nh_grp->num_nh > 1) 190 193 return false; 191 - nh = nexthop_mpath_select(nh, 0); 192 - if (!nh) 193 - return false; 194 + 195 + nh = nh_grp->nh_entries[0].nh; 194 196 } 195 197 196 198 nhi = rcu_dereference_rtnl(nh->nh_info); ··· 218 216 BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0); 219 217 BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0); 220 218 221 - if (nexthop_is_multipath(nh)) { 222 - nh = nexthop_mpath_select(nh, nhsel); 223 - if (!nh) 224 - return NULL; 219 + if (nh->is_group) { 220 + struct nh_group *nh_grp; 221 + 222 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 223 + if (nh_grp->mpath) { 224 + nh = nexthop_mpath_select(nh_grp, nhsel); 225 + if (!nh) 226 + return NULL; 227 + } 225 228 } 226 229 227 230 nhi = rcu_dereference_rtnl(nh->nh_info); 228 231 return &nhi->fib_nhc; 232 + } 233 + 234 + /* called from fib_table_lookup with rcu_lock */ 235 + static inline 236 + struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh, 237 + int fib_flags, 238 + const struct flowi4 *flp, 239 + int *nhsel) 240 + { 241 + struct nh_info *nhi; 242 + 243 + if (nh->is_group) { 244 + struct nh_group *nhg = rcu_dereference(nh->nh_grp); 245 + int i; 246 + 247 + for (i = 0; i < nhg->num_nh; i++) { 248 + struct nexthop *nhe = nhg->nh_entries[i].nh; 249 + 250 + nhi = rcu_dereference(nhe->nh_info); 251 + if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) { 252 + *nhsel = i; 253 + return &nhi->fib_nhc; 254 + } 255 + } 256 + } else { 257 + nhi = rcu_dereference(nh->nh_info); 258 + if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) { 259 + *nhsel = 0; 260 + return &nhi->fib_nhc; 261 + } 262 + } 263 + 264 + return NULL; 265 + } 266 + 267 + static inline bool nexthop_uses_dev(const struct nexthop *nh, 268 + const struct net_device *dev) 269 + { 270 + struct nh_info *nhi; 271 + 272 + if (nh->is_group) { 273 + struct nh_group *nhg = rcu_dereference(nh->nh_grp); 274 + int i; 275 + 276 + for (i = 0; i < nhg->num_nh; i++) { 277 + struct nexthop *nhe = nhg->nh_entries[i].nh; 278 + 279 + nhi = rcu_dereference(nhe->nh_info); 280 + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) 281 + return true; 282 + } 283 + } else { 284 + nhi = rcu_dereference(nh->nh_info); 285 + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) 286 + return true; 287 + } 288 + 289 + return false; 229 290 } 230 291 231 292 static inline unsigned int fib_info_num_path(const struct fib_info 
*fi) ··· 328 263 { 329 264 struct nh_info *nhi; 330 265 331 - if (nexthop_is_multipath(nh)) { 332 - nh = nexthop_mpath_select(nh, 0); 266 + if (nh->is_group) { 267 + struct nh_group *nh_grp; 268 + 269 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 270 + nh = nexthop_mpath_select(nh_grp, 0); 333 271 if (!nh) 334 272 return NULL; 335 273 }
+4
include/net/tls.h
··· 135 135 struct tls_rec *open_rec; 136 136 struct list_head tx_list; 137 137 atomic_t encrypt_pending; 138 + /* protect crypto_wait with encrypt_pending */ 139 + spinlock_t encrypt_compl_lock; 138 140 int async_notify; 139 141 u8 async_capable:1; 140 142 ··· 157 155 u8 async_capable:1; 158 156 u8 decrypted:1; 159 157 atomic_t decrypt_pending; 158 + /* protect crypto_wait with decrypt_pending*/ 159 + spinlock_t decrypt_compl_lock; 160 160 bool async_notify; 161 161 }; 162 162
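The tls.h hunk above adds spinlocks documented as protecting crypto_wait together with the encrypt_pending/decrypt_pending counters, which is the locking needed for the tls_sw_recvmsg() vs tls_decrypt_done() race from item 5 of the changelog. A rough userspace analogy (pthreads, not the ktls code; build with -pthread): the pending counter and the wakeup must be checked and signalled under one lock, otherwise the waiter can observe a non-zero count, race with the final decrement, and sleep past the wakeup:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int pending = 4;			/* outstanding async completions */

static void *completion(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);	/* decrement and signal under the lock */
	if (--pending == 0)
		pthread_cond_signal(&done);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	int i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, completion, NULL);

	pthread_mutex_lock(&lock);	/* check the counter under the same lock */
	while (pending > 0)
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);

	printf("all completions observed\n");
	return 0;
}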
+1 -1
include/uapi/linux/xfrm.h
··· 304 304 XFRMA_PROTO, /* __u8 */ 305 305 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ 306 306 XFRMA_PAD, 307 - XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ 307 + XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */ 308 308 XFRMA_SET_MARK, /* __u32 */ 309 309 XFRMA_SET_MARK_MASK, /* __u32 */ 310 310 XFRMA_IF_ID, /* __u32 */
+16 -18
kernel/bpf/verifier.c
··· 1168 1168 * but must be positive otherwise set to worse case bounds 1169 1169 * and refine later from tnum. 1170 1170 */ 1171 - if (reg->s32_min_value > 0) 1172 - reg->smin_value = reg->s32_min_value; 1173 - else 1174 - reg->smin_value = 0; 1175 - if (reg->s32_max_value > 0) 1171 + if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0) 1176 1172 reg->smax_value = reg->s32_max_value; 1177 1173 else 1178 1174 reg->smax_value = U32_MAX; 1175 + if (reg->s32_min_value >= 0) 1176 + reg->smin_value = reg->s32_min_value; 1177 + else 1178 + reg->smin_value = 0; 1179 1179 } 1180 1180 1181 1181 static void __reg_combine_32_into_64(struct bpf_reg_state *reg) ··· 10428 10428 } 10429 10429 #define SECURITY_PREFIX "security_" 10430 10430 10431 - static int check_attach_modify_return(struct bpf_verifier_env *env) 10431 + static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr) 10432 10432 { 10433 - struct bpf_prog *prog = env->prog; 10434 - unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr; 10435 - 10436 - /* This is expected to be cleaned up in the future with the KRSI effort 10437 - * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h. 10438 - */ 10439 10433 if (within_error_injection_list(addr) || 10440 10434 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name, 10441 10435 sizeof(SECURITY_PREFIX) - 1)) 10442 10436 return 0; 10443 - 10444 - verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n", 10445 - prog->aux->attach_btf_id, prog->aux->attach_func_name); 10446 10437 10447 10438 return -EINVAL; 10448 10439 } ··· 10645 10654 goto out; 10646 10655 } 10647 10656 } 10657 + 10658 + if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 10659 + ret = check_attach_modify_return(prog, addr); 10660 + if (ret) 10661 + verbose(env, "%s() is not modifiable\n", 10662 + prog->aux->attach_func_name); 10663 + } 10664 + 10665 + if (ret) 10666 + goto out; 10648 10667 tr->func.addr = (void *)addr; 10649 10668 prog->aux->trampoline = tr; 10650 - 10651 - if (prog->expected_attach_type == BPF_MODIFY_RETURN) 10652 - ret = check_attach_modify_return(env); 10653 10669 out: 10654 10670 mutex_unlock(&tr->mutex); 10655 10671 if (ret)
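The verifier hunk above carries the 32-bit signed bounds into the 64-bit bounds only when both s32_min_value and s32_max_value are known non-negative. The reason, as far as the change shows, is that a 32-bit ALU result lives in the register zero-extended, so a possibly-negative 32-bit range can correspond to a very large unsigned 64-bit value. A tiny userspace demonstration of that mismatch (ordinary C, unrelated to the verifier's data structures):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t s32 = -1;	/* a value inside a "negative allowed" 32-bit range */

	/* what a 64-bit register holds after a 32-bit ALU op: zero-extension */
	uint64_t reg = (uint64_t)(uint32_t)s32;

	/* carrying the signed 32-bit bound straight into 64 bits would claim -1 */
	int64_t naive_bound = (int64_t)s32;

	printf("register value:     %" PRIu64 "\n", reg);		/* 4294967295 */
	printf("naive 64-bit bound: %" PRId64 "\n", naive_bound);	/* -1 */
	return 0;
}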
+2 -1
net/bridge/br_multicast.c
··· 2413 2413 free_percpu(br->mcast_stats); 2414 2414 } 2415 2415 2416 - static void mcast_stats_add_dir(u64 *dst, u64 *src) 2416 + /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 2417 + static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 2417 2418 { 2418 2419 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 2419 2420 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
+6
net/bridge/netfilter/nft_reject_bridge.c
··· 31 31 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); 32 32 eth->h_proto = eth_hdr(oldskb)->h_proto; 33 33 skb_pull(nskb, ETH_HLEN); 34 + 35 + if (skb_vlan_tag_present(oldskb)) { 36 + u16 vid = skb_vlan_tag_get(oldskb); 37 + 38 + __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); 39 + } 34 40 } 35 41 36 42 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
+2 -2
net/core/neighbour.c
··· 1082 1082 } 1083 1083 1084 1084 if (neigh->nud_state & NUD_IN_TIMER) { 1085 - if (time_before(next, jiffies + HZ/2)) 1086 - next = jiffies + HZ/2; 1085 + if (time_before(next, jiffies + HZ/100)) 1086 + next = jiffies + HZ/100; 1087 1087 if (!mod_timer(&neigh->timer, next)) 1088 1088 neigh_hold(neigh); 1089 1089 }
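The neighbour hunk above lowers the floor on how soon the retransmit timer may be re-armed from HZ/2 to HZ/100, so a retransmit interval configured below half a second is no longer stretched to 500 ms, while the timer still cannot be armed in the past. A minimal userspace sketch of the clamp with a wrap-safe "before" comparison; time_before32() mimics the kernel's time_before() and the tick values are arbitrary:

#include <stdint.h>
#include <stdio.h>

/* wrap-safe "a before b" for 32-bit tick counters, like the kernel's time_before() */
static int time_before32(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	const uint32_t HZ = 1000;	/* assume 1000 ticks per second */
	uint32_t jiffies = 0xfffffff0u;	/* near wrap-around, still handled */
	uint32_t next = jiffies + 2;	/* requested firing time: 2 ticks away */

	/* clamp to at least HZ/100 (10 ms) in the future, not HZ/2 (500 ms) */
	if (time_before32(next, jiffies + HZ / 100))
		next = jiffies + HZ / 100;

	printf("timer armed %u ticks from now\n", next - jiffies);	/* 10 */
	return 0;
}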
+1
net/dsa/slave.c
··· 1736 1736 if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) 1737 1737 slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1738 1738 slave_dev->hw_features |= NETIF_F_HW_TC; 1739 + slave_dev->features |= NETIF_F_LLTX; 1739 1740 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; 1740 1741 if (!IS_ERR_OR_NULL(port->mac)) 1741 1742 ether_addr_copy(slave_dev->dev_addr, port->mac);
+1
net/ipv4/devinet.c
··· 276 276 err = devinet_sysctl_register(in_dev); 277 277 if (err) { 278 278 in_dev->dead = 1; 279 + neigh_parms_release(&arp_tbl, in_dev->arp_parms); 279 280 in_dev_put(in_dev); 280 281 in_dev = NULL; 281 282 goto out;
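The devinet hunk above releases the neighbour parms allocated earlier in inetdev_init() before the error path drops the in_device, closing the leak from item 14 of the changelog. The general shape is to unwind, in reverse order, everything acquired before the failing step; a minimal userspace sketch of that shape (the resource names are invented):

#include <stdio.h>
#include <stdlib.h>

struct dev_state {
	void *parms;	/* stands in for the neighbour parms */
	void *sysctl;	/* stands in for the sysctl registration */
};

static int setup(struct dev_state *st)
{
	st->parms = malloc(32);
	if (!st->parms)
		goto err;

	st->sysctl = malloc(32);
	if (!st->sysctl)
		goto err_free_parms;	/* later failure must undo earlier work */

	return 0;

err_free_parms:
	free(st->parms);		/* the release the original error path lacked */
	st->parms = NULL;
err:
	return -1;
}

int main(void)
{
	struct dev_state st = { 0 };

	if (setup(&st)) {
		fprintf(stderr, "setup failed, nothing left allocated\n");
		return 1;
	}

	printf("setup ok\n");
	free(st.sysctl);
	free(st.parms);
	return 0;
}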
+18 -12
net/ipv4/esp4_offload.c
··· 63 63 sp->olen++; 64 64 65 65 xo = xfrm_offload(skb); 66 - if (!xo) { 67 - xfrm_state_put(x); 66 + if (!xo) 68 67 goto out_reset; 69 - } 70 68 } 71 69 72 70 xo->flags |= XFRM_GRO; ··· 137 139 struct xfrm_offload *xo = xfrm_offload(skb); 138 140 struct sk_buff *segs = ERR_PTR(-EINVAL); 139 141 const struct net_offload *ops; 140 - int proto = xo->proto; 142 + u8 proto = xo->proto; 141 143 142 144 skb->transport_header += x->props.header_len; 143 145 144 - if (proto == IPPROTO_BEETPH) { 145 - struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data; 146 + if (x->sel.family != AF_INET6) { 147 + if (proto == IPPROTO_BEETPH) { 148 + struct ip_beet_phdr *ph = 149 + (struct ip_beet_phdr *)skb->data; 146 150 147 - skb->transport_header += ph->hdrlen * 8; 148 - proto = ph->nexthdr; 149 - } else if (x->sel.family != AF_INET6) { 150 - skb->transport_header -= IPV4_BEET_PHMAXLEN; 151 - } else if (proto == IPPROTO_TCP) { 152 - skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 151 + skb->transport_header += ph->hdrlen * 8; 152 + proto = ph->nexthdr; 153 + } else { 154 + skb->transport_header -= IPV4_BEET_PHMAXLEN; 155 + } 156 + } else { 157 + __be16 frag; 158 + 159 + skb->transport_header += 160 + ipv6_skip_exthdr(skb, 0, &proto, &frag); 161 + if (proto == IPPROTO_TCP) 162 + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 153 163 } 154 164 155 165 __skb_pull(skb, skb_transport_offset(skb));
+10 -9
net/ipv4/fib_frontend.c
··· 309 309 { 310 310 bool dev_match = false; 311 311 #ifdef CONFIG_IP_ROUTE_MULTIPATH 312 - int ret; 312 + if (unlikely(fi->nh)) { 313 + dev_match = nexthop_uses_dev(fi->nh, dev); 314 + } else { 315 + int ret; 313 316 314 - for (ret = 0; ret < fib_info_num_path(fi); ret++) { 315 - const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); 317 + for (ret = 0; ret < fib_info_num_path(fi); ret++) { 318 + const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); 316 319 317 - if (nhc->nhc_dev == dev) { 318 - dev_match = true; 319 - break; 320 - } else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) { 321 - dev_match = true; 322 - break; 320 + if (nhc_l3mdev_matches_dev(nhc, dev)) { 321 + dev_match = true; 322 + break; 323 + } 323 324 } 324 325 } 325 326 #else
+36 -15
net/ipv4/fib_trie.c
··· 1371 1371 return (key ^ prefix) & (prefix | -prefix); 1372 1372 } 1373 1373 1374 + bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, 1375 + const struct flowi4 *flp) 1376 + { 1377 + if (nhc->nhc_flags & RTNH_F_DEAD) 1378 + return false; 1379 + 1380 + if (ip_ignore_linkdown(nhc->nhc_dev) && 1381 + nhc->nhc_flags & RTNH_F_LINKDOWN && 1382 + !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) 1383 + return false; 1384 + 1385 + if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) { 1386 + if (flp->flowi4_oif && 1387 + flp->flowi4_oif != nhc->nhc_oif) 1388 + return false; 1389 + } 1390 + 1391 + return true; 1392 + } 1393 + 1374 1394 /* should be called with rcu_read_lock */ 1375 1395 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, 1376 1396 struct fib_result *res, int fib_flags) ··· 1523 1503 /* Step 3: Process the leaf, if that fails fall back to backtracing */ 1524 1504 hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { 1525 1505 struct fib_info *fi = fa->fa_info; 1506 + struct fib_nh_common *nhc; 1526 1507 int nhsel, err; 1527 1508 1528 1509 if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) { ··· 1549 1528 if (fi->fib_flags & RTNH_F_DEAD) 1550 1529 continue; 1551 1530 1552 - if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) { 1553 - err = fib_props[RTN_BLACKHOLE].error; 1554 - goto out_reject; 1531 + if (unlikely(fi->nh)) { 1532 + if (nexthop_is_blackhole(fi->nh)) { 1533 + err = fib_props[RTN_BLACKHOLE].error; 1534 + goto out_reject; 1535 + } 1536 + 1537 + nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp, 1538 + &nhsel); 1539 + if (nhc) 1540 + goto set_result; 1541 + goto miss; 1555 1542 } 1556 1543 1557 1544 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) { 1558 - struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel); 1545 + nhc = fib_info_nhc(fi, nhsel); 1559 1546 1560 - if (nhc->nhc_flags & RTNH_F_DEAD) 1547 + if (!fib_lookup_good_nhc(nhc, fib_flags, flp)) 1561 1548 continue; 1562 - if (ip_ignore_linkdown(nhc->nhc_dev) && 1563 - nhc->nhc_flags & RTNH_F_LINKDOWN && 1564 - !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) 1565 - continue; 1566 - if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) { 1567 - if (flp->flowi4_oif && 1568 - flp->flowi4_oif != nhc->nhc_oif) 1569 - continue; 1570 - } 1571 - 1549 + set_result: 1572 1550 if (!(fib_flags & FIB_LOOKUP_NOREF)) 1573 1551 refcount_inc(&fi->fib_clntref); 1574 1552 ··· 1588 1568 return err; 1589 1569 } 1590 1570 } 1571 + miss: 1591 1572 #ifdef CONFIG_IP_FIB_TRIE_STATS 1592 1573 this_cpu_inc(stats->semantic_match_miss); 1593 1574 #endif
+22 -1
net/ipv4/ip_vti.c
··· 93 93 94 94 static int vti_rcv_tunnel(struct sk_buff *skb) 95 95 { 96 - return vti_rcv(skb, ip_hdr(skb)->saddr, true); 96 + struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); 97 + const struct iphdr *iph = ip_hdr(skb); 98 + struct ip_tunnel *tunnel; 99 + 100 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 101 + iph->saddr, iph->daddr, 0); 102 + if (tunnel) { 103 + struct tnl_ptk_info tpi = { 104 + .proto = htons(ETH_P_IP), 105 + }; 106 + 107 + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 108 + goto drop; 109 + if (iptunnel_pull_header(skb, 0, tpi.proto, false)) 110 + goto drop; 111 + return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); 112 + } 113 + 114 + return -EINVAL; 115 + drop: 116 + kfree_skb(skb); 117 + return 0; 97 118 } 98 119 99 120 static int vti_rcv_cb(struct sk_buff *skb, int err)
+2 -5
net/ipv4/netfilter/nf_nat_pptp.c
··· 166 166 break; 167 167 default: 168 168 pr_debug("unknown outbound packet 0x%04x:%s\n", msg, 169 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : 170 - pptp_msg_name[0]); 169 + pptp_msg_name(msg)); 171 170 fallthrough; 172 171 case PPTP_SET_LINK_INFO: 173 172 /* only need to NAT in case PAC is behind NAT box */ ··· 267 268 pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); 268 269 break; 269 270 default: 270 - pr_debug("unknown inbound packet %s\n", 271 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : 272 - pptp_msg_name[0]); 271 + pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg)); 273 272 fallthrough; 274 273 case PPTP_START_SESSION_REQUEST: 275 274 case PPTP_START_SESSION_REPLY:
+63 -39
net/ipv4/nexthop.c
··· 63 63 int i; 64 64 65 65 nhg = rcu_dereference_raw(nh->nh_grp); 66 - for (i = 0; i < nhg->num_nh; ++i) 67 - WARN_ON(nhg->nh_entries[i].nh); 66 + for (i = 0; i < nhg->num_nh; ++i) { 67 + struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 68 68 69 + WARN_ON(!list_empty(&nhge->nh_list)); 70 + nexthop_put(nhge->nh); 71 + } 72 + 73 + WARN_ON(nhg->spare == nhg); 74 + 75 + kfree(nhg->spare); 69 76 kfree(nhg); 70 77 } 71 78 ··· 701 694 } 702 695 } 703 696 704 - static void remove_nh_grp_entry(struct nh_grp_entry *nhge, 705 - struct nh_group *nhg, 697 + static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge, 706 698 struct nl_info *nlinfo) 707 699 { 700 + struct nh_grp_entry *nhges, *new_nhges; 701 + struct nexthop *nhp = nhge->nh_parent; 708 702 struct nexthop *nh = nhge->nh; 709 - struct nh_grp_entry *nhges; 710 - bool found = false; 711 - int i; 703 + struct nh_group *nhg, *newg; 704 + int i, j; 712 705 713 706 WARN_ON(!nh); 714 707 715 - nhges = nhg->nh_entries; 716 - for (i = 0; i < nhg->num_nh; ++i) { 717 - if (found) { 718 - nhges[i-1].nh = nhges[i].nh; 719 - nhges[i-1].weight = nhges[i].weight; 720 - list_del(&nhges[i].nh_list); 721 - list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list); 722 - } else if (nhg->nh_entries[i].nh == nh) { 723 - found = true; 724 - } 708 + nhg = rtnl_dereference(nhp->nh_grp); 709 + newg = nhg->spare; 710 + 711 + /* last entry, keep it visible and remove the parent */ 712 + if (nhg->num_nh == 1) { 713 + remove_nexthop(net, nhp, nlinfo); 714 + return; 725 715 } 726 716 727 - if (WARN_ON(!found)) 728 - return; 717 + newg->has_v4 = nhg->has_v4; 718 + newg->mpath = nhg->mpath; 719 + newg->num_nh = nhg->num_nh; 729 720 730 - nhg->num_nh--; 731 - nhg->nh_entries[nhg->num_nh].nh = NULL; 721 + /* copy old entries to new except the one getting removed */ 722 + nhges = nhg->nh_entries; 723 + new_nhges = newg->nh_entries; 724 + for (i = 0, j = 0; i < nhg->num_nh; ++i) { 725 + /* current nexthop getting removed */ 726 + if (nhg->nh_entries[i].nh == nh) { 727 + newg->num_nh--; 728 + continue; 729 + } 732 730 733 - nh_group_rebalance(nhg); 731 + list_del(&nhges[i].nh_list); 732 + new_nhges[j].nh_parent = nhges[i].nh_parent; 733 + new_nhges[j].nh = nhges[i].nh; 734 + new_nhges[j].weight = nhges[i].weight; 735 + list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); 736 + j++; 737 + } 734 738 735 - nexthop_put(nh); 739 + nh_group_rebalance(newg); 740 + rcu_assign_pointer(nhp->nh_grp, newg); 741 + 742 + list_del(&nhge->nh_list); 743 + nexthop_put(nhge->nh); 736 744 737 745 if (nlinfo) 738 - nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo); 746 + nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo); 739 747 } 740 748 741 749 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, ··· 758 736 { 759 737 struct nh_grp_entry *nhge, *tmp; 760 738 761 - list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) { 762 - struct nh_group *nhg; 739 + list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) 740 + remove_nh_grp_entry(net, nhge, nlinfo); 763 741 764 - list_del(&nhge->nh_list); 765 - nhg = rtnl_dereference(nhge->nh_parent->nh_grp); 766 - remove_nh_grp_entry(nhge, nhg, nlinfo); 767 - 768 - /* if this group has no more entries then remove it */ 769 - if (!nhg->num_nh) 770 - remove_nexthop(net, nhge->nh_parent, nlinfo); 771 - } 742 + /* make sure all see the newly published array before releasing rtnl */ 743 + synchronize_rcu(); 772 744 } 773 745 774 746 static void remove_nexthop_group(struct nexthop *nh, struct nl_info 
*nlinfo) ··· 776 760 if (WARN_ON(!nhge->nh)) 777 761 continue; 778 762 779 - list_del(&nhge->nh_list); 780 - nexthop_put(nhge->nh); 781 - nhge->nh = NULL; 782 - nhg->num_nh--; 763 + list_del_init(&nhge->nh_list); 783 764 } 784 765 } 785 766 ··· 1099 1086 { 1100 1087 struct nlattr *grps_attr = cfg->nh_grp; 1101 1088 struct nexthop_grp *entry = nla_data(grps_attr); 1089 + u16 num_nh = nla_len(grps_attr) / sizeof(*entry); 1102 1090 struct nh_group *nhg; 1103 1091 struct nexthop *nh; 1104 1092 int i; ··· 1110 1096 1111 1097 nh->is_group = 1; 1112 1098 1113 - nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry)); 1099 + nhg = nexthop_grp_alloc(num_nh); 1114 1100 if (!nhg) { 1115 1101 kfree(nh); 1116 1102 return ERR_PTR(-ENOMEM); 1117 1103 } 1104 + 1105 + /* spare group used for removals */ 1106 + nhg->spare = nexthop_grp_alloc(num_nh); 1107 + if (!nhg) { 1108 + kfree(nhg); 1109 + kfree(nh); 1110 + return NULL; 1111 + } 1112 + nhg->spare->spare = nhg; 1118 1113 1119 1114 for (i = 0; i < nhg->num_nh; ++i) { 1120 1115 struct nexthop *nhe; ··· 1156 1133 for (; i >= 0; --i) 1157 1134 nexthop_put(nhg->nh_entries[i].nh); 1158 1135 1136 + kfree(nhg->spare); 1159 1137 kfree(nhg); 1160 1138 kfree(nh); 1161 1139
+25 -12
net/ipv6/esp6_offload.c
··· 85 85 sp->olen++; 86 86 87 87 xo = xfrm_offload(skb); 88 - if (!xo) { 89 - xfrm_state_put(x); 88 + if (!xo) 90 89 goto out_reset; 91 - } 92 90 } 93 91 94 92 xo->flags |= XFRM_GRO; ··· 121 123 struct ip_esp_hdr *esph; 122 124 struct ipv6hdr *iph = ipv6_hdr(skb); 123 125 struct xfrm_offload *xo = xfrm_offload(skb); 124 - int proto = iph->nexthdr; 126 + u8 proto = iph->nexthdr; 125 127 126 128 skb_push(skb, -skb_network_offset(skb)); 129 + 130 + if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) { 131 + __be16 frag; 132 + 133 + ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag); 134 + } 135 + 127 136 esph = ip_esp_hdr(skb); 128 137 *skb_mac_header(skb) = IPPROTO_ESP; 129 138 ··· 171 166 struct xfrm_offload *xo = xfrm_offload(skb); 172 167 struct sk_buff *segs = ERR_PTR(-EINVAL); 173 168 const struct net_offload *ops; 174 - int proto = xo->proto; 169 + u8 proto = xo->proto; 175 170 176 171 skb->transport_header += x->props.header_len; 177 - 178 - if (proto == IPPROTO_BEETPH) { 179 - struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data; 180 - 181 - skb->transport_header += ph->hdrlen * 8; 182 - proto = ph->nexthdr; 183 - } 184 172 185 173 if (x->sel.family != AF_INET6) { 186 174 skb->transport_header -= 187 175 (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); 188 176 177 + if (proto == IPPROTO_BEETPH) { 178 + struct ip_beet_phdr *ph = 179 + (struct ip_beet_phdr *)skb->data; 180 + 181 + skb->transport_header += ph->hdrlen * 8; 182 + proto = ph->nexthdr; 183 + } else { 184 + skb->transport_header -= IPV4_BEET_PHMAXLEN; 185 + } 186 + 189 187 if (proto == IPPROTO_TCP) 190 188 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; 189 + } else { 190 + __be16 frag; 191 + 192 + skb->transport_header += 193 + ipv6_skip_exthdr(skb, 0, &proto, &frag); 191 194 } 192 195 193 196 __skb_pull(skb, skb_transport_offset(skb));
+3
net/l2tp/l2tp_core.c
··· 1458 1458 if (sk->sk_type != SOCK_DGRAM) 1459 1459 return -EPROTONOSUPPORT; 1460 1460 1461 + if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) 1462 + return -EPROTONOSUPPORT; 1463 + 1461 1464 if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) || 1462 1465 (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP)) 1463 1466 return -EPROTONOSUPPORT;
+22 -7
net/l2tp/l2tp_ip.c
··· 20 20 #include <net/icmp.h> 21 21 #include <net/udp.h> 22 22 #include <net/inet_common.h> 23 - #include <net/inet_hashtables.h> 24 23 #include <net/tcp_states.h> 25 24 #include <net/protocol.h> 26 25 #include <net/xfrm.h> ··· 208 209 return 0; 209 210 } 210 211 212 + static int l2tp_ip_hash(struct sock *sk) 213 + { 214 + if (sk_unhashed(sk)) { 215 + write_lock_bh(&l2tp_ip_lock); 216 + sk_add_node(sk, &l2tp_ip_table); 217 + write_unlock_bh(&l2tp_ip_lock); 218 + } 219 + return 0; 220 + } 221 + 222 + static void l2tp_ip_unhash(struct sock *sk) 223 + { 224 + if (sk_unhashed(sk)) 225 + return; 226 + write_lock_bh(&l2tp_ip_lock); 227 + sk_del_node_init(sk); 228 + write_unlock_bh(&l2tp_ip_lock); 229 + } 230 + 211 231 static int l2tp_ip_open(struct sock *sk) 212 232 { 213 233 /* Prevent autobind. We don't have ports. */ 214 234 inet_sk(sk)->inet_num = IPPROTO_L2TP; 215 235 216 - write_lock_bh(&l2tp_ip_lock); 217 - sk_add_node(sk, &l2tp_ip_table); 218 - write_unlock_bh(&l2tp_ip_lock); 219 - 236 + l2tp_ip_hash(sk); 220 237 return 0; 221 238 } 222 239 ··· 609 594 .sendmsg = l2tp_ip_sendmsg, 610 595 .recvmsg = l2tp_ip_recvmsg, 611 596 .backlog_rcv = l2tp_ip_backlog_recv, 612 - .hash = inet_hash, 613 - .unhash = inet_unhash, 597 + .hash = l2tp_ip_hash, 598 + .unhash = l2tp_ip_unhash, 614 599 .obj_size = sizeof(struct l2tp_ip_sock), 615 600 #ifdef CONFIG_COMPAT 616 601 .compat_setsockopt = compat_ip_setsockopt,
+22 -8
net/l2tp/l2tp_ip6.c
··· 20 20 #include <net/icmp.h> 21 21 #include <net/udp.h> 22 22 #include <net/inet_common.h> 23 - #include <net/inet_hashtables.h> 24 - #include <net/inet6_hashtables.h> 25 23 #include <net/tcp_states.h> 26 24 #include <net/protocol.h> 27 25 #include <net/xfrm.h> ··· 220 222 return 0; 221 223 } 222 224 225 + static int l2tp_ip6_hash(struct sock *sk) 226 + { 227 + if (sk_unhashed(sk)) { 228 + write_lock_bh(&l2tp_ip6_lock); 229 + sk_add_node(sk, &l2tp_ip6_table); 230 + write_unlock_bh(&l2tp_ip6_lock); 231 + } 232 + return 0; 233 + } 234 + 235 + static void l2tp_ip6_unhash(struct sock *sk) 236 + { 237 + if (sk_unhashed(sk)) 238 + return; 239 + write_lock_bh(&l2tp_ip6_lock); 240 + sk_del_node_init(sk); 241 + write_unlock_bh(&l2tp_ip6_lock); 242 + } 243 + 223 244 static int l2tp_ip6_open(struct sock *sk) 224 245 { 225 246 /* Prevent autobind. We don't have ports. */ 226 247 inet_sk(sk)->inet_num = IPPROTO_L2TP; 227 248 228 - write_lock_bh(&l2tp_ip6_lock); 229 - sk_add_node(sk, &l2tp_ip6_table); 230 - write_unlock_bh(&l2tp_ip6_lock); 231 - 249 + l2tp_ip6_hash(sk); 232 250 return 0; 233 251 } 234 252 ··· 742 728 .sendmsg = l2tp_ip6_sendmsg, 743 729 .recvmsg = l2tp_ip6_recvmsg, 744 730 .backlog_rcv = l2tp_ip6_backlog_recv, 745 - .hash = inet6_hash, 746 - .unhash = inet_unhash, 731 + .hash = l2tp_ip6_hash, 732 + .unhash = l2tp_ip6_unhash, 747 733 .obj_size = sizeof(struct l2tp_ip6_sock), 748 734 #ifdef CONFIG_COMPAT 749 735 .compat_setsockopt = compat_ipv6_setsockopt,
+7
net/mac80211/mesh_hwmp.c
··· 1103 1103 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, 1104 1104 target_flags, mpath->dst, mpath->sn, da, 0, 1105 1105 ttl, lifetime, 0, ifmsh->preq_id++, sdata); 1106 + 1107 + spin_lock_bh(&mpath->state_lock); 1108 + if (mpath->flags & MESH_PATH_DELETED) { 1109 + spin_unlock_bh(&mpath->state_lock); 1110 + goto enddiscovery; 1111 + } 1106 1112 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 1113 + spin_unlock_bh(&mpath->state_lock); 1107 1114 1108 1115 enddiscovery: 1109 1116 rcu_read_unlock();
+48 -19
net/mptcp/protocol.c
··· 954 954 955 955 pr_debug("block timeout %ld", timeo); 956 956 mptcp_wait_data(sk, &timeo); 957 - if (unlikely(__mptcp_tcp_fallback(msk))) 957 + ssock = __mptcp_tcp_fallback(msk); 958 + if (unlikely(ssock)) 958 959 goto fallback; 959 960 } 960 961 ··· 1263 1262 1264 1263 lock_sock(sk); 1265 1264 1266 - mptcp_token_destroy(msk->token); 1267 1265 inet_sk_state_store(sk, TCP_CLOSE); 1268 1266 1269 - __mptcp_flush_join_list(msk); 1270 - 1267 + /* be sure to always acquire the join list lock, to sync vs 1268 + * mptcp_finish_join(). 1269 + */ 1270 + spin_lock_bh(&msk->join_list_lock); 1271 + list_splice_tail_init(&msk->join_list, &msk->conn_list); 1272 + spin_unlock_bh(&msk->join_list_lock); 1271 1273 list_splice_init(&msk->conn_list, &conn_list); 1272 1274 1273 1275 data_fin_tx_seq = msk->write_seq; ··· 1460 1456 { 1461 1457 struct mptcp_sock *msk = mptcp_sk(sk); 1462 1458 1459 + mptcp_token_destroy(msk->token); 1463 1460 if (msk->cached_ext) 1464 1461 __skb_ext_put(msk->cached_ext); 1465 1462 ··· 1627 1622 if (!msk->pm.server_side) 1628 1623 return true; 1629 1624 1630 - /* passive connection, attach to msk socket */ 1625 + if (!mptcp_pm_allow_new_subflow(msk)) 1626 + return false; 1627 + 1628 + /* active connections are already on conn_list, and we can't acquire 1629 + * msk lock here. 1630 + * use the join list lock as synchronization point and double-check 1631 + * msk status to avoid racing with mptcp_close() 1632 + */ 1633 + spin_lock_bh(&msk->join_list_lock); 1634 + ret = inet_sk_state_load(parent) == TCP_ESTABLISHED; 1635 + if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) 1636 + list_add_tail(&subflow->node, &msk->join_list); 1637 + spin_unlock_bh(&msk->join_list_lock); 1638 + if (!ret) 1639 + return false; 1640 + 1641 + /* attach to msk socket only after we are sure he will deal with us 1642 + * at close time 1643 + */ 1631 1644 parent_sock = READ_ONCE(parent->sk_socket); 1632 1645 if (parent_sock && !sk->sk_socket) 1633 1646 mptcp_sock_graft(sk, parent_sock); 1634 - 1635 - ret = mptcp_pm_allow_new_subflow(msk); 1636 - if (ret) { 1637 - subflow->map_seq = msk->ack_seq; 1638 - 1639 - /* active connections are already on conn_list */ 1640 - spin_lock_bh(&msk->join_list_lock); 1641 - if (!WARN_ON_ONCE(!list_empty(&subflow->node))) 1642 - list_add_tail(&subflow->node, &msk->join_list); 1643 - spin_unlock_bh(&msk->join_list_lock); 1644 - } 1645 - return ret; 1647 + subflow->map_seq = msk->ack_seq; 1648 + return true; 1646 1649 } 1647 1650 1648 1651 bool mptcp_sk_is_subflow(const struct sock *sk) ··· 1724 1711 int err; 1725 1712 1726 1713 lock_sock(sock->sk); 1714 + if (sock->state != SS_UNCONNECTED && msk->subflow) { 1715 + /* pending connection or invalid state, let existing subflow 1716 + * cope with that 1717 + */ 1718 + ssock = msk->subflow; 1719 + goto do_connect; 1720 + } 1721 + 1727 1722 ssock = __mptcp_socket_create(msk, TCP_SYN_SENT); 1728 1723 if (IS_ERR(ssock)) { 1729 1724 err = PTR_ERR(ssock); ··· 1746 1725 mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0; 1747 1726 #endif 1748 1727 1728 + do_connect: 1749 1729 err = ssock->ops->connect(ssock, uaddr, addr_len, flags); 1750 - inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); 1751 - mptcp_copy_inaddrs(sock->sk, ssock->sk); 1730 + sock->state = ssock->state; 1731 + 1732 + /* on successful connect, the msk state will be moved to established by 1733 + * subflow_finish_connect() 1734 + */ 1735 + if (!err || err == EINPROGRESS) 1736 + mptcp_copy_inaddrs(sock->sk, ssock->sk); 1737 + else 1738 + 
inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); 1752 1739 1753 1740 unlock: 1754 1741 release_sock(sock->sk);
+1 -1
net/netfilter/ipset/ip_set_list_set.c
··· 59 59 /* Don't lookup sub-counters at all */ 60 60 opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; 61 61 if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) 62 - opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; 62 + opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE; 63 63 list_for_each_entry_rcu(e, &map->members, list) { 64 64 ret = ip_set_test(e->id, skb, par, opt); 65 65 if (ret <= 0)
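The ipset hunk above turns a flag clear (&= ~) into the intended flag set (|=), so skipping sub-counter updates now also skips the main counter update. In plain C the two operations differ as below (the flag bits are illustrative, not the real IPSET_FLAG_* values):

#include <stdio.h>

#define FLAG_SKIP_COUNTER_UPDATE	(1u << 0)	/* illustrative bit values */
#define FLAG_SKIP_SUBCOUNTER_UPDATE	(1u << 1)

int main(void)
{
	unsigned int cmdflags = FLAG_SKIP_SUBCOUNTER_UPDATE;

	if (cmdflags & FLAG_SKIP_SUBCOUNTER_UPDATE)
		cmdflags |= FLAG_SKIP_COUNTER_UPDATE;	/* set the bit (the fix) */
	/* the buggy line cleared it instead:
	 *	cmdflags &= ~FLAG_SKIP_COUNTER_UPDATE;
	 */

	printf("skip counter update: %s\n",
	       (cmdflags & FLAG_SKIP_COUNTER_UPDATE) ? "yes" : "no");
	return 0;
}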
+73 -7
net/netfilter/nf_conntrack_core.c
··· 2016 2016 nf_conntrack_get(skb_nfct(nskb)); 2017 2017 } 2018 2018 2019 - static int nf_conntrack_update(struct net *net, struct sk_buff *skb) 2019 + static int __nf_conntrack_update(struct net *net, struct sk_buff *skb, 2020 + struct nf_conn *ct, 2021 + enum ip_conntrack_info ctinfo) 2020 2022 { 2021 2023 struct nf_conntrack_tuple_hash *h; 2022 2024 struct nf_conntrack_tuple tuple; 2023 - enum ip_conntrack_info ctinfo; 2024 2025 struct nf_nat_hook *nat_hook; 2025 2026 unsigned int status; 2026 - struct nf_conn *ct; 2027 2027 int dataoff; 2028 2028 u16 l3num; 2029 2029 u8 l4num; 2030 - 2031 - ct = nf_ct_get(skb, &ctinfo); 2032 - if (!ct || nf_ct_is_confirmed(ct)) 2033 - return 0; 2034 2030 2035 2031 l3num = nf_ct_l3num(ct); 2036 2032 ··· 2082 2086 return -1; 2083 2087 2084 2088 return 0; 2089 + } 2090 + 2091 + /* This packet is coming from userspace via nf_queue, complete the packet 2092 + * processing after the helper invocation in nf_confirm(). 2093 + */ 2094 + static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct, 2095 + enum ip_conntrack_info ctinfo) 2096 + { 2097 + const struct nf_conntrack_helper *helper; 2098 + const struct nf_conn_help *help; 2099 + int protoff; 2100 + 2101 + help = nfct_help(ct); 2102 + if (!help) 2103 + return 0; 2104 + 2105 + helper = rcu_dereference(help->helper); 2106 + if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) 2107 + return 0; 2108 + 2109 + switch (nf_ct_l3num(ct)) { 2110 + case NFPROTO_IPV4: 2111 + protoff = skb_network_offset(skb) + ip_hdrlen(skb); 2112 + break; 2113 + #if IS_ENABLED(CONFIG_IPV6) 2114 + case NFPROTO_IPV6: { 2115 + __be16 frag_off; 2116 + u8 pnum; 2117 + 2118 + pnum = ipv6_hdr(skb)->nexthdr; 2119 + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, 2120 + &frag_off); 2121 + if (protoff < 0 || (frag_off & htons(~0x7)) != 0) 2122 + return 0; 2123 + break; 2124 + } 2125 + #endif 2126 + default: 2127 + return 0; 2128 + } 2129 + 2130 + if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 2131 + !nf_is_loopback_packet(skb)) { 2132 + if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { 2133 + NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); 2134 + return -1; 2135 + } 2136 + } 2137 + 2138 + /* We've seen it coming out the other side: confirm it */ 2139 + return nf_conntrack_confirm(skb) == NF_DROP ? - 1 : 0; 2140 + } 2141 + 2142 + static int nf_conntrack_update(struct net *net, struct sk_buff *skb) 2143 + { 2144 + enum ip_conntrack_info ctinfo; 2145 + struct nf_conn *ct; 2146 + int err; 2147 + 2148 + ct = nf_ct_get(skb, &ctinfo); 2149 + if (!ct) 2150 + return 0; 2151 + 2152 + if (!nf_ct_is_confirmed(ct)) { 2153 + err = __nf_conntrack_update(net, skb, ct, ctinfo); 2154 + if (err < 0) 2155 + return err; 2156 + } 2157 + 2158 + return nf_confirm_cthelper(skb, ct, ctinfo); 2085 2159 } 2086 2160 2087 2161 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+35 -27
net/netfilter/nf_conntrack_pptp.c
··· 72 72 73 73 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) 74 74 /* PptpControlMessageType names */ 75 - const char *const pptp_msg_name[] = { 76 - "UNKNOWN_MESSAGE", 77 - "START_SESSION_REQUEST", 78 - "START_SESSION_REPLY", 79 - "STOP_SESSION_REQUEST", 80 - "STOP_SESSION_REPLY", 81 - "ECHO_REQUEST", 82 - "ECHO_REPLY", 83 - "OUT_CALL_REQUEST", 84 - "OUT_CALL_REPLY", 85 - "IN_CALL_REQUEST", 86 - "IN_CALL_REPLY", 87 - "IN_CALL_CONNECT", 88 - "CALL_CLEAR_REQUEST", 89 - "CALL_DISCONNECT_NOTIFY", 90 - "WAN_ERROR_NOTIFY", 91 - "SET_LINK_INFO" 75 + static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = { 76 + [0] = "UNKNOWN_MESSAGE", 77 + [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST", 78 + [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY", 79 + [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST", 80 + [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY", 81 + [PPTP_ECHO_REQUEST] = "ECHO_REQUEST", 82 + [PPTP_ECHO_REPLY] = "ECHO_REPLY", 83 + [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST", 84 + [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY", 85 + [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST", 86 + [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY", 87 + [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT", 88 + [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST", 89 + [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY", 90 + [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY", 91 + [PPTP_SET_LINK_INFO] = "SET_LINK_INFO" 92 92 }; 93 + 94 + const char *pptp_msg_name(u_int16_t msg) 95 + { 96 + if (msg > PPTP_MSG_MAX) 97 + return pptp_msg_name_array[0]; 98 + 99 + return pptp_msg_name_array[msg]; 100 + } 93 101 EXPORT_SYMBOL(pptp_msg_name); 94 102 #endif 95 103 ··· 284 276 typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; 285 277 286 278 msg = ntohs(ctlh->messageType); 287 - pr_debug("inbound control message %s\n", pptp_msg_name[msg]); 279 + pr_debug("inbound control message %s\n", pptp_msg_name(msg)); 288 280 289 281 switch (msg) { 290 282 case PPTP_START_SESSION_REPLY: ··· 319 311 pcid = pptpReq->ocack.peersCallID; 320 312 if (info->pns_call_id != pcid) 321 313 goto invalid; 322 - pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], 314 + pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg), 323 315 ntohs(cid), ntohs(pcid)); 324 316 325 317 if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { ··· 336 328 goto invalid; 337 329 338 330 cid = pptpReq->icreq.callID; 339 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 331 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 340 332 info->cstate = PPTP_CALL_IN_REQ; 341 333 info->pac_call_id = cid; 342 334 break; ··· 355 347 if (info->pns_call_id != pcid) 356 348 goto invalid; 357 349 358 - pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); 350 + pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), ntohs(pcid)); 359 351 info->cstate = PPTP_CALL_IN_CONF; 360 352 361 353 /* we expect a GRE connection from PAC to PNS */ ··· 365 357 case PPTP_CALL_DISCONNECT_NOTIFY: 366 358 /* server confirms disconnect */ 367 359 cid = pptpReq->disc.callID; 368 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 360 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 369 361 info->cstate = PPTP_CALL_NONE; 370 362 371 363 /* untrack this call id, unexpect GRE packets */ ··· 392 384 invalid: 393 385 pr_debug("invalid %s: type=%d cid=%u pcid=%u " 394 386 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 395 - msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], 387 + pptp_msg_name(msg), 396 388 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 397 389 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 398 390 return NF_ACCEPT; ··· 412 404 typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; 413 405 414 406 msg = ntohs(ctlh->messageType); 415 - pr_debug("outbound control message %s\n", pptp_msg_name[msg]); 407 + pr_debug("outbound control message %s\n", pptp_msg_name(msg)); 416 408 417 409 switch (msg) { 418 410 case PPTP_START_SESSION_REQUEST: ··· 434 426 info->cstate = PPTP_CALL_OUT_REQ; 435 427 /* track PNS call id */ 436 428 cid = pptpReq->ocreq.callID; 437 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 429 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 438 430 info->pns_call_id = cid; 439 431 break; 440 432 ··· 448 440 pcid = pptpReq->icack.peersCallID; 449 441 if (info->pac_call_id != pcid) 450 442 goto invalid; 451 - pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], 443 + pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg), 452 444 ntohs(cid), ntohs(pcid)); 453 445 454 446 if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { ··· 488 480 invalid: 489 481 pr_debug("invalid %s: type=%d cid=%u pcid=%u " 490 482 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 491 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], 483 + pptp_msg_name(msg), 492 484 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 493 485 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 494 486 return NF_ACCEPT;
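The pptp debug fix replaces direct pptp_msg_name[msg] indexing, which could read past the end of the table for message types above PPTP_MSG_MAX, with a bounds-checked accessor over a designated-initializer table, so unknown types resolve to "UNKNOWN_MESSAGE". The same pattern in a standalone sketch (hypothetical table, not the PPTP one):

    #include <stdio.h>

    #define MSG_MAX 3

    /* Designated initializers leave unnamed slots as NULL; the accessor
     * clamps out-of-range or unnamed values to the "unknown" entry.
     */
    static const char *const msg_name[MSG_MAX + 1] = {
        [0] = "UNKNOWN_MESSAGE",
        [1] = "START_SESSION_REQUEST",
        [2] = "START_SESSION_REPLY",
        [3] = "STOP_SESSION_REQUEST",
    };

    static const char *name_of(unsigned int msg)
    {
        if (msg > MSG_MAX || !msg_name[msg])
            return msg_name[0];
        return msg_name[msg];
    }

    int main(void)
    {
        printf("%s\n", name_of(2));         /* START_SESSION_REPLY */
        printf("%s\n", name_of(4711));      /* UNKNOWN_MESSAGE, no OOB read */
        return 0;
    }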
+5 -5
net/qrtr/ns.c
··· 712 712 goto err_sock; 713 713 } 714 714 715 + qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); 716 + if (!qrtr_ns.workqueue) 717 + goto err_sock; 718 + 715 719 qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready; 716 720 717 721 sq.sq_port = QRTR_PORT_CTRL; ··· 724 720 ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq)); 725 721 if (ret < 0) { 726 722 pr_err("failed to bind to socket\n"); 727 - goto err_sock; 723 + goto err_wq; 728 724 } 729 725 730 726 qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR; 731 727 qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST; 732 728 qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL; 733 - 734 - qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); 735 - if (!qrtr_ns.workqueue) 736 - goto err_sock; 737 729 738 730 ret = say_hello(&qrtr_ns.bcast_sq); 739 731 if (ret < 0)
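The qrtr reordering creates the workqueue before kernel_bind() publishes the socket, because sk_data_ready can fire, and try to queue work, as soon as the bind completes. A generic user-space sketch of "allocate what the callback needs before registering the callback" (purely illustrative types and names):

    #include <stdio.h>
    #include <stdlib.h>

    struct queue { int dummy; };

    struct service {
        struct queue *wq;                   /* used by the data-ready callback */
        void (*data_ready)(struct service *);
    };

    static void on_data(struct service *s)
    {
        if (!s->wq)                         /* would be a NULL deref otherwise */
            abort();
        printf("work queued\n");
    }

    int main(void)
    {
        struct service s = { 0 };

        s.wq = malloc(sizeof(*s.wq));       /* 1. allocate what the callback uses */
        if (!s.wq)
            return 1;
        s.data_ready = on_data;             /* 2. only then expose the callback */

        s.data_ready(&s);                   /* simulate data arriving right away */
        free(s.wq);
        return 0;
    }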
+3
net/sched/act_ct.c
··· 199 199 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 200 200 struct nf_conntrack_tuple target; 201 201 202 + if (!(ct->status & IPS_NAT_MASK)) 203 + return 0; 204 + 202 205 nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); 203 206 204 207 switch (tuple->src.l3num) {
+2 -2
net/sched/sch_fq_pie.c
··· 297 297 goto flow_error; 298 298 } 299 299 q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]); 300 - if (!q->flows_cnt || q->flows_cnt > 65536) { 300 + if (!q->flows_cnt || q->flows_cnt >= 65536) { 301 301 NL_SET_ERR_MSG_MOD(extack, 302 - "Number of flows must be < 65536"); 302 + "Number of flows must range in [1..65535]"); 303 303 goto flow_error; 304 304 } 305 305 }
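The tightened bound rejects flows_cnt == 65536, matching the new "[1..65535]" error message: the flow index used when walking the flow table in sch_fq_pie is a u16, so a count that does not fit in 16 bits makes the walk loop forever, which is the infinite loop noted in the merge summary. The underlying arithmetic is easy to reproduce in plain C:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t flows_cnt = 65535;         /* highest value that still terminates */
        uint16_t idx;
        uint32_t visited = 0;

        /* With flows_cnt = 65536 this loop never exits: idx wraps from
         * 65535 back to 0 and the condition stays true forever.
         */
        for (idx = 0; idx < flows_cnt; idx++)
            visited++;

        printf("visited %u flows\n", visited);
        return 0;
    }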
+1 -1
net/sctp/Kconfig
··· 31 31 homing at either or both ends of an association." 32 32 33 33 To compile this protocol support as a module, choose M here: the 34 - module will be called sctp. Debug messages are handeled by the 34 + module will be called sctp. Debug messages are handled by the 35 35 kernel's dynamic debugging framework. 36 36 37 37 If in doubt, say N.
+3
net/sctp/ulpevent.c
··· 343 343 struct sockaddr_storage addr; 344 344 struct sctp_ulpevent *event; 345 345 346 + if (asoc->state < SCTP_STATE_ESTABLISHED) 347 + return; 348 + 346 349 memset(&addr, 0, sizeof(struct sockaddr_storage)); 347 350 memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); 348 351
+27 -6
net/tls/tls_sw.c
··· 206 206 207 207 kfree(aead_req); 208 208 209 + spin_lock_bh(&ctx->decrypt_compl_lock); 209 210 pending = atomic_dec_return(&ctx->decrypt_pending); 210 211 211 - if (!pending && READ_ONCE(ctx->async_notify)) 212 + if (!pending && ctx->async_notify) 212 213 complete(&ctx->async_wait.completion); 214 + spin_unlock_bh(&ctx->decrypt_compl_lock); 213 215 } 214 216 215 217 static int tls_do_decryption(struct sock *sk, ··· 469 467 ready = true; 470 468 } 471 469 470 + spin_lock_bh(&ctx->encrypt_compl_lock); 472 471 pending = atomic_dec_return(&ctx->encrypt_pending); 473 472 474 - if (!pending && READ_ONCE(ctx->async_notify)) 473 + if (!pending && ctx->async_notify) 475 474 complete(&ctx->async_wait.completion); 475 + spin_unlock_bh(&ctx->encrypt_compl_lock); 476 476 477 477 if (!ready) 478 478 return; ··· 933 929 int num_zc = 0; 934 930 int orig_size; 935 931 int ret = 0; 932 + int pending; 936 933 937 934 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 938 935 return -EOPNOTSUPP; ··· 1100 1095 goto send_end; 1101 1096 } else if (num_zc) { 1102 1097 /* Wait for pending encryptions to get completed */ 1103 - smp_store_mb(ctx->async_notify, true); 1098 + spin_lock_bh(&ctx->encrypt_compl_lock); 1099 + ctx->async_notify = true; 1104 1100 1105 - if (atomic_read(&ctx->encrypt_pending)) 1101 + pending = atomic_read(&ctx->encrypt_pending); 1102 + spin_unlock_bh(&ctx->encrypt_compl_lock); 1103 + if (pending) 1106 1104 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1107 1105 else 1108 1106 reinit_completion(&ctx->async_wait.completion); 1109 1107 1108 + /* There can be no concurrent accesses, since we have no 1109 + * pending encrypt operations 1110 + */ 1110 1111 WRITE_ONCE(ctx->async_notify, false); 1111 1112 1112 1113 if (ctx->async_wait.err) { ··· 1743 1732 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1744 1733 bool is_peek = flags & MSG_PEEK; 1745 1734 int num_async = 0; 1735 + int pending; 1746 1736 1747 1737 flags |= nonblock; 1748 1738 ··· 1906 1894 recv_end: 1907 1895 if (num_async) { 1908 1896 /* Wait for all previously submitted records to be decrypted */ 1909 - smp_store_mb(ctx->async_notify, true); 1910 - if (atomic_read(&ctx->decrypt_pending)) { 1897 + spin_lock_bh(&ctx->decrypt_compl_lock); 1898 + ctx->async_notify = true; 1899 + pending = atomic_read(&ctx->decrypt_pending); 1900 + spin_unlock_bh(&ctx->decrypt_compl_lock); 1901 + if (pending) { 1911 1902 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1912 1903 if (err) { 1913 1904 /* one of async decrypt failed */ ··· 1922 1907 } else { 1923 1908 reinit_completion(&ctx->async_wait.completion); 1924 1909 } 1910 + 1911 + /* There can be no concurrent accesses, since we have no 1912 + * pending decrypt operations 1913 + */ 1925 1914 WRITE_ONCE(ctx->async_notify, false); 1926 1915 1927 1916 /* Drain records from the rx_list & copy if required */ ··· 2312 2293 2313 2294 if (tx) { 2314 2295 crypto_init_wait(&sw_ctx_tx->async_wait); 2296 + spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); 2315 2297 crypto_info = &ctx->crypto_send.info; 2316 2298 cctx = &ctx->tx; 2317 2299 aead = &sw_ctx_tx->aead_send; ··· 2321 2301 sw_ctx_tx->tx_work.sk = sk; 2322 2302 } else { 2323 2303 crypto_init_wait(&sw_ctx_rx->async_wait); 2304 + spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); 2324 2305 crypto_info = &ctx->crypto_recv.info; 2325 2306 cctx = &ctx->rx; 2326 2307 skb_queue_head_init(&sw_ctx_rx->rx_list);
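The ktls hunks close the tls_sw_recvmsg()/tls_decrypt_done() race from the merge summary: the callback's "decrement pending, then test async_notify" and the waiter's "set async_notify, then sample pending" now run under decrypt_compl_lock (and the analogous encrypt_compl_lock on the TX side), so the final completion can no longer slip between the waiter's two steps. A user-space sketch of that handshake, with pthread primitives standing in for the spinlock and completion (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    struct compl_ctx {
        pthread_mutex_t lock;
        pthread_cond_t  done;
        int             pending;            /* in-flight async operations */
        bool            notify;             /* a waiter wants a wakeup */
    };

    /* called from each async completion (tls_decrypt_done() analogue) */
    static void op_done(struct compl_ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        if (--c->pending == 0 && c->notify)
            pthread_cond_signal(&c->done);
        pthread_mutex_unlock(&c->lock);
    }

    /* called from recvmsg()/sendmsg() before returning to the caller */
    static void wait_for_pending(struct compl_ctx *c)
    {
        pthread_mutex_lock(&c->lock);
        c->notify = true;
        while (c->pending)
            pthread_cond_wait(&c->done, &c->lock);
        c->notify = false;
        pthread_mutex_unlock(&c->lock);
    }

    static void *worker(void *arg)
    {
        op_done(arg);
        return NULL;
    }

    int main(void)
    {
        struct compl_ctx c = {
            .lock = PTHREAD_MUTEX_INITIALIZER,
            .done = PTHREAD_COND_INITIALIZER,
            .pending = 1,
        };
        pthread_t t;

        pthread_create(&t, NULL, worker, &c);
        wait_for_pending(&c);               /* returns once the worker finished */
        pthread_join(t, NULL);
        return 0;
    }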
+1 -1
net/vmw_vsock/af_vsock.c
··· 1408 1408 /* Wait for children sockets to appear; these are the new sockets 1409 1409 * created upon connection establishment. 1410 1410 */ 1411 - timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); 1411 + timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK); 1412 1412 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); 1413 1413 1414 1414 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
+8
net/vmw_vsock/virtio_transport_common.c
··· 1132 1132 1133 1133 lock_sock(sk); 1134 1134 1135 + /* Check if sk has been released before lock_sock */ 1136 + if (sk->sk_shutdown == SHUTDOWN_MASK) { 1137 + (void)virtio_transport_reset_no_sock(t, pkt); 1138 + release_sock(sk); 1139 + sock_put(sk); 1140 + goto free_pkt; 1141 + } 1142 + 1135 1143 /* Update CID in case it has changed after a transport reset event */ 1136 1144 vsk->local_addr.svm_cid = dst.svm_cid; 1137 1145
+1 -1
net/wireless/core.c
··· 142 142 if (result) 143 143 return result; 144 144 145 - if (rdev->wiphy.debugfsdir) 145 + if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir)) 146 146 debugfs_rename(rdev->wiphy.debugfsdir->d_parent, 147 147 rdev->wiphy.debugfsdir, 148 148 rdev->wiphy.debugfsdir->d_parent, newname);
+6 -2
net/xdp/xdp_umem.c
··· 341 341 { 342 342 bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; 343 343 u32 chunk_size = mr->chunk_size, headroom = mr->headroom; 344 + u64 npgs, addr = mr->addr, size = mr->len; 344 345 unsigned int chunks, chunks_per_page; 345 - u64 addr = mr->addr, size = mr->len; 346 346 int err; 347 347 348 348 if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { ··· 372 372 if ((addr + size) < addr) 373 373 return -EINVAL; 374 374 375 + npgs = div_u64(size, PAGE_SIZE); 376 + if (npgs > U32_MAX) 377 + return -EINVAL; 378 + 375 379 chunks = (unsigned int)div_u64(size, chunk_size); 376 380 if (chunks == 0) 377 381 return -EINVAL; ··· 395 391 umem->size = size; 396 392 umem->headroom = headroom; 397 393 umem->chunk_size_nohr = chunk_size - headroom; 398 - umem->npgs = size / PAGE_SIZE; 394 + umem->npgs = (u32)npgs; 399 395 umem->pgs = NULL; 400 396 umem->user = NULL; 401 397 umem->flags = mr->flags;
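The umem fix computes the page count in 64 bits and rejects anything above U32_MAX before storing it in the 32-bit npgs field, instead of letting size / PAGE_SIZE truncate silently. What that check prevents, in a standalone C sketch (the sizes are made up):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL

    int main(void)
    {
        uint64_t size = (1ULL << 45);       /* a huge (hypothetical) UMEM */
        uint64_t npgs = size / PAGE_SIZE;   /* 2^33 pages */
        uint32_t truncated = (uint32_t)npgs;/* what an unchecked store keeps */

        printf("npgs=%" PRIu64 " truncated=%" PRIu32 "\n", npgs, truncated);

        if (npgs > UINT32_MAX)              /* the check added above */
            printf("rejected: does not fit a 32-bit page count\n");
        return 0;
    }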
+2
net/xfrm/espintcp.c
··· 379 379 { 380 380 struct espintcp_ctx *ctx = espintcp_getctx(sk); 381 381 382 + ctx->saved_destruct(sk); 382 383 kfree(ctx); 383 384 } 384 385 ··· 420 419 sk->sk_socket->ops = &espintcp_ops; 421 420 ctx->saved_data_ready = sk->sk_data_ready; 422 421 ctx->saved_write_space = sk->sk_write_space; 422 + ctx->saved_destruct = sk->sk_destruct; 423 423 sk->sk_data_ready = espintcp_data_ready; 424 424 sk->sk_write_space = espintcp_write_space; 425 425 sk->sk_destruct = espintcp_destruct;
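The espintcp fix remembers the socket's original sk_destruct and calls it from espintcp_destruct() before freeing the context, so the destructor it replaced still runs: the usual "save and chain the callback you override" pattern. A plain C sketch of the same idea (illustrative struct and names):

    #include <stdio.h>

    struct sock_like {
        void (*destruct)(struct sock_like *);
    };

    static void (*saved_destruct)(struct sock_like *);

    static void original_destruct(struct sock_like *sk)
    {
        (void)sk;
        printf("original destructor ran\n");
    }

    static void wrapped_destruct(struct sock_like *sk)
    {
        saved_destruct(sk);                 /* keep the old behaviour */
        printf("extra cleanup ran\n");      /* then release our own state */
    }

    int main(void)
    {
        struct sock_like sk = { .destruct = original_destruct };

        saved_destruct = sk.destruct;       /* remember before overriding */
        sk.destruct = wrapped_destruct;

        sk.destruct(&sk);
        return 0;
    }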
+3 -5
net/xfrm/xfrm_device.c
··· 25 25 struct xfrm_offload *xo = xfrm_offload(skb); 26 26 27 27 skb_reset_mac_len(skb); 28 - pskb_pull(skb, skb->mac_len + hsize + x->props.header_len); 29 - 30 - if (xo->flags & XFRM_GSO_SEGMENT) { 31 - skb_reset_transport_header(skb); 28 + if (xo->flags & XFRM_GSO_SEGMENT) 32 29 skb->transport_header -= x->props.header_len; 33 - } 30 + 31 + pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len); 34 32 } 35 33 36 34 static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
+1 -1
net/xfrm/xfrm_input.c
··· 644 644 dev_put(skb->dev); 645 645 646 646 spin_lock(&x->lock); 647 - if (nexthdr <= 0) { 647 + if (nexthdr < 0) { 648 648 if (nexthdr == -EBADMSG) { 649 649 xfrm_audit_state_icvfail(x, skb, 650 650 x->type->proto);
+21
net/xfrm/xfrm_interface.c
··· 750 750 .get_link_net = xfrmi_get_link_net, 751 751 }; 752 752 753 + static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list) 754 + { 755 + struct net *net; 756 + LIST_HEAD(list); 757 + 758 + rtnl_lock(); 759 + list_for_each_entry(net, net_exit_list, exit_list) { 760 + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); 761 + struct xfrm_if __rcu **xip; 762 + struct xfrm_if *xi; 763 + 764 + for (xip = &xfrmn->xfrmi[0]; 765 + (xi = rtnl_dereference(*xip)) != NULL; 766 + xip = &xi->next) 767 + unregister_netdevice_queue(xi->dev, &list); 768 + } 769 + unregister_netdevice_many(&list); 770 + rtnl_unlock(); 771 + } 772 + 753 773 static struct pernet_operations xfrmi_net_ops = { 774 + .exit_batch = xfrmi_exit_batch_net, 754 775 .id = &xfrmi_net_id, 755 776 .size = sizeof(struct xfrmi_net), 756 777 };
+9 -6
net/xfrm/xfrm_output.c
··· 583 583 xfrm_state_hold(x); 584 584 585 585 if (skb_is_gso(skb)) { 586 - skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; 586 + if (skb->inner_protocol) 587 + return xfrm_output_gso(net, sk, skb); 587 588 588 - return xfrm_output2(net, sk, skb); 589 + skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; 590 + goto out; 589 591 } 590 592 591 593 if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) 592 594 goto out; 595 + } else { 596 + if (skb_is_gso(skb)) 597 + return xfrm_output_gso(net, sk, skb); 593 598 } 594 - 595 - if (skb_is_gso(skb)) 596 - return xfrm_output_gso(net, sk, skb); 597 599 598 600 if (skb->ip_summed == CHECKSUM_PARTIAL) { 599 601 err = skb_checksum_help(skb); ··· 642 640 643 641 if (skb->protocol == htons(ETH_P_IP)) 644 642 proto = AF_INET; 645 - else if (skb->protocol == htons(ETH_P_IPV6)) 643 + else if (skb->protocol == htons(ETH_P_IPV6) && 644 + skb->sk->sk_family == AF_INET6) 646 645 proto = AF_INET6; 647 646 else 648 647 return;
+1 -6
net/xfrm/xfrm_policy.c
··· 1436 1436 static bool xfrm_policy_mark_match(struct xfrm_policy *policy, 1437 1437 struct xfrm_policy *pol) 1438 1438 { 1439 - u32 mark = policy->mark.v & policy->mark.m; 1440 - 1441 - if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m) 1442 - return true; 1443 - 1444 - if ((mark & pol->mark.m) == pol->mark.v && 1439 + if (policy->mark.v == pol->mark.v && 1445 1440 policy->priority == pol->priority) 1446 1441 return true; 1447 1442
+32 -14
tools/testing/selftests/bpf/verifier/bounds.c
··· 238 238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 239 239 BPF_LD_MAP_FD(BPF_REG_1, 0), 240 240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 241 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 241 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 242 242 /* r1 = [0x00, 0xff] */ 243 243 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 244 244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), ··· 253 253 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 254 254 */ 255 255 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 256 - /* r1 = 0 or 257 - * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] 258 - */ 259 - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), 260 256 /* error on OOB pointer computation */ 261 257 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 262 258 /* exit */ ··· 261 265 }, 262 266 .fixup_map_hash_8b = { 3 }, 263 267 /* not actually fully unbounded, but the bound is very high */ 264 - .errstr = "value 72057594021150720 makes map_value pointer be out of bounds", 265 - .result = REJECT 268 + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root", 269 + .result_unpriv = REJECT, 270 + .errstr = "value -4294967168 makes map_value pointer be out of bounds", 271 + .result = REJECT, 266 272 }, 267 273 { 268 274 "bounds check after truncation of boundary-crossing range (2)", ··· 274 276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 275 277 BPF_LD_MAP_FD(BPF_REG_1, 0), 276 278 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 277 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 279 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 278 280 /* r1 = [0x00, 0xff] */ 279 281 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 280 282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), ··· 291 293 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 292 294 */ 293 295 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 294 - /* r1 = 0 or 295 - * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] 296 - */ 297 - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), 298 296 /* error on OOB pointer computation */ 299 297 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 300 298 /* exit */ ··· 299 305 }, 300 306 .fixup_map_hash_8b = { 3 }, 301 307 /* not actually fully unbounded, but the bound is very high */ 302 - .errstr = "value 72057594021150720 makes map_value pointer be out of bounds", 303 - .result = REJECT 308 + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root", 309 + .result_unpriv = REJECT, 310 + .errstr = "value -4294967168 makes map_value pointer be out of bounds", 311 + .result = REJECT, 304 312 }, 305 313 { 306 314 "bounds check after wrapping 32-bit addition", ··· 534 538 BPF_EXIT_INSN(), 535 539 }, 536 540 .result = ACCEPT 541 + }, 542 + { 543 + "assigning 32bit bounds to 64bit for wA = 0, wB = wA", 544 + .insns = { 545 + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, 546 + offsetof(struct __sk_buff, data_end)), 547 + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 548 + offsetof(struct __sk_buff, data)), 549 + BPF_MOV32_IMM(BPF_REG_9, 0), 550 + BPF_MOV32_REG(BPF_REG_2, BPF_REG_9), 551 + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 552 + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2), 553 + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 554 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), 555 + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1), 556 + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0), 557 + BPF_MOV64_IMM(BPF_REG_0, 0), 558 + BPF_EXIT_INSN(), 559 + }, 560 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 561 + .result = ACCEPT, 562 + .flags = 
F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 537 563 },
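The new "assigning 32bit bounds to 64bit" test exercises the verifier fix referenced in the merge summary: a 32-bit move zero-extends, so when the source's 32-bit bounds are known the destination's 64-bit bounds follow directly, and the subsequent bounded packet-pointer arithmetic is accepted. The zero-extension itself, shown in plain C:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t w = 0;                     /* wA = 0, exactly known */
        uint64_t r = w;                     /* a 32-bit move zero-extends */

        /* r is provably 0, so r + 8 is a small, bounded offset */
        printf("%" PRIu64 "\n", r + 8);
        return 0;
    }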
+21
tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
··· 1 + [ 2 + { 3 + "id": "83be", 4 + "name": "Create FQ-PIE with invalid number of flows", 5 + "category": [ 6 + "qdisc", 7 + "fq_pie" 8 + ], 9 + "setup": [ 10 + "$IP link add dev $DUMMY type dummy || /bin/true" 11 + ], 12 + "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536", 13 + "expExitCode": "2", 14 + "verifyCmd": "$TC qdisc show dev $DUMMY", 15 + "matchPattern": "qdisc", 16 + "matchCount": "0", 17 + "teardown": [ 18 + "$IP link del dev $DUMMY" 19 + ] 20 + } 21 + ]