Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:
"Another week, another set of bug fixes:

1) Fix pskb_pull length in __xfrm_transport_prep(), from Xin Long.

2) Fix double xfrm_state put in esp{4,6}_gro_receive(), also from Xin
Long.

3) Re-arm discovery timer properly in mac80211 mesh code, from Linus
Lüssing.

4) Prevent buffer overflows in nf_conntrack_pptp debug code, from
Pablo Neira Ayuso.

5) Fix race in ktls code between tls_sw_recvmsg() and
tls_decrypt_done(), from Vinay Kumar Yadav.

6) Fix crashes on TCP fallback in MPTCP code, from Paolo Abeni.

7) More validation is necessary of untrusted GSO packets coming from
virtualization devices, from Willem de Bruijn.

8) Fix endianness of bnxt_en firmware message length accesses, from
Edwin Peer.

9) Fix infinite loop in sch_fq_pie, from Davide Caratti.

10) Fix lockdep splat in DSA by setting lockless TX in netdev features
for slave ports, from Vladimir Oltean.

11) Fix suspend/resume crashes in mlx5, from Mark Bloch.

12) Fix use after free in bpf fmod_ret, from Alexei Starovoitov.

13) ARP retransmit timer guard uses wrong offset, from Hongbin Liu.

14) Fix leak in inetdev_init(), from Yang Yingliang.

15) Don't try to use inet hash and unhash in l2tp code, results in
crashes. From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (77 commits)
l2tp: add sk_family checks to l2tp_validate_socket
l2tp: do not use inet_hash()/inet_unhash()
net: qrtr: Allocate workqueue before kernel_bind
mptcp: remove msk from the token container at destruction time.
mptcp: fix race between MP_JOIN and close
mptcp: fix unblocking connect()
net/sched: act_ct: add nat mangle action only for NAT-conntrack
devinet: fix memleak in inetdev_init()
virtio_vsock: Fix race condition in virtio_transport_recv_pkt
drivers/net/ibmvnic: Update VNIC protocol version reporting
NFC: st21nfca: add missed kfree_skb() in an error path
neigh: fix ARP retransmit timer guard
bpf, selftests: Add a verifier test for assigning 32bit reg states to 64bit ones
bpf, selftests: Verifier bounds tests need to be updated
bpf: Fix a verifier issue when assigning 32bit reg states to 64bit ones
bpf: Fix use-after-free in fmod_ret check
net/mlx5e: replace EINVAL in mlx5e_flower_parse_meta()
net/mlx5e: Fix MLX5_TC_CT dependencies
net/mlx5e: Properly set default values when disabling adaptive moderation
net/mlx5e: Fix arch depending casting issue in FEC
...

+806 -337
+1
arch/powerpc/Kconfig
··· 126 select ARCH_HAS_MMIOWB if PPC64 127 select ARCH_HAS_PHYS_TO_DMA 128 select ARCH_HAS_PMEM_API 129 select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 130 select ARCH_HAS_PTE_SPECIAL 131 select ARCH_HAS_MEMBARRIER_CALLBACKS
··· 126 select ARCH_HAS_MMIOWB if PPC64 127 select ARCH_HAS_PHYS_TO_DMA 128 select ARCH_HAS_PMEM_API 129 + select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE 130 select ARCH_HAS_PTE_DEVMAP if PPC_BOOK3S_64 131 select ARCH_HAS_PTE_SPECIAL 132 select ARCH_HAS_MEMBARRIER_CALLBACKS
+1 -1
drivers/crypto/chelsio/chtls/chtls_io.c
··· 682 make_tx_data_wr(sk, skb, immdlen, len, 683 credits_needed, completion); 684 tp->snd_nxt += len; 685 - tp->lsndtime = tcp_time_stamp(tp); 686 if (completion) 687 ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; 688 } else {
··· 682 make_tx_data_wr(sk, skb, immdlen, len, 683 credits_needed, completion); 684 tp->snd_nxt += len; 685 + tp->lsndtime = tcp_jiffies32; 686 if (completion) 687 ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; 688 } else {
+3 -1
drivers/net/bonding/bond_sysfs_slave.c
··· 149 150 err = kobject_init_and_add(&slave->kobj, &slave_ktype, 151 &(slave->dev->dev.kobj), "bonding_slave"); 152 - if (err) 153 return err; 154 155 for (a = slave_attrs; *a; ++a) { 156 err = sysfs_create_file(&slave->kobj, &((*a)->attr));
··· 149 150 err = kobject_init_and_add(&slave->kobj, &slave_ktype, 151 &(slave->dev->dev.kobj), "bonding_slave"); 152 + if (err) { 153 + kobject_put(&slave->kobj); 154 return err; 155 + } 156 157 for (a = slave_attrs; *a; ++a) { 158 err = sysfs_create_file(&slave->kobj, &((*a)->attr));
+6 -2
drivers/net/dsa/ocelot/felix.c
··· 102 const struct switchdev_obj_port_vlan *vlan) 103 { 104 struct ocelot *ocelot = ds->priv; 105 u16 vid; 106 int err; 107 108 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 109 err = ocelot_vlan_add(ocelot, port, vid, 110 - vlan->flags & BRIDGE_VLAN_INFO_PVID, 111 - vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED); 112 if (err) { 113 dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", 114 vid, port, err);
··· 102 const struct switchdev_obj_port_vlan *vlan) 103 { 104 struct ocelot *ocelot = ds->priv; 105 + u16 flags = vlan->flags; 106 u16 vid; 107 int err; 108 109 + if (dsa_is_cpu_port(ds, port)) 110 + flags &= ~BRIDGE_VLAN_INFO_UNTAGGED; 111 + 112 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { 113 err = ocelot_vlan_add(ocelot, port, vid, 114 + flags & BRIDGE_VLAN_INFO_PVID, 115 + flags & BRIDGE_VLAN_INFO_UNTAGGED); 116 if (err) { 117 dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n", 118 vid, port, err);
+5 -11
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 4176 int i, intr_process, rc, tmo_count; 4177 struct input *req = msg; 4178 u32 *data = msg; 4179 - __le32 *resp_len; 4180 u8 *valid; 4181 u16 cp_ring_id, len = 0; 4182 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4183 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4184 struct hwrm_short_input short_input = {0}; 4185 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4186 - u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; 4187 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4188 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4189 ··· 4199 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4200 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4201 resp = bp->hwrm_cmd_kong_resp_addr; 4202 - resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; 4203 } 4204 4205 memset(resp, 0, PAGE_SIZE); ··· 4267 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4268 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4269 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4270 - resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); 4271 4272 if (intr_process) { 4273 u16 seq_id = bp->hwrm_intr_seq_id; ··· 4294 le16_to_cpu(req->req_type)); 4295 return -EBUSY; 4296 } 4297 - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4298 - HWRM_RESP_LEN_SFT; 4299 - valid = resp_addr + len - 1; 4300 } else { 4301 int j; 4302 ··· 4306 */ 4307 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4308 return -EBUSY; 4309 - len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> 4310 - HWRM_RESP_LEN_SFT; 4311 if (len) 4312 break; 4313 /* on first few passes, just barely sleep */ ··· 4328 } 4329 4330 /* Last byte of resp contains valid bit */ 4331 - valid = resp_addr + len - 1; 4332 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4333 /* make sure we read from updated DMA memory */ 4334 dma_rmb(); ··· 9304 bnxt_free_skbs(bp); 9305 9306 /* Save ring stats before shutdown */ 9307 - if (bp->bnapi) 9308 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 9309 if (irq_re_init) { 9310 bnxt_free_irq(bp);
··· 4176 int i, intr_process, rc, tmo_count; 4177 struct input *req = msg; 4178 u32 *data = msg; 4179 u8 *valid; 4180 u16 cp_ring_id, len = 0; 4181 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; 4182 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; 4183 struct hwrm_short_input short_input = {0}; 4184 u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; 4185 u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; 4186 u16 dst = BNXT_HWRM_CHNL_CHIMP; 4187 ··· 4201 bar_offset = BNXT_GRCPF_REG_KONG_COMM; 4202 doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; 4203 resp = bp->hwrm_cmd_kong_resp_addr; 4204 } 4205 4206 memset(resp, 0, PAGE_SIZE); ··· 4270 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; 4271 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; 4272 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); 4273 4274 if (intr_process) { 4275 u16 seq_id = bp->hwrm_intr_seq_id; ··· 4298 le16_to_cpu(req->req_type)); 4299 return -EBUSY; 4300 } 4301 + len = le16_to_cpu(resp->resp_len); 4302 + valid = ((u8 *)resp) + len - 1; 4303 } else { 4304 int j; 4305 ··· 4311 */ 4312 if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) 4313 return -EBUSY; 4314 + len = le16_to_cpu(resp->resp_len); 4315 if (len) 4316 break; 4317 /* on first few passes, just barely sleep */ ··· 4334 } 4335 4336 /* Last byte of resp contains valid bit */ 4337 + valid = ((u8 *)resp) + len - 1; 4338 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { 4339 /* make sure we read from updated DMA memory */ 4340 dma_rmb(); ··· 9310 bnxt_free_skbs(bp); 9311 9312 /* Save ring stats before shutdown */ 9313 + if (bp->bnapi && irq_re_init) 9314 bnxt_get_ring_stats(bp, &bp->net_stats_prev); 9315 if (irq_re_init) { 9316 bnxt_free_irq(bp);
-5
drivers/net/ethernet/broadcom/bnxt/bnxt.h
··· 656 #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) 657 #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) 658 #define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) 659 - #define HWRM_RESP_ERR_CODE_MASK 0xffff 660 - #define HWRM_RESP_LEN_OFFSET 4 661 - #define HWRM_RESP_LEN_MASK 0xffff0000 662 - #define HWRM_RESP_LEN_SFT 16 663 - #define HWRM_RESP_VALID_MASK 0xff000000 664 #define BNXT_HWRM_REQ_MAX_SIZE 128 665 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ 666 BNXT_HWRM_REQ_MAX_SIZE)
··· 656 #define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) 657 #define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) 658 #define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) 659 #define BNXT_HWRM_REQ_MAX_SIZE 128 660 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ 661 BNXT_HWRM_REQ_MAX_SIZE)
+5 -4
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
··· 2012 2013 bnxt_hwrm_fw_set_time(bp); 2014 2015 - if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2016 - BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2017 - &index, &item_len, NULL) != 0) { 2018 netdev_err(dev, "PKG update area not created in nvram\n"); 2019 - return -ENOBUFS; 2020 } 2021 2022 rc = request_firmware(&fw, filename, &dev->dev);
··· 2012 2013 bnxt_hwrm_fw_set_time(bp); 2014 2015 + rc = bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE, 2016 + BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 2017 + &index, &item_len, NULL); 2018 + if (rc) { 2019 netdev_err(dev, "PKG update area not created in nvram\n"); 2020 + return rc; 2021 } 2022 2023 rc = request_firmware(&fw, filename, &dev->dev);
+1 -1
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 2914 } 2915 2916 /* Do this here, so we can be verbose early */ 2917 - SET_NETDEV_DEV(net_dev, dev); 2918 dev_set_drvdata(dev, net_dev); 2919 2920 priv = netdev_priv(net_dev);
··· 2914 } 2915 2916 /* Do this here, so we can be verbose early */ 2917 + SET_NETDEV_DEV(net_dev, dev->parent); 2918 dev_set_drvdata(dev, net_dev); 2919 2920 priv = netdev_priv(net_dev);
+3 -5
drivers/net/ethernet/ibm/ibmvnic.c
··· 4678 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 4679 break; 4680 } 4681 - dev_info(dev, "Partner protocol version is %d\n", 4682 - crq->version_exchange_rsp.version); 4683 - if (be16_to_cpu(crq->version_exchange_rsp.version) < 4684 - ibmvnic_version) 4685 - ibmvnic_version = 4686 be16_to_cpu(crq->version_exchange_rsp.version); 4687 send_cap_queries(adapter); 4688 break; 4689 case QUERY_CAPABILITY_RSP:
··· 4678 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc); 4679 break; 4680 } 4681 + ibmvnic_version = 4682 be16_to_cpu(crq->version_exchange_rsp.version); 4683 + dev_info(dev, "Partner protocol version is %d\n", 4684 + ibmvnic_version); 4685 send_cap_queries(adapter); 4686 break; 4687 case QUERY_CAPABILITY_RSP:
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 80 81 config MLX5_TC_CT 82 bool "MLX5 TC connection tracking offload support" 83 - depends on MLX5_CORE_EN && NET_SWITCHDEV && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT 84 default y 85 help 86 Say Y here if you want to support offloading connection tracking rules
··· 80 81 config MLX5_TC_CT 82 bool "MLX5 TC connection tracking offload support" 83 + depends on MLX5_ESWITCH && NF_FLOW_TABLE && NET_ACT_CT && NET_TC_SKB_EXT 84 default y 85 help 86 Say Y here if you want to support offloading connection tracking rules
+6 -4
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 1068 1069 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, 1070 int num_channels); 1071 - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, 1072 - u8 cq_period_mode); 1073 - void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, 1074 - u8 cq_period_mode); 1075 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); 1076 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, 1077 struct mlx5e_params *params);
··· 1068 1069 void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, 1070 int num_channels); 1071 + 1072 + void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode); 1073 + void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode); 1074 + void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); 1075 + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode); 1076 + 1077 void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params); 1078 void mlx5e_init_rq_type_params(struct mlx5_core_dev *mdev, 1079 struct mlx5e_params *params);
+13 -11
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
··· 369 *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ 370 } while (0) 371 372 - #define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ 373 - do { \ 374 - u16 *__policy = &(policy); \ 375 - bool _write = (write); \ 376 - \ 377 - if (_write && *__policy) \ 378 - *__policy = find_first_bit((u_long *)__policy, \ 379 - sizeof(u16) * BITS_PER_BYTE);\ 380 - MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ 381 - if (!_write && *__policy) \ 382 - *__policy = 1 << *__policy; \ 383 } while (0) 384 385 /* get/set FEC admin field for a given speed */
··· 369 *_policy = MLX5_GET(pplm_reg, _buf, fec_override_admin_##link); \ 370 } while (0) 371 372 + #define MLX5E_FEC_OVERRIDE_ADMIN_50G_POLICY(buf, policy, write, link) \ 373 + do { \ 374 + unsigned long policy_long; \ 375 + u16 *__policy = &(policy); \ 376 + bool _write = (write); \ 377 + \ 378 + policy_long = *__policy; \ 379 + if (_write && *__policy) \ 380 + *__policy = find_first_bit(&policy_long, \ 381 + sizeof(policy_long) * BITS_PER_BYTE);\ 382 + MLX5E_FEC_OVERRIDE_ADMIN_POLICY(buf, *__policy, _write, link); \ 383 + if (!_write && *__policy) \ 384 + *__policy = 1 << *__policy; \ 385 } while (0) 386 387 /* get/set FEC admin field for a given speed */
+28 -13
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 527 struct dim_cq_moder *rx_moder, *tx_moder; 528 struct mlx5_core_dev *mdev = priv->mdev; 529 struct mlx5e_channels new_channels = {}; 530 int err = 0; 531 - bool reset; 532 533 if (!MLX5_CAP_GEN(mdev, cq_moderation)) 534 return -EOPNOTSUPP; ··· 566 } 567 /* we are opened */ 568 569 - reset = (!!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled) || 570 - (!!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled); 571 572 - if (!reset) { 573 mlx5e_set_priv_channels_coalesce(priv, coal); 574 priv->channels.params = new_channels.params; 575 goto out; 576 } 577 578 err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); ··· 678 static int get_fec_supported_advertised(struct mlx5_core_dev *dev, 679 struct ethtool_link_ksettings *link_ksettings) 680 { 681 - u_long active_fec = 0; 682 u32 bitn; 683 int err; 684 685 - err = mlx5e_get_fec_mode(dev, (u32 *)&active_fec, NULL); 686 if (err) 687 return (err == -EOPNOTSUPP) ? 0 : err; 688 ··· 696 MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1, 697 ETHTOOL_LINK_MODE_FEC_LLRS_BIT); 698 699 /* active fec is a bit set, find out which bit is set and 700 * advertise the corresponding ethtool bit 701 */ 702 - bitn = find_first_bit(&active_fec, sizeof(u32) * BITS_PER_BYTE); 703 if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes)) 704 __set_bit(pplm_fec_2_ethtool_linkmodes[bitn], 705 link_ksettings->link_modes.advertising); ··· 1532 { 1533 struct mlx5e_priv *priv = netdev_priv(netdev); 1534 struct mlx5_core_dev *mdev = priv->mdev; 1535 - u16 fec_configured = 0; 1536 - u32 fec_active = 0; 1537 int err; 1538 1539 err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured); ··· 1541 if (err) 1542 return err; 1543 1544 - fecparam->active_fec = pplm2ethtool_fec((u_long)fec_active, 1545 - sizeof(u32) * BITS_PER_BYTE); 1546 1547 if (!fecparam->active_fec) 1548 return -EOPNOTSUPP; 1549 1550 - fecparam->fec = pplm2ethtool_fec((u_long)fec_configured, 1551 - sizeof(u16) * BITS_PER_BYTE); 1552 1553 return 0; 1554 }
··· 527 struct dim_cq_moder *rx_moder, *tx_moder; 528 struct mlx5_core_dev *mdev = priv->mdev; 529 struct mlx5e_channels new_channels = {}; 530 + bool reset_rx, reset_tx; 531 int err = 0; 532 533 if (!MLX5_CAP_GEN(mdev, cq_moderation)) 534 return -EOPNOTSUPP; ··· 566 } 567 /* we are opened */ 568 569 + reset_rx = !!coal->use_adaptive_rx_coalesce != priv->channels.params.rx_dim_enabled; 570 + reset_tx = !!coal->use_adaptive_tx_coalesce != priv->channels.params.tx_dim_enabled; 571 572 + if (!reset_rx && !reset_tx) { 573 mlx5e_set_priv_channels_coalesce(priv, coal); 574 priv->channels.params = new_channels.params; 575 goto out; 576 + } 577 + 578 + if (reset_rx) { 579 + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, 580 + MLX5E_PFLAG_RX_CQE_BASED_MODER); 581 + 582 + mlx5e_reset_rx_moderation(&new_channels.params, mode); 583 + } 584 + if (reset_tx) { 585 + u8 mode = MLX5E_GET_PFLAG(&new_channels.params, 586 + MLX5E_PFLAG_TX_CQE_BASED_MODER); 587 + 588 + mlx5e_reset_tx_moderation(&new_channels.params, mode); 589 } 590 591 err = mlx5e_safe_switch_channels(priv, &new_channels, NULL, NULL); ··· 665 static int get_fec_supported_advertised(struct mlx5_core_dev *dev, 666 struct ethtool_link_ksettings *link_ksettings) 667 { 668 + unsigned long active_fec_long; 669 + u32 active_fec; 670 u32 bitn; 671 int err; 672 673 + err = mlx5e_get_fec_mode(dev, &active_fec, NULL); 674 if (err) 675 return (err == -EOPNOTSUPP) ? 0 : err; 676 ··· 682 MLX5E_ADVERTISE_SUPPORTED_FEC(MLX5E_FEC_LLRS_272_257_1, 683 ETHTOOL_LINK_MODE_FEC_LLRS_BIT); 684 685 + active_fec_long = active_fec; 686 /* active fec is a bit set, find out which bit is set and 687 * advertise the corresponding ethtool bit 688 */ 689 + bitn = find_first_bit(&active_fec_long, sizeof(active_fec_long) * BITS_PER_BYTE); 690 if (bitn < ARRAY_SIZE(pplm_fec_2_ethtool_linkmodes)) 691 __set_bit(pplm_fec_2_ethtool_linkmodes[bitn], 692 link_ksettings->link_modes.advertising); ··· 1517 { 1518 struct mlx5e_priv *priv = netdev_priv(netdev); 1519 struct mlx5_core_dev *mdev = priv->mdev; 1520 + u16 fec_configured; 1521 + u32 fec_active; 1522 int err; 1523 1524 err = mlx5e_get_fec_mode(mdev, &fec_active, &fec_configured); ··· 1526 if (err) 1527 return err; 1528 1529 + fecparam->active_fec = pplm2ethtool_fec((unsigned long)fec_active, 1530 + sizeof(unsigned long) * BITS_PER_BYTE); 1531 1532 if (!fecparam->active_fec) 1533 return -EOPNOTSUPP; 1534 1535 + fecparam->fec = pplm2ethtool_fec((unsigned long)fec_configured, 1536 + sizeof(unsigned long) * BITS_PER_BYTE); 1537 1538 return 0; 1539 }
+14 -6
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4716 DIM_CQ_PERIOD_MODE_START_FROM_EQE; 4717 } 4718 4719 - void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4720 { 4721 if (params->tx_dim_enabled) { 4722 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); ··· 4725 } else { 4726 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); 4727 } 4728 - 4729 - MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 4730 - params->tx_cq_moderation.cq_period_mode == 4731 - MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 4732 } 4733 4734 - void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4735 { 4736 if (params->rx_dim_enabled) { 4737 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); ··· 4736 } else { 4737 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); 4738 } 4739 4740 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, 4741 params->rx_cq_moderation.cq_period_mode == 4742 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
··· 4716 DIM_CQ_PERIOD_MODE_START_FROM_EQE; 4717 } 4718 4719 + void mlx5e_reset_tx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 4720 { 4721 if (params->tx_dim_enabled) { 4722 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); ··· 4725 } else { 4726 params->tx_cq_moderation = mlx5e_get_def_tx_moderation(cq_period_mode); 4727 } 4728 } 4729 4730 + void mlx5e_reset_rx_moderation(struct mlx5e_params *params, u8 cq_period_mode) 4731 { 4732 if (params->rx_dim_enabled) { 4733 u8 dim_period_mode = mlx5_to_net_dim_cq_period_mode(cq_period_mode); ··· 4740 } else { 4741 params->rx_cq_moderation = mlx5e_get_def_rx_moderation(cq_period_mode); 4742 } 4743 + } 4744 4745 + void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4746 + { 4747 + mlx5e_reset_tx_moderation(params, cq_period_mode); 4748 + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER, 4749 + params->tx_cq_moderation.cq_period_mode == 4750 + MLX5_CQ_PERIOD_MODE_START_FROM_CQE); 4751 + } 4752 + 4753 + void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode) 4754 + { 4755 + mlx5e_reset_rx_moderation(params, cq_period_mode); 4756 MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER, 4757 params->rx_cq_moderation.cq_period_mode == 4758 MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+4 -8
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 2068 flow_rule_match_meta(rule, &match); 2069 if (match.mask->ingress_ifindex != 0xFFFFFFFF) { 2070 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); 2071 - return -EINVAL; 2072 } 2073 2074 ingress_dev = __dev_get_by_index(dev_net(filter_dev), ··· 2076 if (!ingress_dev) { 2077 NL_SET_ERR_MSG_MOD(extack, 2078 "Can't find the ingress port to match on"); 2079 - return -EINVAL; 2080 } 2081 2082 if (ingress_dev != filter_dev) { 2083 NL_SET_ERR_MSG_MOD(extack, 2084 "Can't match on the ingress filter port"); 2085 - return -EINVAL; 2086 } 2087 2088 return 0; ··· 3849 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { 3850 NL_SET_ERR_MSG_MOD(extack, 3851 "devices are not on same switch HW, can't offload forwarding"); 3852 - netdev_warn(priv->netdev, 3853 - "devices %s %s not on same switch HW, can't offload forwarding\n", 3854 - priv->netdev->name, 3855 - out_dev->name); 3856 return -EOPNOTSUPP; 3857 } 3858 ··· 4610 dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; 4611 dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; 4612 rpriv->prev_vf_vport_stats = cur_stats; 4613 - flow_stats_update(&ma->stats, dpkts, dbytes, jiffies, 4614 FLOW_ACTION_HW_STATS_DELAYED); 4615 } 4616
··· 2068 flow_rule_match_meta(rule, &match); 2069 if (match.mask->ingress_ifindex != 0xFFFFFFFF) { 2070 NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask"); 2071 + return -EOPNOTSUPP; 2072 } 2073 2074 ingress_dev = __dev_get_by_index(dev_net(filter_dev), ··· 2076 if (!ingress_dev) { 2077 NL_SET_ERR_MSG_MOD(extack, 2078 "Can't find the ingress port to match on"); 2079 + return -ENOENT; 2080 } 2081 2082 if (ingress_dev != filter_dev) { 2083 NL_SET_ERR_MSG_MOD(extack, 2084 "Can't match on the ingress filter port"); 2085 + return -EOPNOTSUPP; 2086 } 2087 2088 return 0; ··· 3849 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) { 3850 NL_SET_ERR_MSG_MOD(extack, 3851 "devices are not on same switch HW, can't offload forwarding"); 3852 return -EOPNOTSUPP; 3853 } 3854 ··· 4614 dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets; 4615 dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes; 4616 rpriv->prev_vf_vport_stats = cur_stats; 4617 + flow_stats_update(&ma->stats, dbytes, dpkts, jiffies, 4618 FLOW_ACTION_HW_STATS_DELAYED); 4619 } 4620
+18
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 1549 mlx5_pci_disable_device(dev); 1550 } 1551 1552 static const struct pci_device_id mlx5_core_pci_table[] = { 1553 { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, 1554 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ ··· 1608 .id_table = mlx5_core_pci_table, 1609 .probe = init_one, 1610 .remove = remove_one, 1611 .shutdown = shutdown, 1612 .err_handler = &mlx5_err_handler, 1613 .sriov_configure = mlx5_core_sriov_configure,
··· 1549 mlx5_pci_disable_device(dev); 1550 } 1551 1552 + static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state) 1553 + { 1554 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1555 + 1556 + mlx5_unload_one(dev, false); 1557 + 1558 + return 0; 1559 + } 1560 + 1561 + static int mlx5_resume(struct pci_dev *pdev) 1562 + { 1563 + struct mlx5_core_dev *dev = pci_get_drvdata(pdev); 1564 + 1565 + return mlx5_load_one(dev, false); 1566 + } 1567 + 1568 static const struct pci_device_id mlx5_core_pci_table[] = { 1569 { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) }, 1570 { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */ ··· 1592 .id_table = mlx5_core_pci_table, 1593 .probe = init_one, 1594 .remove = remove_one, 1595 + .suspend = mlx5_suspend, 1596 + .resume = mlx5_resume, 1597 .shutdown = shutdown, 1598 .err_handler = &mlx5_err_handler, 1599 .sriov_configure = mlx5_core_sriov_configure,
+2 -1
drivers/net/ethernet/netronome/nfp/flower/offload.c
··· 1440 ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); 1441 priv->stats[ctx_id].pkts += pkts; 1442 priv->stats[ctx_id].bytes += bytes; 1443 - max_t(u64, priv->stats[ctx_id].used, used); 1444 } 1445 } 1446
··· 1440 ctx_id = be32_to_cpu(sub_flow->meta.host_ctx_id); 1441 priv->stats[ctx_id].pkts += pkts; 1442 priv->stats[ctx_id].bytes += bytes; 1443 + priv->stats[ctx_id].used = max_t(u64, used, 1444 + priv->stats[ctx_id].used); 1445 } 1446 } 1447
+3 -1
drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
··· 3651 ahw->diag_cnt = 0; 3652 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); 3653 if (ret) 3654 - goto fail_diag_irq; 3655 3656 if (adapter->flags & QLCNIC_MSIX_ENABLED) 3657 intrpt_id = ahw->intr_tbl[0].id; ··· 3681 3682 done: 3683 qlcnic_free_mbx_args(&cmd); 3684 qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); 3685 3686 fail_diag_irq:
··· 3651 ahw->diag_cnt = 0; 3652 ret = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_INTRPT_TEST); 3653 if (ret) 3654 + goto fail_mbx_args; 3655 3656 if (adapter->flags & QLCNIC_MSIX_ENABLED) 3657 intrpt_id = ahw->intr_tbl[0].id; ··· 3681 3682 done: 3683 qlcnic_free_mbx_args(&cmd); 3684 + 3685 + fail_mbx_args: 3686 qlcnic_83xx_diag_free_res(netdev, drv_sds_rings); 3687 3688 fail_diag_irq:
+2 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
··· 630 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 631 ptp_v2 = PTP_TCR_TSVER2ENA; 632 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 633 - ts_event_en = PTP_TCR_TSEVNTENA; 634 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 635 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 636 ptp_over_ethernet = PTP_TCR_TSIPENA;
··· 630 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 631 ptp_v2 = PTP_TCR_TSVER2ENA; 632 snap_type_sel = PTP_TCR_SNAPTYPSEL_1; 633 + if (priv->synopsys_id != DWMAC_CORE_5_10) 634 + ts_event_en = PTP_TCR_TSEVNTENA; 635 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA; 636 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA; 637 ptp_over_ethernet = PTP_TCR_TSIPENA;
+1
drivers/net/usb/qmi_wwan.c
··· 1324 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1325 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1326 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1327 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1328 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ 1329 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
··· 1324 {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)}, /* Alcatel L800MA */ 1325 {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */ 1326 {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ 1327 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ 1328 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ 1329 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ 1330 {QMI_FIXED_INTF(0x1bc7, 0x1100, 3)}, /* Telit ME910 */
+3 -1
drivers/nfc/st21nfca/dep.c
··· 173 memcpy(atr_res->gbi, atr_req->gbi, gb_len); 174 r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, 175 gb_len); 176 - if (r < 0) 177 return r; 178 } 179 180 info->dep_info.curr_nfc_dep_pni = 0;
··· 173 memcpy(atr_res->gbi, atr_req->gbi, gb_len); 174 r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, 175 gb_len); 176 + if (r < 0) { 177 + kfree_skb(skb); 178 return r; 179 + } 180 } 181 182 info->dep_info.curr_nfc_dep_pni = 0;
+1 -1
include/linux/ieee80211.h
··· 2047 } 2048 2049 /* HE Operation defines */ 2050 - #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000003 2051 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 2052 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 2053 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
··· 2047 } 2048 2049 /* HE Operation defines */ 2050 + #define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x00000007 2051 #define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000008 2052 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 2053 #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4
+1 -1
include/linux/netfilter/nf_conntrack_pptp.h
··· 10 #include <net/netfilter/nf_conntrack_expect.h> 11 #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> 12 13 - extern const char *const pptp_msg_name[]; 14 15 /* state of the control session */ 16 enum pptp_ctrlsess_state {
··· 10 #include <net/netfilter/nf_conntrack_expect.h> 11 #include <uapi/linux/netfilter/nf_conntrack_tuple_common.h> 12 13 + const char *pptp_msg_name(u_int16_t msg); 14 15 /* state of the control session */ 16 enum pptp_ctrlsess_state {
+18 -7
include/linux/virtio_net.h
··· 31 { 32 unsigned int gso_type = 0; 33 unsigned int thlen = 0; 34 unsigned int ip_proto; 35 36 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { ··· 69 if (!skb_partial_csum_set(skb, start, off)) 70 return -EINVAL; 71 72 - if (skb_transport_offset(skb) + thlen > skb_headlen(skb)) 73 return -EINVAL; 74 } else { 75 /* gso packets without NEEDS_CSUM do not set transport_offset. ··· 94 return -EINVAL; 95 } 96 97 - if (keys.control.thoff + thlen > skb_headlen(skb) || 98 keys.basic.ip_proto != ip_proto) 99 return -EINVAL; 100 101 skb_set_transport_header(skb, keys.control.thoff); 102 } 103 } 104 105 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 106 u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); 107 108 - skb_shinfo(skb)->gso_size = gso_size; 109 - skb_shinfo(skb)->gso_type = gso_type; 110 111 - /* Header must be checked, and gso_segs computed. */ 112 - skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; 113 - skb_shinfo(skb)->gso_segs = 0; 114 } 115 116 return 0;
··· 31 { 32 unsigned int gso_type = 0; 33 unsigned int thlen = 0; 34 + unsigned int p_off = 0; 35 unsigned int ip_proto; 36 37 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { ··· 68 if (!skb_partial_csum_set(skb, start, off)) 69 return -EINVAL; 70 71 + p_off = skb_transport_offset(skb) + thlen; 72 + if (p_off > skb_headlen(skb)) 73 return -EINVAL; 74 } else { 75 /* gso packets without NEEDS_CSUM do not set transport_offset. ··· 92 return -EINVAL; 93 } 94 95 + p_off = keys.control.thoff + thlen; 96 + if (p_off > skb_headlen(skb) || 97 keys.basic.ip_proto != ip_proto) 98 return -EINVAL; 99 100 skb_set_transport_header(skb, keys.control.thoff); 101 + } else if (gso_type) { 102 + p_off = thlen; 103 + if (p_off > skb_headlen(skb)) 104 + return -EINVAL; 105 } 106 } 107 108 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { 109 u16 gso_size = __virtio16_to_cpu(little_endian, hdr->gso_size); 110 + struct skb_shared_info *shinfo = skb_shinfo(skb); 111 112 + /* Too small packets are not really GSO ones. */ 113 + if (skb->len - p_off > gso_size) { 114 + shinfo->gso_size = gso_size; 115 + shinfo->gso_type = gso_type; 116 117 + /* Header must be checked, and gso_segs computed. */ 118 + shinfo->gso_type |= SKB_GSO_DODGY; 119 + shinfo->gso_segs = 0; 120 + } 121 } 122 123 return 0;
+1
include/net/espintcp.h
··· 25 struct espintcp_msg partial; 26 void (*saved_data_ready)(struct sock *sk); 27 void (*saved_write_space)(struct sock *sk); 28 struct work_struct work; 29 bool tx_running; 30 };
··· 25 struct espintcp_msg partial; 26 void (*saved_data_ready)(struct sock *sk); 27 void (*saved_write_space)(struct sock *sk); 28 + void (*saved_destruct)(struct sock *sk); 29 struct work_struct work; 30 bool tx_running; 31 };
+12
include/net/ip_fib.h
··· 447 #endif 448 int fib_unmerge(struct net *net); 449 450 /* Exported by fib_semantics.c */ 451 int ip_fib_check_default(__be32 gw, struct net_device *dev); 452 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); ··· 489 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri); 490 void fib_trie_init(void); 491 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias); 492 493 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) 494 {
··· 447 #endif 448 int fib_unmerge(struct net *net); 449 450 + static inline bool nhc_l3mdev_matches_dev(const struct fib_nh_common *nhc, 451 + const struct net_device *dev) 452 + { 453 + if (nhc->nhc_dev == dev || 454 + l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) 455 + return true; 456 + 457 + return false; 458 + } 459 + 460 /* Exported by fib_semantics.c */ 461 int ip_fib_check_default(__be32 gw, struct net_device *dev); 462 int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force); ··· 479 void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri); 480 void fib_trie_init(void); 481 struct fib_table *fib_trie_table(u32 id, struct fib_table *alias); 482 + bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, 483 + const struct flowi4 *flp); 484 485 static inline void fib_combine_itag(u32 *itag, const struct fib_result *res) 486 {
+84 -16
include/net/nexthop.h
··· 70 }; 71 72 struct nh_group { 73 u16 num_nh; 74 bool mpath; 75 bool has_v4; ··· 137 { 138 unsigned int rc = 1; 139 140 - if (nexthop_is_multipath(nh)) { 141 struct nh_group *nh_grp; 142 143 nh_grp = rcu_dereference_rtnl(nh->nh_grp); 144 - rc = nh_grp->num_nh; 145 } 146 147 return rc; 148 } 149 150 static inline 151 - struct nexthop *nexthop_mpath_select(const struct nexthop *nh, int nhsel) 152 { 153 - const struct nh_group *nhg = rcu_dereference_rtnl(nh->nh_grp); 154 - 155 /* for_nexthops macros in fib_semantics.c grabs a pointer to 156 * the nexthop before checking nhsel 157 */ ··· 185 { 186 const struct nh_info *nhi; 187 188 - if (nexthop_is_multipath(nh)) { 189 - if (nexthop_num_path(nh) > 1) 190 return false; 191 - nh = nexthop_mpath_select(nh, 0); 192 - if (!nh) 193 - return false; 194 } 195 196 nhi = rcu_dereference_rtnl(nh->nh_info); ··· 218 BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0); 219 BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0); 220 221 - if (nexthop_is_multipath(nh)) { 222 - nh = nexthop_mpath_select(nh, nhsel); 223 - if (!nh) 224 - return NULL; 225 } 226 227 nhi = rcu_dereference_rtnl(nh->nh_info); 228 return &nhi->fib_nhc; 229 } 230 231 static inline unsigned int fib_info_num_path(const struct fib_info *fi) ··· 328 { 329 struct nh_info *nhi; 330 331 - if (nexthop_is_multipath(nh)) { 332 - nh = nexthop_mpath_select(nh, 0); 333 if (!nh) 334 return NULL; 335 }
··· 70 }; 71 72 struct nh_group { 73 + struct nh_group *spare; /* spare group for removals */ 74 u16 num_nh; 75 bool mpath; 76 bool has_v4; ··· 136 { 137 unsigned int rc = 1; 138 139 + if (nh->is_group) { 140 struct nh_group *nh_grp; 141 142 nh_grp = rcu_dereference_rtnl(nh->nh_grp); 143 + if (nh_grp->mpath) 144 + rc = nh_grp->num_nh; 145 } 146 147 return rc; 148 } 149 150 static inline 151 + struct nexthop *nexthop_mpath_select(const struct nh_group *nhg, int nhsel) 152 { 153 /* for_nexthops macros in fib_semantics.c grabs a pointer to 154 * the nexthop before checking nhsel 155 */ ··· 185 { 186 const struct nh_info *nhi; 187 188 + if (nh->is_group) { 189 + struct nh_group *nh_grp; 190 + 191 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 192 + if (nh_grp->num_nh > 1) 193 return false; 194 + 195 + nh = nh_grp->nh_entries[0].nh; 196 } 197 198 nhi = rcu_dereference_rtnl(nh->nh_info); ··· 216 BUILD_BUG_ON(offsetof(struct fib_nh, nh_common) != 0); 217 BUILD_BUG_ON(offsetof(struct fib6_nh, nh_common) != 0); 218 219 + if (nh->is_group) { 220 + struct nh_group *nh_grp; 221 + 222 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 223 + if (nh_grp->mpath) { 224 + nh = nexthop_mpath_select(nh_grp, nhsel); 225 + if (!nh) 226 + return NULL; 227 + } 228 } 229 230 nhi = rcu_dereference_rtnl(nh->nh_info); 231 return &nhi->fib_nhc; 232 + } 233 + 234 + /* called from fib_table_lookup with rcu_lock */ 235 + static inline 236 + struct fib_nh_common *nexthop_get_nhc_lookup(const struct nexthop *nh, 237 + int fib_flags, 238 + const struct flowi4 *flp, 239 + int *nhsel) 240 + { 241 + struct nh_info *nhi; 242 + 243 + if (nh->is_group) { 244 + struct nh_group *nhg = rcu_dereference(nh->nh_grp); 245 + int i; 246 + 247 + for (i = 0; i < nhg->num_nh; i++) { 248 + struct nexthop *nhe = nhg->nh_entries[i].nh; 249 + 250 + nhi = rcu_dereference(nhe->nh_info); 251 + if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) { 252 + *nhsel = i; 253 + return &nhi->fib_nhc; 254 + } 255 + } 256 + } else { 257 + 
nhi = rcu_dereference(nh->nh_info); 258 + if (fib_lookup_good_nhc(&nhi->fib_nhc, fib_flags, flp)) { 259 + *nhsel = 0; 260 + return &nhi->fib_nhc; 261 + } 262 + } 263 + 264 + return NULL; 265 + } 266 + 267 + static inline bool nexthop_uses_dev(const struct nexthop *nh, 268 + const struct net_device *dev) 269 + { 270 + struct nh_info *nhi; 271 + 272 + if (nh->is_group) { 273 + struct nh_group *nhg = rcu_dereference(nh->nh_grp); 274 + int i; 275 + 276 + for (i = 0; i < nhg->num_nh; i++) { 277 + struct nexthop *nhe = nhg->nh_entries[i].nh; 278 + 279 + nhi = rcu_dereference(nhe->nh_info); 280 + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) 281 + return true; 282 + } 283 + } else { 284 + nhi = rcu_dereference(nh->nh_info); 285 + if (nhc_l3mdev_matches_dev(&nhi->fib_nhc, dev)) 286 + return true; 287 + } 288 + 289 + return false; 290 } 291 292 static inline unsigned int fib_info_num_path(const struct fib_info *fi) ··· 263 { 264 struct nh_info *nhi; 265 266 + if (nh->is_group) { 267 + struct nh_group *nh_grp; 268 + 269 + nh_grp = rcu_dereference_rtnl(nh->nh_grp); 270 + nh = nexthop_mpath_select(nh_grp, 0); 271 if (!nh) 272 return NULL; 273 }
+4
include/net/tls.h
··· 135 struct tls_rec *open_rec; 136 struct list_head tx_list; 137 atomic_t encrypt_pending; 138 int async_notify; 139 u8 async_capable:1; 140 ··· 157 u8 async_capable:1; 158 u8 decrypted:1; 159 atomic_t decrypt_pending; 160 bool async_notify; 161 }; 162
··· 135 struct tls_rec *open_rec; 136 struct list_head tx_list; 137 atomic_t encrypt_pending; 138 + /* protect crypto_wait with encrypt_pending */ 139 + spinlock_t encrypt_compl_lock; 140 int async_notify; 141 u8 async_capable:1; 142 ··· 155 u8 async_capable:1; 156 u8 decrypted:1; 157 atomic_t decrypt_pending; 158 + /* protect crypto_wait with decrypt_pending*/ 159 + spinlock_t decrypt_compl_lock; 160 bool async_notify; 161 }; 162
+1 -1
include/uapi/linux/xfrm.h
··· 304 XFRMA_PROTO, /* __u8 */ 305 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ 306 XFRMA_PAD, 307 - XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */ 308 XFRMA_SET_MARK, /* __u32 */ 309 XFRMA_SET_MARK_MASK, /* __u32 */ 310 XFRMA_IF_ID, /* __u32 */
··· 304 XFRMA_PROTO, /* __u8 */ 305 XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */ 306 XFRMA_PAD, 307 + XFRMA_OFFLOAD_DEV, /* struct xfrm_user_offload */ 308 XFRMA_SET_MARK, /* __u32 */ 309 XFRMA_SET_MARK_MASK, /* __u32 */ 310 XFRMA_IF_ID, /* __u32 */
+16 -18
kernel/bpf/verifier.c
··· 1168 * but must be positive otherwise set to worse case bounds 1169 * and refine later from tnum. 1170 */ 1171 - if (reg->s32_min_value > 0) 1172 - reg->smin_value = reg->s32_min_value; 1173 - else 1174 - reg->smin_value = 0; 1175 - if (reg->s32_max_value > 0) 1176 reg->smax_value = reg->s32_max_value; 1177 else 1178 reg->smax_value = U32_MAX; 1179 } 1180 1181 static void __reg_combine_32_into_64(struct bpf_reg_state *reg) ··· 10428 } 10429 #define SECURITY_PREFIX "security_" 10430 10431 - static int check_attach_modify_return(struct bpf_verifier_env *env) 10432 { 10433 - struct bpf_prog *prog = env->prog; 10434 - unsigned long addr = (unsigned long) prog->aux->trampoline->func.addr; 10435 - 10436 - /* This is expected to be cleaned up in the future with the KRSI effort 10437 - * introducing the LSM_HOOK macro for cleaning up lsm_hooks.h. 10438 - */ 10439 if (within_error_injection_list(addr) || 10440 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name, 10441 sizeof(SECURITY_PREFIX) - 1)) 10442 return 0; 10443 - 10444 - verbose(env, "fmod_ret attach_btf_id %u (%s) is not modifiable\n", 10445 - prog->aux->attach_btf_id, prog->aux->attach_func_name); 10446 10447 return -EINVAL; 10448 } ··· 10645 goto out; 10646 } 10647 } 10648 tr->func.addr = (void *)addr; 10649 prog->aux->trampoline = tr; 10650 - 10651 - if (prog->expected_attach_type == BPF_MODIFY_RETURN) 10652 - ret = check_attach_modify_return(env); 10653 out: 10654 mutex_unlock(&tr->mutex); 10655 if (ret)
··· 1168 * but must be positive otherwise set to worse case bounds 1169 * and refine later from tnum. 1170 */ 1171 + if (reg->s32_min_value >= 0 && reg->s32_max_value >= 0) 1172 reg->smax_value = reg->s32_max_value; 1173 else 1174 reg->smax_value = U32_MAX; 1175 + if (reg->s32_min_value >= 0) 1176 + reg->smin_value = reg->s32_min_value; 1177 + else 1178 + reg->smin_value = 0; 1179 } 1180 1181 static void __reg_combine_32_into_64(struct bpf_reg_state *reg) ··· 10428 } 10429 #define SECURITY_PREFIX "security_" 10430 10431 + static int check_attach_modify_return(struct bpf_prog *prog, unsigned long addr) 10432 { 10433 if (within_error_injection_list(addr) || 10434 !strncmp(SECURITY_PREFIX, prog->aux->attach_func_name, 10435 sizeof(SECURITY_PREFIX) - 1)) 10436 return 0; 10437 10438 return -EINVAL; 10439 } ··· 10654 goto out; 10655 } 10656 } 10657 + 10658 + if (prog->expected_attach_type == BPF_MODIFY_RETURN) { 10659 + ret = check_attach_modify_return(prog, addr); 10660 + if (ret) 10661 + verbose(env, "%s() is not modifiable\n", 10662 + prog->aux->attach_func_name); 10663 + } 10664 + 10665 + if (ret) 10666 + goto out; 10667 tr->func.addr = (void *)addr; 10668 prog->aux->trampoline = tr; 10669 out: 10670 mutex_unlock(&tr->mutex); 10671 if (ret)
+2 -1
net/bridge/br_multicast.c
··· 2413 free_percpu(br->mcast_stats); 2414 } 2415 2416 - static void mcast_stats_add_dir(u64 *dst, u64 *src) 2417 { 2418 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 2419 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
··· 2413 free_percpu(br->mcast_stats); 2414 } 2415 2416 + /* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ 2417 + static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) 2418 { 2419 dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; 2420 dst[BR_MCAST_DIR_TX] += src[BR_MCAST_DIR_TX];
+6
net/bridge/netfilter/nft_reject_bridge.c
··· 31 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); 32 eth->h_proto = eth_hdr(oldskb)->h_proto; 33 skb_pull(nskb, ETH_HLEN); 34 } 35 36 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
··· 31 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source); 32 eth->h_proto = eth_hdr(oldskb)->h_proto; 33 skb_pull(nskb, ETH_HLEN); 34 + 35 + if (skb_vlan_tag_present(oldskb)) { 36 + u16 vid = skb_vlan_tag_get(oldskb); 37 + 38 + __vlan_hwaccel_put_tag(nskb, oldskb->vlan_proto, vid); 39 + } 40 } 41 42 static int nft_bridge_iphdr_validate(struct sk_buff *skb)
+2 -2
net/core/neighbour.c
··· 1082 } 1083 1084 if (neigh->nud_state & NUD_IN_TIMER) { 1085 - if (time_before(next, jiffies + HZ/2)) 1086 - next = jiffies + HZ/2; 1087 if (!mod_timer(&neigh->timer, next)) 1088 neigh_hold(neigh); 1089 }
··· 1082 } 1083 1084 if (neigh->nud_state & NUD_IN_TIMER) { 1085 + if (time_before(next, jiffies + HZ/100)) 1086 + next = jiffies + HZ/100; 1087 if (!mod_timer(&neigh->timer, next)) 1088 neigh_hold(neigh); 1089 }
+1
net/dsa/slave.c
··· 1736 if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) 1737 slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1738 slave_dev->hw_features |= NETIF_F_HW_TC; 1739 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; 1740 if (!IS_ERR_OR_NULL(port->mac)) 1741 ether_addr_copy(slave_dev->dev_addr, port->mac);
··· 1736 if (ds->ops->port_vlan_add && ds->ops->port_vlan_del) 1737 slave_dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; 1738 slave_dev->hw_features |= NETIF_F_HW_TC; 1739 + slave_dev->features |= NETIF_F_LLTX; 1740 slave_dev->ethtool_ops = &dsa_slave_ethtool_ops; 1741 if (!IS_ERR_OR_NULL(port->mac)) 1742 ether_addr_copy(slave_dev->dev_addr, port->mac);
+1
net/ipv4/devinet.c
··· 276 err = devinet_sysctl_register(in_dev); 277 if (err) { 278 in_dev->dead = 1; 279 in_dev_put(in_dev); 280 in_dev = NULL; 281 goto out;
··· 276 err = devinet_sysctl_register(in_dev); 277 if (err) { 278 in_dev->dead = 1; 279 + neigh_parms_release(&arp_tbl, in_dev->arp_parms); 280 in_dev_put(in_dev); 281 in_dev = NULL; 282 goto out;
+18 -12
net/ipv4/esp4_offload.c
··· 63 sp->olen++; 64 65 xo = xfrm_offload(skb); 66 - if (!xo) { 67 - xfrm_state_put(x); 68 goto out_reset; 69 - } 70 } 71 72 xo->flags |= XFRM_GRO; ··· 137 struct xfrm_offload *xo = xfrm_offload(skb); 138 struct sk_buff *segs = ERR_PTR(-EINVAL); 139 const struct net_offload *ops; 140 - int proto = xo->proto; 141 142 skb->transport_header += x->props.header_len; 143 144 - if (proto == IPPROTO_BEETPH) { 145 - struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data; 146 147 - skb->transport_header += ph->hdrlen * 8; 148 - proto = ph->nexthdr; 149 - } else if (x->sel.family != AF_INET6) { 150 - skb->transport_header -= IPV4_BEET_PHMAXLEN; 151 - } else if (proto == IPPROTO_TCP) { 152 - skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 153 } 154 155 __skb_pull(skb, skb_transport_offset(skb));
··· 63 sp->olen++; 64 65 xo = xfrm_offload(skb); 66 + if (!xo) 67 goto out_reset; 68 } 69 70 xo->flags |= XFRM_GRO; ··· 139 struct xfrm_offload *xo = xfrm_offload(skb); 140 struct sk_buff *segs = ERR_PTR(-EINVAL); 141 const struct net_offload *ops; 142 + u8 proto = xo->proto; 143 144 skb->transport_header += x->props.header_len; 145 146 + if (x->sel.family != AF_INET6) { 147 + if (proto == IPPROTO_BEETPH) { 148 + struct ip_beet_phdr *ph = 149 + (struct ip_beet_phdr *)skb->data; 150 151 + skb->transport_header += ph->hdrlen * 8; 152 + proto = ph->nexthdr; 153 + } else { 154 + skb->transport_header -= IPV4_BEET_PHMAXLEN; 155 + } 156 + } else { 157 + __be16 frag; 158 + 159 + skb->transport_header += 160 + ipv6_skip_exthdr(skb, 0, &proto, &frag); 161 + if (proto == IPPROTO_TCP) 162 + skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4; 163 } 164 165 __skb_pull(skb, skb_transport_offset(skb));
+10 -9
net/ipv4/fib_frontend.c
··· 309 { 310 bool dev_match = false; 311 #ifdef CONFIG_IP_ROUTE_MULTIPATH 312 - int ret; 313 314 - for (ret = 0; ret < fib_info_num_path(fi); ret++) { 315 - const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); 316 317 - if (nhc->nhc_dev == dev) { 318 - dev_match = true; 319 - break; 320 - } else if (l3mdev_master_ifindex_rcu(nhc->nhc_dev) == dev->ifindex) { 321 - dev_match = true; 322 - break; 323 } 324 } 325 #else
··· 309 { 310 bool dev_match = false; 311 #ifdef CONFIG_IP_ROUTE_MULTIPATH 312 + if (unlikely(fi->nh)) { 313 + dev_match = nexthop_uses_dev(fi->nh, dev); 314 + } else { 315 + int ret; 316 317 + for (ret = 0; ret < fib_info_num_path(fi); ret++) { 318 + const struct fib_nh_common *nhc = fib_info_nhc(fi, ret); 319 320 + if (nhc_l3mdev_matches_dev(nhc, dev)) { 321 + dev_match = true; 322 + break; 323 + } 324 } 325 } 326 #else
+36 -15
net/ipv4/fib_trie.c
··· 1371 return (key ^ prefix) & (prefix | -prefix); 1372 } 1373 1374 /* should be called with rcu_read_lock */ 1375 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, 1376 struct fib_result *res, int fib_flags) ··· 1523 /* Step 3: Process the leaf, if that fails fall back to backtracing */ 1524 hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { 1525 struct fib_info *fi = fa->fa_info; 1526 int nhsel, err; 1527 1528 if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) { ··· 1549 if (fi->fib_flags & RTNH_F_DEAD) 1550 continue; 1551 1552 - if (unlikely(fi->nh && nexthop_is_blackhole(fi->nh))) { 1553 - err = fib_props[RTN_BLACKHOLE].error; 1554 - goto out_reject; 1555 } 1556 1557 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) { 1558 - struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel); 1559 1560 - if (nhc->nhc_flags & RTNH_F_DEAD) 1561 continue; 1562 - if (ip_ignore_linkdown(nhc->nhc_dev) && 1563 - nhc->nhc_flags & RTNH_F_LINKDOWN && 1564 - !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) 1565 - continue; 1566 - if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) { 1567 - if (flp->flowi4_oif && 1568 - flp->flowi4_oif != nhc->nhc_oif) 1569 - continue; 1570 - } 1571 - 1572 if (!(fib_flags & FIB_LOOKUP_NOREF)) 1573 refcount_inc(&fi->fib_clntref); 1574 ··· 1588 return err; 1589 } 1590 } 1591 #ifdef CONFIG_IP_FIB_TRIE_STATS 1592 this_cpu_inc(stats->semantic_match_miss); 1593 #endif
··· 1371 return (key ^ prefix) & (prefix | -prefix); 1372 } 1373 1374 + bool fib_lookup_good_nhc(const struct fib_nh_common *nhc, int fib_flags, 1375 + const struct flowi4 *flp) 1376 + { 1377 + if (nhc->nhc_flags & RTNH_F_DEAD) 1378 + return false; 1379 + 1380 + if (ip_ignore_linkdown(nhc->nhc_dev) && 1381 + nhc->nhc_flags & RTNH_F_LINKDOWN && 1382 + !(fib_flags & FIB_LOOKUP_IGNORE_LINKSTATE)) 1383 + return false; 1384 + 1385 + if (!(flp->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF)) { 1386 + if (flp->flowi4_oif && 1387 + flp->flowi4_oif != nhc->nhc_oif) 1388 + return false; 1389 + } 1390 + 1391 + return true; 1392 + } 1393 + 1394 /* should be called with rcu_read_lock */ 1395 int fib_table_lookup(struct fib_table *tb, const struct flowi4 *flp, 1396 struct fib_result *res, int fib_flags) ··· 1503 /* Step 3: Process the leaf, if that fails fall back to backtracing */ 1504 hlist_for_each_entry_rcu(fa, &n->leaf, fa_list) { 1505 struct fib_info *fi = fa->fa_info; 1506 + struct fib_nh_common *nhc; 1507 int nhsel, err; 1508 1509 if ((BITS_PER_LONG > KEYLENGTH) || (fa->fa_slen < KEYLENGTH)) { ··· 1528 if (fi->fib_flags & RTNH_F_DEAD) 1529 continue; 1530 1531 + if (unlikely(fi->nh)) { 1532 + if (nexthop_is_blackhole(fi->nh)) { 1533 + err = fib_props[RTN_BLACKHOLE].error; 1534 + goto out_reject; 1535 + } 1536 + 1537 + nhc = nexthop_get_nhc_lookup(fi->nh, fib_flags, flp, 1538 + &nhsel); 1539 + if (nhc) 1540 + goto set_result; 1541 + goto miss; 1542 } 1543 1544 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) { 1545 + nhc = fib_info_nhc(fi, nhsel); 1546 1547 + if (!fib_lookup_good_nhc(nhc, fib_flags, flp)) 1548 continue; 1549 + set_result: 1550 if (!(fib_flags & FIB_LOOKUP_NOREF)) 1551 refcount_inc(&fi->fib_clntref); 1552 ··· 1568 return err; 1569 } 1570 } 1571 + miss: 1572 #ifdef CONFIG_IP_FIB_TRIE_STATS 1573 this_cpu_inc(stats->semantic_match_miss); 1574 #endif
+22 -1
net/ipv4/ip_vti.c
··· 93 94 static int vti_rcv_tunnel(struct sk_buff *skb) 95 { 96 - return vti_rcv(skb, ip_hdr(skb)->saddr, true); 97 } 98 99 static int vti_rcv_cb(struct sk_buff *skb, int err)
··· 93 94 static int vti_rcv_tunnel(struct sk_buff *skb) 95 { 96 + struct ip_tunnel_net *itn = net_generic(dev_net(skb->dev), vti_net_id); 97 + const struct iphdr *iph = ip_hdr(skb); 98 + struct ip_tunnel *tunnel; 99 + 100 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY, 101 + iph->saddr, iph->daddr, 0); 102 + if (tunnel) { 103 + struct tnl_ptk_info tpi = { 104 + .proto = htons(ETH_P_IP), 105 + }; 106 + 107 + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) 108 + goto drop; 109 + if (iptunnel_pull_header(skb, 0, tpi.proto, false)) 110 + goto drop; 111 + return ip_tunnel_rcv(tunnel, skb, &tpi, NULL, false); 112 + } 113 + 114 + return -EINVAL; 115 + drop: 116 + kfree_skb(skb); 117 + return 0; 118 } 119 120 static int vti_rcv_cb(struct sk_buff *skb, int err)
+2 -5
net/ipv4/netfilter/nf_nat_pptp.c
··· 166 break; 167 default: 168 pr_debug("unknown outbound packet 0x%04x:%s\n", msg, 169 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : 170 - pptp_msg_name[0]); 171 fallthrough; 172 case PPTP_SET_LINK_INFO: 173 /* only need to NAT in case PAC is behind NAT box */ ··· 267 pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); 268 break; 269 default: 270 - pr_debug("unknown inbound packet %s\n", 271 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : 272 - pptp_msg_name[0]); 273 fallthrough; 274 case PPTP_START_SESSION_REQUEST: 275 case PPTP_START_SESSION_REPLY:
··· 166 break; 167 default: 168 pr_debug("unknown outbound packet 0x%04x:%s\n", msg, 169 + pptp_msg_name(msg)); 170 fallthrough; 171 case PPTP_SET_LINK_INFO: 172 /* only need to NAT in case PAC is behind NAT box */ ··· 268 pcid_off = offsetof(union pptp_ctrl_union, setlink.peersCallID); 269 break; 270 default: 271 + pr_debug("unknown inbound packet %s\n", pptp_msg_name(msg)); 272 fallthrough; 273 case PPTP_START_SESSION_REQUEST: 274 case PPTP_START_SESSION_REPLY:
+63 -39
net/ipv4/nexthop.c
··· 63 int i; 64 65 nhg = rcu_dereference_raw(nh->nh_grp); 66 - for (i = 0; i < nhg->num_nh; ++i) 67 - WARN_ON(nhg->nh_entries[i].nh); 68 69 kfree(nhg); 70 } 71 ··· 701 } 702 } 703 704 - static void remove_nh_grp_entry(struct nh_grp_entry *nhge, 705 - struct nh_group *nhg, 706 struct nl_info *nlinfo) 707 { 708 struct nexthop *nh = nhge->nh; 709 - struct nh_grp_entry *nhges; 710 - bool found = false; 711 - int i; 712 713 WARN_ON(!nh); 714 715 - nhges = nhg->nh_entries; 716 - for (i = 0; i < nhg->num_nh; ++i) { 717 - if (found) { 718 - nhges[i-1].nh = nhges[i].nh; 719 - nhges[i-1].weight = nhges[i].weight; 720 - list_del(&nhges[i].nh_list); 721 - list_add(&nhges[i-1].nh_list, &nhges[i-1].nh->grp_list); 722 - } else if (nhg->nh_entries[i].nh == nh) { 723 - found = true; 724 - } 725 } 726 727 - if (WARN_ON(!found)) 728 - return; 729 730 - nhg->num_nh--; 731 - nhg->nh_entries[nhg->num_nh].nh = NULL; 732 733 - nh_group_rebalance(nhg); 734 735 - nexthop_put(nh); 736 737 if (nlinfo) 738 - nexthop_notify(RTM_NEWNEXTHOP, nhge->nh_parent, nlinfo); 739 } 740 741 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, ··· 758 { 759 struct nh_grp_entry *nhge, *tmp; 760 761 - list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) { 762 - struct nh_group *nhg; 763 764 - list_del(&nhge->nh_list); 765 - nhg = rtnl_dereference(nhge->nh_parent->nh_grp); 766 - remove_nh_grp_entry(nhge, nhg, nlinfo); 767 - 768 - /* if this group has no more entries then remove it */ 769 - if (!nhg->num_nh) 770 - remove_nexthop(net, nhge->nh_parent, nlinfo); 771 - } 772 } 773 774 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) ··· 776 if (WARN_ON(!nhge->nh)) 777 continue; 778 779 - list_del(&nhge->nh_list); 780 - nexthop_put(nhge->nh); 781 - nhge->nh = NULL; 782 - nhg->num_nh--; 783 } 784 } 785 ··· 1099 { 1100 struct nlattr *grps_attr = cfg->nh_grp; 1101 struct nexthop_grp *entry = nla_data(grps_attr); 1102 struct nh_group *nhg; 1103 struct nexthop 
*nh; 1104 int i; ··· 1110 1111 nh->is_group = 1; 1112 1113 - nhg = nexthop_grp_alloc(nla_len(grps_attr) / sizeof(*entry)); 1114 if (!nhg) { 1115 kfree(nh); 1116 return ERR_PTR(-ENOMEM); 1117 } 1118 1119 for (i = 0; i < nhg->num_nh; ++i) { 1120 struct nexthop *nhe; ··· 1156 for (; i >= 0; --i) 1157 nexthop_put(nhg->nh_entries[i].nh); 1158 1159 kfree(nhg); 1160 kfree(nh); 1161
··· 63 int i; 64 65 nhg = rcu_dereference_raw(nh->nh_grp); 66 + for (i = 0; i < nhg->num_nh; ++i) { 67 + struct nh_grp_entry *nhge = &nhg->nh_entries[i]; 68 69 + WARN_ON(!list_empty(&nhge->nh_list)); 70 + nexthop_put(nhge->nh); 71 + } 72 + 73 + WARN_ON(nhg->spare == nhg); 74 + 75 + kfree(nhg->spare); 76 kfree(nhg); 77 } 78 ··· 694 } 695 } 696 697 + static void remove_nh_grp_entry(struct net *net, struct nh_grp_entry *nhge, 698 struct nl_info *nlinfo) 699 { 700 + struct nh_grp_entry *nhges, *new_nhges; 701 + struct nexthop *nhp = nhge->nh_parent; 702 struct nexthop *nh = nhge->nh; 703 + struct nh_group *nhg, *newg; 704 + int i, j; 705 706 WARN_ON(!nh); 707 708 + nhg = rtnl_dereference(nhp->nh_grp); 709 + newg = nhg->spare; 710 + 711 + /* last entry, keep it visible and remove the parent */ 712 + if (nhg->num_nh == 1) { 713 + remove_nexthop(net, nhp, nlinfo); 714 + return; 715 } 716 717 + newg->has_v4 = nhg->has_v4; 718 + newg->mpath = nhg->mpath; 719 + newg->num_nh = nhg->num_nh; 720 721 + /* copy old entries to new except the one getting removed */ 722 + nhges = nhg->nh_entries; 723 + new_nhges = newg->nh_entries; 724 + for (i = 0, j = 0; i < nhg->num_nh; ++i) { 725 + /* current nexthop getting removed */ 726 + if (nhg->nh_entries[i].nh == nh) { 727 + newg->num_nh--; 728 + continue; 729 + } 730 731 + list_del(&nhges[i].nh_list); 732 + new_nhges[j].nh_parent = nhges[i].nh_parent; 733 + new_nhges[j].nh = nhges[i].nh; 734 + new_nhges[j].weight = nhges[i].weight; 735 + list_add(&new_nhges[j].nh_list, &new_nhges[j].nh->grp_list); 736 + j++; 737 + } 738 739 + nh_group_rebalance(newg); 740 + rcu_assign_pointer(nhp->nh_grp, newg); 741 + 742 + list_del(&nhge->nh_list); 743 + nexthop_put(nhge->nh); 744 745 if (nlinfo) 746 + nexthop_notify(RTM_NEWNEXTHOP, nhp, nlinfo); 747 } 748 749 static void remove_nexthop_from_groups(struct net *net, struct nexthop *nh, ··· 736 { 737 struct nh_grp_entry *nhge, *tmp; 738 739 + list_for_each_entry_safe(nhge, tmp, &nh->grp_list, nh_list) 740 
+ remove_nh_grp_entry(net, nhge, nlinfo); 741 742 + /* make sure all see the newly published array before releasing rtnl */ 743 + synchronize_rcu(); 744 } 745 746 static void remove_nexthop_group(struct nexthop *nh, struct nl_info *nlinfo) ··· 760 if (WARN_ON(!nhge->nh)) 761 continue; 762 763 + list_del_init(&nhge->nh_list); 764 } 765 } 766 ··· 1086 { 1087 struct nlattr *grps_attr = cfg->nh_grp; 1088 struct nexthop_grp *entry = nla_data(grps_attr); 1089 + u16 num_nh = nla_len(grps_attr) / sizeof(*entry); 1090 struct nh_group *nhg; 1091 struct nexthop *nh; 1092 int i; ··· 1096 1097 nh->is_group = 1; 1098 1099 + nhg = nexthop_grp_alloc(num_nh); 1100 if (!nhg) { 1101 kfree(nh); 1102 return ERR_PTR(-ENOMEM); 1103 } 1104 + 1105 + /* spare group used for removals */ 1106 + nhg->spare = nexthop_grp_alloc(num_nh); 1107 + if (!nhg) { 1108 + kfree(nhg); 1109 + kfree(nh); 1110 + return NULL; 1111 + } 1112 + nhg->spare->spare = nhg; 1113 1114 for (i = 0; i < nhg->num_nh; ++i) { 1115 struct nexthop *nhe; ··· 1133 for (; i >= 0; --i) 1134 nexthop_put(nhg->nh_entries[i].nh); 1135 1136 + kfree(nhg->spare); 1137 kfree(nhg); 1138 kfree(nh); 1139
+25 -12
net/ipv6/esp6_offload.c
··· 85 sp->olen++; 86 87 xo = xfrm_offload(skb); 88 - if (!xo) { 89 - xfrm_state_put(x); 90 goto out_reset; 91 - } 92 } 93 94 xo->flags |= XFRM_GRO; ··· 121 struct ip_esp_hdr *esph; 122 struct ipv6hdr *iph = ipv6_hdr(skb); 123 struct xfrm_offload *xo = xfrm_offload(skb); 124 - int proto = iph->nexthdr; 125 126 skb_push(skb, -skb_network_offset(skb)); 127 esph = ip_esp_hdr(skb); 128 *skb_mac_header(skb) = IPPROTO_ESP; 129 ··· 171 struct xfrm_offload *xo = xfrm_offload(skb); 172 struct sk_buff *segs = ERR_PTR(-EINVAL); 173 const struct net_offload *ops; 174 - int proto = xo->proto; 175 176 skb->transport_header += x->props.header_len; 177 - 178 - if (proto == IPPROTO_BEETPH) { 179 - struct ip_beet_phdr *ph = (struct ip_beet_phdr *)skb->data; 180 - 181 - skb->transport_header += ph->hdrlen * 8; 182 - proto = ph->nexthdr; 183 - } 184 185 if (x->sel.family != AF_INET6) { 186 skb->transport_header -= 187 (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); 188 189 if (proto == IPPROTO_TCP) 190 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; 191 } 192 193 __skb_pull(skb, skb_transport_offset(skb));
··· 85 sp->olen++; 86 87 xo = xfrm_offload(skb); 88 + if (!xo) 89 goto out_reset; 90 } 91 92 xo->flags |= XFRM_GRO; ··· 123 struct ip_esp_hdr *esph; 124 struct ipv6hdr *iph = ipv6_hdr(skb); 125 struct xfrm_offload *xo = xfrm_offload(skb); 126 + u8 proto = iph->nexthdr; 127 128 skb_push(skb, -skb_network_offset(skb)); 129 + 130 + if (x->outer_mode.encap == XFRM_MODE_TRANSPORT) { 131 + __be16 frag; 132 + 133 + ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &proto, &frag); 134 + } 135 + 136 esph = ip_esp_hdr(skb); 137 *skb_mac_header(skb) = IPPROTO_ESP; 138 ··· 166 struct xfrm_offload *xo = xfrm_offload(skb); 167 struct sk_buff *segs = ERR_PTR(-EINVAL); 168 const struct net_offload *ops; 169 + u8 proto = xo->proto; 170 171 skb->transport_header += x->props.header_len; 172 173 if (x->sel.family != AF_INET6) { 174 skb->transport_header -= 175 (sizeof(struct ipv6hdr) - sizeof(struct iphdr)); 176 177 + if (proto == IPPROTO_BEETPH) { 178 + struct ip_beet_phdr *ph = 179 + (struct ip_beet_phdr *)skb->data; 180 + 181 + skb->transport_header += ph->hdrlen * 8; 182 + proto = ph->nexthdr; 183 + } else { 184 + skb->transport_header -= IPV4_BEET_PHMAXLEN; 185 + } 186 + 187 if (proto == IPPROTO_TCP) 188 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6; 189 + } else { 190 + __be16 frag; 191 + 192 + skb->transport_header += 193 + ipv6_skip_exthdr(skb, 0, &proto, &frag); 194 } 195 196 __skb_pull(skb, skb_transport_offset(skb));
+3
net/l2tp/l2tp_core.c
··· 1458 if (sk->sk_type != SOCK_DGRAM) 1459 return -EPROTONOSUPPORT; 1460 1461 if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) || 1462 (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP)) 1463 return -EPROTONOSUPPORT;
··· 1458 if (sk->sk_type != SOCK_DGRAM) 1459 return -EPROTONOSUPPORT; 1460 1461 + if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) 1462 + return -EPROTONOSUPPORT; 1463 + 1464 if ((encap == L2TP_ENCAPTYPE_UDP && sk->sk_protocol != IPPROTO_UDP) || 1465 (encap == L2TP_ENCAPTYPE_IP && sk->sk_protocol != IPPROTO_L2TP)) 1466 return -EPROTONOSUPPORT;
+22 -7
net/l2tp/l2tp_ip.c
··· 20 #include <net/icmp.h> 21 #include <net/udp.h> 22 #include <net/inet_common.h> 23 - #include <net/inet_hashtables.h> 24 #include <net/tcp_states.h> 25 #include <net/protocol.h> 26 #include <net/xfrm.h> ··· 208 return 0; 209 } 210 211 static int l2tp_ip_open(struct sock *sk) 212 { 213 /* Prevent autobind. We don't have ports. */ 214 inet_sk(sk)->inet_num = IPPROTO_L2TP; 215 216 - write_lock_bh(&l2tp_ip_lock); 217 - sk_add_node(sk, &l2tp_ip_table); 218 - write_unlock_bh(&l2tp_ip_lock); 219 - 220 return 0; 221 } 222 ··· 609 .sendmsg = l2tp_ip_sendmsg, 610 .recvmsg = l2tp_ip_recvmsg, 611 .backlog_rcv = l2tp_ip_backlog_recv, 612 - .hash = inet_hash, 613 - .unhash = inet_unhash, 614 .obj_size = sizeof(struct l2tp_ip_sock), 615 #ifdef CONFIG_COMPAT 616 .compat_setsockopt = compat_ip_setsockopt,
··· 20 #include <net/icmp.h> 21 #include <net/udp.h> 22 #include <net/inet_common.h> 23 #include <net/tcp_states.h> 24 #include <net/protocol.h> 25 #include <net/xfrm.h> ··· 209 return 0; 210 } 211 212 + static int l2tp_ip_hash(struct sock *sk) 213 + { 214 + if (sk_unhashed(sk)) { 215 + write_lock_bh(&l2tp_ip_lock); 216 + sk_add_node(sk, &l2tp_ip_table); 217 + write_unlock_bh(&l2tp_ip_lock); 218 + } 219 + return 0; 220 + } 221 + 222 + static void l2tp_ip_unhash(struct sock *sk) 223 + { 224 + if (sk_unhashed(sk)) 225 + return; 226 + write_lock_bh(&l2tp_ip_lock); 227 + sk_del_node_init(sk); 228 + write_unlock_bh(&l2tp_ip_lock); 229 + } 230 + 231 static int l2tp_ip_open(struct sock *sk) 232 { 233 /* Prevent autobind. We don't have ports. */ 234 inet_sk(sk)->inet_num = IPPROTO_L2TP; 235 236 + l2tp_ip_hash(sk); 237 return 0; 238 } 239 ··· 594 .sendmsg = l2tp_ip_sendmsg, 595 .recvmsg = l2tp_ip_recvmsg, 596 .backlog_rcv = l2tp_ip_backlog_recv, 597 + .hash = l2tp_ip_hash, 598 + .unhash = l2tp_ip_unhash, 599 .obj_size = sizeof(struct l2tp_ip_sock), 600 #ifdef CONFIG_COMPAT 601 .compat_setsockopt = compat_ip_setsockopt,
+22 -8
net/l2tp/l2tp_ip6.c
··· 20 #include <net/icmp.h> 21 #include <net/udp.h> 22 #include <net/inet_common.h> 23 - #include <net/inet_hashtables.h> 24 - #include <net/inet6_hashtables.h> 25 #include <net/tcp_states.h> 26 #include <net/protocol.h> 27 #include <net/xfrm.h> ··· 220 return 0; 221 } 222 223 static int l2tp_ip6_open(struct sock *sk) 224 { 225 /* Prevent autobind. We don't have ports. */ 226 inet_sk(sk)->inet_num = IPPROTO_L2TP; 227 228 - write_lock_bh(&l2tp_ip6_lock); 229 - sk_add_node(sk, &l2tp_ip6_table); 230 - write_unlock_bh(&l2tp_ip6_lock); 231 - 232 return 0; 233 } 234 ··· 742 .sendmsg = l2tp_ip6_sendmsg, 743 .recvmsg = l2tp_ip6_recvmsg, 744 .backlog_rcv = l2tp_ip6_backlog_recv, 745 - .hash = inet6_hash, 746 - .unhash = inet_unhash, 747 .obj_size = sizeof(struct l2tp_ip6_sock), 748 #ifdef CONFIG_COMPAT 749 .compat_setsockopt = compat_ipv6_setsockopt,
··· 20 #include <net/icmp.h> 21 #include <net/udp.h> 22 #include <net/inet_common.h> 23 #include <net/tcp_states.h> 24 #include <net/protocol.h> 25 #include <net/xfrm.h> ··· 222 return 0; 223 } 224 225 + static int l2tp_ip6_hash(struct sock *sk) 226 + { 227 + if (sk_unhashed(sk)) { 228 + write_lock_bh(&l2tp_ip6_lock); 229 + sk_add_node(sk, &l2tp_ip6_table); 230 + write_unlock_bh(&l2tp_ip6_lock); 231 + } 232 + return 0; 233 + } 234 + 235 + static void l2tp_ip6_unhash(struct sock *sk) 236 + { 237 + if (sk_unhashed(sk)) 238 + return; 239 + write_lock_bh(&l2tp_ip6_lock); 240 + sk_del_node_init(sk); 241 + write_unlock_bh(&l2tp_ip6_lock); 242 + } 243 + 244 static int l2tp_ip6_open(struct sock *sk) 245 { 246 /* Prevent autobind. We don't have ports. */ 247 inet_sk(sk)->inet_num = IPPROTO_L2TP; 248 249 + l2tp_ip6_hash(sk); 250 return 0; 251 } 252 ··· 728 .sendmsg = l2tp_ip6_sendmsg, 729 .recvmsg = l2tp_ip6_recvmsg, 730 .backlog_rcv = l2tp_ip6_backlog_recv, 731 + .hash = l2tp_ip6_hash, 732 + .unhash = l2tp_ip6_unhash, 733 .obj_size = sizeof(struct l2tp_ip6_sock), 734 #ifdef CONFIG_COMPAT 735 .compat_setsockopt = compat_ipv6_setsockopt,
+7
net/mac80211/mesh_hwmp.c
··· 1103 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, 1104 target_flags, mpath->dst, mpath->sn, da, 0, 1105 ttl, lifetime, 0, ifmsh->preq_id++, sdata); 1106 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 1107 1108 enddiscovery: 1109 rcu_read_unlock();
··· 1103 mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn, 1104 target_flags, mpath->dst, mpath->sn, da, 0, 1105 ttl, lifetime, 0, ifmsh->preq_id++, sdata); 1106 + 1107 + spin_lock_bh(&mpath->state_lock); 1108 + if (mpath->flags & MESH_PATH_DELETED) { 1109 + spin_unlock_bh(&mpath->state_lock); 1110 + goto enddiscovery; 1111 + } 1112 mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout); 1113 + spin_unlock_bh(&mpath->state_lock); 1114 1115 enddiscovery: 1116 rcu_read_unlock();
+48 -19
net/mptcp/protocol.c
··· 954 955 pr_debug("block timeout %ld", timeo); 956 mptcp_wait_data(sk, &timeo); 957 - if (unlikely(__mptcp_tcp_fallback(msk))) 958 goto fallback; 959 } 960 ··· 1263 1264 lock_sock(sk); 1265 1266 - mptcp_token_destroy(msk->token); 1267 inet_sk_state_store(sk, TCP_CLOSE); 1268 1269 - __mptcp_flush_join_list(msk); 1270 - 1271 list_splice_init(&msk->conn_list, &conn_list); 1272 1273 data_fin_tx_seq = msk->write_seq; ··· 1460 { 1461 struct mptcp_sock *msk = mptcp_sk(sk); 1462 1463 if (msk->cached_ext) 1464 __skb_ext_put(msk->cached_ext); 1465 ··· 1627 if (!msk->pm.server_side) 1628 return true; 1629 1630 - /* passive connection, attach to msk socket */ 1631 parent_sock = READ_ONCE(parent->sk_socket); 1632 if (parent_sock && !sk->sk_socket) 1633 mptcp_sock_graft(sk, parent_sock); 1634 - 1635 - ret = mptcp_pm_allow_new_subflow(msk); 1636 - if (ret) { 1637 - subflow->map_seq = msk->ack_seq; 1638 - 1639 - /* active connections are already on conn_list */ 1640 - spin_lock_bh(&msk->join_list_lock); 1641 - if (!WARN_ON_ONCE(!list_empty(&subflow->node))) 1642 - list_add_tail(&subflow->node, &msk->join_list); 1643 - spin_unlock_bh(&msk->join_list_lock); 1644 - } 1645 - return ret; 1646 } 1647 1648 bool mptcp_sk_is_subflow(const struct sock *sk) ··· 1724 int err; 1725 1726 lock_sock(sock->sk); 1727 ssock = __mptcp_socket_create(msk, TCP_SYN_SENT); 1728 if (IS_ERR(ssock)) { 1729 err = PTR_ERR(ssock); ··· 1746 mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0; 1747 #endif 1748 1749 err = ssock->ops->connect(ssock, uaddr, addr_len, flags); 1750 - inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); 1751 - mptcp_copy_inaddrs(sock->sk, ssock->sk); 1752 1753 unlock: 1754 release_sock(sock->sk);
··· 954 955 pr_debug("block timeout %ld", timeo); 956 mptcp_wait_data(sk, &timeo); 957 + ssock = __mptcp_tcp_fallback(msk); 958 + if (unlikely(ssock)) 959 goto fallback; 960 } 961 ··· 1262 1263 lock_sock(sk); 1264 1265 inet_sk_state_store(sk, TCP_CLOSE); 1266 1267 + /* be sure to always acquire the join list lock, to sync vs 1268 + * mptcp_finish_join(). 1269 + */ 1270 + spin_lock_bh(&msk->join_list_lock); 1271 + list_splice_tail_init(&msk->join_list, &msk->conn_list); 1272 + spin_unlock_bh(&msk->join_list_lock); 1273 list_splice_init(&msk->conn_list, &conn_list); 1274 1275 data_fin_tx_seq = msk->write_seq; ··· 1456 { 1457 struct mptcp_sock *msk = mptcp_sk(sk); 1458 1459 + mptcp_token_destroy(msk->token); 1460 if (msk->cached_ext) 1461 __skb_ext_put(msk->cached_ext); 1462 ··· 1622 if (!msk->pm.server_side) 1623 return true; 1624 1625 + if (!mptcp_pm_allow_new_subflow(msk)) 1626 + return false; 1627 + 1628 + /* active connections are already on conn_list, and we can't acquire 1629 + * msk lock here. 
1630 + * use the join list lock as synchronization point and double-check 1631 + * msk status to avoid racing with mptcp_close() 1632 + */ 1633 + spin_lock_bh(&msk->join_list_lock); 1634 + ret = inet_sk_state_load(parent) == TCP_ESTABLISHED; 1635 + if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) 1636 + list_add_tail(&subflow->node, &msk->join_list); 1637 + spin_unlock_bh(&msk->join_list_lock); 1638 + if (!ret) 1639 + return false; 1640 + 1641 + /* attach to msk socket only after we are sure he will deal with us 1642 + * at close time 1643 + */ 1644 parent_sock = READ_ONCE(parent->sk_socket); 1645 if (parent_sock && !sk->sk_socket) 1646 mptcp_sock_graft(sk, parent_sock); 1647 + subflow->map_seq = msk->ack_seq; 1648 + return true; 1649 } 1650 1651 bool mptcp_sk_is_subflow(const struct sock *sk) ··· 1711 int err; 1712 1713 lock_sock(sock->sk); 1714 + if (sock->state != SS_UNCONNECTED && msk->subflow) { 1715 + /* pending connection or invalid state, let existing subflow 1716 + * cope with that 1717 + */ 1718 + ssock = msk->subflow; 1719 + goto do_connect; 1720 + } 1721 + 1722 ssock = __mptcp_socket_create(msk, TCP_SYN_SENT); 1723 if (IS_ERR(ssock)) { 1724 err = PTR_ERR(ssock); ··· 1725 mptcp_subflow_ctx(ssock->sk)->request_mptcp = 0; 1726 #endif 1727 1728 + do_connect: 1729 err = ssock->ops->connect(ssock, uaddr, addr_len, flags); 1730 + sock->state = ssock->state; 1731 + 1732 + /* on successful connect, the msk state will be moved to established by 1733 + * subflow_finish_connect() 1734 + */ 1735 + if (!err || err == EINPROGRESS) 1736 + mptcp_copy_inaddrs(sock->sk, ssock->sk); 1737 + else 1738 + inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); 1739 1740 unlock: 1741 release_sock(sock->sk);
+1 -1
net/netfilter/ipset/ip_set_list_set.c
··· 59 /* Don't lookup sub-counters at all */ 60 opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; 61 if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) 62 - opt->cmdflags &= ~IPSET_FLAG_SKIP_COUNTER_UPDATE; 63 list_for_each_entry_rcu(e, &map->members, list) { 64 ret = ip_set_test(e->id, skb, par, opt); 65 if (ret <= 0)
··· 59 /* Don't lookup sub-counters at all */ 60 opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS; 61 if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE) 62 + opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE; 63 list_for_each_entry_rcu(e, &map->members, list) { 64 ret = ip_set_test(e->id, skb, par, opt); 65 if (ret <= 0)
+73 -7
net/netfilter/nf_conntrack_core.c
··· 2016 nf_conntrack_get(skb_nfct(nskb)); 2017 } 2018 2019 - static int nf_conntrack_update(struct net *net, struct sk_buff *skb) 2020 { 2021 struct nf_conntrack_tuple_hash *h; 2022 struct nf_conntrack_tuple tuple; 2023 - enum ip_conntrack_info ctinfo; 2024 struct nf_nat_hook *nat_hook; 2025 unsigned int status; 2026 - struct nf_conn *ct; 2027 int dataoff; 2028 u16 l3num; 2029 u8 l4num; 2030 - 2031 - ct = nf_ct_get(skb, &ctinfo); 2032 - if (!ct || nf_ct_is_confirmed(ct)) 2033 - return 0; 2034 2035 l3num = nf_ct_l3num(ct); 2036 ··· 2082 return -1; 2083 2084 return 0; 2085 } 2086 2087 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
··· 2016 nf_conntrack_get(skb_nfct(nskb)); 2017 } 2018 2019 + static int __nf_conntrack_update(struct net *net, struct sk_buff *skb, 2020 + struct nf_conn *ct, 2021 + enum ip_conntrack_info ctinfo) 2022 { 2023 struct nf_conntrack_tuple_hash *h; 2024 struct nf_conntrack_tuple tuple; 2025 struct nf_nat_hook *nat_hook; 2026 unsigned int status; 2027 int dataoff; 2028 u16 l3num; 2029 u8 l4num; 2030 2031 l3num = nf_ct_l3num(ct); 2032 ··· 2086 return -1; 2087 2088 return 0; 2089 + } 2090 + 2091 + /* This packet is coming from userspace via nf_queue, complete the packet 2092 + * processing after the helper invocation in nf_confirm(). 2093 + */ 2094 + static int nf_confirm_cthelper(struct sk_buff *skb, struct nf_conn *ct, 2095 + enum ip_conntrack_info ctinfo) 2096 + { 2097 + const struct nf_conntrack_helper *helper; 2098 + const struct nf_conn_help *help; 2099 + int protoff; 2100 + 2101 + help = nfct_help(ct); 2102 + if (!help) 2103 + return 0; 2104 + 2105 + helper = rcu_dereference(help->helper); 2106 + if (!(helper->flags & NF_CT_HELPER_F_USERSPACE)) 2107 + return 0; 2108 + 2109 + switch (nf_ct_l3num(ct)) { 2110 + case NFPROTO_IPV4: 2111 + protoff = skb_network_offset(skb) + ip_hdrlen(skb); 2112 + break; 2113 + #if IS_ENABLED(CONFIG_IPV6) 2114 + case NFPROTO_IPV6: { 2115 + __be16 frag_off; 2116 + u8 pnum; 2117 + 2118 + pnum = ipv6_hdr(skb)->nexthdr; 2119 + protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &pnum, 2120 + &frag_off); 2121 + if (protoff < 0 || (frag_off & htons(~0x7)) != 0) 2122 + return 0; 2123 + break; 2124 + } 2125 + #endif 2126 + default: 2127 + return 0; 2128 + } 2129 + 2130 + if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) && 2131 + !nf_is_loopback_packet(skb)) { 2132 + if (!nf_ct_seq_adjust(skb, ct, ctinfo, protoff)) { 2133 + NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop); 2134 + return -1; 2135 + } 2136 + } 2137 + 2138 + /* We've seen it coming out the other side: confirm it */ 2139 + return nf_conntrack_confirm(skb) == NF_DROP ? 
- 1 : 0; 2140 + } 2141 + 2142 + static int nf_conntrack_update(struct net *net, struct sk_buff *skb) 2143 + { 2144 + enum ip_conntrack_info ctinfo; 2145 + struct nf_conn *ct; 2146 + int err; 2147 + 2148 + ct = nf_ct_get(skb, &ctinfo); 2149 + if (!ct) 2150 + return 0; 2151 + 2152 + if (!nf_ct_is_confirmed(ct)) { 2153 + err = __nf_conntrack_update(net, skb, ct, ctinfo); 2154 + if (err < 0) 2155 + return err; 2156 + } 2157 + 2158 + return nf_confirm_cthelper(skb, ct, ctinfo); 2159 } 2160 2161 static bool nf_conntrack_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+35 -27
net/netfilter/nf_conntrack_pptp.c
··· 72 73 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) 74 /* PptpControlMessageType names */ 75 - const char *const pptp_msg_name[] = { 76 - "UNKNOWN_MESSAGE", 77 - "START_SESSION_REQUEST", 78 - "START_SESSION_REPLY", 79 - "STOP_SESSION_REQUEST", 80 - "STOP_SESSION_REPLY", 81 - "ECHO_REQUEST", 82 - "ECHO_REPLY", 83 - "OUT_CALL_REQUEST", 84 - "OUT_CALL_REPLY", 85 - "IN_CALL_REQUEST", 86 - "IN_CALL_REPLY", 87 - "IN_CALL_CONNECT", 88 - "CALL_CLEAR_REQUEST", 89 - "CALL_DISCONNECT_NOTIFY", 90 - "WAN_ERROR_NOTIFY", 91 - "SET_LINK_INFO" 92 }; 93 EXPORT_SYMBOL(pptp_msg_name); 94 #endif 95 ··· 284 typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; 285 286 msg = ntohs(ctlh->messageType); 287 - pr_debug("inbound control message %s\n", pptp_msg_name[msg]); 288 289 switch (msg) { 290 case PPTP_START_SESSION_REPLY: ··· 319 pcid = pptpReq->ocack.peersCallID; 320 if (info->pns_call_id != pcid) 321 goto invalid; 322 - pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name[msg], 323 ntohs(cid), ntohs(pcid)); 324 325 if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { ··· 336 goto invalid; 337 338 cid = pptpReq->icreq.callID; 339 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 340 info->cstate = PPTP_CALL_IN_REQ; 341 info->pac_call_id = cid; 342 break; ··· 355 if (info->pns_call_id != pcid) 356 goto invalid; 357 358 - pr_debug("%s, PCID=%X\n", pptp_msg_name[msg], ntohs(pcid)); 359 info->cstate = PPTP_CALL_IN_CONF; 360 361 /* we expect a GRE connection from PAC to PNS */ ··· 365 case PPTP_CALL_DISCONNECT_NOTIFY: 366 /* server confirms disconnect */ 367 cid = pptpReq->disc.callID; 368 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 369 info->cstate = PPTP_CALL_NONE; 370 371 /* untrack this call id, unexpect GRE packets */ ··· 392 invalid: 393 pr_debug("invalid %s: type=%d cid=%u pcid=%u " 394 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 395 - msg <= PPTP_MSG_MAX ? 
pptp_msg_name[msg] : pptp_msg_name[0], 396 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 397 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 398 return NF_ACCEPT; ··· 412 typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; 413 414 msg = ntohs(ctlh->messageType); 415 - pr_debug("outbound control message %s\n", pptp_msg_name[msg]); 416 417 switch (msg) { 418 case PPTP_START_SESSION_REQUEST: ··· 434 info->cstate = PPTP_CALL_OUT_REQ; 435 /* track PNS call id */ 436 cid = pptpReq->ocreq.callID; 437 - pr_debug("%s, CID=%X\n", pptp_msg_name[msg], ntohs(cid)); 438 info->pns_call_id = cid; 439 break; 440 ··· 448 pcid = pptpReq->icack.peersCallID; 449 if (info->pac_call_id != pcid) 450 goto invalid; 451 - pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name[msg], 452 ntohs(cid), ntohs(pcid)); 453 454 if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { ··· 488 invalid: 489 pr_debug("invalid %s: type=%d cid=%u pcid=%u " 490 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 491 - msg <= PPTP_MSG_MAX ? pptp_msg_name[msg] : pptp_msg_name[0], 492 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 493 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 494 return NF_ACCEPT;
··· 72 73 #if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) 74 /* PptpControlMessageType names */ 75 + static const char *const pptp_msg_name_array[PPTP_MSG_MAX + 1] = { 76 + [0] = "UNKNOWN_MESSAGE", 77 + [PPTP_START_SESSION_REQUEST] = "START_SESSION_REQUEST", 78 + [PPTP_START_SESSION_REPLY] = "START_SESSION_REPLY", 79 + [PPTP_STOP_SESSION_REQUEST] = "STOP_SESSION_REQUEST", 80 + [PPTP_STOP_SESSION_REPLY] = "STOP_SESSION_REPLY", 81 + [PPTP_ECHO_REQUEST] = "ECHO_REQUEST", 82 + [PPTP_ECHO_REPLY] = "ECHO_REPLY", 83 + [PPTP_OUT_CALL_REQUEST] = "OUT_CALL_REQUEST", 84 + [PPTP_OUT_CALL_REPLY] = "OUT_CALL_REPLY", 85 + [PPTP_IN_CALL_REQUEST] = "IN_CALL_REQUEST", 86 + [PPTP_IN_CALL_REPLY] = "IN_CALL_REPLY", 87 + [PPTP_IN_CALL_CONNECT] = "IN_CALL_CONNECT", 88 + [PPTP_CALL_CLEAR_REQUEST] = "CALL_CLEAR_REQUEST", 89 + [PPTP_CALL_DISCONNECT_NOTIFY] = "CALL_DISCONNECT_NOTIFY", 90 + [PPTP_WAN_ERROR_NOTIFY] = "WAN_ERROR_NOTIFY", 91 + [PPTP_SET_LINK_INFO] = "SET_LINK_INFO" 92 }; 93 + 94 + const char *pptp_msg_name(u_int16_t msg) 95 + { 96 + if (msg > PPTP_MSG_MAX) 97 + return pptp_msg_name_array[0]; 98 + 99 + return pptp_msg_name_array[msg]; 100 + } 101 EXPORT_SYMBOL(pptp_msg_name); 102 #endif 103 ··· 276 typeof(nf_nat_pptp_hook_inbound) nf_nat_pptp_inbound; 277 278 msg = ntohs(ctlh->messageType); 279 + pr_debug("inbound control message %s\n", pptp_msg_name(msg)); 280 281 switch (msg) { 282 case PPTP_START_SESSION_REPLY: ··· 311 pcid = pptpReq->ocack.peersCallID; 312 if (info->pns_call_id != pcid) 313 goto invalid; 314 + pr_debug("%s, CID=%X, PCID=%X\n", pptp_msg_name(msg), 315 ntohs(cid), ntohs(pcid)); 316 317 if (pptpReq->ocack.resultCode == PPTP_OUTCALL_CONNECT) { ··· 328 goto invalid; 329 330 cid = pptpReq->icreq.callID; 331 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 332 info->cstate = PPTP_CALL_IN_REQ; 333 info->pac_call_id = cid; 334 break; ··· 347 if (info->pns_call_id != pcid) 348 goto invalid; 349 350 + pr_debug("%s, PCID=%X\n", pptp_msg_name(msg), 
ntohs(pcid)); 351 info->cstate = PPTP_CALL_IN_CONF; 352 353 /* we expect a GRE connection from PAC to PNS */ ··· 357 case PPTP_CALL_DISCONNECT_NOTIFY: 358 /* server confirms disconnect */ 359 cid = pptpReq->disc.callID; 360 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 361 info->cstate = PPTP_CALL_NONE; 362 363 /* untrack this call id, unexpect GRE packets */ ··· 384 invalid: 385 pr_debug("invalid %s: type=%d cid=%u pcid=%u " 386 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 387 + pptp_msg_name(msg), 388 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 389 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 390 return NF_ACCEPT; ··· 404 typeof(nf_nat_pptp_hook_outbound) nf_nat_pptp_outbound; 405 406 msg = ntohs(ctlh->messageType); 407 + pr_debug("outbound control message %s\n", pptp_msg_name(msg)); 408 409 switch (msg) { 410 case PPTP_START_SESSION_REQUEST: ··· 426 info->cstate = PPTP_CALL_OUT_REQ; 427 /* track PNS call id */ 428 cid = pptpReq->ocreq.callID; 429 + pr_debug("%s, CID=%X\n", pptp_msg_name(msg), ntohs(cid)); 430 info->pns_call_id = cid; 431 break; 432 ··· 440 pcid = pptpReq->icack.peersCallID; 441 if (info->pac_call_id != pcid) 442 goto invalid; 443 + pr_debug("%s, CID=%X PCID=%X\n", pptp_msg_name(msg), 444 ntohs(cid), ntohs(pcid)); 445 446 if (pptpReq->icack.resultCode == PPTP_INCALL_ACCEPT) { ··· 480 invalid: 481 pr_debug("invalid %s: type=%d cid=%u pcid=%u " 482 "cstate=%d sstate=%d pns_cid=%u pac_cid=%u\n", 483 + pptp_msg_name(msg), 484 msg, ntohs(cid), ntohs(pcid), info->cstate, info->sstate, 485 ntohs(info->pns_call_id), ntohs(info->pac_call_id)); 486 return NF_ACCEPT;
+5 -5
net/qrtr/ns.c
··· 712 goto err_sock; 713 } 714 715 qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready; 716 717 sq.sq_port = QRTR_PORT_CTRL; ··· 724 ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq)); 725 if (ret < 0) { 726 pr_err("failed to bind to socket\n"); 727 - goto err_sock; 728 } 729 730 qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR; 731 qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST; 732 qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL; 733 - 734 - qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); 735 - if (!qrtr_ns.workqueue) 736 - goto err_sock; 737 738 ret = say_hello(&qrtr_ns.bcast_sq); 739 if (ret < 0)
··· 712 goto err_sock; 713 } 714 715 + qrtr_ns.workqueue = alloc_workqueue("qrtr_ns_handler", WQ_UNBOUND, 1); 716 + if (!qrtr_ns.workqueue) 717 + goto err_sock; 718 + 719 qrtr_ns.sock->sk->sk_data_ready = qrtr_ns_data_ready; 720 721 sq.sq_port = QRTR_PORT_CTRL; ··· 720 ret = kernel_bind(qrtr_ns.sock, (struct sockaddr *)&sq, sizeof(sq)); 721 if (ret < 0) { 722 pr_err("failed to bind to socket\n"); 723 + goto err_wq; 724 } 725 726 qrtr_ns.bcast_sq.sq_family = AF_QIPCRTR; 727 qrtr_ns.bcast_sq.sq_node = QRTR_NODE_BCAST; 728 qrtr_ns.bcast_sq.sq_port = QRTR_PORT_CTRL; 729 730 ret = say_hello(&qrtr_ns.bcast_sq); 731 if (ret < 0)
+3
net/sched/act_ct.c
··· 199 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 200 struct nf_conntrack_tuple target; 201 202 nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); 203 204 switch (tuple->src.l3num) {
··· 199 const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple; 200 struct nf_conntrack_tuple target; 201 202 + if (!(ct->status & IPS_NAT_MASK)) 203 + return 0; 204 + 205 nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple); 206 207 switch (tuple->src.l3num) {
+2 -2
net/sched/sch_fq_pie.c
··· 297 goto flow_error; 298 } 299 q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]); 300 - if (!q->flows_cnt || q->flows_cnt > 65536) { 301 NL_SET_ERR_MSG_MOD(extack, 302 - "Number of flows must be < 65536"); 303 goto flow_error; 304 } 305 }
··· 297 goto flow_error; 298 } 299 q->flows_cnt = nla_get_u32(tb[TCA_FQ_PIE_FLOWS]); 300 + if (!q->flows_cnt || q->flows_cnt >= 65536) { 301 NL_SET_ERR_MSG_MOD(extack, 302 + "Number of flows must range in [1..65535]"); 303 goto flow_error; 304 } 305 }
+1 -1
net/sctp/Kconfig
··· 31 homing at either or both ends of an association." 32 33 To compile this protocol support as a module, choose M here: the 34 - module will be called sctp. Debug messages are handeled by the 35 kernel's dynamic debugging framework. 36 37 If in doubt, say N.
··· 31 homing at either or both ends of an association." 32 33 To compile this protocol support as a module, choose M here: the 34 + module will be called sctp. Debug messages are handled by the 35 kernel's dynamic debugging framework. 36 37 If in doubt, say N.
+3
net/sctp/ulpevent.c
··· 343 struct sockaddr_storage addr; 344 struct sctp_ulpevent *event; 345 346 memset(&addr, 0, sizeof(struct sockaddr_storage)); 347 memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); 348
··· 343 struct sockaddr_storage addr; 344 struct sctp_ulpevent *event; 345 346 + if (asoc->state < SCTP_STATE_ESTABLISHED) 347 + return; 348 + 349 memset(&addr, 0, sizeof(struct sockaddr_storage)); 350 memcpy(&addr, &transport->ipaddr, transport->af_specific->sockaddr_len); 351
+27 -6
net/tls/tls_sw.c
··· 206 207 kfree(aead_req); 208 209 pending = atomic_dec_return(&ctx->decrypt_pending); 210 211 - if (!pending && READ_ONCE(ctx->async_notify)) 212 complete(&ctx->async_wait.completion); 213 } 214 215 static int tls_do_decryption(struct sock *sk, ··· 469 ready = true; 470 } 471 472 pending = atomic_dec_return(&ctx->encrypt_pending); 473 474 - if (!pending && READ_ONCE(ctx->async_notify)) 475 complete(&ctx->async_wait.completion); 476 477 if (!ready) 478 return; ··· 933 int num_zc = 0; 934 int orig_size; 935 int ret = 0; 936 937 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 938 return -EOPNOTSUPP; ··· 1100 goto send_end; 1101 } else if (num_zc) { 1102 /* Wait for pending encryptions to get completed */ 1103 - smp_store_mb(ctx->async_notify, true); 1104 1105 - if (atomic_read(&ctx->encrypt_pending)) 1106 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1107 else 1108 reinit_completion(&ctx->async_wait.completion); 1109 1110 WRITE_ONCE(ctx->async_notify, false); 1111 1112 if (ctx->async_wait.err) { ··· 1743 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1744 bool is_peek = flags & MSG_PEEK; 1745 int num_async = 0; 1746 1747 flags |= nonblock; 1748 ··· 1906 recv_end: 1907 if (num_async) { 1908 /* Wait for all previously submitted records to be decrypted */ 1909 - smp_store_mb(ctx->async_notify, true); 1910 - if (atomic_read(&ctx->decrypt_pending)) { 1911 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1912 if (err) { 1913 /* one of async decrypt failed */ ··· 1922 } else { 1923 reinit_completion(&ctx->async_wait.completion); 1924 } 1925 WRITE_ONCE(ctx->async_notify, false); 1926 1927 /* Drain records from the rx_list & copy if required */ ··· 2312 2313 if (tx) { 2314 crypto_init_wait(&sw_ctx_tx->async_wait); 2315 crypto_info = &ctx->crypto_send.info; 2316 cctx = &ctx->tx; 2317 aead = &sw_ctx_tx->aead_send; ··· 2321 sw_ctx_tx->tx_work.sk = sk; 2322 } else { 2323 crypto_init_wait(&sw_ctx_rx->async_wait); 2324 crypto_info = 
&ctx->crypto_recv.info; 2325 cctx = &ctx->rx; 2326 skb_queue_head_init(&sw_ctx_rx->rx_list);
··· 206 207 kfree(aead_req); 208 209 + spin_lock_bh(&ctx->decrypt_compl_lock); 210 pending = atomic_dec_return(&ctx->decrypt_pending); 211 212 + if (!pending && ctx->async_notify) 213 complete(&ctx->async_wait.completion); 214 + spin_unlock_bh(&ctx->decrypt_compl_lock); 215 } 216 217 static int tls_do_decryption(struct sock *sk, ··· 467 ready = true; 468 } 469 470 + spin_lock_bh(&ctx->encrypt_compl_lock); 471 pending = atomic_dec_return(&ctx->encrypt_pending); 472 473 + if (!pending && ctx->async_notify) 474 complete(&ctx->async_wait.completion); 475 + spin_unlock_bh(&ctx->encrypt_compl_lock); 476 477 if (!ready) 478 return; ··· 929 int num_zc = 0; 930 int orig_size; 931 int ret = 0; 932 + int pending; 933 934 if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL)) 935 return -EOPNOTSUPP; ··· 1095 goto send_end; 1096 } else if (num_zc) { 1097 /* Wait for pending encryptions to get completed */ 1098 + spin_lock_bh(&ctx->encrypt_compl_lock); 1099 + ctx->async_notify = true; 1100 1101 + pending = atomic_read(&ctx->encrypt_pending); 1102 + spin_unlock_bh(&ctx->encrypt_compl_lock); 1103 + if (pending) 1104 crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1105 else 1106 reinit_completion(&ctx->async_wait.completion); 1107 1108 + /* There can be no concurrent accesses, since we have no 1109 + * pending encrypt operations 1110 + */ 1111 WRITE_ONCE(ctx->async_notify, false); 1112 1113 if (ctx->async_wait.err) { ··· 1732 bool is_kvec = iov_iter_is_kvec(&msg->msg_iter); 1733 bool is_peek = flags & MSG_PEEK; 1734 int num_async = 0; 1735 + int pending; 1736 1737 flags |= nonblock; 1738 ··· 1894 recv_end: 1895 if (num_async) { 1896 /* Wait for all previously submitted records to be decrypted */ 1897 + spin_lock_bh(&ctx->decrypt_compl_lock); 1898 + ctx->async_notify = true; 1899 + pending = atomic_read(&ctx->decrypt_pending); 1900 + spin_unlock_bh(&ctx->decrypt_compl_lock); 1901 + if (pending) { 1902 err = crypto_wait_req(-EINPROGRESS, &ctx->async_wait); 1903 if (err) { 
1904 /* one of async decrypt failed */ ··· 1907 } else { 1908 reinit_completion(&ctx->async_wait.completion); 1909 } 1910 + 1911 + /* There can be no concurrent accesses, since we have no 1912 + * pending decrypt operations 1913 + */ 1914 WRITE_ONCE(ctx->async_notify, false); 1915 1916 /* Drain records from the rx_list & copy if required */ ··· 2293 2294 if (tx) { 2295 crypto_init_wait(&sw_ctx_tx->async_wait); 2296 + spin_lock_init(&sw_ctx_tx->encrypt_compl_lock); 2297 crypto_info = &ctx->crypto_send.info; 2298 cctx = &ctx->tx; 2299 aead = &sw_ctx_tx->aead_send; ··· 2301 sw_ctx_tx->tx_work.sk = sk; 2302 } else { 2303 crypto_init_wait(&sw_ctx_rx->async_wait); 2304 + spin_lock_init(&sw_ctx_rx->decrypt_compl_lock); 2305 crypto_info = &ctx->crypto_recv.info; 2306 cctx = &ctx->rx; 2307 skb_queue_head_init(&sw_ctx_rx->rx_list);
+1 -1
net/vmw_vsock/af_vsock.c
··· 1408 /* Wait for children sockets to appear; these are the new sockets 1409 * created upon connection establishment. 1410 */ 1411 - timeout = sock_sndtimeo(listener, flags & O_NONBLOCK); 1412 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); 1413 1414 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
··· 1408 /* Wait for children sockets to appear; these are the new sockets 1409 * created upon connection establishment. 1410 */ 1411 + timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK); 1412 prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE); 1413 1414 while ((connected = vsock_dequeue_accept(listener)) == NULL &&
+8
net/vmw_vsock/virtio_transport_common.c
··· 1132 1133 lock_sock(sk); 1134 1135 /* Update CID in case it has changed after a transport reset event */ 1136 vsk->local_addr.svm_cid = dst.svm_cid; 1137
··· 1132 1133 lock_sock(sk); 1134 1135 + /* Check if sk has been released before lock_sock */ 1136 + if (sk->sk_shutdown == SHUTDOWN_MASK) { 1137 + (void)virtio_transport_reset_no_sock(t, pkt); 1138 + release_sock(sk); 1139 + sock_put(sk); 1140 + goto free_pkt; 1141 + } 1142 + 1143 /* Update CID in case it has changed after a transport reset event */ 1144 vsk->local_addr.svm_cid = dst.svm_cid; 1145
+1 -1
net/wireless/core.c
··· 142 if (result) 143 return result; 144 145 - if (rdev->wiphy.debugfsdir) 146 debugfs_rename(rdev->wiphy.debugfsdir->d_parent, 147 rdev->wiphy.debugfsdir, 148 rdev->wiphy.debugfsdir->d_parent, newname);
··· 142 if (result) 143 return result; 144 145 + if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir)) 146 debugfs_rename(rdev->wiphy.debugfsdir->d_parent, 147 rdev->wiphy.debugfsdir, 148 rdev->wiphy.debugfsdir->d_parent, newname);
+6 -2
net/xdp/xdp_umem.c
··· 341 { 342 bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; 343 u32 chunk_size = mr->chunk_size, headroom = mr->headroom; 344 unsigned int chunks, chunks_per_page; 345 - u64 addr = mr->addr, size = mr->len; 346 int err; 347 348 if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { ··· 372 if ((addr + size) < addr) 373 return -EINVAL; 374 375 chunks = (unsigned int)div_u64(size, chunk_size); 376 if (chunks == 0) 377 return -EINVAL; ··· 395 umem->size = size; 396 umem->headroom = headroom; 397 umem->chunk_size_nohr = chunk_size - headroom; 398 - umem->npgs = size / PAGE_SIZE; 399 umem->pgs = NULL; 400 umem->user = NULL; 401 umem->flags = mr->flags;
··· 341 { 342 bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; 343 u32 chunk_size = mr->chunk_size, headroom = mr->headroom; 344 + u64 npgs, addr = mr->addr, size = mr->len; 345 unsigned int chunks, chunks_per_page; 346 int err; 347 348 if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { ··· 372 if ((addr + size) < addr) 373 return -EINVAL; 374 375 + npgs = div_u64(size, PAGE_SIZE); 376 + if (npgs > U32_MAX) 377 + return -EINVAL; 378 + 379 chunks = (unsigned int)div_u64(size, chunk_size); 380 if (chunks == 0) 381 return -EINVAL; ··· 391 umem->size = size; 392 umem->headroom = headroom; 393 umem->chunk_size_nohr = chunk_size - headroom; 394 + umem->npgs = (u32)npgs; 395 umem->pgs = NULL; 396 umem->user = NULL; 397 umem->flags = mr->flags;
+2
net/xfrm/espintcp.c
··· 379 { 380 struct espintcp_ctx *ctx = espintcp_getctx(sk); 381 382 kfree(ctx); 383 } 384 ··· 420 sk->sk_socket->ops = &espintcp_ops; 421 ctx->saved_data_ready = sk->sk_data_ready; 422 ctx->saved_write_space = sk->sk_write_space; 423 sk->sk_data_ready = espintcp_data_ready; 424 sk->sk_write_space = espintcp_write_space; 425 sk->sk_destruct = espintcp_destruct;
··· 379 { 380 struct espintcp_ctx *ctx = espintcp_getctx(sk); 381 382 + ctx->saved_destruct(sk); 383 kfree(ctx); 384 } 385 ··· 419 sk->sk_socket->ops = &espintcp_ops; 420 ctx->saved_data_ready = sk->sk_data_ready; 421 ctx->saved_write_space = sk->sk_write_space; 422 + ctx->saved_destruct = sk->sk_destruct; 423 sk->sk_data_ready = espintcp_data_ready; 424 sk->sk_write_space = espintcp_write_space; 425 sk->sk_destruct = espintcp_destruct;
+3 -5
net/xfrm/xfrm_device.c
··· 25 struct xfrm_offload *xo = xfrm_offload(skb); 26 27 skb_reset_mac_len(skb); 28 - pskb_pull(skb, skb->mac_len + hsize + x->props.header_len); 29 - 30 - if (xo->flags & XFRM_GSO_SEGMENT) { 31 - skb_reset_transport_header(skb); 32 skb->transport_header -= x->props.header_len; 33 - } 34 } 35 36 static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
··· 25 struct xfrm_offload *xo = xfrm_offload(skb); 26 27 skb_reset_mac_len(skb); 28 + if (xo->flags & XFRM_GSO_SEGMENT) 29 skb->transport_header -= x->props.header_len; 30 + 31 + pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len); 32 } 33 34 static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
+1 -1
net/xfrm/xfrm_input.c
··· 644 dev_put(skb->dev); 645 646 spin_lock(&x->lock); 647 - if (nexthdr <= 0) { 648 if (nexthdr == -EBADMSG) { 649 xfrm_audit_state_icvfail(x, skb, 650 x->type->proto);
··· 644 dev_put(skb->dev); 645 646 spin_lock(&x->lock); 647 + if (nexthdr < 0) { 648 if (nexthdr == -EBADMSG) { 649 xfrm_audit_state_icvfail(x, skb, 650 x->type->proto);
+21
net/xfrm/xfrm_interface.c
··· 750 .get_link_net = xfrmi_get_link_net, 751 }; 752 753 static struct pernet_operations xfrmi_net_ops = { 754 .id = &xfrmi_net_id, 755 .size = sizeof(struct xfrmi_net), 756 };
··· 750 .get_link_net = xfrmi_get_link_net, 751 }; 752 753 + static void __net_exit xfrmi_exit_batch_net(struct list_head *net_exit_list) 754 + { 755 + struct net *net; 756 + LIST_HEAD(list); 757 + 758 + rtnl_lock(); 759 + list_for_each_entry(net, net_exit_list, exit_list) { 760 + struct xfrmi_net *xfrmn = net_generic(net, xfrmi_net_id); 761 + struct xfrm_if __rcu **xip; 762 + struct xfrm_if *xi; 763 + 764 + for (xip = &xfrmn->xfrmi[0]; 765 + (xi = rtnl_dereference(*xip)) != NULL; 766 + xip = &xi->next) 767 + unregister_netdevice_queue(xi->dev, &list); 768 + } 769 + unregister_netdevice_many(&list); 770 + rtnl_unlock(); 771 + } 772 + 773 static struct pernet_operations xfrmi_net_ops = { 774 + .exit_batch = xfrmi_exit_batch_net, 775 .id = &xfrmi_net_id, 776 .size = sizeof(struct xfrmi_net), 777 };
+9 -6
net/xfrm/xfrm_output.c
··· 583 xfrm_state_hold(x); 584 585 if (skb_is_gso(skb)) { 586 - skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; 587 588 - return xfrm_output2(net, sk, skb); 589 } 590 591 if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) 592 goto out; 593 } 594 - 595 - if (skb_is_gso(skb)) 596 - return xfrm_output_gso(net, sk, skb); 597 598 if (skb->ip_summed == CHECKSUM_PARTIAL) { 599 err = skb_checksum_help(skb); ··· 642 643 if (skb->protocol == htons(ETH_P_IP)) 644 proto = AF_INET; 645 - else if (skb->protocol == htons(ETH_P_IPV6)) 646 proto = AF_INET6; 647 else 648 return;
··· 583 xfrm_state_hold(x); 584 585 if (skb_is_gso(skb)) { 586 + if (skb->inner_protocol) 587 + return xfrm_output_gso(net, sk, skb); 588 589 + skb_shinfo(skb)->gso_type |= SKB_GSO_ESP; 590 + goto out; 591 } 592 593 if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM) 594 goto out; 595 + } else { 596 + if (skb_is_gso(skb)) 597 + return xfrm_output_gso(net, sk, skb); 598 } 599 600 if (skb->ip_summed == CHECKSUM_PARTIAL) { 601 err = skb_checksum_help(skb); ··· 640 641 if (skb->protocol == htons(ETH_P_IP)) 642 proto = AF_INET; 643 + else if (skb->protocol == htons(ETH_P_IPV6) && 644 + skb->sk->sk_family == AF_INET6) 645 proto = AF_INET6; 646 else 647 return;
+1 -6
net/xfrm/xfrm_policy.c
··· 1436 static bool xfrm_policy_mark_match(struct xfrm_policy *policy, 1437 struct xfrm_policy *pol) 1438 { 1439 - u32 mark = policy->mark.v & policy->mark.m; 1440 - 1441 - if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m) 1442 - return true; 1443 - 1444 - if ((mark & pol->mark.m) == pol->mark.v && 1445 policy->priority == pol->priority) 1446 return true; 1447
··· 1436 static bool xfrm_policy_mark_match(struct xfrm_policy *policy, 1437 struct xfrm_policy *pol) 1438 { 1439 + if (policy->mark.v == pol->mark.v && 1440 policy->priority == pol->priority) 1441 return true; 1442
+32 -14
tools/testing/selftests/bpf/verifier/bounds.c
··· 238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 239 BPF_LD_MAP_FD(BPF_REG_1, 0), 240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 241 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 242 /* r1 = [0x00, 0xff] */ 243 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), ··· 253 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 254 */ 255 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 256 - /* r1 = 0 or 257 - * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] 258 - */ 259 - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), 260 /* error on OOB pointer computation */ 261 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 262 /* exit */ ··· 261 }, 262 .fixup_map_hash_8b = { 3 }, 263 /* not actually fully unbounded, but the bound is very high */ 264 - .errstr = "value 72057594021150720 makes map_value pointer be out of bounds", 265 - .result = REJECT 266 }, 267 { 268 "bounds check after truncation of boundary-crossing range (2)", ··· 274 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 275 BPF_LD_MAP_FD(BPF_REG_1, 0), 276 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 277 - BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9), 278 /* r1 = [0x00, 0xff] */ 279 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), ··· 291 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 292 */ 293 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 294 - /* r1 = 0 or 295 - * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff] 296 - */ 297 - BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8), 298 /* error on OOB pointer computation */ 299 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 300 /* exit */ ··· 299 }, 300 .fixup_map_hash_8b = { 3 }, 301 /* not actually fully unbounded, but the bound is very high */ 302 - .errstr = "value 72057594021150720 makes map_value pointer be out of bounds", 303 - .result = REJECT 304 }, 305 { 306 "bounds check after wrapping 32-bit addition", ··· 534 BPF_EXIT_INSN(), 535 }, 536 .result = 
ACCEPT 537 },
··· 238 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 239 BPF_LD_MAP_FD(BPF_REG_1, 0), 240 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 241 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 242 /* r1 = [0x00, 0xff] */ 243 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 244 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), ··· 253 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 254 */ 255 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 256 /* error on OOB pointer computation */ 257 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 258 /* exit */ ··· 265 }, 266 .fixup_map_hash_8b = { 3 }, 267 /* not actually fully unbounded, but the bound is very high */ 268 + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root", 269 + .result_unpriv = REJECT, 270 + .errstr = "value -4294967168 makes map_value pointer be out of bounds", 271 + .result = REJECT, 272 }, 273 { 274 "bounds check after truncation of boundary-crossing range (2)", ··· 276 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 277 BPF_LD_MAP_FD(BPF_REG_1, 0), 278 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem), 279 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8), 280 /* r1 = [0x00, 0xff] */ 281 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0), 282 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1), ··· 293 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff] 294 */ 295 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1), 296 /* error on OOB pointer computation */ 297 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1), 298 /* exit */ ··· 305 }, 306 .fixup_map_hash_8b = { 3 }, 307 /* not actually fully unbounded, but the bound is very high */ 308 + .errstr_unpriv = "R1 has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root", 309 + .result_unpriv = REJECT, 310 + .errstr = "value -4294967168 makes map_value pointer be out of bounds", 311 + .result = REJECT, 312 }, 313 { 314 "bounds check after wrapping 32-bit addition", ··· 
538 BPF_EXIT_INSN(), 539 }, 540 .result = ACCEPT 541 + }, 542 + { 543 + "assigning 32bit bounds to 64bit for wA = 0, wB = wA", 544 + .insns = { 545 + BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1, 546 + offsetof(struct __sk_buff, data_end)), 547 + BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, 548 + offsetof(struct __sk_buff, data)), 549 + BPF_MOV32_IMM(BPF_REG_9, 0), 550 + BPF_MOV32_REG(BPF_REG_2, BPF_REG_9), 551 + BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), 552 + BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_2), 553 + BPF_MOV64_REG(BPF_REG_3, BPF_REG_6), 554 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 8), 555 + BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_8, 1), 556 + BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_6, 0), 557 + BPF_MOV64_IMM(BPF_REG_0, 0), 558 + BPF_EXIT_INSN(), 559 + }, 560 + .prog_type = BPF_PROG_TYPE_SCHED_CLS, 561 + .result = ACCEPT, 562 + .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 563 },
+21
tools/testing/selftests/tc-testing/tc-tests/qdiscs/fq_pie.json
···
··· 1 + [ 2 + { 3 + "id": "83be", 4 + "name": "Create FQ-PIE with invalid number of flows", 5 + "category": [ 6 + "qdisc", 7 + "fq_pie" 8 + ], 9 + "setup": [ 10 + "$IP link add dev $DUMMY type dummy || /bin/true" 11 + ], 12 + "cmdUnderTest": "$TC qdisc add dev $DUMMY root fq_pie flows 65536", 13 + "expExitCode": "2", 14 + "verifyCmd": "$TC qdisc show dev $DUMMY", 15 + "matchPattern": "qdisc", 16 + "matchCount": "0", 17 + "teardown": [ 18 + "$IP link del dev $DUMMY" 19 + ] 20 + } 21 + ]