Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from David Miller:

1) Fix big endian overflow in nf_flow_table, from Arnd Bergmann.

2) Fix port selection on big endian in nft_tproxy, from Phil Sutter.

3) Fix precision tracking for unbound scalars in bpf verifier, from
Daniel Borkmann.

4) Fix integer overflow in socket rcvbuf check in UDP, from Antonio
Messina.

5) Do not perform a neigh confirmation during a pmtu update over a
tunnel, from Hangbin Liu.

6) Fix DMA mapping leak in dpaa_eth driver, from Madalin Bucur.

7) Various PTP fixes for sja1105 dsa driver, from Vladimir Oltean.

8) Add missing inline to dummy definition of of_mdiobus_child_is_phy(),
from Geert Uytterhoeven.

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (54 commits)
hsr: fix slab-out-of-bounds Read in hsr_debugfs_rename()
net/sched: add delete_empty() to filters and use it in cls_flower
tcp: Fix highest_sack and highest_sack_seq
ptp: fix the race between the release of ptp_clock and cdev
net: dsa: sja1105: Reconcile the meaning of TPID and TPID2 for E/T and P/Q/R/S
Documentation: net: dsa: sja1105: Remove text about taprio base-time limitation
net: dsa: sja1105: Remove restriction of zero base-time for taprio offload
net: dsa: sja1105: Really make the PTP command read-write
net: dsa: sja1105: Take PTP egress timestamp by port, not mgmt slot
cxgb4/cxgb4vf: fix flow control display for auto negotiation
mlxsw: spectrum: Use dedicated policer for VRRP packets
mlxsw: spectrum_router: Skip loopback RIFs during MAC validation
net: stmmac: dwmac-meson8b: Fix the RGMII TX delay on Meson8b/8m2 SoCs
net/sched: act_mirred: Pull mac prior redir to non mac_header_xmit device
net_sched: sch_fq: properly set sk->sk_pacing_status
bnx2x: Fix accounting of vlan resources among the PFs
bnx2x: Use appropriate define for vlan credit
of: mdio: Add missing inline to of_mdiobus_child_is_phy() dummy
net: phy: aquantia: add suspend / resume ops for AQR105
dpaa_eth: fix DMA mapping leak
...

+603 -492
-6
Documentation/networking/dsa/sja1105.rst
··· 230 230 against this restriction and errors out when appropriate. Schedule analysis is 231 231 needed to avoid this, which is outside the scope of the document. 232 232 233 - At the moment, the time-aware scheduler can only be triggered based on a 234 - standalone clock and not based on PTP time. This means the base-time argument 235 - from tc-taprio is ignored and the schedule starts right away. It also means it 236 - is more difficult to phase-align the scheduler with the other devices in the 237 - network. 238 - 239 233 Device Tree bindings and board design 240 234 ===================================== 241 235
+2
MAINTAINERS
··· 771 771 772 772 AMAZON ETHERNET DRIVERS 773 773 M: Netanel Belgazal <netanel@amazon.com> 774 + M: Arthur Kiyanovski <akiyano@amazon.com> 775 + R: Guy Tzalik <gtzalik@amazon.com> 774 776 R: Saeed Bishara <saeedb@amazon.com> 775 777 R: Zorik Machulsky <zorik@amazon.com> 776 778 L: netdev@vger.kernel.org
+3 -3
drivers/net/dsa/bcm_sf2_cfp.c
··· 358 358 return -EINVAL; 359 359 } 360 360 361 - ip_frag = be32_to_cpu(fs->m_ext.data[0]); 361 + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1); 362 362 363 363 /* Locate the first rule available */ 364 364 if (fs->location == RX_CLS_LOC_ANY) ··· 569 569 570 570 if (rule->fs.flow_type != fs->flow_type || 571 571 rule->fs.ring_cookie != fs->ring_cookie || 572 - rule->fs.m_ext.data[0] != fs->m_ext.data[0]) 572 + rule->fs.h_ext.data[0] != fs->h_ext.data[0]) 573 573 continue; 574 574 575 575 switch (fs->flow_type & ~FLOW_EXT) { ··· 621 621 return -EINVAL; 622 622 } 623 623 624 - ip_frag = be32_to_cpu(fs->m_ext.data[0]); 624 + ip_frag = !!(be32_to_cpu(fs->h_ext.data[0]) & 1); 625 625 626 626 layout = &udf_tcpip6_layout; 627 627 slice_num = bcm_sf2_get_slice_number(layout, 0);
+5 -5
drivers/net/dsa/sja1105/sja1105_main.c
··· 1569 1569 1570 1570 if (enabled) { 1571 1571 /* Enable VLAN filtering. */ 1572 - tpid = ETH_P_8021AD; 1573 - tpid2 = ETH_P_8021Q; 1572 + tpid = ETH_P_8021Q; 1573 + tpid2 = ETH_P_8021AD; 1574 1574 } else { 1575 1575 /* Disable VLAN filtering. */ 1576 1576 tpid = ETH_P_SJA1105; ··· 1579 1579 1580 1580 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS]; 1581 1581 general_params = table->entries; 1582 - /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ 1583 - general_params->tpid = tpid; 1584 1582 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */ 1583 + general_params->tpid = tpid; 1584 + /* EtherType used to identify outer tagged (S-tag) VLAN traffic */ 1585 1585 general_params->tpid2 = tpid2; 1586 1586 /* When VLAN filtering is on, we need to at least be able to 1587 1587 * decode management traffic through the "backup plan". ··· 1855 1855 if (!clone) 1856 1856 goto out; 1857 1857 1858 - sja1105_ptp_txtstamp_skb(ds, slot, clone); 1858 + sja1105_ptp_txtstamp_skb(ds, port, clone); 1859 1859 1860 1860 out: 1861 1861 mutex_unlock(&priv->mgmt_lock);
+3 -3
drivers/net/dsa/sja1105/sja1105_ptp.c
··· 234 234 if (rw == SPI_WRITE) 235 235 priv->info->ptp_cmd_packing(buf, cmd, PACK); 236 236 237 - rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->ptp_control, buf, 237 + rc = sja1105_xfer_buf(priv, rw, regs->ptp_control, buf, 238 238 SJA1105_SIZE_PTP_CMD); 239 239 240 240 if (rw == SPI_READ) ··· 659 659 ptp_data->clock = NULL; 660 660 } 661 661 662 - void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, 662 + void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int port, 663 663 struct sk_buff *skb) 664 664 { 665 665 struct sja1105_private *priv = ds->priv; ··· 679 679 goto out; 680 680 } 681 681 682 - rc = sja1105_ptpegr_ts_poll(ds, slot, &ts); 682 + rc = sja1105_ptpegr_ts_poll(ds, port, &ts); 683 683 if (rc < 0) { 684 684 dev_err(ds->dev, "timed out polling for tstamp\n"); 685 685 kfree_skb(skb);
+5 -2
drivers/net/dsa/sja1105/sja1105_static_config.c
··· 142 142 return size; 143 143 } 144 144 145 + /* TPID and TPID2 are intentionally reversed so that semantic 146 + * compatibility with E/T is kept. 147 + */ 145 148 static size_t 146 149 sja1105pqrs_general_params_entry_packing(void *buf, void *entry_ptr, 147 150 enum packing_op op) ··· 169 166 sja1105_packing(buf, &entry->mirr_port, 141, 139, size, op); 170 167 sja1105_packing(buf, &entry->vlmarker, 138, 107, size, op); 171 168 sja1105_packing(buf, &entry->vlmask, 106, 75, size, op); 172 - sja1105_packing(buf, &entry->tpid, 74, 59, size, op); 169 + sja1105_packing(buf, &entry->tpid2, 74, 59, size, op); 173 170 sja1105_packing(buf, &entry->ignore2stf, 58, 58, size, op); 174 - sja1105_packing(buf, &entry->tpid2, 57, 42, size, op); 171 + sja1105_packing(buf, &entry->tpid, 57, 42, size, op); 175 172 sja1105_packing(buf, &entry->queue_ts, 41, 41, size, op); 176 173 sja1105_packing(buf, &entry->egrmirrvid, 40, 29, size, op); 177 174 sja1105_packing(buf, &entry->egrmirrpcp, 28, 26, size, op);
-5
drivers/net/dsa/sja1105/sja1105_tas.c
··· 477 477 if (admin->cycle_time_extension) 478 478 return -ENOTSUPP; 479 479 480 - if (!ns_to_sja1105_delta(admin->base_time)) { 481 - dev_err(ds->dev, "A base time of zero is not hardware-allowed\n"); 482 - return -ERANGE; 483 - } 484 - 485 480 for (i = 0; i < admin->num_entries; i++) { 486 481 s64 delta_ns = admin->entries[i].interval; 487 482 s64 delta_cycles = ns_to_sja1105_delta(delta_ns);
+4 -1
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
··· 1536 1536 ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_MAC_CREDIT_CNT) / \ 1537 1537 func_num + GET_NUM_VFS_PER_PF(bp) * VF_MAC_CREDIT_CNT) 1538 1538 1539 + #define BNX2X_VFS_VLAN_CREDIT(bp) \ 1540 + (GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) 1541 + 1539 1542 #define PF_VLAN_CREDIT_E2(bp, func_num) \ 1540 - ((MAX_MAC_CREDIT_E2 - GET_NUM_VFS_PER_PATH(bp) * VF_VLAN_CREDIT_CNT) / \ 1543 + ((MAX_VLAN_CREDIT_E2 - 1 - BNX2X_VFS_VLAN_CREDIT(bp)) / \ 1541 1544 func_num + GET_NUM_VFS_PER_PF(bp) * VF_VLAN_CREDIT_CNT) 1542 1545 1543 1546 #endif /* BNX2X_SP_VERBS */
+1
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
··· 504 504 505 505 enum cc_pause requested_fc; /* flow control user has requested */ 506 506 enum cc_pause fc; /* actual link flow control */ 507 + enum cc_pause advertised_fc; /* actual advertised flow control */ 507 508 508 509 enum cc_fec requested_fec; /* Forward Error Correction: */ 509 510 enum cc_fec fec; /* requested and actual in use */
+2 -2
drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
··· 807 807 struct port_info *p = netdev_priv(dev); 808 808 809 809 epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; 810 - epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0; 811 - epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0; 810 + epause->rx_pause = (p->link_cfg.advertised_fc & PAUSE_RX) != 0; 811 + epause->tx_pause = (p->link_cfg.advertised_fc & PAUSE_TX) != 0; 812 812 } 813 813 814 814 static int set_pauseparam(struct net_device *dev,
+14 -9
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
··· 4089 4089 if (cc_pause & PAUSE_TX) 4090 4090 fw_pause |= FW_PORT_CAP32_802_3_PAUSE; 4091 4091 else 4092 - fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; 4092 + fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR | 4093 + FW_PORT_CAP32_802_3_PAUSE; 4093 4094 } else if (cc_pause & PAUSE_TX) { 4094 4095 fw_pause |= FW_PORT_CAP32_802_3_ASM_DIR; 4095 4096 } ··· 8564 8563 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl) 8565 8564 { 8566 8565 const struct fw_port_cmd *cmd = (const void *)rpl; 8567 - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); 8568 - struct adapter *adapter = pi->adapter; 8569 - struct link_config *lc = &pi->link_cfg; 8570 - int link_ok, linkdnrc; 8571 - enum fw_port_type port_type; 8572 - enum fw_port_module_type mod_type; 8573 - unsigned int speed, fc, fec; 8574 8566 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; 8567 + struct link_config *lc = &pi->link_cfg; 8568 + struct adapter *adapter = pi->adapter; 8569 + unsigned int speed, fc, fec, adv_fc; 8570 + enum fw_port_module_type mod_type; 8571 + int action, link_ok, linkdnrc; 8572 + enum fw_port_type port_type; 8575 8573 8576 8574 /* Extract the various fields from the Port Information message. 
8577 8575 */ 8576 + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); 8578 8577 switch (action) { 8579 8578 case FW_PORT_ACTION_GET_PORT_INFO: { 8580 8579 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); ··· 8612 8611 } 8613 8612 8614 8613 fec = fwcap_to_cc_fec(acaps); 8614 + adv_fc = fwcap_to_cc_pause(acaps); 8615 8615 fc = fwcap_to_cc_pause(linkattr); 8616 8616 speed = fwcap_to_speed(linkattr); 8617 8617 ··· 8669 8667 } 8670 8668 8671 8669 if (link_ok != lc->link_ok || speed != lc->speed || 8672 - fc != lc->fc || fec != lc->fec) { /* something changed */ 8670 + fc != lc->fc || adv_fc != lc->advertised_fc || 8671 + fec != lc->fec) { 8672 + /* something changed */ 8673 8673 if (!link_ok && lc->link_ok) { 8674 8674 lc->link_down_rc = linkdnrc; 8675 8675 dev_warn_ratelimited(adapter->pdev_dev, ··· 8681 8677 } 8682 8678 lc->link_ok = link_ok; 8683 8679 lc->speed = speed; 8680 + lc->advertised_fc = adv_fc; 8684 8681 lc->fc = fc; 8685 8682 lc->fec = fec; 8686 8683
+2 -2
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
··· 1690 1690 struct port_info *pi = netdev_priv(dev); 1691 1691 1692 1692 pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0; 1693 - pauseparam->rx_pause = (pi->link_cfg.fc & PAUSE_RX) != 0; 1694 - pauseparam->tx_pause = (pi->link_cfg.fc & PAUSE_TX) != 0; 1693 + pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0; 1694 + pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0; 1695 1695 } 1696 1696 1697 1697 /*
+1
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
··· 135 135 136 136 enum cc_pause requested_fc; /* flow control user has requested */ 137 137 enum cc_pause fc; /* actual link flow control */ 138 + enum cc_pause advertised_fc; /* actual advertised flow control */ 138 139 139 140 enum cc_fec auto_fec; /* Forward Error Correction: */ 140 141 enum cc_fec requested_fec; /* "automatic" (IEEE 802.3), */
+12 -8
drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
··· 1913 1913 static void t4vf_handle_get_port_info(struct port_info *pi, 1914 1914 const struct fw_port_cmd *cmd) 1915 1915 { 1916 - int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); 1917 - struct adapter *adapter = pi->adapter; 1918 - struct link_config *lc = &pi->link_cfg; 1919 - int link_ok, linkdnrc; 1920 - enum fw_port_type port_type; 1921 - enum fw_port_module_type mod_type; 1922 - unsigned int speed, fc, fec; 1923 1916 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr; 1917 + struct link_config *lc = &pi->link_cfg; 1918 + struct adapter *adapter = pi->adapter; 1919 + unsigned int speed, fc, fec, adv_fc; 1920 + enum fw_port_module_type mod_type; 1921 + int action, link_ok, linkdnrc; 1922 + enum fw_port_type port_type; 1924 1923 1925 1924 /* Extract the various fields from the Port Information message. */ 1925 + action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16)); 1926 1926 switch (action) { 1927 1927 case FW_PORT_ACTION_GET_PORT_INFO: { 1928 1928 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype); ··· 1982 1982 } 1983 1983 1984 1984 fec = fwcap_to_cc_fec(acaps); 1985 + adv_fc = fwcap_to_cc_pause(acaps); 1985 1986 fc = fwcap_to_cc_pause(linkattr); 1986 1987 speed = fwcap_to_speed(linkattr); 1987 1988 ··· 2013 2012 } 2014 2013 2015 2014 if (link_ok != lc->link_ok || speed != lc->speed || 2016 - fc != lc->fc || fec != lc->fec) { /* something changed */ 2015 + fc != lc->fc || adv_fc != lc->advertised_fc || 2016 + fec != lc->fec) { 2017 + /* something changed */ 2017 2018 if (!link_ok && lc->link_ok) { 2018 2019 lc->link_down_rc = linkdnrc; 2019 2020 dev_warn_ratelimited(adapter->pdev_dev, ··· 2025 2022 } 2026 2023 lc->link_ok = link_ok; 2027 2024 lc->speed = speed; 2025 + lc->advertised_fc = adv_fc; 2028 2026 lc->fc = fc; 2029 2027 lc->fec = fec; 2030 2028
+20 -19
drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
··· 1719 1719 int page_offset; 1720 1720 unsigned int sz; 1721 1721 int *count_ptr; 1722 - int i; 1722 + int i, j; 1723 1723 1724 1724 vaddr = phys_to_virt(addr); 1725 1725 WARN_ON(!IS_ALIGNED((unsigned long)vaddr, SMP_CACHE_BYTES)); ··· 1736 1736 WARN_ON(!IS_ALIGNED((unsigned long)sg_vaddr, 1737 1737 SMP_CACHE_BYTES)); 1738 1738 1739 + dma_unmap_page(priv->rx_dma_dev, sg_addr, 1740 + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 1741 + 1739 1742 /* We may use multiple Rx pools */ 1740 1743 dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); 1741 1744 if (!dpaa_bp) 1742 1745 goto free_buffers; 1743 1746 1744 - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 1745 - dma_unmap_page(priv->rx_dma_dev, sg_addr, 1746 - DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 1747 1747 if (!skb) { 1748 1748 sz = dpaa_bp->size + 1749 1749 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ··· 1786 1786 skb_add_rx_frag(skb, i - 1, head_page, frag_off, 1787 1787 frag_len, dpaa_bp->size); 1788 1788 } 1789 + 1789 1790 /* Update the pool count for the current {cpu x bpool} */ 1791 + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 1790 1792 (*count_ptr)--; 1791 1793 1792 1794 if (qm_sg_entry_is_final(&sgt[i])) ··· 1802 1800 return skb; 1803 1801 1804 1802 free_buffers: 1805 - /* compensate sw bpool counter changes */ 1806 - for (i--; i >= 0; i--) { 1807 - dpaa_bp = dpaa_bpid2pool(sgt[i].bpid); 1808 - if (dpaa_bp) { 1809 - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 1810 - (*count_ptr)++; 1811 - } 1812 - } 1813 1803 /* free all the SG entries */ 1814 - for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) { 1815 - sg_addr = qm_sg_addr(&sgt[i]); 1804 + for (j = 0; j < DPAA_SGT_MAX_ENTRIES ; j++) { 1805 + sg_addr = qm_sg_addr(&sgt[j]); 1816 1806 sg_vaddr = phys_to_virt(sg_addr); 1807 + /* all pages 0..i were unmaped */ 1808 + if (j > i) 1809 + dma_unmap_page(priv->rx_dma_dev, qm_sg_addr(&sgt[j]), 1810 + DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE); 1817 1811 free_pages((unsigned long)sg_vaddr, 0); 1818 - dpaa_bp = 
dpaa_bpid2pool(sgt[i].bpid); 1819 - if (dpaa_bp) { 1820 - count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 1821 - (*count_ptr)--; 1812 + /* counters 0..i-1 were decremented */ 1813 + if (j >= i) { 1814 + dpaa_bp = dpaa_bpid2pool(sgt[j].bpid); 1815 + if (dpaa_bp) { 1816 + count_ptr = this_cpu_ptr(dpaa_bp->percpu_count); 1817 + (*count_ptr)--; 1818 + } 1822 1819 } 1823 1820 1824 - if (qm_sg_entry_is_final(&sgt[i])) 1821 + if (qm_sg_entry_is_final(&sgt[j])) 1825 1822 break; 1826 1823 } 1827 1824 /* free the SGT fragment */
+4 -3
drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2.c
··· 6 6 #include <linux/kernel.h> 7 7 #include <linux/module.h> 8 8 #include <linux/netlink.h> 9 + #include <linux/vmalloc.h> 9 10 #include <linux/xz.h> 10 11 #include "mlxfw_mfa2.h" 11 12 #include "mlxfw_mfa2_file.h" ··· 549 548 comp_size = be32_to_cpu(comp->size); 550 549 comp_buf_size = comp_size + mlxfw_mfa2_comp_magic_len; 551 550 552 - comp_data = kmalloc(sizeof(*comp_data) + comp_buf_size, GFP_KERNEL); 551 + comp_data = vzalloc(sizeof(*comp_data) + comp_buf_size); 553 552 if (!comp_data) 554 553 return ERR_PTR(-ENOMEM); 555 554 comp_data->comp.data_size = comp_size; ··· 571 570 comp_data->comp.data = comp_data->buff + mlxfw_mfa2_comp_magic_len; 572 571 return &comp_data->comp; 573 572 err_out: 574 - kfree(comp_data); 573 + vfree(comp_data); 575 574 return ERR_PTR(err); 576 575 } 577 576 ··· 580 579 const struct mlxfw_mfa2_comp_data *comp_data; 581 580 582 581 comp_data = container_of(comp, struct mlxfw_mfa2_comp_data, comp); 583 - kfree(comp_data); 582 + vfree(comp_data); 584 583 } 585 584 586 585 void mlxfw_mfa2_file_fini(struct mlxfw_mfa2_file *mfa2_file)
+1
drivers/net/ethernet/mellanox/mlxsw/reg.h
··· 5472 5472 MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR, 5473 5473 MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0, 5474 5474 MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP1, 5475 + MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP, 5475 5476 5476 5477 __MLXSW_REG_HTGT_TRAP_GROUP_MAX, 5477 5478 MLXSW_REG_HTGT_TRAP_GROUP_MAX = __MLXSW_REG_HTGT_TRAP_GROUP_MAX - 1
+7 -2
drivers/net/ethernet/mellanox/mlxsw/spectrum.c
··· 4542 4542 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false), 4543 4543 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false), 4544 4544 MLXSW_SP_RXL_MARK(DECAP_ECN0, TRAP_TO_CPU, ROUTER_EXP, false), 4545 - MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 4546 - MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, ROUTER_EXP, false), 4545 + MLXSW_SP_RXL_MARK(IPV4_VRRP, TRAP_TO_CPU, VRRP, false), 4546 + MLXSW_SP_RXL_MARK(IPV6_VRRP, TRAP_TO_CPU, VRRP, false), 4547 4547 /* PKT Sample trap */ 4548 4548 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU, 4549 4549 false, SP_IP2ME, DISCARD), ··· 4626 4626 rate = 19 * 1024; 4627 4627 burst_size = 12; 4628 4628 break; 4629 + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: 4630 + rate = 360; 4631 + burst_size = 7; 4632 + break; 4629 4633 default: 4630 4634 continue; 4631 4635 } ··· 4669 4665 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: 4670 4666 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: 4671 4667 case MLXSW_REG_HTGT_TRAP_GROUP_SP_PTP0: 4668 + case MLXSW_REG_HTGT_TRAP_GROUP_SP_VRRP: 4672 4669 priority = 5; 4673 4670 tc = 5; 4674 4671 break;
+3
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 7079 7079 7080 7080 for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { 7081 7081 rif = mlxsw_sp->router->rifs[i]; 7082 + if (rif && rif->ops && 7083 + rif->ops->type == MLXSW_SP_RIF_TYPE_IPIP_LB) 7084 + continue; 7082 7085 if (rif && rif->dev && rif->dev != dev && 7083 7086 !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, 7084 7087 mlxsw_sp->mac_mask)) {
+11 -3
drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
··· 112 112 struct device *dev = dwmac->dev; 113 113 const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS]; 114 114 struct meson8b_dwmac_clk_configs *clk_configs; 115 + static const struct clk_div_table div_table[] = { 116 + { .div = 2, .val = 2, }, 117 + { .div = 3, .val = 3, }, 118 + { .div = 4, .val = 4, }, 119 + { .div = 5, .val = 5, }, 120 + { .div = 6, .val = 6, }, 121 + { .div = 7, .val = 7, }, 122 + }; 115 123 116 124 clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL); 117 125 if (!clk_configs) ··· 154 146 clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0; 155 147 clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; 156 148 clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; 157 - clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED | 158 - CLK_DIVIDER_ALLOW_ZERO | 159 - CLK_DIVIDER_ROUND_CLOSEST; 149 + clk_configs->m250_div.table = div_table; 150 + clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO | 151 + CLK_DIVIDER_ROUND_CLOSEST; 160 152 clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1, 161 153 &clk_divider_ops, 162 154 &clk_configs->m250_div.hw);
+1 -1
drivers/net/gtp.c
··· 540 540 mtu = dst_mtu(&rt->dst); 541 541 } 542 542 543 - rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu); 543 + rt->dst.ops->update_pmtu(&rt->dst, NULL, skb, mtu, false); 544 544 545 545 if (!skb_is_gso(skb) && (iph->frag_off & htons(IP_DF)) && 546 546 mtu < ntohs(iph->tot_len)) {
+2
drivers/net/phy/aquantia_main.c
··· 627 627 .config_intr = aqr_config_intr, 628 628 .ack_interrupt = aqr_ack_interrupt, 629 629 .read_status = aqr_read_status, 630 + .suspend = aqr107_suspend, 631 + .resume = aqr107_resume, 630 632 }, 631 633 { 632 634 PHY_ID_MATCH_MODEL(PHY_ID_AQR106),
+14 -17
drivers/ptp/ptp_clock.c
··· 166 166 .read = ptp_read, 167 167 }; 168 168 169 - static void delete_ptp_clock(struct posix_clock *pc) 169 + static void ptp_clock_release(struct device *dev) 170 170 { 171 - struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); 171 + struct ptp_clock *ptp = container_of(dev, struct ptp_clock, dev); 172 172 173 173 mutex_destroy(&ptp->tsevq_mux); 174 174 mutex_destroy(&ptp->pincfg_mux); ··· 213 213 } 214 214 215 215 ptp->clock.ops = ptp_clock_ops; 216 - ptp->clock.release = delete_ptp_clock; 217 216 ptp->info = info; 218 217 ptp->devid = MKDEV(major, index); 219 218 ptp->index = index; ··· 235 236 if (err) 236 237 goto no_pin_groups; 237 238 238 - /* Create a new device in our class. */ 239 - ptp->dev = device_create_with_groups(ptp_class, parent, ptp->devid, 240 - ptp, ptp->pin_attr_groups, 241 - "ptp%d", ptp->index); 242 - if (IS_ERR(ptp->dev)) { 243 - err = PTR_ERR(ptp->dev); 244 - goto no_device; 245 - } 246 - 247 239 /* Register a new PPS source. */ 248 240 if (info->pps) { 249 241 struct pps_source_info pps; ··· 250 260 } 251 261 } 252 262 253 - /* Create a posix clock. */ 254 - err = posix_clock_register(&ptp->clock, ptp->devid); 263 + /* Initialize a new device of our class in our clock structure. */ 264 + device_initialize(&ptp->dev); 265 + ptp->dev.devt = ptp->devid; 266 + ptp->dev.class = ptp_class; 267 + ptp->dev.parent = parent; 268 + ptp->dev.groups = ptp->pin_attr_groups; 269 + ptp->dev.release = ptp_clock_release; 270 + dev_set_drvdata(&ptp->dev, ptp); 271 + dev_set_name(&ptp->dev, "ptp%d", ptp->index); 272 + 273 + /* Create a posix clock and link it to the device. 
*/ 274 + err = posix_clock_register(&ptp->clock, &ptp->dev); 255 275 if (err) { 256 276 pr_err("failed to create posix clock\n"); 257 277 goto no_clock; ··· 273 273 if (ptp->pps_source) 274 274 pps_unregister_source(ptp->pps_source); 275 275 no_pps: 276 - device_destroy(ptp_class, ptp->devid); 277 - no_device: 278 276 ptp_cleanup_pin_groups(ptp); 279 277 no_pin_groups: 280 278 if (ptp->kworker) ··· 302 304 if (ptp->pps_source) 303 305 pps_unregister_source(ptp->pps_source); 304 306 305 - device_destroy(ptp_class, ptp->devid); 306 307 ptp_cleanup_pin_groups(ptp); 307 308 308 309 posix_clock_unregister(&ptp->clock);
+1 -1
drivers/ptp/ptp_private.h
··· 28 28 29 29 struct ptp_clock { 30 30 struct posix_clock clock; 31 - struct device *dev; 31 + struct device dev; 32 32 struct ptp_clock_info *info; 33 33 dev_t devid; 34 34 int index; /* index into clocks.map */
+9 -20
drivers/s390/net/qeth_core_main.c
··· 2482 2482 rc = qeth_cm_enable(card); 2483 2483 if (rc) { 2484 2484 QETH_CARD_TEXT_(card, 2, "2err%d", rc); 2485 - goto out_qdio; 2485 + return rc; 2486 2486 } 2487 2487 rc = qeth_cm_setup(card); 2488 2488 if (rc) { 2489 2489 QETH_CARD_TEXT_(card, 2, "3err%d", rc); 2490 - goto out_qdio; 2490 + return rc; 2491 2491 } 2492 2492 rc = qeth_ulp_enable(card); 2493 2493 if (rc) { 2494 2494 QETH_CARD_TEXT_(card, 2, "4err%d", rc); 2495 - goto out_qdio; 2495 + return rc; 2496 2496 } 2497 2497 rc = qeth_ulp_setup(card); 2498 2498 if (rc) { 2499 2499 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2500 - goto out_qdio; 2500 + return rc; 2501 2501 } 2502 2502 rc = qeth_alloc_qdio_queues(card); 2503 2503 if (rc) { 2504 2504 QETH_CARD_TEXT_(card, 2, "5err%d", rc); 2505 - goto out_qdio; 2505 + return rc; 2506 2506 } 2507 2507 rc = qeth_qdio_establish(card); 2508 2508 if (rc) { 2509 2509 QETH_CARD_TEXT_(card, 2, "6err%d", rc); 2510 2510 qeth_free_qdio_queues(card); 2511 - goto out_qdio; 2511 + return rc; 2512 2512 } 2513 2513 rc = qeth_qdio_activate(card); 2514 2514 if (rc) { 2515 2515 QETH_CARD_TEXT_(card, 2, "7err%d", rc); 2516 - goto out_qdio; 2516 + return rc; 2517 2517 } 2518 2518 rc = qeth_dm_act(card); 2519 2519 if (rc) { 2520 2520 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 2521 - goto out_qdio; 2521 + return rc; 2522 2522 } 2523 2523 2524 2524 return 0; 2525 - out_qdio: 2526 - qeth_qdio_clear_card(card, !IS_IQD(card)); 2527 - qdio_free(CARD_DDEV(card)); 2528 - return rc; 2529 2525 } 2530 2526 2531 2527 void qeth_print_status_message(struct qeth_card *card) ··· 3422 3426 } else { 3423 3427 if (card->options.cq == cq) { 3424 3428 rc = 0; 3425 - goto out; 3426 - } 3427 - 3428 - if (card->state != CARD_STATE_DOWN) { 3429 - rc = -1; 3430 3429 goto out; 3431 3430 } 3432 3431 ··· 5026 5035 } 5027 5036 if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) { 5028 5037 rc = qeth_query_setdiagass(card); 5029 - if (rc < 0) { 5038 + if (rc) 5030 5039 QETH_CARD_TEXT_(card, 2, "8err%d", rc); 
5031 - goto out; 5032 - } 5033 5040 } 5034 5041 5035 5042 if (!qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP) ||
+5 -5
drivers/s390/net/qeth_l2_main.c
··· 287 287 card->state = CARD_STATE_HARDSETUP; 288 288 } 289 289 if (card->state == CARD_STATE_HARDSETUP) { 290 - qeth_qdio_clear_card(card, 0); 291 290 qeth_drain_output_queues(card); 292 291 qeth_clear_working_pool_list(card); 293 292 card->state = CARD_STATE_DOWN; 294 293 } 295 294 295 + qeth_qdio_clear_card(card, 0); 296 296 flush_workqueue(card->event_wq); 297 297 card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; 298 298 card->info.promisc_mode = 0; ··· 1952 1952 /* check if VNICC is currently enabled */ 1953 1953 bool qeth_l2_vnicc_is_in_use(struct qeth_card *card) 1954 1954 { 1955 - /* if everything is turned off, VNICC is not active */ 1956 - if (!card->options.vnicc.cur_chars) 1955 + if (!card->options.vnicc.sup_chars) 1957 1956 return false; 1958 1957 /* default values are only OK if rx_bcast was not enabled by user 1959 1958 * or the card is offline. ··· 2039 2040 /* enforce assumed default values and recover settings, if changed */ 2040 2041 error |= qeth_l2_vnicc_recover_timeout(card, QETH_VNICC_LEARNING, 2041 2042 timeout); 2042 - chars_tmp = card->options.vnicc.wanted_chars ^ QETH_VNICC_DEFAULT; 2043 - chars_tmp |= QETH_VNICC_BRIDGE_INVISIBLE; 2043 + /* Change chars, if necessary */ 2044 + chars_tmp = card->options.vnicc.wanted_chars ^ 2045 + card->options.vnicc.cur_chars; 2044 2046 chars_len = sizeof(card->options.vnicc.wanted_chars) * BITS_PER_BYTE; 2045 2047 for_each_set_bit(i, &chars_tmp, chars_len) { 2046 2048 vnicc = BIT(i);
+1 -1
drivers/s390/net/qeth_l3_main.c
··· 1307 1307 card->state = CARD_STATE_HARDSETUP; 1308 1308 } 1309 1309 if (card->state == CARD_STATE_HARDSETUP) { 1310 - qeth_qdio_clear_card(card, 0); 1311 1310 qeth_drain_output_queues(card); 1312 1311 qeth_clear_working_pool_list(card); 1313 1312 card->state = CARD_STATE_DOWN; 1314 1313 } 1315 1314 1315 + qeth_qdio_clear_card(card, 0); 1316 1316 flush_workqueue(card->event_wq); 1317 1317 card->info.promisc_mode = 0; 1318 1318 }
+28 -12
drivers/s390/net/qeth_l3_sys.c
··· 242 242 struct device_attribute *attr, const char *buf, size_t count) 243 243 { 244 244 struct qeth_card *card = dev_get_drvdata(dev); 245 + int rc = 0; 245 246 char *tmp; 246 - int rc; 247 247 248 248 if (!IS_IQD(card)) 249 249 return -EPERM; 250 - if (card->state != CARD_STATE_DOWN) 251 - return -EPERM; 252 - if (card->options.sniffer) 253 - return -EPERM; 254 - if (card->options.cq == QETH_CQ_NOTAVAILABLE) 255 - return -EPERM; 250 + 251 + mutex_lock(&card->conf_mutex); 252 + if (card->state != CARD_STATE_DOWN) { 253 + rc = -EPERM; 254 + goto out; 255 + } 256 + 257 + if (card->options.sniffer) { 258 + rc = -EPERM; 259 + goto out; 260 + } 261 + 262 + if (card->options.cq == QETH_CQ_NOTAVAILABLE) { 263 + rc = -EPERM; 264 + goto out; 265 + } 256 266 257 267 tmp = strsep((char **)&buf, "\n"); 258 - if (strlen(tmp) > 8) 259 - return -EINVAL; 268 + if (strlen(tmp) > 8) { 269 + rc = -EINVAL; 270 + goto out; 271 + } 260 272 261 273 if (card->options.hsuid[0]) 262 274 /* delete old ip address */ ··· 279 267 card->options.hsuid[0] = '\0'; 280 268 memcpy(card->dev->perm_addr, card->options.hsuid, 9); 281 269 qeth_configure_cq(card, QETH_CQ_DISABLED); 282 - return count; 270 + goto out; 283 271 } 284 272 285 - if (qeth_configure_cq(card, QETH_CQ_ENABLED)) 286 - return -EPERM; 273 + if (qeth_configure_cq(card, QETH_CQ_ENABLED)) { 274 + rc = -EPERM; 275 + goto out; 276 + } 287 277 288 278 snprintf(card->options.hsuid, sizeof(card->options.hsuid), 289 279 "%-8s", tmp); ··· 294 280 295 281 rc = qeth_l3_modify_hsuid(card, true); 296 282 283 + out: 284 + mutex_unlock(&card->conf_mutex); 297 285 return rc ? rc : count; 298 286 } 299 287
+1 -1
include/linux/of_mdio.h
··· 55 55 } 56 56 57 57 #else /* CONFIG_OF_MDIO */ 58 - static bool of_mdiobus_child_is_phy(struct device_node *child) 58 + static inline bool of_mdiobus_child_is_phy(struct device_node *child) 59 59 { 60 60 return false; 61 61 }
+11 -8
include/linux/posix-clock.h
··· 69 69 * 70 70 * @ops: Functional interface to the clock 71 71 * @cdev: Character device instance for this clock 72 - * @kref: Reference count. 72 + * @dev: Pointer to the clock's device. 73 73 * @rwsem: Protects the 'zombie' field from concurrent access. 74 74 * @zombie: If 'zombie' is true, then the hardware has disappeared. 75 - * @release: A function to free the structure when the reference count reaches 76 - * zero. May be NULL if structure is statically allocated. 77 75 * 78 76 * Drivers should embed their struct posix_clock within a private 79 77 * structure, obtaining a reference to it during callbacks using 80 78 * container_of(). 79 + * 80 + * Drivers should supply an initialized but not exposed struct device 81 + * to posix_clock_register(). It is used to manage lifetime of the 82 + * driver's private structure. It's 'release' field should be set to 83 + * a release function for this private structure. 81 84 */ 82 85 struct posix_clock { 83 86 struct posix_clock_operations ops; 84 87 struct cdev cdev; 85 - struct kref kref; 88 + struct device *dev; 86 89 struct rw_semaphore rwsem; 87 90 bool zombie; 88 - void (*release)(struct posix_clock *clk); 89 91 }; 90 92 91 93 /** 92 94 * posix_clock_register() - register a new clock 93 - * @clk: Pointer to the clock. Caller must provide 'ops' and 'release' 94 - * @devid: Allocated device id 95 + * @clk: Pointer to the clock. Caller must provide 'ops' field 96 + * @dev: Pointer to the initialized device. Caller must provide 97 + * 'release' field 95 98 * 96 99 * A clock driver calls this function to register itself with the 97 100 * clock device subsystem. If 'clk' points to dynamically allocated ··· 103 100 * 104 101 * Returns zero on success, non-zero otherwise. 105 102 */ 106 - int posix_clock_register(struct posix_clock *clk, dev_t devid); 103 + int posix_clock_register(struct posix_clock *clk, struct device *dev); 107 104 108 105 /** 109 106 * posix_clock_unregister() - unregister a clock
+11 -2
include/net/dst.h
··· 516 516 struct dst_entry *dst = skb_dst(skb); 517 517 518 518 if (dst && dst->ops->update_pmtu) 519 - dst->ops->update_pmtu(dst, NULL, skb, mtu); 519 + dst->ops->update_pmtu(dst, NULL, skb, mtu, true); 520 + } 521 + 522 + /* update dst pmtu but not do neighbor confirm */ 523 + static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu) 524 + { 525 + struct dst_entry *dst = skb_dst(skb); 526 + 527 + if (dst && dst->ops->update_pmtu) 528 + dst->ops->update_pmtu(dst, NULL, skb, mtu, false); 520 529 } 521 530 522 531 static inline void skb_tunnel_check_pmtu(struct sk_buff *skb, ··· 535 526 u32 encap_mtu = dst_mtu(encap_dst); 536 527 537 528 if (skb->len > encap_mtu - headroom) 538 - skb_dst_update_pmtu(skb, encap_mtu - headroom); 529 + skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom); 539 530 } 540 531 541 532 #endif /* _NET_DST_H */
+2 -1
include/net/dst_ops.h
··· 27 27 struct dst_entry * (*negative_advice)(struct dst_entry *); 28 28 void (*link_failure)(struct sk_buff *); 29 29 void (*update_pmtu)(struct dst_entry *dst, struct sock *sk, 30 - struct sk_buff *skb, u32 mtu); 30 + struct sk_buff *skb, u32 mtu, 31 + bool confirm_neigh); 31 32 void (*redirect)(struct dst_entry *dst, struct sock *sk, 32 33 struct sk_buff *skb); 33 34 int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
+5
include/net/sch_generic.h
··· 308 308 int (*delete)(struct tcf_proto *tp, void *arg, 309 309 bool *last, bool rtnl_held, 310 310 struct netlink_ext_ack *); 311 + bool (*delete_empty)(struct tcf_proto *tp); 311 312 void (*walk)(struct tcf_proto *tp, 312 313 struct tcf_walker *arg, bool rtnl_held); 313 314 int (*reoffload)(struct tcf_proto *tp, bool add, ··· 337 336 int flags; 338 337 }; 339 338 339 + /* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags 340 + * are expected to implement tcf_proto_ops->delete_empty(), otherwise race 341 + * conditions can occur when filters are inserted/deleted simultaneously. 342 + */ 340 343 enum tcf_proto_ops_flags { 341 344 TCF_PROTO_OPS_DOIT_UNLOCKED = 1, 342 345 };
+22 -21
kernel/bpf/verifier.c
··· 907 907 BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5 908 908 }; 909 909 910 - static void __mark_reg_not_init(struct bpf_reg_state *reg); 910 + static void __mark_reg_not_init(const struct bpf_verifier_env *env, 911 + struct bpf_reg_state *reg); 911 912 912 913 /* Mark the unknown part of a register (variable offset or scalar value) as 913 914 * known to have the value @imm. ··· 946 945 verbose(env, "mark_reg_known_zero(regs, %u)\n", regno); 947 946 /* Something bad happened, let's kill all regs */ 948 947 for (regno = 0; regno < MAX_BPF_REG; regno++) 949 - __mark_reg_not_init(regs + regno); 948 + __mark_reg_not_init(env, regs + regno); 950 949 return; 951 950 } 952 951 __mark_reg_known_zero(regs + regno); ··· 1055 1054 } 1056 1055 1057 1056 /* Mark a register as having a completely unknown (scalar) value. */ 1058 - static void __mark_reg_unknown(struct bpf_reg_state *reg) 1057 + static void __mark_reg_unknown(const struct bpf_verifier_env *env, 1058 + struct bpf_reg_state *reg) 1059 1059 { 1060 1060 /* 1061 1061 * Clear type, id, off, and union(map_ptr, range) and ··· 1066 1064 reg->type = SCALAR_VALUE; 1067 1065 reg->var_off = tnum_unknown; 1068 1066 reg->frameno = 0; 1067 + reg->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? 1068 + true : false; 1069 1069 __mark_reg_unbounded(reg); 1070 1070 } 1071 1071 ··· 1078 1074 verbose(env, "mark_reg_unknown(regs, %u)\n", regno); 1079 1075 /* Something bad happened, let's kill all regs except FP */ 1080 1076 for (regno = 0; regno < BPF_REG_FP; regno++) 1081 - __mark_reg_not_init(regs + regno); 1077 + __mark_reg_not_init(env, regs + regno); 1082 1078 return; 1083 1079 } 1084 - regs += regno; 1085 - __mark_reg_unknown(regs); 1086 - /* constant backtracking is enabled for root without bpf2bpf calls */ 1087 - regs->precise = env->subprog_cnt > 1 || !env->allow_ptr_leaks ? 
1088 - true : false; 1080 + __mark_reg_unknown(env, regs + regno); 1089 1081 } 1090 1082 1091 - static void __mark_reg_not_init(struct bpf_reg_state *reg) 1083 + static void __mark_reg_not_init(const struct bpf_verifier_env *env, 1084 + struct bpf_reg_state *reg) 1092 1085 { 1093 - __mark_reg_unknown(reg); 1086 + __mark_reg_unknown(env, reg); 1094 1087 reg->type = NOT_INIT; 1095 1088 } 1096 1089 ··· 1098 1097 verbose(env, "mark_reg_not_init(regs, %u)\n", regno); 1099 1098 /* Something bad happened, let's kill all regs except FP */ 1100 1099 for (regno = 0; regno < BPF_REG_FP; regno++) 1101 - __mark_reg_not_init(regs + regno); 1100 + __mark_reg_not_init(env, regs + regno); 1102 1101 return; 1103 1102 } 1104 - __mark_reg_not_init(regs + regno); 1103 + __mark_reg_not_init(env, regs + regno); 1105 1104 } 1106 1105 1107 1106 #define DEF_NOT_SUBREG (0) ··· 3235 3234 } 3236 3235 if (state->stack[spi].slot_type[0] == STACK_SPILL && 3237 3236 state->stack[spi].spilled_ptr.type == SCALAR_VALUE) { 3238 - __mark_reg_unknown(&state->stack[spi].spilled_ptr); 3237 + __mark_reg_unknown(env, &state->stack[spi].spilled_ptr); 3239 3238 for (j = 0; j < BPF_REG_SIZE; j++) 3240 3239 state->stack[spi].slot_type[j] = STACK_MISC; 3241 3240 goto mark; ··· 3893 3892 if (!reg) 3894 3893 continue; 3895 3894 if (reg_is_pkt_pointer_any(reg)) 3896 - __mark_reg_unknown(reg); 3895 + __mark_reg_unknown(env, reg); 3897 3896 } 3898 3897 } 3899 3898 ··· 3921 3920 if (!reg) 3922 3921 continue; 3923 3922 if (reg->ref_obj_id == ref_obj_id) 3924 - __mark_reg_unknown(reg); 3923 + __mark_reg_unknown(env, reg); 3925 3924 } 3926 3925 } 3927 3926 ··· 4583 4582 /* Taint dst register if offset had invalid bounds derived from 4584 4583 * e.g. dead branches. 4585 4584 */ 4586 - __mark_reg_unknown(dst_reg); 4585 + __mark_reg_unknown(env, dst_reg); 4587 4586 return 0; 4588 4587 } 4589 4588 ··· 4835 4834 /* Taint dst register if offset had invalid bounds derived from 4836 4835 * e.g. dead branches. 
4837 4836 */ 4838 - __mark_reg_unknown(dst_reg); 4837 + __mark_reg_unknown(env, dst_reg); 4839 4838 return 0; 4840 4839 } 4841 4840 4842 4841 if (!src_known && 4843 4842 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) { 4844 - __mark_reg_unknown(dst_reg); 4843 + __mark_reg_unknown(env, dst_reg); 4845 4844 return 0; 4846 4845 } 4847 4846 ··· 6983 6982 /* since the register is unused, clear its state 6984 6983 * to make further comparison simpler 6985 6984 */ 6986 - __mark_reg_not_init(&st->regs[i]); 6985 + __mark_reg_not_init(env, &st->regs[i]); 6987 6986 } 6988 6987 6989 6988 for (i = 0; i < st->allocated_stack / BPF_REG_SIZE; i++) { ··· 6991 6990 /* liveness must not touch this stack slot anymore */ 6992 6991 st->stack[i].spilled_ptr.live |= REG_LIVE_DONE; 6993 6992 if (!(live & REG_LIVE_READ)) { 6994 - __mark_reg_not_init(&st->stack[i].spilled_ptr); 6993 + __mark_reg_not_init(env, &st->stack[i].spilled_ptr); 6995 6994 for (j = 0; j < BPF_REG_SIZE; j++) 6996 6995 st->stack[i].slot_type[j] = STACK_INVALID; 6997 6996 }
+13 -18
kernel/time/posix-clock.c
··· 14 14 15 15 #include "posix-timers.h" 16 16 17 - static void delete_clock(struct kref *kref); 18 - 19 17 /* 20 18 * Returns NULL if the posix_clock instance attached to 'fp' is old and stale. 21 19 */ ··· 123 125 err = 0; 124 126 125 127 if (!err) { 126 - kref_get(&clk->kref); 128 + get_device(clk->dev); 127 129 fp->private_data = clk; 128 130 } 129 131 out: ··· 139 141 if (clk->ops.release) 140 142 err = clk->ops.release(clk); 141 143 142 - kref_put(&clk->kref, delete_clock); 144 + put_device(clk->dev); 143 145 144 146 fp->private_data = NULL; 145 147 ··· 159 161 #endif 160 162 }; 161 163 162 - int posix_clock_register(struct posix_clock *clk, dev_t devid) 164 + int posix_clock_register(struct posix_clock *clk, struct device *dev) 163 165 { 164 166 int err; 165 167 166 - kref_init(&clk->kref); 167 168 init_rwsem(&clk->rwsem); 168 169 169 170 cdev_init(&clk->cdev, &posix_clock_file_operations); 171 + err = cdev_device_add(&clk->cdev, dev); 172 + if (err) { 173 + pr_err("%s unable to add device %d:%d\n", 174 + dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt)); 175 + return err; 176 + } 170 177 clk->cdev.owner = clk->ops.owner; 171 - err = cdev_add(&clk->cdev, devid, 1); 178 + clk->dev = dev; 172 179 173 - return err; 180 + return 0; 174 181 } 175 182 EXPORT_SYMBOL_GPL(posix_clock_register); 176 183 177 - static void delete_clock(struct kref *kref) 178 - { 179 - struct posix_clock *clk = container_of(kref, struct posix_clock, kref); 180 - 181 - if (clk->release) 182 - clk->release(clk); 183 - } 184 - 185 184 void posix_clock_unregister(struct posix_clock *clk) 186 185 { 187 - cdev_del(&clk->cdev); 186 + cdev_device_del(&clk->cdev, clk->dev); 188 187 189 188 down_write(&clk->rwsem); 190 189 clk->zombie = true; 191 190 up_write(&clk->rwsem); 192 191 193 - kref_put(&clk->kref, delete_clock); 192 + put_device(clk->dev); 194 193 } 195 194 EXPORT_SYMBOL_GPL(posix_clock_unregister); 196 195
+2 -1
net/bridge/br_nf_core.c
··· 22 22 #endif 23 23 24 24 static void fake_update_pmtu(struct dst_entry *dst, struct sock *sk, 25 - struct sk_buff *skb, u32 mtu) 25 + struct sk_buff *skb, u32 mtu, 26 + bool confirm_neigh) 26 27 { 27 28 } 28 29
+17 -18
net/bridge/netfilter/ebtables.c
··· 1867 1867 } 1868 1868 1869 1869 static int ebt_buf_add(struct ebt_entries_buf_state *state, 1870 - void *data, unsigned int sz) 1870 + const void *data, unsigned int sz) 1871 1871 { 1872 1872 if (state->buf_kern_start == NULL) 1873 1873 goto count_only; ··· 1901 1901 EBT_COMPAT_TARGET, 1902 1902 }; 1903 1903 1904 - static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, 1904 + static int compat_mtw_from_user(const struct compat_ebt_entry_mwt *mwt, 1905 1905 enum compat_mwt compat_mwt, 1906 1906 struct ebt_entries_buf_state *state, 1907 1907 const unsigned char *base) ··· 1979 1979 /* return size of all matches, watchers or target, including necessary 1980 1980 * alignment and padding. 1981 1981 */ 1982 - static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32, 1982 + static int ebt_size_mwt(const struct compat_ebt_entry_mwt *match32, 1983 1983 unsigned int size_left, enum compat_mwt type, 1984 1984 struct ebt_entries_buf_state *state, const void *base) 1985 1985 { 1986 + const char *buf = (const char *)match32; 1986 1987 int growth = 0; 1987 - char *buf; 1988 1988 1989 1989 if (size_left == 0) 1990 1990 return 0; 1991 1991 1992 - buf = (char *) match32; 1993 - 1994 - while (size_left >= sizeof(*match32)) { 1992 + do { 1995 1993 struct ebt_entry_match *match_kern; 1996 1994 int ret; 1995 + 1996 + if (size_left < sizeof(*match32)) 1997 + return -EINVAL; 1997 1998 1998 1999 match_kern = (struct ebt_entry_match *) state->buf_kern_start; 1999 2000 if (match_kern) { ··· 2032 2031 if (match_kern) 2033 2032 match_kern->match_size = ret; 2034 2033 2035 - /* rule should have no remaining data after target */ 2036 - if (type == EBT_COMPAT_TARGET && size_left) 2037 - return -EINVAL; 2038 - 2039 2034 match32 = (struct compat_ebt_entry_mwt *) buf; 2040 - } 2035 + } while (size_left); 2041 2036 2042 2037 return growth; 2043 2038 } 2044 2039 2045 2040 /* called for all ebt_entry structures. 
*/ 2046 - static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base, 2041 + static int size_entry_mwt(const struct ebt_entry *entry, const unsigned char *base, 2047 2042 unsigned int *total, 2048 2043 struct ebt_entries_buf_state *state) 2049 2044 { 2050 - unsigned int i, j, startoff, new_offset = 0; 2045 + unsigned int i, j, startoff, next_expected_off, new_offset = 0; 2051 2046 /* stores match/watchers/targets & offset of next struct ebt_entry: */ 2052 2047 unsigned int offsets[4]; 2053 2048 unsigned int *offsets_update = NULL; ··· 2129 2132 return ret; 2130 2133 } 2131 2134 2132 - startoff = state->buf_user_offset - startoff; 2133 - 2134 - if (WARN_ON(*total < startoff)) 2135 + next_expected_off = state->buf_user_offset - startoff; 2136 + if (next_expected_off != entry->next_offset) 2135 2137 return -EINVAL; 2136 - *total -= startoff; 2138 + 2139 + if (*total < entry->next_offset) 2140 + return -EINVAL; 2141 + *total -= entry->next_offset; 2137 2142 return 0; 2138 2143 } 2139 2144
+4 -2
net/decnet/dn_route.c
··· 110 110 static struct dst_entry *dn_dst_negative_advice(struct dst_entry *); 111 111 static void dn_dst_link_failure(struct sk_buff *); 112 112 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, 113 - struct sk_buff *skb , u32 mtu); 113 + struct sk_buff *skb , u32 mtu, 114 + bool confirm_neigh); 114 115 static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk, 115 116 struct sk_buff *skb); 116 117 static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, ··· 252 251 * advertise to the other end). 253 252 */ 254 253 static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, 255 - struct sk_buff *skb, u32 mtu) 254 + struct sk_buff *skb, u32 mtu, 255 + bool confirm_neigh) 256 256 { 257 257 struct dn_route *rt = (struct dn_route *) dst; 258 258 struct neighbour *n = rt->n;
+40 -12
net/hsr/hsr_debugfs.c
··· 20 20 #include "hsr_main.h" 21 21 #include "hsr_framereg.h" 22 22 23 + static struct dentry *hsr_debugfs_root_dir; 24 + 23 25 static void print_mac_address(struct seq_file *sfp, unsigned char *mac) 24 26 { 25 27 seq_printf(sfp, "%02x:%02x:%02x:%02x:%02x:%02x:", ··· 65 63 return single_open(filp, hsr_node_table_show, inode->i_private); 66 64 } 67 65 66 + void hsr_debugfs_rename(struct net_device *dev) 67 + { 68 + struct hsr_priv *priv = netdev_priv(dev); 69 + struct dentry *d; 70 + 71 + d = debugfs_rename(hsr_debugfs_root_dir, priv->node_tbl_root, 72 + hsr_debugfs_root_dir, dev->name); 73 + if (IS_ERR(d)) 74 + netdev_warn(dev, "failed to rename\n"); 75 + else 76 + priv->node_tbl_root = d; 77 + } 78 + 68 79 static const struct file_operations hsr_fops = { 69 - .owner = THIS_MODULE, 70 80 .open = hsr_node_table_open, 71 81 .read = seq_read, 72 82 .llseek = seq_lseek, ··· 92 78 * When debugfs is configured this routine sets up the node_table file per 93 79 * hsr device for dumping the node_table entries 94 80 */ 95 - int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) 81 + void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev) 96 82 { 97 - int rc = -1; 98 83 struct dentry *de = NULL; 99 84 100 - de = debugfs_create_dir(hsr_dev->name, NULL); 101 - if (!de) { 102 - pr_err("Cannot create hsr debugfs root\n"); 103 - return rc; 85 + de = debugfs_create_dir(hsr_dev->name, hsr_debugfs_root_dir); 86 + if (IS_ERR(de)) { 87 + pr_err("Cannot create hsr debugfs directory\n"); 88 + return; 104 89 } 105 90 106 91 priv->node_tbl_root = de; ··· 107 94 de = debugfs_create_file("node_table", S_IFREG | 0444, 108 95 priv->node_tbl_root, priv, 109 96 &hsr_fops); 110 - if (!de) { 111 - pr_err("Cannot create hsr node_table directory\n"); 112 - return rc; 97 + if (IS_ERR(de)) { 98 + pr_err("Cannot create hsr node_table file\n"); 99 + debugfs_remove(priv->node_tbl_root); 100 + priv->node_tbl_root = NULL; 101 + return; 113 102 } 114 103 
priv->node_tbl_file = de; 115 - 116 - return 0; 117 104 } 118 105 119 106 /* hsr_debugfs_term - Tear down debugfs intrastructure ··· 129 116 priv->node_tbl_file = NULL; 130 117 debugfs_remove(priv->node_tbl_root); 131 118 priv->node_tbl_root = NULL; 119 + } 120 + 121 + void hsr_debugfs_create_root(void) 122 + { 123 + hsr_debugfs_root_dir = debugfs_create_dir("hsr", NULL); 124 + if (IS_ERR(hsr_debugfs_root_dir)) { 125 + pr_err("Cannot create hsr debugfs root directory\n"); 126 + hsr_debugfs_root_dir = NULL; 127 + } 128 + } 129 + 130 + void hsr_debugfs_remove_root(void) 131 + { 132 + /* debugfs_remove() internally checks NULL and ERROR */ 133 + debugfs_remove(hsr_debugfs_root_dir); 132 134 }
+16 -12
net/hsr/hsr_device.c
··· 272 272 skb->dev->dev_addr, skb->len) <= 0) 273 273 goto out; 274 274 skb_reset_mac_header(skb); 275 + skb_reset_network_header(skb); 276 + skb_reset_transport_header(skb); 275 277 276 278 if (hsr_ver > 0) { 277 279 hsr_tag = skb_put(skb, sizeof(struct hsr_tag)); ··· 370 368 del_timer_sync(&hsr->prune_timer); 371 369 del_timer_sync(&hsr->announce_timer); 372 370 373 - hsr_del_self_node(&hsr->self_node_db); 371 + hsr_del_self_node(hsr); 374 372 hsr_del_nodes(&hsr->node_db); 375 373 } 376 374 ··· 442 440 INIT_LIST_HEAD(&hsr->ports); 443 441 INIT_LIST_HEAD(&hsr->node_db); 444 442 INIT_LIST_HEAD(&hsr->self_node_db); 443 + spin_lock_init(&hsr->list_lock); 445 444 446 445 ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr); 447 446 448 447 /* Make sure we recognize frames from ourselves in hsr_rcv() */ 449 - res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr, 448 + res = hsr_create_self_node(hsr, hsr_dev->dev_addr, 450 449 slave[1]->dev_addr); 451 450 if (res < 0) 452 451 return res; ··· 480 477 481 478 res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER); 482 479 if (res) 483 - goto err_add_port; 480 + goto err_add_master; 484 481 485 482 res = register_netdevice(hsr_dev); 486 483 if (res) 487 - goto fail; 484 + goto err_unregister; 488 485 489 486 res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A); 490 487 if (res) 491 - goto fail; 488 + goto err_add_slaves; 489 + 492 490 res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B); 493 491 if (res) 494 - goto fail; 492 + goto err_add_slaves; 495 493 494 + hsr_debugfs_init(hsr, hsr_dev); 496 495 mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD)); 497 - res = hsr_debugfs_init(hsr, hsr_dev); 498 - if (res) 499 - goto fail; 500 496 501 497 return 0; 502 498 503 - fail: 499 + err_add_slaves: 500 + unregister_netdevice(hsr_dev); 501 + err_unregister: 504 502 list_for_each_entry_safe(port, tmp, &hsr->ports, port_list) 505 503 hsr_del_port(port); 506 - err_add_port: 507 - 
hsr_del_self_node(&hsr->self_node_db); 504 + err_add_master: 505 + hsr_del_self_node(hsr); 508 506 509 507 return res; 510 508 }
+46 -27
net/hsr/hsr_framereg.c
··· 75 75 /* Helper for device init; the self_node_db is used in hsr_rcv() to recognize 76 76 * frames from self that's been looped over the HSR ring. 77 77 */ 78 - int hsr_create_self_node(struct list_head *self_node_db, 78 + int hsr_create_self_node(struct hsr_priv *hsr, 79 79 unsigned char addr_a[ETH_ALEN], 80 80 unsigned char addr_b[ETH_ALEN]) 81 81 { 82 + struct list_head *self_node_db = &hsr->self_node_db; 82 83 struct hsr_node *node, *oldnode; 83 84 84 85 node = kmalloc(sizeof(*node), GFP_KERNEL); ··· 89 88 ether_addr_copy(node->macaddress_A, addr_a); 90 89 ether_addr_copy(node->macaddress_B, addr_b); 91 90 92 - rcu_read_lock(); 91 + spin_lock_bh(&hsr->list_lock); 93 92 oldnode = list_first_or_null_rcu(self_node_db, 94 93 struct hsr_node, mac_list); 95 94 if (oldnode) { 96 95 list_replace_rcu(&oldnode->mac_list, &node->mac_list); 97 - rcu_read_unlock(); 98 - synchronize_rcu(); 99 - kfree(oldnode); 96 + spin_unlock_bh(&hsr->list_lock); 97 + kfree_rcu(oldnode, rcu_head); 100 98 } else { 101 - rcu_read_unlock(); 102 99 list_add_tail_rcu(&node->mac_list, self_node_db); 100 + spin_unlock_bh(&hsr->list_lock); 103 101 } 104 102 105 103 return 0; 106 104 } 107 105 108 - void hsr_del_self_node(struct list_head *self_node_db) 106 + void hsr_del_self_node(struct hsr_priv *hsr) 109 107 { 108 + struct list_head *self_node_db = &hsr->self_node_db; 110 109 struct hsr_node *node; 111 110 112 - rcu_read_lock(); 111 + spin_lock_bh(&hsr->list_lock); 113 112 node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list); 114 - rcu_read_unlock(); 115 113 if (node) { 116 114 list_del_rcu(&node->mac_list); 117 - kfree(node); 115 + kfree_rcu(node, rcu_head); 118 116 } 117 + spin_unlock_bh(&hsr->list_lock); 119 118 } 120 119 121 120 void hsr_del_nodes(struct list_head *node_db) ··· 131 130 * seq_out is used to initialize filtering of outgoing duplicate frames 132 131 * originating from the newly added node. 
133 132 */ 134 - struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], 135 - u16 seq_out) 133 + static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, 134 + struct list_head *node_db, 135 + unsigned char addr[], 136 + u16 seq_out) 136 137 { 137 - struct hsr_node *node; 138 + struct hsr_node *new_node, *node; 138 139 unsigned long now; 139 140 int i; 140 141 141 - node = kzalloc(sizeof(*node), GFP_ATOMIC); 142 - if (!node) 142 + new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC); 143 + if (!new_node) 143 144 return NULL; 144 145 145 - ether_addr_copy(node->macaddress_A, addr); 146 + ether_addr_copy(new_node->macaddress_A, addr); 146 147 147 148 /* We are only interested in time diffs here, so use current jiffies 148 149 * as initialization. (0 could trigger an spurious ring error warning). 149 150 */ 150 151 now = jiffies; 151 152 for (i = 0; i < HSR_PT_PORTS; i++) 152 - node->time_in[i] = now; 153 + new_node->time_in[i] = now; 153 154 for (i = 0; i < HSR_PT_PORTS; i++) 154 - node->seq_out[i] = seq_out; 155 + new_node->seq_out[i] = seq_out; 155 156 156 - list_add_tail_rcu(&node->mac_list, node_db); 157 - 157 + spin_lock_bh(&hsr->list_lock); 158 + list_for_each_entry_rcu(node, node_db, mac_list) { 159 + if (ether_addr_equal(node->macaddress_A, addr)) 160 + goto out; 161 + if (ether_addr_equal(node->macaddress_B, addr)) 162 + goto out; 163 + } 164 + list_add_tail_rcu(&new_node->mac_list, node_db); 165 + spin_unlock_bh(&hsr->list_lock); 166 + return new_node; 167 + out: 168 + spin_unlock_bh(&hsr->list_lock); 169 + kfree(new_node); 158 170 return node; 159 171 } 160 172 ··· 177 163 bool is_sup) 178 164 { 179 165 struct list_head *node_db = &port->hsr->node_db; 166 + struct hsr_priv *hsr = port->hsr; 180 167 struct hsr_node *node; 181 168 struct ethhdr *ethhdr; 182 169 u16 seq_out; ··· 211 196 seq_out = HSR_SEQNR_START; 212 197 } 213 198 214 - return hsr_add_node(node_db, ethhdr->h_source, seq_out); 199 + return hsr_add_node(hsr, node_db, 
ethhdr->h_source, seq_out); 215 200 } 216 201 217 202 /* Use the Supervision frame's info about an eventual macaddress_B for merging ··· 221 206 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, 222 207 struct hsr_port *port_rcv) 223 208 { 224 - struct ethhdr *ethhdr; 225 - struct hsr_node *node_real; 209 + struct hsr_priv *hsr = port_rcv->hsr; 226 210 struct hsr_sup_payload *hsr_sp; 211 + struct hsr_node *node_real; 227 212 struct list_head *node_db; 213 + struct ethhdr *ethhdr; 228 214 int i; 229 215 230 216 ethhdr = (struct ethhdr *)skb_mac_header(skb); ··· 247 231 node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A); 248 232 if (!node_real) 249 233 /* No frame received from AddrA of this node yet */ 250 - node_real = hsr_add_node(node_db, hsr_sp->macaddress_A, 234 + node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A, 251 235 HSR_SEQNR_START - 1); 252 236 if (!node_real) 253 237 goto done; /* No mem */ ··· 268 252 } 269 253 node_real->addr_B_port = port_rcv->type; 270 254 255 + spin_lock_bh(&hsr->list_lock); 271 256 list_del_rcu(&node_curr->mac_list); 257 + spin_unlock_bh(&hsr->list_lock); 272 258 kfree_rcu(node_curr, rcu_head); 273 259 274 260 done: ··· 386 368 { 387 369 struct hsr_priv *hsr = from_timer(hsr, t, prune_timer); 388 370 struct hsr_node *node; 371 + struct hsr_node *tmp; 389 372 struct hsr_port *port; 390 373 unsigned long timestamp; 391 374 unsigned long time_a, time_b; 392 375 393 - rcu_read_lock(); 394 - list_for_each_entry_rcu(node, &hsr->node_db, mac_list) { 376 + spin_lock_bh(&hsr->list_lock); 377 + list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) { 395 378 /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A] 396 379 * nor time_in[HSR_PT_SLAVE_B], will ever be updated for 397 380 * the master port. 
Thus the master node will be repeatedly ··· 440 421 kfree_rcu(node, rcu_head); 441 422 } 442 423 } 443 - rcu_read_unlock(); 424 + spin_unlock_bh(&hsr->list_lock); 444 425 445 426 /* Restart timer */ 446 427 mod_timer(&hsr->prune_timer,
+2 -4
net/hsr/hsr_framereg.h
··· 12 12 13 13 struct hsr_node; 14 14 15 - void hsr_del_self_node(struct list_head *self_node_db); 15 + void hsr_del_self_node(struct hsr_priv *hsr); 16 16 void hsr_del_nodes(struct list_head *node_db); 17 - struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[], 18 - u16 seq_out); 19 17 struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb, 20 18 bool is_sup); 21 19 void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr, ··· 31 33 32 34 void hsr_prune_nodes(struct timer_list *t); 33 35 34 - int hsr_create_self_node(struct list_head *self_node_db, 36 + int hsr_create_self_node(struct hsr_priv *hsr, 35 37 unsigned char addr_a[ETH_ALEN], 36 38 unsigned char addr_b[ETH_ALEN]); 37 39
+6 -1
net/hsr/hsr_main.c
··· 45 45 case NETDEV_CHANGE: /* Link (carrier) state changes */ 46 46 hsr_check_carrier_and_operstate(hsr); 47 47 break; 48 + case NETDEV_CHANGENAME: 49 + if (is_hsr_master(dev)) 50 + hsr_debugfs_rename(dev); 51 + break; 48 52 case NETDEV_CHANGEADDR: 49 53 if (port->type == HSR_PT_MASTER) { 50 54 /* This should not happen since there's no ··· 68 64 69 65 /* Make sure we recognize frames from ourselves in hsr_rcv() */ 70 66 port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B); 71 - res = hsr_create_self_node(&hsr->self_node_db, 67 + res = hsr_create_self_node(hsr, 72 68 master->dev->dev_addr, 73 69 port ? 74 70 port->dev->dev_addr : ··· 127 123 { 128 124 unregister_netdevice_notifier(&hsr_nb); 129 125 hsr_netlink_exit(); 126 + hsr_debugfs_remove_root(); 130 127 } 131 128 132 129 module_init(hsr_init);
+15 -7
net/hsr/hsr_main.h
··· 160 160 int announce_count; 161 161 u16 sequence_nr; 162 162 u16 sup_sequence_nr; /* For HSRv1 separate seq_nr for supervision */ 163 - u8 prot_version; /* Indicate if HSRv0 or HSRv1. */ 164 - spinlock_t seqnr_lock; /* locking for sequence_nr */ 163 + u8 prot_version; /* Indicate if HSRv0 or HSRv1. */ 164 + spinlock_t seqnr_lock; /* locking for sequence_nr */ 165 + spinlock_t list_lock; /* locking for node list */ 165 166 unsigned char sup_multicast_addr[ETH_ALEN]; 166 167 #ifdef CONFIG_DEBUG_FS 167 168 struct dentry *node_tbl_root; ··· 185 184 } 186 185 187 186 #if IS_ENABLED(CONFIG_DEBUG_FS) 188 - int hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); 187 + void hsr_debugfs_rename(struct net_device *dev); 188 + void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev); 189 189 void hsr_debugfs_term(struct hsr_priv *priv); 190 + void hsr_debugfs_create_root(void); 191 + void hsr_debugfs_remove_root(void); 190 192 #else 191 - static inline int hsr_debugfs_init(struct hsr_priv *priv, 192 - struct net_device *hsr_dev) 193 + static inline void void hsr_debugfs_rename(struct net_device *dev) 193 194 { 194 - return 0; 195 195 } 196 - 196 + static inline void hsr_debugfs_init(struct hsr_priv *priv, 197 + struct net_device *hsr_dev) 198 + {} 197 199 static inline void hsr_debugfs_term(struct hsr_priv *priv) 200 + {} 201 + static inline void hsr_debugfs_create_root(void) 202 + {} 203 + static inline void hsr_debugfs_remove_root(void) 198 204 {} 199 205 #endif 200 206
+1
net/hsr/hsr_netlink.c
··· 476 476 if (rc) 477 477 goto fail_genl_register_family; 478 478 479 + hsr_debugfs_create_root(); 479 480 return 0; 480 481 481 482 fail_genl_register_family:
+1 -1
net/ipv4/inet_connection_sock.c
··· 1086 1086 if (!dst) 1087 1087 goto out; 1088 1088 } 1089 - dst->ops->update_pmtu(dst, sk, NULL, mtu); 1089 + dst->ops->update_pmtu(dst, sk, NULL, mtu, true); 1090 1090 1091 1091 dst = __sk_dst_check(sk, 0); 1092 1092 if (!dst)
+1 -1
net/ipv4/ip_tunnel.c
··· 505 505 mtu = skb_valid_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu; 506 506 507 507 if (skb_valid_dst(skb)) 508 - skb_dst_update_pmtu(skb, mtu); 508 + skb_dst_update_pmtu_no_confirm(skb, mtu); 509 509 510 510 if (skb->protocol == htons(ETH_P_IP)) { 511 511 if (!skb_is_gso(skb) &&
+1 -1
net/ipv4/ip_vti.c
··· 214 214 215 215 mtu = dst_mtu(dst); 216 216 if (skb->len > mtu) { 217 - skb_dst_update_pmtu(skb, mtu); 217 + skb_dst_update_pmtu_no_confirm(skb, mtu); 218 218 if (skb->protocol == htons(ETH_P_IP)) { 219 219 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, 220 220 htonl(mtu));
+6 -3
net/ipv4/route.c
··· 139 139 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst); 140 140 static void ipv4_link_failure(struct sk_buff *skb); 141 141 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 142 - struct sk_buff *skb, u32 mtu); 142 + struct sk_buff *skb, u32 mtu, 143 + bool confirm_neigh); 143 144 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, 144 145 struct sk_buff *skb); 145 146 static void ipv4_dst_destroy(struct dst_entry *dst); ··· 1044 1043 } 1045 1044 1046 1045 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 1047 - struct sk_buff *skb, u32 mtu) 1046 + struct sk_buff *skb, u32 mtu, 1047 + bool confirm_neigh) 1048 1048 { 1049 1049 struct rtable *rt = (struct rtable *) dst; 1050 1050 struct flowi4 fl4; ··· 2689 2687 } 2690 2688 2691 2689 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, 2692 - struct sk_buff *skb, u32 mtu) 2690 + struct sk_buff *skb, u32 mtu, 2691 + bool confirm_neigh) 2693 2692 { 2694 2693 } 2695 2694
+3
net/ipv4/tcp_output.c
··· 72 72 __skb_unlink(skb, &sk->sk_write_queue); 73 73 tcp_rbtree_insert(&sk->tcp_rtx_queue, skb); 74 74 75 + if (tp->highest_sack == NULL) 76 + tp->highest_sack = skb; 77 + 75 78 tp->packets_out += tcp_skb_pcount(skb); 76 79 if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) 77 80 tcp_rearm_rto(sk);
+1 -1
net/ipv4/udp.c
··· 1475 1475 * queue contains some other skb 1476 1476 */ 1477 1477 rmem = atomic_add_return(size, &sk->sk_rmem_alloc); 1478 - if (rmem > (size + sk->sk_rcvbuf)) 1478 + if (rmem > (size + (unsigned int)sk->sk_rcvbuf)) 1479 1479 goto uncharge_drop; 1480 1480 1481 1481 spin_lock(&list->lock);
+3 -2
net/ipv4/xfrm4_policy.c
··· 100 100 } 101 101 102 102 static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk, 103 - struct sk_buff *skb, u32 mtu) 103 + struct sk_buff *skb, u32 mtu, 104 + bool confirm_neigh) 104 105 { 105 106 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 106 107 struct dst_entry *path = xdst->route; 107 108 108 - path->ops->update_pmtu(path, sk, skb, mtu); 109 + path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh); 109 110 } 110 111 111 112 static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
+1 -1
net/ipv6/inet6_connection_sock.c
··· 146 146 147 147 if (IS_ERR(dst)) 148 148 return NULL; 149 - dst->ops->update_pmtu(dst, sk, NULL, mtu); 149 + dst->ops->update_pmtu(dst, sk, NULL, mtu, true); 150 150 151 151 dst = inet6_csk_route_socket(sk, &fl6); 152 152 return IS_ERR(dst) ? NULL : dst;
+1 -1
net/ipv6/ip6_gre.c
··· 1040 1040 1041 1041 /* TooBig packet may have updated dst->dev's mtu */ 1042 1042 if (!t->parms.collect_md && dst && dst_mtu(dst) > dst->dev->mtu) 1043 - dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu); 1043 + dst->ops->update_pmtu(dst, NULL, skb, dst->dev->mtu, false); 1044 1044 1045 1045 err = ip6_tnl_xmit(skb, dev, dsfield, &fl6, encap_limit, &mtu, 1046 1046 NEXTHDR_GRE);
+2 -2
net/ipv6/ip6_tunnel.c
··· 640 640 if (rel_info > dst_mtu(skb_dst(skb2))) 641 641 goto out; 642 642 643 - skb_dst_update_pmtu(skb2, rel_info); 643 + skb_dst_update_pmtu_no_confirm(skb2, rel_info); 644 644 } 645 645 646 646 icmp_send(skb2, rel_type, rel_code, htonl(rel_info)); ··· 1132 1132 mtu = max(mtu, skb->protocol == htons(ETH_P_IPV6) ? 1133 1133 IPV6_MIN_MTU : IPV4_MIN_MTU); 1134 1134 1135 - skb_dst_update_pmtu(skb, mtu); 1135 + skb_dst_update_pmtu_no_confirm(skb, mtu); 1136 1136 if (skb->len - t->tun_hlen - eth_hlen > mtu && !skb_is_gso(skb)) { 1137 1137 *pmtu = mtu; 1138 1138 err = -EMSGSIZE;
+1 -1
net/ipv6/ip6_vti.c
··· 479 479 480 480 mtu = dst_mtu(dst); 481 481 if (skb->len > mtu) { 482 - skb_dst_update_pmtu(skb, mtu); 482 + skb_dst_update_pmtu_no_confirm(skb, mtu); 483 483 484 484 if (skb->protocol == htons(ETH_P_IPV6)) { 485 485 if (mtu < IPV6_MIN_MTU)
+15 -7
net/ipv6/route.c
··· 95 95 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb); 96 96 static void ip6_link_failure(struct sk_buff *skb); 97 97 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 98 - struct sk_buff *skb, u32 mtu); 98 + struct sk_buff *skb, u32 mtu, 99 + bool confirm_neigh); 99 100 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, 100 101 struct sk_buff *skb); 101 102 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif, ··· 265 264 } 266 265 267 266 static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, 268 - struct sk_buff *skb, u32 mtu) 267 + struct sk_buff *skb, u32 mtu, 268 + bool confirm_neigh) 269 269 { 270 270 } 271 271 ··· 2694 2692 } 2695 2693 2696 2694 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk, 2697 - const struct ipv6hdr *iph, u32 mtu) 2695 + const struct ipv6hdr *iph, u32 mtu, 2696 + bool confirm_neigh) 2698 2697 { 2699 2698 const struct in6_addr *daddr, *saddr; 2700 2699 struct rt6_info *rt6 = (struct rt6_info *)dst; ··· 2713 2710 daddr = NULL; 2714 2711 saddr = NULL; 2715 2712 } 2716 - dst_confirm_neigh(dst, daddr); 2713 + 2714 + if (confirm_neigh) 2715 + dst_confirm_neigh(dst, daddr); 2716 + 2717 2717 mtu = max_t(u32, mtu, IPV6_MIN_MTU); 2718 2718 if (mtu >= dst_mtu(dst)) 2719 2719 return; ··· 2770 2764 } 2771 2765 2772 2766 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, 2773 - struct sk_buff *skb, u32 mtu) 2767 + struct sk_buff *skb, u32 mtu, 2768 + bool confirm_neigh) 2774 2769 { 2775 - __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu); 2770 + __ip6_rt_update_pmtu(dst, sk, skb ? 
ipv6_hdr(skb) : NULL, mtu, 2771 + confirm_neigh); 2776 2772 } 2777 2773 2778 2774 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, ··· 2793 2785 2794 2786 dst = ip6_route_output(net, NULL, &fl6); 2795 2787 if (!dst->error) 2796 - __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu)); 2788 + __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true); 2797 2789 dst_release(dst); 2798 2790 } 2799 2791 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
+1 -1
net/ipv6/sit.c
··· 944 944 } 945 945 946 946 if (tunnel->parms.iph.daddr) 947 - skb_dst_update_pmtu(skb, mtu); 947 + skb_dst_update_pmtu_no_confirm(skb, mtu); 948 948 949 949 if (skb->len > mtu && !skb_is_gso(skb)) { 950 950 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+3 -2
net/ipv6/xfrm6_policy.c
··· 98 98 } 99 99 100 100 static void xfrm6_update_pmtu(struct dst_entry *dst, struct sock *sk, 101 - struct sk_buff *skb, u32 mtu) 101 + struct sk_buff *skb, u32 mtu, 102 + bool confirm_neigh) 102 103 { 103 104 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 104 105 struct dst_entry *path = xdst->route; 105 106 106 - path->ops->update_pmtu(path, sk, skb, mtu); 107 + path->ops->update_pmtu(path, sk, skb, mtu, confirm_neigh); 107 108 } 108 109 109 110 static void xfrm6_redirect(struct dst_entry *dst, struct sock *sk,
+1 -1
net/netfilter/ipvs/ip_vs_xmit.c
··· 208 208 struct rtable *ort = skb_rtable(skb); 209 209 210 210 if (!skb->dev && sk && sk_fullsock(sk)) 211 - ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu); 211 + ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu, true); 212 212 } 213 213 214 214 static inline bool ensure_mtu_is_adequate(struct netns_ipvs *ipvs, int skb_af,
+1 -1
net/netfilter/nf_flow_table_offload.c
··· 88 88 switch (tuple->l4proto) { 89 89 case IPPROTO_TCP: 90 90 key->tcp.flags = 0; 91 - mask->tcp.flags = TCP_FLAG_RST | TCP_FLAG_FIN; 91 + mask->tcp.flags = cpu_to_be16(be32_to_cpu(TCP_FLAG_RST | TCP_FLAG_FIN) >> 16); 92 92 match->dissector.used_keys |= BIT(FLOW_DISSECTOR_KEY_TCP); 93 93 break; 94 94 case IPPROTO_UDP:
+2 -2
net/netfilter/nft_tproxy.c
··· 50 50 taddr = nf_tproxy_laddr4(skb, taddr, iph->daddr); 51 51 52 52 if (priv->sreg_port) 53 - tport = regs->data[priv->sreg_port]; 53 + tport = nft_reg_load16(&regs->data[priv->sreg_port]); 54 54 if (!tport) 55 55 tport = hp->dest; 56 56 ··· 117 117 taddr = *nf_tproxy_laddr6(skb, &taddr, &iph->daddr); 118 118 119 119 if (priv->sreg_port) 120 - tport = regs->data[priv->sreg_port]; 120 + tport = nft_reg_load16(&regs->data[priv->sreg_port]); 121 121 if (!tport) 122 122 tport = hp->dest; 123 123
+7 -3
net/rxrpc/ar-internal.h
··· 209 209 struct rxrpc_security { 210 210 const char *name; /* name of this service */ 211 211 u8 security_index; /* security type provided */ 212 + u32 no_key_abort; /* Abort code indicating no key */ 212 213 213 214 /* Initialise a security service */ 214 215 int (*init)(void); ··· 978 977 struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *, 979 978 struct sk_buff *); 980 979 struct rxrpc_connection *rxrpc_prealloc_service_connection(struct rxrpc_net *, gfp_t); 981 - void rxrpc_new_incoming_connection(struct rxrpc_sock *, 982 - struct rxrpc_connection *, struct sk_buff *); 980 + void rxrpc_new_incoming_connection(struct rxrpc_sock *, struct rxrpc_connection *, 981 + const struct rxrpc_security *, struct key *, 982 + struct sk_buff *); 983 983 void rxrpc_unpublish_service_conn(struct rxrpc_connection *); 984 984 985 985 /* ··· 1105 1103 int __init rxrpc_init_security(void); 1106 1104 void rxrpc_exit_security(void); 1107 1105 int rxrpc_init_client_conn_security(struct rxrpc_connection *); 1108 - int rxrpc_init_server_conn_security(struct rxrpc_connection *); 1106 + bool rxrpc_look_up_server_security(struct rxrpc_local *, struct rxrpc_sock *, 1107 + const struct rxrpc_security **, struct key **, 1108 + struct sk_buff *); 1109 1109 1110 1110 /* 1111 1111 * sendmsg.c
+37 -23
net/rxrpc/call_accept.c
··· 240 240 } 241 241 242 242 /* 243 + * Ping the other end to fill our RTT cache and to retrieve the rwind 244 + * and MTU parameters. 245 + */ 246 + static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) 247 + { 248 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 249 + ktime_t now = skb->tstamp; 250 + 251 + if (call->peer->rtt_usage < 3 || 252 + ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) 253 + rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, 254 + true, true, 255 + rxrpc_propose_ack_ping_for_params); 256 + } 257 + 258 + /* 243 259 * Allocate a new incoming call from the prealloc pool, along with a connection 244 260 * and a peer as necessary. 245 261 */ ··· 263 247 struct rxrpc_local *local, 264 248 struct rxrpc_peer *peer, 265 249 struct rxrpc_connection *conn, 250 + const struct rxrpc_security *sec, 251 + struct key *key, 266 252 struct sk_buff *skb) 267 253 { 268 254 struct rxrpc_backlog *b = rx->backlog; ··· 312 294 conn->params.local = rxrpc_get_local(local); 313 295 conn->params.peer = peer; 314 296 rxrpc_see_connection(conn); 315 - rxrpc_new_incoming_connection(rx, conn, skb); 297 + rxrpc_new_incoming_connection(rx, conn, sec, key, skb); 316 298 } else { 317 299 rxrpc_get_connection(conn); 318 300 } ··· 351 333 struct sk_buff *skb) 352 334 { 353 335 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 336 + const struct rxrpc_security *sec = NULL; 354 337 struct rxrpc_connection *conn; 355 338 struct rxrpc_peer *peer = NULL; 356 - struct rxrpc_call *call; 339 + struct rxrpc_call *call = NULL; 340 + struct key *key = NULL; 357 341 358 342 _enter(""); 359 343 ··· 366 346 sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); 367 347 skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 368 348 skb->priority = RX_INVALID_OPERATION; 369 - _leave(" = NULL [close]"); 370 - call = NULL; 371 - goto out; 349 + goto no_call; 372 350 } 373 351 374 352 /* The peer, connection and call may all have sprung into existence due ··· 376 358 */ 377 359 conn = 
rxrpc_find_connection_rcu(local, skb, &peer); 378 360 379 - call = rxrpc_alloc_incoming_call(rx, local, peer, conn, skb); 361 + if (!conn && !rxrpc_look_up_server_security(local, rx, &sec, &key, skb)) 362 + goto no_call; 363 + 364 + call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb); 365 + key_put(key); 380 366 if (!call) { 381 367 skb->mark = RXRPC_SKB_MARK_REJECT_BUSY; 382 - _leave(" = NULL [busy]"); 383 - call = NULL; 384 - goto out; 368 + goto no_call; 385 369 } 386 370 387 371 trace_rxrpc_receive(call, rxrpc_receive_incoming, 388 372 sp->hdr.serial, sp->hdr.seq); 389 - 390 - /* Lock the call to prevent rxrpc_kernel_send/recv_data() and 391 - * sendmsg()/recvmsg() inconveniently stealing the mutex once the 392 - * notification is generated. 393 - * 394 - * The BUG should never happen because the kernel should be well 395 - * behaved enough not to access the call before the first notification 396 - * event and userspace is prevented from doing so until the state is 397 - * appropriate. 398 - */ 399 - if (!mutex_trylock(&call->user_mutex)) 400 - BUG(); 401 373 402 374 /* Make the call live. */ 403 375 rxrpc_incoming_call(rx, call, skb); ··· 429 421 BUG(); 430 422 } 431 423 spin_unlock(&conn->state_lock); 424 + spin_unlock(&rx->incoming_lock); 425 + 426 + rxrpc_send_ping(call, skb); 432 427 433 428 if (call->state == RXRPC_CALL_SERVER_ACCEPTING) 434 429 rxrpc_notify_socket(call); ··· 444 433 rxrpc_put_call(call, rxrpc_call_put); 445 434 446 435 _leave(" = %p{%d}", call, call->debug_id); 447 - out: 448 - spin_unlock(&rx->incoming_lock); 449 436 return call; 437 + 438 + no_call: 439 + spin_unlock(&rx->incoming_lock); 440 + _leave(" = NULL [%u]", skb->mark); 441 + return NULL; 450 442 } 451 443 452 444 /*
+1 -15
net/rxrpc/conn_event.c
··· 376 376 _enter("{%d}", conn->debug_id); 377 377 378 378 ASSERT(conn->security_ix != 0); 379 - 380 - if (!conn->params.key) { 381 - _debug("set up security"); 382 - ret = rxrpc_init_server_conn_security(conn); 383 - switch (ret) { 384 - case 0: 385 - break; 386 - case -ENOENT: 387 - abort_code = RX_CALL_DEAD; 388 - goto abort; 389 - default: 390 - abort_code = RXKADNOAUTH; 391 - goto abort; 392 - } 393 - } 379 + ASSERT(conn->server_key); 394 380 395 381 if (conn->security->issue_challenge(conn) < 0) { 396 382 abort_code = RX_CALL_DEAD;
+4
net/rxrpc/conn_service.c
··· 148 148 */ 149 149 void rxrpc_new_incoming_connection(struct rxrpc_sock *rx, 150 150 struct rxrpc_connection *conn, 151 + const struct rxrpc_security *sec, 152 + struct key *key, 151 153 struct sk_buff *skb) 152 154 { 153 155 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); ··· 162 160 conn->service_id = sp->hdr.serviceId; 163 161 conn->security_ix = sp->hdr.securityIndex; 164 162 conn->out_clientflag = 0; 163 + conn->security = sec; 164 + conn->server_key = key_get(key); 165 165 if (conn->security_ix) 166 166 conn->state = RXRPC_CONN_SERVICE_UNSECURED; 167 167 else
-18
net/rxrpc/input.c
··· 193 193 } 194 194 195 195 /* 196 - * Ping the other end to fill our RTT cache and to retrieve the rwind 197 - * and MTU parameters. 198 - */ 199 - static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb) 200 - { 201 - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 202 - ktime_t now = skb->tstamp; 203 - 204 - if (call->peer->rtt_usage < 3 || 205 - ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now)) 206 - rxrpc_propose_ACK(call, RXRPC_ACK_PING, sp->hdr.serial, 207 - true, true, 208 - rxrpc_propose_ack_ping_for_params); 209 - } 210 - 211 - /* 212 196 * Apply a hard ACK by advancing the Tx window. 213 197 */ 214 198 static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to, ··· 1380 1396 call = rxrpc_new_incoming_call(local, rx, skb); 1381 1397 if (!call) 1382 1398 goto reject_packet; 1383 - rxrpc_send_ping(call, skb); 1384 - mutex_unlock(&call->user_mutex); 1385 1399 } 1386 1400 1387 1401 /* Process a call packet; this either discards or passes on the ref
+3 -2
net/rxrpc/rxkad.c
··· 648 648 u32 serial; 649 649 int ret; 650 650 651 - _enter("{%d,%x}", conn->debug_id, key_serial(conn->params.key)); 651 + _enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key)); 652 652 653 - ret = key_validate(conn->params.key); 653 + ret = key_validate(conn->server_key); 654 654 if (ret < 0) 655 655 return ret; 656 656 ··· 1293 1293 const struct rxrpc_security rxkad = { 1294 1294 .name = "rxkad", 1295 1295 .security_index = RXRPC_SECURITY_RXKAD, 1296 + .no_key_abort = RXKADUNKNOWNKEY, 1296 1297 .init = rxkad_init, 1297 1298 .exit = rxkad_exit, 1298 1299 .init_connection_security = rxkad_init_connection_security,
+33 -37
net/rxrpc/security.c
··· 101 101 } 102 102 103 103 /* 104 - * initialise the security on a server connection 104 + * Find the security key for a server connection. 105 105 */ 106 - int rxrpc_init_server_conn_security(struct rxrpc_connection *conn) 106 + bool rxrpc_look_up_server_security(struct rxrpc_local *local, struct rxrpc_sock *rx, 107 + const struct rxrpc_security **_sec, 108 + struct key **_key, 109 + struct sk_buff *skb) 107 110 { 108 111 const struct rxrpc_security *sec; 109 - struct rxrpc_local *local = conn->params.local; 110 - struct rxrpc_sock *rx; 111 - struct key *key; 112 - key_ref_t kref; 112 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 113 + key_ref_t kref = NULL; 113 114 char kdesc[5 + 1 + 3 + 1]; 114 115 115 116 _enter(""); 116 117 117 - sprintf(kdesc, "%u:%u", conn->service_id, conn->security_ix); 118 + sprintf(kdesc, "%u:%u", sp->hdr.serviceId, sp->hdr.securityIndex); 118 119 119 - sec = rxrpc_security_lookup(conn->security_ix); 120 + sec = rxrpc_security_lookup(sp->hdr.securityIndex); 120 121 if (!sec) { 121 - _leave(" = -ENOKEY [lookup]"); 122 - return -ENOKEY; 122 + trace_rxrpc_abort(0, "SVS", 123 + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 124 + RX_INVALID_OPERATION, EKEYREJECTED); 125 + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 126 + skb->priority = RX_INVALID_OPERATION; 127 + return false; 123 128 } 124 129 125 - /* find the service */ 126 - read_lock(&local->services_lock); 127 - rx = rcu_dereference_protected(local->service, 128 - lockdep_is_held(&local->services_lock)); 129 - if (rx && (rx->srx.srx_service == conn->service_id || 130 - rx->second_service == conn->service_id)) 131 - goto found_service; 130 + if (sp->hdr.securityIndex == RXRPC_SECURITY_NONE) 131 + goto out; 132 132 133 - /* the service appears to have died */ 134 - read_unlock(&local->services_lock); 135 - _leave(" = -ENOENT"); 136 - return -ENOENT; 137 - 138 - found_service: 139 133 if (!rx->securities) { 140 - read_unlock(&local->services_lock); 141 - _leave(" = -ENOKEY"); 142 - return 
-ENOKEY; 134 + trace_rxrpc_abort(0, "SVR", 135 + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 136 + RX_INVALID_OPERATION, EKEYREJECTED); 137 + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 138 + skb->priority = RX_INVALID_OPERATION; 139 + return false; 143 140 } 144 141 145 142 /* look through the service's keyring */ 146 143 kref = keyring_search(make_key_ref(rx->securities, 1UL), 147 144 &key_type_rxrpc_s, kdesc, true); 148 145 if (IS_ERR(kref)) { 149 - read_unlock(&local->services_lock); 150 - _leave(" = %ld [search]", PTR_ERR(kref)); 151 - return PTR_ERR(kref); 146 + trace_rxrpc_abort(0, "SVK", 147 + sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 148 + sec->no_key_abort, EKEYREJECTED); 149 + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 150 + skb->priority = sec->no_key_abort; 151 + return false; 152 152 } 153 153 154 - key = key_ref_to_ptr(kref); 155 - read_unlock(&local->services_lock); 156 - 157 - conn->server_key = key; 158 - conn->security = sec; 159 - 160 - _leave(" = 0"); 161 - return 0; 154 + out: 155 + *_sec = sec; 156 + *_key = key_ref_to_ptr(kref); 157 + return true; 162 158 }
+12 -10
net/sched/act_mirred.c
··· 219 219 bool use_reinsert; 220 220 bool want_ingress; 221 221 bool is_redirect; 222 + bool expects_nh; 222 223 int m_eaction; 223 224 int mac_len; 225 + bool at_nh; 224 226 225 227 rec_level = __this_cpu_inc_return(mirred_rec_level); 226 228 if (unlikely(rec_level > MIRRED_RECURSION_LIMIT)) { ··· 263 261 goto out; 264 262 } 265 263 266 - /* If action's target direction differs than filter's direction, 267 - * and devices expect a mac header on xmit, then mac push/pull is 268 - * needed. 269 - */ 270 264 want_ingress = tcf_mirred_act_wants_ingress(m_eaction); 271 - if (skb_at_tc_ingress(skb) != want_ingress && m_mac_header_xmit) { 272 - if (!skb_at_tc_ingress(skb)) { 273 - /* caught at egress, act ingress: pull mac */ 274 - mac_len = skb_network_header(skb) - skb_mac_header(skb); 265 + 266 + expects_nh = want_ingress || !m_mac_header_xmit; 267 + at_nh = skb->data == skb_network_header(skb); 268 + if (at_nh != expects_nh) { 269 + mac_len = skb_at_tc_ingress(skb) ? skb->mac_len : 270 + skb_network_header(skb) - skb_mac_header(skb); 271 + if (expects_nh) { 272 + /* target device/action expect data at nh */ 275 273 skb_pull_rcsum(skb2, mac_len); 276 274 } else { 277 - /* caught at ingress, act egress: push mac */ 278 - skb_push_rcsum(skb2, skb->mac_len); 275 + /* target device/action expect data at mac */ 276 + skb_push_rcsum(skb2, mac_len); 279 277 } 280 278 } 281 279
+5 -26
net/sched/cls_api.c
··· 308 308 tcf_proto_destroy(tp, rtnl_held, true, extack); 309 309 } 310 310 311 - static int walker_check_empty(struct tcf_proto *tp, void *fh, 312 - struct tcf_walker *arg) 311 + static bool tcf_proto_check_delete(struct tcf_proto *tp) 313 312 { 314 - if (fh) { 315 - arg->nonempty = true; 316 - return -1; 317 - } 318 - return 0; 319 - } 313 + if (tp->ops->delete_empty) 314 + return tp->ops->delete_empty(tp); 320 315 321 - static bool tcf_proto_is_empty(struct tcf_proto *tp, bool rtnl_held) 322 - { 323 - struct tcf_walker walker = { .fn = walker_check_empty, }; 324 - 325 - if (tp->ops->walk) { 326 - tp->ops->walk(tp, &walker, rtnl_held); 327 - return !walker.nonempty; 328 - } 329 - return true; 330 - } 331 - 332 - static bool tcf_proto_check_delete(struct tcf_proto *tp, bool rtnl_held) 333 - { 334 - spin_lock(&tp->lock); 335 - if (tcf_proto_is_empty(tp, rtnl_held)) 336 - tp->deleting = true; 337 - spin_unlock(&tp->lock); 316 + tp->deleting = true; 338 317 return tp->deleting; 339 318 } 340 319 ··· 1730 1751 * concurrently. 1731 1752 * Mark tp for deletion if it is empty. 1732 1753 */ 1733 - if (!tp_iter || !tcf_proto_check_delete(tp, rtnl_held)) { 1754 + if (!tp_iter || !tcf_proto_check_delete(tp)) { 1734 1755 mutex_unlock(&chain->filter_chain_lock); 1735 1756 return; 1736 1757 }
+12
net/sched/cls_flower.c
··· 2773 2773 f->res.class = cl; 2774 2774 } 2775 2775 2776 + static bool fl_delete_empty(struct tcf_proto *tp) 2777 + { 2778 + struct cls_fl_head *head = fl_head_dereference(tp); 2779 + 2780 + spin_lock(&tp->lock); 2781 + tp->deleting = idr_is_empty(&head->handle_idr); 2782 + spin_unlock(&tp->lock); 2783 + 2784 + return tp->deleting; 2785 + } 2786 + 2776 2787 static struct tcf_proto_ops cls_fl_ops __read_mostly = { 2777 2788 .kind = "flower", 2778 2789 .classify = fl_classify, ··· 2793 2782 .put = fl_put, 2794 2783 .change = fl_change, 2795 2784 .delete = fl_delete, 2785 + .delete_empty = fl_delete_empty, 2796 2786 .walk = fl_walk, 2797 2787 .reoffload = fl_reoffload, 2798 2788 .hw_add = fl_hw_add,
-25
net/sched/cls_u32.c
··· 1108 1108 return err; 1109 1109 } 1110 1110 1111 - static bool u32_hnode_empty(struct tc_u_hnode *ht, bool *non_root_ht) 1112 - { 1113 - int i; 1114 - 1115 - if (!ht) 1116 - return true; 1117 - if (!ht->is_root) { 1118 - *non_root_ht = true; 1119 - return false; 1120 - } 1121 - if (*non_root_ht) 1122 - return false; 1123 - if (ht->refcnt < 2) 1124 - return true; 1125 - 1126 - for (i = 0; i <= ht->divisor; i++) { 1127 - if (rtnl_dereference(ht->ht[i])) 1128 - return false; 1129 - } 1130 - return true; 1131 - } 1132 - 1133 1111 static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg, 1134 1112 bool rtnl_held) 1135 1113 { 1136 1114 struct tc_u_common *tp_c = tp->data; 1137 - bool non_root_ht = false; 1138 1115 struct tc_u_hnode *ht; 1139 1116 struct tc_u_knode *n; 1140 1117 unsigned int h; ··· 1124 1147 ht = rtnl_dereference(ht->next)) { 1125 1148 if (ht->prio != tp->prio) 1126 1149 continue; 1127 - if (u32_hnode_empty(ht, &non_root_ht)) 1128 - return; 1129 1150 if (arg->count >= arg->skip) { 1130 1151 if (arg->fn(tp, ht, arg) < 0) { 1131 1152 arg->stop = 1;
+8 -9
net/sched/sch_fq.c
··· 301 301 f->socket_hash != sk->sk_hash)) { 302 302 f->credit = q->initial_quantum; 303 303 f->socket_hash = sk->sk_hash; 304 + if (q->rate_enable) 305 + smp_store_release(&sk->sk_pacing_status, 306 + SK_PACING_FQ); 304 307 if (fq_flow_is_throttled(f)) 305 308 fq_flow_unset_throttled(q, f); 306 309 f->time_next_packet = 0ULL; ··· 325 322 326 323 fq_flow_set_detached(f); 327 324 f->sk = sk; 328 - if (skb->sk == sk) 325 + if (skb->sk == sk) { 329 326 f->socket_hash = sk->sk_hash; 327 + if (q->rate_enable) 328 + smp_store_release(&sk->sk_pacing_status, 329 + SK_PACING_FQ); 330 + } 330 331 f->credit = q->initial_quantum; 331 332 332 333 rb_link_node(&f->fq_node, parent, p); ··· 435 428 f->qlen++; 436 429 qdisc_qstats_backlog_inc(sch, skb); 437 430 if (fq_flow_is_detached(f)) { 438 - struct sock *sk = skb->sk; 439 - 440 431 fq_flow_add_tail(&q->new_flows, f); 441 432 if (time_after(jiffies, f->age + q->flow_refill_delay)) 442 433 f->credit = max_t(u32, f->credit, q->quantum); 443 - if (sk && q->rate_enable) { 444 - if (unlikely(smp_load_acquire(&sk->sk_pacing_status) != 445 - SK_PACING_FQ)) 446 - smp_store_release(&sk->sk_pacing_status, 447 - SK_PACING_FQ); 448 - } 449 434 q->inactive_flows--; 450 435 } 451 436
+15 -15
net/sctp/stream.c
··· 84 84 return 0; 85 85 86 86 ret = genradix_prealloc(&stream->out, outcnt, gfp); 87 - if (ret) { 88 - genradix_free(&stream->out); 87 + if (ret) 89 88 return ret; 90 - } 91 89 92 90 stream->outcnt = outcnt; 93 91 return 0; ··· 100 102 return 0; 101 103 102 104 ret = genradix_prealloc(&stream->in, incnt, gfp); 103 - if (ret) { 104 - genradix_free(&stream->in); 105 + if (ret) 105 106 return ret; 106 - } 107 107 108 108 stream->incnt = incnt; 109 109 return 0; ··· 119 123 * a new one with new outcnt to save memory if needed. 120 124 */ 121 125 if (outcnt == stream->outcnt) 122 - goto in; 126 + goto handle_in; 123 127 124 128 /* Filter out chunks queued on streams that won't exist anymore */ 125 129 sched->unsched_all(stream); ··· 128 132 129 133 ret = sctp_stream_alloc_out(stream, outcnt, gfp); 130 134 if (ret) 131 - goto out; 135 + goto out_err; 132 136 133 137 for (i = 0; i < stream->outcnt; i++) 134 138 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN; 135 139 136 - in: 140 + handle_in: 137 141 sctp_stream_interleave_init(stream); 138 142 if (!incnt) 139 143 goto out; 140 144 141 145 ret = sctp_stream_alloc_in(stream, incnt, gfp); 142 - if (ret) { 143 - sched->free(stream); 144 - genradix_free(&stream->out); 145 - stream->outcnt = 0; 146 - goto out; 147 - } 146 + if (ret) 147 + goto in_err; 148 148 149 + goto out; 150 + 151 + in_err: 152 + sched->free(stream); 153 + genradix_free(&stream->in); 154 + out_err: 155 + genradix_free(&stream->out); 156 + stream->outcnt = 0; 149 157 out: 150 158 return ret; 151 159 }
+1 -1
net/sctp/transport.c
··· 263 263 264 264 pf->af->from_sk(&addr, sk); 265 265 pf->to_sk_daddr(&t->ipaddr, sk); 266 - dst->ops->update_pmtu(dst, sk, NULL, pmtu); 266 + dst->ops->update_pmtu(dst, sk, NULL, pmtu, true); 267 267 pf->to_sk_daddr(&addr, sk); 268 268 269 269 dst = sctp_transport_dst_check(t);
+8 -7
tools/lib/bpf/Makefile
··· 138 138 BPF_IN_SHARED := $(SHARED_OBJDIR)libbpf-in.o 139 139 BPF_IN_STATIC := $(STATIC_OBJDIR)libbpf-in.o 140 140 VERSION_SCRIPT := libbpf.map 141 + BPF_HELPER_DEFS := $(OUTPUT)bpf_helper_defs.h 141 142 142 143 LIB_TARGET := $(addprefix $(OUTPUT),$(LIB_TARGET)) 143 144 LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE)) ··· 160 159 161 160 all_cmd: $(CMD_TARGETS) check 162 161 163 - $(BPF_IN_SHARED): force elfdep bpfdep bpf_helper_defs.h 162 + $(BPF_IN_SHARED): force elfdep bpfdep $(BPF_HELPER_DEFS) 164 163 @(test -f ../../include/uapi/linux/bpf.h -a -f ../../../include/uapi/linux/bpf.h && ( \ 165 164 (diff -B ../../include/uapi/linux/bpf.h ../../../include/uapi/linux/bpf.h >/dev/null) || \ 166 165 echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/bpf.h' differs from latest version at 'include/uapi/linux/bpf.h'" >&2 )) || true ··· 178 177 echo "Warning: Kernel ABI header at 'tools/include/uapi/linux/if_xdp.h' differs from latest version at 'include/uapi/linux/if_xdp.h'" >&2 )) || true 179 178 $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(SHARED_OBJDIR) CFLAGS="$(CFLAGS) $(SHLIB_FLAGS)" 180 179 181 - $(BPF_IN_STATIC): force elfdep bpfdep bpf_helper_defs.h 180 + $(BPF_IN_STATIC): force elfdep bpfdep $(BPF_HELPER_DEFS) 182 181 $(Q)$(MAKE) $(build)=libbpf OUTPUT=$(STATIC_OBJDIR) 183 182 184 - bpf_helper_defs.h: $(srctree)/tools/include/uapi/linux/bpf.h 183 + $(BPF_HELPER_DEFS): $(srctree)/tools/include/uapi/linux/bpf.h 185 184 $(Q)$(srctree)/scripts/bpf_helpers_doc.py --header \ 186 - --file $(srctree)/tools/include/uapi/linux/bpf.h > bpf_helper_defs.h 185 + --file $(srctree)/tools/include/uapi/linux/bpf.h > $(BPF_HELPER_DEFS) 187 186 188 187 $(OUTPUT)libbpf.so: $(OUTPUT)libbpf.so.$(LIBBPF_VERSION) 189 188 ··· 244 243 $(call do_install_mkdir,$(libdir_SQ)); \ 245 244 cp -fpR $(LIB_FILE) $(DESTDIR)$(libdir_SQ) 246 245 247 - install_headers: bpf_helper_defs.h 246 + install_headers: $(BPF_HELPER_DEFS) 248 247 $(call QUIET_INSTALL, headers) \ 249 248 $(call 
do_install,bpf.h,$(prefix)/include/bpf,644); \ 250 249 $(call do_install,libbpf.h,$(prefix)/include/bpf,644); \ ··· 252 251 $(call do_install,libbpf_util.h,$(prefix)/include/bpf,644); \ 253 252 $(call do_install,xsk.h,$(prefix)/include/bpf,644); \ 254 253 $(call do_install,bpf_helpers.h,$(prefix)/include/bpf,644); \ 255 - $(call do_install,bpf_helper_defs.h,$(prefix)/include/bpf,644); \ 254 + $(call do_install,$(BPF_HELPER_DEFS),$(prefix)/include/bpf,644); \ 256 255 $(call do_install,bpf_tracing.h,$(prefix)/include/bpf,644); \ 257 256 $(call do_install,bpf_endian.h,$(prefix)/include/bpf,644); \ 258 257 $(call do_install,bpf_core_read.h,$(prefix)/include/bpf,644); ··· 272 271 clean: 273 272 $(call QUIET_CLEAN, libbpf) $(RM) -rf $(CMD_TARGETS) \ 274 273 *.o *~ *.a *.so *.so.$(LIBBPF_MAJOR_VERSION) .*.d .*.cmd \ 275 - *.pc LIBBPF-CFLAGS bpf_helper_defs.h \ 274 + *.pc LIBBPF-CFLAGS $(BPF_HELPER_DEFS) \ 276 275 $(SHARED_OBJDIR) $(STATIC_OBJDIR) 277 276 $(call QUIET_CLEAN, core-gen) $(RM) $(OUTPUT)FEATURE-DUMP.libbpf 278 277
+1
tools/testing/selftests/bpf/.gitignore
··· 40 40 test_cpp 41 41 /no_alu32 42 42 /bpf_gcc 43 + bpf_helper_defs.h
+3 -3
tools/testing/selftests/bpf/Makefile
··· 120 120 $(BPFOBJ): force 121 121 $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ 122 122 123 - BPF_HELPERS := $(BPFDIR)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h) 124 - $(BPFDIR)/bpf_helper_defs.h: 125 - $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ bpf_helper_defs.h 123 + BPF_HELPERS := $(OUTPUT)/bpf_helper_defs.h $(wildcard $(BPFDIR)/bpf_*.h) 124 + $(OUTPUT)/bpf_helper_defs.h: 125 + $(MAKE) -C $(BPFDIR) OUTPUT=$(OUTPUT)/ $(OUTPUT)/bpf_helper_defs.h 126 126 127 127 # Get Clang's default includes on this system, as opposed to those seen by 128 128 # '-target bpf'. This fixes "missing" files on some architectures/distros,
+34 -5
tools/testing/selftests/netfilter/nft_flowtable.sh
··· 226 226 return 0 227 227 } 228 228 229 - test_tcp_forwarding() 229 + test_tcp_forwarding_ip() 230 230 { 231 231 local nsa=$1 232 232 local nsb=$2 233 + local dstip=$3 234 + local dstport=$4 233 235 local lret=0 234 236 235 237 ip netns exec $nsb nc -w 5 -l -p 12345 < "$ns2in" > "$ns2out" & 236 238 lpid=$! 237 239 238 240 sleep 1 239 - ip netns exec $nsa nc -w 4 10.0.2.99 12345 < "$ns1in" > "$ns1out" & 241 + ip netns exec $nsa nc -w 4 "$dstip" "$dstport" < "$ns1in" > "$ns1out" & 240 242 cpid=$! 241 243 242 244 sleep 3 ··· 255 253 check_transfer "$ns2in" "$ns1out" "ns1 <- ns2" 256 254 if [ $? -ne 0 ];then 257 255 lret=1 256 + fi 257 + 258 + return $lret 259 + } 260 + 261 + test_tcp_forwarding() 262 + { 263 + test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345 264 + 265 + return $? 266 + } 267 + 268 + test_tcp_forwarding_nat() 269 + { 270 + local lret 271 + 272 + test_tcp_forwarding_ip "$1" "$2" 10.0.2.99 12345 273 + lret=$? 274 + 275 + if [ $lret -eq 0 ] ; then 276 + test_tcp_forwarding_ip "$1" "$2" 10.6.6.6 1666 277 + lret=$? 258 278 fi 259 279 260 280 return $lret ··· 307 283 # Same, but with NAT enabled. 308 284 ip netns exec nsr1 nft -f - <<EOF 309 285 table ip nat { 286 + chain prerouting { 287 + type nat hook prerouting priority 0; policy accept; 288 + meta iif "veth0" ip daddr 10.6.6.6 tcp dport 1666 counter dnat ip to 10.0.2.99:12345 289 + } 290 + 310 291 chain postrouting { 311 292 type nat hook postrouting priority 0; policy accept; 312 - meta oifname "veth1" masquerade 293 + meta oifname "veth1" counter masquerade 313 294 } 314 295 } 315 296 EOF 316 297 317 - test_tcp_forwarding ns1 ns2 298 + test_tcp_forwarding_nat ns1 ns2 318 299 319 300 if [ $? 
-eq 0 ] ;then 320 301 echo "PASS: flow offloaded for ns1/ns2 with NAT" ··· 342 313 ip netns exec ns1 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null 343 314 ip netns exec ns2 sysctl net.ipv4.ip_no_pmtu_disc=0 > /dev/null 344 315 345 - test_tcp_forwarding ns1 ns2 316 + test_tcp_forwarding_nat ns1 ns2 346 317 if [ $? -eq 0 ] ;then 347 318 echo "PASS: flow offloaded for ns1/ns2 with NAT and pmtu discovery" 348 319 else