Merge tag 'perf-urgent-for-mingo-5.2-20190528' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent

Pull perf/urgent fixes:

BPF:

Jiri Olsa:

- Fix up the determination of the end of the kernel map, to avoid having BPF
programs that are placed after the kernel headers and just before the module
text mixed up in the kernel map.
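
As an illustration of the address layout involved (a user-space sketch, not perf's actual code): the last core-kernel text symbol, the JITed BPF program symbols (bpf_prog_*) and the first module text symbol can all be read from /proc/kallsyms, and the fix is about not letting the kernel map extend over that BPF region. Requires root or kernel.kptr_restrict=0, otherwise all addresses read back as zero.

  /* Hypothetical helper, not tools/perf code: report where core kernel text
   * ends, where JITed BPF programs start and where module text starts,
   * according to /proc/kallsyms.
   */
  #include <inttypes.h>
  #include <stdio.h>
  #include <string.h>

  int main(void)
  {
          uint64_t kernel_end = 0, first_bpf = UINT64_MAX, first_module = UINT64_MAX;
          FILE *f = fopen("/proc/kallsyms", "r");
          char line[512];

          if (!f) {
                  perror("fopen");
                  return 1;
          }
          while (fgets(line, sizeof(line), f)) {
                  char type, name[256], mod[256] = "";
                  uint64_t addr;

                  if (sscanf(line, "%" SCNx64 " %c %255s %255s",
                             &addr, &type, name, mod) < 3)
                          continue;
                  if (type != 't' && type != 'T')
                          continue;
                  if (!strncmp(name, "bpf_prog_", 9)) {
                          if (addr < first_bpf)
                                  first_bpf = addr;
                  } else if (mod[0] == '[') {
                          if (addr < first_module)
                                  first_module = addr;
                  } else if (addr > kernel_end) {
                          kernel_end = addr;
                  }
          }
          fclose(f);
          printf("end of core kernel text:  %#" PRIx64 "\n", kernel_end);
          printf("first JITed BPF program:  %#" PRIx64 "\n", first_bpf);
          printf("first module text symbol: %#" PRIx64 "\n", first_module);
          return 0;
  }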

tools UAPI header copies:

Arnaldo Carvalho de Melo:

- Update the copies of files related to the new fspick, fsmount, fsconfig, fsopen,
move_mount and open_tree syscalls (see the sketch after this list).

- Sync cpufeatures.h, sched.h, fs.h, drm.h, i915_drm.h and kvm.h headers.
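
For the new mount API syscalls mentioned above, a minimal, hedged sketch of how they compose when mounting a tmpfs. The syscall numbers are the x86_64 ones and the flag values mirror the include/uapi/linux/mount.h copies being synced here; there were no libc wrappers at the time, hence raw syscall(2).

  /* Sketch only: fsopen() -> fsconfig() -> fsmount() -> move_mount().
   * Local NR_* and flag defines mirror the UAPI values for x86_64; run as root.
   */
  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <stdio.h>
  #include <unistd.h>

  #define NR_MOVE_MOUNT           429     /* x86_64 syscall numbers */
  #define NR_FSOPEN               430
  #define NR_FSCONFIG             431
  #define NR_FSMOUNT              432

  #define FSCONFIG_SET_STRING     1       /* from <linux/mount.h> */
  #define FSCONFIG_CMD_CREATE     6
  #define MOVE_MOUNT_F_EMPTY_PATH 0x00000004

  int main(void)
  {
          int fsfd, mfd;

          fsfd = syscall(NR_FSOPEN, "tmpfs", 0);          /* new fs context */
          if (fsfd < 0) {
                  perror("fsopen");
                  return 1;
          }

          /* Set an option, then instantiate the superblock. */
          syscall(NR_FSCONFIG, fsfd, FSCONFIG_SET_STRING, "size", "16m", 0);
          if (syscall(NR_FSCONFIG, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0) < 0) {
                  perror("fsconfig");
                  return 1;
          }

          mfd = syscall(NR_FSMOUNT, fsfd, 0, 0);          /* detached mount */
          if (mfd < 0) {
                  perror("fsmount");
                  return 1;
          }

          /* Attach the detached mount at /mnt. */
          if (syscall(NR_MOVE_MOUNT, mfd, "", AT_FDCWD, "/mnt",
                      MOVE_MOUNT_F_EMPTY_PATH) < 0) {
                  perror("move_mount");
                  return 1;
          }

          close(mfd);
          close(fsfd);
          return 0;
  }

Having the UAPI copies in the tools tree is what lets 'perf trace' map these numbers and flag bits back to symbolic names.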

Namespaces:

Namhyung Kim:

- Add missing byte swap ops for namespace events when processing records from
perf.data files that could have been recorded on an arch with a different
endianness (see the sketch after this list).

- Fix access to the thread namespaces list by using the namespaces_lock.
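
For the byte-swap fix above, a minimal sketch of the kind of conversion involved. The struct layout follows the PERF_RECORD_NAMESPACES description in the UAPI perf_event.h; the helper name and code are illustrative, not the actual tools/perf change.

  /* Illustrative only: byte-swap a PERF_RECORD_NAMESPACES payload that was
   * recorded on a machine with the opposite endianness.
   */
  #include <byteswap.h>
  #include <stdint.h>

  struct ns_link_info {
          uint64_t dev;
          uint64_t ino;
  };

  struct namespaces_event {
          /* struct perf_event_header omitted; it is swapped separately */
          uint32_t pid, tid;
          uint64_t nr_namespaces;
          struct ns_link_info link_info[];
  };

  static void namespaces_event__swap(struct namespaces_event *ev)
  {
          uint64_t i;

          ev->pid           = bswap_32(ev->pid);
          ev->tid           = bswap_32(ev->tid);
          ev->nr_namespaces = bswap_64(ev->nr_namespaces);

          for (i = 0; i < ev->nr_namespaces; i++) {
                  ev->link_info[i].dev = bswap_64(ev->link_info[i].dev);
                  ev->link_info[i].ino = bswap_64(ev->link_info[i].ino);
          }
  }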

perf data:

Shawn Landden:

- Fix 'strncat may truncate' build failure with recent gcc.
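
For context, a generic illustration of this class of warning rather than the actual perf code: recent gcc complains about strncat() calls whose bound is taken from the source string, since such a bound says nothing about the room left in the destination; one common way to address it is to bound by the remaining destination space after an explicit fit check.

  /* Illustrative only, not the tools/perf fix itself. */
  #include <stdio.h>
  #include <string.h>

  static int append_suffix(char *buf, size_t buf_size, const char *suffix)
  {
          size_t len = strlen(buf);

          /*
           * A bound taken from the source, e.g.
           *     strncat(buf, suffix, strlen(suffix));
           * is the sort of call recent gcc flags as possibly truncating.
           */
          if (len + strlen(suffix) >= buf_size)
                  return -1;      /* does not fit, refuse rather than truncate */

          strncat(buf, suffix, buf_size - len - 1);
          return 0;
  }

  int main(void)
  {
          char path[64] = "perf.data";

          if (!append_suffix(path, sizeof(path), ".old"))
                  printf("%s\n", path);
          return 0;
  }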

s/390:

Thomas Richter:

- Fix s390 missing module symbol and warning for non-root users in 'perf record'.

arm64:

Vitaly Chikunov:

- Fix mksyscalltbl when system kernel headers are ahead of the kernel.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>

Changed files: +1488 -591

+1 -1
Documentation/bpf/btf.rst
··· 131 131 ``btf_type`` is followed by a ``u32`` with the following bits arrangement:: 132 132 133 133 #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) 134 - #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) 134 + #define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16) 135 135 #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) 136 136 137 137 The ``BTF_INT_ENCODING`` has the following attributes::
-14
Documentation/kbuild/makefiles.txt
··· 437 437 The second argument is optional, and if supplied will be used 438 438 if first argument is not supported. 439 439 440 - cc-ldoption 441 - cc-ldoption is used to check if $(CC) when used to link object files 442 - supports the given option. An optional second option may be 443 - specified if first option are not supported. 444 - 445 - Example: 446 - #arch/x86/kernel/Makefile 447 - vsyscall-flags += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) 448 - 449 - In the above example, vsyscall-flags will be assigned the option 450 - -Wl$(comma)--hash-style=sysv if it is supported by $(CC). 451 - The second argument is optional, and if supplied will be used 452 - if first argument is not supported. 453 - 454 440 as-instr 455 441 as-instr checks if the assembler reports a specific instruction 456 442 and then outputs either option1 or option2
+2 -4
MAINTAINERS
··· 11068 11068 F: drivers/net/ethernet/qlogic/netxen/ 11069 11069 11070 11070 NFC SUBSYSTEM 11071 - M: Samuel Ortiz <sameo@linux.intel.com> 11072 - L: linux-wireless@vger.kernel.org 11073 - L: linux-nfc@lists.01.org (subscribers-only) 11074 - S: Supported 11071 + L: netdev@vger.kernel.org 11072 + S: Orphan 11075 11073 F: net/nfc/ 11076 11074 F: include/net/nfc/ 11077 11075 F: include/uapi/linux/nfc.h
-6
drivers/atm/iphase.c
··· 2767 2767 case MEMDUMP: 2768 2768 { 2769 2769 switch (ia_cmds.sub_cmd) { 2770 - case MEMDUMP_DEV: 2771 - if (!capable(CAP_NET_ADMIN)) return -EPERM; 2772 - if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV))) 2773 - return -EFAULT; 2774 - ia_cmds.status = 0; 2775 - break; 2776 2770 case MEMDUMP_SEGREG: 2777 2771 if (!capable(CAP_NET_ADMIN)) return -EPERM; 2778 2772 tmps = (u16 __user *)ia_cmds.buf;
+7 -6
drivers/infiniband/hw/mlx5/ib_rep.c
··· 109 109 } 110 110 111 111 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw, 112 - int vport_index) 112 + u16 vport_num) 113 113 { 114 - return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB); 114 + return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_IB); 115 115 } 116 116 117 117 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw, 118 - int vport_index) 118 + u16 vport_num) 119 119 { 120 - return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH); 120 + return mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH); 121 121 } 122 122 123 123 struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw) ··· 125 125 return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB); 126 126 } 127 127 128 - struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport) 128 + struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, 129 + u16 vport_num) 129 130 { 130 - return mlx5_eswitch_vport_rep(esw, vport); 131 + return mlx5_eswitch_vport_rep(esw, vport_num); 131 132 } 132 133 133 134 struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+6 -6
drivers/infiniband/hw/mlx5/ib_rep.h
··· 14 14 15 15 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw); 16 16 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw, 17 - int vport_index); 17 + u16 vport_num); 18 18 struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw); 19 19 struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, 20 - int vport_index); 20 + u16 vport_num); 21 21 void mlx5_ib_register_vport_reps(struct mlx5_core_dev *mdev); 22 22 void mlx5_ib_unregister_vport_reps(struct mlx5_core_dev *mdev); 23 23 struct mlx5_flow_handle *create_flow_rule_vport_sq(struct mlx5_ib_dev *dev, 24 24 struct mlx5_ib_sq *sq, 25 25 u16 port); 26 26 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw, 27 - int vport_index); 27 + u16 vport_num); 28 28 #else /* CONFIG_MLX5_ESWITCH */ 29 29 static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw) 30 30 { ··· 33 33 34 34 static inline 35 35 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw, 36 - int vport_index) 36 + u16 vport_num) 37 37 { 38 38 return NULL; 39 39 } ··· 46 46 47 47 static inline 48 48 struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, 49 - int vport_index) 49 + u16 vport_num) 50 50 { 51 51 return NULL; 52 52 } ··· 63 63 64 64 static inline 65 65 struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw, 66 - int vport_index) 66 + u16 vport_num) 67 67 { 68 68 return NULL; 69 69 }
+1 -1
drivers/net/Makefile
··· 40 40 obj-$(CONFIG_DEV_APPLETALK) += appletalk/ 41 41 obj-$(CONFIG_CAIF) += caif/ 42 42 obj-$(CONFIG_CAN) += can/ 43 - obj-$(CONFIG_NET_DSA) += dsa/ 43 + obj-y += dsa/ 44 44 obj-$(CONFIG_ETHERNET) += ethernet/ 45 45 obj-$(CONFIG_FDDI) += fddi/ 46 46 obj-$(CONFIG_HIPPI) += hippi/
+8 -8
drivers/net/ethernet/cadence/macb_main.c
··· 3343 3343 if (!err) 3344 3344 err = -ENODEV; 3345 3345 3346 - dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err); 3346 + dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err); 3347 3347 return err; 3348 3348 } 3349 3349 ··· 3352 3352 if (!err) 3353 3353 err = -ENODEV; 3354 3354 3355 - dev_err(&pdev->dev, "failed to get hclk (%u)\n", err); 3355 + dev_err(&pdev->dev, "failed to get hclk (%d)\n", err); 3356 3356 return err; 3357 3357 } 3358 3358 ··· 3370 3370 3371 3371 err = clk_prepare_enable(*pclk); 3372 3372 if (err) { 3373 - dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 3373 + dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 3374 3374 return err; 3375 3375 } 3376 3376 3377 3377 err = clk_prepare_enable(*hclk); 3378 3378 if (err) { 3379 - dev_err(&pdev->dev, "failed to enable hclk (%u)\n", err); 3379 + dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err); 3380 3380 goto err_disable_pclk; 3381 3381 } 3382 3382 3383 3383 err = clk_prepare_enable(*tx_clk); 3384 3384 if (err) { 3385 - dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err); 3385 + dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err); 3386 3386 goto err_disable_hclk; 3387 3387 } 3388 3388 3389 3389 err = clk_prepare_enable(*rx_clk); 3390 3390 if (err) { 3391 - dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err); 3391 + dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err); 3392 3392 goto err_disable_txclk; 3393 3393 } 3394 3394 3395 3395 err = clk_prepare_enable(*tsu_clk); 3396 3396 if (err) { 3397 - dev_err(&pdev->dev, "failed to enable tsu_clk (%u)\n", err); 3397 + dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err); 3398 3398 goto err_disable_rxclk; 3399 3399 } 3400 3400 ··· 3868 3868 3869 3869 err = clk_prepare_enable(*pclk); 3870 3870 if (err) { 3871 - dev_err(&pdev->dev, "failed to enable pclk (%u)\n", err); 3871 + dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err); 3872 3872 return err; 3873 3873 } 3874 3874
+3 -1
drivers/net/ethernet/freescale/enetc/enetc.c
··· 313 313 while (bds_to_clean && tx_frm_cnt < ENETC_DEFAULT_TX_WORK) { 314 314 bool is_eof = !!tx_swbd->skb; 315 315 316 - enetc_unmap_tx_buff(tx_ring, tx_swbd); 316 + if (likely(tx_swbd->dma)) 317 + enetc_unmap_tx_buff(tx_ring, tx_swbd); 318 + 317 319 if (is_eof) { 318 320 napi_consume_skb(tx_swbd->skb, napi_budget); 319 321 tx_swbd->skb = NULL;
+2
drivers/net/ethernet/freescale/enetc/enetc_ethtool.c
··· 570 570 .get_ringparam = enetc_get_ringparam, 571 571 .get_link_ksettings = phy_ethtool_get_link_ksettings, 572 572 .set_link_ksettings = phy_ethtool_set_link_ksettings, 573 + .get_link = ethtool_op_get_link, 573 574 }; 574 575 575 576 static const struct ethtool_ops enetc_vf_ethtool_ops = { ··· 585 584 .get_rxfh = enetc_get_rxfh, 586 585 .set_rxfh = enetc_set_rxfh, 587 586 .get_ringparam = enetc_get_ringparam, 587 + .get_link = ethtool_op_get_link, 588 588 }; 589 589 590 590 void enetc_set_ethtool_ops(struct net_device *ndev)
+1 -1
drivers/net/ethernet/freescale/enetc/enetc_pf.c
··· 721 721 ndev->watchdog_timeo = 5 * HZ; 722 722 ndev->max_mtu = ENETC_MAX_MTU; 723 723 724 - ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 724 + ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 725 725 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | 726 726 NETIF_F_LOOPBACK; 727 727 ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+1 -1
drivers/net/ethernet/freescale/enetc/enetc_vf.c
··· 130 130 ndev->watchdog_timeo = 5 * HZ; 131 131 ndev->max_mtu = ENETC_MAX_MTU; 132 132 133 - ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 133 + ndev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | 134 134 NETIF_F_HW_VLAN_CTAG_TX | 135 135 NETIF_F_HW_VLAN_CTAG_RX; 136 136 ndev->features = NETIF_F_HIGHDMA | NETIF_F_SG |
+1 -1
drivers/net/ethernet/mellanox/mlx4/mcg.c
··· 1492 1492 rule.port = port; 1493 1493 rule.qpn = qpn; 1494 1494 INIT_LIST_HEAD(&rule.list); 1495 - mlx4_err(dev, "going promisc on %x\n", port); 1495 + mlx4_info(dev, "going promisc on %x\n", port); 1496 1496 1497 1497 return mlx4_flow_attach(dev, &rule, regid_p); 1498 1498 }
+1
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 8 8 select NET_DEVLINK 9 9 imply PTP_1588_CLOCK 10 10 imply VXLAN 11 + imply MLXFW 11 12 default n 12 13 ---help--- 13 14 Core driver for low level functionality of the ConnectX-4 and
+21 -1
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 1604 1604 1605 1605 static int status_to_err(u8 status) 1606 1606 { 1607 - return status ? -1 : 0; /* TBD more meaningful codes */ 1607 + switch (status) { 1608 + case MLX5_CMD_DELIVERY_STAT_OK: 1609 + case MLX5_DRIVER_STATUS_ABORTED: 1610 + return 0; 1611 + case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR: 1612 + case MLX5_CMD_DELIVERY_STAT_TOK_ERR: 1613 + return -EBADR; 1614 + case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR: 1615 + case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR: 1616 + case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR: 1617 + return -EFAULT; /* Bad address */ 1618 + case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR: 1619 + case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR: 1620 + case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR: 1621 + case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR: 1622 + return -ENOMSG; 1623 + case MLX5_CMD_DELIVERY_STAT_FW_ERR: 1624 + return -EIO; 1625 + default: 1626 + return -EINVAL; 1627 + } 1608 1628 } 1609 1629 1610 1630 static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
··· 26 26 27 27 MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA); 28 28 MLX5_SET(disable_hca_in, in, function_id, 0); 29 - MLX5_SET(enable_hca_in, in, embedded_cpu_function, 0); 29 + MLX5_SET(disable_hca_in, in, embedded_cpu_function, 0); 30 30 return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 31 31 } 32 32
+17 -1
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 1901 1901 return mlx5e_ethtool_flash_device(priv, flash); 1902 1902 } 1903 1903 1904 + #ifndef CONFIG_MLX5_EN_RXNFC 1905 + /* When CONFIG_MLX5_EN_RXNFC=n we only support ETHTOOL_GRXRINGS 1906 + * otherwise this function will be defined from en_fs_ethtool.c 1907 + */ 1908 + static int mlx5e_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info, u32 *rule_locs) 1909 + { 1910 + struct mlx5e_priv *priv = netdev_priv(dev); 1911 + 1912 + if (info->cmd != ETHTOOL_GRXRINGS) 1913 + return -EOPNOTSUPP; 1914 + /* ring_count is needed by ethtool -x */ 1915 + info->data = priv->channels.params.num_channels; 1916 + return 0; 1917 + } 1918 + #endif 1919 + 1904 1920 const struct ethtool_ops mlx5e_ethtool_ops = { 1905 1921 .get_drvinfo = mlx5e_get_drvinfo, 1906 1922 .get_link = ethtool_op_get_link, ··· 1935 1919 .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, 1936 1920 .get_rxfh = mlx5e_get_rxfh, 1937 1921 .set_rxfh = mlx5e_set_rxfh, 1938 - #ifdef CONFIG_MLX5_EN_RXNFC 1939 1922 .get_rxnfc = mlx5e_get_rxnfc, 1923 + #ifdef CONFIG_MLX5_EN_RXNFC 1940 1924 .set_rxnfc = mlx5e_set_rxnfc, 1941 1925 #endif 1942 1926 .flash_device = mlx5e_flash_device,
+18 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 65 65 static void mlx5e_rep_get_drvinfo(struct net_device *dev, 66 66 struct ethtool_drvinfo *drvinfo) 67 67 { 68 + struct mlx5e_priv *priv = netdev_priv(dev); 69 + struct mlx5_core_dev *mdev = priv->mdev; 70 + 68 71 strlcpy(drvinfo->driver, mlx5e_rep_driver_name, 69 72 sizeof(drvinfo->driver)); 70 73 strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version)); 74 + snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), 75 + "%d.%d.%04d (%.16s)", 76 + fw_rev_maj(mdev), fw_rev_min(mdev), 77 + fw_rev_sub(mdev), mdev->board_id); 78 + } 79 + 80 + static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev, 81 + struct ethtool_drvinfo *drvinfo) 82 + { 83 + struct mlx5e_priv *priv = netdev_priv(dev); 84 + 85 + mlx5e_rep_get_drvinfo(dev, drvinfo); 86 + strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev), 87 + sizeof(drvinfo->bus_info)); 71 88 } 72 89 73 90 static const struct counter_desc sw_rep_stats_desc[] = { ··· 380 363 }; 381 364 382 365 static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = { 383 - .get_drvinfo = mlx5e_rep_get_drvinfo, 366 + .get_drvinfo = mlx5e_uplink_rep_get_drvinfo, 384 367 .get_link = ethtool_op_get_link, 385 368 .get_strings = mlx5e_rep_get_strings, 386 369 .get_sset_count = mlx5e_rep_get_sset_count,
+21 -8
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1595 1595 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { 1596 1596 struct flow_match_vlan match; 1597 1597 1598 - flow_rule_match_vlan(rule, &match); 1598 + flow_rule_match_cvlan(rule, &match); 1599 1599 if (match.mask->vlan_id || 1600 1600 match.mask->vlan_priority || 1601 1601 match.mask->vlan_tpid) { ··· 1916 1916 offsetof(struct pedit_headers, field) + (off), \ 1917 1917 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)} 1918 1918 1919 + /* masked values are the same and there are no rewrites that do not have a 1920 + * match. 1921 + */ 1922 + #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \ 1923 + type matchmaskx = *(type *)(matchmaskp); \ 1924 + type matchvalx = *(type *)(matchvalp); \ 1925 + type maskx = *(type *)(maskp); \ 1926 + type valx = *(type *)(valp); \ 1927 + \ 1928 + (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \ 1929 + matchmaskx)); \ 1930 + }) 1931 + 1919 1932 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp, 1920 1933 void *matchmaskp, int size) 1921 1934 { ··· 1936 1923 1937 1924 switch (size) { 1938 1925 case sizeof(u8): 1939 - same = ((*(u8 *)valp) & (*(u8 *)maskp)) == 1940 - ((*(u8 *)matchvalp) & (*(u8 *)matchmaskp)); 1926 + same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp); 1941 1927 break; 1942 1928 case sizeof(u16): 1943 - same = ((*(u16 *)valp) & (*(u16 *)maskp)) == 1944 - ((*(u16 *)matchvalp) & (*(u16 *)matchmaskp)); 1929 + same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp); 1945 1930 break; 1946 1931 case sizeof(u32): 1947 - same = ((*(u32 *)valp) & (*(u32 *)maskp)) == 1948 - ((*(u32 *)matchvalp) & (*(u32 *)matchmaskp)); 1932 + same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp); 1949 1933 break; 1950 1934 } 1951 1935 ··· 2567 2557 /* in case all pedit actions are skipped, remove the MOD_HDR 2568 2558 * flag. 2569 2559 */ 2570 - if (parse_attr->num_mod_hdr_actions == 0) 2560 + if (parse_attr->num_mod_hdr_actions == 0) { 2571 2561 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 2562 + kfree(parse_attr->mod_hdr_actions); 2563 + } 2572 2564 } 2573 2565 2574 2566 attr->action = action; ··· 3007 2995 */ 3008 2996 if (parse_attr->num_mod_hdr_actions == 0) { 3009 2997 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; 2998 + kfree(parse_attr->mod_hdr_actions); 3010 2999 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) || 3011 3000 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH))) 3012 3001 attr->split_count = 0;
+5 -4
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 361 361 } 362 362 363 363 stats->bytes += num_bytes; 364 - stats->xmit_more += netdev_xmit_more(); 364 + stats->xmit_more += xmit_more; 365 365 366 366 headlen = skb->len - ihs - skb->data_len; 367 367 ds_cnt += !!headlen; ··· 624 624 } 625 625 626 626 netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, 627 - struct mlx5_av *av, u32 dqpn, u32 dqkey) 627 + struct mlx5_av *av, u32 dqpn, u32 dqkey, 628 + bool xmit_more) 628 629 { 629 630 struct mlx5_wq_cyc *wq = &sq->wq; 630 631 struct mlx5i_tx_wqe *wqe; ··· 661 660 } 662 661 663 662 stats->bytes += num_bytes; 664 - stats->xmit_more += netdev_xmit_more(); 663 + stats->xmit_more += xmit_more; 665 664 666 665 headlen = skb->len - ihs - skb->data_len; 667 666 ds_cnt += !!headlen; ··· 706 705 goto err_drop; 707 706 708 707 mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt, num_wqebbs, num_bytes, 709 - num_dma, wi, cseg, false); 708 + num_dma, wi, cseg, xmit_more); 710 709 711 710 return NETDEV_TX_OK; 712 711
+10 -10
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1526 1526 static void esw_apply_vport_conf(struct mlx5_eswitch *esw, 1527 1527 struct mlx5_vport *vport) 1528 1528 { 1529 - int vport_num = vport->vport; 1529 + u16 vport_num = vport->vport; 1530 1530 1531 1531 if (esw->manager_vport == vport_num) 1532 1532 return; ··· 1915 1915 1916 1916 /* Vport Administration */ 1917 1917 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 1918 - int vport, u8 mac[ETH_ALEN]) 1918 + u16 vport, u8 mac[ETH_ALEN]) 1919 1919 { 1920 1920 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1921 1921 u64 node_guid; ··· 1959 1959 } 1960 1960 1961 1961 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, 1962 - int vport, int link_state) 1962 + u16 vport, int link_state) 1963 1963 { 1964 1964 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1965 1965 int err = 0; ··· 1989 1989 } 1990 1990 1991 1991 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, 1992 - int vport, struct ifla_vf_info *ivi) 1992 + u16 vport, struct ifla_vf_info *ivi) 1993 1993 { 1994 1994 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 1995 1995 ··· 2014 2014 } 2015 2015 2016 2016 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 2017 - int vport, u16 vlan, u8 qos, u8 set_flags) 2017 + u16 vport, u16 vlan, u8 qos, u8 set_flags) 2018 2018 { 2019 2019 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2020 2020 int err = 0; ··· 2047 2047 } 2048 2048 2049 2049 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 2050 - int vport, u16 vlan, u8 qos) 2050 + u16 vport, u16 vlan, u8 qos) 2051 2051 { 2052 2052 u8 set_flags = 0; 2053 2053 ··· 2058 2058 } 2059 2059 2060 2060 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, 2061 - int vport, bool spoofchk) 2061 + u16 vport, bool spoofchk) 2062 2062 { 2063 2063 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2064 2064 bool pschk; ··· 2208 2208 } 2209 2209 2210 2210 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, 2211 - int vport, bool setting) 2211 + u16 vport, bool setting) 2212 2212 { 2213 2213 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); 2214 2214 ··· 2278 2278 return 0; 2279 2279 } 2280 2280 2281 - int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, 2281 + int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, 2282 2282 u32 max_rate, u32 min_rate) 2283 2283 { 2284 2284 struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); ··· 2368 2368 } 2369 2369 2370 2370 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, 2371 - int vport_num, 2371 + u16 vport_num, 2372 2372 struct ifla_vf_stats *vf_stats) 2373 2373 { 2374 2374 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
+11 -11
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 246 246 int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode); 247 247 void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); 248 248 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, 249 - int vport, u8 mac[ETH_ALEN]); 249 + u16 vport, u8 mac[ETH_ALEN]); 250 250 int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, 251 - int vport, int link_state); 251 + u16 vport, int link_state); 252 252 int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 253 - int vport, u16 vlan, u8 qos); 253 + u16 vport, u16 vlan, u8 qos); 254 254 int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw, 255 - int vport, bool spoofchk); 255 + u16 vport, bool spoofchk); 256 256 int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, 257 - int vport_num, bool setting); 258 - int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport, 257 + u16 vport_num, bool setting); 258 + int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, 259 259 u32 max_rate, u32 min_rate); 260 260 int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); 261 261 int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); 262 262 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, 263 - int vport, struct ifla_vf_info *ivi); 263 + u16 vport, struct ifla_vf_info *ivi); 264 264 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, 265 - int vport, 265 + u16 vport, 266 266 struct ifla_vf_stats *vf_stats); 267 267 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule); 268 268 ··· 296 296 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw); 297 297 298 298 struct mlx5_flow_handle * 299 - mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, 299 + mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, 300 300 struct mlx5_flow_destination *dest); 301 301 302 302 enum { ··· 366 366 int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, 367 367 struct mlx5_esw_flow_attr *attr); 368 368 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 369 - int vport, u16 vlan, u8 qos, u8 set_flags); 369 + u16 vport, u16 vlan, u8 qos, u8 set_flags); 370 370 371 371 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, 372 372 u8 vlan_depth) ··· 430 430 return vport_num; 431 431 } 432 432 433 - static inline int mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, 433 + static inline u16 mlx5_eswitch_index_to_vport_num(struct mlx5_eswitch *esw, 434 434 int index) 435 435 { 436 436 if (index == mlx5_eswitch_ecpf_idx(esw) &&
+11 -9
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 57 57 static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw, 58 58 u16 vport_num) 59 59 { 60 - u16 idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); 60 + int idx = mlx5_eswitch_vport_num_to_index(esw, vport_num); 61 61 62 62 WARN_ON(idx > esw->total_vports - 1); 63 63 return &esw->offloads.vport_reps[idx]; ··· 515 515 } 516 516 517 517 struct mlx5_flow_handle * 518 - mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn) 518 + mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, u16 vport, 519 + u32 sqn) 519 520 { 520 521 struct mlx5_flow_act flow_act = {0}; 521 522 struct mlx5_flow_destination dest = {}; ··· 1182 1181 } 1183 1182 1184 1183 struct mlx5_flow_handle * 1185 - mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, 1184 + mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport, 1186 1185 struct mlx5_flow_destination *dest) 1187 1186 { 1188 1187 struct mlx5_flow_act flow_act = {0}; ··· 1732 1731 struct mlx5_vport *vport; 1733 1732 int i; 1734 1733 1735 - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->nvports) { 1734 + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->dev->priv.sriov.num_vfs) { 1736 1735 esw_vport_disable_egress_acl(esw, vport); 1737 1736 esw_vport_disable_ingress_acl(esw, vport); 1738 1737 } 1739 1738 } 1740 1739 1741 - static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports) 1740 + static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int vf_nvports, 1741 + int nvports) 1742 1742 { 1743 1743 int err; 1744 1744 ··· 1747 1745 mutex_init(&esw->fdb_table.offloads.fdb_prio_lock); 1748 1746 1749 1747 if (MLX5_CAP_GEN(esw->dev, prio_tag_required)) { 1750 - err = esw_prio_tag_acls_config(esw, nvports); 1748 + err = esw_prio_tag_acls_config(esw, vf_nvports); 1751 1749 if (err) 1752 1750 return err; 1753 1751 } ··· 1840 1838 { 1841 1839 int err; 1842 1840 1843 - err = esw_offloads_steering_init(esw, total_nvports); 1841 + err = esw_offloads_steering_init(esw, vf_nvports, total_nvports); 1844 1842 if (err) 1845 1843 return err; 1846 1844 ··· 2245 2243 } 2246 2244 2247 2245 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, 2248 - int vport, 2246 + u16 vport, 2249 2247 u8 rep_type) 2250 2248 { 2251 2249 struct mlx5_eswitch_rep *rep; ··· 2266 2264 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev); 2267 2265 2268 2266 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 2269 - int vport) 2267 + u16 vport) 2270 2268 { 2271 2269 return mlx5_eswitch_get_rep(esw, vport); 2272 2270 }
+2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1380 1380 if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && 1381 1381 d1->vport.num == d2->vport.num && 1382 1382 d1->vport.flags == d2->vport.flags && 1383 + ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_VHCA_ID) ? 1384 + (d1->vport.vhca_id == d2->vport.vhca_id) : true) && 1383 1385 ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ? 1384 1386 (d1->vport.reformat_id == d2->vport.reformat_id) : true)) || 1385 1387 (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 619 619 struct mlx5_ib_ah *mah = to_mah(address); 620 620 struct mlx5i_priv *ipriv = epriv->ppriv; 621 621 622 - return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey); 622 + return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey, netdev_xmit_more()); 623 623 } 624 624 625 625 static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
··· 119 119 } 120 120 121 121 netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, 122 - struct mlx5_av *av, u32 dqpn, u32 dqkey); 122 + struct mlx5_av *av, u32 dqpn, u32 dqkey, 123 + bool xmit_more); 123 124 void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); 124 125 void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); 125 126
+6
drivers/net/ethernet/mellanox/mlxsw/core.c
··· 122 122 } 123 123 EXPORT_SYMBOL(mlxsw_core_driver_priv); 124 124 125 + bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core) 126 + { 127 + return mlxsw_core->driver->res_query_enabled; 128 + } 129 + EXPORT_SYMBOL(mlxsw_core_res_query_enabled); 130 + 125 131 struct mlxsw_rx_listener_item { 126 132 struct list_head list; 127 133 struct mlxsw_rx_listener rxl;
+2
drivers/net/ethernet/mellanox/mlxsw/core.h
··· 28 28 29 29 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core); 30 30 31 + bool mlxsw_core_res_query_enabled(const struct mlxsw_core *mlxsw_core); 32 + 31 33 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver); 32 34 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver); 33 35
+16 -2
drivers/net/ethernet/mellanox/mlxsw/core_env.c
··· 3 3 4 4 #include <linux/kernel.h> 5 5 #include <linux/err.h> 6 + #include <linux/sfp.h> 6 7 7 8 #include "core.h" 8 9 #include "core_env.h" ··· 163 162 { 164 163 u8 module_info[MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE]; 165 164 u16 offset = MLXSW_REG_MCIA_EEPROM_MODULE_INFO_SIZE; 166 - u8 module_rev_id, module_id; 165 + u8 module_rev_id, module_id, diag_mon; 167 166 unsigned int read_size; 168 167 int err; 169 168 ··· 196 195 } 197 196 break; 198 197 case MLXSW_REG_MCIA_EEPROM_MODULE_INFO_ID_SFP: 198 + /* Verify if transceiver provides diagnostic monitoring page */ 199 + err = mlxsw_env_query_module_eeprom(mlxsw_core, module, 200 + SFP_DIAGMON, 1, &diag_mon, 201 + &read_size); 202 + if (err) 203 + return err; 204 + 205 + if (read_size < 1) 206 + return -EIO; 207 + 199 208 modinfo->type = ETH_MODULE_SFF_8472; 200 - modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 209 + if (diag_mon) 210 + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 211 + else 212 + modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN / 2; 201 213 break; 202 214 default: 203 215 return -EINVAL;
+3
drivers/net/ethernet/mellanox/mlxsw/core_hwmon.c
··· 518 518 u8 width; 519 519 int err; 520 520 521 + if (!mlxsw_core_res_query_enabled(mlxsw_hwmon->core)) 522 + return 0; 523 + 521 524 /* Add extra attributes for module temperature. Sensor index is 522 525 * assigned to sensor_count value, while all indexed before 523 526 * sensor_count are already utilized by the sensors connected through
+6
drivers/net/ethernet/mellanox/mlxsw/core_thermal.c
··· 740 740 struct mlxsw_thermal_module *module_tz; 741 741 int i, err; 742 742 743 + if (!mlxsw_core_res_query_enabled(core)) 744 + return 0; 745 + 743 746 thermal->tz_module_arr = kcalloc(module_count, 744 747 sizeof(*thermal->tz_module_arr), 745 748 GFP_KERNEL); ··· 778 775 { 779 776 unsigned int module_count = mlxsw_core_max_ports(thermal->core); 780 777 int i; 778 + 779 + if (!mlxsw_core_res_query_enabled(thermal->core)) 780 + return; 781 781 782 782 for (i = module_count - 1; i >= 0; i--) 783 783 mlxsw_thermal_module_fini(&thermal->tz_module_arr[i]);
+11 -6
drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
··· 168 168 return; 169 169 } 170 170 171 + rcu_read_lock(); 171 172 for (i = 0; i < count; i++) { 172 173 ipv4_addr = payload->tun_info[i].ipv4; 173 174 port = be32_to_cpu(payload->tun_info[i].egress_port); ··· 184 183 neigh_event_send(n, NULL); 185 184 neigh_release(n); 186 185 } 186 + rcu_read_unlock(); 187 187 } 188 188 189 189 static int ··· 369 367 370 368 payload = nfp_flower_cmsg_get_data(skb); 371 369 370 + rcu_read_lock(); 372 371 netdev = nfp_app_dev_get(app, be32_to_cpu(payload->ingress_port), NULL); 373 372 if (!netdev) 374 - goto route_fail_warning; 373 + goto fail_rcu_unlock; 375 374 376 375 flow.daddr = payload->ipv4_addr; 377 376 flow.flowi4_proto = IPPROTO_UDP; ··· 382 379 rt = ip_route_output_key(dev_net(netdev), &flow); 383 380 err = PTR_ERR_OR_ZERO(rt); 384 381 if (err) 385 - goto route_fail_warning; 382 + goto fail_rcu_unlock; 386 383 #else 387 - goto route_fail_warning; 384 + goto fail_rcu_unlock; 388 385 #endif 389 386 390 387 /* Get the neighbour entry for the lookup */ 391 388 n = dst_neigh_lookup(&rt->dst, &flow.daddr); 392 389 ip_rt_put(rt); 393 390 if (!n) 394 - goto route_fail_warning; 395 - nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_KERNEL); 391 + goto fail_rcu_unlock; 392 + nfp_tun_write_neigh(n->dev, app, &flow, n, GFP_ATOMIC); 396 393 neigh_release(n); 394 + rcu_read_unlock(); 397 395 return; 398 396 399 - route_fail_warning: 397 + fail_rcu_unlock: 398 + rcu_read_unlock(); 400 399 nfp_flower_cmsg_warn(app, "Requested route not found.\n"); 401 400 } 402 401
+1
drivers/net/phy/aquantia_main.c
··· 487 487 /* Check that the PHY interface type is compatible */ 488 488 if (phydev->interface != PHY_INTERFACE_MODE_SGMII && 489 489 phydev->interface != PHY_INTERFACE_MODE_2500BASEX && 490 + phydev->interface != PHY_INTERFACE_MODE_XGMII && 490 491 phydev->interface != PHY_INTERFACE_MODE_10GKR) 491 492 return -ENODEV; 492 493
+14 -6
drivers/net/ppp/ppp_deflate.c
··· 610 610 611 611 static int __init deflate_init(void) 612 612 { 613 - int answer = ppp_register_compressor(&ppp_deflate); 614 - if (answer == 0) 615 - printk(KERN_INFO 616 - "PPP Deflate Compression module registered\n"); 617 - ppp_register_compressor(&ppp_deflate_draft); 618 - return answer; 613 + int rc; 614 + 615 + rc = ppp_register_compressor(&ppp_deflate); 616 + if (rc) 617 + return rc; 618 + 619 + rc = ppp_register_compressor(&ppp_deflate_draft); 620 + if (rc) { 621 + ppp_unregister_compressor(&ppp_deflate); 622 + return rc; 623 + } 624 + 625 + pr_info("PPP Deflate Compression module registered\n"); 626 + return 0; 619 627 } 620 628 621 629 static void __exit deflate_cleanup(void)
+10 -25
drivers/net/usb/aqc111.c
··· 320 320 static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed) 321 321 { 322 322 struct aqc111_data *aqc111_data = dev->driver_priv; 323 - u32 phy_on_the_wire; 324 323 325 324 aqc111_data->phy_cfg &= ~AQ_ADV_MASK; 326 325 aqc111_data->phy_cfg |= AQ_PAUSE; ··· 361 362 } 362 363 } 363 364 364 - phy_on_the_wire = aqc111_data->phy_cfg; 365 - aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &phy_on_the_wire); 365 + aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); 366 366 } 367 367 368 368 static int aqc111_set_link_ksettings(struct net_device *net, ··· 437 439 aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 438 440 2, &reg16); 439 441 440 - if (dev->net->mtu > 12500 && dev->net->mtu <= 16334) { 442 + if (dev->net->mtu > 12500) { 441 443 memcpy(buf, &AQC111_BULKIN_SIZE[2], 5); 442 444 /* RX bulk configuration */ 443 445 aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, ··· 451 453 reg16 = 0x1020; 452 454 else if (dev->net->mtu <= 12500) 453 455 reg16 = 0x1420; 454 - else if (dev->net->mtu <= 16334) 455 - reg16 = 0x1A20; 456 456 else 457 - return 0; 457 + reg16 = 0x1A20; 458 458 459 459 aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW, 460 460 2, &reg16); ··· 753 757 { 754 758 struct aqc111_data *aqc111_data = dev->driver_priv; 755 759 u16 reg16; 756 - u32 phy_on_the_wire; 757 760 758 761 /* Force bz */ 759 762 reg16 = SFR_PHYPWR_RSTCTL_BZ; ··· 766 771 aqc111_data->phy_cfg &= ~AQ_ADV_MASK; 767 772 aqc111_data->phy_cfg |= AQ_LOW_POWER; 768 773 aqc111_data->phy_cfg &= ~AQ_PHY_POWER_EN; 769 - phy_on_the_wire = aqc111_data->phy_cfg; 770 774 aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0, 771 - &phy_on_the_wire); 775 + &aqc111_data->phy_cfg); 772 776 773 777 kfree(aqc111_data); 774 778 } ··· 990 996 { 991 997 struct aqc111_data *aqc111_data = dev->driver_priv; 992 998 u8 reg8 = 0; 993 - u32 phy_on_the_wire; 994 999 995 1000 dev->rx_urb_size = URB_SIZE; 996 1001 ··· 1002 1009 1003 1010 /* Power up ethernet PHY */ 1004 1011 aqc111_data->phy_cfg = AQ_PHY_POWER_EN; 1005 - phy_on_the_wire = aqc111_data->phy_cfg; 1006 1012 aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, 1007 - &phy_on_the_wire); 1013 + &aqc111_data->phy_cfg); 1008 1014 1009 1015 /* Set the MAC address */ 1010 1016 aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN, ··· 1034 1042 { 1035 1043 struct aqc111_data *aqc111_data = dev->driver_priv; 1036 1044 u16 reg16 = 0; 1037 - u32 phy_on_the_wire; 1038 1045 1039 1046 aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, 1040 1047 2, &reg16); ··· 1045 1054 1046 1055 /* Put PHY to low power*/ 1047 1056 aqc111_data->phy_cfg |= AQ_LOW_POWER; 1048 - phy_on_the_wire = aqc111_data->phy_cfg; 1049 1057 aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, 1050 - &phy_on_the_wire); 1058 + &aqc111_data->phy_cfg); 1051 1059 1052 1060 netif_carrier_off(dev->net); 1053 1061 ··· 1322 1332 u16 temp_rx_ctrl = 0x00; 1323 1333 u16 reg16; 1324 1334 u8 reg8; 1325 - u32 phy_on_the_wire; 1326 1335 1327 1336 usbnet_suspend(intf, message); 1328 1337 ··· 1393 1404 1394 1405 aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0, 1395 1406 WOL_CFG_SIZE, &wol_cfg); 1396 - phy_on_the_wire = aqc111_data->phy_cfg; 1397 1407 aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, 1398 - &phy_on_the_wire); 1408 + &aqc111_data->phy_cfg); 1399 1409 } else { 1400 1410 aqc111_data->phy_cfg |= AQ_LOW_POWER; 1401 - phy_on_the_wire = aqc111_data->phy_cfg; 1402 1411 aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, 1403 - &phy_on_the_wire); 1412 + &aqc111_data->phy_cfg); 1404 1413 1405 1414 /* Disable RX 
path */ 1406 1415 aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, ··· 1415 1428 { 1416 1429 struct usbnet *dev = usb_get_intfdata(intf); 1417 1430 struct aqc111_data *aqc111_data = dev->driver_priv; 1418 - u16 reg16, oldreg16; 1431 + u16 reg16; 1419 1432 u8 reg8; 1420 1433 1421 1434 netif_carrier_off(dev->net); ··· 1431 1444 /* Configure RX control register => start operation */ 1432 1445 reg16 = aqc111_data->rxctl; 1433 1446 reg16 &= ~SFR_RX_CTL_START; 1434 - /* needs to be saved in case endianness is swapped */ 1435 - oldreg16 = reg16; 1436 1447 aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); 1437 1448 1438 - reg16 = oldreg16 | SFR_RX_CTL_START; 1449 + reg16 |= SFR_RX_CTL_START; 1439 1450 aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); 1440 1451 1441 1452 aqc111_set_phy_speed(dev, aqc111_data->autoneg,
+2
drivers/net/usb/qmi_wwan.c
··· 1259 1259 {QMI_FIXED_INTF(0x1bc7, 0x1101, 3)}, /* Telit ME910 dual modem */ 1260 1260 {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)}, /* Telit LE920 */ 1261 1261 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1201, 2)}, /* Telit LE920, LE920A4 */ 1262 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1260, 2)}, /* Telit LE910Cx */ 1263 + {QMI_QUIRK_SET_DTR(0x1bc7, 0x1261, 2)}, /* Telit LE910Cx */ 1262 1264 {QMI_QUIRK_SET_DTR(0x1bc7, 0x1900, 1)}, /* Telit LN940 series */ 1263 1265 {QMI_FIXED_INTF(0x1c9e, 0x9801, 3)}, /* Telewell TW-3G HSPA+ */ 1264 1266 {QMI_FIXED_INTF(0x1c9e, 0x9803, 4)}, /* Telewell TW-3G HSPA+ */
+1
fs/btrfs/compression.c
··· 1008 1008 struct list_head *workspace; 1009 1009 int ret; 1010 1010 1011 + level = btrfs_compress_op[type]->set_level(level); 1011 1012 workspace = get_workspace(type, level); 1012 1013 ret = btrfs_compress_op[type]->compress_pages(workspace, mapping, 1013 1014 start, pages,
+8 -7
fs/btrfs/extent-tree.c
··· 757 757 } 758 758 759 759 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, 760 - struct btrfs_ref *ref) 760 + struct btrfs_ref *ref, int sign) 761 761 { 762 762 struct btrfs_space_info *space_info; 763 - s64 num_bytes = -ref->len; 763 + s64 num_bytes; 764 764 u64 flags; 765 765 766 + ASSERT(sign == 1 || sign == -1); 767 + num_bytes = sign * ref->len; 766 768 if (ref->type == BTRFS_REF_METADATA) { 767 769 if (ref->tree_ref.root == BTRFS_CHUNK_TREE_OBJECTID) 768 770 flags = BTRFS_BLOCK_GROUP_SYSTEM; ··· 2065 2063 btrfs_ref_tree_mod(fs_info, generic_ref); 2066 2064 2067 2065 if (ret == 0 && old_ref_mod < 0 && new_ref_mod >= 0) 2068 - add_pinned_bytes(fs_info, generic_ref); 2066 + add_pinned_bytes(fs_info, generic_ref, -1); 2069 2067 2070 2068 return ret; 2071 2069 } ··· 3884 3882 info->space_info_kobj, "%s", 3885 3883 alloc_name(space_info->flags)); 3886 3884 if (ret) { 3887 - percpu_counter_destroy(&space_info->total_bytes_pinned); 3888 - kfree(space_info); 3885 + kobject_put(&space_info->kobj); 3889 3886 return ret; 3890 3887 } 3891 3888 ··· 7191 7190 } 7192 7191 out: 7193 7192 if (pin) 7194 - add_pinned_bytes(fs_info, &generic_ref); 7193 + add_pinned_bytes(fs_info, &generic_ref, 1); 7195 7194 7196 7195 if (last_ref) { 7197 7196 /* ··· 7239 7238 btrfs_ref_tree_mod(fs_info, ref); 7240 7239 7241 7240 if (ret == 0 && old_ref_mod >= 0 && new_ref_mod < 0) 7242 - add_pinned_bytes(fs_info, ref); 7241 + add_pinned_bytes(fs_info, ref, 1); 7243 7242 7244 7243 return ret; 7245 7244 }
+13 -3
fs/btrfs/file.c
··· 2068 2068 u64 len; 2069 2069 2070 2070 /* 2071 + * If the inode needs a full sync, make sure we use a full range to 2072 + * avoid log tree corruption, due to hole detection racing with ordered 2073 + * extent completion for adjacent ranges, and assertion failures during 2074 + * hole detection. 2075 + */ 2076 + if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 2077 + &BTRFS_I(inode)->runtime_flags)) { 2078 + start = 0; 2079 + end = LLONG_MAX; 2080 + } 2081 + 2082 + /* 2071 2083 * The range length can be represented by u64, we have to do the typecasts 2072 2084 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync() 2073 2085 */ ··· 2566 2554 2567 2555 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend, 2568 2556 &cached_state); 2569 - if (ret) { 2570 - inode_unlock(inode); 2557 + if (ret) 2571 2558 goto out_only_mutex; 2572 - } 2573 2559 2574 2560 path = btrfs_alloc_path(); 2575 2561 if (!path) {
+22 -8
fs/btrfs/props.c
··· 332 332 struct btrfs_fs_info *fs_info = root->fs_info; 333 333 int ret; 334 334 int i; 335 + bool need_reserve = false; 335 336 336 337 if (!test_bit(BTRFS_INODE_HAS_PROPS, 337 338 &BTRFS_I(parent)->runtime_flags)) ··· 358 357 if (ret) 359 358 continue; 360 359 361 - num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); 362 - ret = btrfs_block_rsv_add(root, trans->block_rsv, 363 - num_bytes, BTRFS_RESERVE_NO_FLUSH); 364 - if (ret) 365 - return ret; 360 + /* 361 + * Currently callers should be reserving 1 item for properties, 362 + * since we only have 1 property that we currently support. If 363 + * we add more in the future we need to try and reserve more 364 + * space for them. But we should also revisit how we do space 365 + * reservations if we do add more properties in the future. 366 + */ 367 + if (need_reserve) { 368 + num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); 369 + ret = btrfs_block_rsv_add(root, trans->block_rsv, 370 + num_bytes, BTRFS_RESERVE_NO_FLUSH); 371 + if (ret) 372 + return ret; 373 + } 366 374 367 375 ret = btrfs_setxattr(trans, inode, h->xattr_name, value, 368 376 strlen(value), 0); ··· 385 375 &BTRFS_I(inode)->runtime_flags); 386 376 } 387 377 388 - btrfs_block_rsv_release(fs_info, trans->block_rsv, num_bytes); 389 - if (ret) 390 - return ret; 378 + if (need_reserve) { 379 + btrfs_block_rsv_release(fs_info, trans->block_rsv, 380 + num_bytes); 381 + if (ret) 382 + return ret; 383 + } 384 + need_reserve = true; 391 385 } 392 386 393 387 return 0;
+1 -3
fs/btrfs/root-tree.c
··· 132 132 return -ENOMEM; 133 133 134 134 ret = btrfs_search_slot(trans, root, key, path, 0, 1); 135 - if (ret < 0) { 136 - btrfs_abort_transaction(trans, ret); 135 + if (ret < 0) 137 136 goto out; 138 - } 139 137 140 138 if (ret > 0) { 141 139 btrfs_crit(fs_info,
+6 -1
fs/btrfs/sysfs.c
··· 825 825 fs_devs->fsid_kobj.kset = btrfs_kset; 826 826 error = kobject_init_and_add(&fs_devs->fsid_kobj, 827 827 &btrfs_ktype, parent, "%pU", fs_devs->fsid); 828 - return error; 828 + if (error) { 829 + kobject_put(&fs_devs->fsid_kobj); 830 + return error; 831 + } 832 + 833 + return 0; 829 834 } 830 835 831 836 int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
+45 -4
fs/btrfs/tree-checker.c
··· 107 107 (!IS_ALIGNED(btrfs_file_extent_##name((leaf), (fi)), (alignment))); \ 108 108 }) 109 109 110 + static u64 file_extent_end(struct extent_buffer *leaf, 111 + struct btrfs_key *key, 112 + struct btrfs_file_extent_item *extent) 113 + { 114 + u64 end; 115 + u64 len; 116 + 117 + if (btrfs_file_extent_type(leaf, extent) == BTRFS_FILE_EXTENT_INLINE) { 118 + len = btrfs_file_extent_ram_bytes(leaf, extent); 119 + end = ALIGN(key->offset + len, leaf->fs_info->sectorsize); 120 + } else { 121 + len = btrfs_file_extent_num_bytes(leaf, extent); 122 + end = key->offset + len; 123 + } 124 + return end; 125 + } 126 + 110 127 static int check_extent_data_item(struct extent_buffer *leaf, 111 - struct btrfs_key *key, int slot) 128 + struct btrfs_key *key, int slot, 129 + struct btrfs_key *prev_key) 112 130 { 113 131 struct btrfs_fs_info *fs_info = leaf->fs_info; 114 132 struct btrfs_file_extent_item *fi; ··· 206 188 CHECK_FE_ALIGNED(leaf, slot, fi, offset, sectorsize) || 207 189 CHECK_FE_ALIGNED(leaf, slot, fi, num_bytes, sectorsize)) 208 190 return -EUCLEAN; 191 + 192 + /* 193 + * Check that no two consecutive file extent items, in the same leaf, 194 + * present ranges that overlap each other. 195 + */ 196 + if (slot > 0 && 197 + prev_key->objectid == key->objectid && 198 + prev_key->type == BTRFS_EXTENT_DATA_KEY) { 199 + struct btrfs_file_extent_item *prev_fi; 200 + u64 prev_end; 201 + 202 + prev_fi = btrfs_item_ptr(leaf, slot - 1, 203 + struct btrfs_file_extent_item); 204 + prev_end = file_extent_end(leaf, prev_key, prev_fi); 205 + if (prev_end > key->offset) { 206 + file_extent_err(leaf, slot - 1, 207 + "file extent end range (%llu) goes beyond start offset (%llu) of the next file extent", 208 + prev_end, key->offset); 209 + return -EUCLEAN; 210 + } 211 + } 212 + 209 213 return 0; 210 214 } 211 215 ··· 814 774 * Common point to switch the item-specific validation. 815 775 */ 816 776 static int check_leaf_item(struct extent_buffer *leaf, 817 - struct btrfs_key *key, int slot) 777 + struct btrfs_key *key, int slot, 778 + struct btrfs_key *prev_key) 818 779 { 819 780 int ret = 0; 820 781 struct btrfs_chunk *chunk; 821 782 822 783 switch (key->type) { 823 784 case BTRFS_EXTENT_DATA_KEY: 824 - ret = check_extent_data_item(leaf, key, slot); 785 + ret = check_extent_data_item(leaf, key, slot, prev_key); 825 786 break; 826 787 case BTRFS_EXTENT_CSUM_KEY: 827 788 ret = check_csum_item(leaf, key, slot); ··· 969 928 * Check if the item size and content meet other 970 929 * criteria 971 930 */ 972 - ret = check_leaf_item(leaf, &key, slot); 931 + ret = check_leaf_item(leaf, &key, slot, &prev_key); 973 932 if (ret < 0) 974 933 return ret; 975 934 }
+1
fs/btrfs/tree-log.c
··· 4182 4182 *last_extent, 0, 4183 4183 0, len, 0, len, 4184 4184 0, 0, 0); 4185 + *last_extent += len; 4185 4186 } 4186 4187 } 4187 4188 }
+1
include/linux/bpf.h
··· 36 36 void (*map_free)(struct bpf_map *map); 37 37 int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); 38 38 void (*map_release_uref)(struct bpf_map *map); 39 + void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); 39 40 40 41 /* funcs callable from userspace and from eBPF programs */ 41 42 void *(*map_lookup_elem)(struct bpf_map *map, void *key);
+3 -3
include/linux/mlx5/eswitch.h
··· 51 51 u8 rep_type); 52 52 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type); 53 53 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, 54 - int vport, 54 + u16 vport_num, 55 55 u8 rep_type); 56 56 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw, 57 - int vport); 57 + u16 vport_num); 58 58 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type); 59 59 u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw); 60 60 struct mlx5_flow_handle * 61 61 mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, 62 - int vport, u32 sqn); 62 + u16 vport_num, u32 sqn); 63 63 #endif
+1 -1
include/linux/of_net.h
··· 22 22 23 23 static inline const void *of_get_mac_address(struct device_node *np) 24 24 { 25 - return NULL; 25 + return ERR_PTR(-ENODEV); 26 26 } 27 27 28 28 static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
+38 -32
include/linux/rhashtable.h
··· 84 84 85 85 struct lockdep_map dep_map; 86 86 87 - struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp; 87 + struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp; 88 88 }; 89 89 90 90 /* ··· 261 261 void *arg); 262 262 void rhashtable_destroy(struct rhashtable *ht); 263 263 264 - struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, 265 - unsigned int hash); 266 - struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl, 267 - unsigned int hash); 268 - struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, 269 - struct bucket_table *tbl, 270 - unsigned int hash); 264 + struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, 265 + unsigned int hash); 266 + struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, 267 + unsigned int hash); 268 + struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, 269 + struct bucket_table *tbl, 270 + unsigned int hash); 271 271 272 272 #define rht_dereference(p, ht) \ 273 273 rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) ··· 284 284 #define rht_entry(tpos, pos, member) \ 285 285 ({ tpos = container_of(pos, typeof(*tpos), member); 1; }) 286 286 287 - static inline struct rhash_lock_head __rcu *const *rht_bucket( 287 + static inline struct rhash_lock_head *const *rht_bucket( 288 288 const struct bucket_table *tbl, unsigned int hash) 289 289 { 290 290 return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) : 291 291 &tbl->buckets[hash]; 292 292 } 293 293 294 - static inline struct rhash_lock_head __rcu **rht_bucket_var( 294 + static inline struct rhash_lock_head **rht_bucket_var( 295 295 struct bucket_table *tbl, unsigned int hash) 296 296 { 297 297 return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) : 298 298 &tbl->buckets[hash]; 299 299 } 300 300 301 - static inline struct rhash_lock_head __rcu **rht_bucket_insert( 301 + static inline struct rhash_lock_head **rht_bucket_insert( 302 302 struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash) 303 303 { 304 304 return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) : ··· 349 349 local_bh_enable(); 350 350 } 351 351 352 + static inline struct rhash_head __rcu *__rht_ptr( 353 + struct rhash_lock_head *const *bkt) 354 + { 355 + return (struct rhash_head __rcu *)((unsigned long)*bkt & ~BIT(0)); 356 + } 357 + 352 358 /* 353 359 * Where 'bkt' is a bucket and might be locked: 354 360 * rht_ptr() dereferences that pointer and clears the lock bit. ··· 362 356 * access is guaranteed, such as when destroying the table. 
363 357 */ 364 358 static inline struct rhash_head *rht_ptr( 365 - struct rhash_lock_head __rcu * const *bkt, 359 + struct rhash_lock_head *const *bkt, 366 360 struct bucket_table *tbl, 367 361 unsigned int hash) 368 362 { 369 - const struct rhash_lock_head *p = 370 - rht_dereference_bucket_rcu(*bkt, tbl, hash); 371 - 372 - if ((((unsigned long)p) & ~BIT(0)) == 0) 373 - return RHT_NULLS_MARKER(bkt); 374 - return (void *)(((unsigned long)p) & ~BIT(0)); 375 - } 376 - 377 - static inline struct rhash_head *rht_ptr_exclusive( 378 - struct rhash_lock_head __rcu * const *bkt) 379 - { 380 - const struct rhash_lock_head *p = 381 - rcu_dereference_protected(*bkt, 1); 363 + struct rhash_head __rcu *p = __rht_ptr(bkt); 382 364 383 365 if (!p) 384 366 return RHT_NULLS_MARKER(bkt); 385 - return (void *)(((unsigned long)p) & ~BIT(0)); 367 + 368 + return rht_dereference_bucket_rcu(p, tbl, hash); 386 369 } 387 370 388 - static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt, 371 + static inline struct rhash_head *rht_ptr_exclusive( 372 + struct rhash_lock_head *const *bkt) 373 + { 374 + struct rhash_head __rcu *p = __rht_ptr(bkt); 375 + 376 + if (!p) 377 + return RHT_NULLS_MARKER(bkt); 378 + 379 + return rcu_dereference_protected(p, 1); 380 + } 381 + 382 + static inline void rht_assign_locked(struct rhash_lock_head **bkt, 389 383 struct rhash_head *obj) 390 384 { 391 385 struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; ··· 396 390 } 397 391 398 392 static inline void rht_assign_unlock(struct bucket_table *tbl, 399 - struct rhash_lock_head __rcu **bkt, 393 + struct rhash_lock_head **bkt, 400 394 struct rhash_head *obj) 401 395 { 402 396 struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt; ··· 593 587 .ht = ht, 594 588 .key = key, 595 589 }; 596 - struct rhash_lock_head __rcu * const *bkt; 590 + struct rhash_lock_head *const *bkt; 597 591 struct bucket_table *tbl; 598 592 struct rhash_head *he; 599 593 unsigned int hash; ··· 709 703 .ht = ht, 710 704 .key = key, 711 705 }; 712 - struct rhash_lock_head __rcu **bkt; 706 + struct rhash_lock_head **bkt; 713 707 struct rhash_head __rcu **pprev; 714 708 struct bucket_table *tbl; 715 709 struct rhash_head *head; ··· 995 989 struct rhash_head *obj, const struct rhashtable_params params, 996 990 bool rhlist) 997 991 { 998 - struct rhash_lock_head __rcu **bkt; 992 + struct rhash_lock_head **bkt; 999 993 struct rhash_head __rcu **pprev; 1000 994 struct rhash_head *he; 1001 995 unsigned int hash; ··· 1147 1141 struct rhash_head *obj_old, struct rhash_head *obj_new, 1148 1142 const struct rhashtable_params params) 1149 1143 { 1150 - struct rhash_lock_head __rcu **bkt; 1144 + struct rhash_lock_head **bkt; 1151 1145 struct rhash_head __rcu **pprev; 1152 1146 struct rhash_head *he; 1153 1147 unsigned int hash;
+6 -3
include/linux/skbuff.h
··· 1434 1434 struct ubuf_info *uarg = skb_zcopy(skb); 1435 1435 1436 1436 if (uarg) { 1437 - if (uarg->callback == sock_zerocopy_callback) { 1437 + if (skb_zcopy_is_nouarg(skb)) { 1438 + /* no notification callback */ 1439 + } else if (uarg->callback == sock_zerocopy_callback) { 1438 1440 uarg->zerocopy = uarg->zerocopy && zerocopy; 1439 1441 sock_zerocopy_put(uarg); 1440 - } else if (!skb_zcopy_is_nouarg(skb)) { 1442 + } else { 1441 1443 uarg->callback(uarg, zerocopy); 1442 1444 } 1443 1445 ··· 2693 2691 { 2694 2692 if (likely(!skb_zcopy(skb))) 2695 2693 return 0; 2696 - if (skb_uarg(skb)->callback == sock_zerocopy_callback) 2694 + if (!skb_zcopy_is_nouarg(skb) && 2695 + skb_uarg(skb)->callback == sock_zerocopy_callback) 2697 2696 return 0; 2698 2697 return skb_copy_ubufs(skb, gfp_mask); 2699 2698 }
+2
include/net/flow_offload.h
··· 71 71 struct flow_match_eth_addrs *out); 72 72 void flow_rule_match_vlan(const struct flow_rule *rule, 73 73 struct flow_match_vlan *out); 74 + void flow_rule_match_cvlan(const struct flow_rule *rule, 75 + struct flow_match_vlan *out); 74 76 void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, 75 77 struct flow_match_ipv4_addrs *out); 76 78 void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
+2 -1
include/net/ip6_fib.h
··· 167 167 dst_nocount:1, 168 168 dst_nopolicy:1, 169 169 dst_host:1, 170 - unused:3; 170 + fib6_destroying:1, 171 + unused:2; 171 172 172 173 struct fib6_nh fib6_nh; 173 174 struct rcu_head rcu;
+1 -1
include/net/sock.h
··· 1473 1473 sock_set_flag(sk, SOCK_QUEUE_SHRUNK); 1474 1474 sk->sk_wmem_queued -= skb->truesize; 1475 1475 sk_mem_uncharge(sk, skb->truesize); 1476 - if (!sk->sk_tx_skb_cache) { 1476 + if (!sk->sk_tx_skb_cache && !skb_cloned(skb)) { 1477 1477 skb_zcopy_clear(skb, true); 1478 1478 sk->sk_tx_skb_cache = skb; 1479 1479 return;
+1 -1
include/uapi/linux/btf.h
··· 83 83 * is the 32 bits arrangement: 84 84 */ 85 85 #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) 86 - #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) 86 + #define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16) 87 87 #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) 88 88 89 89 /* Attributes stored in the BTF_INT_ENCODING */
+3
kernel/bpf/devmap.c
··· 164 164 bpf_clear_redirect_map(map); 165 165 synchronize_rcu(); 166 166 167 + /* Make sure prior __dev_map_entry_free() have completed. */ 168 + rcu_barrier(); 169 + 167 170 /* To ensure all pending flush operations have completed wait for flush 168 171 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus. 169 172 * Because the above synchronize_rcu() ensures the map is disconnected
+18 -5
kernel/bpf/hashtab.c
··· 527 527 return insn - insn_buf; 528 528 } 529 529 530 - static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) 530 + static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map, 531 + void *key, const bool mark) 531 532 { 532 533 struct htab_elem *l = __htab_map_lookup_elem(map, key); 533 534 534 535 if (l) { 535 - bpf_lru_node_set_ref(&l->lru_node); 536 + if (mark) 537 + bpf_lru_node_set_ref(&l->lru_node); 536 538 return l->key + round_up(map->key_size, 8); 537 539 } 538 540 539 541 return NULL; 542 + } 543 + 544 + static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key) 545 + { 546 + return __htab_lru_map_lookup_elem(map, key, true); 547 + } 548 + 549 + static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key) 550 + { 551 + return __htab_lru_map_lookup_elem(map, key, false); 540 552 } 541 553 542 554 static u32 htab_lru_map_gen_lookup(struct bpf_map *map, ··· 1262 1250 .map_free = htab_map_free, 1263 1251 .map_get_next_key = htab_map_get_next_key, 1264 1252 .map_lookup_elem = htab_lru_map_lookup_elem, 1253 + .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys, 1265 1254 .map_update_elem = htab_lru_map_update_elem, 1266 1255 .map_delete_elem = htab_lru_map_delete_elem, 1267 1256 .map_gen_lookup = htab_lru_map_gen_lookup, ··· 1294 1281 1295 1282 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value) 1296 1283 { 1297 - struct bpf_htab *htab = container_of(map, struct bpf_htab, map); 1298 1284 struct htab_elem *l; 1299 1285 void __percpu *pptr; 1300 1286 int ret = -ENOENT; ··· 1309 1297 l = __htab_map_lookup_elem(map, key); 1310 1298 if (!l) 1311 1299 goto out; 1312 - if (htab_is_lru(htab)) 1313 - bpf_lru_node_set_ref(&l->lru_node); 1300 + /* We do not mark LRU map element here in order to not mess up 1301 + * eviction heuristics when user space does a map walk. 1302 + */ 1314 1303 pptr = htab_elem_get_ptr(l, map->key_size); 1315 1304 for_each_possible_cpu(cpu) { 1316 1305 bpf_long_memcpy(value + off,
+1 -1
kernel/bpf/inode.c
··· 518 518 static struct bpf_prog *__get_prog_inode(struct inode *inode, enum bpf_prog_type type) 519 519 { 520 520 struct bpf_prog *prog; 521 - int ret = inode_permission(inode, MAY_READ | MAY_WRITE); 521 + int ret = inode_permission(inode, MAY_READ); 522 522 if (ret) 523 523 return ERR_PTR(ret); 524 524
+4 -1
kernel/bpf/syscall.c
··· 808 808 err = map->ops->map_peek_elem(map, value); 809 809 } else { 810 810 rcu_read_lock(); 811 - ptr = map->ops->map_lookup_elem(map, key); 811 + if (map->ops->map_lookup_elem_sys_only) 812 + ptr = map->ops->map_lookup_elem_sys_only(map, key); 813 + else 814 + ptr = map->ops->map_lookup_elem(map, key); 812 815 if (IS_ERR(ptr)) { 813 816 err = PTR_ERR(ptr); 814 817 } else if (!ptr) {
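The syscall-side dispatch above pairs with the new htab_lru_map_lookup_elem_sys() in hashtab.c: lookups coming from user space use the optional map_lookup_elem_sys_only callback, which skips the LRU reference bump so a map walk from user space no longer distorts eviction. A user-space sketch of that optional-callback fallback pattern; every name here is a hypothetical stand-in, not the real bpf structures:

    #include <stdio.h>
    #include <stddef.h>

    struct map;

    struct map_ops {
        void *(*lookup)(struct map *m, const void *key);          /* bumps LRU ref */
        void *(*lookup_sys_only)(struct map *m, const void *key); /* optional: no ref */
    };

    struct map {
        const struct map_ops *ops;
        int value;
    };

    static void *lookup_mark(struct map *m, const void *key)
    {
        (void)key;
        puts("datapath lookup: marking LRU reference");
        return &m->value;
    }

    static void *lookup_nomark(struct map *m, const void *key)
    {
        (void)key;
        puts("syscall lookup: leaving LRU state untouched");
        return &m->value;
    }

    /* Mirrors the dispatch added above: prefer the sys-only variant when
     * the map type provides one, otherwise fall back to the normal lookup. */
    static void *syscall_lookup(struct map *m, const void *key)
    {
        if (m->ops->lookup_sys_only)
            return m->ops->lookup_sys_only(m, key);
        return m->ops->lookup(m, key);
    }

    int main(void)
    {
        static const struct map_ops lru_ops   = { lookup_mark, lookup_nomark };
        static const struct map_ops plain_ops = { lookup_mark, NULL };
        struct map lru = { &lru_ops, 1 }, plain = { &plain_ops, 2 };
        int key = 0;

        syscall_lookup(&lru, &key);    /* takes the sys-only path */
        syscall_lookup(&plain, &key);  /* falls back to the normal lookup */
        return 0;
    }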
+3 -2
kernel/trace/bpf_trace.c
··· 1297 1297 } 1298 1298 1299 1299 #ifdef CONFIG_MODULES 1300 - int bpf_event_notify(struct notifier_block *nb, unsigned long op, void *module) 1300 + static int bpf_event_notify(struct notifier_block *nb, unsigned long op, 1301 + void *module) 1301 1302 { 1302 1303 struct bpf_trace_module *btm, *tmp; 1303 1304 struct module *mod = module; ··· 1337 1336 .notifier_call = bpf_event_notify, 1338 1337 }; 1339 1338 1340 - int __init bpf_event_init(void) 1339 + static int __init bpf_event_init(void) 1341 1340 { 1342 1341 register_module_notifier(&bpf_module_nb); 1343 1342 return 0;
+2 -2
lib/random32.c
··· 171 171 172 172 /** 173 173 * prandom_seed - add entropy to pseudo random number generator 174 - * @seed: seed value 174 + * @entropy: entropy value 175 175 * 176 - * Add some additional seeding to the prandom pool. 176 + * Add some additional entropy to the prandom pool. 177 177 */ 178 178 void prandom_seed(u32 entropy) 179 179 {
+17 -16
lib/rhashtable.c
··· 34 34 35 35 union nested_table { 36 36 union nested_table __rcu *table; 37 - struct rhash_lock_head __rcu *bucket; 37 + struct rhash_lock_head *bucket; 38 38 }; 39 39 40 40 static u32 head_hashfn(struct rhashtable *ht, ··· 131 131 INIT_RHT_NULLS_HEAD(ntbl[i].bucket); 132 132 } 133 133 134 - if (cmpxchg(prev, NULL, ntbl) == NULL) 134 + if (cmpxchg((union nested_table **)prev, NULL, ntbl) == NULL) 135 135 return ntbl; 136 136 /* Raced with another thread. */ 137 137 kfree(ntbl); ··· 216 216 } 217 217 218 218 static int rhashtable_rehash_one(struct rhashtable *ht, 219 - struct rhash_lock_head __rcu **bkt, 219 + struct rhash_lock_head **bkt, 220 220 unsigned int old_hash) 221 221 { 222 222 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); ··· 269 269 unsigned int old_hash) 270 270 { 271 271 struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht); 272 - struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash); 272 + struct rhash_lock_head **bkt = rht_bucket_var(old_tbl, old_hash); 273 273 int err; 274 274 275 275 if (!bkt) ··· 296 296 * rcu_assign_pointer(). 297 297 */ 298 298 299 - if (cmpxchg(&old_tbl->future_tbl, NULL, new_tbl) != NULL) 299 + if (cmpxchg((struct bucket_table **)&old_tbl->future_tbl, NULL, 300 + new_tbl) != NULL) 300 301 return -EEXIST; 301 302 302 303 return 0; ··· 479 478 } 480 479 481 480 static void *rhashtable_lookup_one(struct rhashtable *ht, 482 - struct rhash_lock_head __rcu **bkt, 481 + struct rhash_lock_head **bkt, 483 482 struct bucket_table *tbl, unsigned int hash, 484 483 const void *key, struct rhash_head *obj) 485 484 { ··· 530 529 } 531 530 532 531 static struct bucket_table *rhashtable_insert_one(struct rhashtable *ht, 533 - struct rhash_lock_head __rcu **bkt, 532 + struct rhash_lock_head **bkt, 534 533 struct bucket_table *tbl, 535 534 unsigned int hash, 536 535 struct rhash_head *obj, ··· 585 584 { 586 585 struct bucket_table *new_tbl; 587 586 struct bucket_table *tbl; 588 - struct rhash_lock_head __rcu **bkt; 587 + struct rhash_lock_head **bkt; 589 588 unsigned int hash; 590 589 void *data; 591 590 ··· 1167 1166 } 1168 1167 EXPORT_SYMBOL_GPL(rhashtable_destroy); 1169 1168 1170 - struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl, 1171 - unsigned int hash) 1169 + struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl, 1170 + unsigned int hash) 1172 1171 { 1173 1172 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); 1174 1173 unsigned int index = hash & ((1 << tbl->nest) - 1); ··· 1196 1195 } 1197 1196 EXPORT_SYMBOL_GPL(__rht_bucket_nested); 1198 1197 1199 - struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl, 1200 - unsigned int hash) 1198 + struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl, 1199 + unsigned int hash) 1201 1200 { 1202 - static struct rhash_lock_head __rcu *rhnull; 1201 + static struct rhash_lock_head *rhnull; 1203 1202 1204 1203 if (!rhnull) 1205 1204 INIT_RHT_NULLS_HEAD(rhnull); ··· 1207 1206 } 1208 1207 EXPORT_SYMBOL_GPL(rht_bucket_nested); 1209 1208 1210 - struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht, 1211 - struct bucket_table *tbl, 1212 - unsigned int hash) 1209 + struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht, 1210 + struct bucket_table *tbl, 1211 + unsigned int hash) 1213 1212 { 1214 1213 const unsigned int shift = PAGE_SHIFT - ilog2(sizeof(void *)); 1215 1214 unsigned int index = hash & ((1 << tbl->nest) - 1);
+1 -1
net/caif/cfdbgl.c
··· 26 26 cfsrvl_init(dbg, channel_id, dev_info, false); 27 27 dbg->layer.receive = cfdbgl_receive; 28 28 dbg->layer.transmit = cfdbgl_transmit; 29 - snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id); 29 + snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ, "dbg%d", channel_id); 30 30 return &dbg->layer; 31 31 } 32 32
+1 -2
net/caif/cfdgml.c
··· 33 33 cfsrvl_init(dgm, channel_id, dev_info, true); 34 34 dgm->layer.receive = cfdgml_receive; 35 35 dgm->layer.transmit = cfdgml_transmit; 36 - snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id); 37 - dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0'; 36 + snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ, "dgm%d", channel_id); 38 37 return &dgm->layer; 39 38 } 40 39
+1 -1
net/caif/cfutill.c
··· 33 33 cfsrvl_init(util, channel_id, dev_info, true); 34 34 util->layer.receive = cfutill_receive; 35 35 util->layer.transmit = cfutill_transmit; 36 - snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1"); 36 + snprintf(util->layer.name, CAIF_LAYER_NAME_SZ, "util1"); 37 37 return &util->layer; 38 38 } 39 39
+1 -1
net/caif/cfveil.c
··· 32 32 cfsrvl_init(vei, channel_id, dev_info, true); 33 33 vei->layer.receive = cfvei_receive; 34 34 vei->layer.transmit = cfvei_transmit; 35 - snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id); 35 + snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ, "vei%d", channel_id); 36 36 return &vei->layer; 37 37 } 38 38
+1 -1
net/caif/cfvidl.c
··· 29 29 cfsrvl_init(vid, channel_id, dev_info, false); 30 30 vid->layer.receive = cfvidl_receive; 31 31 vid->layer.transmit = cfvidl_transmit; 32 - snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1"); 32 + snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ, "vid1"); 33 33 return &vid->layer; 34 34 } 35 35
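The five caif hunks above all make the same change: pass the full buffer size to snprintf() instead of CAIF_LAYER_NAME_SZ - 1. snprintf() already reserves room for the terminating NUL within the size it is given, so the '- 1' (and the manual termination removed from cfdgml.c) only wasted the last byte. A tiny demo; the buffer size is an assumed value for illustration:

    #include <stdio.h>

    #define CAIF_LAYER_NAME_SZ 16   /* assumed value, for the demo only */

    int main(void)
    {
        char name[CAIF_LAYER_NAME_SZ];

        /* snprintf() writes at most sizeof(name) bytes *including* the
         * terminating NUL, so passing the full size is safe and makes
         * the whole buffer usable. */
        snprintf(name, sizeof(name), "dbg%d", 7);
        printf("'%s' (always NUL-terminated)\n", name);
        return 0;
    }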
+1 -1
net/core/dev.c
··· 8927 8927 8928 8928 refcnt = netdev_refcnt_read(dev); 8929 8929 8930 - if (time_after(jiffies, warning_time + 10 * HZ)) { 8930 + if (refcnt && time_after(jiffies, warning_time + 10 * HZ)) { 8931 8931 pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n", 8932 8932 dev->name, refcnt); 8933 8933 warning_time = jiffies;
+7
net/core/flow_offload.c
··· 54 54 } 55 55 EXPORT_SYMBOL(flow_rule_match_vlan); 56 56 57 + void flow_rule_match_cvlan(const struct flow_rule *rule, 58 + struct flow_match_vlan *out) 59 + { 60 + FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_CVLAN, out); 61 + } 62 + EXPORT_SYMBOL(flow_rule_match_cvlan); 63 + 57 64 void flow_rule_match_ipv4_addrs(const struct flow_rule *rule, 58 65 struct flow_match_ipv4_addrs *out) 59 66 {
+10 -6
net/core/rtnetlink.c
··· 1496 1496 return ret; 1497 1497 } 1498 1498 1499 - static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev) 1499 + static int nla_put_iflink(struct sk_buff *skb, const struct net_device *dev, 1500 + bool force) 1500 1501 { 1501 1502 int ifindex = dev_get_iflink(dev); 1502 1503 1503 - if (dev->ifindex == ifindex) 1504 - return 0; 1504 + if (force || dev->ifindex != ifindex) 1505 + return nla_put_u32(skb, IFLA_LINK, ifindex); 1505 1506 1506 - return nla_put_u32(skb, IFLA_LINK, ifindex); 1507 + return 0; 1507 1508 } 1508 1509 1509 1510 static noinline_for_stack int nla_put_ifalias(struct sk_buff *skb, ··· 1521 1520 const struct net_device *dev, 1522 1521 struct net *src_net) 1523 1522 { 1523 + bool put_iflink = false; 1524 + 1524 1525 if (dev->rtnl_link_ops && dev->rtnl_link_ops->get_link_net) { 1525 1526 struct net *link_net = dev->rtnl_link_ops->get_link_net(dev); 1526 1527 ··· 1531 1528 1532 1529 if (nla_put_s32(skb, IFLA_LINK_NETNSID, id)) 1533 1530 return -EMSGSIZE; 1531 + 1532 + put_iflink = true; 1534 1533 } 1535 1534 } 1536 1535 1537 - return 0; 1536 + return nla_put_iflink(skb, dev, put_iflink); 1538 1537 } 1539 1538 1540 1539 static int rtnl_fill_link_af(struct sk_buff *skb, ··· 1622 1617 #ifdef CONFIG_RPS 1623 1618 nla_put_u32(skb, IFLA_NUM_RX_QUEUES, dev->num_rx_queues) || 1624 1619 #endif 1625 - nla_put_iflink(skb, dev) || 1626 1620 put_master_ifindex(skb, dev) || 1627 1621 nla_put_u8(skb, IFLA_CARRIER, netif_carrier_ok(dev)) || 1628 1622 (dev->qdisc &&
+5 -2
net/core/skmsg.c
··· 411 411 sk_mem_charge(sk, skb->len); 412 412 copied = skb->len; 413 413 msg->sg.start = 0; 414 + msg->sg.size = copied; 414 415 msg->sg.end = num_sge == MAX_MSG_FRAGS ? 0 : num_sge; 415 416 msg->skb = skb; 416 417 ··· 555 554 struct sk_psock *psock = container_of(gc, struct sk_psock, gc); 556 555 557 556 /* No sk_callback_lock since already detached. */ 558 - strp_stop(&psock->parser.strp); 559 - strp_done(&psock->parser.strp); 557 + 558 + /* Parser has been stopped */ 559 + if (psock->progs.skb_parser) 560 + strp_done(&psock->parser.strp); 560 561 561 562 cancel_work_sync(&psock->work); 562 563
+2 -4
net/ipv4/bpfilter/sockopt.c
··· 30 30 mutex_lock(&bpfilter_ops.lock); 31 31 if (!bpfilter_ops.sockopt) { 32 32 mutex_unlock(&bpfilter_ops.lock); 33 - err = request_module("bpfilter"); 33 + request_module("bpfilter"); 34 34 mutex_lock(&bpfilter_ops.lock); 35 35 36 - if (err) 37 - goto out; 38 36 if (!bpfilter_ops.sockopt) { 39 - err = -ECHILD; 37 + err = -ENOPROTOOPT; 40 38 goto out; 41 39 } 42 40 }
+1 -1
net/ipv4/ping.c
··· 1113 1113 __u16 srcp = ntohs(inet->inet_sport); 1114 1114 1115 1115 seq_printf(f, "%5d: %08X:%04X %08X:%04X" 1116 - " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d", 1116 + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u", 1117 1117 bucket, src, srcp, dest, destp, sp->sk_state, 1118 1118 sk_wmem_alloc_get(sp), 1119 1119 sk_rmem_alloc_get(sp),
+1 -1
net/ipv4/raw.c
··· 1076 1076 srcp = inet->inet_num; 1077 1077 1078 1078 seq_printf(seq, "%4d: %08X:%04X %08X:%04X" 1079 - " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n", 1079 + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", 1080 1080 i, src, srcp, dest, destp, sp->sk_state, 1081 1081 sk_wmem_alloc_get(sp), 1082 1082 sk_rmem_alloc_get(sp),
+1 -1
net/ipv4/tcp.c
··· 855 855 856 856 if (likely(!size)) { 857 857 skb = sk->sk_tx_skb_cache; 858 - if (skb && !skb_cloned(skb)) { 858 + if (skb) { 859 859 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb)); 860 860 sk->sk_tx_skb_cache = NULL; 861 861 pskb_trim(skb, 0);
+4 -3
net/ipv4/tcp_bpf.c
··· 27 27 int flags, long timeo, int *err) 28 28 { 29 29 DEFINE_WAIT_FUNC(wait, woken_wake_function); 30 - int ret; 30 + int ret = 0; 31 + 32 + if (!timeo) 33 + return ret; 31 34 32 35 add_wait_queue(sk_sleep(sk), &wait); 33 36 sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); ··· 531 528 { 532 529 struct sk_psock_link *link; 533 530 534 - sk_psock_cork_free(psock); 535 - __sk_psock_purge_ingress_msg(psock); 536 531 while ((link = sk_psock_link_pop(psock))) { 537 532 sk_psock_unlink(sk, link); 538 533 sk_psock_free_link(link);
+3
net/ipv4/tcp_input.c
··· 6024 6024 static void tcp_rcv_synrecv_state_fastopen(struct sock *sk) 6025 6025 { 6026 6026 tcp_try_undo_loss(sk, false); 6027 + 6028 + /* Reset rtx states to prevent spurious retransmits_timed_out() */ 6029 + tcp_sk(sk)->retrans_stamp = 0; 6027 6030 inet_csk(sk)->icsk_retransmits = 0; 6028 6031 6029 6032 /* Once we leave TCP_SYN_RECV or TCP_FIN_WAIT_1,
+1 -1
net/ipv4/udp.c
··· 2883 2883 __u16 srcp = ntohs(inet->inet_sport); 2884 2884 2885 2885 seq_printf(f, "%5d: %08X:%04X %08X:%04X" 2886 - " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d", 2886 + " %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u", 2887 2887 bucket, src, srcp, dest, destp, sp->sk_state, 2888 2888 sk_wmem_alloc_get(sp), 2889 2889 udp_rqueue_get(sp),
+1 -1
net/ipv6/datagram.c
··· 1034 1034 src = &sp->sk_v6_rcv_saddr; 1035 1035 seq_printf(seq, 1036 1036 "%5d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X " 1037 - "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d\n", 1037 + "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %u\n", 1038 1038 bucket, 1039 1039 src->s6_addr32[0], src->s6_addr32[1], 1040 1040 src->s6_addr32[2], src->s6_addr32[3], srcp,
+9 -3
net/ipv6/ip6_fib.c
··· 904 904 { 905 905 int cpu; 906 906 907 + /* Make sure rt6_make_pcpu_route() wont add other percpu routes 908 + * while we are cleaning them here. 909 + */ 910 + f6i->fib6_destroying = 1; 911 + mb(); /* paired with the cmpxchg() in rt6_make_pcpu_route() */ 912 + 907 913 /* release the reference to this fib entry from 908 914 * all of its cached pcpu routes 909 915 */ ··· 933 927 { 934 928 struct fib6_table *table = rt->fib6_table; 935 929 930 + if (rt->rt6i_pcpu) 931 + fib6_drop_pcpu_from(rt, table); 932 + 936 933 if (refcount_read(&rt->fib6_ref) != 1) { 937 934 /* This route is used as dummy address holder in some split 938 935 * nodes. It is not leaked, but it still holds other resources, ··· 957 948 fn = rcu_dereference_protected(fn->parent, 958 949 lockdep_is_held(&table->tb6_lock)); 959 950 } 960 - 961 - if (rt->rt6i_pcpu) 962 - fib6_drop_pcpu_from(rt, table); 963 951 } 964 952 } 965 953
+34 -24
net/ipv6/route.c
··· 111 111 int iif, int type, u32 portid, u32 seq, 112 112 unsigned int flags); 113 113 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res, 114 - struct in6_addr *daddr, 115 - struct in6_addr *saddr); 114 + const struct in6_addr *daddr, 115 + const struct in6_addr *saddr); 116 116 117 117 #ifdef CONFIG_IPV6_ROUTE_INFO 118 118 static struct fib6_info *rt6_add_route_info(struct net *net, ··· 1295 1295 prev = cmpxchg(p, NULL, pcpu_rt); 1296 1296 BUG_ON(prev); 1297 1297 1298 + if (res->f6i->fib6_destroying) { 1299 + struct fib6_info *from; 1300 + 1301 + from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); 1302 + fib6_info_release(from); 1303 + } 1304 + 1298 1305 return pcpu_rt; 1299 1306 } 1300 1307 ··· 1573 1566 * Caller has to hold rcu_read_lock() 1574 1567 */ 1575 1568 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res, 1576 - struct in6_addr *daddr, 1577 - struct in6_addr *saddr) 1569 + const struct in6_addr *daddr, 1570 + const struct in6_addr *saddr) 1578 1571 { 1572 + const struct in6_addr *src_key = NULL; 1579 1573 struct rt6_exception_bucket *bucket; 1580 - struct in6_addr *src_key = NULL; 1581 1574 struct rt6_exception *rt6_ex; 1582 1575 struct rt6_info *ret = NULL; 1583 - 1584 - bucket = rcu_dereference(res->f6i->rt6i_exception_bucket); 1585 1576 1586 1577 #ifdef CONFIG_IPV6_SUBTREES 1587 1578 /* fib6i_src.plen != 0 indicates f6i is in subtree 1588 1579 * and exception table is indexed by a hash of 1589 1580 * both fib6_dst and fib6_src. 1590 - * Otherwise, the exception table is indexed by 1591 - * a hash of only fib6_dst. 1581 + * However, the src addr used to create the hash 1582 + * might not be exactly the passed in saddr which 1583 + * is a /128 addr from the flow. 1584 + * So we need to use f6i->fib6_src to redo lookup 1585 + * if the passed in saddr does not find anything. 1586 + * (See the logic in ip6_rt_cache_alloc() on how 1587 + * rt->rt6i_src is updated.) 
1592 1588 */ 1593 1589 if (res->f6i->fib6_src.plen) 1594 1590 src_key = saddr; 1591 + find_ex: 1595 1592 #endif 1593 + bucket = rcu_dereference(res->f6i->rt6i_exception_bucket); 1596 1594 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key); 1597 1595 1598 1596 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i)) 1599 1597 ret = rt6_ex->rt6i; 1598 + 1599 + #ifdef CONFIG_IPV6_SUBTREES 1600 + /* Use fib6_src as src_key and redo lookup */ 1601 + if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) { 1602 + src_key = &res->f6i->fib6_src.addr; 1603 + goto find_ex; 1604 + } 1605 + #endif 1600 1606 1601 1607 return ret; 1602 1608 } ··· 2685 2665 const struct in6_addr *daddr, 2686 2666 const struct in6_addr *saddr) 2687 2667 { 2688 - struct rt6_exception_bucket *bucket; 2689 2668 const struct fib6_nh *nh = res->nh; 2690 2669 struct fib6_info *f6i = res->f6i; 2691 - const struct in6_addr *src_key; 2692 - struct rt6_exception *rt6_ex; 2693 2670 struct inet6_dev *idev; 2671 + struct rt6_info *rt; 2694 2672 u32 mtu = 0; 2695 2673 2696 2674 if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) { ··· 2697 2679 goto out; 2698 2680 } 2699 2681 2700 - src_key = NULL; 2701 - #ifdef CONFIG_IPV6_SUBTREES 2702 - if (f6i->fib6_src.plen) 2703 - src_key = saddr; 2704 - #endif 2705 - 2706 - bucket = rcu_dereference(f6i->rt6i_exception_bucket); 2707 - rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key); 2708 - if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i)) 2709 - mtu = dst_metric_raw(&rt6_ex->rt6i->dst, RTAX_MTU); 2710 - 2711 - if (likely(!mtu)) { 2682 + rt = rt6_find_cached_rt(res, daddr, saddr); 2683 + if (unlikely(rt)) { 2684 + mtu = dst_metric_raw(&rt->dst, RTAX_MTU); 2685 + } else { 2712 2686 struct net_device *dev = nh->fib_nh_dev; 2713 2687 2714 2688 mtu = IPV6_MIN_MTU;
+1 -1
net/netlink/af_netlink.c
··· 2642 2642 struct sock *s = v; 2643 2643 struct netlink_sock *nlk = nlk_sk(s); 2644 2644 2645 - seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8d %-8lu\n", 2645 + seq_printf(seq, "%pK %-3d %-10u %08x %-8d %-8d %-5d %-8d %-8u %-8lu\n", 2646 2646 s, 2647 2647 s->sk_protocol, 2648 2648 nlk->portid,
+1 -1
net/phonet/socket.c
··· 607 607 struct pn_sock *pn = pn_sk(sk); 608 608 609 609 seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu " 610 - "%d %pK %d", 610 + "%d %pK %u", 611 611 sk->sk_protocol, pn->sobject, pn->dobject, 612 612 pn->resource, sk->sk_state, 613 613 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
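The seq_printf() changes in the ping, raw, udp, ipv6 datagram, af_netlink and phonet hunks above all switch the socket drop-counter column of these /proc dumps from %d to %u, so large counts no longer show up as negative numbers. A quick illustration of the difference:

    #include <stdio.h>
    #include <limits.h>

    int main(void)
    {
        unsigned int drops = UINT_MAX;   /* a drop counter that has grown large */

        printf("with %%u: %u\n", drops);

        /* What %d effectively reported: once the value exceeds INT_MAX the
         * conversion to int is implementation-defined and typically prints
         * as a negative number. */
        printf("with %%d: %d\n", (int)drops);
        return 0;
    }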
+17 -17
net/socket.c
··· 645 645 } 646 646 EXPORT_SYMBOL(__sock_tx_timestamp); 647 647 648 - /** 649 - * sock_sendmsg - send a message through @sock 650 - * @sock: socket 651 - * @msg: message to send 652 - * 653 - * Sends @msg through @sock, passing through LSM. 654 - * Returns the number of bytes sent, or an error code. 655 - */ 656 648 INDIRECT_CALLABLE_DECLARE(int inet_sendmsg(struct socket *, struct msghdr *, 657 649 size_t)); 658 650 static inline int sock_sendmsg_nosec(struct socket *sock, struct msghdr *msg) ··· 655 663 return ret; 656 664 } 657 665 666 + /** 667 + * sock_sendmsg - send a message through @sock 668 + * @sock: socket 669 + * @msg: message to send 670 + * 671 + * Sends @msg through @sock, passing through LSM. 672 + * Returns the number of bytes sent, or an error code. 673 + */ 658 674 int sock_sendmsg(struct socket *sock, struct msghdr *msg) 659 675 { 660 676 int err = security_socket_sendmsg(sock, msg, ··· 875 875 } 876 876 EXPORT_SYMBOL_GPL(__sock_recv_ts_and_drops); 877 877 878 - /** 879 - * sock_recvmsg - receive a message from @sock 880 - * @sock: socket 881 - * @msg: message to receive 882 - * @flags: message flags 883 - * 884 - * Receives @msg from @sock, passing through LSM. Returns the total number 885 - * of bytes received, or an error. 886 - */ 887 878 INDIRECT_CALLABLE_DECLARE(int inet_recvmsg(struct socket *, struct msghdr *, 888 879 size_t , int )); 889 880 static inline int sock_recvmsg_nosec(struct socket *sock, struct msghdr *msg, ··· 884 893 msg_data_left(msg), flags); 885 894 } 886 895 896 + /** 897 + * sock_recvmsg - receive a message from @sock 898 + * @sock: socket 899 + * @msg: message to receive 900 + * @flags: message flags 901 + * 902 + * Receives @msg from @sock, passing through LSM. Returns the total number 903 + * of bytes received, or an error. 904 + */ 887 905 int sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags) 888 906 { 889 907 int err = security_socket_recvmsg(sock, msg, msg_data_left(msg), flags);
+7 -7
net/tipc/core.c
··· 131 131 if (err) 132 132 goto out_netlink_compat; 133 133 134 - err = tipc_socket_init(); 135 - if (err) 136 - goto out_socket; 137 - 138 134 err = tipc_register_sysctl(); 139 135 if (err) 140 136 goto out_sysctl; ··· 139 143 if (err) 140 144 goto out_pernet; 141 145 146 + err = tipc_socket_init(); 147 + if (err) 148 + goto out_socket; 149 + 142 150 err = tipc_bearer_setup(); 143 151 if (err) 144 152 goto out_bearer; ··· 150 150 pr_info("Started in single node mode\n"); 151 151 return 0; 152 152 out_bearer: 153 + tipc_socket_stop(); 154 + out_socket: 153 155 unregister_pernet_subsys(&tipc_net_ops); 154 156 out_pernet: 155 157 tipc_unregister_sysctl(); 156 158 out_sysctl: 157 - tipc_socket_stop(); 158 - out_socket: 159 159 tipc_netlink_compat_stop(); 160 160 out_netlink_compat: 161 161 tipc_netlink_stop(); ··· 167 167 static void __exit tipc_exit(void) 168 168 { 169 169 tipc_bearer_cleanup(); 170 + tipc_socket_stop(); 170 171 unregister_pernet_subsys(&tipc_net_ops); 171 172 tipc_netlink_stop(); 172 173 tipc_netlink_compat_stop(); 173 - tipc_socket_stop(); 174 174 tipc_unregister_sysctl(); 175 175 176 176 pr_info("Deactivated\n");
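The tipc/core.c hunk above moves tipc_socket_init() after the sysctl and pernet registration it previously preceded, and keeps the error labels (and tipc_exit()) unwinding in exactly the reverse order. A generic sketch of that goto-unwind convention, with hypothetical step names:

    #include <stdio.h>

    static int init_step(const char *name, int fail)
    {
        if (fail) {
            printf("%s: init failed\n", name);
            return -1;
        }
        printf("%s: initialised\n", name);
        return 0;
    }

    static void exit_step(const char *name)
    {
        printf("%s: torn down\n", name);
    }

    /* Bring subsystems up in dependency order; each error label undoes
     * only what was already set up, in reverse order. */
    static int subsystem_init(void)
    {
        if (init_step("netlink", 0))
            goto out_netlink;
        if (init_step("socket", 0))
            goto out_socket;
        if (init_step("bearer", 1))      /* simulate a late failure */
            goto out_bearer;
        return 0;

    out_bearer:
        exit_step("socket");
    out_socket:
        exit_step("netlink");
    out_netlink:
        return -1;
    }

    int main(void)
    {
        return subsystem_init() ? 1 : 0;
    }

The virtio_transport.c hunk further down addresses the same class of problem: the transport core is now initialised before the driver that can call back into it is registered, and teardown runs in the reverse order.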
+80 -34
net/vmw_vsock/hyperv_transport.c
··· 35 35 /* The MTU is 16KB per the host side's design */ 36 36 #define HVS_MTU_SIZE (1024 * 16) 37 37 38 + /* How long to wait for graceful shutdown of a connection */ 39 + #define HVS_CLOSE_TIMEOUT (8 * HZ) 40 + 38 41 struct vmpipe_proto_header { 39 42 u32 pkt_type; 40 43 u32 data_size; ··· 308 305 sk->sk_write_space(sk); 309 306 } 310 307 308 + static void hvs_do_close_lock_held(struct vsock_sock *vsk, 309 + bool cancel_timeout) 310 + { 311 + struct sock *sk = sk_vsock(vsk); 312 + 313 + sock_set_flag(sk, SOCK_DONE); 314 + vsk->peer_shutdown = SHUTDOWN_MASK; 315 + if (vsock_stream_has_data(vsk) <= 0) 316 + sk->sk_state = TCP_CLOSING; 317 + sk->sk_state_change(sk); 318 + if (vsk->close_work_scheduled && 319 + (!cancel_timeout || cancel_delayed_work(&vsk->close_work))) { 320 + vsk->close_work_scheduled = false; 321 + vsock_remove_sock(vsk); 322 + 323 + /* Release the reference taken while scheduling the timeout */ 324 + sock_put(sk); 325 + } 326 + } 327 + 311 328 static void hvs_close_connection(struct vmbus_channel *chan) 312 329 { 313 330 struct sock *sk = get_per_channel_state(chan); 314 - struct vsock_sock *vsk = vsock_sk(sk); 315 331 316 332 lock_sock(sk); 317 - 318 - sk->sk_state = TCP_CLOSE; 319 - sock_set_flag(sk, SOCK_DONE); 320 - vsk->peer_shutdown |= SEND_SHUTDOWN | RCV_SHUTDOWN; 321 - 322 - sk->sk_state_change(sk); 323 - 333 + hvs_do_close_lock_held(vsock_sk(sk), true); 324 334 release_sock(sk); 325 335 } 326 336 ··· 468 452 return vmbus_send_tl_connect_request(&h->vm_srv_id, &h->host_srv_id); 469 453 } 470 454 455 + static void hvs_shutdown_lock_held(struct hvsock *hvs, int mode) 456 + { 457 + struct vmpipe_proto_header hdr; 458 + 459 + if (hvs->fin_sent || !hvs->chan) 460 + return; 461 + 462 + /* It can't fail: see hvs_channel_writable_bytes(). */ 463 + (void)hvs_send_data(hvs->chan, (struct hvs_send_buf *)&hdr, 0); 464 + hvs->fin_sent = true; 465 + } 466 + 471 467 static int hvs_shutdown(struct vsock_sock *vsk, int mode) 472 468 { 473 469 struct sock *sk = sk_vsock(vsk); 474 - struct vmpipe_proto_header hdr; 475 - struct hvs_send_buf *send_buf; 476 - struct hvsock *hvs; 477 470 478 471 if (!(mode & SEND_SHUTDOWN)) 479 472 return 0; 480 473 481 474 lock_sock(sk); 482 - 483 - hvs = vsk->trans; 484 - if (hvs->fin_sent) 485 - goto out; 486 - 487 - send_buf = (struct hvs_send_buf *)&hdr; 488 - 489 - /* It can't fail: see hvs_channel_writable_bytes(). 
*/ 490 - (void)hvs_send_data(hvs->chan, send_buf, 0); 491 - 492 - hvs->fin_sent = true; 493 - out: 475 + hvs_shutdown_lock_held(vsk->trans, mode); 494 476 release_sock(sk); 495 477 return 0; 478 + } 479 + 480 + static void hvs_close_timeout(struct work_struct *work) 481 + { 482 + struct vsock_sock *vsk = 483 + container_of(work, struct vsock_sock, close_work.work); 484 + struct sock *sk = sk_vsock(vsk); 485 + 486 + sock_hold(sk); 487 + lock_sock(sk); 488 + if (!sock_flag(sk, SOCK_DONE)) 489 + hvs_do_close_lock_held(vsk, false); 490 + 491 + vsk->close_work_scheduled = false; 492 + release_sock(sk); 493 + sock_put(sk); 494 + } 495 + 496 + /* Returns true, if it is safe to remove socket; false otherwise */ 497 + static bool hvs_close_lock_held(struct vsock_sock *vsk) 498 + { 499 + struct sock *sk = sk_vsock(vsk); 500 + 501 + if (!(sk->sk_state == TCP_ESTABLISHED || 502 + sk->sk_state == TCP_CLOSING)) 503 + return true; 504 + 505 + if ((sk->sk_shutdown & SHUTDOWN_MASK) != SHUTDOWN_MASK) 506 + hvs_shutdown_lock_held(vsk->trans, SHUTDOWN_MASK); 507 + 508 + if (sock_flag(sk, SOCK_DONE)) 509 + return true; 510 + 511 + /* This reference will be dropped by the delayed close routine */ 512 + sock_hold(sk); 513 + INIT_DELAYED_WORK(&vsk->close_work, hvs_close_timeout); 514 + vsk->close_work_scheduled = true; 515 + schedule_delayed_work(&vsk->close_work, HVS_CLOSE_TIMEOUT); 516 + return false; 496 517 } 497 518 498 519 static void hvs_release(struct vsock_sock *vsk) 499 520 { 500 521 struct sock *sk = sk_vsock(vsk); 501 - struct hvsock *hvs = vsk->trans; 502 - struct vmbus_channel *chan; 522 + bool remove_sock; 503 523 504 524 lock_sock(sk); 505 - 506 - sk->sk_state = TCP_CLOSING; 507 - vsock_remove_sock(vsk); 508 - 525 + remove_sock = hvs_close_lock_held(vsk); 509 526 release_sock(sk); 510 - 511 - chan = hvs->chan; 512 - if (chan) 513 - hvs_shutdown(vsk, RCV_SHUTDOWN | SEND_SHUTDOWN); 514 - 527 + if (remove_sock) 528 + vsock_remove_sock(vsk); 515 529 } 516 530 517 531 static void hvs_destruct(struct vsock_sock *vsk)
+6 -7
net/vmw_vsock/virtio_transport.c
··· 702 702 if (!virtio_vsock_workqueue) 703 703 return -ENOMEM; 704 704 705 - ret = register_virtio_driver(&virtio_vsock_driver); 705 + ret = vsock_core_init(&virtio_transport.transport); 706 706 if (ret) 707 707 goto out_wq; 708 708 709 - ret = vsock_core_init(&virtio_transport.transport); 709 + ret = register_virtio_driver(&virtio_vsock_driver); 710 710 if (ret) 711 - goto out_vdr; 711 + goto out_vci; 712 712 713 713 return 0; 714 714 715 - out_vdr: 716 - unregister_virtio_driver(&virtio_vsock_driver); 715 + out_vci: 716 + vsock_core_exit(); 717 717 out_wq: 718 718 destroy_workqueue(virtio_vsock_workqueue); 719 719 return ret; 720 - 721 720 } 722 721 723 722 static void __exit virtio_vsock_exit(void) 724 723 { 725 - vsock_core_exit(); 726 724 unregister_virtio_driver(&virtio_vsock_driver); 725 + vsock_core_exit(); 727 726 destroy_workqueue(virtio_vsock_workqueue); 728 727 } 729 728
+7
net/vmw_vsock/virtio_transport_common.c
··· 786 786 787 787 void virtio_transport_release(struct vsock_sock *vsk) 788 788 { 789 + struct virtio_vsock_sock *vvs = vsk->trans; 790 + struct virtio_vsock_pkt *pkt, *tmp; 789 791 struct sock *sk = &vsk->sk; 790 792 bool remove_sock = true; 791 793 792 794 lock_sock(sk); 793 795 if (sk->sk_type == SOCK_STREAM) 794 796 remove_sock = virtio_transport_close(vsk); 797 + 798 + list_for_each_entry_safe(pkt, tmp, &vvs->rx_queue, list) { 799 + list_del(&pkt->list); 800 + virtio_transport_free_pkt(pkt); 801 + } 795 802 release_sock(sk); 796 803 797 804 if (remove_sock)
+13 -11
net/xfrm/xfrm_policy.c
··· 3264 3264 decode_session4(struct sk_buff *skb, struct flowi *fl, bool reverse) 3265 3265 { 3266 3266 const struct iphdr *iph = ip_hdr(skb); 3267 - u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 3267 + int ihl = iph->ihl; 3268 + u8 *xprth = skb_network_header(skb) + ihl * 4; 3268 3269 struct flowi4 *fl4 = &fl->u.ip4; 3269 3270 int oif = 0; 3270 3271 ··· 3275 3274 memset(fl4, 0, sizeof(struct flowi4)); 3276 3275 fl4->flowi4_mark = skb->mark; 3277 3276 fl4->flowi4_oif = reverse ? skb->skb_iif : oif; 3277 + 3278 + fl4->flowi4_proto = iph->protocol; 3279 + fl4->daddr = reverse ? iph->saddr : iph->daddr; 3280 + fl4->saddr = reverse ? iph->daddr : iph->saddr; 3281 + fl4->flowi4_tos = iph->tos; 3278 3282 3279 3283 if (!ip_is_fragment(iph)) { 3280 3284 switch (iph->protocol) { ··· 3292 3286 pskb_may_pull(skb, xprth + 4 - skb->data)) { 3293 3287 __be16 *ports; 3294 3288 3295 - xprth = skb_network_header(skb) + iph->ihl * 4; 3289 + xprth = skb_network_header(skb) + ihl * 4; 3296 3290 ports = (__be16 *)xprth; 3297 3291 3298 3292 fl4->fl4_sport = ports[!!reverse]; ··· 3304 3298 pskb_may_pull(skb, xprth + 2 - skb->data)) { 3305 3299 u8 *icmp; 3306 3300 3307 - xprth = skb_network_header(skb) + iph->ihl * 4; 3301 + xprth = skb_network_header(skb) + ihl * 4; 3308 3302 icmp = xprth; 3309 3303 3310 3304 fl4->fl4_icmp_type = icmp[0]; ··· 3316 3310 pskb_may_pull(skb, xprth + 4 - skb->data)) { 3317 3311 __be32 *ehdr; 3318 3312 3319 - xprth = skb_network_header(skb) + iph->ihl * 4; 3313 + xprth = skb_network_header(skb) + ihl * 4; 3320 3314 ehdr = (__be32 *)xprth; 3321 3315 3322 3316 fl4->fl4_ipsec_spi = ehdr[0]; ··· 3327 3321 pskb_may_pull(skb, xprth + 8 - skb->data)) { 3328 3322 __be32 *ah_hdr; 3329 3323 3330 - xprth = skb_network_header(skb) + iph->ihl * 4; 3324 + xprth = skb_network_header(skb) + ihl * 4; 3331 3325 ah_hdr = (__be32 *)xprth; 3332 3326 3333 3327 fl4->fl4_ipsec_spi = ah_hdr[1]; ··· 3338 3332 pskb_may_pull(skb, xprth + 4 - skb->data)) { 3339 3333 __be16 *ipcomp_hdr; 3340 3334 3341 - xprth = skb_network_header(skb) + iph->ihl * 4; 3335 + xprth = skb_network_header(skb) + ihl * 4; 3342 3336 ipcomp_hdr = (__be16 *)xprth; 3343 3337 3344 3338 fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1])); ··· 3350 3344 __be16 *greflags; 3351 3345 __be32 *gre_hdr; 3352 3346 3353 - xprth = skb_network_header(skb) + iph->ihl * 4; 3347 + xprth = skb_network_header(skb) + ihl * 4; 3354 3348 greflags = (__be16 *)xprth; 3355 3349 gre_hdr = (__be32 *)xprth; 3356 3350 ··· 3366 3360 break; 3367 3361 } 3368 3362 } 3369 - fl4->flowi4_proto = iph->protocol; 3370 - fl4->daddr = reverse ? iph->saddr : iph->daddr; 3371 - fl4->saddr = reverse ? iph->daddr : iph->saddr; 3372 - fl4->flowi4_tos = iph->tos; 3373 3363 } 3374 3364 3375 3365 #if IS_ENABLED(CONFIG_IPV6)
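The xfrm_policy.c hunk above reads iph->ihl once into a local and fills the flowi4 address/proto/tos fields before the protocol switch, so nothing is derived from the original header pointer after pskb_may_pull() may have moved the skb data. A loose user-space illustration of the same rule, with realloc() standing in for pskb_may_pull() and a made-up header struct:

    #include <stdio.h>
    #include <stdlib.h>

    struct hdr { int ihl; };             /* stand-in for struct iphdr */

    int main(void)
    {
        char *head = malloc(sizeof(struct hdr));
        if (!head)
            return 1;

        struct hdr *iph = (struct hdr *)head;
        iph->ihl = 5;

        int ihl = iph->ihl;              /* read the needed field up front */

        /* Like pskb_may_pull(), realloc() may move the data; after this
         * point 'iph' may dangle and must not be dereferenced again. */
        char *bigger = realloc(head, 4096);
        if (!bigger) {
            free(head);
            return 1;
        }
        head = bigger;

        printf("transport header offset = %d bytes\n", ihl * 4);
        free(head);
        return 0;
    }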
-5
scripts/Kbuild.include
··· 138 138 # Usage: EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1) 139 139 cc-ifversion = $(shell [ $(CONFIG_GCC_VERSION)0 $(1) $(2)000 ] && echo $(3) || echo $(4)) 140 140 141 - # cc-ldoption 142 - # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both) 143 - cc-ldoption = $(call try-run,\ 144 - $(CC) $(1) $(KBUILD_CPPFLAGS) $(CC_OPTION_CFLAGS) -nostdlib -x c /dev/null -o "$$TMP",$(1),$(2)) 145 - 146 141 # ld-option 147 142 # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y) 148 143 ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
+3 -3
scripts/modules-check.sh
··· 6 6 # Check uniqueness of module names 7 7 check_same_name_modules() 8 8 { 9 - for m in $(sed 's:.*/::' modules.order modules.builtin | sort | uniq -d) 9 + for m in $(sed 's:.*/::' modules.order | sort | uniq -d) 10 10 do 11 - echo "warning: same basename if the following are built as modules:" >&2 12 - sed "/\/$m/!d;s:^kernel/: :" modules.order modules.builtin >&2 11 + echo "warning: same module names found:" >&2 12 + sed -n "/\/$m/s:^kernel/: :p" modules.order >&2 13 13 done 14 14 } 15 15
+43
tools/arch/arm64/include/uapi/asm/kvm.h
··· 35 35 #include <linux/psci.h> 36 36 #include <linux/types.h> 37 37 #include <asm/ptrace.h> 38 + #include <asm/sve_context.h> 38 39 39 40 #define __KVM_HAVE_GUEST_DEBUG 40 41 #define __KVM_HAVE_IRQ_LINE ··· 103 102 #define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */ 104 103 #define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */ 105 104 #define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */ 105 + #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ 106 + #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ 107 + #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ 106 108 107 109 struct kvm_vcpu_init { 108 110 __u32 target; ··· 229 225 #define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \ 230 226 KVM_REG_ARM_FW | ((r) & 0xffff)) 231 227 #define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0) 228 + 229 + /* SVE registers */ 230 + #define KVM_REG_ARM64_SVE (0x15 << KVM_REG_ARM_COPROC_SHIFT) 231 + 232 + /* Z- and P-regs occupy blocks at the following offsets within this range: */ 233 + #define KVM_REG_ARM64_SVE_ZREG_BASE 0 234 + #define KVM_REG_ARM64_SVE_PREG_BASE 0x400 235 + #define KVM_REG_ARM64_SVE_FFR_BASE 0x600 236 + 237 + #define KVM_ARM64_SVE_NUM_ZREGS __SVE_NUM_ZREGS 238 + #define KVM_ARM64_SVE_NUM_PREGS __SVE_NUM_PREGS 239 + 240 + #define KVM_ARM64_SVE_MAX_SLICES 32 241 + 242 + #define KVM_REG_ARM64_SVE_ZREG(n, i) \ 243 + (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_ZREG_BASE | \ 244 + KVM_REG_SIZE_U2048 | \ 245 + (((n) & (KVM_ARM64_SVE_NUM_ZREGS - 1)) << 5) | \ 246 + ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) 247 + 248 + #define KVM_REG_ARM64_SVE_PREG(n, i) \ 249 + (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_PREG_BASE | \ 250 + KVM_REG_SIZE_U256 | \ 251 + (((n) & (KVM_ARM64_SVE_NUM_PREGS - 1)) << 5) | \ 252 + ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) 253 + 254 + #define KVM_REG_ARM64_SVE_FFR(i) \ 255 + (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | KVM_REG_ARM64_SVE_FFR_BASE | \ 256 + KVM_REG_SIZE_U256 | \ 257 + ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1))) 258 + 259 + #define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN 260 + #define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX 261 + 262 + /* Vector lengths pseudo-register: */ 263 + #define KVM_REG_ARM64_SVE_VLS (KVM_REG_ARM64 | KVM_REG_ARM64_SVE | \ 264 + KVM_REG_SIZE_U512 | 0xffff) 265 + #define KVM_ARM64_SVE_VLS_WORDS \ 266 + ((KVM_ARM64_SVE_VQ_MAX - KVM_ARM64_SVE_VQ_MIN) / 64 + 1) 232 267 233 268 /* Device Control API: ARM VGIC */ 234 269 #define KVM_DEV_ARM_VGIC_GRP_ADDR 0
+46
tools/arch/powerpc/include/uapi/asm/kvm.h
··· 482 482 #define KVM_REG_PPC_ICP_PPRI_SHIFT 16 /* pending irq priority */ 483 483 #define KVM_REG_PPC_ICP_PPRI_MASK 0xff 484 484 485 + #define KVM_REG_PPC_VP_STATE (KVM_REG_PPC | KVM_REG_SIZE_U128 | 0x8d) 486 + 485 487 /* Device control API: PPC-specific devices */ 486 488 #define KVM_DEV_MPIC_GRP_MISC 1 487 489 #define KVM_DEV_MPIC_BASE_ADDR 0 /* 64-bit */ ··· 678 676 #define KVM_XICS_PENDING (1ULL << 42) 679 677 #define KVM_XICS_PRESENTED (1ULL << 43) 680 678 #define KVM_XICS_QUEUED (1ULL << 44) 679 + 680 + /* POWER9 XIVE Native Interrupt Controller */ 681 + #define KVM_DEV_XIVE_GRP_CTRL 1 682 + #define KVM_DEV_XIVE_RESET 1 683 + #define KVM_DEV_XIVE_EQ_SYNC 2 684 + #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */ 685 + #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */ 686 + #define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */ 687 + #define KVM_DEV_XIVE_GRP_SOURCE_SYNC 5 /* 64-bit source identifier */ 688 + 689 + /* Layout of 64-bit XIVE source attribute values */ 690 + #define KVM_XIVE_LEVEL_SENSITIVE (1ULL << 0) 691 + #define KVM_XIVE_LEVEL_ASSERTED (1ULL << 1) 692 + 693 + /* Layout of 64-bit XIVE source configuration attribute values */ 694 + #define KVM_XIVE_SOURCE_PRIORITY_SHIFT 0 695 + #define KVM_XIVE_SOURCE_PRIORITY_MASK 0x7 696 + #define KVM_XIVE_SOURCE_SERVER_SHIFT 3 697 + #define KVM_XIVE_SOURCE_SERVER_MASK 0xfffffff8ULL 698 + #define KVM_XIVE_SOURCE_MASKED_SHIFT 32 699 + #define KVM_XIVE_SOURCE_MASKED_MASK 0x100000000ULL 700 + #define KVM_XIVE_SOURCE_EISN_SHIFT 33 701 + #define KVM_XIVE_SOURCE_EISN_MASK 0xfffffffe00000000ULL 702 + 703 + /* Layout of 64-bit EQ identifier */ 704 + #define KVM_XIVE_EQ_PRIORITY_SHIFT 0 705 + #define KVM_XIVE_EQ_PRIORITY_MASK 0x7 706 + #define KVM_XIVE_EQ_SERVER_SHIFT 3 707 + #define KVM_XIVE_EQ_SERVER_MASK 0xfffffff8ULL 708 + 709 + /* Layout of EQ configuration values (64 bytes) */ 710 + struct kvm_ppc_xive_eq { 711 + __u32 flags; 712 + __u32 qshift; 713 + __u64 qaddr; 714 + __u32 qtoggle; 715 + __u32 qindex; 716 + __u8 pad[40]; 717 + }; 718 + 719 + #define KVM_XIVE_EQ_ALWAYS_NOTIFY 0x00000001 720 + 721 + #define KVM_XIVE_TIMA_PAGE_OFFSET 0 722 + #define KVM_XIVE_ESB_PAGE_OFFSET 4 681 723 682 724 #endif /* __LINUX_KVM_POWERPC_H */
+3 -1
tools/arch/s390/include/uapi/asm/kvm.h
··· 153 153 __u8 ppno[16]; /* with MSA5 */ 154 154 __u8 kma[16]; /* with MSA8 */ 155 155 __u8 kdsa[16]; /* with MSA9 */ 156 - __u8 reserved[1792]; 156 + __u8 sortl[32]; /* with STFLE.150 */ 157 + __u8 dfltcc[32]; /* with STFLE.151 */ 158 + __u8 reserved[1728]; 157 159 }; 158 160 159 161 /* kvm attributes for crypto */
+3
tools/arch/x86/include/asm/cpufeatures.h
··· 344 344 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */ 345 345 #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */ 346 346 #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */ 347 + #define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */ 347 348 #define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */ 348 349 #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */ 349 350 #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */ ··· 383 382 #define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */ 384 383 #define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */ 385 384 #define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */ 385 + #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */ 386 + #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */ 386 387 387 388 #endif /* _ASM_X86_CPUFEATURES_H */
+2 -2
tools/bpf/bpftool/btf.c
··· 208 208 break; 209 209 } 210 210 case BTF_KIND_FWD: { 211 - const char *fwd_kind = BTF_INFO_KIND(t->info) ? "union" 212 - : "struct"; 211 + const char *fwd_kind = BTF_INFO_KFLAG(t->info) ? "union" 212 + : "struct"; 213 213 214 214 if (json_output) 215 215 jsonw_string_field(w, "fwd_kind", fwd_kind);
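The bpftool fix above matters because every forward declaration carries the same kind value; it is the kind_flag bit that records whether the fwd names a union or a struct, so testing BTF_INFO_KIND() printed "union" for both. A small decoder demo; the bit layout (kind in bits 24-27, kind_flag in bit 31) is copied from the uapi header and should be treated as an assumption of this sketch:

    #include <stdio.h>

    /* Local copies of the accessors, mirroring <linux/btf.h>. */
    #define BTF_INFO_KIND(info)   (((info) >> 24) & 0x0f)
    #define BTF_INFO_KFLAG(info)  ((info) >> 31)
    #define BTF_KIND_FWD          7

    static const char *fwd_kind(unsigned int info)
    {
        return BTF_INFO_KFLAG(info) ? "union" : "struct";
    }

    int main(void)
    {
        unsigned int fwd_struct = (unsigned int)BTF_KIND_FWD << 24;
        unsigned int fwd_union  = fwd_struct | (1u << 31);

        /* The kind nibble is BTF_KIND_FWD (non-zero) for both words, so
         * only the kind_flag bit can tell the two apart. */
        printf("kind=%u kflag=%u -> %s\n", BTF_INFO_KIND(fwd_struct),
               BTF_INFO_KFLAG(fwd_struct), fwd_kind(fwd_struct));
        printf("kind=%u kflag=%u -> %s\n", BTF_INFO_KIND(fwd_union),
               BTF_INFO_KFLAG(fwd_union), fwd_kind(fwd_union));
        return 0;
    }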
+2 -2
tools/bpf/bpftool/prog.c
··· 879 879 } 880 880 } 881 881 882 + set_max_rlimit(); 883 + 882 884 obj = __bpf_object__open_xattr(&attr, bpf_flags); 883 885 if (IS_ERR_OR_NULL(obj)) { 884 886 p_err("failed to open object file"); ··· 959 957 p_err("map idx '%d' not used", map_replace[j].idx); 960 958 goto err_close_obj; 961 959 } 962 - 963 - set_max_rlimit(); 964 960 965 961 err = bpf_object__load(obj); 966 962 if (err) {
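Moving set_max_rlimit() before the object is opened matters because the open step can already issue bpf() syscalls that allocate locked kernel memory, and those allocations are charged against RLIMIT_MEMLOCK. A minimal sketch of what such a helper does, assuming the usual bump-to-infinity approach:

    #include <stdio.h>
    #include <sys/resource.h>

    /* Raise RLIMIT_MEMLOCK before the first bpf() syscall that allocates
     * kernel objects; roughly what bpftool's set_max_rlimit() amounts to. */
    static int bump_memlock_rlimit(void)
    {
        struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };

        return setrlimit(RLIMIT_MEMLOCK, &r);
    }

    int main(void)
    {
        if (bump_memlock_rlimit())
            perror("setrlimit(RLIMIT_MEMLOCK)");
        else
            puts("memlock limit raised; BPF objects can be loaded now");
        return 0;
    }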
+13 -1
tools/include/uapi/asm-generic/unistd.h
··· 832 832 __SYSCALL(__NR_io_uring_enter, sys_io_uring_enter) 833 833 #define __NR_io_uring_register 427 834 834 __SYSCALL(__NR_io_uring_register, sys_io_uring_register) 835 + #define __NR_open_tree 428 836 + __SYSCALL(__NR_open_tree, sys_open_tree) 837 + #define __NR_move_mount 429 838 + __SYSCALL(__NR_move_mount, sys_move_mount) 839 + #define __NR_fsopen 430 840 + __SYSCALL(__NR_fsopen, sys_fsopen) 841 + #define __NR_fsconfig 431 842 + __SYSCALL(__NR_fsconfig, sys_fsconfig) 843 + #define __NR_fsmount 432 844 + __SYSCALL(__NR_fsmount, sys_fsmount) 845 + #define __NR_fspick 433 846 + __SYSCALL(__NR_fspick, sys_fspick) 835 847 836 848 #undef __NR_syscalls 837 - #define __NR_syscalls 428 849 + #define __NR_syscalls 434 838 850 839 851 /* 840 852 * 32 bit systems traditionally used different
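The six entries appended above wire up the new mount API syscalls. A minimal sketch of how they chain together, called through syscall(3) since no libc wrappers existed yet; the FSCONFIG_*/MOVE_MOUNT_* values are copied from <linux/mount.h>, the fallback syscall numbers come from the table above, and the device and mount point are placeholders (running this needs root and a real filesystem):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <sys/syscall.h>

    #ifndef __NR_fsopen
    #define __NR_fsopen     430
    #define __NR_fsconfig   431
    #define __NR_fsmount    432
    #define __NR_move_mount 429
    #endif

    /* Assumed values from <linux/mount.h>. */
    #define FSCONFIG_SET_STRING     1
    #define FSCONFIG_CMD_CREATE     6
    #define MOVE_MOUNT_F_EMPTY_PATH 0x00000004

    int main(void)
    {
        int fsfd, mntfd;

        fsfd = syscall(__NR_fsopen, "ext4", 0);      /* open a new fs context */
        if (fsfd < 0) { perror("fsopen"); return 1; }

        /* Configure the context, then create the superblock. */
        syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "source", "/dev/sda1", 0);
        syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);

        mntfd = syscall(__NR_fsmount, fsfd, 0, 0);   /* detached mount object */
        if (mntfd < 0) { perror("fsmount"); return 1; }

        /* Attach the detached mount at /mnt. */
        if (syscall(__NR_move_mount, mntfd, "", AT_FDCWD, "/mnt",
                    MOVE_MOUNT_F_EMPTY_PATH) < 0)
            perror("move_mount");

        close(mntfd);
        close(fsfd);
        return 0;
    }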
+37
tools/include/uapi/drm/drm.h
··· 649 649 #define DRM_CAP_PAGE_FLIP_TARGET 0x11 650 650 #define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12 651 651 #define DRM_CAP_SYNCOBJ 0x13 652 + #define DRM_CAP_SYNCOBJ_TIMELINE 0x14 652 653 653 654 /** DRM_IOCTL_GET_CAP ioctl argument type */ 654 655 struct drm_get_cap { ··· 736 735 __u32 pad; 737 736 }; 738 737 738 + struct drm_syncobj_transfer { 739 + __u32 src_handle; 740 + __u32 dst_handle; 741 + __u64 src_point; 742 + __u64 dst_point; 743 + __u32 flags; 744 + __u32 pad; 745 + }; 746 + 739 747 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0) 740 748 #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1) 749 + #define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */ 741 750 struct drm_syncobj_wait { 742 751 __u64 handles; 743 752 /* absolute timeout */ ··· 758 747 __u32 pad; 759 748 }; 760 749 750 + struct drm_syncobj_timeline_wait { 751 + __u64 handles; 752 + /* wait on specific timeline point for every handles*/ 753 + __u64 points; 754 + /* absolute timeout */ 755 + __s64 timeout_nsec; 756 + __u32 count_handles; 757 + __u32 flags; 758 + __u32 first_signaled; /* only valid when not waiting all */ 759 + __u32 pad; 760 + }; 761 + 762 + 761 763 struct drm_syncobj_array { 762 764 __u64 handles; 763 765 __u32 count_handles; 764 766 __u32 pad; 765 767 }; 768 + 769 + struct drm_syncobj_timeline_array { 770 + __u64 handles; 771 + __u64 points; 772 + __u32 count_handles; 773 + __u32 pad; 774 + }; 775 + 766 776 767 777 /* Query current scanout sequence number */ 768 778 struct drm_crtc_get_sequence { ··· 940 908 #define DRM_IOCTL_MODE_LIST_LESSEES DRM_IOWR(0xC7, struct drm_mode_list_lessees) 941 909 #define DRM_IOCTL_MODE_GET_LEASE DRM_IOWR(0xC8, struct drm_mode_get_lease) 942 910 #define DRM_IOCTL_MODE_REVOKE_LEASE DRM_IOWR(0xC9, struct drm_mode_revoke_lease) 911 + 912 + #define DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT DRM_IOWR(0xCA, struct drm_syncobj_timeline_wait) 913 + #define DRM_IOCTL_SYNCOBJ_QUERY DRM_IOWR(0xCB, struct drm_syncobj_timeline_array) 914 + #define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer) 915 + #define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array) 943 916 944 917 /** 945 918 * Device specific ioctls should only be in their respective headers
+206 -98
tools/include/uapi/drm/i915_drm.h
··· 63 63 #define I915_RESET_UEVENT "RESET" 64 64 65 65 /* 66 + * i915_user_extension: Base class for defining a chain of extensions 67 + * 68 + * Many interfaces need to grow over time. In most cases we can simply 69 + * extend the struct and have userspace pass in more data. Another option, 70 + * as demonstrated by Vulkan's approach to providing extensions for forward 71 + * and backward compatibility, is to use a list of optional structs to 72 + * provide those extra details. 73 + * 74 + * The key advantage to using an extension chain is that it allows us to 75 + * redefine the interface more easily than an ever growing struct of 76 + * increasing complexity, and for large parts of that interface to be 77 + * entirely optional. The downside is more pointer chasing; chasing across 78 + * the __user boundary with pointers encapsulated inside u64. 79 + */ 80 + struct i915_user_extension { 81 + __u64 next_extension; 82 + __u32 name; 83 + __u32 flags; /* All undefined bits must be zero. */ 84 + __u32 rsvd[4]; /* Reserved for future use; must be zero. */ 85 + }; 86 + 87 + /* 66 88 * MOCS indexes used for GPU surfaces, defining the cacheability of the 67 89 * surface data and the coherency for this data wrt. CPU vs. GPU accesses. 68 90 */ ··· 121 99 I915_ENGINE_CLASS_VIDEO = 2, 122 100 I915_ENGINE_CLASS_VIDEO_ENHANCE = 3, 123 101 102 + /* should be kept compact */ 103 + 124 104 I915_ENGINE_CLASS_INVALID = -1 105 + }; 106 + 107 + /* 108 + * There may be more than one engine fulfilling any role within the system. 109 + * Each engine of a class is given a unique instance number and therefore 110 + * any engine can be specified by its class:instance tuplet. APIs that allow 111 + * access to any engine in the system will use struct i915_engine_class_instance 112 + * for this identification. 
113 + */ 114 + struct i915_engine_class_instance { 115 + __u16 engine_class; /* see enum drm_i915_gem_engine_class */ 116 + __u16 engine_instance; 125 117 }; 126 118 127 119 /** ··· 355 319 #define DRM_I915_PERF_ADD_CONFIG 0x37 356 320 #define DRM_I915_PERF_REMOVE_CONFIG 0x38 357 321 #define DRM_I915_QUERY 0x39 322 + /* Must be kept compact -- no holes */ 358 323 359 324 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 360 325 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ··· 404 367 #define DRM_IOCTL_I915_GET_SPRITE_COLORKEY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GET_SPRITE_COLORKEY, struct drm_intel_sprite_colorkey) 405 368 #define DRM_IOCTL_I915_GEM_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_WAIT, struct drm_i915_gem_wait) 406 369 #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create) 370 + #define DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_CREATE, struct drm_i915_gem_context_create_ext) 407 371 #define DRM_IOCTL_I915_GEM_CONTEXT_DESTROY DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_CONTEXT_DESTROY, struct drm_i915_gem_context_destroy) 408 372 #define DRM_IOCTL_I915_REG_READ DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_REG_READ, struct drm_i915_reg_read) 409 373 #define DRM_IOCTL_I915_GET_RESET_STATS DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GET_RESET_STATS, struct drm_i915_reset_stats) ··· 514 476 #define I915_SCHEDULER_CAP_ENABLED (1ul << 0) 515 477 #define I915_SCHEDULER_CAP_PRIORITY (1ul << 1) 516 478 #define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2) 479 + #define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3) 517 480 518 481 #define I915_PARAM_HUC_STATUS 42 519 482 ··· 598 559 */ 599 560 #define I915_PARAM_MMAP_GTT_COHERENT 52 600 561 562 + /* Must be kept compact -- no holes and well documented */ 563 + 601 564 typedef struct drm_i915_getparam { 602 565 __s32 param; 603 566 /* ··· 615 574 #define I915_SETPARAM_TEX_LRU_LOG_GRANULARITY 2 616 575 #define I915_SETPARAM_ALLOW_BATCHBUFFER 3 617 576 #define I915_SETPARAM_NUM_USED_FENCES 4 577 + /* Must be kept compact -- no holes */ 618 578 619 579 typedef struct drm_i915_setparam { 620 580 int param; ··· 1014 972 * struct drm_i915_gem_exec_fence *fences. 1015 973 */ 1016 974 __u64 cliprects_ptr; 1017 - #define I915_EXEC_RING_MASK (7<<0) 975 + #define I915_EXEC_RING_MASK (0x3f) 1018 976 #define I915_EXEC_DEFAULT (0<<0) 1019 977 #define I915_EXEC_RENDER (1<<0) 1020 978 #define I915_EXEC_BSD (2<<0) ··· 1162 1120 * as busy may become idle before the ioctl is completed. 1163 1121 * 1164 1122 * Furthermore, if the object is busy, which engine is busy is only 1165 - * provided as a guide. There are race conditions which prevent the 1166 - * report of which engines are busy from being always accurate. 1167 - * However, the converse is not true. If the object is idle, the 1168 - * result of the ioctl, that all engines are idle, is accurate. 1123 + * provided as a guide and only indirectly by reporting its class 1124 + * (there may be more than one engine in each class). There are race 1125 + * conditions which prevent the report of which engines are busy from 1126 + * being always accurate. However, the converse is not true. If the 1127 + * object is idle, the result of the ioctl, that all engines are idle, 1128 + * is accurate. 
1169 1129 * 1170 1130 * The returned dword is split into two fields to indicate both 1171 - * the engines on which the object is being read, and the 1172 - * engine on which it is currently being written (if any). 1131 + * the engine classess on which the object is being read, and the 1132 + * engine class on which it is currently being written (if any). 1173 1133 * 1174 1134 * The low word (bits 0:15) indicate if the object is being written 1175 1135 * to by any engine (there can only be one, as the GEM implicit 1176 1136 * synchronisation rules force writes to be serialised). Only the 1177 - * engine for the last write is reported. 1137 + * engine class (offset by 1, I915_ENGINE_CLASS_RENDER is reported as 1138 + * 1 not 0 etc) for the last write is reported. 1178 1139 * 1179 - * The high word (bits 16:31) are a bitmask of which engines are 1180 - * currently reading from the object. Multiple engines may be 1140 + * The high word (bits 16:31) are a bitmask of which engines classes 1141 + * are currently reading from the object. Multiple engines may be 1181 1142 * reading from the object simultaneously. 1182 1143 * 1183 - * The value of each engine is the same as specified in the 1184 - * EXECBUFFER2 ioctl, i.e. I915_EXEC_RENDER, I915_EXEC_BSD etc. 1185 - * Note I915_EXEC_DEFAULT is a symbolic value and is mapped to 1186 - * the I915_EXEC_RENDER engine for execution, and so it is never 1144 + * The value of each engine class is the same as specified in the 1145 + * I915_CONTEXT_SET_ENGINES parameter and via perf, i.e. 1146 + * I915_ENGINE_CLASS_RENDER, I915_ENGINE_CLASS_COPY, etc. 1187 1147 * reported as active itself. Some hardware may have parallel 1188 1148 * execution engines, e.g. multiple media engines, which are 1189 - * mapped to the same identifier in the EXECBUFFER2 ioctl and 1190 - * so are not separately reported for busyness. 1149 + * mapped to the same class identifier and so are not separately 1150 + * reported for busyness. 1191 1151 * 1192 1152 * Caveat emptor: 1193 1153 * Only the boolean result of this query is reliable; that is whether ··· 1456 1412 }; 1457 1413 1458 1414 struct drm_i915_gem_context_create { 1459 - /* output: id of new context*/ 1460 - __u32 ctx_id; 1415 + __u32 ctx_id; /* output: id of new context*/ 1461 1416 __u32 pad; 1417 + }; 1418 + 1419 + struct drm_i915_gem_context_create_ext { 1420 + __u32 ctx_id; /* output: id of new context*/ 1421 + __u32 flags; 1422 + #define I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS (1u << 0) 1423 + #define I915_CONTEXT_CREATE_FLAGS_UNKNOWN \ 1424 + (-(I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS << 1)) 1425 + __u64 extensions; 1426 + }; 1427 + 1428 + struct drm_i915_gem_context_param { 1429 + __u32 ctx_id; 1430 + __u32 size; 1431 + __u64 param; 1432 + #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1433 + #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 1434 + #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1435 + #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1436 + #define I915_CONTEXT_PARAM_BANNABLE 0x5 1437 + #define I915_CONTEXT_PARAM_PRIORITY 0x6 1438 + #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ 1439 + #define I915_CONTEXT_DEFAULT_PRIORITY 0 1440 + #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ 1441 + /* 1442 + * When using the following param, value should be a pointer to 1443 + * drm_i915_gem_context_param_sseu. 
1444 + */ 1445 + #define I915_CONTEXT_PARAM_SSEU 0x7 1446 + 1447 + /* 1448 + * Not all clients may want to attempt automatic recover of a context after 1449 + * a hang (for example, some clients may only submit very small incremental 1450 + * batches relying on known logical state of previous batches which will never 1451 + * recover correctly and each attempt will hang), and so would prefer that 1452 + * the context is forever banned instead. 1453 + * 1454 + * If set to false (0), after a reset, subsequent (and in flight) rendering 1455 + * from this context is discarded, and the client will need to create a new 1456 + * context to use instead. 1457 + * 1458 + * If set to true (1), the kernel will automatically attempt to recover the 1459 + * context by skipping the hanging batch and executing the next batch starting 1460 + * from the default context state (discarding the incomplete logical context 1461 + * state lost due to the reset). 1462 + * 1463 + * On creation, all new contexts are marked as recoverable. 1464 + */ 1465 + #define I915_CONTEXT_PARAM_RECOVERABLE 0x8 1466 + /* Must be kept compact -- no holes and well documented */ 1467 + 1468 + __u64 value; 1469 + }; 1470 + 1471 + /** 1472 + * Context SSEU programming 1473 + * 1474 + * It may be necessary for either functional or performance reason to configure 1475 + * a context to run with a reduced number of SSEU (where SSEU stands for Slice/ 1476 + * Sub-slice/EU). 1477 + * 1478 + * This is done by configuring SSEU configuration using the below 1479 + * @struct drm_i915_gem_context_param_sseu for every supported engine which 1480 + * userspace intends to use. 1481 + * 1482 + * Not all GPUs or engines support this functionality in which case an error 1483 + * code -ENODEV will be returned. 1484 + * 1485 + * Also, flexibility of possible SSEU configuration permutations varies between 1486 + * GPU generations and software imposed limitations. Requesting such a 1487 + * combination will return an error code of -EINVAL. 1488 + * 1489 + * NOTE: When perf/OA is active the context's SSEU configuration is ignored in 1490 + * favour of a single global setting. 1491 + */ 1492 + struct drm_i915_gem_context_param_sseu { 1493 + /* 1494 + * Engine class & instance to be configured or queried. 1495 + */ 1496 + struct i915_engine_class_instance engine; 1497 + 1498 + /* 1499 + * Unused for now. Must be cleared to zero. 1500 + */ 1501 + __u32 flags; 1502 + 1503 + /* 1504 + * Mask of slices to enable for the context. Valid values are a subset 1505 + * of the bitmask value returned for I915_PARAM_SLICE_MASK. 1506 + */ 1507 + __u64 slice_mask; 1508 + 1509 + /* 1510 + * Mask of subslices to enable for the context. Valid values are a 1511 + * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK. 1512 + */ 1513 + __u64 subslice_mask; 1514 + 1515 + /* 1516 + * Minimum/Maximum number of EUs to enable per subslice for the 1517 + * context. min_eus_per_subslice must be inferior or equal to 1518 + * max_eus_per_subslice. 1519 + */ 1520 + __u16 min_eus_per_subslice; 1521 + __u16 max_eus_per_subslice; 1522 + 1523 + /* 1524 + * Unused for now. Must be cleared to zero. 
1525 + */ 1526 + __u32 rsvd; 1527 + }; 1528 + 1529 + struct drm_i915_gem_context_create_ext_setparam { 1530 + #define I915_CONTEXT_CREATE_EXT_SETPARAM 0 1531 + struct i915_user_extension base; 1532 + struct drm_i915_gem_context_param param; 1462 1533 }; 1463 1534 1464 1535 struct drm_i915_gem_context_destroy { 1465 1536 __u32 ctx_id; 1466 1537 __u32 pad; 1538 + }; 1539 + 1540 + /* 1541 + * DRM_I915_GEM_VM_CREATE - 1542 + * 1543 + * Create a new virtual memory address space (ppGTT) for use within a context 1544 + * on the same file. Extensions can be provided to configure exactly how the 1545 + * address space is setup upon creation. 1546 + * 1547 + * The id of new VM (bound to the fd) for use with I915_CONTEXT_PARAM_VM is 1548 + * returned in the outparam @id. 1549 + * 1550 + * No flags are defined, with all bits reserved and must be zero. 1551 + * 1552 + * An extension chain maybe provided, starting with @extensions, and terminated 1553 + * by the @next_extension being 0. Currently, no extensions are defined. 1554 + * 1555 + * DRM_I915_GEM_VM_DESTROY - 1556 + * 1557 + * Destroys a previously created VM id, specified in @id. 1558 + * 1559 + * No extensions or flags are allowed currently, and so must be zero. 1560 + */ 1561 + struct drm_i915_gem_vm_control { 1562 + __u64 extensions; 1563 + __u32 flags; 1564 + __u32 vm_id; 1467 1565 }; 1468 1566 1469 1567 struct drm_i915_reg_read { ··· 1620 1434 1621 1435 __u64 val; /* Return value */ 1622 1436 }; 1437 + 1623 1438 /* Known registers: 1624 1439 * 1625 1440 * Render engine timestamp - 0x2358 + 64bit - gen7+ ··· 1658 1471 * Object handles are nonzero. 1659 1472 */ 1660 1473 __u32 handle; 1661 - }; 1662 - 1663 - struct drm_i915_gem_context_param { 1664 - __u32 ctx_id; 1665 - __u32 size; 1666 - __u64 param; 1667 - #define I915_CONTEXT_PARAM_BAN_PERIOD 0x1 1668 - #define I915_CONTEXT_PARAM_NO_ZEROMAP 0x2 1669 - #define I915_CONTEXT_PARAM_GTT_SIZE 0x3 1670 - #define I915_CONTEXT_PARAM_NO_ERROR_CAPTURE 0x4 1671 - #define I915_CONTEXT_PARAM_BANNABLE 0x5 1672 - #define I915_CONTEXT_PARAM_PRIORITY 0x6 1673 - #define I915_CONTEXT_MAX_USER_PRIORITY 1023 /* inclusive */ 1674 - #define I915_CONTEXT_DEFAULT_PRIORITY 0 1675 - #define I915_CONTEXT_MIN_USER_PRIORITY -1023 /* inclusive */ 1676 - /* 1677 - * When using the following param, value should be a pointer to 1678 - * drm_i915_gem_context_param_sseu. 1679 - */ 1680 - #define I915_CONTEXT_PARAM_SSEU 0x7 1681 - __u64 value; 1682 - }; 1683 - 1684 - /** 1685 - * Context SSEU programming 1686 - * 1687 - * It may be necessary for either functional or performance reason to configure 1688 - * a context to run with a reduced number of SSEU (where SSEU stands for Slice/ 1689 - * Sub-slice/EU). 1690 - * 1691 - * This is done by configuring SSEU configuration using the below 1692 - * @struct drm_i915_gem_context_param_sseu for every supported engine which 1693 - * userspace intends to use. 1694 - * 1695 - * Not all GPUs or engines support this functionality in which case an error 1696 - * code -ENODEV will be returned. 1697 - * 1698 - * Also, flexibility of possible SSEU configuration permutations varies between 1699 - * GPU generations and software imposed limitations. Requesting such a 1700 - * combination will return an error code of -EINVAL. 1701 - * 1702 - * NOTE: When perf/OA is active the context's SSEU configuration is ignored in 1703 - * favour of a single global setting. 1704 - */ 1705 - struct drm_i915_gem_context_param_sseu { 1706 - /* 1707 - * Engine class & instance to be configured or queried. 
1708 - */ 1709 - __u16 engine_class; 1710 - __u16 engine_instance; 1711 - 1712 - /* 1713 - * Unused for now. Must be cleared to zero. 1714 - */ 1715 - __u32 flags; 1716 - 1717 - /* 1718 - * Mask of slices to enable for the context. Valid values are a subset 1719 - * of the bitmask value returned for I915_PARAM_SLICE_MASK. 1720 - */ 1721 - __u64 slice_mask; 1722 - 1723 - /* 1724 - * Mask of subslices to enable for the context. Valid values are a 1725 - * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK. 1726 - */ 1727 - __u64 subslice_mask; 1728 - 1729 - /* 1730 - * Minimum/Maximum number of EUs to enable per subslice for the 1731 - * context. min_eus_per_subslice must be inferior or equal to 1732 - * max_eus_per_subslice. 1733 - */ 1734 - __u16 min_eus_per_subslice; 1735 - __u16 max_eus_per_subslice; 1736 - 1737 - /* 1738 - * Unused for now. Must be cleared to zero. 1739 - */ 1740 - __u32 rsvd; 1741 1474 }; 1742 1475 1743 1476 enum drm_i915_oa_format { ··· 1821 1714 struct drm_i915_query_item { 1822 1715 __u64 query_id; 1823 1716 #define DRM_I915_QUERY_TOPOLOGY_INFO 1 1717 + /* Must be kept compact -- no holes and well documented */ 1824 1718 1825 1719 /* 1826 1720 * When set to zero by userspace, this is filled with the size of the
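For context on how userspace consumes the SSEU parameter added above: a minimal sketch, not part of this commit, that applies a reduced configuration to a context via DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM. The mask values are placeholders that real code would derive from a topology query, and error handling is elided.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>       /* include path depends on installed headers */

    /* Sketch: request a reduced SSEU configuration for context ctx_id on
     * render engine 0.  Expect -ENODEV or -EINVAL as documented above when
     * the device or the mask combination is not supported. */
    static int context_set_sseu(int drm_fd, uint32_t ctx_id)
    {
            struct drm_i915_gem_context_param_sseu sseu = {
                    .engine = {
                            .engine_class    = I915_ENGINE_CLASS_RENDER,
                            .engine_instance = 0,
                    },
                    .slice_mask           = 0x1,    /* placeholder */
                    .subslice_mask        = 0x1,    /* placeholder */
                    .min_eus_per_subslice = 1,
                    .max_eus_per_subslice = 1,
            };
            struct drm_i915_gem_context_param arg = {
                    .ctx_id = ctx_id,
                    .param  = I915_CONTEXT_PARAM_SSEU,
                    .size   = sizeof(sseu),
                    .value  = (uintptr_t)&sseu,
            };

            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
    }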
+1 -1
tools/include/uapi/linux/btf.h
··· 83 83 * is the 32 bits arrangement: 84 84 */ 85 85 #define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24) 86 - #define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16) 86 + #define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16) 87 87 #define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff) 88 88 89 89 /* Attributes stored in the BTF_INT_ENCODING */
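The extra parentheses above are a macro-hygiene fix: '&' binds tighter than '|', so an expression argument could associate with the mask instead of being evaluated first. A contrived, self-contained sketch (not kernel code) showing the difference:

    #include <stdio.h>

    #define BTF_INT_OFFSET_OLD(VAL) (((VAL & 0x00ff0000)) >> 16)
    #define BTF_INT_OFFSET_NEW(VAL) (((VAL) & 0x00ff0000) >> 16)

    int main(void)
    {
            unsigned int enc = 0x01000000, off = 0x00020000;

            /* Old form groups as enc | (off & 0x00ff0000) and prints 0x102;
             * the fixed form masks the whole argument and prints 0x2. */
            printf("old=%#x new=%#x\n",
                   BTF_INT_OFFSET_OLD(enc | off),
                   BTF_INT_OFFSET_NEW(enc | off));
            return 0;
    }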
+2
tools/include/uapi/linux/fcntl.h
··· 91 91 #define AT_STATX_FORCE_SYNC 0x2000 /* - Force the attributes to be sync'd with the server */ 92 92 #define AT_STATX_DONT_SYNC 0x4000 /* - Don't sync attributes with the server */ 93 93 94 + #define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */ 95 + 94 96 95 97 #endif /* _UAPI_LINUX_FCNTL_H */
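AT_RECURSIVE pairs with the open_tree() flags added in the mount.h copy further below. A hedged sketch of detaching a recursive clone of a mount subtree; the x86_64 syscall number comes from the perf syscall table later in this series, no libc wrapper is assumed, and the fallback defines are only for older userspace headers.

    #define _GNU_SOURCE
    #include <fcntl.h>              /* AT_FDCWD, O_CLOEXEC */
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_open_tree
    #define __NR_open_tree  428     /* x86_64, see syscall_64.tbl below */
    #endif
    #ifndef OPEN_TREE_CLONE
    #define OPEN_TREE_CLONE   1
    #define OPEN_TREE_CLOEXEC O_CLOEXEC
    #endif
    #ifndef AT_RECURSIVE
    #define AT_RECURSIVE 0x8000
    #endif

    /* Sketch: return an fd referring to a detached copy of the whole mount
     * subtree at path (a recursive bind that is not attached anywhere yet). */
    static int clone_subtree(const char *path)
    {
            return syscall(__NR_open_tree, AT_FDCWD, path,
                           OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC | AT_RECURSIVE);
    }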
+3
tools/include/uapi/linux/fs.h
··· 320 320 #define SYNC_FILE_RANGE_WAIT_BEFORE 1 321 321 #define SYNC_FILE_RANGE_WRITE 2 322 322 #define SYNC_FILE_RANGE_WAIT_AFTER 4 323 + #define SYNC_FILE_RANGE_WRITE_AND_WAIT (SYNC_FILE_RANGE_WRITE | \ 324 + SYNC_FILE_RANGE_WAIT_BEFORE | \ 325 + SYNC_FILE_RANGE_WAIT_AFTER) 323 326 324 327 /* 325 328 * Flags for preadv2/pwritev2:
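SYNC_FILE_RANGE_WRITE_AND_WAIT only bundles the three existing flags. A small sketch of using it with sync_file_range(); the fallback define is for libc headers that predate this addition.

    #define _GNU_SOURCE
    #include <fcntl.h>

    #ifndef SYNC_FILE_RANGE_WRITE_AND_WAIT
    #define SYNC_FILE_RANGE_WRITE_AND_WAIT  (SYNC_FILE_RANGE_WRITE | \
                                             SYNC_FILE_RANGE_WAIT_BEFORE | \
                                             SYNC_FILE_RANGE_WAIT_AFTER)
    #endif

    /* Write out the first 1 MiB of fd and wait for completion, i.e. the
     * wait-before / write / wait-after sequence the combined flag expresses. */
    static int flush_first_mib(int fd)
    {
            return sync_file_range(fd, 0, 1 << 20,
                                   SYNC_FILE_RANGE_WRITE_AND_WAIT);
    }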
+13 -2
tools/include/uapi/linux/kvm.h
··· 986 986 #define KVM_CAP_HYPERV_ENLIGHTENED_VMCS 163 987 987 #define KVM_CAP_EXCEPTION_PAYLOAD 164 988 988 #define KVM_CAP_ARM_VM_IPA_SIZE 165 989 - #define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 989 + #define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT 166 /* Obsolete */ 990 990 #define KVM_CAP_HYPERV_CPUID 167 991 + #define KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 168 992 + #define KVM_CAP_PPC_IRQ_XIVE 169 993 + #define KVM_CAP_ARM_SVE 170 994 + #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171 995 + #define KVM_CAP_ARM_PTRAUTH_GENERIC 172 991 996 992 997 #ifdef KVM_CAP_IRQ_ROUTING 993 998 ··· 1150 1145 #define KVM_REG_SIZE_U256 0x0050000000000000ULL 1151 1146 #define KVM_REG_SIZE_U512 0x0060000000000000ULL 1152 1147 #define KVM_REG_SIZE_U1024 0x0070000000000000ULL 1148 + #define KVM_REG_SIZE_U2048 0x0080000000000000ULL 1153 1149 1154 1150 struct kvm_reg_list { 1155 1151 __u64 n; /* number of regs */ ··· 1217 1211 #define KVM_DEV_TYPE_ARM_VGIC_V3 KVM_DEV_TYPE_ARM_VGIC_V3 1218 1212 KVM_DEV_TYPE_ARM_VGIC_ITS, 1219 1213 #define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS 1214 + KVM_DEV_TYPE_XIVE, 1215 + #define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE 1220 1216 KVM_DEV_TYPE_MAX, 1221 1217 }; 1222 1218 ··· 1442 1434 #define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state) 1443 1435 #define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state) 1444 1436 1445 - /* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT */ 1437 + /* Available with KVM_CAP_MANUAL_DIRTY_LOG_PROTECT_2 */ 1446 1438 #define KVM_CLEAR_DIRTY_LOG _IOWR(KVMIO, 0xc0, struct kvm_clear_dirty_log) 1447 1439 1448 1440 /* Available with KVM_CAP_HYPERV_CPUID */ 1449 1441 #define KVM_GET_SUPPORTED_HV_CPUID _IOWR(KVMIO, 0xc1, struct kvm_cpuid2) 1442 + 1443 + /* Available with KVM_CAP_ARM_SVE */ 1444 + #define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int) 1450 1445 1451 1446 /* Secure Encrypted Virtualization command */ 1452 1447 enum sev_cmd_id {
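The newly numbered capabilities are probed the usual way, with KVM_CHECK_EXTENSION; a minimal sketch, using KVM_CAP_ARM_SVE as the example and assuming uapi headers that already carry these definitions.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Returns 1 if the running kernel advertises the capability, 0 otherwise. */
    static int kvm_has_cap_arm_sve(void)
    {
            int kvm = open("/dev/kvm", O_RDWR | O_CLOEXEC);
            int ret;

            if (kvm < 0)
                    return 0;
            ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE);
            close(kvm);
            return ret > 0;
    }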
+62
tools/include/uapi/linux/mount.h
··· 55 55 #define MS_MGC_VAL 0xC0ED0000 56 56 #define MS_MGC_MSK 0xffff0000 57 57 58 + /* 59 + * open_tree() flags. 60 + */ 61 + #define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */ 62 + #define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */ 63 + 64 + /* 65 + * move_mount() flags. 66 + */ 67 + #define MOVE_MOUNT_F_SYMLINKS 0x00000001 /* Follow symlinks on from path */ 68 + #define MOVE_MOUNT_F_AUTOMOUNTS 0x00000002 /* Follow automounts on from path */ 69 + #define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */ 70 + #define MOVE_MOUNT_T_SYMLINKS 0x00000010 /* Follow symlinks on to path */ 71 + #define MOVE_MOUNT_T_AUTOMOUNTS 0x00000020 /* Follow automounts on to path */ 72 + #define MOVE_MOUNT_T_EMPTY_PATH 0x00000040 /* Empty to path permitted */ 73 + #define MOVE_MOUNT__MASK 0x00000077 74 + 75 + /* 76 + * fsopen() flags. 77 + */ 78 + #define FSOPEN_CLOEXEC 0x00000001 79 + 80 + /* 81 + * fspick() flags. 82 + */ 83 + #define FSPICK_CLOEXEC 0x00000001 84 + #define FSPICK_SYMLINK_NOFOLLOW 0x00000002 85 + #define FSPICK_NO_AUTOMOUNT 0x00000004 86 + #define FSPICK_EMPTY_PATH 0x00000008 87 + 88 + /* 89 + * The type of fsconfig() call made. 90 + */ 91 + enum fsconfig_command { 92 + FSCONFIG_SET_FLAG = 0, /* Set parameter, supplying no value */ 93 + FSCONFIG_SET_STRING = 1, /* Set parameter, supplying a string value */ 94 + FSCONFIG_SET_BINARY = 2, /* Set parameter, supplying a binary blob value */ 95 + FSCONFIG_SET_PATH = 3, /* Set parameter, supplying an object by path */ 96 + FSCONFIG_SET_PATH_EMPTY = 4, /* Set parameter, supplying an object by (empty) path */ 97 + FSCONFIG_SET_FD = 5, /* Set parameter, supplying an object by fd */ 98 + FSCONFIG_CMD_CREATE = 6, /* Invoke superblock creation */ 99 + FSCONFIG_CMD_RECONFIGURE = 7, /* Invoke superblock reconfiguration */ 100 + }; 101 + 102 + /* 103 + * fsmount() flags. 104 + */ 105 + #define FSMOUNT_CLOEXEC 0x00000001 106 + 107 + /* 108 + * Mount attributes. 109 + */ 110 + #define MOUNT_ATTR_RDONLY 0x00000001 /* Mount read-only */ 111 + #define MOUNT_ATTR_NOSUID 0x00000002 /* Ignore suid and sgid bits */ 112 + #define MOUNT_ATTR_NODEV 0x00000004 /* Disallow access to device special files */ 113 + #define MOUNT_ATTR_NOEXEC 0x00000008 /* Disallow program execution */ 114 + #define MOUNT_ATTR__ATIME 0x00000070 /* Setting on how atime should be updated */ 115 + #define MOUNT_ATTR_RELATIME 0x00000000 /* - Update atime relative to mtime/ctime. */ 116 + #define MOUNT_ATTR_NOATIME 0x00000010 /* - Do not update access times. */ 117 + #define MOUNT_ATTR_STRICTATIME 0x00000020 /* - Always perform atime updates */ 118 + #define MOUNT_ATTR_NODIRATIME 0x00000080 /* Do not update directory access times */ 119 + 58 120 #endif /* _UAPI_LINUX_MOUNT_H */
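Taken together, the definitions above describe the new mount API flow: fsopen() creates a filesystem context, fsconfig() sets parameters and creates the superblock, fsmount() turns it into a detached mount, and move_mount() attaches it. A hedged sketch with no error handling; the x86_64 syscall numbers are taken from the perf syscall table further below, libc wrappers are not assumed, and tmpfs plus /mnt are arbitrary choices.

    #define _GNU_SOURCE
    #include <fcntl.h>               /* AT_FDCWD */
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/mount.h>         /* FSOPEN_*, FSCONFIG_*, FSMOUNT_*, MOVE_MOUNT_* */

    #ifndef __NR_fsopen              /* x86_64 numbers, see syscall_64.tbl below */
    #define __NR_move_mount 429
    #define __NR_fsopen     430
    #define __NR_fsconfig   431
    #define __NR_fsmount    432
    #endif

    /* Sketch: configure and attach a 16M tmpfs at /mnt with the new API. */
    static int mount_tmpfs_at_mnt(void)
    {
            int fsfd, mntfd;

            fsfd  = syscall(__NR_fsopen, "tmpfs", FSOPEN_CLOEXEC);
            syscall(__NR_fsconfig, fsfd, FSCONFIG_SET_STRING, "size", "16M", 0);
            syscall(__NR_fsconfig, fsfd, FSCONFIG_CMD_CREATE, NULL, NULL, 0);
            mntfd = syscall(__NR_fsmount, fsfd, FSMOUNT_CLOEXEC, MOUNT_ATTR_NODEV);
            return syscall(__NR_move_mount, mntfd, "", AT_FDCWD, "/mnt",
                           MOVE_MOUNT_F_EMPTY_PATH);
    }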
+1
tools/include/uapi/linux/sched.h
··· 10 10 #define CLONE_FS 0x00000200 /* set if fs info shared between processes */ 11 11 #define CLONE_FILES 0x00000400 /* set if open files shared between processes */ 12 12 #define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */ 13 + #define CLONE_PIDFD 0x00001000 /* set if a pidfd should be placed in parent */ 13 14 #define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */ 14 15 #define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ 15 16 #define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
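CLONE_PIDFD asks the kernel to hand the parent a pid file descriptor for the child; with the raw clone syscall it is written through the parent_tid pointer. A hedged sketch using the x86_64 argument order (other architectures order the trailing clone arguments differently):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef CLONE_PIDFD
    #define CLONE_PIDFD 0x00001000
    #endif

    /* Sketch: fork-like clone that also yields a pidfd for the child.
     * Returns the child pid in the parent and 0 in the child, as fork does. */
    static pid_t fork_with_pidfd(int *pidfd)
    {
            /* x86_64 order: flags, newsp, parent_tid, child_tid, tls */
            return syscall(SYS_clone, CLONE_PIDFD | SIGCHLD, 0, pidfd, 0, 0);
    }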
+1 -1
tools/lib/bpf/btf.c
··· 11 11 #include "btf.h" 12 12 #include "bpf.h" 13 13 #include "libbpf.h" 14 - #include "libbpf_util.h" 14 + #include "libbpf_internal.h" 15 15 16 16 #define max(a, b) ((a) > (b) ? (a) : (b)) 17 17 #define min(a, b) ((a) < (b) ? (a) : (b))
+1 -2
tools/lib/bpf/libbpf.c
··· 43 43 #include "bpf.h" 44 44 #include "btf.h" 45 45 #include "str_error.h" 46 - #include "libbpf_util.h" 47 46 #include "libbpf_internal.h" 48 47 49 48 #ifndef EM_BPF ··· 1695 1696 for (i = 0; i < ARRAY_SIZE(probe_fn); i++) { 1696 1697 ret = probe_fn[i](obj); 1697 1698 if (ret < 0) 1698 - return ret; 1699 + pr_debug("Probe #%d failed with %d.\n", i, ret); 1699 1700 } 1700 1701 1701 1702 return 0;
+13
tools/lib/bpf/libbpf_internal.h
··· 21 21 #define BTF_PARAM_ENC(name, type) (name), (type) 22 22 #define BTF_VAR_SECINFO_ENC(type, offset, size) (type), (offset), (size) 23 23 24 + extern void libbpf_print(enum libbpf_print_level level, 25 + const char *format, ...) 26 + __attribute__((format(printf, 2, 3))); 27 + 28 + #define __pr(level, fmt, ...) \ 29 + do { \ 30 + libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \ 31 + } while (0) 32 + 33 + #define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__) 34 + #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) 35 + #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) 36 + 24 37 int libbpf__probe_raw_btf(const char *raw_types, size_t types_len, 25 38 const char *str_sec, size_t str_len); 26 39
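These pr_*() macros are libbpf-internal; LIBBPF_DEBUG messages (such as the probe failures downgraded in libbpf.c above) only reach the user if the application installs a print callback. A brief sketch, assuming the libbpf_set_print()/libbpf_print_fn_t interface of this libbpf version:

    #include <stdarg.h>
    #include <stdio.h>
    #include <bpf/libbpf.h>

    /* Forward every libbpf message, including LIBBPF_DEBUG, to stderr. */
    static int print_all(enum libbpf_print_level level,
                         const char *format, va_list args)
    {
            return vfprintf(stderr, format, args);
    }

    /* Early in the application: libbpf_set_print(print_all); */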
-13
tools/lib/bpf/libbpf_util.h
··· 10 10 extern "C" { 11 11 #endif 12 12 13 - extern void libbpf_print(enum libbpf_print_level level, 14 - const char *format, ...) 15 - __attribute__((format(printf, 2, 3))); 16 - 17 - #define __pr(level, fmt, ...) \ 18 - do { \ 19 - libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \ 20 - } while (0) 21 - 22 - #define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__) 23 - #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) 24 - #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) 25 - 26 13 /* Use these barrier functions instead of smp_[rw]mb() when they are 27 14 * used in a libbpf header file. That way they can be built into the 28 15 * application that uses libbpf.
+1 -1
tools/lib/bpf/xsk.c
··· 29 29 30 30 #include "bpf.h" 31 31 #include "libbpf.h" 32 - #include "libbpf_util.h" 32 + #include "libbpf_internal.h" 33 33 #include "xsk.h" 34 34 35 35 #ifndef SOL_XDP
+1 -1
tools/perf/arch/arm64/entry/syscalls/mksyscalltbl
··· 56 56 echo "};" 57 57 } 58 58 59 - $gcc -E -dM -x c $input \ 59 + $gcc -E -dM -x c -I $incpath/include/uapi $input \ 60 60 |sed -ne 's/^#define __NR_//p' \ 61 61 |sort -t' ' -k2 -nu \ 62 62 |create_table
+6 -3
tools/perf/arch/s390/util/machine.c
··· 5 5 #include "util.h" 6 6 #include "machine.h" 7 7 #include "api/fs/fs.h" 8 + #include "debug.h" 8 9 9 10 int arch__fix_module_text_start(u64 *start, const char *name) 10 11 { 12 + u64 m_start = *start; 11 13 char path[PATH_MAX]; 12 14 13 15 snprintf(path, PATH_MAX, "module/%.*s/sections/.text", 14 16 (int)strlen(name) - 2, name + 1); 15 - 16 - if (sysfs__read_ull(path, (unsigned long long *)start) < 0) 17 - return -1; 17 + if (sysfs__read_ull(path, (unsigned long long *)start) < 0) { 18 + pr_debug2("Using module %s start:%#lx\n", path, m_start); 19 + *start = m_start; 20 + } 18 21 19 22 return 0; 20 23 }
+6
tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
··· 349 349 425 common io_uring_setup __x64_sys_io_uring_setup 350 350 426 common io_uring_enter __x64_sys_io_uring_enter 351 351 427 common io_uring_register __x64_sys_io_uring_register 352 + 428 common open_tree __x64_sys_open_tree 353 + 429 common move_mount __x64_sys_move_mount 354 + 430 common fsopen __x64_sys_fsopen 355 + 431 common fsconfig __x64_sys_fsconfig 356 + 432 common fsmount __x64_sys_fsmount 357 + 433 common fspick __x64_sys_fspick 352 358 353 359 # 354 360 # x32-specific system call numbers start at 512 to avoid cache impact
+8 -1
tools/perf/tests/vmlinux-kallsyms.c
··· 161 161 162 162 continue; 163 163 } 164 - } else 164 + } else if (mem_start == kallsyms.vmlinux_map->end) { 165 + /* 166 + * Ignore aliases to _etext, i.e. to the end of the kernel text area, 167 + * such as __indirect_thunk_end. 168 + */ 169 + continue; 170 + } else { 165 171 pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n", 166 172 mem_start, sym->name); 173 + } 167 174 168 175 err = -1; 169 176 }
+1 -1
tools/perf/util/data-convert-bt.c
··· 271 271 if (i > 0) 272 272 strncpy(buffer, string, i); 273 273 } 274 - strncat(buffer + p, numstr, 4); 274 + memcpy(buffer + p, numstr, 4); 275 275 p += 3; 276 276 } 277 277 }
+18 -9
tools/perf/util/machine.c
··· 924 924 * symbol_name if it's not that important. 925 925 */ 926 926 static int machine__get_running_kernel_start(struct machine *machine, 927 - const char **symbol_name, u64 *start) 927 + const char **symbol_name, 928 + u64 *start, u64 *end) 928 929 { 929 930 char filename[PATH_MAX]; 930 931 int i, err = -1; ··· 950 949 *symbol_name = name; 951 950 952 951 *start = addr; 952 + 953 + err = kallsyms__get_function_start(filename, "_etext", &addr); 954 + if (!err) 955 + *end = addr; 956 + 953 957 return 0; 954 958 } 955 959 ··· 1447 1441 struct dso *kernel = machine__get_kernel(machine); 1448 1442 const char *name = NULL; 1449 1443 struct map *map; 1450 - u64 addr = 0; 1444 + u64 start = 0, end = ~0ULL; 1451 1445 int ret; 1452 1446 1453 1447 if (kernel == NULL) ··· 1466 1460 "continuing anyway...\n", machine->pid); 1467 1461 } 1468 1462 1469 - if (!machine__get_running_kernel_start(machine, &name, &addr)) { 1463 + if (!machine__get_running_kernel_start(machine, &name, &start, &end)) { 1470 1464 if (name && 1471 - map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, addr)) { 1465 + map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) { 1472 1466 machine__destroy_kernel_maps(machine); 1473 1467 ret = -1; 1474 1468 goto out_put; ··· 1478 1472 * we have a real start address now, so re-order the kmaps 1479 1473 * assume it's the last in the kmaps 1480 1474 */ 1481 - machine__update_kernel_mmap(machine, addr, ~0ULL); 1475 + machine__update_kernel_mmap(machine, start, end); 1482 1476 } 1483 1477 1484 1478 if (machine__create_extra_kernel_maps(machine, kernel)) 1485 1479 pr_debug("Problems creating extra kernel maps, continuing anyway...\n"); 1486 1480 1487 - /* update end address of the kernel map using adjacent module address */ 1488 - map = map__next(machine__kernel_map(machine)); 1489 - if (map) 1490 - machine__set_kernel_mmap(machine, addr, map->start); 1481 + if (end == ~0ULL) { 1482 + /* update end address of the kernel map using adjacent module address */ 1483 + map = map__next(machine__kernel_map(machine)); 1484 + if (map) 1485 + machine__set_kernel_mmap(machine, start, map->start); 1486 + } 1487 + 1491 1488 out_put: 1492 1489 dso__put(kernel); 1493 1490 return ret;
+21
tools/perf/util/session.c
··· 647 647 swap_sample_id_all(event, &event->throttle + 1); 648 648 } 649 649 650 + static void perf_event__namespaces_swap(union perf_event *event, 651 + bool sample_id_all) 652 + { 653 + u64 i; 654 + 655 + event->namespaces.pid = bswap_32(event->namespaces.pid); 656 + event->namespaces.tid = bswap_32(event->namespaces.tid); 657 + event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces); 658 + 659 + for (i = 0; i < event->namespaces.nr_namespaces; i++) { 660 + struct perf_ns_link_info *ns = &event->namespaces.link_info[i]; 661 + 662 + ns->dev = bswap_64(ns->dev); 663 + ns->ino = bswap_64(ns->ino); 664 + } 665 + 666 + if (sample_id_all) 667 + swap_sample_id_all(event, &event->namespaces.link_info[i]); 668 + } 669 + 650 670 static u8 revbyte(u8 b) 651 671 { 652 672 int rev = (b >> 4) | ((b & 0xf) << 4); ··· 907 887 [PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap, 908 888 [PERF_RECORD_SWITCH] = perf_event__switch_swap, 909 889 [PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap, 890 + [PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap, 910 891 [PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap, 911 892 [PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap, 912 893 [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
+13 -2
tools/perf/util/thread.c
··· 133 133 } 134 134 } 135 135 136 - struct namespaces *thread__namespaces(const struct thread *thread) 136 + static struct namespaces *__thread__namespaces(const struct thread *thread) 137 137 { 138 138 if (list_empty(&thread->namespaces_list)) 139 139 return NULL; ··· 141 141 return list_first_entry(&thread->namespaces_list, struct namespaces, list); 142 142 } 143 143 144 + struct namespaces *thread__namespaces(const struct thread *thread) 145 + { 146 + struct namespaces *ns; 147 + 148 + down_read((struct rw_semaphore *)&thread->namespaces_lock); 149 + ns = __thread__namespaces(thread); 150 + up_read((struct rw_semaphore *)&thread->namespaces_lock); 151 + 152 + return ns; 153 + } 154 + 144 155 static int __thread__set_namespaces(struct thread *thread, u64 timestamp, 145 156 struct namespaces_event *event) 146 157 { 147 - struct namespaces *new, *curr = thread__namespaces(thread); 158 + struct namespaces *new, *curr = __thread__namespaces(thread); 148 159 149 160 new = namespaces__new(event); 150 161 if (!new)
+1
tools/testing/selftests/bpf/.gitignore
··· 31 31 test_tcpnotify_user 32 32 test_libbpf 33 33 test_tcp_check_syncookie_user 34 + test_sysctl 34 35 alu32 35 36 libbpf.pc 36 37 libbpf.so.*
+1 -1
tools/testing/selftests/bpf/bpf_helpers.h
··· 278 278 (void *) BPF_FUNC_skb_change_type; 279 279 static unsigned int (*bpf_get_hash_recalc)(void *ctx) = 280 280 (void *) BPF_FUNC_get_hash_recalc; 281 - static unsigned long long (*bpf_get_current_task)(void *ctx) = 281 + static unsigned long long (*bpf_get_current_task)(void) = 282 282 (void *) BPF_FUNC_get_current_task; 283 283 static int (*bpf_skb_change_tail)(void *ctx, __u32 len, __u64 flags) = 284 284 (void *) BPF_FUNC_skb_change_tail;
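With the corrected zero-argument prototype, a caller looks like the sketch below; the section name, attach point and the bpf_probe_read() of the task pointer are illustrative only, and a real program would add a proper task_struct field offset.

    #include <linux/bpf.h>
    #include "bpf_helpers.h"

    /* Sketch: fetch the current task pointer with the (void) helper and
     * probe-read a few bytes from it.  Field offset handling is elided. */
    SEC("kprobe/do_sys_open")
    int trace_open(void *ctx)
    {
            unsigned long long task = bpf_get_current_task();
            int scratch = 0;

            bpf_probe_read(&scratch, sizeof(scratch), (void *)task);
            return 0;
    }

    char _license[] SEC("license") = "GPL";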
+1
tools/testing/selftests/bpf/map_tests/.gitignore
··· 1 + tests.h
+5 -4
tools/testing/selftests/bpf/prog_tests/flow_dissector.c
··· 242 242 */ 243 243 244 244 err = bpf_prog_attach(prog_fd, 0, BPF_FLOW_DISSECTOR, 0); 245 - CHECK(err, "bpf_prog_attach", "err %d errno %d", err, errno); 245 + CHECK(err, "bpf_prog_attach", "err %d errno %d\n", err, errno); 246 246 247 247 tap_fd = create_tap("tap0"); 248 - CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d", tap_fd, errno); 248 + CHECK(tap_fd < 0, "create_tap", "tap_fd %d errno %d\n", tap_fd, errno); 249 249 err = ifup("tap0"); 250 - CHECK(err, "ifup", "err %d errno %d", err, errno); 250 + CHECK(err, "ifup", "err %d errno %d\n", err, errno); 251 251 252 252 for (i = 0; i < ARRAY_SIZE(tests); i++) { 253 253 struct bpf_flow_keys flow_keys = {}; ··· 255 255 __u32 key = 0; 256 256 257 257 err = tx_tap(tap_fd, &tests[i].pkt, sizeof(tests[i].pkt)); 258 - CHECK(err < 0, "tx_tap", "err %d errno %d", err, errno); 258 + CHECK(err < 0, "tx_tap", "err %d errno %d\n", err, errno); 259 259 260 260 err = bpf_map_lookup_elem(keys_fd, &key, &flow_keys); 261 261 CHECK_ATTR(err, tests[i].name, "bpf_map_lookup_elem %d\n", err); ··· 264 264 CHECK_FLOW_KEYS(tests[i].name, flow_keys, tests[i].keys); 265 265 } 266 266 267 + bpf_prog_detach(prog_fd, BPF_FLOW_DISSECTOR); 267 268 bpf_object__close(obj); 268 269 }
+274 -14
tools/testing/selftests/bpf/test_lru_map.c
··· 18 18 #include <sys/wait.h> 19 19 20 20 #include <bpf/bpf.h> 21 + #include <bpf/libbpf.h> 21 22 22 23 #include "bpf_util.h" 23 24 #include "bpf_rlimit.h" 25 + #include "../../../include/linux/filter.h" 24 26 25 27 #define LOCAL_FREE_TARGET (128) 26 28 #define PERCPU_FREE_TARGET (4) ··· 40 38 perror("bpf_create_map"); 41 39 42 40 return map_fd; 41 + } 42 + 43 + static int bpf_map_lookup_elem_with_ref_bit(int fd, unsigned long long key, 44 + void *value) 45 + { 46 + struct bpf_load_program_attr prog; 47 + struct bpf_create_map_attr map; 48 + struct bpf_insn insns[] = { 49 + BPF_LD_MAP_VALUE(BPF_REG_9, 0, 0), 50 + BPF_LD_MAP_FD(BPF_REG_1, fd), 51 + BPF_LD_IMM64(BPF_REG_3, key), 52 + BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), 53 + BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), 54 + BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0), 55 + BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem), 56 + BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4), 57 + BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 58 + BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 0), 59 + BPF_MOV64_IMM(BPF_REG_0, 42), 60 + BPF_JMP_IMM(BPF_JA, 0, 0, 1), 61 + BPF_MOV64_IMM(BPF_REG_0, 1), 62 + BPF_EXIT_INSN(), 63 + }; 64 + __u8 data[64] = {}; 65 + int mfd, pfd, ret, zero = 0; 66 + __u32 retval = 0; 67 + 68 + memset(&map, 0, sizeof(map)); 69 + map.map_type = BPF_MAP_TYPE_ARRAY; 70 + map.key_size = sizeof(int); 71 + map.value_size = sizeof(unsigned long long); 72 + map.max_entries = 1; 73 + 74 + mfd = bpf_create_map_xattr(&map); 75 + if (mfd < 0) 76 + return -1; 77 + 78 + insns[0].imm = mfd; 79 + 80 + memset(&prog, 0, sizeof(prog)); 81 + prog.prog_type = BPF_PROG_TYPE_SCHED_CLS; 82 + prog.insns = insns; 83 + prog.insns_cnt = ARRAY_SIZE(insns); 84 + prog.license = "GPL"; 85 + 86 + pfd = bpf_load_program_xattr(&prog, NULL, 0); 87 + if (pfd < 0) { 88 + close(mfd); 89 + return -1; 90 + } 91 + 92 + ret = bpf_prog_test_run(pfd, 1, data, sizeof(data), 93 + NULL, NULL, &retval, NULL); 94 + if (ret < 0 || retval != 42) { 95 + ret = -1; 96 + } else { 97 + assert(!bpf_map_lookup_elem(mfd, &zero, value)); 98 + ret = 0; 99 + } 100 + close(pfd); 101 + close(mfd); 102 + return ret; 43 103 } 44 104 45 105 static int map_subset(int map0, int map1) ··· 151 87 return ret; 152 88 } 153 89 154 - /* Size of the LRU amp is 2 90 + /* Size of the LRU map is 2 155 91 * Add key=1 (+1 key) 156 92 * Add key=2 (+1 key) 157 93 * Lookup Key=1 ··· 221 157 * stop LRU from removing key=1 222 158 */ 223 159 key = 1; 224 - assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 160 + assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); 225 161 assert(value[0] == 1234); 226 162 227 163 key = 3; ··· 231 167 232 168 /* key=2 has been removed from the LRU */ 233 169 key = 2; 234 - assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1); 170 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 171 + errno == ENOENT); 235 172 236 173 assert(map_equal(lru_map_fd, expected_map_fd)); 237 174 ··· 286 221 /* Lookup 1 to tgt_free/2 */ 287 222 end_key = 1 + batch_size; 288 223 for (key = 1; key < end_key; key++) { 289 - assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 224 + assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); 290 225 assert(!bpf_map_update_elem(expected_map_fd, &key, value, 291 226 BPF_NOEXIST)); 292 227 } ··· 387 322 end_key = 1 + batch_size; 388 323 value[0] = 4321; 389 324 for (key = 1; key < end_key; key++) { 390 - assert(bpf_map_lookup_elem(lru_map_fd, &key, value)); 325 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 326 + errno == ENOENT); 
391 327 assert(!bpf_map_update_elem(lru_map_fd, &key, value, 392 328 BPF_NOEXIST)); 393 - assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 329 + assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); 394 330 assert(value[0] == 4321); 395 331 assert(!bpf_map_update_elem(expected_map_fd, &key, value, 396 332 BPF_NOEXIST)); ··· 470 404 /* Lookup key 1 to tgt_free*3/2 */ 471 405 end_key = tgt_free + batch_size; 472 406 for (key = 1; key < end_key; key++) { 473 - assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 407 + assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); 474 408 assert(!bpf_map_update_elem(expected_map_fd, &key, value, 475 409 BPF_NOEXIST)); 476 410 } ··· 529 463 assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 530 464 531 465 for (key = 1; key <= tgt_free; key++) { 532 - assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 466 + assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); 533 467 assert(!bpf_map_update_elem(expected_map_fd, &key, value, 534 468 BPF_NOEXIST)); 535 469 } ··· 560 494 unsigned long long key, value[nr_cpus]; 561 495 562 496 /* Ensure the last key inserted by previous CPU can be found */ 563 - assert(!bpf_map_lookup_elem(map_fd, &last_key, value)); 564 - 497 + assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, last_key, value)); 565 498 value[0] = 1234; 566 499 567 500 key = last_key + 1; 568 501 assert(!bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST)); 569 - assert(!bpf_map_lookup_elem(map_fd, &key, value)); 502 + assert(!bpf_map_lookup_elem_with_ref_bit(map_fd, key, value)); 570 503 571 504 /* Cannot find the last key because it was removed by LRU */ 572 - assert(bpf_map_lookup_elem(map_fd, &last_key, value)); 505 + assert(bpf_map_lookup_elem(map_fd, &last_key, value) == -1 && 506 + errno == ENOENT); 573 507 } 574 508 575 509 /* Test map with only one element */ ··· 656 590 /* Make ref bit sticky for key: [1, tgt_free] */ 657 591 for (stable_key = 1; stable_key <= tgt_free; stable_key++) { 658 592 /* Mark the ref bit */ 659 - assert(!bpf_map_lookup_elem(lru_map_fd, &stable_key, 660 - value)); 593 + assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, 594 + stable_key, value)); 661 595 } 662 596 assert(!bpf_map_update_elem(lru_map_fd, &key, value, 663 597 BPF_NOEXIST)); ··· 669 603 assert(!bpf_map_update_elem(expected_map_fd, &key, value, 670 604 BPF_NOEXIST)); 671 605 } 606 + 607 + assert(map_equal(lru_map_fd, expected_map_fd)); 608 + 609 + close(expected_map_fd); 610 + close(lru_map_fd); 611 + 612 + printf("Pass\n"); 613 + } 614 + 615 + /* Size of the LRU map is 2 616 + * Add key=1 (+1 key) 617 + * Add key=2 (+1 key) 618 + * Lookup Key=1 (datapath) 619 + * Lookup Key=2 (syscall) 620 + * Add Key=3 621 + * => Key=2 will be removed by LRU 622 + * Iterate map. 
Only found key=1 and key=3 623 + */ 624 + static void test_lru_sanity7(int map_type, int map_flags) 625 + { 626 + unsigned long long key, value[nr_cpus]; 627 + int lru_map_fd, expected_map_fd; 628 + int next_cpu = 0; 629 + 630 + printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, 631 + map_flags); 632 + 633 + assert(sched_next_online(0, &next_cpu) != -1); 634 + 635 + if (map_flags & BPF_F_NO_COMMON_LRU) 636 + lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus); 637 + else 638 + lru_map_fd = create_map(map_type, map_flags, 2); 639 + assert(lru_map_fd != -1); 640 + 641 + expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2); 642 + assert(expected_map_fd != -1); 643 + 644 + value[0] = 1234; 645 + 646 + /* insert key=1 element */ 647 + 648 + key = 1; 649 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 650 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 651 + BPF_NOEXIST)); 652 + 653 + /* BPF_NOEXIST means: add new element if it doesn't exist */ 654 + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1 655 + /* key=1 already exists */ 656 + && errno == EEXIST); 657 + 658 + /* insert key=2 element */ 659 + 660 + /* check that key=2 is not found */ 661 + key = 2; 662 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 663 + errno == ENOENT); 664 + 665 + /* BPF_EXIST means: update existing element */ 666 + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 && 667 + /* key=2 is not there */ 668 + errno == ENOENT); 669 + 670 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 671 + 672 + /* insert key=3 element */ 673 + 674 + /* check that key=3 is not found */ 675 + key = 3; 676 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 677 + errno == ENOENT); 678 + 679 + /* check that key=1 can be found and mark the ref bit to 680 + * stop LRU from removing key=1 681 + */ 682 + key = 1; 683 + assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); 684 + assert(value[0] == 1234); 685 + 686 + /* check that key=2 can be found and do _not_ mark ref bit. 687 + * this will be evicted on next update. 688 + */ 689 + key = 2; 690 + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 691 + assert(value[0] == 1234); 692 + 693 + key = 3; 694 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 695 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 696 + BPF_NOEXIST)); 697 + 698 + /* key=2 has been removed from the LRU */ 699 + key = 2; 700 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 701 + errno == ENOENT); 702 + 703 + assert(map_equal(lru_map_fd, expected_map_fd)); 704 + 705 + close(expected_map_fd); 706 + close(lru_map_fd); 707 + 708 + printf("Pass\n"); 709 + } 710 + 711 + /* Size of the LRU map is 2 712 + * Add key=1 (+1 key) 713 + * Add key=2 (+1 key) 714 + * Lookup Key=1 (syscall) 715 + * Lookup Key=2 (datapath) 716 + * Add Key=3 717 + * => Key=1 will be removed by LRU 718 + * Iterate map. 
Only found key=2 and key=3 719 + */ 720 + static void test_lru_sanity8(int map_type, int map_flags) 721 + { 722 + unsigned long long key, value[nr_cpus]; 723 + int lru_map_fd, expected_map_fd; 724 + int next_cpu = 0; 725 + 726 + printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type, 727 + map_flags); 728 + 729 + assert(sched_next_online(0, &next_cpu) != -1); 730 + 731 + if (map_flags & BPF_F_NO_COMMON_LRU) 732 + lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus); 733 + else 734 + lru_map_fd = create_map(map_type, map_flags, 2); 735 + assert(lru_map_fd != -1); 736 + 737 + expected_map_fd = create_map(BPF_MAP_TYPE_HASH, 0, 2); 738 + assert(expected_map_fd != -1); 739 + 740 + value[0] = 1234; 741 + 742 + /* insert key=1 element */ 743 + 744 + key = 1; 745 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 746 + 747 + /* BPF_NOEXIST means: add new element if it doesn't exist */ 748 + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST) == -1 749 + /* key=1 already exists */ 750 + && errno == EEXIST); 751 + 752 + /* insert key=2 element */ 753 + 754 + /* check that key=2 is not found */ 755 + key = 2; 756 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 757 + errno == ENOENT); 758 + 759 + /* BPF_EXIST means: update existing element */ 760 + assert(bpf_map_update_elem(lru_map_fd, &key, value, BPF_EXIST) == -1 && 761 + /* key=2 is not there */ 762 + errno == ENOENT); 763 + 764 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 765 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 766 + BPF_NOEXIST)); 767 + 768 + /* insert key=3 element */ 769 + 770 + /* check that key=3 is not found */ 771 + key = 3; 772 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 773 + errno == ENOENT); 774 + 775 + /* check that key=1 can be found and do _not_ mark ref bit. 776 + * this will be evicted on next update. 777 + */ 778 + key = 1; 779 + assert(!bpf_map_lookup_elem(lru_map_fd, &key, value)); 780 + assert(value[0] == 1234); 781 + 782 + /* check that key=2 can be found and mark the ref bit to 783 + * stop LRU from removing key=2 784 + */ 785 + key = 2; 786 + assert(!bpf_map_lookup_elem_with_ref_bit(lru_map_fd, key, value)); 787 + assert(value[0] == 1234); 788 + 789 + key = 3; 790 + assert(!bpf_map_update_elem(lru_map_fd, &key, value, BPF_NOEXIST)); 791 + assert(!bpf_map_update_elem(expected_map_fd, &key, value, 792 + BPF_NOEXIST)); 793 + 794 + /* key=1 has been removed from the LRU */ 795 + key = 1; 796 + assert(bpf_map_lookup_elem(lru_map_fd, &key, value) == -1 && 797 + errno == ENOENT); 672 798 673 799 assert(map_equal(lru_map_fd, expected_map_fd)); 674 800 ··· 895 637 test_lru_sanity4(map_types[t], map_flags[f], tgt_free); 896 638 test_lru_sanity5(map_types[t], map_flags[f]); 897 639 test_lru_sanity6(map_types[t], map_flags[f], tgt_free); 640 + test_lru_sanity7(map_types[t], map_flags[f]); 641 + test_lru_sanity8(map_types[t], map_flags[f]); 898 642 899 643 printf("\n"); 900 644 }
+8 -8
tools/testing/selftests/net/pmtu.sh
··· 430 430 veth_a_addr="${2}" 431 431 veth_b_addr="${3}" 432 432 433 - run_cmd "${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel" || return 1 434 - run_cmd "${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel" 435 - run_cmd "${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel" 436 - run_cmd "${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel" 433 + run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel || return 1 434 + run_cmd ${ns_a} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel 435 + run_cmd ${ns_a} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel 436 + run_cmd ${ns_a} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel 437 437 438 - run_cmd "${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel" 439 - run_cmd "${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel" 440 - run_cmd "${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel" 441 - run_cmd "${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel" 438 + run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_a_addr} dst ${veth_b_addr} spi 0x1000 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel 439 + run_cmd ${ns_b} ip -${proto} xfrm state add src ${veth_b_addr} dst ${veth_a_addr} spi 0x1001 proto esp aead 'rfc4106(gcm(aes))' 0x0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f0f 128 mode tunnel 440 + run_cmd ${ns_b} ip -${proto} xfrm policy add dir out mark 10 tmpl src ${veth_b_addr} dst ${veth_a_addr} proto esp mode tunnel 441 + run_cmd ${ns_b} ip -${proto} xfrm policy add dir in mark 10 tmpl src ${veth_a_addr} dst ${veth_b_addr} proto esp mode tunnel 442 442 } 443 443 444 444 setup_xfrm4() {
+26 -51
tools/testing/selftests/netfilter/nft_nat.sh
··· 8 8 ret=0 9 9 test_inet_nat=true 10 10 11 + cleanup() 12 + { 13 + for i in 0 1 2; do ip netns del ns$i;done 14 + } 15 + 11 16 nft --version > /dev/null 2>&1 12 17 if [ $? -ne 0 ];then 13 18 echo "SKIP: Could not run test without nft tool" ··· 26 21 fi 27 22 28 23 ip netns add ns0 24 + if [ $? -ne 0 ];then 25 + echo "SKIP: Could not create net namespace" 26 + exit $ksft_skip 27 + fi 28 + 29 + trap cleanup EXIT 30 + 29 31 ip netns add ns1 30 32 ip netns add ns2 31 33 ··· 359 347 test_masquerade6() 360 348 { 361 349 local family=$1 362 - local natflags=$1 350 + local natflags=$2 363 351 local lret=0 364 352 365 353 ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null ··· 404 392 405 393 ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 406 394 if [ $? -ne 0 ] ; then 407 - <<<<<<< HEAD 408 - echo "ERROR: cannot ping ns1 from ns2 with active $family masquerading" 409 - ======= 410 - echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags" 411 - >>>>>>> cd8dead0c39457e58ec1d36db93aedca811d48f1 395 + echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags" 412 396 lret=1 413 397 fi 414 398 415 399 # ns1 should have seen packets from ns0, due to masquerade 416 400 expect="packets 1 bytes 104" 417 401 for dir in "in6" "out6" ; do 418 - 419 402 cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect") 420 403 if [ $? -ne 0 ]; then 421 404 bad_counter ns1 ns0$dir "$expect" ··· 440 433 fi 441 434 done 442 435 443 - <<<<<<< HEAD 444 - ip netns exec ns0 nft flush chain $family nat postrouting 445 - ======= 446 436 ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1 447 437 if [ $? -ne 0 ] ; then 448 438 echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerade $natflags (attempt 2)" 449 439 lret=1 450 440 fi 451 441 452 - ip netns exec ns0 nft flush chain ip6 nat postrouting 453 - >>>>>>> cd8dead0c39457e58ec1d36db93aedca811d48f1 442 + ip netns exec ns0 nft flush chain $family nat postrouting 454 443 if [ $? -ne 0 ]; then 455 444 echo "ERROR: Could not flush $family nat postrouting" 1>&2 456 445 lret=1 457 446 fi 458 447 459 - <<<<<<< HEAD 460 - test $lret -eq 0 && echo "PASS: $family IPv6 masquerade for ns2" 461 - ======= 462 - test $lret -eq 0 && echo "PASS: IPv6 masquerade $natflags for ns2" 463 - >>>>>>> cd8dead0c39457e58ec1d36db93aedca811d48f1 448 + test $lret -eq 0 && echo "PASS: $family IPv6 masquerade $natflags for ns2" 464 449 465 450 return $lret 466 451 } 467 452 468 453 test_masquerade() 469 454 { 470 - <<<<<<< HEAD 471 455 local family=$1 472 - ======= 473 - local natflags=$1 474 - >>>>>>> cd8dead0c39457e58ec1d36db93aedca811d48f1 456 + local natflags=$2 475 457 local lret=0 476 458 477 459 ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null ··· 505 509 506 510 ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 507 511 if [ $? -ne 0 ] ; then 508 - <<<<<<< HEAD 509 - echo "ERROR: cannot ping ns1 from ns2 with active $family masquerading" 510 - ======= 511 - echo "ERROR: cannot ping ns1 from ns2 with active ip masquere $natflags" 512 - >>>>>>> cd8dead0c39457e58ec1d36db93aedca811d48f1 512 + echo "ERROR: cannot ping ns1 from ns2 with active $family masquerade $natflags" 513 513 lret=1 514 514 fi 515 515 ··· 541 549 fi 542 550 done 543 551 544 - <<<<<<< HEAD 545 - ip netns exec ns0 nft flush chain $family nat postrouting 546 - ======= 547 552 ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 548 553 if [ $? 
-ne 0 ] ; then 549 554 echo "ERROR: cannot ping ns1 from ns2 with active ip masquerade $natflags (attempt 2)" 550 555 lret=1 551 556 fi 552 557 553 - ip netns exec ns0 nft flush chain ip nat postrouting 554 - >>>>>>> cd8dead0c39457e58ec1d36db93aedca811d48f1 558 + ip netns exec ns0 nft flush chain $family nat postrouting 555 559 if [ $? -ne 0 ]; then 556 560 echo "ERROR: Could not flush $family nat postrouting" 1>&2 557 561 lret=1 558 562 fi 559 563 560 - <<<<<<< HEAD 561 - test $lret -eq 0 && echo "PASS: $family IP masquerade for ns2" 562 - ======= 563 - test $lret -eq 0 && echo "PASS: IP masquerade $natflags for ns2" 564 - >>>>>>> cd8dead0c39457e58ec1d36db93aedca811d48f1 564 + test $lret -eq 0 && echo "PASS: $family IP masquerade $natflags for ns2" 565 565 566 566 return $lret 567 567 } ··· 826 842 $test_inet_nat && test_local_dnat inet 827 843 $test_inet_nat && test_local_dnat6 inet 828 844 845 + for flags in "" "fully-random"; do 829 846 reset_counters 830 - <<<<<<< HEAD 831 - test_masquerade ip 832 - test_masquerade6 ip6 847 + test_masquerade ip $flags 848 + test_masquerade6 ip6 $flags 833 849 reset_counters 834 - $test_inet_nat && test_masquerade inet 835 - $test_inet_nat && test_masquerade6 inet 836 - ======= 837 - test_masquerade "" 838 - test_masquerade6 "" 839 - 840 - reset_counters 841 - test_masquerade "fully-random" 842 - test_masquerade6 "fully-random" 843 - >>>>>>> cd8dead0c39457e58ec1d36db93aedca811d48f1 850 + $test_inet_nat && test_masquerade inet $flags 851 + $test_inet_nat && test_masquerade6 inet $flags 852 + done 844 853 845 854 reset_counters 846 855 test_redirect ip ··· 841 864 reset_counters 842 865 $test_inet_nat && test_redirect inet 843 866 $test_inet_nat && test_redirect6 inet 844 - 845 - for i in 0 1 2; do ip netns del ns$i;done 846 867 847 868 exit $ret