Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5-updates-2023-01-10' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

mlx5-updates-2023-01-10

1) From Gal: Add debugfs entries for netdev nic driver
- ktls, flow steering and hairpin info
- useful for debug and performance analysis
- e.g. hairpin queue attributes, dump ktls tx pool size, etc.

2) From Maher: Update shared buffer configuration on PFC commands
2.1) For every change of buffer's headroom, recalculate the size of shared
buffer to be equal to "total_buffer_size" - "new_headroom_size".
The new shared buffer size will be split in ratio of 3:1 between
lossy and lossless pools, respectively.

2.2) For each port buffer change, count the number of lossless buffers.
If there is only one lossless buffer, then set its lossless pool
usage threshold to be infinite. Otherwise, if there is more than
one lossless buffer, set a usage threshold for each lossless buffer.

While at it, add more verbosity to debug prints when handling user
commands, to assist in future debug.

3) From Tariq: Throttle high rate FW commands

4) From Shay: Properly initialize management PF

5) Various cleanup patches

+706 -83
+71 -47
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 47 47 #define CREATE_TRACE_POINTS 48 48 #include "diag/cmd_tracepoint.h" 49 49 50 + struct mlx5_ifc_mbox_out_bits { 51 + u8 status[0x8]; 52 + u8 reserved_at_8[0x18]; 53 + 54 + u8 syndrome[0x20]; 55 + 56 + u8 reserved_at_40[0x40]; 57 + }; 58 + 59 + struct mlx5_ifc_mbox_in_bits { 60 + u8 opcode[0x10]; 61 + u8 uid[0x10]; 62 + 63 + u8 reserved_at_20[0x10]; 64 + u8 op_mod[0x10]; 65 + 66 + u8 reserved_at_40[0x40]; 67 + }; 68 + 50 69 enum { 51 70 CMD_IF_REV = 5, 52 71 }; ··· 89 70 MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10, 90 71 }; 91 72 73 + static u16 in_to_opcode(void *in) 74 + { 75 + return MLX5_GET(mbox_in, in, opcode); 76 + } 77 + 78 + /* Returns true for opcodes that might be triggered very frequently and throttle 79 + * the command interface. Limit their command slots usage. 80 + */ 81 + static bool mlx5_cmd_is_throttle_opcode(u16 op) 82 + { 83 + switch (op) { 84 + case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: 85 + case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: 86 + case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT: 87 + case MLX5_CMD_OP_QUERY_GENERAL_OBJECT: 88 + return true; 89 + } 90 + return false; 91 + } 92 + 92 93 static struct mlx5_cmd_work_ent * 93 94 cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in, 94 95 struct mlx5_cmd_msg *out, void *uout, int uout_size, ··· 130 91 ent->context = context; 131 92 ent->cmd = cmd; 132 93 ent->page_queue = page_queue; 94 + ent->op = in_to_opcode(in->first.data); 133 95 refcount_set(&ent->refcnt, 1); 134 96 135 97 return ent; ··· 792 752 } 793 753 } 794 754 795 - struct mlx5_ifc_mbox_out_bits { 796 - u8 status[0x8]; 797 - u8 reserved_at_8[0x18]; 798 - 799 - u8 syndrome[0x20]; 800 - 801 - u8 reserved_at_40[0x40]; 802 - }; 803 - 804 - struct mlx5_ifc_mbox_in_bits { 805 - u8 opcode[0x10]; 806 - u8 uid[0x10]; 807 - 808 - u8 reserved_at_20[0x10]; 809 - u8 op_mod[0x10]; 810 - 811 - u8 reserved_at_40[0x40]; 812 - }; 813 - 814 755 void mlx5_cmd_out_err(struct mlx5_core_dev *dev, u16 opcode, u16 op_mod, void *out) 815 756 { 816 757 u32 
syndrome = MLX5_GET(mbox_out, out, syndrome); ··· 809 788 u16 opcode, op_mod; 810 789 u16 uid; 811 790 812 - opcode = MLX5_GET(mbox_in, in, opcode); 791 + opcode = in_to_opcode(in); 813 792 op_mod = MLX5_GET(mbox_in, in, op_mod); 814 793 uid = MLX5_GET(mbox_in, in, uid); 815 794 ··· 821 800 { 822 801 /* aborted due to PCI error or via reset flow mlx5_cmd_trigger_completions() */ 823 802 if (err == -ENXIO) { 824 - u16 opcode = MLX5_GET(mbox_in, in, opcode); 803 + u16 opcode = in_to_opcode(in); 825 804 u32 syndrome; 826 805 u8 status; 827 806 ··· 850 829 struct mlx5_cmd_work_ent *ent, int input) 851 830 { 852 831 struct mlx5_cmd_msg *msg = input ? ent->in : ent->out; 853 - u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode); 854 832 struct mlx5_cmd_mailbox *next = msg->next; 855 833 int n = mlx5_calc_cmd_blocks(msg); 834 + u16 op = ent->op; 856 835 int data_only; 857 836 u32 offset = 0; 858 837 int dump_len; ··· 904 883 mlx5_core_dbg(dev, "cmd[%d]: end dump\n", ent->idx); 905 884 } 906 885 907 - static u16 msg_to_opcode(struct mlx5_cmd_msg *in) 908 - { 909 - return MLX5_GET(mbox_in, in->first.data, opcode); 910 - } 911 - 912 886 static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); 913 887 914 888 static void cb_timeout_handler(struct work_struct *work) ··· 921 905 /* Maybe got handled by eq recover ? */ 922 906 if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) { 923 907 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx, 924 - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); 908 + mlx5_command_str(ent->op), ent->op); 925 909 goto out; /* phew, already handled */ 926 910 } 927 911 928 912 ent->ret = -ETIMEDOUT; 929 913 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. 
Will cause a leak of a command resource\n", 930 - ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); 914 + ent->idx, mlx5_command_str(ent->op), ent->op); 931 915 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); 932 916 933 917 out: ··· 1001 985 ent->lay = lay; 1002 986 memset(lay, 0, sizeof(*lay)); 1003 987 memcpy(lay->in, ent->in->first.data, sizeof(lay->in)); 1004 - ent->op = be32_to_cpu(lay->in[0]) >> 16; 1005 988 if (ent->in->next) 1006 989 lay->in_ptr = cpu_to_be64(ent->in->next->dma); 1007 990 lay->inlen = cpu_to_be32(ent->in->len); ··· 1113 1098 */ 1114 1099 if (wait_for_completion_timeout(&ent->done, timeout)) { 1115 1100 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx, 1116 - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); 1101 + mlx5_command_str(ent->op), ent->op); 1117 1102 return; 1118 1103 } 1119 1104 1120 1105 mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx, 1121 - mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); 1106 + mlx5_command_str(ent->op), ent->op); 1122 1107 1123 1108 ent->ret = -ETIMEDOUT; 1124 1109 mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); ··· 1145 1130 1146 1131 if (err == -ETIMEDOUT) { 1147 1132 mlx5_core_warn(dev, "%s(0x%x) timeout. 
Will cause a leak of a command resource\n", 1148 - mlx5_command_str(msg_to_opcode(ent->in)), 1149 - msg_to_opcode(ent->in)); 1133 + mlx5_command_str(ent->op), ent->op); 1150 1134 } else if (err == -ECANCELED) { 1151 1135 mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n", 1152 - mlx5_command_str(msg_to_opcode(ent->in)), 1153 - msg_to_opcode(ent->in)); 1136 + mlx5_command_str(ent->op), ent->op); 1154 1137 } 1155 1138 mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", 1156 1139 err, deliv_status_to_str(ent->status), ent->status); ··· 1182 1169 u8 status = 0; 1183 1170 int err = 0; 1184 1171 s64 ds; 1185 - u16 op; 1186 1172 1187 1173 if (callback && page_queue) 1188 1174 return -EINVAL; ··· 1221 1209 goto out_free; 1222 1210 1223 1211 ds = ent->ts2 - ent->ts1; 1224 - op = MLX5_GET(mbox_in, in->first.data, opcode); 1225 - if (op < MLX5_CMD_OP_MAX) { 1226 - stats = &cmd->stats[op]; 1212 + if (ent->op < MLX5_CMD_OP_MAX) { 1213 + stats = &cmd->stats[ent->op]; 1227 1214 spin_lock_irq(&stats->lock); 1228 1215 stats->sum += ds; 1229 1216 ++stats->n; ··· 1230 1219 } 1231 1220 mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME, 1232 1221 "fw exec time for %s is %lld nsec\n", 1233 - mlx5_command_str(op), ds); 1222 + mlx5_command_str(ent->op), ds); 1234 1223 1235 1224 out_free: 1236 1225 status = ent->status; ··· 1827 1816 1828 1817 static int is_manage_pages(void *in) 1829 1818 { 1830 - return MLX5_GET(mbox_in, in, opcode) == MLX5_CMD_OP_MANAGE_PAGES; 1819 + return in_to_opcode(in) == MLX5_CMD_OP_MANAGE_PAGES; 1831 1820 } 1832 1821 1833 1822 /* Notes: ··· 1838 1827 int out_size, mlx5_cmd_cbk_t callback, void *context, 1839 1828 bool force_polling) 1840 1829 { 1841 - u16 opcode = MLX5_GET(mbox_in, in, opcode); 1842 1830 struct mlx5_cmd_msg *inb, *outb; 1831 + u16 opcode = in_to_opcode(in); 1832 + bool throttle_op; 1843 1833 int pages_queue; 1844 1834 gfp_t gfp; 1845 1835 u8 token; ··· 1849 1837 if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) 1850 
1838 return -ENXIO; 1851 1839 1840 + throttle_op = mlx5_cmd_is_throttle_opcode(opcode); 1841 + if (throttle_op) { 1842 + /* atomic context may not sleep */ 1843 + if (callback) 1844 + return -EINVAL; 1845 + down(&dev->cmd.throttle_sem); 1846 + } 1847 + 1852 1848 pages_queue = is_manage_pages(in); 1853 1849 gfp = callback ? GFP_ATOMIC : GFP_KERNEL; 1854 1850 1855 1851 inb = alloc_msg(dev, in_size, gfp); 1856 1852 if (IS_ERR(inb)) { 1857 1853 err = PTR_ERR(inb); 1858 - return err; 1854 + goto out_up; 1859 1855 } 1860 1856 1861 1857 token = alloc_token(&dev->cmd); ··· 1897 1877 mlx5_free_cmd_msg(dev, outb); 1898 1878 out_in: 1899 1879 free_msg(dev, inb); 1880 + out_up: 1881 + if (throttle_op) 1882 + up(&dev->cmd.throttle_sem); 1900 1883 return err; 1901 1884 } 1902 1885 ··· 1973 1950 int mlx5_cmd_do(struct mlx5_core_dev *dev, void *in, int in_size, void *out, int out_size) 1974 1951 { 1975 1952 int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, false); 1976 - u16 opcode = MLX5_GET(mbox_in, in, opcode); 1977 1953 u16 op_mod = MLX5_GET(mbox_in, in, op_mod); 1954 + u16 opcode = in_to_opcode(in); 1978 1955 1979 1956 return cmd_status_err(dev, err, opcode, op_mod, out); 1980 1957 } ··· 2019 1996 void *out, int out_size) 2020 1997 { 2021 1998 int err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true); 2022 - u16 opcode = MLX5_GET(mbox_in, in, opcode); 2023 1999 u16 op_mod = MLX5_GET(mbox_in, in, op_mod); 2000 + u16 opcode = in_to_opcode(in); 2024 2001 2025 2002 err = cmd_status_err(dev, err, opcode, op_mod, out); 2026 2003 return mlx5_cmd_check(dev, err, in, out); ··· 2072 2049 2073 2050 work->ctx = ctx; 2074 2051 work->user_callback = callback; 2075 - work->opcode = MLX5_GET(mbox_in, in, opcode); 2052 + work->opcode = in_to_opcode(in); 2076 2053 work->op_mod = MLX5_GET(mbox_in, in, op_mod); 2077 2054 work->out = out; 2078 2055 if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight))) ··· 2249 2226 2250 2227 sema_init(&cmd->sem, cmd->max_reg_cmds); 2251 
2228 sema_init(&cmd->pages_sem, 1); 2229 + sema_init(&cmd->throttle_sem, DIV_ROUND_UP(cmd->max_reg_cmds, 2)); 2252 2230 2253 2231 cmd_h = (u32)((u64)(cmd->dma) >> 32); 2254 2232 cmd_l = (u32)(cmd->dma);
+6
drivers/net/ethernet/mellanox/mlx5/core/dev.c
··· 59 59 if (!IS_ENABLED(CONFIG_MLX5_CORE_EN)) 60 60 return false; 61 61 62 + if (mlx5_core_is_management_pf(dev)) 63 + return false; 64 + 62 65 if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH) 63 66 return false; 64 67 ··· 199 196 bool mlx5_rdma_supported(struct mlx5_core_dev *dev) 200 197 { 201 198 if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) 199 + return false; 200 + 201 + if (mlx5_core_is_management_pf(dev)) 202 202 return false; 203 203 204 204 if (dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_IB_ADEV)
+8
drivers/net/ethernet/mellanox/mlx5/core/ecpf.c
··· 75 75 if (!mlx5_core_is_ecpf(dev)) 76 76 return 0; 77 77 78 + /* Management PF don't have a peer PF */ 79 + if (mlx5_core_is_management_pf(dev)) 80 + return 0; 81 + 78 82 return mlx5_host_pf_init(dev); 79 83 } 80 84 ··· 87 83 int err; 88 84 89 85 if (!mlx5_core_is_ecpf(dev)) 86 + return; 87 + 88 + /* Management PF don't have a peer PF */ 89 + if (mlx5_core_is_management_pf(dev)) 90 90 return; 91 91 92 92 mlx5_host_pf_cleanup(dev);
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 247 247 }; 248 248 249 249 struct mlx5e_rx_wqe_cyc { 250 - struct mlx5_wqe_data_seg data[0]; 250 + DECLARE_FLEX_ARRAY(struct mlx5_wqe_data_seg, data); 251 251 }; 252 252 253 253 struct mlx5e_umr_wqe { ··· 968 968 struct mlx5e_scratchpad scratchpad; 969 969 struct mlx5e_htb *htb; 970 970 struct mlx5e_mqprio_rl *mqprio_rl; 971 + struct dentry *dfs_root; 971 972 }; 972 973 973 974 struct mlx5e_rx_handlers {
+4 -1
drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
··· 145 145 146 146 struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, 147 147 struct mlx5_core_dev *mdev, 148 - bool state_destroy); 148 + bool state_destroy, 149 + struct dentry *dfs_root); 149 150 void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs); 150 151 struct mlx5e_vlan_table *mlx5e_fs_get_vlan(struct mlx5e_flow_steering *fs); 151 152 void mlx5e_fs_set_tc(struct mlx5e_flow_steering *fs, struct mlx5e_tc_table *tc); ··· 189 188 struct net_device *netdev, 190 189 __be16 proto, u16 vid); 191 190 void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev); 191 + 192 + struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs); 192 193 193 194 #define fs_err(fs, fmt, ...) \ 194 195 mlx5_core_err(mlx5e_fs_get_mdev(fs), fmt, ##__VA_ARGS__)
+72
drivers/net/ethernet/mellanox/mlx5/core/en/port.c
··· 287 287 return err; 288 288 } 289 289 290 + int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir, 291 + u8 pool_idx, void *out, int size_out) 292 + { 293 + u32 in[MLX5_ST_SZ_DW(sbpr_reg)] = {}; 294 + 295 + MLX5_SET(sbpr_reg, in, desc, desc); 296 + MLX5_SET(sbpr_reg, in, dir, dir); 297 + MLX5_SET(sbpr_reg, in, pool, pool_idx); 298 + 299 + return mlx5_core_access_reg(mdev, in, sizeof(in), out, size_out, MLX5_REG_SBPR, 0, 0); 300 + } 301 + 302 + int mlx5e_port_set_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir, 303 + u8 pool_idx, u32 infi_size, u32 size) 304 + { 305 + u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {}; 306 + u32 in[MLX5_ST_SZ_DW(sbpr_reg)] = {}; 307 + 308 + MLX5_SET(sbpr_reg, in, desc, desc); 309 + MLX5_SET(sbpr_reg, in, dir, dir); 310 + MLX5_SET(sbpr_reg, in, pool, pool_idx); 311 + MLX5_SET(sbpr_reg, in, infi_size, infi_size); 312 + MLX5_SET(sbpr_reg, in, size, size); 313 + MLX5_SET(sbpr_reg, in, mode, 1); 314 + 315 + return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_SBPR, 0, 1); 316 + } 317 + 318 + static int mlx5e_port_query_sbcm(struct mlx5_core_dev *mdev, u32 desc, 319 + u8 pg_buff_idx, u8 dir, void *out, 320 + int size_out) 321 + { 322 + u32 in[MLX5_ST_SZ_DW(sbcm_reg)] = {}; 323 + 324 + MLX5_SET(sbcm_reg, in, desc, desc); 325 + MLX5_SET(sbcm_reg, in, local_port, 1); 326 + MLX5_SET(sbcm_reg, in, pg_buff, pg_buff_idx); 327 + MLX5_SET(sbcm_reg, in, dir, dir); 328 + 329 + return mlx5_core_access_reg(mdev, in, sizeof(in), out, size_out, MLX5_REG_SBCM, 0, 0); 330 + } 331 + 332 + int mlx5e_port_set_sbcm(struct mlx5_core_dev *mdev, u32 desc, u8 pg_buff_idx, 333 + u8 dir, u8 infi_size, u32 max_buff, u8 pool_idx) 334 + { 335 + u32 out[MLX5_ST_SZ_DW(sbcm_reg)] = {}; 336 + u32 in[MLX5_ST_SZ_DW(sbcm_reg)] = {}; 337 + u32 min_buff; 338 + int err; 339 + u8 exc; 340 + 341 + err = mlx5e_port_query_sbcm(mdev, desc, pg_buff_idx, dir, out, 342 + sizeof(out)); 343 + if (err) 344 + return err; 345 + 346 + exc = 
MLX5_GET(sbcm_reg, out, exc); 347 + min_buff = MLX5_GET(sbcm_reg, out, min_buff); 348 + 349 + MLX5_SET(sbcm_reg, in, desc, desc); 350 + MLX5_SET(sbcm_reg, in, local_port, 1); 351 + MLX5_SET(sbcm_reg, in, pg_buff, pg_buff_idx); 352 + MLX5_SET(sbcm_reg, in, dir, dir); 353 + MLX5_SET(sbcm_reg, in, exc, exc); 354 + MLX5_SET(sbcm_reg, in, min_buff, min_buff); 355 + MLX5_SET(sbcm_reg, in, infi_max, infi_size); 356 + MLX5_SET(sbcm_reg, in, max_buff, max_buff); 357 + MLX5_SET(sbcm_reg, in, pool, pool_idx); 358 + 359 + return mlx5_core_access_reg(mdev, in, sizeof(in), out, sizeof(out), MLX5_REG_SBCM, 0, 1); 360 + } 361 + 290 362 /* buffer[i]: buffer that priority i mapped to */ 291 363 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer) 292 364 {
+6
drivers/net/ethernet/mellanox/mlx5/core/en/port.h
··· 57 57 bool mlx5e_ptys_ext_supported(struct mlx5_core_dev *mdev); 58 58 int mlx5e_port_query_pbmc(struct mlx5_core_dev *mdev, void *out); 59 59 int mlx5e_port_set_pbmc(struct mlx5_core_dev *mdev, void *in); 60 + int mlx5e_port_query_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir, 61 + u8 pool_idx, void *out, int size_out); 62 + int mlx5e_port_set_sbpr(struct mlx5_core_dev *mdev, u32 desc, u8 dir, 63 + u8 pool_idx, u32 infi_size, u32 size); 64 + int mlx5e_port_set_sbcm(struct mlx5_core_dev *mdev, u32 desc, u8 pg_buff_idx, 65 + u8 dir, u8 infi_size, u32 max_buff, u8 pool_idx); 60 66 int mlx5e_port_query_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); 61 67 int mlx5e_port_set_priority2buffer(struct mlx5_core_dev *mdev, u8 *buffer); 62 68
+218 -4
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
··· 73 73 port_buffer->buffer[i].lossy); 74 74 } 75 75 76 + port_buffer->headroom_size = total_used; 76 77 port_buffer->port_buffer_size = 77 78 MLX5_GET(pbmc_reg, out, port_buffer_size) * port_buff_cell_sz; 78 79 port_buffer->spare_buffer_size = ··· 87 86 return err; 88 87 } 89 88 89 + struct mlx5e_buffer_pool { 90 + u32 infi_size; 91 + u32 size; 92 + u32 buff_occupancy; 93 + }; 94 + 95 + static int mlx5e_port_query_pool(struct mlx5_core_dev *mdev, 96 + struct mlx5e_buffer_pool *buffer_pool, 97 + u32 desc, u8 dir, u8 pool_idx) 98 + { 99 + u32 out[MLX5_ST_SZ_DW(sbpr_reg)] = {}; 100 + int err; 101 + 102 + err = mlx5e_port_query_sbpr(mdev, desc, dir, pool_idx, out, 103 + sizeof(out)); 104 + if (err) 105 + return err; 106 + 107 + buffer_pool->size = MLX5_GET(sbpr_reg, out, size); 108 + buffer_pool->infi_size = MLX5_GET(sbpr_reg, out, infi_size); 109 + buffer_pool->buff_occupancy = MLX5_GET(sbpr_reg, out, buff_occupancy); 110 + 111 + return err; 112 + } 113 + 114 + enum { 115 + MLX5_INGRESS_DIR = 0, 116 + MLX5_EGRESS_DIR = 1, 117 + }; 118 + 119 + enum { 120 + MLX5_LOSSY_POOL = 0, 121 + MLX5_LOSSLESS_POOL = 1, 122 + }; 123 + 124 + /* No limit on usage of shared buffer pool (max_buff=0) */ 125 + #define MLX5_SB_POOL_NO_THRESHOLD 0 126 + /* Shared buffer pool usage threshold when calculated 127 + * dynamically in alpha units. alpha=13 is equivalent to 128 + * HW_alpha of [(1/128) * 2 ^ (alpha-1)] = 32, where HW_alpha 129 + * equates to the following portion of the shared buffer pool: 130 + * [32 / (1 + n * 32)] While *n* is the number of buffers 131 + * that are using the shared buffer pool. 
132 + */ 133 + #define MLX5_SB_POOL_THRESHOLD 13 134 + 135 + /* Shared buffer class management parameters */ 136 + struct mlx5_sbcm_params { 137 + u8 pool_idx; 138 + u8 max_buff; 139 + u8 infi_size; 140 + }; 141 + 142 + static const struct mlx5_sbcm_params sbcm_default = { 143 + .pool_idx = MLX5_LOSSY_POOL, 144 + .max_buff = MLX5_SB_POOL_NO_THRESHOLD, 145 + .infi_size = 0, 146 + }; 147 + 148 + static const struct mlx5_sbcm_params sbcm_lossy = { 149 + .pool_idx = MLX5_LOSSY_POOL, 150 + .max_buff = MLX5_SB_POOL_NO_THRESHOLD, 151 + .infi_size = 1, 152 + }; 153 + 154 + static const struct mlx5_sbcm_params sbcm_lossless = { 155 + .pool_idx = MLX5_LOSSLESS_POOL, 156 + .max_buff = MLX5_SB_POOL_THRESHOLD, 157 + .infi_size = 0, 158 + }; 159 + 160 + static const struct mlx5_sbcm_params sbcm_lossless_no_threshold = { 161 + .pool_idx = MLX5_LOSSLESS_POOL, 162 + .max_buff = MLX5_SB_POOL_NO_THRESHOLD, 163 + .infi_size = 1, 164 + }; 165 + 166 + /** 167 + * select_sbcm_params() - selects the shared buffer pool configuration 168 + * 169 + * @buffer: <input> port buffer to retrieve params of 170 + * @lossless_buff_count: <input> number of lossless buffers in total 171 + * 172 + * The selection is based on the following rules: 173 + * 1. If buffer size is 0, no shared buffer pool is used. 174 + * 2. If buffer is lossy, use lossy shared buffer pool. 175 + * 3. If there are more than 1 lossless buffers, use lossless shared buffer pool 176 + * with threshold. 177 + * 4. If there is only 1 lossless buffer, use lossless shared buffer pool 178 + * without threshold. 
179 + * 180 + * @return const struct mlx5_sbcm_params* selected values 181 + */ 182 + static const struct mlx5_sbcm_params * 183 + select_sbcm_params(struct mlx5e_bufferx_reg *buffer, u8 lossless_buff_count) 184 + { 185 + if (buffer->size == 0) 186 + return &sbcm_default; 187 + 188 + if (buffer->lossy) 189 + return &sbcm_lossy; 190 + 191 + if (lossless_buff_count > 1) 192 + return &sbcm_lossless; 193 + 194 + return &sbcm_lossless_no_threshold; 195 + } 196 + 197 + static int port_update_pool_cfg(struct mlx5_core_dev *mdev, 198 + struct mlx5e_port_buffer *port_buffer) 199 + { 200 + const struct mlx5_sbcm_params *p; 201 + u8 lossless_buff_count = 0; 202 + int err; 203 + int i; 204 + 205 + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 206 + return 0; 207 + 208 + for (i = 0; i < MLX5E_MAX_BUFFER; i++) 209 + lossless_buff_count += ((port_buffer->buffer[i].size) && 210 + (!(port_buffer->buffer[i].lossy))); 211 + 212 + for (i = 0; i < MLX5E_MAX_BUFFER; i++) { 213 + p = select_sbcm_params(&port_buffer->buffer[i], lossless_buff_count); 214 + err = mlx5e_port_set_sbcm(mdev, 0, i, 215 + MLX5_INGRESS_DIR, 216 + p->infi_size, 217 + p->max_buff, 218 + p->pool_idx); 219 + if (err) 220 + return err; 221 + } 222 + 223 + return 0; 224 + } 225 + 226 + static int port_update_shared_buffer(struct mlx5_core_dev *mdev, 227 + u32 current_headroom_size, 228 + u32 new_headroom_size) 229 + { 230 + struct mlx5e_buffer_pool lossless_ipool; 231 + struct mlx5e_buffer_pool lossy_epool; 232 + u32 lossless_ipool_size; 233 + u32 shared_buffer_size; 234 + u32 total_buffer_size; 235 + u32 lossy_epool_size; 236 + int err; 237 + 238 + if (!MLX5_CAP_GEN(mdev, sbcam_reg)) 239 + return 0; 240 + 241 + err = mlx5e_port_query_pool(mdev, &lossy_epool, 0, MLX5_EGRESS_DIR, 242 + MLX5_LOSSY_POOL); 243 + if (err) 244 + return err; 245 + 246 + err = mlx5e_port_query_pool(mdev, &lossless_ipool, 0, MLX5_INGRESS_DIR, 247 + MLX5_LOSSLESS_POOL); 248 + if (err) 249 + return err; 250 + 251 + total_buffer_size = current_headroom_size 
+ lossy_epool.size + 252 + lossless_ipool.size; 253 + shared_buffer_size = total_buffer_size - new_headroom_size; 254 + 255 + if (shared_buffer_size < 4) { 256 + pr_err("Requested port buffer is too large, not enough space left for shared buffer\n"); 257 + return -EINVAL; 258 + } 259 + 260 + /* Total shared buffer size is split in a ratio of 3:1 between 261 + * lossy and lossless pools respectively. 262 + */ 263 + lossy_epool_size = (shared_buffer_size / 4) * 3; 264 + lossless_ipool_size = shared_buffer_size / 4; 265 + 266 + mlx5e_port_set_sbpr(mdev, 0, MLX5_EGRESS_DIR, MLX5_LOSSY_POOL, 0, 267 + lossy_epool_size); 268 + mlx5e_port_set_sbpr(mdev, 0, MLX5_INGRESS_DIR, MLX5_LOSSLESS_POOL, 0, 269 + lossless_ipool_size); 270 + return 0; 271 + } 272 + 90 273 static int port_set_buffer(struct mlx5e_priv *priv, 91 274 struct mlx5e_port_buffer *port_buffer) 92 275 { 93 276 u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz; 94 277 struct mlx5_core_dev *mdev = priv->mdev; 95 278 int sz = MLX5_ST_SZ_BYTES(pbmc_reg); 279 + u32 new_headroom_size = 0; 280 + u32 current_headroom_size; 96 281 void *in; 97 282 int err; 98 283 int i; 284 + 285 + current_headroom_size = port_buffer->headroom_size; 99 286 100 287 in = kzalloc(sz, GFP_KERNEL); 101 288 if (!in) ··· 299 110 u64 xoff = port_buffer->buffer[i].xoff; 300 111 u64 xon = port_buffer->buffer[i].xon; 301 112 113 + new_headroom_size += size; 302 114 do_div(size, port_buff_cell_sz); 303 115 do_div(xoff, port_buff_cell_sz); 304 116 do_div(xon, port_buff_cell_sz); ··· 308 118 MLX5_SET(bufferx_reg, buffer, xoff_threshold, xoff); 309 119 MLX5_SET(bufferx_reg, buffer, xon_threshold, xon); 310 120 } 121 + 122 + new_headroom_size /= port_buff_cell_sz; 123 + current_headroom_size /= port_buff_cell_sz; 124 + err = port_update_shared_buffer(priv->mdev, current_headroom_size, 125 + new_headroom_size); 126 + if (err) 127 + return err; 128 + 129 + err = port_update_pool_cfg(priv->mdev, port_buffer); 130 + if (err) 131 + return err; 311 132 
312 133 err = mlx5e_port_set_pbmc(mdev, in); 313 134 out: ··· 375 174 376 175 /** 377 176 * update_buffer_lossy - Update buffer configuration based on pfc 177 + * @mdev: port function core device 378 178 * @max_mtu: netdev's max_mtu 379 179 * @pfc_en: <input> current pfc configuration 380 180 * @buffer: <input> current prio to buffer mapping ··· 394 192 * @return: 0 if no error, 395 193 * sets change to true if buffer configuration was modified. 396 194 */ 397 - static int update_buffer_lossy(unsigned int max_mtu, 195 + static int update_buffer_lossy(struct mlx5_core_dev *mdev, 196 + unsigned int max_mtu, 398 197 u8 pfc_en, u8 *buffer, u32 xoff, u16 port_buff_cell_sz, 399 198 struct mlx5e_port_buffer *port_buffer, 400 199 bool *change) ··· 432 229 } 433 230 434 231 if (changed) { 232 + err = port_update_pool_cfg(mdev, port_buffer); 233 + if (err) 234 + return err; 235 + 435 236 err = update_xoff_threshold(port_buffer, xoff, max_mtu, port_buff_cell_sz); 436 237 if (err) 437 238 return err; ··· 500 293 } 501 294 502 295 if (change & MLX5E_PORT_BUFFER_PFC) { 296 + mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n", 297 + __func__, pfc->pfc_en); 503 298 err = mlx5e_port_query_priority2buffer(priv->mdev, buffer); 504 299 if (err) 505 300 return err; 506 301 507 - err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff, port_buff_cell_sz, 508 - &port_buffer, &update_buffer); 302 + err = update_buffer_lossy(priv->mdev, max_mtu, pfc->pfc_en, buffer, xoff, 303 + port_buff_cell_sz, &port_buffer, 304 + &update_buffer); 509 305 if (err) 510 306 return err; 511 307 } 512 308 513 309 if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) { 514 310 update_prio2buffer = true; 311 + for (i = 0; i < MLX5E_MAX_BUFFER; i++) 312 + mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n", 313 + __func__, i, prio2buffer[i]); 314 + 515 315 err = fill_pfc_en(priv->mdev, &curr_pfc_en); 516 316 if (err) 517 317 return err; 518 318 519 - err = 
update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer, xoff, 319 + err = update_buffer_lossy(priv->mdev, max_mtu, curr_pfc_en, prio2buffer, xoff, 520 320 port_buff_cell_sz, &port_buffer, &update_buffer); 521 321 if (err) 522 322 return err;
+1
drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.h
··· 60 60 struct mlx5e_port_buffer { 61 61 u32 port_buffer_size; 62 62 u32 spare_buffer_size; 63 + u32 headroom_size; 63 64 struct mlx5e_bufferx_reg buffer[MLX5E_MAX_BUFFER]; 64 65 }; 65 66
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c
··· 28 28 int base_id; 29 29 int total_meters; 30 30 31 - unsigned long meters_map[0]; /* must be at the end of this struct */ 31 + unsigned long meters_map[]; /* must be at the end of this struct */ 32 32 }; 33 33 34 34 struct mlx5e_flow_meters {
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c
··· 365 365 for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) 366 366 accel_fs_tcp_destroy_table(fs, i); 367 367 368 - kvfree(accel_tcp); 368 + kfree(accel_tcp); 369 369 mlx5e_fs_set_accel_tcp(fs, NULL); 370 370 } 371 371 ··· 377 377 if (!MLX5_CAP_FLOWTABLE_NIC_RX(mlx5e_fs_get_mdev(fs), ft_field_support.outer_ip_version)) 378 378 return -EOPNOTSUPP; 379 379 380 - accel_tcp = kvzalloc(sizeof(*accel_tcp), GFP_KERNEL); 380 + accel_tcp = kzalloc(sizeof(*accel_tcp), GFP_KERNEL); 381 381 if (!accel_tcp) 382 382 return -ENOMEM; 383 383 mlx5e_fs_set_accel_tcp(fs, accel_tcp); ··· 397 397 err_destroy_tables: 398 398 while (--i >= 0) 399 399 accel_fs_tcp_destroy_table(fs, i); 400 - kvfree(accel_tcp); 400 + kfree(accel_tcp); 401 401 mlx5e_fs_set_accel_tcp(fs, NULL); 402 402 return err; 403 403 }
+22
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 2 // Copyright (c) 2019 Mellanox Technologies. 3 3 4 + #include <linux/debugfs.h> 4 5 #include "en.h" 5 6 #include "lib/mlx5.h" 6 7 #include "en_accel/ktls.h" ··· 178 177 destroy_workqueue(priv->tls->rx_wq); 179 178 } 180 179 180 + static void mlx5e_tls_debugfs_init(struct mlx5e_tls *tls, 181 + struct dentry *dfs_root) 182 + { 183 + if (IS_ERR_OR_NULL(dfs_root)) 184 + return; 185 + 186 + tls->debugfs.dfs = debugfs_create_dir("tls", dfs_root); 187 + } 188 + 181 189 int mlx5e_ktls_init(struct mlx5e_priv *priv) 182 190 { 183 191 struct mlx5e_tls *tls; ··· 199 189 return -ENOMEM; 200 190 201 191 priv->tls = tls; 192 + priv->tls->mdev = priv->mdev; 193 + 194 + mlx5e_tls_debugfs_init(tls, priv->dfs_root); 195 + 202 196 return 0; 203 197 } 204 198 205 199 void mlx5e_ktls_cleanup(struct mlx5e_priv *priv) 206 200 { 201 + struct mlx5e_tls *tls = priv->tls; 202 + 203 + if (!mlx5e_is_ktls_device(priv->mdev)) 204 + return; 205 + 206 + debugfs_remove_recursive(tls->debugfs.dfs); 207 + tls->debugfs.dfs = NULL; 208 + 207 209 kfree(priv->tls); 208 210 priv->tls = NULL; 209 211 }
+8
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.h
··· 4 4 #ifndef __MLX5E_KTLS_H__ 5 5 #define __MLX5E_KTLS_H__ 6 6 7 + #include <linux/debugfs.h> 7 8 #include <linux/tls.h> 8 9 #include <net/tls.h> 9 10 #include "en.h" ··· 73 72 atomic64_t rx_tls_del; 74 73 }; 75 74 75 + struct mlx5e_tls_debugfs { 76 + struct dentry *dfs; 77 + struct dentry *dfs_tx; 78 + }; 79 + 76 80 struct mlx5e_tls { 81 + struct mlx5_core_dev *mdev; 77 82 struct mlx5e_tls_sw_stats sw_stats; 78 83 struct workqueue_struct *rx_wq; 79 84 struct mlx5e_tls_tx_pool *tx_pool; 85 + struct mlx5e_tls_debugfs debugfs; 80 86 }; 81 87 82 88 int mlx5e_ktls_init(struct mlx5e_priv *priv);
+22
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB 2 2 // Copyright (c) 2019 Mellanox Technologies. 3 3 4 + #include <linux/debugfs.h> 4 5 #include "en_accel/ktls.h" 5 6 #include "en_accel/ktls_txrx.h" 6 7 #include "en_accel/ktls_utils.h" ··· 887 886 return false; 888 887 } 889 888 889 + static void mlx5e_tls_tx_debugfs_init(struct mlx5e_tls *tls, 890 + struct dentry *dfs_root) 891 + { 892 + if (IS_ERR_OR_NULL(dfs_root)) 893 + return; 894 + 895 + tls->debugfs.dfs_tx = debugfs_create_dir("tx", dfs_root); 896 + if (!tls->debugfs.dfs_tx) 897 + return; 898 + 899 + debugfs_create_size_t("pool_size", 0400, tls->debugfs.dfs_tx, 900 + &tls->tx_pool->size); 901 + } 902 + 890 903 int mlx5e_ktls_init_tx(struct mlx5e_priv *priv) 891 904 { 905 + struct mlx5e_tls *tls = priv->tls; 906 + 892 907 if (!mlx5e_is_ktls_tx(priv->mdev)) 893 908 return 0; 894 909 895 910 priv->tls->tx_pool = mlx5e_tls_tx_pool_init(priv->mdev, &priv->tls->sw_stats); 896 911 if (!priv->tls->tx_pool) 897 912 return -ENOMEM; 913 + 914 + mlx5e_tls_tx_debugfs_init(tls, tls->debugfs.dfs); 898 915 899 916 return 0; 900 917 } ··· 921 902 { 922 903 if (!mlx5e_is_ktls_tx(priv->mdev)) 923 904 return; 905 + 906 + debugfs_remove_recursive(priv->tls->debugfs.dfs_tx); 907 + priv->tls->debugfs.dfs_tx = NULL; 924 908 925 909 mlx5e_tls_tx_pool_cleanup(priv->tls->tx_pool); 926 910 priv->tls->tx_pool = NULL;
+21 -1
drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
··· 30 30 * SOFTWARE. 31 31 */ 32 32 33 + #include <linux/debugfs.h> 33 34 #include <linux/list.h> 34 35 #include <linux/ip.h> 35 36 #include <linux/ipv6.h> ··· 68 67 struct mlx5e_fs_udp *udp; 69 68 struct mlx5e_fs_any *any; 70 69 struct mlx5e_ptp_fs *ptp_fs; 70 + struct dentry *dfs_root; 71 71 }; 72 72 73 73 static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs, ··· 104 102 static inline int mlx5e_hash_l2(const u8 *addr) 105 103 { 106 104 return addr[5]; 105 + } 106 + 107 + struct dentry *mlx5e_fs_get_debugfs_root(struct mlx5e_flow_steering *fs) 108 + { 109 + return fs->dfs_root; 107 110 } 108 111 109 112 static void mlx5e_add_l2_to_hash(struct hlist_head *hash, const u8 *addr) ··· 1436 1429 static void mlx5e_fs_ethtool_free(struct mlx5e_flow_steering *fs) { } 1437 1430 #endif 1438 1431 1432 + static void mlx5e_fs_debugfs_init(struct mlx5e_flow_steering *fs, 1433 + struct dentry *dfs_root) 1434 + { 1435 + if (IS_ERR_OR_NULL(dfs_root)) 1436 + return; 1437 + 1438 + fs->dfs_root = debugfs_create_dir("fs", dfs_root); 1439 + } 1440 + 1439 1441 struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, 1440 1442 struct mlx5_core_dev *mdev, 1441 - bool state_destroy) 1443 + bool state_destroy, 1444 + struct dentry *dfs_root) 1442 1445 { 1443 1446 struct mlx5e_flow_steering *fs; 1444 1447 int err; ··· 1475 1458 if (err) 1476 1459 goto err_free_tc; 1477 1460 1461 + mlx5e_fs_debugfs_init(fs, dfs_root); 1462 + 1478 1463 return fs; 1479 1464 err_free_tc: 1480 1465 mlx5e_fs_tc_free(fs); ··· 1490 1471 1491 1472 void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs) 1492 1473 { 1474 + debugfs_remove_recursive(fs->dfs_root); 1493 1475 mlx5e_fs_ethtool_free(fs); 1494 1476 mlx5e_fs_tc_free(fs); 1495 1477 mlx5e_fs_vlan_free(fs);
+8 -1
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 35 35 #include <net/vxlan.h> 36 36 #include <net/geneve.h> 37 37 #include <linux/bpf.h> 38 + #include <linux/debugfs.h> 38 39 #include <linux/if_bridge.h> 39 40 #include <linux/filter.h> 40 41 #include <net/page_pool.h> ··· 5231 5230 mlx5e_timestamp_init(priv); 5232 5231 5233 5232 fs = mlx5e_fs_init(priv->profile, mdev, 5234 - !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); 5233 + !test_bit(MLX5E_STATE_DESTROYING, &priv->state), 5234 + priv->dfs_root); 5235 5235 if (!fs) { 5236 5236 err = -ENOMEM; 5237 5237 mlx5_core_err(mdev, "FS initialization failed, %d\n", err); ··· 5933 5931 priv->profile = profile; 5934 5932 priv->ppriv = NULL; 5935 5933 5934 + priv->dfs_root = debugfs_create_dir("nic", 5935 + mlx5_debugfs_get_dev_root(priv->mdev)); 5936 + 5936 5937 err = mlx5e_devlink_port_register(priv); 5937 5938 if (err) { 5938 5939 mlx5_core_err(mdev, "mlx5e_devlink_port_register failed, %d\n", err); ··· 5973 5968 err_devlink_cleanup: 5974 5969 mlx5e_devlink_port_unregister(priv); 5975 5970 err_destroy_netdev: 5971 + debugfs_remove_recursive(priv->dfs_root); 5976 5972 mlx5e_destroy_netdev(priv); 5977 5973 return err; 5978 5974 } ··· 5988 5982 mlx5e_suspend(adev, state); 5989 5983 priv->profile->cleanup(priv); 5990 5984 mlx5e_devlink_port_unregister(priv); 5985 + debugfs_remove_recursive(priv->dfs_root); 5991 5986 mlx5e_destroy_netdev(priv); 5992 5987 } 5993 5988
+6 -3
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 788 788 { 789 789 struct mlx5e_priv *priv = netdev_priv(netdev); 790 790 791 - priv->fs = mlx5e_fs_init(priv->profile, mdev, 792 - !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); 791 + priv->fs = 792 + mlx5e_fs_init(priv->profile, mdev, 793 + !test_bit(MLX5E_STATE_DESTROYING, &priv->state), 794 + priv->dfs_root); 793 795 if (!priv->fs) { 794 796 netdev_err(priv->netdev, "FS allocation failed\n"); 795 797 return -ENOMEM; ··· 809 807 struct mlx5e_priv *priv = netdev_priv(netdev); 810 808 811 809 priv->fs = mlx5e_fs_init(priv->profile, mdev, 812 - !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); 810 + !test_bit(MLX5E_STATE_DESTROYING, &priv->state), 811 + priv->dfs_root); 813 812 if (!priv->fs) { 814 813 netdev_err(priv->netdev, "FS allocation failed\n"); 815 814 return -ENOMEM;
+154 -15
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 71 71 #define MLX5E_TC_TABLE_NUM_GROUPS 4 72 72 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18) 73 73 74 + struct mlx5e_hairpin_params { 75 + struct mlx5_core_dev *mdev; 76 + u32 num_queues; 77 + u32 queue_size; 78 + }; 79 + 74 80 struct mlx5e_tc_table { 75 81 /* Protects the dynamic assignment of the t parameter 76 82 * which is the nic tc root table. ··· 99 93 100 94 struct mlx5_tc_ct_priv *ct; 101 95 struct mapping_ctx *mapping; 96 + struct mlx5e_hairpin_params hairpin_params; 97 + struct dentry *dfs_root; 102 98 }; 103 99 104 100 struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { ··· 1024 1016 return 0; 1025 1017 } 1026 1018 1019 + static int debugfs_hairpin_queues_set(void *data, u64 val) 1020 + { 1021 + struct mlx5e_hairpin_params *hp = data; 1022 + 1023 + if (!val) { 1024 + mlx5_core_err(hp->mdev, 1025 + "Number of hairpin queues must be > 0\n"); 1026 + return -EINVAL; 1027 + } 1028 + 1029 + hp->num_queues = val; 1030 + 1031 + return 0; 1032 + } 1033 + 1034 + static int debugfs_hairpin_queues_get(void *data, u64 *val) 1035 + { 1036 + struct mlx5e_hairpin_params *hp = data; 1037 + 1038 + *val = hp->num_queues; 1039 + 1040 + return 0; 1041 + } 1042 + DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queues, debugfs_hairpin_queues_get, 1043 + debugfs_hairpin_queues_set, "%llu\n"); 1044 + 1045 + static int debugfs_hairpin_queue_size_set(void *data, u64 val) 1046 + { 1047 + struct mlx5e_hairpin_params *hp = data; 1048 + 1049 + if (val > BIT(MLX5_CAP_GEN(hp->mdev, log_max_hairpin_num_packets))) { 1050 + mlx5_core_err(hp->mdev, 1051 + "Invalid hairpin queue size, must be <= %lu\n", 1052 + BIT(MLX5_CAP_GEN(hp->mdev, 1053 + log_max_hairpin_num_packets))); 1054 + return -EINVAL; 1055 + } 1056 + 1057 + hp->queue_size = roundup_pow_of_two(val); 1058 + 1059 + return 0; 1060 + } 1061 + 1062 + static int debugfs_hairpin_queue_size_get(void *data, u64 *val) 1063 + { 1064 + struct mlx5e_hairpin_params *hp = data; 1065 + 1066 + *val = hp->queue_size; 1067 + 1068 + 
return 0; 1069 + } 1070 + DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_queue_size, 1071 + debugfs_hairpin_queue_size_get, 1072 + debugfs_hairpin_queue_size_set, "%llu\n"); 1073 + 1074 + static int debugfs_hairpin_num_active_get(void *data, u64 *val) 1075 + { 1076 + struct mlx5e_tc_table *tc = data; 1077 + struct mlx5e_hairpin_entry *hpe; 1078 + u32 cnt = 0; 1079 + u32 bkt; 1080 + 1081 + mutex_lock(&tc->hairpin_tbl_lock); 1082 + hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist) 1083 + cnt++; 1084 + mutex_unlock(&tc->hairpin_tbl_lock); 1085 + 1086 + *val = cnt; 1087 + 1088 + return 0; 1089 + } 1090 + DEFINE_DEBUGFS_ATTRIBUTE(fops_hairpin_num_active, 1091 + debugfs_hairpin_num_active_get, NULL, "%llu\n"); 1092 + 1093 + static int debugfs_hairpin_table_dump_show(struct seq_file *file, void *priv) 1094 + 1095 + { 1096 + struct mlx5e_tc_table *tc = file->private; 1097 + struct mlx5e_hairpin_entry *hpe; 1098 + u32 bkt; 1099 + 1100 + mutex_lock(&tc->hairpin_tbl_lock); 1101 + hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist) 1102 + seq_printf(file, "Hairpin peer_vhca_id %u prio %u refcnt %u\n", 1103 + hpe->peer_vhca_id, hpe->prio, 1104 + refcount_read(&hpe->refcnt)); 1105 + mutex_unlock(&tc->hairpin_tbl_lock); 1106 + 1107 + return 0; 1108 + } 1109 + DEFINE_SHOW_ATTRIBUTE(debugfs_hairpin_table_dump); 1110 + 1111 + static void mlx5e_tc_debugfs_init(struct mlx5e_tc_table *tc, 1112 + struct dentry *dfs_root) 1113 + { 1114 + if (IS_ERR_OR_NULL(dfs_root)) 1115 + return; 1116 + 1117 + tc->dfs_root = debugfs_create_dir("tc", dfs_root); 1118 + if (!tc->dfs_root) 1119 + return; 1120 + 1121 + debugfs_create_file("hairpin_num_queues", 0644, tc->dfs_root, 1122 + &tc->hairpin_params, &fops_hairpin_queues); 1123 + debugfs_create_file("hairpin_queue_size", 0644, tc->dfs_root, 1124 + &tc->hairpin_params, &fops_hairpin_queue_size); 1125 + debugfs_create_file("hairpin_num_active", 0444, tc->dfs_root, tc, 1126 + &fops_hairpin_num_active); 1127 + debugfs_create_file("hairpin_table_dump", 
0444, tc->dfs_root, tc, 1128 + &debugfs_hairpin_table_dump_fops); 1129 + } 1130 + 1131 + static void 1132 + mlx5e_hairpin_params_init(struct mlx5e_hairpin_params *hairpin_params, 1133 + struct mlx5_core_dev *mdev) 1134 + { 1135 + u64 link_speed64; 1136 + u32 link_speed; 1137 + 1138 + hairpin_params->mdev = mdev; 1139 + /* set hairpin pair per each 50Gbs share of the link */ 1140 + mlx5e_port_max_linkspeed(mdev, &link_speed); 1141 + link_speed = max_t(u32, link_speed, 50000); 1142 + link_speed64 = link_speed; 1143 + do_div(link_speed64, 50000); 1144 + hairpin_params->num_queues = link_speed64; 1145 + 1146 + hairpin_params->queue_size = 1147 + BIT(min_t(u32, 16 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(mdev), 1148 + MLX5_CAP_GEN(mdev, log_max_hairpin_num_packets))); 1149 + } 1150 + 1027 1151 static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, 1028 1152 struct mlx5e_tc_flow *flow, 1029 1153 struct mlx5e_tc_flow_parse_attr *parse_attr, ··· 1167 1027 struct mlx5_core_dev *peer_mdev; 1168 1028 struct mlx5e_hairpin_entry *hpe; 1169 1029 struct mlx5e_hairpin *hp; 1170 - u64 link_speed64; 1171 - u32 link_speed; 1172 1030 u8 match_prio; 1173 1031 u16 peer_id; 1174 1032 int err; ··· 1219 1081 hash_hairpin_info(peer_id, match_prio)); 1220 1082 mutex_unlock(&tc->hairpin_tbl_lock); 1221 1083 1222 - params.log_data_size = clamp_t(u8, 16, 1223 - MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz), 1224 - MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz)); 1225 - params.log_num_packets = params.log_data_size - 1226 - MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev); 1227 - params.log_num_packets = min_t(u8, params.log_num_packets, 1228 - MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets)); 1084 + params.log_num_packets = ilog2(tc->hairpin_params.queue_size); 1085 + params.log_data_size = 1086 + clamp_t(u32, 1087 + params.log_num_packets + 1088 + MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev), 1089 + MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz), 1090 + MLX5_CAP_GEN(priv->mdev, 
log_max_hairpin_wq_data_sz)); 1229 1091 1230 1092 params.q_counter = priv->q_counter; 1231 - /* set hairpin pair per each 50Gbs share of the link */ 1232 - mlx5e_port_max_linkspeed(priv->mdev, &link_speed); 1233 - link_speed = max_t(u32, link_speed, 50000); 1234 - link_speed64 = link_speed; 1235 - do_div(link_speed64, 50000); 1236 - params.num_channels = link_speed64; 1093 + params.num_channels = tc->hairpin_params.num_queues; 1237 1094 1238 1095 hp = mlx5e_hairpin_create(priv, &params, peer_ifindex); 1239 1096 hpe->hp = hp; ··· 5350 5217 tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr, 5351 5218 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); 5352 5219 5220 + mlx5e_hairpin_params_init(&tc->hairpin_params, dev); 5221 + 5353 5222 tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; 5354 5223 err = register_netdevice_notifier_dev_net(priv->netdev, 5355 5224 &tc->netdevice_nb, ··· 5361 5226 mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n"); 5362 5227 goto err_reg; 5363 5228 } 5229 + 5230 + mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs)); 5364 5231 5365 5232 return 0; 5366 5233 ··· 5391 5254 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) 5392 5255 { 5393 5256 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); 5257 + 5258 + debugfs_remove_recursive(tc->dfs_root); 5394 5259 5395 5260 if (tc->netdevice_nb.notifier_call) 5396 5261 unregister_netdevice_notifier_dev_net(priv->netdev,
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
··· 1488 1488 void *hca_caps; 1489 1489 int err; 1490 1490 1491 - if (!mlx5_core_is_ecpf(dev)) { 1491 + if (!mlx5_core_is_ecpf(dev) || mlx5_core_is_management_pf(dev)) { 1492 1492 *max_sfs = 0; 1493 1493 return 0; 1494 1494 }
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 374 374 int err; 375 375 376 376 priv->fs = mlx5e_fs_init(priv->profile, mdev, 377 - !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); 377 + !test_bit(MLX5E_STATE_DESTROYING, &priv->state), 378 + priv->dfs_root); 378 379 if (!priv->fs) { 379 380 netdev_err(priv->netdev, "FS allocation failed\n"); 380 381 return -ENOMEM;
+1 -4
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c
··· 724 724 struct mlx5dr_action *action) 725 725 { 726 726 struct postsend_info send_info = {}; 727 - int ret; 728 727 729 728 send_info.write.addr = (uintptr_t)action->rewrite->data; 730 729 send_info.write.length = action->rewrite->num_of_actions * ··· 733 734 mlx5dr_icm_pool_get_chunk_mr_addr(action->rewrite->chunk); 734 735 send_info.rkey = mlx5dr_icm_pool_get_chunk_rkey(action->rewrite->chunk); 735 736 736 - ret = dr_postsend_icm_data(dmn, &send_info); 737 - 738 - return ret; 737 + return dr_postsend_icm_data(dmn, &send_info); 739 738 } 740 739 741 740 static int dr_modify_qp_rst2init(struct mlx5_core_dev *mdev,
+8
include/linux/mlx5/driver.h
··· 100 100 }; 101 101 102 102 enum { 103 + MLX5_REG_SBPR = 0xb001, 104 + MLX5_REG_SBCM = 0xb002, 103 105 MLX5_REG_QPTS = 0x4002, 104 106 MLX5_REG_QETCR = 0x4005, 105 107 MLX5_REG_QTCT = 0x400a, ··· 310 308 struct workqueue_struct *wq; 311 309 struct semaphore sem; 312 310 struct semaphore pages_sem; 311 + struct semaphore throttle_sem; 313 312 int mode; 314 313 u16 allowed_opcode; 315 314 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; ··· 1201 1198 static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev) 1202 1199 { 1203 1200 return dev->coredev_type == MLX5_COREDEV_VF; 1201 + } 1202 + 1203 + static inline bool mlx5_core_is_management_pf(const struct mlx5_core_dev *dev) 1204 + { 1205 + return MLX5_CAP_GEN(dev, num_ports) == 1 && !MLX5_CAP_GEN(dev, native_port_num); 1204 1206 } 1205 1207 1206 1208 static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
+61
include/linux/mlx5/mlx5_ifc.h
··· 11000 11000 u8 reserved_at_2e0[0x80]; 11001 11001 }; 11002 11002 11003 + struct mlx5_ifc_sbpr_reg_bits { 11004 + u8 desc[0x1]; 11005 + u8 snap[0x1]; 11006 + u8 reserved_at_2[0x4]; 11007 + u8 dir[0x2]; 11008 + u8 reserved_at_8[0x14]; 11009 + u8 pool[0x4]; 11010 + 11011 + u8 infi_size[0x1]; 11012 + u8 reserved_at_21[0x7]; 11013 + u8 size[0x18]; 11014 + 11015 + u8 reserved_at_40[0x1c]; 11016 + u8 mode[0x4]; 11017 + 11018 + u8 reserved_at_60[0x8]; 11019 + u8 buff_occupancy[0x18]; 11020 + 11021 + u8 clr[0x1]; 11022 + u8 reserved_at_81[0x7]; 11023 + u8 max_buff_occupancy[0x18]; 11024 + 11025 + u8 reserved_at_a0[0x8]; 11026 + u8 ext_buff_occupancy[0x18]; 11027 + }; 11028 + 11029 + struct mlx5_ifc_sbcm_reg_bits { 11030 + u8 desc[0x1]; 11031 + u8 snap[0x1]; 11032 + u8 reserved_at_2[0x6]; 11033 + u8 local_port[0x8]; 11034 + u8 pnat[0x2]; 11035 + u8 pg_buff[0x6]; 11036 + u8 reserved_at_18[0x6]; 11037 + u8 dir[0x2]; 11038 + 11039 + u8 reserved_at_20[0x1f]; 11040 + u8 exc[0x1]; 11041 + 11042 + u8 reserved_at_40[0x40]; 11043 + 11044 + u8 reserved_at_80[0x8]; 11045 + u8 buff_occupancy[0x18]; 11046 + 11047 + u8 clr[0x1]; 11048 + u8 reserved_at_a1[0x7]; 11049 + u8 max_buff_occupancy[0x18]; 11050 + 11051 + u8 reserved_at_c0[0x8]; 11052 + u8 min_buff[0x18]; 11053 + 11054 + u8 infi_max[0x1]; 11055 + u8 reserved_at_e1[0x7]; 11056 + u8 max_buff[0x18]; 11057 + 11058 + u8 reserved_at_100[0x20]; 11059 + 11060 + u8 reserved_at_120[0x1c]; 11061 + u8 pool[0x4]; 11062 + }; 11063 + 11003 11064 struct mlx5_ifc_qtct_reg_bits { 11004 11065 u8 reserved_at_0[0x8]; 11005 11066 u8 port_number[0x8];