Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5e-updates-2018-07-18-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-07-18

This series includes update for mlx5e net device driver.

1) From Feras Daoud, added support for firmware log tracing,
first by introducing the firmware API needed for the task, and then,
for each PF, doing the following:
1- Allocate memory for the tracer strings database and read it from the FW to the SW.
2- Allocate and dma map tracer buffers.

Traces that will be written into the buffer will be parsed as a group
of one or more traces, referred to as trace message. The trace message
represents a C-like printf string.
Once a new trace is available, the FW will generate an event indicating that
new trace(s) are available, and the driver will parse them and dump them
using tracepoint event tracing.

Enable mlx5 fw tracing by:
echo 1 > /sys/kernel/debug/tracing/events/mlx5/mlx5_fw/enable

Read traces by:
cat /sys/kernel/debug/tracing/trace

2) From Roi Dayan, Remove redundant WARN when we cannot find neigh entry

3) From Jianbo Liu, TC double vlan support
- Support offloading tc double vlan headers match
- Support offloading double vlan push/pop tc actions

4) From Boris, re-visit UDP GSO: remove the splitting of UDP_GSO_L4 packets
in the driver, and expose UDP_GSO_L4 as a PARTIAL_GSO feature.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+1654 -269
+1 -1
drivers/infiniband/hw/mlx5/Kconfig
··· 1 1 config MLX5_INFINIBAND 2 - tristate "Mellanox Connect-IB HCA support" 2 + tristate "Mellanox 5th generation network adapters (ConnectX series) support" 3 3 depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE 4 4 depends on INFINIBAND_USER_ACCESS || INFINIBAND_USER_ACCESS=n 5 5 ---help---
+15
drivers/infiniband/hw/mlx5/cmd.c
··· 32 32 33 33 #include "cmd.h" 34 34 35 + int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey) 36 + { 37 + u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0}; 38 + u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0}; 39 + int err; 40 + 41 + MLX5_SET(query_special_contexts_in, in, opcode, 42 + MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); 43 + err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 44 + if (!err) 45 + *mkey = MLX5_GET(query_special_contexts_out, out, 46 + dump_fill_mkey); 47 + return err; 48 + } 49 + 35 50 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey) 36 51 { 37 52 u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
+1
drivers/infiniband/hw/mlx5/cmd.h
··· 37 37 #include <linux/kernel.h> 38 38 #include <linux/mlx5/driver.h> 39 39 40 + int mlx5_cmd_dump_fill_mkey(struct mlx5_core_dev *dev, u32 *mkey); 40 41 int mlx5_cmd_null_mkey(struct mlx5_core_dev *dev, u32 *null_mkey); 41 42 int mlx5_cmd_query_cong_params(struct mlx5_core_dev *dev, int cong_point, 42 43 void *out, int out_size);
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/Kconfig
··· 3 3 # 4 4 5 5 config MLX5_CORE 6 - tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver" 6 + tristate "Mellanox 5th generation network adapters (ConnectX series) core driver" 7 7 depends on MAY_USE_DEVLINK 8 8 depends on PCI 9 9 imply PTP_1588_CLOCK ··· 27 27 sandbox-specific client drivers. 28 28 29 29 config MLX5_CORE_EN 30 - bool "Mellanox Technologies ConnectX-4 Ethernet support" 30 + bool "Mellanox 5th generation network adapters (ConnectX series) Ethernet support" 31 31 depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE 32 32 depends on IPV6=y || IPV6=n || MLX5_CORE=m 33 33 select PAGE_POOL ··· 69 69 If unsure, set to Y 70 70 71 71 config MLX5_CORE_IPOIB 72 - bool "Mellanox Technologies ConnectX-4 IPoIB offloads support" 72 + bool "Mellanox 5th generation network adapters (connectX series) IPoIB offloads support" 73 73 depends on MLX5_CORE_EN 74 74 default n 75 75 ---help---
+3 -3
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 6 6 health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ 7 7 mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ 8 8 fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ 9 - diag/fs_tracepoint.o 9 + diag/fs_tracepoint.o diag/fw_tracer.o 10 10 11 11 mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o 12 12 ··· 14 14 fpga/ipsec.o fpga/tls.o 15 15 16 16 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ 17 - en_tx.o en_rx.o en_dim.o en_txrx.o en_accel/rxtx.o en_stats.o \ 18 - vxlan.o en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o 17 + en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ 18 + en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o 19 19 20 20 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o 21 21
+30 -23
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 278 278 case MLX5_CMD_OP_DESTROY_PSV: 279 279 case MLX5_CMD_OP_DESTROY_SRQ: 280 280 case MLX5_CMD_OP_DESTROY_XRC_SRQ: 281 + case MLX5_CMD_OP_DESTROY_XRQ: 281 282 case MLX5_CMD_OP_DESTROY_DCT: 282 283 case MLX5_CMD_OP_DEALLOC_Q_COUNTER: 283 284 case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT: ··· 311 310 case MLX5_CMD_OP_DEALLOC_ENCAP_HEADER: 312 311 case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT: 313 312 case MLX5_CMD_OP_FPGA_DESTROY_QP: 313 + case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: 314 314 return MLX5_CMD_STAT_OK; 315 315 316 316 case MLX5_CMD_OP_QUERY_HCA_CAP: ··· 348 346 case MLX5_CMD_OP_CREATE_XRC_SRQ: 349 347 case MLX5_CMD_OP_QUERY_XRC_SRQ: 350 348 case MLX5_CMD_OP_ARM_XRC_SRQ: 349 + case MLX5_CMD_OP_CREATE_XRQ: 350 + case MLX5_CMD_OP_QUERY_XRQ: 351 + case MLX5_CMD_OP_ARM_XRQ: 351 352 case MLX5_CMD_OP_CREATE_DCT: 352 353 case MLX5_CMD_OP_DRAIN_DCT: 353 354 case MLX5_CMD_OP_QUERY_DCT: ··· 432 427 case MLX5_CMD_OP_FPGA_MODIFY_QP: 433 428 case MLX5_CMD_OP_FPGA_QUERY_QP: 434 429 case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS: 430 + case MLX5_CMD_OP_CREATE_GENERAL_OBJECT: 435 431 *status = MLX5_DRIVER_STATUS_ABORTED; 436 432 *synd = MLX5_DRIVER_SYND; 437 433 return -EIO; ··· 458 452 MLX5_COMMAND_STR_CASE(SET_HCA_CAP); 459 453 MLX5_COMMAND_STR_CASE(QUERY_ISSI); 460 454 MLX5_COMMAND_STR_CASE(SET_ISSI); 455 + MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION); 461 456 MLX5_COMMAND_STR_CASE(CREATE_MKEY); 462 457 MLX5_COMMAND_STR_CASE(QUERY_MKEY); 463 458 MLX5_COMMAND_STR_CASE(DESTROY_MKEY); ··· 606 599 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP); 607 600 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS); 608 601 MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP); 602 + MLX5_COMMAND_STR_CASE(CREATE_XRQ); 603 + MLX5_COMMAND_STR_CASE(DESTROY_XRQ); 604 + MLX5_COMMAND_STR_CASE(QUERY_XRQ); 605 + MLX5_COMMAND_STR_CASE(ARM_XRQ); 606 + MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT); 607 + MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT); 609 608 default: return "unknown command opcode"; 610 609 } 611 610 } ··· 690 677 
691 678 struct mlx5_ifc_mbox_in_bits { 692 679 u8 opcode[0x10]; 693 - u8 reserved_at_10[0x10]; 680 + u8 uid[0x10]; 694 681 695 682 u8 reserved_at_20[0x10]; 696 683 u8 op_mod[0x10]; ··· 710 697 u8 status; 711 698 u16 opcode; 712 699 u16 op_mod; 700 + u16 uid; 713 701 714 702 mlx5_cmd_mbox_status(out, &status, &syndrome); 715 703 if (!status) ··· 718 704 719 705 opcode = MLX5_GET(mbox_in, in, opcode); 720 706 op_mod = MLX5_GET(mbox_in, in, op_mod); 707 + uid = MLX5_GET(mbox_in, in, uid); 721 708 722 - mlx5_core_err(dev, 709 + if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY) 710 + mlx5_core_err_rl(dev, 711 + "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", 712 + mlx5_command_str(opcode), opcode, op_mod, 713 + cmd_status_str(status), status, syndrome); 714 + else 715 + mlx5_core_dbg(dev, 723 716 "%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n", 724 717 mlx5_command_str(opcode), 725 718 opcode, op_mod, ··· 1043 1022 if (!dbg->in_msg || !dbg->out_msg) 1044 1023 return -ENOMEM; 1045 1024 1046 - if (copy_from_user(lbuf, buf, sizeof(lbuf))) 1025 + if (count < sizeof(lbuf) - 1) 1026 + return -EINVAL; 1027 + 1028 + if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1)) 1047 1029 return -EFAULT; 1048 1030 1049 1031 lbuf[sizeof(lbuf) - 1] = 0; ··· 1250 1226 { 1251 1227 struct mlx5_core_dev *dev = filp->private_data; 1252 1228 struct mlx5_cmd_debug *dbg = &dev->cmd.dbg; 1253 - int copy; 1254 - 1255 - if (*pos) 1256 - return 0; 1257 1229 1258 1230 if (!dbg->out_msg) 1259 1231 return -ENOMEM; 1260 1232 1261 - copy = min_t(int, count, dbg->outlen); 1262 - if (copy_to_user(buf, dbg->out_msg, copy)) 1263 - return -EFAULT; 1264 - 1265 - *pos += copy; 1266 - 1267 - return copy; 1233 + return simple_read_from_buffer(buf, count, pos, dbg->out_msg, 1234 + dbg->outlen); 1268 1235 } 1269 1236 1270 1237 static const struct file_operations dfops = { ··· 1273 1258 char outlen[8]; 1274 1259 int err; 1275 1260 1276 - if (*pos) 1277 - return 0; 1278 - 1279 1261 err = 
snprintf(outlen, sizeof(outlen), "%d", dbg->outlen); 1280 1262 if (err < 0) 1281 1263 return err; 1282 1264 1283 - if (copy_to_user(buf, &outlen, err)) 1284 - return -EFAULT; 1285 - 1286 - *pos += err; 1287 - 1288 - return err; 1265 + return simple_read_from_buffer(buf, count, pos, outlen, err); 1289 1266 } 1290 1267 1291 1268 static ssize_t outlen_write(struct file *filp, const char __user *buf,
+2 -20
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
··· 150 150 int ret; 151 151 char tbuf[22]; 152 152 153 - if (*pos) 154 - return 0; 155 - 156 153 stats = filp->private_data; 157 154 spin_lock_irq(&stats->lock); 158 155 if (stats->n) 159 156 field = div64_u64(stats->sum, stats->n); 160 157 spin_unlock_irq(&stats->lock); 161 158 ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field); 162 - if (ret > 0) { 163 - if (copy_to_user(buf, tbuf, ret)) 164 - return -EFAULT; 165 - } 166 - 167 - *pos += ret; 168 - return ret; 159 + return simple_read_from_buffer(buf, count, pos, tbuf, ret); 169 160 } 170 161 171 162 static ssize_t average_write(struct file *filp, const char __user *buf, ··· 433 442 u64 field; 434 443 int ret; 435 444 436 - if (*pos) 437 - return 0; 438 - 439 445 desc = filp->private_data; 440 446 d = (void *)(desc - desc->i) - sizeof(*d); 441 447 switch (d->type) { ··· 458 470 else 459 471 ret = snprintf(tbuf, sizeof(tbuf), "0x%llx\n", field); 460 472 461 - if (ret > 0) { 462 - if (copy_to_user(buf, tbuf, ret)) 463 - return -EFAULT; 464 - } 465 - 466 - *pos += ret; 467 - return ret; 473 + return simple_read_from_buffer(buf, count, pos, tbuf, ret); 468 474 } 469 475 470 476 static const struct file_operations fops = {
+2
drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.h
··· 138 138 {MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\ 139 139 {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH, "VLAN_PUSH"},\ 140 140 {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP, "VLAN_POP"},\ 141 + {MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2, "VLAN_PUSH_2"},\ 142 + {MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2, "VLAN_POP_2"},\ 141 143 {MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"} 142 144 143 145 TRACE_EVENT(mlx5_fs_set_fte,
+947
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
··· 1 + /* 2 + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 
31 + */ 32 + #define CREATE_TRACE_POINTS 33 + #include "fw_tracer.h" 34 + #include "fw_tracer_tracepoint.h" 35 + 36 + static int mlx5_query_mtrc_caps(struct mlx5_fw_tracer *tracer) 37 + { 38 + u32 *string_db_base_address_out = tracer->str_db.base_address_out; 39 + u32 *string_db_size_out = tracer->str_db.size_out; 40 + struct mlx5_core_dev *dev = tracer->dev; 41 + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; 42 + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; 43 + void *mtrc_cap_sp; 44 + int err, i; 45 + 46 + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), 47 + MLX5_REG_MTRC_CAP, 0, 0); 48 + if (err) { 49 + mlx5_core_warn(dev, "FWTracer: Error reading tracer caps %d\n", 50 + err); 51 + return err; 52 + } 53 + 54 + if (!MLX5_GET(mtrc_cap, out, trace_to_memory)) { 55 + mlx5_core_dbg(dev, "FWTracer: Device does not support logging traces to memory\n"); 56 + return -ENOTSUPP; 57 + } 58 + 59 + tracer->trc_ver = MLX5_GET(mtrc_cap, out, trc_ver); 60 + tracer->str_db.first_string_trace = 61 + MLX5_GET(mtrc_cap, out, first_string_trace); 62 + tracer->str_db.num_string_trace = 63 + MLX5_GET(mtrc_cap, out, num_string_trace); 64 + tracer->str_db.num_string_db = MLX5_GET(mtrc_cap, out, num_string_db); 65 + tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); 66 + 67 + for (i = 0; i < tracer->str_db.num_string_db; i++) { 68 + mtrc_cap_sp = MLX5_ADDR_OF(mtrc_cap, out, string_db_param[i]); 69 + string_db_base_address_out[i] = MLX5_GET(mtrc_string_db_param, 70 + mtrc_cap_sp, 71 + string_db_base_address); 72 + string_db_size_out[i] = MLX5_GET(mtrc_string_db_param, 73 + mtrc_cap_sp, string_db_size); 74 + } 75 + 76 + return err; 77 + } 78 + 79 + static int mlx5_set_mtrc_caps_trace_owner(struct mlx5_fw_tracer *tracer, 80 + u32 *out, u32 out_size, 81 + u8 trace_owner) 82 + { 83 + struct mlx5_core_dev *dev = tracer->dev; 84 + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; 85 + 86 + MLX5_SET(mtrc_cap, in, trace_owner, trace_owner); 87 + 88 + return mlx5_core_access_reg(dev, in, 
sizeof(in), out, out_size, 89 + MLX5_REG_MTRC_CAP, 0, 1); 90 + } 91 + 92 + static int mlx5_fw_tracer_ownership_acquire(struct mlx5_fw_tracer *tracer) 93 + { 94 + struct mlx5_core_dev *dev = tracer->dev; 95 + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; 96 + int err; 97 + 98 + err = mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out), 99 + MLX5_FW_TRACER_ACQUIRE_OWNERSHIP); 100 + if (err) { 101 + mlx5_core_warn(dev, "FWTracer: Acquire tracer ownership failed %d\n", 102 + err); 103 + return err; 104 + } 105 + 106 + tracer->owner = !!MLX5_GET(mtrc_cap, out, trace_owner); 107 + 108 + if (!tracer->owner) 109 + return -EBUSY; 110 + 111 + return 0; 112 + } 113 + 114 + static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer) 115 + { 116 + u32 out[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; 117 + 118 + mlx5_set_mtrc_caps_trace_owner(tracer, out, sizeof(out), 119 + MLX5_FW_TRACER_RELEASE_OWNERSHIP); 120 + tracer->owner = false; 121 + } 122 + 123 + static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer) 124 + { 125 + struct mlx5_core_dev *dev = tracer->dev; 126 + struct device *ddev = &dev->pdev->dev; 127 + dma_addr_t dma; 128 + void *buff; 129 + gfp_t gfp; 130 + int err; 131 + 132 + tracer->buff.size = TRACE_BUFFER_SIZE_BYTE; 133 + 134 + gfp = GFP_KERNEL | __GFP_ZERO; 135 + buff = (void *)__get_free_pages(gfp, 136 + get_order(tracer->buff.size)); 137 + if (!buff) { 138 + err = -ENOMEM; 139 + mlx5_core_warn(dev, "FWTracer: Failed to allocate pages, %d\n", err); 140 + return err; 141 + } 142 + tracer->buff.log_buf = buff; 143 + 144 + dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE); 145 + if (dma_mapping_error(ddev, dma)) { 146 + mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n", 147 + dma_mapping_error(ddev, dma)); 148 + err = -ENOMEM; 149 + goto free_pages; 150 + } 151 + tracer->buff.dma = dma; 152 + 153 + return 0; 154 + 155 + free_pages: 156 + free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size)); 157 
+ 158 + return err; 159 + } 160 + 161 + static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer) 162 + { 163 + struct mlx5_core_dev *dev = tracer->dev; 164 + struct device *ddev = &dev->pdev->dev; 165 + 166 + if (!tracer->buff.log_buf) 167 + return; 168 + 169 + dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE); 170 + free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size)); 171 + } 172 + 173 + static int mlx5_fw_tracer_create_mkey(struct mlx5_fw_tracer *tracer) 174 + { 175 + struct mlx5_core_dev *dev = tracer->dev; 176 + int err, inlen, i; 177 + __be64 *mtt; 178 + void *mkc; 179 + u32 *in; 180 + 181 + inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + 182 + sizeof(*mtt) * round_up(TRACER_BUFFER_PAGE_NUM, 2); 183 + 184 + in = kvzalloc(inlen, GFP_KERNEL); 185 + if (!in) 186 + return -ENOMEM; 187 + 188 + MLX5_SET(create_mkey_in, in, translations_octword_actual_size, 189 + DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2)); 190 + mtt = (u64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); 191 + for (i = 0 ; i < TRACER_BUFFER_PAGE_NUM ; i++) 192 + mtt[i] = cpu_to_be64(tracer->buff.dma + i * PAGE_SIZE); 193 + 194 + mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); 195 + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_MTT); 196 + MLX5_SET(mkc, mkc, lr, 1); 197 + MLX5_SET(mkc, mkc, lw, 1); 198 + MLX5_SET(mkc, mkc, pd, tracer->buff.pdn); 199 + MLX5_SET(mkc, mkc, bsf_octword_size, 0); 200 + MLX5_SET(mkc, mkc, qpn, 0xffffff); 201 + MLX5_SET(mkc, mkc, log_page_size, PAGE_SHIFT); 202 + MLX5_SET(mkc, mkc, translations_octword_size, 203 + DIV_ROUND_UP(TRACER_BUFFER_PAGE_NUM, 2)); 204 + MLX5_SET64(mkc, mkc, start_addr, tracer->buff.dma); 205 + MLX5_SET64(mkc, mkc, len, tracer->buff.size); 206 + err = mlx5_core_create_mkey(dev, &tracer->buff.mkey, in, inlen); 207 + if (err) 208 + mlx5_core_warn(dev, "FWTracer: Failed to create mkey, %d\n", err); 209 + 210 + kvfree(in); 211 + 212 + return err; 213 + } 214 + 215 + 
static void mlx5_fw_tracer_free_strings_db(struct mlx5_fw_tracer *tracer) 216 + { 217 + u32 num_string_db = tracer->str_db.num_string_db; 218 + int i; 219 + 220 + for (i = 0; i < num_string_db; i++) { 221 + kfree(tracer->str_db.buffer[i]); 222 + tracer->str_db.buffer[i] = NULL; 223 + } 224 + } 225 + 226 + static int mlx5_fw_tracer_allocate_strings_db(struct mlx5_fw_tracer *tracer) 227 + { 228 + u32 *string_db_size_out = tracer->str_db.size_out; 229 + u32 num_string_db = tracer->str_db.num_string_db; 230 + int i; 231 + 232 + for (i = 0; i < num_string_db; i++) { 233 + tracer->str_db.buffer[i] = kzalloc(string_db_size_out[i], GFP_KERNEL); 234 + if (!tracer->str_db.buffer[i]) 235 + goto free_strings_db; 236 + } 237 + 238 + return 0; 239 + 240 + free_strings_db: 241 + mlx5_fw_tracer_free_strings_db(tracer); 242 + return -ENOMEM; 243 + } 244 + 245 + static void mlx5_tracer_read_strings_db(struct work_struct *work) 246 + { 247 + struct mlx5_fw_tracer *tracer = container_of(work, struct mlx5_fw_tracer, 248 + read_fw_strings_work); 249 + u32 num_of_reads, num_string_db = tracer->str_db.num_string_db; 250 + struct mlx5_core_dev *dev = tracer->dev; 251 + u32 in[MLX5_ST_SZ_DW(mtrc_cap)] = {0}; 252 + u32 leftovers, offset; 253 + int err = 0, i, j; 254 + u32 *out, outlen; 255 + void *out_value; 256 + 257 + outlen = MLX5_ST_SZ_BYTES(mtrc_stdb) + STRINGS_DB_READ_SIZE_BYTES; 258 + out = kzalloc(outlen, GFP_KERNEL); 259 + if (!out) { 260 + err = -ENOMEM; 261 + goto out; 262 + } 263 + 264 + for (i = 0; i < num_string_db; i++) { 265 + offset = 0; 266 + MLX5_SET(mtrc_stdb, in, string_db_index, i); 267 + num_of_reads = tracer->str_db.size_out[i] / 268 + STRINGS_DB_READ_SIZE_BYTES; 269 + leftovers = (tracer->str_db.size_out[i] % 270 + STRINGS_DB_READ_SIZE_BYTES) / 271 + STRINGS_DB_LEFTOVER_SIZE_BYTES; 272 + 273 + MLX5_SET(mtrc_stdb, in, read_size, STRINGS_DB_READ_SIZE_BYTES); 274 + for (j = 0; j < num_of_reads; j++) { 275 + MLX5_SET(mtrc_stdb, in, start_offset, offset); 276 + 277 + err 
= mlx5_core_access_reg(dev, in, sizeof(in), out, 278 + outlen, MLX5_REG_MTRC_STDB, 279 + 0, 1); 280 + if (err) { 281 + mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n", 282 + err); 283 + goto out_free; 284 + } 285 + 286 + out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data); 287 + memcpy(tracer->str_db.buffer[i] + offset, out_value, 288 + STRINGS_DB_READ_SIZE_BYTES); 289 + offset += STRINGS_DB_READ_SIZE_BYTES; 290 + } 291 + 292 + /* Strings database is aligned to 64, need to read leftovers*/ 293 + MLX5_SET(mtrc_stdb, in, read_size, 294 + STRINGS_DB_LEFTOVER_SIZE_BYTES); 295 + for (j = 0; j < leftovers; j++) { 296 + MLX5_SET(mtrc_stdb, in, start_offset, offset); 297 + 298 + err = mlx5_core_access_reg(dev, in, sizeof(in), out, 299 + outlen, MLX5_REG_MTRC_STDB, 300 + 0, 1); 301 + if (err) { 302 + mlx5_core_dbg(dev, "FWTracer: Failed to read strings DB %d\n", 303 + err); 304 + goto out_free; 305 + } 306 + 307 + out_value = MLX5_ADDR_OF(mtrc_stdb, out, string_db_data); 308 + memcpy(tracer->str_db.buffer[i] + offset, out_value, 309 + STRINGS_DB_LEFTOVER_SIZE_BYTES); 310 + offset += STRINGS_DB_LEFTOVER_SIZE_BYTES; 311 + } 312 + } 313 + 314 + tracer->str_db.loaded = true; 315 + 316 + out_free: 317 + kfree(out); 318 + out: 319 + return; 320 + } 321 + 322 + static void mlx5_fw_tracer_arm(struct mlx5_core_dev *dev) 323 + { 324 + u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; 325 + u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; 326 + int err; 327 + 328 + MLX5_SET(mtrc_ctrl, in, arm_event, 1); 329 + 330 + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), 331 + MLX5_REG_MTRC_CTRL, 0, 1); 332 + if (err) 333 + mlx5_core_warn(dev, "FWTracer: Failed to arm tracer event %d\n", err); 334 + } 335 + 336 + static const char *VAL_PARM = "%llx"; 337 + static const char *REPLACE_64_VAL_PARM = "%x%x"; 338 + static const char *PARAM_CHAR = "%"; 339 + 340 + static int mlx5_tracer_message_hash(u32 message_id) 341 + { 342 + return jhash_1word(message_id, 0) & 
(MESSAGE_HASH_SIZE - 1); 343 + } 344 + 345 + static struct tracer_string_format *mlx5_tracer_message_insert(struct mlx5_fw_tracer *tracer, 346 + struct tracer_event *tracer_event) 347 + { 348 + struct hlist_head *head = 349 + &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)]; 350 + struct tracer_string_format *cur_string; 351 + 352 + cur_string = kzalloc(sizeof(*cur_string), GFP_KERNEL); 353 + if (!cur_string) 354 + return NULL; 355 + 356 + hlist_add_head(&cur_string->hlist, head); 357 + 358 + return cur_string; 359 + } 360 + 361 + static struct tracer_string_format *mlx5_tracer_get_string(struct mlx5_fw_tracer *tracer, 362 + struct tracer_event *tracer_event) 363 + { 364 + struct tracer_string_format *cur_string; 365 + u32 str_ptr, offset; 366 + int i; 367 + 368 + str_ptr = tracer_event->string_event.string_param; 369 + 370 + for (i = 0; i < tracer->str_db.num_string_db; i++) { 371 + if (str_ptr > tracer->str_db.base_address_out[i] && 372 + str_ptr < tracer->str_db.base_address_out[i] + 373 + tracer->str_db.size_out[i]) { 374 + offset = str_ptr - tracer->str_db.base_address_out[i]; 375 + /* add it to the hash */ 376 + cur_string = mlx5_tracer_message_insert(tracer, tracer_event); 377 + if (!cur_string) 378 + return NULL; 379 + cur_string->string = (char *)(tracer->str_db.buffer[i] + 380 + offset); 381 + return cur_string; 382 + } 383 + } 384 + 385 + return NULL; 386 + } 387 + 388 + static void mlx5_tracer_clean_message(struct tracer_string_format *str_frmt) 389 + { 390 + hlist_del(&str_frmt->hlist); 391 + kfree(str_frmt); 392 + } 393 + 394 + static int mlx5_tracer_get_num_of_params(char *str) 395 + { 396 + char *substr, *pstr = str; 397 + int num_of_params = 0; 398 + 399 + /* replace %llx with %x%x */ 400 + substr = strstr(pstr, VAL_PARM); 401 + while (substr) { 402 + memcpy(substr, REPLACE_64_VAL_PARM, 4); 403 + pstr = substr; 404 + substr = strstr(pstr, VAL_PARM); 405 + } 406 + 407 + /* count all the % characters */ 408 + substr = 
strstr(str, PARAM_CHAR); 409 + while (substr) { 410 + num_of_params += 1; 411 + str = substr + 1; 412 + substr = strstr(str, PARAM_CHAR); 413 + } 414 + 415 + return num_of_params; 416 + } 417 + 418 + static struct tracer_string_format *mlx5_tracer_message_find(struct hlist_head *head, 419 + u8 event_id, u32 tmsn) 420 + { 421 + struct tracer_string_format *message; 422 + 423 + hlist_for_each_entry(message, head, hlist) 424 + if (message->event_id == event_id && message->tmsn == tmsn) 425 + return message; 426 + 427 + return NULL; 428 + } 429 + 430 + static struct tracer_string_format *mlx5_tracer_message_get(struct mlx5_fw_tracer *tracer, 431 + struct tracer_event *tracer_event) 432 + { 433 + struct hlist_head *head = 434 + &tracer->hash[mlx5_tracer_message_hash(tracer_event->string_event.tmsn)]; 435 + 436 + return mlx5_tracer_message_find(head, tracer_event->event_id, tracer_event->string_event.tmsn); 437 + } 438 + 439 + static void poll_trace(struct mlx5_fw_tracer *tracer, 440 + struct tracer_event *tracer_event, u64 *trace) 441 + { 442 + u32 timestamp_low, timestamp_mid, timestamp_high, urts; 443 + 444 + tracer_event->event_id = MLX5_GET(tracer_event, trace, event_id); 445 + tracer_event->lost_event = MLX5_GET(tracer_event, trace, lost); 446 + 447 + switch (tracer_event->event_id) { 448 + case TRACER_EVENT_TYPE_TIMESTAMP: 449 + tracer_event->type = TRACER_EVENT_TYPE_TIMESTAMP; 450 + urts = MLX5_GET(tracer_timestamp_event, trace, urts); 451 + if (tracer->trc_ver == 0) 452 + tracer_event->timestamp_event.unreliable = !!(urts >> 2); 453 + else 454 + tracer_event->timestamp_event.unreliable = !!(urts & 1); 455 + 456 + timestamp_low = MLX5_GET(tracer_timestamp_event, 457 + trace, timestamp7_0); 458 + timestamp_mid = MLX5_GET(tracer_timestamp_event, 459 + trace, timestamp39_8); 460 + timestamp_high = MLX5_GET(tracer_timestamp_event, 461 + trace, timestamp52_40); 462 + 463 + tracer_event->timestamp_event.timestamp = 464 + ((u64)timestamp_high << 40) | 465 + 
((u64)timestamp_mid << 8) | 466 + (u64)timestamp_low; 467 + break; 468 + default: 469 + if (tracer_event->event_id >= tracer->str_db.first_string_trace || 470 + tracer_event->event_id <= tracer->str_db.first_string_trace + 471 + tracer->str_db.num_string_trace) { 472 + tracer_event->type = TRACER_EVENT_TYPE_STRING; 473 + tracer_event->string_event.timestamp = 474 + MLX5_GET(tracer_string_event, trace, timestamp); 475 + tracer_event->string_event.string_param = 476 + MLX5_GET(tracer_string_event, trace, string_param); 477 + tracer_event->string_event.tmsn = 478 + MLX5_GET(tracer_string_event, trace, tmsn); 479 + tracer_event->string_event.tdsn = 480 + MLX5_GET(tracer_string_event, trace, tdsn); 481 + } else { 482 + tracer_event->type = TRACER_EVENT_TYPE_UNRECOGNIZED; 483 + } 484 + break; 485 + } 486 + } 487 + 488 + static u64 get_block_timestamp(struct mlx5_fw_tracer *tracer, u64 *ts_event) 489 + { 490 + struct tracer_event tracer_event; 491 + u8 event_id; 492 + 493 + event_id = MLX5_GET(tracer_event, ts_event, event_id); 494 + 495 + if (event_id == TRACER_EVENT_TYPE_TIMESTAMP) 496 + poll_trace(tracer, &tracer_event, ts_event); 497 + else 498 + tracer_event.timestamp_event.timestamp = 0; 499 + 500 + return tracer_event.timestamp_event.timestamp; 501 + } 502 + 503 + static void mlx5_fw_tracer_clean_print_hash(struct mlx5_fw_tracer *tracer) 504 + { 505 + struct tracer_string_format *str_frmt; 506 + struct hlist_node *n; 507 + int i; 508 + 509 + for (i = 0; i < MESSAGE_HASH_SIZE; i++) { 510 + hlist_for_each_entry_safe(str_frmt, n, &tracer->hash[i], hlist) 511 + mlx5_tracer_clean_message(str_frmt); 512 + } 513 + } 514 + 515 + static void mlx5_fw_tracer_clean_ready_list(struct mlx5_fw_tracer *tracer) 516 + { 517 + struct tracer_string_format *str_frmt, *tmp_str; 518 + 519 + list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, 520 + list) 521 + list_del(&str_frmt->list); 522 + } 523 + 524 + static void mlx5_tracer_print_trace(struct 
tracer_string_format *str_frmt, 525 + struct mlx5_core_dev *dev, 526 + u64 trace_timestamp) 527 + { 528 + char tmp[512]; 529 + 530 + snprintf(tmp, sizeof(tmp), str_frmt->string, 531 + str_frmt->params[0], 532 + str_frmt->params[1], 533 + str_frmt->params[2], 534 + str_frmt->params[3], 535 + str_frmt->params[4], 536 + str_frmt->params[5], 537 + str_frmt->params[6]); 538 + 539 + trace_mlx5_fw(dev->tracer, trace_timestamp, str_frmt->lost, 540 + str_frmt->event_id, tmp); 541 + 542 + /* remove it from hash */ 543 + mlx5_tracer_clean_message(str_frmt); 544 + } 545 + 546 + static int mlx5_tracer_handle_string_trace(struct mlx5_fw_tracer *tracer, 547 + struct tracer_event *tracer_event) 548 + { 549 + struct tracer_string_format *cur_string; 550 + 551 + if (tracer_event->string_event.tdsn == 0) { 552 + cur_string = mlx5_tracer_get_string(tracer, tracer_event); 553 + if (!cur_string) 554 + return -1; 555 + 556 + cur_string->num_of_params = mlx5_tracer_get_num_of_params(cur_string->string); 557 + cur_string->last_param_num = 0; 558 + cur_string->event_id = tracer_event->event_id; 559 + cur_string->tmsn = tracer_event->string_event.tmsn; 560 + cur_string->timestamp = tracer_event->string_event.timestamp; 561 + cur_string->lost = tracer_event->lost_event; 562 + if (cur_string->num_of_params == 0) /* trace with no params */ 563 + list_add_tail(&cur_string->list, &tracer->ready_strings_list); 564 + } else { 565 + cur_string = mlx5_tracer_message_get(tracer, tracer_event); 566 + if (!cur_string) { 567 + pr_debug("%s Got string event for unknown string tdsm: %d\n", 568 + __func__, tracer_event->string_event.tmsn); 569 + return -1; 570 + } 571 + cur_string->last_param_num += 1; 572 + if (cur_string->last_param_num > TRACER_MAX_PARAMS) { 573 + pr_debug("%s Number of params exceeds the max (%d)\n", 574 + __func__, TRACER_MAX_PARAMS); 575 + list_add_tail(&cur_string->list, &tracer->ready_strings_list); 576 + return 0; 577 + } 578 + /* keep the new parameter */ 579 + 
cur_string->params[cur_string->last_param_num - 1] = 580 + tracer_event->string_event.string_param; 581 + if (cur_string->last_param_num == cur_string->num_of_params) 582 + list_add_tail(&cur_string->list, &tracer->ready_strings_list); 583 + } 584 + 585 + return 0; 586 + } 587 + 588 + static void mlx5_tracer_handle_timestamp_trace(struct mlx5_fw_tracer *tracer, 589 + struct tracer_event *tracer_event) 590 + { 591 + struct tracer_timestamp_event timestamp_event = 592 + tracer_event->timestamp_event; 593 + struct tracer_string_format *str_frmt, *tmp_str; 594 + struct mlx5_core_dev *dev = tracer->dev; 595 + u64 trace_timestamp; 596 + 597 + list_for_each_entry_safe(str_frmt, tmp_str, &tracer->ready_strings_list, list) { 598 + list_del(&str_frmt->list); 599 + if (str_frmt->timestamp < (timestamp_event.timestamp & MASK_6_0)) 600 + trace_timestamp = (timestamp_event.timestamp & MASK_52_7) | 601 + (str_frmt->timestamp & MASK_6_0); 602 + else 603 + trace_timestamp = ((timestamp_event.timestamp & MASK_52_7) - 1) | 604 + (str_frmt->timestamp & MASK_6_0); 605 + 606 + mlx5_tracer_print_trace(str_frmt, dev, trace_timestamp); 607 + } 608 + } 609 + 610 + static int mlx5_tracer_handle_trace(struct mlx5_fw_tracer *tracer, 611 + struct tracer_event *tracer_event) 612 + { 613 + if (tracer_event->type == TRACER_EVENT_TYPE_STRING) { 614 + mlx5_tracer_handle_string_trace(tracer, tracer_event); 615 + } else if (tracer_event->type == TRACER_EVENT_TYPE_TIMESTAMP) { 616 + if (!tracer_event->timestamp_event.unreliable) 617 + mlx5_tracer_handle_timestamp_trace(tracer, tracer_event); 618 + } else { 619 + pr_debug("%s Got unrecognised type %d for parsing, exiting..\n", 620 + __func__, tracer_event->type); 621 + } 622 + return 0; 623 + } 624 + 625 + static void mlx5_fw_tracer_handle_traces(struct work_struct *work) 626 + { 627 + struct mlx5_fw_tracer *tracer = 628 + container_of(work, struct mlx5_fw_tracer, handle_traces_work); 629 + u64 block_timestamp, last_block_timestamp, 
tmp_trace_block[TRACES_PER_BLOCK]; 630 + u32 block_count, start_offset, prev_start_offset, prev_consumer_index; 631 + u32 trace_event_size = MLX5_ST_SZ_BYTES(tracer_event); 632 + struct mlx5_core_dev *dev = tracer->dev; 633 + struct tracer_event tracer_event; 634 + int i; 635 + 636 + mlx5_core_dbg(dev, "FWTracer: Handle Trace event, owner=(%d)\n", tracer->owner); 637 + if (!tracer->owner) 638 + return; 639 + 640 + block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; 641 + start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; 642 + 643 + /* Copy the block to local buffer to avoid HW override while being processed*/ 644 + memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset, 645 + TRACER_BLOCK_SIZE_BYTE); 646 + 647 + block_timestamp = 648 + get_block_timestamp(tracer, &tmp_trace_block[TRACES_PER_BLOCK - 1]); 649 + 650 + while (block_timestamp > tracer->last_timestamp) { 651 + /* Check block override if its not the first block */ 652 + if (!tracer->last_timestamp) { 653 + u64 *ts_event; 654 + /* To avoid block override be the HW in case of buffer 655 + * wraparound, the time stamp of the previous block 656 + * should be compared to the last timestamp handled 657 + * by the driver. 658 + */ 659 + prev_consumer_index = 660 + (tracer->buff.consumer_index - 1) & (block_count - 1); 661 + prev_start_offset = prev_consumer_index * TRACER_BLOCK_SIZE_BYTE; 662 + 663 + ts_event = tracer->buff.log_buf + prev_start_offset + 664 + (TRACES_PER_BLOCK - 1) * trace_event_size; 665 + last_block_timestamp = get_block_timestamp(tracer, ts_event); 666 + /* If previous timestamp different from last stored 667 + * timestamp then there is a good chance that the 668 + * current buffer is overwritten and therefore should 669 + * not be parsed. 
670 + */ 671 + if (tracer->last_timestamp != last_block_timestamp) { 672 + mlx5_core_warn(dev, "FWTracer: Events were lost\n"); 673 + tracer->last_timestamp = block_timestamp; 674 + tracer->buff.consumer_index = 675 + (tracer->buff.consumer_index + 1) & (block_count - 1); 676 + break; 677 + } 678 + } 679 + 680 + /* Parse events */ 681 + for (i = 0; i < TRACES_PER_BLOCK ; i++) { 682 + poll_trace(tracer, &tracer_event, &tmp_trace_block[i]); 683 + mlx5_tracer_handle_trace(tracer, &tracer_event); 684 + } 685 + 686 + tracer->buff.consumer_index = 687 + (tracer->buff.consumer_index + 1) & (block_count - 1); 688 + 689 + tracer->last_timestamp = block_timestamp; 690 + start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; 691 + memcpy(tmp_trace_block, tracer->buff.log_buf + start_offset, 692 + TRACER_BLOCK_SIZE_BYTE); 693 + block_timestamp = get_block_timestamp(tracer, 694 + &tmp_trace_block[TRACES_PER_BLOCK - 1]); 695 + } 696 + 697 + mlx5_fw_tracer_arm(dev); 698 + } 699 + 700 + static int mlx5_fw_tracer_set_mtrc_conf(struct mlx5_fw_tracer *tracer) 701 + { 702 + struct mlx5_core_dev *dev = tracer->dev; 703 + u32 out[MLX5_ST_SZ_DW(mtrc_conf)] = {0}; 704 + u32 in[MLX5_ST_SZ_DW(mtrc_conf)] = {0}; 705 + int err; 706 + 707 + MLX5_SET(mtrc_conf, in, trace_mode, TRACE_TO_MEMORY); 708 + MLX5_SET(mtrc_conf, in, log_trace_buffer_size, 709 + ilog2(TRACER_BUFFER_PAGE_NUM)); 710 + MLX5_SET(mtrc_conf, in, trace_mkey, tracer->buff.mkey.key); 711 + 712 + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), 713 + MLX5_REG_MTRC_CONF, 0, 1); 714 + if (err) 715 + mlx5_core_warn(dev, "FWTracer: Failed to set tracer configurations %d\n", err); 716 + 717 + return err; 718 + } 719 + 720 + static int mlx5_fw_tracer_set_mtrc_ctrl(struct mlx5_fw_tracer *tracer, u8 status, u8 arm) 721 + { 722 + struct mlx5_core_dev *dev = tracer->dev; 723 + u32 out[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; 724 + u32 in[MLX5_ST_SZ_DW(mtrc_ctrl)] = {0}; 725 + int err; 726 + 727 + MLX5_SET(mtrc_ctrl, 
in, modify_field_select, TRACE_STATUS); 728 + MLX5_SET(mtrc_ctrl, in, trace_status, status); 729 + MLX5_SET(mtrc_ctrl, in, arm_event, arm); 730 + 731 + err = mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out), 732 + MLX5_REG_MTRC_CTRL, 0, 1); 733 + 734 + if (!err && status) 735 + tracer->last_timestamp = 0; 736 + 737 + return err; 738 + } 739 + 740 + static int mlx5_fw_tracer_start(struct mlx5_fw_tracer *tracer) 741 + { 742 + struct mlx5_core_dev *dev = tracer->dev; 743 + int err; 744 + 745 + err = mlx5_fw_tracer_ownership_acquire(tracer); 746 + if (err) { 747 + mlx5_core_dbg(dev, "FWTracer: Ownership was not granted %d\n", err); 748 + /* Don't fail since ownership can be acquired on a later FW event */ 749 + return 0; 750 + } 751 + 752 + err = mlx5_fw_tracer_set_mtrc_conf(tracer); 753 + if (err) { 754 + mlx5_core_warn(dev, "FWTracer: Failed to set tracer configuration %d\n", err); 755 + goto release_ownership; 756 + } 757 + 758 + /* enable tracer & trace events */ 759 + err = mlx5_fw_tracer_set_mtrc_ctrl(tracer, 1, 1); 760 + if (err) { 761 + mlx5_core_warn(dev, "FWTracer: Failed to enable tracer %d\n", err); 762 + goto release_ownership; 763 + } 764 + 765 + mlx5_core_dbg(dev, "FWTracer: Ownership granted and active\n"); 766 + return 0; 767 + 768 + release_ownership: 769 + mlx5_fw_tracer_ownership_release(tracer); 770 + return err; 771 + } 772 + 773 + static void mlx5_fw_tracer_ownership_change(struct work_struct *work) 774 + { 775 + struct mlx5_fw_tracer *tracer = 776 + container_of(work, struct mlx5_fw_tracer, ownership_change_work); 777 + 778 + mlx5_core_dbg(tracer->dev, "FWTracer: ownership changed, current=(%d)\n", tracer->owner); 779 + if (tracer->owner) { 780 + tracer->owner = false; 781 + tracer->buff.consumer_index = 0; 782 + return; 783 + } 784 + 785 + mlx5_fw_tracer_start(tracer); 786 + } 787 + 788 + /* Create software resources (Buffers, etc ..) 
*/ 789 + struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev) 790 + { 791 + struct mlx5_fw_tracer *tracer = NULL; 792 + int err; 793 + 794 + if (!MLX5_CAP_MCAM_REG(dev, tracer_registers)) { 795 + mlx5_core_dbg(dev, "FWTracer: Tracer capability not present\n"); 796 + return NULL; 797 + } 798 + 799 + tracer = kzalloc(sizeof(*tracer), GFP_KERNEL); 800 + if (!tracer) 801 + return ERR_PTR(-ENOMEM); 802 + 803 + tracer->work_queue = create_singlethread_workqueue("mlx5_fw_tracer"); 804 + if (!tracer->work_queue) { 805 + err = -ENOMEM; 806 + goto free_tracer; 807 + } 808 + 809 + tracer->dev = dev; 810 + 811 + INIT_LIST_HEAD(&tracer->ready_strings_list); 812 + INIT_WORK(&tracer->ownership_change_work, mlx5_fw_tracer_ownership_change); 813 + INIT_WORK(&tracer->read_fw_strings_work, mlx5_tracer_read_strings_db); 814 + INIT_WORK(&tracer->handle_traces_work, mlx5_fw_tracer_handle_traces); 815 + 816 + 817 + err = mlx5_query_mtrc_caps(tracer); 818 + if (err) { 819 + mlx5_core_dbg(dev, "FWTracer: Failed to query capabilities %d\n", err); 820 + goto destroy_workqueue; 821 + } 822 + 823 + err = mlx5_fw_tracer_create_log_buf(tracer); 824 + if (err) { 825 + mlx5_core_warn(dev, "FWTracer: Create log buffer failed %d\n", err); 826 + goto destroy_workqueue; 827 + } 828 + 829 + err = mlx5_fw_tracer_allocate_strings_db(tracer); 830 + if (err) { 831 + mlx5_core_warn(dev, "FWTracer: Allocate strings database failed %d\n", err); 832 + goto free_log_buf; 833 + } 834 + 835 + mlx5_core_dbg(dev, "FWTracer: Tracer created\n"); 836 + 837 + return tracer; 838 + 839 + free_log_buf: 840 + mlx5_fw_tracer_destroy_log_buf(tracer); 841 + destroy_workqueue: 842 + tracer->dev = NULL; 843 + destroy_workqueue(tracer->work_queue); 844 + free_tracer: 845 + kfree(tracer); 846 + return ERR_PTR(err); 847 + } 848 + 849 + /* Create HW resources + start tracer 850 + * must be called before Async EQ is created 851 + */ 852 + int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) 853 + { 854 + struct 
mlx5_core_dev *dev; 855 + int err; 856 + 857 + if (IS_ERR_OR_NULL(tracer)) 858 + return 0; 859 + 860 + dev = tracer->dev; 861 + 862 + if (!tracer->str_db.loaded) 863 + queue_work(tracer->work_queue, &tracer->read_fw_strings_work); 864 + 865 + err = mlx5_core_alloc_pd(dev, &tracer->buff.pdn); 866 + if (err) { 867 + mlx5_core_warn(dev, "FWTracer: Failed to allocate PD %d\n", err); 868 + return err; 869 + } 870 + 871 + err = mlx5_fw_tracer_create_mkey(tracer); 872 + if (err) { 873 + mlx5_core_warn(dev, "FWTracer: Failed to create mkey %d\n", err); 874 + goto err_dealloc_pd; 875 + } 876 + 877 + mlx5_fw_tracer_start(tracer); 878 + 879 + return 0; 880 + 881 + err_dealloc_pd: 882 + mlx5_core_dealloc_pd(dev, tracer->buff.pdn); 883 + return err; 884 + } 885 + 886 + /* Stop tracer + Cleanup HW resources 887 + * must be called after Async EQ is destroyed 888 + */ 889 + void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) 890 + { 891 + if (IS_ERR_OR_NULL(tracer)) 892 + return; 893 + 894 + mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? (%d)\n", 895 + tracer->owner); 896 + 897 + cancel_work_sync(&tracer->ownership_change_work); 898 + cancel_work_sync(&tracer->handle_traces_work); 899 + 900 + if (tracer->owner) 901 + mlx5_fw_tracer_ownership_release(tracer); 902 + 903 + mlx5_core_destroy_mkey(tracer->dev, &tracer->buff.mkey); 904 + mlx5_core_dealloc_pd(tracer->dev, tracer->buff.pdn); 905 + } 906 + 907 + /* Free software resources (Buffers, etc ..) 
*/ 908 + void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) 909 + { 910 + if (IS_ERR_OR_NULL(tracer)) 911 + return; 912 + 913 + mlx5_core_dbg(tracer->dev, "FWTracer: Destroy\n"); 914 + 915 + cancel_work_sync(&tracer->read_fw_strings_work); 916 + mlx5_fw_tracer_clean_ready_list(tracer); 917 + mlx5_fw_tracer_clean_print_hash(tracer); 918 + mlx5_fw_tracer_free_strings_db(tracer); 919 + mlx5_fw_tracer_destroy_log_buf(tracer); 920 + flush_workqueue(tracer->work_queue); 921 + destroy_workqueue(tracer->work_queue); 922 + kfree(tracer); 923 + } 924 + 925 + void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) 926 + { 927 + struct mlx5_fw_tracer *tracer = dev->tracer; 928 + 929 + if (!tracer) 930 + return; 931 + 932 + switch (eqe->sub_type) { 933 + case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE: 934 + if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) 935 + queue_work(tracer->work_queue, &tracer->ownership_change_work); 936 + break; 937 + case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: 938 + if (likely(tracer->str_db.loaded)) 939 + queue_work(tracer->work_queue, &tracer->handle_traces_work); 940 + break; 941 + default: 942 + mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", 943 + eqe->sub_type); 944 + } 945 + } 946 + 947 + EXPORT_TRACEPOINT_SYMBOL(mlx5_fw);
+175
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h
··· 1 + /* 2 + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 
31 + */ 32 + 33 + #ifndef __LIB_TRACER_H__ 34 + #define __LIB_TRACER_H__ 35 + 36 + #include <linux/mlx5/driver.h> 37 + #include "mlx5_core.h" 38 + 39 + #define STRINGS_DB_SECTIONS_NUM 8 40 + #define STRINGS_DB_READ_SIZE_BYTES 256 41 + #define STRINGS_DB_LEFTOVER_SIZE_BYTES 64 42 + #define TRACER_BUFFER_PAGE_NUM 64 43 + #define TRACER_BUFFER_CHUNK 4096 44 + #define TRACE_BUFFER_SIZE_BYTE (TRACER_BUFFER_PAGE_NUM * TRACER_BUFFER_CHUNK) 45 + 46 + #define TRACER_BLOCK_SIZE_BYTE 256 47 + #define TRACES_PER_BLOCK 32 48 + 49 + #define TRACER_MAX_PARAMS 7 50 + #define MESSAGE_HASH_BITS 6 51 + #define MESSAGE_HASH_SIZE BIT(MESSAGE_HASH_BITS) 52 + 53 + #define MASK_52_7 (0x1FFFFFFFFFFF80) 54 + #define MASK_6_0 (0x7F) 55 + 56 + struct mlx5_fw_tracer { 57 + struct mlx5_core_dev *dev; 58 + bool owner; 59 + u8 trc_ver; 60 + struct workqueue_struct *work_queue; 61 + struct work_struct ownership_change_work; 62 + struct work_struct read_fw_strings_work; 63 + 64 + /* Strings DB */ 65 + struct { 66 + u8 first_string_trace; 67 + u8 num_string_trace; 68 + u32 num_string_db; 69 + u32 base_address_out[STRINGS_DB_SECTIONS_NUM]; 70 + u32 size_out[STRINGS_DB_SECTIONS_NUM]; 71 + void *buffer[STRINGS_DB_SECTIONS_NUM]; 72 + bool loaded; 73 + } str_db; 74 + 75 + /* Log Buffer */ 76 + struct { 77 + u32 pdn; 78 + void *log_buf; 79 + dma_addr_t dma; 80 + u32 size; 81 + struct mlx5_core_mkey mkey; 82 + u32 consumer_index; 83 + } buff; 84 + 85 + u64 last_timestamp; 86 + struct work_struct handle_traces_work; 87 + struct hlist_head hash[MESSAGE_HASH_SIZE]; 88 + struct list_head ready_strings_list; 89 + }; 90 + 91 + struct tracer_string_format { 92 + char *string; 93 + int params[TRACER_MAX_PARAMS]; 94 + int num_of_params; 95 + int last_param_num; 96 + u8 event_id; 97 + u32 tmsn; 98 + struct hlist_node hlist; 99 + struct list_head list; 100 + u32 timestamp; 101 + bool lost; 102 + }; 103 + 104 + enum mlx5_fw_tracer_ownership_state { 105 + MLX5_FW_TRACER_RELEASE_OWNERSHIP, 106 + 
MLX5_FW_TRACER_ACQUIRE_OWNERSHIP, 107 + }; 108 + 109 + enum tracer_ctrl_fields_select { 110 + TRACE_STATUS = 1 << 0, 111 + }; 112 + 113 + enum tracer_event_type { 114 + TRACER_EVENT_TYPE_STRING, 115 + TRACER_EVENT_TYPE_TIMESTAMP = 0xFF, 116 + TRACER_EVENT_TYPE_UNRECOGNIZED, 117 + }; 118 + 119 + enum tracing_mode { 120 + TRACE_TO_MEMORY = 1 << 0, 121 + }; 122 + 123 + struct tracer_timestamp_event { 124 + u64 timestamp; 125 + u8 unreliable; 126 + }; 127 + 128 + struct tracer_string_event { 129 + u32 timestamp; 130 + u32 tmsn; 131 + u32 tdsn; 132 + u32 string_param; 133 + }; 134 + 135 + struct tracer_event { 136 + bool lost_event; 137 + u32 type; 138 + u8 event_id; 139 + union { 140 + struct tracer_string_event string_event; 141 + struct tracer_timestamp_event timestamp_event; 142 + }; 143 + }; 144 + 145 + struct mlx5_ifc_tracer_event_bits { 146 + u8 lost[0x1]; 147 + u8 timestamp[0x7]; 148 + u8 event_id[0x8]; 149 + u8 event_data[0x30]; 150 + }; 151 + 152 + struct mlx5_ifc_tracer_string_event_bits { 153 + u8 lost[0x1]; 154 + u8 timestamp[0x7]; 155 + u8 event_id[0x8]; 156 + u8 tmsn[0xd]; 157 + u8 tdsn[0x3]; 158 + u8 string_param[0x20]; 159 + }; 160 + 161 + struct mlx5_ifc_tracer_timestamp_event_bits { 162 + u8 timestamp7_0[0x8]; 163 + u8 event_id[0x8]; 164 + u8 urts[0x3]; 165 + u8 timestamp52_40[0xd]; 166 + u8 timestamp39_8[0x20]; 167 + }; 168 + 169 + struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev); 170 + int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer); 171 + void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer); 172 + void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer); 173 + void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); 174 + 175 + #endif
+78
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer_tracepoint.h
··· 1 + /* 2 + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 
31 + */ 32 + 33 + #if !defined(__LIB_TRACER_TRACEPOINT_H__) || defined(TRACE_HEADER_MULTI_READ) 34 + #define __LIB_TRACER_TRACEPOINT_H__ 35 + 36 + #include <linux/tracepoint.h> 37 + #include "fw_tracer.h" 38 + 39 + #undef TRACE_SYSTEM 40 + #define TRACE_SYSTEM mlx5 41 + 42 + /* Tracepoint for FWTracer messages: */ 43 + TRACE_EVENT(mlx5_fw, 44 + TP_PROTO(const struct mlx5_fw_tracer *tracer, u64 trace_timestamp, 45 + bool lost, u8 event_id, const char *msg), 46 + 47 + TP_ARGS(tracer, trace_timestamp, lost, event_id, msg), 48 + 49 + TP_STRUCT__entry( 50 + __string(dev_name, dev_name(&tracer->dev->pdev->dev)) 51 + __field(u64, trace_timestamp) 52 + __field(bool, lost) 53 + __field(u8, event_id) 54 + __string(msg, msg) 55 + ), 56 + 57 + TP_fast_assign( 58 + __assign_str(dev_name, dev_name(&tracer->dev->pdev->dev)); 59 + __entry->trace_timestamp = trace_timestamp; 60 + __entry->lost = lost; 61 + __entry->event_id = event_id; 62 + __assign_str(msg, msg); 63 + ), 64 + 65 + TP_printk("%s [0x%llx] %d [0x%x] %s", 66 + __get_str(dev_name), 67 + __entry->trace_timestamp, 68 + __entry->lost, __entry->event_id, 69 + __get_str(msg)) 70 + ); 71 + 72 + #endif 73 + 74 + #undef TRACE_INCLUDE_PATH 75 + #undef TRACE_INCLUDE_FILE 76 + #define TRACE_INCLUDE_PATH ./diag 77 + #define TRACE_INCLUDE_FILE fw_tracer_tracepoint 78 + #include <trace/define_trace.h>
+16 -11
drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
··· 38 38 #include <linux/netdevice.h> 39 39 #include "en_accel/ipsec_rxtx.h" 40 40 #include "en_accel/tls_rxtx.h" 41 - #include "en_accel/rxtx.h" 42 41 #include "en.h" 43 42 44 - static inline struct sk_buff *mlx5e_accel_handle_tx(struct sk_buff *skb, 45 - struct mlx5e_txqsq *sq, 46 - struct net_device *dev, 47 - struct mlx5e_tx_wqe **wqe, 48 - u16 *pi) 43 + static inline void 44 + mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb) 45 + { 46 + int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); 47 + 48 + udp_hdr(skb)->len = htons(payload_len); 49 + } 50 + 51 + static inline struct sk_buff * 52 + mlx5e_accel_handle_tx(struct sk_buff *skb, 53 + struct mlx5e_txqsq *sq, 54 + struct net_device *dev, 55 + struct mlx5e_tx_wqe **wqe, 56 + u16 *pi) 49 57 { 50 58 #ifdef CONFIG_MLX5_EN_TLS 51 59 if (test_bit(MLX5E_SQ_STATE_TLS, &sq->state)) { ··· 71 63 } 72 64 #endif 73 65 74 - if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { 75 - skb = mlx5e_udp_gso_handle_tx_skb(dev, sq, skb, wqe, pi); 76 - if (unlikely(!skb)) 77 - return NULL; 78 - } 66 + if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) 67 + mlx5e_udp_gso_handle_tx_skb(skb); 79 68 80 69 return skb; 81 70 }
-109
drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.c
··· 1 - #include "en_accel/rxtx.h" 2 - 3 - static void mlx5e_udp_gso_prepare_last_skb(struct sk_buff *skb, 4 - struct sk_buff *nskb, 5 - int remaining) 6 - { 7 - int bytes_needed = remaining, remaining_headlen, remaining_page_offset; 8 - int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); 9 - int payload_len = remaining + sizeof(struct udphdr); 10 - int k = 0, i, j; 11 - 12 - skb_copy_bits(skb, 0, nskb->data, headlen); 13 - nskb->dev = skb->dev; 14 - skb_reset_mac_header(nskb); 15 - skb_set_network_header(nskb, skb_network_offset(skb)); 16 - skb_set_transport_header(nskb, skb_transport_offset(skb)); 17 - skb_set_tail_pointer(nskb, headlen); 18 - 19 - /* How many frags do we need? */ 20 - for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) { 21 - bytes_needed -= skb_frag_size(&skb_shinfo(skb)->frags[i]); 22 - k++; 23 - if (bytes_needed <= 0) 24 - break; 25 - } 26 - 27 - /* Fill the first frag and split it if necessary */ 28 - j = skb_shinfo(skb)->nr_frags - k; 29 - remaining_page_offset = -bytes_needed; 30 - skb_fill_page_desc(nskb, 0, 31 - skb_shinfo(skb)->frags[j].page.p, 32 - skb_shinfo(skb)->frags[j].page_offset + remaining_page_offset, 33 - skb_shinfo(skb)->frags[j].size - remaining_page_offset); 34 - 35 - skb_frag_ref(skb, j); 36 - 37 - /* Fill the rest of the frags */ 38 - for (i = 1; i < k; i++) { 39 - j = skb_shinfo(skb)->nr_frags - k + i; 40 - 41 - skb_fill_page_desc(nskb, i, 42 - skb_shinfo(skb)->frags[j].page.p, 43 - skb_shinfo(skb)->frags[j].page_offset, 44 - skb_shinfo(skb)->frags[j].size); 45 - skb_frag_ref(skb, j); 46 - } 47 - skb_shinfo(nskb)->nr_frags = k; 48 - 49 - remaining_headlen = remaining - skb->data_len; 50 - 51 - /* headlen contains remaining data? 
*/ 52 - if (remaining_headlen > 0) 53 - skb_copy_bits(skb, skb->len - remaining, nskb->data + headlen, 54 - remaining_headlen); 55 - nskb->len = remaining + headlen; 56 - nskb->data_len = payload_len - sizeof(struct udphdr) + 57 - max_t(int, 0, remaining_headlen); 58 - nskb->protocol = skb->protocol; 59 - if (nskb->protocol == htons(ETH_P_IP)) { 60 - ip_hdr(nskb)->id = htons(ntohs(ip_hdr(nskb)->id) + 61 - skb_shinfo(skb)->gso_segs); 62 - ip_hdr(nskb)->tot_len = 63 - htons(payload_len + sizeof(struct iphdr)); 64 - } else { 65 - ipv6_hdr(nskb)->payload_len = htons(payload_len); 66 - } 67 - udp_hdr(nskb)->len = htons(payload_len); 68 - skb_shinfo(nskb)->gso_size = 0; 69 - nskb->ip_summed = skb->ip_summed; 70 - nskb->csum_start = skb->csum_start; 71 - nskb->csum_offset = skb->csum_offset; 72 - nskb->queue_mapping = skb->queue_mapping; 73 - } 74 - 75 - /* might send skbs and update wqe and pi */ 76 - struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, 77 - struct mlx5e_txqsq *sq, 78 - struct sk_buff *skb, 79 - struct mlx5e_tx_wqe **wqe, 80 - u16 *pi) 81 - { 82 - int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); 83 - int headlen = skb_transport_offset(skb) + sizeof(struct udphdr); 84 - int remaining = (skb->len - headlen) % skb_shinfo(skb)->gso_size; 85 - struct sk_buff *nskb; 86 - 87 - if (skb->protocol == htons(ETH_P_IP)) 88 - ip_hdr(skb)->tot_len = htons(payload_len + sizeof(struct iphdr)); 89 - else 90 - ipv6_hdr(skb)->payload_len = htons(payload_len); 91 - udp_hdr(skb)->len = htons(payload_len); 92 - if (!remaining) 93 - return skb; 94 - 95 - sq->stats->udp_seg_rem++; 96 - nskb = alloc_skb(max_t(int, headlen, headlen + remaining - skb->data_len), GFP_ATOMIC); 97 - if (unlikely(!nskb)) { 98 - sq->stats->dropped++; 99 - return NULL; 100 - } 101 - 102 - mlx5e_udp_gso_prepare_last_skb(skb, nskb, remaining); 103 - 104 - skb_shinfo(skb)->gso_segs--; 105 - pskb_trim(skb, skb->len - remaining); 106 - mlx5e_sq_xmit(sq, skb, *wqe, *pi); 
107 - mlx5e_sq_fetch_wqe(sq, wqe, pi); 108 - return nskb; 109 - }
-14
drivers/net/ethernet/mellanox/mlx5/core/en_accel/rxtx.h
··· 1 - 2 - #ifndef __MLX5E_EN_ACCEL_RX_TX_H__ 3 - #define __MLX5E_EN_ACCEL_RX_TX_H__ 4 - 5 - #include <linux/skbuff.h> 6 - #include "en.h" 7 - 8 - struct sk_buff *mlx5e_udp_gso_handle_tx_skb(struct net_device *netdev, 9 - struct mlx5e_txqsq *sq, 10 - struct sk_buff *skb, 11 - struct mlx5e_tx_wqe **wqe, 12 - u16 *pi); 13 - 14 - #endif
+5 -4
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4538 4538 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; 4539 4539 4540 4540 if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) { 4541 - netdev->hw_features |= NETIF_F_GSO_PARTIAL; 4542 4541 netdev->hw_enc_features |= NETIF_F_IP_CSUM; 4543 4542 netdev->hw_enc_features |= NETIF_F_IPV6_CSUM; 4544 4543 netdev->hw_enc_features |= NETIF_F_TSO; ··· 4561 4562 netdev->gso_partial_features |= NETIF_F_GSO_GRE | 4562 4563 NETIF_F_GSO_GRE_CSUM; 4563 4564 } 4565 + 4566 + netdev->hw_features |= NETIF_F_GSO_PARTIAL; 4567 + netdev->gso_partial_features |= NETIF_F_GSO_UDP_L4; 4568 + netdev->hw_features |= NETIF_F_GSO_UDP_L4; 4569 + netdev->features |= NETIF_F_GSO_UDP_L4; 4564 4570 4565 4571 mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled); 4566 4572 ··· 4598 4594 4599 4595 netdev->features |= NETIF_F_HIGHDMA; 4600 4596 netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; 4601 - 4602 - netdev->features |= NETIF_F_GSO_UDP_L4; 4603 - netdev->hw_features |= NETIF_F_GSO_UDP_L4; 4604 4597 4605 4598 netdev->priv_flags |= IFF_UNICAST_FLT; 4606 4599
+109 -25
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1032 1032 * dst ip pair 1033 1033 */ 1034 1034 n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev); 1035 - if (!n) { 1036 - WARN(1, "The neighbour already freed\n"); 1035 + if (!n) 1037 1036 return; 1038 - } 1039 1037 1040 1038 neigh_event_send(n, NULL); 1041 1039 neigh_release(n); ··· 1235 1237 outer_headers); 1236 1238 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1237 1239 outer_headers); 1240 + void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, 1241 + misc_parameters); 1242 + void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, 1243 + misc_parameters); 1238 1244 u16 addr_type = 0; 1239 1245 u8 ip_proto = 0; 1240 1246 ··· 1249 1247 BIT(FLOW_DISSECTOR_KEY_BASIC) | 1250 1248 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | 1251 1249 BIT(FLOW_DISSECTOR_KEY_VLAN) | 1250 + BIT(FLOW_DISSECTOR_KEY_CVLAN) | 1252 1251 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | 1253 1252 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | 1254 1253 BIT(FLOW_DISSECTOR_KEY_PORTS) | ··· 1330 1327 skb_flow_dissector_target(f->dissector, 1331 1328 FLOW_DISSECTOR_KEY_VLAN, 1332 1329 f->mask); 1333 - if (mask->vlan_id || mask->vlan_priority) { 1334 - MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1); 1335 - MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1); 1330 + if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { 1331 + if (key->vlan_tpid == htons(ETH_P_8021AD)) { 1332 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1333 + svlan_tag, 1); 1334 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1335 + svlan_tag, 1); 1336 + } else { 1337 + MLX5_SET(fte_match_set_lyr_2_4, headers_c, 1338 + cvlan_tag, 1); 1339 + MLX5_SET(fte_match_set_lyr_2_4, headers_v, 1340 + cvlan_tag, 1); 1341 + } 1336 1342 1337 1343 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id); 1338 1344 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id); 1339 1345 1340 1346 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority); 1341 1347 
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority); 1348 + 1349 + *match_level = MLX5_MATCH_L2; 1350 + } 1351 + } 1352 + 1353 + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) { 1354 + struct flow_dissector_key_vlan *key = 1355 + skb_flow_dissector_target(f->dissector, 1356 + FLOW_DISSECTOR_KEY_CVLAN, 1357 + f->key); 1358 + struct flow_dissector_key_vlan *mask = 1359 + skb_flow_dissector_target(f->dissector, 1360 + FLOW_DISSECTOR_KEY_CVLAN, 1361 + f->mask); 1362 + if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) { 1363 + if (key->vlan_tpid == htons(ETH_P_8021AD)) { 1364 + MLX5_SET(fte_match_set_misc, misc_c, 1365 + outer_second_svlan_tag, 1); 1366 + MLX5_SET(fte_match_set_misc, misc_v, 1367 + outer_second_svlan_tag, 1); 1368 + } else { 1369 + MLX5_SET(fte_match_set_misc, misc_c, 1370 + outer_second_cvlan_tag, 1); 1371 + MLX5_SET(fte_match_set_misc, misc_v, 1372 + outer_second_cvlan_tag, 1); 1373 + } 1374 + 1375 + MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid, 1376 + mask->vlan_id); 1377 + MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid, 1378 + key->vlan_id); 1379 + MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio, 1380 + mask->vlan_priority); 1381 + MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio, 1382 + key->vlan_priority); 1342 1383 1343 1384 *match_level = MLX5_MATCH_L2; 1344 1385 } ··· 2578 2531 return err; 2579 2532 } 2580 2533 2534 + static int parse_tc_vlan_action(struct mlx5e_priv *priv, 2535 + const struct tc_action *a, 2536 + struct mlx5_esw_flow_attr *attr, 2537 + u32 *action) 2538 + { 2539 + u8 vlan_idx = attr->total_vlan; 2540 + 2541 + if (vlan_idx >= MLX5_FS_VLAN_DEPTH) 2542 + return -EOPNOTSUPP; 2543 + 2544 + if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { 2545 + if (vlan_idx) { 2546 + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 2547 + MLX5_FS_VLAN_DEPTH)) 2548 + return -EOPNOTSUPP; 2549 + 2550 + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2; 2551 + } else { 2552 + 
*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 2553 + } 2554 + } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { 2555 + attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a); 2556 + attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a); 2557 + attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a); 2558 + if (!attr->vlan_proto[vlan_idx]) 2559 + attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q); 2560 + 2561 + if (vlan_idx) { 2562 + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 2563 + MLX5_FS_VLAN_DEPTH)) 2564 + return -EOPNOTSUPP; 2565 + 2566 + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2; 2567 + } else { 2568 + if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) && 2569 + (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || 2570 + tcf_vlan_push_prio(a))) 2571 + return -EOPNOTSUPP; 2572 + 2573 + *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; 2574 + } 2575 + } else { /* action is TCA_VLAN_ACT_MODIFY */ 2576 + return -EOPNOTSUPP; 2577 + } 2578 + 2579 + attr->total_vlan = vlan_idx + 1; 2580 + 2581 + return 0; 2582 + } 2583 + 2581 2584 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, 2582 2585 struct mlx5e_tc_flow_parse_attr *parse_attr, 2583 2586 struct mlx5e_tc_flow *flow) ··· 2639 2542 LIST_HEAD(actions); 2640 2543 bool encap = false; 2641 2544 u32 action = 0; 2545 + int err; 2642 2546 2643 2547 if (!tcf_exts_has_actions(exts)) 2644 2548 return -EINVAL; ··· 2656 2558 } 2657 2559 2658 2560 if (is_tcf_pedit(a)) { 2659 - int err; 2660 - 2661 2561 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB, 2662 2562 parse_attr); 2663 2563 if (err) ··· 2722 2626 } 2723 2627 2724 2628 if (is_tcf_vlan(a)) { 2725 - if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) { 2726 - action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP; 2727 - } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) { 2728 - action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH; 2729 - attr->vlan_vid = tcf_vlan_push_vid(a); 2730 - if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) { 2731 - 
attr->vlan_prio = tcf_vlan_push_prio(a); 2732 - attr->vlan_proto = tcf_vlan_push_proto(a); 2733 - if (!attr->vlan_proto) 2734 - attr->vlan_proto = htons(ETH_P_8021Q); 2735 - } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) || 2736 - tcf_vlan_push_prio(a)) { 2737 - return -EOPNOTSUPP; 2738 - } 2739 - } else { /* action is TCA_VLAN_ACT_MODIFY */ 2740 - return -EOPNOTSUPP; 2741 - } 2629 + err = parse_tc_vlan_action(priv, a, attr, &action); 2630 + 2631 + if (err) 2632 + return err; 2633 + 2742 2634 attr->mirror_count = attr->out_count; 2743 2635 continue; 2744 2636 }
+11
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 40 40 #include "mlx5_core.h" 41 41 #include "fpga/core.h" 42 42 #include "eswitch.h" 43 + #include "diag/fw_tracer.h" 43 44 44 45 enum { 45 46 MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), ··· 169 168 return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; 170 169 case MLX5_EVENT_TYPE_GENERAL_EVENT: 171 170 return "MLX5_EVENT_TYPE_GENERAL_EVENT"; 171 + case MLX5_EVENT_TYPE_DEVICE_TRACER: 172 + return "MLX5_EVENT_TYPE_DEVICE_TRACER"; 172 173 default: 173 174 return "Unrecognized event"; 174 175 } ··· 579 576 case MLX5_EVENT_TYPE_GENERAL_EVENT: 580 577 general_event_handler(dev, eqe); 581 578 break; 579 + 580 + case MLX5_EVENT_TYPE_DEVICE_TRACER: 581 + mlx5_fw_tracer_event(dev, eqe); 582 + break; 583 + 582 584 default: 583 585 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", 584 586 eqe->type, eq->eqn); ··· 860 852 861 853 if (MLX5_CAP_GEN(dev, temp_warn_event)) 862 854 async_event_mask |= (1ull << MLX5_EVENT_TYPE_TEMP_WARN_EVENT); 855 + 856 + if (MLX5_CAP_MCAM_REG(dev, tracer_registers)) 857 + async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER); 863 858 864 859 err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, 865 860 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
+15 -6
drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
··· 38 38 #include <net/devlink.h> 39 39 #include <linux/mlx5/device.h> 40 40 #include <linux/mlx5/eswitch.h> 41 + #include <linux/mlx5/fs.h> 41 42 #include "lib/mpfs.h" 42 43 43 44 #ifdef CONFIG_MLX5_ESWITCH ··· 257 256 int out_count; 258 257 259 258 int action; 260 - __be16 vlan_proto; 261 - u16 vlan_vid; 262 - u8 vlan_prio; 259 + __be16 vlan_proto[MLX5_FS_VLAN_DEPTH]; 260 + u16 vlan_vid[MLX5_FS_VLAN_DEPTH]; 261 + u8 vlan_prio[MLX5_FS_VLAN_DEPTH]; 262 + u8 total_vlan; 263 263 bool vlan_handled; 264 264 u32 encap_id; 265 265 u32 mod_hdr_id; ··· 284 282 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, 285 283 int vport, u16 vlan, u8 qos, u8 set_flags); 286 284 287 - static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev) 285 + static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev, 286 + u8 vlan_depth) 288 287 { 289 - return MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && 290 - MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); 288 + bool ret = MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan) && 289 + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan); 290 + 291 + if (vlan_depth == 1) 292 + return ret; 293 + 294 + return ret && MLX5_CAP_ESW_FLOWTABLE_FDB(dev, pop_vlan_2) && 295 + MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2); 291 296 } 292 297 293 298 #define MLX5_DEBUG_ESWITCH_MASK BIT(3)
+14 -9
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 66 66 67 67 flow_act.action = attr->action; 68 68 /* if per flow vlan pop/push is emulated, don't set that into the firmware */ 69 - if (!mlx5_eswitch_vlan_actions_supported(esw->dev)) 69 + if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) 70 70 flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | 71 71 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP); 72 72 else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) { 73 - flow_act.vlan.ethtype = ntohs(attr->vlan_proto); 74 - flow_act.vlan.vid = attr->vlan_vid; 75 - flow_act.vlan.prio = attr->vlan_prio; 73 + flow_act.vlan[0].ethtype = ntohs(attr->vlan_proto[0]); 74 + flow_act.vlan[0].vid = attr->vlan_vid[0]; 75 + flow_act.vlan[0].prio = attr->vlan_prio[0]; 76 + if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) { 77 + flow_act.vlan[1].ethtype = ntohs(attr->vlan_proto[1]); 78 + flow_act.vlan[1].vid = attr->vlan_vid[1]; 79 + flow_act.vlan[1].prio = attr->vlan_prio[1]; 80 + } 76 81 } 77 82 78 83 if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { ··· 271 266 /* protects against (1) setting rules with different vlans to push and 272 267 * (2) setting rules w.o vlans (attr->vlan = 0) && w. 
vlans to push (!= 0) 273 268 */ 274 - if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid)) 269 + if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0])) 275 270 goto out_notsupp; 276 271 277 272 return 0; ··· 289 284 int err = 0; 290 285 291 286 /* nop if we're on the vlan push/pop non emulation mode */ 292 - if (mlx5_eswitch_vlan_actions_supported(esw->dev)) 287 + if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) 293 288 return 0; 294 289 295 290 push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH); ··· 329 324 if (vport->vlan_refcount) 330 325 goto skip_set_push; 331 326 332 - err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0, 327 + err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid[0], 0, 333 328 SET_VLAN_INSERT | SET_VLAN_STRIP); 334 329 if (err) 335 330 goto out; 336 - vport->vlan = attr->vlan_vid; 331 + vport->vlan = attr->vlan_vid[0]; 337 332 skip_set_push: 338 333 vport->vlan_refcount++; 339 334 } ··· 352 347 int err = 0; 353 348 354 349 /* nop if we're on the vlan push/pop non emulation mode */ 355 - if (mlx5_eswitch_vlan_actions_supported(esw->dev)) 350 + if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) 356 351 return 0; 357 352 358 353 if (!attr->vlan_handled)
+9 -3
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
··· 349 349 350 350 vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan); 351 351 352 - MLX5_SET(vlan, vlan, ethtype, fte->action.vlan.ethtype); 353 - MLX5_SET(vlan, vlan, vid, fte->action.vlan.vid); 354 - MLX5_SET(vlan, vlan, prio, fte->action.vlan.prio); 352 + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[0].ethtype); 353 + MLX5_SET(vlan, vlan, vid, fte->action.vlan[0].vid); 354 + MLX5_SET(vlan, vlan, prio, fte->action.vlan[0].prio); 355 + 356 + vlan = MLX5_ADDR_OF(flow_context, in_flow_context, push_vlan_2); 357 + 358 + MLX5_SET(vlan, vlan, ethtype, fte->action.vlan[1].ethtype); 359 + MLX5_SET(vlan, vlan, vid, fte->action.vlan[1].vid); 360 + MLX5_SET(vlan, vlan, prio, fte->action.vlan[1].prio); 355 361 356 362 in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context, 357 363 match_value);
+4 -2
drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
··· 1465 1465 MLX5_FLOW_CONTEXT_ACTION_DECAP | 1466 1466 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | 1467 1467 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP | 1468 - MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)) 1468 + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | 1469 + MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 | 1470 + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) 1469 1471 return true; 1470 1472 1471 1473 return false; ··· 1826 1824 1827 1825 g = alloc_auto_flow_group(ft, spec); 1828 1826 if (IS_ERR(g)) { 1829 - rule = (void *)g; 1827 + rule = ERR_CAST(g); 1830 1828 up_write_ref_node(&ft->node); 1831 1829 return rule; 1832 1830 }
+17 -3
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 62 62 #include "accel/ipsec.h" 63 63 #include "accel/tls.h" 64 64 #include "lib/clock.h" 65 + #include "diag/fw_tracer.h" 65 66 66 67 MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); 67 - MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver"); 68 + MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver"); 68 69 MODULE_LICENSE("Dual BSD/GPL"); 69 70 MODULE_VERSION(DRIVER_VERSION); 70 71 ··· 991 990 goto err_sriov_cleanup; 992 991 } 993 992 993 + dev->tracer = mlx5_fw_tracer_create(dev); 994 + 994 995 return 0; 995 996 996 997 err_sriov_cleanup: ··· 1018 1015 1019 1016 static void mlx5_cleanup_once(struct mlx5_core_dev *dev) 1020 1017 { 1018 + mlx5_fw_tracer_destroy(dev->tracer); 1021 1019 mlx5_fpga_cleanup(dev); 1022 1020 mlx5_sriov_cleanup(dev); 1023 1021 mlx5_eswitch_cleanup(dev->priv.eswitch); ··· 1171 1167 goto err_put_uars; 1172 1168 } 1173 1169 1170 + err = mlx5_fw_tracer_init(dev->tracer); 1171 + if (err) { 1172 + dev_err(&pdev->dev, "Failed to init FW tracer\n"); 1173 + goto err_fw_tracer; 1174 + } 1175 + 1174 1176 err = alloc_comp_eqs(dev); 1175 1177 if (err) { 1176 1178 dev_err(&pdev->dev, "Failed to alloc completion EQs\n"); 1177 - goto err_stop_eqs; 1179 + goto err_comp_eqs; 1178 1180 } 1179 1181 1180 1182 err = mlx5_irq_set_affinity_hints(dev); ··· 1262 1252 err_affinity_hints: 1263 1253 free_comp_eqs(dev); 1264 1254 1265 - err_stop_eqs: 1255 + err_comp_eqs: 1256 + mlx5_fw_tracer_cleanup(dev->tracer); 1257 + 1258 + err_fw_tracer: 1266 1259 mlx5_stop_eqs(dev); 1267 1260 1268 1261 err_put_uars: ··· 1333 1320 mlx5_fpga_device_stop(dev); 1334 1321 mlx5_irq_clear_affinity_hints(dev); 1335 1322 free_comp_eqs(dev); 1323 + mlx5_fw_tracer_cleanup(dev->tracer); 1336 1324 mlx5_stop_eqs(dev); 1337 1325 mlx5_put_uars_page(dev, priv->uar); 1338 1326 mlx5_free_irq_vectors(dev);
+6
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 66 66 __func__, __LINE__, current->pid, \ 67 67 ##__VA_ARGS__) 68 68 69 + #define mlx5_core_err_rl(__dev, format, ...) \ 70 + dev_err_ratelimited(&(__dev)->pdev->dev, \ 71 + "%s:%d:(pid %d): " format, \ 72 + __func__, __LINE__, current->pid, \ 73 + ##__VA_ARGS__) 74 + 69 75 #define mlx5_core_warn(__dev, format, ...) \ 70 76 dev_warn(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ 71 77 __func__, __LINE__, current->pid, \
-17
drivers/net/ethernet/mellanox/mlx5/core/mr.c
··· 146 146 } 147 147 EXPORT_SYMBOL(mlx5_core_query_mkey); 148 148 149 - int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, 150 - u32 *mkey) 151 - { 152 - u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {0}; 153 - u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {0}; 154 - int err; 155 - 156 - MLX5_SET(query_special_contexts_in, in, opcode, 157 - MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS); 158 - err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 159 - if (!err) 160 - *mkey = MLX5_GET(query_special_contexts_out, out, 161 - dump_fill_mkey); 162 - return err; 163 - } 164 - EXPORT_SYMBOL(mlx5_core_dump_fill_mkey); 165 - 166 149 static inline u32 mlx5_get_psv(u32 *out, int psv_index) 167 150 { 168 151 switch (psv_index) {
+14 -4
include/linux/mlx5/device.h
··· 332 332 333 333 MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, 334 334 MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21, 335 + 336 + MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, 337 + }; 338 + 339 + enum { 340 + MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0, 341 + MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1, 335 342 }; 336 343 337 344 enum { ··· 757 750 758 751 #define MLX5_MINI_CQE_ARRAY_SIZE 8 759 752 760 - static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) 753 + static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) 761 754 { 762 755 return (cqe->op_own >> 2) & 0x3; 763 756 } ··· 777 770 return (cqe->l4_l3_hdr_type >> 2) & 0x3; 778 771 } 779 772 780 - static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe) 773 + static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) 781 774 { 782 775 return cqe->outer_l3_tunneled & 0x1; 783 776 } 784 777 785 - static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) 778 + static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe) 786 779 { 787 - return !!(cqe->l4_l3_hdr_type & 0x1); 780 + return cqe->l4_l3_hdr_type & 0x1; 788 781 } 789 782 790 783 static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) ··· 1077 1070 /* GET Dev Caps macros */ 1078 1071 #define MLX5_CAP_GEN(mdev, cap) \ 1079 1072 MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) 1073 + 1074 + #define MLX5_CAP_GEN_64(mdev, cap) \ 1075 + MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap) 1080 1076 1081 1077 #define MLX5_CAP_GEN_MAX(mdev, cap) \ 1082 1078 MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
+8 -2
include/linux/mlx5/driver.h
··· 138 138 MLX5_REG_HOST_ENDIANNESS = 0x7004, 139 139 MLX5_REG_MCIA = 0x9014, 140 140 MLX5_REG_MLCR = 0x902b, 141 + MLX5_REG_MTRC_CAP = 0x9040, 142 + MLX5_REG_MTRC_CONF = 0x9041, 143 + MLX5_REG_MTRC_STDB = 0x9042, 144 + MLX5_REG_MTRC_CTRL = 0x9043, 141 145 MLX5_REG_MPCNT = 0x9051, 142 146 MLX5_REG_MTPPS = 0x9053, 143 147 MLX5_REG_MTPPSE = 0x9054, 148 + MLX5_REG_MPEGC = 0x9056, 144 149 MLX5_REG_MCQI = 0x9061, 145 150 MLX5_REG_MCC = 0x9062, 146 151 MLX5_REG_MCDA = 0x9063, ··· 816 811 struct mlx5_pps pps_info; 817 812 }; 818 813 814 + struct mlx5_fw_tracer; 815 + 819 816 struct mlx5_core_dev { 820 817 struct pci_dev *pdev; 821 818 /* sync pci state */ ··· 862 855 struct mlx5_clock clock; 863 856 struct mlx5_ib_clock_info *clock_info; 864 857 struct page *clock_info_page; 858 + struct mlx5_fw_tracer *tracer; 865 859 }; 866 860 867 861 struct mlx5_db { ··· 1075 1067 struct mlx5_core_mkey *mkey); 1076 1068 int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, 1077 1069 u32 *out, int outlen); 1078 - int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey, 1079 - u32 *mkey); 1080 1070 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); 1081 1071 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); 1082 1072 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
+3 -1
include/linux/mlx5/fs.h
··· 152 152 u8 prio; 153 153 }; 154 154 155 + #define MLX5_FS_VLAN_DEPTH 2 156 + 155 157 struct mlx5_flow_act { 156 158 u32 action; 157 159 bool has_flow_tag; ··· 161 159 u32 encap_id; 162 160 u32 modify_id; 163 161 uintptr_t esp_id; 164 - struct mlx5_fs_vlan vlan; 162 + struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH]; 165 163 struct ib_counters *counters; 166 164 }; 167 165
+166 -9
include/linux/mlx5/mlx5_ifc.h
··· 76 76 }; 77 77 78 78 enum { 79 + MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4), 80 + MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5), 81 + }; 82 + 83 + enum { 84 + MLX5_OBJ_TYPE_UCTX = 0x0004, 85 + }; 86 + 87 + enum { 79 88 MLX5_CMD_OP_QUERY_HCA_CAP = 0x100, 80 89 MLX5_CMD_OP_QUERY_ADAPTER = 0x101, 81 90 MLX5_CMD_OP_INIT_HCA = 0x102, ··· 251 242 MLX5_CMD_OP_FPGA_QUERY_QP = 0x962, 252 243 MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963, 253 244 MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964, 245 + MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00, 246 + MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03, 254 247 MLX5_CMD_OP_MAX 255 248 }; 256 249 ··· 337 326 u8 reserved_at_9[0x1]; 338 327 u8 pop_vlan[0x1]; 339 328 u8 push_vlan[0x1]; 340 - u8 reserved_at_c[0x14]; 329 + u8 reserved_at_c[0x1]; 330 + u8 pop_vlan_2[0x1]; 331 + u8 push_vlan_2[0x1]; 332 + u8 reserved_at_f[0x11]; 341 333 342 334 u8 reserved_at_20[0x2]; 343 335 u8 log_max_ft_size[0x6]; ··· 888 874 u8 log_max_eq_sz[0x8]; 889 875 u8 reserved_at_e8[0x2]; 890 876 u8 log_max_mkey[0x6]; 891 - u8 reserved_at_f0[0xc]; 877 + u8 reserved_at_f0[0x8]; 878 + u8 dump_fill_mkey[0x1]; 879 + u8 reserved_at_f9[0x3]; 892 880 u8 log_max_eq[0x4]; 893 881 894 882 u8 max_indirection[0x8]; ··· 1129 1113 u8 reserved_at_3f8[0x3]; 1130 1114 u8 log_max_current_uc_list[0x5]; 1131 1115 1132 - u8 reserved_at_400[0x80]; 1116 + u8 general_obj_types[0x40]; 1117 + 1118 + u8 reserved_at_440[0x40]; 1133 1119 1134 1120 u8 reserved_at_480[0x3]; 1135 1121 u8 log_max_l2_table[0x5]; ··· 1686 1668 1687 1669 u8 rx_buffer_full_low[0x20]; 1688 1670 1689 - u8 reserved_at_1c0[0x600]; 1671 + u8 rx_icrc_encapsulated_high[0x20]; 1672 + 1673 + u8 rx_icrc_encapsulated_low[0x20]; 1674 + 1675 + u8 reserved_at_200[0x5c0]; 1690 1676 }; 1691 1677 1692 1678 struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits { ··· 2389 2367 MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40, 2390 2368 MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80, 2391 2369 MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100, 2370 + 
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400, 2371 + MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800, 2392 2372 }; 2393 2373 2394 2374 struct mlx5_ifc_vlan_bits { ··· 2421 2397 2422 2398 u8 modify_header_id[0x20]; 2423 2399 2424 - u8 reserved_at_100[0x100]; 2400 + struct mlx5_ifc_vlan_bits push_vlan_2; 2401 + 2402 + u8 reserved_at_120[0xe0]; 2425 2403 2426 2404 struct mlx5_ifc_fte_match_param_bits match_value; 2427 2405 ··· 8056 8030 u8 error_type[0x8]; 8057 8031 }; 8058 8032 8059 - struct mlx5_ifc_pcam_enhanced_features_bits { 8060 - u8 reserved_at_0[0x76]; 8033 + struct mlx5_ifc_mpegc_reg_bits { 8034 + u8 reserved_at_0[0x30]; 8035 + u8 field_select[0x10]; 8061 8036 8037 + u8 tx_overflow_sense[0x1]; 8038 + u8 mark_cqe[0x1]; 8039 + u8 mark_cnp[0x1]; 8040 + u8 reserved_at_43[0x1b]; 8041 + u8 tx_lossy_overflow_oper[0x2]; 8042 + 8043 + u8 reserved_at_60[0x100]; 8044 + }; 8045 + 8046 + struct mlx5_ifc_pcam_enhanced_features_bits { 8047 + u8 reserved_at_0[0x6d]; 8048 + u8 rx_icrc_encapsulated_counter[0x1]; 8049 + u8 reserved_at_6e[0x8]; 8062 8050 u8 pfcc_mask[0x1]; 8063 8051 u8 reserved_at_77[0x4]; 8064 8052 u8 rx_buffer_fullness_counters[0x1]; ··· 8117 8077 }; 8118 8078 8119 8079 struct mlx5_ifc_mcam_enhanced_features_bits { 8120 - u8 reserved_at_0[0x7b]; 8080 + u8 reserved_at_0[0x74]; 8081 + u8 mark_tx_action_cnp[0x1]; 8082 + u8 mark_tx_action_cqe[0x1]; 8083 + u8 dynamic_tx_overflow[0x1]; 8084 + u8 reserved_at_77[0x4]; 8121 8085 u8 pcie_outbound_stalled[0x1]; 8122 8086 u8 tx_overflow_buffer_pkt[0x1]; 8123 8087 u8 mtpps_enh_out_per_adj[0x1]; ··· 8136 8092 u8 mcqi[0x1]; 8137 8093 u8 reserved_at_1f[0x1]; 8138 8094 8139 - u8 regs_95_to_64[0x20]; 8095 + u8 regs_95_to_87[0x9]; 8096 + u8 mpegc[0x1]; 8097 + u8 regs_85_to_68[0x12]; 8098 + u8 tracer_registers[0x4]; 8099 + 8140 8100 u8 regs_63_to_32[0x20]; 8141 8101 u8 regs_31_to_0[0x20]; 8142 8102 }; ··· 9161 9113 u8 syndrome[0x20]; 9162 9114 9163 9115 u8 reserved_at_40[0x40]; 9116 + }; 9117 + 9118 + struct 
mlx5_ifc_general_obj_in_cmd_hdr_bits { 9119 + u8 opcode[0x10]; 9120 + u8 uid[0x10]; 9121 + 9122 + u8 reserved_at_20[0x10]; 9123 + u8 obj_type[0x10]; 9124 + 9125 + u8 obj_id[0x20]; 9126 + 9127 + u8 reserved_at_60[0x20]; 9128 + }; 9129 + 9130 + struct mlx5_ifc_general_obj_out_cmd_hdr_bits { 9131 + u8 status[0x8]; 9132 + u8 reserved_at_8[0x18]; 9133 + 9134 + u8 syndrome[0x20]; 9135 + 9136 + u8 obj_id[0x20]; 9137 + 9138 + u8 reserved_at_60[0x20]; 9139 + }; 9140 + 9141 + struct mlx5_ifc_umem_bits { 9142 + u8 modify_field_select[0x40]; 9143 + 9144 + u8 reserved_at_40[0x5b]; 9145 + u8 log_page_size[0x5]; 9146 + 9147 + u8 page_offset[0x20]; 9148 + 9149 + u8 num_of_mtt[0x40]; 9150 + 9151 + struct mlx5_ifc_mtt_bits mtt[0]; 9152 + }; 9153 + 9154 + struct mlx5_ifc_uctx_bits { 9155 + u8 modify_field_select[0x40]; 9156 + 9157 + u8 reserved_at_40[0x1c0]; 9158 + }; 9159 + 9160 + struct mlx5_ifc_create_umem_in_bits { 9161 + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; 9162 + struct mlx5_ifc_umem_bits umem; 9163 + }; 9164 + 9165 + struct mlx5_ifc_create_uctx_in_bits { 9166 + struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr; 9167 + struct mlx5_ifc_uctx_bits uctx; 9168 + }; 9169 + 9170 + struct mlx5_ifc_mtrc_string_db_param_bits { 9171 + u8 string_db_base_address[0x20]; 9172 + 9173 + u8 reserved_at_20[0x8]; 9174 + u8 string_db_size[0x18]; 9175 + }; 9176 + 9177 + struct mlx5_ifc_mtrc_cap_bits { 9178 + u8 trace_owner[0x1]; 9179 + u8 trace_to_memory[0x1]; 9180 + u8 reserved_at_2[0x4]; 9181 + u8 trc_ver[0x2]; 9182 + u8 reserved_at_8[0x14]; 9183 + u8 num_string_db[0x4]; 9184 + 9185 + u8 first_string_trace[0x8]; 9186 + u8 num_string_trace[0x8]; 9187 + u8 reserved_at_30[0x28]; 9188 + 9189 + u8 log_max_trace_buffer_size[0x8]; 9190 + 9191 + u8 reserved_at_60[0x20]; 9192 + 9193 + struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8]; 9194 + 9195 + u8 reserved_at_280[0x180]; 9196 + }; 9197 + 9198 + struct mlx5_ifc_mtrc_conf_bits { 9199 + u8 reserved_at_0[0x1c]; 9200 + u8 
trace_mode[0x4]; 9201 + u8 reserved_at_20[0x18]; 9202 + u8 log_trace_buffer_size[0x8]; 9203 + u8 trace_mkey[0x20]; 9204 + u8 reserved_at_60[0x3a0]; 9205 + }; 9206 + 9207 + struct mlx5_ifc_mtrc_stdb_bits { 9208 + u8 string_db_index[0x4]; 9209 + u8 reserved_at_4[0x4]; 9210 + u8 read_size[0x18]; 9211 + u8 start_offset[0x20]; 9212 + u8 string_db_data[0]; 9213 + }; 9214 + 9215 + struct mlx5_ifc_mtrc_ctrl_bits { 9216 + u8 trace_status[0x2]; 9217 + u8 reserved_at_2[0x2]; 9218 + u8 arm_event[0x1]; 9219 + u8 reserved_at_5[0xb]; 9220 + u8 modify_field_select[0x10]; 9221 + u8 reserved_at_20[0x2b]; 9222 + u8 current_timestamp52_32[0x15]; 9223 + u8 current_timestamp31_0[0x20]; 9224 + u8 reserved_at_80[0x180]; 9164 9225 }; 9165 9226 9166 9227 #endif /* MLX5_IFC_H */