Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx5-next'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 update 2016-11-15

This series contains four humble mlx5 features.

From Gal,
- Add the support for PCIe statistics and expose them in ethtool

From Huy,
- Add the support for port module events reporting and statistics
- Add the support for driver version setting into FW (for display purposes only)

From Mohamad,
- Extended the command interface cache flexibility

This series was generated against commit
6a02f5eb6a8a ("Merge branch 'mlxsw-i2c'")

V2:
- Changed plain "unsigned" to "unsigned int"
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+454 -84
+66 -73
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 54 54 }; 55 55 56 56 enum { 57 - NUM_LONG_LISTS = 2, 58 - NUM_MED_LISTS = 64, 59 - LONG_LIST_SIZE = (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 + 60 - MLX5_CMD_DATA_BLOCK_SIZE, 61 - MED_LIST_SIZE = 16 + MLX5_CMD_DATA_BLOCK_SIZE, 62 - }; 63 - 64 - enum { 65 57 MLX5_CMD_DELIVERY_STAT_OK = 0x0, 66 58 MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR = 0x1, 67 59 MLX5_CMD_DELIVERY_STAT_TOK_ERR = 0x2, ··· 1364 1372 { 1365 1373 unsigned long flags; 1366 1374 1367 - if (msg->cache) { 1368 - spin_lock_irqsave(&msg->cache->lock, flags); 1369 - list_add_tail(&msg->list, &msg->cache->head); 1370 - spin_unlock_irqrestore(&msg->cache->lock, flags); 1375 + if (msg->parent) { 1376 + spin_lock_irqsave(&msg->parent->lock, flags); 1377 + list_add_tail(&msg->list, &msg->parent->head); 1378 + spin_unlock_irqrestore(&msg->parent->lock, flags); 1371 1379 } else { 1372 1380 mlx5_free_cmd_msg(dev, msg); 1373 1381 } ··· 1464 1472 gfp_t gfp) 1465 1473 { 1466 1474 struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM); 1475 + struct cmd_msg_cache *ch = NULL; 1467 1476 struct mlx5_cmd *cmd = &dev->cmd; 1468 - struct cache_ent *ent = NULL; 1477 + int i; 1469 1478 1470 - if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE) 1471 - ent = &cmd->cache.large; 1472 - else if (in_size > 16 && in_size <= MED_LIST_SIZE) 1473 - ent = &cmd->cache.med; 1479 + if (in_size <= 16) 1480 + goto cache_miss; 1474 1481 1475 - if (ent) { 1476 - spin_lock_irq(&ent->lock); 1477 - if (!list_empty(&ent->head)) { 1478 - msg = list_entry(ent->head.next, typeof(*msg), list); 1479 - /* For cached lists, we must explicitly state what is 1480 - * the real size 1481 - */ 1482 - msg->len = in_size; 1483 - list_del(&msg->list); 1482 + for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) { 1483 + ch = &cmd->cache[i]; 1484 + if (in_size > ch->max_inbox_size) 1485 + continue; 1486 + spin_lock_irq(&ch->lock); 1487 + if (list_empty(&ch->head)) { 1488 + spin_unlock_irq(&ch->lock); 1489 + continue; 1484 1490 } 1485 - spin_unlock_irq(&ent->lock); 1491 + 
msg = list_entry(ch->head.next, typeof(*msg), list); 1492 + /* For cached lists, we must explicitly state what is 1493 + * the real size 1494 + */ 1495 + msg->len = in_size; 1496 + list_del(&msg->list); 1497 + spin_unlock_irq(&ch->lock); 1498 + break; 1486 1499 } 1487 1500 1488 - if (IS_ERR(msg)) 1489 - msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); 1501 + if (!IS_ERR(msg)) 1502 + return msg; 1490 1503 1504 + cache_miss: 1505 + msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0); 1491 1506 return msg; 1492 1507 } 1493 1508 ··· 1592 1593 1593 1594 static void destroy_msg_cache(struct mlx5_core_dev *dev) 1594 1595 { 1595 - struct mlx5_cmd *cmd = &dev->cmd; 1596 + struct cmd_msg_cache *ch; 1596 1597 struct mlx5_cmd_msg *msg; 1597 1598 struct mlx5_cmd_msg *n; 1599 + int i; 1598 1600 1599 - list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) { 1600 - list_del(&msg->list); 1601 - mlx5_free_cmd_msg(dev, msg); 1602 - } 1603 - 1604 - list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) { 1605 - list_del(&msg->list); 1606 - mlx5_free_cmd_msg(dev, msg); 1601 + for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) { 1602 + ch = &dev->cmd.cache[i]; 1603 + list_for_each_entry_safe(msg, n, &ch->head, list) { 1604 + list_del(&msg->list); 1605 + mlx5_free_cmd_msg(dev, msg); 1606 + } 1607 1607 } 1608 1608 } 1609 1609 1610 - static int create_msg_cache(struct mlx5_core_dev *dev) 1610 + static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = { 1611 + 512, 32, 16, 8, 2 1612 + }; 1613 + 1614 + static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = { 1615 + 16 + MLX5_CMD_DATA_BLOCK_SIZE, 1616 + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 2, 1617 + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 16, 1618 + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 256, 1619 + 16 + MLX5_CMD_DATA_BLOCK_SIZE * 512, 1620 + }; 1621 + 1622 + static void create_msg_cache(struct mlx5_core_dev *dev) 1611 1623 { 1612 1624 struct mlx5_cmd *cmd = &dev->cmd; 1625 + struct cmd_msg_cache *ch; 1613 1626 struct mlx5_cmd_msg *msg; 1614 - int 
err; 1615 1627 int i; 1628 + int k; 1616 1629 1617 - spin_lock_init(&cmd->cache.large.lock); 1618 - INIT_LIST_HEAD(&cmd->cache.large.head); 1619 - spin_lock_init(&cmd->cache.med.lock); 1620 - INIT_LIST_HEAD(&cmd->cache.med.head); 1621 - 1622 - for (i = 0; i < NUM_LONG_LISTS; i++) { 1623 - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0); 1624 - if (IS_ERR(msg)) { 1625 - err = PTR_ERR(msg); 1626 - goto ex_err; 1630 + /* Initialize and fill the caches with initial entries */ 1631 + for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) { 1632 + ch = &cmd->cache[k]; 1633 + spin_lock_init(&ch->lock); 1634 + INIT_LIST_HEAD(&ch->head); 1635 + ch->num_ent = cmd_cache_num_ent[k]; 1636 + ch->max_inbox_size = cmd_cache_ent_size[k]; 1637 + for (i = 0; i < ch->num_ent; i++) { 1638 + msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN, 1639 + ch->max_inbox_size, 0); 1640 + if (IS_ERR(msg)) 1641 + break; 1642 + msg->parent = ch; 1643 + list_add_tail(&msg->list, &ch->head); 1627 1644 } 1628 - msg->cache = &cmd->cache.large; 1629 - list_add_tail(&msg->list, &cmd->cache.large.head); 1630 1645 } 1631 - 1632 - for (i = 0; i < NUM_MED_LISTS; i++) { 1633 - msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0); 1634 - if (IS_ERR(msg)) { 1635 - err = PTR_ERR(msg); 1636 - goto ex_err; 1637 - } 1638 - msg->cache = &cmd->cache.med; 1639 - list_add_tail(&msg->list, &cmd->cache.med.head); 1640 - } 1641 - 1642 - return 0; 1643 - 1644 - ex_err: 1645 - destroy_msg_cache(dev); 1646 - return err; 1647 1646 } 1648 1647 1649 1648 static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd) ··· 1764 1767 1765 1768 cmd->mode = CMD_MODE_POLLING; 1766 1769 1767 - err = create_msg_cache(dev); 1768 - if (err) { 1769 - dev_err(&dev->pdev->dev, "failed to create command cache\n"); 1770 - goto err_free_page; 1771 - } 1770 + create_msg_cache(dev); 1772 1771 1773 1772 set_wqname(dev); 1774 1773 cmd->wq = create_singlethread_workqueue(cmd->wq_name);
+39 -1
drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
··· 171 171 return NUM_SW_COUNTERS + 172 172 MLX5E_NUM_Q_CNTRS(priv) + 173 173 NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS + 174 + NUM_PCIE_COUNTERS + 174 175 MLX5E_NUM_RQ_STATS(priv) + 175 176 MLX5E_NUM_SQ_STATS(priv) + 176 - MLX5E_NUM_PFC_COUNTERS(priv); 177 + MLX5E_NUM_PFC_COUNTERS(priv) + 178 + ARRAY_SIZE(mlx5e_pme_status_desc) + 179 + ARRAY_SIZE(mlx5e_pme_error_desc); 180 + 177 181 case ETH_SS_PRIV_FLAGS: 178 182 return ARRAY_SIZE(mlx5e_priv_flags); 179 183 /* fallthrough */ ··· 217 213 strcpy(data + (idx++) * ETH_GSTRING_LEN, 218 214 pport_2819_stats_desc[i].format); 219 215 216 + for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++) 217 + strcpy(data + (idx++) * ETH_GSTRING_LEN, 218 + pcie_perf_stats_desc[i].format); 219 + 220 + for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++) 221 + strcpy(data + (idx++) * ETH_GSTRING_LEN, 222 + pcie_tas_stats_desc[i].format); 223 + 220 224 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { 221 225 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) 222 226 sprintf(data + (idx++) * ETH_GSTRING_LEN, ··· 248 236 pport_per_prio_pfc_stats_desc[i].format, "global"); 249 237 } 250 238 } 239 + 240 + /* port module event counters */ 241 + for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++) 242 + strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format); 243 + 244 + for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++) 245 + strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format); 251 246 252 247 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 253 248 return; ··· 298 279 struct ethtool_stats *stats, u64 *data) 299 280 { 300 281 struct mlx5e_priv *priv = netdev_priv(dev); 282 + struct mlx5_priv *mlx5_priv; 301 283 int i, j, tc, prio, idx = 0; 302 284 unsigned long pfc_combined; 303 285 ··· 334 314 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters, 335 315 pport_2819_stats_desc, i); 336 316 317 + for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++) 318 + data[idx++] = 
MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters, 319 + pcie_perf_stats_desc, i); 320 + 321 + for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++) 322 + data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters, 323 + pcie_tas_stats_desc, i); 324 + 337 325 for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { 338 326 for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++) 339 327 data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio], ··· 362 334 pport_per_prio_pfc_stats_desc, i); 363 335 } 364 336 } 337 + 338 + /* port module event counters */ 339 + mlx5_priv = &priv->mdev->priv; 340 + for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++) 341 + data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters, 342 + mlx5e_pme_status_desc, i); 343 + 344 + for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++) 345 + data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters, 346 + mlx5e_pme_error_desc, i); 365 347 366 348 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) 367 349 return;
+24
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 290 290 &qcnt->rx_out_of_buffer); 291 291 } 292 292 293 + static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv) 294 + { 295 + struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie; 296 + struct mlx5_core_dev *mdev = priv->mdev; 297 + int sz = MLX5_ST_SZ_BYTES(mpcnt_reg); 298 + void *out; 299 + u32 *in; 300 + 301 + in = mlx5_vzalloc(sz); 302 + if (!in) 303 + return; 304 + 305 + out = pcie_stats->pcie_perf_counters; 306 + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP); 307 + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); 308 + 309 + out = pcie_stats->pcie_tas_counters; 310 + MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP); 311 + mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0); 312 + 313 + kvfree(in); 314 + } 315 + 293 316 void mlx5e_update_stats(struct mlx5e_priv *priv) 294 317 { 295 318 mlx5e_update_q_counter(priv); 296 319 mlx5e_update_vport_counters(priv); 297 320 mlx5e_update_pport_counters(priv); 298 321 mlx5e_update_sw_counters(priv); 322 + mlx5e_update_pcie_counters(priv); 299 323 } 300 324 301 325 void mlx5e_update_stats_work(struct work_struct *work)
+48 -1
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
··· 39 39 #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \ 40 40 (*(u32 *)((char *)ptr + dsc[i].offset)) 41 41 #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \ 42 - be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) 42 + be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset)) 43 43 44 44 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) 45 45 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) ··· 276 276 { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) }, 277 277 }; 278 278 279 + #define PCIE_PERF_OFF(c) \ 280 + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c) 281 + #define PCIE_PERF_GET(pcie_stats, c) \ 282 + MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \ 283 + counter_set.pcie_perf_cntrs_grp_data_layout.c) 284 + #define PCIE_TAS_OFF(c) \ 285 + MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c) 286 + #define PCIE_TAS_GET(pcie_stats, c) \ 287 + MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \ 288 + counter_set.pcie_tas_cntrs_grp_data_layout.c) 289 + 290 + struct mlx5e_pcie_stats { 291 + __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; 292 + __be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)]; 293 + }; 294 + 295 + static const struct counter_desc pcie_perf_stats_desc[] = { 296 + { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) }, 297 + { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) }, 298 + }; 299 + 300 + static const struct counter_desc pcie_tas_stats_desc[] = { 301 + { "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) }, 302 + { "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) }, 303 + }; 304 + 279 305 struct mlx5e_rq_stats { 280 306 u64 packets; 281 307 u64 bytes; ··· 386 360 #define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc) 387 361 #define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc) 388 362 #define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc) 363 + #define 
NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc) 364 + #define NUM_PCIE_TAS_COUNTERS ARRAY_SIZE(pcie_tas_stats_desc) 389 365 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \ 390 366 ARRAY_SIZE(pport_per_prio_traffic_stats_desc) 391 367 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \ ··· 397 369 NUM_PPORT_2819_COUNTERS + \ 398 370 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \ 399 371 NUM_PPORT_PRIO) 372 + #define NUM_PCIE_COUNTERS (NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS) 400 373 #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) 401 374 #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) 402 375 ··· 406 377 struct mlx5e_qcounter_stats qcnt; 407 378 struct mlx5e_vport_stats vport; 408 379 struct mlx5e_pport_stats pport; 380 + struct mlx5e_pcie_stats pcie; 381 + }; 382 + 383 + static const struct counter_desc mlx5e_pme_status_desc[] = { 384 + { "module_plug", 0 }, 385 + { "module_unplug", 8 }, 386 + }; 387 + 388 + static const struct counter_desc mlx5e_pme_error_desc[] = { 389 + { "module_pwr_budget_exd", 0 }, /* power budget exceed */ 390 + { "module_long_range", 8 }, /* long range for non MLNX cable */ 391 + { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ 392 + { "module_no_eeprom", 24 }, /* no eeprom/retry time out */ 393 + { "module_enforce_part", 32 }, /* enforce part number list */ 394 + { "module_unknown_id", 40 }, /* unknown identifier */ 395 + { "module_high_temp", 48 }, /* high temperature */ 396 + { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ 397 + { "module_unknown_status", 64 }, 409 398 }; 410 399 411 400 #endif /* __MLX5_EN_STATS_H__ */
+12
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 139 139 return "MLX5_EVENT_TYPE_PORT_CHANGE"; 140 140 case MLX5_EVENT_TYPE_GPIO_EVENT: 141 141 return "MLX5_EVENT_TYPE_GPIO_EVENT"; 142 + case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: 143 + return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; 142 144 case MLX5_EVENT_TYPE_REMOTE_CONFIG: 143 145 return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; 144 146 case MLX5_EVENT_TYPE_DB_BF_CONGESTION: ··· 287 285 mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); 288 286 break; 289 287 #endif 288 + 289 + case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: 290 + mlx5_port_module_event(dev, eqe); 291 + break; 292 + 290 293 default: 291 294 mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", 292 295 eqe->type, eq->eqn); ··· 486 479 MLX5_CAP_GEN(dev, vport_group_manager) && 487 480 mlx5_core_is_pf(dev)) 488 481 async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); 482 + 483 + if (MLX5_CAP_GEN(dev, port_module_event)) 484 + async_event_mask |= (1ull << MLX5_EVENT_TYPE_PORT_MODULE_EVENT); 485 + else 486 + mlx5_core_dbg(dev, "port_module_event is not set\n"); 489 487 490 488 err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, 491 489 MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
+37
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 175 175 return err; 176 176 } 177 177 178 + static void mlx5_set_driver_version(struct mlx5_core_dev *dev) 179 + { 180 + int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in, 181 + driver_version); 182 + u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {0}; 183 + u8 out[MLX5_ST_SZ_BYTES(set_driver_version_out)] = {0}; 184 + int remaining_size = driver_ver_sz; 185 + char *string; 186 + 187 + if (!MLX5_CAP_GEN(dev, driver_version)) 188 + return; 189 + 190 + string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version); 191 + 192 + strncpy(string, "Linux", remaining_size); 193 + 194 + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); 195 + strncat(string, ",", remaining_size); 196 + 197 + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); 198 + strncat(string, DRIVER_NAME, remaining_size); 199 + 200 + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); 201 + strncat(string, ",", remaining_size); 202 + 203 + remaining_size = max_t(int, 0, driver_ver_sz - strlen(string)); 204 + strncat(string, DRIVER_VERSION, remaining_size); 205 + 206 + /*Send the command*/ 207 + MLX5_SET(set_driver_version_in, in, opcode, 208 + MLX5_CMD_OP_SET_DRIVER_VERSION); 209 + 210 + mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); 211 + } 212 + 178 213 static int set_dma_caps(struct pci_dev *pdev) 179 214 { 180 215 int err; ··· 1049 1014 dev_err(&pdev->dev, "init hca failed\n"); 1050 1015 goto err_pagealloc_stop; 1051 1016 } 1017 + 1018 + mlx5_set_driver_version(dev); 1052 1019 1053 1020 mlx5_start_health_poll(dev); 1054 1021
+1
drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
··· 81 81 int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); 82 82 void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, 83 83 unsigned long param); 84 + void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); 84 85 void mlx5_enter_error_state(struct mlx5_core_dev *dev); 85 86 void mlx5_disable_device(struct mlx5_core_dev *dev); 86 87 void mlx5_recover_device(struct mlx5_core_dev *dev);
+57
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 746 746 *supported = !!(MLX5_GET(pcmr_reg, out, fcs_cap)); 747 747 *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk)); 748 748 } 749 + 750 + static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = { 751 + "Cable plugged", /* MLX5_MODULE_STATUS_PLUGGED = 0x1 */ 752 + "Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */ 753 + "Cable error", /* MLX5_MODULE_STATUS_ERROR = 0x3 */ 754 + }; 755 + 756 + static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = { 757 + "Power budget exceeded", 758 + "Long Range for non MLNX cable", 759 + "Bus stuck(I2C or data shorted)", 760 + "No EEPROM/retry timeout", 761 + "Enforce part number list", 762 + "Unknown identifier", 763 + "High Temperature", 764 + "Bad or shorted cable/module", 765 + "Unknown status", 766 + }; 767 + 768 + void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) 769 + { 770 + enum port_module_event_status_type module_status; 771 + enum port_module_event_error_type error_type; 772 + struct mlx5_eqe_port_module *module_event_eqe; 773 + struct mlx5_priv *priv = &dev->priv; 774 + u8 module_num; 775 + 776 + module_event_eqe = &eqe->data.port_module; 777 + module_num = module_event_eqe->module; 778 + module_status = module_event_eqe->module_status & 779 + PORT_MODULE_EVENT_MODULE_STATUS_MASK; 780 + error_type = module_event_eqe->error_type & 781 + PORT_MODULE_EVENT_ERROR_TYPE_MASK; 782 + 783 + if (module_status < MLX5_MODULE_STATUS_ERROR) { 784 + priv->pme_stats.status_counters[module_status - 1]++; 785 + } else if (module_status == MLX5_MODULE_STATUS_ERROR) { 786 + if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN) 787 + /* Unknown error type */ 788 + error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN; 789 + priv->pme_stats.error_counters[error_type]++; 790 + } 791 + 792 + if (!printk_ratelimit()) 793 + return; 794 + 795 + if (module_status < MLX5_MODULE_STATUS_ERROR) 796 + mlx5_core_info(dev, 797 + "Port module event: module %u, %s\n", 798 + module_num, 
mlx5_pme_status[module_status - 1]); 799 + 800 + else if (module_status == MLX5_MODULE_STATUS_ERROR) 801 + mlx5_core_info(dev, 802 + "Port module event[error]: module %u, %s, %s\n", 803 + module_num, mlx5_pme_status[module_status - 1], 804 + mlx5_pme_error[error_type]); 805 + }
+16
include/linux/mlx5/device.h
··· 277 277 MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, 278 278 MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, 279 279 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, 280 + MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16, 280 281 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, 281 282 282 283 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, ··· 553 552 __be32 rsvd1[6]; 554 553 } __packed; 555 554 555 + struct mlx5_eqe_port_module { 556 + u8 reserved_at_0[1]; 557 + u8 module; 558 + u8 reserved_at_2[1]; 559 + u8 module_status; 560 + u8 reserved_at_4[2]; 561 + u8 error_type; 562 + } __packed; 563 + 556 564 union ev_data { 557 565 __be32 raw[7]; 558 566 struct mlx5_eqe_cmd cmd; ··· 575 565 struct mlx5_eqe_page_req req_pages; 576 566 struct mlx5_eqe_page_fault page_fault; 577 567 struct mlx5_eqe_vport_change vport_change; 568 + struct mlx5_eqe_port_module port_module; 578 569 } __packed; 579 570 580 571 struct mlx5_eqe { ··· 1069 1058 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, 1070 1059 MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, 1071 1060 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, 1061 + }; 1062 + 1063 + enum { 1064 + MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0, 1065 + MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2, 1072 1066 }; 1073 1067 1074 1068 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+35 -7
include/linux/mlx5/driver.h
··· 121 121 MLX5_REG_HOST_ENDIANNESS = 0x7004, 122 122 MLX5_REG_MCIA = 0x9014, 123 123 MLX5_REG_MLCR = 0x902b, 124 + MLX5_REG_MPCNT = 0x9051, 124 125 }; 125 126 126 127 enum { ··· 209 208 210 209 struct mlx5_cmd_msg { 211 210 struct list_head list; 212 - struct cache_ent *cache; 211 + struct cmd_msg_cache *parent; 213 212 u32 len; 214 213 struct mlx5_cmd_first first; 215 214 struct mlx5_cmd_mailbox *next; ··· 229 228 u16 outlen; 230 229 }; 231 230 232 - struct cache_ent { 231 + struct cmd_msg_cache { 233 232 /* protect block chain allocations 234 233 */ 235 234 spinlock_t lock; 236 235 struct list_head head; 236 + unsigned int max_inbox_size; 237 + unsigned int num_ent; 237 238 }; 238 239 239 - struct cmd_msg_cache { 240 - struct cache_ent large; 241 - struct cache_ent med; 242 - 240 + enum { 241 + MLX5_NUM_COMMAND_CACHES = 5, 243 242 }; 244 243 245 244 struct mlx5_cmd_stats { ··· 282 281 struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS]; 283 282 struct pci_pool *pool; 284 283 struct mlx5_cmd_debug dbg; 285 - struct cmd_msg_cache cache; 284 + struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; 286 285 int checksum_disabled; 287 286 struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; 288 287 }; ··· 499 498 struct mlx5_rl_entry *rl_entry; 500 499 }; 501 500 501 + enum port_module_event_status_type { 502 + MLX5_MODULE_STATUS_PLUGGED = 0x1, 503 + MLX5_MODULE_STATUS_UNPLUGGED = 0x2, 504 + MLX5_MODULE_STATUS_ERROR = 0x3, 505 + MLX5_MODULE_STATUS_NUM = 0x3, 506 + }; 507 + 508 + enum port_module_event_error_type { 509 + MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED, 510 + MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX_CABLE_MODULE, 511 + MLX5_MODULE_EVENT_ERROR_BUS_STUCK, 512 + MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT, 513 + MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST, 514 + MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER, 515 + MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE, 516 + MLX5_MODULE_EVENT_ERROR_BAD_CABLE, 517 + MLX5_MODULE_EVENT_ERROR_UNKNOWN, 518 + 
MLX5_MODULE_EVENT_ERROR_NUM, 519 + }; 520 + 521 + struct mlx5_port_module_event_stats { 522 + u64 status_counters[MLX5_MODULE_STATUS_NUM]; 523 + u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; 524 + }; 525 + 502 526 struct mlx5_priv { 503 527 char name[MLX5_MAX_NAME_LEN]; 504 528 struct mlx5_eq_table eq_table; ··· 585 559 unsigned long pci_dev_data; 586 560 struct mlx5_fc_stats fc_stats; 587 561 struct mlx5_rl_table rl_table; 562 + 563 + struct mlx5_port_module_event_stats pme_stats; 588 564 }; 589 565 590 566 enum mlx5_device_state {
+116 -2
include/linux/mlx5/mlx5_ifc.h
··· 83 83 MLX5_CMD_OP_SET_HCA_CAP = 0x109, 84 84 MLX5_CMD_OP_QUERY_ISSI = 0x10a, 85 85 MLX5_CMD_OP_SET_ISSI = 0x10b, 86 + MLX5_CMD_OP_SET_DRIVER_VERSION = 0x10d, 86 87 MLX5_CMD_OP_CREATE_MKEY = 0x200, 87 88 MLX5_CMD_OP_QUERY_MKEY = 0x201, 88 89 MLX5_CMD_OP_DESTROY_MKEY = 0x202, ··· 825 824 u8 early_vf_enable[0x1]; 826 825 u8 reserved_at_1a9[0x2]; 827 826 u8 local_ca_ack_delay[0x5]; 828 - u8 reserved_at_1af[0x2]; 827 + u8 port_module_event[0x1]; 828 + u8 reserved_at_1b0[0x1]; 829 829 u8 ports_check[0x1]; 830 830 u8 reserved_at_1b2[0x1]; 831 831 u8 disable_link_up[0x1]; ··· 910 908 u8 log_pg_sz[0x8]; 911 909 912 910 u8 bf[0x1]; 913 - u8 reserved_at_261[0x1]; 911 + u8 driver_version[0x1]; 914 912 u8 pad_tx_eth_packet[0x1]; 915 913 u8 reserved_at_263[0x8]; 916 914 u8 log_bf_reg_size[0x5]; ··· 1755 1753 u8 a_pause_mac_ctrl_frames_transmitted_low[0x20]; 1756 1754 1757 1755 u8 reserved_at_4c0[0x300]; 1756 + }; 1757 + 1758 + struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits { 1759 + u8 life_time_counter_high[0x20]; 1760 + 1761 + u8 life_time_counter_low[0x20]; 1762 + 1763 + u8 rx_errors[0x20]; 1764 + 1765 + u8 tx_errors[0x20]; 1766 + 1767 + u8 l0_to_recovery_eieos[0x20]; 1768 + 1769 + u8 l0_to_recovery_ts[0x20]; 1770 + 1771 + u8 l0_to_recovery_framing[0x20]; 1772 + 1773 + u8 l0_to_recovery_retrain[0x20]; 1774 + 1775 + u8 crc_error_dllp[0x20]; 1776 + 1777 + u8 crc_error_tlp[0x20]; 1778 + 1779 + u8 reserved_at_140[0x680]; 1780 + }; 1781 + 1782 + struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits { 1783 + u8 life_time_counter_high[0x20]; 1784 + 1785 + u8 life_time_counter_low[0x20]; 1786 + 1787 + u8 time_to_boot_image_start[0x20]; 1788 + 1789 + u8 time_to_link_image[0x20]; 1790 + 1791 + u8 calibration_time[0x20]; 1792 + 1793 + u8 time_to_first_perst[0x20]; 1794 + 1795 + u8 time_to_detect_state[0x20]; 1796 + 1797 + u8 time_to_l0[0x20]; 1798 + 1799 + u8 time_to_crs_en[0x20]; 1800 + 1801 + u8 time_to_plastic_image_start[0x20]; 1802 + 1803 + u8 time_to_iron_image_start[0x20]; 
1804 + 1805 + u8 perst_handler[0x20]; 1806 + 1807 + u8 times_in_l1[0x20]; 1808 + 1809 + u8 times_in_l23[0x20]; 1810 + 1811 + u8 dl_down[0x20]; 1812 + 1813 + u8 config_cycle1usec[0x20]; 1814 + 1815 + u8 config_cycle2to7usec[0x20]; 1816 + 1817 + u8 config_cycle_8to15usec[0x20]; 1818 + 1819 + u8 config_cycle_16_to_63usec[0x20]; 1820 + 1821 + u8 config_cycle_64usec[0x20]; 1822 + 1823 + u8 correctable_err_msg_sent[0x20]; 1824 + 1825 + u8 non_fatal_err_msg_sent[0x20]; 1826 + 1827 + u8 fatal_err_msg_sent[0x20]; 1828 + 1829 + u8 reserved_at_2e0[0x4e0]; 1758 1830 }; 1759 1831 1760 1832 struct mlx5_ifc_cmd_inter_comp_event_bits { ··· 2995 2919 u8 reserved_at_0[0x7c0]; 2996 2920 }; 2997 2921 2922 + union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits { 2923 + struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout; 2924 + struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout; 2925 + u8 reserved_at_0[0x7c0]; 2926 + }; 2927 + 2998 2928 union mlx5_ifc_event_auto_bits { 2999 2929 struct mlx5_ifc_comp_event_bits comp_event; 3000 2930 struct mlx5_ifc_dct_events_bits dct_events; ··· 4084 4002 u8 op_mod[0x10]; 4085 4003 4086 4004 u8 reserved_at_40[0x40]; 4005 + }; 4006 + 4007 + struct mlx5_ifc_set_driver_version_out_bits { 4008 + u8 status[0x8]; 4009 + u8 reserved_0[0x18]; 4010 + 4011 + u8 syndrome[0x20]; 4012 + u8 reserved_1[0x40]; 4013 + }; 4014 + 4015 + struct mlx5_ifc_set_driver_version_in_bits { 4016 + u8 opcode[0x10]; 4017 + u8 reserved_0[0x10]; 4018 + 4019 + u8 reserved_1[0x10]; 4020 + u8 op_mod[0x10]; 4021 + 4022 + u8 reserved_2[0x40]; 4023 + u8 driver_version[64][0x8]; 4087 4024 }; 4088 4025 4089 4026 struct mlx5_ifc_query_hca_vport_pkey_out_bits { ··· 7320 7219 union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set; 7321 7220 }; 7322 7221 7222 + struct mlx5_ifc_mpcnt_reg_bits { 7223 + u8 reserved_at_0[0x8]; 7224 + u8 pcie_index[0x8]; 7225 + u8 reserved_at_10[0xa]; 7226 + u8 grp[0x6]; 7227 + 7228 + u8 clr[0x1]; 
7229 + u8 reserved_at_21[0x1f]; 7230 + 7231 + union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set; 7232 + }; 7233 + 7323 7234 struct mlx5_ifc_ppad_reg_bits { 7324 7235 u8 reserved_at_0[0x3]; 7325 7236 u8 single_mac[0x1]; ··· 7937 7824 struct mlx5_ifc_pmtu_reg_bits pmtu_reg; 7938 7825 struct mlx5_ifc_ppad_reg_bits ppad_reg; 7939 7826 struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg; 7827 + struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg; 7940 7828 struct mlx5_ifc_pplm_reg_bits pplm_reg; 7941 7829 struct mlx5_ifc_pplr_reg_bits pplr_reg; 7942 7830 struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
+3
include/linux/mlx5/port.h
··· 94 94 95 95 #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) 96 96 97 + #define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF 98 + #define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF 99 + 97 100 int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); 98 101 int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, 99 102 int ptys_size, int proto_mask, u8 local_port);