Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Saeed Mahameed says:

====================
updates from mlx5-next 2022-09-24

Updates from mlx5-next including [1]:

1) HW definitions and support for NPPS clock settings.

2) Various cleanups

3) Enable hash mode by default for all NICs

4) page tracker and advanced virtualization HW definitions for vfio

[1] https://lore.kernel.org/netdev/20220907233636.388475-1-saeed@kernel.org/

* 'mlx5-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
net/mlx5: Remove from FPGA IFC file not-needed definitions
net/mlx5: Remove unused structs
net/mlx5: Remove unused functions
net/mlx5: detect and enable bypass port select flow table
net/mlx5: Lag, enable hash mode by default for all NICs
net/mlx5: Lag, set active ports if support bypass port select flow table
RDMA/mlx5: Don't set tx affinity when lag is in hash mode
net/mlx5: add IFC bits for bypassing port select flow table
net/mlx5: Add support for NPPS with real time mode
net/mlx5: Expose NPPS related registers
net/mlx5: Query ADV_VIRTUALIZATION capabilities
net/mlx5: Introduce ifc bits for page tracker
RDMA/mlx5: Move function mlx5_core_query_ib_ppcnt() to mlx5_ib
====================

Link: https://lore.kernel.org/all/20220927201906.234015-1-saeed@kernel.org/
Signed-off-by: Jakub Kicinski <kuba@kernel.org>

+406 -184
+23 -2
drivers/infiniband/hw/mlx5/mad.c
··· 147 147 vl_15_dropped); 148 148 } 149 149 150 + static int query_ib_ppcnt(struct mlx5_core_dev *dev, u8 port_num, void *out, 151 + size_t sz) 152 + { 153 + u32 *in; 154 + int err; 155 + 156 + in = kvzalloc(sz, GFP_KERNEL); 157 + if (!in) { 158 + err = -ENOMEM; 159 + return err; 160 + } 161 + 162 + MLX5_SET(ppcnt_reg, in, local_port, port_num); 163 + 164 + MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP); 165 + err = mlx5_core_access_reg(dev, in, sz, out, 166 + sz, MLX5_REG_PPCNT, 0, 0); 167 + 168 + kvfree(in); 169 + return err; 170 + } 171 + 150 172 static int process_pma_cmd(struct mlx5_ib_dev *dev, u32 port_num, 151 173 const struct ib_mad *in_mad, struct ib_mad *out_mad) 152 174 { ··· 230 208 goto done; 231 209 } 232 210 233 - err = mlx5_core_query_ib_ppcnt(mdev, mdev_port_num, 234 - out_cnt, sz); 211 + err = query_ib_ppcnt(mdev, mdev_port_num, out_cnt, sz); 235 212 if (!err) 236 213 pma_cnt_assign(pma_cnt, out_cnt); 237 214 }
+12
drivers/infiniband/hw/mlx5/mlx5_ib.h
··· 1541 1541 1542 1542 static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev) 1543 1543 { 1544 + /* 1545 + * If the driver is in hash mode and the port_select_flow_table_bypass cap 1546 + * is supported, it means that the driver no longer needs to assign the port 1547 + * affinity by default. If a user wants to set the port affinity explicitly, 1548 + * the user has a dedicated API to do that, so there is no need to assign 1549 + * the port affinity by default. 1550 + */ 1551 + if (dev->lag_active && 1552 + mlx5_lag_mode_is_hash(dev->mdev) && 1553 + MLX5_CAP_PORT_SELECTION(dev->mdev, port_select_flow_table_bypass)) 1554 + return 0; 1555 + 1544 1556 return dev->lag_active || 1545 1557 (MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 && 1546 1558 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
-5
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
··· 77 77 return MLX5_IPSEC_METADATA_MARKER(be32_to_cpu(cqe->ft_metadata)); 78 78 } 79 79 80 - static inline bool mlx5e_ipsec_is_tx_flow(struct mlx5e_accel_tx_ipsec_state *ipsec_st) 81 - { 82 - return ipsec_st->x; 83 - } 84 - 85 80 static inline bool mlx5e_ipsec_eseg_meta(struct mlx5_wqe_eth_seg *eseg) 86 81 { 87 82 return eseg->flow_table_metadata & cpu_to_be32(MLX5_ETH_WQE_FT_META_IPSEC);
+6
drivers/net/ethernet/mellanox/mlx5/core/fw.c
··· 280 280 return err; 281 281 } 282 282 283 + if (MLX5_CAP_GEN(dev, adv_virtualization)) { 284 + err = mlx5_core_get_caps(dev, MLX5_CAP_ADV_VIRTUALIZATION); 285 + if (err) 286 + return err; 287 + } 288 + 283 289 return 0; 284 290 } 285 291
-7
drivers/net/ethernet/mellanox/mlx5/core/health.c
··· 875 875 cancel_work_sync(&health->fatal_report_work); 876 876 } 877 877 878 - void mlx5_health_flush(struct mlx5_core_dev *dev) 879 - { 880 - struct mlx5_core_health *health = &dev->priv.health; 881 - 882 - flush_workqueue(health->wq); 883 - } 884 - 885 878 void mlx5_health_cleanup(struct mlx5_core_dev *dev) 886 879 { 887 880 struct mlx5_core_health *health = &dev->priv.health;
+80 -11
drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
··· 65 65 return MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY; 66 66 } 67 67 68 + static u8 lag_active_port_bits(struct mlx5_lag *ldev) 69 + { 70 + u8 enabled_ports[MLX5_MAX_PORTS] = {}; 71 + u8 active_port = 0; 72 + int num_enabled; 73 + int idx; 74 + 75 + mlx5_infer_tx_enabled(&ldev->tracker, ldev->ports, enabled_ports, 76 + &num_enabled); 77 + for (idx = 0; idx < num_enabled; idx++) 78 + active_port |= BIT_MASK(enabled_ports[idx]); 79 + 80 + return active_port; 81 + } 82 + 68 83 static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 *ports, int mode, 69 84 unsigned long flags) 70 85 { ··· 92 77 lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); 93 78 MLX5_SET(create_lag_in, in, opcode, MLX5_CMD_OP_CREATE_LAG); 94 79 MLX5_SET(lagc, lag_ctx, fdb_selection_mode, fdb_sel_mode); 95 - if (port_sel_mode == MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY) { 80 + 81 + switch (port_sel_mode) { 82 + case MLX5_LAG_PORT_SELECT_MODE_QUEUE_AFFINITY: 96 83 MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, ports[0]); 97 84 MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, ports[1]); 85 + break; 86 + case MLX5_LAG_PORT_SELECT_MODE_PORT_SELECT_FT: 87 + if (!MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass)) 88 + break; 89 + 90 + MLX5_SET(lagc, lag_ctx, active_port, 91 + lag_active_port_bits(mlx5_lag_dev(dev))); 92 + break; 93 + default: 94 + break; 98 95 } 99 96 MLX5_SET(lagc, lag_ctx, port_select_mode, port_sel_mode); 100 97 ··· 413 386 } 414 387 } 415 388 389 + static int mlx5_cmd_modify_active_port(struct mlx5_core_dev *dev, u8 ports) 390 + { 391 + u32 in[MLX5_ST_SZ_DW(modify_lag_in)] = {}; 392 + void *lag_ctx; 393 + 394 + lag_ctx = MLX5_ADDR_OF(modify_lag_in, in, ctx); 395 + 396 + MLX5_SET(modify_lag_in, in, opcode, MLX5_CMD_OP_MODIFY_LAG); 397 + MLX5_SET(modify_lag_in, in, field_select, 0x2); 398 + 399 + MLX5_SET(lagc, lag_ctx, active_port, ports); 400 + 401 + return mlx5_cmd_exec_in(dev, modify_lag, in); 402 + } 403 + 416 404 static int _mlx5_modify_lag(struct mlx5_lag *ldev, u8 
*ports) 417 405 { 418 406 struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; 407 + u8 active_ports; 408 + int ret; 419 409 420 - if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) 421 - return mlx5_lag_port_sel_modify(ldev, ports); 410 + if (test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags)) { 411 + ret = mlx5_lag_port_sel_modify(ldev, ports); 412 + if (ret || 413 + !MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table_bypass)) 414 + return ret; 415 + 416 + active_ports = lag_active_port_bits(ldev); 417 + 418 + return mlx5_cmd_modify_active_port(dev0, active_ports); 419 + } 422 420 return mlx5_cmd_modify_lag(dev0, ldev->ports, ports); 423 421 } 424 422 ··· 484 432 mlx5_lag_drop_rule_setup(ldev, tracker); 485 433 } 486 434 487 - #define MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED 4 488 435 static int mlx5_lag_set_port_sel_mode_roce(struct mlx5_lag *ldev, 489 436 unsigned long *flags) 490 437 { 491 - struct lag_func *dev0 = &ldev->pf[MLX5_LAG_P1]; 438 + struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; 492 439 493 - if (ldev->ports == MLX5_LAG_ROCE_HASH_PORTS_SUPPORTED) { 494 - /* Four ports are support only in hash mode */ 495 - if (!MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table)) 496 - return -EINVAL; 497 - set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags); 440 + if (!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table)) { 498 441 if (ldev->ports > 2) 499 - ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS; 442 + return -EINVAL; 443 + return 0; 500 444 } 445 + 446 + if (ldev->ports > 2) 447 + ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS; 448 + 449 + set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags); 501 450 502 451 return 0; 503 452 } ··· 1327 1274 return res; 1328 1275 } 1329 1276 EXPORT_SYMBOL(mlx5_lag_is_active); 1277 + 1278 + bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev) 1279 + { 1280 + struct mlx5_lag *ldev; 1281 + unsigned long flags; 1282 + bool res = 0; 1283 + 1284 + spin_lock_irqsave(&lag_lock, flags); 1285 + ldev = 
mlx5_lag_dev(dev); 1286 + if (ldev) 1287 + res = test_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, &ldev->mode_flags); 1288 + spin_unlock_irqrestore(&lag_lock, flags); 1289 + 1290 + return res; 1291 + } 1292 + EXPORT_SYMBOL(mlx5_lag_mode_is_hash); 1330 1293 1331 1294 bool mlx5_lag_is_master(struct mlx5_core_dev *dev) 1332 1295 {
+119 -20
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 65 65 MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4), 66 66 MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5), 67 67 MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7), 68 + MLX5_MTPPS_FS_NPPS_PERIOD = BIT(0x9), 69 + MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa), 68 70 }; 69 71 70 72 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev) 71 73 { 72 74 return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev)); 75 + } 76 + 77 + static bool mlx5_npps_real_time_supported(struct mlx5_core_dev *mdev) 78 + { 79 + return (mlx5_real_time_mode(mdev) && 80 + MLX5_CAP_MCAM_FEATURE(mdev, npps_period) && 81 + MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns)); 73 82 } 74 83 75 84 static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev) ··· 468 459 return find_target_cycles(mdev, target_ns); 469 460 } 470 461 471 - static u64 perout_conf_real_time(s64 sec) 462 + static u64 perout_conf_real_time(s64 sec, u32 nsec) 472 463 { 473 - return (u64)sec << 32; 464 + return (u64)nsec | (u64)sec << 32; 465 + } 466 + 467 + static int perout_conf_1pps(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq, 468 + u64 *time_stamp, bool real_time) 469 + { 470 + struct timespec64 ts; 471 + s64 ns; 472 + 473 + ts.tv_nsec = rq->perout.period.nsec; 474 + ts.tv_sec = rq->perout.period.sec; 475 + ns = timespec64_to_ns(&ts); 476 + 477 + if ((ns >> 1) != 500000000LL) 478 + return -EINVAL; 479 + 480 + *time_stamp = real_time ? 
perout_conf_real_time(rq->perout.start.sec, 0) : 481 + perout_conf_internal_timer(mdev, rq->perout.start.sec); 482 + 483 + return 0; 484 + } 485 + 486 + #define MLX5_MAX_PULSE_DURATION (BIT(__mlx5_bit_sz(mtpps_reg, out_pulse_duration_ns)) - 1) 487 + static int mlx5_perout_conf_out_pulse_duration(struct mlx5_core_dev *mdev, 488 + struct ptp_clock_request *rq, 489 + u32 *out_pulse_duration_ns) 490 + { 491 + struct mlx5_pps *pps_info = &mdev->clock.pps_info; 492 + u32 out_pulse_duration; 493 + struct timespec64 ts; 494 + 495 + if (rq->perout.flags & PTP_PEROUT_DUTY_CYCLE) { 496 + ts.tv_sec = rq->perout.on.sec; 497 + ts.tv_nsec = rq->perout.on.nsec; 498 + out_pulse_duration = (u32)timespec64_to_ns(&ts); 499 + } else { 500 + /* out_pulse_duration_ns should be up to 50% of the 501 + * pulse period as default 502 + */ 503 + ts.tv_sec = rq->perout.period.sec; 504 + ts.tv_nsec = rq->perout.period.nsec; 505 + out_pulse_duration = (u32)timespec64_to_ns(&ts) >> 1; 506 + } 507 + 508 + if (out_pulse_duration < pps_info->min_out_pulse_duration_ns || 509 + out_pulse_duration > MLX5_MAX_PULSE_DURATION) { 510 + mlx5_core_err(mdev, "NPPS pulse duration %u is not in [%llu, %lu]\n", 511 + out_pulse_duration, pps_info->min_out_pulse_duration_ns, 512 + MLX5_MAX_PULSE_DURATION); 513 + return -EINVAL; 514 + } 515 + *out_pulse_duration_ns = out_pulse_duration; 516 + 517 + return 0; 518 + } 519 + 520 + static int perout_conf_npps_real_time(struct mlx5_core_dev *mdev, struct ptp_clock_request *rq, 521 + u32 *field_select, u32 *out_pulse_duration_ns, 522 + u64 *period, u64 *time_stamp) 523 + { 524 + struct mlx5_pps *pps_info = &mdev->clock.pps_info; 525 + struct ptp_clock_time *time = &rq->perout.start; 526 + struct timespec64 ts; 527 + 528 + ts.tv_sec = rq->perout.period.sec; 529 + ts.tv_nsec = rq->perout.period.nsec; 530 + if (timespec64_to_ns(&ts) < pps_info->min_npps_period) { 531 + mlx5_core_err(mdev, "NPPS period is lower than minimal npps period %llu\n", 532 + 
pps_info->min_npps_period); 533 + return -EINVAL; 534 + } 535 + *period = perout_conf_real_time(rq->perout.period.sec, rq->perout.period.nsec); 536 + 537 + if (mlx5_perout_conf_out_pulse_duration(mdev, rq, out_pulse_duration_ns)) 538 + return -EINVAL; 539 + 540 + *time_stamp = perout_conf_real_time(time->sec, time->nsec); 541 + *field_select |= MLX5_MTPPS_FS_NPPS_PERIOD | 542 + MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS; 543 + 544 + return 0; 545 + } 546 + 547 + static bool mlx5_perout_verify_flags(struct mlx5_core_dev *mdev, unsigned int flags) 548 + { 549 + return ((!mlx5_npps_real_time_supported(mdev) && flags) || 550 + (mlx5_npps_real_time_supported(mdev) && flags & ~PTP_PEROUT_DUTY_CYCLE)); 474 551 } 475 552 476 553 static int mlx5_perout_configure(struct ptp_clock_info *ptp, ··· 569 474 container_of(clock, struct mlx5_core_dev, clock); 570 475 bool rt_mode = mlx5_real_time_mode(mdev); 571 476 u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; 572 - struct timespec64 ts; 477 + u32 out_pulse_duration_ns = 0; 573 478 u32 field_select = 0; 479 + u64 npps_period = 0; 574 480 u64 time_stamp = 0; 575 481 u8 pin_mode = 0; 576 482 u8 pattern = 0; 577 483 int pin = -1; 578 484 int err = 0; 579 - s64 ns; 580 485 581 486 if (!MLX5_PPS_CAP(mdev)) 582 487 return -EOPNOTSUPP; 583 488 584 489 /* Reject requests with unsupported flags */ 585 - if (rq->perout.flags) 490 + if (mlx5_perout_verify_flags(mdev, rq->perout.flags)) 586 491 return -EOPNOTSUPP; 587 492 588 493 if (rq->perout.index >= clock->ptp_info.n_pins) ··· 595 500 596 501 if (on) { 597 502 bool rt_mode = mlx5_real_time_mode(mdev); 598 - s64 sec = rq->perout.start.sec; 599 - 600 - if (rq->perout.start.nsec) 601 - return -EINVAL; 602 503 603 504 pin_mode = MLX5_PIN_MODE_OUT; 604 505 pattern = MLX5_OUT_PATTERN_PERIODIC; 605 - ts.tv_sec = rq->perout.period.sec; 606 - ts.tv_nsec = rq->perout.period.nsec; 607 - ns = timespec64_to_ns(&ts); 608 506 609 - if ((ns >> 1) != 500000000LL) 507 + if (rt_mode && rq->perout.start.sec > U32_MAX) 610 
508 return -EINVAL; 611 - 612 - if (rt_mode && sec > U32_MAX) 613 - return -EINVAL; 614 - 615 - time_stamp = rt_mode ? perout_conf_real_time(sec) : 616 - perout_conf_internal_timer(mdev, sec); 617 509 618 510 field_select |= MLX5_MTPPS_FS_PIN_MODE | 619 511 MLX5_MTPPS_FS_PATTERN | 620 512 MLX5_MTPPS_FS_TIME_STAMP; 513 + 514 + if (mlx5_npps_real_time_supported(mdev)) 515 + err = perout_conf_npps_real_time(mdev, rq, &field_select, 516 + &out_pulse_duration_ns, &npps_period, 517 + &time_stamp); 518 + else 519 + err = perout_conf_1pps(mdev, rq, &time_stamp, rt_mode); 520 + if (err) 521 + return err; 621 522 } 622 523 623 524 MLX5_SET(mtpps_reg, in, pin, pin); ··· 622 531 MLX5_SET(mtpps_reg, in, enable, on); 623 532 MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp); 624 533 MLX5_SET(mtpps_reg, in, field_select, field_select); 625 - 534 + MLX5_SET64(mtpps_reg, in, npps_period, npps_period); 535 + MLX5_SET(mtpps_reg, in, out_pulse_duration_ns, out_pulse_duration_ns); 626 536 err = mlx5_set_mtpps(mdev, in, sizeof(in)); 627 537 if (err) 628 538 return err; ··· 778 686 cap_max_num_of_pps_in_pins); 779 687 clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out, 780 688 cap_max_num_of_pps_out_pins); 689 + 690 + if (MLX5_CAP_MCAM_FEATURE(mdev, npps_period)) 691 + clock->pps_info.min_npps_period = 1 << MLX5_GET(mtpps_reg, out, 692 + cap_log_min_npps_period); 693 + if (MLX5_CAP_MCAM_FEATURE(mdev, out_pulse_duration_ns)) 694 + clock->pps_info.min_out_pulse_duration_ns = 1 << MLX5_GET(mtpps_reg, out, 695 + cap_log_min_out_pulse_duration_ns); 781 696 782 697 clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode); 783 698 clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+35
drivers/net/ethernet/mellanox/mlx5/core/main.c
··· 671 671 return err; 672 672 } 673 673 674 + static int handle_hca_cap_port_selection(struct mlx5_core_dev *dev, 675 + void *set_ctx) 676 + { 677 + void *set_hca_cap; 678 + int err; 679 + 680 + if (!MLX5_CAP_GEN(dev, port_selection_cap)) 681 + return 0; 682 + 683 + err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION); 684 + if (err) 685 + return err; 686 + 687 + if (MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass) || 688 + !MLX5_CAP_PORT_SELECTION_MAX(dev, port_select_flow_table_bypass)) 689 + return 0; 690 + 691 + set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); 692 + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, 693 + MLX5_ST_SZ_BYTES(port_selection_cap)); 694 + MLX5_SET(port_selection_cap, set_hca_cap, port_select_flow_table_bypass, 1); 695 + 696 + err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MODE_PORT_SELECTION); 697 + 698 + return err; 699 + } 700 + 674 701 static int set_hca_cap(struct mlx5_core_dev *dev) 675 702 { 676 703 int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in); ··· 739 712 err = handle_hca_cap_2(dev, set_ctx); 740 713 if (err) { 741 714 mlx5_core_err(dev, "handle_hca_cap_2 failed\n"); 715 + goto out; 716 + } 717 + 718 + memset(set_ctx, 0, set_sz); 719 + err = handle_hca_cap_port_selection(dev, set_ctx); 720 + if (err) { 721 + mlx5_core_err(dev, "handle_hca_cap_port_selection failed\n"); 742 722 goto out; 743 723 } 744 724 ··· 1542 1508 MLX5_CAP_PORT_SELECTION, 1543 1509 MLX5_CAP_DEV_SHAMPO, 1544 1510 MLX5_CAP_MACSEC, 1511 + MLX5_CAP_ADV_VIRTUALIZATION, 1545 1512 }; 1546 1513 1547 1514 static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
-23
drivers/net/ethernet/mellanox/mlx5/core/port.c
··· 493 493 } 494 494 EXPORT_SYMBOL_GPL(mlx5_query_port_vl_hw_cap); 495 495 496 - int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, 497 - u8 port_num, void *out, size_t sz) 498 - { 499 - u32 *in; 500 - int err; 501 - 502 - in = kvzalloc(sz, GFP_KERNEL); 503 - if (!in) { 504 - err = -ENOMEM; 505 - return err; 506 - } 507 - 508 - MLX5_SET(ppcnt_reg, in, local_port, port_num); 509 - 510 - MLX5_SET(ppcnt_reg, in, grp, MLX5_INFINIBAND_PORT_COUNTERS_GROUP); 511 - err = mlx5_core_access_reg(dev, in, sz, out, 512 - sz, MLX5_REG_PPCNT, 0, 0); 513 - 514 - kvfree(in); 515 - return err; 516 - } 517 - EXPORT_SYMBOL_GPL(mlx5_core_query_ib_ppcnt); 518 - 519 496 static int mlx5_query_pfcc_reg(struct mlx5_core_dev *dev, u32 *out, 520 497 u32 out_size) 521 498 {
-14
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
··· 1294 1294 u32 roce_ver; 1295 1295 }; 1296 1296 1297 - struct mlx5dr_cmd_qp_create_attr { 1298 - u32 page_id; 1299 - u32 pdn; 1300 - u32 cqn; 1301 - u32 pm_state; 1302 - u32 service_type; 1303 - u32 buff_umem_id; 1304 - u32 db_umem_id; 1305 - u32 sq_wqe_cnt; 1306 - u32 rq_wqe_cnt; 1307 - u32 rq_wqe_shift; 1308 - u8 isolate_vl_tc:1; 1309 - }; 1310 - 1311 1297 int mlx5dr_cmd_query_gid(struct mlx5_core_dev *mdev, u8 vhca_port_num, 1312 1298 u16 index, struct mlx5dr_cmd_gid_attr *attr); 1313 1299
-4
drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.h
··· 14 14 struct mlx5dr_action *dr_action; 15 15 }; 16 16 17 - struct mlx5_fs_dr_ns { 18 - struct mlx5_dr_ns *dr_ns; 19 - }; 20 - 21 17 struct mlx5_fs_dr_rule { 22 18 struct mlx5dr_rule *dr_rule; 23 19 /* Only actions created by fs_dr */
+9 -11
include/linux/mlx5/device.h
··· 882 882 return cqe->op_own >> 4; 883 883 } 884 884 885 - static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe) 886 - { 887 - /* num_of_mini_cqes is zero based */ 888 - return get_cqe_opcode(cqe) + 1; 889 - } 890 - 891 885 static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) 892 886 { 893 887 return (cqe->lro.tcppsh_abort_dupack >> 6) & 1; ··· 890 896 static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) 891 897 { 892 898 return (cqe->l4_l3_hdr_type >> 4) & 0x7; 893 - } 894 - 895 - static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) 896 - { 897 - return (cqe->l4_l3_hdr_type >> 2) & 0x3; 898 899 } 899 900 900 901 static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe) ··· 1198 1209 MLX5_CAP_MACSEC = 0x1f, 1199 1210 MLX5_CAP_GENERAL_2 = 0x20, 1200 1211 MLX5_CAP_PORT_SELECTION = 0x25, 1212 + MLX5_CAP_ADV_VIRTUALIZATION = 0x26, 1201 1213 /* NUM OF CAP Types */ 1202 1214 MLX5_CAP_NUM 1203 1215 }; ··· 1363 1373 #define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \ 1364 1374 MLX5_GET(port_selection_cap, \ 1365 1375 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap) 1376 + 1377 + #define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \ 1378 + MLX5_GET(adv_virtualization_cap, \ 1379 + mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap) 1380 + 1381 + #define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \ 1382 + MLX5_GET(adv_virtualization_cap, \ 1383 + mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap) 1366 1384 1367 1385 #define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \ 1368 1386 MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)
+3 -8
include/linux/mlx5/driver.h
··· 698 698 struct work_struct out_work; 699 699 u64 start[MAX_PIN_NUM]; 700 700 u8 enabled; 701 + u64 min_npps_period; 702 + u64 min_out_pulse_duration_ns; 701 703 }; 702 704 703 705 struct mlx5_timer { ··· 855 853 bool polling; 856 854 /* Track the max comp handlers */ 857 855 refcount_t refcnt; 858 - }; 859 - 860 - struct mlx5_pas { 861 - u64 pa; 862 - u8 log_sz; 863 856 }; 864 857 865 858 enum phy_port_state { ··· 1013 1016 bool mlx5_cmd_is_down(struct mlx5_core_dev *dev); 1014 1017 1015 1018 int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); 1016 - void mlx5_health_flush(struct mlx5_core_dev *dev); 1017 1019 void mlx5_health_cleanup(struct mlx5_core_dev *dev); 1018 1020 int mlx5_health_init(struct mlx5_core_dev *dev); 1019 1021 void mlx5_start_health_poll(struct mlx5_core_dev *dev); ··· 1081 1085 void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common); 1082 1086 int mlx5_query_odp_caps(struct mlx5_core_dev *dev, 1083 1087 struct mlx5_odp_caps *odp_caps); 1084 - int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, 1085 - u8 port_num, void *out, size_t sz); 1086 1088 1087 1089 int mlx5_init_rl_table(struct mlx5_core_dev *dev); 1088 1090 void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); ··· 1147 1153 bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); 1148 1154 bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); 1149 1155 bool mlx5_lag_is_active(struct mlx5_core_dev *dev); 1156 + bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev); 1150 1157 bool mlx5_lag_is_master(struct mlx5_core_dev *dev); 1151 1158 bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); 1152 1159 struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
-48
include/linux/mlx5/fs_helpers.h
··· 38 38 #define MLX5_FS_IPV4_VERSION 4 39 39 #define MLX5_FS_IPV6_VERSION 6 40 40 41 - static inline bool mlx5_fs_is_ipsec_flow(const u32 *match_c) 42 - { 43 - void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 44 - misc_parameters); 45 - 46 - return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); 47 - } 48 - 49 - static inline bool _mlx5_fs_is_outer_ipproto_flow(const u32 *match_c, 50 - const u32 *match_v, u8 match) 51 - { 52 - const void *headers_c = MLX5_ADDR_OF(fte_match_param, match_c, 53 - outer_headers); 54 - const void *headers_v = MLX5_ADDR_OF(fte_match_param, match_v, 55 - outer_headers); 56 - 57 - return MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_protocol) == 0xff && 58 - MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol) == match; 59 - } 60 - 61 - static inline bool mlx5_fs_is_outer_tcp_flow(const u32 *match_c, 62 - const u32 *match_v) 63 - { 64 - return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_TCP); 65 - } 66 - 67 - static inline bool mlx5_fs_is_outer_udp_flow(const u32 *match_c, 68 - const u32 *match_v) 69 - { 70 - return _mlx5_fs_is_outer_ipproto_flow(match_c, match_v, IPPROTO_UDP); 71 - } 72 - 73 - static inline bool mlx5_fs_is_vxlan_flow(const u32 *match_c) 74 - { 75 - void *misc_params_c = MLX5_ADDR_OF(fte_match_param, match_c, 76 - misc_parameters); 77 - 78 - return MLX5_GET(fte_match_set_misc, misc_params_c, vxlan_vni); 79 - } 80 - 81 41 static inline bool _mlx5_fs_is_outer_ipv_flow(struct mlx5_core_dev *mdev, 82 42 const u32 *match_c, 83 43 const u32 *match_v, int version) ··· 89 129 { 90 130 return _mlx5_fs_is_outer_ipv_flow(mdev, match_c, match_v, 91 131 MLX5_FS_IPV6_VERSION); 92 - } 93 - 94 - static inline bool mlx5_fs_is_outer_ipsec_flow(const u32 *match_c) 95 - { 96 - void *misc_params_c = 97 - MLX5_ADDR_OF(fte_match_param, match_c, misc_parameters); 98 - 99 - return MLX5_GET(fte_match_set_misc, misc_params_c, outer_esp_spi); 100 132 } 101 133 102 134 #endif
+119 -7
include/linux/mlx5/mlx5_ifc.h
··· 68 68 MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, 69 69 MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, 70 70 MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4, 71 + MLX5_SET_HCA_CAP_OP_MODE_PORT_SELECTION = 0x25, 71 72 }; 72 73 73 74 enum { ··· 91 90 MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d, 92 91 MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c, 93 92 MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018, 93 + MLX5_OBJ_TYPE_PAGE_TRACK = 0x46, 94 94 MLX5_OBJ_TYPE_MKEY = 0xff01, 95 95 MLX5_OBJ_TYPE_QP = 0xff02, 96 96 MLX5_OBJ_TYPE_PSV = 0xff03, ··· 484 482 u8 reserved_at_6[0x1a]; 485 483 }; 486 484 485 + struct mlx5_ifc_ipv4_layout_bits { 486 + u8 reserved_at_0[0x60]; 487 + 488 + u8 ipv4[0x20]; 489 + }; 490 + 491 + struct mlx5_ifc_ipv6_layout_bits { 492 + u8 ipv6[16][0x8]; 493 + }; 494 + 495 + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { 496 + struct mlx5_ifc_ipv6_layout_bits ipv6_layout; 497 + struct mlx5_ifc_ipv4_layout_bits ipv4_layout; 498 + u8 reserved_at_0[0x80]; 499 + }; 500 + 487 501 struct mlx5_ifc_fte_match_set_lyr_2_4_bits { 488 502 u8 smac_47_16[0x20]; 489 503 ··· 841 823 struct mlx5_ifc_port_selection_cap_bits { 842 824 u8 reserved_at_0[0x10]; 843 825 u8 port_select_flow_table[0x1]; 844 - u8 reserved_at_11[0xf]; 826 + u8 reserved_at_11[0x1]; 827 + u8 port_select_flow_table_bypass[0x1]; 828 + u8 reserved_at_13[0xd]; 845 829 846 830 u8 reserved_at_20[0x1e0]; 847 831 ··· 1781 1761 u8 max_geneve_tlv_options[0x8]; 1782 1762 u8 reserved_at_568[0x3]; 1783 1763 u8 max_geneve_tlv_option_data_len[0x5]; 1784 - u8 reserved_at_570[0x10]; 1764 + u8 reserved_at_570[0x9]; 1765 + u8 adv_virtualization[0x1]; 1766 + u8 reserved_at_57a[0x6]; 1785 1767 1786 1768 u8 reserved_at_580[0xb]; 1787 1769 u8 log_max_dci_stream_channels[0x5]; ··· 9847 9825 struct mlx5_ifc_mcam_enhanced_features_bits { 9848 9826 u8 reserved_at_0[0x5d]; 9849 9827 u8 mcia_32dwords[0x1]; 9850 - u8 reserved_at_5e[0xc]; 9828 + u8 out_pulse_duration_ns[0x1]; 9829 + u8 npps_period[0x1]; 9830 + u8 reserved_at_60[0xa]; 9851 9831 u8 reset_state[0x1]; 9852 9832 u8 
ptpcyc2realtime_modify[0x1]; 9853 9833 u8 reserved_at_6c[0x2]; ··· 10349 10325 u8 reserved_at_18[0x4]; 10350 10326 u8 cap_max_num_of_pps_out_pins[0x4]; 10351 10327 10352 - u8 reserved_at_20[0x24]; 10328 + u8 reserved_at_20[0x13]; 10329 + u8 cap_log_min_npps_period[0x5]; 10330 + u8 reserved_at_38[0x3]; 10331 + u8 cap_log_min_out_pulse_duration_ns[0x5]; 10332 + 10333 + u8 reserved_at_40[0x4]; 10353 10334 u8 cap_pin_3_mode[0x4]; 10354 10335 u8 reserved_at_48[0x4]; 10355 10336 u8 cap_pin_2_mode[0x4]; ··· 10373 10344 u8 cap_pin_4_mode[0x4]; 10374 10345 10375 10346 u8 field_select[0x20]; 10376 - u8 reserved_at_a0[0x60]; 10347 + u8 reserved_at_a0[0x20]; 10348 + 10349 + u8 npps_period[0x40]; 10377 10350 10378 10351 u8 enable[0x1]; 10379 10352 u8 reserved_at_101[0xb]; ··· 10384 10353 u8 pin_mode[0x4]; 10385 10354 u8 pin[0x8]; 10386 10355 10387 - u8 reserved_at_120[0x20]; 10356 + u8 reserved_at_120[0x2]; 10357 + u8 out_pulse_duration_ns[0x1e]; 10388 10358 10389 10359 u8 time_stamp[0x40]; 10390 10360 ··· 10988 10956 u8 reserved_at_18[0x5]; 10989 10957 u8 lag_state[0x3]; 10990 10958 10991 - u8 reserved_at_20[0x14]; 10959 + u8 reserved_at_20[0xc]; 10960 + u8 active_port[0x4]; 10961 + u8 reserved_at_30[0x4]; 10992 10962 u8 tx_remap_affinity_2[0x4]; 10993 10963 u8 reserved_at_38[0x4]; 10994 10964 u8 tx_remap_affinity_1[0x4]; ··· 11978 11944 u8 syndrome[0x20]; 11979 11945 11980 11946 u8 reserved_at_40[0x40]; 11947 + }; 11948 + 11949 + struct mlx5_ifc_adv_virtualization_cap_bits { 11950 + u8 reserved_at_0[0x3]; 11951 + u8 pg_track_log_max_num[0x5]; 11952 + u8 pg_track_max_num_range[0x8]; 11953 + u8 pg_track_log_min_addr_space[0x8]; 11954 + u8 pg_track_log_max_addr_space[0x8]; 11955 + 11956 + u8 reserved_at_20[0x3]; 11957 + u8 pg_track_log_min_msg_size[0x5]; 11958 + u8 reserved_at_28[0x3]; 11959 + u8 pg_track_log_max_msg_size[0x5]; 11960 + u8 reserved_at_30[0x3]; 11961 + u8 pg_track_log_min_page_size[0x5]; 11962 + u8 reserved_at_38[0x3]; 11963 + u8 pg_track_log_max_page_size[0x5]; 
11964 + 11965 + u8 reserved_at_40[0x7c0]; 11966 + }; 11967 + 11968 + struct mlx5_ifc_page_track_report_entry_bits { 11969 + u8 dirty_address_high[0x20]; 11970 + 11971 + u8 dirty_address_low[0x20]; 11972 + }; 11973 + 11974 + enum { 11975 + MLX5_PAGE_TRACK_STATE_TRACKING, 11976 + MLX5_PAGE_TRACK_STATE_REPORTING, 11977 + MLX5_PAGE_TRACK_STATE_ERROR, 11978 + }; 11979 + 11980 + struct mlx5_ifc_page_track_range_bits { 11981 + u8 start_address[0x40]; 11982 + 11983 + u8 length[0x40]; 11984 + }; 11985 + 11986 + struct mlx5_ifc_page_track_bits { 11987 + u8 modify_field_select[0x40]; 11988 + 11989 + u8 reserved_at_40[0x10]; 11990 + u8 vhca_id[0x10]; 11991 + 11992 + u8 reserved_at_60[0x20]; 11993 + 11994 + u8 state[0x4]; 11995 + u8 track_type[0x4]; 11996 + u8 log_addr_space_size[0x8]; 11997 + u8 reserved_at_90[0x3]; 11998 + u8 log_page_size[0x5]; 11999 + u8 reserved_at_98[0x3]; 12000 + u8 log_msg_size[0x5]; 12001 + 12002 + u8 reserved_at_a0[0x8]; 12003 + u8 reporting_qpn[0x18]; 12004 + 12005 + u8 reserved_at_c0[0x18]; 12006 + u8 num_ranges[0x8]; 12007 + 12008 + u8 reserved_at_e0[0x20]; 12009 + 12010 + u8 range_start_address[0x40]; 12011 + 12012 + u8 length[0x40]; 12013 + 12014 + struct mlx5_ifc_page_track_range_bits track_range[0]; 12015 + }; 12016 + 12017 + struct mlx5_ifc_create_page_track_obj_in_bits { 12018 + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; 12019 + struct mlx5_ifc_page_track_bits obj_context; 12020 + }; 12021 + 12022 + struct mlx5_ifc_modify_page_track_obj_in_bits { 12023 + struct mlx5_ifc_general_obj_in_cmd_hdr_bits general_obj_in_cmd_hdr; 12024 + struct mlx5_ifc_page_track_bits obj_context; 11981 12025 }; 11982 12026 11983 12027 #endif /* MLX5_IFC_H */
-24
include/linux/mlx5/mlx5_ifc_fpga.h
··· 32 32 #ifndef MLX5_IFC_FPGA_H 33 33 #define MLX5_IFC_FPGA_H 34 34 35 - struct mlx5_ifc_ipv4_layout_bits { 36 - u8 reserved_at_0[0x60]; 37 - 38 - u8 ipv4[0x20]; 39 - }; 40 - 41 - struct mlx5_ifc_ipv6_layout_bits { 42 - u8 ipv6[16][0x8]; 43 - }; 44 - 45 - union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { 46 - struct mlx5_ifc_ipv6_layout_bits ipv6_layout; 47 - struct mlx5_ifc_ipv4_layout_bits ipv4_layout; 48 - u8 reserved_at_0[0x80]; 49 - }; 50 - 51 - enum { 52 - MLX5_FPGA_CAP_SANDBOX_VENDOR_ID_MLNX = 0x2c9, 53 - }; 54 - 55 - enum { 56 - MLX5_FPGA_CAP_SANDBOX_PRODUCT_ID_IPSEC = 0x2, 57 - }; 58 - 59 35 struct mlx5_ifc_fpga_shell_caps_bits { 60 36 u8 max_num_qps[0x10]; 61 37 u8 reserved_at_10[0x8];