Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/mlx5: Use event mask based on device capabilities

Use the reported device capabilities for the supported user events (i.e.
affiliated and un-affiliated) to set the EQ mask.

As the event mask can be up to 256 bits, defined by 4 entries of u64, change
the applicable code to work accordingly.

Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>

authored by

Yishai Hadas and committed by
Leon Romanovsky
b9a7ba55 1d49ce1e

+55 -14
+1 -1
drivers/infiniband/hw/mlx5/odp.c
··· 1558 1558 eq->irq_nb.notifier_call = mlx5_ib_eq_pf_int; 1559 1559 param = (struct mlx5_eq_param) { 1560 1560 .irq_index = 0, 1561 - .mask = 1 << MLX5_EVENT_TYPE_PAGE_FAULT, 1562 1561 .nent = MLX5_IB_NUM_PF_EQE, 1563 1562 }; 1563 + param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; 1564 1564 eq->core = mlx5_eq_create_generic(dev->mdev, &param); 1565 1565 if (IS_ERR(eq->core)) { 1566 1566 err = PTR_ERR(eq->core);
+32 -8
drivers/net/ethernet/mellanox/mlx5/core/eq.c
··· 256 256 int inlen; 257 257 u32 *in; 258 258 int err; 259 + int i; 259 260 260 261 /* Init CQ table */ 261 262 memset(cq_table, 0, sizeof(*cq_table)); ··· 284 283 mlx5_fill_page_array(&eq->buf, pas); 285 284 286 285 MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ); 287 - if (!param->mask && MLX5_CAP_GEN(dev, log_max_uctx)) 286 + if (!param->mask[0] && MLX5_CAP_GEN(dev, log_max_uctx)) 288 287 MLX5_SET(create_eq_in, in, uid, MLX5_SHARED_RESOURCE_UID); 289 288 290 - MLX5_SET64(create_eq_in, in, event_bitmask, param->mask); 289 + for (i = 0; i < 4; i++) 290 + MLX5_ARRAY_SET64(create_eq_in, in, event_bitmask, i, 291 + param->mask[i]); 291 292 292 293 eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); 293 294 MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); ··· 510 507 return NOTIFY_OK; 511 508 } 512 509 513 - static u64 gather_async_events_mask(struct mlx5_core_dev *dev) 510 + static void gather_user_async_events(struct mlx5_core_dev *dev, u64 mask[4]) 511 + { 512 + __be64 *user_unaffiliated_events; 513 + __be64 *user_affiliated_events; 514 + int i; 515 + 516 + user_affiliated_events = 517 + MLX5_CAP_DEV_EVENT(dev, user_affiliated_events); 518 + user_unaffiliated_events = 519 + MLX5_CAP_DEV_EVENT(dev, user_unaffiliated_events); 520 + 521 + for (i = 0; i < 4; i++) 522 + mask[i] |= be64_to_cpu(user_affiliated_events[i] | 523 + user_unaffiliated_events[i]); 524 + } 525 + 526 + static void gather_async_events_mask(struct mlx5_core_dev *dev, u64 mask[4]) 514 527 { 515 528 u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; 516 529 ··· 563 544 async_event_mask |= 564 545 (1ull << MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED); 565 546 566 - return async_event_mask; 547 + mask[0] = async_event_mask; 548 + 549 + if (MLX5_CAP_GEN(dev, event_cap)) 550 + gather_user_async_events(dev, mask); 567 551 } 568 552 569 553 static int create_async_eqs(struct mlx5_core_dev *dev) ··· 581 559 table->cmd_eq.irq_nb.notifier_call = mlx5_eq_async_int; 582 560 param = (struct mlx5_eq_param) { 
583 561 .irq_index = 0, 584 - .mask = 1ull << MLX5_EVENT_TYPE_CMD, 585 562 .nent = MLX5_NUM_CMD_EQE, 586 563 }; 564 + 565 + param.mask[0] = 1ull << MLX5_EVENT_TYPE_CMD; 587 566 err = create_async_eq(dev, &table->cmd_eq.core, &param); 588 567 if (err) { 589 568 mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); ··· 600 577 table->async_eq.irq_nb.notifier_call = mlx5_eq_async_int; 601 578 param = (struct mlx5_eq_param) { 602 579 .irq_index = 0, 603 - .mask = gather_async_events_mask(dev), 604 580 .nent = MLX5_NUM_ASYNC_EQE, 605 581 }; 582 + 583 + gather_async_events_mask(dev, param.mask); 606 584 err = create_async_eq(dev, &table->async_eq.core, &param); 607 585 if (err) { 608 586 mlx5_core_warn(dev, "failed to create async EQ %d\n", err); ··· 619 595 table->pages_eq.irq_nb.notifier_call = mlx5_eq_async_int; 620 596 param = (struct mlx5_eq_param) { 621 597 .irq_index = 0, 622 - .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, 623 598 .nent = /* TODO: sriov max_vf + */ 1, 624 599 }; 600 + 601 + param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_REQUEST; 625 602 err = create_async_eq(dev, &table->pages_eq.core, &param); 626 603 if (err) { 627 604 mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); ··· 814 789 eq->irq_nb.notifier_call = mlx5_eq_comp_int; 815 790 param = (struct mlx5_eq_param) { 816 791 .irq_index = vecidx, 817 - .mask = 0, 818 792 .nent = nent, 819 793 }; 820 794 err = create_map_eq(dev, &eq->core, &param);
+6
drivers/net/ethernet/mellanox/mlx5/core/fw.c
··· 202 202 return err; 203 203 } 204 204 205 + if (MLX5_CAP_GEN(dev, event_cap)) { 206 + err = mlx5_core_get_caps(dev, MLX5_CAP_DEV_EVENT); 207 + if (err) 208 + return err; 209 + } 210 + 205 211 return 0; 206 212 } 207 213
+5 -1
include/linux/mlx5/device.h
··· 351 351 352 352 MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26, 353 353 354 - MLX5_EVENT_TYPE_MAX = MLX5_EVENT_TYPE_DEVICE_TRACER + 1, 354 + MLX5_EVENT_TYPE_MAX = 0x100, 355 355 }; 356 356 357 357 enum { ··· 1077 1077 MLX5_CAP_DEBUG, 1078 1078 MLX5_CAP_RESERVED_14, 1079 1079 MLX5_CAP_DEV_MEM, 1080 + MLX5_CAP_DEV_EVENT = 0x14, 1080 1081 /* NUM OF CAP Types */ 1081 1082 MLX5_CAP_NUM 1082 1083 }; ··· 1255 1254 1256 1255 #define MLX5_CAP64_DEV_MEM(mdev, cap)\ 1257 1256 MLX5_GET64(device_mem_cap, mdev->caps.hca_cur[MLX5_CAP_DEV_MEM], cap) 1257 + 1258 + #define MLX5_CAP_DEV_EVENT(mdev, cap)\ 1259 + MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap) 1258 1260 1259 1261 enum { 1260 1262 MLX5_CMD_STAT_OK = 0x0,
+1 -1
include/linux/mlx5/eq.h
··· 15 15 struct mlx5_eq_param { 16 16 u8 irq_index; 17 17 int nent; 18 - u64 mask; 18 + u64 mask[4]; 19 19 }; 20 20 21 21 struct mlx5_eq *
+10 -3
include/linux/mlx5/mlx5_ifc.h
··· 860 860 u8 reserved_at_180[0x680]; 861 861 }; 862 862 863 + struct mlx5_ifc_device_event_cap_bits { 864 + u8 user_affiliated_events[4][0x40]; 865 + 866 + u8 user_unaffiliated_events[4][0x40]; 867 + }; 868 + 863 869 enum { 864 870 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0, 865 871 MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2, ··· 1023 1017 1024 1018 u8 log_max_srq_sz[0x8]; 1025 1019 u8 log_max_qp_sz[0x8]; 1026 - u8 reserved_at_90[0x8]; 1020 + u8 event_cap[0x1]; 1021 + u8 reserved_at_91[0x7]; 1027 1022 u8 prio_tag_required[0x1]; 1028 1023 u8 reserved_at_99[0x2]; 1029 1024 u8 log_max_qp[0x5]; ··· 7429 7422 7430 7423 u8 reserved_at_280[0x40]; 7431 7424 7432 - u8 event_bitmask[0x40]; 7425 + u8 event_bitmask[4][0x40]; 7433 7426 7434 - u8 reserved_at_300[0x580]; 7427 + u8 reserved_at_3c0[0x4c0]; 7435 7428 7436 7429 u8 pas[0][0x40]; 7437 7430 };