/* include/linux/mlx5/device.h — Linux kernel v4.7 */
1/* 2 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 
31 */ 32 33#ifndef MLX5_DEVICE_H 34#define MLX5_DEVICE_H 35 36#include <linux/types.h> 37#include <rdma/ib_verbs.h> 38#include <linux/mlx5/mlx5_ifc.h> 39 40#if defined(__LITTLE_ENDIAN) 41#define MLX5_SET_HOST_ENDIANNESS 0 42#elif defined(__BIG_ENDIAN) 43#define MLX5_SET_HOST_ENDIANNESS 0x80 44#else 45#error Host endianness not defined 46#endif 47 48/* helper macros */ 49#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0) 50#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld) 51#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld))) 52#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32) 53#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64) 54#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f)) 55#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1)) 56#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld)) 57#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits) 58 59#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8) 60#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8) 61#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32) 62#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64) 63#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) 64#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) 65#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) 66#define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) 67 68/* insert a value to a struct */ 69#define MLX5_SET(typ, p, fld, v) do { \ 70 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ 71 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ 72 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ 73 (~__mlx5_dw_mask(typ, fld))) | (((v) & __mlx5_mask(typ, fld)) \ 74 << __mlx5_dw_bit_off(typ, 
fld))); \ 75} while (0) 76 77#define MLX5_SET_TO_ONES(typ, p, fld) do { \ 78 BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ 79 *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ 80 cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \ 81 (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \ 82 << __mlx5_dw_bit_off(typ, fld))); \ 83} while (0) 84 85#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) +\ 86__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \ 87__mlx5_mask(typ, fld)) 88 89#define MLX5_GET_PR(typ, p, fld) ({ \ 90 u32 ___t = MLX5_GET(typ, p, fld); \ 91 pr_debug(#fld " = 0x%x\n", ___t); \ 92 ___t; \ 93}) 94 95#define MLX5_SET64(typ, p, fld, v) do { \ 96 BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \ 97 BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \ 98 *((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \ 99} while (0) 100 101#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld))) 102 103#define MLX5_GET64_PR(typ, p, fld) ({ \ 104 u64 ___t = MLX5_GET64(typ, p, fld); \ 105 pr_debug(#fld " = 0x%llx\n", ___t); \ 106 ___t; \ 107}) 108 109/* Big endian getters */ 110#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) +\ 111 __mlx5_64_off(typ, fld))) 112 113#define MLX5_GET_BE(type_t, typ, p, fld) ({ \ 114 type_t tmp; \ 115 switch (sizeof(tmp)) { \ 116 case sizeof(u8): \ 117 tmp = (__force type_t)MLX5_GET(typ, p, fld); \ 118 break; \ 119 case sizeof(u16): \ 120 tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \ 121 break; \ 122 case sizeof(u32): \ 123 tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \ 124 break; \ 125 case sizeof(u64): \ 126 tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \ 127 break; \ 128 } \ 129 tmp; \ 130 }) 131 132enum { 133 MLX5_MAX_COMMANDS = 32, 134 MLX5_CMD_DATA_BLOCK_SIZE = 512, 135 MLX5_PCI_CMD_XPORT = 7, 136 MLX5_MKEY_BSF_OCTO_SIZE = 4, 137 MLX5_MAX_PSVS = 4, 138}; 139 140enum { 141 MLX5_EXTENDED_UD_AV = 0x80000000, 142}; 143 144enum { 145 
MLX5_CQ_STATE_ARMED = 9, 146 MLX5_CQ_STATE_ALWAYS_ARMED = 0xb, 147 MLX5_CQ_STATE_FIRED = 0xa, 148}; 149 150enum { 151 MLX5_STAT_RATE_OFFSET = 5, 152}; 153 154enum { 155 MLX5_INLINE_SEG = 0x80000000, 156}; 157 158enum { 159 MLX5_HW_START_PADDING = MLX5_INLINE_SEG, 160}; 161 162enum { 163 MLX5_MIN_PKEY_TABLE_SIZE = 128, 164 MLX5_MAX_LOG_PKEY_TABLE = 5, 165}; 166 167enum { 168 MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31 169}; 170 171enum { 172 MLX5_PFAULT_SUBTYPE_WQE = 0, 173 MLX5_PFAULT_SUBTYPE_RDMA = 1, 174}; 175 176enum { 177 MLX5_PERM_LOCAL_READ = 1 << 2, 178 MLX5_PERM_LOCAL_WRITE = 1 << 3, 179 MLX5_PERM_REMOTE_READ = 1 << 4, 180 MLX5_PERM_REMOTE_WRITE = 1 << 5, 181 MLX5_PERM_ATOMIC = 1 << 6, 182 MLX5_PERM_UMR_EN = 1 << 7, 183}; 184 185enum { 186 MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0, 187 MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2, 188 MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3, 189 MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6, 190 MLX5_PCIE_CTRL_TPH_MASK = 3 << 4, 191}; 192 193enum { 194 MLX5_ACCESS_MODE_PA = 0, 195 MLX5_ACCESS_MODE_MTT = 1, 196 MLX5_ACCESS_MODE_KLM = 2 197}; 198 199enum { 200 MLX5_MKEY_REMOTE_INVAL = 1 << 24, 201 MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29, 202 MLX5_MKEY_BSF_EN = 1 << 30, 203 MLX5_MKEY_LEN64 = 1 << 31, 204}; 205 206enum { 207 MLX5_EN_RD = (u64)1, 208 MLX5_EN_WR = (u64)2 209}; 210 211enum { 212 MLX5_BF_REGS_PER_PAGE = 4, 213 MLX5_MAX_UAR_PAGES = 1 << 8, 214 MLX5_NON_FP_BF_REGS_PER_PAGE = 2, 215 MLX5_MAX_UUARS = MLX5_MAX_UAR_PAGES * MLX5_NON_FP_BF_REGS_PER_PAGE, 216}; 217 218enum { 219 MLX5_MKEY_MASK_LEN = 1ull << 0, 220 MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1, 221 MLX5_MKEY_MASK_START_ADDR = 1ull << 6, 222 MLX5_MKEY_MASK_PD = 1ull << 7, 223 MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8, 224 MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9, 225 MLX5_MKEY_MASK_BSF_EN = 1ull << 12, 226 MLX5_MKEY_MASK_KEY = 1ull << 13, 227 MLX5_MKEY_MASK_QPN = 1ull << 14, 228 MLX5_MKEY_MASK_LR = 1ull << 17, 229 MLX5_MKEY_MASK_LW = 1ull << 18, 230 MLX5_MKEY_MASK_RR = 1ull << 19, 231 MLX5_MKEY_MASK_RW = 1ull << 20, 
232 MLX5_MKEY_MASK_A = 1ull << 21, 233 MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23, 234 MLX5_MKEY_MASK_FREE = 1ull << 29, 235}; 236 237enum { 238 MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4), 239 240 MLX5_UMR_CHECK_NOT_FREE = (1 << 5), 241 MLX5_UMR_CHECK_FREE = (2 << 5), 242 243 MLX5_UMR_INLINE = (1 << 7), 244}; 245 246#define MLX5_UMR_MTT_ALIGNMENT 0x40 247#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1) 248#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT 249 250#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8) 251 252enum { 253 MLX5_EVENT_QUEUE_TYPE_QP = 0, 254 MLX5_EVENT_QUEUE_TYPE_RQ = 1, 255 MLX5_EVENT_QUEUE_TYPE_SQ = 2, 256}; 257 258enum mlx5_event { 259 MLX5_EVENT_TYPE_COMP = 0x0, 260 261 MLX5_EVENT_TYPE_PATH_MIG = 0x01, 262 MLX5_EVENT_TYPE_COMM_EST = 0x02, 263 MLX5_EVENT_TYPE_SQ_DRAINED = 0x03, 264 MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13, 265 MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14, 266 267 MLX5_EVENT_TYPE_CQ_ERROR = 0x04, 268 MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, 269 MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07, 270 MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, 271 MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, 272 MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, 273 274 MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08, 275 MLX5_EVENT_TYPE_PORT_CHANGE = 0x09, 276 MLX5_EVENT_TYPE_GPIO_EVENT = 0x15, 277 MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19, 278 279 MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a, 280 MLX5_EVENT_TYPE_STALL_EVENT = 0x1b, 281 282 MLX5_EVENT_TYPE_CMD = 0x0a, 283 MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb, 284 285 MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, 286 MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, 287}; 288 289enum { 290 MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1, 291 MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4, 292 MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5, 293 MLX5_PORT_CHANGE_SUBTYPE_LID = 6, 294 MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7, 295 MLX5_PORT_CHANGE_SUBTYPE_GUID = 8, 296 MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9, 297}; 298 299enum { 300 MLX5_DEV_CAP_FLAG_XRC = 1LL << 3, 301 
MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR = 1LL << 8, 302 MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR = 1LL << 9, 303 MLX5_DEV_CAP_FLAG_APM = 1LL << 17, 304 MLX5_DEV_CAP_FLAG_ATOMIC = 1LL << 18, 305 MLX5_DEV_CAP_FLAG_BLOCK_MCAST = 1LL << 23, 306 MLX5_DEV_CAP_FLAG_ON_DMND_PG = 1LL << 24, 307 MLX5_DEV_CAP_FLAG_CQ_MODER = 1LL << 29, 308 MLX5_DEV_CAP_FLAG_RESIZE_CQ = 1LL << 30, 309 MLX5_DEV_CAP_FLAG_DCT = 1LL << 37, 310 MLX5_DEV_CAP_FLAG_SIG_HAND_OVER = 1LL << 40, 311 MLX5_DEV_CAP_FLAG_CMDIF_CSUM = 3LL << 46, 312}; 313 314enum { 315 MLX5_ROCE_VERSION_1 = 0, 316 MLX5_ROCE_VERSION_2 = 2, 317}; 318 319enum { 320 MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1, 321 MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2, 322}; 323 324enum { 325 MLX5_ROCE_L3_TYPE_IPV4 = 0, 326 MLX5_ROCE_L3_TYPE_IPV6 = 1, 327}; 328 329enum { 330 MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1, 331 MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2, 332}; 333 334enum { 335 MLX5_OPCODE_NOP = 0x00, 336 MLX5_OPCODE_SEND_INVAL = 0x01, 337 MLX5_OPCODE_RDMA_WRITE = 0x08, 338 MLX5_OPCODE_RDMA_WRITE_IMM = 0x09, 339 MLX5_OPCODE_SEND = 0x0a, 340 MLX5_OPCODE_SEND_IMM = 0x0b, 341 MLX5_OPCODE_LSO = 0x0e, 342 MLX5_OPCODE_RDMA_READ = 0x10, 343 MLX5_OPCODE_ATOMIC_CS = 0x11, 344 MLX5_OPCODE_ATOMIC_FA = 0x12, 345 MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14, 346 MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15, 347 MLX5_OPCODE_BIND_MW = 0x18, 348 MLX5_OPCODE_CONFIG_CMD = 0x1f, 349 350 MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00, 351 MLX5_RECV_OPCODE_SEND = 0x01, 352 MLX5_RECV_OPCODE_SEND_IMM = 0x02, 353 MLX5_RECV_OPCODE_SEND_INVAL = 0x03, 354 355 MLX5_CQE_OPCODE_ERROR = 0x1e, 356 MLX5_CQE_OPCODE_RESIZE = 0x16, 357 358 MLX5_OPCODE_SET_PSV = 0x20, 359 MLX5_OPCODE_GET_PSV = 0x21, 360 MLX5_OPCODE_CHECK_PSV = 0x22, 361 MLX5_OPCODE_RGET_PSV = 0x26, 362 MLX5_OPCODE_RCHECK_PSV = 0x27, 363 364 MLX5_OPCODE_UMR = 0x25, 365 366}; 367 368enum { 369 MLX5_SET_PORT_RESET_QKEY = 0, 370 MLX5_SET_PORT_GUID0 = 16, 371 MLX5_SET_PORT_NODE_GUID = 17, 372 MLX5_SET_PORT_SYS_GUID = 18, 373 
MLX5_SET_PORT_GID_TABLE = 19, 374 MLX5_SET_PORT_PKEY_TABLE = 20, 375}; 376 377enum { 378 MLX5_BW_NO_LIMIT = 0, 379 MLX5_100_MBPS_UNIT = 3, 380 MLX5_GBPS_UNIT = 4, 381}; 382 383enum { 384 MLX5_MAX_PAGE_SHIFT = 31 385}; 386 387enum { 388 MLX5_ADAPTER_PAGE_SHIFT = 12, 389 MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT, 390}; 391 392enum { 393 MLX5_CAP_OFF_CMDIF_CSUM = 46, 394}; 395 396enum { 397 /* 398 * Max wqe size for rdma read is 512 bytes, so this 399 * limits our max_sge_rd as the wqe needs to fit: 400 * - ctrl segment (16 bytes) 401 * - rdma segment (16 bytes) 402 * - scatter elements (16 bytes each) 403 */ 404 MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16 405}; 406 407struct mlx5_inbox_hdr { 408 __be16 opcode; 409 u8 rsvd[4]; 410 __be16 opmod; 411}; 412 413struct mlx5_outbox_hdr { 414 u8 status; 415 u8 rsvd[3]; 416 __be32 syndrome; 417}; 418 419struct mlx5_cmd_query_adapter_mbox_in { 420 struct mlx5_inbox_hdr hdr; 421 u8 rsvd[8]; 422}; 423 424struct mlx5_cmd_query_adapter_mbox_out { 425 struct mlx5_outbox_hdr hdr; 426 u8 rsvd0[24]; 427 u8 intapin; 428 u8 rsvd1[13]; 429 __be16 vsd_vendor_id; 430 u8 vsd[208]; 431 u8 vsd_psid[16]; 432}; 433 434enum mlx5_odp_transport_cap_bits { 435 MLX5_ODP_SUPPORT_SEND = 1 << 31, 436 MLX5_ODP_SUPPORT_RECV = 1 << 30, 437 MLX5_ODP_SUPPORT_WRITE = 1 << 29, 438 MLX5_ODP_SUPPORT_READ = 1 << 28, 439}; 440 441struct mlx5_odp_caps { 442 char reserved[0x10]; 443 struct { 444 __be32 rc_odp_caps; 445 __be32 uc_odp_caps; 446 __be32 ud_odp_caps; 447 } per_transport_caps; 448 char reserved2[0xe4]; 449}; 450 451struct mlx5_cmd_init_hca_mbox_in { 452 struct mlx5_inbox_hdr hdr; 453 u8 rsvd0[2]; 454 __be16 profile; 455 u8 rsvd1[4]; 456}; 457 458struct mlx5_cmd_init_hca_mbox_out { 459 struct mlx5_outbox_hdr hdr; 460 u8 rsvd[8]; 461}; 462 463struct mlx5_cmd_teardown_hca_mbox_in { 464 struct mlx5_inbox_hdr hdr; 465 u8 rsvd0[2]; 466 __be16 profile; 467 u8 rsvd1[4]; 468}; 469 470struct mlx5_cmd_teardown_hca_mbox_out { 471 struct mlx5_outbox_hdr hdr; 
472 u8 rsvd[8]; 473}; 474 475struct mlx5_cmd_layout { 476 u8 type; 477 u8 rsvd0[3]; 478 __be32 inlen; 479 __be64 in_ptr; 480 __be32 in[4]; 481 __be32 out[4]; 482 __be64 out_ptr; 483 __be32 outlen; 484 u8 token; 485 u8 sig; 486 u8 rsvd1; 487 u8 status_own; 488}; 489 490 491struct health_buffer { 492 __be32 assert_var[5]; 493 __be32 rsvd0[3]; 494 __be32 assert_exit_ptr; 495 __be32 assert_callra; 496 __be32 rsvd1[2]; 497 __be32 fw_ver; 498 __be32 hw_id; 499 __be32 rsvd2; 500 u8 irisc_index; 501 u8 synd; 502 __be16 ext_synd; 503}; 504 505struct mlx5_init_seg { 506 __be32 fw_rev; 507 __be32 cmdif_rev_fw_sub; 508 __be32 rsvd0[2]; 509 __be32 cmdq_addr_h; 510 __be32 cmdq_addr_l_sz; 511 __be32 cmd_dbell; 512 __be32 rsvd1[120]; 513 __be32 initializing; 514 struct health_buffer health; 515 __be32 rsvd2[880]; 516 __be32 internal_timer_h; 517 __be32 internal_timer_l; 518 __be32 rsvd3[2]; 519 __be32 health_counter; 520 __be32 rsvd4[1019]; 521 __be64 ieee1588_clk; 522 __be32 ieee1588_clk_type; 523 __be32 clr_intx; 524}; 525 526struct mlx5_eqe_comp { 527 __be32 reserved[6]; 528 __be32 cqn; 529}; 530 531struct mlx5_eqe_qp_srq { 532 __be32 reserved1[5]; 533 u8 type; 534 u8 reserved2[3]; 535 __be32 qp_srq_n; 536}; 537 538struct mlx5_eqe_cq_err { 539 __be32 cqn; 540 u8 reserved1[7]; 541 u8 syndrome; 542}; 543 544struct mlx5_eqe_port_state { 545 u8 reserved0[8]; 546 u8 port; 547}; 548 549struct mlx5_eqe_gpio { 550 __be32 reserved0[2]; 551 __be64 gpio_event; 552}; 553 554struct mlx5_eqe_congestion { 555 u8 type; 556 u8 rsvd0; 557 u8 congestion_level; 558}; 559 560struct mlx5_eqe_stall_vl { 561 u8 rsvd0[3]; 562 u8 port_vl; 563}; 564 565struct mlx5_eqe_cmd { 566 __be32 vector; 567 __be32 rsvd[6]; 568}; 569 570struct mlx5_eqe_page_req { 571 u8 rsvd0[2]; 572 __be16 func_id; 573 __be32 num_pages; 574 __be32 rsvd1[5]; 575}; 576 577struct mlx5_eqe_page_fault { 578 __be32 bytes_committed; 579 union { 580 struct { 581 u16 reserved1; 582 __be16 wqe_index; 583 u16 reserved2; 584 __be16 
packet_length; 585 u8 reserved3[12]; 586 } __packed wqe; 587 struct { 588 __be32 r_key; 589 u16 reserved1; 590 __be16 packet_length; 591 __be32 rdma_op_len; 592 __be64 rdma_va; 593 } __packed rdma; 594 } __packed; 595 __be32 flags_qpn; 596} __packed; 597 598struct mlx5_eqe_vport_change { 599 u8 rsvd0[2]; 600 __be16 vport_num; 601 __be32 rsvd1[6]; 602} __packed; 603 604union ev_data { 605 __be32 raw[7]; 606 struct mlx5_eqe_cmd cmd; 607 struct mlx5_eqe_comp comp; 608 struct mlx5_eqe_qp_srq qp_srq; 609 struct mlx5_eqe_cq_err cq_err; 610 struct mlx5_eqe_port_state port; 611 struct mlx5_eqe_gpio gpio; 612 struct mlx5_eqe_congestion cong; 613 struct mlx5_eqe_stall_vl stall_vl; 614 struct mlx5_eqe_page_req req_pages; 615 struct mlx5_eqe_page_fault page_fault; 616 struct mlx5_eqe_vport_change vport_change; 617} __packed; 618 619struct mlx5_eqe { 620 u8 rsvd0; 621 u8 type; 622 u8 rsvd1; 623 u8 sub_type; 624 __be32 rsvd2[7]; 625 union ev_data data; 626 __be16 rsvd3; 627 u8 signature; 628 u8 owner; 629} __packed; 630 631struct mlx5_cmd_prot_block { 632 u8 data[MLX5_CMD_DATA_BLOCK_SIZE]; 633 u8 rsvd0[48]; 634 __be64 next; 635 __be32 block_num; 636 u8 rsvd1; 637 u8 token; 638 u8 ctrl_sig; 639 u8 sig; 640}; 641 642enum { 643 MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5, 644}; 645 646struct mlx5_err_cqe { 647 u8 rsvd0[32]; 648 __be32 srqn; 649 u8 rsvd1[18]; 650 u8 vendor_err_synd; 651 u8 syndrome; 652 __be32 s_wqe_opcode_qpn; 653 __be16 wqe_counter; 654 u8 signature; 655 u8 op_own; 656}; 657 658struct mlx5_cqe64 { 659 u8 outer_l3_tunneled; 660 u8 rsvd0; 661 __be16 wqe_id; 662 u8 lro_tcppsh_abort_dupack; 663 u8 lro_min_ttl; 664 __be16 lro_tcp_win; 665 __be32 lro_ack_seq_num; 666 __be32 rss_hash_result; 667 u8 rss_hash_type; 668 u8 ml_path; 669 u8 rsvd20[2]; 670 __be16 check_sum; 671 __be16 slid; 672 __be32 flags_rqpn; 673 u8 hds_ip_ext; 674 u8 l4_l3_hdr_type; 675 __be16 vlan_info; 676 __be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */ 677 __be32 imm_inval_pkey; 678 u8 rsvd40[4]; 679 
__be32 byte_cnt; 680 __be32 timestamp_h; 681 __be32 timestamp_l; 682 __be32 sop_drop_qpn; 683 __be16 wqe_counter; 684 u8 signature; 685 u8 op_own; 686}; 687 688struct mlx5_mini_cqe8 { 689 union { 690 __be32 rx_hash_result; 691 struct { 692 __be16 checksum; 693 __be16 rsvd; 694 }; 695 struct { 696 __be16 wqe_counter; 697 u8 s_wqe_opcode; 698 u8 reserved; 699 } s_wqe_info; 700 }; 701 __be32 byte_cnt; 702}; 703 704enum { 705 MLX5_NO_INLINE_DATA, 706 MLX5_INLINE_DATA32_SEG, 707 MLX5_INLINE_DATA64_SEG, 708 MLX5_COMPRESSED, 709}; 710 711enum { 712 MLX5_CQE_FORMAT_CSUM = 0x1, 713}; 714 715#define MLX5_MINI_CQE_ARRAY_SIZE 8 716 717static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe) 718{ 719 return (cqe->op_own >> 2) & 0x3; 720} 721 722static inline int get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe) 723{ 724 return (cqe->lro_tcppsh_abort_dupack >> 6) & 1; 725} 726 727static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe) 728{ 729 return (cqe->l4_l3_hdr_type >> 4) & 0x7; 730} 731 732static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe) 733{ 734 return (cqe->l4_l3_hdr_type >> 2) & 0x3; 735} 736 737static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe) 738{ 739 return cqe->outer_l3_tunneled & 0x1; 740} 741 742static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe) 743{ 744 return !!(cqe->l4_l3_hdr_type & 0x1); 745} 746 747static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe) 748{ 749 u32 hi, lo; 750 751 hi = be32_to_cpu(cqe->timestamp_h); 752 lo = be32_to_cpu(cqe->timestamp_l); 753 754 return (u64)lo | ((u64)hi << 32); 755} 756 757struct mpwrq_cqe_bc { 758 __be16 filler_consumed_strides; 759 __be16 byte_cnt; 760}; 761 762static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe) 763{ 764 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; 765 766 return be16_to_cpu(bc->byte_cnt); 767} 768 769static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc) 770{ 771 return 0x7fff & be16_to_cpu(bc->filler_consumed_strides); 772} 
773 774static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe) 775{ 776 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; 777 778 return mpwrq_get_cqe_bc_consumed_strides(bc); 779} 780 781static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe) 782{ 783 struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt; 784 785 return 0x8000 & be16_to_cpu(bc->filler_consumed_strides); 786} 787 788static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe) 789{ 790 return be16_to_cpu(cqe->wqe_counter); 791} 792 793enum { 794 CQE_L4_HDR_TYPE_NONE = 0x0, 795 CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1, 796 CQE_L4_HDR_TYPE_UDP = 0x2, 797 CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3, 798 CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4, 799}; 800 801enum { 802 CQE_RSS_HTYPE_IP = 0x3 << 6, 803 CQE_RSS_HTYPE_L4 = 0x3 << 2, 804}; 805 806enum { 807 MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0, 808 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1, 809 MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2, 810}; 811 812enum { 813 CQE_L2_OK = 1 << 0, 814 CQE_L3_OK = 1 << 1, 815 CQE_L4_OK = 1 << 2, 816}; 817 818struct mlx5_sig_err_cqe { 819 u8 rsvd0[16]; 820 __be32 expected_trans_sig; 821 __be32 actual_trans_sig; 822 __be32 expected_reftag; 823 __be32 actual_reftag; 824 __be16 syndrome; 825 u8 rsvd22[2]; 826 __be32 mkey; 827 __be64 err_offset; 828 u8 rsvd30[8]; 829 __be32 qpn; 830 u8 rsvd38[2]; 831 u8 signature; 832 u8 op_own; 833}; 834 835struct mlx5_wqe_srq_next_seg { 836 u8 rsvd0[2]; 837 __be16 next_wqe_index; 838 u8 signature; 839 u8 rsvd1[11]; 840}; 841 842union mlx5_ext_cqe { 843 struct ib_grh grh; 844 u8 inl[64]; 845}; 846 847struct mlx5_cqe128 { 848 union mlx5_ext_cqe inl_grh; 849 struct mlx5_cqe64 cqe64; 850}; 851 852struct mlx5_srq_ctx { 853 u8 state_log_sz; 854 u8 rsvd0[3]; 855 __be32 flags_xrcd; 856 __be32 pgoff_cqn; 857 u8 rsvd1[4]; 858 u8 log_pg_sz; 859 u8 rsvd2[7]; 860 __be32 pd; 861 __be16 lwm; 862 __be16 wqe_cnt; 863 u8 rsvd3[8]; 864 __be64 db_record; 865}; 866 867struct 
mlx5_create_srq_mbox_in { 868 struct mlx5_inbox_hdr hdr; 869 __be32 input_srqn; 870 u8 rsvd0[4]; 871 struct mlx5_srq_ctx ctx; 872 u8 rsvd1[208]; 873 __be64 pas[0]; 874}; 875 876struct mlx5_create_srq_mbox_out { 877 struct mlx5_outbox_hdr hdr; 878 __be32 srqn; 879 u8 rsvd[4]; 880}; 881 882struct mlx5_destroy_srq_mbox_in { 883 struct mlx5_inbox_hdr hdr; 884 __be32 srqn; 885 u8 rsvd[4]; 886}; 887 888struct mlx5_destroy_srq_mbox_out { 889 struct mlx5_outbox_hdr hdr; 890 u8 rsvd[8]; 891}; 892 893struct mlx5_query_srq_mbox_in { 894 struct mlx5_inbox_hdr hdr; 895 __be32 srqn; 896 u8 rsvd0[4]; 897}; 898 899struct mlx5_query_srq_mbox_out { 900 struct mlx5_outbox_hdr hdr; 901 u8 rsvd0[8]; 902 struct mlx5_srq_ctx ctx; 903 u8 rsvd1[32]; 904 __be64 pas[0]; 905}; 906 907struct mlx5_arm_srq_mbox_in { 908 struct mlx5_inbox_hdr hdr; 909 __be32 srqn; 910 __be16 rsvd; 911 __be16 lwm; 912}; 913 914struct mlx5_arm_srq_mbox_out { 915 struct mlx5_outbox_hdr hdr; 916 u8 rsvd[8]; 917}; 918 919struct mlx5_cq_context { 920 u8 status; 921 u8 cqe_sz_flags; 922 u8 st; 923 u8 rsvd3; 924 u8 rsvd4[6]; 925 __be16 page_offset; 926 __be32 log_sz_usr_page; 927 __be16 cq_period; 928 __be16 cq_max_count; 929 __be16 rsvd20; 930 __be16 c_eqn; 931 u8 log_pg_sz; 932 u8 rsvd25[7]; 933 __be32 last_notified_index; 934 __be32 solicit_producer_index; 935 __be32 consumer_counter; 936 __be32 producer_counter; 937 u8 rsvd48[8]; 938 __be64 db_record_addr; 939}; 940 941struct mlx5_create_cq_mbox_in { 942 struct mlx5_inbox_hdr hdr; 943 __be32 input_cqn; 944 u8 rsvdx[4]; 945 struct mlx5_cq_context ctx; 946 u8 rsvd6[192]; 947 __be64 pas[0]; 948}; 949 950struct mlx5_create_cq_mbox_out { 951 struct mlx5_outbox_hdr hdr; 952 __be32 cqn; 953 u8 rsvd0[4]; 954}; 955 956struct mlx5_destroy_cq_mbox_in { 957 struct mlx5_inbox_hdr hdr; 958 __be32 cqn; 959 u8 rsvd0[4]; 960}; 961 962struct mlx5_destroy_cq_mbox_out { 963 struct mlx5_outbox_hdr hdr; 964 u8 rsvd0[8]; 965}; 966 967struct mlx5_query_cq_mbox_in { 968 struct mlx5_inbox_hdr 
hdr; 969 __be32 cqn; 970 u8 rsvd0[4]; 971}; 972 973struct mlx5_query_cq_mbox_out { 974 struct mlx5_outbox_hdr hdr; 975 u8 rsvd0[8]; 976 struct mlx5_cq_context ctx; 977 u8 rsvd6[16]; 978 __be64 pas[0]; 979}; 980 981struct mlx5_modify_cq_mbox_in { 982 struct mlx5_inbox_hdr hdr; 983 __be32 cqn; 984 __be32 field_select; 985 struct mlx5_cq_context ctx; 986 u8 rsvd[192]; 987 __be64 pas[0]; 988}; 989 990struct mlx5_modify_cq_mbox_out { 991 struct mlx5_outbox_hdr hdr; 992 u8 rsvd[8]; 993}; 994 995struct mlx5_enable_hca_mbox_in { 996 struct mlx5_inbox_hdr hdr; 997 u8 rsvd[8]; 998}; 999 1000struct mlx5_enable_hca_mbox_out { 1001 struct mlx5_outbox_hdr hdr; 1002 u8 rsvd[8]; 1003}; 1004 1005struct mlx5_disable_hca_mbox_in { 1006 struct mlx5_inbox_hdr hdr; 1007 u8 rsvd[8]; 1008}; 1009 1010struct mlx5_disable_hca_mbox_out { 1011 struct mlx5_outbox_hdr hdr; 1012 u8 rsvd[8]; 1013}; 1014 1015struct mlx5_eq_context { 1016 u8 status; 1017 u8 ec_oi; 1018 u8 st; 1019 u8 rsvd2[7]; 1020 __be16 page_pffset; 1021 __be32 log_sz_usr_page; 1022 u8 rsvd3[7]; 1023 u8 intr; 1024 u8 log_page_size; 1025 u8 rsvd4[15]; 1026 __be32 consumer_counter; 1027 __be32 produser_counter; 1028 u8 rsvd5[16]; 1029}; 1030 1031struct mlx5_create_eq_mbox_in { 1032 struct mlx5_inbox_hdr hdr; 1033 u8 rsvd0[3]; 1034 u8 input_eqn; 1035 u8 rsvd1[4]; 1036 struct mlx5_eq_context ctx; 1037 u8 rsvd2[8]; 1038 __be64 events_mask; 1039 u8 rsvd3[176]; 1040 __be64 pas[0]; 1041}; 1042 1043struct mlx5_create_eq_mbox_out { 1044 struct mlx5_outbox_hdr hdr; 1045 u8 rsvd0[3]; 1046 u8 eq_number; 1047 u8 rsvd1[4]; 1048}; 1049 1050struct mlx5_destroy_eq_mbox_in { 1051 struct mlx5_inbox_hdr hdr; 1052 u8 rsvd0[3]; 1053 u8 eqn; 1054 u8 rsvd1[4]; 1055}; 1056 1057struct mlx5_destroy_eq_mbox_out { 1058 struct mlx5_outbox_hdr hdr; 1059 u8 rsvd[8]; 1060}; 1061 1062struct mlx5_map_eq_mbox_in { 1063 struct mlx5_inbox_hdr hdr; 1064 __be64 mask; 1065 u8 mu; 1066 u8 rsvd0[2]; 1067 u8 eqn; 1068 u8 rsvd1[24]; 1069}; 1070 1071struct mlx5_map_eq_mbox_out 
{ 1072 struct mlx5_outbox_hdr hdr; 1073 u8 rsvd[8]; 1074}; 1075 1076struct mlx5_query_eq_mbox_in { 1077 struct mlx5_inbox_hdr hdr; 1078 u8 rsvd0[3]; 1079 u8 eqn; 1080 u8 rsvd1[4]; 1081}; 1082 1083struct mlx5_query_eq_mbox_out { 1084 struct mlx5_outbox_hdr hdr; 1085 u8 rsvd[8]; 1086 struct mlx5_eq_context ctx; 1087}; 1088 1089enum { 1090 MLX5_MKEY_STATUS_FREE = 1 << 6, 1091}; 1092 1093struct mlx5_mkey_seg { 1094 /* This is a two bit field occupying bits 31-30. 1095 * bit 31 is always 0, 1096 * bit 30 is zero for regular MRs and 1 (e.g free) for UMRs that do not have tanslation 1097 */ 1098 u8 status; 1099 u8 pcie_control; 1100 u8 flags; 1101 u8 version; 1102 __be32 qpn_mkey7_0; 1103 u8 rsvd1[4]; 1104 __be32 flags_pd; 1105 __be64 start_addr; 1106 __be64 len; 1107 __be32 bsfs_octo_size; 1108 u8 rsvd2[16]; 1109 __be32 xlt_oct_size; 1110 u8 rsvd3[3]; 1111 u8 log2_page_size; 1112 u8 rsvd4[4]; 1113}; 1114 1115struct mlx5_query_special_ctxs_mbox_in { 1116 struct mlx5_inbox_hdr hdr; 1117 u8 rsvd[8]; 1118}; 1119 1120struct mlx5_query_special_ctxs_mbox_out { 1121 struct mlx5_outbox_hdr hdr; 1122 __be32 dump_fill_mkey; 1123 __be32 reserved_lkey; 1124}; 1125 1126struct mlx5_create_mkey_mbox_in { 1127 struct mlx5_inbox_hdr hdr; 1128 __be32 input_mkey_index; 1129 __be32 flags; 1130 struct mlx5_mkey_seg seg; 1131 u8 rsvd1[16]; 1132 __be32 xlat_oct_act_size; 1133 __be32 rsvd2; 1134 u8 rsvd3[168]; 1135 __be64 pas[0]; 1136}; 1137 1138struct mlx5_create_mkey_mbox_out { 1139 struct mlx5_outbox_hdr hdr; 1140 __be32 mkey; 1141 u8 rsvd[4]; 1142}; 1143 1144struct mlx5_destroy_mkey_mbox_in { 1145 struct mlx5_inbox_hdr hdr; 1146 __be32 mkey; 1147 u8 rsvd[4]; 1148}; 1149 1150struct mlx5_destroy_mkey_mbox_out { 1151 struct mlx5_outbox_hdr hdr; 1152 u8 rsvd[8]; 1153}; 1154 1155struct mlx5_query_mkey_mbox_in { 1156 struct mlx5_inbox_hdr hdr; 1157 __be32 mkey; 1158}; 1159 1160struct mlx5_query_mkey_mbox_out { 1161 struct mlx5_outbox_hdr hdr; 1162 __be64 pas[0]; 1163}; 1164 1165struct 
mlx5_modify_mkey_mbox_in { 1166 struct mlx5_inbox_hdr hdr; 1167 __be32 mkey; 1168 __be64 pas[0]; 1169}; 1170 1171struct mlx5_modify_mkey_mbox_out { 1172 struct mlx5_outbox_hdr hdr; 1173 u8 rsvd[8]; 1174}; 1175 1176struct mlx5_dump_mkey_mbox_in { 1177 struct mlx5_inbox_hdr hdr; 1178}; 1179 1180struct mlx5_dump_mkey_mbox_out { 1181 struct mlx5_outbox_hdr hdr; 1182 __be32 mkey; 1183}; 1184 1185struct mlx5_mad_ifc_mbox_in { 1186 struct mlx5_inbox_hdr hdr; 1187 __be16 remote_lid; 1188 u8 rsvd0; 1189 u8 port; 1190 u8 rsvd1[4]; 1191 u8 data[256]; 1192}; 1193 1194struct mlx5_mad_ifc_mbox_out { 1195 struct mlx5_outbox_hdr hdr; 1196 u8 rsvd[8]; 1197 u8 data[256]; 1198}; 1199 1200struct mlx5_access_reg_mbox_in { 1201 struct mlx5_inbox_hdr hdr; 1202 u8 rsvd0[2]; 1203 __be16 register_id; 1204 __be32 arg; 1205 __be32 data[0]; 1206}; 1207 1208struct mlx5_access_reg_mbox_out { 1209 struct mlx5_outbox_hdr hdr; 1210 u8 rsvd[8]; 1211 __be32 data[0]; 1212}; 1213 1214#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90) 1215 1216enum { 1217 MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0 1218}; 1219 1220struct mlx5_allocate_psv_in { 1221 struct mlx5_inbox_hdr hdr; 1222 __be32 npsv_pd; 1223 __be32 rsvd_psv0; 1224}; 1225 1226struct mlx5_allocate_psv_out { 1227 struct mlx5_outbox_hdr hdr; 1228 u8 rsvd[8]; 1229 __be32 psv_idx[4]; 1230}; 1231 1232struct mlx5_destroy_psv_in { 1233 struct mlx5_inbox_hdr hdr; 1234 __be32 psv_number; 1235 u8 rsvd[4]; 1236}; 1237 1238struct mlx5_destroy_psv_out { 1239 struct mlx5_outbox_hdr hdr; 1240 u8 rsvd[8]; 1241}; 1242 1243enum { 1244 VPORT_STATE_DOWN = 0x0, 1245 VPORT_STATE_UP = 0x1, 1246}; 1247 1248enum { 1249 MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0, 1250 MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1, 1251 MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2, 1252}; 1253 1254enum { 1255 MLX5_L3_PROT_TYPE_IPV4 = 0, 1256 MLX5_L3_PROT_TYPE_IPV6 = 1, 1257}; 1258 1259enum { 1260 MLX5_L4_PROT_TYPE_TCP = 0, 1261 MLX5_L4_PROT_TYPE_UDP = 1, 1262}; 1263 1264enum { 1265 
MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0, 1266 MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1, 1267 MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2, 1268 MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3, 1269 MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4, 1270}; 1271 1272enum { 1273 MLX5_MATCH_OUTER_HEADERS = 1 << 0, 1274 MLX5_MATCH_MISC_PARAMETERS = 1 << 1, 1275 MLX5_MATCH_INNER_HEADERS = 1 << 2, 1276 1277}; 1278 1279enum { 1280 MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0, 1281 MLX5_FLOW_TABLE_TYPE_ESWITCH = 4, 1282}; 1283 1284enum { 1285 MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0, 1286 MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1, 1287 MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2, 1288}; 1289 1290enum mlx5_list_type { 1291 MLX5_NVPRT_LIST_TYPE_UC = 0x0, 1292 MLX5_NVPRT_LIST_TYPE_MC = 0x1, 1293 MLX5_NVPRT_LIST_TYPE_VLAN = 0x2, 1294}; 1295 1296enum { 1297 MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0, 1298 MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1, 1299}; 1300 1301enum mlx5_wol_mode { 1302 MLX5_WOL_DISABLE = 0, 1303 MLX5_WOL_SECURED_MAGIC = 1 << 1, 1304 MLX5_WOL_MAGIC = 1 << 2, 1305 MLX5_WOL_ARP = 1 << 3, 1306 MLX5_WOL_BROADCAST = 1 << 4, 1307 MLX5_WOL_MULTICAST = 1 << 5, 1308 MLX5_WOL_UNICAST = 1 << 6, 1309 MLX5_WOL_PHY_ACTIVITY = 1 << 7, 1310}; 1311 1312/* MLX5 DEV CAPs */ 1313 1314/* TODO: EAT.ME */ 1315enum mlx5_cap_mode { 1316 HCA_CAP_OPMOD_GET_MAX = 0, 1317 HCA_CAP_OPMOD_GET_CUR = 1, 1318}; 1319 1320enum mlx5_cap_type { 1321 MLX5_CAP_GENERAL = 0, 1322 MLX5_CAP_ETHERNET_OFFLOADS, 1323 MLX5_CAP_ODP, 1324 MLX5_CAP_ATOMIC, 1325 MLX5_CAP_ROCE, 1326 MLX5_CAP_IPOIB_OFFLOADS, 1327 MLX5_CAP_EOIB_OFFLOADS, 1328 MLX5_CAP_FLOW_TABLE, 1329 MLX5_CAP_ESWITCH_FLOW_TABLE, 1330 MLX5_CAP_ESWITCH, 1331 MLX5_CAP_RESERVED, 1332 MLX5_CAP_VECTOR_CALC, 1333 /* NUM OF CAP Types */ 1334 MLX5_CAP_NUM 1335}; 1336 1337/* GET Dev Caps macros */ 1338#define MLX5_CAP_GEN(mdev, cap) \ 1339 MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap) 1340 1341#define MLX5_CAP_GEN_MAX(mdev, cap) \ 1342 MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap) 1343 
/* Capability getters: *_MAX reads hca_caps_max, the plain form hca_caps_cur */
#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps,\
		 mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)
1393#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \ 1394 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap) 1395 1396#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \ 1397 MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap) 1398 1399#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \ 1400 MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap) 1401 1402#define MLX5_CAP_ESW(mdev, cap) \ 1403 MLX5_GET(e_switch_cap, \ 1404 mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap) 1405 1406#define MLX5_CAP_ESW_MAX(mdev, cap) \ 1407 MLX5_GET(e_switch_cap, \ 1408 mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap) 1409 1410#define MLX5_CAP_ODP(mdev, cap)\ 1411 MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap) 1412 1413#define MLX5_CAP_VECTOR_CALC(mdev, cap) \ 1414 MLX5_GET(vector_calc_cap, \ 1415 mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap) 1416 1417enum { 1418 MLX5_CMD_STAT_OK = 0x0, 1419 MLX5_CMD_STAT_INT_ERR = 0x1, 1420 MLX5_CMD_STAT_BAD_OP_ERR = 0x2, 1421 MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3, 1422 MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, 1423 MLX5_CMD_STAT_BAD_RES_ERR = 0x5, 1424 MLX5_CMD_STAT_RES_BUSY = 0x6, 1425 MLX5_CMD_STAT_LIM_ERR = 0x8, 1426 MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, 1427 MLX5_CMD_STAT_IX_ERR = 0xa, 1428 MLX5_CMD_STAT_NO_RES_ERR = 0xf, 1429 MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50, 1430 MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51, 1431 MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10, 1432 MLX5_CMD_STAT_BAD_PKT_ERR = 0x30, 1433 MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40, 1434}; 1435 1436enum { 1437 MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0, 1438 MLX5_RFC_2863_COUNTERS_GROUP = 0x1, 1439 MLX5_RFC_2819_COUNTERS_GROUP = 0x2, 1440 MLX5_RFC_3635_COUNTERS_GROUP = 0x3, 1441 MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5, 1442 MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10, 1443 MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11, 1444 MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12, 1445 MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20, 1446}; 1447 1448static inline u16 
mlx5_to_sw_pkey_sz(int pkey_sz) 1449{ 1450 if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE) 1451 return 0; 1452 return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; 1453} 1454 1455#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 8 1456#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 8 1457#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 1458#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS +\ 1459 MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS +\ 1460 MLX5_BY_PASS_NUM_MULTICAST_PRIOS) 1461 1462#endif /* MLX5_DEVICE_H */