Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_ABI_USER_H
#define MLX5_ABI_USER_H

#include <linux/types.h>	/* __u8/__u16/__u32/__u64, __aligned_u64 */
#include <linux/if_ether.h>	/* For ETH_ALEN. */
#include <rdma/ib_user_ioctl_verbs.h>	/* RDMA_UAPI_PTR() */
40
/*
 * Flag bits for the QP-creation command (presumably carried in
 * mlx5_ib_create_qp.flags below -- confirm against the driver).
 * ABI: existing bit values must never change.
 */
enum {
	MLX5_QP_FLAG_SIGNATURE		= 1 << 0,
	MLX5_QP_FLAG_SCATTER_CQE	= 1 << 1,
	MLX5_QP_FLAG_TUNNEL_OFFLOADS	= 1 << 2,
	MLX5_QP_FLAG_BFREG_INDEX	= 1 << 3,
	MLX5_QP_FLAG_TYPE_DCT		= 1 << 4,
	MLX5_QP_FLAG_TYPE_DCI		= 1 << 5,
	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_UC = 1 << 6,
	MLX5_QP_FLAG_TIR_ALLOW_SELF_LB_MC = 1 << 7,
	MLX5_QP_FLAG_ALLOW_SCATTER_CQE	= 1 << 8,
	MLX5_QP_FLAG_PACKET_BASED_CREDIT_MODE	= 1 << 9,
	MLX5_QP_FLAG_UAR_PAGE_INDEX = 1 << 10,
};
54
/* Flag bits for SRQ creation (see mlx5_ib_create_srq.flags). */
enum {
	MLX5_SRQ_FLAG_SIGNATURE		= 1 << 0,
};
58
/* Flag bits for WQ creation (see mlx5_ib_create_wq.flags). */
enum {
	MLX5_WQ_FLAG_SIGNATURE		= 1 << 0,
};
62
/* Increment this value if any changes that break userspace ABI
 * compatibility are made.
 */
#define MLX5_IB_UVERBS_ABI_VERSION	1
67
/* Make sure that all structs defined in this file remain laid out so
 * that they pack the same way on 32-bit and 64-bit architectures (to
 * avoid incompatibility between 32-bit userspace and 64-bit kernels).
 * In particular do not use pointer types -- pass pointers in __u64
 * instead.
 */
74
/* Legacy (v1) request payload for user-context allocation. */
struct mlx5_ib_alloc_ucontext_req {
	__u32	total_num_bfregs;	/* total blue-flame registers requested */
	__u32	num_low_latency_bfregs;	/* subset reserved for low latency */
};
79
/*
 * Userspace-library capability bits, reported via the 64-bit
 * mlx5_ib_alloc_ucontext_req_v2.lib_caps field (hence the __u64 casts).
 */
enum mlx5_lib_caps {
	MLX5_LIB_CAP_4K_UAR	= (__u64)1 << 0,
	MLX5_LIB_CAP_DYN_UAR	= (__u64)1 << 1,
};
84
/* Flag bits for mlx5_ib_alloc_ucontext_req_v2.flags. */
enum mlx5_ib_alloc_uctx_v2_flags {
	MLX5_IB_ALLOC_UCTX_DEVX	= 1 << 0,
};
/*
 * v2 request payload for user-context allocation; extends the legacy
 * layout with flags, capability negotiation and explicit padding.
 */
struct mlx5_ib_alloc_ucontext_req_v2 {
	__u32	total_num_bfregs;
	__u32	num_low_latency_bfregs;
	__u32	flags;			/* enum mlx5_ib_alloc_uctx_v2_flags */
	__u32	comp_mask;		/* which optional fields are valid */
	__u8	max_cqe_version;	/* highest CQE version userspace supports */
	__u8	reserved0;
	__u16	reserved1;
	__u32	reserved2;
	__aligned_u64 lib_caps;		/* enum mlx5_lib_caps bitmask */
};
99
/* comp_mask bits indicating which mlx5_ib_alloc_ucontext_resp fields are valid. */
enum mlx5_ib_alloc_ucontext_resp_mask {
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY    = 1UL << 1,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE               = 1UL << 2,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS           = 1UL << 3,
	MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS	   = 1UL << 4,
};
107
/* Bits for cmds_supp_uhw: commands supporting user hardware (vendor) data. */
enum mlx5_user_cmds_supp_uhw {
	MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE = 1 << 0,
	MLX5_USER_CMDS_SUPP_UHW_CREATE_AH    = 1 << 1,
};
112
/* The eth_min_inline response value is set to off-by-one vs the FW
 * returned value to allow user-space to deal with older kernels.
 * MODE_NA (0) therefore means "not available / unknown".
 */
enum mlx5_user_inline_mode {
	MLX5_USER_INLINE_MODE_NA,	/* not reported */
	MLX5_USER_INLINE_MODE_NONE,	/* no inline headers required */
	MLX5_USER_INLINE_MODE_L2,	/* inline up to L2 headers */
	MLX5_USER_INLINE_MODE_IP,	/* inline up to IP headers */
	MLX5_USER_INLINE_MODE_TCP_UDP,	/* inline up to TCP/UDP headers */
};
123
/* Bits for mlx5_ib_alloc_ucontext_resp.flow_action_flags (ESP AES-GCM caps). */
enum {
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM = 1 << 0,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_REQ_METADATA = 1 << 1,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_SPI_STEERING = 1 << 2,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_FULL_OFFLOAD = 1 << 3,
	MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4,
};
131
/*
 * Response payload for user-context allocation: device limits and
 * per-context resources. Fields past response_length are gated by
 * comp_mask (enum mlx5_ib_alloc_ucontext_resp_mask).
 */
struct mlx5_ib_alloc_ucontext_resp {
	__u32	qp_tab_size;
	__u32	bf_reg_size;		/* blue-flame register size in bytes */
	__u32	tot_bfregs;		/* bfregs actually granted */
	__u32	cache_line_size;
	__u16	max_sq_desc_sz;
	__u16	max_rq_desc_sz;
	__u32	max_send_wqebb;
	__u32	max_recv_wr;
	__u32	max_srq_recv_wr;
	__u16	num_ports;
	__u16	flow_action_flags;	/* MLX5_USER_ALLOC_UCONTEXT_FLOW_ACTION_FLAGS_* */
	__u32	comp_mask;		/* enum mlx5_ib_alloc_ucontext_resp_mask */
	__u32	response_length;	/* bytes of this struct the kernel filled */
	__u8	cqe_version;
	__u8	cmds_supp_uhw;		/* enum mlx5_user_cmds_supp_uhw */
	__u8	eth_min_inline;		/* enum mlx5_user_inline_mode (off-by-one vs FW) */
	__u8	clock_info_versions;	/* bitmap; bit indexes MLX5_IB_CLOCK_INFO_V* */
	__aligned_u64 hca_core_clock_offset;
	__u32	log_uar_size;
	__u32	num_uars_per_page;
	__u32	num_dyn_bfregs;
	__u32	dump_fill_mkey;
};
156
/* Response for PD allocation: the protection-domain number. */
struct mlx5_ib_alloc_pd_resp {
	__u32	pdn;
};
160
/* TSO capabilities reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_tso_caps {
	__u32 max_tso; /* Maximum tso payload size in bytes */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_UD
	 */
	__u32 supported_qpts;
};
170
/* RSS capabilities reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_rss_caps {
	__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 reserved[7];	/* pad to 8-byte multiple for 32/64-bit parity */
};
176
/* Supported mini-CQE (CQE compression) result formats. */
enum mlx5_ib_cqe_comp_res_format {
	MLX5_IB_CQE_RES_FORMAT_HASH	= 1 << 0,
	MLX5_IB_CQE_RES_FORMAT_CSUM	= 1 << 1,
	MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX = 1 << 2,
};
182
/* CQE-compression capabilities reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_cqe_comp_caps {
	__u32 max_num;
	__u32 supported_format; /* enum mlx5_ib_cqe_comp_res_format */
};
187
/* Capability bits for mlx5_packet_pacing_caps.cap_flags. */
enum mlx5_ib_packet_pacing_cap_flags {
	MLX5_IB_PP_SUPPORT_BURST	= 1 << 0,
};
191
/* Packet-pacing (rate-limit) capabilities reported in mlx5_ib_query_device_resp. */
struct mlx5_packet_pacing_caps {
	__u32 qp_rate_limit_min;
	__u32 qp_rate_limit_max; /* In kpbs */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u8  cap_flags; /* enum mlx5_ib_packet_pacing_cap_flags */
	__u8  reserved[3];
};
204
/* Multi-packet WQE capability bits; bit 0 is retired but kept reserved. */
enum mlx5_ib_mpw_caps {
	MPW_RESERVED		= 1 << 0,
	MLX5_IB_ALLOW_MPW	= 1 << 1,
	MLX5_IB_SUPPORT_EMPW	= 1 << 2,
};
210
/* Software-parsing offload bits for mlx5_ib_sw_parsing_caps. */
enum mlx5_ib_sw_parsing_offloads {
	MLX5_IB_SW_PARSING	= 1 << 0,
	MLX5_IB_SW_PARSING_CSUM	= 1 << 1,
	MLX5_IB_SW_PARSING_LSO	= 1 << 2,
};
216
/* Software-parsing capabilities reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_sw_parsing_caps {
	__u32 sw_parsing_offloads; /* enum mlx5_ib_sw_parsing_offloads */

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
};
226
/* Striding-RQ (multi-packet RQ) limits reported in mlx5_ib_query_device_resp. */
struct mlx5_ib_striding_rq_caps {
	__u32 min_single_stride_log_num_of_bytes;
	__u32 max_single_stride_log_num_of_bytes;
	__u32 min_single_wqe_log_num_of_strides;
	__u32 max_single_wqe_log_num_of_strides;

	/* Corresponding bit will be set if qp type from
	 * 'enum ib_qp_type' is supported, e.g.
	 * supported_qpts |= 1 << IB_QPT_RAW_PACKET
	 */
	__u32 supported_qpts;
	__u32 reserved;		/* keep struct size a multiple of 8 bytes */
};
240
/* Bits for mlx5_ib_query_device_resp.flags. */
enum mlx5_ib_query_dev_resp_flags {
	/* Support 128B CQE compression */
	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP	= 1 << 0,
	MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD	= 1 << 1,
	MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE	= 1 << 2,
	MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT = 1 << 3,
};
248
/* Bits for mlx5_ib_query_device_resp.tunnel_offloads_caps. */
enum mlx5_ib_tunnel_offloads {
	MLX5_IB_TUNNELED_OFFLOADS_VXLAN	 = 1 << 0,
	MLX5_IB_TUNNELED_OFFLOADS_GRE	 = 1 << 1,
	MLX5_IB_TUNNELED_OFFLOADS_GENEVE = 1 << 2,
	MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE = 1 << 3,
	MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP = 1 << 4,
};
256
/*
 * Vendor (UHW) portion of the query-device response; aggregates the
 * capability sub-structures defined above.
 */
struct mlx5_ib_query_device_resp {
	__u32	comp_mask;
	__u32	response_length;	/* bytes of this struct the kernel filled */
	struct	mlx5_ib_tso_caps tso_caps;
	struct	mlx5_ib_rss_caps rss_caps;
	struct	mlx5_ib_cqe_comp_caps cqe_comp_caps;
	struct	mlx5_packet_pacing_caps packet_pacing_caps;
	__u32	mlx5_ib_support_multi_pkt_send_wqes;	/* enum mlx5_ib_mpw_caps bits */
	__u32	flags; /* Use enum mlx5_ib_query_dev_resp_flags */
	struct	mlx5_ib_sw_parsing_caps sw_parsing_caps;
	struct	mlx5_ib_striding_rq_caps striding_rq_caps;
	__u32	tunnel_offloads_caps; /* enum mlx5_ib_tunnel_offloads */
	__u32	reserved;
};
271
/* Bits for mlx5_ib_create_cq.flags. */
enum mlx5_ib_create_cq_flags {
	MLX5_IB_CREATE_CQ_FLAGS_CQE_128B_PAD	= 1 << 0,
	MLX5_IB_CREATE_CQ_FLAGS_UAR_PAGE_INDEX  = 1 << 1,
	MLX5_IB_CREATE_CQ_FLAGS_REAL_TIME_TS	= 1 << 2,
};
277
/* Request payload for CQ creation. */
struct mlx5_ib_create_cq {
	__aligned_u64 buf_addr;		/* userspace CQ buffer address */
	__aligned_u64 db_addr;		/* userspace doorbell record address */
	__u32	cqe_size;
	__u8    cqe_comp_en;		/* enable CQE compression */
	__u8    cqe_comp_res_format;	/* enum mlx5_ib_cqe_comp_res_format */
	__u16	flags;			/* enum mlx5_ib_create_cq_flags */
	__u16	uar_page_index;		/* valid with ..._FLAGS_UAR_PAGE_INDEX */
	__u16	reserved0;
	__u32	reserved1;
};
289
/* Response for CQ creation: the CQ number. */
struct mlx5_ib_create_cq_resp {
	__u32	cqn;
	__u32	reserved;
};
294
/* Request payload for CQ resize. */
struct mlx5_ib_resize_cq {
	__aligned_u64 buf_addr;	/* new userspace CQ buffer address */
	__u16	cqe_size;
	__u16	reserved0;
	__u32	reserved1;
};
301
/* Request payload for SRQ creation. */
struct mlx5_ib_create_srq {
	__aligned_u64 buf_addr;	/* userspace SRQ buffer address */
	__aligned_u64 db_addr;	/* userspace doorbell record address */
	__u32	flags;		/* MLX5_SRQ_FLAG_* */
	__u32	reserved0; /* explicit padding (optional on i386) */
	__u32	uidx;		/* user index, for events/CQE steering */
	__u32	reserved1;
};
310
/* Response for SRQ creation: the SRQ number. */
struct mlx5_ib_create_srq_resp {
	__u32	srqn;
	__u32	reserved;
};
315
/* Request payload for QP creation. */
struct mlx5_ib_create_qp {
	__aligned_u64 buf_addr;		/* userspace WQ buffer address */
	__aligned_u64 db_addr;		/* userspace doorbell record address */
	__u32	sq_wqe_count;
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	flags;			/* MLX5_QP_FLAG_* */
	__u32	uidx;			/* user index */
	__u32	bfreg_index;		/* valid with MLX5_QP_FLAG_BFREG_INDEX */
	union {
		__aligned_u64 sq_buf_addr;	/* separate SQ buffer (raw packet QP) */
		__aligned_u64 access_key;	/* DCT access key (MLX5_QP_FLAG_TYPE_DCT) */
	};
	__u32	ece_options;		/* enhanced connection establishment */
	__u32	reserved;
};
332
/* RX Hash function flags */
enum mlx5_rx_hash_function_flags {
	MLX5_RX_HASH_FUNC_TOEPLITZ	= 1 << 0,
};
337
/*
 * RX Hash flags, these flags allows to set which incoming packet's field should
 * participates in RX Hash. Each flag represent certain packet's field,
 * when the flag is set the field that is represented by the flag will
 * participate in RX Hash calculation.
 * Note: *IPV4 and *IPV6 flags can't be enabled together on the same QP
 * and *TCP and *UDP flags can't be enabled together on the same QP.
 */
enum mlx5_rx_hash_fields {
	MLX5_RX_HASH_SRC_IPV4	= 1 << 0,
	MLX5_RX_HASH_DST_IPV4	= 1 << 1,
	MLX5_RX_HASH_SRC_IPV6	= 1 << 2,
	MLX5_RX_HASH_DST_IPV6	= 1 << 3,
	MLX5_RX_HASH_SRC_PORT_TCP	= 1 << 4,
	MLX5_RX_HASH_DST_PORT_TCP	= 1 << 5,
	MLX5_RX_HASH_SRC_PORT_UDP	= 1 << 6,
	MLX5_RX_HASH_DST_PORT_UDP	= 1 << 7,
	MLX5_RX_HASH_IPSEC_SPI		= 1 << 8,
	/* Save bits for future fields */
	MLX5_RX_HASH_INNER		= (1UL << 31),	/* hash on inner (tunneled) headers */
};
359
/* Request payload for RSS (TIR-backed) QP creation. */
struct mlx5_ib_create_qp_rss {
	__aligned_u64 rx_hash_fields_mask; /* enum mlx5_rx_hash_fields */
	__u8 rx_hash_function; /* enum mlx5_rx_hash_function_flags */
	__u8 rx_key_len; /* valid only for Toeplitz */
	__u8 reserved[6];
	__u8 rx_hash_key[128]; /* valid only for Toeplitz */
	__u32   comp_mask;
	__u32	flags;		/* MLX5_QP_FLAG_* (e.g. TIR self-loopback bits) -- confirm */
};
369
/* comp_mask bits indicating which mlx5_ib_create_qp_resp fields are valid. */
enum mlx5_ib_create_qp_resp_mask {
	MLX5_IB_CREATE_QP_RESP_MASK_TIRN = 1UL << 0,
	MLX5_IB_CREATE_QP_RESP_MASK_TISN = 1UL << 1,
	MLX5_IB_CREATE_QP_RESP_MASK_RQN  = 1UL << 2,
	MLX5_IB_CREATE_QP_RESP_MASK_SQN  = 1UL << 3,
	MLX5_IB_CREATE_QP_RESP_MASK_TIR_ICM_ADDR  = 1UL << 4,
};
377
/* Response for QP creation; optional fields gated by comp_mask. */
struct mlx5_ib_create_qp_resp {
	__u32	bfreg_index;
	__u32   ece_options;	/* enhanced connection establishment */
	__u32	comp_mask;	/* enum mlx5_ib_create_qp_resp_mask */
	__u32	tirn;
	__u32	tisn;
	__u32	rqn;
	__u32	sqn;
	__u32   reserved1;
	__u64	tir_icm_addr;	/* 8-aligned here by the 8 preceding __u32s */
};
389
/* Request payload for memory-window allocation. */
struct mlx5_ib_alloc_mw {
	__u32	comp_mask;
	__u8	num_klms;
	__u8	reserved1;
	__u16	reserved2;
};
396
/* comp_mask bits for mlx5_ib_create_wq. */
enum mlx5_ib_create_wq_mask {
	MLX5_IB_CREATE_WQ_STRIDING_RQ	= (1 << 0),
};
400
/* Request payload for WQ creation. */
struct mlx5_ib_create_wq {
	__aligned_u64 buf_addr;		/* userspace WQ buffer address */
	__aligned_u64 db_addr;		/* userspace doorbell record address */
	__u32	rq_wqe_count;
	__u32	rq_wqe_shift;
	__u32	user_index;
	__u32	flags;			/* MLX5_WQ_FLAG_* */
	__u32	comp_mask;		/* enum mlx5_ib_create_wq_mask */
	__u32	single_stride_log_num_of_bytes;	/* striding-RQ only */
	__u32	single_wqe_log_num_of_strides;	/* striding-RQ only */
	__u32	two_byte_shift_en;		/* striding-RQ only */
};
413
/* Response for AH creation: resolved destination MAC. */
struct mlx5_ib_create_ah_resp {
	__u32	response_length;
	__u8	dmac[ETH_ALEN];
	__u8	reserved[6];	/* pad to 8-byte multiple */
};
419
/* Burst shaping parameters for rate-limited QPs (see mlx5_ib_modify_qp). */
struct mlx5_ib_burst_info {
	__u32       max_burst_sz;	/* max burst size in bytes */
	__u16       typical_pkt_sz;
	__u16       reserved;
};
425
/* Request payload for QP modify. */
struct mlx5_ib_modify_qp {
	__u32			   comp_mask;
	struct mlx5_ib_burst_info  burst_info;
	__u32			   ece_options;	/* enhanced connection establishment */
};
431
/* Response for QP modify (e.g. DCT number on DCT activation). */
struct mlx5_ib_modify_qp_resp {
	__u32	response_length;
	__u32	dctn;
	__u32   ece_options;
	__u32   reserved;
};
438
/* Response for WQ creation. */
struct mlx5_ib_create_wq_resp {
	__u32	response_length;
	__u32	reserved;
};
443
/* Response for RWQ indirection-table creation. */
struct mlx5_ib_create_rwq_ind_tbl_resp {
	__u32	response_length;
	__u32	reserved;
};
448
/* Request payload for WQ modify. */
struct mlx5_ib_modify_wq {
	__u32	comp_mask;
	__u32	reserved;
};
453
/*
 * Shared page (mmap'ed via MLX5_IB_MMAP_CLOCK_INFO) letting userspace
 * convert HCA core-clock cycles to time. `sign` is a seqlock-style
 * generation counter: MLX5_IB_CLOCK_INFO_KERNEL_UPDATING while the
 * kernel is rewriting the fields -- presumably readers must retry;
 * confirm against the driver's update path.
 */
struct mlx5_ib_clock_info {
	__u32 sign;		/* update-in-progress / generation marker */
	__u32 resv;
	__aligned_u64 nsec;	/* time at last snapshot */
	__aligned_u64 cycles;	/* core-clock reading at last snapshot */
	__aligned_u64 frac;	/* fractional nanoseconds accumulator */
	__u32 mult;		/* cycles-to-ns multiplier */
	__u32 shift;		/* cycles-to-ns shift */
	__aligned_u64 mask;	/* valid bits of the cycle counter */
	__aligned_u64 overflow_period;
};
465
/* Command values encoded in the mmap offset to select what to map. */
enum mlx5_ib_mmap_cmd {
	MLX5_IB_MMAP_REGULAR_PAGE               = 0,
	MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES       = 1,
	MLX5_IB_MMAP_WC_PAGE                    = 2,
	MLX5_IB_MMAP_NC_PAGE                    = 3,
	/* 5 is chosen in order to be compatible with old versions of libmlx5 */
	MLX5_IB_MMAP_CORE_CLOCK                 = 5,
	MLX5_IB_MMAP_ALLOC_WC                   = 6,
	MLX5_IB_MMAP_CLOCK_INFO                 = 7,
	MLX5_IB_MMAP_DEVICE_MEM                 = 8,
};
477
/* Value of mlx5_ib_clock_info.sign while the kernel is updating the page. */
enum {
	MLX5_IB_CLOCK_INFO_KERNEL_UPDATING = 1,
};
481
/* Bit indexes for the mlx5_ib_alloc_ucontext_resp.clock_info_versions bitmap */
enum {
	MLX5_IB_CLOCK_INFO_V1              = 0,
};
486
/* One flow-counter binding: a description tag and the counter index. */
struct mlx5_ib_flow_counters_desc {
	__u32	description;
	__u32	index;
};
491
/* Array of counter descriptors passed by userspace for one counters object. */
struct mlx5_ib_flow_counters_data {
	RDMA_UAPI_PTR(struct mlx5_ib_flow_counters_desc *, counters_data);
	__u32   ncounters;	/* entries pointed to by counters_data */
	__u32   reserved;
};
497
/* Vendor payload for flow creation; trailing flexible array sized by ncounters_data. */
struct mlx5_ib_create_flow {
	__u32 ncounters_data;	/* number of entries in data[] */
	__u32 reserved;
	/*
	 * Following are counters data based on ncounters_data, each
	 * entry in the data[] should match a corresponding counter object
	 * that was pointed by a counters spec upon the flow creation
	 */
	struct mlx5_ib_flow_counters_data data[];
};
508
509#endif /* MLX5_ABI_USER_H */