/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef MLX5_DRIVER_H
#define MLX5_DRIVER_H

#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <linux/workqueue.h>
#include <linux/mempool.h>
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/auxiliary_bus.h>

#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
#include <linux/mlx5/eq.h>
#include <linux/timecounter.h>
#include <linux/ptp_clock_kernel.h>
#include <net/devlink.h>

#define MLX5_ADEV_NAME "mlx5_core"

enum {
	MLX5_BOARD_ID_LEN = 64,
};

enum {
	/* One minute for the sake of bringup. Generally, commands must always
	 * complete, and we may need to increase this timeout value.
	 */
	MLX5_CMD_TIMEOUT_MSEC = 60 * 1000,
	MLX5_CMD_WQ_MAX_NAME = 32,
};

enum {
	CMD_OWNER_SW = 0x0,
	CMD_OWNER_HW = 0x1,
	CMD_STATUS_SUCCESS = 0,
};

enum mlx5_sqp_t {
	MLX5_SQP_SMI = 0,
	MLX5_SQP_GSI = 1,
	MLX5_SQP_IEEE_1588 = 2,
	MLX5_SQP_SNIFFER = 3,
	MLX5_SQP_SYNC_UMR = 4,
};

enum {
	MLX5_MAX_PORTS = 2,
};

enum {
	MLX5_ATOMIC_MODE_OFFSET = 16,
	MLX5_ATOMIC_MODE_IB_COMP = 1,
	MLX5_ATOMIC_MODE_CX = 2,
	MLX5_ATOMIC_MODE_8B = 3,
	MLX5_ATOMIC_MODE_16B = 4,
	MLX5_ATOMIC_MODE_32B = 5,
	MLX5_ATOMIC_MODE_64B = 6,
	MLX5_ATOMIC_MODE_128B = 7,
	MLX5_ATOMIC_MODE_256B = 8,
};

enum {
	MLX5_REG_QPTS = 0x4002,
	MLX5_REG_QETCR = 0x4005,
	MLX5_REG_QTCT = 0x400a,
	MLX5_REG_QPDPM = 0x4013,
	MLX5_REG_QCAM = 0x4019,
	MLX5_REG_DCBX_PARAM = 0x4020,
	MLX5_REG_DCBX_APP = 0x4021,
	MLX5_REG_FPGA_CAP = 0x4022,
	MLX5_REG_FPGA_CTRL = 0x4023,
	MLX5_REG_FPGA_ACCESS_REG = 0x4024,
	MLX5_REG_CORE_DUMP = 0x402e,
	MLX5_REG_PCAP = 0x5001,
	MLX5_REG_PMTU = 0x5003,
	MLX5_REG_PTYS = 0x5004,
	MLX5_REG_PAOS = 0x5006,
	MLX5_REG_PFCC = 0x5007,
	MLX5_REG_PPCNT = 0x5008,
	MLX5_REG_PPTB = 0x500b,
	MLX5_REG_PBMC = 0x500c,
	MLX5_REG_PMAOS = 0x5012,
	MLX5_REG_PUDE = 0x5009,
	MLX5_REG_PMPE = 0x5010,
	MLX5_REG_PELC = 0x500e,
	MLX5_REG_PVLC = 0x500f,
	MLX5_REG_PCMR = 0x5041,
	MLX5_REG_PDDR = 0x5031,
	MLX5_REG_PMLP = 0x5002,
	MLX5_REG_PPLM = 0x5023,
	MLX5_REG_PCAM = 0x507f,
	MLX5_REG_NODE_DESC = 0x6001,
	MLX5_REG_HOST_ENDIANNESS = 0x7004,
	MLX5_REG_MCIA = 0x9014,
	MLX5_REG_MFRL = 0x9028,
	MLX5_REG_MLCR = 0x902b,
	MLX5_REG_MTRC_CAP = 0x9040,
	MLX5_REG_MTRC_CONF = 0x9041,
	MLX5_REG_MTRC_STDB = 0x9042,
	MLX5_REG_MTRC_CTRL = 0x9043,
	MLX5_REG_MPEIN = 0x9050,
	MLX5_REG_MPCNT = 0x9051,
	MLX5_REG_MTPPS = 0x9053,
	MLX5_REG_MTPPSE = 0x9054,
	MLX5_REG_MTUTC = 0x9055,
	MLX5_REG_MPEGC = 0x9056,
	MLX5_REG_MCQS = 0x9060,
	MLX5_REG_MCQI = 0x9061,
	MLX5_REG_MCC = 0x9062,
	MLX5_REG_MCDA = 0x9063,
	MLX5_REG_MCAM = 0x907f,
	MLX5_REG_MIRC = 0x9162,
	MLX5_REG_SBCAM = 0xB01F,
	MLX5_REG_RESOURCE_DUMP = 0xC000,
};

enum mlx5_qpts_trust_state {
	MLX5_QPTS_TRUST_PCP = 1,
	MLX5_QPTS_TRUST_DSCP = 2,
};

enum mlx5_dcbx_oper_mode {
	MLX5E_DCBX_PARAM_VER_OPER_HOST = 0x0,
	MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3,
};

enum {
	MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
	MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
	MLX5_ATOMIC_OPS_EXTENDED_CMP_SWAP = 1 << 2,
	MLX5_ATOMIC_OPS_EXTENDED_FETCH_ADD = 1 << 3,
};

enum mlx5_page_fault_resume_flags {
	MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
	MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
	MLX5_PAGE_FAULT_RESUME_RDMA = 1 << 2,
	MLX5_PAGE_FAULT_RESUME_ERROR = 1 << 7,
};

enum dbg_rsc_type {
	MLX5_DBG_RSC_QP,
	MLX5_DBG_RSC_EQ,
	MLX5_DBG_RSC_CQ,
};

enum port_state_policy {
	MLX5_POLICY_DOWN = 0,
	MLX5_POLICY_UP = 1,
	MLX5_POLICY_FOLLOW = 2,
	MLX5_POLICY_INVALID = 0xffffffff
};

enum mlx5_coredev_type {
	MLX5_COREDEV_PF,
	MLX5_COREDEV_VF,
	MLX5_COREDEV_SF,
};

struct mlx5_field_desc {
	int i;
};

struct mlx5_rsc_debug {
	struct mlx5_core_dev *dev;
	void *object;
	enum dbg_rsc_type type;
	struct dentry *root;
	struct mlx5_field_desc fields[];
};

enum mlx5_dev_event {
	MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */
	MLX5_DEV_EVENT_PORT_AFFINITY = 129,
};

enum mlx5_port_status {
	MLX5_PORT_UP = 1,
	MLX5_PORT_DOWN = 2,
};

enum mlx5_cmdif_state {
	MLX5_CMDIF_STATE_UNINITIALIZED,
	MLX5_CMDIF_STATE_UP,
	MLX5_CMDIF_STATE_DOWN,
};

struct mlx5_cmd_first {
	__be32 data[4];
};

struct mlx5_cmd_msg {
	struct list_head list;
	struct cmd_msg_cache *parent;
	u32 len;
	struct mlx5_cmd_first first;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_cmd_debug {
	struct dentry *dbg_root;
	void *in_msg;
	void *out_msg;
	u8 status;
	u16 inlen;
	u16 outlen;
};

struct cmd_msg_cache {
	/* protect block chain allocations
	 */
	spinlock_t lock;
	struct list_head head;
	unsigned int max_inbox_size;
	unsigned int num_ent;
};

enum {
	MLX5_NUM_COMMAND_CACHES = 5,
};

struct mlx5_cmd_stats {
	u64 sum;
	u64 n;
	struct dentry *root;
	/* protect command average calculations */
	spinlock_t lock;
};

struct mlx5_cmd {
	struct mlx5_nb nb;

	enum mlx5_cmdif_state state;
	void *cmd_alloc_buf;
	dma_addr_t alloc_dma;
	int alloc_size;
	void *cmd_buf;
	dma_addr_t dma;
	u16 cmdif_rev;
	u8 log_sz;
	u8 log_stride;
	int max_reg_cmds;
	int events;
	u32 __iomem *vector;

	/* protect command queue allocations
	 */
	spinlock_t alloc_lock;

	/* protect token allocations
	 */
	spinlock_t token_lock;
	u8 token;
	unsigned long bitmask;
	char wq_name[MLX5_CMD_WQ_MAX_NAME];
	struct workqueue_struct *wq;
	struct semaphore sem;
	struct semaphore pages_sem;
	int mode;
	u16 allowed_opcode;
	struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
	struct dma_pool *pool;
	struct mlx5_cmd_debug dbg;
	struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES];
	int checksum_disabled;
	struct mlx5_cmd_stats *stats;
};

struct mlx5_cmd_mailbox {
	void *buf;
	dma_addr_t dma;
	struct mlx5_cmd_mailbox *next;
};

struct mlx5_buf_list {
	void *buf;
	dma_addr_t map;
};

struct mlx5_frag_buf {
	struct mlx5_buf_list *frags;
	int npages;
	int size;
	u8 page_shift;
};

struct mlx5_frag_buf_ctrl {
	struct mlx5_buf_list *frags;
	u32 sz_m1;
	u16 frag_sz_m1;
	u16 strides_offset;
	u8 log_sz;
	u8 log_stride;
	u8 log_frag_strides;
};

struct mlx5_core_psv {
	u32 psv_idx;
	struct psv_layout {
		u32 pd;
		u16 syndrome;
		u16 reserved;
		u16 bg;
		u16 app_tag;
		u32 ref_tag;
	} psv;
};

struct mlx5_core_sig_ctx {
	struct mlx5_core_psv psv_memory;
	struct mlx5_core_psv psv_wire;
	struct ib_sig_err err_item;
	bool sig_status_checked;
	bool sig_err_exists;
	u32 sigerr_count;
};

enum {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_core_mkey {
	u64 iova;
	u64 size;
	u32 key;
	u32 pd;
	u32 type;
	struct wait_queue_head wait;
	refcount_t usecount;
};

#define MLX5_24BIT_MASK ((1 << 24) - 1)

enum mlx5_res_type {
	MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
	MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
	MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
	MLX5_RES_SRQ = 3,
	MLX5_RES_XSRQ = 4,
	MLX5_RES_XRQ = 5,
	MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT,
};

struct mlx5_core_rsc_common {
	enum mlx5_res_type res;
	refcount_t refcount;
	struct completion free;
};

struct mlx5_uars_page {
	void __iomem *map;
	bool wc;
	u32 index;
	struct list_head list;
	unsigned int bfregs;
	unsigned long *reg_bitmap; /* for non fast path bf regs */
	unsigned long *fp_bitmap;
	unsigned int reg_avail;
	unsigned int fp_avail;
	struct kref ref_count;
	struct mlx5_core_dev *mdev;
};

struct mlx5_bfreg_head {
	/* protect blue flame registers allocations */
	struct mutex lock;
	struct list_head list;
};

struct mlx5_bfreg_data {
	struct mlx5_bfreg_head reg_head;
	struct mlx5_bfreg_head wc_head;
};

struct mlx5_sq_bfreg {
	void __iomem *map;
	struct mlx5_uars_page *up;
	bool wc;
	u32 index;
	unsigned int offset;
};

struct mlx5_core_health {
	struct health_buffer __iomem *health;
	__be32 __iomem *health_counter;
	struct timer_list timer;
	u32 prev;
	int miss_counter;
	u8 synd;
	u32 fatal_error;
	u32 crdump_size;
	/* wq spinlock to synchronize draining */
	spinlock_t wq_lock;
	struct workqueue_struct *wq;
	unsigned long flags;
	struct work_struct fatal_report_work;
	struct work_struct report_work;
	struct devlink_health_reporter *fw_reporter;
	struct devlink_health_reporter *fw_fatal_reporter;
};

struct mlx5_qp_table {
	struct notifier_block nb;

	/* protect radix tree
	 */
	spinlock_t lock;
	struct radix_tree_root tree;
};

struct mlx5_vf_context {
	int enabled;
	u64 port_guid;
	u64 node_guid;
	/* Valid bits are used to validate administrative guid only.
	 * Enabled after ndo_set_vf_guid
	 */
	u8 port_guid_valid:1;
	u8 node_guid_valid:1;
	enum port_state_policy policy;
};

struct mlx5_core_sriov {
	struct mlx5_vf_context *vfs_ctx;
	int num_vfs;
	u16 max_vfs;
};

struct mlx5_fc_pool {
	struct mlx5_core_dev *dev;
	struct mutex pool_lock; /* protects pool lists */
	struct list_head fully_used;
	struct list_head partially_used;
	struct list_head unused;
	int available_fcs;
	int used_fcs;
	int threshold;
};

struct mlx5_fc_stats {
	spinlock_t counters_idr_lock; /* protects counters_idr */
	struct idr counters_idr;
	struct list_head counters;
	struct llist_head addlist;
	struct llist_head dellist;

	struct workqueue_struct *wq;
	struct delayed_work work;
	unsigned long next_query;
	unsigned long sampling_interval; /* jiffies */
	u32 *bulk_query_out;
	struct mlx5_fc_pool fc_pool;
};

struct mlx5_events;
struct mlx5_mpfs;
struct mlx5_eswitch;
struct mlx5_lag;
struct mlx5_devcom;
struct mlx5_fw_reset;
struct mlx5_eq_table;
struct mlx5_irq_table;
struct mlx5_vhca_state_notifier;
struct mlx5_sf_dev_table;
struct mlx5_sf_hw_table;
struct mlx5_sf_table;

struct mlx5_rate_limit {
	u32 rate;
	u32 max_burst_sz;
	u16 typical_pkt_sz;
};

struct mlx5_rl_entry {
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)];
	u64 refcount;
	u16 index;
	u16 uid;
	u8 dedicated : 1;
};

struct mlx5_rl_table {
	/* protect rate limit table */
	struct mutex rl_lock;
	u16 max_size;
	u32 max_rate;
	u32 min_rate;
	struct mlx5_rl_entry *rl_entry;
	u64 refcount;
};

struct mlx5_core_roce {
	struct mlx5_flow_table *ft;
	struct mlx5_flow_group *fg;
	struct mlx5_flow_handle *allow_rule;
};

enum {
	MLX5_PRIV_FLAGS_DISABLE_IB_ADEV = 1 << 0,
	MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV = 1 << 1,
};

struct mlx5_adev {
	struct auxiliary_device adev;
	struct mlx5_core_dev *mdev;
	int idx;
};

struct mlx5_priv {
	/* IRQ table is valid only for real PCI devices (PF or VF) */
	struct mlx5_irq_table *irq_table;
	struct mlx5_eq_table *eq_table;

	/* pages stuff */
	struct mlx5_nb pg_nb;
	struct workqueue_struct *pg_wq;
	struct xarray page_root_xa;
	int fw_pages;
	atomic_t reg_pages;
	struct list_head free_list;
	int vfs_pages;
	int host_pf_pages;

	struct mlx5_core_health health;
	struct list_head traps;

	/* start: qp stuff */
	struct dentry *qp_debugfs;
	struct dentry *eq_debugfs;
	struct dentry *cq_debugfs;
	struct dentry *cmdif_debugfs;
	/* end: qp stuff */

	/* start: alloc stuff */
	/* protect buffer allocation according to numa node */
	struct mutex alloc_mutex;
	int numa_node;

	struct mutex pgdir_mutex;
	struct list_head pgdir_list;
	/* end: alloc stuff */
	struct dentry *dbg_root;

	struct list_head ctx_list;
	spinlock_t ctx_lock;
	struct mlx5_adev **adev;
	int adev_idx;
	struct mlx5_events *events;

	struct mlx5_flow_steering *steering;
	struct mlx5_mpfs *mpfs;
	struct mlx5_eswitch *eswitch;
	struct mlx5_core_sriov sriov;
	struct mlx5_lag *lag;
	u32 flags;
	struct mlx5_devcom *devcom;
	struct mlx5_fw_reset *fw_reset;
	struct mlx5_core_roce roce;
	struct mlx5_fc_stats fc_stats;
	struct mlx5_rl_table rl_table;

	struct mlx5_bfreg_data bfregs;
	struct mlx5_uars_page *uar;
#ifdef CONFIG_MLX5_SF
	struct mlx5_vhca_state_notifier *vhca_state_notifier;
	struct mlx5_sf_dev_table *sf_dev_table;
	struct mlx5_core_dev *parent_mdev;
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
	struct mlx5_sf_hw_table *sf_hw_table;
	struct mlx5_sf_table *sf_table;
#endif
};

enum mlx5_device_state {
	MLX5_DEVICE_STATE_UNINITIALIZED,
	MLX5_DEVICE_STATE_UP,
	MLX5_DEVICE_STATE_INTERNAL_ERROR,
};

enum mlx5_interface_state {
	MLX5_INTERFACE_STATE_UP = BIT(0),
};

enum mlx5_pci_status {
	MLX5_PCI_STATUS_DISABLED,
	MLX5_PCI_STATUS_ENABLED,
};

enum mlx5_pagefault_type_flags {
	MLX5_PFAULT_REQUESTOR = 1 << 0,
	MLX5_PFAULT_WRITE = 1 << 1,
	MLX5_PFAULT_RDMA = 1 << 2,
};

struct mlx5_td {
	/* protects tirs_list changes while tirs are refreshed */
	struct mutex list_lock;
	struct list_head tirs_list;
	u32 tdn;
};

struct mlx5e_resources {
	struct mlx5e_hw_objs {
		u32 pdn;
		struct mlx5_td td;
		struct mlx5_core_mkey mkey;
		struct mlx5_sq_bfreg bfreg;
	} hw_objs;
	struct devlink_port dl_port;
	struct net_device *uplink_netdev;
};

enum mlx5_sw_icm_type {
	MLX5_SW_ICM_TYPE_STEERING,
	MLX5_SW_ICM_TYPE_HEADER_MODIFY,
};

#define MLX5_MAX_RESERVED_GIDS 8

struct mlx5_rsvd_gids {
	unsigned int start;
	unsigned int count;
	struct ida ida;
};

#define MAX_PIN_NUM 8
struct mlx5_pps {
	u8 pin_caps[MAX_PIN_NUM];
	struct work_struct out_work;
	u64 start[MAX_PIN_NUM];
	u8 enabled;
};

struct mlx5_timer {
	struct cyclecounter cycles;
	struct timecounter tc;
	u32 nominal_c_mult;
	unsigned long overflow_period;
	struct delayed_work overflow_work;
};

struct mlx5_clock {
	struct mlx5_nb pps_nb;
	seqlock_t lock;
	struct hwtstamp_config hwtstamp_config;
	struct ptp_clock *ptp;
	struct ptp_clock_info ptp_info;
	struct mlx5_pps pps_info;
	struct mlx5_timer timer;
};

struct mlx5_dm;
struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;

#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
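
/* Example (illustrative, not from this file): a device reporting
 * log_sw_icm_alloc_granularity == 12 allocates SW ICM in
 * MLX5_SW_ICM_BLOCK_SIZE(dev) == 1 << 12 == 4096-byte blocks.
 */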

enum {
	MLX5_PROF_MASK_QP_SIZE = (u64)1 << 0,
	MLX5_PROF_MASK_MR_CACHE = (u64)1 << 1,
};

enum {
	MR_CACHE_LAST_STD_ENTRY = 20,
	MLX5_IMR_MTT_CACHE_ENTRY,
	MLX5_IMR_KSM_CACHE_ENTRY,
	MAX_MR_CACHE_ENTRIES
};

struct mlx5_profile {
	u64 mask;
	u8 log_max_qp;
	struct {
		int size;
		int limit;
	} mr_cache[MAX_MR_CACHE_ENTRIES];
};

struct mlx5_core_dev {
	struct device *device;
	enum mlx5_coredev_type coredev_type;
	struct pci_dev *pdev;
	/* sync pci state */
	struct mutex pci_status_mutex;
	enum mlx5_pci_status pci_status;
	u8 rev_id;
	char board_id[MLX5_BOARD_ID_LEN];
	struct mlx5_cmd cmd;
	struct {
		u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
		u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
		u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
		u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
		u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
		u8 embedded_cpu;
	} caps;
	u64 sys_image_guid;
	phys_addr_t iseg_base;
	struct mlx5_init_seg __iomem *iseg;
	phys_addr_t bar_addr;
	enum mlx5_device_state state;
	/* sync interface state */
	struct mutex intf_state_mutex;
	unsigned long intf_state;
	struct mlx5_priv priv;
	struct mlx5_profile profile;
	u32 issi;
	struct mlx5e_resources mlx5e_res;
	struct mlx5_dm *dm;
	struct mlx5_vxlan *vxlan;
	struct mlx5_geneve *geneve;
	struct {
		struct mlx5_rsvd_gids reserved_gids;
		u32 roce_en;
	} roce;
#ifdef CONFIG_MLX5_FPGA
	struct mlx5_fpga_device *fpga;
#endif
#ifdef CONFIG_MLX5_ACCEL
	const struct mlx5_accel_ipsec_ops *ipsec_ops;
#endif
	struct mlx5_clock clock;
	struct mlx5_ib_clock_info *clock_info;
	struct mlx5_fw_tracer *tracer;
	struct mlx5_rsc_dump *rsc_dump;
	u32 vsc_addr;
	struct mlx5_hv_vhca *hv_vhca;
};

struct mlx5_db {
	__be32 *db;
	union {
		struct mlx5_db_pgdir *pgdir;
		struct mlx5_ib_user_db_page *user_page;
	} u;
	dma_addr_t dma;
	int index;
};

enum {
	MLX5_COMP_EQ_SIZE = 1024,
};

enum {
	MLX5_PTYS_IB = 1 << 0,
	MLX5_PTYS_EN = 1 << 2,
};

typedef void (*mlx5_cmd_cbk_t)(int status, void *context);

enum {
	MLX5_CMD_ENT_STATE_PENDING_COMP,
};

struct mlx5_cmd_work_ent {
	unsigned long state;
	struct mlx5_cmd_msg *in;
	struct mlx5_cmd_msg *out;
	void *uout;
	int uout_size;
	mlx5_cmd_cbk_t callback;
	struct delayed_work cb_timeout_work;
	void *context;
	int idx;
	struct completion handling;
	struct completion done;
	struct mlx5_cmd *cmd;
	struct work_struct work;
	struct mlx5_cmd_layout *lay;
	int ret;
	int page_queue;
	u8 status;
	u8 token;
	u64 ts1;
	u64 ts2;
	u16 op;
	bool polling;
	/* Track the max comp handlers */
	refcount_t refcnt;
};

struct mlx5_pas {
	u64 pa;
	u8 log_sz;
};

enum phy_port_state {
	MLX5_AAA_111
};

struct mlx5_hca_vport_context {
	u32 field_select;
	bool sm_virt_aware;
	bool has_smi;
	bool has_raw;
	enum port_state_policy policy;
	enum phy_port_state phys_state;
	enum ib_port_state vport_state;
	u8 port_physical_state;
	u64 sys_image_guid;
	u64 port_guid;
	u64 node_guid;
	u32 cap_mask1;
	u32 cap_mask1_perm;
	u16 cap_mask2;
	u16 cap_mask2_perm;
	u16 lid;
	u8 init_type_reply; /* bitmask: see ib spec 14.2.5.6 InitTypeReply */
	u8 lmc;
	u8 subnet_timeout;
	u16 sm_lid;
	u8 sm_sl;
	u16 qkey_violation_counter;
	u16 pkey_violation_counter;
	bool grh_required;
};

static inline void *mlx5_buf_offset(struct mlx5_frag_buf *buf, int offset)
{
	return buf->frags->buf + offset;
}

#define STRUCT_FIELD(header, field) \
	.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
	.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field

static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
	return pci_get_drvdata(pdev);
}

extern struct dentry *mlx5_debugfs_root;

static inline u16 fw_rev_maj(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) & 0xffff;
}

static inline u16 fw_rev_min(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->fw_rev) >> 16;
}

static inline u16 fw_rev_sub(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) & 0xffff;
}

static inline u32 mlx5_base_mkey(const u32 key)
{
	return key & 0xffffff00u;
}

static inline u32 wq_get_byte_sz(u8 log_sz, u8 log_stride)
{
	return ((u32)1 << log_sz) << log_stride;
}

static inline void mlx5_init_fbc_offset(struct mlx5_buf_list *frags,
					u8 log_stride, u8 log_sz,
					u16 strides_offset,
					struct mlx5_frag_buf_ctrl *fbc)
{
	fbc->frags = frags;
	fbc->log_stride = log_stride;
	fbc->log_sz = log_sz;
	fbc->sz_m1 = (1 << fbc->log_sz) - 1;
	fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
	fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
	fbc->strides_offset = strides_offset;
}

static inline void mlx5_init_fbc(struct mlx5_buf_list *frags,
				 u8 log_stride, u8 log_sz,
				 struct mlx5_frag_buf_ctrl *fbc)
{
	mlx5_init_fbc_offset(frags, log_stride, log_sz, 0, fbc);
}

static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
					  u32 ix)
{
	unsigned int frag;

	ix += fbc->strides_offset;
	frag = ix >> fbc->log_frag_strides;

	return fbc->frags[frag].buf + ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
}
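
/* Worked example for the fragment math above (illustrative, not from this
 * file): with PAGE_SHIFT == 12, log_stride == 6 (64-byte strides) and
 * log_sz == 10 (1024 strides), mlx5_init_fbc() sets log_frag_strides == 6,
 * so each 4KB fragment holds 64 strides and frag_sz_m1 == 63. Then for
 * ix == 130 (with strides_offset == 0):
 *	frag   = 130 >> 6 = 2
 *	offset = (130 & 63) << 6 = 128 bytes into frags[2].buf
 */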

static inline u32
mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
{
	u32 last_frag_stride_idx = (ix + fbc->strides_offset) | fbc->frag_sz_m1;

	return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
}

enum {
	CMD_ALLOWED_OPCODE_ALL,
};

void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);

struct mlx5_async_ctx {
	struct mlx5_core_dev *dev;
	atomic_t num_inflight;
	struct wait_queue_head wait;
};

struct mlx5_async_work;

typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context);

struct mlx5_async_work {
	struct mlx5_async_ctx *ctx;
	mlx5_async_cbk_t user_callback;
};

void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx);
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx);
int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work);
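
/* Illustrative async command flow (a sketch; "my_done" is a hypothetical
 * caller-defined mlx5_async_cbk_t, not part of this API). The callback runs
 * on command completion; mlx5_cmd_cleanup_async_ctx() blocks until all
 * commands issued on the context have finished:
 *
 *	struct mlx5_async_ctx ctx;
 *	struct mlx5_async_work work;
 *
 *	mlx5_cmd_init_async_ctx(dev, &ctx);
 *	err = mlx5_cmd_exec_cb(&ctx, in, sizeof(in), out, sizeof(out),
 *			       my_done, &work);
 *	...
 *	mlx5_cmd_cleanup_async_ctx(&ctx);
 */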

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size);

#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out) \
	({ \
		mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out, \
			      MLX5_ST_SZ_BYTES(ifc_cmd##_out)); \
	})

#define mlx5_cmd_exec_in(dev, ifc_cmd, in) \
	({ \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {}; \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out); \
	})
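
/* Illustrative usage of the wrappers above (a sketch following the mlx5_ifc
 * conventions; error handling elided). MLX5_SET/MLX5_GET and the alloc_pd
 * command layouts come from the mlx5_ifc headers:
 *
 *	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
 *	int err;
 *
 *	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
 *	err = mlx5_cmd_exec_inout(dev, alloc_pd, in, out);
 *	if (!err)
 *		pdn = MLX5_GET(alloc_pd_out, out, pd);
 */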

int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size);
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome);
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type);
int mlx5_cmd_alloc_uar(struct mlx5_core_dev *dev, u32 *uarn);
int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
void mlx5_health_flush(struct mlx5_core_dev *dev);
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
		   int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
			     struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
						      gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
				 struct mlx5_cmd_mailbox *head);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev,
			  struct mlx5_core_mkey *mkey,
			  u32 *in, int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
			   struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
			 u32 *out, int outlen);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_pagealloc_init(struct mlx5_core_dev *dev);
void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
void mlx5_unregister_debugfs(void);

void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas);
void mlx5_fill_page_frag_array_perm(struct mlx5_frag_buf *buf, __be64 *pas, u8 perm);
void mlx5_fill_page_frag_array(struct mlx5_frag_buf *frag_buf, __be64 *pas);
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn);
int mlx5_core_attach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);
int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn);

void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
			 int size_in, void *data_out, int size_out,
			 u16 reg_num, int arg, int write);

int mlx5_db_alloc(struct mlx5_core_dev *dev, struct mlx5_db *db);
int mlx5_db_alloc_node(struct mlx5_core_dev *dev, struct mlx5_db *db,
		       int node);
void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db);

const char *mlx5_command_str(int command);
void mlx5_cmdif_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_cmdif_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
			 int npsvs, u32 *sig_index);
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
			struct mlx5_odp_caps *odp_caps);
int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev,
			     u8 port_num, void *out, size_t sz);

int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
int mlx5_rl_add_rate(struct mlx5_core_dev *dev, u16 *index,
		     struct mlx5_rate_limit *rl);
void mlx5_rl_remove_rate(struct mlx5_core_dev *dev, struct mlx5_rate_limit *rl);
bool mlx5_rl_is_in_range(struct mlx5_core_dev *dev, u32 rate);
int mlx5_rl_add_rate_raw(struct mlx5_core_dev *dev, void *rl_in, u16 uid,
			 bool dedicated_entry, u16 *index);
void mlx5_rl_remove_rate_raw(struct mlx5_core_dev *dev, u16 index);
bool mlx5_rl_are_equal(struct mlx5_rate_limit *rl_0,
		       struct mlx5_rate_limit *rl_1);
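
/* Illustrative rate-limit usage (a sketch; entries are reference counted,
 * so mlx5_rl_remove_rate() must be called with a matching mlx5_rate_limit):
 *
 *	struct mlx5_rate_limit rl = { .rate = 1000 };
 *	u16 index;
 *
 *	if (mlx5_rl_is_in_range(dev, rl.rate) &&
 *	    !mlx5_rl_add_rate(dev, &index, &rl)) {
 *		... program "index" into the SQ context ...
 *		mlx5_rl_remove_rate(dev, &rl);
 *	}
 */
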
int mlx5_alloc_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg,
		     bool map_wc, bool fast_path);
void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg);

unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev);
struct cpumask *
mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector);
unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev);
int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index,
			   u8 roce_version, u8 roce_l3_type, const u8 *gid,
			   const u8 *mac, bool vlan, u16 vlan_id, u8 port_num);

static inline u32 mlx5_mkey_to_idx(u32 mkey)
{
	return mkey >> 8;
}

static inline u32 mlx5_idx_to_mkey(u32 mkey_idx)
{
	return mkey_idx << 8;
}

static inline u8 mlx5_mkey_variant(u32 mkey)
{
	return mkey & 0xff;
}

/* Async-atomic event notifier used by mlx5 core to forward FW
 * events received from the event queue to mlx5 consumers.
 * Optimised for event queue dispatching.
 */
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
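
/* Illustrative consumer sketch ("my_fw_event" and "my_nb" are hypothetical):
 *
 *	static int my_fw_event(struct notifier_block *nb, unsigned long event,
 *			       void *data)
 *	{
 *		(event carries the FW event type, data points at the EQE)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_fw_event };
 *
 *	mlx5_notifier_register(dev, &my_nb);
 *	...
 *	mlx5_notifier_unregister(dev, &my_nb);
 */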

/* Async-atomic event notifier used for forwarding
 * events from the event queue into the mlx5 events dispatcher,
 * eswitch, clock and others.
 */
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);

/* Blocking event notifier used to forward SW events, used for slow path */
int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
				      void *data);

int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);

int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev,
			   struct net_device *slave);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
				 u64 *values,
				 int num_counters,
				 size_t *offsets);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			 u64 length, u32 log_alignment, u16 uid,
			 phys_addr_t *addr, u32 *obj_id);
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
			   u64 length, u16 uid, phys_addr_t addr, u32 obj_id);

#ifdef CONFIG_MLX5_CORE_IPOIB
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
					  struct ib_device *ibdev,
					  const char *name,
					  void (*setup)(struct net_device *));
#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
			    struct ib_device *device,
			    struct rdma_netdev_alloc_params *params);

enum {
	MLX5_PCI_DEV_IS_VF = 1 << 0,
};

static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_PF;
}

static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
{
	return dev->coredev_type == MLX5_COREDEV_VF;
}

static inline bool mlx5_core_is_ecpf(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu;
}

static inline bool
mlx5_core_is_ecpf_esw_manager(const struct mlx5_core_dev *dev)
{
	return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager);
}

static inline bool mlx5_ecpf_vport_exists(const struct mlx5_core_dev *dev)
{
	return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists);
}

static inline u16 mlx5_core_max_vfs(const struct mlx5_core_dev *dev)
{
	return dev->priv.sriov.max_vfs;
}

static inline int mlx5_get_gid_table_len(u16 param)
{
	if (param > 4) {
		pr_warn("gid table length is zero\n");
		return 0;
	}

	return 8 * (1 << param);
}

static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev)
{
	return !!(dev->priv.rl_table.max_size);
}

static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) &&
	       MLX5_CAP_GEN(dev, num_vhca_ports) <= 1;
}

static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev)
{
	return MLX5_CAP_GEN(dev, num_vhca_ports) > 1;
}

static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev)
{
	return mlx5_core_is_mp_slave(dev) ||
	       mlx5_core_is_mp_master(dev);
}

static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev)
{
	if (!mlx5_core_mp_enabled(dev))
		return 1;

	return MLX5_CAP_GEN(dev, native_port_num);
}

enum {
	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};

static inline bool mlx5_is_roce_init_enabled(struct mlx5_core_dev *dev)
{
	struct devlink *devlink = priv_to_devlink(dev);
	union devlink_param_value val;

	devlink_param_driverinit_value_get(devlink,
					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
					   &val);
	return val.vbool;
}

#endif /* MLX5_DRIVER_H */