/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>
#include <net/net_shaper.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of hardware indirection table entries must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

/* This max value for RX buffers is derived from __alloc_pages()'s max page
 * allocation calculation, which allows at most 2^(MAX_ORDER - 1) pages. An RX
 * buffer size beyond this value is rejected by the __alloc_pages() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE 8192
#define DEF_RX_BUFFERS_PER_QUEUE 1024
#define MIN_RX_BUFFERS_PER_QUEUE 128

/* This max value for TX buffers was derived, through testing, as the maximum
 * number of allocatable pages the host supports per guest. A TX buffer size
 * beyond this value is rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE 16384
#define DEF_TX_BUFFERS_PER_QUEUE 256
#define MIN_TX_BUFFERS_PER_QUEUE 128

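/* Illustrative sketch (not part of this header): a requested ring size is
 * expected to fall within the MIN/MAX bounds above; a driver would typically
 * round it up to a power of two and clamp it before use. The helper below is
 * hypothetical; clamp_t() and roundup_pow_of_two() are standard kernel
 * helpers.
 */
static inline u32 example_clamp_tx_ring_size(u32 requested)
{
	return clamp_t(u32, roundup_pow_of_two(requested),
		       MIN_TX_BUFFERS_PER_QUEUE, MAX_TX_BUFFERS_PER_QUEUE);
}
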
#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update these counts whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};

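/* Illustrative sketch (not part of this header): the stats structures above
 * embed a u64_stats_sync so that 64-bit counters read consistently on 32-bit
 * architectures; writers wrap updates in the begin/end pair. The helper below
 * is hypothetical.
 */
static inline void example_count_tx_packet(struct mana_stats_tx *stats,
					   unsigned int bytes)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets++;
	stats->bytes += bytes;
	u64_stats_update_end(&stats->syncp);
}
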
struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
};

/* skb data and frags DMA mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3; /* 802.1Q */
	u32 dei : 1; /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

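/* Illustrative sketch (not part of this header): how a transmit path might
 * fill the short OOB for an outer-IPv4 TCP packet with checksum offload. The
 * helper and its arguments are hypothetical; field usage follows the
 * structure definitions above.
 */
static inline void example_fill_tx_short_oob(struct mana_tx_oob *tx_oob,
					     u32 transport_offset)
{
	tx_oob->s_oob.pkt_fmt = MANA_SHORT_PKT_FMT;
	tx_oob->s_oob.is_outer_ipv4 = 1;
	tx_oob->s_oob.comp_iphdr_csum = 1; /* HW computes the IP header csum */
	tx_oob->s_oob.comp_tcp_csum = 1;   /* HW computes the TCP csum */
	tx_oob->s_oob.trans_off = transport_offset;
}
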
enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID = 0,
	CQE_RX_OKAY = 1,
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_OKAY = 32,
	CQE_TX_SA_DROP = 33,
	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */

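/* Illustrative sketch (not part of this header): a TX completion handler
 * might classify completions by the 6-bit cqe_type carried in the CQE header
 * above. The helper is hypothetical; the drop classes mirror the
 * enum mana_cqe_type values.
 */
static inline bool example_tx_cqe_is_error(const struct mana_cqe_header *hdr)
{
	switch (hdr->cqe_type) {
	case CQE_TX_OKAY:
		return false;
	case CQE_TX_SA_DROP:
	case CQE_TX_MTU_DROP:
	case CQE_TX_INVALID_OOB:
	case CQE_TX_INVALID_ETH_TYPE:
	case CQE_TX_HDR_PROCESSING_ERROR:
	case CQE_TX_VF_DISABLED:
	case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
	case CQE_TX_VPORT_DISABLED:
	case CQE_TX_VLAN_TAGGING_VIOLATION:
	default:
		return true;
	}
}
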
/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)

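/* Illustrative sketch (not part of this header): the composite masks above
 * let an RX path map the NDIS hash type reported by the NIC onto the
 * kernel's skb hash levels. The helper is hypothetical; skb_set_hash() is
 * the standard kernel API.
 */
static inline void example_set_skb_hash(struct sk_buff *skb, u32 hashtype,
					u32 hashval)
{
	if (hashtype & MANA_HASH_L4)
		skb_set_hash(skb, hashval, PKT_HASH_TYPE_L4);
	else if (hashtype & MANA_HASH_L3)
		skb_set_hash(skb, hashval, PKT_HASH_TYPE_L3);
}
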
struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */

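/* Illustrative sketch (not part of this header): mapping the RX completion
 * checksum bits onto the kernel's skb checksum state. The helper is
 * hypothetical; CHECKSUM_UNNECESSARY and CHECKSUM_NONE are the standard skb
 * states.
 */
static inline void example_rx_csum(struct sk_buff *skb,
				   const struct mana_rxcomp_oob *oob)
{
	if (oob->rx_iphdr_csum_succeed &&
	    (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed))
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}
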
struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer to be posted as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)

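/* Illustrative sketch (not part of this header): with an XDP program
 * attached, each RX frame must fit in a single page together with the
 * headroom and shared info accounted for above, so an MTU change would be
 * validated against MANA_XDP_MTU_MAX. The helper is hypothetical.
 */
static inline bool example_xdp_mtu_ok(unsigned int mtu)
{
	return mtu <= MANA_XDP_MTU_MAX;
}
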
struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;
	struct dentry *mana_rx_debugfs;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;

	struct dentry *mana_tx_debugfs;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_ethtool_phy_stats {
	/* Drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per TC (traffic class) traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per TC (traffic class) pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;
	u8 bm_hostmode;

	struct mana_eq *eqs;
	struct dentry *mana_eqs_debugfs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection table for RX & TX. The values are queue indexes. */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	/* Pre-allocated RX buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	unsigned int rx_queue_size;
	unsigned int tx_queue_size;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	/* Net shaper handle */
	struct net_shaper_handle handle;

	u16 port_idx;
	/* Currently configured speed (Mbps) */
	u32 speed;
	/* Maximum speed supported by the SKU (Mbps) */
	u32 max_speed;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;

	struct mana_ethtool_phy_stats phy_stats;

	/* Debugfs */
	struct dentry *mana_port_debugfs;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

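/* Illustrative sketch (not part of this header): before mana_config_rss()
 * pushes the indirection table to the NIC, a default table typically spreads
 * entries round-robin across the RX queues. The helper is hypothetical.
 */
static inline void example_fill_default_indir_table(u32 *indir_table,
						    u32 table_sz,
						    unsigned int num_queues)
{
	u32 i;

	for (i = 0; i < table_sz; i++)
		indir_table[i] = i % num_queues;
}
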
int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

int mana_rdma_probe(struct gdma_dev *gd);
void mana_rdma_remove(struct gdma_dev *gd);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
void mana_query_gf_stats(struct mana_port_context *apc);
int mana_query_link_cfg(struct mana_port_context *apc);
int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
		      int enable_clamping);
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,
	MANA_QUERY_LINK_CONFIG = 0x2000A,
	MANA_SET_BW_CLAMP = 0x2000B,
	MANA_QUERY_PHY_STAT = 0x2000C,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER = 0x28000,
	MANA_DEREGISTER_FILTER = 0x28001,
	MANA_REGISTER_HW_PORT = 0x28003,
	MANA_DEREGISTER_HW_PORT = 0x28004,
};

/* Query Link Configuration */
struct mana_query_link_config_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
}; /* HW DATA */

struct mana_query_link_config_resp {
	struct gdma_resp_hdr hdr;
	u32 qos_speed_mbps;
	u8 qos_unconfigured;
	u8 reserved1[3];
	u32 link_speed_mbps;
	u8 reserved2[4];
}; /* HW DATA */

/* Set Bandwidth Clamp */
struct mana_set_bw_clamp_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	enum TRI_STATE enable_clamping;
	u32 link_speed_mbps;
}; /* HW DATA */

struct mana_set_bw_clamp_resp {
	struct gdma_resp_hdr hdr;
	u8 qos_unconfigured;
	u8 reserved[7];
}; /* HW DATA */

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
	u8 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

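/* Illustrative sketch (not part of this header): every request begins with a
 * gdma_req_hdr carrying the command code and the request/response sizes,
 * initialized via mana_gd_init_req_hdr() from gdma.h. The helper below is
 * hypothetical; it prepares a fence request for the RQ identified by rxobj.
 */
static inline void example_init_fence_req(struct mana_fence_rq_req *req,
					  mana_handle_t rxobj)
{
	mana_gd_init_req_hdr(&req->hdr, MANA_FENCE_RQ, sizeof(*req),
			     sizeof(struct mana_fence_rq_resp));
	req->wq_obj_handle = rxobj;
}
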
/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Query phy stats */
struct mana_query_phy_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_phy_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;

	/* Aggregate drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per TC (traffic class) traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per TC (traffic class) pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

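/* Illustrative sketch (not part of this header): the v2 steering request
 * ends in a flexible array bounded by num_indir_entries, so its wire length
 * must be computed with struct_size() (from linux/overflow.h) rather than
 * sizeof(). The helper is hypothetical.
 */
static inline size_t example_rx_steer_req_buf_size(struct mana_cfg_rx_steer_req_v2 *req,
						   u16 num_indir_entries)
{
	return struct_size(req, indir_tab, num_indir_entries);
}
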
/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* Rx discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE 0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED 0x0000000000000002
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES 0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS 0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES 0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS 0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES 0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS 0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES 0x0000000000000100
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED 0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED 0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
	0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT 0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
	0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT 0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT 0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT 0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION 0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB 0x0000000000040000
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES 0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS 0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES 0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS 0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES 0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS 0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES 0x0000000002000000
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR 0x0000000004000000

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev(struct mana_context *ac,
					   u32 port_index,
					   netdevice_tracker *tracker);
#endif /* _MANA_H */