/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA) definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
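
/* Illustrative helper, not part of the original header: because
 * MANA_INDIRECT_TABLE_SIZE is a power of 2, masking an RSS hash with
 * MANA_INDIRECT_TABLE_MASK is equivalent to hash % MANA_INDIRECT_TABLE_SIZE.
 */
static inline u32 mana_hash_to_indir_slot(u32 hash)
{
	return hash & MANA_INDIRECT_TABLE_MASK;
}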

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define RX_BUFFERS_PER_QUEUE 512
#define MANA_RX_DATA_ALIGN 64

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};
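
/* Usage sketch, shown for context rather than taken from this driver: the
 * counters above follow the kernel's standard u64_stats_sync pattern, where
 * writers bracket updates with the syncp sequence counter so that 64-bit
 * reads stay consistent on 32-bit systems:
 *
 *	u64_stats_update_begin(&rxq->stats.syncp);
 *	rxq->stats.packets++;
 *	rxq->stats.bytes += pkt_len;
 *	u64_stats_update_end(&rxq->stats.syncp);
 */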

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
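
/* Usage sketch (an assumption about the intended layout, not code from this
 * header): MANA_HEADROOM reserves space in front of the skb data for the
 * per-fragment DMA mappings, so a TX completion handler can recover them by
 * casting skb->head:
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	dma_unmap_single(dev, ash->dma_handle[0], ash->size[0],
 *			 DMA_TO_DEVICE);
 */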

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 suppress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3; /* 802.1Q */
	u32 dei : 1; /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID = 0,
	CQE_RX_OKAY = 1,
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_OKAY = 32,
	CQE_TX_SA_DROP = 33,
	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */

/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
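
/* Illustrative helper, not part of the original header, assuming the
 * rx_hashtype field of the RX completion OOB (below) carries these NDIS
 * hash-type bits: pick the kernel's packet hash type, with L4 hashes taking
 * precedence over L3. A receive path could then pass the result to
 * skb_set_hash().
 */
static inline enum pkt_hash_types mana_rx_hash_type(u32 rx_hashtype)
{
	if (rx_hashtype & MANA_HASH_L4)
		return PKT_HASH_TYPE_L4;
	if (rx_hashtype & MANA_HASH_L3)
		return PKT_HASH_TYPE_L3;
	return PKT_HASH_TYPE_NONE;
}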

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
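
/* Usage sketch (an assumption about how a receive path would consume the
 * flags above, not code from this header): the skb checksum state is only
 * marked verified when both the IP header and the L4 checksum passed:
 *
 *	if (oob->rx_iphdr_csum_succeed &&
 *	    (oob->rx_tcp_csum_succeed || oob->rx_udp_csum_succeed))
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else
 *		skb->ip_summed = CHECKSUM_NONE;
 */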

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int budget;
};

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;

	/* SGL of the buffer going to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
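
/* Sizing sketch (an assumption about the intended layout, not code from
 * this header): in XDP mode each RX buffer is a single page laid out as
 *
 *	XDP_PACKET_HEADROOM | Ethernet frame | skb_shared_info
 *
 * so the largest usable MTU is PAGE_SIZE minus the headroom, the Ethernet
 * header and the (aligned) shared info. A hypothetical MTU-change handler
 * would reject anything larger while an XDP program is attached:
 *
 *	if (mana_xdp_get(apc) && new_mtu > MANA_XDP_MTU_MAX)
 *		return -EINVAL;
 */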

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reuse */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq **rxqs;

	/* pre-allocated rx buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};
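
/* Usage sketch (hypothetical caller, based on the vport_mutex comment
 * above): code that needs exclusive use of the vport takes the mutex,
 * checks the use count, and bumps it before configuring the vport:
 *
 *	mutex_lock(&apc->vport_mutex);
 *	if (apc->vport_use_count > 0) {
 *		mutex_unlock(&apc->vport_mutex);
 *		return -EBUSY;
 *	}
 *	apc->vport_use_count++;
 *	mutex_unlock(&apc->vport_mutex);
 */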

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);

extern const struct ethtool_ops mana_ethtool_ops;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER = 0x28000,
	MANA_DEREGISTER_FILTER = 0x28001,
	MANA_REGISTER_HW_PORT = 0x28003,
	MANA_DEREGISTER_HW_PORT = 0x28004,
};

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC Driver Capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
}; /* HW DATA */
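
/* Construction sketch (an assumption about how the request is laid out, not
 * code from this header): the indirection table of RX object handles is
 * appended after the fixed-size struct and located via indir_tab_offset:
 *
 *	req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE;
 *	req->indir_tab_offset = sizeof(*req);
 *	memcpy(req + 1, apc->rxobj_table,
 *	       MANA_INDIRECT_TABLE_SIZE * sizeof(mana_handle_t));
 */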

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
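
/* Selection sketch (an assumption drawn from the field widths above, not
 * code from this header): the short OOB can only carry an 8-bit vport
 * offset, so a sender falls back to the long packet format when the offset
 * does not fit:
 *
 *	if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
 *		tx_oob.s_oob.pkt_fmt = MANA_LONG_PKT_FMT;
 *		tx_oob.l_oob.long_vp_offset = txq->vp_offset;
 *	}
 */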

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);
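
/* Pairing sketch (hypothetical caller; GDMA_SQ is assumed to be a queue
 * type from gdma.h): WQ objects created for a vport are destroyed with the
 * same wq_type:
 *
 *	err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
 *				 &wq_spec, &cq_spec, &tx_object);
 *	...
 *	mana_destroy_wq_obj(apc, GDMA_SQ, tx_object);
 */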

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);
#endif /* _MANA_H */