/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};

/* The number of hardware indirection table entries must be a power of 2 */
#define MANA_INDIRECT_TABLE_SIZE 64
#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1)
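
/* Illustrative sketch (not part of the driver): because the table size is a
 * power of 2, an RSS hash maps to an indirection table slot with a mask
 * instead of a modulo.
 */
static inline u32 mana_example_indir_index(u32 pkt_hash)
{
	return pkt_hash & MANA_INDIRECT_TABLE_MASK;
}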

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40

#define COMP_ENTRY_SIZE 64

#define ADAPTER_MTU_SIZE 1500
#define MAX_FRAME_SIZE (ADAPTER_MTU_SIZE + 14)

#define RX_BUFFERS_PER_QUEUE 512

#define MAX_SEND_BUFFERS_PER_QUEUE 256

#define EQ_SIZE (8 * PAGE_SIZE)
#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	struct u64_stats_sync syncp;
};
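
/* Illustrative sketch (not part of the driver): syncp exists so the 64-bit
 * counters can be read consistently on 32-bit kernels. A writer on the TX
 * completion path would update the counters roughly like this.
 */
static inline void mana_example_stats_tx_add(struct mana_stats_tx *stats,
					     u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&stats->syncp);
	stats->packets += pkts;
	stats->bytes += bytes;
	u64_stats_update_end(&stats->syncp);
}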

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 1];

	u32 size[MAX_SKB_FRAGS + 1];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
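
/* Illustrative sketch (not part of the driver): with MANA_HEADROOM reserved
 * at the start of the skb buffer, the per-frag DMA mappings can be recovered
 * at TX completion time without a separate lookup structure. That skb->head
 * holds a mana_skb_head is an assumption of this sketch.
 */
static inline struct mana_skb_head *mana_example_skb_head(struct sk_buff *skb)
{
	return (struct mana_skb_head *)skb->head;
}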

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3; /* 802.1Q */
	u32 dei : 1; /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */
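
/* Illustrative sketch (not part of the driver): how a TX path might fill the
 * short OOB for an IPv4/TCP packet with checksum offload; treating trans_off
 * as a byte offset into the frame is an assumption here.
 */
static inline void mana_example_fill_short_oob(struct mana_tx_short_oob *so,
					       struct sk_buff *skb)
{
	so->pkt_fmt = MANA_SHORT_PKT_FMT;
	so->is_outer_ipv4 = 1;
	so->comp_iphdr_csum = 1;	/* HW computes the IP header csum */
	so->comp_tcp_csum = 1;		/* HW computes the TCP csum */
	so->trans_off = skb_transport_offset(skb);
}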

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID = 0,
	CQE_RX_OKAY = 1,
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_OKAY = 32,
	CQE_TX_SA_DROP = 33,
	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */
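
/* Illustrative sketch (not part of the driver): a completion handler can
 * classify a raw CQE by reading the header at its start and comparing the
 * type against the RX range of mana_cqe_type.
 */
static inline bool mana_example_cqe_is_rx(const void *cqe)
{
	const struct mana_cqe_header *hdr = cqe;

	return hdr->cqe_type >= CQE_RX_OKAY && hdr->cqe_type <= CQE_RX_TRUNCATED;
}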

/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
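
/* Illustrative sketch (not part of the driver): mapping the NDIS hash type
 * reported in an RX completion to the kernel's packet hash levels.
 */
static inline void mana_example_set_hash(struct sk_buff *skb, u32 hash,
					 u32 hashtype)
{
	if (hashtype & MANA_HASH_L4)
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
	else if (hashtype & MANA_HASH_L3)
		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	else
		skb_set_hash(skb, hash, PKT_HASH_TYPE_NONE);
}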

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */
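
/* Illustrative sketch (not part of the driver): translating the completion's
 * checksum bits for an IPv4/TCP frame into skb->ip_summed.
 */
static inline void mana_example_rx_csum(struct sk_buff *skb,
					const struct mana_rxcomp_oob *oob)
{
	if (oob->rx_iphdr_csum_succeed && oob->rx_tcp_csum_succeed)
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	else
		skb->ip_summed = CHECKSUM_NONE;
}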

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Only valid (and must be non-NULL) when type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Only valid (and must be non-NULL) when type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int budget;
};
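
/* Illustrative sketch (not part of the driver): work_done and budget support
 * a conventional NAPI poll loop; the CQE draining in the middle is elided
 * because it depends on the GDMA layer.
 */
static inline int mana_example_poll(struct napi_struct *napi, int budget)
{
	struct mana_cq *cq = container_of(napi, struct mana_cq, napi);

	cq->work_done = 0;
	cq->budget = budget;

	/* ... drain up to budget CQEs into cq->gdma_comp_buf ... */

	if (cq->work_done < budget && napi_complete_done(napi, cq->work_done))
		return cq->work_done;	/* done; the CQ can be re-armed */

	return budget;			/* more work: keep polling */
}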

#define GDMA_MAX_RQE_SGES 15

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	dma_addr_t buf_dma_addr;

	/* SGL of the buffer going to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[GDMA_MAX_RQE_SGES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	struct page *xdp_save_page;
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[];
};
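
/* Illustrative sketch (not part of the driver): since rx_oobs[] is a
 * flexible array member, an RXQ for n buffers is allocated in one shot
 * (assumes <linux/slab.h> for kzalloc/struct_size).
 */
static inline struct mana_rxq *mana_example_alloc_rxq(u32 num_rx_buf)
{
	struct mana_rxq *rxq;

	rxq = kzalloc(struct_size(rxq, rx_oobs, num_rx_buf), GFP_KERNEL);
	if (rxq)
		rxq->num_rx_buf = num_rx_buf;

	return rxq;
}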

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;

	struct mana_eq *eqs;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 indir_table[MANA_INDIRECT_TABLE_SIZE];

	/* Indirection table containing RxObject Handles */
	mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE];

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues RQ pointers. */
	struct mana_rxq **rxqs;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs. */
	unsigned int max_queues;
	unsigned int num_queues;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	u16 port_idx;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;
};
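
/* Illustrative sketch (not part of the driver): seeding indir_table with an
 * even round-robin spread over the active queues, equivalent to the kernel's
 * ethtool_rxfh_indir_default() helper.
 */
static inline void mana_example_fill_indir(struct mana_port_context *apc)
{
	u32 i;

	for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++)
		apc->indir_table[i] = i % apc->num_queues;
}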

int mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);

extern const struct ethtool_ops mana_ethtool_ops;

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER = 0x28000,
	MANA_DEREGISTER_FILTER = 0x28001,
	MANA_REGISTER_HW_PORT = 0x28003,
	MANA_DEREGISTER_HW_PORT = 0x28004,
};
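
/* Illustrative sketch (not part of the driver): every command below follows
 * one request/response pattern - a gdma_req_hdr initialized with the command
 * code and message sizes, then command-specific fields. Assuming gdma.h
 * provides mana_gd_init_req_hdr(), fencing an RQ would look roughly like:
 *
 *	struct mana_fence_rq_req req = {};
 *	struct mana_fence_rq_resp resp = {};
 *
 *	mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ, sizeof(req),
 *			     sizeof(resp));
 *	req.wq_obj_handle = rq_obj;
 *
 * The request is then sent over the HW channel and resp.hdr carries the
 * completion status.
 */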

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u16 reserved;
	u32 max_num_eqs;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
}; /* HW DATA */
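
/* Illustrative sketch (not part of the driver): the indirection table of
 * mana_handle_t entries is not a member of the struct above; it travels in
 * the same message immediately after it, located via indir_tab_offset
 * (assumes <linux/slab.h> and <linux/string.h>).
 */
static inline struct mana_cfg_rx_steer_req *
mana_example_build_rx_steer(const mana_handle_t *rxobj_tab, u16 num_entries)
{
	size_t req_size = sizeof(struct mana_cfg_rx_steer_req) +
			  num_entries * sizeof(mana_handle_t);
	struct mana_cfg_rx_steer_req *req;

	req = kzalloc(req_size, GFP_KERNEL);
	if (!req)
		return NULL;

	req->num_indir_entries = num_entries;
	req->indir_tab_offset = sizeof(*req);
	memcpy(req + 1, rxobj_tab, num_entries * sizeof(mana_handle_t));

	return req;
}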

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

#endif /* _MANA_H */