Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mlx5e-updates-2018-07-26' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-07-26 (XDP redirect)

This series from Tariq adds support for device-out XDP redirect.

Start with some simple RX and XDP cleanups:
- Replace call to MPWQE free with dealloc in interface down flow
- Do not recycle RX pages in interface down flow
- Gather all XDP pre-requisite checks in a single function
- Restrict the combination of large MTU and XDP

Since XDP logic is now going to be called from the TX side as well,
the generic XDP TX logic is no longer RX-only. For that, Tariq creates
a new xdp.c file, moves the XDP-related code into it, and generalizes
the code to support XDP TX for XDP redirect — for example, the XDP TX SQ
structures and the XDP counters.

XDP redirect support:
Add implementation for the ndo_xdp_xmit callback.

Dedicate a new set of XDP-SQ instances to satisfy the XDP_REDIRECT
requests. These instances are totally separated from the existing
XDP-SQ objects that satisfy local XDP_TX actions.

Performance tests:

xdp_redirect_map from ConnectX-5 to ConnectX-5.
CPU: Intel(R) Xeon(R) CPU E5-2680 v3 @ 2.50GHz
Packet-rate of 64B packets.

Single queue: 7 Mpps.
Multi queue: 55 Mpps.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+612 -313
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/Makefile
··· 14 14 fpga/ipsec.o fpga/tls.o 15 15 16 16 mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ 17 - en_tx.o en_rx.o en_dim.o en_txrx.o en_stats.o vxlan.o \ 17 + en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o vxlan.o \ 18 18 en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o 19 19 20 20 mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
+33 -28
drivers/net/ethernet/mellanox/mlx5/core/en.h
··· 147 147 (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB)) 148 148 #define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS 149 149 150 - #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) 151 - #define MLX5E_XDP_TX_DS_COUNT \ 152 - ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) 153 - 154 150 #define MLX5E_NUM_MAIN_GROUPS 9 155 151 156 152 #define MLX5E_MSG_LEVEL NETIF_MSG_LINK ··· 344 348 MLX5E_SQ_STATE_IPSEC, 345 349 MLX5E_SQ_STATE_AM, 346 350 MLX5E_SQ_STATE_TLS, 351 + MLX5E_SQ_STATE_REDIRECT, 347 352 }; 348 353 349 354 struct mlx5e_sq_wqe_info { ··· 365 368 366 369 struct mlx5e_cq cq; 367 370 368 - /* write@xmit, read@completion */ 369 - struct { 370 - struct mlx5e_sq_dma *dma_fifo; 371 - struct mlx5e_tx_wqe_info *wqe_info; 372 - } db; 373 - 374 371 /* read only */ 375 372 struct mlx5_wq_cyc wq; 376 373 u32 dma_fifo_mask; 377 374 struct mlx5e_sq_stats *stats; 375 + struct { 376 + struct mlx5e_sq_dma *dma_fifo; 377 + struct mlx5e_tx_wqe_info *wqe_info; 378 + } db; 378 379 void __iomem *uar_map; 379 380 struct netdev_queue *txq; 380 381 u32 sqn; ··· 394 399 } recover; 395 400 } ____cacheline_aligned_in_smp; 396 401 402 + struct mlx5e_dma_info { 403 + struct page *page; 404 + dma_addr_t addr; 405 + }; 406 + 407 + struct mlx5e_xdp_info { 408 + struct xdp_frame *xdpf; 409 + dma_addr_t dma_addr; 410 + struct mlx5e_dma_info di; 411 + }; 412 + 397 413 struct mlx5e_xdpsq { 398 414 /* data path */ 399 415 400 - /* dirtied @rx completion */ 416 + /* dirtied @completion */ 401 417 u16 cc; 402 - u16 pc; 418 + bool redirect_flush; 419 + 420 + /* dirtied @xmit */ 421 + u16 pc ____cacheline_aligned_in_smp; 422 + bool doorbell; 403 423 404 424 struct mlx5e_cq cq; 405 425 406 - /* write@xmit, read@completion */ 407 - struct { 408 - struct mlx5e_dma_info *di; 409 - bool doorbell; 410 - bool redirect_flush; 411 - } db; 412 - 413 426 /* read only */ 414 427 struct mlx5_wq_cyc wq; 428 + struct mlx5e_xdpsq_stats *stats; 429 + struct { 430 + struct mlx5e_xdp_info 
*xdpi; 431 + } db; 415 432 void __iomem *uar_map; 416 433 u32 sqn; 417 434 struct device *pdev; 418 435 __be32 mkey_be; 419 436 u8 min_inline_mode; 420 437 unsigned long state; 438 + unsigned int hw_mtu; 421 439 422 440 /* control path */ 423 441 struct mlx5_wq_ctrl wq_ctrl; ··· 466 458 { 467 459 return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); 468 460 } 469 - 470 - struct mlx5e_dma_info { 471 - struct page *page; 472 - dma_addr_t addr; 473 - }; 474 461 475 462 struct mlx5e_wqe_frag_info { 476 463 struct mlx5e_dma_info *di; ··· 569 566 570 567 /* XDP */ 571 568 struct bpf_prog *xdp_prog; 572 - unsigned int hw_mtu; 573 569 struct mlx5e_xdpsq xdpsq; 574 570 DECLARE_BITMAP(flags, 8); 575 571 struct page_pool *page_pool; ··· 597 595 __be32 mkey_be; 598 596 u8 num_tc; 599 597 598 + /* XDP_REDIRECT */ 599 + struct mlx5e_xdpsq xdpsq; 600 + 600 601 /* data path - accessed per napi poll */ 601 602 struct irq_desc *irq_desc; 602 603 struct mlx5e_ch_stats *stats; ··· 622 617 struct mlx5e_ch_stats ch; 623 618 struct mlx5e_sq_stats sq[MLX5E_MAX_NUM_TC]; 624 619 struct mlx5e_rq_stats rq; 620 + struct mlx5e_xdpsq_stats rq_xdpsq; 621 + struct mlx5e_xdpsq_stats xdpsq; 625 622 } ____cacheline_aligned_in_smp; 626 623 627 624 enum mlx5e_traffic_types { ··· 883 876 int mlx5e_napi_poll(struct napi_struct *napi, int budget); 884 877 bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); 885 878 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget); 886 - bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); 887 879 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); 888 - void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); 889 880 890 881 bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev); 891 882 bool mlx5e_striding_rq_possible(struct mlx5_core_dev *mdev, 892 883 struct mlx5e_params *params); 893 884 885 + void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info); 894 886 void mlx5e_page_release(struct mlx5e_rq *rq, struct 
mlx5e_dma_info *dma_info, 895 887 bool recycle); 896 888 void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); ··· 898 892 bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq); 899 893 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix); 900 894 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix); 901 - void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi); 902 895 struct sk_buff * 903 896 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, 904 897 u16 cqe_bcnt, u32 head_offset, u32 page_idx);
+302
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c
··· 1 + /* 2 + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 
31 + */ 32 + 33 + #include <linux/bpf_trace.h> 34 + #include "en/xdp.h" 35 + 36 + static inline bool 37 + mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, 38 + struct xdp_buff *xdp) 39 + { 40 + struct mlx5e_xdp_info xdpi; 41 + 42 + xdpi.xdpf = convert_to_xdp_frame(xdp); 43 + if (unlikely(!xdpi.xdpf)) 44 + return false; 45 + xdpi.dma_addr = di->addr + (xdpi.xdpf->data - (void *)xdpi.xdpf); 46 + dma_sync_single_for_device(sq->pdev, xdpi.dma_addr, 47 + xdpi.xdpf->len, PCI_DMA_TODEVICE); 48 + xdpi.di = *di; 49 + 50 + return mlx5e_xmit_xdp_frame(sq, &xdpi); 51 + } 52 + 53 + /* returns true if packet was consumed by xdp */ 54 + bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, 55 + void *va, u16 *rx_headroom, u32 *len) 56 + { 57 + struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); 58 + struct xdp_buff xdp; 59 + u32 act; 60 + int err; 61 + 62 + if (!prog) 63 + return false; 64 + 65 + xdp.data = va + *rx_headroom; 66 + xdp_set_data_meta_invalid(&xdp); 67 + xdp.data_end = xdp.data + *len; 68 + xdp.data_hard_start = va; 69 + xdp.rxq = &rq->xdp_rxq; 70 + 71 + act = bpf_prog_run_xdp(prog, &xdp); 72 + switch (act) { 73 + case XDP_PASS: 74 + *rx_headroom = xdp.data - xdp.data_hard_start; 75 + *len = xdp.data_end - xdp.data; 76 + return false; 77 + case XDP_TX: 78 + if (unlikely(!mlx5e_xmit_xdp_buff(&rq->xdpsq, di, &xdp))) 79 + goto xdp_abort; 80 + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ 81 + return true; 82 + case XDP_REDIRECT: 83 + /* When XDP enabled then page-refcnt==1 here */ 84 + err = xdp_do_redirect(rq->netdev, &xdp, prog); 85 + if (unlikely(err)) 86 + goto xdp_abort; 87 + __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); 88 + rq->xdpsq.redirect_flush = true; 89 + mlx5e_page_dma_unmap(rq, di); 90 + rq->stats->xdp_redirect++; 91 + return true; 92 + default: 93 + bpf_warn_invalid_xdp_action(act); 94 + case XDP_ABORTED: 95 + xdp_abort: 96 + trace_xdp_exception(rq->netdev, prog, act); 97 + case XDP_DROP: 98 + 
rq->stats->xdp_drop++; 99 + return true; 100 + } 101 + } 102 + 103 + bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) 104 + { 105 + struct mlx5_wq_cyc *wq = &sq->wq; 106 + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 107 + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); 108 + 109 + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; 110 + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; 111 + struct mlx5_wqe_data_seg *dseg = wqe->data; 112 + 113 + struct xdp_frame *xdpf = xdpi->xdpf; 114 + dma_addr_t dma_addr = xdpi->dma_addr; 115 + unsigned int dma_len = xdpf->len; 116 + 117 + struct mlx5e_xdpsq_stats *stats = sq->stats; 118 + 119 + prefetchw(wqe); 120 + 121 + if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || sq->hw_mtu < dma_len)) { 122 + stats->err++; 123 + return false; 124 + } 125 + 126 + if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { 127 + if (sq->doorbell) { 128 + /* SQ is full, ring doorbell */ 129 + mlx5e_xmit_xdp_doorbell(sq); 130 + sq->doorbell = false; 131 + } 132 + stats->full++; 133 + return false; 134 + } 135 + 136 + cseg->fm_ce_se = 0; 137 + 138 + /* copy the inline part if required */ 139 + if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { 140 + memcpy(eseg->inline_hdr.start, xdpf->data, MLX5E_XDP_MIN_INLINE); 141 + eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); 142 + dma_len -= MLX5E_XDP_MIN_INLINE; 143 + dma_addr += MLX5E_XDP_MIN_INLINE; 144 + dseg++; 145 + } 146 + 147 + /* write the dma part */ 148 + dseg->addr = cpu_to_be64(dma_addr); 149 + dseg->byte_count = cpu_to_be32(dma_len); 150 + 151 + cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); 152 + 153 + /* move page to reference to sq responsibility, 154 + * and mark so it's not put back in page-cache. 
155 + */ 156 + sq->db.xdpi[pi] = *xdpi; 157 + sq->pc++; 158 + 159 + sq->doorbell = true; 160 + 161 + stats->xmit++; 162 + return true; 163 + } 164 + 165 + bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) 166 + { 167 + struct mlx5e_xdpsq *sq; 168 + struct mlx5_cqe64 *cqe; 169 + struct mlx5e_rq *rq; 170 + bool is_redirect; 171 + u16 sqcc; 172 + int i; 173 + 174 + sq = container_of(cq, struct mlx5e_xdpsq, cq); 175 + 176 + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) 177 + return false; 178 + 179 + cqe = mlx5_cqwq_get_cqe(&cq->wq); 180 + if (!cqe) 181 + return false; 182 + 183 + is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); 184 + rq = container_of(sq, struct mlx5e_rq, xdpsq); 185 + 186 + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), 187 + * otherwise a cq overrun may occur 188 + */ 189 + sqcc = sq->cc; 190 + 191 + i = 0; 192 + do { 193 + u16 wqe_counter; 194 + bool last_wqe; 195 + 196 + mlx5_cqwq_pop(&cq->wq); 197 + 198 + wqe_counter = be16_to_cpu(cqe->wqe_counter); 199 + 200 + do { 201 + u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 202 + struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci]; 203 + 204 + last_wqe = (sqcc == wqe_counter); 205 + sqcc++; 206 + 207 + if (is_redirect) { 208 + xdp_return_frame(xdpi->xdpf); 209 + dma_unmap_single(sq->pdev, xdpi->dma_addr, 210 + xdpi->xdpf->len, DMA_TO_DEVICE); 211 + } else { 212 + /* Recycle RX page */ 213 + mlx5e_page_release(rq, &xdpi->di, true); 214 + } 215 + } while (!last_wqe); 216 + } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); 217 + 218 + sq->stats->cqes += i; 219 + 220 + mlx5_cqwq_update_db_record(&cq->wq); 221 + 222 + /* ensure cq space is freed before enabling more cqes */ 223 + wmb(); 224 + 225 + sq->cc = sqcc; 226 + return (i == MLX5E_TX_CQ_POLL_BUDGET); 227 + } 228 + 229 + void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) 230 + { 231 + struct mlx5e_rq *rq; 232 + bool is_redirect; 233 + 234 + is_redirect = 
test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); 235 + rq = is_redirect ? NULL : container_of(sq, struct mlx5e_rq, xdpsq); 236 + 237 + while (sq->cc != sq->pc) { 238 + u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); 239 + struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci]; 240 + 241 + sq->cc++; 242 + 243 + if (is_redirect) { 244 + xdp_return_frame(xdpi->xdpf); 245 + dma_unmap_single(sq->pdev, xdpi->dma_addr, 246 + xdpi->xdpf->len, DMA_TO_DEVICE); 247 + } else { 248 + /* Recycle RX page */ 249 + mlx5e_page_release(rq, &xdpi->di, false); 250 + } 251 + } 252 + } 253 + 254 + int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 255 + u32 flags) 256 + { 257 + struct mlx5e_priv *priv = netdev_priv(dev); 258 + struct mlx5e_xdpsq *sq; 259 + int drops = 0; 260 + int sq_num; 261 + int i; 262 + 263 + if (unlikely(!test_bit(MLX5E_STATE_OPENED, &priv->state))) 264 + return -ENETDOWN; 265 + 266 + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) 267 + return -EINVAL; 268 + 269 + sq_num = smp_processor_id(); 270 + 271 + if (unlikely(sq_num >= priv->channels.num)) 272 + return -ENXIO; 273 + 274 + sq = &priv->channels.c[sq_num]->xdpsq; 275 + 276 + if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) 277 + return -ENETDOWN; 278 + 279 + for (i = 0; i < n; i++) { 280 + struct xdp_frame *xdpf = frames[i]; 281 + struct mlx5e_xdp_info xdpi; 282 + 283 + xdpi.dma_addr = dma_map_single(sq->pdev, xdpf->data, xdpf->len, 284 + DMA_TO_DEVICE); 285 + if (unlikely(dma_mapping_error(sq->pdev, xdpi.dma_addr))) { 286 + drops++; 287 + continue; 288 + } 289 + 290 + xdpi.xdpf = xdpf; 291 + 292 + if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) { 293 + xdp_return_frame_rx_napi(xdpf); 294 + drops++; 295 + } 296 + } 297 + 298 + if (flags & XDP_XMIT_FLUSH) 299 + mlx5e_xmit_xdp_doorbell(sq); 300 + 301 + return n - drops; 302 + }
+63
drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h
··· 1 + /* 2 + * Copyright (c) 2018, Mellanox Technologies. All rights reserved. 3 + * 4 + * This software is available to you under a choice of one of two 5 + * licenses. You may choose to be licensed under the terms of the GNU 6 + * General Public License (GPL) Version 2, available from the file 7 + * COPYING in the main directory of this source tree, or the 8 + * OpenIB.org BSD license below: 9 + * 10 + * Redistribution and use in source and binary forms, with or 11 + * without modification, are permitted provided that the following 12 + * conditions are met: 13 + * 14 + * - Redistributions of source code must retain the above 15 + * copyright notice, this list of conditions and the following 16 + * disclaimer. 17 + * 18 + * - Redistributions in binary form must reproduce the above 19 + * copyright notice, this list of conditions and the following 20 + * disclaimer in the documentation and/or other materials 21 + * provided with the distribution. 22 + * 23 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 + * SOFTWARE. 
31 + */ 32 + #ifndef __MLX5_EN_XDP_H__ 33 + #define __MLX5_EN_XDP_H__ 34 + 35 + #include "en.h" 36 + 37 + #define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ 38 + MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) 39 + #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) 40 + #define MLX5E_XDP_TX_DS_COUNT \ 41 + ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) 42 + 43 + bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, 44 + void *va, u16 *rx_headroom, u32 *len); 45 + bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); 46 + void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); 47 + 48 + bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi); 49 + int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, 50 + u32 flags); 51 + 52 + static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) 53 + { 54 + struct mlx5_wq_cyc *wq = &sq->wq; 55 + struct mlx5e_tx_wqe *wqe; 56 + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ 57 + 58 + wqe = mlx5_wq_cyc_get_wqe(wq, pi); 59 + 60 + mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); 61 + } 62 + 63 + #endif
+88 -28
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 47 47 #include "accel/tls.h" 48 48 #include "vxlan.h" 49 49 #include "en/port.h" 50 + #include "en/xdp.h" 50 51 51 52 struct mlx5e_rq_param { 52 53 u32 rqc[MLX5_ST_SZ_DW(rqc)]; ··· 97 96 98 97 static u32 mlx5e_rx_get_linear_frag_sz(struct mlx5e_params *params) 99 98 { 100 - if (!params->xdp_prog) { 101 - u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 102 - u16 rq_headroom = MLX5_RX_HEADROOM + NET_IP_ALIGN; 99 + u16 hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 100 + u16 linear_rq_headroom = params->xdp_prog ? 101 + XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM; 102 + u32 frag_sz; 103 103 104 - return MLX5_SKB_FRAG_SZ(rq_headroom + hw_mtu); 105 - } 104 + linear_rq_headroom += NET_IP_ALIGN; 106 105 107 - return PAGE_SIZE; 106 + frag_sz = MLX5_SKB_FRAG_SZ(linear_rq_headroom + hw_mtu); 107 + 108 + if (params->xdp_prog && frag_sz < PAGE_SIZE) 109 + frag_sz = PAGE_SIZE; 110 + 111 + return frag_sz; 108 112 } 109 113 110 114 static u8 mlx5e_mpwqe_log_pkts_per_wqe(struct mlx5e_params *params) ··· 491 485 rq->channel = c; 492 486 rq->ix = c->ix; 493 487 rq->mdev = mdev; 494 - rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 495 488 rq->stats = &c->priv->channel_stats[c->ix].rq; 496 489 497 490 rq->xdp_prog = params->xdp_prog ? 
bpf_prog_inc(params->xdp_prog) : NULL; ··· 882 877 883 878 /* UMR WQE (if in progress) is always at wq->head */ 884 879 if (rq->mpwqe.umr_in_progress) 885 - mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]); 880 + rq->dealloc_wqe(rq, wq->head); 886 881 887 882 while (!mlx5_wq_ll_is_empty(wq)) { 888 883 struct mlx5e_rx_wqe_ll *wqe; ··· 968 963 969 964 static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) 970 965 { 971 - kvfree(sq->db.di); 966 + kvfree(sq->db.xdpi); 972 967 } 973 968 974 969 static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) 975 970 { 976 971 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); 977 972 978 - sq->db.di = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.di)), 979 - GFP_KERNEL, numa); 980 - if (!sq->db.di) { 973 + sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)), 974 + GFP_KERNEL, numa); 975 + if (!sq->db.xdpi) { 981 976 mlx5e_free_xdpsq_db(sq); 982 977 return -ENOMEM; 983 978 } ··· 988 983 static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c, 989 984 struct mlx5e_params *params, 990 985 struct mlx5e_sq_param *param, 991 - struct mlx5e_xdpsq *sq) 986 + struct mlx5e_xdpsq *sq, 987 + bool is_redirect) 992 988 { 993 989 void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq); 994 990 struct mlx5_core_dev *mdev = c->mdev; ··· 1001 995 sq->channel = c; 1002 996 sq->uar_map = mdev->mlx5e_res.bfreg.map; 1003 997 sq->min_inline_mode = params->tx_min_inline_mode; 998 + sq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu); 999 + sq->stats = is_redirect ? 
1000 + &c->priv->channel_stats[c->ix].xdpsq : 1001 + &c->priv->channel_stats[c->ix].rq_xdpsq; 1004 1002 1005 1003 param->wq.db_numa_node = cpu_to_node(c->cpu); 1006 1004 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl); ··· 1534 1524 static int mlx5e_open_xdpsq(struct mlx5e_channel *c, 1535 1525 struct mlx5e_params *params, 1536 1526 struct mlx5e_sq_param *param, 1537 - struct mlx5e_xdpsq *sq) 1527 + struct mlx5e_xdpsq *sq, 1528 + bool is_redirect) 1538 1529 { 1539 1530 unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; 1540 1531 struct mlx5e_create_sq_param csp = {}; ··· 1543 1532 int err; 1544 1533 int i; 1545 1534 1546 - err = mlx5e_alloc_xdpsq(c, params, param, sq); 1535 + err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect); 1547 1536 if (err) 1548 1537 return err; 1549 1538 ··· 1552 1541 csp.cqn = sq->cq.mcq.cqn; 1553 1542 csp.wq_ctrl = &sq->wq_ctrl; 1554 1543 csp.min_inline_mode = sq->min_inline_mode; 1544 + if (is_redirect) 1545 + set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); 1555 1546 set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); 1556 1547 err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); 1557 1548 if (err) ··· 1936 1923 if (err) 1937 1924 goto err_close_icosq_cq; 1938 1925 1939 - err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); 1926 + err = mlx5e_open_cq(c, params->tx_cq_moderation, &cparam->tx_cq, &c->xdpsq.cq); 1940 1927 if (err) 1941 1928 goto err_close_tx_cqs; 1929 + 1930 + err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq); 1931 + if (err) 1932 + goto err_close_xdp_tx_cqs; 1942 1933 1943 1934 /* XDP SQ CQ params are same as normal TXQ sq CQ params */ 1944 1935 err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation, ··· 1960 1943 if (err) 1961 1944 goto err_close_icosq; 1962 1945 1963 - err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0; 1946 + err = c->xdp ? 
mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq, false) : 0; 1964 1947 if (err) 1965 1948 goto err_close_sqs; 1966 1949 ··· 1968 1951 if (err) 1969 1952 goto err_close_xdp_sq; 1970 1953 1954 + err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->xdpsq, true); 1955 + if (err) 1956 + goto err_close_rq; 1957 + 1971 1958 *cp = c; 1972 1959 1973 1960 return 0; 1961 + 1962 + err_close_rq: 1963 + mlx5e_close_rq(&c->rq); 1964 + 1974 1965 err_close_xdp_sq: 1975 1966 if (c->xdp) 1976 1967 mlx5e_close_xdpsq(&c->rq.xdpsq); ··· 1996 1971 1997 1972 err_close_rx_cq: 1998 1973 mlx5e_close_cq(&c->rq.cq); 1974 + 1975 + err_close_xdp_tx_cqs: 1976 + mlx5e_close_cq(&c->xdpsq.cq); 1999 1977 2000 1978 err_close_tx_cqs: 2001 1979 mlx5e_close_tx_cqs(c); ··· 2034 2006 2035 2007 static void mlx5e_close_channel(struct mlx5e_channel *c) 2036 2008 { 2009 + mlx5e_close_xdpsq(&c->xdpsq); 2037 2010 mlx5e_close_rq(&c->rq); 2038 2011 if (c->xdp) 2039 2012 mlx5e_close_xdpsq(&c->rq.xdpsq); ··· 2044 2015 if (c->xdp) 2045 2016 mlx5e_close_cq(&c->rq.xdpsq.cq); 2046 2017 mlx5e_close_cq(&c->rq.cq); 2018 + mlx5e_close_cq(&c->xdpsq.cq); 2047 2019 mlx5e_close_tx_cqs(c); 2048 2020 mlx5e_close_cq(&c->icosq.cq); 2049 2021 netif_napi_del(&c->napi); ··· 3737 3707 new_channels.params = *params; 3738 3708 new_channels.params.sw_mtu = new_mtu; 3739 3709 3710 + if (params->xdp_prog && 3711 + !mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { 3712 + netdev_err(netdev, "MTU(%d) > %d is not allowed while XDP enabled\n", 3713 + new_mtu, MLX5E_XDP_MAX_MTU); 3714 + err = -EINVAL; 3715 + goto out; 3716 + } 3717 + 3740 3718 if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) { 3741 3719 u8 ppw_old = mlx5e_mpwqe_log_pkts_per_wqe(params); 3742 3720 u8 ppw_new = mlx5e_mpwqe_log_pkts_per_wqe(&new_channels.params); ··· 4132 4094 queue_work(priv->wq, &priv->tx_timeout_work); 4133 4095 } 4134 4096 4097 + static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) 4098 + { 4099 + 
struct net_device *netdev = priv->netdev; 4100 + struct mlx5e_channels new_channels = {}; 4101 + 4102 + if (priv->channels.params.lro_en) { 4103 + netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); 4104 + return -EINVAL; 4105 + } 4106 + 4107 + if (MLX5_IPSEC_DEV(priv->mdev)) { 4108 + netdev_warn(netdev, "can't set XDP with IPSec offload\n"); 4109 + return -EINVAL; 4110 + } 4111 + 4112 + new_channels.params = priv->channels.params; 4113 + new_channels.params.xdp_prog = prog; 4114 + 4115 + if (!mlx5e_rx_is_linear_skb(priv->mdev, &new_channels.params)) { 4116 + netdev_warn(netdev, "XDP is not allowed with MTU(%d) > %d\n", 4117 + new_channels.params.sw_mtu, MLX5E_XDP_MAX_MTU); 4118 + return -EINVAL; 4119 + } 4120 + 4121 + return 0; 4122 + } 4123 + 4135 4124 static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) 4136 4125 { 4137 4126 struct mlx5e_priv *priv = netdev_priv(netdev); 4138 4127 struct bpf_prog *old_prog; 4139 - int err = 0; 4140 4128 bool reset, was_opened; 4129 + int err; 4141 4130 int i; 4142 4131 4143 4132 mutex_lock(&priv->state_lock); 4144 4133 4145 - if ((netdev->features & NETIF_F_LRO) && prog) { 4146 - netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n"); 4147 - err = -EINVAL; 4148 - goto unlock; 4149 - } 4150 - 4151 - if ((netdev->features & NETIF_F_HW_ESP) && prog) { 4152 - netdev_warn(netdev, "can't set XDP with IPSec offload\n"); 4153 - err = -EINVAL; 4154 - goto unlock; 4134 + if (prog) { 4135 + err = mlx5e_xdp_allowed(priv, prog); 4136 + if (err) 4137 + goto unlock; 4155 4138 } 4156 4139 4157 4140 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state); ··· 4301 4242 #endif 4302 4243 .ndo_tx_timeout = mlx5e_tx_timeout, 4303 4244 .ndo_bpf = mlx5e_xdp, 4245 + .ndo_xdp_xmit = mlx5e_xdp_xmit, 4304 4246 #ifdef CONFIG_NET_POLL_CONTROLLER 4305 4247 .ndo_poll_controller = mlx5e_netpoll, 4306 4248 #endif
+27 -227
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 34 34 #include <linux/ip.h> 35 35 #include <linux/ipv6.h> 36 36 #include <linux/tcp.h> 37 - #include <linux/bpf_trace.h> 38 37 #include <net/busy_poll.h> 39 38 #include <net/ip6_checksum.h> 40 39 #include <net/page_pool.h> ··· 45 46 #include "en_accel/ipsec_rxtx.h" 46 47 #include "en_accel/tls_rxtx.h" 47 48 #include "lib/clock.h" 49 + #include "en/xdp.h" 48 50 49 51 static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config) 50 52 { ··· 239 239 return 0; 240 240 } 241 241 242 - static void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, 243 - struct mlx5e_dma_info *dma_info) 242 + void mlx5e_page_dma_unmap(struct mlx5e_rq *rq, struct mlx5e_dma_info *dma_info) 244 243 { 245 244 dma_unmap_page(rq->pdev, dma_info->addr, PAGE_SIZE, rq->buff.map_dir); 246 245 } ··· 276 277 } 277 278 278 279 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq, 279 - struct mlx5e_wqe_frag_info *frag) 280 + struct mlx5e_wqe_frag_info *frag, 281 + bool recycle) 280 282 { 281 283 if (frag->last_in_page) 282 - mlx5e_page_release(rq, frag->di, true); 284 + mlx5e_page_release(rq, frag->di, recycle); 283 285 } 284 286 285 287 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix) ··· 308 308 309 309 free_frags: 310 310 while (--i >= 0) 311 - mlx5e_put_rx_frag(rq, --frag); 311 + mlx5e_put_rx_frag(rq, --frag, true); 312 312 313 313 return err; 314 314 } 315 315 316 316 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq, 317 - struct mlx5e_wqe_frag_info *wi) 317 + struct mlx5e_wqe_frag_info *wi, 318 + bool recycle) 318 319 { 319 320 int i; 320 321 321 322 for (i = 0; i < rq->wqe.info.num_frags; i++, wi++) 322 - mlx5e_put_rx_frag(rq, wi); 323 + mlx5e_put_rx_frag(rq, wi, recycle); 323 324 } 324 325 325 326 void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix) 326 327 { 327 328 struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix); 328 329 329 - mlx5e_free_rx_wqe(rq, wi); 330 + mlx5e_free_rx_wqe(rq, wi, false); 330 331 } 331 332 332 333 static int 
mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, u8 wqe_bulk) ··· 397 396 } 398 397 } 399 398 400 - void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi) 399 + static void 400 + mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, bool recycle) 401 401 { 402 402 const bool no_xdp_xmit = 403 403 bitmap_empty(wi->xdp_xmit_bitmap, MLX5_MPWRQ_PAGES_PER_WQE); ··· 407 405 408 406 for (i = 0; i < MLX5_MPWRQ_PAGES_PER_WQE; i++) 409 407 if (no_xdp_xmit || !test_bit(i, wi->xdp_xmit_bitmap)) 410 - mlx5e_page_release(rq, &dma_info[i], true); 408 + mlx5e_page_release(rq, &dma_info[i], recycle); 411 409 } 412 410 413 411 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq) ··· 507 505 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) 508 506 { 509 507 struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix]; 510 - 511 - mlx5e_free_rx_mpwqe(rq, wi); 508 + /* Don't recycle, this function is called on rq/netdev close */ 509 + mlx5e_free_rx_mpwqe(rq, wi, false); 512 510 } 513 511 514 512 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) ··· 849 847 mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); 850 848 } 851 849 852 - static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) 853 - { 854 - struct mlx5_wq_cyc *wq = &sq->wq; 855 - struct mlx5e_tx_wqe *wqe; 856 - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ 857 - 858 - wqe = mlx5_wq_cyc_get_wqe(wq, pi); 859 - 860 - mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); 861 - } 862 - 863 - static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq, 864 - struct mlx5e_dma_info *di, 865 - const struct xdp_buff *xdp) 866 - { 867 - struct mlx5e_xdpsq *sq = &rq->xdpsq; 868 - struct mlx5_wq_cyc *wq = &sq->wq; 869 - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); 870 - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); 871 - 872 - struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; 873 - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; 874 - struct mlx5_wqe_data_seg *dseg; 875 - 876 - ptrdiff_t data_offset = 
xdp->data - xdp->data_hard_start; 877 - dma_addr_t dma_addr = di->addr + data_offset; 878 - unsigned int dma_len = xdp->data_end - xdp->data; 879 - 880 - struct mlx5e_rq_stats *stats = rq->stats; 881 - 882 - prefetchw(wqe); 883 - 884 - if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE || rq->hw_mtu < dma_len)) { 885 - stats->xdp_drop++; 886 - return false; 887 - } 888 - 889 - if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { 890 - if (sq->db.doorbell) { 891 - /* SQ is full, ring doorbell */ 892 - mlx5e_xmit_xdp_doorbell(sq); 893 - sq->db.doorbell = false; 894 - } 895 - stats->xdp_tx_full++; 896 - return false; 897 - } 898 - 899 - dma_sync_single_for_device(sq->pdev, dma_addr, dma_len, PCI_DMA_TODEVICE); 900 - 901 - cseg->fm_ce_se = 0; 902 - 903 - dseg = (struct mlx5_wqe_data_seg *)eseg + 1; 904 - 905 - /* copy the inline part if required */ 906 - if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { 907 - memcpy(eseg->inline_hdr.start, xdp->data, MLX5E_XDP_MIN_INLINE); 908 - eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); 909 - dma_len -= MLX5E_XDP_MIN_INLINE; 910 - dma_addr += MLX5E_XDP_MIN_INLINE; 911 - dseg++; 912 - } 913 - 914 - /* write the dma part */ 915 - dseg->addr = cpu_to_be64(dma_addr); 916 - dseg->byte_count = cpu_to_be32(dma_len); 917 - 918 - cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); 919 - 920 - /* move page to reference to sq responsibility, 921 - * and mark so it's not put back in page-cache. 
922 - */ 923 - __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); /* non-atomic */ 924 - sq->db.di[pi] = *di; 925 - sq->pc++; 926 - 927 - sq->db.doorbell = true; 928 - 929 - stats->xdp_tx++; 930 - return true; 931 - } 932 - 933 - /* returns true if packet was consumed by xdp */ 934 - static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq, 935 - struct mlx5e_dma_info *di, 936 - void *va, u16 *rx_headroom, u32 *len) 937 - { 938 - struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); 939 - struct xdp_buff xdp; 940 - u32 act; 941 - int err; 942 - 943 - if (!prog) 944 - return false; 945 - 946 - xdp.data = va + *rx_headroom; 947 - xdp_set_data_meta_invalid(&xdp); 948 - xdp.data_end = xdp.data + *len; 949 - xdp.data_hard_start = va; 950 - xdp.rxq = &rq->xdp_rxq; 951 - 952 - act = bpf_prog_run_xdp(prog, &xdp); 953 - switch (act) { 954 - case XDP_PASS: 955 - *rx_headroom = xdp.data - xdp.data_hard_start; 956 - *len = xdp.data_end - xdp.data; 957 - return false; 958 - case XDP_TX: 959 - if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp))) 960 - trace_xdp_exception(rq->netdev, prog, act); 961 - return true; 962 - case XDP_REDIRECT: 963 - /* When XDP enabled then page-refcnt==1 here */ 964 - err = xdp_do_redirect(rq->netdev, &xdp, prog); 965 - if (!err) { 966 - __set_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags); 967 - rq->xdpsq.db.redirect_flush = true; 968 - mlx5e_page_dma_unmap(rq, di); 969 - } 970 - return true; 971 - default: 972 - bpf_warn_invalid_xdp_action(act); 973 - case XDP_ABORTED: 974 - trace_xdp_exception(rq->netdev, prog, act); 975 - case XDP_DROP: 976 - rq->stats->xdp_drop++; 977 - return true; 978 - } 979 - } 980 - 981 850 static inline 982 851 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va, 983 852 u32 frag_size, u16 headroom, ··· 986 1113 napi_gro_receive(rq->cq.napi, skb); 987 1114 988 1115 free_wqe: 989 - mlx5e_free_rx_wqe(rq, wi); 1116 + mlx5e_free_rx_wqe(rq, wi, true); 990 1117 wq_cyc_pop: 991 1118 mlx5_wq_cyc_pop(wq); 992 1119 } ··· 1028 1155 
napi_gro_receive(rq->cq.napi, skb); 1029 1156 1030 1157 free_wqe: 1031 - mlx5e_free_rx_wqe(rq, wi); 1158 + mlx5e_free_rx_wqe(rq, wi, true); 1032 1159 wq_cyc_pop: 1033 1160 mlx5_wq_cyc_pop(wq); 1034 1161 } ··· 1099 1226 1100 1227 dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset, 1101 1228 frag_size, DMA_FROM_DEVICE); 1229 + prefetchw(va); /* xdp_frame data area */ 1102 1230 prefetch(data); 1103 1231 1104 1232 rcu_read_lock(); ··· 1166 1292 1167 1293 wq = &rq->mpwqe.wq; 1168 1294 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); 1169 - mlx5e_free_rx_mpwqe(rq, wi); 1295 + mlx5e_free_rx_mpwqe(rq, wi, true); 1170 1296 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); 1171 1297 } 1172 1298 ··· 1202 1328 rq->handle_rx_cqe(rq, cqe); 1203 1329 } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); 1204 1330 1205 - if (xdpsq->db.doorbell) { 1331 + if (xdpsq->doorbell) { 1206 1332 mlx5e_xmit_xdp_doorbell(xdpsq); 1207 - xdpsq->db.doorbell = false; 1333 + xdpsq->doorbell = false; 1208 1334 } 1209 1335 1210 - if (xdpsq->db.redirect_flush) { 1336 + if (xdpsq->redirect_flush) { 1211 1337 xdp_do_flush_map(); 1212 - xdpsq->db.redirect_flush = false; 1338 + xdpsq->redirect_flush = false; 1213 1339 } 1214 1340 1215 1341 mlx5_cqwq_update_db_record(&cq->wq); ··· 1218 1344 wmb(); 1219 1345 1220 1346 return work_done; 1221 - } 1222 - 1223 - bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) 1224 - { 1225 - struct mlx5e_xdpsq *sq; 1226 - struct mlx5_cqe64 *cqe; 1227 - struct mlx5e_rq *rq; 1228 - u16 sqcc; 1229 - int i; 1230 - 1231 - sq = container_of(cq, struct mlx5e_xdpsq, cq); 1232 - 1233 - if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state))) 1234 - return false; 1235 - 1236 - cqe = mlx5_cqwq_get_cqe(&cq->wq); 1237 - if (!cqe) 1238 - return false; 1239 - 1240 - rq = container_of(sq, struct mlx5e_rq, xdpsq); 1241 - 1242 - /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), 1243 - * otherwise a cq overrun may occur 1244 - */ 1245 - sqcc = 
sq->cc; 1246 - 1247 - i = 0; 1248 - do { 1249 - u16 wqe_counter; 1250 - bool last_wqe; 1251 - 1252 - mlx5_cqwq_pop(&cq->wq); 1253 - 1254 - wqe_counter = be16_to_cpu(cqe->wqe_counter); 1255 - 1256 - do { 1257 - struct mlx5e_dma_info *di; 1258 - u16 ci; 1259 - 1260 - last_wqe = (sqcc == wqe_counter); 1261 - 1262 - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); 1263 - di = &sq->db.di[ci]; 1264 - 1265 - sqcc++; 1266 - /* Recycle RX page */ 1267 - mlx5e_page_release(rq, di, true); 1268 - } while (!last_wqe); 1269 - } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); 1270 - 1271 - rq->stats->xdp_tx_cqe += i; 1272 - 1273 - mlx5_cqwq_update_db_record(&cq->wq); 1274 - 1275 - /* ensure cq space is freed before enabling more cqes */ 1276 - wmb(); 1277 - 1278 - sq->cc = sqcc; 1279 - return (i == MLX5E_TX_CQ_POLL_BUDGET); 1280 - } 1281 - 1282 - void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) 1283 - { 1284 - struct mlx5e_rq *rq = container_of(sq, struct mlx5e_rq, xdpsq); 1285 - struct mlx5e_dma_info *di; 1286 - u16 ci; 1287 - 1288 - while (sq->cc != sq->pc) { 1289 - ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); 1290 - di = &sq->db.di[ci]; 1291 - sq->cc++; 1292 - 1293 - mlx5e_page_release(rq, di, false); 1294 - } 1295 1347 } 1296 1348 1297 1349 #ifdef CONFIG_MLX5_CORE_IPOIB ··· 1321 1521 napi_gro_receive(rq->cq.napi, skb); 1322 1522 1323 1523 wq_free_wqe: 1324 - mlx5e_free_rx_wqe(rq, wi); 1524 + mlx5e_free_rx_wqe(rq, wi, true); 1325 1525 mlx5_wq_cyc_pop(wq); 1326 1526 } 1327 1527 ··· 1344 1544 skb = rq->wqe.skb_from_cqe(rq, cqe, wi, cqe_bcnt); 1345 1545 if (unlikely(!skb)) { 1346 1546 /* a DROP, save the page-reuse checks */ 1347 - mlx5e_free_rx_wqe(rq, wi); 1547 + mlx5e_free_rx_wqe(rq, wi, true); 1348 1548 goto wq_cyc_pop; 1349 1549 } 1350 1550 skb = mlx5e_ipsec_handle_rx_skb(rq->netdev, skb, &cqe_bcnt); 1351 1551 if (unlikely(!skb)) { 1352 - mlx5e_free_rx_wqe(rq, wi); 1552 + mlx5e_free_rx_wqe(rq, wi, true); 1353 1553 goto wq_cyc_pop; 1354 1554 } 1355 1555 
1356 1556 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); 1357 1557 napi_gro_receive(rq->cq.napi, skb); 1358 1558 1359 - mlx5e_free_rx_wqe(rq, wi); 1559 + mlx5e_free_rx_wqe(rq, wi, true); 1360 1560 wq_cyc_pop: 1361 1561 mlx5_wq_cyc_pop(wq); 1362 1562 }
+66 -14
drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
··· 59 59 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) }, 60 60 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) }, 61 61 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) }, 62 - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) }, 63 - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, 62 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) }, 63 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_xmit) }, 64 64 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) }, 65 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_err) }, 66 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_cqe) }, 65 67 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) }, 66 68 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) }, 67 69 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) }, ··· 75 73 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) }, 76 74 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_udp_seg_rem) }, 77 75 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_cqe_err) }, 76 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_xmit) }, 77 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_full) }, 78 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_err) }, 79 + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xdp_cqes) }, 78 80 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) }, 79 81 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_cqes) }, 80 82 { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler_strides) }, ··· 134 128 for (i = 0; i < priv->profile->max_nch(priv->mdev); i++) { 135 129 struct mlx5e_channel_stats *channel_stats = 136 130 &priv->channel_stats[i]; 131 + struct mlx5e_xdpsq_stats *xdpsq_red_stats = &channel_stats->xdpsq; 132 + struct mlx5e_xdpsq_stats *xdpsq_stats = &channel_stats->rq_xdpsq; 137 133 struct mlx5e_rq_stats *rq_stats = &channel_stats->rq; 138 134 struct mlx5e_ch_stats *ch_stats = 
&channel_stats->ch; 139 135 int j; ··· 149 141 s->rx_csum_complete += rq_stats->csum_complete; 150 142 s->rx_csum_unnecessary += rq_stats->csum_unnecessary; 151 143 s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner; 152 - s->rx_xdp_drop += rq_stats->xdp_drop; 153 - s->rx_xdp_tx += rq_stats->xdp_tx; 154 - s->rx_xdp_tx_cqe += rq_stats->xdp_tx_cqe; 155 - s->rx_xdp_tx_full += rq_stats->xdp_tx_full; 144 + s->rx_xdp_drop += rq_stats->xdp_drop; 145 + s->rx_xdp_redirect += rq_stats->xdp_redirect; 146 + s->rx_xdp_tx_xmit += xdpsq_stats->xmit; 147 + s->rx_xdp_tx_full += xdpsq_stats->full; 148 + s->rx_xdp_tx_err += xdpsq_stats->err; 149 + s->rx_xdp_tx_cqe += xdpsq_stats->cqes; 156 150 s->rx_wqe_err += rq_stats->wqe_err; 157 151 s->rx_mpwqe_filler_cqes += rq_stats->mpwqe_filler_cqes; 158 152 s->rx_mpwqe_filler_strides += rq_stats->mpwqe_filler_strides; ··· 172 162 s->ch_poll += ch_stats->poll; 173 163 s->ch_arm += ch_stats->arm; 174 164 s->ch_aff_change += ch_stats->aff_change; 175 - s->ch_eq_rearm += ch_stats->eq_rearm; 165 + s->ch_eq_rearm += ch_stats->eq_rearm; 166 + /* xdp redirect */ 167 + s->tx_xdp_xmit += xdpsq_red_stats->xmit; 168 + s->tx_xdp_full += xdpsq_red_stats->full; 169 + s->tx_xdp_err += xdpsq_red_stats->err; 170 + s->tx_xdp_cqes += xdpsq_red_stats->cqes; 176 171 177 172 for (j = 0; j < priv->max_opened_tc; j++) { 178 173 struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; ··· 1141 1126 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) }, 1142 1127 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) }, 1143 1128 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) }, 1144 - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) }, 1145 - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_cqe) }, 1146 - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) }, 1129 + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_redirect) }, 1147 1130 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) }, 
1148 1131 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) }, 1149 1132 { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, ··· 1181 1168 { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, cqe_err) }, 1182 1169 }; 1183 1170 1171 + static const struct counter_desc rq_xdpsq_stats_desc[] = { 1172 + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, 1173 + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, 1174 + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, 1175 + { MLX5E_DECLARE_RQ_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, 1176 + }; 1177 + 1178 + static const struct counter_desc xdpsq_stats_desc[] = { 1179 + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, xmit) }, 1180 + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, full) }, 1181 + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, err) }, 1182 + { MLX5E_DECLARE_XDPSQ_STAT(struct mlx5e_xdpsq_stats, cqes) }, 1183 + }; 1184 + 1184 1185 static const struct counter_desc ch_stats_desc[] = { 1185 1186 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, events) }, 1186 1187 { MLX5E_DECLARE_CH_STAT(struct mlx5e_ch_stats, poll) }, ··· 1205 1178 1206 1179 #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc) 1207 1180 #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc) 1181 + #define NUM_XDPSQ_STATS ARRAY_SIZE(xdpsq_stats_desc) 1182 + #define NUM_RQ_XDPSQ_STATS ARRAY_SIZE(rq_xdpsq_stats_desc) 1208 1183 #define NUM_CH_STATS ARRAY_SIZE(ch_stats_desc) 1209 1184 1210 1185 static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv) ··· 1215 1186 1216 1187 return (NUM_RQ_STATS * max_nch) + 1217 1188 (NUM_CH_STATS * max_nch) + 1218 - (NUM_SQ_STATS * max_nch * priv->max_opened_tc); 1189 + (NUM_SQ_STATS * max_nch * priv->max_opened_tc) + 1190 + (NUM_RQ_XDPSQ_STATS * max_nch) + 1191 + (NUM_XDPSQ_STATS * max_nch); 1219 1192 } 1220 1193 1221 1194 static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data, ··· 1231 1200 sprintf(data + (idx++) 
* ETH_GSTRING_LEN, 1232 1201 ch_stats_desc[j].format, i); 1233 1202 1234 - for (i = 0; i < max_nch; i++) 1203 + for (i = 0; i < max_nch; i++) { 1235 1204 for (j = 0; j < NUM_RQ_STATS; j++) 1236 - sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i); 1205 + sprintf(data + (idx++) * ETH_GSTRING_LEN, 1206 + rq_stats_desc[j].format, i); 1207 + for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) 1208 + sprintf(data + (idx++) * ETH_GSTRING_LEN, 1209 + rq_xdpsq_stats_desc[j].format, i); 1210 + } 1237 1211 1238 1212 for (tc = 0; tc < priv->max_opened_tc; tc++) 1239 1213 for (i = 0; i < max_nch; i++) ··· 1246 1210 sprintf(data + (idx++) * ETH_GSTRING_LEN, 1247 1211 sq_stats_desc[j].format, 1248 1212 priv->channel_tc2txq[i][tc]); 1213 + 1214 + for (i = 0; i < max_nch; i++) 1215 + for (j = 0; j < NUM_XDPSQ_STATS; j++) 1216 + sprintf(data + (idx++) * ETH_GSTRING_LEN, 1217 + xdpsq_stats_desc[j].format, i); 1249 1218 1250 1219 return idx; 1251 1220 } ··· 1267 1226 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].ch, 1268 1227 ch_stats_desc, j); 1269 1228 1270 - for (i = 0; i < max_nch; i++) 1229 + for (i = 0; i < max_nch; i++) { 1271 1230 for (j = 0; j < NUM_RQ_STATS; j++) 1272 1231 data[idx++] = 1273 1232 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq, 1274 1233 rq_stats_desc, j); 1234 + for (j = 0; j < NUM_RQ_XDPSQ_STATS; j++) 1235 + data[idx++] = 1236 + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].rq_xdpsq, 1237 + rq_xdpsq_stats_desc, j); 1238 + } 1275 1239 1276 1240 for (tc = 0; tc < priv->max_opened_tc; tc++) 1277 1241 for (i = 0; i < max_nch; i++) ··· 1284 1238 data[idx++] = 1285 1239 MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].sq[tc], 1286 1240 sq_stats_desc, j); 1241 + 1242 + for (i = 0; i < max_nch; i++) 1243 + for (j = 0; j < NUM_XDPSQ_STATS; j++) 1244 + data[idx++] = 1245 + MLX5E_READ_CTR64_CPU(&priv->channel_stats[i].xdpsq, 1246 + xdpsq_stats_desc, j); 1287 1247 1288 1248 return idx; 1289 1249 }
+19 -5
drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
··· 44 44 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld) 45 45 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld) 46 46 #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld) 47 + #define MLX5E_DECLARE_XDPSQ_STAT(type, fld) "tx%d_xdp_"#fld, offsetof(type, fld) 48 + #define MLX5E_DECLARE_RQ_XDPSQ_STAT(type, fld) "rx%d_xdp_tx_"#fld, offsetof(type, fld) 47 49 #define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld) 48 50 49 51 struct counter_desc { ··· 72 70 u64 rx_csum_complete; 73 71 u64 rx_csum_unnecessary_inner; 74 72 u64 rx_xdp_drop; 75 - u64 rx_xdp_tx; 76 - u64 rx_xdp_tx_cqe; 73 + u64 rx_xdp_redirect; 74 + u64 rx_xdp_tx_xmit; 77 75 u64 rx_xdp_tx_full; 76 + u64 rx_xdp_tx_err; 77 + u64 rx_xdp_tx_cqe; 78 78 u64 tx_csum_none; 79 79 u64 tx_csum_partial; 80 80 u64 tx_csum_partial_inner; ··· 88 84 u64 tx_queue_wake; 89 85 u64 tx_udp_seg_rem; 90 86 u64 tx_cqe_err; 87 + u64 tx_xdp_xmit; 88 + u64 tx_xdp_full; 89 + u64 tx_xdp_err; 90 + u64 tx_xdp_cqes; 91 91 u64 rx_wqe_err; 92 92 u64 rx_mpwqe_filler_cqes; 93 93 u64 rx_mpwqe_filler_strides; ··· 186 178 u64 lro_bytes; 187 179 u64 removed_vlan_packets; 188 180 u64 xdp_drop; 189 - u64 xdp_tx; 190 - u64 xdp_tx_cqe; 191 - u64 xdp_tx_full; 181 + u64 xdp_redirect; 192 182 u64 wqe_err; 193 183 u64 mpwqe_filler_cqes; 194 184 u64 mpwqe_filler_strides; ··· 229 223 u64 cqes ____cacheline_aligned_in_smp; 230 224 u64 wake; 231 225 u64 cqe_err; 226 + }; 227 + 228 + struct mlx5e_xdpsq_stats { 229 + u64 xmit; 230 + u64 full; 231 + u64 err; 232 + /* dirtied @completion */ 233 + u64 cqes ____cacheline_aligned_in_smp; 232 234 }; 233 235 234 236 struct mlx5e_ch_stats {
+9 -10
drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
··· 66 66 } 67 67 } 68 68 69 + static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i) 70 + { 71 + return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; 72 + } 73 + 69 74 static inline void mlx5e_dma_push(struct mlx5e_txqsq *sq, 70 75 dma_addr_t addr, 71 76 u32 size, 72 77 enum mlx5e_dma_map_type map_type) 73 78 { 74 - u32 i = sq->dma_fifo_pc & sq->dma_fifo_mask; 79 + struct mlx5e_sq_dma *dma = mlx5e_dma_get(sq, sq->dma_fifo_pc++); 75 80 76 - sq->db.dma_fifo[i].addr = addr; 77 - sq->db.dma_fifo[i].size = size; 78 - sq->db.dma_fifo[i].type = map_type; 79 - sq->dma_fifo_pc++; 80 - } 81 - 82 - static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_txqsq *sq, u32 i) 83 - { 84 - return &sq->db.dma_fifo[i & sq->dma_fifo_mask]; 81 + dma->addr = addr; 82 + dma->size = size; 83 + dma->type = map_type; 85 84 } 86 85 87 86 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
+4
drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
··· 32 32 33 33 #include <linux/irq.h> 34 34 #include "en.h" 35 + #include "en/xdp.h" 35 36 36 37 static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c) 37 38 { ··· 85 84 for (i = 0; i < c->num_tc; i++) 86 85 busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); 87 86 87 + busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); 88 + 88 89 if (c->xdp) 89 90 busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq); 90 91 ··· 119 116 120 117 mlx5e_cq_arm(&c->rq.cq); 121 118 mlx5e_cq_arm(&c->icosq.cq); 119 + mlx5e_cq_arm(&c->xdpsq.cq); 122 120 123 121 return work_done; 124 122 }