// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019, Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock.h>
#include <net/xdp.h>
#include "ice.h"
#include "ice_base.h"
#include "ice_type.h"
#include "ice_xsk.h"
#include "ice_txrx.h"
#include "ice_txrx_lib.h"
#include "ice_lib.h"

/**
 * ice_qp_reset_stats - Resets all stats for rings of given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_reset_stats(struct ice_vsi *vsi, u16 q_idx)
{
	memset(&vsi->rx_rings[q_idx]->rx_stats, 0,
	       sizeof(vsi->rx_rings[q_idx]->rx_stats));
	memset(&vsi->tx_rings[q_idx]->stats, 0,
	       sizeof(vsi->tx_rings[q_idx]->stats));
	if (ice_is_xdp_ena_vsi(vsi))
		memset(&vsi->xdp_rings[q_idx]->stats, 0,
		       sizeof(vsi->xdp_rings[q_idx]->stats));
}

/**
 * ice_qp_clean_rings - Cleans all the rings of a given index
 * @vsi: VSI that contains rings of interest
 * @q_idx: ring index in array
 */
static void ice_qp_clean_rings(struct ice_vsi *vsi, u16 q_idx)
{
	ice_clean_tx_ring(vsi->tx_rings[q_idx]);
	if (ice_is_xdp_ena_vsi(vsi))
		ice_clean_tx_ring(vsi->xdp_rings[q_idx]);
	ice_clean_rx_ring(vsi->rx_rings[q_idx]);
}

/**
 * ice_qvec_toggle_napi - Enables/disables NAPI for a given q_vector
 * @vsi: VSI that has netdev
 * @q_vector: q_vector that has NAPI context
 * @enable: true for enable, false for disable
 */
static void
ice_qvec_toggle_napi(struct ice_vsi *vsi, struct ice_q_vector *q_vector,
		     bool enable)
{
	if (!vsi->netdev || !q_vector)
		return;

	if (enable)
		napi_enable(&q_vector->napi);
	else
		napi_disable(&q_vector->napi);
}

/**
 * ice_qvec_dis_irq - Mask off queue interrupt generation on given ring
 * @vsi: the VSI that contains queue vector being un-configured
 * @rx_ring: Rx ring that will have its IRQ disabled
 * @q_vector: queue vector
 */
static void
ice_qvec_dis_irq(struct ice_vsi *vsi, struct ice_ring *rx_ring,
		 struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u16 reg;
	u32 val;

	/* QINT_TQCTL is cleared in ice_vsi_stop_tx_ring(), so only
	 * QINT_RQCTL needs to be handled here
	 */
	reg = rx_ring->reg_idx;
	val = rd32(hw, QINT_RQCTL(reg));
	val &= ~QINT_RQCTL_CAUSE_ENA_M;
	wr32(hw, QINT_RQCTL(reg), val);

	if (q_vector) {
		u16 v_idx = q_vector->v_idx;

		wr32(hw, GLINT_DYN_CTL(q_vector->reg_idx), 0);
		ice_flush(hw);
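		/* make sure no handler for this vector is still running
		 * before the caller tears the queue down
		 */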
		synchronize_irq(pf->msix_entries[v_idx + base].vector);
	}
}

/**
 * ice_qvec_cfg_msix - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void
ice_qvec_cfg_msix(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	u16 reg_idx = q_vector->reg_idx;
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;
	struct ice_ring *ring;

	ice_cfg_itr(hw, q_vector);

	wr32(hw, GLINT_RATE(reg_idx),
	     ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));

	ice_for_each_ring(ring, q_vector->tx)
		ice_cfg_txq_interrupt(vsi, ring->reg_idx, reg_idx,
				      q_vector->tx.itr_idx);

	ice_for_each_ring(ring, q_vector->rx)
		ice_cfg_rxq_interrupt(vsi, ring->reg_idx, reg_idx,
				      q_vector->rx.itr_idx);

	ice_flush(hw);
}

/**
 * ice_qvec_ena_irq - Enable IRQ for given queue vector
 * @vsi: the VSI that contains queue vector
 * @q_vector: queue vector
 */
static void ice_qvec_ena_irq(struct ice_vsi *vsi, struct ice_q_vector *q_vector)
{
	struct ice_pf *pf = vsi->back;
	struct ice_hw *hw = &pf->hw;

	ice_irq_dynamic_ena(hw, vsi, q_vector);

	ice_flush(hw);
}

/**
 * ice_qp_dis - Disables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_txq_meta txq_meta = { };
	struct ice_ring *tx_ring, *rx_ring;
	struct ice_q_vector *q_vector;
	int timeout = 50;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	while (test_and_set_bit(__ICE_CFG_BUSY, vsi->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}
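	/* __ICE_CFG_BUSY stays set for the whole queue-pair
	 * reconfiguration; ice_qp_ena() clears it once the pair is
	 * running again
	 */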
	netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx));

	ice_qvec_dis_irq(vsi, rx_ring, q_vector);

	ice_fill_txq_meta(vsi, tx_ring, &txq_meta);
	err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta);
	if (err)
		return err;
	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(&txq_meta, 0, sizeof(txq_meta));
		ice_fill_txq_meta(vsi, xdp_ring, &txq_meta);
		err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, xdp_ring,
					   &txq_meta);
		if (err)
			return err;
	}
	err = ice_vsi_ctrl_rx_ring(vsi, false, q_idx);
	if (err)
		return err;

	ice_qvec_toggle_napi(vsi, q_vector, false);
	ice_qp_clean_rings(vsi, q_idx);
	ice_qp_reset_stats(vsi, q_idx);

	return 0;
}

/**
 * ice_qp_ena - Enables a queue pair
 * @vsi: VSI of interest
 * @q_idx: ring index in array
 *
 * Returns 0 on success, negative on failure.
 */
static int ice_qp_ena(struct ice_vsi *vsi, u16 q_idx)
{
	struct ice_aqc_add_tx_qgrp *qg_buf;
	struct ice_ring *tx_ring, *rx_ring;
	struct ice_q_vector *q_vector;
	int err;

	if (q_idx >= vsi->num_rxq || q_idx >= vsi->num_txq)
		return -EINVAL;

	qg_buf = kzalloc(sizeof(*qg_buf), GFP_KERNEL);
	if (!qg_buf)
		return -ENOMEM;

	qg_buf->num_txqs = 1;

	tx_ring = vsi->tx_rings[q_idx];
	rx_ring = vsi->rx_rings[q_idx];
	q_vector = rx_ring->q_vector;

	err = ice_vsi_cfg_txq(vsi, tx_ring, qg_buf);
	if (err)
		goto free_buf;

	if (ice_is_xdp_ena_vsi(vsi)) {
		struct ice_ring *xdp_ring = vsi->xdp_rings[q_idx];

		memset(qg_buf, 0, sizeof(*qg_buf));
		qg_buf->num_txqs = 1;
		err = ice_vsi_cfg_txq(vsi, xdp_ring, qg_buf);
		if (err)
			goto free_buf;
		ice_set_ring_xdp(xdp_ring);
		xdp_ring->xsk_umem = ice_xsk_umem(xdp_ring);
	}

	err = ice_setup_rx_ctx(rx_ring);
	if (err)
		goto free_buf;

	ice_qvec_cfg_msix(vsi, q_vector);

	err = ice_vsi_ctrl_rx_ring(vsi, true, q_idx);
	if (err)
		goto free_buf;

	clear_bit(__ICE_CFG_BUSY, vsi->state);
	ice_qvec_toggle_napi(vsi, q_vector, true);
	ice_qvec_ena_irq(vsi, q_vector);

	netif_tx_start_queue(netdev_get_tx_queue(vsi->netdev, q_idx));
free_buf:
	kfree(qg_buf);
	return err;
}

/**
 * ice_xsk_alloc_umems - allocate a UMEM region for an XDP socket
 * @vsi: VSI to allocate the UMEM on
 *
 * Returns 0 on success, negative on error
 */
static int ice_xsk_alloc_umems(struct ice_vsi *vsi)
{
	if (vsi->xsk_umems)
		return 0;

	vsi->xsk_umems = kcalloc(vsi->num_xsk_umems, sizeof(*vsi->xsk_umems),
				 GFP_KERNEL);

	if (!vsi->xsk_umems) {
		vsi->num_xsk_umems = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * ice_xsk_add_umem - add a UMEM region for XDP sockets
 * @vsi: VSI to which the UMEM will be added
 * @umem: pointer to a requested UMEM region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on error
 */
static int ice_xsk_add_umem(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
{
	int err;

	err = ice_xsk_alloc_umems(vsi);
	if (err)
		return err;

	vsi->xsk_umems[qid] = umem;
	vsi->num_xsk_umems_used++;

	return 0;
}

/**
 * ice_xsk_remove_umem - Remove a UMEM for a certain ring/qid
 * @vsi: VSI from which the UMEM will be removed
 * @qid: Ring/qid associated with the UMEM
 */
static void ice_xsk_remove_umem(struct ice_vsi *vsi, u16 qid)
{
	vsi->xsk_umems[qid] = NULL;
	vsi->num_xsk_umems_used--;

	if (vsi->num_xsk_umems_used == 0) {
		kfree(vsi->xsk_umems);
		vsi->xsk_umems = NULL;
		vsi->num_xsk_umems = 0;
	}
}

/**
 * ice_xsk_umem_dma_map - DMA map UMEM region for XDP sockets
 * @vsi: VSI on which to map the UMEM region
 * @umem: UMEM to map
 *
 * Returns 0 on success, negative on error
 */
static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i;

	dev = ice_pf_to_dev(pf);
	for (i = 0; i < umem->npgs; i++) {
		dma_addr_t dma = dma_map_page_attrs(dev, umem->pgs[i], 0,
						    PAGE_SIZE,
						    DMA_BIDIRECTIONAL,
						    ICE_RX_DMA_ATTR);
		if (dma_mapping_error(dev, dma)) {
			dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
				i);
			goto out_unmap;
		}

		umem->pages[i].dma = dma;
	}

	return 0;

out_unmap:
	/* unwind only the pages that were successfully mapped, i.e.
	 * indices 0..i-1; page i itself failed to map
	 */
	while (i--) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);
		umem->pages[i].dma = 0;
	}

	return -EFAULT;
}

/**
 * ice_xsk_umem_dma_unmap - DMA unmap UMEM region for XDP sockets
 * @vsi: VSI from which the UMEM will be unmapped
 * @umem: UMEM to unmap
 */
static void ice_xsk_umem_dma_unmap(struct ice_vsi *vsi, struct xdp_umem *umem)
{
	struct ice_pf *pf = vsi->back;
	struct device *dev;
	unsigned int i;

	dev = ice_pf_to_dev(pf);
	for (i = 0; i < umem->npgs; i++) {
		dma_unmap_page_attrs(dev, umem->pages[i].dma, PAGE_SIZE,
				     DMA_BIDIRECTIONAL, ICE_RX_DMA_ATTR);

		umem->pages[i].dma = 0;
	}
}

/**
 * ice_xsk_umem_disable - disable a UMEM region
 * @vsi: Current VSI
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int ice_xsk_umem_disable(struct ice_vsi *vsi, u16 qid)
{
	if (!vsi->xsk_umems || qid >= vsi->num_xsk_umems ||
	    !vsi->xsk_umems[qid])
		return -EINVAL;

	ice_xsk_umem_dma_unmap(vsi, vsi->xsk_umems[qid]);
	ice_xsk_remove_umem(vsi, qid);

	return 0;
}

/**
 * ice_xsk_umem_enable - enable a UMEM region
 * @vsi: Current VSI
 * @umem: pointer to a requested UMEM region
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
static int
ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
{
	struct xdp_umem_fq_reuse *reuseq;
	int err;

	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	if (!vsi->num_xsk_umems)
		vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
	if (qid >= vsi->num_xsk_umems)
		return -EINVAL;

	if (vsi->xsk_umems && vsi->xsk_umems[qid])
		return -EBUSY;

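	/* set up a fill-queue reuse buffer sized to the Rx ring;
	 * xsk_reuseq_swap() installs it on the umem and returns the
	 * previous one (if any), which is freed
	 */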
	reuseq = xsk_reuseq_prepare(vsi->rx_rings[0]->count);
	if (!reuseq)
		return -ENOMEM;

	xsk_reuseq_free(xsk_reuseq_swap(umem, reuseq));

	err = ice_xsk_umem_dma_map(vsi, umem);
	if (err)
		return err;

	err = ice_xsk_add_umem(vsi, umem, qid);
	if (err) {
		/* don't leak the DMA mappings set up above */
		ice_xsk_umem_dma_unmap(vsi, umem);
		return err;
	}

	return 0;
}

/**
 * ice_xsk_umem_setup - enable/disable a UMEM region depending on its state
 * @vsi: Current VSI
 * @umem: UMEM to enable/associate to a ring, NULL to disable
 * @qid: queue ID
 *
 * Returns 0 on success, negative on failure
 */
int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
{
	bool if_running, umem_present = !!umem;
	int ret = 0, umem_failure = 0;

	if_running = netif_running(vsi->netdev) && ice_is_xdp_ena_vsi(vsi);

	if (if_running) {
		ret = ice_qp_dis(vsi, qid);
		if (ret) {
			netdev_err(vsi->netdev, "ice_qp_dis error = %d", ret);
			goto xsk_umem_if_up;
		}
	}

	umem_failure = umem_present ? ice_xsk_umem_enable(vsi, umem, qid) :
				      ice_xsk_umem_disable(vsi, qid);

xsk_umem_if_up:
	if (if_running) {
		ret = ice_qp_ena(vsi, qid);
		if (!ret && umem_present)
			napi_schedule(&vsi->xdp_rings[qid]->q_vector->napi);
		else if (ret)
			netdev_err(vsi->netdev, "ice_qp_ena error = %d", ret);
	}

	if (umem_failure) {
		netdev_err(vsi->netdev, "Could not %sable UMEM, error = %d",
			   umem_present ? "en" : "dis", umem_failure);
		return umem_failure;
	}

	return ret;
}
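
/* ice_xsk_umem_setup() is the entry point the core XDP code hits when user
 * space binds an AF_XDP socket to a queue; in this driver generation it is
 * dispatched from the ndo_bpf callback on an XDP_SETUP_XSK_UMEM command,
 * roughly (sketch, not the verbatim ice_main.c code):
 *
 *	case XDP_SETUP_XSK_UMEM:
 *		return ice_xsk_umem_setup(vsi, xdp->xsk.umem,
 *					  xdp->xsk.queue_id);
 */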

/**
 * ice_zca_free - Callback for MEM_TYPE_ZERO_COPY allocations
 * @zca: zero-copy allocator
 * @handle: Buffer handle
 */
void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle)
{
	struct ice_rx_buf *rx_buf;
	struct ice_ring *rx_ring;
	struct xdp_umem *umem;
	u64 hr, mask;
	u16 nta;

	rx_ring = container_of(zca, struct ice_ring, zca);
	umem = rx_ring->xsk_umem;
	hr = umem->headroom + XDP_PACKET_HEADROOM;

	mask = umem->chunk_mask;

	nta = rx_ring->next_to_alloc;
	rx_buf = &rx_ring->rx_buf[nta];

	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

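	/* chunk_mask strips the intra-chunk offset, so the recycled
	 * buffer points back at the start of its UMEM chunk
	 */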
	handle &= mask;

	rx_buf->dma = xdp_umem_get_dma(umem, handle);
	rx_buf->dma += hr;

	rx_buf->addr = xdp_umem_get_data(umem, handle);
	rx_buf->addr += hr;

	rx_buf->handle = (u64)handle + umem->headroom;
}

/**
 * ice_alloc_buf_fast_zc - Retrieve buffer address from XDP umem
 * @rx_ring: ring with an xdp_umem bound to it
 * @rx_buf: buffer to which xsk page address will be assigned
 *
 * This function allocates an Rx buffer in the hot path.
 * The buffer can come from fill queue or recycle queue.
 *
 * Returns true if an assignment was successful, false if not.
 */
static __always_inline bool
ice_alloc_buf_fast_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	void *addr = rx_buf->addr;
	u64 handle, hr;

	if (addr) {
		rx_ring->rx_stats.page_reuse_count++;
		return true;
	}

	if (!xsk_umem_peek_addr(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	hr = umem->headroom + XDP_PACKET_HEADROOM;

	rx_buf->dma = xdp_umem_get_dma(umem, handle);
	rx_buf->dma += hr;

	rx_buf->addr = xdp_umem_get_data(umem, handle);
	rx_buf->addr += hr;

	rx_buf->handle = handle + umem->headroom;

	xsk_umem_release_addr(umem);
	return true;
}

/**
 * ice_alloc_buf_slow_zc - Retrieve buffer address from XDP umem
 * @rx_ring: ring with an xdp_umem bound to it
 * @rx_buf: buffer to which xsk page address will be assigned
 *
 * This function allocates an Rx buffer in the slow path.
 * The buffer can come from fill queue or recycle queue.
 *
 * Returns true if an assignment was successful, false if not.
 */
static __always_inline bool
ice_alloc_buf_slow_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf)
{
	struct xdp_umem *umem = rx_ring->xsk_umem;
	u64 handle, headroom;

	if (!xsk_umem_peek_addr_rq(umem, &handle)) {
		rx_ring->rx_stats.alloc_page_failed++;
		return false;
	}

	handle &= umem->chunk_mask;
	headroom = umem->headroom + XDP_PACKET_HEADROOM;

	rx_buf->dma = xdp_umem_get_dma(umem, handle);
	rx_buf->dma += headroom;

	rx_buf->addr = xdp_umem_get_data(umem, handle);
	rx_buf->addr += headroom;

	rx_buf->handle = handle + umem->headroom;

	xsk_umem_release_addr_rq(umem);
	return true;
}
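
/* The fast variant above trusts the driver-side recycle cache
 * (rx_buf->addr) and peeks only the fill queue (xsk_umem_peek_addr());
 * the slow variant uses the _rq helpers, which also fall back to the
 * umem's reuse queue, e.g. after ice_xsk_clean_rx_ring() has parked
 * handles there.
 */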

/**
 * ice_alloc_rx_bufs_zc - allocate a number of Rx buffers
 * @rx_ring: Rx ring
 * @count: The number of buffers to allocate
 * @alloc: the function pointer to call for allocation
 *
 * This function allocates a number of Rx buffers from the fill ring
 * or the internal recycle mechanism and places them on the Rx ring.
 *
 * Returns false if all allocations were successful, true if any fail.
 */
static bool
ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, int count,
		     bool alloc(struct ice_ring *, struct ice_rx_buf *))
{
	union ice_32b_rx_flex_desc *rx_desc;
	u16 ntu = rx_ring->next_to_use;
	struct ice_rx_buf *rx_buf;
	bool ret = false;

	if (!count)
		return false;

	rx_desc = ICE_RX_DESC(rx_ring, ntu);
	rx_buf = &rx_ring->rx_buf[ntu];

	do {
		if (!alloc(rx_ring, rx_buf)) {
			ret = true;
			break;
		}

		dma_sync_single_range_for_device(rx_ring->dev, rx_buf->dma, 0,
						 rx_ring->rx_buf_len,
						 DMA_BIDIRECTIONAL);

		rx_desc->read.pkt_addr = cpu_to_le64(rx_buf->dma);
		rx_desc->wb.status_error0 = 0;

		rx_desc++;
		rx_buf++;
		ntu++;

		if (unlikely(ntu == rx_ring->count)) {
			rx_desc = ICE_RX_DESC(rx_ring, 0);
			rx_buf = rx_ring->rx_buf;
			ntu = 0;
		}
	} while (--count);

	if (rx_ring->next_to_use != ntu)
		ice_release_rx_desc(rx_ring, ntu);

	return ret;
}

/**
 * ice_alloc_rx_bufs_fast_zc - allocate zero copy bufs in the hot path
 * @rx_ring: Rx ring
 * @count: number of bufs to allocate
 *
 * Returns false on success, true on failure.
 */
static bool ice_alloc_rx_bufs_fast_zc(struct ice_ring *rx_ring, u16 count)
{
	return ice_alloc_rx_bufs_zc(rx_ring, count,
				    ice_alloc_buf_fast_zc);
}

/**
 * ice_alloc_rx_bufs_slow_zc - allocate zero copy bufs in the slow path
 * @rx_ring: Rx ring
 * @count: number of bufs to allocate
 *
 * Returns false on success, true on failure.
 */
bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count)
{
	return ice_alloc_rx_bufs_zc(rx_ring, count,
				    ice_alloc_buf_slow_zc);
}

/**
 * ice_bump_ntc - Bump the next_to_clean counter of an Rx ring
 * @rx_ring: Rx ring
 */
static void ice_bump_ntc(struct ice_ring *rx_ring)
{
	int ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(ICE_RX_DESC(rx_ring, ntc));
}

/**
 * ice_get_rx_buf_zc - Fetch the current Rx buffer
 * @rx_ring: Rx ring
 * @size: size of a buffer
 *
 * This function returns the current, received Rx buffer and does
 * DMA synchronization.
 *
 * Returns a pointer to the received Rx buffer.
 */
static struct ice_rx_buf *ice_get_rx_buf_zc(struct ice_ring *rx_ring, int size)
{
	struct ice_rx_buf *rx_buf;

	rx_buf = &rx_ring->rx_buf[rx_ring->next_to_clean];

	dma_sync_single_range_for_cpu(rx_ring->dev, rx_buf->dma, 0,
				      size, DMA_BIDIRECTIONAL);

	return rx_buf;
}

/**
 * ice_reuse_rx_buf_zc - reuse an Rx buffer
 * @rx_ring: Rx ring
 * @old_buf: The buffer to recycle
 *
 * This function recycles a finished Rx buffer, and places it on the recycle
 * queue (next_to_alloc).
 */
static void
ice_reuse_rx_buf_zc(struct ice_ring *rx_ring, struct ice_rx_buf *old_buf)
{
	unsigned long mask = (unsigned long)rx_ring->xsk_umem->chunk_mask;
	u64 hr = rx_ring->xsk_umem->headroom + XDP_PACKET_HEADROOM;
	u16 nta = rx_ring->next_to_alloc;
	struct ice_rx_buf *new_buf;

	new_buf = &rx_ring->rx_buf[nta++];
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	new_buf->dma = old_buf->dma & mask;
	new_buf->dma += hr;

	new_buf->addr = (void *)((unsigned long)old_buf->addr & mask);
	new_buf->addr += hr;

	new_buf->handle = old_buf->handle & mask;
	new_buf->handle += rx_ring->xsk_umem->headroom;

	old_buf->addr = NULL;
}

/**
 * ice_construct_skb_zc - Create an sk_buff from zero-copy buffer
 * @rx_ring: Rx ring
 * @rx_buf: zero-copy Rx buffer
 * @xdp: XDP buffer
 *
 * This function allocates a new skb from a zero-copy Rx buffer.
 *
 * Returns the skb on success, NULL on failure.
 */
static struct sk_buff *
ice_construct_skb_zc(struct ice_ring *rx_ring, struct ice_rx_buf *rx_buf,
		     struct xdp_buff *xdp)
{
	unsigned int metasize = xdp->data - xdp->data_meta;
	unsigned int datasize = xdp->data_end - xdp->data;
	unsigned int datasize_hard = xdp->data_end -
				     xdp->data_hard_start;
	struct sk_buff *skb;

	skb = __napi_alloc_skb(&rx_ring->q_vector->napi, datasize_hard,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, xdp->data - xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

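	/* the payload now lives in the skb, so the umem chunk can be
	 * recycled for the hardware right away
	 */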
	ice_reuse_rx_buf_zc(rx_ring, rx_buf);

	return skb;
}

/**
 * ice_run_xdp_zc - Executes an XDP program in zero-copy path
 * @rx_ring: Rx ring
 * @xdp: xdp_buff used as input to the XDP program
 *
 * Returns any of ICE_XDP_{PASS, CONSUMED, TX, REDIR}
 */
static int
ice_run_xdp_zc(struct ice_ring *rx_ring, struct xdp_buff *xdp)
{
	int err, result = ICE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ice_ring *xdp_ring;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	if (!xdp_prog) {
		rcu_read_unlock();
		return ICE_XDP_PASS;
	}

	act = bpf_prog_run_xdp(xdp_prog, xdp);
	xdp->handle += xdp->data - xdp->data_hard_start;
	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->q_index];
		result = ice_xmit_xdp_buff(xdp, xdp_ring);
		break;
	case XDP_REDIRECT:
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		result = !err ? ICE_XDP_REDIR : ICE_XDP_CONSUMED;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		/* fallthrough -- not supported action */
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		/* fallthrough -- handle aborts by dropping frame */
	case XDP_DROP:
		result = ICE_XDP_CONSUMED;
		break;
	}

	rcu_read_unlock();
	return result;
}

/**
 * ice_clean_rx_irq_zc - consumes packets from the hardware ring
 * @rx_ring: AF_XDP Rx ring
 * @budget: NAPI budget
 *
 * Returns number of processed packets on success, remaining budget on failure.
 */
int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	u16 cleaned_count = ICE_DESC_UNUSED(rx_ring);
	unsigned int xdp_xmit = 0;
	struct xdp_buff xdp;
	bool failure = false;

	xdp.rxq = &rx_ring->xdp_rxq;

	while (likely(total_rx_packets < (unsigned int)budget)) {
		union ice_32b_rx_flex_desc *rx_desc;
		unsigned int size, xdp_res = 0;
		struct ice_rx_buf *rx_buf;
		struct sk_buff *skb;
		u16 stat_err_bits;
		u16 vlan_tag = 0;
		u8 rx_ptype;

		if (cleaned_count >= ICE_RX_BUF_WRITE) {
			failure |= ice_alloc_rx_bufs_fast_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = ICE_RX_DESC(rx_ring, rx_ring->next_to_clean);

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_DD_S);
		if (!ice_test_staterr(rx_desc, stat_err_bits))
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we have
		 * verified the descriptor has been written back.
		 */
		dma_rmb();

		size = le16_to_cpu(rx_desc->wb.pkt_len) &
		       ICE_RX_FLX_DESC_PKT_LEN_M;
		if (!size)
			break;

		rx_buf = ice_get_rx_buf_zc(rx_ring, size);
		if (!rx_buf->addr)
			break;

		xdp.data = rx_buf->addr;
		xdp.data_meta = xdp.data;
		xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM;
		xdp.data_end = xdp.data + size;
		xdp.handle = rx_buf->handle;

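		/* xdp.handle carries the chunk's umem offset;
		 * ice_run_xdp_zc() re-biases it by the final data offset
		 * before the buffer is transmitted or redirected
		 */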
		xdp_res = ice_run_xdp_zc(rx_ring, &xdp);
		if (xdp_res) {
			if (xdp_res & (ICE_XDP_TX | ICE_XDP_REDIR)) {
				xdp_xmit |= xdp_res;
				rx_buf->addr = NULL;
			} else {
				ice_reuse_rx_buf_zc(rx_ring, rx_buf);
			}

			total_rx_bytes += size;
			total_rx_packets++;
			cleaned_count++;

			ice_bump_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ice_construct_skb_zc(rx_ring, rx_buf, &xdp);
		if (!skb) {
			rx_ring->rx_stats.alloc_buf_failed++;
			break;
		}

		cleaned_count++;
		ice_bump_ntc(rx_ring);

		if (eth_skb_pad(skb)) {
			skb = NULL;
			continue;
		}

		total_rx_bytes += skb->len;
		total_rx_packets++;

		stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_L2TAG1P_S);
		if (ice_test_staterr(rx_desc, stat_err_bits))
			vlan_tag = le16_to_cpu(rx_desc->wb.l2tag1);

		rx_ptype = le16_to_cpu(rx_desc->wb.ptype_flex_flags0) &
			   ICE_RX_FLEX_DESC_PTYPE_M;

		ice_process_skb_fields(rx_ring, rx_desc, skb, rx_ptype);
		ice_receive_skb(rx_ring, skb, vlan_tag);
	}

	ice_finalize_xdp_rx(rx_ring, xdp_xmit);
	ice_update_rx_ring_stats(rx_ring, total_rx_packets, total_rx_bytes);

	return failure ? budget : (int)total_rx_packets;
}

/**
 * ice_xmit_zc - Transmits frames from the AF_XDP Tx queue
 * @xdp_ring: XDP Tx ring
 * @budget: max number of frames to xmit
 *
 * Returns true if transmission is done, i.e. the budget was not exhausted
 * and the ring did not fill up.
 */
static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
{
	struct ice_tx_desc *tx_desc = NULL;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;

	while (likely(budget-- > 0)) {
		struct ice_tx_buf *tx_buf;

		if (unlikely(!ICE_DESC_UNUSED(xdp_ring))) {
			xdp_ring->tx_stats.tx_busy++;
			work_done = false;
			break;
		}

		tx_buf = &xdp_ring->tx_buf[xdp_ring->next_to_use];

		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
			break;

		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);

		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
					   DMA_BIDIRECTIONAL);

		tx_buf->bytecount = desc.len;

		tx_desc = ICE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->buf_addr = cpu_to_le64(dma);
		tx_desc->cmd_type_offset_bsz = build_ctob(ICE_TXD_LAST_DESC_CMD,
							  0, desc.len, 0);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

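	/* post the whole batch with a single tail bump and only then tell
	 * the umem how many Tx descriptors were consumed
	 */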
	if (tx_desc) {
		ice_xdp_ring_update_tail(xdp_ring);
		xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
	}

	return budget > 0 && work_done;
}

/**
 * ice_clean_xdp_tx_buf - Free and unmap XDP Tx buffer
 * @xdp_ring: XDP Tx ring
 * @tx_buf: Tx buffer to clean
 */
static void
ice_clean_xdp_tx_buf(struct ice_ring *xdp_ring, struct ice_tx_buf *tx_buf)
{
	xdp_return_frame((struct xdp_frame *)tx_buf->raw_buf);
	dma_unmap_single(xdp_ring->dev, dma_unmap_addr(tx_buf, dma),
			 dma_unmap_len(tx_buf, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buf, len, 0);
}

/**
 * ice_clean_tx_irq_zc - Completes AF_XDP entries, and cleans XDP entries
 * @xdp_ring: XDP Tx ring
 * @budget: NAPI budget
 *
 * Returns true if cleanup/transmission is done.
 */
bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
{
	int total_packets = 0, total_bytes = 0;
	s16 ntc = xdp_ring->next_to_clean;
	struct ice_tx_desc *tx_desc;
	struct ice_tx_buf *tx_buf;
	u32 xsk_frames = 0;
	bool xmit_done;

	tx_desc = ICE_TX_DESC(xdp_ring, ntc);
	tx_buf = &xdp_ring->tx_buf[ntc];
	ntc -= xdp_ring->count;

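	/* ntc is biased by -count so the wrap check inside the loop is a
	 * cheap !ntc test instead of a compare against the ring size; the
	 * bias is removed again after the loop
	 */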
	do {
		if (!(tx_desc->cmd_type_offset_bsz &
		      cpu_to_le64(ICE_TX_DESC_DTYPE_DESC_DONE)))
			break;

		total_bytes += tx_buf->bytecount;
		total_packets++;

		if (tx_buf->raw_buf) {
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
			tx_buf->raw_buf = NULL;
		} else {
			xsk_frames++;
		}

		tx_desc->cmd_type_offset_bsz = 0;
		tx_buf++;
		tx_desc++;
		ntc++;

		if (unlikely(!ntc)) {
			ntc -= xdp_ring->count;
			tx_buf = xdp_ring->tx_buf;
			tx_desc = ICE_TX_DESC(xdp_ring, 0);
		}

		prefetch(tx_desc);

	} while (likely(--budget));

	ntc += xdp_ring->count;
	xdp_ring->next_to_clean = ntc;

	if (xsk_frames)
		xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);

	ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
	xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);

	return budget > 0 && xmit_done;
}

/**
 * ice_xsk_wakeup - Implements ndo_xsk_wakeup
 * @netdev: net_device
 * @queue_id: queue to wake up
 * @flags: ignored in our case, since we have Rx and Tx in the same NAPI
 *
 * Returns negative on error, zero otherwise.
 */
int
ice_xsk_wakeup(struct net_device *netdev, u32 queue_id,
	       u32 __always_unused flags)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_q_vector *q_vector;
	struct ice_vsi *vsi = np->vsi;
	struct ice_ring *ring;

	if (test_bit(__ICE_DOWN, vsi->state))
		return -ENETDOWN;

	if (!ice_is_xdp_ena_vsi(vsi))
		return -ENXIO;

	if (queue_id >= vsi->num_txq)
		return -ENXIO;

	if (!vsi->xdp_rings[queue_id]->xsk_umem)
		return -ENXIO;

	ring = vsi->xdp_rings[queue_id];

	/* The idea here is that if NAPI is running, mark a miss, so
	 * it will run again. If not, trigger an interrupt and
	 * schedule the NAPI from interrupt context. If NAPI would be
	 * scheduled here, the interrupt affinity would not be
	 * honored.
	 */
	q_vector = ring->q_vector;
	if (!napi_if_scheduled_mark_missed(&q_vector->napi))
		ice_trigger_sw_intr(&vsi->back->hw, q_vector);

	return 0;
}

/**
 * ice_xsk_any_rx_ring_ena - Checks if Rx rings have AF_XDP UMEM attached
 * @vsi: VSI to be checked
 *
 * Returns true if any of the Rx rings has an AF_XDP UMEM attached
 */
bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi)
{
	int i;

	if (!vsi->xsk_umems)
		return false;

	for (i = 0; i < vsi->num_xsk_umems; i++) {
		if (vsi->xsk_umems[i])
			return true;
	}

	return false;
}

/**
 * ice_xsk_clean_rx_ring - clean UMEM queues connected to a given Rx ring
 * @rx_ring: ring to be cleaned
 */
void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring)
{
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		struct ice_rx_buf *rx_buf = &rx_ring->rx_buf[i];

		if (!rx_buf->addr)
			continue;

		xsk_umem_fq_reuse(rx_ring->xsk_umem, rx_buf->handle);
		rx_buf->addr = NULL;
	}
}

/**
 * ice_xsk_clean_xdp_ring - Clean the XDP Tx ring and its UMEM queues
 * @xdp_ring: XDP_Tx ring
 */
void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring)
{
	u16 ntc = xdp_ring->next_to_clean, ntu = xdp_ring->next_to_use;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		struct ice_tx_buf *tx_buf = &xdp_ring->tx_buf[ntc];

		if (tx_buf->raw_buf)
			ice_clean_xdp_tx_buf(xdp_ring, tx_buf);
		else
			xsk_frames++;

		tx_buf->raw_buf = NULL;

		ntc++;
		if (ntc >= xdp_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
}