// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/dma-mapping.h>
#include "mt76.h"
#include "dma.h"

#if IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)

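/*
 * Ring registers are accessed either directly via MMIO or, when the queue is
 * owned by the WED (Wireless Ethernet Dispatch) block, through the WED
 * register window at q->wed_regs. Q_READ()/Q_WRITE() hide that choice, so
 * e.g. Q_WRITE(dev, q, cpu_idx, q->head) rings the doorbell on either path.
 */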
#define Q_READ(_dev, _q, _field) ({					\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	u32 _val;							\
	if ((_q)->flags & MT_QFLAG_WED)					\
		_val = mtk_wed_device_reg_read(&(_dev)->mmio.wed,	\
					       ((_q)->wed_regs +	\
						_offset));		\
	else								\
		_val = readl(&(_q)->regs->_field);			\
	_val;								\
})

#define Q_WRITE(_dev, _q, _field, _val) do {				\
	u32 _offset = offsetof(struct mt76_queue_regs, _field);	\
	if ((_q)->flags & MT_QFLAG_WED)					\
		mtk_wed_device_reg_write(&(_dev)->mmio.wed,		\
					 ((_q)->wed_regs + _offset),	\
					 _val);				\
	else								\
		writel(_val, &(_q)->regs->_field);			\
} while (0)

#else

#define Q_READ(_dev, _q, _field)	readl(&(_q)->regs->_field)
#define Q_WRITE(_dev, _q, _field, _val)	writel(_val, &(_q)->regs->_field)

#endif

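/*
 * A txwi buffer holds the per-packet hardware TX descriptor
 * (dev->drv->txwi_size bytes) followed by its struct mt76_txwi_cache
 * bookkeeping entry, in a single cache-line-aligned allocation. The
 * descriptor part is DMA-mapped once here and the mapping kept for the
 * buffer's lifetime; buffers are recycled through dev->txwi_cache.
 */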
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;
	dma_addr_t addr;
	u8 *txwi;
	int size;

	size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
	txwi = kzalloc(size, GFP_ATOMIC);
	if (!txwi)
		return NULL;

	addr = dma_map_single(dev->dma_dev, txwi, dev->drv->txwi_size,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
		kfree(txwi);
		return NULL;
	}

	t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
	t->dma_addr = addr;

	return t;
}

static struct mt76_txwi_cache *
mt76_alloc_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	t = kzalloc(L1_CACHE_ALIGN(sizeof(*t)), GFP_ATOMIC);
	if (!t)
		return NULL;

	t->ptr = NULL;
	return t;
}

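/*
 * Recycling helpers: txwi entries live on dev->txwi_cache under dev->lock,
 * rxwi entries on dev->rxwi_cache under dev->wed_lock. The get helpers pop
 * a cached entry and fall back to a fresh allocation when the list is empty.
 */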
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->lock);
	if (!list_empty(&dev->txwi_cache)) {
		t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->lock);

	return t;
}

static struct mt76_txwi_cache *
__mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = NULL;

	spin_lock(&dev->wed_lock);
	if (!list_empty(&dev->rxwi_cache)) {
		t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
				     list);
		list_del(&t->list);
	}
	spin_unlock(&dev->wed_lock);

	return t;
}

static struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

	if (t)
		return t;

	return mt76_alloc_txwi(dev);
}

struct mt76_txwi_cache *
mt76_get_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t = __mt76_get_rxwi(dev);

	if (t)
		return t;

	return mt76_alloc_rxwi(dev);
}
EXPORT_SYMBOL_GPL(mt76_get_rxwi);

void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->lock);
	list_add(&t->list, &dev->txwi_cache);
	spin_unlock(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

void
mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
	if (!t)
		return;

	spin_lock(&dev->wed_lock);
	list_add(&t->list, &dev->rxwi_cache);
	spin_unlock(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);

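/*
 * Teardown: drain the recycling lists, dropping the long-lived txwi DMA
 * mapping and freeing any rx fragment still attached to an rxwi entry.
 */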
static void
mt76_free_pending_txwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_txwi(dev)) != NULL) {
		dma_unmap_single(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				 DMA_TO_DEVICE);
		kfree(mt76_get_txwi_ptr(dev, t));
	}
	local_bh_enable();
}

static void
mt76_free_pending_rxwi(struct mt76_dev *dev)
{
	struct mt76_txwi_cache *t;

	local_bh_disable();
	while ((t = __mt76_get_rxwi(dev)) != NULL) {
		if (t->ptr)
			skb_free_frag(t->ptr);
		kfree(t);
	}
	local_bh_enable();
}

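/*
 * mt76_dma_sync_idx() reprograms the ring base/size and resynchronizes the
 * software head/tail with the hardware DMA index; mt76_dma_queue_reset()
 * additionally hands every descriptor back to the CPU (DMA_DONE) and clears
 * both hardware indices.
 */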
static void
mt76_dma_sync_idx(struct mt76_dev *dev, struct mt76_queue *q)
{
	Q_WRITE(dev, q, desc_base, q->desc_dma);
	Q_WRITE(dev, q, ring_size, q->ndesc);
	q->head = Q_READ(dev, q, dma_idx);
	q->tail = q->head;
}

static void
mt76_dma_queue_reset(struct mt76_dev *dev, struct mt76_queue *q)
{
	int i;

	if (!q || !q->ndesc)
		return;

	/* clear descriptors */
	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	Q_WRITE(dev, q, cpu_idx, 0);
	Q_WRITE(dev, q, dma_idx, 0);
	mt76_dma_sync_idx(dev, q);
}

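/*
 * Post a single rx buffer. On WED rx queues the buffer is tracked through an
 * rxwi entry plus a token stored in buf1 (MT_DMA_CTL_TOKEN), so the
 * completion path can find it again; mt76_rx_token_consume() is expected to
 * return the token id or a negative error.
 */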
static int
mt76_dma_add_rx_buf(struct mt76_dev *dev, struct mt76_queue *q,
		    struct mt76_queue_buf *buf, void *data)
{
	struct mt76_desc *desc = &q->desc[q->head];
	struct mt76_queue_entry *entry = &q->entry[q->head];
	struct mt76_txwi_cache *txwi = NULL;
	u32 buf1 = 0, ctrl;
	int idx = q->head;
	int rx_token;

	ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);

	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
		txwi = mt76_get_rxwi(dev);
		if (!txwi)
			return -ENOMEM;

		rx_token = mt76_rx_token_consume(dev, data, txwi, buf->addr);
		if (rx_token < 0) {
			mt76_put_rxwi(dev, txwi);
			return -ENOMEM;
		}

		buf1 |= FIELD_PREP(MT_DMA_CTL_TOKEN, rx_token);
		ctrl |= MT_DMA_CTL_TO_HOST;
	}

	WRITE_ONCE(desc->buf0, cpu_to_le32(buf->addr));
	WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
	WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
	WRITE_ONCE(desc->info, 0);

	entry->dma_addr[0] = buf->addr;
	entry->dma_len[0] = buf->len;
	entry->txwi = txwi;
	entry->buf = data;
	entry->wcid = 0xffff;
	entry->skip_buf1 = true;
	q->head = (q->head + 1) % q->ndesc;
	q->queued++;

	return idx;
}

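/*
 * Queue a scatter list of buffers. Each descriptor carries up to two buffers
 * (buf0/buf1 with SD_LEN0/SD_LEN1), so nbufs entries consume
 * (nbufs + 1) / 2 descriptors; MT_DMA_CTL_LAST_SEC0/1 marks whichever slot
 * holds the final fragment. A 3-buffer frame, for example, takes two
 * descriptors, with the second getting LAST_SEC0.
 */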
static int
mt76_dma_add_buf(struct mt76_dev *dev, struct mt76_queue *q,
		 struct mt76_queue_buf *buf, int nbufs, u32 info,
		 struct sk_buff *skb, void *txwi)
{
	struct mt76_queue_entry *entry;
	struct mt76_desc *desc;
	int i, idx = -1;
	u32 ctrl, next;

	if (txwi) {
		q->entry[q->head].txwi = DMA_DUMMY_DATA;
		q->entry[q->head].skip_buf0 = true;
	}

	for (i = 0; i < nbufs; i += 2, buf += 2) {
		u32 buf0 = buf[0].addr, buf1 = 0;

		idx = q->head;
		next = (q->head + 1) % q->ndesc;

		desc = &q->desc[idx];
		entry = &q->entry[idx];

		if (buf[0].skip_unmap)
			entry->skip_buf0 = true;
		entry->skip_buf1 = i == nbufs - 1;

		entry->dma_addr[0] = buf[0].addr;
		entry->dma_len[0] = buf[0].len;

		ctrl = FIELD_PREP(MT_DMA_CTL_SD_LEN0, buf[0].len);
		if (i < nbufs - 1) {
			entry->dma_addr[1] = buf[1].addr;
			entry->dma_len[1] = buf[1].len;
			buf1 = buf[1].addr;
			ctrl |= FIELD_PREP(MT_DMA_CTL_SD_LEN1, buf[1].len);
			if (buf[1].skip_unmap)
				entry->skip_buf1 = true;
		}

		if (i == nbufs - 1)
			ctrl |= MT_DMA_CTL_LAST_SEC0;
		else if (i == nbufs - 2)
			ctrl |= MT_DMA_CTL_LAST_SEC1;

		WRITE_ONCE(desc->buf0, cpu_to_le32(buf0));
		WRITE_ONCE(desc->buf1, cpu_to_le32(buf1));
		WRITE_ONCE(desc->info, cpu_to_le32(info));
		WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));

		q->head = next;
		q->queued++;
	}

	q->entry[idx].txwi = txwi;
	q->entry[idx].skb = skb;
	q->entry[idx].wcid = 0xffff;

	return idx;
}

static void
mt76_dma_tx_cleanup_idx(struct mt76_dev *dev, struct mt76_queue *q, int idx,
			struct mt76_queue_entry *prev_e)
{
	struct mt76_queue_entry *e = &q->entry[idx];

	if (!e->skip_buf0)
		dma_unmap_single(dev->dma_dev, e->dma_addr[0], e->dma_len[0],
				 DMA_TO_DEVICE);

	if (!e->skip_buf1)
		dma_unmap_single(dev->dma_dev, e->dma_addr[1], e->dma_len[1],
				 DMA_TO_DEVICE);

	if (e->txwi == DMA_DUMMY_DATA)
		e->txwi = NULL;

	if (e->skb == DMA_DUMMY_DATA)
		e->skb = NULL;

	*prev_e = *e;
	memset(e, 0, sizeof(*e));
}

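/*
 * Doorbell: the wmb() makes the descriptor writes visible to the device
 * before the cpu_idx write transfers ownership of the new entries.
 */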
static void
mt76_dma_kick_queue(struct mt76_dev *dev, struct mt76_queue *q)
{
	wmb();
	Q_WRITE(dev, q, cpu_idx, q->head);
}

static void
mt76_dma_tx_cleanup(struct mt76_dev *dev, struct mt76_queue *q, bool flush)
{
	struct mt76_queue_entry entry;
	int last;

	if (!q || !q->ndesc)
		return;

	spin_lock_bh(&q->cleanup_lock);
	if (flush)
		last = -1;
	else
		last = Q_READ(dev, q, dma_idx);

	while (q->queued > 0 && q->tail != last) {
		mt76_dma_tx_cleanup_idx(dev, q, q->tail, &entry);
		mt76_queue_tx_complete(dev, q, &entry);

		if (entry.txwi) {
			if (!(dev->drv->drv_flags & MT_DRV_TXWI_NO_FREE))
				mt76_put_txwi(dev, entry.txwi);
		}

		if (!flush && q->tail == last)
			last = Q_READ(dev, q, dma_idx);
	}
	spin_unlock_bh(&q->cleanup_lock);

	if (flush) {
		spin_lock_bh(&q->lock);
		mt76_dma_sync_idx(dev, q);
		mt76_dma_kick_queue(dev, q);
		spin_unlock_bh(&q->lock);
	}

	if (!q->queued)
		wake_up(&dev->tx_wait);
}

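/*
 * Completion counterpart of mt76_dma_add_rx_buf(): on WED rx queues the
 * token in buf1 is exchanged back for its rxwi entry via
 * mt76_rx_token_release(), the fragment unmapped and the entry recycled;
 * on normal queues the buffer comes straight from the queue entry.
 */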
static void *
mt76_dma_get_buf(struct mt76_dev *dev, struct mt76_queue *q, int idx,
		 int *len, u32 *info, bool *more, bool *drop)
{
	struct mt76_queue_entry *e = &q->entry[idx];
	struct mt76_desc *desc = &q->desc[idx];
	void *buf;

	if (len) {
		u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));
		*len = FIELD_GET(MT_DMA_CTL_SD_LEN0, ctrl);
		*more = !(ctrl & MT_DMA_CTL_LAST_SEC0);
	}

	if (info)
		*info = le32_to_cpu(desc->info);

	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX) {
		u32 token = FIELD_GET(MT_DMA_CTL_TOKEN,
				      le32_to_cpu(desc->buf1));
		struct mt76_txwi_cache *t = mt76_rx_token_release(dev, token);

		if (!t)
			return NULL;

		dma_unmap_single(dev->dma_dev, t->dma_addr,
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);

		buf = t->ptr;
		t->dma_addr = 0;
		t->ptr = NULL;

		mt76_put_rxwi(dev, t);

		if (drop) {
			u32 ctrl = le32_to_cpu(READ_ONCE(desc->ctrl));

			*drop = !!(ctrl & (MT_DMA_CTL_TO_HOST_A |
					   MT_DMA_CTL_DROP));
		}
	} else {
		buf = e->buf;
		e->buf = NULL;
		dma_unmap_single(dev->dma_dev, e->dma_addr[0],
				 SKB_WITH_OVERHEAD(q->buf_size),
				 DMA_FROM_DEVICE);
	}

	return buf;
}

static void *
mt76_dma_dequeue(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
		 int *len, u32 *info, bool *more, bool *drop)
{
	int idx = q->tail;

	*more = false;
	if (!q->queued)
		return NULL;

	if (flush)
		q->desc[idx].ctrl |= cpu_to_le32(MT_DMA_CTL_DMA_DONE);
	else if (!(q->desc[idx].ctrl & cpu_to_le32(MT_DMA_CTL_DMA_DONE)))
		return NULL;

	q->tail = (q->tail + 1) % q->ndesc;
	q->queued--;

	return mt76_dma_get_buf(dev, q, idx, len, info, more, drop);
}

static int
mt76_dma_tx_queue_skb_raw(struct mt76_dev *dev, struct mt76_queue *q,
			  struct sk_buff *skb, u32 tx_info)
{
	struct mt76_queue_buf buf = {};
	dma_addr_t addr;

	if (q->queued + 1 >= q->ndesc - 1)
		goto error;

	addr = dma_map_single(dev->dma_dev, skb->data, skb->len,
			      DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto error;

	buf.addr = addr;
	buf.len = skb->len;

	spin_lock_bh(&q->lock);
	mt76_dma_add_buf(dev, q, &buf, 1, tx_info, skb, NULL);
	mt76_dma_kick_queue(dev, q);
	spin_unlock_bh(&q->lock);

	return 0;

error:
	dev_kfree_skb(skb);
	return -ENOMEM;
}

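/*
 * Main TX path: take a txwi from the cache, map the skb head and each
 * fragment, let the driver build the hardware descriptor through
 * tx_prepare_skb() (bracketed by dma_sync calls, since the txwi mapping is
 * persistent), then chain txwi + data buffers onto the ring. The ring-space
 * check uses (nbuf + 1) / 2 because each descriptor holds two buffers.
 */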
static int
mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q,
		      enum mt76_txq_id qid, struct sk_buff *skb,
		      struct mt76_wcid *wcid, struct ieee80211_sta *sta)
{
	struct ieee80211_tx_status status = {
		.sta = sta,
	};
	struct mt76_tx_info tx_info = {
		.skb = skb,
	};
	struct ieee80211_hw *hw;
	int len, n = 0, ret = -ENOMEM;
	struct mt76_txwi_cache *t;
	struct sk_buff *iter;
	dma_addr_t addr;
	u8 *txwi;

	t = mt76_get_txwi(dev);
	if (!t)
		goto free_skb;

	txwi = mt76_get_txwi_ptr(dev, t);

	skb->prev = skb->next = NULL;
	if (dev->drv->drv_flags & MT_DRV_TX_ALIGNED4_SKBS)
		mt76_insert_hdr_pad(skb);

	len = skb_headlen(skb);
	addr = dma_map_single(dev->dma_dev, skb->data, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
		goto free;

	tx_info.buf[n].addr = t->dma_addr;
	tx_info.buf[n++].len = dev->drv->txwi_size;
	tx_info.buf[n].addr = addr;
	tx_info.buf[n++].len = len;

	skb_walk_frags(skb, iter) {
		if (n == ARRAY_SIZE(tx_info.buf))
			goto unmap;

		addr = dma_map_single(dev->dma_dev, iter->data, iter->len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr)))
			goto unmap;

		tx_info.buf[n].addr = addr;
		tx_info.buf[n++].len = iter->len;
	}
	tx_info.nbuf = n;

	if (q->queued + (tx_info.nbuf + 1) / 2 >= q->ndesc - 1) {
		ret = -ENOMEM;
		goto unmap;
	}

	dma_sync_single_for_cpu(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				DMA_TO_DEVICE);
	ret = dev->drv->tx_prepare_skb(dev, txwi, qid, wcid, sta, &tx_info);
	dma_sync_single_for_device(dev->dma_dev, t->dma_addr, dev->drv->txwi_size,
				   DMA_TO_DEVICE);
	if (ret < 0)
		goto unmap;

	return mt76_dma_add_buf(dev, q, tx_info.buf, tx_info.nbuf,
				tx_info.info, tx_info.skb, t);

unmap:
	for (n--; n > 0; n--)
		dma_unmap_single(dev->dma_dev, tx_info.buf[n].addr,
				 tx_info.buf[n].len, DMA_TO_DEVICE);

free:
#ifdef CONFIG_NL80211_TESTMODE
	/* fix tx_done accounting on queue overflow */
	if (mt76_is_testmode_skb(dev, skb, &hw)) {
		struct mt76_phy *phy = hw->priv;

		if (tx_info.skb == phy->test.tx_skb)
			phy->test.tx_done--;
	}
#endif

	mt76_put_txwi(dev, t);

free_skb:
	status.skb = tx_info.skb;
	hw = mt76_tx_status_get_hw(dev, tx_info.skb);
	ieee80211_tx_status_ext(hw, &status);

	return ret;
}

static struct page_frag_cache *
mt76_dma_rx_get_frag_cache(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page_frag_cache *rx_page = &q->rx_page;

#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	if ((q->flags & MT_QFLAG_WED) &&
	    FIELD_GET(MT_QFLAG_WED_TYPE, q->flags) == MT76_WED_Q_RX)
		rx_page = &dev->mmio.wed.rx_buf_ring.rx_page;
#endif
	return rx_page;
}

static int
mt76_dma_rx_fill(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page_frag_cache *rx_page = mt76_dma_rx_get_frag_cache(dev, q);
	int len = SKB_WITH_OVERHEAD(q->buf_size);
	int frames = 0, offset = q->buf_offset;
	dma_addr_t addr;

	if (!q->ndesc)
		return 0;

	spin_lock_bh(&q->lock);

	while (q->queued < q->ndesc - 1) {
		struct mt76_queue_buf qbuf;
		void *buf = NULL;

		buf = page_frag_alloc(rx_page, q->buf_size, GFP_ATOMIC);
		if (!buf)
			break;

		addr = dma_map_single(dev->dma_dev, buf, len, DMA_FROM_DEVICE);
		if (unlikely(dma_mapping_error(dev->dma_dev, addr))) {
			skb_free_frag(buf);
			break;
		}

		qbuf.addr = addr + offset;
		qbuf.len = len - offset;
		qbuf.skip_unmap = false;
		if (mt76_dma_add_rx_buf(dev, q, &qbuf, buf) < 0) {
			dma_unmap_single(dev->dma_dev, addr, len,
					 DMA_FROM_DEVICE);
			skb_free_frag(buf);
			break;
		}
		frames++;
	}

	if (frames)
		mt76_dma_kick_queue(dev, q);

	spin_unlock_bh(&q->lock);

	return frames;
}

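/*
 * When a WED device is active, hand ring ownership over to it: tx and rx
 * rings are registered with WED and their doorbells redirected through
 * q->wed_regs. The txfree ring has to be reset and pre-filled before
 * registration, hence the temporary clearing of q->flags.
 */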
static int
mt76_dma_wed_setup(struct mt76_dev *dev, struct mt76_queue *q)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
	struct mtk_wed_device *wed = &dev->mmio.wed;
	int ret, type, ring;
	u8 flags = q->flags;

	if (!mtk_wed_device_active(wed))
		q->flags &= ~MT_QFLAG_WED;

	if (!(q->flags & MT_QFLAG_WED))
		return 0;

	type = FIELD_GET(MT_QFLAG_WED_TYPE, q->flags);
	ring = FIELD_GET(MT_QFLAG_WED_RING, q->flags);

	switch (type) {
	case MT76_WED_Q_TX:
		ret = mtk_wed_device_tx_ring_setup(wed, ring, q->regs, false);
		if (!ret)
			q->wed_regs = wed->tx_ring[ring].reg_base;
		break;
	case MT76_WED_Q_TXFREE:
		/* WED txfree queue needs ring to be initialized before setup */
		q->flags = 0;
		mt76_dma_queue_reset(dev, q);
		mt76_dma_rx_fill(dev, q);
		q->flags = flags;

		ret = mtk_wed_device_txfree_ring_setup(wed, q->regs);
		if (!ret)
			q->wed_regs = wed->txfree_ring.reg_base;
		break;
	case MT76_WED_Q_RX:
		ret = mtk_wed_device_rx_ring_setup(wed, ring, q->regs, false);
		if (!ret)
			q->wed_regs = wed->rx_ring[ring].reg_base;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
#else
	return 0;
#endif
}

static int
mt76_dma_alloc_queue(struct mt76_dev *dev, struct mt76_queue *q,
		     int idx, int n_desc, int bufsize,
		     u32 ring_base)
{
	int ret, size;

	spin_lock_init(&q->lock);
	spin_lock_init(&q->cleanup_lock);

	q->regs = dev->mmio.regs + ring_base + idx * MT_RING_SIZE;
	q->ndesc = n_desc;
	q->buf_size = bufsize;
	q->hw_idx = idx;

	size = q->ndesc * sizeof(struct mt76_desc);
	q->desc = dmam_alloc_coherent(dev->dma_dev, size, &q->desc_dma, GFP_KERNEL);
	if (!q->desc)
		return -ENOMEM;

	size = q->ndesc * sizeof(*q->entry);
	q->entry = devm_kzalloc(dev->dev, size, GFP_KERNEL);
	if (!q->entry)
		return -ENOMEM;

	ret = mt76_dma_wed_setup(dev, q);
	if (ret)
		return ret;

	if (q->flags != MT_WED_Q_TXFREE)
		mt76_dma_queue_reset(dev, q);

	return 0;
}

static void
mt76_dma_rx_cleanup(struct mt76_dev *dev, struct mt76_queue *q)
{
	struct page *page;
	void *buf;
	bool more;

	if (!q->ndesc)
		return;

	spin_lock_bh(&q->lock);
	do {
		buf = mt76_dma_dequeue(dev, q, true, NULL, NULL, &more, NULL);
		if (!buf)
			break;

		skb_free_frag(buf);
	} while (1);
	spin_unlock_bh(&q->lock);

	if (!q->rx_page.va)
		return;

	page = virt_to_page(q->rx_page.va);
	__page_frag_cache_drain(page, q->rx_page.pagecnt_bias);
	memset(&q->rx_page, 0, sizeof(q->rx_page));
}

static void
mt76_dma_rx_reset(struct mt76_dev *dev, enum mt76_rxq_id qid)
{
	struct mt76_queue *q = &dev->q_rx[qid];
	int i;

	if (!q->ndesc)
		return;

	for (i = 0; i < q->ndesc; i++)
		q->desc[i].ctrl = cpu_to_le32(MT_DMA_CTL_DMA_DONE);

	mt76_dma_rx_cleanup(dev, q);
	mt76_dma_sync_idx(dev, q);
	mt76_dma_rx_fill(dev, q);

	if (!q->rx_head)
		return;

	dev_kfree_skb(q->rx_head);
	q->rx_head = NULL;
}

static void
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
		  int len, bool more, u32 info)
{
	struct sk_buff *skb = q->rx_head;
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;

	if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
		struct page *page = virt_to_head_page(data);
		int offset = data - page_address(page) + q->buf_offset;

		skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
	} else {
		skb_free_frag(data);
	}

	if (more)
		return;

	q->rx_head = NULL;
	if (nr_frags < ARRAY_SIZE(shinfo->frags))
		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
	else
		dev_kfree_skb(skb);
}

static int
mt76_dma_rx_process(struct mt76_dev *dev, struct mt76_queue *q, int budget)
{
	int len, data_len, done = 0, dma_idx;
	struct sk_buff *skb;
	unsigned char *data;
	bool check_ddone = false;
	bool more;

	if (IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED) &&
	    q->flags == MT_WED_Q_TXFREE) {
		dma_idx = Q_READ(dev, q, dma_idx);
		check_ddone = true;
	}

	while (done < budget) {
		bool drop = false;
		u32 info;

		if (check_ddone) {
			if (q->tail == dma_idx)
				dma_idx = Q_READ(dev, q, dma_idx);

			if (q->tail == dma_idx)
				break;
		}

		data = mt76_dma_dequeue(dev, q, false, &len, &info, &more,
					&drop);
		if (!data)
			break;

		if (drop)
			goto free_frag;

		if (q->rx_head)
			data_len = q->buf_size;
		else
			data_len = SKB_WITH_OVERHEAD(q->buf_size);

		if (data_len < len + q->buf_offset) {
			dev_kfree_skb(q->rx_head);
			q->rx_head = NULL;
			goto free_frag;
		}

		if (q->rx_head) {
			mt76_add_fragment(dev, q, data, len, more, info);
			continue;
		}

		if (!more && dev->drv->rx_check &&
		    !(dev->drv->rx_check(dev, data, len)))
			goto free_frag;

		skb = build_skb(data, q->buf_size);
		if (!skb)
			goto free_frag;

		skb_reserve(skb, q->buf_offset);

		*(u32 *)skb->cb = info;

		__skb_put(skb, len);
		done++;

		if (more) {
			q->rx_head = skb;
			continue;
		}

		dev->drv->rx_skb(dev, q - dev->q_rx, skb, &info);
		continue;

free_frag:
		skb_free_frag(data);
	}

	mt76_dma_rx_fill(dev, q);
	return done;
}

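/*
 * NAPI poll: drain up to budget frames from the rx ring, deliver them via
 * the driver rx_skb() hook, and re-arm interrupts through rx_poll_complete()
 * once the queue is quiet.
 */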
int mt76_dma_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev;
	int qid, done = 0, cur;

	dev = container_of(napi->dev, struct mt76_dev, napi_dev);
	qid = napi - dev->napi;

	rcu_read_lock();

	do {
		cur = mt76_dma_rx_process(dev, &dev->q_rx[qid], budget - done);
		mt76_rx_poll_complete(dev, qid, napi);
		done += cur;
	} while (cur && done < budget);

	rcu_read_unlock();

	if (done < budget && napi_complete(napi))
		dev->drv->rx_poll_complete(dev, qid);

	return done;
}
EXPORT_SYMBOL_GPL(mt76_dma_rx_poll);

static int
mt76_dma_init(struct mt76_dev *dev,
	      int (*poll)(struct napi_struct *napi, int budget))
{
	int i;

	init_dummy_netdev(&dev->napi_dev);
	init_dummy_netdev(&dev->tx_napi_dev);
	snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s",
		 wiphy_name(dev->hw->wiphy));
	dev->napi_dev.threaded = 1;

	mt76_for_each_q_rx(dev, i) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], poll);
		mt76_dma_rx_fill(dev, &dev->q_rx[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}

static const struct mt76_queue_ops mt76_dma_ops = {
	.init = mt76_dma_init,
	.alloc = mt76_dma_alloc_queue,
	.reset_q = mt76_dma_queue_reset,
	.tx_queue_skb_raw = mt76_dma_tx_queue_skb_raw,
	.tx_queue_skb = mt76_dma_tx_queue_skb,
	.tx_cleanup = mt76_dma_tx_cleanup,
	.rx_cleanup = mt76_dma_rx_cleanup,
	.rx_reset = mt76_dma_rx_reset,
	.kick = mt76_dma_kick_queue,
};

void mt76_dma_attach(struct mt76_dev *dev)
{
	dev->queue_ops = &mt76_dma_ops;
}
EXPORT_SYMBOL_GPL(mt76_dma_attach);

void mt76_dma_cleanup(struct mt76_dev *dev)
{
	int i;

	mt76_worker_disable(&dev->tx_worker);
	netif_napi_del(&dev->tx_napi);

	for (i = 0; i < ARRAY_SIZE(dev->phys); i++) {
		struct mt76_phy *phy = dev->phys[i];
		int j;

		if (!phy)
			continue;

		for (j = 0; j < ARRAY_SIZE(phy->q_tx); j++)
			mt76_dma_tx_cleanup(dev, phy->q_tx[j], true);
	}

	for (i = 0; i < ARRAY_SIZE(dev->q_mcu); i++)
		mt76_dma_tx_cleanup(dev, dev->q_mcu[i], true);

	mt76_for_each_q_rx(dev, i) {
		netif_napi_del(&dev->napi[i]);
		/* clean up unconditionally: gating this on the WED type
		 * field would skip plain rx queues and leak their buffers
		 */
		mt76_dma_rx_cleanup(dev, &dev->q_rx[i]);
	}

	mt76_free_pending_txwi(dev);
	mt76_free_pending_rxwi(dev);

	if (mtk_wed_device_active(&dev->mmio.wed))
		mtk_wed_device_detach(&dev->mmio.wed);
}
EXPORT_SYMBOL_GPL(mt76_dma_cleanup);