// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-udma.h"
#include "k3-psil-priv.h"

struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS		1024
#define K3_UDMA_DEFAULT_RING_SIZE	16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5

struct udma_chan;

enum k3_dma_type {
	DMA_TYPE_UDMA = 0,
	DMA_TYPE_BCDMA,
	DMA_TYPE_PKTDMA,
};

enum udma_mmr {
	MMR_GCFG = 0,
	MMR_BCHANRT,
	MMR_RCHANRT,
	MMR_TCHANRT,
	MMR_LAST,
};

static const char * const mmr_names[] = {
	[MMR_GCFG] = "gcfg",
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
};

struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */

};

#define udma_bchan udma_tchan

struct udma_rflow {
	int id;
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
};

struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32		BIT(0)
#define UDMA_FLAG_PDMA_BURST		BIT(1)
#define UDMA_FLAG_TDTYPE		BIT(2)
#define UDMA_FLAG_BURST_SIZE		BIT(3)
#define UDMA_FLAGS_J7_CLASS		(UDMA_FLAG_PDMA_ACC32 | \
					 UDMA_FLAG_PDMA_BURST | \
					 UDMA_FLAG_TDTYPE | \
					 UDMA_FLAG_BURST_SIZE)

struct udma_match_data {
	enum k3_dma_type type;
	u32 psil_base;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	u8 burst_size[3];
};

struct udma_soc_data {
	struct udma_oes_offsets oes;
	u32 bcdma_trigger_event_offset;
};

struct udma_hwdesc {
	size_t cppi5_desc_size;
	void *cppi5_desc_vaddr;
	dma_addr_t cppi5_desc_paddr;

	/* TR descriptor internal pointers */
	void *tr_req_base;
	struct cppi5_tr_resp_t *tr_resp_base;
};

struct udma_rx_flush {
	struct udma_hwdesc hwdescs[2];

	size_t buffer_size;
	void *buffer_vaddr;
	dma_addr_t buffer_paddr;
};

struct udma_tpl {
	u8 levels;
	u32 start_idx[3];
};

struct udma_dev {
	struct dma_device ddev;
	struct device *dev;
	void __iomem *mmrs[MMR_LAST];
	const struct udma_match_data *match_data;
	const struct udma_soc_data *soc_data;

	struct udma_tpl bchan_tpl;
	struct udma_tpl tchan_tpl;
	struct udma_tpl rchan_tpl;

	size_t desc_align; /* alignment to use for descriptors */

	struct udma_tisci_rm tisci_rm;

	struct k3_ringacc *ringacc;

	struct work_struct purge_work;
	struct list_head desc_to_purge;
	spinlock_t lock;

	struct udma_rx_flush rx_flush;

	int bchan_cnt;
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	int tflow_cnt;
	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_gp_map;
	unsigned long *rflow_gp_map_allocated;
	unsigned long *rflow_in_use;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;
	u32 atype;
	u32 asel;
};

struct udma_desc {
	struct virt_dma_desc vd;

	bool terminated;

	enum dma_transfer_direction dir;

	struct udma_static_tr static_tr;
	u32 residue;

	unsigned int sglen;
	unsigned int desc_idx; /* Only used for cyclic in packet mode */
	unsigned int tr_idx;

	u32 metadata_size;
	void *metadata; /* pointer to provided metadata buffer (EPIB, psdata) */

	unsigned int hwdesc_count;
	struct udma_hwdesc hwdesc[];
};

enum udma_chan_state {
	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};

struct udma_tx_drain {
	struct delayed_work work;
	ktime_t tstamp;
	u32 residue;
};

struct udma_chan_config {
	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* whether EPIB is needed for the communication */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;
	u32 atype;
	u32 asel;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	bool enable_acc32;
	bool enable_burst;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	u32 tr_trigger_type;
	unsigned long tx_flags;

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_transfer_direction dir;
};

struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct udma_dev *ud;
	struct device *dma_dev;
	struct udma_desc *desc;
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;
	char *name;

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	bool psil_paired;

	int irq_num_ring;
	int irq_num_udma;

	bool cyclic;
	bool paused;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
	bool use_dma_pool;
	struct dma_pool *hdesc_pool;

	u32 id;
};

static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
	return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
	return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct udma_desc, vd.tx);
}

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = readl(base + reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		writel(tmp, base + reg);
}
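
/*
 * Illustrative sketch (not part of the driver flow): udma_update_bits() is a
 * read-modify-write helper, so a caller can flip a single bit while leaving
 * the rest of the register intact, e.g. setting only the PAUSE bit in the
 * RT control register:
 *
 *	udma_update_bits(uc->tchan->reg_rt, UDMA_CHAN_RT_CTL_REG,
 *			 UDMA_CHAN_RT_CTL_PAUSE, UDMA_CHAN_RT_CTL_PAUSE);
 *
 * The write is skipped entirely when the masked value is already in place.
 */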

/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->tchan)
		return 0;
	return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->tchan)
		return;
	udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->tchan)
		return;
	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->rchan)
		return 0;
	return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->rchan)
		return;
	udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->rchan)
		return;
	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
}

static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			     u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}
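
/*
 * Note on the pairing convention (sketch, based on the k3-psil definitions):
 * PSI-L destination threads are distinguished from source threads by a
 * dedicated offset bit (K3_PSIL_DST_THREAD_ID_OFFSET, 0x8000 on current K3
 * SoCs), which is why both helpers above OR it into dst_thread before
 * handing the pair to the TI-SCI resource manager, e.g.:
 *
 *	navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
 */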

static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
{
	struct device *chan_dev = &chan->dev->device;

	if (asel == 0) {
		/* No special handling for the channel */
		chan->dev->chan_dma_dev = false;

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	} else if (asel == 14 || asel == 15) {
		chan->dev->chan_dma_dev = true;

		chan_dev->dma_coherent = true;
		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
		chan_dev->dma_parms = chan_dev->parent->dma_parms;
	} else {
		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	}
}

static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
{
	int i;

	for (i = 0; i < tpl_map->levels; i++) {
		if (chan_id >= tpl_map->start_idx[i])
			return i;
	}

	return 0;
}
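
/*
 * Worked example (illustrative values only): with levels = 2 and
 * start_idx = { 4, 0 }, channel ids 4 and above map to TPL index 0 while
 * ids 0-3 map to TPL index 1. The start_idx array is therefore expected to
 * hold descending start offsets, one per throughput level.
 */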

static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->config.mapped_channel_id = -1;
	uc->config.default_flow_id = -1;
	uc->state = UDMA_CHAN_IS_IDLE;
}

static void udma_dump_chan_stdata(struct udma_chan *uc)
{
	struct device *dev = uc->ud->dev;
	u32 offset;
	int i;

	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "TCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
				udma_tchanrt_read(uc, offset));
		}
	}

	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "RCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
				udma_rchanrt_read(uc, offset));
		}
	}
}

static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
						    int idx)
{
	return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
	return d->hwdesc[idx].cppi5_desc_vaddr;
}

static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
						   dma_addr_t paddr)
{
	struct udma_desc *d = uc->terminated_desc;

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);

		if (desc_paddr != paddr)
			d = NULL;
	}

	if (!d) {
		d = uc->desc;
		if (d) {
			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								d->desc_idx);

			if (desc_paddr != paddr)
				d = NULL;
		}
	}

	return d;
}

static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
	if (uc->use_dma_pool) {
		int i;

		for (i = 0; i < d->hwdesc_count; i++) {
			if (!d->hwdesc[i].cppi5_desc_vaddr)
				continue;

			dma_pool_free(uc->hdesc_pool,
				      d->hwdesc[i].cppi5_desc_vaddr,
				      d->hwdesc[i].cppi5_desc_paddr);

			d->hwdesc[i].cppi5_desc_vaddr = NULL;
		}
	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
				  d->hwdesc[0].cppi5_desc_vaddr,
				  d->hwdesc[0].cppi5_desc_paddr);

		d->hwdesc[0].cppi5_desc_vaddr = NULL;
	}
}

static void udma_purge_desc_work(struct work_struct *work)
{
	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&ud->lock, flags);
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);

		udma_free_hwdesc(uc, d);
		list_del(&vd->node);
		kfree(d);
	}

	/* If more to purge, schedule the work again */
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
}

static void udma_desc_free(struct virt_dma_desc *vd)
{
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
	unsigned long flags;

	if (uc->terminated_desc == d)
		uc->terminated_desc = NULL;

	if (uc->use_dma_pool) {
		udma_free_hwdesc(uc, d);
		kfree(d);
		return;
	}

	spin_lock_irqsave(&ud->lock, flags);
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);

	schedule_work(&ud->purge_work);
}

static bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	if (uc->tchan)
		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
	if (uc->rchan)
		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}

static bool udma_is_chan_paused(struct udma_chan *uc)
{
	u32 val, pause_mask;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_DEV:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_MEM:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
		break;
	default:
		return false;
	}

	if (val & pause_mask)
		return true;

	return false;
}

static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}
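
/*
 * Note: rx_flush keeps two pre-built flush descriptors, hwdescs[0] for TR
 * mode and hwdescs[1] for packet mode, so the bool pkt_mode can be used
 * directly as the array index above.
 */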

static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;
	struct k3_ring *ring = NULL;
	dma_addr_t paddr;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->fd_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->t_ring;
		break;
	default:
		return -EINVAL;
	}

	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
	if (idx == -1) {
		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
	} else {
		paddr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
	}

	return k3_ringacc_ring_push(ring, &paddr);
}

static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
{
	if (uc->config.dir != DMA_DEV_TO_MEM)
		return false;

	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
		return true;

	return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_ring *ring = NULL;
	int ret;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		return -ENOENT;
	}

	ret = k3_ringacc_ring_pop(ring, addr);
	if (ret)
		return ret;

	rmb(); /* Ensure that reads are not moved before this point */

	/* Teardown completion */
	if (cppi5_desc_is_tdcm(*addr))
		return 0;

	/* Check for flush descriptor */
	if (udma_desc_is_rx_flush(uc, *addr))
		return -ENOENT;

	return 0;
}

static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_ring *ring1 = NULL;
	struct k3_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (uc->rchan) {
			ring1 = uc->rflow->fd_ring;
			ring2 = uc->rflow->r_ring;
		}
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		if (uc->tchan) {
			ring1 = uc->tchan->t_ring;
			ring2 = uc->tchan->tc_ring;
		}
		break;
	default:
		break;
	}

	if (ring1)
		k3_ringacc_ring_reset_dma(ring1,
					  k3_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_ringacc_ring_reset(ring2);

	/* make sure we are not leaking memory by a stalled descriptor */
	if (uc->terminated_desc) {
		udma_desc_free(&uc->terminated_desc->vd);
		uc->terminated_desc = NULL;
	}
}

static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
{
	if (uc->desc->dir == DMA_DEV_TO_MEM) {
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
		if (uc->config.ep_type != PSIL_EP_NATIVE)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	} else {
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
		if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}

static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		if (!uc->bchan) {
			val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
		}
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}

static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Reset all counters */
	udma_reset_counters(uc);

	/* Hard reset: free and re-initialize the channel resources */
	if (hard) {
		struct udma_chan_config ucc_backup;
		int ret;

		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

		/* restore the channel configuration */
		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
		if (ret)
			return ret;

		/*
		 * Setting forced teardown after forced reset helps to recover
		 * the rchan.
		 */
		if (uc->config.dir == DMA_DEV_TO_MEM)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
					   UDMA_CHAN_RT_CTL_EN |
					   UDMA_CHAN_RT_CTL_TDOWN |
					   UDMA_CHAN_RT_CTL_FTDOWN);
	}
	uc->state = UDMA_CHAN_IS_IDLE;

	return 0;
}

static void udma_start_desc(struct udma_chan *uc)
{
	struct udma_chan_config *ucc = &uc->config;

	if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
	    (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
		int i;

		/*
		 * UDMA only: push all descriptors to the ring for packet mode
		 * cyclic or RX.
		 * PKTDMA supports pre-linked descriptors and cyclic is not
		 * supported.
		 */
		for (i = 0; i < uc->desc->sglen; i++)
			udma_push_to_ring(uc, i);
	} else {
		udma_push_to_ring(uc, 0);
	}
}

static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
	/* Only PDMAs have staticTR */
	if (uc->config.ep_type == PSIL_EP_NATIVE)
		return false;

	/* Check if the staticTR configuration has changed for TX */
	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
		return true;

	return false;
}

static int udma_start(struct udma_chan *uc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

	if (!vd) {
		uc->desc = NULL;
		return -ENOENT;
	}

	list_del(&vd->node);

	uc->desc = to_udma_desc(&vd->tx);

	/* Channel is already running and does not need reconfiguration */
	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
		udma_start_desc(uc);
		goto out;
	}

	/* Make sure that we clear the teardown bit, if it is set */
	udma_reset_chan(uc, false);

	/* Push descriptors before we start the channel */
	udma_start_desc(uc);

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
			const struct udma_match_data *match_data =
							uc->ud->match_data;

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_rchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			udma_rchanrt_write(uc,
				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
						 match_data->statictr_z_mask));

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		break;
	case DMA_MEM_TO_DEV:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_tchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		/* Enable remote */
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	uc->state = UDMA_CHAN_IS_ACTIVE;
out:

	return 0;
}

static int udma_stop(struct udma_chan *uc)
{
	enum udma_chan_state old_state = uc->state;

	uc->state = UDMA_CHAN_IS_TERMINATING;
	reinit_completion(&uc->teardown_completed);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (!uc->cyclic && !uc->desc)
			udma_push_to_ring(uc, -1);

		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_TEARDOWN);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_FLUSH);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	default:
		uc->state = old_state;
		complete_all(&uc->teardown_completed);
		return -EINVAL;
	}

	return 0;
}
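
/*
 * Note on the DEV_TO_MEM branch above: when the channel is stopped while
 * idle (no active descriptor, not cyclic), udma_push_to_ring(uc, -1) queues
 * the pre-allocated rx_flush descriptor so the peer has somewhere to land
 * in-flight data and the teardown can complete.
 */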

static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
	struct udma_desc *d = uc->desc;
	struct cppi5_host_desc_t *h_desc;

	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
	cppi5_hdesc_reset_to_original(h_desc);
	udma_push_to_ring(uc, d->desc_idx);
	d->desc_idx = (d->desc_idx + 1) % d->sglen;
}

static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	memcpy(d->metadata, h_desc->epib, d->metadata_size);
}

static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
	u32 peer_bcnt, bcnt;

	/*
	 * Only TX towards PDMA is affected.
	 * If DMA_PREP_INTERRUPT is not set by the consumer then skip the
	 * transfer completion calculation; the consumer must ensure that
	 * there is no stale data in the DMA fabric in this case.
	 */
	if (uc->config.ep_type == PSIL_EP_NATIVE ||
	    uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
		return true;

	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

	/* Transfer is incomplete, store current residue and time stamp */
	if (peer_bcnt < bcnt) {
		uc->tx_drain.residue = bcnt - peer_bcnt;
		uc->tx_drain.tstamp = ktime_get();
		return false;
	}

	return true;
}

static void udma_check_tx_completion(struct work_struct *work)
{
	struct udma_chan *uc = container_of(work, typeof(*uc),
					    tx_drain.work.work);
	bool desc_done = true;
	u32 residue_diff;
	ktime_t time_diff;
	unsigned long delay;

	while (1) {
		if (uc->desc) {
			/* Get previous residue and time stamp */
			residue_diff = uc->tx_drain.residue;
			time_diff = uc->tx_drain.tstamp;
			/*
			 * Get current residue and time stamp or see if
			 * transfer is complete
			 */
			desc_done = udma_is_desc_really_done(uc, uc->desc);
		}

		if (!desc_done) {
			/*
			 * Find the time delta and residue delta w.r.t
			 * previous poll
			 */
			time_diff = ktime_sub(uc->tx_drain.tstamp,
					      time_diff) + 1;
			residue_diff -= uc->tx_drain.residue;
			if (residue_diff) {
				/*
				 * Try to guess when we should check
				 * next time by calculating rate at
				 * which data is being drained at the
				 * peer device
				 */
				delay = (time_diff / residue_diff) *
					uc->tx_drain.residue;
			} else {
				/* No progress, check again in 1 second */
				schedule_delayed_work(&uc->tx_drain.work, HZ);
				break;
			}

			usleep_range(ktime_to_us(delay),
				     ktime_to_us(delay) + 10);
			continue;
		}

		if (uc->desc) {
			struct udma_desc *d = uc->desc;

			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
			break;
		}

		break;
	}
}
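
/*
 * Worked example for the drain-rate estimate above (illustrative numbers
 * only): if 1000 bytes drained in 2 ms between two polls, the rate is
 * 2 us/byte; with 500 bytes still outstanding the next check is delayed by
 * roughly 500 * 2 us = 1 ms, i.e. delay = (time_diff / residue_diff) *
 * residue.
 */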

static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;
	dma_addr_t paddr = 0;

	if (udma_pop_from_ring(uc, &paddr) || !paddr)
		return IRQ_HANDLED;

	spin_lock(&uc->vc.lock);

	/* Teardown completion message */
	if (cppi5_desc_is_tdcm(paddr)) {
		complete_all(&uc->teardown_completed);

		if (uc->terminated_desc) {
			udma_desc_free(&uc->terminated_desc->vd);
			uc->terminated_desc = NULL;
		}

		if (!uc->desc)
			udma_start(uc);

		goto out;
	}

	d = udma_udma_desc_from_paddr(uc, paddr);

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
		if (desc_paddr != paddr) {
			dev_err(uc->ud->dev, "not matching descriptors!\n");
			goto out;
		}

		if (d == uc->desc) {
			/* active descriptor */
			if (uc->cyclic) {
				udma_cyclic_packet_elapsed(uc);
				vchan_cyclic_callback(&d->vd);
			} else {
				if (udma_is_desc_really_done(uc, d)) {
					udma_decrement_byte_counters(uc, d->residue);
					udma_start(uc);
					vchan_cookie_complete(&d->vd);
				} else {
					schedule_delayed_work(&uc->tx_drain.work,
							      0);
				}
			}
		} else {
			/*
			 * terminated descriptor, mark the descriptor as
			 * completed to update the channel's cookie marker
			 */
			dma_cookie_complete(&d->vd.tx);
		}
	}
out:
	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}

static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;

	spin_lock(&uc->vc.lock);
	d = uc->desc;
	if (d) {
		d->tr_idx = (d->tr_idx + 1) % d->sglen;

		if (uc->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else {
			/* TODO: figure out the real amount of data */
			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}

/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @ud: UDMA device
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate a range of RX flow ids for future use. Those flows can be
 * requested only by explicit flow id number. If @from is set to -1 it will
 * try to find the first free range. If @from is a positive value it will
 * force allocation only of the specified range of flows.
 *
 * Returns -ENOMEM if a free range can't be found.
 * -EEXIST if the requested range is busy.
 * -EINVAL if wrong input values are passed.
 * Returns the flow id on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	int start, tmp_from;
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	tmp_from = from;
	if (tmp_from < 0)
		tmp_from = ud->rchan_cnt;
	/* default flows can't be allocated and are accessible only by id */
	if (tmp_from < ud->rchan_cnt)
		return -EINVAL;

	if (tmp_from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
		  ud->rflow_cnt);

	start = bitmap_find_next_zero_area(tmp,
					   ud->rflow_cnt,
					   tmp_from, cnt, 0);
	if (start >= ud->rflow_cnt)
		return -ENOMEM;

	if (from >= 0 && start != from)
		return -EEXIST;

	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
	return start;
}
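
/*
 * Worked example (illustrative numbers): with rchan_cnt = 4 and
 * rflow_cnt = 16, a call with from = -1, cnt = 2 starts searching at flow
 * id 4 (ids 0-3 are the default, per-rchan flows) and returns the first two
 * consecutive free GP flow ids; from = 6 would demand exactly ids 6-7 and
 * fail with -EEXIST if either is taken.
 */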

static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	if (from < ud->rchan_cnt)
		return -EINVAL;
	if (from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
	return 0;
}
1294static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1295{
1296 /*
1297 * Attempt to request rflow by ID can be made for any rflow
1298 * if not in use with assumption that caller knows what's doing.
1299 * TI-SCI FW will perform additional permission check ant way, it's
1300 * safe
1301 */
1302
1303 if (id < 0 || id >= ud->rflow_cnt)
1304 return ERR_PTR(-ENOENT);
1305
1306 if (test_bit(id, ud->rflow_in_use))
1307 return ERR_PTR(-ENOENT);
1308
1309 if (ud->rflow_gp_map) {
1310 /* GP rflow has to be allocated first */
1311 if (!test_bit(id, ud->rflow_gp_map) &&
1312 !test_bit(id, ud->rflow_gp_map_allocated))
1313 return ERR_PTR(-EINVAL);
1314 }
1315
1316 dev_dbg(ud->dev, "get rflow%d\n", id);
1317 set_bit(id, ud->rflow_in_use);
1318 return &ud->rflows[id];
1319}
1320
1321static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1322{
1323 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1324 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1325 return;
1326 }
1327
1328 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1329 clear_bit(rflow->id, ud->rflow_in_use);
1330}

#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       enum udma_tp_level tpl,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "%s%d is in use\n", #res, id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		int start;						\
									\
		if (tpl >= ud->res##_tpl.levels)			\
			tpl = ud->res##_tpl.levels - 1;			\
									\
		start = ud->res##_tpl.start_idx[tpl];			\
									\
		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
					start);				\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
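
/*
 * The macro above expands three times; UDMA_RESERVE_RESOURCE(tchan), for
 * example, defines __udma_reserve_tchan(), which either claims the exact
 * requested id or scans tchan_map from the TPL's start index, then marks
 * the bit and returns &ud->tchans[id].
 */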

static int bcdma_get_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	enum udma_tp_level tpl;
	int ret;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
			uc->id, uc->bchan->id);
		return 0;
	}

	/*
	 * Use normal channels for peripherals, and highest TPL channel for
	 * mem2mem
	 */
	if (uc->config.tr_trigger_type)
		tpl = 0;
	else
		tpl = ud->bchan_tpl.levels - 1;

	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
	if (IS_ERR(uc->bchan)) {
		ret = PTR_ERR(uc->bchan);
		uc->bchan = NULL;
		return ret;
	}

	uc->tchan = uc->bchan;

	return 0;
}

static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	/*
	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
	 * For PKTDMA mapped channels it is configured to a channel which must
	 * be used to service the peripheral.
	 */
	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan)) {
		ret = PTR_ERR(uc->tchan);
		uc->tchan = NULL;
		return ret;
	}

	if (ud->tflow_cnt) {
		int tflow_id;

		/* Only PKTDMA has support for tx flows */
		if (uc->config.default_flow_id >= 0)
			tflow_id = uc->config.default_flow_id;
		else
			tflow_id = uc->tchan->id;

		if (test_bit(tflow_id, ud->tflow_map)) {
			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
			clear_bit(uc->tchan->id, ud->tchan_map);
			uc->tchan = NULL;
			return -ENOENT;
		}

		uc->tchan->tflow_id = tflow_id;
		set_bit(tflow_id, ud->tflow_map);
	} else {
		uc->tchan->tflow_id = -1;
	}

	return 0;
}

static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	/*
	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
	 * For PKTDMA mapped channels it is configured to a channel which must
	 * be used to service the peripheral.
	 */
	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->rchan)) {
		ret = PTR_ERR(uc->rchan);
		uc->rchan = NULL;
		return ret;
	}

	return 0;
}

static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	/*
	 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
	 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
	 */
	chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
	for (; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	set_bit(chan_id, ud->tchan_map);
	set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	/* UDMA does not use tx flows */
	uc->tchan->tflow_id = -1;

	return 0;
}

static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (!uc->rchan) {
		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
		return -EINVAL;
	}

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	uc->rflow = __udma_get_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow)) {
		ret = PTR_ERR(uc->rflow);
		uc->rflow = NULL;
		return ret;
	}

	return 0;
}

static void bcdma_put_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
			uc->bchan->id);
		clear_bit(uc->bchan->id, ud->bchan_map);
		uc->bchan = NULL;
		uc->tchan = NULL;
	}
}

static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}

static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		clear_bit(uc->tchan->id, ud->tchan_map);

		if (uc->tchan->tflow_id >= 0)
			clear_bit(uc->tchan->tflow_id, ud->tflow_map);

		uc->tchan = NULL;
	}
}

static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__udma_put_rflow(ud, uc->rflow);
		uc->rflow = NULL;
	}
}

static void bcdma_free_bchan_resources(struct udma_chan *uc)
{
	if (!uc->bchan)
		return;

	k3_ringacc_ring_free(uc->bchan->tc_ring);
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->tc_ring = NULL;
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);

	bcdma_put_bchan(uc);
}

static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = bcdma_get_bchan(uc);
	if (ret)
		return ret;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
					    &uc->bchan->t_ring,
					    &uc->bchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
	ring_cfg.asel = ud->asel;
	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);

	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->bchan->tc_ring);
	uc->bchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);
err_ring:
	bcdma_put_bchan(uc);

	return ret;
}

static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_ringacc_ring_free(uc->tchan->t_ring);
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}

static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_tchan *tchan;
	int ring_idx, ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	tchan = uc->tchan;
	if (tchan->tflow_id >= 0)
		ring_idx = tchan->tflow_id;
	else
		ring_idx = ud->bchan_cnt + tchan->id;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
					    &tchan->t_ring,
					    &tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
	} else {
		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
	}

	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_ring:
	udma_put_tchan(uc);

	return ret;
}

static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		struct udma_rflow *rflow = uc->rflow;

		k3_ringacc_ring_free(rflow->fd_ring);
		k3_ringacc_ring_free(rflow->r_ring);
		rflow->fd_ring = NULL;
		rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}

static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct k3_ring_cfg ring_cfg;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	if (uc->config.default_flow_id >= 0)
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);

	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	if (ud->tflow_cnt)
		fd_ring_id = ud->tflow_cnt + rflow->id;
	else
		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
			     uc->rchan->id;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
					    &rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));

	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		if (uc->config.pkt_mode)
			ring_cfg.size = SG_MAX_SEGMENTS;
		else
			ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;

		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
	} else {
		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
	}

	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);

	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}

#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct udma_rchan *rchan = uc->rchan;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	/* Non synchronized - mem to mem type of transfer */
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = ud->atype;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret) {
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
		return ret;
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_rx.rxcq_qnum = tc_ring;
	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_rx.rx_atype = ud->atype;
	if (burst_size) {
		req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_rx.rx_burst_size = burst_size;
	}

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);

	return ret;
}

static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);

	return ret;
}

static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = mode;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	req_tx.tx_fetch_size = fetch_size >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = uc->config.atype;
	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}
1997
1998static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
1999{
2000 struct udma_dev *ud = uc->ud;
2001 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2002 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2003 struct udma_tchan *tchan = uc->tchan;
2004 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2005 int ret;
2006
2007 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2008 req_tx.nav_id = tisci_rm->tisci_dev_id;
2009 req_tx.index = tchan->id;
2010 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2011 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2012 /* wait for peer to complete the teardown for PDMAs */
2013 req_tx.valid_params |=
2014 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2015 req_tx.tx_tdtype = 1;
2016 }
2017
2018 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2019 if (ret)
2020 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2021
2022 return ret;
2023}
2024
2025#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2026
2027static int udma_tisci_rx_channel_config(struct udma_chan *uc)
2028{
2029 struct udma_dev *ud = uc->ud;
2030 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2031 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2032 struct udma_rchan *rchan = uc->rchan;
2033 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
2034 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2035 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2036 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2037 u32 mode, fetch_size;
2038 int ret;
2039
2040 if (uc->config.pkt_mode) {
2041 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2042 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2043 uc->config.psd_size, 0);
2044 } else {
2045 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2046 fetch_size = sizeof(struct cppi5_desc_hdr_t);
2047 }
2048
2049 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2050 req_rx.nav_id = tisci_rm->tisci_dev_id;
2051 req_rx.index = rchan->id;
2052 req_rx.rx_fetch_size = fetch_size >> 2;
2053 req_rx.rxcq_qnum = rx_ring;
2054 req_rx.rx_chan_type = mode;
2055 req_rx.rx_atype = uc->config.atype;
2056
2057 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2058 if (ret) {
2059 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2060 return ret;
2061 }
2062
2063 flow_req.valid_params =
2064 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2065 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2066 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2067 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2068 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2069 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2070 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2071 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2072 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2073 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2074 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2075 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2076 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2077
2078 flow_req.nav_id = tisci_rm->tisci_dev_id;
2079 flow_req.flow_index = rchan->id;
2080
2081 if (uc->config.needs_epib)
2082 flow_req.rx_einfo_present = 1;
2083 else
2084 flow_req.rx_einfo_present = 0;
2085 if (uc->config.psd_size)
2086 flow_req.rx_psinfo_present = 1;
2087 else
2088 flow_req.rx_psinfo_present = 0;
2089 flow_req.rx_error_handling = 1;
2090 flow_req.rx_dest_qnum = rx_ring;
2091 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2092 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2093 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2094 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2095 flow_req.rx_fdq0_sz0_qnum = fd_ring;
2096 flow_req.rx_fdq1_qnum = fd_ring;
2097 flow_req.rx_fdq2_qnum = fd_ring;
2098 flow_req.rx_fdq3_qnum = fd_ring;
2099
2100 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2101
2102 if (ret)
2103 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2104
2105 return ret;
2106}
2107
2108static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2109{
2110 struct udma_dev *ud = uc->ud;
2111 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2112 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2113 struct udma_rchan *rchan = uc->rchan;
2114 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2115 int ret;
2116
2117 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2118 req_rx.nav_id = tisci_rm->tisci_dev_id;
2119 req_rx.index = rchan->id;
2120
2121 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2122 if (ret)
2123 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2124
2125 return ret;
2126}
2127
2128static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2129{
2130 struct udma_dev *ud = uc->ud;
2131 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2132 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2133 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2134 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2135 int ret;
2136
2137 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2138 req_rx.nav_id = tisci_rm->tisci_dev_id;
2139 req_rx.index = uc->rchan->id;
2140
2141 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2142 if (ret) {
2143 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2144 return ret;
2145 }
2146
2147 flow_req.valid_params =
2148 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2149 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2150 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2151
2152 flow_req.nav_id = tisci_rm->tisci_dev_id;
2153 flow_req.flow_index = uc->rflow->id;
2154
2155 if (uc->config.needs_epib)
2156 flow_req.rx_einfo_present = 1;
2157 else
2158 flow_req.rx_einfo_present = 0;
2159 if (uc->config.psd_size)
2160 flow_req.rx_psinfo_present = 1;
2161 else
2162 flow_req.rx_psinfo_present = 0;
2163 flow_req.rx_error_handling = 1;
2164
2165 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2166
2167 if (ret)
2168 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2169 ret);
2170
2171 return ret;
2172}
2173
2174static int udma_alloc_chan_resources(struct dma_chan *chan)
2175{
2176 struct udma_chan *uc = to_udma_chan(chan);
2177 struct udma_dev *ud = to_udma_dev(chan->device);
2178 const struct udma_soc_data *soc_data = ud->soc_data;
2179 struct k3_ring *irq_ring;
2180 u32 irq_udma_idx;
2181 int ret;
2182
2183 uc->dma_dev = ud->dev;
2184
2185 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2186 uc->use_dma_pool = true;
2187 /* in case of MEM_TO_MEM we have a maximum of two TRs */
2188 if (uc->config.dir == DMA_MEM_TO_MEM) {
2189 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2190 sizeof(struct cppi5_tr_type15_t), 2);
2191 uc->config.pkt_mode = false;
2192 }
2193 }
2194
2195 if (uc->use_dma_pool) {
2196 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2197 uc->config.hdesc_size,
2198 ud->desc_align,
2199 0);
2200 if (!uc->hdesc_pool) {
2201 dev_err(ud->ddev.dev,
2202 "Descriptor pool allocation failed\n");
2203 uc->use_dma_pool = false;
2204 ret = -ENOMEM;
2205 goto err_cleanup;
2206 }
2207 }
2208
2209 /*
2210 * Make sure that the completion is in a known state:
2211 * No teardown, the channel is idle
2212 */
2213 reinit_completion(&uc->teardown_completed);
2214 complete_all(&uc->teardown_completed);
2215 uc->state = UDMA_CHAN_IS_IDLE;
2216
2217 switch (uc->config.dir) {
2218 case DMA_MEM_TO_MEM:
2219 /* Non synchronized - mem to mem type of transfer */
2220 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2221 uc->id);
2222
2223 ret = udma_get_chan_pair(uc);
2224 if (ret)
2225 goto err_cleanup;
2226
2227 ret = udma_alloc_tx_resources(uc);
2228 if (ret) {
2229 udma_put_rchan(uc);
2230 goto err_cleanup;
2231 }
2232
2233 ret = udma_alloc_rx_resources(uc);
2234 if (ret) {
2235 udma_free_tx_resources(uc);
2236 goto err_cleanup;
2237 }
2238
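/*
* For MEM_TO_MEM the tchan is paired back to the rchan over PSI-L; the
* destination thread is the rchan's PSI-L thread ID with the DST bit
* set.
*/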
2239 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2240 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2241 K3_PSIL_DST_THREAD_ID_OFFSET;
2242
2243 irq_ring = uc->tchan->tc_ring;
2244 irq_udma_idx = uc->tchan->id;
2245
2246 ret = udma_tisci_m2m_channel_config(uc);
2247 break;
2248 case DMA_MEM_TO_DEV:
2249 /* Slave transfer synchronized - mem to dev (TX) transfer */
2250 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2251 uc->id);
2252
2253 ret = udma_alloc_tx_resources(uc);
2254 if (ret)
2255 goto err_cleanup;
2256
2257 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2258 uc->config.dst_thread = uc->config.remote_thread_id;
2259 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2260
2261 irq_ring = uc->tchan->tc_ring;
2262 irq_udma_idx = uc->tchan->id;
2263
2264 ret = udma_tisci_tx_channel_config(uc);
2265 break;
2266 case DMA_DEV_TO_MEM:
2267 /* Slave transfer synchronized - dev to mem (RX) transfer */
2268 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2269 uc->id);
2270
2271 ret = udma_alloc_rx_resources(uc);
2272 if (ret)
2273 goto err_cleanup;
2274
2275 uc->config.src_thread = uc->config.remote_thread_id;
2276 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2277 K3_PSIL_DST_THREAD_ID_OFFSET;
2278
2279 irq_ring = uc->rflow->r_ring;
2280 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2281
2282 ret = udma_tisci_rx_channel_config(uc);
2283 break;
2284 default:
2285 /* Cannot happen */
2286 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2287 __func__, uc->id, uc->config.dir);
2288 ret = -EINVAL;
2289 goto err_cleanup;
2290
2291 }
2292
2293 /* check if the channel configuration was successful */
2294 if (ret)
2295 goto err_res_free;
2296
2297 if (udma_is_chan_running(uc)) {
2298 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2299 udma_reset_chan(uc, false);
2300 if (udma_is_chan_running(uc)) {
2301 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2302 ret = -EBUSY;
2303 goto err_res_free;
2304 }
2305 }
2306
2307 /* PSI-L pairing */
2308 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2309 if (ret) {
2310 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2311 uc->config.src_thread, uc->config.dst_thread);
2312 goto err_res_free;
2313 }
2314
2315 uc->psil_paired = true;
2316
2317 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2318 if (uc->irq_num_ring <= 0) {
2319 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2320 k3_ringacc_get_ring_id(irq_ring));
2321 ret = -EINVAL;
2322 goto err_psi_free;
2323 }
2324
2325 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2326 IRQF_TRIGGER_HIGH, uc->name, uc);
2327 if (ret) {
2328 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2329 goto err_irq_free;
2330 }
2331
2332 /* Event from UDMA (TR events) only needed for slave TR mode channels */
2333 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2334 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2335 if (uc->irq_num_udma <= 0) {
2336 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2337 irq_udma_idx);
2338 free_irq(uc->irq_num_ring, uc);
2339 ret = -EINVAL;
2340 goto err_irq_free;
2341 }
2342
2343 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2344 uc->name, uc);
2345 if (ret) {
2346 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2347 uc->id);
2348 free_irq(uc->irq_num_ring, uc);
2349 goto err_irq_free;
2350 }
2351 } else {
2352 uc->irq_num_udma = 0;
2353 }
2354
2355 udma_reset_rings(uc);
2356
2357 return 0;
2358
2359err_irq_free:
2360 uc->irq_num_ring = 0;
2361 uc->irq_num_udma = 0;
2362err_psi_free:
2363 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2364 uc->psil_paired = false;
2365err_res_free:
2366 udma_free_tx_resources(uc);
2367 udma_free_rx_resources(uc);
2368err_cleanup:
2369 udma_reset_uchan(uc);
2370
2371 if (uc->use_dma_pool) {
2372 dma_pool_destroy(uc->hdesc_pool);
2373 uc->use_dma_pool = false;
2374 }
2375
2376 return ret;
2377}
2378
2379static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2380{
2381 struct udma_chan *uc = to_udma_chan(chan);
2382 struct udma_dev *ud = to_udma_dev(chan->device);
2383 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2384 u32 irq_udma_idx, irq_ring_idx;
2385 int ret;
2386
2387 /* Only TR mode is supported */
2388 uc->config.pkt_mode = false;
2389
2390 /*
2391 * Make sure that the completion is in a known state:
2392 * No teardown, the channel is idle
2393 */
2394 reinit_completion(&uc->teardown_completed);
2395 complete_all(&uc->teardown_completed);
2396 uc->state = UDMA_CHAN_IS_IDLE;
2397
2398 switch (uc->config.dir) {
2399 case DMA_MEM_TO_MEM:
2400 /* Non synchronized - mem to mem type of transfer */
2401 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2402 uc->id);
2403
2404 ret = bcdma_alloc_bchan_resources(uc);
2405 if (ret)
2406 return ret;
2407
2408 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2409 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2410
2411 ret = bcdma_tisci_m2m_channel_config(uc);
2412 break;
2413 case DMA_MEM_TO_DEV:
2414 /* Slave transfer synchronized - mem to dev (TX) transfer */
2415 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2416 uc->id);
2417
2418 ret = udma_alloc_tx_resources(uc);
2419 if (ret) {
2420 uc->config.remote_thread_id = -1;
2421 return ret;
2422 }
2423
2424 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2425 uc->config.dst_thread = uc->config.remote_thread_id;
2426 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2427
2428 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2429 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2430
2431 ret = bcdma_tisci_tx_channel_config(uc);
2432 break;
2433 case DMA_DEV_TO_MEM:
2434 /* Slave transfer synchronized - dev to mem (RX) transfer */
2435 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2436 uc->id);
2437
2438 ret = udma_alloc_rx_resources(uc);
2439 if (ret) {
2440 uc->config.remote_thread_id = -1;
2441 return ret;
2442 }
2443
2444 uc->config.src_thread = uc->config.remote_thread_id;
2445 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2446 K3_PSIL_DST_THREAD_ID_OFFSET;
2447
2448 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2449 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2450
2451 ret = bcdma_tisci_rx_channel_config(uc);
2452 break;
2453 default:
2454 /* Cannot happen */
2455 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2456 __func__, uc->id, uc->config.dir);
2457 return -EINVAL;
2458 }
2459
2460 /* check if the channel configuration was successful */
2461 if (ret)
2462 goto err_res_free;
2463
2464 if (udma_is_chan_running(uc)) {
2465 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2466 udma_reset_chan(uc, false);
2467 if (udma_is_chan_running(uc)) {
2468 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2469 ret = -EBUSY;
2470 goto err_res_free;
2471 }
2472 }
2473
2474 uc->dma_dev = dmaengine_get_dma_device(chan);
2475 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
2476 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2477 sizeof(struct cppi5_tr_type15_t), 2);
2478
2479 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2480 uc->config.hdesc_size,
2481 ud->desc_align,
2482 0);
2483 if (!uc->hdesc_pool) {
2484 dev_err(ud->ddev.dev,
2485 "Descriptor pool allocation failed\n");
2486 uc->use_dma_pool = false;
2487 ret = -ENOMEM;
2488 goto err_res_free;
2489 }
2490
2491 uc->use_dma_pool = true;
2492 } else if (uc->config.dir != DMA_MEM_TO_MEM) {
2493 /* PSI-L pairing */
2494 ret = navss_psil_pair(ud, uc->config.src_thread,
2495 uc->config.dst_thread);
2496 if (ret) {
2497 dev_err(ud->dev,
2498 "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2499 uc->config.src_thread, uc->config.dst_thread);
2500 goto err_res_free;
2501 }
2502
2503 uc->psil_paired = true;
2504 }
2505
2506 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2507 if (uc->irq_num_ring <= 0) {
2508 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2509 irq_ring_idx);
2510 ret = -EINVAL;
2511 goto err_psi_free;
2512 }
2513
2514 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2515 IRQF_TRIGGER_HIGH, uc->name, uc);
2516 if (ret) {
2517 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2518 goto err_irq_free;
2519 }
2520
2521 /* Event from BCDMA (TR events) only needed for slave channels */
2522 if (is_slave_direction(uc->config.dir)) {
2523 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2524 if (uc->irq_num_udma <= 0) {
2525 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2526 irq_udma_idx);
2527 free_irq(uc->irq_num_ring, uc);
2528 ret = -EINVAL;
2529 goto err_irq_free;
2530 }
2531
2532 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2533 uc->name, uc);
2534 if (ret) {
2535 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2536 uc->id);
2537 free_irq(uc->irq_num_ring, uc);
2538 goto err_irq_free;
2539 }
2540 } else {
2541 uc->irq_num_udma = 0;
2542 }
2543
2544 udma_reset_rings(uc);
2545
2546 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2547 udma_check_tx_completion);
2548 return 0;
2549
2550err_irq_free:
2551 uc->irq_num_ring = 0;
2552 uc->irq_num_udma = 0;
2553err_psi_free:
2554 if (uc->psil_paired)
2555 navss_psil_unpair(ud, uc->config.src_thread,
2556 uc->config.dst_thread);
2557 uc->psil_paired = false;
2558err_res_free:
2559 bcdma_free_bchan_resources(uc);
2560 udma_free_tx_resources(uc);
2561 udma_free_rx_resources(uc);
2562
2563 udma_reset_uchan(uc);
2564
2565 if (uc->use_dma_pool) {
2566 dma_pool_destroy(uc->hdesc_pool);
2567 uc->use_dma_pool = false;
2568 }
2569
2570 return ret;
2571}
2572
2573static int bcdma_router_config(struct dma_chan *chan)
2574{
2575 struct k3_event_route_data *router_data = chan->route_data;
2576 struct udma_chan *uc = to_udma_chan(chan);
2577 u32 trigger_event;
2578
2579 if (!uc->bchan)
2580 return -EINVAL;
2581
2582 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2583 return -EINVAL;
2584
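/*
* Each bchan has two consecutive global trigger events; trigger type 1
* selects the first and type 2 the second, i.e. (illustrative) bchan 3
* with trigger type 2 maps to bcdma_trigger_event_offset + 3 * 2 + 1.
*/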
2585 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2586 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2587
2588 return router_data->set_event(router_data->priv, trigger_event);
2589}
2590
2591static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2592{
2593 struct udma_chan *uc = to_udma_chan(chan);
2594 struct udma_dev *ud = to_udma_dev(chan->device);
2595 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2596 u32 irq_ring_idx;
2597 int ret;
2598
2599 /*
2600 * Make sure that the completion is in a known state:
2601 * No teardown, the channel is idle
2602 */
2603 reinit_completion(&uc->teardown_completed);
2604 complete_all(&uc->teardown_completed);
2605 uc->state = UDMA_CHAN_IS_IDLE;
2606
2607 switch (uc->config.dir) {
2608 case DMA_MEM_TO_DEV:
2609 /* Slave transfer synchronized - mem to dev (TX) transfer */
2610 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2611 uc->id);
2612
2613 ret = udma_alloc_tx_resources(uc);
2614 if (ret) {
2615 uc->config.remote_thread_id = -1;
2616 return ret;
2617 }
2618
2619 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2620 uc->config.dst_thread = uc->config.remote_thread_id;
2621 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2622
2623 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2624
2625 ret = pktdma_tisci_tx_channel_config(uc);
2626 break;
2627 case DMA_DEV_TO_MEM:
2628 /* Slave transfer synchronized - dev to mem (RX) transfer */
2629 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2630 uc->id);
2631
2632 ret = udma_alloc_rx_resources(uc);
2633 if (ret) {
2634 uc->config.remote_thread_id = -1;
2635 return ret;
2636 }
2637
2638 uc->config.src_thread = uc->config.remote_thread_id;
2639 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2640 K3_PSIL_DST_THREAD_ID_OFFSET;
2641
2642 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2643
2644 ret = pktdma_tisci_rx_channel_config(uc);
2645 break;
2646 default:
2647 /* Cannot happen */
2648 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2649 __func__, uc->id, uc->config.dir);
2650 return -EINVAL;
2651 }
2652
2653 /* check if the channel configuration was successful */
2654 if (ret)
2655 goto err_res_free;
2656
2657 if (udma_is_chan_running(uc)) {
2658 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2659 udma_reset_chan(uc, false);
2660 if (udma_is_chan_running(uc)) {
2661 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2662 ret = -EBUSY;
2663 goto err_res_free;
2664 }
2665 }
2666
2667 uc->dma_dev = dmaengine_get_dma_device(chan);
2668 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2669 uc->config.hdesc_size, ud->desc_align,
2670 0);
2671 if (!uc->hdesc_pool) {
2672 dev_err(ud->ddev.dev,
2673 "Descriptor pool allocation failed\n");
2674 uc->use_dma_pool = false;
2675 ret = -ENOMEM;
2676 goto err_res_free;
2677 }
2678
2679 uc->use_dma_pool = true;
2680
2681 /* PSI-L pairing */
2682 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2683 if (ret) {
2684 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2685 uc->config.src_thread, uc->config.dst_thread);
2686 goto err_res_free;
2687 }
2688
2689 uc->psil_paired = true;
2690
2691 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2692 if (uc->irq_num_ring <= 0) {
2693 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2694 irq_ring_idx);
2695 ret = -EINVAL;
2696 goto err_psi_free;
2697 }
2698
2699 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2700 IRQF_TRIGGER_HIGH, uc->name, uc);
2701 if (ret) {
2702 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2703 goto err_irq_free;
2704 }
2705
2706 uc->irq_num_udma = 0;
2707
2708 udma_reset_rings(uc);
2709
2710 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2711 udma_check_tx_completion);
2712
2713 if (uc->tchan)
2714 dev_dbg(ud->dev,
2715 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2716 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2717 uc->config.remote_thread_id);
2718 else if (uc->rchan)
2719 dev_dbg(ud->dev,
2720 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2721 uc->id, uc->rchan->id, uc->rflow->id,
2722 uc->config.remote_thread_id);
2723 return 0;
2724
2725err_irq_free:
2726 uc->irq_num_ring = 0;
2727err_psi_free:
2728 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2729 uc->psil_paired = false;
2730err_res_free:
2731 udma_free_tx_resources(uc);
2732 udma_free_rx_resources(uc);
2733
2734 udma_reset_uchan(uc);
2735
2736 dma_pool_destroy(uc->hdesc_pool);
2737 uc->use_dma_pool = false;
2738
2739 return ret;
2740}
2741
2742static int udma_slave_config(struct dma_chan *chan,
2743 struct dma_slave_config *cfg)
2744{
2745 struct udma_chan *uc = to_udma_chan(chan);
2746
2747 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2748
2749 return 0;
2750}
2751
2752static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2753 size_t tr_size, int tr_count,
2754 enum dma_transfer_direction dir)
2755{
2756 struct udma_hwdesc *hwdesc;
2757 struct cppi5_desc_hdr_t *tr_desc;
2758 struct udma_desc *d;
2759 u32 reload_count = 0;
2760 u32 ring_id;
2761
2762 switch (tr_size) {
2763 case 16:
2764 case 32:
2765 case 64:
2766 case 128:
2767 break;
2768 default:
2769 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2770 return NULL;
2771 }
2772
2773 /* We have only one descriptor containing multiple TRs */
2774 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2775 if (!d)
2776 return NULL;
2777
2778 d->sglen = tr_count;
2779
2780 d->hwdesc_count = 1;
2781 hwdesc = &d->hwdesc[0];
2782
2783 /* Allocate memory for DMA ring descriptor */
2784 if (uc->use_dma_pool) {
2785 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2786 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2787 GFP_NOWAIT,
2788 &hwdesc->cppi5_desc_paddr);
2789 } else {
2790 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2791 tr_count);
2792 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2793 uc->ud->desc_align);
2794 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2795 hwdesc->cppi5_desc_size,
2796 &hwdesc->cppi5_desc_paddr,
2797 GFP_NOWAIT);
2798 }
2799
2800 if (!hwdesc->cppi5_desc_vaddr) {
2801 kfree(d);
2802 return NULL;
2803 }
2804
2805 /* Start of the TR req records */
2806 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2807 /* Start address of the TR response array */
2808 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2809
2810 tr_desc = hwdesc->cppi5_desc_vaddr;
2811
2812 if (uc->cyclic)
2813 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2814
2815 if (dir == DMA_DEV_TO_MEM)
2816 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2817 else
2818 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2819
2820 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2821 cppi5_desc_set_pktids(tr_desc, uc->id,
2822 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2823 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2824
2825 return d;
2826}
2827
2828/**
2829 * udma_get_tr_counters - calculate TR counters for a given length
2830 * @len: Length of the transfer
2831 * @align_to: Preferred alignment
2832 * @tr0_cnt0: First TR icnt0
2833 * @tr0_cnt1: First TR icnt1
2834 * @tr1_cnt0: Second (if used) TR icnt0
2835 *
2836 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2837 * For len >= SZ_64K two TRs are used in a simple way:
2838 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2839 * Second TR: the remaining length (tr1_cnt0)
2840 *
2841 * Return: the number of TRs the length needs (1 or 2), or
2842 * -EINVAL if the length cannot be supported
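*
* Example (illustrative): len = 200000 with align_to = 3 gives
* tr0_cnt0 = SZ_64K - BIT(3) = 65528, tr0_cnt1 = 200000 / 65528 = 3 and
* tr1_cnt0 = 200000 % 65528 = 3416; the function returns 2.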
2843 */
2844static int udma_get_tr_counters(size_t len, unsigned long align_to,
2845 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2846{
2847 if (len < SZ_64K) {
2848 *tr0_cnt0 = len;
2849 *tr0_cnt1 = 1;
2850
2851 return 1;
2852 }
2853
2854 if (align_to > 3)
2855 align_to = 3;
2856
2857realign:
2858 *tr0_cnt0 = SZ_64K - BIT(align_to);
2859 if (len / *tr0_cnt0 >= SZ_64K) {
2860 if (align_to) {
2861 align_to--;
2862 goto realign;
2863 }
2864 return -EINVAL;
2865 }
2866
2867 *tr0_cnt1 = len / *tr0_cnt0;
2868 *tr1_cnt0 = len % *tr0_cnt0;
2869
2870 return 2;
2871}
2872
2873static struct udma_desc *
2874udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2875 unsigned int sglen, enum dma_transfer_direction dir,
2876 unsigned long tx_flags, void *context)
2877{
2878 struct scatterlist *sgent;
2879 struct udma_desc *d;
2880 struct cppi5_tr_type1_t *tr_req = NULL;
2881 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2882 unsigned int i;
2883 size_t tr_size;
2884 int num_tr = 0;
2885 int tr_idx = 0;
2886 u64 asel;
2887
2888 /* estimate the number of TRs we will need */
2889 for_each_sg(sgl, sgent, sglen, i) {
2890 if (sg_dma_len(sgent) < SZ_64K)
2891 num_tr++;
2892 else
2893 num_tr += 2;
2894 }
2895
2896 /* Now allocate and set up the descriptor. */
2897 tr_size = sizeof(struct cppi5_tr_type1_t);
2898 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2899 if (!d)
2900 return NULL;
2901
2902 d->sglen = sglen;
2903
2904 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2905 asel = 0;
2906 else
2907 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2908
2909 tr_req = d->hwdesc[0].tr_req_base;
2910 for_each_sg(sgl, sgent, sglen, i) {
2911 dma_addr_t sg_addr = sg_dma_address(sgent);
2912
2913 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2914 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2915 if (num_tr < 0) {
2916 dev_err(uc->ud->dev, "size %u is not supported\n",
2917 sg_dma_len(sgent));
2918 udma_free_hwdesc(uc, d);
2919 kfree(d);
2920 return NULL;
2921 }
2922
2923 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2924 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2925 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2926
2927 sg_addr |= asel;
2928 tr_req[tr_idx].addr = sg_addr;
2929 tr_req[tr_idx].icnt0 = tr0_cnt0;
2930 tr_req[tr_idx].icnt1 = tr0_cnt1;
2931 tr_req[tr_idx].dim1 = tr0_cnt0;
2932 tr_idx++;
2933
2934 if (num_tr == 2) {
2935 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2936 false, false,
2937 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2938 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2939 CPPI5_TR_CSF_SUPR_EVT);
2940
2941 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2942 tr_req[tr_idx].icnt0 = tr1_cnt0;
2943 tr_req[tr_idx].icnt1 = 1;
2944 tr_req[tr_idx].dim1 = tr1_cnt0;
2945 tr_idx++;
2946 }
2947
2948 d->residue += sg_dma_len(sgent);
2949 }
2950
2951 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2952 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2953
2954 return d;
2955}
2956
2957static struct udma_desc *
2958udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2959 unsigned int sglen,
2960 enum dma_transfer_direction dir,
2961 unsigned long tx_flags, void *context)
2962{
2963 struct scatterlist *sgent;
2964 struct cppi5_tr_type15_t *tr_req = NULL;
2965 enum dma_slave_buswidth dev_width;
2966 u16 tr_cnt0, tr_cnt1;
2967 dma_addr_t dev_addr;
2968 struct udma_desc *d;
2969 unsigned int i;
2970 size_t tr_size, sg_len;
2971 int num_tr = 0;
2972 int tr_idx = 0;
2973 u32 burst, trigger_size, port_window;
2974 u64 asel;
2975
2976 if (dir == DMA_DEV_TO_MEM) {
2977 dev_addr = uc->cfg.src_addr;
2978 dev_width = uc->cfg.src_addr_width;
2979 burst = uc->cfg.src_maxburst;
2980 port_window = uc->cfg.src_port_window_size;
2981 } else if (dir == DMA_MEM_TO_DEV) {
2982 dev_addr = uc->cfg.dst_addr;
2983 dev_width = uc->cfg.dst_addr_width;
2984 burst = uc->cfg.dst_maxburst;
2985 port_window = uc->cfg.dst_port_window_size;
2986 } else {
2987 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2988 return NULL;
2989 }
2990
2991 if (!burst)
2992 burst = 1;
2993
2994 if (port_window) {
2995 if (port_window != burst) {
2996 dev_err(uc->ud->dev,
2997 "The burst must be equal to port_window\n");
2998 return NULL;
2999 }
3000
3001 tr_cnt0 = dev_width * port_window;
3002 tr_cnt1 = 1;
3003 } else {
3004 tr_cnt0 = dev_width;
3005 tr_cnt1 = burst;
3006 }
3007 trigger_size = tr_cnt0 * tr_cnt1;
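/*
* Illustrative example: dev_width = 4 bytes, burst = 8 and no
* port_window give tr_cnt0 = 4 and tr_cnt1 = 8, so each trigger moves
* trigger_size = 32 bytes.
*/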
3008
3009 /* estimate the number of TRs we will need */
3010 for_each_sg(sgl, sgent, sglen, i) {
3011 sg_len = sg_dma_len(sgent);
3012
3013 if (sg_len % trigger_size) {
3014 dev_err(uc->ud->dev,
3015 "Not aligned SG entry (%zu for %u)\n", sg_len,
3016 trigger_size);
3017 return NULL;
3018 }
3019
3020 if (sg_len / trigger_size < SZ_64K)
3021 num_tr++;
3022 else
3023 num_tr += 2;
3024 }
3025
3026 /* Now allocate and set up the descriptor. */
3027 tr_size = sizeof(struct cppi5_tr_type15_t);
3028 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3029 if (!d)
3030 return NULL;
3031
3032 d->sglen = sglen;
3033
3034 if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3035 asel = 0;
3036 } else {
3037 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3038 dev_addr |= asel;
3039 }
3040
3041 tr_req = d->hwdesc[0].tr_req_base;
3042 for_each_sg(sgl, sgent, sglen, i) {
3043 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3044 dma_addr_t sg_addr = sg_dma_address(sgent);
3045
3046 sg_len = sg_dma_len(sgent);
3047 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3048 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3049 if (num_tr < 0) {
3050 dev_err(uc->ud->dev, "size %zu is not supported\n",
3051 sg_len);
3052 udma_free_hwdesc(uc, d);
3053 kfree(d);
3054 return NULL;
3055 }
3056
3057 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3058 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3059 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
3060 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3061 uc->config.tr_trigger_type,
3062 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3063
3064 sg_addr |= asel;
3065 if (dir == DMA_DEV_TO_MEM) {
3066 tr_req[tr_idx].addr = dev_addr;
3067 tr_req[tr_idx].icnt0 = tr_cnt0;
3068 tr_req[tr_idx].icnt1 = tr_cnt1;
3069 tr_req[tr_idx].icnt2 = tr0_cnt2;
3070 tr_req[tr_idx].icnt3 = tr0_cnt3;
3071 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3072
3073 tr_req[tr_idx].daddr = sg_addr;
3074 tr_req[tr_idx].dicnt0 = tr_cnt0;
3075 tr_req[tr_idx].dicnt1 = tr_cnt1;
3076 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3077 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3078 tr_req[tr_idx].ddim1 = tr_cnt0;
3079 tr_req[tr_idx].ddim2 = trigger_size;
3080 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3081 } else {
3082 tr_req[tr_idx].addr = sg_addr;
3083 tr_req[tr_idx].icnt0 = tr_cnt0;
3084 tr_req[tr_idx].icnt1 = tr_cnt1;
3085 tr_req[tr_idx].icnt2 = tr0_cnt2;
3086 tr_req[tr_idx].icnt3 = tr0_cnt3;
3087 tr_req[tr_idx].dim1 = tr_cnt0;
3088 tr_req[tr_idx].dim2 = trigger_size;
3089 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3090
3091 tr_req[tr_idx].daddr = dev_addr;
3092 tr_req[tr_idx].dicnt0 = tr_cnt0;
3093 tr_req[tr_idx].dicnt1 = tr_cnt1;
3094 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3095 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3096 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3097 }
3098
3099 tr_idx++;
3100
3101 if (num_tr == 2) {
3102 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3103 false, true,
3104 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3105 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3106 CPPI5_TR_CSF_SUPR_EVT);
3107 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3108 uc->config.tr_trigger_type,
3109 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3110 0, 0);
3111
3112 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3113 if (dir == DMA_DEV_TO_MEM) {
3114 tr_req[tr_idx].addr = dev_addr;
3115 tr_req[tr_idx].icnt0 = tr_cnt0;
3116 tr_req[tr_idx].icnt1 = tr_cnt1;
3117 tr_req[tr_idx].icnt2 = tr1_cnt2;
3118 tr_req[tr_idx].icnt3 = 1;
3119 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3120
3121 tr_req[tr_idx].daddr = sg_addr;
3122 tr_req[tr_idx].dicnt0 = tr_cnt0;
3123 tr_req[tr_idx].dicnt1 = tr_cnt1;
3124 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3125 tr_req[tr_idx].dicnt3 = 1;
3126 tr_req[tr_idx].ddim1 = tr_cnt0;
3127 tr_req[tr_idx].ddim2 = trigger_size;
3128 } else {
3129 tr_req[tr_idx].addr = sg_addr;
3130 tr_req[tr_idx].icnt0 = tr_cnt0;
3131 tr_req[tr_idx].icnt1 = tr_cnt1;
3132 tr_req[tr_idx].icnt2 = tr1_cnt2;
3133 tr_req[tr_idx].icnt3 = 1;
3134 tr_req[tr_idx].dim1 = tr_cnt0;
3135 tr_req[tr_idx].dim2 = trigger_size;
3136
3137 tr_req[tr_idx].daddr = dev_addr;
3138 tr_req[tr_idx].dicnt0 = tr_cnt0;
3139 tr_req[tr_idx].dicnt1 = tr_cnt1;
3140 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3141 tr_req[tr_idx].dicnt3 = 1;
3142 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3143 }
3144 tr_idx++;
3145 }
3146
3147 d->residue += sg_len;
3148 }
3149
3150 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3151 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3152
3153 return d;
3154}
3155
3156static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3157 enum dma_slave_buswidth dev_width,
3158 u16 elcnt)
3159{
3160 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3161 return 0;
3162
3163 /* Bus width translates to the element size (ES) */
3164 switch (dev_width) {
3165 case DMA_SLAVE_BUSWIDTH_1_BYTE:
3166 d->static_tr.elsize = 0;
3167 break;
3168 case DMA_SLAVE_BUSWIDTH_2_BYTES:
3169 d->static_tr.elsize = 1;
3170 break;
3171 case DMA_SLAVE_BUSWIDTH_3_BYTES:
3172 d->static_tr.elsize = 2;
3173 break;
3174 case DMA_SLAVE_BUSWIDTH_4_BYTES:
3175 d->static_tr.elsize = 3;
3176 break;
3177 case DMA_SLAVE_BUSWIDTH_8_BYTES:
3178 d->static_tr.elsize = 4;
3179 break;
3180 default: /* not reached */
3181 return -EINVAL;
3182 }
3183
3184 d->static_tr.elcnt = elcnt;
3185
3186 /*
3187 * PDMA must close the packet when the channel is in packet mode.
3188 * For TR mode, when the channel is not cyclic, we also need PDMA to
3189 * close the packet, otherwise the transfer will stall because PDMA
3190 * holds on to the data it has received from the peripheral.
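*
* Example (illustrative): a non-cyclic transfer with residue = 512,
* dev_width = 2 and elcnt = 8 gives div = 16 and
* bstcnt = 512 / 16 = 32.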
3191 */
3192 if (uc->config.pkt_mode || !uc->cyclic) {
3193 unsigned int div = dev_width * elcnt;
3194
3195 if (uc->cyclic)
3196 d->static_tr.bstcnt = d->residue / d->sglen / div;
3197 else
3198 d->static_tr.bstcnt = d->residue / div;
3199
3200 if (uc->config.dir == DMA_DEV_TO_MEM &&
3201 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3202 return -EINVAL;
3203 } else {
3204 d->static_tr.bstcnt = 0;
3205 }
3206
3207 return 0;
3208}
3209
3210static struct udma_desc *
3211udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3212 unsigned int sglen, enum dma_transfer_direction dir,
3213 unsigned long tx_flags, void *context)
3214{
3215 struct scatterlist *sgent;
3216 struct cppi5_host_desc_t *h_desc = NULL;
3217 struct udma_desc *d;
3218 u32 ring_id;
3219 unsigned int i;
3220 u64 asel;
3221
3222 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3223 if (!d)
3224 return NULL;
3225
3226 d->sglen = sglen;
3227 d->hwdesc_count = sglen;
3228
3229 if (dir == DMA_DEV_TO_MEM)
3230 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3231 else
3232 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3233
3234 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3235 asel = 0;
3236 else
3237 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3238
3239 for_each_sg(sgl, sgent, sglen, i) {
3240 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3241 dma_addr_t sg_addr = sg_dma_address(sgent);
3242 struct cppi5_host_desc_t *desc;
3243 size_t sg_len = sg_dma_len(sgent);
3244
3245 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3246 GFP_NOWAIT,
3247 &hwdesc->cppi5_desc_paddr);
3248 if (!hwdesc->cppi5_desc_vaddr) {
3249 dev_err(uc->ud->dev,
3250 "descriptor%d allocation failed\n", i);
3251
3252 udma_free_hwdesc(uc, d);
3253 kfree(d);
3254 return NULL;
3255 }
3256
3257 d->residue += sg_len;
3258 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3259 desc = hwdesc->cppi5_desc_vaddr;
3260
3261 if (i == 0) {
3262 cppi5_hdesc_init(desc, 0, 0);
3263 /* Flow and Packet ID */
3264 cppi5_desc_set_pktids(&desc->hdr, uc->id,
3265 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3266 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3267 } else {
3268 cppi5_hdesc_reset_hbdesc(desc);
3269 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3270 }
3271
3272 /* attach the sg buffer to the descriptor */
3273 sg_addr |= asel;
3274 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3275
3276 /* Attach link as host buffer descriptor */
3277 if (h_desc)
3278 cppi5_hdesc_link_hbdesc(h_desc,
3279 hwdesc->cppi5_desc_paddr | asel);
3280
3281 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3282 dir == DMA_MEM_TO_DEV)
3283 h_desc = desc;
3284 }
3285
3286 if (d->residue >= SZ_4M) {
3287 dev_err(uc->ud->dev,
3288 "%s: Transfer size %u is over the supported 4M range\n",
3289 __func__, d->residue);
3290 udma_free_hwdesc(uc, d);
3291 kfree(d);
3292 return NULL;
3293 }
3294
3295 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3296 cppi5_hdesc_set_pktlen(h_desc, d->residue);
3297
3298 return d;
3299}
3300
3301static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3302 void *data, size_t len)
3303{
3304 struct udma_desc *d = to_udma_desc(desc);
3305 struct udma_chan *uc = to_udma_chan(desc->chan);
3306 struct cppi5_host_desc_t *h_desc;
3307 u32 psd_size = len;
3308 u32 flags = 0;
3309
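/*
* The metadata buffer holds the (optional) 16-byte EPIB followed by the
* protocol specific data; psd_size below is the length without the EPIB.
*/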
3310 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3311 return -ENOTSUPP;
3312
3313 if (!data || len > uc->config.metadata_size)
3314 return -EINVAL;
3315
3316 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3317 return -EINVAL;
3318
3319 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3320 if (d->dir == DMA_MEM_TO_DEV)
3321 memcpy(h_desc->epib, data, len);
3322
3323 if (uc->config.needs_epib)
3324 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3325
3326 d->metadata = data;
3327 d->metadata_size = len;
3328 if (uc->config.needs_epib)
3329 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3330
3331 cppi5_hdesc_update_flags(h_desc, flags);
3332 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3333
3334 return 0;
3335}
3336
3337static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3338 size_t *payload_len, size_t *max_len)
3339{
3340 struct udma_desc *d = to_udma_desc(desc);
3341 struct udma_chan *uc = to_udma_chan(desc->chan);
3342 struct cppi5_host_desc_t *h_desc;
3343
3344 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3345 return ERR_PTR(-ENOTSUPP);
3346
3347 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3348
3349 *max_len = uc->config.metadata_size;
3350
3351 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3352 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3353 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3354
3355 return h_desc->epib;
3356}
3357
3358static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3359 size_t payload_len)
3360{
3361 struct udma_desc *d = to_udma_desc(desc);
3362 struct udma_chan *uc = to_udma_chan(desc->chan);
3363 struct cppi5_host_desc_t *h_desc;
3364 u32 psd_size = payload_len;
3365 u32 flags = 0;
3366
3367 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3368 return -ENOTSUPP;
3369
3370 if (payload_len > uc->config.metadata_size)
3371 return -EINVAL;
3372
3373 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3374 return -EINVAL;
3375
3376 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3377
3378 if (uc->config.needs_epib) {
3379 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3380 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3381 }
3382
3383 cppi5_hdesc_update_flags(h_desc, flags);
3384 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3385
3386 return 0;
3387}
3388
3389static struct dma_descriptor_metadata_ops metadata_ops = {
3390 .attach = udma_attach_metadata,
3391 .get_ptr = udma_get_metadata_ptr,
3392 .set_len = udma_set_metadata_len,
3393};
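
/*
* Sketch of typical client-side usage via the dmaengine core (md_buf and
* md_len are hypothetical):
*
*	desc = dmaengine_prep_slave_sg(chan, sgl, sglen, dir, flags);
*	dmaengine_desc_attach_metadata(desc, md_buf, md_len);
*/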
3394
3395static struct dma_async_tx_descriptor *
3396udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3397 unsigned int sglen, enum dma_transfer_direction dir,
3398 unsigned long tx_flags, void *context)
3399{
3400 struct udma_chan *uc = to_udma_chan(chan);
3401 enum dma_slave_buswidth dev_width;
3402 struct udma_desc *d;
3403 u32 burst;
3404
3405 if (dir != uc->config.dir &&
3406 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3407 dev_err(chan->device->dev,
3408 "%s: chan%d is for %s, not supporting %s\n",
3409 __func__, uc->id,
3410 dmaengine_get_direction_text(uc->config.dir),
3411 dmaengine_get_direction_text(dir));
3412 return NULL;
3413 }
3414
3415 if (dir == DMA_DEV_TO_MEM) {
3416 dev_width = uc->cfg.src_addr_width;
3417 burst = uc->cfg.src_maxburst;
3418 } else if (dir == DMA_MEM_TO_DEV) {
3419 dev_width = uc->cfg.dst_addr_width;
3420 burst = uc->cfg.dst_maxburst;
3421 } else {
3422 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3423 return NULL;
3424 }
3425
3426 if (!burst)
3427 burst = 1;
3428
3429 uc->config.tx_flags = tx_flags;
3430
3431 if (uc->config.pkt_mode)
3432 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3433 context);
3434 else if (is_slave_direction(uc->config.dir))
3435 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3436 context);
3437 else
3438 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3439 tx_flags, context);
3440
3441 if (!d)
3442 return NULL;
3443
3444 d->dir = dir;
3445 d->desc_idx = 0;
3446 d->tr_idx = 0;
3447
3448 /* static TR for remote PDMA */
3449 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3450 dev_err(uc->ud->dev,
3451 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3452 __func__, d->static_tr.bstcnt);
3453
3454 udma_free_hwdesc(uc, d);
3455 kfree(d);
3456 return NULL;
3457 }
3458
3459 if (uc->config.metadata_size)
3460 d->vd.tx.metadata_ops = &metadata_ops;
3461
3462 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3463}
3464
3465static struct udma_desc *
3466udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3467 size_t buf_len, size_t period_len,
3468 enum dma_transfer_direction dir, unsigned long flags)
3469{
3470 struct udma_desc *d;
3471 size_t tr_size, period_addr;
3472 struct cppi5_tr_type1_t *tr_req;
3473 unsigned int periods = buf_len / period_len;
3474 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3475 unsigned int i;
3476 int num_tr;
3477
3478 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3479 &tr0_cnt1, &tr1_cnt0);
3480 if (num_tr < 0) {
3481 dev_err(uc->ud->dev, "size %zu is not supported\n",
3482 period_len);
3483 return NULL;
3484 }
3485
3486 /* Now allocate and set up the descriptor. */
3487 tr_size = sizeof(struct cppi5_tr_type1_t);
3488 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3489 if (!d)
3490 return NULL;
3491
3492 tr_req = d->hwdesc[0].tr_req_base;
3493 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3494 period_addr = buf_addr;
3495 else
3496 period_addr = buf_addr |
3497 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3498
3499 for (i = 0; i < periods; i++) {
3500 int tr_idx = i * num_tr;
3501
3502 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3503 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3504
3505 tr_req[tr_idx].addr = period_addr;
3506 tr_req[tr_idx].icnt0 = tr0_cnt0;
3507 tr_req[tr_idx].icnt1 = tr0_cnt1;
3508 tr_req[tr_idx].dim1 = tr0_cnt0;
3509
3510 if (num_tr == 2) {
3511 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3512 CPPI5_TR_CSF_SUPR_EVT);
3513 tr_idx++;
3514
3515 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3516 false, false,
3517 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3518
3519 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3520 tr_req[tr_idx].icnt0 = tr1_cnt0;
3521 tr_req[tr_idx].icnt1 = 1;
3522 tr_req[tr_idx].dim1 = tr1_cnt0;
3523 }
3524
3525 if (!(flags & DMA_PREP_INTERRUPT))
3526 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3527 CPPI5_TR_CSF_SUPR_EVT);
3528
3529 period_addr += period_len;
3530 }
3531
3532 return d;
3533}
3534
3535static struct udma_desc *
3536udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3537 size_t buf_len, size_t period_len,
3538 enum dma_transfer_direction dir, unsigned long flags)
3539{
3540 struct udma_desc *d;
3541 u32 ring_id;
3542 int i;
3543 int periods = buf_len / period_len;
3544
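/*
* Each period gets its own host descriptor and all of them must fit in
* the ring at once, hence the K3_UDMA_DEFAULT_RING_SIZE - 1 limit.
*/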
3545 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3546 return NULL;
3547
3548 if (period_len >= SZ_4M)
3549 return NULL;
3550
3551 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3552 if (!d)
3553 return NULL;
3554
3555 d->hwdesc_count = periods;
3556
3557 /* TODO: re-check this... */
3558 if (dir == DMA_DEV_TO_MEM)
3559 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3560 else
3561 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3562
3563 if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3564 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3565
3566 for (i = 0; i < periods; i++) {
3567 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3568 dma_addr_t period_addr = buf_addr + (period_len * i);
3569 struct cppi5_host_desc_t *h_desc;
3570
3571 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3572 GFP_NOWAIT,
3573 &hwdesc->cppi5_desc_paddr);
3574 if (!hwdesc->cppi5_desc_vaddr) {
3575 dev_err(uc->ud->dev,
3576 "descriptor%d allocation failed\n", i);
3577
3578 udma_free_hwdesc(uc, d);
3579 kfree(d);
3580 return NULL;
3581 }
3582
3583 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3584 h_desc = hwdesc->cppi5_desc_vaddr;
3585
3586 cppi5_hdesc_init(h_desc, 0, 0);
3587 cppi5_hdesc_set_pktlen(h_desc, period_len);
3588
3589 /* Flow and Packet ID */
3590 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3591 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3592 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3593
3594 /* attach each period to a new descriptor */
3595 cppi5_hdesc_attach_buf(h_desc,
3596 period_addr, period_len,
3597 period_addr, period_len);
3598 }
3599
3600 return d;
3601}
3602
3603static struct dma_async_tx_descriptor *
3604udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3605 size_t period_len, enum dma_transfer_direction dir,
3606 unsigned long flags)
3607{
3608 struct udma_chan *uc = to_udma_chan(chan);
3609 enum dma_slave_buswidth dev_width;
3610 struct udma_desc *d;
3611 u32 burst;
3612
3613 if (dir != uc->config.dir) {
3614 dev_err(chan->device->dev,
3615 "%s: chan%d is for %s, not supporting %s\n",
3616 __func__, uc->id,
3617 dmaengine_get_direction_text(uc->config.dir),
3618 dmaengine_get_direction_text(dir));
3619 return NULL;
3620 }
3621
3622 uc->cyclic = true;
3623
3624 if (dir == DMA_DEV_TO_MEM) {
3625 dev_width = uc->cfg.src_addr_width;
3626 burst = uc->cfg.src_maxburst;
3627 } else if (dir == DMA_MEM_TO_DEV) {
3628 dev_width = uc->cfg.dst_addr_width;
3629 burst = uc->cfg.dst_maxburst;
3630 } else {
3631 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3632 return NULL;
3633 }
3634
3635 if (!burst)
3636 burst = 1;
3637
3638 if (uc->config.pkt_mode)
3639 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3640 dir, flags);
3641 else
3642 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3643 dir, flags);
3644
3645 if (!d)
3646 return NULL;
3647
3648 d->sglen = buf_len / period_len;
3649
3650 d->dir = dir;
3651 d->residue = buf_len;
3652
3653 /* static TR for remote PDMA */
3654 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3655 dev_err(uc->ud->dev,
3656 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3657 __func__, d->static_tr.bstcnt);
3658
3659 udma_free_hwdesc(uc, d);
3660 kfree(d);
3661 return NULL;
3662 }
3663
3664 if (uc->config.metadata_size)
3665 d->vd.tx.metadata_ops = &metadata_ops;
3666
3667 return vchan_tx_prep(&uc->vc, &d->vd, flags);
3668}
3669
3670static struct dma_async_tx_descriptor *
3671udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3672 size_t len, unsigned long tx_flags)
3673{
3674 struct udma_chan *uc = to_udma_chan(chan);
3675 struct udma_desc *d;
3676 struct cppi5_tr_type15_t *tr_req;
3677 int num_tr;
3678 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3679 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3680
3681 if (uc->config.dir != DMA_MEM_TO_MEM) {
3682 dev_err(chan->device->dev,
3683 "%s: chan%d is for %s, not supporting %s\n",
3684 __func__, uc->id,
3685 dmaengine_get_direction_text(uc->config.dir),
3686 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3687 return NULL;
3688 }
3689
3690 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3691 &tr0_cnt1, &tr1_cnt0);
3692 if (num_tr < 0) {
3693 dev_err(uc->ud->dev, "size %zu is not supported\n",
3694 len);
3695 return NULL;
3696 }
3697
3698 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3699 if (!d)
3700 return NULL;
3701
3702 d->dir = DMA_MEM_TO_MEM;
3703 d->desc_idx = 0;
3704 d->tr_idx = 0;
3705 d->residue = len;
3706
3707 if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3708 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3709 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3710 }
3711
3712 tr_req = d->hwdesc[0].tr_req_base;
3713
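/*
* The first TR moves tr0_cnt1 rows of tr0_cnt0 bytes each; if the length
* did not divide evenly, the second TR moves the tr1_cnt0-byte tail.
*/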
3714 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3715 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3716 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3717
3718 tr_req[0].addr = src;
3719 tr_req[0].icnt0 = tr0_cnt0;
3720 tr_req[0].icnt1 = tr0_cnt1;
3721 tr_req[0].icnt2 = 1;
3722 tr_req[0].icnt3 = 1;
3723 tr_req[0].dim1 = tr0_cnt0;
3724
3725 tr_req[0].daddr = dest;
3726 tr_req[0].dicnt0 = tr0_cnt0;
3727 tr_req[0].dicnt1 = tr0_cnt1;
3728 tr_req[0].dicnt2 = 1;
3729 tr_req[0].dicnt3 = 1;
3730 tr_req[0].ddim1 = tr0_cnt0;
3731
3732 if (num_tr == 2) {
3733 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3734 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3735 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3736
3737 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3738 tr_req[1].icnt0 = tr1_cnt0;
3739 tr_req[1].icnt1 = 1;
3740 tr_req[1].icnt2 = 1;
3741 tr_req[1].icnt3 = 1;
3742
3743 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3744 tr_req[1].dicnt0 = tr1_cnt0;
3745 tr_req[1].dicnt1 = 1;
3746 tr_req[1].dicnt2 = 1;
3747 tr_req[1].dicnt3 = 1;
3748 }
3749
3750 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3751 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3752
3753 if (uc->config.metadata_size)
3754 d->vd.tx.metadata_ops = &metadata_ops;
3755
3756 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3757}
3758
3759static void udma_issue_pending(struct dma_chan *chan)
3760{
3761 struct udma_chan *uc = to_udma_chan(chan);
3762 unsigned long flags;
3763
3764 spin_lock_irqsave(&uc->vc.lock, flags);
3765
3766 /* If we have something pending and no active descriptor, then */
3767 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3768 /*
3769 * start a descriptor if the channel is NOT [marked as
3770 * terminating _and_ it is still running (teardown has not
3771 * completed yet)].
3772 */
3773 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3774 udma_is_chan_running(uc)))
3775 udma_start(uc);
3776 }
3777
3778 spin_unlock_irqrestore(&uc->vc.lock, flags);
3779}
3780
3781static enum dma_status udma_tx_status(struct dma_chan *chan,
3782 dma_cookie_t cookie,
3783 struct dma_tx_state *txstate)
3784{
3785 struct udma_chan *uc = to_udma_chan(chan);
3786 enum dma_status ret;
3787 unsigned long flags;
3788
3789 spin_lock_irqsave(&uc->vc.lock, flags);
3790
3791 ret = dma_cookie_status(chan, cookie, txstate);
3792
3793 if (!udma_is_chan_running(uc))
3794 ret = DMA_COMPLETE;
3795
3796 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3797 ret = DMA_PAUSED;
3798
3799 if (ret == DMA_COMPLETE || !txstate)
3800 goto out;
3801
3802 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3803 u32 peer_bcnt = 0;
3804 u32 bcnt = 0;
3805 u32 residue = uc->desc->residue;
3806 u32 delay = 0;
3807
3808 if (uc->desc->dir == DMA_MEM_TO_DEV) {
3809 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3810
3811 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3812 peer_bcnt = udma_tchanrt_read(uc,
3813 UDMA_CHAN_RT_PEER_BCNT_REG);
3814
3815 if (bcnt > peer_bcnt)
3816 delay = bcnt - peer_bcnt;
3817 }
3818 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3819 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3820
3821 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3822 peer_bcnt = udma_rchanrt_read(uc,
3823 UDMA_CHAN_RT_PEER_BCNT_REG);
3824
3825 if (peer_bcnt > bcnt)
3826 delay = peer_bcnt - bcnt;
3827 }
3828 } else {
3829 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3830 }
3831
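/*
* The RT byte counters keep accumulating while the channel is reused
* (e.g. cyclic transfers), so progress within the current descriptor is
* bcnt modulo the descriptor's total size.
*/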
3832 if (bcnt && !(bcnt % uc->desc->residue))
3833 residue = 0;
3834 else
3835 residue -= bcnt % uc->desc->residue;
3836
3837 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3838 ret = DMA_COMPLETE;
3839 delay = 0;
3840 }
3841
3842 dma_set_residue(txstate, residue);
3843 dma_set_in_flight_bytes(txstate, delay);
3844
3845 } else {
3846 ret = DMA_COMPLETE;
3847 }
3848
3849out:
3850 spin_unlock_irqrestore(&uc->vc.lock, flags);
3851 return ret;
3852}
3853
3854static int udma_pause(struct dma_chan *chan)
3855{
3856 struct udma_chan *uc = to_udma_chan(chan);
3857
3858 /* pause the channel */
3859 switch (uc->config.dir) {
3860 case DMA_DEV_TO_MEM:
3861 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3862 UDMA_PEER_RT_EN_PAUSE,
3863 UDMA_PEER_RT_EN_PAUSE);
3864 break;
3865 case DMA_MEM_TO_DEV:
3866 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3867 UDMA_PEER_RT_EN_PAUSE,
3868 UDMA_PEER_RT_EN_PAUSE);
3869 break;
3870 case DMA_MEM_TO_MEM:
3871 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3872 UDMA_CHAN_RT_CTL_PAUSE,
3873 UDMA_CHAN_RT_CTL_PAUSE);
3874 break;
3875 default:
3876 return -EINVAL;
3877 }
3878
3879 return 0;
3880}
3881
3882static int udma_resume(struct dma_chan *chan)
3883{
3884 struct udma_chan *uc = to_udma_chan(chan);
3885
3886 /* resume the channel */
3887 switch (uc->config.dir) {
3888 case DMA_DEV_TO_MEM:
3889 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3890 UDMA_PEER_RT_EN_PAUSE, 0);
3891
3892 break;
3893 case DMA_MEM_TO_DEV:
3894 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3895 UDMA_PEER_RT_EN_PAUSE, 0);
3896 break;
3897 case DMA_MEM_TO_MEM:
3898 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3899 UDMA_CHAN_RT_CTL_PAUSE, 0);
3900 break;
3901 default:
3902 return -EINVAL;
3903 }
3904
3905 return 0;
3906}
3907
3908static int udma_terminate_all(struct dma_chan *chan)
3909{
3910 struct udma_chan *uc = to_udma_chan(chan);
3911 unsigned long flags;
3912 LIST_HEAD(head);
3913
3914 spin_lock_irqsave(&uc->vc.lock, flags);
3915
3916 if (udma_is_chan_running(uc))
3917 udma_stop(uc);
3918
3919 if (uc->desc) {
3920 uc->terminated_desc = uc->desc;
3921 uc->desc = NULL;
3922 uc->terminated_desc->terminated = true;
3923 cancel_delayed_work(&uc->tx_drain.work);
3924 }
3925
3926 uc->paused = false;
3927
3928 vchan_get_all_descriptors(&uc->vc, &head);
3929 spin_unlock_irqrestore(&uc->vc.lock, flags);
3930 vchan_dma_desc_free_list(&uc->vc, &head);
3931
3932 return 0;
3933}
3934
3935static void udma_synchronize(struct dma_chan *chan)
3936{
3937 struct udma_chan *uc = to_udma_chan(chan);
3938 unsigned long timeout = msecs_to_jiffies(1000);
3939
3940 vchan_synchronize(&uc->vc);
3941
3942 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3943 timeout = wait_for_completion_timeout(&uc->teardown_completed,
3944 timeout);
3945 if (!timeout) {
3946 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3947 uc->id);
3948 udma_dump_chan_stdata(uc);
3949 udma_reset_chan(uc, true);
3950 }
3951 }
3952
3953 udma_reset_chan(uc, false);
3954 if (udma_is_chan_running(uc))
3955 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3956
3957 cancel_delayed_work_sync(&uc->tx_drain.work);
3958 udma_reset_rings(uc);
3959}
3960
3961static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3962 struct virt_dma_desc *vd,
3963 struct dmaengine_result *result)
3964{
3965 struct udma_chan *uc = to_udma_chan(&vc->chan);
3966 struct udma_desc *d;
3967
3968 if (!vd)
3969 return;
3970
3971 d = to_udma_desc(&vd->tx);
3972
3973 if (d->metadata_size)
3974 udma_fetch_epib(uc, d);
3975
3976 /* Provide residue information for the client */
3977 if (result) {
3978 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3979
3980 if (cppi5_desc_get_type(desc_vaddr) ==
3981 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3982 result->residue = d->residue -
3983 cppi5_hdesc_get_pktlen(desc_vaddr);
3984 if (result->residue)
3985 result->result = DMA_TRANS_ABORTED;
3986 else
3987 result->result = DMA_TRANS_NOERROR;
3988 } else {
3989 result->residue = 0;
3990 result->result = DMA_TRANS_NOERROR;
3991 }
3992 }
3993}
3994
3995/*
3996 * This tasklet handles the completion of a DMA descriptor by
3997 * calling its callback and freeing it.
3998 */
3999static void udma_vchan_complete(struct tasklet_struct *t)
4000{
4001 struct virt_dma_chan *vc = from_tasklet(vc, t, task);
4002 struct virt_dma_desc *vd, *_vd;
4003 struct dmaengine_desc_callback cb;
4004 LIST_HEAD(head);
4005
4006 spin_lock_irq(&vc->lock);
4007 list_splice_tail_init(&vc->desc_completed, &head);
4008 vd = vc->cyclic;
4009 if (vd) {
4010 vc->cyclic = NULL;
4011 dmaengine_desc_get_callback(&vd->tx, &cb);
4012 } else {
4013 memset(&cb, 0, sizeof(cb));
4014 }
4015 spin_unlock_irq(&vc->lock);
4016
4017 udma_desc_pre_callback(vc, vd, NULL);
4018 dmaengine_desc_callback_invoke(&cb, NULL);
4019
4020 list_for_each_entry_safe(vd, _vd, &head, node) {
4021 struct dmaengine_result result;
4022
4023 dmaengine_desc_get_callback(&vd->tx, &cb);
4024
4025 list_del(&vd->node);
4026
4027 udma_desc_pre_callback(vc, vd, &result);
4028 dmaengine_desc_callback_invoke(&cb, &result);
4029
4030 vchan_vdesc_fini(vd);
4031 }
4032}
4033
static void udma_free_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);

	udma_terminate_all(chan);
	if (uc->terminated_desc) {
		udma_reset_chan(uc, false);
		udma_reset_rings(uc);
	}

	cancel_delayed_work_sync(&uc->tx_drain.work);

	if (uc->irq_num_ring > 0) {
		free_irq(uc->irq_num_ring, uc);

		uc->irq_num_ring = 0;
	}
	if (uc->irq_num_udma > 0) {
		free_irq(uc->irq_num_udma, uc);

		uc->irq_num_udma = 0;
	}

	/* Release PSI-L pairing */
	if (uc->psil_paired) {
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
		uc->psil_paired = false;
	}

	vchan_free_chan_resources(&uc->vc);
	tasklet_kill(&uc->vc.task);

	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}
}

static struct platform_driver udma_driver;
static struct platform_driver bcdma_driver;
static struct platform_driver pktdma_driver;

struct udma_filter_param {
	int remote_thread_id;
	u32 atype;
	u32 asel;
	u32 tr_trigger_type;
};

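/*
 * dmaengine filter callback: validate the parameters parsed from the DT
 * dma-spec and copy them into the channel configuration together with
 * the matching PSI-L endpoint configuration. Returning true claims the
 * channel for the requesting client.
 */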
static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct udma_chan_config *ucc;
	struct psil_endpoint_config *ep_config;
	struct udma_filter_param *filter_param;
	struct udma_chan *uc;
	struct udma_dev *ud;

	if (chan->device->dev->driver != &udma_driver.driver &&
	    chan->device->dev->driver != &bcdma_driver.driver &&
	    chan->device->dev->driver != &pktdma_driver.driver)
		return false;

	uc = to_udma_chan(chan);
	ucc = &uc->config;
	ud = uc->ud;
	filter_param = param;

	if (filter_param->atype > 2) {
		dev_err(ud->dev, "Invalid channel atype: %u\n",
			filter_param->atype);
		return false;
	}

	if (filter_param->asel > 15) {
		dev_err(ud->dev, "Invalid channel asel: %u\n",
			filter_param->asel);
		return false;
	}

	ucc->remote_thread_id = filter_param->remote_thread_id;
	ucc->atype = filter_param->atype;
	ucc->asel = filter_param->asel;
	ucc->tr_trigger_type = filter_param->tr_trigger_type;

	if (ucc->tr_trigger_type) {
		ucc->dir = DMA_MEM_TO_MEM;
		goto triggered_bchan;
	} else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
		ucc->dir = DMA_MEM_TO_DEV;
	} else {
		ucc->dir = DMA_DEV_TO_MEM;
	}

	ep_config = psil_get_ep_config(ucc->remote_thread_id);
	if (IS_ERR(ep_config)) {
		dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		ucc->atype = 0;
		ucc->asel = 0;
		return false;
	}

	if (ud->match_data->type == DMA_TYPE_BCDMA &&
	    ep_config->pkt_mode) {
		dev_err(ud->dev,
			"Only TR mode is supported (psi-l thread 0x%04x)\n",
			ucc->remote_thread_id);
		ucc->dir = DMA_MEM_TO_MEM;
		ucc->remote_thread_id = -1;
		ucc->atype = 0;
		ucc->asel = 0;
		return false;
	}

	ucc->pkt_mode = ep_config->pkt_mode;
	ucc->channel_tpl = ep_config->channel_tpl;
	ucc->notdpkt = ep_config->notdpkt;
	ucc->ep_type = ep_config->ep_type;

	if (ud->match_data->type == DMA_TYPE_PKTDMA &&
	    ep_config->mapped_channel_id >= 0) {
		ucc->mapped_channel_id = ep_config->mapped_channel_id;
		ucc->default_flow_id = ep_config->default_flow_id;
	} else {
		ucc->mapped_channel_id = -1;
		ucc->default_flow_id = -1;
	}

	if (ucc->ep_type != PSIL_EP_NATIVE) {
		const struct udma_match_data *match_data = ud->match_data;

		if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
			ucc->enable_acc32 = ep_config->pdma_acc32;
		if (match_data->flags & UDMA_FLAG_PDMA_BURST)
			ucc->enable_burst = ep_config->pdma_burst;
	}

	ucc->needs_epib = ep_config->needs_epib;
	ucc->psd_size = ep_config->psd_size;
	ucc->metadata_size =
		(ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
		ucc->psd_size;

	if (ucc->pkt_mode)
		ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					ucc->metadata_size, ud->desc_align);

	dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
		ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));

	return true;

triggered_bchan:
	dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
		ucc->tr_trigger_type);

	return true;
}

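/*
 * Translate a DT dma-spec into a channel request. The argument layout
 * depends on the DMA type:
 *   BCDMA:        <trigger-type thread-id asel>         (3 cells)
 *   UDMA/PKTDMA:  <thread-id> or <thread-id atype/asel> (1 or 2 cells)
 *
 * Illustrative client node only, the thread ID values are hypothetical
 * (a destination thread has K3_PSIL_DST_THREAD_ID_OFFSET set):
 *	dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
 *	dma-names = "tx", "rx";
 */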
static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct udma_dev *ud = ofdma->of_dma_data;
	dma_cap_mask_t mask = ud->ddev.cap_mask;
	struct udma_filter_param filter_param;
	struct dma_chan *chan;

	if (ud->match_data->type == DMA_TYPE_BCDMA) {
		if (dma_spec->args_count != 3)
			return NULL;

		filter_param.tr_trigger_type = dma_spec->args[0];
		filter_param.remote_thread_id = dma_spec->args[1];
		filter_param.asel = dma_spec->args[2];
		filter_param.atype = 0;
	} else {
		if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
			return NULL;

		filter_param.remote_thread_id = dma_spec->args[0];
		filter_param.tr_trigger_type = 0;
		if (dma_spec->args_count == 2) {
			if (ud->match_data->type == DMA_TYPE_UDMA) {
				filter_param.atype = dma_spec->args[1];
				filter_param.asel = 0;
			} else {
				filter_param.atype = 0;
				filter_param.asel = dma_spec->args[1];
			}
		} else {
			filter_param.atype = 0;
			filter_param.asel = 0;
		}
	}

	chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
				     ofdma->of_node);
	if (!chan) {
		dev_err(ud->dev, "failed to get channel in %s\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	return chan;
}

static struct udma_match_data am654_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.statictr_z_mask = GENMASK(11, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am654_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false,
	.statictr_z_mask = GENMASK(11, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data j721e_main_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = true,
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
	},
};

static struct udma_match_data j721e_mcu_data = {
	.type = DMA_TYPE_UDMA,
	.psil_base = 0x6000,
	.enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am64_bcdma_data = {
	.type = DMA_TYPE_BCDMA,
	.psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
	.enable_memcpy_support = true, /* Supported via bchan */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
};

static struct udma_match_data am64_pktdma_data = {
	.type = DMA_TYPE_PKTDMA,
	.psil_base = 0x1000,
	.enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
	.flags = UDMA_FLAGS_J7_CLASS,
	.statictr_z_mask = GENMASK(23, 0),
	.burst_size = {
		TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
		0, /* No H Channels */
		0, /* No UH Channels */
	},
};

static const struct of_device_id udma_of_match[] = {
	{
		.compatible = "ti,am654-navss-main-udmap",
		.data = &am654_main_data,
	},
	{
		.compatible = "ti,am654-navss-mcu-udmap",
		.data = &am654_mcu_data,
	},
	{
		.compatible = "ti,j721e-navss-main-udmap",
		.data = &j721e_main_data,
	},
	{
		.compatible = "ti,j721e-navss-mcu-udmap",
		.data = &j721e_mcu_data,
	},
	{
		.compatible = "ti,am64-dmss-bcdma",
		.data = &am64_bcdma_data,
	},
	{
		.compatible = "ti,am64-dmss-pktdma",
		.data = &am64_pktdma_data,
	},
	{ /* Sentinel */ },
};

static struct udma_soc_data am654_soc_data = {
	.oes = {
		.udma_rchan = 0x200,
	},
};

static struct udma_soc_data j721e_soc_data = {
	.oes = {
		.udma_rchan = 0x400,
	},
};

static struct udma_soc_data j7200_soc_data = {
	.oes = {
		.udma_rchan = 0x80,
	},
};

static struct udma_soc_data am64_soc_data = {
	.oes = {
		.bcdma_bchan_data = 0x2200,
		.bcdma_bchan_ring = 0x2400,
		.bcdma_tchan_data = 0x2800,
		.bcdma_tchan_ring = 0x2a00,
		.bcdma_rchan_data = 0x2e00,
		.bcdma_rchan_ring = 0x3000,
		.pktdma_tchan_flow = 0x1200,
		.pktdma_rchan_flow = 0x1600,
	},
	.bcdma_trigger_event_offset = 0xc400,
};

static const struct soc_device_attribute k3_soc_devices[] = {
	{ .family = "AM65X", .data = &am654_soc_data },
	{ .family = "J721E", .data = &j721e_soc_data },
	{ .family = "J7200", .data = &j7200_soc_data },
	{ .family = "AM64X", .data = &am64_soc_data },
	{ .family = "J721S2", .data = &j721e_soc_data },
	{ .family = "AM62X", .data = &am64_soc_data },
	{ /* sentinel */ }
};

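/*
 * Discover the channel/flow counts from the GCFG capability registers
 * (CAP2 at 0x28, CAP3 at 0x2c, CAP4 at 0x30) and map the per-channel
 * realtime register regions that are actually present.
 */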
static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
{
	u32 cap2, cap3, cap4;
	int i;

	ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
	if (IS_ERR(ud->mmrs[MMR_GCFG]))
		return PTR_ERR(ud->mmrs[MMR_GCFG]);

	cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
		ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
		break;
	case DMA_TYPE_BCDMA:
		ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
		ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
		ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
		ud->rflow_cnt = ud->rchan_cnt;
		break;
	case DMA_TYPE_PKTDMA:
		cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
		ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
		ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
		ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
		ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
		break;
	default:
		return -EINVAL;
	}

	for (i = 1; i < MMR_LAST; i++) {
		if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
			continue;
		if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
			continue;
		if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
			continue;

		ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
		if (IS_ERR(ud->mmrs[i]))
			return PTR_ERR(ud->mmrs[i]);
	}

	return 0;
}

static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
				      struct ti_sci_resource_desc *rm_desc,
				      char *name)
{
	bitmap_clear(map, rm_desc->start, rm_desc->num);
	bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
	dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
		rm_desc->start, rm_desc->num, rm_desc->start_sec,
		rm_desc->num_sec);
}

static const char * const range_names[] = {
	[RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
	[RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
	[RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
	[RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
	[RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
};

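/*
 * UDMA resource setup: derive the throughput-level (TPL) boundaries,
 * allocate the channel/flow bookkeeping bitmaps (a set bit means the
 * resource is not assigned to this host), query the TISCI resource
 * ranges and allocate the completion MSI interrupts for them.
 */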
static int udma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (of_device_is_compatible(dev->of_node,
				    "ti,am654-navss-main-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 8;
	} else if (of_device_is_compatible(dev->of_node,
					   "ti,am654-navss-mcu-udmap")) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = 2;
	} else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
					      sizeof(unsigned long),
					      GFP_KERNEL);
	ud->rflow_gp_map_allocated = devm_kcalloc(dev,
						  BITS_TO_LONGS(ud->rflow_cnt),
						  sizeof(unsigned long),
						  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
	    !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
	    !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/*
	 * RX flows with the same IDs as RX channels are reserved for use as
	 * default flows if the remote HW can't generate flow_ids. Those
	 * RX flows can be requested only explicitly by ID.
	 */
	bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);

	/* by default no GP rflows are assigned to Linux */
	bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
		irq_res.sets = rm_res->sets;
	}

	/* rchan and matching default flow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = 0;
		irq_res.desc[0].num = ud->tchan_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start;
			irq_res.desc[i].num = rm_res->desc[i].num;
			irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
			irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = 0;
		irq_res.desc[i].num = ud->rchan_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			if (rm_res->desc[j].num) {
				irq_res.desc[i].start = rm_res->desc[j].start +
						ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num = rm_res->desc[j].num;
			}
			if (rm_res->desc[j].num_sec) {
				irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
						ud->soc_data->oes.udma_rchan;
				irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
			}
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	/* GP rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all gp flows are assigned exclusively to Linux */
		bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
			     ud->rflow_cnt - ud->rchan_cnt);
	} else {
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_gp_map,
						  &rm_res->desc[i], "gp-rflow");
	}

	return 0;
}

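/*
 * BCDMA resource setup. BCDMA adds block-copy (bchan) channels and has
 * separate CAP3/CAP4 fields for the bchan/tchan/rchan TPL boundaries.
 * Each tchan/rchan contributes two interrupt ranges (data and ring
 * completion events), bchans only one.
 */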
static int bcdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap;

	/* Set up the throughput level start indexes */
	cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 3;
		ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
		ud->bchan_tpl.levels = 2;
		ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
	} else {
		ud->bchan_tpl.levels = 1;
	}

	cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
	if (BCDMA_CAP4_URCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 3;
		ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
		ud->rchan_tpl.levels = 2;
		ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
	} else {
		ud->rchan_tpl.levels = 1;
	}

	if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
				  GFP_KERNEL);
	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	/* BCDMA does not really have flows, but the driver expects them */
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);

	if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
	    !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
	    !ud->rflows)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
			continue;
		if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
			continue;
		if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
			continue;
		if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	irq_res.sets = 0;

	/* bchan ranges */
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->bchan_map, ud->bchan_cnt);
			irq_res.sets++;
		} else {
			bitmap_fill(ud->bchan_map, ud->bchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->bchan_map,
							  &rm_res->desc[i],
							  "bchan");
			irq_res.sets += rm_res->sets;
		}
	}

	/* tchan ranges */
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->tchan_map, ud->tchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->tchan_map, ud->tchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->tchan_map,
							  &rm_res->desc[i],
							  "tchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	/* rchan ranges */
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			bitmap_zero(ud->rchan_map, ud->rchan_cnt);
			irq_res.sets += 2;
		} else {
			bitmap_fill(ud->rchan_map, ud->rchan_cnt);
			for (i = 0; i < rm_res->sets; i++)
				udma_mark_resource_ranges(ud, ud->rchan_map,
							  &rm_res->desc[i],
							  "rchan");
			irq_res.sets += rm_res->sets * 2;
		}
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	/*
	 * i indexes the next free irq_res.desc slot. Start from 0 so the
	 * tchan/rchan blocks below stay correct even when there are no
	 * bchans (i would otherwise be left over from the range loop above).
	 */
	i = 0;
	if (ud->bchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[0].start = oes->bcdma_bchan_ring;
			irq_res.desc[0].num = ud->bchan_cnt;
			i = 1;
		} else {
			for (i = 0; i < rm_res->sets; i++) {
				irq_res.desc[i].start = rm_res->desc[i].start +
							oes->bcdma_bchan_ring;
				irq_res.desc[i].num = rm_res->desc[i].num;
			}
		}
	}
	if (ud->tchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_tchan_data;
			irq_res.desc[i].num = ud->tchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
			irq_res.desc[i + 1].num = ud->tchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_tchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_tchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}
	if (ud->rchan_cnt) {
		rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
		if (IS_ERR(rm_res)) {
			irq_res.desc[i].start = oes->bcdma_rchan_data;
			irq_res.desc[i].num = ud->rchan_cnt;
			irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
			irq_res.desc[i + 1].num = ud->rchan_cnt;
			i += 2;
		} else {
			for (j = 0; j < rm_res->sets; j++, i += 2) {
				irq_res.desc[i].start = rm_res->desc[j].start +
							oes->bcdma_rchan_data;
				irq_res.desc[i].num = rm_res->desc[j].num;

				irq_res.desc[i + 1].start = rm_res->desc[j].start +
							oes->bcdma_rchan_ring;
				irq_res.desc[i + 1].num = rm_res->desc[j].num;
			}
		}
	}

	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}

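/*
 * PKTDMA resource setup. PKTDMA pairs every channel with dedicated TX
 * and RX flows; the completion interrupts are allocated per flow using
 * the pktdma_tchan_flow/pktdma_rchan_flow event offsets.
 */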
static int pktdma_setup_resources(struct udma_dev *ud)
{
	int ret, i, j;
	struct device *dev = ud->dev;
	struct ti_sci_resource *rm_res, irq_res;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 cap3;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
		irq_res.sets = rm_res->sets;
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
		irq_res.sets += rm_res->sets;
	}

	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = oes->pktdma_tchan_flow;
		irq_res.desc[0].num = ud->tflow_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = oes->pktdma_rchan_flow;
		irq_res.desc[i].num = ud->rflow_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}

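/*
 * Dispatch to the type-specific resource setup and return the number of
 * channels usable by Linux: the totals minus the channels whose bitmap
 * bits are still set, i.e. not assigned to this host.
 */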
static int setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt),
			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
						       ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_info(dev,
			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						       ud->bchan_cnt),
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}

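/*
 * Prepare the RX teardown flush machinery: a 1K scratch buffer plus one
 * pre-built TR-mode descriptor and one packet-mode descriptor that point
 * at it. These are pushed to an RX channel during teardown to safely
 * discard any in-flight data.
 */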
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
	return 0;
}

#ifdef CONFIG_DEBUG_FS
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	if (ucc->tr_trigger_type)
		seq_puts(s, " (triggered, ");
	else
		seq_printf(s, " (%s, ",
			   dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
			seq_printf(s, "bchan%d)\n", uc->bchan->id);
			return;
		}

		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "rflow%d, ", uc->rflow->id);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
		break;
	default:
		seq_puts(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_puts(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_puts(s, " ]");
		}
	} else {
		seq_puts(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */

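/*
 * Pick the memcpy alignment advertised to clients from the burst size
 * of the highest throughput level available for MEM_TO_MEM transfers.
 */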
static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
{
	const struct udma_match_data *match_data = ud->match_data;
	u8 tpl;

	if (!match_data->enable_memcpy_support)
		return DMAENGINE_ALIGN_8_BYTES;

	/* Get the highest TPL level the device supports for memcpy */
	if (ud->bchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
	else if (ud->tchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
	else
		return DMAENGINE_ALIGN_8_BYTES;

	switch (match_data->burst_size[tpl]) {
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
		return DMAENGINE_ALIGN_256_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
		return DMAENGINE_ALIGN_128_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
		fallthrough;
	default:
		return DMAENGINE_ALIGN_64_BYTES;
	}
}

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

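/*
 * Probe: resolve the match/SoC data, map the MMRs, connect to TISCI and
 * the ring accelerator, wire up the dmaengine callbacks according to
 * the DMA type, initialize every channel and finally register with the
 * dmaengine core and the DT DMA helpers.
 */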
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set DMA mask\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match) {
		dev_err(dev, "No compatible match found\n");
		return -ENODEV;
	}
	ud->match_data = match->data;

	soc = soc_device_match(k3_soc_devices);
	if (!soc) {
		dev_err(dev, "No compatible SoC found\n");
		return -ENODEV;
	}
	ud->soc_data = soc->data;

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
					   &ud->atype);
		if (!ret && ud->atype > 2) {
			dev_err(dev, "Invalid atype: %u\n", ud->atype);
			return -EINVAL;
		}
	} else {
		ret = of_property_read_u32(dev->of_node, "ti,asel",
					   &ud->asel);
		if (!ret && ud->asel > 15) {
			dev_err(dev, "Invalid asel: %u\n", ud->asel);
			return -EINVAL;
		}
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
	}

	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	/* cyclic operation is not supported via PKTDMA */
	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	}

	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->ddev.device_alloc_chan_resources =
					udma_alloc_chan_resources;
		break;
	case DMA_TYPE_BCDMA:
		ud->ddev.device_alloc_chan_resources =
					bcdma_alloc_chan_resources;
		ud->ddev.device_router_config = bcdma_router_config;
		break;
	case DMA_TYPE_PKTDMA:
		ud->ddev.device_alloc_chan_resources =
					pktdma_alloc_chan_resources;
		break;
	default:
		return -EINVAL;
	}
	ud->ddev.device_free_chan_resources = udma_free_chan_resources;

	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support &&
	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	/* Configure the copy_align to the maximum burst size the device supports */
	ud->ddev.copy_align = udma_get_copy_align(ud);

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}

static struct platform_driver udma_driver = {
	.driver = {
		.name = "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = udma_probe,
};

module_platform_driver(udma_driver);
MODULE_LICENSE("GPL v2");

/* Private interfaces to UDMA */
#include "k3-udma-private.c"