// SPDX-License-Identifier: GPL-2.0
/*
 * K3 NAVSS DMA glue interface
 *
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *
 */

#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/dma/ti-cppi5.h>
#include <linux/dma/k3-udma-glue.h>

#include "k3-udma.h"
#include "k3-psil-priv.h"
struct k3_udma_glue_common {
	struct device *dev;
	struct device chan_dev;
	struct udma_dev *udmax;
	const struct udma_tisci_rm *tisci_rm;
	struct k3_ringacc *ringacc;
	u32 src_thread;
	u32 dst_thread;

	u32 hdesc_size;
	bool epib;
	u32 psdata_size;
	u32 swdata_size;
	u32 atype_asel;
	struct psil_endpoint_config *ep_config;
};

struct k3_udma_glue_tx_channel {
	struct k3_udma_glue_common common;

	struct udma_tchan *udma_tchanx;
	int udma_tchan_id;

	struct k3_ring *ringtx;
	struct k3_ring *ringtxcq;

	bool psil_paired;

	int virq;

	atomic_t free_pkts;
	bool tx_pause_on_err;
	bool tx_filt_einfo;
	bool tx_filt_pswords;
	bool tx_supr_tdpkt;

	int udma_tflow_id;
};

struct k3_udma_glue_rx_flow {
	struct udma_rflow *udma_rflow;
	int udma_rflow_id;
	struct k3_ring *ringrx;
	struct k3_ring *ringrxfdq;

	int virq;
};

struct k3_udma_glue_rx_channel {
	struct k3_udma_glue_common common;

	struct udma_rchan *udma_rchanx;
	int udma_rchan_id;
	bool remote;

	bool psil_paired;

	u32 swdata_size;
	int flow_id_base;

	struct k3_udma_glue_rx_flow *flows;
	u32 flow_num;
	u32 flows_ready;
};

static void k3_udma_chan_dev_release(struct device *dev)
{
	/* The struct containing the device is devm managed */
}

static struct class k3_udma_glue_devclass = {
	.name = "k3_udma_glue_chan",
	.dev_release = k3_udma_chan_dev_release,
};
#define K3_UDMAX_TDOWN_TIMEOUT_US 1000

static int of_k3_udma_glue_parse(struct device_node *udmax_np,
				 struct k3_udma_glue_common *common)
{
	common->udmax = of_xudma_dev_get(udmax_np, NULL);
	if (IS_ERR(common->udmax))
		return PTR_ERR(common->udmax);

	common->ringacc = xudma_get_ringacc(common->udmax);
	common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax);

	return 0;
}

static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
				     const char *name,
				     struct k3_udma_glue_common *common,
				     bool tx_chn)
{
	struct of_phandle_args dma_spec;
	u32 thread_id;
	int ret = 0;
	int index;

	if (unlikely(!name))
		return -EINVAL;

	index = of_property_match_string(chn_np, "dma-names", name);
	if (index < 0)
		return index;

	if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index,
				       &dma_spec))
		return -ENOENT;

	ret = of_k3_udma_glue_parse(dma_spec.np, common);
	if (ret)
		goto out_put_spec;

	thread_id = dma_spec.args[0];
	if (dma_spec.args_count == 2) {
		if (dma_spec.args[1] > 2 && !xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel atype: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}
		if (dma_spec.args[1] > 15 && xudma_is_pktdma(common->udmax)) {
			dev_err(common->dev, "Invalid channel asel: %u\n",
				dma_spec.args[1]);
			ret = -EINVAL;
			goto out_put_spec;
		}

		common->atype_asel = dma_spec.args[1];
	}

	if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
		ret = -EINVAL;
		goto out_put_spec;
	}

	/* get psil endpoint config */
	common->ep_config = psil_get_ep_config(thread_id);
	if (IS_ERR(common->ep_config)) {
		dev_err(common->dev,
			"No configuration for psi-l thread 0x%04x\n",
			thread_id);
		ret = PTR_ERR(common->ep_config);
		goto out_put_spec;
	}

	common->epib = common->ep_config->needs_epib;
	common->psdata_size = common->ep_config->psd_size;

	if (tx_chn)
		common->dst_thread = thread_id;
	else
		common->src_thread = thread_id;

out_put_spec:
	of_node_put(dma_spec.np);
	return ret;
}

static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	struct device *dev = tx_chn->common.dev;

	dev_dbg(dev, "dump_tx_chn:\n"
		"udma_tchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n",
		tx_chn->udma_tchan_id,
		tx_chn->common.src_thread,
		tx_chn->common.dst_thread);
}

static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_tchanrt_read(chn->udma_tchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_tchanrt_read(chn->udma_tchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = tx_chn->udma_tchan_id;
	if (tx_chn->tx_pause_on_err)
		req.tx_pause_on_err = 1;
	if (tx_chn->tx_filt_einfo)
		req.tx_filt_einfo = 1;
	if (tx_chn->tx_filt_pswords)
		req.tx_filt_pswords = 1;
	req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	if (tx_chn->tx_supr_tdpkt)
		req.tx_supr_tdpkt = 1;
	req.tx_fetch_size = tx_chn->common.hdesc_size >> 2;
	req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	req.tx_atype = tx_chn->common.atype_asel;

	return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}

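/**
 * k3_udma_glue_request_tx_chn - allocate and configure a TX channel
 * @dev: glue layer consumer device (its DT node provides "dmas"/"dma-names")
 * @name: channel name to look up in the "dma-names" DT property
 * @cfg: TX channel configuration (ring configs, filter flags, swdata size)
 *
 * Parses the PSI-L endpoint from DT, reserves a UDMAP TX channel, requests
 * and configures the TX/TXCQ ring pair and configures the channel through
 * TI-SCI. Returns the channel handle or an ERR_PTR() on failure.
 */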
struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
		const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
{
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
	if (!tx_chn)
		return ERR_PTR(-ENOMEM);

	tx_chn->common.dev = dev;
	tx_chn->common.swdata_size = cfg->swdata_size;
	tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
	tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
	tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
	tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;

	/* parse the UDMAP channel from DT */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&tx_chn->common, true);
	if (ret)
		goto err;

	tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
						tx_chn->common.psdata_size,
						tx_chn->common.swdata_size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tchan_id = tx_chn->common.ep_config->mapped_channel_id;
	else
		tx_chn->udma_tchan_id = -1;

	/* request and cfg UDMAP TX channel */
	tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax,
					      tx_chn->udma_tchan_id);
	if (IS_ERR(tx_chn->udma_tchanx)) {
		ret = PTR_ERR(tx_chn->udma_tchanx);
		dev_err(dev, "UDMAX tchanx get err %d\n", ret);
		goto err;
	}
	tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);

	tx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	tx_chn->common.chan_dev.parent = xudma_get_device(tx_chn->common.udmax);
	dev_set_name(&tx_chn->common.chan_dev, "tchan%d-0x%04x",
		     tx_chn->udma_tchan_id, tx_chn->common.dst_thread);
	ret = device_register(&tx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		tx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&tx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size);

	if (xudma_is_pktdma(tx_chn->common.udmax))
		tx_chn->udma_tflow_id = tx_chn->common.ep_config->default_flow_id;
	else
		tx_chn->udma_tflow_id = tx_chn->udma_tchan_id;

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(tx_chn->common.ringacc,
					    tx_chn->udma_tflow_id, -1,
					    &tx_chn->ringtx,
					    &tx_chn->ringtxcq);
	if (ret) {
		dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
		goto err;
	}

	/* Set the dma_dev for the rings to be configured */
	cfg->tx_cfg.dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);
	cfg->txcq_cfg.dma_dev = cfg->tx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		cfg->tx_cfg.asel = tx_chn->common.atype_asel;
		cfg->txcq_cfg.asel = tx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtx %d\n", ret);
		goto err;
	}

	ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringtxcq %d\n", ret);
		goto err;
	}

	/* request and cfg psi-l */
	tx_chn->common.src_thread =
			xudma_dev_get_psil_base(tx_chn->common.udmax) +
			tx_chn->udma_tchan_id;

	ret = k3_udma_glue_cfg_tx_chn(tx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg tchan %d\n", ret);
		goto err;
	}

	k3_udma_glue_dump_tx_chn(tx_chn);

	return tx_chn;

err:
	k3_udma_glue_release_tx_chn(tx_chn);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);

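/**
 * k3_udma_glue_release_tx_chn - undo k3_udma_glue_request_tx_chn()
 * @tx_chn: TX channel handle
 *
 * Unpairs the PSI-L threads if still paired, puts the UDMAP channel, frees
 * the TX/TXCQ rings and unregisters the channel device. Safe to call on a
 * partially initialized channel (it is used on the request error path).
 */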
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}

	if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx))
		xudma_tchan_put(tx_chn->common.udmax,
				tx_chn->udma_tchanx);

	if (tx_chn->ringtxcq)
		k3_ringacc_ring_free(tx_chn->ringtxcq);

	if (tx_chn->ringtx)
		k3_ringacc_ring_free(tx_chn->ringtx);

	if (tx_chn->common.chan_dev.parent) {
		device_unregister(&tx_chn->common.chan_dev);
		tx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn);

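/*
 * TX descriptor submission and completion helpers.
 *
 * k3_udma_glue_push_tx_chn() stamps the completion ring id into the
 * descriptor return policy and pushes it to the TX ring; it fails with
 * -ENOMEM once free_pkts (sized from the TXCQ ring) is exhausted.
 * k3_udma_glue_pop_tx_chn() pops a completed descriptor from the TXCQ
 * ring and returns the credit. A minimal, hypothetical caller sketch
 * (my_tx_complete() is illustrative, not part of this API):
 *
 *	ret = k3_udma_glue_push_tx_chn(tx_chn, desc_tx, desc_dma);
 *	...
 *	while (!k3_udma_glue_pop_tx_chn(tx_chn, &desc_dma))
 *		my_tx_complete(data, desc_dma);
 */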
int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			     struct cppi5_host_desc_t *desc_tx,
			     dma_addr_t desc_dma)
{
	u32 ringtxcq_id;

	if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0))
		return -ENOMEM;

	ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq);
	cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id);

	return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn);

int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			    dma_addr_t *desc_dma)
{
	int ret;

	ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma);
	if (!ret)
		atomic_inc(&tx_chn->free_pkts);

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn);

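/**
 * k3_udma_glue_enable_tx_chn - pair PSI-L threads and start the TX channel
 * @tx_chn: TX channel handle
 *
 * Pairs the source and destination PSI-L threads, then enables the peer
 * and the channel real-time registers. Returns 0 on success or a negative
 * error code if the PSI-L pairing request fails.
 */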
int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	int ret;

	ret = xudma_navss_psil_pair(tx_chn->common.udmax,
				    tx_chn->common.src_thread,
				    tx_chn->common.dst_thread);
	if (ret) {
		dev_err(tx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	tx_chn->psil_paired = true;

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn);

void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG, 0);

	xudma_tchanrt_write(tx_chn->udma_tchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2");

	if (tx_chn->psil_paired) {
		xudma_navss_psil_unpair(tx_chn->common.udmax,
					tx_chn->common.src_thread,
					tx_chn->common.dst_thread);
		tx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn);

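/**
 * k3_udma_glue_tdown_tx_chn - initiate TX channel teardown
 * @tx_chn: TX channel handle
 * @sync: when true, busy-wait (up to K3_UDMAX_TDOWN_TIMEOUT_US) for the
 *	  channel enable bit to clear before returning
 */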
void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1");

	xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN);

	val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_tchanrt_read(tx_chn->udma_tchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(tx_chn->common.dev, "TX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_tchanrt_read(tx_chn->udma_tchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n");
	k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn);

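/**
 * k3_udma_glue_reset_tx_chn - drain and reset the TX rings
 * @tx_chn: TX channel handle
 * @data: opaque pointer passed through to @cleanup
 * @cleanup: called for each descriptor still sitting in the TX ring so the
 *	     caller can unmap and free it
 */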
void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
			       void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma))
{
	struct device *dev = tx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_tx, i, ret;

	/*
	 * TXQ reset needs to be done in a special way as it is an input
	 * ring for UDMA and its state is cached by UDMA, so:
	 * 1) save TXQ occupancy
	 * 2) clean up TXQ and call the .cleanup() callback for each desc
	 * 3) reset TXQ in a special way
	 */
	occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx);
	dev_dbg(dev, "TX reset occ_tx %u\n", occ_tx);

	for (i = 0; i < occ_tx; i++) {
		ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "TX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	/* reset TXCQ as it is not an input for UDMA - expected to be empty */
	k3_ringacc_ring_reset(tx_chn->ringtxcq);
	k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn);

u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn)
{
	return tx_chn->common.hdesc_size;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size);

u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn)
{
	return k3_ringacc_get_ring_id(tx_chn->ringtxcq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id);

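/**
 * k3_udma_glue_tx_get_irq - get the TX completion IRQ
 * @tx_chn: TX channel handle
 *
 * For PKTDMA the IRQ belongs to the mapped TX flow; otherwise it is the
 * TXCQ ring interrupt. Returns the Linux virq number.
 */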
int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax)) {
		tx_chn->virq = xudma_pktdma_tflow_get_irq(tx_chn->common.udmax,
							  tx_chn->udma_tflow_id);
	} else {
		tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq);
	}

	return tx_chn->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);

struct device *
	k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
{
	if (xudma_is_pktdma(tx_chn->common.udmax) &&
	    (tx_chn->common.atype_asel == 14 || tx_chn->common.atype_asel == 15))
		return &tx_chn->common.chan_dev;

	return xudma_get_device(tx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);

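/*
 * PKTDMA carries the ASEL (address select) value in the upper bits of the
 * addresses written into CPPI5 descriptors. These helpers fold the
 * channel's ASEL into a dma_addr_t before it is written to a descriptor
 * and strip it again after reading one back, e.g. (hypothetical caller):
 *
 *	k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn, &buf_dma);
 *	cppi5_hdesc_attach_buf(desc_tx, buf_dma, len, buf_dma, len);
 *
 * Both are no-ops for non-PKTDMA devices or when no ASEL is configured.
 */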
void k3_udma_glue_tx_dma_to_cppi5_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr |= (u64)tx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_dma_to_cppi5_addr);

void k3_udma_glue_tx_cppi5_to_dma_addr(struct k3_udma_glue_tx_channel *tx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(tx_chn->common.udmax) ||
	    !tx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tx_cppi5_to_dma_addr);

static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req;
	int ret;

	memset(&req, 0, sizeof(req));

	req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |
			   TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID;

	req.nav_id = tisci_rm->tisci_dev_id;
	req.index = rx_chn->udma_rchan_id;
	req.rx_fetch_size = rx_chn->common.hdesc_size >> 2;
	/*
	 * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw
	 * and udmax impl, so just configure it to an invalid value.
	 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx);
	 */
	req.rxcq_qnum = 0xFFFF;
	if (!xudma_is_pktdma(rx_chn->common.udmax) && rx_chn->flow_num &&
	    rx_chn->flow_id_base != rx_chn->udma_rchan_id) {
		/* Default flow + extra ones */
		req.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |
				    TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID;
		req.flowid_start = rx_chn->flow_id_base;
		req.flowid_cnt = rx_chn->flow_num;
	}
	req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
	req.rx_atype = rx_chn->common.atype_asel;

	ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req);
	if (ret)
		dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n",
			rx_chn->udma_rchan_id, ret);

	return ret;
}

static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
					 u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	if (IS_ERR_OR_NULL(flow->udma_rflow))
		return;

	if (flow->ringrxfdq)
		k3_ringacc_ring_free(flow->ringrxfdq);

	if (flow->ringrx)
		k3_ringacc_ring_free(flow->ringrx);

	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;
	rx_chn->flows_ready--;
}

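/*
 * Configure one RX flow: reserve the rflow, request and configure the
 * RX/RXFDQ ring pair, then program the flow through the TI-SCI
 * rx_flow_cfg call. All four FDQ queue numbers are pointed at the same
 * free descriptor ring.
 */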
static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx,
				    struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax,
					   flow->udma_rflow_id);
	if (IS_ERR(flow->udma_rflow)) {
		ret = PTR_ERR(flow->udma_rflow);
		dev_err(dev, "UDMAX rflow get err %d\n", ret);
		return ret;
	}

	if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) {
		ret = -ENODEV;
		goto err_rflow_put;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		rx_ringfdq_id = flow->udma_rflow_id +
				xudma_get_rflow_ring_offset(rx_chn->common.udmax);
		rx_ring_id = 0;
	} else {
		rx_ring_id = flow_cfg->ring_rxq_id;
		rx_ringfdq_id = flow_cfg->ring_rxfdq0_id;
	}

	/* request and cfg rings */
	ret = k3_ringacc_request_rings_pair(rx_chn->common.ringacc,
					    rx_ringfdq_id, rx_ring_id,
					    &flow->ringrxfdq,
					    &flow->ringrx);
	if (ret) {
		dev_err(dev, "Failed to get RX/RXFDQ rings %d\n", ret);
		goto err_rflow_put;
	}

	/* Set the dma_dev for the rings to be configured */
	flow_cfg->rx_cfg.dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);
	flow_cfg->rxfdq_cfg.dma_dev = flow_cfg->rx_cfg.dma_dev;

	/* Set the ASEL value for DMA rings of PKTDMA */
	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow_cfg->rx_cfg.asel = rx_chn->common.atype_asel;
		flow_cfg->rxfdq_cfg.asel = rx_chn->common.atype_asel;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrx %d\n", ret);
		goto err_ringrxfdq_free;
	}

	ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg);
	if (ret) {
		dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret);
		goto err_ringrxfdq_free;
	}

	if (rx_chn->remote) {
		rx_ring_id = TI_SCI_RESOURCE_NULL;
		rx_ringfdq_id = TI_SCI_RESOURCE_NULL;
	} else {
		rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
		rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);
	}

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	if (rx_chn->common.epib)
		req.rx_einfo_present = 1;
	if (rx_chn->common.psdata_size)
		req.rx_psinfo_present = 1;
	if (flow_cfg->rx_error_handling)
		req.rx_error_handling = 1;
	req.rx_desc_type = 0;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_src_tag_hi_sel = 0;
	req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel;
	req.rx_dest_tag_hi_sel = 0;
	req.rx_dest_tag_lo_sel = 0;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id,
			ret);
		goto err_ringrxfdq_free;
	}

	rx_chn->flows_ready++;
	dev_dbg(dev, "flow%d config done. ready:%d\n",
		flow->udma_rflow_id, rx_chn->flows_ready);

	return 0;

err_ringrxfdq_free:
	k3_ringacc_ring_free(flow->ringrxfdq);
	k3_ringacc_ring_free(flow->ringrx);

err_rflow_put:
	xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow);
	flow->udma_rflow = NULL;

	return ret;
}

static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "dump_rx_chn:\n"
		"udma_rchan_id: %d\n"
		"src_thread: %08x\n"
		"dst_thread: %08x\n"
		"epib: %d\n"
		"hdesc_size: %u\n"
		"psdata_size: %u\n"
		"swdata_size: %u\n"
		"flow_id_base: %d\n"
		"flow_num: %d\n",
		chn->udma_rchan_id,
		chn->common.src_thread,
		chn->common.dst_thread,
		chn->common.epib,
		chn->common.hdesc_size,
		chn->common.psdata_size,
		chn->common.swdata_size,
		chn->flow_id_base,
		chn->flow_num);
}

static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn,
					char *mark)
{
	struct device *dev = chn->common.dev;

	dev_dbg(dev, "=== dump ===> %s\n", mark);

	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_CTL_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PEER_RT_EN_REG,
		xudma_rchanrt_read(chn->udma_rchanx,
				   UDMA_CHAN_RT_PEER_RT_EN_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_PCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_PCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_BCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_BCNT_REG));
	dev_dbg(dev, "0x%08X: %08X\n", UDMA_CHAN_RT_SBCNT_REG,
		xudma_rchanrt_read(chn->udma_rchanx, UDMA_CHAN_RT_SBCNT_REG));
}

static int
k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn,
			       struct k3_udma_glue_rx_channel_cfg *cfg)
{
	int ret;

	/* default rflow */
	if (cfg->flow_id_use_rxchan_id)
		return 0;

	/* not GP rflows */
	if (rx_chn->flow_id_base != -1 &&
	    !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		return 0;

	/* Allocate range of GP rflows */
	ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax,
					 rx_chn->flow_id_base,
					 rx_chn->flow_num);
	if (ret < 0) {
		dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n",
			rx_chn->flow_id_base, rx_chn->flow_num, ret);
		return ret;
	}
	rx_chn->flow_id_base = ret;

	return 0;
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name,
				 struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	struct psil_endpoint_config *ep_cfg;
	int ret, i;

	if (cfg->flow_id_num <= 0)
		return ERR_PTR(-EINVAL);

	if (cfg->flow_id_num != 1 &&
	    (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id))
		return ERR_PTR(-EINVAL);

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = false;

	/* parse the UDMAP channel from DT */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	ep_cfg = rx_chn->common.ep_config;

	if (xudma_is_pktdma(rx_chn->common.udmax))
		rx_chn->udma_rchan_id = ep_cfg->mapped_channel_id;
	else
		rx_chn->udma_rchan_id = -1;

	/* request and cfg UDMAP RX channel */
	rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax,
					      rx_chn->udma_rchan_id);
	if (IS_ERR(rx_chn->udma_rchanx)) {
		ret = PTR_ERR(rx_chn->udma_rchanx);
		dev_err(dev, "UDMAX rchanx get err %d\n", ret);
		goto err;
	}
	rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx);

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan%d-0x%04x",
		     rx_chn->udma_rchan_id, rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		int flow_start = cfg->flow_id_base;
		int flow_end;

		if (flow_start == -1)
			flow_start = ep_cfg->flow_start;

		flow_end = flow_start + cfg->flow_id_num - 1;
		if (flow_start < ep_cfg->flow_start ||
		    flow_end > (ep_cfg->flow_start + ep_cfg->flow_num - 1)) {
			dev_err(dev, "Invalid flow range requested\n");
			ret = -EINVAL;
			goto err;
		}
		rx_chn->flow_id_base = flow_start;
	} else {
		rx_chn->flow_id_base = cfg->flow_id_base;

		/* Use RX channel id as flow id: target dev can't generate flow_id */
		if (cfg->flow_id_use_rxchan_id)
			rx_chn->flow_id_base = rx_chn->udma_rchan_id;
	}

	rx_chn->flow_num = cfg->flow_id_num;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	/* request and cfg psi-l */
	rx_chn->common.dst_thread =
			xudma_dev_get_psil_base(rx_chn->common.udmax) +
			rx_chn->udma_rchan_id;

	ret = k3_udma_glue_cfg_rx_chn(rx_chn);
	if (ret) {
		dev_err(dev, "Failed to cfg rchan %d\n", ret);
		goto err;
	}

	/* init default RX flow only if flow_num = 1 */
	if (cfg->def_flow_cfg) {
		ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg);
		if (ret)
			goto err;
	}

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
				   struct k3_udma_glue_rx_channel_cfg *cfg)
{
	struct k3_udma_glue_rx_channel *rx_chn;
	int ret, i;

	if (cfg->flow_id_num <= 0 ||
	    cfg->flow_id_use_rxchan_id ||
	    cfg->def_flow_cfg ||
	    cfg->flow_id_base < 0)
		return ERR_PTR(-EINVAL);

	/*
	 * A remote RX channel is under the control of a remote CPU core, so
	 * Linux can only request it and manipulate it through its dedicated
	 * RX flows.
	 */

	rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
	if (!rx_chn)
		return ERR_PTR(-ENOMEM);

	rx_chn->common.dev = dev;
	rx_chn->common.swdata_size = cfg->swdata_size;
	rx_chn->remote = true;
	rx_chn->udma_rchan_id = -1;
	rx_chn->flow_num = cfg->flow_id_num;
	rx_chn->flow_id_base = cfg->flow_id_base;
	rx_chn->psil_paired = false;

	/* parse the UDMAP channel from DT */
	ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
					&rx_chn->common, false);
	if (ret)
		goto err;

	rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
						rx_chn->common.psdata_size,
						rx_chn->common.swdata_size);

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows) {
		ret = -ENOMEM;
		goto err;
	}

	rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
	rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
	dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
		     rx_chn->common.src_thread);
	ret = device_register(&rx_chn->common.chan_dev);
	if (ret) {
		dev_err(dev, "Channel Device registration failed %d\n", ret);
		put_device(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
		goto err;
	}

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		/* prepare the channel device as coherent */
		rx_chn->common.chan_dev.dma_coherent = true;
		dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
					     DMA_BIT_MASK(48));
	}

	ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
	if (ret)
		goto err;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	k3_udma_glue_dump_rx_chn(rx_chn);

	return rx_chn;

err:
	k3_udma_glue_release_rx_chn(rx_chn);
	return ERR_PTR(ret);
}

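/**
 * k3_udma_glue_request_rx_chn - allocate and configure an RX channel
 * @dev: glue layer consumer device
 * @name: channel name to look up in the "dma-names" DT property
 * @cfg: RX channel configuration (flow range, default flow, swdata size)
 *
 * Dispatches to the remote variant when @cfg->remote is set, in which case
 * only the RX flows are managed by Linux; otherwise requests and configures
 * a local UDMAP RX channel. Returns the channel handle or an ERR_PTR() on
 * failure.
 */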
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,
			    struct k3_udma_glue_rx_channel_cfg *cfg)
{
	if (cfg->remote)
		return k3_udma_glue_request_remote_rx_chn(dev, name, cfg);
	else
		return k3_udma_glue_request_rx_chn_priv(dev, name, cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn);

void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int i;

	if (IS_ERR_OR_NULL(rx_chn->common.udmax))
		return;

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}

	for (i = 0; i < rx_chn->flow_num; i++)
		k3_udma_glue_release_rx_flow(rx_chn, i);

	if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base))
		xudma_free_gp_rflow_range(rx_chn->common.udmax,
					  rx_chn->flow_id_base,
					  rx_chn->flow_num);

	if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx))
		xudma_rchan_put(rx_chn->common.udmax,
				rx_chn->udma_rchanx);

	if (rx_chn->common.chan_dev.parent) {
		device_unregister(&rx_chn->common.chan_dev);
		rx_chn->common.chan_dev.parent = NULL;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn);

int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
			      u32 flow_idx,
			      struct k3_udma_glue_rx_flow_cfg *flow_cfg)
{
	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init);

u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
				    u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow;

	if (flow_idx >= rx_chn->flow_num)
		return -EINVAL;

	flow = &rx_chn->flows[flow_idx];

	return k3_ringacc_get_ring_id(flow->ringrxfdq);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id);

u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn)
{
	return rx_chn->flow_id_base;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base);

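/*
 * Flow control for remote channels: only the flow routing is switched
 * here (both helpers return -EINVAL for locally owned channels). Enabling
 * points the flow at the local RX/RXFDQ rings; disabling routes it to
 * TI_SCI_RESOURCE_NULL so no further packets land in Linux-owned rings.
 */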
int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
				u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int rx_ring_id;
	int rx_ringfdq_id;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx);
	rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq);

	memset(&req, 0, sizeof(req));

	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = rx_ring_id;
	req.rx_fdq0_sz0_qnum = rx_ringfdq_id;
	req.rx_fdq1_qnum = rx_ringfdq_id;
	req.rx_fdq2_qnum = rx_ringfdq_id;
	req.rx_fdq3_qnum = rx_ringfdq_id;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable);

int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
				 u32 flow_idx)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];
	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
	struct device *dev = rx_chn->common.dev;
	struct ti_sci_msg_rm_udmap_flow_cfg req;
	int ret = 0;

	if (!rx_chn->remote)
		return -EINVAL;

	memset(&req, 0, sizeof(req));
	req.valid_params =
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
			TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
	req.nav_id = tisci_rm->tisci_dev_id;
	req.flow_index = flow->udma_rflow_id;
	req.rx_dest_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL;
	req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL;

	ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req);
	if (ret) {
		dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id,
			ret);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable);

int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	int ret;

	if (rx_chn->remote)
		return -EINVAL;

	if (rx_chn->flows_ready < rx_chn->flow_num)
		return -EINVAL;

	ret = xudma_navss_psil_pair(rx_chn->common.udmax,
				    rx_chn->common.src_thread,
				    rx_chn->common.dst_thread);
	if (ret) {
		dev_err(rx_chn->common.dev, "PSI-L request err %d\n", ret);
		return ret;
	}

	rx_chn->psil_paired = true;

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG,
			    UDMA_CHAN_RT_CTL_EN);

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en");
	return 0;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn);

void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
{
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1");

	xudma_rchanrt_write(rx_chn->udma_rchanx,
			    UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG, 0);

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2");

	if (rx_chn->psil_paired) {
		xudma_navss_psil_unpair(rx_chn->common.udmax,
					rx_chn->common.src_thread,
					rx_chn->common.dst_thread);
		rx_chn->psil_paired = false;
	}
}
EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn);

void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       bool sync)
{
	int i = 0;
	u32 val;

	if (rx_chn->remote)
		return;

	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1");

	xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_CHAN_RT_PEER_RT_EN_REG,
			    UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN);

	val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_CHAN_RT_CTL_REG);

	while (sync && (val & UDMA_CHAN_RT_CTL_EN)) {
		val = xudma_rchanrt_read(rx_chn->udma_rchanx,
					 UDMA_CHAN_RT_CTL_REG);
		udelay(1);
		if (i > K3_UDMAX_TDOWN_TIMEOUT_US) {
			dev_err(rx_chn->common.dev, "RX tdown timeout\n");
			break;
		}
		i++;
	}

	val = xudma_rchanrt_read(rx_chn->udma_rchanx,
				 UDMA_CHAN_RT_PEER_RT_EN_REG);
	if (sync && (val & UDMA_PEER_RT_EN_ENABLE))
		dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n");
	k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2");
}
EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn);

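/**
 * k3_udma_glue_reset_rx_chn - drain and reset the rings of one RX flow
 * @rx_chn: RX channel handle
 * @flow_num: flow index to reset
 * @data: opaque pointer passed through to @cleanup
 * @cleanup: called for each descriptor still sitting in the RX FDQ ring
 * @skip_fdq: skip the FDQ drain, e.g. when several flows share one FDQ
 */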
void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			       u32 flow_num, void *data,
			       void (*cleanup)(void *data, dma_addr_t desc_dma),
			       bool skip_fdq)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];
	struct device *dev = rx_chn->common.dev;
	dma_addr_t desc_dma;
	int occ_rx, i, ret;

	/* reset RXCQ as it is not an input for UDMA - expected to be empty */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrx);
	dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx);

	/* Skip RX FDQ in case one FDQ is used for the set of flows */
	if (skip_fdq)
		goto do_reset;

	/*
	 * RX FDQ reset needs to be done in a special way as it is an input
	 * ring for UDMA and its state is cached by UDMA, so:
	 * 1) save RX FDQ occupancy
	 * 2) clean up RX FDQ and call the .cleanup() callback for each desc
	 * 3) reset RX FDQ in a special way
	 */
	occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq);
	dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx);

	for (i = 0; i < occ_rx; i++) {
		ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma);
		if (ret) {
			if (ret != -ENODATA)
				dev_err(dev, "RX reset pop %d\n", ret);
			break;
		}
		cleanup(data, desc_dma);
	}

	k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx);

do_reset:
	k3_ringacc_ring_reset(flow->ringrx);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn);

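/*
 * RX descriptor recycling helpers: k3_udma_glue_push_rx_chn() returns a
 * (re)initialized descriptor to the flow's free descriptor queue, and
 * k3_udma_glue_pop_rx_chn() pops a received descriptor from the flow's RX
 * ring, typically from the handler of the IRQ returned by
 * k3_udma_glue_rx_get_irq() below.
 */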
int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			     u32 flow_num, struct cppi5_host_desc_t *desc_rx,
			     dma_addr_t desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn);

int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num, dma_addr_t *desc_dma)
{
	struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];

	return k3_ringacc_ring_pop(flow->ringrx, desc_dma);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn);

int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
			    u32 flow_num)
{
	struct k3_udma_glue_rx_flow *flow;

	flow = &rx_chn->flows[flow_num];

	if (xudma_is_pktdma(rx_chn->common.udmax)) {
		flow->virq = xudma_pktdma_rflow_get_irq(rx_chn->common.udmax,
							flow->udma_rflow_id);
	} else {
		flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx);
	}

	return flow->virq;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);

struct device *
	k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
{
	if (xudma_is_pktdma(rx_chn->common.udmax) &&
	    (rx_chn->common.atype_asel == 14 || rx_chn->common.atype_asel == 15))
		return &rx_chn->common.chan_dev;

	return xudma_get_device(rx_chn->common.udmax);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);

void k3_udma_glue_rx_dma_to_cppi5_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr |= (u64)rx_chn->common.atype_asel << K3_ADDRESS_ASEL_SHIFT;
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_dma_to_cppi5_addr);

void k3_udma_glue_rx_cppi5_to_dma_addr(struct k3_udma_glue_rx_channel *rx_chn,
				       dma_addr_t *addr)
{
	if (!xudma_is_pktdma(rx_chn->common.udmax) ||
	    !rx_chn->common.atype_asel)
		return;

	*addr &= (u64)GENMASK(K3_ADDRESS_ASEL_SHIFT - 1, 0);
}
EXPORT_SYMBOL_GPL(k3_udma_glue_rx_cppi5_to_dma_addr);

static int __init k3_udma_glue_class_init(void)
{
	return class_register(&k3_udma_glue_devclass);
}

module_init(k3_udma_glue_class_init);
MODULE_LICENSE("GPL v2");