/*
 * Copyright (C) 2014 Texas Instruments Incorporated
 * Authors:	Santosh Shilimkar <santosh.shilimkar@ti.com>
 *		Sandeep Nair <sandeep_n@ti.com>
 *		Cyril Chemparathy <cyril@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/io.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/of_dma.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/soc/ti/knav_dma.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#define REG_MASK		0xffffffff

#define DMA_LOOPBACK		BIT(31)
#define DMA_ENABLE		BIT(31)
#define DMA_TEARDOWN		BIT(30)

#define DMA_TX_FILT_PSWORDS	BIT(29)
#define DMA_TX_FILT_EINFO	BIT(30)
#define DMA_TX_PRIO_SHIFT	0
#define DMA_RX_PRIO_SHIFT	16
#define DMA_PRIO_MASK		GENMASK(3, 0)
#define DMA_PRIO_DEFAULT	0
#define DMA_RX_TIMEOUT_DEFAULT	17500 /* cycles */
#define DMA_RX_TIMEOUT_MASK	GENMASK(16, 0)
#define DMA_RX_TIMEOUT_SHIFT	0

#define CHAN_HAS_EPIB		BIT(30)
#define CHAN_HAS_PSINFO		BIT(29)
#define CHAN_ERR_RETRY		BIT(28)
#define CHAN_PSINFO_AT_SOP	BIT(25)
#define CHAN_SOP_OFF_SHIFT	16
#define CHAN_SOP_OFF_MASK	GENMASK(9, 0)
#define DESC_TYPE_SHIFT		26
#define DESC_TYPE_MASK		GENMASK(2, 0)

/*
 * QMGR & QNUM together make up 14 bits, with QMGR as the 2 MSBs in the
 * logical navigator cloud mapping scheme. Using the 14-bit physical
 * queue numbers directly maps into this scheme.
 */
#define CHAN_QNUM_MASK		GENMASK(14, 0)
#define DMA_MAX_QMS		4
#define DMA_TIMEOUT		1	/* msecs */
#define DMA_INVALID_ID		0xffff

struct reg_global {
	u32	revision;
	u32	perf_control;
	u32	emulation_control;
	u32	priority_control;
	u32	qm_base_address[DMA_MAX_QMS];
};

struct reg_chan {
	u32	control;
	u32	mode;
	u32	__rsvd[6];
};

struct reg_tx_sched {
	u32	prio;
};

struct reg_rx_flow {
	u32	control;
	u32	tags;
	u32	tag_sel;
	u32	fdq_sel[2];
	u32	thresh[3];
};

struct knav_dma_pool_device {
	struct device			*dev;
	struct list_head		list;
};

struct knav_dma_device {
	bool				loopback, enable_all;
	unsigned			tx_priority, rx_priority, rx_timeout;
	unsigned			logical_queue_managers;
	unsigned			qm_base_address[DMA_MAX_QMS];
	struct reg_global __iomem	*reg_global;
	struct reg_chan __iomem		*reg_tx_chan;
	struct reg_rx_flow __iomem	*reg_rx_flow;
	struct reg_chan __iomem		*reg_rx_chan;
	struct reg_tx_sched __iomem	*reg_tx_sched;
	unsigned			max_rx_chan, max_tx_chan;
	unsigned			max_rx_flow;
	char				name[32];
	atomic_t			ref_count;
	struct list_head		list;
	struct list_head		chan_list;
	spinlock_t			lock;
};

struct knav_dma_chan {
	enum dma_transfer_direction	direction;
	struct knav_dma_device		*dma;
	atomic_t			ref_count;

	/* registers */
	struct reg_chan __iomem		*reg_chan;
	struct reg_tx_sched __iomem	*reg_tx_sched;
	struct reg_rx_flow __iomem	*reg_rx_flow;

	/* configuration stuff */
	unsigned			channel, flow;
	struct knav_dma_cfg		cfg;
	struct list_head		list;
	spinlock_t			lock;
};

#define chan_number(ch)	((ch->direction == DMA_MEM_TO_DEV) ? \
			ch->channel : ch->flow)

static struct knav_dma_pool_device *kdev;

static bool device_ready;
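/**
 * knav_dma_device_ready() - check if the KeyStone Navigator DMA driver
 * has finished probing
 *
 * Returns true once the platform driver has registered and initialized
 * all of its DMA instances.
 */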
bool knav_dma_device_ready(void)
{
	return device_ready;
}
EXPORT_SYMBOL_GPL(knav_dma_device_ready);

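/*
 * A channel may be shared by multiple clients; a subsequent open is only
 * allowed if the requested configuration matches the one already in use.
 */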
static bool check_config(struct knav_dma_chan *chan, struct knav_dma_cfg *cfg)
{
	return !memcmp(&chan->cfg, cfg, sizeof(*cfg));
}

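/*
 * Program a channel's mode/control (TX) or flow (RX) registers from the
 * client-supplied configuration, then cache the configuration so later
 * opens can be checked against it.
 */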
static int chan_start(struct knav_dma_chan *chan,
			struct knav_dma_cfg *cfg)
{
	u32 v = 0;

	spin_lock(&chan->lock);
	if ((chan->direction == DMA_MEM_TO_DEV) && chan->reg_chan) {
		if (cfg->u.tx.filt_pswords)
			v |= DMA_TX_FILT_PSWORDS;
		if (cfg->u.tx.filt_einfo)
			v |= DMA_TX_FILT_EINFO;
		writel_relaxed(v, &chan->reg_chan->mode);
		writel_relaxed(DMA_ENABLE, &chan->reg_chan->control);
	}

	if (chan->reg_tx_sched)
		writel_relaxed(cfg->u.tx.priority, &chan->reg_tx_sched->prio);

	if (chan->reg_rx_flow) {
		v = 0;

		if (cfg->u.rx.einfo_present)
			v |= CHAN_HAS_EPIB;
		if (cfg->u.rx.psinfo_present)
			v |= CHAN_HAS_PSINFO;
		if (cfg->u.rx.err_mode == DMA_RETRY)
			v |= CHAN_ERR_RETRY;
		v |= (cfg->u.rx.desc_type & DESC_TYPE_MASK) << DESC_TYPE_SHIFT;
		if (cfg->u.rx.psinfo_at_sop)
			v |= CHAN_PSINFO_AT_SOP;
		v |= (cfg->u.rx.sop_offset & CHAN_SOP_OFF_MASK)
			<< CHAN_SOP_OFF_SHIFT;
		v |= cfg->u.rx.dst_q & CHAN_QNUM_MASK;

		writel_relaxed(v, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);

		v = cfg->u.rx.fdq[0] << 16;
		v |= cfg->u.rx.fdq[1] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[0]);

		v = cfg->u.rx.fdq[2] << 16;
		v |= cfg->u.rx.fdq[3] & CHAN_QNUM_MASK;
		writel_relaxed(v, &chan->reg_rx_flow->fdq_sel[1]);

		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* Keep a copy of the cfg */
	memcpy(&chan->cfg, cfg, sizeof(*cfg));
	spin_unlock(&chan->lock);

	return 0;
}

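/*
 * Request a channel teardown and poll (for up to DMA_TIMEOUT msecs) until
 * the hardware clears the enable bit.
 */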
static int chan_teardown(struct knav_dma_chan *chan)
{
	unsigned long end, value;

	if (!chan->reg_chan)
		return 0;

	/* indicate teardown */
	writel_relaxed(DMA_TEARDOWN, &chan->reg_chan->control);

	/* wait for the dma to shut itself down */
	end = jiffies + msecs_to_jiffies(DMA_TIMEOUT);
	do {
		value = readl_relaxed(&chan->reg_chan->control);
		if ((value & DMA_ENABLE) == 0)
			break;
	} while (time_after(end, jiffies));

	if (readl_relaxed(&chan->reg_chan->control) & DMA_ENABLE) {
		dev_err(kdev->dev, "timeout waiting for teardown\n");
		return -ETIMEDOUT;
	}

	return 0;
}

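/*
 * Quiesce a channel: detach the free descriptor queues first so the flow
 * starves, tear the channel down, then disconnect the completion side.
 */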
static void chan_stop(struct knav_dma_chan *chan)
{
	spin_lock(&chan->lock);
	if (chan->reg_rx_flow) {
		/* first detach fdqs, starve out the flow */
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[0]);
		writel_relaxed(0, &chan->reg_rx_flow->fdq_sel[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[0]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[1]);
		writel_relaxed(0, &chan->reg_rx_flow->thresh[2]);
	}

	/* teardown the dma channel */
	chan_teardown(chan);

	/* then disconnect the completion side */
	if (chan->reg_rx_flow) {
		writel_relaxed(0, &chan->reg_rx_flow->control);
		writel_relaxed(0, &chan->reg_rx_flow->tags);
		writel_relaxed(0, &chan->reg_rx_flow->tag_sel);
	}

	memset(&chan->cfg, 0, sizeof(struct knav_dma_cfg));
	spin_unlock(&chan->lock);

	dev_dbg(kdev->dev, "channel stopped\n");
}

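/* Unconditionally enable every TX channel of a DMA instance */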
static void dma_hw_enable_all(struct knav_dma_device *dma)
{
	int i;

	for (i = 0; i < dma->max_tx_chan; i++) {
		writel_relaxed(0, &dma->reg_tx_chan[i].mode);
		writel_relaxed(DMA_ENABLE, &dma->reg_tx_chan[i].control);
	}
}

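/*
 * One-time hardware setup for a DMA instance: loopback/emulation control,
 * RX retry timeout, TX/RX scheduling priorities, RX channel enables and
 * the logical queue manager base addresses.
 */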
static void knav_dma_hw_init(struct knav_dma_device *dma)
{
	unsigned v;
	int i;

	spin_lock(&dma->lock);
	v = dma->loopback ? DMA_LOOPBACK : 0;
	writel_relaxed(v, &dma->reg_global->emulation_control);

	v = readl_relaxed(&dma->reg_global->perf_control);
	v |= ((dma->rx_timeout & DMA_RX_TIMEOUT_MASK) << DMA_RX_TIMEOUT_SHIFT);
	writel_relaxed(v, &dma->reg_global->perf_control);

	v = ((dma->tx_priority << DMA_TX_PRIO_SHIFT) |
	     (dma->rx_priority << DMA_RX_PRIO_SHIFT));

	writel_relaxed(v, &dma->reg_global->priority_control);

	/* Always enable all Rx channels. Rx paths are managed using flows */
	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(DMA_ENABLE, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->logical_queue_managers; i++)
		writel_relaxed(dma->qm_base_address[i],
			       &dma->reg_global->qm_base_address[i]);
	spin_unlock(&dma->lock);
}

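/* Disable all RX and TX channels of a DMA instance */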
static void knav_dma_hw_destroy(struct knav_dma_device *dma)
{
	int i;
	unsigned v;

	spin_lock(&dma->lock);
	v = ~DMA_ENABLE & REG_MASK;

	for (i = 0; i < dma->max_rx_chan; i++)
		writel_relaxed(v, &dma->reg_rx_chan[i].control);

	for (i = 0; i < dma->max_tx_chan; i++)
		writel_relaxed(v, &dma->reg_tx_chan[i].control);
	spin_unlock(&dma->lock);
}

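/* debugfs support: dump the configuration of every open channel */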
static void dma_debug_show_channels(struct seq_file *s,
				    struct knav_dma_chan *chan)
{
	int i;

	seq_printf(s, "\t%s %d:\t",
		   ((chan->direction == DMA_MEM_TO_DEV) ? "tx chan" : "rx flow"),
		   chan_number(chan));

	if (chan->direction == DMA_MEM_TO_DEV) {
		seq_printf(s, "einfo - %d, pswords - %d, priority - %d\n",
			   chan->cfg.u.tx.filt_einfo,
			   chan->cfg.u.tx.filt_pswords,
			   chan->cfg.u.tx.priority);
	} else {
		seq_printf(s, "einfo - %d, psinfo - %d, desc_type - %d\n",
			   chan->cfg.u.rx.einfo_present,
			   chan->cfg.u.rx.psinfo_present,
			   chan->cfg.u.rx.desc_type);
		seq_printf(s, "\t\t\tdst_q: [%d], thresh: %d fdq: ",
			   chan->cfg.u.rx.dst_q,
			   chan->cfg.u.rx.thresh);
		for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; i++)
			seq_printf(s, "[%d]", chan->cfg.u.rx.fdq[i]);
		seq_puts(s, "\n");
	}
}

static void dma_debug_show_devices(struct seq_file *s,
				   struct knav_dma_device *dma)
{
	struct knav_dma_chan *chan;

	list_for_each_entry(chan, &dma->chan_list, list) {
		if (atomic_read(&chan->ref_count))
			dma_debug_show_channels(s, chan);
	}
}

static int knav_dma_debug_show(struct seq_file *s, void *v)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_read(&dma->ref_count)) {
			seq_printf(s, "%s : max_tx_chan: (%d), max_rx_flows: (%d)\n",
				   dma->name, dma->max_tx_chan, dma->max_rx_flow);
			dma_debug_show_devices(s, dma);
		}
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(knav_dma_debug);

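/*
 * Look up the "ti,navigator-dmas"/"ti,navigator-dma-names" properties of a
 * client node and return the channel (or flow) number matching @name, along
 * with the name of the DMA instance it belongs to.
 */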
static int of_channel_match_helper(struct device_node *np, const char *name,
				   const char **dma_instance)
{
	struct of_phandle_args args;
	struct device_node *dma_node;
	int index;

	dma_node = of_parse_phandle(np, "ti,navigator-dmas", 0);
	if (!dma_node)
		return -ENODEV;

	*dma_instance = dma_node->name;
	index = of_property_match_string(np, "ti,navigator-dma-names", name);
	if (index < 0) {
		dev_err(kdev->dev, "No 'ti,navigator-dma-names' property\n");
		return -ENODEV;
	}

	if (of_parse_phandle_with_fixed_args(np, "ti,navigator-dmas",
					     1, index, &args)) {
		dev_err(kdev->dev, "Missing the phandle args name %s\n", name);
		return -ENODEV;
	}

	if (args.args[0] < 0) {
		dev_err(kdev->dev, "Missing args for %s\n", name);
		return -ENODEV;
	}

	return args.args[0];
}

/**
 * knav_dma_open_channel() - try to set up an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 * @config:	dma configuration parameters
 *
 * Returns pointer to the appropriate DMA channel on success, or an error
 * code cast to a pointer on failure.
 */
void *knav_dma_open_channel(struct device *dev, const char *name,
			    struct knav_dma_cfg *config)
{
	struct knav_dma_device *dma = NULL, *iter1;
	struct knav_dma_chan *chan = NULL, *iter2;
	int chan_num = -1;
	const char *instance;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return (void *)-EINVAL;
	}

	chan_num = of_channel_match_helper(dev->of_node, name, &instance);
	if (chan_num < 0) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", name);
		return (void *)-EINVAL;
	}

	dev_dbg(kdev->dev, "initializing %s channel %d from DMA %s\n",
		config->direction == DMA_MEM_TO_DEV ? "transmit" :
		config->direction == DMA_DEV_TO_MEM ? "receive" :
		"unknown", chan_num, instance);

	if (config->direction != DMA_MEM_TO_DEV &&
	    config->direction != DMA_DEV_TO_MEM) {
		dev_err(kdev->dev, "bad direction\n");
		return (void *)-EINVAL;
	}

	/* Look for correct dma instance */
	list_for_each_entry(iter1, &kdev->list, list) {
		if (!strcmp(iter1->name, instance)) {
			dma = iter1;
			break;
		}
	}
	if (!dma) {
		dev_err(kdev->dev, "No DMA instance with name %s\n", instance);
		return (void *)-EINVAL;
	}

	/* Look for correct dma channel from dma instance */
	list_for_each_entry(iter2, &dma->chan_list, list) {
		if (config->direction == DMA_MEM_TO_DEV) {
			if (iter2->channel == chan_num) {
				chan = iter2;
				break;
			}
		} else {
			if (iter2->flow == chan_num) {
				chan = iter2;
				break;
			}
		}
	}
	if (!chan) {
		dev_err(kdev->dev, "channel %d is not in DMA %s\n",
			chan_num, instance);
		return (void *)-EINVAL;
	}

	if (atomic_read(&chan->ref_count) >= 1) {
		if (!check_config(chan, config)) {
			dev_err(kdev->dev, "channel %d config mismatch\n",
				chan_num);
			return (void *)-EINVAL;
		}
	}

	if (atomic_inc_return(&chan->dma->ref_count) <= 1)
		knav_dma_hw_init(chan->dma);

	if (atomic_inc_return(&chan->ref_count) <= 1)
		chan_start(chan, config);

	dev_dbg(kdev->dev, "channel %d opened from DMA %s\n",
		chan_num, instance);

	return chan;
}
EXPORT_SYMBOL_GPL(knav_dma_open_channel);

/**
 * knav_dma_close_channel() - Destroy a dma channel
 * @channel:	dma channel handle
 */
void knav_dma_close_channel(void *channel)
{
	struct knav_dma_chan *chan = channel;

	if (!kdev) {
		pr_err("keystone-navigator-dma driver not registered\n");
		return;
	}

	if (atomic_dec_return(&chan->ref_count) <= 0)
		chan_stop(chan);

	if (atomic_dec_return(&chan->dma->ref_count) <= 0)
		knav_dma_hw_destroy(chan->dma);

	dev_dbg(kdev->dev, "channel %d or flow %d closed from DMA %s\n",
		chan->channel, chan->flow, chan->dma->name);
}
EXPORT_SYMBOL_GPL(knav_dma_close_channel);

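/* Map one register region of the DMA node and report its size */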
static void __iomem *pktdma_get_regs(struct knav_dma_device *dma,
				     struct device_node *node,
				     unsigned index, resource_size_t *_size)
{
	struct device *dev = kdev->dev;
	struct resource res;
	void __iomem *regs;
	int ret;

	ret = of_address_to_resource(node, index, &res);
	if (ret) {
		dev_err(dev, "Can't translate of node(%pOFn) address for index(%d)\n",
			node, index);
		return ERR_PTR(ret);
	}

	regs = devm_ioremap_resource(kdev->dev, &res);
	if (IS_ERR(regs))
		dev_err(dev, "Failed to map register base for index(%d) node(%pOFn)\n",
			index, node);
	if (_size)
		*_size = resource_size(&res);

	return regs;
}

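/* Point a channel at its per-flow (RX) or per-channel (TX) registers */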
static int pktdma_init_rx_chan(struct knav_dma_chan *chan, u32 flow)
{
	struct knav_dma_device *dma = chan->dma;

	chan->flow = flow;
	chan->reg_rx_flow = dma->reg_rx_flow + flow;
	chan->channel = DMA_INVALID_ID;
	dev_dbg(kdev->dev, "rx flow(%d) (%p)\n", chan->flow, chan->reg_rx_flow);

	return 0;
}

static int pktdma_init_tx_chan(struct knav_dma_chan *chan, u32 channel)
{
	struct knav_dma_device *dma = chan->dma;

	chan->channel = channel;
	chan->reg_chan = dma->reg_tx_chan + channel;
	chan->reg_tx_sched = dma->reg_tx_sched + channel;
	chan->flow = DMA_INVALID_ID;
	dev_dbg(kdev->dev, "tx channel(%d) (%p)\n", chan->channel, chan->reg_chan);

	return 0;
}

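/* Allocate a channel descriptor and add it to the instance's channel list */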
static int pktdma_init_chan(struct knav_dma_device *dma,
			    enum dma_transfer_direction dir,
			    unsigned chan_num)
{
	struct device *dev = kdev->dev;
	struct knav_dma_chan *chan;
	int ret = -EINVAL;

	chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&chan->list);
	chan->dma = dma;
	chan->direction = DMA_TRANS_NONE;
	atomic_set(&chan->ref_count, 0);
	spin_lock_init(&chan->lock);

	if (dir == DMA_MEM_TO_DEV) {
		chan->direction = dir;
		ret = pktdma_init_tx_chan(chan, chan_num);
	} else if (dir == DMA_DEV_TO_MEM) {
		chan->direction = dir;
		ret = pktdma_init_rx_chan(chan, chan_num);
	} else {
		dev_err(dev, "channel(%d) direction unknown\n", chan_num);
	}

	list_add_tail(&chan->list, &dma->chan_list);

	return ret;
}

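/*
 * Parse one DMA instance from the device tree: map its register regions,
 * size the channel/flow arrays from the region sizes, and create the
 * per-channel bookkeeping.
 */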
static int dma_init(struct device_node *cloud, struct device_node *dma_node)
{
	unsigned max_tx_chan, max_rx_chan, max_rx_flow, max_tx_sched;
	struct device_node *node = dma_node;
	struct knav_dma_device *dma;
	int ret, len, num_chan = 0;
	resource_size_t size;
	u32 timeout;
	u32 i;

	dma = devm_kzalloc(kdev->dev, sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		dev_err(kdev->dev, "could not allocate driver mem\n");
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&dma->list);
	INIT_LIST_HEAD(&dma->chan_list);

	if (!of_find_property(cloud, "ti,navigator-cloud-address", &len)) {
		dev_err(kdev->dev, "unspecified navigator cloud addresses\n");
		return -ENODEV;
	}

	dma->logical_queue_managers = len / sizeof(u32);
	if (dma->logical_queue_managers > DMA_MAX_QMS) {
		dev_warn(kdev->dev, "too many queue mgrs(>%d) rest ignored\n",
			 dma->logical_queue_managers);
		dma->logical_queue_managers = DMA_MAX_QMS;
	}

	ret = of_property_read_u32_array(cloud, "ti,navigator-cloud-address",
					 dma->qm_base_address,
					 dma->logical_queue_managers);
	if (ret) {
		dev_err(kdev->dev, "invalid navigator cloud addresses\n");
		return -ENODEV;
	}

	dma->reg_global = pktdma_get_regs(dma, node, 0, &size);
	if (IS_ERR(dma->reg_global))
		return PTR_ERR(dma->reg_global);
	if (size < sizeof(struct reg_global)) {
		dev_err(kdev->dev, "bad size %pa for global regs\n", &size);
		return -ENODEV;
	}

	dma->reg_tx_chan = pktdma_get_regs(dma, node, 1, &size);
	if (IS_ERR(dma->reg_tx_chan))
		return PTR_ERR(dma->reg_tx_chan);

	max_tx_chan = size / sizeof(struct reg_chan);
	dma->reg_rx_chan = pktdma_get_regs(dma, node, 2, &size);
	if (IS_ERR(dma->reg_rx_chan))
		return PTR_ERR(dma->reg_rx_chan);

	max_rx_chan = size / sizeof(struct reg_chan);
	dma->reg_tx_sched = pktdma_get_regs(dma, node, 3, &size);
	if (IS_ERR(dma->reg_tx_sched))
		return PTR_ERR(dma->reg_tx_sched);

	max_tx_sched = size / sizeof(struct reg_tx_sched);
	dma->reg_rx_flow = pktdma_get_regs(dma, node, 4, &size);
	if (IS_ERR(dma->reg_rx_flow))
		return PTR_ERR(dma->reg_rx_flow);

	max_rx_flow = size / sizeof(struct reg_rx_flow);
	dma->rx_priority = DMA_PRIO_DEFAULT;
	dma->tx_priority = DMA_PRIO_DEFAULT;

	dma->enable_all = (of_get_property(node, "ti,enable-all", NULL) != NULL);
	dma->loopback = (of_get_property(node, "ti,loop-back", NULL) != NULL);

	ret = of_property_read_u32(node, "ti,rx-retry-timeout", &timeout);
	if (ret < 0) {
		dev_dbg(kdev->dev, "unspecified rx timeout, using value %d\n",
			DMA_RX_TIMEOUT_DEFAULT);
		timeout = DMA_RX_TIMEOUT_DEFAULT;
	}

	dma->rx_timeout = timeout;
	dma->max_rx_chan = max_rx_chan;
	dma->max_rx_flow = max_rx_flow;
	dma->max_tx_chan = min(max_tx_chan, max_tx_sched);
	atomic_set(&dma->ref_count, 0);
	strcpy(dma->name, node->name);
	spin_lock_init(&dma->lock);

	for (i = 0; i < dma->max_tx_chan; i++) {
		if (pktdma_init_chan(dma, DMA_MEM_TO_DEV, i) >= 0)
			num_chan++;
	}

	for (i = 0; i < dma->max_rx_flow; i++) {
		if (pktdma_init_chan(dma, DMA_DEV_TO_MEM, i) >= 0)
			num_chan++;
	}

	list_add_tail(&dma->list, &kdev->list);

	/*
	 * For DSP software use cases or userspace transport software, set up
	 * all the DMA hardware resources.
	 */
	if (dma->enable_all) {
		atomic_inc(&dma->ref_count);
		knav_dma_hw_init(dma);
		dma_hw_enable_all(dma);
	}

	dev_info(kdev->dev, "DMA %s registered %d logical channels, flows %d, tx chans: %d, rx chans: %d%s\n",
		 dma->name, num_chan, dma->max_rx_flow,
		 dma->max_tx_chan, dma->max_rx_chan,
		 dma->loopback ? ", loopback" : "");

	return 0;
}

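/* Probe: power up the device and initialize every child DMA instance */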
static int knav_dma_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *child;
	int ret = 0;

	if (!node) {
		dev_err(&pdev->dev, "could not find device info\n");
		return -EINVAL;
	}

	kdev = devm_kzalloc(dev,
			    sizeof(struct knav_dma_pool_device), GFP_KERNEL);
	if (!kdev) {
		dev_err(dev, "could not allocate driver mem\n");
		return -ENOMEM;
	}

	kdev->dev = dev;
	INIT_LIST_HEAD(&kdev->list);

	pm_runtime_enable(kdev->dev);
	ret = pm_runtime_resume_and_get(kdev->dev);
	if (ret < 0) {
		dev_err(kdev->dev, "unable to enable pktdma, err %d\n", ret);
		goto err_pm_disable;
	}

	/* Initialise all packet dmas */
	for_each_child_of_node(node, child) {
		ret = dma_init(node, child);
		if (ret) {
			of_node_put(child);
			dev_err(&pdev->dev, "init failed with %d\n", ret);
			break;
		}
	}

	if (list_empty(&kdev->list)) {
		dev_err(dev, "no valid dma instance\n");
		ret = -ENODEV;
		goto err_put_sync;
	}

	debugfs_create_file("knav_dma", S_IFREG | S_IRUGO, NULL, NULL,
			    &knav_dma_debug_fops);

	device_ready = true;
	return ret;

err_put_sync:
	pm_runtime_put_sync(kdev->dev);
err_pm_disable:
	pm_runtime_disable(kdev->dev);

	return ret;
}

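/* Tear down the hardware of each unreferenced instance and power down */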
static int knav_dma_remove(struct platform_device *pdev)
{
	struct knav_dma_device *dma;

	list_for_each_entry(dma, &kdev->list, list) {
		if (atomic_dec_return(&dma->ref_count) == 0)
			knav_dma_hw_destroy(dma);
	}

	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

static const struct of_device_id of_match[] = {
	{ .compatible = "ti,keystone-navigator-dma", },
	{},
};

MODULE_DEVICE_TABLE(of, of_match);

static struct platform_driver knav_dma_driver = {
	.probe	= knav_dma_probe,
	.remove	= knav_dma_remove,
	.driver = {
		.name		= "keystone-navigator-dma",
		.of_match_table	= of_match,
	},
};
module_platform_driver(knav_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("TI Keystone Navigator Packet DMA driver");
MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
MODULE_AUTHOR("Santosh Shilimkar <santosh.shilimkar@ti.com>");