// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_adm.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include "../dmaengine.h"
#include "../virt-dma.h"
/* ADM registers - calculated from channel number and security domain */
#define ADM_CHAN_MULTI			0x4
#define ADM_CI_MULTI			0x4
#define ADM_CRCI_MULTI			0x4
#define ADM_EE_MULTI			0x800
#define ADM_CHAN_OFFS(chan)		(ADM_CHAN_MULTI * (chan))
#define ADM_EE_OFFS(ee)			(ADM_EE_MULTI * (ee))
#define ADM_CHAN_EE_OFFS(chan, ee)	(ADM_CHAN_OFFS(chan) + ADM_EE_OFFS(ee))
#define ADM_CI_OFFS(ci)			(ADM_CI_MULTI * (ci))
#define ADM_CH_CMD_PTR(chan, ee)	(ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_RSLT(chan, ee)		(0x40 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_FLUSH_STATE0(chan, ee)	(0x80 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_STATUS_SD(chan, ee)	(0x200 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_CH_CONF(chan)		(0x240 + ADM_CHAN_OFFS(chan))
#define ADM_CH_RSLT_CONF(chan, ee)	(0x300 + ADM_CHAN_EE_OFFS(chan, ee))
#define ADM_SEC_DOMAIN_IRQ_STATUS(ee)	(0x380 + ADM_EE_OFFS(ee))
#define ADM_CI_CONF(ci)			(0x390 + (ci) * ADM_CI_MULTI)
#define ADM_GP_CTL			0x3d8
#define ADM_CRCI_CTL(crci, ee)		(0x400 + (crci) * ADM_CRCI_MULTI + \
					ADM_EE_OFFS(ee))
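
/*
 * Worked example (illustrative): for channel 3 in execution environment 1,
 * the command pointer register sits at ADM_CH_CMD_PTR(3, 1) =
 * 0x4 * 3 + 0x800 * 1 = 0x80c, and its result register at
 * 0x40 + 0x80c = 0x84c.
 */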

/* channel status */
#define ADM_CH_STATUS_VALID	BIT(1)

/* channel result */
#define ADM_CH_RSLT_VALID	BIT(31)
#define ADM_CH_RSLT_ERR		BIT(3)
#define ADM_CH_RSLT_FLUSH	BIT(2)
#define ADM_CH_RSLT_TPD		BIT(1)

/* channel conf */
#define ADM_CH_CONF_SHADOW_EN		BIT(12)
#define ADM_CH_CONF_MPU_DISABLE		BIT(11)
#define ADM_CH_CONF_PERM_MPU_CONF	BIT(9)
#define ADM_CH_CONF_FORCE_RSLT_EN	BIT(7)
#define ADM_CH_CONF_SEC_DOMAIN(ee)	((((ee) & 0x3) << 4) | (((ee) & 0x4) << 11))

/* channel result conf */
#define ADM_CH_RSLT_CONF_FLUSH_EN	BIT(1)
#define ADM_CH_RSLT_CONF_IRQ_EN		BIT(0)

/* CRCI CTL */
#define ADM_CRCI_CTL_MUX_SEL	BIT(18)
#define ADM_CRCI_CTL_RST	BIT(17)

/* CI configuration */
#define ADM_CI_RANGE_END(x)	((x) << 24)
#define ADM_CI_RANGE_START(x)	((x) << 16)
#define ADM_CI_BURST_4_WORDS	BIT(2)
#define ADM_CI_BURST_8_WORDS	BIT(3)

/* GP CTL */
#define ADM_GP_CTL_LP_EN	BIT(12)
#define ADM_GP_CTL_LP_CNT(x)	((x) << 8)

/* Command pointer list entry */
#define ADM_CPLE_LP		BIT(31)
#define ADM_CPLE_CMD_PTR_LIST	BIT(29)

/* Command list entry */
#define ADM_CMD_LC		BIT(31)
#define ADM_CMD_DST_CRCI(n)	(((n) & 0xf) << 7)
#define ADM_CMD_SRC_CRCI(n)	(((n) & 0xf) << 3)

#define ADM_CMD_TYPE_SINGLE	0x0
#define ADM_CMD_TYPE_BOX	0x3

#define ADM_CRCI_MUX_SEL	BIT(4)
#define ADM_DESC_ALIGN		8
#define ADM_MAX_XFER		(SZ_64K - 1)
#define ADM_MAX_ROWS		(SZ_64K - 1)
#define ADM_MAX_CHANNELS	16

struct adm_desc_hw_box {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 row_len;
	u32 num_rows;
	u32 row_offset;
};

struct adm_desc_hw_single {
	u32 cmd;
	u32 src_addr;
	u32 dst_addr;
	u32 len;
};

struct adm_async_desc {
	struct virt_dma_desc vd;
	struct adm_device *adev;

	size_t length;
	enum dma_transfer_direction dir;
	dma_addr_t dma_addr;
	size_t dma_len;

	void *cpl;
	dma_addr_t cp_addr;
	u32 crci;
	u32 mux;
	u32 blk_size;
};

struct adm_chan {
	struct virt_dma_chan vc;
	struct adm_device *adev;

	/* parsed from DT */
	u32 id;			/* channel id */

	struct adm_async_desc *curr_txd;
	struct dma_slave_config slave;
	u32 crci;
	u32 mux;
	struct list_head node;

	int error;
	int initialized;
};

static inline struct adm_chan *to_adm_chan(struct dma_chan *common)
{
	return container_of(common, struct adm_chan, vc.chan);
}

struct adm_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct adm_chan *channels;

	u32 ee;

	struct clk *core_clk;
	struct clk *iface_clk;

	struct reset_control *clk_reset;
	struct reset_control *c0_reset;
	struct reset_control *c1_reset;
	struct reset_control *c2_reset;
	int irq;
};

/**
 * adm_free_chan - Frees dma resources associated with the specific channel
 *
 * @chan: dma channel
 *
 * Free all allocated descriptors associated with this channel
 */
static void adm_free_chan(struct dma_chan *chan)
{
	/* free all queued descriptors */
	vchan_free_chan_resources(to_virt_chan(chan));
}

/**
 * adm_get_blksize - Get block size from burst value
 *
 * @burst: Burst size of transaction
 */
static int adm_get_blksize(unsigned int burst)
{
	int ret;

	switch (burst) {
	case 16:
	case 32:
	case 64:
	case 128:
		ret = ffs(burst >> 4) - 1;
		break;
	case 192:
		ret = 4;
		break;
	case 256:
		ret = 5;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
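
/*
 * Worked example (illustrative): a 32-byte burst maps to
 * ffs(32 >> 4) - 1 = ffs(2) - 1 = 1, so the power-of-two bursts
 * 16/32/64/128 encode as block sizes 0/1/2/3, with 192 and 256
 * special-cased as 4 and 5.
 */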

/**
 * adm_process_fc_descriptors - Process descriptors for flow controlled xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @crci: CRCI value
 * @burst: Burst size of transaction
 * @direction: DMA transfer direction
 */
static void *adm_process_fc_descriptors(struct adm_chan *achan, void *desc,
					struct scatterlist *sg, u32 crci,
					u32 burst,
					enum dma_transfer_direction direction)
{
	struct adm_desc_hw_box *box_desc = NULL;
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 rows, row_offset, crci_cmd;
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		crci_cmd = ADM_CMD_SRC_CRCI(crci);
		row_offset = burst;
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		crci_cmd = ADM_CMD_DST_CRCI(crci);
		row_offset = burst << 16;
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}

	while (remainder >= burst) {
		box_desc = desc;
		box_desc->cmd = ADM_CMD_TYPE_BOX | crci_cmd;
		box_desc->row_offset = row_offset;
		box_desc->src_addr = *src;
		box_desc->dst_addr = *dst;

		rows = remainder / burst;
		rows = min_t(u32, rows, ADM_MAX_ROWS);
		box_desc->num_rows = rows << 16 | rows;
		box_desc->row_len = burst << 16 | burst;

		*incr_addr += burst * rows;
		remainder -= burst * rows;
		desc += sizeof(*box_desc);
	}

	/* if leftover bytes, do one single descriptor */
	if (remainder) {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE | crci_cmd;
		single_desc->len = remainder;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		desc += sizeof(*single_desc);

		if (sg_is_last(sg))
			single_desc->cmd |= ADM_CMD_LC;
	} else {
		if (box_desc && sg_is_last(sg))
			box_desc->cmd |= ADM_CMD_LC;
	}

	return desc;
}
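
/*
 * Worked example (illustrative): with device_fc set, a 100-byte
 * scatterlist entry and a 16-byte burst become one box descriptor
 * covering 100 / 16 = 6 rows of 16 bytes (96 bytes), plus one single
 * descriptor for the 4 leftover bytes.
 */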

/**
 * adm_process_non_fc_descriptors - Process descriptors for non-fc xfers
 *
 * @achan: ADM channel
 * @desc: Descriptor memory pointer
 * @sg: Scatterlist entry
 * @direction: DMA transfer direction
 */
static void *adm_process_non_fc_descriptors(struct adm_chan *achan, void *desc,
					    struct scatterlist *sg,
					    enum dma_transfer_direction direction)
{
	struct adm_desc_hw_single *single_desc;
	u32 remainder = sg_dma_len(sg);
	u32 mem_addr = sg_dma_address(sg);
	u32 *incr_addr = &mem_addr;
	u32 *src, *dst;

	if (direction == DMA_DEV_TO_MEM) {
		src = &achan->slave.src_addr;
		dst = &mem_addr;
	} else {
		src = &mem_addr;
		dst = &achan->slave.dst_addr;
	}

	do {
		single_desc = desc;
		single_desc->cmd = ADM_CMD_TYPE_SINGLE;
		single_desc->src_addr = *src;
		single_desc->dst_addr = *dst;
		single_desc->len = (remainder > ADM_MAX_XFER) ?
				ADM_MAX_XFER : remainder;

		remainder -= single_desc->len;
		*incr_addr += single_desc->len;
		desc += sizeof(*single_desc);
	} while (remainder);

	/* set last command if this is the end of the whole transaction */
	if (sg_is_last(sg))
		single_desc->cmd |= ADM_CMD_LC;

	return desc;
}
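
/*
 * Worked example (illustrative): a 150000-byte scatterlist entry is
 * split into ceil(150000 / 65535) = 3 single descriptors of 65535,
 * 65535, and 18930 bytes, since each single descriptor moves at most
 * ADM_MAX_XFER = SZ_64K - 1 bytes.
 */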

/**
 * adm_prep_slave_sg - Prep slave sg transaction
 *
 * @chan: dma channel
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @flags: DMA flags
 * @context: transfer context (unused)
 */
static struct dma_async_tx_descriptor *adm_prep_slave_sg(struct dma_chan *chan,
							 struct scatterlist *sgl,
							 unsigned int sg_len,
							 enum dma_transfer_direction direction,
							 unsigned long flags,
							 void *context)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;
	struct scatterlist *sg;
	dma_addr_t cple_addr;
	u32 i, burst;
	u32 single_count = 0, box_count = 0, crci = 0;
	void *desc;
	u32 *cple;
	int blk_size = 0;

	if (!is_slave_direction(direction)) {
		dev_err(adev->dev, "invalid dma direction\n");
		return NULL;
	}

	/* get burst value from slave configuration */
	burst = (direction == DMA_MEM_TO_DEV) ?
		achan->slave.dst_maxburst :
		achan->slave.src_maxburst;

	/* if using flow control, validate burst and crci values */
	if (achan->slave.device_fc) {
		blk_size = adm_get_blksize(burst);
		if (blk_size < 0) {
			dev_err(adev->dev, "invalid burst value: %d\n",
				burst);
			return ERR_PTR(-EINVAL);
		}

		crci = achan->crci & 0xf;
		if (!crci || achan->crci > 0x1f) {
			dev_err(adev->dev, "invalid crci value\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* iterate through sgs and compute allocation size of structures */
	for_each_sg(sgl, sg, sg_len, i) {
		if (achan->slave.device_fc) {
			box_count += DIV_ROUND_UP(sg_dma_len(sg) / burst,
						  ADM_MAX_ROWS);
			if (sg_dma_len(sg) % burst)
				single_count++;
		} else {
			single_count += DIV_ROUND_UP(sg_dma_len(sg),
						     ADM_MAX_XFER);
		}
	}

	async_desc = kzalloc(sizeof(*async_desc), GFP_NOWAIT);
	if (!async_desc)
		return ERR_PTR(-ENOMEM);

	async_desc->mux = achan->mux ? ADM_CRCI_CTL_MUX_SEL : 0;
	async_desc->crci = crci;
	async_desc->blk_size = blk_size;
	async_desc->dma_len = single_count * sizeof(struct adm_desc_hw_single) +
			      box_count * sizeof(struct adm_desc_hw_box) +
			      sizeof(*cple) + 2 * ADM_DESC_ALIGN;

	async_desc->cpl = kzalloc(async_desc->dma_len, GFP_NOWAIT);
	if (!async_desc->cpl)
		goto free;

	async_desc->adev = adev;

	/* both command list entry and descriptors must be 8 byte aligned */
	cple = PTR_ALIGN(async_desc->cpl, ADM_DESC_ALIGN);
	desc = PTR_ALIGN(cple + 1, ADM_DESC_ALIGN);

	for_each_sg(sgl, sg, sg_len, i) {
		async_desc->length += sg_dma_len(sg);

		if (achan->slave.device_fc)
			desc = adm_process_fc_descriptors(achan, desc, sg, crci,
							  burst, direction);
		else
			desc = adm_process_non_fc_descriptors(achan, desc, sg,
							      direction);
	}

	async_desc->dma_addr = dma_map_single(adev->dev, async_desc->cpl,
					      async_desc->dma_len,
					      DMA_TO_DEVICE);
	if (dma_mapping_error(adev->dev, async_desc->dma_addr))
		goto free;

	cple_addr = async_desc->dma_addr + ((void *)cple - async_desc->cpl);

	/* init cmd list */
	dma_sync_single_for_cpu(adev->dev, cple_addr, sizeof(*cple),
				DMA_TO_DEVICE);
	*cple = ADM_CPLE_LP;
	*cple |= (async_desc->dma_addr + ADM_DESC_ALIGN) >> 3;
	dma_sync_single_for_device(adev->dev, cple_addr, sizeof(*cple),
				   DMA_TO_DEVICE);

	return vchan_tx_prep(&achan->vc, &async_desc->vd, flags);

free:
	kfree(async_desc->cpl);
	kfree(async_desc);
	return ERR_PTR(-ENOMEM);
}
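
/*
 * Layout sketch (illustrative): the mapped buffer starts with a single
 * command pointer list entry whose pointer field is the 8-byte-aligned
 * bus address of the descriptor list shifted right by 3, with
 * ADM_CPLE_LP marking it as the last entry; the box/single hardware
 * descriptors follow at the next 8-byte boundary.
 */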

/**
 * adm_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 *
 * Dequeues and frees all transactions, aborts current transaction.
 * No callbacks are done.
 */
static int adm_terminate_all(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct adm_device *adev = achan->adev;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&achan->vc.lock, flags);
	vchan_get_all_descriptors(&achan->vc, &head);

	/* send flush command to terminate current transaction */
	writel_relaxed(0x0,
		       adev->regs + ADM_CH_FLUSH_STATE0(achan->id, adev->ee));

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	vchan_dma_desc_free_list(&achan->vc, &head);

	return 0;
}

static int adm_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct qcom_adm_peripheral_config *config = cfg->peripheral_config;
	unsigned long flag;

	spin_lock_irqsave(&achan->vc.lock, flag);
	memcpy(&achan->slave, cfg, sizeof(struct dma_slave_config));
	if (cfg->peripheral_size == sizeof(*config))
		achan->crci = config->crci;
	spin_unlock_irqrestore(&achan->vc.lock, flag);

	return 0;
}
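
/*
 * Example client configuration (illustrative sketch; fifo_addr and the
 * CRCI number are assumptions for a hypothetical peripheral):
 *
 *	struct qcom_adm_peripheral_config periph = { .crci = 1 };
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_addr,
 *		.src_maxburst = 16,
 *		.device_fc = true,
 *		.peripheral_config = &periph,
 *		.peripheral_size = sizeof(periph),
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */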

/**
 * adm_start_dma - start next transaction
 * @achan: ADM dma channel
 */
static void adm_start_dma(struct adm_chan *achan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&achan->vc);
	struct adm_device *adev = achan->adev;
	struct adm_async_desc *async_desc;

	lockdep_assert_held(&achan->vc.lock);

	if (!vd)
		return;

	list_del(&vd->node);

	async_desc = container_of(vd, struct adm_async_desc, vd);
	achan->curr_txd = async_desc;

	/* reset channel error */
	achan->error = 0;

	if (!achan->initialized) {
		/* enable interrupts */
		writel(ADM_CH_CONF_SHADOW_EN |
		       ADM_CH_CONF_PERM_MPU_CONF |
		       ADM_CH_CONF_MPU_DISABLE |
		       ADM_CH_CONF_SEC_DOMAIN(adev->ee),
		       adev->regs + ADM_CH_CONF(achan->id));

		writel(ADM_CH_RSLT_CONF_IRQ_EN | ADM_CH_RSLT_CONF_FLUSH_EN,
		       adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		achan->initialized = 1;
	}

	/* set the crci block size if this transaction requires CRCI */
	if (async_desc->crci) {
		writel(async_desc->mux | async_desc->blk_size,
		       adev->regs + ADM_CRCI_CTL(async_desc->crci, adev->ee));
	}

	/* make sure IRQ enable doesn't get reordered */
	wmb();

	/* write next command list out to the CMD FIFO */
	writel(ALIGN(async_desc->dma_addr, ADM_DESC_ALIGN) >> 3,
	       adev->regs + ADM_CH_CMD_PTR(achan->id, adev->ee));
}
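
/*
 * Note (illustrative): the CMD PTR register takes the 8-byte-aligned
 * bus address of the command pointer list shifted right by 3, e.g. a
 * list mapped at bus address 0x5ea1000 is started by writing
 * 0x5ea1000 >> 3 = 0xbd4200.
 */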

/**
 * adm_dma_irq - irq handler for ADM controller
 * @irq: IRQ of interrupt
 * @data: callback data
 *
 * IRQ handler for the ADM controller
 */
static irqreturn_t adm_dma_irq(int irq, void *data)
{
	struct adm_device *adev = data;
	u32 srcs, i;
	struct adm_async_desc *async_desc;
	unsigned long flags;

	srcs = readl_relaxed(adev->regs +
			     ADM_SEC_DOMAIN_IRQ_STATUS(adev->ee));

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		struct adm_chan *achan = &adev->channels[i];
		u32 status, result;

		if (srcs & BIT(i)) {
			status = readl_relaxed(adev->regs +
					       ADM_CH_STATUS_SD(i, adev->ee));

			/* if no result present, skip */
			if (!(status & ADM_CH_STATUS_VALID))
				continue;

			result = readl_relaxed(adev->regs +
					       ADM_CH_RSLT(i, adev->ee));

			/* no valid results, skip */
			if (!(result & ADM_CH_RSLT_VALID))
				continue;

			/* flag error if transaction was flushed or failed */
			if (result & (ADM_CH_RSLT_ERR | ADM_CH_RSLT_FLUSH))
				achan->error = 1;

			spin_lock_irqsave(&achan->vc.lock, flags);
			async_desc = achan->curr_txd;

			achan->curr_txd = NULL;

			if (async_desc) {
				vchan_cookie_complete(&async_desc->vd);

				/* kick off next DMA */
				adm_start_dma(achan);
			}

			spin_unlock_irqrestore(&achan->vc.lock, flags);
		}
	}

	return IRQ_HANDLED;
}

/**
 * adm_tx_status - returns status of transaction
 * @chan: dma channel
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 *
 * Return status of dma transaction
 */
static enum dma_status adm_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct adm_chan *achan = to_adm_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	size_t residue = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&achan->vc.lock, flags);

	vd = vchan_find_desc(&achan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct adm_async_desc, vd)->length;

	spin_unlock_irqrestore(&achan->vc.lock, flags);

	/*
	 * residue is either the full length if it is in the issued list, or 0
	 * if it is in progress. We have no reliable way of determining
	 * anything in between.
	 */
	dma_set_residue(txstate, residue);

	if (achan->error)
		return DMA_ERROR;

	return ret;
}

/**
 * adm_issue_pending - starts pending transactions
 * @chan: dma channel
 *
 * Issues all pending transactions and starts DMA
 */
static void adm_issue_pending(struct dma_chan *chan)
{
	struct adm_chan *achan = to_adm_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&achan->vc.lock, flags);

	if (vchan_issue_pending(&achan->vc) && !achan->curr_txd)
		adm_start_dma(achan);
	spin_unlock_irqrestore(&achan->vc.lock, flags);
}

/**
 * adm_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
 */
static void adm_dma_free_desc(struct virt_dma_desc *vd)
{
	struct adm_async_desc *async_desc = container_of(vd,
			struct adm_async_desc, vd);

	dma_unmap_single(async_desc->adev->dev, async_desc->dma_addr,
			 async_desc->dma_len, DMA_TO_DEVICE);
	kfree(async_desc->cpl);
	kfree(async_desc);
}

static void adm_channel_init(struct adm_device *adev, struct adm_chan *achan,
			     u32 index)
{
	achan->id = index;
	achan->adev = adev;

	vchan_init(&achan->vc, &adev->common);
	achan->vc.desc_free = adm_dma_free_desc;
}

/**
 * adm_dma_xlate - map a DT DMA specifier to a channel
 * @dma_spec: pointer to DMA specifier as found in the device tree
 * @ofdma: pointer to DMA controller data
 *
 * This can use either 1-cell or 2-cell formats: the first cell
 * selects the channel, while the optional second cell contains the
 * crci value.
 *
 * Returns pointer to appropriate dma channel on success or NULL on error.
 */
static struct dma_chan *adm_dma_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	struct dma_device *dev = ofdma->of_dma_data;
	struct dma_chan *chan, *candidate = NULL;
	struct adm_chan *achan;

	if (!dev || dma_spec->args_count > 2)
		return NULL;

	list_for_each_entry(chan, &dev->channels, device_node)
		if (chan->chan_id == dma_spec->args[0]) {
			candidate = chan;
			break;
		}

	if (!candidate)
		return NULL;

	achan = to_adm_chan(candidate);
	if (dma_spec->args_count == 2)
		achan->crci = dma_spec->args[1];
	else
		achan->crci = 0;

	return dma_get_slave_channel(candidate);
}
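
/*
 * Example DT consumer (illustrative; the phandle, channel number and
 * CRCI value are assumptions for a hypothetical board):
 *
 *	nand@1ac00000 {
 *		dmas = <&adm_dma 3 8>;	// channel 3, CRCI 8
 *		dma-names = "rxtx";
 *	};
 */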

static int adm_dma_probe(struct platform_device *pdev)
{
	struct adm_device *adev;
	int ret;
	u32 i;

	adev = devm_kzalloc(&pdev->dev, sizeof(*adev), GFP_KERNEL);
	if (!adev)
		return -ENOMEM;

	adev->dev = &pdev->dev;

	adev->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(adev->regs))
		return PTR_ERR(adev->regs);

	adev->irq = platform_get_irq(pdev, 0);
	if (adev->irq < 0)
		return adev->irq;

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &adev->ee);
	if (ret) {
		dev_err(adev->dev, "Execution environment unspecified\n");
		return ret;
	}

	adev->core_clk = devm_clk_get(adev->dev, "core");
	if (IS_ERR(adev->core_clk))
		return PTR_ERR(adev->core_clk);

	adev->iface_clk = devm_clk_get(adev->dev, "iface");
	if (IS_ERR(adev->iface_clk))
		return PTR_ERR(adev->iface_clk);

	adev->clk_reset = devm_reset_control_get_exclusive(&pdev->dev, "clk");
	if (IS_ERR(adev->clk_reset)) {
		dev_err(adev->dev, "failed to get ADM0 reset\n");
		return PTR_ERR(adev->clk_reset);
	}

	adev->c0_reset = devm_reset_control_get_exclusive(&pdev->dev, "c0");
	if (IS_ERR(adev->c0_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C0 reset\n");
		return PTR_ERR(adev->c0_reset);
	}

	adev->c1_reset = devm_reset_control_get_exclusive(&pdev->dev, "c1");
	if (IS_ERR(adev->c1_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C1 reset\n");
		return PTR_ERR(adev->c1_reset);
	}

	adev->c2_reset = devm_reset_control_get_exclusive(&pdev->dev, "c2");
	if (IS_ERR(adev->c2_reset)) {
		dev_err(adev->dev, "failed to get ADM0 C2 reset\n");
		return PTR_ERR(adev->c2_reset);
	}

	ret = clk_prepare_enable(adev->core_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable core clock\n");
		return ret;
	}

	ret = clk_prepare_enable(adev->iface_clk);
	if (ret) {
		dev_err(adev->dev, "failed to prepare/enable iface clock\n");
		goto err_disable_core_clk;
	}

	reset_control_assert(adev->clk_reset);
	reset_control_assert(adev->c0_reset);
	reset_control_assert(adev->c1_reset);
	reset_control_assert(adev->c2_reset);

	udelay(2);

	reset_control_deassert(adev->clk_reset);
	reset_control_deassert(adev->c0_reset);
	reset_control_deassert(adev->c1_reset);
	reset_control_deassert(adev->c2_reset);

	adev->channels = devm_kcalloc(adev->dev, ADM_MAX_CHANNELS,
				      sizeof(*adev->channels), GFP_KERNEL);
	if (!adev->channels) {
		ret = -ENOMEM;
		goto err_disable_clks;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&adev->common.channels);

	for (i = 0; i < ADM_MAX_CHANNELS; i++)
		adm_channel_init(adev, &adev->channels[i], i);

	/* reset CRCIs */
	for (i = 0; i < 16; i++)
		writel(ADM_CRCI_CTL_RST, adev->regs +
		       ADM_CRCI_CTL(i, adev->ee));

	/* configure client interfaces */
	writel(ADM_CI_RANGE_START(0x40) | ADM_CI_RANGE_END(0xb0) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(0));
	writel(ADM_CI_RANGE_START(0x2a) | ADM_CI_RANGE_END(0x2c) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(1));
	writel(ADM_CI_RANGE_START(0x12) | ADM_CI_RANGE_END(0x28) |
	       ADM_CI_BURST_8_WORDS, adev->regs + ADM_CI_CONF(2));
	writel(ADM_GP_CTL_LP_EN | ADM_GP_CTL_LP_CNT(0xf),
	       adev->regs + ADM_GP_CTL);

	ret = devm_request_irq(adev->dev, adev->irq, adm_dma_irq,
			       0, "adm_dma", adev);
	if (ret)
		goto err_disable_clks;

	platform_set_drvdata(pdev, adev);

	adev->common.dev = adev->dev;
	adev->common.dev->dma_parms = &adev->dma_parms;

	/* set capabilities */
	dma_cap_zero(adev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, adev->common.cap_mask);
	dma_cap_set(DMA_PRIVATE, adev->common.cap_mask);

	/* initialize dmaengine apis */
	adev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	adev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	adev->common.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	adev->common.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	adev->common.device_free_chan_resources = adm_free_chan;
	adev->common.device_prep_slave_sg = adm_prep_slave_sg;
	adev->common.device_issue_pending = adm_issue_pending;
	adev->common.device_tx_status = adm_tx_status;
	adev->common.device_terminate_all = adm_terminate_all;
	adev->common.device_config = adm_slave_config;

	ret = dma_async_device_register(&adev->common);
	if (ret) {
		dev_err(adev->dev, "failed to register dma async device\n");
		goto err_disable_clks;
	}

	ret = of_dma_controller_register(pdev->dev.of_node, adm_dma_xlate,
					 &adev->common);
	if (ret)
		goto err_unregister_dma;

	return 0;

err_unregister_dma:
	dma_async_device_unregister(&adev->common);
err_disable_clks:
	clk_disable_unprepare(adev->iface_clk);
err_disable_core_clk:
	clk_disable_unprepare(adev->core_clk);

	return ret;
}

static int adm_dma_remove(struct platform_device *pdev)
{
	struct adm_device *adev = platform_get_drvdata(pdev);
	struct adm_chan *achan;
	u32 i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&adev->common);

	for (i = 0; i < ADM_MAX_CHANNELS; i++) {
		achan = &adev->channels[i];

		/* mask IRQs for this channel/EE pair */
		writel(0, adev->regs + ADM_CH_RSLT_CONF(achan->id, adev->ee));

		tasklet_kill(&adev->channels[i].vc.task);
		adm_terminate_all(&adev->channels[i].vc.chan);
	}

	devm_free_irq(adev->dev, adev->irq, adev);

	clk_disable_unprepare(adev->core_clk);
	clk_disable_unprepare(adev->iface_clk);

	return 0;
}

static const struct of_device_id adm_of_match[] = {
	{ .compatible = "qcom,adm", },
	{}
};
MODULE_DEVICE_TABLE(of, adm_of_match);
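
/*
 * Example controller node (illustrative sketch; the addresses, interrupt
 * specifier and clock/reset phandles are assumptions, not taken from a
 * real board file):
 *
 *	adm_dma: dma-controller@18300000 {
 *		compatible = "qcom,adm";
 *		reg = <0x18300000 0x100000>;
 *		interrupts = <0 170 0>;
 *		#dma-cells = <2>;	// channel, crci (adm_dma_xlate)
 *		clocks = <&gcc ADM0_CLK>, <&gcc ADM0_PBUS_CLK>;
 *		clock-names = "core", "iface";
 *		resets = <&gcc ADM0_RESET>, <&gcc ADM0_C0_RESET>,
 *			 <&gcc ADM0_C1_RESET>, <&gcc ADM0_C2_RESET>;
 *		reset-names = "clk", "c0", "c1", "c2";
 *		qcom,ee = <0>;
 *	};
 */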

static struct platform_driver adm_dma_driver = {
	.probe = adm_dma_probe,
	.remove = adm_dma_remove,
	.driver = {
		.name = "adm-dma-engine",
		.of_match_table = adm_of_match,
	},
};

module_platform_driver(adm_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM ADM DMA engine driver");
MODULE_LICENSE("GPL v2");