// SPDX-License-Identifier: GPL-2.0
/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#include <linux/delay.h>
#include <linux/shdma-base.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"

/* DMA descriptor control */
enum shdma_desc_status {
	DESC_IDLE,
	DESC_PREPARED,
	DESC_SUBMITTED,
	DESC_COMPLETED,	/* completed, have to call callback */
	DESC_WAITING,	/* callback called, waiting for ack / re-submit */
};

#define NR_DESCS_PER_CHANNEL 32

#define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan)
#define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev)

/*
 * For slave DMA we assume that there is a finite number of DMA slaves in the
 * system, and that each such slave can only use a finite number of channels.
 * We use slave channel IDs to make sure that no slave channel ID is
 * allocated more than once.
 */
static unsigned int slave_num = 256;
module_param(slave_num, uint, 0444);

/* A bitmask with slave_num bits */
static unsigned long *shdma_slave_used;

/* Called under spin_lock_irq(&schan->chan_lock) */
static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *sdesc;

	/* DMA work check */
	if (ops->channel_busy(schan))
		return;

	/* Find the first not transferred descriptor */
	list_for_each_entry(sdesc, &schan->ld_queue, node)
		if (sdesc->mark == DESC_SUBMITTED) {
			ops->start_xfer(schan, sdesc);
			break;
		}
}

static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct shdma_desc *chunk, *c, *desc =
		container_of(tx, struct shdma_desc, async_tx);
	struct shdma_chan *schan = to_shdma_chan(tx->chan);
	dma_async_tx_callback callback = tx->callback;
	dma_cookie_t cookie;
	bool power_up;

	spin_lock_irq(&schan->chan_lock);

	power_up = list_empty(&schan->ld_queue);

	cookie = dma_cookie_assign(tx);

	/* Mark all chunks of this descriptor as submitted, move to the queue */
	list_for_each_entry_safe(chunk, c, desc->node.prev, node) {
		/*
		 * All chunks are on the global ld_free, so, we have to find
		 * the end of the chain ourselves
		 */
		if (chunk != desc && (chunk->mark == DESC_IDLE ||
				      chunk->async_tx.cookie > 0 ||
				      chunk->async_tx.cookie == -EBUSY ||
				      &chunk->node == &schan->ld_free))
			break;
		chunk->mark = DESC_SUBMITTED;
		if (chunk->chunks == 1) {
			chunk->async_tx.callback = callback;
			chunk->async_tx.callback_param = tx->callback_param;
		} else {
			/* Callback goes to the last chunk */
			chunk->async_tx.callback = NULL;
		}
		chunk->cookie = cookie;
		list_move_tail(&chunk->node, &schan->ld_queue);

		dev_dbg(schan->dev, "submit #%d@%p on %d\n",
			tx->cookie, &chunk->async_tx, schan->id);
	}

	if (power_up) {
		int ret;
		schan->pm_state = SHDMA_PM_BUSY;

		ret = pm_runtime_get(schan->dev);

		spin_unlock_irq(&schan->chan_lock);
		if (ret < 0) {
			dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret);
			pm_runtime_put(schan->dev);
		}

		pm_runtime_barrier(schan->dev);

		spin_lock_irq(&schan->chan_lock);

		/* Have we been reset, while waiting? */
		if (schan->pm_state != SHDMA_PM_ESTABLISHED) {
			struct shdma_dev *sdev =
				to_shdma_dev(schan->dma_chan.device);
			const struct shdma_ops *ops = sdev->ops;
			dev_dbg(schan->dev, "Bring up channel %d\n",
				schan->id);
			/*
			 * TODO: .xfer_setup() might fail on some platforms.
			 * Make it int then, on error remove chunks from the
			 * queue again
			 */
			ops->setup_xfer(schan, schan->slave_id);

			if (schan->pm_state == SHDMA_PM_PENDING)
				shdma_chan_xfer_ld_queue(schan);
			schan->pm_state = SHDMA_PM_ESTABLISHED;
		}
	} else {
		/*
		 * Tell .device_issue_pending() not to run the queue, interrupts
		 * will do it anyway
		 */
		schan->pm_state = SHDMA_PM_PENDING;
	}

	spin_unlock_irq(&schan->chan_lock);

	return cookie;
}

/* Called with chan_lock held */
static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan)
{
	struct shdma_desc *sdesc;

	list_for_each_entry(sdesc, &schan->ld_free, node)
		if (sdesc->mark != DESC_PREPARED) {
			BUG_ON(sdesc->mark != DESC_IDLE);
			list_del(&sdesc->node);
			return sdesc;
		}

	return NULL;
}

static int shdma_setup_slave(struct shdma_chan *schan, dma_addr_t slave_addr)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int ret, match;

	if (schan->dev->of_node) {
		match = schan->hw_req;
		ret = ops->set_slave(schan, match, slave_addr, true);
		if (ret < 0)
			return ret;
	} else {
		match = schan->real_slave_id;
	}

	if (schan->real_slave_id < 0 || schan->real_slave_id >= slave_num)
		return -EINVAL;

	if (test_and_set_bit(schan->real_slave_id, shdma_slave_used))
		return -EBUSY;

	ret = ops->set_slave(schan, match, slave_addr, false);
	if (ret < 0) {
		clear_bit(schan->real_slave_id, shdma_slave_used);
		return ret;
	}

	schan->slave_id = schan->real_slave_id;

	return 0;
}

static int shdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *desc;
	struct shdma_slave *slave = chan->private;
	int ret, i;

	/*
	 * This relies on the guarantee from dmaengine that alloc_chan_resources
	 * never runs concurrently with itself or free_chan_resources.
	 */
	if (slave) {
		/* Legacy mode: .private is set in filter */
		schan->real_slave_id = slave->slave_id;
		ret = shdma_setup_slave(schan, 0);
		if (ret < 0)
			goto esetslave;
	} else {
		/* Normal mode: real_slave_id was set by filter */
		schan->slave_id = -EINVAL;
	}

	schan->desc = kcalloc(NR_DESCS_PER_CHANNEL,
			      sdev->desc_size, GFP_KERNEL);
	if (!schan->desc) {
		ret = -ENOMEM;
		goto edescalloc;
	}
	schan->desc_num = NR_DESCS_PER_CHANNEL;

	for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) {
		desc = ops->embedded_desc(schan->desc, i);
		dma_async_tx_descriptor_init(&desc->async_tx,
					     &schan->dma_chan);
		desc->async_tx.tx_submit = shdma_tx_submit;
		desc->mark = DESC_IDLE;

		list_add(&desc->node, &schan->ld_free);
	}

	return NR_DESCS_PER_CHANNEL;

edescalloc:
	if (slave)
esetslave:
		clear_bit(slave->slave_id, shdma_slave_used);
	chan->private = NULL;
	return ret;
}

/*
 * This is the standard shdma filter function to be used as a replacement for
 * the "old" method, which uses the .private pointer.
 * You always have to pass a valid slave ID as the argument; old drivers that
 * pass ERR_PTR(-EINVAL) as the filter parameter and set the slave ID up in
 * dma_slave_config need to be updated, so that the slave_id field can be
 * removed from dma_slave_config. If this filter is used, the slave driver,
 * after calling dma_request_channel(), also has to call
 * dmaengine_slave_config() with .direction and either .src_addr or .dst_addr
 * set; see the example after the function body below.
 *
 * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE
 * capability! If this becomes a requirement, hardware glue drivers using these
 * services would have to provide their own filters, which first would check
 * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do
 * this, and only then, in case of a match, call this common filter.
 * NOTE 2: This filter function is also used in the DT case by shdma_of_xlate().
 * In that case the MID-RID value is used for slave channel filtering and is
 * passed to this function in the "arg" parameter.
 */
bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	struct shdma_chan *schan;
	struct shdma_dev *sdev;
	int slave_id = (long)arg;
	int ret;

	/* Only support channels handled by this driver. */
	if (chan->device->device_alloc_chan_resources !=
	    shdma_alloc_chan_resources)
		return false;

	schan = to_shdma_chan(chan);
	sdev = to_shdma_dev(chan->device);

	/*
	 * For DT, the schan->slave_id field is generated by the
	 * set_slave function from the slave ID that is passed in
	 * from xlate. For the non-DT case, the slave ID is
	 * directly passed into the filter function by the driver
	 */
	if (schan->dev->of_node) {
		ret = sdev->ops->set_slave(schan, slave_id, 0, true);
		if (ret < 0)
			return false;

		schan->real_slave_id = schan->slave_id;
		return true;
	}

	if (slave_id < 0) {
		/* No slave requested - arbitrary channel */
		dev_warn(sdev->dma_dev.dev, "invalid slave ID passed to dma_request_slave\n");
		return true;
	}

	if (slave_id >= slave_num)
		return false;

	ret = sdev->ops->set_slave(schan, slave_id, 0, true);
	if (ret < 0)
		return false;

	schan->real_slave_id = slave_id;

	return true;
}
EXPORT_SYMBOL(shdma_chan_filter);
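
/*
 * A minimal usage sketch (illustrative only, not part of this library): a
 * slave driver without DT support could request a channel through this
 * filter roughly as below. MY_SLAVE_ID stands for a platform-specific
 * slave/MID-RID value and is a placeholder, not a real constant.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(uintptr_t)MY_SLAVE_ID);
 *	if (!chan)
 *		return -ENODEV;
 */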

static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
{
	struct shdma_desc *desc, *_desc;
	/* Is the "exposed" head of a chain acked? */
	bool head_acked = false;
	dma_cookie_t cookie = 0;
	dma_async_tx_callback callback = NULL;
	struct dmaengine_desc_callback cb;
	unsigned long flags;
	LIST_HEAD(cyclic_list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irqsave(&schan->chan_lock, flags);
	list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) {
		struct dma_async_tx_descriptor *tx = &desc->async_tx;

		BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie);
		BUG_ON(desc->mark != DESC_SUBMITTED &&
		       desc->mark != DESC_COMPLETED &&
		       desc->mark != DESC_WAITING);

		/*
		 * queue is ordered, and we use this loop to (1) clean up all
		 * completed descriptors, and to (2) update descriptor flags of
		 * any chunks in a (partially) completed chain
		 */
		if (!all && desc->mark == DESC_SUBMITTED &&
		    desc->cookie != cookie)
			break;

		if (tx->cookie > 0)
			cookie = tx->cookie;

		if (desc->mark == DESC_COMPLETED && desc->chunks == 1) {
			if (schan->dma_chan.completed_cookie != desc->cookie - 1)
				dev_dbg(schan->dev,
					"Completing cookie %d, expected %d\n",
					desc->cookie,
					schan->dma_chan.completed_cookie + 1);
			schan->dma_chan.completed_cookie = desc->cookie;
		}

		/* Call callback on the last chunk */
		if (desc->mark == DESC_COMPLETED && tx->callback) {
			desc->mark = DESC_WAITING;
			dmaengine_desc_get_callback(tx, &cb);
			callback = tx->callback;
			dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n",
				tx->cookie, tx, schan->id);
			BUG_ON(desc->chunks != 1);
			break;
		}

		if (tx->cookie > 0 || tx->cookie == -EBUSY) {
			if (desc->mark == DESC_COMPLETED) {
				BUG_ON(tx->cookie < 0);
				desc->mark = DESC_WAITING;
			}
			head_acked = async_tx_test_ack(tx);
		} else {
			switch (desc->mark) {
			case DESC_COMPLETED:
				desc->mark = DESC_WAITING;
				fallthrough;
			case DESC_WAITING:
				if (head_acked)
					async_tx_ack(&desc->async_tx);
			}
		}

		dev_dbg(schan->dev, "descriptor %p #%d completed.\n",
			tx, tx->cookie);

		if (((desc->mark == DESC_COMPLETED ||
		      desc->mark == DESC_WAITING) &&
		     async_tx_test_ack(&desc->async_tx)) || all) {

			if (all || !desc->cyclic) {
				/* Remove from ld_queue list */
				desc->mark = DESC_IDLE;
				list_move(&desc->node, &schan->ld_free);
			} else {
				/* reuse as cyclic */
				desc->mark = DESC_SUBMITTED;
				list_move_tail(&desc->node, &cyclic_list);
			}

			if (list_empty(&schan->ld_queue)) {
				dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
				pm_runtime_put(schan->dev);
				schan->pm_state = SHDMA_PM_ESTABLISHED;
			} else if (schan->pm_state == SHDMA_PM_PENDING) {
				shdma_chan_xfer_ld_queue(schan);
			}
		}
	}

	if (all && !callback)
		/*
		 * Terminating and the loop completed normally: forgive
		 * uncompleted cookies
		 */
		schan->dma_chan.completed_cookie = schan->dma_chan.cookie;

	list_splice_tail(&cyclic_list, &schan->ld_queue);

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);

	return callback;
}

/*
 * shdma_chan_ld_cleanup - Clean up link descriptors
 *
 * Clean up the ld_queue of a DMA channel.
 */
static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all)
{
	while (__ld_cleanup(schan, all))
		;
}

/*
 * shdma_free_chan_resources - Free all resources of the channel.
 */
static void shdma_free_chan_resources(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	LIST_HEAD(list);

	/* Protect against ISR */
	spin_lock_irq(&schan->chan_lock);
	ops->halt_channel(schan);
	spin_unlock_irq(&schan->chan_lock);

	/* Now no new interrupts will occur */

	/* Prepared and not submitted descriptors can still be on the queue */
	if (!list_empty(&schan->ld_queue))
		shdma_chan_ld_cleanup(schan, true);

	if (schan->slave_id >= 0) {
		/* The caller is holding dma_list_mutex */
		clear_bit(schan->slave_id, shdma_slave_used);
		chan->private = NULL;
	}

	schan->real_slave_id = 0;

	spin_lock_irq(&schan->chan_lock);

	list_splice_init(&schan->ld_free, &list);
	schan->desc_num = 0;

	spin_unlock_irq(&schan->chan_lock);

	kfree(schan->desc);
}

/**
 * shdma_add_desc - get, set up and return one transfer descriptor
 * @schan:	DMA channel
 * @flags:	DMA transfer flags
 * @dst:	destination DMA address, incremented when direction equals
 *		DMA_DEV_TO_MEM or DMA_MEM_TO_MEM
 * @src:	source DMA address, incremented when direction equals
 *		DMA_MEM_TO_DEV or DMA_MEM_TO_MEM
 * @len:	DMA transfer length
 * @first:	if NULL, set to the current descriptor and cookie set to -EBUSY
 * @direction:	needed for slave DMA to decide which address to keep constant,
 *		equals DMA_MEM_TO_MEM for MEMCPY
 * Returns the prepared descriptor or NULL on error
 * Locks: called with chan_lock held
 */
static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
	unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len,
	struct shdma_desc **first, enum dma_transfer_direction direction)
{
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_desc *new;
	size_t copy_size = *len;

	if (!copy_size)
		return NULL;

	/* Allocate the link descriptor from the free list */
	new = shdma_get_desc(schan);
	if (!new) {
		dev_err(schan->dev, "No free link descriptor available\n");
		return NULL;
	}
	ops->desc_setup(schan, new, *src, *dst, &copy_size);

	if (!*first) {
		/* First desc */
		new->async_tx.cookie = -EBUSY;
		*first = new;
	} else {
		/* Other desc - invisible to the user */
		new->async_tx.cookie = -EINVAL;
	}

	dev_dbg(schan->dev,
		"chaining (%zu/%zu)@%pad -> %pad with %p, cookie %d\n",
		copy_size, *len, src, dst, &new->async_tx,
		new->async_tx.cookie);

	new->mark = DESC_PREPARED;
	new->async_tx.flags = flags;
	new->direction = direction;
	new->partial = 0;

	*len -= copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
		*src += copy_size;
	if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
		*dst += copy_size;

	return new;
}

/*
 * shdma_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and correct
 * list manipulation. For slave DMA, direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains the slave
 * address, e.g., the FIFO I/O register. For MEMCPY, direction equals
 * DMA_MEM_TO_MEM, and the SG list contains only one element and points at the
 * source buffer.
 */
static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan,
	struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
	enum dma_transfer_direction direction, unsigned long flags, bool cyclic)
{
	struct scatterlist *sg;
	struct shdma_desc *first = NULL, *new = NULL /* compiler... */;
	LIST_HEAD(tx_list);
	int chunks = 0;
	unsigned long irq_flags;
	int i;

	for_each_sg(sgl, sg, sg_len, i)
		chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len);

	/* Have to lock the whole loop to protect against concurrent release */
	spin_lock_irqsave(&schan->chan_lock, irq_flags);

	/*
	 * Chaining:
	 * first descriptor is what the user is dealing with in all API calls,
	 * its cookie is at first set to -EBUSY, at tx-submit to a positive
	 * number
	 * if more than one chunk is needed further chunks have cookie = -EINVAL
	 * the last chunk, if not equal to the first, has cookie = -ENOSPC
	 * all chunks are linked onto the tx_list head with their .node heads
	 * only during this function, then they are immediately spliced
	 * back onto the free list in form of a chain
	 */
	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t sg_addr = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		if (!len)
			goto err_get_desc;

		do {
			dev_dbg(schan->dev, "Add SG #%d@%p[%zu], dma %pad\n",
				i, sg, len, &sg_addr);

			if (direction == DMA_DEV_TO_MEM)
				new = shdma_add_desc(schan, flags,
						&sg_addr, addr, &len, &first,
						direction);
			else
				new = shdma_add_desc(schan, flags,
						addr, &sg_addr, &len, &first,
						direction);
			if (!new)
				goto err_get_desc;

			new->cyclic = cyclic;
			if (cyclic)
				new->chunks = 1;
			else
				new->chunks = chunks--;
			list_add_tail(&new->node, &tx_list);
		} while (len);
	}

	if (new != first)
		new->async_tx.cookie = -ENOSPC;

	/* Put them back on the free list, so they don't get lost */
	list_splice_tail(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return &first->async_tx;

err_get_desc:
	list_for_each_entry(new, &tx_list, node)
		new->mark = DESC_IDLE;
	list_splice(&tx_list, &schan->ld_free);

	spin_unlock_irqrestore(&schan->chan_lock, irq_flags);

	return NULL;
}


static struct dma_async_tx_descriptor *shdma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct scatterlist sg;

	if (!chan || !len)
		return NULL;

	BUG_ON(!schan->desc_num);

	sg_init_table(&sg, 1);
	sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len,
		    offset_in_page(dma_src));
	sg_dma_address(&sg) = dma_src;
	sg_dma_len(&sg) = len;

	return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
			     flags, false);
}
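
/*
 * Illustrative client-side sketch (not part of this library): with a channel
 * obtained as in the shdma_chan_filter example above, a memcpy transfer is
 * prepared and kicked off through the generic dmaengine API. "dst", "src" and
 * "len" are assumed to be valid DMA addresses/length set up by the caller.
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		return -EIO;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */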

static struct dma_async_tx_descriptor *shdma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	const struct shdma_ops *ops = sdev->ops;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || !sg_len) {
		dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n",
			 __func__, sg_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	return shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, false);
}

#define SHDMA_MAX_SG_LEN 32

static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device);
	struct dma_async_tx_descriptor *desc;
	const struct shdma_ops *ops = sdev->ops;
	unsigned int sg_len = buf_len / period_len;
	int slave_id = schan->slave_id;
	dma_addr_t slave_addr;
	struct scatterlist *sgl;
	int i;

	if (!chan)
		return NULL;

	BUG_ON(!schan->desc_num);

	if (sg_len > SHDMA_MAX_SG_LEN) {
		dev_err(schan->dev, "sg length %d exceeds limit %d",
			sg_len, SHDMA_MAX_SG_LEN);
		return NULL;
	}

	/* Someone calling slave DMA on a generic channel? */
	if (slave_id < 0 || (buf_len < period_len)) {
		dev_warn(schan->dev,
			 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
			 __func__, buf_len, period_len, slave_id);
		return NULL;
	}

	slave_addr = ops->slave_addr(schan);

	/*
	 * Allocate the sg list dynamically as it would consume too much stack
	 * space.
	 */
	sgl = kmalloc_array(sg_len, sizeof(*sgl), GFP_KERNEL);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, sg_len);

	for (i = 0; i < sg_len; i++) {
		dma_addr_t src = buf_addr + (period_len * i);

		sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
			    offset_in_page(src));
		sg_dma_address(&sgl[i]) = src;
		sg_dma_len(&sgl[i]) = period_len;
	}

	desc = shdma_prep_sg(schan, sgl, sg_len, &slave_addr,
			     direction, flags, true);

	kfree(sgl);
	return desc;
}
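
/*
 * Illustrative sketch of the cyclic case (not part of this library): an audio
 * or similar client carves a DMA-mapped ring buffer into equal periods and
 * gets a callback per period. "buf", "buf_len", "period_len", the callback
 * "my_period_done" and its "my_ctx" argument are placeholders assumed to come
 * from the client's own buffer management.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -EIO;
 *	tx->callback = my_period_done;
 *	tx->callback_param = my_ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */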

static int shdma_terminate_all(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	struct shdma_dev *sdev = to_shdma_dev(chan->device);
	const struct shdma_ops *ops = sdev->ops;
	unsigned long flags;

	spin_lock_irqsave(&schan->chan_lock, flags);
	ops->halt_channel(schan);

	if (ops->get_partial && !list_empty(&schan->ld_queue)) {
		/* Record partial transfer */
		struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
							   struct shdma_desc, node);
		desc->partial = ops->get_partial(schan, desc);
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	shdma_chan_ld_cleanup(schan, true);

	return 0;
}

static int shdma_config(struct dma_chan *chan,
			struct dma_slave_config *config)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	/*
	 * Only the transfer direction and the matching slave address are used
	 * here; slave drivers are encouraged to set all of .direction,
	 * .src_addr and .dst_addr. See the example below this function.
	 */
	if (!config)
		return -EINVAL;

	/*
	 * We could lock this, but you shouldn't be configuring the
	 * channel while using it...
	 */
	return shdma_setup_slave(schan,
				 config->direction == DMA_DEV_TO_MEM ?
				 config->src_addr : config->dst_addr);
}
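
/*
 * A minimal configuration sketch (illustrative, not part of this library):
 * after requesting the channel, a slave driver passes the device FIFO address
 * and the transfer parameters through the standard dmaengine call. The FIFO
 * address "fifo_phys_addr" and the bus width below are placeholders.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *	if (ret < 0)
 *		return ret;
 */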

static void shdma_issue_pending(struct dma_chan *chan)
{
	struct shdma_chan *schan = to_shdma_chan(chan);

	spin_lock_irq(&schan->chan_lock);
	if (schan->pm_state == SHDMA_PM_ESTABLISHED)
		shdma_chan_xfer_ld_queue(schan);
	else
		schan->pm_state = SHDMA_PM_PENDING;
	spin_unlock_irq(&schan->chan_lock);
}

static enum dma_status shdma_tx_status(struct dma_chan *chan,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct shdma_chan *schan = to_shdma_chan(chan);
	enum dma_status status;
	unsigned long flags;

	shdma_chan_ld_cleanup(schan, false);

	spin_lock_irqsave(&schan->chan_lock, flags);

	status = dma_cookie_status(chan, cookie, txstate);

	/*
	 * If we don't find the cookie on the queue, it has been aborted and
	 * we have to report an error.
	 */
	if (status != DMA_COMPLETE) {
		struct shdma_desc *sdesc;
		status = DMA_ERROR;
		list_for_each_entry(sdesc, &schan->ld_queue, node)
			if (sdesc->cookie == cookie) {
				status = DMA_IN_PROGRESS;
				break;
			}
	}

	spin_unlock_irqrestore(&schan->chan_lock, flags);

	return status;
}

/* Called from error IRQ or NMI */
bool shdma_reset(struct shdma_dev *sdev)
{
	const struct shdma_ops *ops = sdev->ops;
	struct shdma_chan *schan;
	unsigned int handled = 0;
	int i;

	/* Reset all channels */
	shdma_for_each_chan(schan, sdev, i) {
		struct shdma_desc *sdesc;
		LIST_HEAD(dl);

		if (!schan)
			continue;

		spin_lock(&schan->chan_lock);

		/* Stop the channel */
		ops->halt_channel(schan);

		list_splice_init(&schan->ld_queue, &dl);

		if (!list_empty(&dl)) {
			dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
			pm_runtime_put(schan->dev);
		}
		schan->pm_state = SHDMA_PM_ESTABLISHED;

		spin_unlock(&schan->chan_lock);

		/* Complete all */
		list_for_each_entry(sdesc, &dl, node) {
			struct dma_async_tx_descriptor *tx = &sdesc->async_tx;

			sdesc->mark = DESC_IDLE;
			dmaengine_desc_get_callback_invoke(tx, NULL);
		}

		spin_lock(&schan->chan_lock);
		list_splice(&dl, &schan->ld_free);
		spin_unlock(&schan->chan_lock);

		handled++;
	}

	return !!handled;
}
EXPORT_SYMBOL(shdma_reset);

static irqreturn_t chan_irq(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	irqreturn_t ret;

	spin_lock(&schan->chan_lock);

	ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE;

	spin_unlock(&schan->chan_lock);

	return ret;
}

static irqreturn_t chan_irqt(int irq, void *dev)
{
	struct shdma_chan *schan = dev;
	const struct shdma_ops *ops =
		to_shdma_dev(schan->dma_chan.device)->ops;
	struct shdma_desc *sdesc;

	spin_lock_irq(&schan->chan_lock);
	list_for_each_entry(sdesc, &schan->ld_queue, node) {
		if (sdesc->mark == DESC_SUBMITTED &&
		    ops->desc_completed(schan, sdesc)) {
			dev_dbg(schan->dev, "done #%d@%p\n",
				sdesc->async_tx.cookie, &sdesc->async_tx);
			sdesc->mark = DESC_COMPLETED;
			break;
		}
	}
	/* Next desc */
	shdma_chan_xfer_ld_queue(schan);
	spin_unlock_irq(&schan->chan_lock);

	shdma_chan_ld_cleanup(schan, false);

	return IRQ_HANDLED;
}

int shdma_request_irq(struct shdma_chan *schan, int irq,
		      unsigned long flags, const char *name)
{
	int ret = devm_request_threaded_irq(schan->dev, irq, chan_irq,
					    chan_irqt, flags, name, schan);

	schan->irq = ret < 0 ? ret : irq;

	return ret;
}
EXPORT_SYMBOL(shdma_request_irq);

void shdma_chan_probe(struct shdma_dev *sdev,
		      struct shdma_chan *schan, int id)
{
	schan->pm_state = SHDMA_PM_ESTABLISHED;

	/* reference struct dma_device */
	schan->dma_chan.device = &sdev->dma_dev;
	dma_cookie_init(&schan->dma_chan);

	schan->dev = sdev->dma_dev.dev;
	schan->id = id;

	if (!schan->max_xfer_len)
		schan->max_xfer_len = PAGE_SIZE;

	spin_lock_init(&schan->chan_lock);

	/* Init descriptor management lists */
	INIT_LIST_HEAD(&schan->ld_queue);
	INIT_LIST_HEAD(&schan->ld_free);

	/* Add the channel to DMA device channel list */
	list_add_tail(&schan->dma_chan.device_node,
		      &sdev->dma_dev.channels);
	sdev->schan[id] = schan;
}
EXPORT_SYMBOL(shdma_chan_probe);

void shdma_chan_remove(struct shdma_chan *schan)
{
	list_del(&schan->dma_chan.device_node);
}
EXPORT_SYMBOL(shdma_chan_remove);

int shdma_init(struct device *dev, struct shdma_dev *sdev,
	       int chan_num)
{
	struct dma_device *dma_dev = &sdev->dma_dev;

	/*
	 * Require all call-backs for now, they can trivially be made optional
	 * later as required
	 */
	if (!sdev->ops ||
	    !sdev->desc_size ||
	    !sdev->ops->embedded_desc ||
	    !sdev->ops->start_xfer ||
	    !sdev->ops->setup_xfer ||
	    !sdev->ops->set_slave ||
	    !sdev->ops->desc_setup ||
	    !sdev->ops->slave_addr ||
	    !sdev->ops->channel_busy ||
	    !sdev->ops->halt_channel ||
	    !sdev->ops->desc_completed)
		return -EINVAL;

	sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL);
	if (!sdev->schan)
		return -ENOMEM;

	INIT_LIST_HEAD(&dma_dev->channels);

	/* Common and MEMCPY operations */
	dma_dev->device_alloc_chan_resources
		= shdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = shdma_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy;
	dma_dev->device_tx_status = shdma_tx_status;
	dma_dev->device_issue_pending = shdma_issue_pending;

	/* Compulsory for DMA_SLAVE fields */
	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
	dma_dev->device_config = shdma_config;
	dma_dev->device_terminate_all = shdma_terminate_all;

	dma_dev->dev = dev;

	return 0;
}
EXPORT_SYMBOL(shdma_init);
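
/*
 * A rough sketch of how a hardware glue driver wires this library up
 * (illustrative only; names such as my_ops, my_sdev, my_schan, my_irq and
 * struct my_desc are placeholders, and error handling is elided). Real users
 * are drivers like drivers/dma/sh/shdmac.c:
 *
 *	static const struct shdma_ops my_ops = {
 *		.embedded_desc	= my_embedded_desc,
 *		.start_xfer	= my_start_xfer,
 *		.setup_xfer	= my_setup_xfer,
 *		.set_slave	= my_set_slave,
 *		.desc_setup	= my_desc_setup,
 *		.slave_addr	= my_slave_addr,
 *		.channel_busy	= my_channel_busy,
 *		.halt_channel	= my_halt_channel,
 *		.desc_completed	= my_desc_completed,
 *		.chan_irq	= my_chan_irq,
 *	};
 *
 *	my_sdev->ops = &my_ops;
 *	my_sdev->desc_size = sizeof(struct my_desc);
 *	ret = shdma_init(&pdev->dev, my_sdev, NR_CHANNELS);
 *	...
 *	shdma_chan_probe(my_sdev, my_schan, id);
 *	ret = shdma_request_irq(my_schan, my_irq, IRQF_SHARED, "my-dma");
 *	...
 *	ret = dma_async_device_register(&my_sdev->dma_dev);
 */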

void shdma_cleanup(struct shdma_dev *sdev)
{
	kfree(sdev->schan);
}
EXPORT_SYMBOL(shdma_cleanup);

static int __init shdma_enter(void)
{
	shdma_slave_used = bitmap_zalloc(slave_num, GFP_KERNEL);
	if (!shdma_slave_used)
		return -ENOMEM;
	return 0;
}
module_init(shdma_enter);

static void __exit shdma_exit(void)
{
	bitmap_free(shdma_slave_used);
}
module_exit(shdma_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SH-DMA driver base library");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");