/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from the MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden at runtime by writing to
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT               2000
#define HIDMA_ERR_INFO_SW                       0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE     0x0
#define HIDMA_NR_DEFAULT_DESC                   10
#define HIDMA_MSI_INTS                          11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
        return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
        return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
        return container_of(dmach, struct hidma_chan, chan);
}

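/*
 * Channels are devm-managed, so tearing the device down only requires
 * forgetting the dmaengine device's channel list.
 */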
static void hidma_free(struct hidma_dev *dmadev)
{
        INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm,
                 "number of descriptors (default: 0, use the device-provided count)");

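/*
 * Optional capabilities carried as bit flags in the OF/ACPI match data;
 * the values are distinct bits so they can be OR-ed together.
 */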
enum hidma_cap {
        HIDMA_MSI_CAP = 1,
        HIDMA_IDENTITY_CAP,
};

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *mdma = to_hidma_dev(ddev);
        struct dma_async_tx_descriptor *desc;
        dma_cookie_t last_cookie;
        struct hidma_desc *mdesc;
        struct hidma_desc *next;
        unsigned long irqflags;
        struct list_head list;

        INIT_LIST_HEAD(&list);

        /* Get all completed descriptors */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&mchan->completed, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* Execute callbacks and run dependencies */
        list_for_each_entry_safe(mdesc, next, &list, node) {
                enum dma_status llstat;
                struct dmaengine_desc_callback cb;
                struct dmaengine_result result;

                desc = &mdesc->desc;
                last_cookie = desc->cookie;

                llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

                spin_lock_irqsave(&mchan->lock, irqflags);
                if (llstat == DMA_COMPLETE) {
                        mchan->last_success = last_cookie;
                        result.result = DMA_TRANS_NOERROR;
                } else {
                        result.result = DMA_TRANS_ABORTED;
                }

                dma_cookie_complete(desc);
                spin_unlock_irqrestore(&mchan->lock, irqflags);

                dmaengine_desc_get_callback(desc, &cb);

                dma_run_dependencies(desc);

                /* Return the descriptor to the free list before the
                 * callback runs; the callback may immediately reuse it.
                 */
                spin_lock_irqsave(&mchan->lock, irqflags);
                list_move(&mdesc->node, &mchan->free);
                spin_unlock_irqrestore(&mchan->lock, irqflags);

                dmaengine_desc_callback_invoke(&cb, &result);
        }
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
        struct hidma_desc *mdesc = data;
        struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
        struct dma_device *ddev = mchan->chan.device;
        struct hidma_dev *dmadev = to_hidma_dev(ddev);
        unsigned long irqflags;
        bool queued = false;

        spin_lock_irqsave(&mchan->lock, irqflags);
        if (mdesc->node.next) {
                /* Delete from the active list, add to completed list */
                list_move_tail(&mdesc->node, &mchan->completed);
                queued = true;

                /* calculate the next running descriptor */
                mchan->running = list_first_entry(&mchan->active,
                                                  struct hidma_desc, node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        hidma_process_completed(mchan);

        if (queued) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
}

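/* Allocate a channel and register it with the dmaengine device. */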
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
        struct hidma_chan *mchan;
        struct dma_device *ddev;

        mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
        if (!mchan)
                return -ENOMEM;

        ddev = &dmadev->ddev;
        mchan->dma_sig = dma_sig;
        mchan->dmadev = dmadev;
        mchan->chan.device = ddev;
        dma_cookie_init(&mchan->chan);

        INIT_LIST_HEAD(&mchan->free);
        INIT_LIST_HEAD(&mchan->prepared);
        INIT_LIST_HEAD(&mchan->active);
        INIT_LIST_HEAD(&mchan->completed);
        INIT_LIST_HEAD(&mchan->queued);

        spin_lock_init(&mchan->lock);
        list_add_tail(&mchan->chan.device_node, &ddev->channels);
        dmadev->ddev.chancnt++;
        return 0;
}

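/*
 * Tasklet fallback for hidma_issue_pending(): runs when the device still
 * needed a runtime resume, so the hardware is started only after
 * pm_runtime_get_sync() has brought it back up.
 */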
static void hidma_issue_task(struct tasklet_struct *t)
{
        struct hidma_dev *dmadev = from_tasklet(dmadev, t, task);

        pm_runtime_get_sync(dmadev->ddev.dev);
        hidma_ll_start(dmadev->lldev);
}

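/*
 * Move all queued descriptors to the hardware queue and kick off
 * execution, deferring the start to the tasklet if the device must be
 * runtime resumed first.
 */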
static void hidma_issue_pending(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        unsigned long flags;
        struct hidma_desc *qdesc, *next;
        int status;

        spin_lock_irqsave(&mchan->lock, flags);
        list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
                hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
                list_move_tail(&qdesc->node, &mchan->active);
        }

        if (!mchan->running) {
                struct hidma_desc *desc = list_first_entry(&mchan->active,
                                                           struct hidma_desc,
                                                           node);
                mchan->running = desc;
        }
        spin_unlock_irqrestore(&mchan->lock, flags);

        /* PM will be released in hidma_callback function. */
        status = pm_runtime_get(dmadev->ddev.dev);
        if (status < 0)
                tasklet_schedule(&dmadev->task);
        else
                hidma_ll_start(dmadev->lldev);
}

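/*
 * Cookies increase monotonically and wrap around, so success has to be
 * decided modulo the wrap: every cookie that falls outside the window
 * (last_success, last_used] of still-pending (or failed) transactions
 * has completed successfully.
 */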
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
                dma_cookie_t last_success, dma_cookie_t last_used)
{
        if (last_success <= last_used) {
                if ((cookie <= last_success) || (cookie > last_used))
                        return true;
        } else {
                if ((cookie <= last_success) && (cookie > last_used))
                        return true;
        }
        return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
                                       dma_cookie_t cookie,
                                       struct dma_tx_state *txstate)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        enum dma_status ret;

        ret = dma_cookie_status(dmach, cookie, txstate);
        if (ret == DMA_COMPLETE) {
                bool is_success;

                is_success = hidma_txn_is_success(cookie, mchan->last_success,
                                                  dmach->cookie);
                return is_success ? ret : DMA_ERROR;
        }

        if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
                unsigned long flags;
                dma_cookie_t runcookie;

                spin_lock_irqsave(&mchan->lock, flags);
                if (mchan->running)
                        runcookie = mchan->running->desc.cookie;
                else
                        runcookie = -EINVAL;

                if (runcookie == cookie)
                        ret = DMA_PAUSED;

                spin_unlock_irqrestore(&mchan->lock, flags);
        }

        return ret;
}

/*
 * Accept a descriptor into the queued list.
 * The runtime PM reference is taken briefly here only to check that the
 * channel is still enabled; the per-transfer PM reference is taken in
 * hidma_issue_pending() and released in hidma_callback().
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
        struct hidma_chan *mchan = to_hidma_chan(txd->chan);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc;
        unsigned long irqflags;
        dma_cookie_t cookie;

        pm_runtime_get_sync(dmadev->ddev.dev);
        if (!hidma_ll_isenabled(dmadev->lldev)) {
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
                return -ENODEV;
        }
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);

        mdesc = container_of(txd, struct hidma_desc, desc);
        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move descriptor to queued */
        list_move_tail(&mdesc->node, &mchan->queued);

        /* Update cookie */
        cookie = dma_cookie_assign(txd);

        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return cookie;
}

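/*
 * Pre-allocate nr_descriptors descriptors for the channel and bind each
 * one to a low-level TRE channel; the dmaengine core treats any positive
 * return value as success.
 */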
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *dmadev = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);
        unsigned int i;
        int rc = 0;

        if (mchan->allocated)
                return 0;

        /* Alloc descriptors for this channel */
        for (i = 0; i < dmadev->nr_descriptors; i++) {
                mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
                if (!mdesc) {
                        rc = -ENOMEM;
                        break;
                }
                dma_async_tx_descriptor_init(&mdesc->desc, dmach);
                mdesc->desc.tx_submit = hidma_tx_submit;

                rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
                                      "DMA engine", hidma_callback, mdesc,
                                      &mdesc->tre_ch);
                if (rc) {
                        dev_err(dmach->device->dev,
                                "channel alloc failed at %u\n", i);
                        kfree(mdesc);
                        break;
                }
                list_add_tail(&mdesc->node, &descs);
        }

        if (rc) {
                /* return the allocated descriptors */
                list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                        hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
                        kfree(mdesc);
                }
                return rc;
        }

        spin_lock_irqsave(&mchan->lock, irqflags);
        list_splice_tail_init(&descs, &mchan->free);
        mchan->allocated = true;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
        return 1;
}

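/*
 * A typical dmaengine client drives this as (sketch, error handling
 * omitted):
 *
 *      chan = dma_request_chan(dev, "rx");
 *      desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *                                       DMA_PREP_INTERRUPT);
 *      cookie = dmaengine_submit(desc);
 *      dma_async_issue_pending(chan);
 */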
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
                      size_t len, unsigned long flags)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_desc *mdesc = NULL;
        struct hidma_dev *mdma = mchan->dmadev;
        unsigned long irqflags;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, irqflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        if (!mdesc)
                return NULL;

        mdesc->desc.flags = flags;
        hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                                     src, dest, len, flags,
                                     HIDMA_TRE_MEMCPY);

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return &mdesc->desc;
}

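/*
 * The memset TRE is programmed with an eight-byte fill pattern built by
 * replicating the low byte of 'value'; the pattern is handed to the
 * low-level driver in place of a source address.
 */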
static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
                      size_t len, unsigned long flags)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_desc *mdesc = NULL;
        struct hidma_dev *mdma = mchan->dmadev;
        unsigned long irqflags;
        u64 byte_pattern, fill_pattern;

        /* Get free descriptor */
        spin_lock_irqsave(&mchan->lock, irqflags);
        if (!list_empty(&mchan->free)) {
                mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
                list_del(&mdesc->node);
        }
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        if (!mdesc)
                return NULL;

        /* Unsigned cast so the byte is not sign-extended into the pattern */
        byte_pattern = (u8)value;
        fill_pattern =  (byte_pattern << 56) |
                        (byte_pattern << 48) |
                        (byte_pattern << 40) |
                        (byte_pattern << 32) |
                        (byte_pattern << 24) |
                        (byte_pattern << 16) |
                        (byte_pattern << 8) |
                        byte_pattern;

        mdesc->desc.flags = flags;
        hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
                                     fill_pattern, dest, len, flags,
                                     HIDMA_TRE_MEMSET);

        /* Place descriptor in prepared list */
        spin_lock_irqsave(&mchan->lock, irqflags);
        list_add_tail(&mdesc->node, &mchan->prepared);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        return &mdesc->desc;
}

static int hidma_terminate_channel(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        struct hidma_desc *tmp, *mdesc;
        unsigned long irqflags;
        LIST_HEAD(list);
        int rc;

        pm_runtime_get_sync(dmadev->ddev.dev);
        /* give completed requests a chance to finish */
        hidma_process_completed(mchan);

        spin_lock_irqsave(&mchan->lock, irqflags);
        mchan->last_success = 0;
        list_splice_init(&mchan->active, &list);
        list_splice_init(&mchan->prepared, &list);
        list_splice_init(&mchan->completed, &list);
        list_splice_init(&mchan->queued, &list);
        spin_unlock_irqrestore(&mchan->lock, irqflags);

        /* this suspends the existing transfer */
        rc = hidma_ll_disable(dmadev->lldev);
        if (rc) {
                dev_err(dmadev->ddev.dev, "channel did not pause\n");
                goto out;
        }

        /* return all user requests */
        list_for_each_entry_safe(mdesc, tmp, &list, node) {
                struct dma_async_tx_descriptor *txd = &mdesc->desc;

                dma_descriptor_unmap(txd);
                dmaengine_desc_get_callback_invoke(txd, NULL);
                dma_run_dependencies(txd);

                /* move myself to free_list */
                list_move(&mdesc->node, &mchan->free);
        }

        rc = hidma_ll_enable(dmadev->lldev);
out:
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
        struct hidma_chan *mchan = to_hidma_chan(chan);
        struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
        int rc;

        rc = hidma_terminate_channel(chan);
        if (rc)
                return rc;

        /* reinitialize the hardware */
        pm_runtime_get_sync(dmadev->ddev.dev);
        rc = hidma_ll_setup(dmadev->lldev);
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
        struct hidma_chan *mchan = to_hidma_chan(dmach);
        struct hidma_dev *mdma = mchan->dmadev;
        struct hidma_desc *mdesc, *tmp;
        unsigned long irqflags;
        LIST_HEAD(descs);

        /* terminate running transactions and free descriptors */
        hidma_terminate_channel(dmach);

        spin_lock_irqsave(&mchan->lock, irqflags);

        /* Move data */
        list_splice_tail_init(&mchan->free, &descs);

        /* Free descriptors */
        list_for_each_entry_safe(mdesc, tmp, &descs, node) {
                hidma_ll_free(mdma->lldev, mdesc->tre_ch);
                list_del(&mdesc->node);
                kfree(mdesc);
        }

        mchan->allocated = false;
        spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (!mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                if (hidma_ll_disable(dmadev->lldev))
                        dev_warn(dmadev->ddev.dev, "channel did not stop\n");
                mchan->paused = true;
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
        struct hidma_chan *mchan;
        struct hidma_dev *dmadev;
        int rc = 0;

        mchan = to_hidma_chan(chan);
        dmadev = to_hidma_dev(mchan->chan.device);
        if (mchan->paused) {
                pm_runtime_get_sync(dmadev->ddev.dev);
                rc = hidma_ll_enable(dmadev->lldev);
                if (!rc)
                        mchan->paused = false;
                else
                        dev_err(dmadev->ddev.dev,
                                "failed to resume the channel\n");
                pm_runtime_mark_last_busy(dmadev->ddev.dev);
                pm_runtime_put_autosuspend(dmadev->ddev.dev);
        }
        return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
        struct hidma_lldev *lldev = arg;

        /*
         * All interrupts are request driven.
         * HW doesn't send an interrupt by itself.
         */
        return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
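/*
 * MSI vectors are allocated as a contiguous virq range starting at
 * msi_virqbase; the offset of the firing vector selects the event-channel
 * cause bit handed to the low-level handler.
 */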
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
        struct hidma_lldev **lldevp = arg;
        struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

        return hidma_ll_inthandler_msi(chirq, *lldevp,
                                       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct hidma_dev *mdev = dev_get_drvdata(dev);

        if (strcmp(attr->attr.name, "chid") == 0)
                return sysfs_emit(buf, "%d\n", mdev->chidx);

        return 0;
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
        device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
        struct device_attribute *attrs;
        char *name_copy;

        attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
                             GFP_KERNEL);
        if (!attrs)
                return NULL;

        name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
        if (!name_copy)
                return NULL;

        attrs->attr.name = name_copy;
        attrs->attr.mode = mode;
        attrs->show = hidma_show_values;
        sysfs_attr_init(&attrs->attr);

        return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
        dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
        if (!dev->chid_attrs)
                return -ENOMEM;

        return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(desc);
        struct hidma_dev *dmadev = dev_get_drvdata(dev);

        /*
         * Only the first vector's message is written out; the event
         * channel appears to use a single MSI address/data target for
         * all vectors.
         */
        if (!desc->msi_index) {
                writel(msg->address_lo, dmadev->dev_evca + 0x118);
                writel(msg->address_hi, dmadev->dev_evca + 0x11C);
                writel(msg->data, dmadev->dev_evca + 0x120);
        }
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
        struct device *dev = dmadev->ddev.dev;
        int i, virq;

        for (i = 0; i < HIDMA_MSI_INTS; i++) {
                virq = msi_get_virq(dev, i);
                if (virq)
                        devm_free_irq(dev, virq, &dmadev->lldev);
        }

        platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
                             struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
        int rc, i, virq;

        rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
                                            hidma_write_msi_msg);
        if (rc)
                return rc;

        for (i = 0; i < HIDMA_MSI_INTS; i++) {
                virq = msi_get_virq(&pdev->dev, i);
                rc = devm_request_irq(&pdev->dev, virq,
                                      hidma_chirq_handler_msi,
                                      0, "qcom-hidma-msi",
                                      &dmadev->lldev);
                if (rc)
                        break;
                if (!i)
                        dmadev->msi_virqbase = virq;
        }

        if (rc) {
                /* free the MSI interrupts requested so far */
                for (--i; i >= 0; i--) {
                        virq = msi_get_virq(&pdev->dev, i);
                        devm_free_irq(&pdev->dev, virq, &dmadev->lldev);
                }
                dev_warn(&pdev->dev,
                         "failed to request MSI irq, falling back to wired IRQ\n");
        } else {
                /* switch the low-level driver over to MSI delivery */
                hidma_ll_setup_irq(dmadev->lldev, true);
        }
        return rc;
#else
        return -EINVAL;
#endif
}

static bool hidma_test_capability(struct device *dev, enum hidma_cap test_cap)
{
        enum hidma_cap cap;

        cap = (enum hidma_cap) device_get_match_data(dev);
        return cap ? ((cap & test_cap) > 0) : 0;
}

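/*
 * Probe: map the transfer (TRCA) and event (EVCA) register blocks, set up
 * runtime PM, pick MSI or wired interrupts by capability, then register a
 * single memcpy/memset-capable channel with the dmaengine core.
 */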
static int hidma_probe(struct platform_device *pdev)
{
        struct hidma_dev *dmadev;
        struct resource *trca_resource;
        struct resource *evca_resource;
        int chirq;
        void __iomem *evca;
        void __iomem *trca;
        int rc;
        bool msi;

        pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
        pm_runtime_use_autosuspend(&pdev->dev);
        pm_runtime_set_active(&pdev->dev);
        pm_runtime_enable(&pdev->dev);

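        /*
         * The device is marked active before runtime PM is enabled, so
         * the pm_runtime_get_sync() later in probe only takes a reference
         * instead of forcing a resume.
         */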
        trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        trca = devm_ioremap_resource(&pdev->dev, trca_resource);
        if (IS_ERR(trca)) {
                rc = PTR_ERR(trca);
                goto bailout;
        }

        evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
        evca = devm_ioremap_resource(&pdev->dev, evca_resource);
        if (IS_ERR(evca)) {
                rc = PTR_ERR(evca);
                goto bailout;
        }

        /*
         * This driver only handles the channel IRQs.
         * Common IRQ is handled by the management driver.
         */
        chirq = platform_get_irq(pdev, 0);
        if (chirq < 0) {
                rc = -ENODEV;
                goto bailout;
        }

        dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
        if (!dmadev) {
                rc = -ENOMEM;
                goto bailout;
        }

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        spin_lock_init(&dmadev->lock);
        dmadev->ddev.dev = &pdev->dev;
        pm_runtime_get_sync(dmadev->ddev.dev);

        dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
        dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
        if (WARN_ON(!pdev->dev.dma_mask)) {
                rc = -ENXIO;
                goto dmafree;
        }

        dmadev->dev_evca = evca;
        dmadev->evca_resource = evca_resource;
        dmadev->dev_trca = trca;
        dmadev->trca_resource = trca_resource;
        dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
        dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
        dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
        dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
        dmadev->ddev.device_tx_status = hidma_tx_status;
        dmadev->ddev.device_issue_pending = hidma_issue_pending;
        dmadev->ddev.device_pause = hidma_pause;
        dmadev->ddev.device_resume = hidma_resume;
        dmadev->ddev.device_terminate_all = hidma_terminate_all;
        dmadev->ddev.copy_align = 8;

        /*
         * Determine the MSI capability of the platform. Old HW doesn't
         * support MSI.
         */
        msi = hidma_test_capability(&pdev->dev, HIDMA_MSI_CAP);
        device_property_read_u32(&pdev->dev, "desc-count",
                                 &dmadev->nr_descriptors);

        if (nr_desc_prm) {
                dev_info(&pdev->dev, "overriding number of descriptors to %d\n",
                         nr_desc_prm);
                dmadev->nr_descriptors = nr_desc_prm;
        }

        if (!dmadev->nr_descriptors)
                dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

        if (hidma_test_capability(&pdev->dev, HIDMA_IDENTITY_CAP))
                dmadev->chidx = readl(dmadev->dev_trca + 0x40);
        else
                dmadev->chidx = readl(dmadev->dev_trca + 0x28);

        /* Set DMA mask to 64 bits. */
        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (rc) {
                dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
                goto dmafree;
        }

        dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
                                      dmadev->nr_descriptors, dmadev->dev_trca,
                                      dmadev->dev_evca, dmadev->chidx);
        if (!dmadev->lldev) {
                rc = -EPROBE_DEFER;
                goto dmafree;
        }

        platform_set_drvdata(pdev, dmadev);
        if (msi)
                rc = hidma_request_msi(dmadev, pdev);

        if (!msi || rc) {
                hidma_ll_setup_irq(dmadev->lldev, false);
                rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
                                      0, "qcom-hidma", dmadev->lldev);
                if (rc)
                        goto uninit;
        }

        INIT_LIST_HEAD(&dmadev->ddev.channels);
        rc = hidma_chan_init(dmadev, 0);
        if (rc)
                goto uninit;

        rc = dma_async_device_register(&dmadev->ddev);
        if (rc)
                goto uninit;

        dmadev->irq = chirq;
        tasklet_setup(&dmadev->task, hidma_issue_task);
        hidma_debug_init(dmadev);
        hidma_sysfs_init(dmadev);
        dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
        return 0;

uninit:
        if (msi)
                hidma_free_msis(dmadev);

        hidma_ll_uninit(dmadev->lldev);
dmafree:
        if (dmadev)
                hidma_free(dmadev);
bailout:
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
        struct hidma_dev *dmadev = platform_get_drvdata(pdev);

        dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

        pm_runtime_get_sync(dmadev->ddev.dev);
        if (hidma_ll_disable(dmadev->lldev))
                dev_warn(dmadev->ddev.dev, "channel did not stop\n");
        pm_runtime_mark_last_busy(dmadev->ddev.dev);
        pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
        struct hidma_dev *dmadev = platform_get_drvdata(pdev);

        pm_runtime_get_sync(dmadev->ddev.dev);
        dma_async_device_unregister(&dmadev->ddev);
        if (!dmadev->lldev->msi_support)
                devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
        else
                hidma_free_msis(dmadev);

        tasklet_kill(&dmadev->task);
        hidma_sysfs_uninit(dmadev);
        hidma_debug_uninit(dmadev);
        hidma_ll_uninit(dmadev->lldev);
        hidma_free(dmadev);

        dev_info(&pdev->dev, "HI-DMA engine removed\n");
        pm_runtime_put_sync_suspend(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
        {"QCOM8061"},
        {"QCOM8062", HIDMA_MSI_CAP},
        {"QCOM8063", (HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP)},
        {},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
        {.compatible = "qcom,hidma-1.0",},
        {.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
        {.compatible = "qcom,hidma-1.2",
         .data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
        {},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
        .probe = hidma_probe,
        .remove = hidma_remove,
        .shutdown = hidma_shutdown,
        .driver = {
                   .name = "hidma",
                   .of_match_table = hidma_match,
                   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
                   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");