// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Audio DMA Controller (ADMAC) on t8103 (M1) and other Apple chips
 *
 * Copyright (C) The Asahi Linux Contributors
 */

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "dmaengine.h"

#define NCHANNELS_MAX	64
#define IRQ_NOUTPUTS	4

#define RING_WRITE_SLOT		GENMASK(1, 0)
#define RING_READ_SLOT		GENMASK(5, 4)
#define RING_FULL		BIT(9)
#define RING_EMPTY		BIT(8)
#define RING_ERR		BIT(10)

#define STATUS_DESC_DONE	BIT(0)
#define STATUS_ERR		BIT(6)

#define FLAG_DESC_NOTIFY	BIT(16)

#define REG_TX_START		0x0000
#define REG_TX_STOP		0x0004
#define REG_RX_START		0x0008
#define REG_RX_STOP		0x000c

#define REG_CHAN_CTL(ch)	(0x8000 + (ch) * 0x200)
#define REG_CHAN_CTL_RST_RINGS	BIT(0)

#define REG_DESC_RING(ch)	(0x8070 + (ch) * 0x200)
#define REG_REPORT_RING(ch)	(0x8074 + (ch) * 0x200)

#define REG_RESIDUE(ch)		(0x8064 + (ch) * 0x200)

#define REG_BUS_WIDTH(ch)	(0x8040 + (ch) * 0x200)

#define BUS_WIDTH_8BIT		0x00
#define BUS_WIDTH_16BIT		0x01
#define BUS_WIDTH_32BIT		0x02
#define BUS_WIDTH_FRAME_2_WORDS	0x10
#define BUS_WIDTH_FRAME_4_WORDS	0x20

#define CHAN_BUFSIZE		0x8000

#define REG_CHAN_FIFOCTL(ch)	(0x8054 + (ch) * 0x200)
#define CHAN_FIFOCTL_LIMIT	GENMASK(31, 16)
#define CHAN_FIFOCTL_THRESHOLD	GENMASK(15, 0)

#define REG_DESC_WRITE(ch)	(0x10000 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
#define REG_REPORT_READ(ch)	(0x10100 + ((ch) / 2) * 0x4 + ((ch) & 1) * 0x4000)
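/*
 * The descriptor write and report read FIFOs sit in a separate register
 * block: each channel gets one 32-bit window, packed by channel pairs
 * ((ch) / 2), with the TX (even) and RX (odd) halves apparently split
 * into two banks 0x4000 apart.
 */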

#define REG_TX_INTSTATE(idx)		(0x0030 + (idx) * 4)
#define REG_RX_INTSTATE(idx)		(0x0040 + (idx) * 4)
#define REG_CHAN_INTSTATUS(ch, idx)	(0x8010 + (ch) * 0x200 + (idx) * 4)
#define REG_CHAN_INTMASK(ch, idx)	(0x8020 + (ch) * 0x200 + (idx) * 4)

struct admac_data;
struct admac_tx;

struct admac_chan {
	unsigned int no;
	struct admac_data *host;
	struct dma_chan chan;
	struct tasklet_struct tasklet;

	spinlock_t lock;
	struct admac_tx *current_tx;
	int nperiod_acks;

	/*
	 * We maintain a 'submitted' and 'issued' list mainly for interface
	 * correctness. Typical use of the driver (per channel) will be
	 * prepping, submitting and issuing a single cyclic transaction which
	 * will stay current until terminate_all is called.
	 */
	struct list_head submitted;
	struct list_head issued;

	struct list_head to_free;
};

struct admac_data {
	struct dma_device dma;
	struct device *dev;
	void __iomem *base;

	int irq_index;
	int nchannels;
	struct admac_chan channels[];
};

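/*
 * One cyclic transaction as prepared through the dmaengine interface.
 * The submitted and reclaimed positions are byte offsets into the
 * buffer, kept modulo 2*buf_len (presumably so that a fully-submitted
 * ring can be told apart from a fully-reclaimed one, in the usual
 * ring-pointer fashion).
 */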
struct admac_tx {
	struct dma_async_tx_descriptor tx;
	bool cyclic;
	dma_addr_t buf_addr;
	dma_addr_t buf_end;
	size_t buf_len;
	size_t period_len;

	size_t submitted_pos;
	size_t reclaimed_pos;

	struct list_head node;
};

static void admac_modify(struct admac_data *ad, int reg, u32 mask, u32 val)
{
	void __iomem *addr = ad->base + reg;
	u32 curr = readl_relaxed(addr);

	writel_relaxed((curr & ~mask) | (val & mask), addr);
}

static struct admac_chan *to_admac_chan(struct dma_chan *chan)
{
	return container_of(chan, struct admac_chan, chan);
}

static struct admac_tx *to_admac_tx(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct admac_tx, tx);
}

static enum dma_transfer_direction admac_chan_direction(int channo)
{
	/* Channel directions are hardwired: even channels transmit, odd receive */
	return (channo & 1) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
}

static dma_cookie_t admac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct admac_tx *adtx = to_admac_tx(tx);
	struct admac_chan *adchan = to_admac_chan(tx->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&adchan->lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&adtx->node, &adchan->submitted);
	spin_unlock_irqrestore(&adchan->lock, flags);

	return cookie;
}

static int admac_desc_free(struct dma_async_tx_descriptor *tx)
{
	kfree(to_admac_tx(tx));

	return 0;
}

static struct dma_async_tx_descriptor *admac_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *adtx;

	if (direction != admac_chan_direction(adchan->no))
		return NULL;

	adtx = kzalloc(sizeof(*adtx), GFP_NOWAIT);
	if (!adtx)
		return NULL;

	adtx->cyclic = true;

	adtx->buf_addr = buf_addr;
	adtx->buf_len = buf_len;
	adtx->buf_end = buf_addr + buf_len;
	adtx->period_len = period_len;

	adtx->submitted_pos = 0;
	adtx->reclaimed_pos = 0;

	dma_async_tx_descriptor_init(&adtx->tx, chan);
	adtx->tx.tx_submit = admac_tx_submit;
	adtx->tx.desc_free = admac_desc_free;

	return &adtx->tx;
}
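
/*
 * A minimal sketch of expected per-channel usage through the dmaengine
 * client API, e.g. from an audio platform driver. Names here are
 * illustrative only, not part of this driver:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = period_elapsed;	// invoked once per period
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */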

/*
 * Write one hardware descriptor for a dmaengine cyclic transaction.
 *
 * A descriptor is four consecutive 32-bit writes into the channel's
 * descriptor FIFO: low and high word of the buffer address, the length,
 * and a flags word requesting a report once the descriptor is done.
 */
static void admac_cyclic_write_one_desc(struct admac_data *ad, int channo,
					struct admac_tx *tx)
{
	dma_addr_t addr;

	addr = tx->buf_addr + (tx->submitted_pos % tx->buf_len);

	/* If this happens, we have buggy code somewhere */
	WARN_ON_ONCE(addr + tx->period_len > tx->buf_end);

	dev_dbg(ad->dev, "ch%d descriptor: addr=0x%pad len=0x%zx flags=0x%lx\n",
		channo, &addr, tx->period_len, FLAG_DESC_NOTIFY);

	writel_relaxed(lower_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(upper_32_bits(addr), ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(tx->period_len, ad->base + REG_DESC_WRITE(channo));
	writel_relaxed(FLAG_DESC_NOTIFY, ad->base + REG_DESC_WRITE(channo));

	tx->submitted_pos += tx->period_len;
	tx->submitted_pos %= 2 * tx->buf_len;
}

/*
 * Write as many hardware descriptors for a dmaengine cyclic transaction
 * as there is ring space for.
 */
static void admac_cyclic_write_desc(struct admac_data *ad, int channo,
				    struct admac_tx *tx)
{
	int i;

	for (i = 0; i < 4; i++) {
		if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_FULL)
			break;
		admac_cyclic_write_one_desc(ad, channo, tx);
	}
}

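/*
 * Calculate the number of occupied slots in a 4-deep hardware ring from
 * its status word. E.g. write slot 1 and read slot 3 means
 * (1 + 4 - 3) % 4 = 2 descriptors are in flight; equal slot indices are
 * disambiguated by the FULL/EMPTY flags.
 */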
static int admac_ring_noccupied_slots(int ringval)
{
	int wrslot = FIELD_GET(RING_WRITE_SLOT, ringval);
	int rdslot = FIELD_GET(RING_READ_SLOT, ringval);

	if (wrslot != rdslot) {
		return (wrslot + 4 - rdslot) % 4;
	} else {
		WARN_ON((ringval & (RING_FULL | RING_EMPTY)) == 0);

		if (ringval & RING_FULL)
			return 4;
		else
			return 0;
	}
}

/*
 * Read from hardware the residue of a cyclic dmaengine transaction.
 */
static u32 admac_cyclic_read_residue(struct admac_data *ad, int channo,
				     struct admac_tx *adtx)
{
	u32 ring1, ring2;
	u32 residue1, residue2;
	int nreports;
	size_t pos;

	ring1 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
	residue1 = readl_relaxed(ad->base + REG_RESIDUE(channo));
	ring2 = readl_relaxed(ad->base + REG_REPORT_RING(channo));
	residue2 = readl_relaxed(ad->base + REG_RESIDUE(channo));

	if (residue2 > residue1) {
		/*
		 * Controller must have loaded next descriptor between
		 * the two residue reads
		 */
		nreports = admac_ring_noccupied_slots(ring1) + 1;
	} else {
		/* No descriptor load between the two reads, ring2 is safe to use */
		nreports = admac_ring_noccupied_slots(ring2);
	}

	pos = adtx->reclaimed_pos + adtx->period_len * (nreports + 1) - residue2;

	return adtx->buf_len - pos % adtx->buf_len;
}

static enum dma_status admac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	struct admac_tx *adtx;

	enum dma_status ret;
	size_t residue;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&adchan->lock, flags);
	adtx = adchan->current_tx;

	if (adtx && adtx->tx.cookie == cookie) {
		ret = DMA_IN_PROGRESS;
		residue = admac_cyclic_read_residue(ad, adchan->no, adtx);
	} else {
		ret = DMA_IN_PROGRESS;
		residue = 0;
		list_for_each_entry(adtx, &adchan->issued, node) {
			if (adtx->tx.cookie == cookie) {
				residue = adtx->buf_len;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&adchan->lock, flags);

	dma_set_residue(txstate, residue);
	return ret;
}

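/*
 * The TX and RX start/stop registers hold one bit per channel pair,
 * hence the (channel number / 2) below.
 */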
static void admac_start_chan(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	u32 startbit = 1 << (adchan->no / 2);

	writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
		       ad->base + REG_CHAN_INTSTATUS(adchan->no, ad->irq_index));
	writel_relaxed(STATUS_DESC_DONE | STATUS_ERR,
		       ad->base + REG_CHAN_INTMASK(adchan->no, ad->irq_index));

	switch (admac_chan_direction(adchan->no)) {
	case DMA_MEM_TO_DEV:
		writel_relaxed(startbit, ad->base + REG_TX_START);
		break;
	case DMA_DEV_TO_MEM:
		writel_relaxed(startbit, ad->base + REG_RX_START);
		break;
	default:
		break;
	}
	dev_dbg(adchan->host->dev, "ch%d start\n", adchan->no);
}

static void admac_stop_chan(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	u32 stopbit = 1 << (adchan->no / 2);

	switch (admac_chan_direction(adchan->no)) {
	case DMA_MEM_TO_DEV:
		writel_relaxed(stopbit, ad->base + REG_TX_STOP);
		break;
	case DMA_DEV_TO_MEM:
		writel_relaxed(stopbit, ad->base + REG_RX_STOP);
		break;
	default:
		break;
	}
	dev_dbg(adchan->host->dev, "ch%d stop\n", adchan->no);
}

static void admac_reset_rings(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;

	writel_relaxed(REG_CHAN_CTL_RST_RINGS,
		       ad->base + REG_CHAN_CTL(adchan->no));
	writel_relaxed(0, ad->base + REG_CHAN_CTL(adchan->no));
}

static void admac_start_current_tx(struct admac_chan *adchan)
{
	struct admac_data *ad = adchan->host;
	int ch = adchan->no;

	admac_reset_rings(adchan);
	writel_relaxed(0, ad->base + REG_CHAN_CTL(ch));

	admac_cyclic_write_one_desc(ad, ch, adchan->current_tx);
	admac_start_chan(adchan);
	admac_cyclic_write_desc(ad, ch, adchan->current_tx);
}

static void admac_issue_pending(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *tx;
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	list_splice_tail_init(&adchan->submitted, &adchan->issued);
	if (!list_empty(&adchan->issued) && !adchan->current_tx) {
		tx = list_first_entry(&adchan->issued, struct admac_tx, node);
		list_del(&tx->node);

		adchan->current_tx = tx;
		adchan->nperiod_acks = 0;
		admac_start_current_tx(adchan);
	}
	spin_unlock_irqrestore(&adchan->lock, flags);
}

static int admac_pause(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_stop_chan(adchan);

	return 0;
}

static int admac_resume(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	admac_start_chan(adchan);

	return 0;
}

static int admac_terminate_all(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&adchan->lock, flags);
	admac_stop_chan(adchan);
	admac_reset_rings(adchan);

	if (adchan->current_tx) {
		list_add_tail(&adchan->current_tx->node, &adchan->to_free);
		adchan->current_tx = NULL;
	}
	/*
	 * Descriptors can only be freed after the tasklet
	 * has been killed (in admac_synchronize).
	 */
	list_splice_tail_init(&adchan->submitted, &adchan->to_free);
	list_splice_tail_init(&adchan->issued, &adchan->to_free);
	spin_unlock_irqrestore(&adchan->lock, flags);

	return 0;
}

static void admac_synchronize(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_tx *adtx, *_adtx;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&adchan->lock, flags);
	list_splice_tail_init(&adchan->to_free, &head);
	spin_unlock_irqrestore(&adchan->lock, flags);

	tasklet_kill(&adchan->tasklet);

	list_for_each_entry_safe(adtx, _adtx, &head, node) {
		list_del(&adtx->node);
		admac_desc_free(&adtx->tx);
	}
}

static int admac_alloc_chan_resources(struct dma_chan *chan)
{
	struct admac_chan *adchan = to_admac_chan(chan);

	dma_cookie_init(&adchan->chan);
	return 0;
}

static void admac_free_chan_resources(struct dma_chan *chan)
{
	admac_terminate_all(chan);
	admac_synchronize(chan);
}

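/*
 * Translate a DT phandle reference into a channel. Consumers pass a
 * single cell carrying the channel index, e.g. (a hypothetical
 * devicetree snippet):
 *
 *	dmas = <&admac 24>;
 *	dma-names = "tx";
 */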
static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct admac_data *ad = (struct admac_data *) ofdma->of_dma_data;
	unsigned int index;

	if (dma_spec->args_count != 1)
		return NULL;

	index = dma_spec->args[0];

	if (index >= ad->nchannels) {
		dev_err(ad->dev, "channel index %u out of bounds\n", index);
		return NULL;
	}

	return &ad->channels[index].chan;
}

/*
 * Pop pending reports from the channel's report ring, up to the ring
 * depth of 4, and return how many were drained. A report consists of
 * four consecutive 32-bit reads from the report FIFO; what we
 * understand of it is a 64-bit counter value, one word of unknown
 * meaning, and a flags word.
 */
static int admac_drain_reports(struct admac_data *ad, int channo)
{
	int count;

	for (count = 0; count < 4; count++) {
		u32 countval_hi, countval_lo, unk1, flags;

		if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_EMPTY)
			break;

		countval_lo = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		countval_hi = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		unk1 = readl_relaxed(ad->base + REG_REPORT_READ(channo));
		flags = readl_relaxed(ad->base + REG_REPORT_READ(channo));

		dev_dbg(ad->dev, "ch%d report: countval=0x%llx unk1=0x%x flags=0x%x\n",
			channo, ((u64) countval_hi) << 32 | countval_lo, unk1, flags);
	}

	return count;
}

static void admac_handle_status_err(struct admac_data *ad, int channo)
{
	bool handled = false;

	if (readl_relaxed(ad->base + REG_DESC_RING(channo)) & RING_ERR) {
		writel_relaxed(RING_ERR, ad->base + REG_DESC_RING(channo));
		dev_err_ratelimited(ad->dev, "ch%d descriptor ring error\n", channo);
		handled = true;
	}

	if (readl_relaxed(ad->base + REG_REPORT_RING(channo)) & RING_ERR) {
		writel_relaxed(RING_ERR, ad->base + REG_REPORT_RING(channo));
		dev_err_ratelimited(ad->dev, "ch%d report ring error\n", channo);
		handled = true;
	}

	if (unlikely(!handled)) {
		dev_err(ad->dev, "ch%d unknown error, masking errors as cause of IRQs\n", channo);
		admac_modify(ad, REG_CHAN_INTMASK(channo, ad->irq_index),
			     STATUS_ERR, 0);
	}
}

static void admac_handle_status_desc_done(struct admac_data *ad, int channo)
{
	struct admac_chan *adchan = &ad->channels[channo];
	unsigned long flags;
	int nreports;

	writel_relaxed(STATUS_DESC_DONE,
		       ad->base + REG_CHAN_INTSTATUS(channo, ad->irq_index));

	spin_lock_irqsave(&adchan->lock, flags);
	nreports = admac_drain_reports(ad, channo);

	if (adchan->current_tx) {
		struct admac_tx *tx = adchan->current_tx;

		adchan->nperiod_acks += nreports;
		tx->reclaimed_pos += nreports * tx->period_len;
		tx->reclaimed_pos %= 2 * tx->buf_len;

		admac_cyclic_write_desc(ad, channo, tx);
		tasklet_schedule(&adchan->tasklet);
	}
	spin_unlock_irqrestore(&adchan->lock, flags);
}

static void admac_handle_chan_int(struct admac_data *ad, int no)
{
	u32 cause = readl_relaxed(ad->base + REG_CHAN_INTSTATUS(no, ad->irq_index));

	if (cause & STATUS_ERR)
		admac_handle_status_err(ad, no);

	if (cause & STATUS_DESC_DONE)
		admac_handle_status_desc_done(ad, no);
}

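/*
 * The per-output INTSTATE registers give a summary bit per channel
 * pair: bit i of the TX state maps to channel 2*i, bit i of the RX
 * state to channel 2*i + 1.
 */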
static irqreturn_t admac_interrupt(int irq, void *devid)
{
	struct admac_data *ad = devid;
	u32 rx_intstate, tx_intstate;
	int i;

	rx_intstate = readl_relaxed(ad->base + REG_RX_INTSTATE(ad->irq_index));
	tx_intstate = readl_relaxed(ad->base + REG_TX_INTSTATE(ad->irq_index));

	if (!tx_intstate && !rx_intstate)
		return IRQ_NONE;

	for (i = 0; i < ad->nchannels; i += 2) {
		if (tx_intstate & 1)
			admac_handle_chan_int(ad, i);
		tx_intstate >>= 1;
	}

	for (i = 1; i < ad->nchannels; i += 2) {
		if (rx_intstate & 1)
			admac_handle_chan_int(ad, i);
		rx_intstate >>= 1;
	}

	return IRQ_HANDLED;
}

static void admac_chan_tasklet(struct tasklet_struct *t)
{
	struct admac_chan *adchan = from_tasklet(adchan, t, tasklet);
	struct admac_tx *adtx;
	struct dmaengine_desc_callback cb;
	struct dmaengine_result tx_result;
	int nacks;

	spin_lock_irq(&adchan->lock);
	adtx = adchan->current_tx;
	nacks = adchan->nperiod_acks;
	adchan->nperiod_acks = 0;
	spin_unlock_irq(&adchan->lock);

	if (!adtx || !nacks)
		return;

	tx_result.result = DMA_TRANS_NOERROR;
	tx_result.residue = 0;

	dmaengine_desc_get_callback(&adtx->tx, &cb);
	while (nacks--)
		dmaengine_desc_callback_invoke(&cb, &tx_result);
}

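/*
 * Example: for an interleaved stereo stream of 32-bit samples, a client
 * would configure a bus width of DMA_SLAVE_BUSWIDTH_4_BYTES and a port
 * window of 2 words per frame, which translates below to
 * BUS_WIDTH_32BIT | BUS_WIDTH_FRAME_2_WORDS.
 */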
static int admac_device_config(struct dma_chan *chan,
			       struct dma_slave_config *config)
{
	struct admac_chan *adchan = to_admac_chan(chan);
	struct admac_data *ad = adchan->host;
	bool is_tx = admac_chan_direction(adchan->no) == DMA_MEM_TO_DEV;
	int wordsize = 0;
	u32 bus_width = 0;

	switch (is_tx ? config->dst_addr_width : config->src_addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		wordsize = 1;
		bus_width |= BUS_WIDTH_8BIT;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		wordsize = 2;
		bus_width |= BUS_WIDTH_16BIT;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		wordsize = 4;
		bus_width |= BUS_WIDTH_32BIT;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * We take port_window_size to be the number of words in a frame.
	 *
	 * The controller has some means of signalling, out-of-band to the
	 * peripheral, the position of a word within a frame; that is what
	 * makes this control important.
	 */
	switch (is_tx ? config->dst_port_window_size : config->src_port_window_size) {
	case 0 ... 1:
		break;
	case 2:
		bus_width |= BUS_WIDTH_FRAME_2_WORDS;
		break;
	case 4:
		bus_width |= BUS_WIDTH_FRAME_4_WORDS;
		break;
	default:
		return -EINVAL;
	}

	writel_relaxed(bus_width, ad->base + REG_BUS_WIDTH(adchan->no));

	/*
	 * By FIFOCTL_LIMIT we seem to set the maximal number of bytes allowed to be
	 * held in the controller's per-channel FIFO. Transfers seem to be triggered
	 * around the time FIFO occupancy touches FIFOCTL_THRESHOLD.
	 *
	 * The numbers we set are more or less arbitrary.
	 */
	writel_relaxed(FIELD_PREP(CHAN_FIFOCTL_LIMIT, 0x30 * wordsize)
		       | FIELD_PREP(CHAN_FIFOCTL_THRESHOLD, 0x18 * wordsize),
		       ad->base + REG_CHAN_FIFOCTL(adchan->no));

	return 0;
}

static int admac_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct admac_data *ad;
	struct dma_device *dma;
	u32 nchannels;
	int err, irq, i;

	err = of_property_read_u32(np, "dma-channels", &nchannels);
	if (err || nchannels > NCHANNELS_MAX) {
		dev_err(&pdev->dev, "missing or invalid dma-channels property\n");
		return -EINVAL;
	}

	ad = devm_kzalloc(&pdev->dev, struct_size(ad, channels, nchannels), GFP_KERNEL);
	if (!ad)
		return -ENOMEM;

	platform_set_drvdata(pdev, ad);
	ad->dev = &pdev->dev;
	ad->nchannels = nchannels;

	/*
	 * The controller has 4 IRQ outputs. Try them all until
	 * we find one we can use.
	 */
	for (i = 0; i < IRQ_NOUTPUTS; i++) {
		irq = platform_get_irq_optional(pdev, i);
		if (irq >= 0) {
			ad->irq_index = i;
			break;
		}
	}

	if (irq < 0)
		return dev_err_probe(&pdev->dev, irq, "no usable interrupt\n");

	err = devm_request_irq(&pdev->dev, irq, admac_interrupt,
			       0, dev_name(&pdev->dev), ad);
	if (err)
		return dev_err_probe(&pdev->dev, err,
				     "unable to register interrupt\n");

	ad->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ad->base))
		return dev_err_probe(&pdev->dev, PTR_ERR(ad->base),
				     "unable to obtain MMIO resource\n");

	dma = &ad->dma;

	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);

	dma->dev = &pdev->dev;
	dma->device_alloc_chan_resources = admac_alloc_chan_resources;
	dma->device_free_chan_resources = admac_free_chan_resources;
	dma->device_tx_status = admac_tx_status;
	dma->device_issue_pending = admac_issue_pending;
	dma->device_terminate_all = admac_terminate_all;
	dma->device_synchronize = admac_synchronize;
	dma->device_prep_dma_cyclic = admac_prep_dma_cyclic;
	dma->device_config = admac_device_config;
	dma->device_pause = admac_pause;
	dma->device_resume = admac_resume;

	dma->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			       BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			       BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);

	INIT_LIST_HEAD(&dma->channels);
	for (i = 0; i < nchannels; i++) {
		struct admac_chan *adchan = &ad->channels[i];

		adchan->host = ad;
		adchan->no = i;
		adchan->chan.device = &ad->dma;
		spin_lock_init(&adchan->lock);
		INIT_LIST_HEAD(&adchan->submitted);
		INIT_LIST_HEAD(&adchan->issued);
		INIT_LIST_HEAD(&adchan->to_free);
		list_add_tail(&adchan->chan.device_node, &dma->channels);
		tasklet_setup(&adchan->tasklet, admac_chan_tasklet);
	}

	err = dma_async_device_register(&ad->dma);
	if (err)
		return dev_err_probe(&pdev->dev, err, "failed to register DMA device\n");

	err = of_dma_controller_register(pdev->dev.of_node, admac_dma_of_xlate, ad);
	if (err) {
		dma_async_device_unregister(&ad->dma);
		return dev_err_probe(&pdev->dev, err, "failed to register with OF\n");
	}

	return 0;
}

static int admac_remove(struct platform_device *pdev)
{
	struct admac_data *ad = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&ad->dma);

	return 0;
}

static const struct of_device_id admac_of_match[] = {
	{ .compatible = "apple,admac", },
	{ }
};
MODULE_DEVICE_TABLE(of, admac_of_match);

static struct platform_driver apple_admac_driver = {
	.driver = {
		.name = "apple-admac",
		.of_match_table = admac_of_match,
	},
	.probe = admac_probe,
	.remove = admac_remove,
};
module_platform_driver(apple_admac_driver);

MODULE_AUTHOR("Martin Povišer <povik+lin@cutebit.org>");
MODULE_DESCRIPTION("Driver for Audio DMA Controller (ADMAC) on Apple SoCs");
MODULE_LICENSE("GPL");