// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)		(((n) & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		(((n) & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		(((n) & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		(((n) & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		(((n) & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		(((n) & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	(((n) & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		(((n) & 0x3) << 6)
#define STM32_DMA_SCR_TRBUFF		BIT(20) /* Bufferable transfer for USART/UART */
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					 | STM32_DMA_SCR_MINC \
					 | STM32_DMA_SCR_PINCOS \
					 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					 | STM32_DMA_SCR_TEIE \
					 | STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		((n) & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					 | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL		0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL		0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL		0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL			0x03
#define STM32_DMA_FIFO_THRESHOLD_NONE			0x04

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * Valid transfers start from @0 to @0xFFFE, so a full 0xFFFF-item chunk
 * leads to unaligned scatter-gather at the boundary. Thus it's safer to
 * round this value down to the FIFO size (16 bytes).
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
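/*
 * Worked example (illustrative): ALIGN_DOWN(0xffff, 16) = 0xfff0, so each
 * scatter-gather chunk carries at most 65520 items and chunk boundaries
 * stay aligned on the 16-byte FIFO.
 */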
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK	BIT(2)
#define STM32_DMA_DIRECT_MODE_GET(n)	(((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
#define STM32_DMA_ALT_ACK_MODE_MASK	BIT(4)
#define STM32_DMA_ALT_ACK_MODE_GET(n)	(((n) & STM32_DMA_ALT_ACK_MODE_MASK) >> 4)

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
	enum dma_status status;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}

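/*
 * Worked example for the width selection below (illustrative): with
 * threshold == FULL and buf_len == 6, the loop halves the initial 4-byte
 * width once (6 % 4 != 0) and settles on 2 bytes; an odd buf_addr would
 * then force the width down to 1 byte in the final alignment check.
 */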
static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       dma_addr_t buf_addr,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	if (buf_addr & (max_width - 1))
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;

	return max_width;
}

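/*
 * Worked example for the check below (illustrative): width == 2 bytes and
 * threshold == HALFFULL (1) give (16 / 2) * (1 + 1) / 4 = 4 beats at the
 * threshold, so a burst of 4 divides evenly (remaining == 0) and is
 * allowed, whereas a burst of 8 is rejected.
 */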
static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If the number of beats at the FIFO threshold fits
			 * in a whole number of bursts, this configuration is
			 * allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

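/*
 * Worked example for the alignment test below (illustrative): threshold ==
 * HALFFULL (1) corresponds to (1 + 1) * 4 = 8 bytes, so bursts are only
 * attempted when buf_len is a multiple of 8; a 100-byte period falls back
 * to single transfers.
 */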
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/* If FIFO direct mode, burst is not possible */
	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	/*
	 * Buffer or period length has to be aligned on FIFO depth.
	 * Otherwise bytes may be stuck within FIFO at buffer or period
	 * length.
	 */
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}

static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}

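/*
 * Worked example for the flag extraction below (illustrative): for
 * chan->id == 6, (6 & 4) selects HISR and the shift is
 * ((6 & 2) << 3) | ((6 & 1) * 6) = 16, i.e. stream 6 occupies bits [21:16]
 * of HISR; for chan->id == 3 the shift is 16 | 6 = 22 within LISR.
 */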
static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from the DMA_xISR register corresponding to the
	 * selected DMA channel at the correct bit offset inside that
	 * register.
	 *
	 * If (ch % 4) is 2 or 3, the flag group sits 16 bits higher.
	 * If (ch % 4) is 1 or 3, it sits an additional 6 bits higher.
	 */

	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}

static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the
	 * selected DMA channel at the correct bit offset inside that
	 * register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}

static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, id, reg;

	id = chan->id;
	reg = STM32_DMA_SCR(id);
	dma_scr = stm32_dma_read(dmadev, reg);

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, reg, dma_scr);

		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
					dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
					10, 1000000);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
	chan->status = DMA_COMPLETE;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->desc) {
		dma_cookie_complete(&chan->desc->vdesc.tx);
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR:   0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR:  0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR:  0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
}

static void stm32_dma_sg_inc(struct stm32_dma_chan *chan)
{
	chan->next_sg++;
	if (chan->desc->cyclic && (chan->next_sg == chan->desc->num_sgs))
		chan->next_sg = 0;
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	reg->dma_scr &= ~STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	stm32_dma_sg_inc(chan);

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	chan->busy = true;
	chan->status = DMA_IN_PROGRESS;
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
		stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
		dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
			stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
	} else {
		dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
		stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
		dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
			stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
	}
}

static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr;

	/*
	 * Read and store the current remaining data items and
	 * peripheral/memory addresses to be updated on resume.
	 */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	/*
	 * The transfer can be paused between a previous resume and the
	 * reconfiguration on transfer complete. If the transfer is cyclic
	 * and CIRC/DBM have been deactivated for resume, set them here in
	 * the SCR backup to ensure a good reconfiguration on transfer
	 * complete.
	 */
	if (chan->desc && chan->desc->cyclic) {
		if (chan->desc->num_sgs == 1)
			dma_scr |= STM32_DMA_SCR_CIRC;
		else
			dma_scr |= STM32_DMA_SCR_DBM;
	}
	chan->chan_reg.dma_scr = dma_scr;

	/*
	 * Need to temporarily deactivate CIRC/DBM until the next Transfer
	 * Complete interrupt, otherwise on resume the NDTR autoreload value
	 * will be wrong (lower than the initial period length).
	 */
	if (chan->desc && chan->desc->cyclic) {
		dma_scr &= ~(STM32_DMA_SCR_DBM | STM32_DMA_SCR_CIRC);
		stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	}

	chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan);
}

static void stm32_dma_post_resume_reconfigure(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, status, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (!chan->next_sg)
		sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
	else
		sg_req = &chan->desc->sg_req[chan->next_sg - 1];

	/* Reconfigure NDTR with the initial value */
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), sg_req->chan_reg.dma_sndtr);

	/* Restore SPAR */
	stm32_dma_write(dmadev, STM32_DMA_SPAR(id), sg_req->chan_reg.dma_spar);

	/* Restore SM0AR/SM1AR regardless of DBM/CT as they may have been modified */
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sg_req->chan_reg.dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sg_req->chan_reg.dma_sm1ar);

	/* Reactivate CIRC/DBM if needed */
	if (chan->chan_reg.dma_scr & STM32_DMA_SCR_DBM) {
		dma_scr |= STM32_DMA_SCR_DBM;
		/* Restore CT */
		if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CT)
			dma_scr &= ~STM32_DMA_SCR_CT;
		else
			dma_scr |= STM32_DMA_SCR_CT;
	} else if (chan->chan_reg.dma_scr & STM32_DMA_SCR_CIRC) {
		dma_scr |= STM32_DMA_SCR_CIRC;
	}
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);

	stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);

	dev_dbg(chan2dev(chan), "vchan %pK: reconfigured after pause/resume\n", &chan->vchan);
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan, u32 scr)
{
	if (!chan->desc)
		return;

	if (chan->desc->cyclic) {
		vchan_cyclic_callback(&chan->desc->vdesc);
		stm32_dma_sg_inc(chan);
		/* cyclic while CIRC/DBM disabled => post resume reconfiguration needed */
		if (!(scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM)))
			stm32_dma_post_resume_reconfigure(chan);
		else if (scr & STM32_DMA_SCR_DBM)
			stm32_dma_configure_next_sg(chan);
	} else {
		chan->busy = false;
		chan->status = DMA_COMPLETE;
		if (chan->next_sg == chan->desc->num_sgs) {
			vchan_cookie_complete(&chan->desc->vdesc);
			chan->desc = NULL;
		}
		stm32_dma_start_transfer(chan);
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr, sfcr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (sfcr & STM32_DMA_SFCR_FEIE) {
			if (!(scr & STM32_DMA_SCR_EN) &&
			    !(status & STM32_DMA_TCI))
				dev_err(chan2dev(chan), "FIFO Error\n");
			else
				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
		}
	}
	if (status & STM32_DMA_DMEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
		status &= ~STM32_DMA_DMEI;
		/* DMEIE is an SCR bit, so check it against SCR, not SFCR */
		if (scr & STM32_DMA_SCR_DMEIE)
			dev_dbg(chan2dev(chan), "Direct mode overrun\n");
	}

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE) {
			if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN))
				stm32_dma_handle_chan_paused(chan);
			else
				stm32_dma_handle_chan_done(chan, scr);
		}
		status &= ~STM32_DMA_TCI;
	}

	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}

	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_dma_pause(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	int ret;

	if (chan->status != DMA_IN_PROGRESS)
		return -EPERM;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	ret = stm32_dma_disable_chan(chan);
	/*
	 * A transfer complete flag is set to indicate the end of transfer
	 * due to the stream interruption, so wait for interrupt.
	 */
	if (!ret)
		chan->status = DMA_PAUSED;
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return ret;
}

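/*
 * Worked example for the resume offset computation below (illustrative):
 * if a period was programmed with NDTR = 0x200 items and NDTR read 0x80 at
 * pause time, then with PSIZE == half-word (1) the offset is
 * (0x200 - 0x80) << 1 = 0x300 bytes, added to the incrementing address
 * pointers before the stream is re-enabled.
 */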
static int stm32_dma_resume(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_chan_reg chan_reg = chan->chan_reg;
	u32 id = chan->id, scr, ndtr, offset, spar, sm0ar, sm1ar;
	struct stm32_dma_sg_req *sg_req;
	unsigned long flags;

	if (chan->status != DMA_PAUSED)
		return -EPERM;

	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
	if (WARN_ON(scr & STM32_DMA_SCR_EN))
		return -EPERM;

	spin_lock_irqsave(&chan->vchan.lock, flags);

	/* sg_req[prev_sg] contains the original ndtr, sm0ar and sm1ar before pausing the transfer */
	if (!chan->next_sg)
		sg_req = &chan->desc->sg_req[chan->desc->num_sgs - 1];
	else
		sg_req = &chan->desc->sg_req[chan->next_sg - 1];

	ndtr = sg_req->chan_reg.dma_sndtr;
	offset = (ndtr - chan_reg.dma_sndtr) << STM32_DMA_SCR_PSIZE_GET(chan_reg.dma_scr);
	spar = sg_req->chan_reg.dma_spar;
	sm0ar = sg_req->chan_reg.dma_sm0ar;
	sm1ar = sg_req->chan_reg.dma_sm1ar;

	/*
	 * The peripheral and/or memory addresses have to be updated in order
	 * to adjust the address pointers. Need to check increment.
	 */
	if (chan_reg.dma_scr & STM32_DMA_SCR_PINC)
		stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar + offset);
	else
		stm32_dma_write(dmadev, STM32_DMA_SPAR(id), spar);

	if (!(chan_reg.dma_scr & STM32_DMA_SCR_MINC))
		offset = 0;

	/*
	 * In case of DBM, the current target could be SM1AR.
	 * Need to temporarily deactivate CIRC/DBM to finish the current
	 * transfer, so SM0AR becomes the current target and must be updated
	 * with SM1AR + offset if CT=1.
	 */
	if ((chan_reg.dma_scr & STM32_DMA_SCR_DBM) && (chan_reg.dma_scr & STM32_DMA_SCR_CT))
		stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), sm1ar + offset);
	else
		stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), sm0ar + offset);

	/* NDTR must be restored otherwise internal HW counter won't be correctly reset */
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(id), chan_reg.dma_sndtr);

	/*
	 * Need to temporarily deactivate CIRC/DBM until the next Transfer
	 * Complete interrupt, otherwise the NDTR autoreload value will be
	 * wrong (lower than the initial period length).
	 */
	if (chan_reg.dma_scr & (STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM))
		chan_reg.dma_scr &= ~(STM32_DMA_SCR_CIRC | STM32_DMA_SCR_DBM);

	if (chan_reg.dma_scr & STM32_DMA_SCR_DBM)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* The stream may then be re-enabled to restart transfer from the point it was stopped */
	chan->status = DMA_IN_PROGRESS;
	chan_reg.dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(id), chan_reg.dma_scr);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	dev_dbg(chan2dev(chan), "vchan %pK: resumed\n", &chan->vchan);

	return 0;
}

static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len, dma_addr_t buf_addr)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, fifoth;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	fifoth = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
							 fifoth);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/*
		 * Set memory burst size - burst not possible if address is
		 * not aligned on the address boundary equal to the size of
		 * the transfer.
		 */
		if (buf_addr & (buf_len - 1))
			src_maxburst = 1;
		else
			src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, buf_addr,
							 fifoth);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/*
		 * Set memory burst size - burst not possible if address is
		 * not aligned on the address boundary equal to the size of
		 * the transfer.
		 */
		if (buf_addr & (buf_len - 1))
			dst_maxburst = 1;
		else
			dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

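/*
 * Typical dmaengine client usage of the prep callbacks below (a minimal
 * sketch, not part of this driver; "periph_fifo_phys" is a hypothetical
 * peripheral FIFO address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = periph_fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst = 1,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				     DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */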
static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg),
					       sg_dma_address(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}

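/*
 * Worked example for the cyclic setup below (illustrative): a 4096-byte
 * buffer with 1024-byte periods yields num_periods = 4; since buf_len !=
 * period_len, double buffer mode (DBM) is used and the hardware ping-pongs
 * between SM0AR and SM1AR while the driver reprograms the inactive target
 * in stm32_dma_configure_next_sg(). With buf_len == period_len, plain
 * circular mode (CIRC) is enough.
 */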
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * New requests may be queued as long as the DMA is not started;
	 * the driver then loops over all of them. Once the DMA is started,
	 * new requests can be queued only after terminating the DMA.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len,
				       buf_addr);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len) {
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	} else {
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_CT;
	}

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

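/*
 * Worked example for the conversion below (illustrative): NDTR counts data
 * items of PSIZE bytes, so NDTR == 0x100 with PSIZE == half-word (1) gives
 * 0x100 << 1 = 0x200 remaining bytes.
 */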
static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}

/**
 * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
 * @chan: dma channel
 *
 * This function, called while IRQs are disabled, checks that the hardware
 * has not switched on the next transfer in double buffer mode. The test is
 * done by comparing the next_sg memory address with the hardware related
 * register (based on the CT bit value).
 *
 * Returns true if the expected current transfer is still running or if
 * double buffer mode is not activated.
 */
static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_smar, id, period_len;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	/* In cyclic CIRC but not DBM, CT is not used */
	if (!(dma_scr & STM32_DMA_SCR_DBM))
		return true;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	period_len = sg_req->len;

	/* DBM - take care of a previous pause/resume not yet post reconfigured */
	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		/*
		 * If the transfer has been paused/resumed, SM0AR is in the
		 * range [SM0AR:SM0AR+period_len].
		 */
		return (dma_smar >= sg_req->chan_reg.dma_sm0ar &&
			dma_smar < sg_req->chan_reg.dma_sm0ar + period_len);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
	/*
	 * If the transfer has been paused/resumed, SM1AR is in the
	 * range [SM1AR:SM1AR+period_len].
	 */
	return (dma_smar >= sg_req->chan_reg.dma_sm1ar &&
		dma_smar < sg_req->chan_reg.dma_sm1ar + period_len);
}

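/*
 * Worked example for the burst rounding at the end of the function below
 * (illustrative): with mem_burst == 4 and mem_width == 2, burst_size is
 * 8 bytes; a raw residue of 100 bytes is rounded up to
 * 100 - (100 % 8) + 8 = 104, since a partially consumed burst may still
 * sit in the FIFO.
 */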
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue;
	u32 n_sg = next_sg;
	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;

	/*
	 * Calculating the residue means computing the descriptor
	 * information:
	 * - the sg_req currently transferred
	 * - the hardware remaining position in this sg (NDTR bits field).
	 *
	 * A race condition may occur if DMA is running in cyclic or double
	 * buffer mode, since the DMA registers are automatically reloaded at
	 * end of period transfer. The hardware may have switched to the next
	 * transfer (CT bit updated) just before the position (SxNDTR reg) is
	 * read.
	 * In this case the SxNDTR reg could (or not) correspond to the new
	 * transfer position, and not the expected one.
	 * The strategy implemented in the stm32 driver is to:
	 *  - read the SxNDTR register
	 *  - crosscheck that hardware is still in current transfer.
	 * In case of switch, we can assume that the DMA is at the beginning
	 * of the next transfer. So we approximate the residue accordingly,
	 * by pointing at the beginning of the next transfer.
	 *
	 * This race condition doesn't apply to non-cyclic mode, as double
	 * buffer is not used. In such a situation registers are updated by
	 * the software.
	 */

	residue = stm32_dma_get_remaining_bytes(chan);

	if (chan->desc->cyclic && !stm32_dma_is_current_sg(chan)) {
		n_sg++;
		if (n_sg == chan->desc->num_sgs)
			n_sg = 0;
		residue = sg_req->len;
	}

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes
	 * from NDTR,
	 * else for all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining
	 * periods/sg to be transferred
	 */
	if (!chan->desc->cyclic || n_sg != 0)
		for (i = n_sg; i < desc->num_sgs; i++)
			residue += desc->sg_req[i].len;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}

static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	status = chan->status;

	if (!state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;

	ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);

	vchan_free_chan_resources(to_virt_chan(c));
	stm32_dma_clear_reg(&chan->chan_reg);
	chan->threshold = 0;
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
	if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
		chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
	if (STM32_DMA_ALT_ACK_MODE_GET(cfg->features))
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_TRBUFF;
}

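/*
 * Device tree example for the translation below (illustrative; the request
 * line and masks are hypothetical): a client encodes the four cells as
 * <channel_id request_line stream_config features>, e.g.
 *
 *	dmas = <&dma1 5 7 0x10400 0x3>;
 *	dma-names = "rx";
 *
 * i.e. channel 5, request line 7, MINC plus medium priority in the
 * stream_config mask, and a features mask selecting the FULL FIFO
 * threshold (0x3).
 */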
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	struct reset_control *rst;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prepare_enable error: %d\n", ret);
		return ret;
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto clk_free;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_pause = stm32_dma_pause;
	dd->device_resume = stm32_dma_resume;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->max_sg_burst = STM32_DMA_ALIGNED_MAX_DATA_ITEMS;
	dd->descriptor_reuse = true;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		goto clk_free;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			goto err_unregister;
		chan->irq = ret;

		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
clk_free:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}

#ifdef CONFIG_PM
static int stm32_dma_runtime_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_dma_runtime_resume(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int stm32_dma_pm_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int id, ret, scr;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
		if (scr & STM32_DMA_SCR_EN) {
			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
			return -EBUSY;
		}
	}

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dma_pm_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif

static const struct dev_pm_ops stm32_dma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_pm_suspend, stm32_dma_pm_resume)
	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
			   stm32_dma_runtime_resume, NULL)
};

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
		.pm = &stm32_dma_pm_ops,
	},
	.probe = stm32_dma_probe,
};

static int __init stm32_dma_init(void)
{
	return platform_driver_register(&stm32_dma_driver);
}
subsys_initcall(stm32_dma_init);