// SPDX-License-Identifier: GPL-2.0+
//
// drivers/dma/imx-sdma.c
//
// This file contains a driver for the Freescale Smart DMA engine
//
// Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
//
// Based on code from Freescale:
//
// Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.

#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/workqueue.h>

#include <asm/irq.h>
#include <linux/platform_data/dma-imx-sdma.h>
#include <linux/platform_data/dma-imx.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* SDMA registers */
#define SDMA_H_C0PTR		0x000
#define SDMA_H_INTR		0x004
#define SDMA_H_STATSTOP		0x008
#define SDMA_H_START		0x00c
#define SDMA_H_EVTOVR		0x010
#define SDMA_H_DSPOVR		0x014
#define SDMA_H_HOSTOVR		0x018
#define SDMA_H_EVTPEND		0x01c
#define SDMA_H_DSPENBL		0x020
#define SDMA_H_RESET		0x024
#define SDMA_H_EVTERR		0x028
#define SDMA_H_INTRMSK		0x02c
#define SDMA_H_PSW		0x030
#define SDMA_H_EVTERRDBG	0x034
#define SDMA_H_CONFIG		0x038
#define SDMA_ONCE_ENB		0x040
#define SDMA_ONCE_DATA		0x044
#define SDMA_ONCE_INSTR		0x048
#define SDMA_ONCE_STAT		0x04c
#define SDMA_ONCE_CMD		0x050
#define SDMA_EVT_MIRROR		0x054
#define SDMA_ILLINSTADDR	0x058
#define SDMA_CHN0ADDR		0x05c
#define SDMA_ONCE_RTB		0x060
#define SDMA_XTRIG_CONF1	0x070
#define SDMA_XTRIG_CONF2	0x074
#define SDMA_CHNENBL0_IMX35	0x200
#define SDMA_CHNENBL0_IMX31	0x080
#define SDMA_CHNPRI_0		0x100

/*
 * Buffer descriptor status values.
 */
#define BD_DONE  0x01
#define BD_WRAP  0x02
#define BD_CONT  0x04
#define BD_INTR  0x08
#define BD_RROR  0x10
#define BD_LAST  0x20
#define BD_EXTD  0x80

/*
 * Data Node descriptor status values.
 */
#define DND_END_OF_FRAME  0x80
#define DND_END_OF_XFER   0x40
#define DND_DONE          0x20
#define DND_UNUSED        0x01

/*
 * IPCV2 descriptor status values.
 */
#define BD_IPCV2_END_OF_FRAME  0x40

#define IPCV2_MAX_NODES        50
/*
 * Error bit set in the CCB status field by the SDMA,
 * in setbd routine, in case of a transfer error
 */
#define DATA_ERROR  0x10000000

/*
 * Buffer descriptor commands.
 */
#define C0_ADDR   0x01
#define C0_LOAD   0x02
#define C0_DUMP   0x03
#define C0_SETCTX 0x07
#define C0_GETCTX 0x03
#define C0_SETDM  0x01
#define C0_SETPM  0x04
#define C0_GETDM  0x02
#define C0_GETPM  0x08
/*
 * Change endianness indicator in the BD command field
 */
#define CHANGE_ENDIANNESS 0x80

/*
 *  p_2_p watermark_level description
 *	Bits		Name			Description
 *	0-7		Lower WML		Lower watermark level
 *	8		PS			1: Pad Swallowing
 *						0: No Pad Swallowing
 *	9		PA			1: Pad Adding
 *						0: No Pad Adding
 *	10		SPDIF			If this bit is set both source
 *						and destination are on SPBA
 *	11		Source Bit(SP)		1: Source on SPBA
 *						0: Source on AIPS
 *	12		Destination Bit(DP)	1: Destination on SPBA
 *						0: Destination on AIPS
 *	13-15		---------		MUST BE 0
 *	16-23		Higher WML		HWML
 *	24-27		N			Total number of samples after
 *						which Pad adding/Swallowing
 *						must be done. It must be odd.
 *	28		Lower WML Event(LWE)	SDMA events reg to check for
 *						LWML event mask
 *						0: LWE in EVENTS register
 *						1: LWE in EVENTS2 register
 *	29		Higher WML Event(HWE)	SDMA events reg to check for
 *						HWML event mask
 *						0: HWE in EVENTS register
 *						1: HWE in EVENTS2 register
 *	30		---------		MUST BE 0
 *	31		CONT			1: Amount of samples to be
 *						transferred is unknown and
 *						script will keep on
 *						transferring samples as long as
 *						both events are detected and
 *						script must be manually stopped
 *						by the application
 *						0: The amount of samples to be
 *						transferred is equal to the
 *						count field of mode word
 */
#define SDMA_WATERMARK_LEVEL_LWML	0xFF
#define SDMA_WATERMARK_LEVEL_PS		BIT(8)
#define SDMA_WATERMARK_LEVEL_PA		BIT(9)
#define SDMA_WATERMARK_LEVEL_SPDIF	BIT(10)
#define SDMA_WATERMARK_LEVEL_SP		BIT(11)
#define SDMA_WATERMARK_LEVEL_DP		BIT(12)
#define SDMA_WATERMARK_LEVEL_HWML	(0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE	BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE	BIT(29)
#define SDMA_WATERMARK_LEVEL_CONT	BIT(31)
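
/*
 * Illustrative example (not used by the driver): following the bit
 * layout above, a hypothetical p_2_p transfer with a lower watermark
 * of 8 samples, a higher watermark of 16, both ends on SPBA and
 * continuous mode would compose the gReg[7] watermark word as:
 *
 *	wml  = 8;				// LWML, bits 0-7
 *	wml |= 16 << 16;			// HWML, bits 16-23
 *	wml |= SDMA_WATERMARK_LEVEL_SP;		// source on SPBA
 *	wml |= SDMA_WATERMARK_LEVEL_DP;		// destination on SPBA
 *	wml |= SDMA_WATERMARK_LEVEL_CONT;	// run until stopped
 */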

#define SDMA_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define SDMA_DMA_DIRECTIONS	(BIT(DMA_DEV_TO_MEM) | \
				 BIT(DMA_MEM_TO_DEV) | \
				 BIT(DMA_DEV_TO_DEV))

/*
 * Mode/Count of data node descriptors - IPCv2
 */
struct sdma_mode_count {
#define SDMA_BD_MAX_CNT	0xffff
	u32 count   : 16; /* size of the buffer pointed by this BD */
	u32 status  :  8; /* E,R,I,C,W,D status bits stored here */
	u32 command :  8; /* command mostly used for channel 0 */
};

/*
 * Buffer descriptor
 */
struct sdma_buffer_descriptor {
	struct sdma_mode_count mode;
	u32 buffer_addr;	/* address of the buffer described */
	u32 ext_buffer_addr;	/* extended buffer address */
} __attribute__ ((packed));
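
/*
 * Illustrative sketch (hypothetical values, mirroring what
 * sdma_prep_slave_sg() below builds for the final scatterlist entry):
 * a descriptor for a 512-byte, 32-bit-wide buffer that raises an
 * interrupt on completion would end up filled in as
 *
 *	bd->buffer_addr  = sg->dma_address;
 *	bd->mode.count   = 512;
 *	bd->mode.command = 0;	// 0 selects 32-bit transfers
 *	bd->mode.status  = BD_DONE | BD_EXTD | BD_LAST | BD_INTR;
 */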

/**
 * struct sdma_channel_control - Channel control Block
 *
 * @current_bd_ptr:	current buffer descriptor processed
 * @base_bd_ptr:	first element of buffer descriptor array
 * @unused:		padding. The SDMA engine expects an array of 128 byte
 *			control blocks
 */
struct sdma_channel_control {
	u32 current_bd_ptr;
	u32 base_bd_ptr;
	u32 unused[2];
} __attribute__ ((packed));

/**
 * struct sdma_state_registers - SDMA context for a channel
 *
 * @pc:		program counter
 * @unused1:	unused
 * @t:		test bit: status of arithmetic & test instruction
 * @rpc:	return program counter
 * @unused0:	unused
 * @sf:		source fault while loading data
 * @spc:	loop start program counter
 * @unused2:	unused
 * @df:		destination fault while storing data
 * @epc:	loop end program counter
 * @lm:		loop mode
 */
struct sdma_state_registers {
	u32 pc      :14;
	u32 unused1 : 1;
	u32 t       : 1;
	u32 rpc     :14;
	u32 unused0 : 1;
	u32 sf      : 1;
	u32 spc     :14;
	u32 unused2 : 1;
	u32 df      : 1;
	u32 epc     :14;
	u32 lm      : 2;
} __attribute__ ((packed));

/**
 * struct sdma_context_data - sdma context specific to a channel
 *
 * @channel_state:	channel state bits
 * @gReg:		general registers
 * @mda:		burst dma destination address register
 * @msa:		burst dma source address register
 * @ms:			burst dma status register
 * @md:			burst dma data register
 * @pda:		peripheral dma destination address register
 * @psa:		peripheral dma source address register
 * @ps:			peripheral dma status register
 * @pd:			peripheral dma data register
 * @ca:			CRC polynomial register
 * @cs:			CRC accumulator register
 * @dda:		dedicated core destination address register
 * @dsa:		dedicated core source address register
 * @ds:			dedicated core status register
 * @dd:			dedicated core data register
 * @scratch0:		1st word of dedicated ram for context switch
 * @scratch1:		2nd word of dedicated ram for context switch
 * @scratch2:		3rd word of dedicated ram for context switch
 * @scratch3:		4th word of dedicated ram for context switch
 * @scratch4:		5th word of dedicated ram for context switch
 * @scratch5:		6th word of dedicated ram for context switch
 * @scratch6:		7th word of dedicated ram for context switch
 * @scratch7:		8th word of dedicated ram for context switch
 */
struct sdma_context_data {
	struct sdma_state_registers channel_state;
	u32 gReg[8];
	u32 mda;
	u32 msa;
	u32 ms;
	u32 md;
	u32 pda;
	u32 psa;
	u32 ps;
	u32 pd;
	u32 ca;
	u32 cs;
	u32 dda;
	u32 dsa;
	u32 ds;
	u32 dd;
	u32 scratch0;
	u32 scratch1;
	u32 scratch2;
	u32 scratch3;
	u32 scratch4;
	u32 scratch5;
	u32 scratch6;
	u32 scratch7;
} __attribute__ ((packed));
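
/*
 * The context above packs into 32 32-bit words. sdma_load_context()
 * below relies on that: it programs channel 0 with C0_SETDM, a count
 * of sizeof(*context) / 4 = 32 words, and a destination address of
 * 2048 + 32 * channel in SDMA data memory, i.e. the per-channel
 * context area starts at word 2048 and each channel owns 32 words.
 */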

struct sdma_engine;

/**
 * struct sdma_desc - descriptor structure for one transfer
 * @vd:			descriptor for virt dma
 * @num_bd:		number of buffer descriptors for this transfer
 * @bd_phys:		physical address of bd
 * @buf_tail:		ID of the buffer that was processed
 * @buf_ptail:		ID of the previous buffer that was processed
 * @period_len:		period length, used in cyclic.
 * @chn_real_count:	the real count updated from bd->mode.count
 * @chn_count:		the transfer count requested
 * @sdmac:		sdma_channel pointer
 * @bd:			pointer to the allocated buffer descriptors
 */
struct sdma_desc {
	struct virt_dma_desc	vd;
	unsigned int		num_bd;
	dma_addr_t		bd_phys;
	unsigned int		buf_tail;
	unsigned int		buf_ptail;
	unsigned int		period_len;
	unsigned int		chn_real_count;
	unsigned int		chn_count;
	struct sdma_channel	*sdmac;
	struct sdma_buffer_descriptor *bd;
};

/**
 * struct sdma_channel - housekeeping for a SDMA channel
 *
 * @vc:			virt_dma base structure
 * @desc:		sdma description including vd and other special member
 * @sdma:		pointer to the SDMA engine for this channel
 * @channel:		the channel number, matches dmaengine chan_id + 1
 * @direction:		transfer type. Needed for setting SDMA script
 * @slave_config:	Slave configuration
 * @peripheral_type:	Peripheral type. Needed for setting SDMA script
 * @event_id0:		aka dma request line
 * @event_id1:		for channels that use 2 events
 * @word_size:		peripheral access size
 * @pc_from_device:	script address for those device_2_memory
 * @pc_to_device:	script address for those memory_2_device
 * @device_to_device:	script address for those device_2_device
 * @pc_to_pc:		script address for those memory_2_memory
 * @flags:		loop mode or not
 * @per_address:	peripheral source or destination address in common case
 *			destination address in p_2_p case
 * @per_address2:	peripheral source address in p_2_p case
 * @event_mask:		event mask used in p_2_p script
 * @watermark_level:	value for gReg[7], some script will extend it from
 *			basic watermark such as p_2_p
 * @shp_addr:		value for gReg[6]
 * @per_addr:		value for gReg[2]
 * @status:		status of dma channel
 * @context_loaded:	ensure context is only loaded once
 * @data:		specific sdma interface structure
 * @bd_pool:		dma_pool for bd
 * @terminate_worker:	used to call back into terminate work function
 */
struct sdma_channel {
	struct virt_dma_chan		vc;
	struct sdma_desc		*desc;
	struct sdma_engine		*sdma;
	unsigned int			channel;
	enum dma_transfer_direction	direction;
	struct dma_slave_config		slave_config;
	enum sdma_peripheral_type	peripheral_type;
	unsigned int			event_id0;
	unsigned int			event_id1;
	enum dma_slave_buswidth		word_size;
	unsigned int			pc_from_device, pc_to_device;
	unsigned int			device_to_device;
	unsigned int			pc_to_pc;
	unsigned long			flags;
	dma_addr_t			per_address, per_address2;
	unsigned long			event_mask[2];
	unsigned long			watermark_level;
	u32				shp_addr, per_addr;
	enum dma_status			status;
	bool				context_loaded;
	struct imx_dma_data		data;
	struct work_struct		terminate_worker;
};

#define IMX_DMA_SG_LOOP		BIT(0)

#define MAX_DMA_CHANNELS 32
#define MXC_SDMA_DEFAULT_PRIORITY 1
#define MXC_SDMA_MIN_PRIORITY 1
#define MXC_SDMA_MAX_PRIORITY 7

#define SDMA_FIRMWARE_MAGIC 0x414d4453

/**
 * struct sdma_firmware_header - Layout of the firmware image
 *
 * @magic:		"SDMA"
 * @version_major:	increased whenever layout of struct
 *			sdma_script_start_addrs changes.
 * @version_minor:	firmware minor version (for binary compatible changes)
 * @script_addrs_start:	offset of struct sdma_script_start_addrs in this image
 * @num_script_addrs:	Number of script addresses in this image
 * @ram_code_start:	offset of SDMA ram image in this firmware image
 * @ram_code_size:	size of SDMA ram image
 * @script_addrs:	Stores the start address of the SDMA scripts
 *			(in SDMA memory space)
 */
struct sdma_firmware_header {
	u32	magic;
	u32	version_major;
	u32	version_minor;
	u32	script_addrs_start;
	u32	num_script_addrs;
	u32	ram_code_start;
	u32	ram_code_size;
};
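
/*
 * Illustrative layout check (mirroring the validation done in
 * sdma_load_firmware() below): a well-formed image starts with this
 * header, so given a firmware blob fw, the script address table and
 * the RAM code are located as
 *
 *	header   = (struct sdma_firmware_header *)fw->data;
 *	addr     = (void *)header + header->script_addrs_start;
 *	ram_code = (void *)header + header->ram_code_start;
 *
 * and header->magic must equal SDMA_FIRMWARE_MAGIC ("SDMA" in
 * little-endian ASCII) before either offset is trusted.
 */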

struct sdma_driver_data {
	int chnenbl0;
	int num_events;
	struct sdma_script_start_addrs	*script_addrs;
	bool check_ratio;
};

struct sdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct sdma_channel		channel[MAX_DMA_CHANNELS];
	struct sdma_channel_control	*channel_control;
	void __iomem			*regs;
	struct sdma_context_data	*context;
	dma_addr_t			context_phys;
	struct dma_device		dma_device;
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
	u32				spba_start_addr;
	u32				spba_end_addr;
	unsigned int			irq;
	dma_addr_t			bd0_phys;
	struct sdma_buffer_descriptor	*bd0;
	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0 */
	bool				clk_ratio;
};

static int sdma_config_write(struct dma_chan *chan,
			     struct dma_slave_config *dmaengine_cfg,
			     enum dma_transfer_direction direction);

static struct sdma_driver_data sdma_imx31 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX31,
	.num_events = 32,
};

static struct sdma_script_start_addrs sdma_script_imx25 = {
	.ap_2_ap_addr = 729,
	.uart_2_mcu_addr = 904,
	.per_2_app_addr = 1255,
	.mcu_2_app_addr = 834,
	.uartsh_2_mcu_addr = 1120,
	.per_2_shp_addr = 1329,
	.mcu_2_shp_addr = 1048,
	.ata_2_mcu_addr = 1560,
	.mcu_2_ata_addr = 1479,
	.app_2_per_addr = 1189,
	.app_2_mcu_addr = 770,
	.shp_2_per_addr = 1407,
	.shp_2_mcu_addr = 979,
};

static struct sdma_driver_data sdma_imx25 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx25,
};

static struct sdma_driver_data sdma_imx35 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
};

static struct sdma_script_start_addrs sdma_script_imx51 = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_driver_data sdma_imx51 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx51,
};

static struct sdma_script_start_addrs sdma_script_imx53 = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_driver_data sdma_imx53 = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx53,
};

static struct sdma_script_start_addrs sdma_script_imx6q = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.per_2_per_addr = 6331,
	.uartsh_2_mcu_addr = 1032,
	.mcu_2_shp_addr = 960,
	.app_2_mcu_addr = 683,
	.shp_2_mcu_addr = 891,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
};

static struct sdma_driver_data sdma_imx6q = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx6q,
};

static struct sdma_script_start_addrs sdma_script_imx7d = {
	.ap_2_ap_addr = 644,
	.uart_2_mcu_addr = 819,
	.mcu_2_app_addr = 749,
	.uartsh_2_mcu_addr = 1034,
	.mcu_2_shp_addr = 962,
	.app_2_mcu_addr = 685,
	.shp_2_mcu_addr = 893,
	.spdif_2_mcu_addr = 1102,
	.mcu_2_spdif_addr = 1136,
};

static struct sdma_driver_data sdma_imx7d = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
};

static struct sdma_driver_data sdma_imx8mq = {
	.chnenbl0 = SDMA_CHNENBL0_IMX35,
	.num_events = 48,
	.script_addrs = &sdma_script_imx7d,
	.check_ratio = 1,
};

static const struct platform_device_id sdma_devtypes[] = {
	{
		.name = "imx25-sdma",
		.driver_data = (unsigned long)&sdma_imx25,
	}, {
		.name = "imx31-sdma",
		.driver_data = (unsigned long)&sdma_imx31,
	}, {
		.name = "imx35-sdma",
		.driver_data = (unsigned long)&sdma_imx35,
	}, {
		.name = "imx51-sdma",
		.driver_data = (unsigned long)&sdma_imx51,
	}, {
		.name = "imx53-sdma",
		.driver_data = (unsigned long)&sdma_imx53,
	}, {
		.name = "imx6q-sdma",
		.driver_data = (unsigned long)&sdma_imx6q,
	}, {
		.name = "imx7d-sdma",
		.driver_data = (unsigned long)&sdma_imx7d,
	}, {
		.name = "imx8mq-sdma",
		.driver_data = (unsigned long)&sdma_imx8mq,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(platform, sdma_devtypes);

static const struct of_device_id sdma_dt_ids[] = {
	{ .compatible = "fsl,imx6q-sdma", .data = &sdma_imx6q, },
	{ .compatible = "fsl,imx53-sdma", .data = &sdma_imx53, },
	{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
	{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);

#define SDMA_H_CONFIG_DSPDMA	BIT(12) /* indicates if the DSPDMA is used */
#define SDMA_H_CONFIG_RTD_PINS	BIT(11) /* indicates if Real-Time Debug pins are enabled */
#define SDMA_H_CONFIG_ACR	BIT(4)  /* indicates if AHB freq /core freq = 2 or 1 */
#define SDMA_H_CONFIG_CSM	(3)     /* indicates which context switch mode is selected */

static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event)
{
	u32 chnenbl0 = sdma->drvdata->chnenbl0;
	return chnenbl0 + event * 4;
}
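
/*
 * Example: on i.MX35 and later, chnenbl0 is 0x200, so the channel
 * enable register for DMA event 3 lives at 0x200 + 3 * 4 = 0x20c.
 */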

static int sdma_config_ownership(struct sdma_channel *sdmac,
		bool event_override, bool mcu_override, bool dsp_override)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long evt, mcu, dsp;

	if (event_override && mcu_override && dsp_override)
		return -EINVAL;

	evt = readl_relaxed(sdma->regs + SDMA_H_EVTOVR);
	mcu = readl_relaxed(sdma->regs + SDMA_H_HOSTOVR);
	dsp = readl_relaxed(sdma->regs + SDMA_H_DSPOVR);

	if (dsp_override)
		__clear_bit(channel, &dsp);
	else
		__set_bit(channel, &dsp);

	if (event_override)
		__clear_bit(channel, &evt);
	else
		__set_bit(channel, &evt);

	if (mcu_override)
		__clear_bit(channel, &mcu);
	else
		__set_bit(channel, &mcu);

	writel_relaxed(evt, sdma->regs + SDMA_H_EVTOVR);
	writel_relaxed(mcu, sdma->regs + SDMA_H_HOSTOVR);
	writel_relaxed(dsp, sdma->regs + SDMA_H_DSPOVR);

	return 0;
}

static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
{
	writel(BIT(channel), sdma->regs + SDMA_H_START);
}

/*
 * sdma_run_channel0 - run a channel and wait till it's done
 */
static int sdma_run_channel0(struct sdma_engine *sdma)
{
	int ret;
	u32 reg;

	sdma_enable_channel(sdma, 0);

	ret = readl_relaxed_poll_timeout_atomic(sdma->regs + SDMA_H_STATSTOP,
						reg, !(reg & 1), 1, 500);
	if (ret)
		dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");

	/* Set bits of CONFIG register with dynamic context switching */
	reg = readl(sdma->regs + SDMA_H_CONFIG);
	if ((reg & SDMA_H_CONFIG_CSM) == 0) {
		reg |= SDMA_H_CONFIG_CSM;
		writel_relaxed(reg, sdma->regs + SDMA_H_CONFIG);
	}

	return ret;
}

static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
			    u32 address)
{
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	void *buf_virt;
	dma_addr_t buf_phys;
	int ret;
	unsigned long flags;

	buf_virt = dma_alloc_coherent(sdma->dev, size, &buf_phys, GFP_KERNEL);
	if (!buf_virt)
		return -ENOMEM;

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
	bd0->mode.count = size / 2;
	bd0->buffer_addr = buf_phys;
	bd0->ext_buffer_addr = address;

	memcpy(buf_virt, buf, size);

	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	dma_free_coherent(sdma->dev, size, buf_virt, buf_phys);

	return ret;
}

static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	unsigned long val;
	u32 chnenbl = chnenbl_ofs(sdma, event);

	val = readl_relaxed(sdma->regs + chnenbl);
	__set_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	u32 chnenbl = chnenbl_ofs(sdma, event);
	unsigned long val;

	val = readl_relaxed(sdma->regs + chnenbl);
	__clear_bit(channel, &val);
	writel_relaxed(val, sdma->regs + chnenbl);
}

static struct sdma_desc *to_sdma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct sdma_desc, vd.tx);
}

static void sdma_start_desc(struct sdma_channel *sdmac)
{
	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
	struct sdma_desc *desc;
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (!vd) {
		sdmac->desc = NULL;
		return;
	}
	sdmac->desc = desc = to_sdma_desc(&vd->tx);

	list_del(&vd->node);

	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
	sdma_enable_channel(sdma, sdmac->channel);
}

static void sdma_update_channel_loop(struct sdma_channel *sdmac)
{
	struct sdma_buffer_descriptor *bd;
	int error = 0;
	enum dma_status old_status = sdmac->status;

	/*
	 * loop mode. Iterate over descriptors, re-setup them and
	 * call callback function.
	 */
	while (sdmac->desc) {
		struct sdma_desc *desc = sdmac->desc;

		bd = &desc->bd[desc->buf_tail];

		if (bd->mode.status & BD_DONE)
			break;

		if (bd->mode.status & BD_RROR) {
			bd->mode.status &= ~BD_RROR;
			sdmac->status = DMA_ERROR;
			error = -EIO;
		}

		/*
		 * We use bd->mode.count to calculate the residue, since it
		 * contains the number of bytes present in the current buffer
		 * descriptor.
		 */

		desc->chn_real_count = bd->mode.count;
		bd->mode.status |= BD_DONE;
		bd->mode.count = desc->period_len;
		desc->buf_ptail = desc->buf_tail;
		desc->buf_tail = (desc->buf_tail + 1) % desc->num_bd;

		/*
		 * The callback is called from the interrupt context in order
		 * to reduce latency and to avoid the risk of altering the
		 * SDMA transaction status by the time the client tasklet is
		 * executed.
		 */
		spin_unlock(&sdmac->vc.lock);
		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
		spin_lock(&sdmac->vc.lock);

		if (error)
			sdmac->status = old_status;
	}
}

static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
{
	struct sdma_channel *sdmac = (struct sdma_channel *) data;
	struct sdma_buffer_descriptor *bd;
	int i, error = 0;

	sdmac->desc->chn_real_count = 0;
	/*
	 * non loop mode. Iterate over all descriptors, collect
	 * errors and call callback function
	 */
	for (i = 0; i < sdmac->desc->num_bd; i++) {
		bd = &sdmac->desc->bd[i];

		if (bd->mode.status & (BD_DONE | BD_RROR))
			error = -EIO;
		sdmac->desc->chn_real_count += bd->mode.count;
	}

	if (error)
		sdmac->status = DMA_ERROR;
	else
		sdmac->status = DMA_COMPLETE;
}

static irqreturn_t sdma_int_handler(int irq, void *dev_id)
{
	struct sdma_engine *sdma = dev_id;
	unsigned long stat;

	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
	/* channel 0 is special and not handled here, see run_channel0() */
	stat &= ~1;

	while (stat) {
		int channel = fls(stat) - 1;
		struct sdma_channel *sdmac = &sdma->channel[channel];
		struct sdma_desc *desc;

		spin_lock(&sdmac->vc.lock);
		desc = sdmac->desc;
		if (desc) {
			if (sdmac->flags & IMX_DMA_SG_LOOP) {
				sdma_update_channel_loop(sdmac);
			} else {
				mxc_sdma_handle_channel_normal(sdmac);
				vchan_cookie_complete(&desc->vd);
				sdma_start_desc(sdmac);
			}
		}

		spin_unlock(&sdmac->vc.lock);
		__clear_bit(channel, &stat);
	}

	return IRQ_HANDLED;
}

/*
 * sets the pc of SDMA script according to the peripheral type
 */
static void sdma_get_pc(struct sdma_channel *sdmac,
			enum sdma_peripheral_type peripheral_type)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int per_2_emi = 0, emi_2_per = 0;
	/*
	 * These are needed once we start to support transfers between
	 * two peripherals or memory-to-memory transfers
	 */
	int per_2_per = 0, emi_2_emi = 0;

	sdmac->pc_from_device = 0;
	sdmac->pc_to_device = 0;
	sdmac->device_to_device = 0;
	sdmac->pc_to_pc = 0;

	switch (peripheral_type) {
	case IMX_DMATYPE_MEMORY:
		emi_2_emi = sdma->script_addrs->ap_2_ap_addr;
		break;
	case IMX_DMATYPE_DSP:
		emi_2_per = sdma->script_addrs->bp_2_ap_addr;
		per_2_emi = sdma->script_addrs->ap_2_bp_addr;
		break;
	case IMX_DMATYPE_FIRI:
		per_2_emi = sdma->script_addrs->firi_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_firi_addr;
		break;
	case IMX_DMATYPE_UART:
		per_2_emi = sdma->script_addrs->uart_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_UART_SP:
		per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ATA:
		per_2_emi = sdma->script_addrs->ata_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ata_addr;
		break;
	case IMX_DMATYPE_CSPI:
	case IMX_DMATYPE_EXT:
	case IMX_DMATYPE_SSI:
	case IMX_DMATYPE_SAI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
		break;
	case IMX_DMATYPE_SSI_DUAL:
		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
	case IMX_DMATYPE_SDHC:
	case IMX_DMATYPE_CSPI_SP:
	case IMX_DMATYPE_ESAI:
	case IMX_DMATYPE_MSHC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		break;
	case IMX_DMATYPE_ASRC:
		per_2_emi = sdma->script_addrs->asrc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->asrc_2_mcu_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_ASRC_SP:
		per_2_emi = sdma->script_addrs->shp_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_shp_addr;
		per_2_per = sdma->script_addrs->per_2_per_addr;
		break;
	case IMX_DMATYPE_MSHC:
		per_2_emi = sdma->script_addrs->mshc_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_mshc_addr;
		break;
	case IMX_DMATYPE_CCM:
		per_2_emi = sdma->script_addrs->dptc_dvfs_addr;
		break;
	case IMX_DMATYPE_SPDIF:
		per_2_emi = sdma->script_addrs->spdif_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_spdif_addr;
		break;
	case IMX_DMATYPE_IPU_MEMORY:
		emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr;
		break;
	default:
		break;
	}

	sdmac->pc_from_device = per_2_emi;
	sdmac->pc_to_device = emi_2_per;
	sdmac->device_to_device = per_2_per;
	sdmac->pc_to_pc = emi_2_emi;
}

static int sdma_load_context(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	int load_address;
	struct sdma_context_data *context = sdma->context;
	struct sdma_buffer_descriptor *bd0 = sdma->bd0;
	int ret;
	unsigned long flags;

	if (sdmac->context_loaded)
		return 0;

	if (sdmac->direction == DMA_DEV_TO_MEM)
		load_address = sdmac->pc_from_device;
	else if (sdmac->direction == DMA_DEV_TO_DEV)
		load_address = sdmac->device_to_device;
	else if (sdmac->direction == DMA_MEM_TO_MEM)
		load_address = sdmac->pc_to_pc;
	else
		load_address = sdmac->pc_to_device;

	if (load_address < 0)
		return load_address;

	dev_dbg(sdma->dev, "load_address = %d\n", load_address);
	dev_dbg(sdma->dev, "wml = 0x%08x\n", (u32)sdmac->watermark_level);
	dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr);
	dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr);
	dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
	dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);

	spin_lock_irqsave(&sdma->channel_0_lock, flags);

	memset(context, 0, sizeof(*context));
	context->channel_state.pc = load_address;

	/*
	 * Send the event mask, peripheral base address and watermark
	 * level to the script via the channel context.
	 */
	context->gReg[0] = sdmac->event_mask[1];
	context->gReg[1] = sdmac->event_mask[0];
	context->gReg[2] = sdmac->per_addr;
	context->gReg[6] = sdmac->shp_addr;
	context->gReg[7] = sdmac->watermark_level;

	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sdma->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
	ret = sdma_run_channel0(sdma);

	spin_unlock_irqrestore(&sdma->channel_0_lock, flags);

	sdmac->context_loaded = true;

	return ret;
}

static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct sdma_channel, vc.chan);
}

static int sdma_disable_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	writel_relaxed(BIT(channel), sdma->regs + SDMA_H_STATSTOP);
	sdmac->status = DMA_ERROR;

	return 0;
}

static void sdma_channel_terminate_work(struct work_struct *work)
{
	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
						  terminate_worker);
	unsigned long flags;
	LIST_HEAD(head);

	/*
	 * According to the NXP R&D team, a delay of one BD's SDMA transfer
	 * time (at most 1 ms) should be added after the channel is
	 * disabled, to ensure the SDMA core has really stopped after SDMA
	 * clients call .device_terminate_all.
	 */
	usleep_range(1000, 2000);

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	vchan_get_all_descriptors(&sdmac->vc, &head);
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
	vchan_dma_desc_free_list(&sdmac->vc, &head);
	sdmac->context_loaded = false;
}

static int sdma_terminate_all(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdmac->vc.lock, flags);

	sdma_disable_channel(chan);

	if (sdmac->desc) {
		vchan_terminate_vdesc(&sdmac->desc->vd);
		sdmac->desc = NULL;
		schedule_work(&sdmac->terminate_worker);
	}

	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

	return 0;
}

static void sdma_channel_synchronize(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	vchan_synchronize(&sdmac->vc);

	flush_work(&sdmac->terminate_worker);
}

static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
{
	struct sdma_engine *sdma = sdmac->sdma;

	int lwml = sdmac->watermark_level & SDMA_WATERMARK_LEVEL_LWML;
	int hwml = (sdmac->watermark_level & SDMA_WATERMARK_LEVEL_HWML) >> 16;

	set_bit(sdmac->event_id0 % 32, &sdmac->event_mask[1]);
	set_bit(sdmac->event_id1 % 32, &sdmac->event_mask[0]);

	if (sdmac->event_id0 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_LWE;

	if (sdmac->event_id1 > 31)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_HWE;

	/*
	 * If LWML(src_maxburst) > HWML(dst_maxburst), we need to swap
	 * LWML and HWML of INFO(A.3.2.5.1), and also swap
	 * r0(event_mask[1]) and r1(event_mask[0]).
	 */
	if (lwml > hwml) {
		sdmac->watermark_level &= ~(SDMA_WATERMARK_LEVEL_LWML |
					    SDMA_WATERMARK_LEVEL_HWML);
		sdmac->watermark_level |= hwml;
		sdmac->watermark_level |= lwml << 16;
		swap(sdmac->event_mask[0], sdmac->event_mask[1]);
	}

	if (sdmac->per_address2 >= sdma->spba_start_addr &&
	    sdmac->per_address2 <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SP;

	if (sdmac->per_address >= sdma->spba_start_addr &&
	    sdmac->per_address <= sdma->spba_end_addr)
		sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;

	sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
}
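
/*
 * Worked example (hypothetical values): with src_maxburst = 24 and
 * dst_maxburst = 8, lwml = 24 > hwml = 8, so the swap above rewrites
 * the watermark word to hold 8 in bits 0-7 and 24 in bits 16-23 and
 * exchanges event_mask[0]/event_mask[1], preserving the script's
 * assumption that the lower watermark never exceeds the higher one.
 */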

static int sdma_config_channel(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	int ret;

	sdma_disable_channel(chan);

	sdmac->event_mask[0] = 0;
	sdmac->event_mask[1] = 0;
	sdmac->shp_addr = 0;
	sdmac->per_addr = 0;

	switch (sdmac->peripheral_type) {
	case IMX_DMATYPE_DSP:
		sdma_config_ownership(sdmac, false, true, true);
		break;
	case IMX_DMATYPE_MEMORY:
		sdma_config_ownership(sdmac, false, true, false);
		break;
	default:
		sdma_config_ownership(sdmac, true, true, false);
		break;
	}

	sdma_get_pc(sdmac, sdmac->peripheral_type);

	if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) &&
	    (sdmac->peripheral_type != IMX_DMATYPE_DSP)) {
		/* Handle multiple event channels differently */
		if (sdmac->event_id1) {
			if (sdmac->peripheral_type == IMX_DMATYPE_ASRC_SP ||
			    sdmac->peripheral_type == IMX_DMATYPE_ASRC)
				sdma_set_watermarklevel_for_p2p(sdmac);
		} else
			__set_bit(sdmac->event_id0, sdmac->event_mask);

		/* Address */
		sdmac->shp_addr = sdmac->per_address;
		sdmac->per_addr = sdmac->per_address2;
	} else {
		sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */
	}

	ret = sdma_load_context(sdmac);

	return ret;
}

static int sdma_set_channel_priority(struct sdma_channel *sdmac,
				     unsigned int priority)
{
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;

	if (priority < MXC_SDMA_MIN_PRIORITY
	    || priority > MXC_SDMA_MAX_PRIORITY) {
		return -EINVAL;
	}

	writel_relaxed(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel);

	return 0;
}

static int sdma_request_channel0(struct sdma_engine *sdma)
{
	int ret = -EBUSY;

	sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys,
				       GFP_NOWAIT);
	if (!sdma->bd0) {
		ret = -ENOMEM;
		goto out;
	}

	sdma->channel_control[0].base_bd_ptr = sdma->bd0_phys;
	sdma->channel_control[0].current_bd_ptr = sdma->bd0_phys;

	sdma_set_channel_priority(&sdma->channel[0], MXC_SDMA_DEFAULT_PRIORITY);
	return 0;
out:

	return ret;
}

static int sdma_alloc_bd(struct sdma_desc *desc)
{
	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
	int ret = 0;

	desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size,
				      &desc->bd_phys, GFP_NOWAIT);
	if (!desc->bd) {
		ret = -ENOMEM;
		goto out;
	}
out:
	return ret;
}

static void sdma_free_bd(struct sdma_desc *desc)
{
	u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);

	dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd,
			  desc->bd_phys);
}

static void sdma_desc_free(struct virt_dma_desc *vd)
{
	struct sdma_desc *desc = container_of(vd, struct sdma_desc, vd);

	sdma_free_bd(desc);
	kfree(desc);
}

static int sdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct imx_dma_data *data = chan->private;
	struct imx_dma_data mem_data;
	int prio, ret;

	/*
	 * MEMCPY may never set up chan->private via a filter function
	 * (dmatest is one such user), so create a local 'struct
	 * imx_dma_data mem_data' for this case. Note that in any other
	 * slave case you have to set up chan->private with a
	 * 'struct imx_dma_data' in your own filter function if you want
	 * to request a dma channel by dma_request_channel() rather than
	 * dma_request_slave_channel(). Otherwise the 'MEMCPY in case?'
	 * message below will appear to warn you to correct your filter
	 * function.
	 */
	if (!data) {
		dev_dbg(sdmac->sdma->dev, "MEMCPY in case?\n");
		mem_data.priority = 2;
		mem_data.peripheral_type = IMX_DMATYPE_MEMORY;
		mem_data.dma_request = 0;
		mem_data.dma_request2 = 0;
		data = &mem_data;

		sdma_get_pc(sdmac, IMX_DMATYPE_MEMORY);
	}

	switch (data->priority) {
	case DMA_PRIO_HIGH:
		prio = 3;
		break;
	case DMA_PRIO_MEDIUM:
		prio = 2;
		break;
	case DMA_PRIO_LOW:
	default:
		prio = 1;
		break;
	}

	sdmac->peripheral_type = data->peripheral_type;
	sdmac->event_id0 = data->dma_request;
	sdmac->event_id1 = data->dma_request2;

	ret = clk_enable(sdmac->sdma->clk_ipg);
	if (ret)
		return ret;
	ret = clk_enable(sdmac->sdma->clk_ahb);
	if (ret)
		goto disable_clk_ipg;

	ret = sdma_set_channel_priority(sdmac, prio);
	if (ret)
		goto disable_clk_ahb;

	return 0;

disable_clk_ahb:
	clk_disable(sdmac->sdma->clk_ahb);
disable_clk_ipg:
	clk_disable(sdmac->sdma->clk_ipg);
	return ret;
}

static void sdma_free_chan_resources(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;

	sdma_terminate_all(chan);

	sdma_channel_synchronize(chan);

	sdma_event_disable(sdmac, sdmac->event_id0);
	if (sdmac->event_id1)
		sdma_event_disable(sdmac, sdmac->event_id1);

	sdmac->event_id0 = 0;
	sdmac->event_id1 = 0;
	sdmac->context_loaded = false;

	sdma_set_channel_priority(sdmac, 0);

	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);
}

static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
				enum dma_transfer_direction direction, u32 bds)
{
	struct sdma_desc *desc;

	desc = kzalloc((sizeof(*desc)), GFP_NOWAIT);
	if (!desc)
		goto err_out;

	sdmac->status = DMA_IN_PROGRESS;
	sdmac->direction = direction;
	sdmac->flags = 0;

	desc->chn_count = 0;
	desc->chn_real_count = 0;
	desc->buf_tail = 0;
	desc->buf_ptail = 0;
	desc->sdmac = sdmac;
	desc->num_bd = bds;

	if (sdma_alloc_bd(desc))
		goto err_desc_out;

	/* No slave_config called in MEMCPY case, so do here */
	if (direction == DMA_MEM_TO_MEM)
		sdma_config_ownership(sdmac, false, true, false);

	if (sdma_load_context(sdmac))
		goto err_desc_out;

	return desc;

err_desc_out:
	kfree(desc);
err_out:
	return NULL;
}

static struct dma_async_tx_descriptor *sdma_prep_memcpy(
		struct dma_chan *chan, dma_addr_t dma_dst,
		dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int channel = sdmac->channel;
	size_t count;
	int i = 0, param;
	struct sdma_buffer_descriptor *bd;
	struct sdma_desc *desc;

	if (!chan || !len)
		return NULL;

	dev_dbg(sdma->dev, "memcpy: %pad->%pad, len=%zu, channel=%d.\n",
		&dma_src, &dma_dst, len, channel);

	desc = sdma_transfer_init(sdmac, DMA_MEM_TO_MEM,
				  len / SDMA_BD_MAX_CNT + 1);
	if (!desc)
		return NULL;

	do {
		count = min_t(size_t, len, SDMA_BD_MAX_CNT);
		bd = &desc->bd[i];
		bd->buffer_addr = dma_src;
		bd->ext_buffer_addr = dma_dst;
		bd->mode.count = count;
		desc->chn_count += count;
		bd->mode.command = 0;

		dma_src += count;
		dma_dst += count;
		len -= count;
		i++;

		param = BD_DONE | BD_EXTD | BD_CONT;
		/* last bd */
		if (!len) {
			param |= BD_INTR;
			param |= BD_LAST;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %zd dma: 0x%x %s%s\n",
			i, count, bd->buffer_addr,
			param & BD_WRAP ? "wrap" : "",
			param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	} while (len);

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
}
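
/*
 * Example of the BD split above (hypothetical length): a 100000-byte
 * copy allocates 100000 / SDMA_BD_MAX_CNT + 1 = 2 descriptors; the
 * first moves 65535 bytes, the second the remaining 34465 bytes, and
 * only the second carries BD_LAST | BD_INTR.
 */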

static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int i, count;
	int channel = sdmac->channel;
	struct scatterlist *sg;
	struct sdma_desc *desc;

	sdma_config_write(chan, &sdmac->slave_config, direction);

	desc = sdma_transfer_init(sdmac, direction, sg_len);
	if (!desc)
		goto err_out;

	dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
		sg_len, channel);

	for_each_sg(sgl, sg, sg_len, i) {
		struct sdma_buffer_descriptor *bd = &desc->bd[i];
		int param;

		bd->buffer_addr = sg->dma_address;

		count = sg_dma_len(sg);

		if (count > SDMA_BD_MAX_CNT) {
			dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
				channel, count, SDMA_BD_MAX_CNT);
			goto err_bd_out;
		}

		bd->mode.count = count;
		desc->chn_count += count;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_bd_out;

		switch (sdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			bd->mode.command = 0;
			if (count & 3 || sg->dma_address & 3)
				goto err_bd_out;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			bd->mode.command = 2;
			if (count & 1 || sg->dma_address & 1)
				goto err_bd_out;
			break;
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			bd->mode.command = 1;
			break;
		default:
			goto err_bd_out;
		}

		param = BD_DONE | BD_EXTD | BD_CONT;

		if (i + 1 == sg_len) {
			param |= BD_INTR;
			param |= BD_LAST;
			param &= ~BD_CONT;
		}

		dev_dbg(sdma->dev, "entry %d: count: %d dma: %#llx %s%s\n",
			i, count, (u64)sg->dma_address,
			param & BD_WRAP ? "wrap" : "",
			param & BD_INTR ? " intr" : "");

		bd->mode.status = param;
	}

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
err_bd_out:
	sdma_free_bd(desc);
	kfree(desc);
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}

static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_engine *sdma = sdmac->sdma;
	int num_periods = buf_len / period_len;
	int channel = sdmac->channel;
	int i = 0, buf = 0;
	struct sdma_desc *desc;

	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);

	sdma_config_write(chan, &sdmac->slave_config, direction);

	desc = sdma_transfer_init(sdmac, direction, num_periods);
	if (!desc)
		goto err_out;

	desc->period_len = period_len;

	sdmac->flags |= IMX_DMA_SG_LOOP;

	if (period_len > SDMA_BD_MAX_CNT) {
		dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %zu > %d\n",
			channel, period_len, SDMA_BD_MAX_CNT);
		goto err_bd_out;
	}

	while (buf < buf_len) {
		struct sdma_buffer_descriptor *bd = &desc->bd[i];
		int param;

		bd->buffer_addr = dma_addr;

		bd->mode.count = period_len;

		if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES)
			goto err_bd_out;
		if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES)
			bd->mode.command = 0;
		else
			bd->mode.command = sdmac->word_size;

		param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i + 1 == num_periods)
			param |= BD_WRAP;

		dev_dbg(sdma->dev, "entry %d: count: %zu dma: %#llx %s%s\n",
			i, period_len, (u64)dma_addr,
			param & BD_WRAP ? "wrap" : "",
			param & BD_INTR ? " intr" : "");

		bd->mode.status = param;

		dma_addr += period_len;
		buf += period_len;

		i++;
	}

	return vchan_tx_prep(&sdmac->vc, &desc->vd, flags);
err_bd_out:
	sdma_free_bd(desc);
	kfree(desc);
err_out:
	sdmac->status = DMA_ERROR;
	return NULL;
}
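
/*
 * Example (hypothetical audio setup): a 16 KiB ring with a 4 KiB
 * period yields num_periods = 4, so four descriptors are chained,
 * each raising BD_INTR when its period completes, and the fourth
 * carries BD_WRAP so the SDMA engine loops back to the first.
 */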

static int sdma_config_write(struct dma_chan *chan,
			     struct dma_slave_config *dmaengine_cfg,
			     enum dma_transfer_direction direction)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	if (direction == DMA_DEV_TO_MEM) {
		sdmac->per_address = dmaengine_cfg->src_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst *
			dmaengine_cfg->src_addr_width;
		sdmac->word_size = dmaengine_cfg->src_addr_width;
	} else if (direction == DMA_DEV_TO_DEV) {
		sdmac->per_address2 = dmaengine_cfg->src_addr;
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->src_maxburst &
			SDMA_WATERMARK_LEVEL_LWML;
		sdmac->watermark_level |= (dmaengine_cfg->dst_maxburst << 16) &
			SDMA_WATERMARK_LEVEL_HWML;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	} else {
		sdmac->per_address = dmaengine_cfg->dst_addr;
		sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
			dmaengine_cfg->dst_addr_width;
		sdmac->word_size = dmaengine_cfg->dst_addr_width;
	}
	sdmac->direction = direction;
	return sdma_config_channel(chan);
}

static int sdma_config(struct dma_chan *chan,
		       struct dma_slave_config *dmaengine_cfg)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);

	memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));

	/* Set ENBLn earlier to make sure dma request triggered after that */
	if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
		return -EINVAL;
	sdma_event_enable(sdmac, sdmac->event_id0);

	if (sdmac->event_id1) {
		if (sdmac->event_id1 >= sdmac->sdma->drvdata->num_events)
			return -EINVAL;
		sdma_event_enable(sdmac, sdmac->event_id1);
	}

	return 0;
}

static enum dma_status sdma_tx_status(struct dma_chan *chan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	struct sdma_desc *desc = NULL;
	u32 residue;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&sdmac->vc.lock, flags);

	vd = vchan_find_desc(&sdmac->vc, cookie);
	if (vd)
		desc = to_sdma_desc(&vd->tx);
	else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie)
		desc = sdmac->desc;

	if (desc) {
		if (sdmac->flags & IMX_DMA_SG_LOOP)
			residue = (desc->num_bd - desc->buf_ptail) *
				desc->period_len - desc->chn_real_count;
		else
			residue = desc->chn_count - desc->chn_real_count;
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&sdmac->vc.lock, flags);

	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
			 residue);

	return sdmac->status;
}
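
/*
 * Residue example for the cyclic branch above (hypothetical state):
 * with num_bd = 4 periods of period_len = 4096, buf_ptail = 1 and
 * chn_real_count = 4096, the computation gives
 *
 *	residue = (4 - 1) * 4096 - 4096 = 8192
 *
 * i.e. two full periods are still outstanding.
 */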

static void sdma_issue_pending(struct dma_chan *chan)
{
	struct sdma_channel *sdmac = to_sdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdmac->vc.lock, flags);
	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
		sdma_start_desc(sdmac);
	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
}

#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3	41
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4	42

static void sdma_add_scripts(struct sdma_engine *sdma,
			     const struct sdma_script_start_addrs *addr)
{
	s32 *addr_arr = (s32 *)addr;
	s32 *saddr_arr = (s32 *)sdma->script_addrs;
	int i;

	/* use the default firmware in ROM if missing external firmware */
	if (!sdma->script_number)
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;

	if (sdma->script_number > sizeof(struct sdma_script_start_addrs)
				  / sizeof(s32)) {
		dev_err(sdma->dev,
			"SDMA script number %d does not match the firmware\n",
			sdma->script_number);
		return;
	}

	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
}

static void sdma_load_firmware(const struct firmware *fw, void *context)
{
	struct sdma_engine *sdma = context;
	const struct sdma_firmware_header *header;
	const struct sdma_script_start_addrs *addr;
	unsigned short *ram_code;

	if (!fw) {
		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
		/* In this case we just use the ROM firmware. */
		return;
	}

	if (fw->size < sizeof(*header))
		goto err_firmware;

	header = (struct sdma_firmware_header *)fw->data;

	if (header->magic != SDMA_FIRMWARE_MAGIC)
		goto err_firmware;
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
	switch (header->version_major) {
	case 1:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
		break;
	case 2:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
		break;
	case 3:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3;
		break;
	case 4:
		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4;
		break;
	default:
		dev_err(sdma->dev, "unknown firmware version\n");
		goto err_firmware;
	}

	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;

	clk_enable(sdma->clk_ipg);
	clk_enable(sdma->clk_ahb);
	/* download the RAM image for SDMA */
	sdma_load_script(sdma, ram_code,
			 header->ram_code_size,
			 addr->ram_code_start_addr);
	clk_disable(sdma->clk_ipg);
	clk_disable(sdma->clk_ahb);

	sdma_add_scripts(sdma, addr);

	dev_info(sdma->dev, "loaded firmware %d.%d\n",
		 header->version_major,
		 header->version_minor);

err_firmware:
	release_firmware(fw);
}

#define EVENT_REMAP_CELLS 3

static int sdma_event_remap(struct sdma_engine *sdma)
{
	struct device_node *np = sdma->dev->of_node;
	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
	struct property *event_remap;
	struct regmap *gpr;
	char propname[] = "fsl,sdma-event-remap";
	u32 reg, val, shift, num_map, i;
	int ret = 0;

	if (IS_ERR(np) || IS_ERR(gpr_np))
		goto out;

	event_remap = of_find_property(np, propname, NULL);
	num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0;
	if (!num_map) {
		dev_dbg(sdma->dev, "no event needs to be remapped\n");
		goto out;
	} else if (num_map % EVENT_REMAP_CELLS) {
		dev_err(sdma->dev, "the property %s must be a multiple of %d cells\n",
			propname, EVENT_REMAP_CELLS);
1815 ret = -EINVAL;
1816 goto out;
1817 }
1818
1819 gpr = syscon_node_to_regmap(gpr_np);
1820 if (IS_ERR(gpr)) {
1821 dev_err(sdma->dev, "failed to get gpr regmap\n");
1822 ret = PTR_ERR(gpr);
1823 goto out;
1824 }
1825
1826 for (i = 0; i < num_map; i += EVENT_REMAP_CELLS) {
1827 ret = of_property_read_u32_index(np, propname, i, ®);
1828 if (ret) {
1829 dev_err(sdma->dev, "failed to read property %s index %d\n",
1830 propname, i);
1831 goto out;
1832 }
1833
1834 ret = of_property_read_u32_index(np, propname, i + 1, &shift);
1835 if (ret) {
1836 dev_err(sdma->dev, "failed to read property %s index %d\n",
1837 propname, i + 1);
1838 goto out;
1839 }
1840
1841 ret = of_property_read_u32_index(np, propname, i + 2, &val);
1842 if (ret) {
1843 dev_err(sdma->dev, "failed to read property %s index %d\n",
1844 propname, i + 2);
1845 goto out;
1846 }
1847
1848 regmap_update_bits(gpr, reg, BIT(shift), val << shift);
1849 }
1850
1851out:
1852 if (!IS_ERR(gpr_np))
1853 of_node_put(gpr_np);
1854
1855 return ret;
1856}
1857
1858static int sdma_get_firmware(struct sdma_engine *sdma,
1859 const char *fw_name)
1860{
1861 int ret;
1862
1863 ret = request_firmware_nowait(THIS_MODULE,
1864 FW_ACTION_HOTPLUG, fw_name, sdma->dev,
1865 GFP_KERNEL, sdma, sdma_load_firmware);
1866
1867 return ret;
1868}
1869
1870static int sdma_init(struct sdma_engine *sdma)
1871{
1872 int i, ret;
1873 dma_addr_t ccb_phys;
1874
1875 ret = clk_enable(sdma->clk_ipg);
1876 if (ret)
1877 return ret;
1878 ret = clk_enable(sdma->clk_ahb);
1879 if (ret)
1880 goto disable_clk_ipg;
1881
1882 if (sdma->drvdata->check_ratio &&
1883 (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
1884 sdma->clk_ratio = 1;
1885
1886 /* Be sure SDMA has not started yet */
1887 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
1888
1889 sdma->channel_control = dma_alloc_coherent(sdma->dev,
1890 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) +
1891 sizeof(struct sdma_context_data),
1892 &ccb_phys, GFP_KERNEL);
1893
1894 if (!sdma->channel_control) {
1895 ret = -ENOMEM;
1896 goto err_dma_alloc;
1897 }
1898
1899 sdma->context = (void *)sdma->channel_control +
1900 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1901 sdma->context_phys = ccb_phys +
1902 MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control);
1903
1904 /* disable all channels */
1905 for (i = 0; i < sdma->drvdata->num_events; i++)
1906 writel_relaxed(0, sdma->regs + chnenbl_ofs(sdma, i));
1907
1908 /* All channels have priority 0 */
1909 for (i = 0; i < MAX_DMA_CHANNELS; i++)
1910 writel_relaxed(0, sdma->regs + SDMA_CHNPRI_0 + i * 4);
1911
1912 ret = sdma_request_channel0(sdma);
1913 if (ret)
1914 goto err_dma_alloc;
1915
1916 sdma_config_ownership(&sdma->channel[0], false, true, false);
1917
1918 /* Set Command Channel (Channel Zero) */
1919 writel_relaxed(0x4050, sdma->regs + SDMA_CHN0ADDR);
1920
1921 /* Set bits of CONFIG register but with static context switching */
1922 if (sdma->clk_ratio)
1923 writel_relaxed(SDMA_H_CONFIG_ACR, sdma->regs + SDMA_H_CONFIG);
1924 else
1925 writel_relaxed(0, sdma->regs + SDMA_H_CONFIG);
1926
1927 writel_relaxed(ccb_phys, sdma->regs + SDMA_H_C0PTR);
1928
1929 /* Initializes channel's priorities */
1930 sdma_set_channel_priority(&sdma->channel[0], 7);
1931
1932 clk_disable(sdma->clk_ipg);
1933 clk_disable(sdma->clk_ahb);
1934
1935 return 0;
1936
1937err_dma_alloc:
1938 clk_disable(sdma->clk_ahb);
1939disable_clk_ipg:
1940 clk_disable(sdma->clk_ipg);
1941 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1942 return ret;
1943}
1944
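/*
 * Filter callback used with the dmaengine channel request API, both by
 * the legacy platform-data path and by sdma_xlate() below.  A hedged
 * sketch of how a request reaches this filter (the event number,
 * peripheral type and priority are purely illustrative):
 *
 *	struct imx_dma_data data = {
 *		.dma_request	 = 1,			// made-up event
 *		.peripheral_type = IMX_DMATYPE_SSI,
 *		.priority	 = 2,
 *	};
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sdma_filter_fn, &data);
 */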
1945static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
1946{
1947 struct sdma_channel *sdmac = to_sdma_chan(chan);
1948 struct imx_dma_data *data = fn_param;
1949
1950 if (!imx_dma_is_general_purpose(chan))
1951 return false;
1952
1953 sdmac->data = *data;
1954 chan->private = &sdmac->data;
1955
1956 return true;
1957}
1958
1959static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
1960 struct of_dma *ofdma)
1961{
1962 struct sdma_engine *sdma = ofdma->of_dma_data;
1963 dma_cap_mask_t mask = sdma->dma_device.cap_mask;
1964 struct imx_dma_data data;
1965
1966 if (dma_spec->args_count != 3)
1967 return NULL;
1968
1969 data.dma_request = dma_spec->args[0];
1970 data.peripheral_type = dma_spec->args[1];
1971 data.priority = dma_spec->args[2];
1972 /*
1973 * Initialize dma_request2 to zero; it is not encoded in the device
1974 * tree. For P2P, dma_request2 is instead supplied through
1975 * dma_request_channel(): chan->private points at the imx_dma_data,
1976 * and device_alloc_chan_resources() copies imx_dma_data.dma_request2
1977 * into sdmac->event_id1.
1978 */
1979 data.dma_request2 = 0;
1980
1981 return __dma_request_channel(&mask, sdma_filter_fn, &data,
1982 ofdma->of_node);
1983}
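
/*
 * For reference, the three cells decoded above correspond to a device
 * tree consumer entry of the form below; the values are illustrative,
 * not taken from any real board:
 *
 *	dmas = <&sdma 24 22 0>;		// request, peripheral type, priority
 *	dma-names = "rx";
 */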
1984
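/*
 * Probe sequence, roughly: map registers, get clocks and the IRQ,
 * poison the script address table, set up the virtual channels, bring
 * the engine up via sdma_init(), register with the dmaengine core and
 * (for DT probes) the OF DMA helpers, and only then kick off the
 * asynchronous firmware load.
 */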
1985static int sdma_probe(struct platform_device *pdev)
1986{
1987 const struct of_device_id *of_id =
1988 of_match_device(sdma_dt_ids, &pdev->dev);
1989 struct device_node *np = pdev->dev.of_node;
1990 struct device_node *spba_bus;
1991 const char *fw_name;
1992 int ret;
1993 int irq;
1994 struct resource *iores;
1995 struct resource spba_res;
1996 struct sdma_platform_data *pdata = dev_get_platdata(&pdev->dev);
1997 int i;
1998 struct sdma_engine *sdma;
1999 s32 *saddr_arr;
2000 const struct sdma_driver_data *drvdata = NULL;
2001
2002 if (of_id)
2003 drvdata = of_id->data;
2004 else if (pdev->id_entry)
2005 drvdata = (void *)pdev->id_entry->driver_data;
2006
2007 if (!drvdata) {
2008 dev_err(&pdev->dev, "unable to find driver data\n");
2009 return -EINVAL;
2010 }
2011
2012 ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2013 if (ret)
2014 return ret;
2015
2016 sdma = devm_kzalloc(&pdev->dev, sizeof(*sdma), GFP_KERNEL);
2017 if (!sdma)
2018 return -ENOMEM;
2019
2020 spin_lock_init(&sdma->channel_0_lock);
2021
2022 sdma->dev = &pdev->dev;
2023 sdma->drvdata = drvdata;
2024
2025 irq = platform_get_irq(pdev, 0);
2026 if (irq < 0)
2027 return irq;
2028
2029 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2030 sdma->regs = devm_ioremap_resource(&pdev->dev, iores);
2031 if (IS_ERR(sdma->regs))
2032 return PTR_ERR(sdma->regs);
2033
2034 sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
2035 if (IS_ERR(sdma->clk_ipg))
2036 return PTR_ERR(sdma->clk_ipg);
2037
2038 sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
2039 if (IS_ERR(sdma->clk_ahb))
2040 return PTR_ERR(sdma->clk_ahb);
2041
2042 ret = clk_prepare(sdma->clk_ipg);
2043 if (ret)
2044 return ret;
2045
2046 ret = clk_prepare(sdma->clk_ahb);
2047 if (ret)
2048 goto err_clk;
2049
2050 ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
2051 sdma);
2052 if (ret)
2053 goto err_irq;
2054
2055 sdma->irq = irq;
2056
2057 sdma->script_addrs = kzalloc(sizeof(*sdma->script_addrs), GFP_KERNEL);
2058 if (!sdma->script_addrs) {
2059 ret = -ENOMEM;
2060 goto err_irq;
2061 }
2062
2063 /* initially no scripts available */
2064 saddr_arr = (s32 *)sdma->script_addrs;
2065 for (i = 0; i < sizeof(*sdma->script_addrs) / sizeof(s32); i++)
2066 saddr_arr[i] = -EINVAL;
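/*
 * The cast above deliberately treats struct sdma_script_start_addrs as
 * a flat array of s32 so that every script address can be poisoned
 * with -EINVAL in a single loop; sdma_add_scripts() fills in the valid
 * entries later.
 */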
2067
2068 dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
2069 dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
2070 dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
2071
2072 INIT_LIST_HEAD(&sdma->dma_device.channels);
2073 /* Initialize channel parameters */
2074 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2075 struct sdma_channel *sdmac = &sdma->channel[i];
2076
2077 sdmac->sdma = sdma;
2078
2079 sdmac->channel = i;
2080 sdmac->vc.desc_free = sdma_desc_free;
2081 INIT_WORK(&sdmac->terminate_worker,
2082 sdma_channel_terminate_work);
2083 /*
2084 * Add the channel to the DMAC list. Do not add channel 0 though
2085 * because we need it internally in the SDMA driver. This also means
2086 * that channel 0 in dmaengine counting matches sdma channel 1.
2087 */
2088 if (i)
2089 vchan_init(&sdmac->vc, &sdma->dma_device);
2090 }
2091
2092 ret = sdma_init(sdma);
2093 if (ret)
2094 goto err_init;
2095
2096 ret = sdma_event_remap(sdma);
2097 if (ret)
2098 goto err_init;
2099
2100 if (sdma->drvdata->script_addrs)
2101 sdma_add_scripts(sdma, sdma->drvdata->script_addrs);
2102 if (pdata && pdata->script_addrs)
2103 sdma_add_scripts(sdma, pdata->script_addrs);
2104
2105 sdma->dma_device.dev = &pdev->dev;
2106
2107 sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources;
2108 sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources;
2109 sdma->dma_device.device_tx_status = sdma_tx_status;
2110 sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
2111 sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
2112 sdma->dma_device.device_config = sdma_config;
2113 sdma->dma_device.device_terminate_all = sdma_terminate_all;
2114 sdma->dma_device.device_synchronize = sdma_channel_synchronize;
2115 sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
2116 sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
2117 sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;
2118 sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
2119 sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
2120 sdma->dma_device.device_issue_pending = sdma_issue_pending;
2121 sdma->dma_device.dev->dma_parms = &sdma->dma_parms;
2122 sdma->dma_device.copy_align = 2;
2123 dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
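/*
 * copy_align = 2 advertises 4-byte (1 << 2) alignment for memcpy
 * transfers.  Hedged note: SDMA_BD_MAX_CNT is read here as the largest
 * count a single buffer descriptor can carry, hence the max segment
 * size; that reading follows from the name rather than anything in
 * this file.
 */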
2124
2125 platform_set_drvdata(pdev, sdma);
2126
2127 ret = dma_async_device_register(&sdma->dma_device);
2128 if (ret) {
2129 dev_err(&pdev->dev, "unable to register DMA device\n");
2130 goto err_init;
2131 }
2132
2133 if (np) {
2134 ret = of_dma_controller_register(np, sdma_xlate, sdma);
2135 if (ret) {
2136 dev_err(&pdev->dev, "failed to register controller\n");
2137 goto err_register;
2138 }
2139
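/*
 * Hedged note: the SPBA window recorded below appears to let the
 * driver recognize peripherals sitting on the shared SPBA bus later
 * on; the lookup is best-effort, and probing continues even when the
 * node or its address is absent.
 */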
2140 spba_bus = of_find_compatible_node(NULL, NULL, "fsl,spba-bus");
2141 ret = of_address_to_resource(spba_bus, 0, &spba_res);
2142 if (!ret) {
2143 sdma->spba_start_addr = spba_res.start;
2144 sdma->spba_end_addr = spba_res.end;
2145 }
2146 of_node_put(spba_bus);
2147 }
2148
2149 /*
2150 * Kick off firmware loading as the very last step:
2151 * attempt to load firmware only if we're not on the error path, because
2152 * the firmware callback requires a fully functional and allocated sdma
2153 * instance.
2154 */
2155 if (pdata) {
2156 ret = sdma_get_firmware(sdma, pdata->fw_name);
2157 if (ret)
2158 dev_warn(&pdev->dev, "failed to get firmware from platform data\n");
2159 } else {
2160 /*
2161 * Because the device tree does not encode the ROM script addresses,
2162 * the RAM script in the firmware is mandatory for a device tree
2163 * probe; without it, probing fails.
2164 */
2165 ret = of_property_read_string(np, "fsl,sdma-ram-script-name",
2166 &fw_name);
2167 if (ret) {
2168 dev_warn(&pdev->dev, "failed to get firmware name\n");
2169 } else {
2170 ret = sdma_get_firmware(sdma, fw_name);
2171 if (ret)
2172 dev_warn(&pdev->dev, "failed to get firmware from device tree\n");
2173 }
2174 }
2175
2176 return 0;
2177
2178err_register:
2179 dma_async_device_unregister(&sdma->dma_device);
2180err_init:
2181 kfree(sdma->script_addrs);
2182err_irq:
2183 clk_unprepare(sdma->clk_ahb);
2184err_clk:
2185 clk_unprepare(sdma->clk_ipg);
2186 return ret;
2187}
2188
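/*
 * Teardown mirrors probe in reverse: free the IRQ first so the handler
 * cannot run against a half-torn-down device, unregister from the
 * dmaengine core, then release the script table and clocks before
 * killing the per-channel tasklets.
 */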
2189static int sdma_remove(struct platform_device *pdev)
2190{
2191 struct sdma_engine *sdma = platform_get_drvdata(pdev);
2192 int i;
2193
2194 devm_free_irq(&pdev->dev, sdma->irq, sdma);
2195 dma_async_device_unregister(&sdma->dma_device);
2196 kfree(sdma->script_addrs);
2197 clk_unprepare(sdma->clk_ahb);
2198 clk_unprepare(sdma->clk_ipg);
2199 /* Kill each channel's tasklet and release its resources */
2200 for (i = 0; i < MAX_DMA_CHANNELS; i++) {
2201 struct sdma_channel *sdmac = &sdma->channel[i];
2202
2203 tasklet_kill(&sdmac->vc.task);
2204 sdma_free_chan_resources(&sdmac->vc.chan);
2205 }
2206
2207 platform_set_drvdata(pdev, NULL);
2208 return 0;
2209}
2210
2211static struct platform_driver sdma_driver = {
2212 .driver = {
2213 .name = "imx-sdma",
2214 .of_match_table = sdma_dt_ids,
2215 },
2216 .id_table = sdma_devtypes,
2217 .remove = sdma_remove,
2218 .probe = sdma_probe,
2219};
2220
2221module_platform_driver(sdma_driver);
2222
2223MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
2224MODULE_DESCRIPTION("i.MX SDMA driver");
2225#if IS_ENABLED(CONFIG_SOC_IMX6Q)
2226MODULE_FIRMWARE("imx/sdma/sdma-imx6q.bin");
2227#endif
2228#if IS_ENABLED(CONFIG_SOC_IMX7D)
2229MODULE_FIRMWARE("imx/sdma/sdma-imx7d.bin");
2230#endif
2231MODULE_LICENSE("GPL");