// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
#define AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)	/* Number of Channels Minus One */
#define AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)	/* Number of Bytes */
#define AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
#define AT_XDMAC_WRHP(i)	(((i) & 0xF) << 4)
#define AT_XDMAC_WRMP(i)	(((i) & 0xF) << 8)
#define AT_XDMAC_WRLP(i)	(((i) & 0xF) << 12)
#define AT_XDMAC_RDHP(i)	(((i) & 0xF) << 16)
#define AT_XDMAC_RDMP(i)	(((i) & 0xF) << 20)
#define AT_XDMAC_RDLP(i)	(((i) & 0xF) << 24)
#define AT_XDMAC_RDSG(i)	(((i) & 0xF) << 28)
#define AT_XDMAC_GCFG_M2M	(AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
#define AT_XDMAC_GCFG_P2M	(AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
				 AT_XDMAC_WRHP(0x5))
#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_PW0(i)		(((i) & 0xF) << 0)
#define AT_XDMAC_PW1(i)		(((i) & 0xF) << 4)
#define AT_XDMAC_PW2(i)		(((i) & 0xF) << 8)
#define AT_XDMAC_PW3(i)		(((i) & 0xF) << 12)
#define AT_XDMAC_GWAC_M2M	0
#define AT_XDMAC_GWAC_P2M	(AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))

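/*
 * Editor's illustrative sketch, not part of the driver: how the GTYPE
 * fields above decode. Assuming a hypothetical GTYPE read of 0x0031400F:
 *
 *	AT_XDMAC_NB_CH(0x0031400F)   = (0x0F & 0x1F) + 1            = 16 channels
 *	AT_XDMAC_FIFO_SZ(0x0031400F) = (0x0031400F >> 5) & 0x7FF    = 512 bytes
 *	AT_XDMAC_NB_REQ(0x0031400F)  = ((0x0031400F >> 16) & 0x3F) + 1 = 50 requests
 */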
#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
#define AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
#define AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
#define AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
#define AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
#define AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
#define AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
#define AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
#define AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
#define AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
#define AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
#define AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
#define AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
#define AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
#define AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
#define AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
#define AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
#define AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
#define AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
#define AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
#define AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
#define AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
#define AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
#define AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
#define AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
#define AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)		/* Channel x Next Descriptor Interface */
#define AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)	/* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
#define AT_XDMAC_CNDC_NDE	(0x1 << 0)	/* Channel x Next Descriptor Enable */
#define AT_XDMAC_CNDC_NDSUP	(0x1 << 1)	/* Channel x Next Descriptor Source Update */
#define AT_XDMAC_CNDC_NDDUP	(0x1 << 2)	/* Channel x Next Descriptor Destination Update */
#define AT_XDMAC_CNDC_NDVIEW_MASK	GENMASK(28, 27)
#define AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)	/* Channel x Next Descriptor View 0 */
#define AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)	/* Channel x Next Descriptor View 1 */
#define AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)	/* Channel x Next Descriptor View 2 */
#define AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)	/* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
#define AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
#define AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
#define AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
#define AT_XDMAC_CC_MBSIZE_MASK		(0x3 << 1)
#define AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
#define AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
#define AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
#define AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
#define AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
#define AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
#define AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
#define AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
#define AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
#define AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
#define AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
#define AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
#define AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
#define AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
#define AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
#define AT_XDMAC_CC_DWIDTH_OFFSET	11
#define AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
#define AT_XDMAC_CC_DWIDTH_BYTE		0x0
#define AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
#define AT_XDMAC_CC_DWIDTH_WORD		0x2
#define AT_XDMAC_CC_DWIDTH_DWORD	0x3
#define AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
#define AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
#define AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
#define AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
#define AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
#define AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
#define AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
#define AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
#define AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
#define AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
#define AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
#define AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
#define AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
#define AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
#define AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
#define AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
#define AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
#define AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
#define AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
#define AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
#define AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN	0x20
#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES	5

#define AT_XDMAC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
};

struct at_xdmac_layout {
	/* Global Channel Read Suspend Register */
	u8 grs;
	/* Global Write Suspend Register */
	u8 gws;
	/* Global Channel Read Write Suspend Register */
	u8 grws;
	/* Global Channel Read Write Resume Register */
	u8 grwr;
	/* Global Channel Software Request Register */
	u8 gswr;
	/* Global channel Software Request Status Register */
	u8 gsws;
	/* Global Channel Software Flush Request Register */
	u8 gswf;
	/* Channel reg base */
	u8 chan_cc_reg_base;
	/* Source/Destination Interface must be specified or not */
	bool sdif;
	/* AXI queue priority configuration supported */
	bool axi_config;
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan			chan;
	void __iomem			*ch_regs;
	u32				mask;		/* Channel Mask */
	u32				cfg;		/* Channel Configuration Register */
	u8				perid;		/* Peripheral ID */
	u8				perif;		/* Peripheral Interface */
	u8				memif;		/* Memory Interface */
	u32				save_cc;
	u32				save_cim;
	u32				save_cnda;
	u32				save_cndc;
	u32				irq_status;
	unsigned long			status;
	struct tasklet_struct		tasklet;
	struct dma_slave_config		sconfig;

	spinlock_t			lock;

	struct list_head		xfers_list;
	struct list_head		free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device		dma;
	void __iomem			*regs;
	struct device			*dev;
	int				irq;
	struct clk			*clk;
	u32				save_gim;
	struct dma_pool			*at_xdmac_desc_pool;
	const struct at_xdmac_layout	*layout;
	struct at_xdmac_chan		chan[];
};

/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	u32 mbr_nda;	/* Next Descriptor Member */
	u32 mbr_ubc;	/* Microblock Control Member */
	u32 mbr_sa;	/* Source Address Member */
	u32 mbr_da;	/* Destination Address Member */
	u32 mbr_cfg;	/* Configuration Register */
	u32 mbr_bc;	/* Block Control Register */
	u32 mbr_ds;	/* Data Stride Register */
	u32 mbr_sus;	/* Source Microblock Stride Register */
	u32 mbr_dus;	/* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
	struct at_xdmac_lld		lld;
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;
	unsigned int			xfer_size;
	struct list_head		descs_list;
	struct list_head		xfer_node;
} __aligned(sizeof(u64));

static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
	.grs = 0x28,
	.gws = 0x2C,
	.grws = 0x30,
	.grwr = 0x34,
	.gswr = 0x38,
	.gsws = 0x3C,
	.gswf = 0x40,
	.chan_cc_reg_base = 0x50,
	.sdif = true,
	.axi_config = false,
};

static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
	.grs = 0x30,
	.gws = 0x38,
	.grws = 0x40,
	.grwr = 0x44,
	.gswr = 0x48,
	.gsws = 0x4C,
	.gswf = 0x50,
	.chan_cc_reg_base = 0x60,
	.sdif = false,
	.axi_config = true,
};

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
}

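/*
 * Editor's worked example of the address math above: with the SAMA5D4
 * layout (chan_cc_reg_base = 0x50, 0x40 bytes of registers per channel),
 * channel 2 lives at regs + 0x50 + 2 * 0x40 = regs + 0xD0; with the
 * SAMA7G5 layout (base 0x60) the same channel lives at regs + 0xE0.
 */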
#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
{
	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

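/*
 * Editor's usage note (a sketch, assuming the module is built as at_xdmac):
 * since the parameter above is declared with mode 0644, it can be set at
 * load time or changed by root at runtime, e.g.:
 *
 *	modprobe at_xdmac init_nr_desc_per_channel=128
 *	echo 128 > /sys/module/at_xdmac/parameters/init_nr_desc_per_channel
 *
 * A runtime change only affects channels whose descriptors are allocated
 * afterwards.
 */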

static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		if (!desc->active_xfer)
			continue;

		pm_runtime_mark_last_busy(atxdmac->dev);
		pm_runtime_put_autosuspend(atxdmac->dev);
	}
}

static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc, *_desc;
	int ret;

	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		if (!desc->active_xfer)
			continue;

		ret = pm_runtime_resume_and_get(atxdmac->dev);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return false;

	/* AT_XDMAC_GS is a global register, so use the global accessor. */
	ret = !!(at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return ret;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
	struct dma_chan *chan, *_chan;
	struct at_xdmac_chan *atchan;
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);

	/* Decrement runtime PM ref counter for each active descriptor. */
	if (!list_empty(&atxdmac->dma.channels)) {
		list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
					 device_node) {
			atchan = to_at_xdmac_chan(chan);
			at_xdmac_runtime_suspend_descriptors(atchan);
		}
	}

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);
}

/* Must be called with the channel lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	u32 reg;
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	/* Set transfer as active to not try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
	if (atxdmac->layout->sdif)
		reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);

	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing a non-cyclic transfer, we need to use descriptor view 2
	 * at least, since some fields of the configuration register depend
	 * on the transfer size and the src/dst addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
	else if ((first->lld.mbr_ubc &
		  AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	else
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
	/*
	 * Even if the register will be updated from the configuration in the
	 * descriptor when using view 2 or higher, the PROT bit won't be set
	 * properly. This bit can be modified only by using the channel
	 * configuration register.
	 */
	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
	/*
	 * Request Overflow Error is only for peripheral synchronized transfers
	 */
	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
		reg |= AT_XDMAC_CIE_ROIE;

	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc *desc = txd_to_at_desc(tx);
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long irqflags;

	spin_lock_irqsave(&atchan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	spin_unlock_irqrestore(&atchan->lock, irqflags);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);

	return cookie;
}

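/*
 * Editor's sketch of the typical dmaengine consumer flow that ends up in
 * at_xdmac_tx_submit() above. The dmaengine calls are the standard client
 * API; the channel name and FIFO address are hypothetical:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "tx");
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,	// device FIFO address
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.dst_maxburst	= 1,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_single(chan, buf_dma, len,
 *					  DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);		// invokes at_xdmac_tx_submit()
 *	dma_async_issue_pending(chan);		// hardware start via issue_pending
 */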
static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc *desc;
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	dma_addr_t phys;

	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
	memset(&desc->lld, 0, sizeof(desc->lld));
	INIT_LIST_HEAD(&desc->descs_list);
	desc->direction = DMA_TRANS_NONE;
	desc->xfer_size = 0;
	desc->active_xfer = false;
}

/* Caller must hold the channel lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		at_xdmac_init_used_desc(desc);
	}

	return desc;
}

static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;

	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac *atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan;
	struct device *dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		atchan->memif, atchan->perif, atchan->perid);

	return chan;
}

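/*
 * Editor's illustrative device tree snippet for the single-cell specifier
 * parsed above, built from the macros in <dt-bindings/dma/at91.h>. The node
 * name, interface numbers and PERID are hypothetical and SoC-specific:
 *
 *	uart1: serial@f8020000 {
 *		dmas = <&dma0 (AT91_XDMAC_DT_MEM_IF(0) |
 *			       AT91_XDMAC_DT_PER_IF(1) |
 *			       AT91_XDMAC_DT_PERID(25))>;
 *		dma-names = "tx";
 *	};
 */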
static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
				      enum dma_transfer_direction direction)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	int csize, dwidth;

	if (direction == DMA_DEV_TO_MEM) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_INCREMENTED_AM
			| AT_XDMAC_CC_SAM_FIXED_AM
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_PER2MEM
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		if (atxdmac->layout->sdif)
			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
				       AT_XDMAC_CC_SIF(atchan->perif);

		csize = ffs(atchan->sconfig.src_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid src addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	} else if (direction == DMA_MEM_TO_DEV) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_FIXED_AM
			| AT_XDMAC_CC_SAM_INCREMENTED_AM
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_MEM2PER
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		if (atxdmac->layout->sdif)
			atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
				       AT_XDMAC_CC_SIF(atchan->memif);

		csize = ffs(atchan->sconfig.dst_maxburst) - 1;
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid dst maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid dst addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	}

	dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

	return 0;
}

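/*
 * Editor's worked example for the CSIZE/DWIDTH encoding above (hypothetical
 * slave configuration): with src_maxburst = 16 and src_addr_width = 4 bytes,
 *
 *	csize  = ffs(16) - 1 = 4  ->  AT_XDMAC_CC_CSIZE(4)  (chunks of 16 data)
 *	dwidth = ffs(4)  - 1 = 2  ->  AT_XDMAC_CC_DWIDTH(2) (32-bit words)
 *
 * Both values must first pass at_xdmac_check_slave_config() below.
 */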
/*
 * Only check that maxburst and addr width values are supported by
 * the controller but not that the configuration is good to perform the
 * transfer since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
				     struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	if (at_xdmac_check_slave_config(sconfig)) {
		dev_err(chan2dev(chan), "invalid slave configuration\n");
		return -EINVAL;
	}

	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

	return 0;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	int i;
	unsigned int xfer_size = 0;
	unsigned long irqflags;
	struct dma_async_tx_descriptor *ret = NULL;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		__func__, sg_len,
		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		flags);

	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
	spin_lock_irqsave(&atchan->lock, irqflags);

	if (at_xdmac_compute_chan_conf(chan, direction))
		goto spin_unlock;

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc *desc = NULL;
		u32 len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			goto spin_unlock;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			__func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			goto spin_unlock;
		}

		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = mem;
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		dwidth = at_xdmac_get_dwidth(atchan->cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? dwidth
			       : AT_XDMAC_CC_DWIDTH_BYTE;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
			| (len >> fixed_dwidth);		/* microblock length */
		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
		xfer_size += len;
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;
	first->direction = direction;
	ret = &first->tx_dma_desc;

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return ret;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	unsigned int periods = buf_len / period_len;
	int i;
	unsigned long irqflags;

	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
		__func__, &buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	if (at_xdmac_compute_chan_conf(chan, direction))
		return NULL;

	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc *desc = NULL;

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			spin_unlock_irqrestore(&atchan->lock, irqflags);
			return NULL;
		}
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
			__func__, desc, &desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		desc->lld.mbr_cfg = atchan->cfg;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	at_xdmac_queue_desc(chan, prev, first);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}

static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
	u32 width;

	/*
	 * Check address alignment to select the greater data width we
	 * can use.
	 *
	 * Some XDMAC implementations don't provide dword transfer, in
	 * this case selecting dword has the same behavior as
	 * selecting word transfers.
	 */
	if (!(addr & 7)) {
		width = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!(addr & 3)) {
		width = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!(addr & 1)) {
		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		width = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	return width;
}

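/*
 * Editor's example of the alignment logic above (hypothetical addresses):
 * for addr = 0x20001004, (addr & 7) != 0 but (addr & 3) == 0, so the widest
 * usable data width is a 32-bit word; an address like 0x20001001 would
 * degrade all the way down to byte transfers.
 */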
static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
				struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *prev,
				dma_addr_t src, dma_addr_t dst,
				struct dma_interleaved_template *xt,
				struct data_chunk *chunk)
{
	struct at_xdmac_desc *desc;
	u32 dwidth;
	unsigned long flags;
	size_t ublen;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interface, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which solves the fact that we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, there is no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_dbg(chan2dev(chan),
			"%s: chunk too big (%zu, max size %lu)...\n",
			__func__, chunk->size,
			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
		return NULL;
	}

	if (prev)
		dev_dbg(chan2dev(chan),
			"Adding items at the end of desc 0x%p\n", prev);

	if (xt->src_inc) {
		if (xt->src_sgl)
			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
	}

	if (xt->dst_inc) {
		if (xt->dst_sgl)
			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = chunk->size >> dwidth;

	desc->lld.mbr_sa = src;
	desc->lld.mbr_da = dst;
	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
	if (prev)
		at_xdmac_queue_desc(chan, prev, desc);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *prev = NULL, *first = NULL;
	dma_addr_t dst_addr, src_addr;
	size_t src_skip = 0, dst_skip = 0, len = 0;
	struct data_chunk *chunk;
	int i;

	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
		return NULL;

	/*
	 * TODO: Handle the case where we have to repeat a chain of
	 * descriptors...
	 */
	if ((xt->numf > 1) && (xt->frame_size > 1))
		return NULL;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
	dst_addr = xt->dst_start;

	if (xt->numf > 1) {
		first = at_xdmac_interleaved_queue_desc(chan, atchan,
							NULL,
							src_addr, dst_addr,
							xt, xt->sgl);
		if (!first)
			return NULL;

		/* Length of the block is (BLEN+1) microblocks. */
		for (i = 0; i < xt->numf - 1; i++)
			at_xdmac_increment_block_count(chan, first);

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, first, first);
		list_add_tail(&first->desc_node, &first->descs_list);
	} else {
		for (i = 0; i < xt->frame_size; i++) {
			size_t src_icg = 0, dst_icg = 0;
			struct at_xdmac_desc *desc;

			chunk = xt->sgl + i;

			dst_icg = dmaengine_get_dst_icg(xt, chunk);
			src_icg = dmaengine_get_src_icg(xt, chunk);

			src_skip = chunk->size + src_icg;
			dst_skip = chunk->size + dst_icg;

			dev_dbg(chan2dev(chan),
				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
				__func__, chunk->size, src_icg, dst_icg);

			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
							       prev,
							       src_addr, dst_addr,
							       xt, chunk);
			if (!desc) {
				if (first)
					list_splice_tail_init(&first->descs_list,
							      &atchan->free_descs_list);
				return NULL;
			}

			if (!first)
				first = desc;

			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
				__func__, desc, first);
			list_add_tail(&desc->desc_node, &first->descs_list);

			if (xt->src_sgl)
				src_addr += src_skip;

			if (xt->dst_sgl)
				dst_addr += dst_skip;

			len += chunk->size;
			prev = desc;
		}
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *first = NULL, *prev = NULL;
	size_t remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t src_addr = src, dst_addr = dest;
	u32 dwidth;
	/*
	 * WARNING: We don't know the direction, which means we can't
	 * dynamically set the source and dest interface, so we have to use
	 * the same one. Only interface 0 allows EBI access. Hopefully we can
	 * access DDR through both ports (at least on SAMA5D4x), so we can use
	 * the same interface for source and dest, which solves the fact that
	 * we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, there is no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
		| AT_XDMAC_CC_DAM_INCREMENTED_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_TYPE_MEM_TRAN;
	unsigned long irqflags;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
		__func__, &src, &dest, len, flags);

	if (unlikely(!len))
		return NULL;

	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);

	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc *desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
							 struct at_xdmac_chan *atchan,
							 dma_addr_t dst_addr,
							 size_t len,
							 int value)
{
	struct at_xdmac_desc *desc;
	unsigned long flags;
	size_t ublen;
	u32 dwidth;
	u8 pattern;	/* u8 avoids sign extension when replicating below */
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover, we don't know
	 * the direction, which means we can't dynamically set the source and
	 * dest interface, so we have to use the same one. Only interface 0
	 * allows EBI access. Hopefully we can access DDR through both ports
	 * (at least on SAMA5D4x), so we can use the same interface for source
	 * and dest, which solves the fact that we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match that of another channel. If it does, it could lead to
	 * spurious flag status.
	 * For the SAMA7G5x case, the SIF and DIF fields are no longer used.
	 * Thus, there is no need to have the SIF/DIF interfaces here.
	 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
	 * zero.
	 */
	u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
		| AT_XDMAC_CC_DAM_UBS_AM
		| AT_XDMAC_CC_SAM_INCREMENTED_AM
		| AT_XDMAC_CC_MBSIZE_SIXTEEN
		| AT_XDMAC_CC_MEMSET_HW_MODE
		| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, dst_addr);

	if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_err(chan2dev(chan),
			"%s: Transfer too large, aborting...\n",
			__func__);
		return NULL;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	/* Only the first byte of value is to be used according to dmaengine */
	pattern = (u8)value;

	ublen = len >> dwidth;

	desc->lld.mbr_da = dst_addr;
	desc->lld.mbr_ds = (pattern << 24) |
			   (pattern << 16) |
			   (pattern << 8) |
			   pattern;
	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
		desc->lld.mbr_cfg);

	return desc;
}

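/*
 * Editor's worked example of the pattern replication above: value = 0xAB
 * yields mbr_ds = 0xABABABAB. Keeping `pattern` unsigned matters on
 * signed-char targets: a signed char holding 0xAB would sign-extend through
 * the shifts and the OR would produce 0xFFFFFFAB instead.
 */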
static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc;

	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
		__func__, &dest, len, value, flags);

	if (unlikely(!len))
		return NULL;

	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
	if (!desc)
		return NULL;
	list_add_tail(&desc->desc_node, &desc->descs_list);

	desc->tx_dma_desc.cookie = -EBUSY;
	desc->tx_dma_desc.flags = flags;
	desc->xfer_size = len;

	return &desc->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, int value,
			    unsigned long flags)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc, *pdesc = NULL,
			     *ppdesc = NULL, *first = NULL;
	struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
	size_t stride = 0, pstride = 0, len = 0;
	int i;

	if (!sgl)
		return NULL;

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
		__func__, sg_len, value, flags);

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
			__func__, &sg_dma_address(sg), sg_dma_len(sg),
			value, flags);
		desc = at_xdmac_memset_create_desc(chan, atchan,
						   sg_dma_address(sg),
						   sg_dma_len(sg),
						   value);
		if (!desc) {
			if (first)
				list_splice_tail_init(&first->descs_list,
						      &atchan->free_descs_list);
			return NULL;
		}

		if (!first)
			first = desc;

		/* Update our strides */
		pstride = stride;
		if (psg)
			stride = sg_dma_address(sg) -
				(sg_dma_address(psg) + sg_dma_len(psg));

		/*
		 * The scatterlist API gives us only the address and
		 * length of each element.
		 *
		 * Unfortunately, we don't have the stride, which we
		 * will need to compute.
		 *
		 * That leaves us in a situation like this one:
		 *    len    stride    len    stride    len
		 * +-------+        +-------+        +-------+
		 * |  N-2  |        |  N-1  |        |   N   |
		 * +-------+        +-------+        +-------+
		 *
		 * We need all these three elements (N-2, N-1 and N)
		 * to actually decide whether we need to queue N-1 or
		 * reuse N-2.
		 *
		 * We will only consider N if it is the last element.
		 */
		if (ppdesc && pdesc) {
			if ((stride == pstride) &&
			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
				dev_dbg(chan2dev(chan),
					"%s: desc 0x%p can be merged with desc 0x%p\n",
					__func__, pdesc, ppdesc);

				/*
				 * Increment the block count of the
				 * N-2 descriptor
				 */
				at_xdmac_increment_block_count(chan, ppdesc);
				ppdesc->lld.mbr_dus = stride;

				/*
				 * Put back the N-1 descriptor in the
				 * free descriptor list
				 */
				list_add_tail(&pdesc->desc_node,
					      &atchan->free_descs_list);

				/*
				 * Make our N-1 descriptor pointer
				 * point to the N-2 since they were
				 * actually merged.
				 */
				pdesc = ppdesc;

				/*
				 * Rule out the case where pstride has
				 * not been computed yet (our second sg
				 * element).
				 *
				 * We also want to catch the case where
				 * there would be a negative stride.
				 */
			} else if (pstride ||
				   sg_dma_address(sg) < sg_dma_address(psg)) {
				/*
				 * Queue the N-1 descriptor after the
				 * N-2
				 */
				at_xdmac_queue_desc(chan, ppdesc, pdesc);

				/*
				 * Add the N-1 descriptor to the list
				 * of the descriptors used for this
				 * transfer
				 */
				list_add_tail(&desc->desc_node,
					      &first->descs_list);
				dev_dbg(chan2dev(chan),
					"%s: add desc 0x%p to descs_list 0x%p\n",
					__func__, desc, first);
			}
		}

		/*
		 * If we are the last element, just see if we have the
		 * same size as the previous element.
		 *
		 * If so, we can merge it with the previous descriptor
		 * since we don't care about the stride anymore.
		 */
		if ((i == (sg_len - 1)) &&
		    sg_dma_len(psg) == sg_dma_len(sg)) {
			dev_dbg(chan2dev(chan),
				"%s: desc 0x%p can be merged with desc 0x%p\n",
				__func__, desc, pdesc);

			/*
			 * Increment the block count of the N-1
			 * descriptor
			 */
			at_xdmac_increment_block_count(chan, pdesc);
			pdesc->lld.mbr_dus = stride;

			/*
			 * Put back the N descriptor in the free
			 * descriptor list
			 */
			list_add_tail(&desc->desc_node,
				      &atchan->free_descs_list);
		}

		/* Update our descriptors */
		ppdesc = pdesc;
		pdesc = desc;

		/* Update our scatter pointers */
		ppsg = psg;
		psg = sg;

		len += sg_dma_len(sg);
	}

	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc, *_desc, *iter;
	struct list_head *descs_list;
	enum dma_status ret;
	int residue, retry, pm_status;
	u32 cur_nda, check_nda, cur_ubc, mask, value;
	u8 dwidth = 0;
	unsigned long flags;
	bool initd;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	pm_status = pm_runtime_resume_and_get(atxdmac->dev);
	if (pm_status < 0)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		goto spin_unlock;
	}

	residue = desc->xfer_size;
	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. The flush is needed before reading CUBC because data
	 * in the FIFO are not reported by CUBC. Reporting a residue of the
	 * transfer length while data sit in the FIFO can cause problems.
	 * Use case: the Atmel USART has a timeout which fires when characters
	 * have been received but no new character arrives for a while. On
	 * timeout, the driver requests the residue. If the data are still in
	 * the DMA FIFO, we would return a residue equal to the transfer
	 * length, i.e. "no data received". An application waiting for these
	 * data would then hang, since no further USART timeout occurs without
	 * new incoming data.
	 */
	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * The easiest way to compute the residue would be to pause the DMA,
	 * but doing so can lead to losing data, as some devices don't have
	 * a FIFO.
	 * We need to read several registers because:
	 * - DMA is running, therefore a descriptor change is possible while
	 *   reading these registers
	 * - When the block transfer is done, the value of the CUBC register
	 *   is set to its initial value until the fetch of the next
	 *   descriptor. This value would corrupt the residue calculation, so
	 *   we have to skip it.
	 *
	 *  INITD --------                    ------------
	 *               |____________________|
	 *        _______________________  _______________
	 *  NDA        @desc2             \/   @desc3
	 *        _______________________/\_______________
	 *        __________  ___________  _______________
	 *  CUBC       0    \/ MAX desc1 \/  MAX desc2
	 *        __________/\___________/\_______________
	 *
	 * Since descriptors are aligned on 64 bits, we can assume that
	 * the update of NDA and CUBC is atomic.
	 * Memory barriers are used to ensure the read order of the registers.
	 * A maximum number of retries is set because, although unlikely, the
	 * loop could otherwise never terminate.
	 */
	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();
		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
		rmb();
		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
		rmb();
		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();

		if ((check_nda == cur_nda) && initd)
			break;
	}

	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
		ret = DMA_ERROR;
		goto spin_unlock;
	}

	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Another flush is needed here because CUBC is updated
	 * when the controller sends the data write command. It could
	 * therefore report data that are not yet written to memory or to the
	 * device. The FIFO flush ensures that data are really written.
	 */
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}

	/*
	 * Remove the size of all microblocks already transferred, including
	 * the current one, then add back the remaining size of the current
	 * microblock.
	 */
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
			desc = iter;
			break;
		}
	}
	residue += cur_ubc << dwidth;

	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);
	return ret;
}
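
/*
 * Illustrative sketch (not part of this driver): a DMA client consumes the
 * residue computed by at_xdmac_tx_status() through the generic dmaengine
 * API. client_bytes_received() is a hypothetical helper; the channel, cookie
 * and transfer length are assumed to come from a transfer the client
 * submitted earlier.
 *
 *	static unsigned int client_bytes_received(struct dma_chan *chan,
 *						  dma_cookie_t cookie,
 *						  unsigned int xfer_len)
 *	{
 *		struct dma_tx_state state;
 *
 *		if (dmaengine_tx_status(chan, cookie, &state) == DMA_ERROR)
 *			return 0;
 *
 *		return xfer_len - state.residue;
 *	}
 *
 * This is the pattern the USART timeout use case above relies on: the FIFO
 * flush guarantees that state.residue only excludes data that have already
 * landed in memory.
 */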

static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	/*
	 * If the channel is enabled, do nothing; advance_work will be
	 * triggered by the interrupt.
	 */
	if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
		return;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
	if (!desc->active_xfer)
		at_xdmac_start_xfer(atchan, desc);
}

static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;
	struct dma_async_tx_descriptor *txd;

	spin_lock_irq(&atchan->lock);
	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);
	if (list_empty(&atchan->xfers_list)) {
		spin_unlock_irq(&atchan->lock);
		return;
	}
	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	spin_unlock_irq(&atchan->lock);
	txd = &desc->tx_dma_desc;
	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);
}

/* Called with atchan->lock held. */
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *bad_desc;
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to continue with other
	 * descriptors queued (if any).
	 */
	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
		dev_err(chan2dev(&atchan->chan), "read bus error!!!\n");
	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
		dev_err(chan2dev(&atchan->chan), "write bus error!!!\n");
	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
		dev_err(chan2dev(&atchan->chan), "request overflow error!!!\n");

	/* The channel must be disabled first as this is not done automatically. */
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	bad_desc = list_first_entry(&atchan->xfers_list,
				    struct at_xdmac_desc,
				    xfer_node);

	/* Print the bad descriptor's details if needed. */
	dev_dbg(chan2dev(&atchan->chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
		bad_desc->lld.mbr_ubc);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	/* Then continue with the usual descriptor management. */
}

static void at_xdmac_tasklet(struct tasklet_struct *t)
{
	struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc *desc;
	struct dma_async_tx_descriptor *txd;
	u32 error_mask;

	if (at_xdmac_chan_is_cyclic(atchan))
		return at_xdmac_handle_cyclic(atchan);

	error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
		AT_XDMAC_CIS_ROIS;

	spin_lock_irq(&atchan->lock);

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);

	if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
	    !(atchan->irq_status & error_mask)) {
		spin_unlock_irq(&atchan->lock);
		return;
	}

	if (atchan->irq_status & error_mask)
		at_xdmac_handle_error(atchan);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
				xfer_node);
	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
	if (!desc->active_xfer) {
		dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting\n");
		spin_unlock_irq(&atchan->lock);
		return;
	}

	txd = &desc->tx_dma_desc;
	dma_cookie_complete(txd);
	/* Remove the transfer from the transfer list. */
	list_del(&desc->xfer_node);
	spin_unlock_irq(&atchan->lock);

	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);

	dma_run_dependencies(txd);

	spin_lock_irq(&atchan->lock);
	/* Move the xfer descriptors into the free descriptors list. */
	list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
	at_xdmac_advance_work(atchan);
	spin_unlock_irq(&atchan->lock);

	/*
	 * Decrement the runtime PM ref counter incremented in
	 * at_xdmac_start_xfer().
	 */
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);
}
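
/*
 * Illustrative sketch (hypothetical client code, not part of this driver):
 * the callback invoked by the tasklet above only fires when the descriptor
 * was prepared with DMA_PREP_INTERRUPT. A typical client wires a completion
 * to it at submit time ("done" is a struct completion owned by the client):
 *
 *	static void client_dma_complete(void *arg)
 *	{
 *		complete(arg);
 *	}
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	txd->callback = client_dma_complete;
 *	txd->callback_param = &done;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */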

static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan *atchan;
	u32 imr, status, pending;
	u32 chan_imr, chan_status;
	int i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->irq_status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}

	} while (pending);

	return ret;
}

static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	unsigned long flags;

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	int ret;
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return ret;
}

static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;
	int ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();

	/* Decrement runtime PM ref counter for each active descriptor. */
	at_xdmac_runtime_suspend_descriptors(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}

static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;
	int ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!at_xdmac_chan_is_paused(atchan))
		goto unlock;

	/* Increment runtime PM ref counter for each active descriptor. */
	ret = at_xdmac_runtime_resume_descriptors(atchan);
	if (ret < 0)
		goto unlock;

	at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);

unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return ret;
}
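
/*
 * Illustrative sketch: clients reach the two handlers above through the
 * generic dmaengine wrappers, e.g. to drain a UART RX channel before reading
 * the residue ("state" is a struct dma_tx_state owned by the client):
 *
 *	dmaengine_pause(chan);
 *	dmaengine_tx_status(chan, cookie, &state);
 *	(consume len - state.residue bytes here)
 *	dmaengine_resume(chan);
 */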

static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc *desc, *_desc;
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long flags;
	int ret;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
		list_del(&desc->xfer_node);
		list_splice_tail_init(&desc->descs_list,
				      &atchan->free_descs_list);
		/*
		 * We incremented the runtime PM reference count in
		 * at_xdmac_start_xfer() for this descriptor. Now it's time
		 * to release it.
		 */
		if (desc->active_xfer) {
			pm_runtime_mark_last_busy(atxdmac->dev);
			pm_runtime_put_autosuspend(atxdmac->dev);
		}
	}

	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}
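
/*
 * Illustrative sketch: the client-side counterpart of the handler above is
 * dmaengine_terminate_sync(), which calls device_terminate_all() and then
 * waits for any in-flight completion callback before the channel is reused
 * or released:
 *
 *	dmaengine_terminate_sync(chan);
 *	dma_release_channel(chan);
 */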

static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc *desc;
	int i;

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		return -EIO;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		return -EIO;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
		if (!desc) {
			if (i == 0) {
				dev_warn(chan2dev(chan),
					 "can't allocate any descriptors\n");
				return -EIO;
			}
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

	return i;
}

static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
	struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}
}

static void at_xdmac_axi_config(struct platform_device *pdev)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	bool dev_m2m = false;
	u32 dma_requests;

	if (!atxdmac->layout->axi_config)
		return; /* Not supported */

	if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
				  &dma_requests)) {
		dev_info(&pdev->dev, "controller in mem2mem mode.\n");
		dev_m2m = true;
	}

	if (dev_m2m) {
		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
	} else {
		at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
		at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
	}
}

static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct dma_chan *chan, *_chan;
	int ret;

	ret = pm_runtime_resume_and_get(atxdmac->dev);
	if (ret < 0)
		return ret;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause(chan);
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}

		at_xdmac_runtime_suspend_descriptors(atchan);
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);

	at_xdmac_off(atxdmac);
	return pm_runtime_force_suspend(atxdmac->dev);
}

static int __maybe_unused atmel_xdmac_resume(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);
	struct at_xdmac_chan *atchan;
	struct dma_chan *chan, *_chan;
	struct platform_device *pdev = to_platform_device(dev);
	int i;
	int ret;

	ret = pm_runtime_force_resume(atxdmac->dev);
	if (ret < 0)
		return ret;

	at_xdmac_axi_config(pdev);

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);

		ret = at_xdmac_runtime_resume_descriptors(atchan);
		if (ret < 0)
			return ret;

		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_resume(chan);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			wmb();
			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}

	pm_runtime_mark_last_busy(atxdmac->dev);
	pm_runtime_put_autosuspend(atxdmac->dev);

	return 0;
}

static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);

	clk_disable(atxdmac->clk);

	return 0;
}

static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
{
	struct at_xdmac *atxdmac = dev_get_drvdata(dev);

	return clk_enable(atxdmac->clk);
}

static int at_xdmac_probe(struct platform_device *pdev)
{
	struct at_xdmac *atxdmac;
	int irq, nr_channels, i, ret;
	void __iomem *base;
	u32 reg;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * Read the number of XDMAC channels. The read helper can't be used
	 * here since atxdmac is not yet allocated, and we need the channel
	 * count to size that allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}
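
	/*
	 * Worked example (hypothetical GTYPE value, not read from hardware):
	 * if the register read above returned 0x003F400F, the decode macros
	 * would yield:
	 *	AT_XDMAC_NB_CH(reg)   = (0x0F & 0x1F) + 1        -> 16 channels
	 *	AT_XDMAC_FIFO_SZ(reg) = (reg >> 5) & 0x7FF       -> 512 bytes
	 *	AT_XDMAC_NB_REQ(reg)  = ((reg >> 16) & 0x3F) + 1 -> 64 requests
	 */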

	atxdmac = devm_kzalloc(&pdev->dev,
			       struct_size(atxdmac, chan, nr_channels),
			       GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;
	atxdmac->dev = &pdev->dev;

	atxdmac->layout = of_device_get_match_data(&pdev->dev);
	if (!atxdmac->layout)
		return -ENODEV;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use dev res to prevent races with the tasklet. */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				 sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}

	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel; the second allocation fails in private_candidate.
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev = &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status = at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
	atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config = at_xdmac_device_config;
	atxdmac->dma.device_pause = at_xdmac_device_pause;
	atxdmac->dma.device_resume = at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	platform_set_drvdata(pdev, atxdmac);

	pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);

	/* Disable all channels and interrupts. */
	at_xdmac_off(atxdmac);

	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "failed to register DMA engine device\n");
		goto err_pm_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register OF DMA controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	at_xdmac_axi_config(pdev);

	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_pm_disable:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac);
	return ret;
}
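
/*
 * Illustrative sketch (hypothetical consumer node and values): the
 * at_xdmac_xlate() registration above is what lets a peripheral's device
 * tree node claim a channel, e.g.:
 *
 *	dmas = <&xdmac AT91_XDMAC_DT_PERID(11)>;
 *	dma-names = "tx";
 *
 * which the consumer driver then requests with:
 *
 *	chan = dma_request_chan(&client_pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */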

static int at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int i;

	at_xdmac_off(atxdmac);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	pm_runtime_disable(atxdmac->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	clk_disable_unprepare(atxdmac->clk);

	free_irq(atxdmac->irq, atxdmac);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}

	return 0;
}

static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
	.prepare = atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
	SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
			   atmel_xdmac_runtime_resume, NULL)
};

static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
		.data = &at_xdmac_sama5d4_layout,
	}, {
		.compatible = "microchip,sama7g5-dma",
		.data = &at_xdmac_sama7g5_layout,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);

static struct platform_driver at_xdmac_driver = {
	.probe = at_xdmac_probe,
	.remove = at_xdmac_remove,
	.driver = {
		.name = "at_xdmac",
		.of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
		.pm = pm_ptr(&atmel_xdmac_dev_pm_ops),
	}
};

static int __init at_xdmac_init(void)
{
	return platform_driver_register(&at_xdmac_driver);
}
subsys_initcall(at_xdmac_init);

static void __exit at_xdmac_exit(void)
{
	platform_driver_unregister(&at_xdmac_driver);
}
module_exit(at_xdmac_exit);

MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");