1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
4 *
5 * Copyright (C) 2014 Atmel Corporation
6 *
7 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
8 */
9
10#include <asm/barrier.h>
11#include <dt-bindings/dma/at91.h>
12#include <linux/clk.h>
13#include <linux/dmaengine.h>
14#include <linux/dmapool.h>
15#include <linux/interrupt.h>
16#include <linux/irq.h>
17#include <linux/kernel.h>
18#include <linux/list.h>
19#include <linux/module.h>
20#include <linux/of_dma.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/pm.h>
24#include <linux/pm_runtime.h>
25
26#include "dmaengine.h"
27
28/* Global registers */
29#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
30#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
31#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
32#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
33#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
34#define AT_XDMAC_WRHP(i) (((i) & 0xF) << 4)
35#define AT_XDMAC_WRMP(i) (((i) & 0xF) << 8)
36#define AT_XDMAC_WRLP(i) (((i) & 0xF) << 12)
37#define AT_XDMAC_RDHP(i) (((i) & 0xF) << 16)
38#define AT_XDMAC_RDMP(i) (((i) & 0xF) << 20)
39#define AT_XDMAC_RDLP(i) (((i) & 0xF) << 24)
40#define AT_XDMAC_RDSG(i) (((i) & 0xF) << 28)
41#define AT_XDMAC_GCFG_M2M (AT_XDMAC_RDLP(0xF) | AT_XDMAC_WRLP(0xF))
42#define AT_XDMAC_GCFG_P2M (AT_XDMAC_RDSG(0x1) | AT_XDMAC_RDHP(0x3) | \
43 AT_XDMAC_WRHP(0x5))
44#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
45#define AT_XDMAC_PW0(i) (((i) & 0xF) << 0)
46#define AT_XDMAC_PW1(i) (((i) & 0xF) << 4)
47#define AT_XDMAC_PW2(i) (((i) & 0xF) << 8)
48#define AT_XDMAC_PW3(i) (((i) & 0xF) << 12)
49#define AT_XDMAC_GWAC_M2M 0
50#define AT_XDMAC_GWAC_P2M (AT_XDMAC_PW0(0xF) | AT_XDMAC_PW2(0xF))
51
52#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
53#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
54#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
55#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
56#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
57#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
58#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
59#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
60
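/*
 * Illustrative sketch (not part of the driver): how the packed GTYPE fields
 * are typically decoded with the helpers above, assuming "atxdmac" holds the
 * controller state and at_xdmac_read() (defined further below) is used for
 * the raw register readout.
 *
 *	u32 gtype = at_xdmac_read(atxdmac, AT_XDMAC_GTYPE);
 *	unsigned int nb_ch   = AT_XDMAC_NB_CH(gtype);	// bits 4:0, stored minus one
 *	unsigned int fifo_sz = AT_XDMAC_FIFO_SZ(gtype);	// bits 15:5, in bytes
 *	unsigned int nb_req  = AT_XDMAC_NB_REQ(gtype);	// bits 21:16, stored minus one
 */
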
61/* Channel relative registers offsets */
62#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
63#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
64#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
65#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
66#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
67#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
68#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
69#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
70#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
71#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
72#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
73#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
74#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
75#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
76#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
77#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
78#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
79#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
80#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
81#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
82#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
83#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
84#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
85#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
86#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
87#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
88#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
89#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
90#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
91#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
92#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
93#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
94#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
95#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
96#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
97#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
98#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
99#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
100#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
101#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
102#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
103#define AT_XDMAC_CNDC_NDVIEW_MASK GENMASK(28, 27)
104#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
105#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
106#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
107#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
108#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
109#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
110#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
111#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
112#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
113#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
114#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
115#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
116#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
117#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
118#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
119#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
120#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
121#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
122#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
123#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
124#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
125#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
126#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
127#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
128#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
129#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
130#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
131#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
132#define AT_XDMAC_CC_DWIDTH_OFFSET 11
133#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
134#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
135#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
136#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
137#define AT_XDMAC_CC_DWIDTH_WORD 0x2
138#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
139#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
140#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
141#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
142#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
143#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
144#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
145#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
#define AT_XDMAC_CC_DAM_MASK (0x3 << 18) /* Channel Destination Addressing Mode */
147#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
148#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
149#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
150#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
151#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
152#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
153#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
154#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
155#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
156#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
157#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
158#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
159#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
160#define AT_XDMAC_CC_PERID(i) ((0x7f & (i)) << 24) /* Channel Peripheral Identifier */
161#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
162#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
163#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
164
165/* Microblock control members */
166#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
167#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
168#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
169#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
170#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
171#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
172#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
173#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
174
175#define AT_XDMAC_MAX_CHAN 0x20
176#define AT_XDMAC_MAX_CSIZE 16 /* 16 data */
177#define AT_XDMAC_MAX_DWIDTH 8 /* 64 bits */
178#define AT_XDMAC_RESIDUE_MAX_RETRIES 5
179
180#define AT_XDMAC_DMA_BUSWIDTHS\
181 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
182 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
183 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
184 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
185 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
186
187enum atc_status {
188 AT_XDMAC_CHAN_IS_CYCLIC = 0,
189 AT_XDMAC_CHAN_IS_PAUSED,
190 AT_XDMAC_CHAN_IS_PAUSED_INTERNAL,
191};
192
193struct at_xdmac_layout {
194 /* Global Channel Read Suspend Register */
195 u8 grs;
196 /* Global Write Suspend Register */
197 u8 gws;
198 /* Global Channel Read Write Suspend Register */
199 u8 grws;
200 /* Global Channel Read Write Resume Register */
201 u8 grwr;
202 /* Global Channel Software Request Register */
203 u8 gswr;
204 /* Global channel Software Request Status Register */
205 u8 gsws;
206 /* Global Channel Software Flush Request Register */
207 u8 gswf;
208 /* Channel reg base */
209 u8 chan_cc_reg_base;
210 /* Source/Destination Interface must be specified or not */
211 bool sdif;
212 /* AXI queue priority configuration supported */
213 bool axi_config;
214};
215
216/* ----- Channels ----- */
217struct at_xdmac_chan {
218 struct dma_chan chan;
219 void __iomem *ch_regs;
220 u32 mask; /* Channel Mask */
221 u32 cfg; /* Channel Configuration Register */
222 u8 perid; /* Peripheral ID */
223 u8 perif; /* Peripheral Interface */
224 u8 memif; /* Memory Interface */
225 u32 save_cc;
226 u32 save_cim;
227 u32 save_cnda;
228 u32 save_cndc;
229 u32 irq_status;
230 unsigned long status;
231 struct tasklet_struct tasklet;
232 struct dma_slave_config sconfig;
233
234 spinlock_t lock;
235
236 struct list_head xfers_list;
237 struct list_head free_descs_list;
238};
239
240
241/* ----- Controller ----- */
242struct at_xdmac {
243 struct dma_device dma;
244 void __iomem *regs;
245 struct device *dev;
246 int irq;
247 struct clk *clk;
248 u32 save_gim;
249 u32 save_gs;
250 struct dma_pool *at_xdmac_desc_pool;
251 const struct at_xdmac_layout *layout;
252 struct at_xdmac_chan chan[];
253};
254
255
256/* ----- Descriptors ----- */
257
258/* Linked List Descriptor */
259struct at_xdmac_lld {
260 u32 mbr_nda; /* Next Descriptor Member */
261 u32 mbr_ubc; /* Microblock Control Member */
262 u32 mbr_sa; /* Source Address Member */
263 u32 mbr_da; /* Destination Address Member */
264 u32 mbr_cfg; /* Configuration Register */
265 u32 mbr_bc; /* Block Control Register */
266 u32 mbr_ds; /* Data Stride Register */
267 u32 mbr_sus; /* Source Microblock Stride Register */
268 u32 mbr_dus; /* Destination Microblock Stride Register */
269};
270
271/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
272struct at_xdmac_desc {
273 struct at_xdmac_lld lld;
274 enum dma_transfer_direction direction;
275 struct dma_async_tx_descriptor tx_dma_desc;
276 struct list_head desc_node;
277 /* Following members are only used by the first descriptor */
278 bool active_xfer;
279 unsigned int xfer_size;
280 struct list_head descs_list;
281 struct list_head xfer_node;
282} __aligned(sizeof(u64));
283
284static const struct at_xdmac_layout at_xdmac_sama5d4_layout = {
285 .grs = 0x28,
286 .gws = 0x2C,
287 .grws = 0x30,
288 .grwr = 0x34,
289 .gswr = 0x38,
290 .gsws = 0x3C,
291 .gswf = 0x40,
292 .chan_cc_reg_base = 0x50,
293 .sdif = true,
294 .axi_config = false,
295};
296
297static const struct at_xdmac_layout at_xdmac_sama7g5_layout = {
298 .grs = 0x30,
299 .gws = 0x38,
300 .grws = 0x40,
301 .grwr = 0x44,
302 .gswr = 0x48,
303 .gsws = 0x4C,
304 .gswf = 0x50,
305 .chan_cc_reg_base = 0x60,
306 .sdif = false,
307 .axi_config = true,
308};
309
310static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
311{
312 return atxdmac->regs + (atxdmac->layout->chan_cc_reg_base + chan_nb * 0x40);
313}
314
315#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
316#define at_xdmac_write(atxdmac, reg, value) \
317 writel_relaxed((value), (atxdmac)->regs + (reg))
318
319#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
320#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
321
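/*
 * Illustrative note (not part of the driver): with the accessors above, a
 * channel register access decomposes as MMIO base + per-channel offset.
 * For instance, assuming channel 2 on a sama5d4-style layout
 * (chan_cc_reg_base = 0x50, 0x40 bytes per channel, AT_XDMAC_CC = 0x28):
 *
 *	at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
 *	// ends up as: writel_relaxed(cfg, atxdmac->regs + 0x50 + 2 * 0x40 + 0x28)
 */
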
322static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
323{
324 return container_of(dchan, struct at_xdmac_chan, chan);
325}
326
327static struct device *chan2dev(struct dma_chan *chan)
328{
329 return &chan->dev->device;
330}
331
332static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
333{
334 return container_of(ddev, struct at_xdmac, dma);
335}
336
337static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
338{
339 return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
340}
341
342static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
343{
344 return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
345}
346
347static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
348{
349 return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
350}
351
352static inline int at_xdmac_chan_is_paused_internal(struct at_xdmac_chan *atchan)
353{
354 return test_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
355}
356
357static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
358{
359 return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
360}
361
362static inline u8 at_xdmac_get_dwidth(u32 cfg)
363{
364 return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
365};
366
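/*
 * Illustrative example (not part of the driver): at_xdmac_get_dwidth() simply
 * extracts the DWIDTH field, so a configuration word with DWIDTH set to
 * AT_XDMAC_CC_DWIDTH_WORD (0x2) yields 2, i.e. a transfer width of
 * (1 << 2) = 4 bytes.
 *
 *	u32 cfg = AT_XDMAC_CC_DWIDTH(AT_XDMAC_CC_DWIDTH_WORD);
 *	u8 dwidth = at_xdmac_get_dwidth(cfg);	// dwidth == 2, width = 1 << dwidth
 */
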
367static unsigned int init_nr_desc_per_channel = 64;
368module_param(init_nr_desc_per_channel, uint, 0644);
369MODULE_PARM_DESC(init_nr_desc_per_channel,
370 "initial descriptors per channel (default: 64)");
371
372
373static void at_xdmac_runtime_suspend_descriptors(struct at_xdmac_chan *atchan)
374{
375 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
376 struct at_xdmac_desc *desc, *_desc;
377
378 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
379 if (!desc->active_xfer)
380 continue;
381
382 pm_runtime_mark_last_busy(atxdmac->dev);
383 pm_runtime_put_autosuspend(atxdmac->dev);
384 }
385}
386
387static int at_xdmac_runtime_resume_descriptors(struct at_xdmac_chan *atchan)
388{
389 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
390 struct at_xdmac_desc *desc, *_desc;
391 int ret;
392
393 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
394 if (!desc->active_xfer)
395 continue;
396
397 ret = pm_runtime_resume_and_get(atxdmac->dev);
398 if (ret < 0)
399 return ret;
400 }
401
402 return 0;
403}
404
405static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
406{
407 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
408 int ret;
409
410 ret = pm_runtime_resume_and_get(atxdmac->dev);
411 if (ret < 0)
412 return false;
413
414 ret = !!(at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask);
415
416 pm_runtime_mark_last_busy(atxdmac->dev);
417 pm_runtime_put_autosuspend(atxdmac->dev);
418
419 return ret;
420}
421
422static void at_xdmac_off(struct at_xdmac *atxdmac, bool suspend_descriptors)
423{
424 struct dma_chan *chan, *_chan;
425 struct at_xdmac_chan *atchan;
426 int ret;
427
428 ret = pm_runtime_resume_and_get(atxdmac->dev);
429 if (ret < 0)
430 return;
431
432 at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
433
	/* Wait until all channels are disabled. */
435 while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
436 cpu_relax();
437
438 at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
439
440 /* Decrement runtime PM ref counter for each active descriptor. */
441 if (!list_empty(&atxdmac->dma.channels) && suspend_descriptors) {
442 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels,
443 device_node) {
444 atchan = to_at_xdmac_chan(chan);
445 at_xdmac_runtime_suspend_descriptors(atchan);
446 }
447 }
448
449 pm_runtime_mark_last_busy(atxdmac->dev);
450 pm_runtime_put_autosuspend(atxdmac->dev);
451}
452
/* Called with the channel lock held. */
454static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
455 struct at_xdmac_desc *first)
456{
457 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
458 u32 reg;
459 int ret;
460
461 ret = pm_runtime_resume_and_get(atxdmac->dev);
462 if (ret < 0)
463 return;
464
465 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
466
467 /* Set transfer as active to not try to start it again. */
468 first->active_xfer = true;
469
470 /* Tell xdmac where to get the first descriptor. */
471 reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys);
472 if (atxdmac->layout->sdif)
473 reg |= AT_XDMAC_CNDA_NDAIF(atchan->memif);
474
475 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
476
477 /*
478 * When doing non cyclic transfer we need to use the next
479 * descriptor view 2 since some fields of the configuration register
480 * depend on transfer size and src/dest addresses.
481 */
482 if (at_xdmac_chan_is_cyclic(atchan))
483 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
484 else if ((first->lld.mbr_ubc &
485 AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
486 reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
487 else
488 reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
489 /*
490 * Even if the register will be updated from the configuration in the
491 * descriptor when using view 2 or higher, the PROT bit won't be set
492 * properly. This bit can be modified only by using the channel
493 * configuration register.
494 */
495 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
496
497 reg |= AT_XDMAC_CNDC_NDDUP
498 | AT_XDMAC_CNDC_NDSUP
499 | AT_XDMAC_CNDC_NDE;
500 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
501
502 dev_vdbg(chan2dev(&atchan->chan),
503 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
504 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
505 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
506 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
507 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
508 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
509 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
510
511 at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
512 reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
513 /*
514 * Request Overflow Error is only for peripheral synchronized transfers
515 */
516 if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
517 reg |= AT_XDMAC_CIE_ROIE;
518
	/*
	 * There is no end of list when doing cyclic DMA, so we need to get
	 * an interrupt after each period.
	 */
523 if (at_xdmac_chan_is_cyclic(atchan))
524 at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
525 reg | AT_XDMAC_CIE_BIE);
526 else
527 at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
528 reg | AT_XDMAC_CIE_LIE);
529 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
530 dev_vdbg(chan2dev(&atchan->chan),
531 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
532 wmb();
533 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
534
535 dev_vdbg(chan2dev(&atchan->chan),
536 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
537 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
538 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
539 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
540 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
541 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
542 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
543}
544
545static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
546{
547 struct at_xdmac_desc *desc = txd_to_at_desc(tx);
548 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
549 dma_cookie_t cookie;
550 unsigned long irqflags;
551
552 spin_lock_irqsave(&atchan->lock, irqflags);
553 cookie = dma_cookie_assign(tx);
554
555 list_add_tail(&desc->xfer_node, &atchan->xfers_list);
556 spin_unlock_irqrestore(&atchan->lock, irqflags);
557
558 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
559 __func__, atchan, desc);
560
561 return cookie;
562}
563
564static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
565 gfp_t gfp_flags)
566{
567 struct at_xdmac_desc *desc;
568 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
569 dma_addr_t phys;
570
571 desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
572 if (desc) {
573 INIT_LIST_HEAD(&desc->descs_list);
574 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
575 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
576 desc->tx_dma_desc.phys = phys;
577 }
578
579 return desc;
580}
581
582static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
583{
584 memset(&desc->lld, 0, sizeof(desc->lld));
585 INIT_LIST_HEAD(&desc->descs_list);
586 desc->direction = DMA_TRANS_NONE;
587 desc->xfer_size = 0;
588 desc->active_xfer = false;
589}
590
591/* Call must be protected by lock. */
592static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
593{
594 struct at_xdmac_desc *desc;
595
596 if (list_empty(&atchan->free_descs_list)) {
597 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
598 } else {
599 desc = list_first_entry(&atchan->free_descs_list,
600 struct at_xdmac_desc, desc_node);
601 list_del(&desc->desc_node);
602 at_xdmac_init_used_desc(desc);
603 }
604
605 return desc;
606}
607
608static void at_xdmac_queue_desc(struct dma_chan *chan,
609 struct at_xdmac_desc *prev,
610 struct at_xdmac_desc *desc)
611{
612 if (!prev || !desc)
613 return;
614
615 prev->lld.mbr_nda = desc->tx_dma_desc.phys;
616 prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;
617
618 dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
619 __func__, prev, &prev->lld.mbr_nda);
620}
621
622static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
623 struct at_xdmac_desc *desc)
624{
625 if (!desc)
626 return;
627
628 desc->lld.mbr_bc++;
629
630 dev_dbg(chan2dev(chan),
631 "%s: incrementing the block count of the desc 0x%p\n",
632 __func__, desc);
633}
634
635static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
636 struct of_dma *of_dma)
637{
638 struct at_xdmac *atxdmac = of_dma->of_dma_data;
639 struct at_xdmac_chan *atchan;
640 struct dma_chan *chan;
641 struct device *dev = atxdmac->dma.dev;
642
643 if (dma_spec->args_count != 1) {
644 dev_err(dev, "dma phandler args: bad number of args\n");
645 return NULL;
646 }
647
648 chan = dma_get_any_slave_channel(&atxdmac->dma);
649 if (!chan) {
650 dev_err(dev, "can't get a dma channel\n");
651 return NULL;
652 }
653
654 atchan = to_at_xdmac_chan(chan);
655 atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
656 atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
657 atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
658 dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
659 atchan->memif, atchan->perif, atchan->perid);
660
661 return chan;
662}
663
664static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
665 enum dma_transfer_direction direction)
666{
667 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
668 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
669 int csize, dwidth;
670
671 if (direction == DMA_DEV_TO_MEM) {
672 atchan->cfg =
673 AT91_XDMAC_DT_PERID(atchan->perid)
674 | AT_XDMAC_CC_DAM_INCREMENTED_AM
675 | AT_XDMAC_CC_SAM_FIXED_AM
676 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
677 | AT_XDMAC_CC_DSYNC_PER2MEM
678 | AT_XDMAC_CC_MBSIZE_SIXTEEN
679 | AT_XDMAC_CC_TYPE_PER_TRAN;
680 if (atxdmac->layout->sdif)
681 atchan->cfg |= AT_XDMAC_CC_DIF(atchan->memif) |
682 AT_XDMAC_CC_SIF(atchan->perif);
683
684 csize = ffs(atchan->sconfig.src_maxburst) - 1;
685 if (csize < 0) {
686 dev_err(chan2dev(chan), "invalid src maxburst value\n");
687 return -EINVAL;
688 }
689 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
690 dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
691 if (dwidth < 0) {
692 dev_err(chan2dev(chan), "invalid src addr width value\n");
693 return -EINVAL;
694 }
695 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
696 } else if (direction == DMA_MEM_TO_DEV) {
697 atchan->cfg =
698 AT91_XDMAC_DT_PERID(atchan->perid)
699 | AT_XDMAC_CC_DAM_FIXED_AM
700 | AT_XDMAC_CC_SAM_INCREMENTED_AM
701 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
702 | AT_XDMAC_CC_DSYNC_MEM2PER
703 | AT_XDMAC_CC_MBSIZE_SIXTEEN
704 | AT_XDMAC_CC_TYPE_PER_TRAN;
705 if (atxdmac->layout->sdif)
706 atchan->cfg |= AT_XDMAC_CC_DIF(atchan->perif) |
707 AT_XDMAC_CC_SIF(atchan->memif);
708
709 csize = ffs(atchan->sconfig.dst_maxburst) - 1;
710 if (csize < 0) {
711 dev_err(chan2dev(chan), "invalid src maxburst value\n");
712 return -EINVAL;
713 }
714 atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
715 dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
716 if (dwidth < 0) {
717 dev_err(chan2dev(chan), "invalid dst addr width value\n");
718 return -EINVAL;
719 }
720 atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
721 }
722
723 dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
724
725 return 0;
726}
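
/*
 * Worked example (illustrative only): how the chunk size and data width
 * fields are derived above from a typical slave configuration. With
 * src_maxburst = 16 and src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES (4):
 *
 *	csize  = ffs(16) - 1 = 4;	// AT_XDMAC_CC_CSIZE(4): chunks of 16 data
 *	dwidth = ffs(4) - 1  = 2;	// AT_XDMAC_CC_DWIDTH(2): 32-bit transfers
 */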
727
728/*
729 * Only check that maxburst and addr width values are supported by
730 * the controller but not that the configuration is good to perform the
731 * transfer since we don't know the direction at this stage.
732 */
733static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
734{
735 if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
736 || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
737 return -EINVAL;
738
739 if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
740 || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
741 return -EINVAL;
742
743 return 0;
744}
745
746static int at_xdmac_set_slave_config(struct dma_chan *chan,
747 struct dma_slave_config *sconfig)
748{
749 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
750
751 if (at_xdmac_check_slave_config(sconfig)) {
752 dev_err(chan2dev(chan), "invalid slave configuration\n");
753 return -EINVAL;
754 }
755
756 memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
757
758 return 0;
759}
760
761static struct dma_async_tx_descriptor *
762at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
763 unsigned int sg_len, enum dma_transfer_direction direction,
764 unsigned long flags, void *context)
765{
766 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
767 struct at_xdmac_desc *first = NULL, *prev = NULL;
768 struct scatterlist *sg;
769 int i;
770 unsigned int xfer_size = 0;
771 unsigned long irqflags;
772 struct dma_async_tx_descriptor *ret = NULL;
773
774 if (!sgl)
775 return NULL;
776
777 if (!is_slave_direction(direction)) {
778 dev_err(chan2dev(chan), "invalid DMA direction\n");
779 return NULL;
780 }
781
782 dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
783 __func__, sg_len,
784 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
785 flags);
786
787 /* Protect dma_sconfig field that can be modified by set_slave_conf. */
788 spin_lock_irqsave(&atchan->lock, irqflags);
789
790 if (at_xdmac_compute_chan_conf(chan, direction))
791 goto spin_unlock;
792
793 /* Prepare descriptors. */
794 for_each_sg(sgl, sg, sg_len, i) {
795 struct at_xdmac_desc *desc = NULL;
796 u32 len, mem, dwidth, fixed_dwidth;
797
798 len = sg_dma_len(sg);
799 mem = sg_dma_address(sg);
800 if (unlikely(!len)) {
801 dev_err(chan2dev(chan), "sg data length is zero\n");
802 goto spin_unlock;
803 }
804 dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
805 __func__, i, len, mem);
806
807 desc = at_xdmac_get_desc(atchan);
808 if (!desc) {
809 dev_err(chan2dev(chan), "can't get descriptor\n");
810 if (first)
811 list_splice_tail_init(&first->descs_list,
812 &atchan->free_descs_list);
813 goto spin_unlock;
814 }
815
816 /* Linked list descriptor setup. */
817 if (direction == DMA_DEV_TO_MEM) {
818 desc->lld.mbr_sa = atchan->sconfig.src_addr;
819 desc->lld.mbr_da = mem;
820 } else {
821 desc->lld.mbr_sa = mem;
822 desc->lld.mbr_da = atchan->sconfig.dst_addr;
823 }
824 dwidth = at_xdmac_get_dwidth(atchan->cfg);
825 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
826 ? dwidth
827 : AT_XDMAC_CC_DWIDTH_BYTE;
828 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
829 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
830 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
831 | (len >> fixed_dwidth); /* microblock length */
832 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
833 AT_XDMAC_CC_DWIDTH(fixed_dwidth);
834 dev_dbg(chan2dev(chan),
835 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
836 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
837
838 /* Chain lld. */
839 if (prev)
840 at_xdmac_queue_desc(chan, prev, desc);
841
842 prev = desc;
843 if (!first)
844 first = desc;
845
846 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
847 __func__, desc, first);
848 list_add_tail(&desc->desc_node, &first->descs_list);
849 xfer_size += len;
850 }
851
852
853 first->tx_dma_desc.flags = flags;
854 first->xfer_size = xfer_size;
855 first->direction = direction;
856 ret = &first->tx_dma_desc;
857
858spin_unlock:
859 spin_unlock_irqrestore(&atchan->lock, irqflags);
860 return ret;
861}
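
/*
 * Consumer-side sketch (illustrative, not part of this driver): a client
 * driver typically reaches the prep callback above through the generic
 * dmaengine API. "uart_chan", "phys_fifo", "sgl"/"nents" and "rx_done" are
 * assumed to exist in the client.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= phys_fifo,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst	= 1,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(uart_chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(uart_chan, sgl, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		txd->callback = rx_done;	// hypothetical completion handler
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(uart_chan);
 *	}
 */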
862
863static struct dma_async_tx_descriptor *
864at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
865 size_t buf_len, size_t period_len,
866 enum dma_transfer_direction direction,
867 unsigned long flags)
868{
869 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
870 struct at_xdmac_desc *first = NULL, *prev = NULL;
871 unsigned int periods = buf_len / period_len;
872 int i;
873 unsigned long irqflags;
874
875 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
876 __func__, &buf_addr, buf_len, period_len,
877 direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
878
879 if (!is_slave_direction(direction)) {
880 dev_err(chan2dev(chan), "invalid DMA direction\n");
881 return NULL;
882 }
883
884 if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
885 dev_err(chan2dev(chan), "channel currently used\n");
886 return NULL;
887 }
888
889 if (at_xdmac_compute_chan_conf(chan, direction))
890 return NULL;
891
892 for (i = 0; i < periods; i++) {
893 struct at_xdmac_desc *desc = NULL;
894
895 spin_lock_irqsave(&atchan->lock, irqflags);
896 desc = at_xdmac_get_desc(atchan);
897 if (!desc) {
898 dev_err(chan2dev(chan), "can't get descriptor\n");
899 if (first)
900 list_splice_tail_init(&first->descs_list,
901 &atchan->free_descs_list);
902 spin_unlock_irqrestore(&atchan->lock, irqflags);
903 return NULL;
904 }
905 spin_unlock_irqrestore(&atchan->lock, irqflags);
906 dev_dbg(chan2dev(chan),
907 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
908 __func__, desc, &desc->tx_dma_desc.phys);
909
910 if (direction == DMA_DEV_TO_MEM) {
911 desc->lld.mbr_sa = atchan->sconfig.src_addr;
912 desc->lld.mbr_da = buf_addr + i * period_len;
913 } else {
914 desc->lld.mbr_sa = buf_addr + i * period_len;
915 desc->lld.mbr_da = atchan->sconfig.dst_addr;
916 }
917 desc->lld.mbr_cfg = atchan->cfg;
918 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
919 | AT_XDMAC_MBR_UBC_NDEN
920 | AT_XDMAC_MBR_UBC_NSEN
921 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);
922
923 dev_dbg(chan2dev(chan),
924 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
925 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
926
927 /* Chain lld. */
928 if (prev)
929 at_xdmac_queue_desc(chan, prev, desc);
930
931 prev = desc;
932 if (!first)
933 first = desc;
934
935 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
936 __func__, desc, first);
937 list_add_tail(&desc->desc_node, &first->descs_list);
938 }
939
940 at_xdmac_queue_desc(chan, prev, first);
941 first->tx_dma_desc.flags = flags;
942 first->xfer_size = buf_len;
943 first->direction = direction;
944
945 return &first->tx_dma_desc;
946}
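
/*
 * Consumer-side sketch (illustrative): cyclic transfers prepared above are
 * typically used for audio-style ring buffers, with one callback per period.
 * "chan", "buf_phys", "buf_len", "period_len" and "period_elapsed" are
 * assumed to come from the client driver.
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		txd->callback = period_elapsed;	// hypothetical per-period handler
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */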
947
948static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
949{
950 u32 width;
951
952 /*
953 * Check address alignment to select the greater data width we
954 * can use.
955 *
956 * Some XDMAC implementations don't provide dword transfer, in
957 * this case selecting dword has the same behavior as
958 * selecting word transfers.
959 */
960 if (!(addr & 7)) {
961 width = AT_XDMAC_CC_DWIDTH_DWORD;
962 dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
963 } else if (!(addr & 3)) {
964 width = AT_XDMAC_CC_DWIDTH_WORD;
965 dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
966 } else if (!(addr & 1)) {
967 width = AT_XDMAC_CC_DWIDTH_HALFWORD;
968 dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
969 } else {
970 width = AT_XDMAC_CC_DWIDTH_BYTE;
971 dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
972 }
973
974 return width;
975}
976
977static struct at_xdmac_desc *
978at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
979 struct at_xdmac_chan *atchan,
980 struct at_xdmac_desc *prev,
981 dma_addr_t src, dma_addr_t dst,
982 struct dma_interleaved_template *xt,
983 struct data_chunk *chunk)
984{
985 struct at_xdmac_desc *desc;
986 u32 dwidth;
987 unsigned long flags;
988 size_t ublen;
989 /*
990 * WARNING: The channel configuration is set here since there is no
991 * dmaengine_slave_config call in this case. Moreover we don't know the
992 * direction, it involves we can't dynamically set the source and dest
993 * interface so we have to use the same one. Only interface 0 allows EBI
994 * access. Hopefully we can access DDR through both ports (at least on
995 * SAMA5D4x), so we can use the same interface for source and dest,
996 * that solves the fact we don't know the direction.
997 * ERRATA: Even if useless for memory transfers, the PERID has to not
998 * match the one of another channel. If not, it could lead to spurious
999 * flag status.
1000 * For SAMA7G5x case, the SIF and DIF fields are no longer used.
1001 * Thus, no need to have the SIF/DIF interfaces here.
1002 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
1003 * zero.
1004 */
1005 u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1006 | AT_XDMAC_CC_MBSIZE_SIXTEEN
1007 | AT_XDMAC_CC_TYPE_MEM_TRAN;
1008
1009 dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
1010 if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1011 dev_dbg(chan2dev(chan),
1012 "%s: chunk too big (%zu, max size %lu)...\n",
1013 __func__, chunk->size,
1014 AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
1015 return NULL;
1016 }
1017
1018 if (prev)
1019 dev_dbg(chan2dev(chan),
1020 "Adding items at the end of desc 0x%p\n", prev);
1021
1022 if (xt->src_inc) {
1023 if (xt->src_sgl)
1024 chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
1025 else
1026 chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
1027 }
1028
1029 if (xt->dst_inc) {
1030 if (xt->dst_sgl)
1031 chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
1032 else
1033 chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
1034 }
1035
1036 spin_lock_irqsave(&atchan->lock, flags);
1037 desc = at_xdmac_get_desc(atchan);
1038 spin_unlock_irqrestore(&atchan->lock, flags);
1039 if (!desc) {
1040 dev_err(chan2dev(chan), "can't get descriptor\n");
1041 return NULL;
1042 }
1043
1044 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1045
1046 ublen = chunk->size >> dwidth;
1047
1048 desc->lld.mbr_sa = src;
1049 desc->lld.mbr_da = dst;
1050 desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
1051 desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);
1052
1053 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1054 | AT_XDMAC_MBR_UBC_NDEN
1055 | AT_XDMAC_MBR_UBC_NSEN
1056 | ublen;
1057 desc->lld.mbr_cfg = chan_cc;
1058
1059 dev_dbg(chan2dev(chan),
1060 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1061 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
1062 desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1063
1064 /* Chain lld. */
1065 if (prev)
1066 at_xdmac_queue_desc(chan, prev, desc);
1067
1068 return desc;
1069}
1070
1071static struct dma_async_tx_descriptor *
1072at_xdmac_prep_interleaved(struct dma_chan *chan,
1073 struct dma_interleaved_template *xt,
1074 unsigned long flags)
1075{
1076 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1077 struct at_xdmac_desc *prev = NULL, *first = NULL;
1078 dma_addr_t dst_addr, src_addr;
1079 size_t src_skip = 0, dst_skip = 0, len = 0;
1080 struct data_chunk *chunk;
1081 int i;
1082
1083 if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
1084 return NULL;
1085
1086 /*
1087 * TODO: Handle the case where we have to repeat a chain of
1088 * descriptors...
1089 */
1090 if ((xt->numf > 1) && (xt->frame_size > 1))
1091 return NULL;
1092
1093 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
1094 __func__, &xt->src_start, &xt->dst_start, xt->numf,
1095 xt->frame_size, flags);
1096
1097 src_addr = xt->src_start;
1098 dst_addr = xt->dst_start;
1099
1100 if (xt->numf > 1) {
1101 first = at_xdmac_interleaved_queue_desc(chan, atchan,
1102 NULL,
1103 src_addr, dst_addr,
1104 xt, xt->sgl);
1105
1106 /* Length of the block is (BLEN+1) microblocks. */
1107 for (i = 0; i < xt->numf - 1; i++)
1108 at_xdmac_increment_block_count(chan, first);
1109
1110 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1111 __func__, first, first);
1112 list_add_tail(&first->desc_node, &first->descs_list);
1113 } else {
1114 for (i = 0; i < xt->frame_size; i++) {
1115 size_t src_icg = 0, dst_icg = 0;
1116 struct at_xdmac_desc *desc;
1117
1118 chunk = xt->sgl + i;
1119
1120 dst_icg = dmaengine_get_dst_icg(xt, chunk);
1121 src_icg = dmaengine_get_src_icg(xt, chunk);
1122
1123 src_skip = chunk->size + src_icg;
1124 dst_skip = chunk->size + dst_icg;
1125
1126 dev_dbg(chan2dev(chan),
1127 "%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
1128 __func__, chunk->size, src_icg, dst_icg);
1129
1130 desc = at_xdmac_interleaved_queue_desc(chan, atchan,
1131 prev,
1132 src_addr, dst_addr,
1133 xt, chunk);
			if (!desc) {
				/* "first" may still be NULL on the first chunk. */
				if (first)
					list_splice_tail_init(&first->descs_list,
							      &atchan->free_descs_list);
				return NULL;
			}
1139
1140 if (!first)
1141 first = desc;
1142
1143 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1144 __func__, desc, first);
1145 list_add_tail(&desc->desc_node, &first->descs_list);
1146
1147 if (xt->src_sgl)
1148 src_addr += src_skip;
1149
1150 if (xt->dst_sgl)
1151 dst_addr += dst_skip;
1152
1153 len += chunk->size;
1154 prev = desc;
1155 }
1156 }
1157
1158 first->tx_dma_desc.cookie = -EBUSY;
1159 first->tx_dma_desc.flags = flags;
1160 first->xfer_size = len;
1161
1162 return &first->tx_dma_desc;
1163}
1164
1165static struct dma_async_tx_descriptor *
1166at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1167 size_t len, unsigned long flags)
1168{
1169 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1170 struct at_xdmac_desc *first = NULL, *prev = NULL;
1171 size_t remaining_size = len, xfer_size = 0, ublen;
1172 dma_addr_t src_addr = src, dst_addr = dest;
1173 u32 dwidth;
1174 /*
1175 * WARNING: We don't know the direction, it involves we can't
1176 * dynamically set the source and dest interface so we have to use the
1177 * same one. Only interface 0 allows EBI access. Hopefully we can
1178 * access DDR through both ports (at least on SAMA5D4x), so we can use
1179 * the same interface for source and dest, that solves the fact we
1180 * don't know the direction.
1181 * ERRATA: Even if useless for memory transfers, the PERID has to not
1182 * match the one of another channel. If not, it could lead to spurious
1183 * flag status.
1184 * For SAMA7G5x case, the SIF and DIF fields are no longer used.
1185 * Thus, no need to have the SIF/DIF interfaces here.
1186 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
1187 * zero.
1188 */
1189 u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1190 | AT_XDMAC_CC_DAM_INCREMENTED_AM
1191 | AT_XDMAC_CC_SAM_INCREMENTED_AM
1192 | AT_XDMAC_CC_MBSIZE_SIXTEEN
1193 | AT_XDMAC_CC_TYPE_MEM_TRAN;
1194 unsigned long irqflags;
1195
1196 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
1197 __func__, &src, &dest, len, flags);
1198
1199 if (unlikely(!len))
1200 return NULL;
1201
1202 dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
1203
1204 /* Prepare descriptors. */
1205 while (remaining_size) {
1206 struct at_xdmac_desc *desc = NULL;
1207
1208 dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
1209
1210 spin_lock_irqsave(&atchan->lock, irqflags);
1211 desc = at_xdmac_get_desc(atchan);
1212 spin_unlock_irqrestore(&atchan->lock, irqflags);
1213 if (!desc) {
1214 dev_err(chan2dev(chan), "can't get descriptor\n");
1215 if (first)
1216 list_splice_tail_init(&first->descs_list,
1217 &atchan->free_descs_list);
1218 return NULL;
1219 }
1220
1221 /* Update src and dest addresses. */
1222 src_addr += xfer_size;
1223 dst_addr += xfer_size;
1224
1225 if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
1226 xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
1227 else
1228 xfer_size = remaining_size;
1229
1230 dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
1231
1232 /* Check remaining length and change data width if needed. */
1233 dwidth = at_xdmac_align_width(chan,
1234 src_addr | dst_addr | xfer_size);
1235 chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
1236 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1237
1238 ublen = xfer_size >> dwidth;
1239 remaining_size -= xfer_size;
1240
1241 desc->lld.mbr_sa = src_addr;
1242 desc->lld.mbr_da = dst_addr;
1243 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
1244 | AT_XDMAC_MBR_UBC_NDEN
1245 | AT_XDMAC_MBR_UBC_NSEN
1246 | ublen;
1247 desc->lld.mbr_cfg = chan_cc;
1248
1249 dev_dbg(chan2dev(chan),
1250 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1251 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
1252
1253 /* Chain lld. */
1254 if (prev)
1255 at_xdmac_queue_desc(chan, prev, desc);
1256
1257 prev = desc;
1258 if (!first)
1259 first = desc;
1260
1261 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
1262 __func__, desc, first);
1263 list_add_tail(&desc->desc_node, &first->descs_list);
1264 }
1265
1266 first->tx_dma_desc.flags = flags;
1267 first->xfer_size = len;
1268
1269 return &first->tx_dma_desc;
1270}
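
/*
 * Consumer-side sketch (illustrative): memory-to-memory copies prepared above
 * are reached through dmaengine_prep_dma_memcpy(). "chan", "dst", "src" and
 * "len" are assumed to be provided by the client.
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		cookie = dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */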
1271
1272static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1273 struct at_xdmac_chan *atchan,
1274 dma_addr_t dst_addr,
1275 size_t len,
1276 int value)
1277{
1278 struct at_xdmac_desc *desc;
1279 unsigned long flags;
1280 size_t ublen;
1281 u32 dwidth;
1282 char pattern;
1283 /*
1284 * WARNING: The channel configuration is set here since there is no
1285 * dmaengine_slave_config call in this case. Moreover we don't know the
1286 * direction, it involves we can't dynamically set the source and dest
1287 * interface so we have to use the same one. Only interface 0 allows EBI
1288 * access. Hopefully we can access DDR through both ports (at least on
1289 * SAMA5D4x), so we can use the same interface for source and dest,
1290 * that solves the fact we don't know the direction.
1291 * ERRATA: Even if useless for memory transfers, the PERID has to not
1292 * match the one of another channel. If not, it could lead to spurious
1293 * flag status.
1294 * For SAMA7G5x case, the SIF and DIF fields are no longer used.
1295 * Thus, no need to have the SIF/DIF interfaces here.
1296 * For SAMA5D4x and SAMA5D2x the SIF and DIF are already configured as
1297 * zero.
1298 */
1299 u32 chan_cc = AT_XDMAC_CC_PERID(0x7f)
1300 | AT_XDMAC_CC_DAM_UBS_AM
1301 | AT_XDMAC_CC_SAM_INCREMENTED_AM
1302 | AT_XDMAC_CC_MBSIZE_SIXTEEN
1303 | AT_XDMAC_CC_MEMSET_HW_MODE
1304 | AT_XDMAC_CC_TYPE_MEM_TRAN;
1305
1306 dwidth = at_xdmac_align_width(chan, dst_addr);
1307
1308 if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1309 dev_err(chan2dev(chan),
1310 "%s: Transfer too large, aborting...\n",
1311 __func__);
1312 return NULL;
1313 }
1314
1315 spin_lock_irqsave(&atchan->lock, flags);
1316 desc = at_xdmac_get_desc(atchan);
1317 spin_unlock_irqrestore(&atchan->lock, flags);
1318 if (!desc) {
1319 dev_err(chan2dev(chan), "can't get descriptor\n");
1320 return NULL;
1321 }
1322
1323 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1324
1325 /* Only the first byte of value is to be used according to dmaengine */
1326 pattern = (char)value;
1327
1328 ublen = len >> dwidth;
1329
1330 desc->lld.mbr_da = dst_addr;
1331 desc->lld.mbr_ds = (pattern << 24) |
1332 (pattern << 16) |
1333 (pattern << 8) |
1334 pattern;
1335 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1336 | AT_XDMAC_MBR_UBC_NDEN
1337 | AT_XDMAC_MBR_UBC_NSEN
1338 | ublen;
1339 desc->lld.mbr_cfg = chan_cc;
1340
1341 dev_dbg(chan2dev(chan),
1342 "%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1343 __func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
1344 desc->lld.mbr_cfg);
1345
1346 return desc;
1347}
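
/*
 * Illustrative note (not part of the driver): only the least significant byte
 * of "value" is used, and it is replicated across the 32-bit data stride /
 * memory set pattern member built above. For value = 0x5A:
 *
 *	mbr_ds = (0x5A << 24) | (0x5A << 16) | (0x5A << 8) | 0x5A = 0x5A5A5A5A;
 */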
1348
1349static struct dma_async_tx_descriptor *
1350at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1351 size_t len, unsigned long flags)
1352{
1353 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1354 struct at_xdmac_desc *desc;
1355
1356 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
1357 __func__, &dest, len, value, flags);
1358
1359 if (unlikely(!len))
1360 return NULL;
1361
	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
	if (!desc)
		return NULL;
	list_add_tail(&desc->desc_node, &desc->descs_list);
1364
1365 desc->tx_dma_desc.cookie = -EBUSY;
1366 desc->tx_dma_desc.flags = flags;
1367 desc->xfer_size = len;
1368
1369 return &desc->tx_dma_desc;
1370}
1371
1372static struct dma_async_tx_descriptor *
1373at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
1374 unsigned int sg_len, int value,
1375 unsigned long flags)
1376{
1377 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1378 struct at_xdmac_desc *desc, *pdesc = NULL,
1379 *ppdesc = NULL, *first = NULL;
1380 struct scatterlist *sg, *psg = NULL, *ppsg = NULL;
1381 size_t stride = 0, pstride = 0, len = 0;
1382 int i;
1383
1384 if (!sgl)
1385 return NULL;
1386
1387 dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
1388 __func__, sg_len, value, flags);
1389
1390 /* Prepare descriptors. */
1391 for_each_sg(sgl, sg, sg_len, i) {
1392 dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
1393 __func__, &sg_dma_address(sg), sg_dma_len(sg),
1394 value, flags);
1395 desc = at_xdmac_memset_create_desc(chan, atchan,
1396 sg_dma_address(sg),
1397 sg_dma_len(sg),
1398 value);
1399 if (!desc && first)
1400 list_splice_tail_init(&first->descs_list,
1401 &atchan->free_descs_list);
1402
1403 if (!first)
1404 first = desc;
1405
1406 /* Update our strides */
1407 pstride = stride;
1408 if (psg)
1409 stride = sg_dma_address(sg) -
1410 (sg_dma_address(psg) + sg_dma_len(psg));
1411
1412 /*
1413 * The scatterlist API gives us only the address and
		 * length of each element.
1415 *
1416 * Unfortunately, we don't have the stride, which we
1417 * will need to compute.
1418 *
		 * That makes us end up in a situation like this one:
1420 * len stride len stride len
1421 * +-------+ +-------+ +-------+
1422 * | N-2 | | N-1 | | N |
1423 * +-------+ +-------+ +-------+
1424 *
1425 * We need all these three elements (N-2, N-1 and N)
1426 * to actually take the decision on whether we need to
1427 * queue N-1 or reuse N-2.
1428 *
1429 * We will only consider N if it is the last element.
1430 */
1431 if (ppdesc && pdesc) {
1432 if ((stride == pstride) &&
1433 (sg_dma_len(ppsg) == sg_dma_len(psg))) {
1434 dev_dbg(chan2dev(chan),
1435 "%s: desc 0x%p can be merged with desc 0x%p\n",
1436 __func__, pdesc, ppdesc);
1437
1438 /*
1439 * Increment the block count of the
1440 * N-2 descriptor
1441 */
1442 at_xdmac_increment_block_count(chan, ppdesc);
1443 ppdesc->lld.mbr_dus = stride;
1444
1445 /*
1446 * Put back the N-1 descriptor in the
1447 * free descriptor list
1448 */
1449 list_add_tail(&pdesc->desc_node,
1450 &atchan->free_descs_list);
1451
1452 /*
1453 * Make our N-1 descriptor pointer
1454 * point to the N-2 since they were
1455 * actually merged.
1456 */
1457 pdesc = ppdesc;
1458
1459 /*
1460 * Rule out the case where we don't have
1461 * pstride computed yet (our second sg
1462 * element)
1463 *
1464 * We also want to catch the case where there
1465 * would be a negative stride,
1466 */
1467 } else if (pstride ||
1468 sg_dma_address(sg) < sg_dma_address(psg)) {
1469 /*
1470 * Queue the N-1 descriptor after the
1471 * N-2
1472 */
1473 at_xdmac_queue_desc(chan, ppdesc, pdesc);
1474
1475 /*
1476 * Add the N-1 descriptor to the list
1477 * of the descriptors used for this
1478 * transfer
1479 */
1480 list_add_tail(&desc->desc_node,
1481 &first->descs_list);
1482 dev_dbg(chan2dev(chan),
1483 "%s: add desc 0x%p to descs_list 0x%p\n",
1484 __func__, desc, first);
1485 }
1486 }
1487
1488 /*
1489 * If we are the last element, just see if we have the
		 * same size as the previous element.
1491 *
1492 * If so, we can merge it with the previous descriptor
1493 * since we don't care about the stride anymore.
1494 */
1495 if ((i == (sg_len - 1)) &&
1496 sg_dma_len(psg) == sg_dma_len(sg)) {
1497 dev_dbg(chan2dev(chan),
1498 "%s: desc 0x%p can be merged with desc 0x%p\n",
1499 __func__, desc, pdesc);
1500
1501 /*
1502 * Increment the block count of the N-1
1503 * descriptor
1504 */
1505 at_xdmac_increment_block_count(chan, pdesc);
1506 pdesc->lld.mbr_dus = stride;
1507
1508 /*
1509 * Put back the N descriptor in the free
1510 * descriptor list
1511 */
1512 list_add_tail(&desc->desc_node,
1513 &atchan->free_descs_list);
1514 }
1515
1516 /* Update our descriptors */
1517 ppdesc = pdesc;
1518 pdesc = desc;
1519
1520 /* Update our scatter pointers */
1521 ppsg = psg;
1522 psg = sg;
1523
1524 len += sg_dma_len(sg);
1525 }
1526
1527 first->tx_dma_desc.cookie = -EBUSY;
1528 first->tx_dma_desc.flags = flags;
1529 first->xfer_size = len;
1530
1531 return &first->tx_dma_desc;
1532}
1533
1534static enum dma_status
1535at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1536 struct dma_tx_state *txstate)
1537{
1538 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1539 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1540 struct at_xdmac_desc *desc, *_desc, *iter;
1541 struct list_head *descs_list;
1542 enum dma_status ret;
1543 int residue, retry, pm_status;
1544 u32 cur_nda, check_nda, cur_ubc, mask, value;
1545 u8 dwidth = 0;
1546 unsigned long flags;
1547 bool initd;
1548
1549 ret = dma_cookie_status(chan, cookie, txstate);
1550 if (ret == DMA_COMPLETE || !txstate)
1551 return ret;
1552
1553 pm_status = pm_runtime_resume_and_get(atxdmac->dev);
1554 if (pm_status < 0)
1555 return DMA_ERROR;
1556
1557 spin_lock_irqsave(&atchan->lock, flags);
1558
1559 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
1560
1561 /*
1562 * If the transfer has not been started yet, don't need to compute the
1563 * residue, it's the transfer length.
1564 */
1565 if (!desc->active_xfer) {
1566 dma_set_residue(txstate, desc->xfer_size);
1567 goto spin_unlock;
1568 }
1569
1570 residue = desc->xfer_size;
1571 /*
1572 * Flush FIFO: only relevant when the transfer is source peripheral
1573 * synchronized. Flush is needed before reading CUBC because data in
1574 * the FIFO are not reported by CUBC. Reporting a residue of the
1575 * transfer length while we have data in FIFO can cause issue.
	 * Use case: the Atmel USART has a timeout which fires when characters
	 * have been received but no new character arrives for a while. On
	 * timeout, it requests the residue. If the data are still in the DMA
	 * FIFO, we would report a residue equal to the transfer length, as if
	 * no data had been received. An application waiting for that data
	 * would then hang, since no further USART timeout will occur without
	 * new incoming data.
1583 */
1584 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
1585 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
1586 if ((desc->lld.mbr_cfg & mask) == value) {
1587 at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1588 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1589 cpu_relax();
1590 }
1591
1592 /*
1593 * The easiest way to compute the residue should be to pause the DMA
	 * but doing this can lead to missing data, as some devices don't
	 * have a FIFO.
1596 * We need to read several registers because:
1597 * - DMA is running therefore a descriptor change is possible while
1598 * reading these registers
1599 * - When the block transfer is done, the value of the CUBC register
1600 * is set to its initial value until the fetch of the next descriptor.
1601 * This value will corrupt the residue calculation so we have to skip
1602 * it.
1603 *
1604 * INITD -------- ------------
1605 * |____________________|
1606 * _______________________ _______________
1607 * NDA @desc2 \/ @desc3
1608 * _______________________/\_______________
1609 * __________ ___________ _______________
1610 * CUBC 0 \/ MAX desc1 \/ MAX desc2
1611 * __________/\___________/\_______________
1612 *
1613 * Since descriptors are aligned on 64 bits, we can assume that
1614 * the update of NDA and CUBC is atomic.
1615 * Memory barriers are used to ensure the read order of the registers.
	 * A max number of retries is set because, although unlikely, the loop
	 * could otherwise never end.
1617 */
1618 for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1619 check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1620 rmb();
1621 cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1622 rmb();
1623 initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1624 rmb();
1625 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1626 rmb();
1627
1628 if ((check_nda == cur_nda) && initd)
1629 break;
1630 }
1631
1632 if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
1633 ret = DMA_ERROR;
1634 goto spin_unlock;
1635 }
1636
1637 /*
1638 * Flush FIFO: only relevant when the transfer is source peripheral
1639 * synchronized. Another flush is needed here because CUBC is updated
1640 * when the controller sends the data write command. It can lead to
1641 * report data that are not written in the memory or the device. The
1642 * FIFO flush ensures that data are really written.
1643 */
1644 if ((desc->lld.mbr_cfg & mask) == value) {
1645 at_xdmac_write(atxdmac, atxdmac->layout->gswf, atchan->mask);
1646 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
1647 cpu_relax();
1648 }
1649
1650 /*
1651 * Remove size of all microblocks already transferred and the current
1652 * one. Then add the remaining size to transfer of the current
1653 * microblock.
1654 */
1655 descs_list = &desc->descs_list;
1656 list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
1657 dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
1658 residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
1659 if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
1660 desc = iter;
1661 break;
1662 }
1663 }
1664 residue += cur_ubc << dwidth;
1665
1666 dma_set_residue(txstate, residue);
1667
1668 dev_dbg(chan2dev(chan),
1669 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1670 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
1671
1672spin_unlock:
1673 spin_unlock_irqrestore(&atchan->lock, flags);
1674 pm_runtime_mark_last_busy(atxdmac->dev);
1675 pm_runtime_put_autosuspend(atxdmac->dev);
1676 return ret;
1677}
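
/*
 * Editor's illustration, not part of the driver: a minimal sketch of how a
 * hypothetical consumer would read the residue computed by the tx_status
 * callback above through the generic dmaengine API. The channel, cookie and
 * total transfer length are assumptions made only for this example.
 */
static size_t __maybe_unused at_xdmac_example_bytes_received(struct dma_chan *chan,
							     dma_cookie_t cookie,
							     size_t xfer_len)
{
	struct dma_tx_state state;

	/* Routed to at_xdmac_tx_status() by the dmaengine core. */
	if (dmaengine_tx_status(chan, cookie, &state) == DMA_ERROR)
		return 0;

	/* The residue is what remains to transfer, so received = total - residue. */
	return xfer_len - state.residue;
}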
1678
1679static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
1680{
1681 struct at_xdmac_desc *desc;
1682
1683 /*
1684 * If the channel is enabled, do nothing: advance_work will be
1685 * triggered again after the interrupt.
1686 */
1687 if (at_xdmac_chan_is_enabled(atchan) || list_empty(&atchan->xfers_list))
1688 return;
1689
1690 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1691 xfer_node);
1692 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1693 if (!desc->active_xfer)
1694 at_xdmac_start_xfer(atchan, desc);
1695}
1696
1697static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
1698{
1699 struct at_xdmac_desc *desc;
1700 struct dma_async_tx_descriptor *txd;
1701
1702 spin_lock_irq(&atchan->lock);
1703 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1704 __func__, atchan->irq_status);
1705 if (list_empty(&atchan->xfers_list)) {
1706 spin_unlock_irq(&atchan->lock);
1707 return;
1708 }
1709 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1710 xfer_node);
1711 spin_unlock_irq(&atchan->lock);
1712 txd = &desc->tx_dma_desc;
1713 if (txd->flags & DMA_PREP_INTERRUPT)
1714 dmaengine_desc_get_callback_invoke(txd, NULL);
1715}
1716
1717/* Called with atchan->lock held. */
1718static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
1719{
1720 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1721 struct at_xdmac_desc *bad_desc;
1722 int ret;
1723
1724 ret = pm_runtime_resume_and_get(atxdmac->dev);
1725 if (ret < 0)
1726 return;
1727
1728 /*
1729 * The descriptor currently at the head of the active list is
1730 * broken. Since we don't have any way to report errors, we'll
1731 * just have to scream loudly and try to continue with other
1732 * descriptors queued (if any).
1733 */
1734 if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1735 dev_err(chan2dev(&atchan->chan), "read bus error!\n");
1736 if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1737 dev_err(chan2dev(&atchan->chan), "write bus error!\n");
1738 if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1739 dev_err(chan2dev(&atchan->chan), "request overflow error!\n");
1740
1741 /* Channel must be disabled first as it's not done automatically */
1742 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1743 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1744 cpu_relax();
1745
1746 bad_desc = list_first_entry(&atchan->xfers_list,
1747 struct at_xdmac_desc,
1748 xfer_node);
1749
1750 /* Print bad descriptor's details if needed */
1751 dev_dbg(chan2dev(&atchan->chan),
1752 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
1753 __func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
1754 bad_desc->lld.mbr_ubc);
1755
1756 pm_runtime_mark_last_busy(atxdmac->dev);
1757 pm_runtime_put_autosuspend(atxdmac->dev);
1758
1759 /* Then continue with usual descriptor management */
1760}
1761
1762static void at_xdmac_tasklet(struct tasklet_struct *t)
1763{
1764 struct at_xdmac_chan *atchan = from_tasklet(atchan, t, tasklet);
1765 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1766 struct at_xdmac_desc *desc;
1767 struct dma_async_tx_descriptor *txd;
1768 u32 error_mask;
1769
1770 if (at_xdmac_chan_is_cyclic(atchan))
1771 return at_xdmac_handle_cyclic(atchan);
1772
1773 error_mask = AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS |
1774 AT_XDMAC_CIS_ROIS;
1775
1776 spin_lock_irq(&atchan->lock);
1777
1778 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1779 __func__, atchan->irq_status);
1780
1781 if (!(atchan->irq_status & AT_XDMAC_CIS_LIS) &&
1782 !(atchan->irq_status & error_mask)) {
1783 spin_unlock_irq(&atchan->lock);
1784 return;
1785 }
1786
1787 if (atchan->irq_status & error_mask)
1788 at_xdmac_handle_error(atchan);
1789
1790 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc,
1791 xfer_node);
1792 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1793 if (!desc->active_xfer) {
1794 dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting\n");
1795 spin_unlock_irq(&atchan->lock);
1796 return;
1797 }
1798
1799 txd = &desc->tx_dma_desc;
1800 dma_cookie_complete(txd);
1801 /* Remove the transfer from the transfer list. */
1802 list_del(&desc->xfer_node);
1803 spin_unlock_irq(&atchan->lock);
1804
1805 if (txd->flags & DMA_PREP_INTERRUPT)
1806 dmaengine_desc_get_callback_invoke(txd, NULL);
1807
1808 dma_run_dependencies(txd);
1809
1810 spin_lock_irq(&atchan->lock);
1811 /* Move the xfer descriptors into the free descriptors list. */
1812 list_splice_tail_init(&desc->descs_list, &atchan->free_descs_list);
1813 at_xdmac_advance_work(atchan);
1814 spin_unlock_irq(&atchan->lock);
1815
1816 /*
1817 * Decrement runtime PM ref counter incremented in
1818 * at_xdmac_start_xfer().
1819 */
1820 pm_runtime_mark_last_busy(atxdmac->dev);
1821 pm_runtime_put_autosuspend(atxdmac->dev);
1822}
1823
1824static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1825{
1826 struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
1827 struct at_xdmac_chan *atchan;
1828 u32 imr, status, pending;
1829 u32 chan_imr, chan_status;
1830 int i, ret = IRQ_NONE;
1831
1832 do {
1833 imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1834 status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1835 pending = status & imr;
1836
1837 dev_vdbg(atxdmac->dma.dev,
1838 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1839 __func__, status, imr, pending);
1840
1841 if (!pending)
1842 break;
1843
1844 /* We have to find which channel has generated the interrupt. */
1845 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1846 if (!((1 << i) & pending))
1847 continue;
1848
1849 atchan = &atxdmac->chan[i];
1850 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1851 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1852 atchan->irq_status = chan_status & chan_imr;
1853 dev_vdbg(atxdmac->dma.dev,
1854 "%s: chan%d: imr=0x%x, status=0x%x\n",
1855 __func__, i, chan_imr, chan_status);
1856 dev_vdbg(chan2dev(&atchan->chan),
1857 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1858 __func__,
1859 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1860 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1861 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1862 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1863 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1864 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1865
1866 if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1867 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1868
1869 tasklet_schedule(&atchan->tasklet);
1870 ret = IRQ_HANDLED;
1871 }
1872
1873 } while (pending);
1874
1875 return ret;
1876}
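
/*
 * Editor's note, illustration only: the per-channel scan in the interrupt
 * handler above walks the pending bitmask by hand. The same idiom can be
 * expressed with the for_each_set_bit() helper; this sketch is not a
 * proposed change to the driver.
 */
static void __maybe_unused at_xdmac_example_scan_pending(struct at_xdmac *atxdmac,
							 u32 pending)
{
	unsigned long mask = pending;
	unsigned int i;

	/* Visit only the channels whose interrupt bit is set. */
	for_each_set_bit(i, &mask, atxdmac->dma.chancnt)
		tasklet_schedule(&atxdmac->chan[i].tasklet);
}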
1877
1878static void at_xdmac_issue_pending(struct dma_chan *chan)
1879{
1880 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1881 unsigned long flags;
1882
1883 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1884
1885 spin_lock_irqsave(&atchan->lock, flags);
1886 at_xdmac_advance_work(atchan);
1887 spin_unlock_irqrestore(&atchan->lock, flags);
1890}
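
/*
 * Editor's illustration, not part of the driver: a minimal, hypothetical
 * submission path showing how a consumer ends up in the issue_pending
 * callback above. The buffer, length and direction are assumptions made
 * only for this example.
 */
static dma_cookie_t __maybe_unused at_xdmac_example_submit(struct dma_chan *chan,
							   dma_addr_t buf,
							   size_t len)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					  DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return cookie;

	/* Kick the engine: this call lands in at_xdmac_issue_pending(). */
	dma_async_issue_pending(chan);

	return cookie;
}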
1891
1892static int at_xdmac_device_config(struct dma_chan *chan,
1893 struct dma_slave_config *config)
1894{
1895 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1896 int ret;
1897 unsigned long flags;
1898
1899 dev_dbg(chan2dev(chan), "%s\n", __func__);
1900
1901 spin_lock_irqsave(&atchan->lock, flags);
1902 ret = at_xdmac_set_slave_config(chan, config);
1903 spin_unlock_irqrestore(&atchan->lock, flags);
1904
1905 return ret;
1906}
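
/*
 * Editor's illustration, not part of the driver: the kind of slave
 * configuration a hypothetical peripheral driver would hand down to the
 * device_config callback above. The FIFO address and bus width are
 * assumptions made only for this example.
 */
static int __maybe_unused at_xdmac_example_slave_config(struct dma_chan *chan,
							dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction = DMA_DEV_TO_MEM,
		.src_addr = fifo_addr,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.src_maxburst = 1,
	};

	/* Reaches at_xdmac_device_config() through the dmaengine core. */
	return dmaengine_slave_config(chan, &cfg);
}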
1907
1908static void at_xdmac_device_pause_set(struct at_xdmac *atxdmac,
1909 struct at_xdmac_chan *atchan)
1910{
1911 at_xdmac_write(atxdmac, atxdmac->layout->grws, atchan->mask);
1912 while (at_xdmac_chan_read(atchan, AT_XDMAC_CC) &
1913 (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
1914 cpu_relax();
1915}
1916
1917static void at_xdmac_device_pause_internal(struct at_xdmac_chan *atchan)
1918{
1919 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1920 unsigned long flags;
1921
1922 spin_lock_irqsave(&atchan->lock, flags);
1923 set_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
1924 at_xdmac_device_pause_set(atxdmac, atchan);
1925 spin_unlock_irqrestore(&atchan->lock, flags);
1926}
1927
1928static int at_xdmac_device_pause(struct dma_chan *chan)
1929{
1930 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1931 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1932 unsigned long flags;
1933 int ret;
1934
1935 dev_dbg(chan2dev(chan), "%s\n", __func__);
1936
1937 if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
1938 return 0;
1939
1940 ret = pm_runtime_resume_and_get(atxdmac->dev);
1941 if (ret < 0)
1942 return ret;
1943
1944 spin_lock_irqsave(&atchan->lock, flags);
1945
1946 at_xdmac_device_pause_set(atxdmac, atchan);
1947 /* Decrement runtime PM ref counter for each active descriptor. */
1948 at_xdmac_runtime_suspend_descriptors(atchan);
1949
1950 spin_unlock_irqrestore(&atchan->lock, flags);
1951
1952 pm_runtime_mark_last_busy(atxdmac->dev);
1953 pm_runtime_put_autosuspend(atxdmac->dev);
1954
1955 return 0;
1956}
1957
1958static void at_xdmac_device_resume_internal(struct at_xdmac_chan *atchan)
1959{
1960 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1961 unsigned long flags;
1962
1963 spin_lock_irqsave(&atchan->lock, flags);
1964 at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1965 clear_bit(AT_XDMAC_CHAN_IS_PAUSED_INTERNAL, &atchan->status);
1966 spin_unlock_irqrestore(&atchan->lock, flags);
1967}
1968
1969static int at_xdmac_device_resume(struct dma_chan *chan)
1970{
1971 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1972 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1973 unsigned long flags;
1974 int ret;
1975
1976 dev_dbg(chan2dev(chan), "%s\n", __func__);
1977
1978 ret = pm_runtime_resume_and_get(atxdmac->dev);
1979 if (ret < 0)
1980 return ret;
1981
1982 spin_lock_irqsave(&atchan->lock, flags);
1983 if (!at_xdmac_chan_is_paused(atchan))
1984 goto unlock;
1985
1986 /* Increment runtime PM ref counter for each active descriptor. */
1987 ret = at_xdmac_runtime_resume_descriptors(atchan);
1988 if (ret < 0)
1989 goto unlock;
1990
1991 at_xdmac_write(atxdmac, atxdmac->layout->grwr, atchan->mask);
1992 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1993
1994unlock:
1995 spin_unlock_irqrestore(&atchan->lock, flags);
1996 pm_runtime_mark_last_busy(atxdmac->dev);
1997 pm_runtime_put_autosuspend(atxdmac->dev);
1998
1999 return ret;
2000}
2001
2002static int at_xdmac_device_terminate_all(struct dma_chan *chan)
2003{
2004 struct at_xdmac_desc *desc, *_desc;
2005 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2006 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
2007 unsigned long flags;
2008 int ret;
2009
2010 dev_dbg(chan2dev(chan), "%s\n", __func__);
2011
2012 ret = pm_runtime_resume_and_get(atxdmac->dev);
2013 if (ret < 0)
2014 return ret;
2015
2016 spin_lock_irqsave(&atchan->lock, flags);
2017 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
2018 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
2019 cpu_relax();
2020
2021 /* Cancel all pending transfers. */
2022 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) {
2023 list_del(&desc->xfer_node);
2024 list_splice_tail_init(&desc->descs_list,
2025 &atchan->free_descs_list);
2026 /*
2027 * We incremented the runtime PM reference count on
2028 * at_xdmac_start_xfer() for this descriptor. Now it's time
2029 * to release it.
2030 */
2031 if (desc->active_xfer) {
2032 pm_runtime_put_autosuspend(atxdmac->dev);
2033 pm_runtime_mark_last_busy(atxdmac->dev);
2034 }
2035 }
2036
2037 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
2038 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
2039 spin_unlock_irqrestore(&atchan->lock, flags);
2040
2041 pm_runtime_mark_last_busy(atxdmac->dev);
2042 pm_runtime_put_autosuspend(atxdmac->dev);
2043
2044 return 0;
2045}
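
/*
 * Editor's illustration, not part of the driver: how a hypothetical
 * consumer reaches the pause, resume and terminate callbacks above through
 * the generic dmaengine helpers. The channel pointer is an assumption made
 * only for this example.
 */
static void __maybe_unused at_xdmac_example_stop(struct dma_chan *chan)
{
	/* Routed to at_xdmac_device_pause() / at_xdmac_device_resume(). */
	if (dmaengine_pause(chan))
		return;
	dmaengine_resume(chan);

	/* Routed to at_xdmac_device_terminate_all(), then synchronizes. */
	dmaengine_terminate_sync(chan);
}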
2046
2047static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
2048{
2049 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2050 struct at_xdmac_desc *desc;
2051 int i;
2052
2053 if (at_xdmac_chan_is_enabled(atchan)) {
2054 dev_err(chan2dev(chan),
2055 "can't allocate channel resources (channel enabled)\n");
2056 return -EIO;
2057 }
2058
2059 if (!list_empty(&atchan->free_descs_list)) {
2060 dev_err(chan2dev(chan),
2061 "can't allocate channel resources (channel not free from a previous use)\n");
2062 return -EIO;
2063 }
2064
2065 for (i = 0; i < init_nr_desc_per_channel; i++) {
2066 desc = at_xdmac_alloc_desc(chan, GFP_KERNEL);
2067 if (!desc) {
2068 if (i == 0) {
2069 dev_warn(chan2dev(chan),
2070 "can't allocate any descriptors\n");
2071 return -EIO;
2072 }
2073 dev_warn(chan2dev(chan),
2074 "only %d descriptors have been allocated\n", i);
2075 break;
2076 }
2077 list_add_tail(&desc->desc_node, &atchan->free_descs_list);
2078 }
2079
2080 dma_cookie_init(chan);
2081
2082 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
2083
2084 return i;
2085}
2086
2087static void at_xdmac_free_chan_resources(struct dma_chan *chan)
2088{
2089 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2090 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
2091 struct at_xdmac_desc *desc, *_desc;
2092
2093 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
2094 dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
2095 list_del(&desc->desc_node);
2096 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
2097 }
2100}
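
/*
 * Editor's illustration, not part of the driver: the request/release
 * lifecycle that brackets the alloc/free callbacks above. The "rx" name is
 * a hypothetical dma-names entry in a consumer's device tree node.
 */
static int __maybe_unused at_xdmac_example_chan_lifecycle(struct device *dev)
{
	struct dma_chan *chan;

	/* Triggers at_xdmac_alloc_chan_resources() on success. */
	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... use the channel ... */

	/* Ends up in at_xdmac_free_chan_resources(). */
	dma_release_channel(chan);

	return 0;
}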
2101
2102static void at_xdmac_axi_config(struct platform_device *pdev)
2103{
2104 struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2105 bool dev_m2m = false;
2106 u32 dma_requests;
2107
2108 if (!atxdmac->layout->axi_config)
2109 return; /* Not supported */
2110
2111 if (!of_property_read_u32(pdev->dev.of_node, "dma-requests",
2112 &dma_requests)) {
2113 dev_info(&pdev->dev, "controller in mem2mem mode.\n");
2114 dev_m2m = true;
2115 }
2116
2117 if (dev_m2m) {
2118 at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_M2M);
2119 at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_M2M);
2120 } else {
2121 at_xdmac_write(atxdmac, AT_XDMAC_GCFG, AT_XDMAC_GCFG_P2M);
2122 at_xdmac_write(atxdmac, AT_XDMAC_GWAC, AT_XDMAC_GWAC_P2M);
2123 }
2124}
2125
2126static int __maybe_unused atmel_xdmac_prepare(struct device *dev)
2127{
2128 struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2129 struct dma_chan *chan, *_chan;
2130
2131 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2132 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2133
2134 /* Wait for transfer completion, except in cyclic case. */
2135 if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
2136 return -EAGAIN;
2137 }
2138 return 0;
2139}
2140
2141static int __maybe_unused atmel_xdmac_suspend(struct device *dev)
2142{
2143 struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2144 struct dma_chan *chan, *_chan;
2145 int ret;
2146
2147 ret = pm_runtime_resume_and_get(atxdmac->dev);
2148 if (ret < 0)
2149 return ret;
2150
2151 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2152 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
2153
2154 atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
2155 if (at_xdmac_chan_is_cyclic(atchan)) {
2156 if (!at_xdmac_chan_is_paused(atchan)) {
2157 dev_warn(chan2dev(chan), "%s: channel %d not paused\n",
2158 __func__, chan->chan_id);
2159 at_xdmac_device_pause_internal(atchan);
2160 at_xdmac_runtime_suspend_descriptors(atchan);
2161 }
2162 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
2163 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
2164 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
2165 }
2166 }
2167 atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
2168 atxdmac->save_gs = at_xdmac_read(atxdmac, AT_XDMAC_GS);
2169
2170 at_xdmac_off(atxdmac, false);
2171 pm_runtime_mark_last_busy(atxdmac->dev);
2172 pm_runtime_put_noidle(atxdmac->dev);
2173 clk_disable_unprepare(atxdmac->clk);
2174
2175 return 0;
2176}
2177
2178static int __maybe_unused atmel_xdmac_resume(struct device *dev)
2179{
2180 struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2181 struct at_xdmac_chan *atchan;
2182 struct dma_chan *chan, *_chan;
2183 struct platform_device *pdev = container_of(dev, struct platform_device, dev);
2184 int i, ret;
2185
2186 ret = clk_prepare_enable(atxdmac->clk);
2187 if (ret)
2188 return ret;
2189
2190 pm_runtime_get_noresume(atxdmac->dev);
2191
2192 at_xdmac_axi_config(pdev);
2193
2194 /* Clear pending interrupts. */
2195 for (i = 0; i < atxdmac->dma.chancnt; i++) {
2196 atchan = &atxdmac->chan[i];
2197 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2198 cpu_relax();
2199 }
2200
2201 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
2202 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
2203 atchan = to_at_xdmac_chan(chan);
2204
2205 at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
2206 if (at_xdmac_chan_is_cyclic(atchan)) {
2207 /*
2208 * Resume only channels not explicitly paused by
2209 * consumers.
2210 */
2211 if (at_xdmac_chan_is_paused_internal(atchan)) {
2212 ret = at_xdmac_runtime_resume_descriptors(atchan);
2213 if (ret < 0)
2214 return ret;
2215 at_xdmac_device_resume_internal(atchan);
2216 }
2217
2218 /*
2219 * We may resume from a deep sleep state where power to
2220 * the DMA controller was cut off. Thus, restore the
2221 * paused state of channels set through the dmaengine API.
2222 */
2223 else if (at_xdmac_chan_is_paused(atchan))
2224 at_xdmac_device_pause_set(atxdmac, atchan);
2225
2226 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
2227 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
2228 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
2229 wmb();
2230 if (atxdmac->save_gs & atchan->mask)
2231 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
2232 }
2233 }
2234
2235 pm_runtime_mark_last_busy(atxdmac->dev);
2236 pm_runtime_put_autosuspend(atxdmac->dev);
2237
2238 return 0;
2239}
2240
2241static int __maybe_unused atmel_xdmac_runtime_suspend(struct device *dev)
2242{
2243 struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2244
2245 clk_disable(atxdmac->clk);
2246
2247 return 0;
2248}
2249
2250static int __maybe_unused atmel_xdmac_runtime_resume(struct device *dev)
2251{
2252 struct at_xdmac *atxdmac = dev_get_drvdata(dev);
2253
2254 return clk_enable(atxdmac->clk);
2255}
2256
2257static int at_xdmac_probe(struct platform_device *pdev)
2258{
2259 struct at_xdmac *atxdmac;
2260 int irq, nr_channels, i, ret;
2261 void __iomem *base;
2262 u32 reg;
2263
2264 irq = platform_get_irq(pdev, 0);
2265 if (irq < 0)
2266 return irq;
2267
2268 base = devm_platform_ioremap_resource(pdev, 0);
2269 if (IS_ERR(base))
2270 return PTR_ERR(base);
2271
2272 /*
2273 * Read the number of XDMAC channels. The read helper function can't
2274 * be used since atxdmac is not yet allocated, and we need to know the
2275 * number of channels to perform that allocation.
2276 */
2277 reg = readl_relaxed(base + AT_XDMAC_GTYPE);
2278 nr_channels = AT_XDMAC_NB_CH(reg);
2279 if (nr_channels > AT_XDMAC_MAX_CHAN) {
2280 dev_err(&pdev->dev, "invalid number of channels (%u)\n",
2281 nr_channels);
2282 return -EINVAL;
2283 }
2284
2285 atxdmac = devm_kzalloc(&pdev->dev,
2286 struct_size(atxdmac, chan, nr_channels),
2287 GFP_KERNEL);
2288 if (!atxdmac) {
2289 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
2290 return -ENOMEM;
2291 }
2292
2293 atxdmac->regs = base;
2294 atxdmac->irq = irq;
2295 atxdmac->dev = &pdev->dev;
2296
2297 atxdmac->layout = of_device_get_match_data(&pdev->dev);
2298 if (!atxdmac->layout)
2299 return -ENODEV;
2300
2301 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
2302 if (IS_ERR(atxdmac->clk)) {
2303 dev_err(&pdev->dev, "can't get dma_clk\n");
2304 return PTR_ERR(atxdmac->clk);
2305 }
2306
2307 /* Do not use devres for the IRQ, to prevent races with the tasklet */
2308 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
2309 if (ret) {
2310 dev_err(&pdev->dev, "can't request irq\n");
2311 return ret;
2312 }
2313
2314 ret = clk_prepare_enable(atxdmac->clk);
2315 if (ret) {
2316 dev_err(&pdev->dev, "can't prepare or enable clock\n");
2317 goto err_free_irq;
2318 }
2319
2320 atxdmac->at_xdmac_desc_pool =
2321 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
2322 sizeof(struct at_xdmac_desc), 4, 0);
2323 if (!atxdmac->at_xdmac_desc_pool) {
2324 dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
2325 ret = -ENOMEM;
2326 goto err_clk_disable;
2327 }
2328
2329 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
2330 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
2331 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
2332 dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
2333 dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
2334 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
2335 /*
2336 * Without DMA_PRIVATE the driver is not able to allocate more than
2337 * one channel; the second allocation fails in private_candidate().
2338 */
2339 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
2340 atxdmac->dma.dev = &pdev->dev;
2341 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
2342 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
2343 atxdmac->dma.device_tx_status = at_xdmac_tx_status;
2344 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
2345 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
2346 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
2347 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
2348 atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
2349 atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
2350 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
2351 atxdmac->dma.device_config = at_xdmac_device_config;
2352 atxdmac->dma.device_pause = at_xdmac_device_pause;
2353 atxdmac->dma.device_resume = at_xdmac_device_resume;
2354 atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
2355 atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2356 atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
2357 atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2358 atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
2359
2360 platform_set_drvdata(pdev, atxdmac);
2361
2362 pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2363 pm_runtime_use_autosuspend(&pdev->dev);
2364 pm_runtime_set_active(&pdev->dev);
2365 pm_runtime_enable(&pdev->dev);
2366 pm_runtime_get_noresume(&pdev->dev);
2367
2368 /* Init channels. */
2369 INIT_LIST_HEAD(&atxdmac->dma.channels);
2370
2371 /* Disable all chans and interrupts. */
2372 at_xdmac_off(atxdmac, true);
2373
2374 for (i = 0; i < nr_channels; i++) {
2375 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2376
2377 atchan->chan.device = &atxdmac->dma;
2378 list_add_tail(&atchan->chan.device_node,
2379 &atxdmac->dma.channels);
2380
2381 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
2382 atchan->mask = 1 << i;
2383
2384 spin_lock_init(&atchan->lock);
2385 INIT_LIST_HEAD(&atchan->xfers_list);
2386 INIT_LIST_HEAD(&atchan->free_descs_list);
2387 tasklet_setup(&atchan->tasklet, at_xdmac_tasklet);
2388
2389 /* Clear pending interrupts. */
2390 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
2391 cpu_relax();
2392 }
2393
2394 ret = dma_async_device_register(&atxdmac->dma);
2395 if (ret) {
2396 dev_err(&pdev->dev, "failed to register DMA engine device\n");
2397 goto err_pm_disable;
2398 }
2399
2400 ret = of_dma_controller_register(pdev->dev.of_node,
2401 at_xdmac_xlate, atxdmac);
2402 if (ret) {
2403 dev_err(&pdev->dev, "could not register OF DMA controller\n");
2404 goto err_dma_unregister;
2405 }
2406
2407 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
2408 nr_channels, atxdmac->regs);
2409
2410 at_xdmac_axi_config(pdev);
2411
2412 pm_runtime_mark_last_busy(&pdev->dev);
2413 pm_runtime_put_autosuspend(&pdev->dev);
2414
2415 return 0;
2416
2417err_dma_unregister:
2418 dma_async_device_unregister(&atxdmac->dma);
2419err_pm_disable:
2420 pm_runtime_put_noidle(&pdev->dev);
2421 pm_runtime_disable(&pdev->dev);
2422 pm_runtime_set_suspended(&pdev->dev);
2423 pm_runtime_dont_use_autosuspend(&pdev->dev);
2424err_clk_disable:
2425 clk_disable_unprepare(atxdmac->clk);
2426err_free_irq:
2427 free_irq(atxdmac->irq, atxdmac);
2428 return ret;
2429}
2430
2431static int at_xdmac_remove(struct platform_device *pdev)
2432{
2433 struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
2434 int i;
2435
2436 at_xdmac_off(atxdmac, true);
2437 of_dma_controller_free(pdev->dev.of_node);
2438 dma_async_device_unregister(&atxdmac->dma);
2439 pm_runtime_disable(atxdmac->dev);
2440 pm_runtime_set_suspended(&pdev->dev);
2441 pm_runtime_dont_use_autosuspend(&pdev->dev);
2442 clk_disable_unprepare(atxdmac->clk);
2443
2444 free_irq(atxdmac->irq, atxdmac);
2445
2446 for (i = 0; i < atxdmac->dma.chancnt; i++) {
2447 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
2448
2449 tasklet_kill(&atchan->tasklet);
2450 at_xdmac_free_chan_resources(&atchan->chan);
2451 }
2452
2453 return 0;
2454}
2455
2456static const struct dev_pm_ops __maybe_unused atmel_xdmac_dev_pm_ops = {
2457 .prepare = atmel_xdmac_prepare,
2458 SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
2459 SET_RUNTIME_PM_OPS(atmel_xdmac_runtime_suspend,
2460 atmel_xdmac_runtime_resume, NULL)
2461};
2462
2463static const struct of_device_id atmel_xdmac_dt_ids[] = {
2464 {
2465 .compatible = "atmel,sama5d4-dma",
2466 .data = &at_xdmac_sama5d4_layout,
2467 }, {
2468 .compatible = "microchip,sama7g5-dma",
2469 .data = &at_xdmac_sama7g5_layout,
2470 }, {
2471 /* sentinel */
2472 }
2473};
2474MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
2475
2476static struct platform_driver at_xdmac_driver = {
2477 .probe = at_xdmac_probe,
2478 .remove = at_xdmac_remove,
2479 .driver = {
2480 .name = "at_xdmac",
2481 .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
2482 .pm = pm_ptr(&atmel_xdmac_dev_pm_ops),
2483 }
2484};
2485
2486static int __init at_xdmac_init(void)
2487{
2488 return platform_driver_register(&at_xdmac_driver);
2489}
2490subsys_initcall(at_xdmac_init);
2491
2492static void __exit at_xdmac_exit(void)
2493{
2494 platform_driver_unregister(&at_xdmac_driver);
2495}
2496module_exit(at_xdmac_exit);
2497
2498MODULE_DESCRIPTION("Atmel Extensible DMA Controller driver");
2499MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
2500MODULE_LICENSE("GPL");