Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: fsl-edma: extract common fsl-edma code (no changes in behavior intended)

This patch adds a new fsl-edma-common module to allow new
mcf-edma module code to use most of the fsl-edma code.

Signed-off-by: Angelo Dureghello <angelo@sysam.it>
Tested-by: Krzysztof Kozlowski <krzk@kernel.org>
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Angelo Dureghello; committed by Vinod Koul.
Commit: 9d831528 (parent: 1297b647)

+795 -697
+1 -1
drivers/dma/Makefile
··· 31 31 obj-$(CONFIG_DW_DMAC_CORE) += dw/ 32 32 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o 33 33 obj-$(CONFIG_FSL_DMA) += fsldma.o 34 - obj-$(CONFIG_FSL_EDMA) += fsl-edma.o 34 + obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o 35 35 obj-$(CONFIG_FSL_RAID) += fsl_raid.o 36 36 obj-$(CONFIG_HSU_DMA) += hsu/ 37 37 obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o
+552
drivers/dma/fsl-edma-common.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + // 3 + // Copyright (c) 2013-2014 Freescale Semiconductor, Inc 4 + // Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it> 5 + 6 + #include <linux/dmapool.h> 7 + #include <linux/module.h> 8 + #include <linux/slab.h> 9 + 10 + #include "fsl-edma-common.h" 11 + 12 + #define EDMA_CR 0x00 13 + #define EDMA_ES 0x04 14 + #define EDMA_ERQ 0x0C 15 + #define EDMA_EEI 0x14 16 + #define EDMA_SERQ 0x1B 17 + #define EDMA_CERQ 0x1A 18 + #define EDMA_SEEI 0x19 19 + #define EDMA_CEEI 0x18 20 + #define EDMA_CINT 0x1F 21 + #define EDMA_CERR 0x1E 22 + #define EDMA_SSRT 0x1D 23 + #define EDMA_CDNE 0x1C 24 + #define EDMA_INTR 0x24 25 + #define EDMA_ERR 0x2C 26 + 27 + #define EDMA64_ERQH 0x08 28 + #define EDMA64_EEIH 0x10 29 + #define EDMA64_SERQ 0x18 30 + #define EDMA64_CERQ 0x19 31 + #define EDMA64_SEEI 0x1a 32 + #define EDMA64_CEEI 0x1b 33 + #define EDMA64_CINT 0x1c 34 + #define EDMA64_CERR 0x1d 35 + #define EDMA64_SSRT 0x1e 36 + #define EDMA64_CDNE 0x1f 37 + #define EDMA64_INTH 0x20 38 + #define EDMA64_INTL 0x24 39 + #define EDMA64_ERRH 0x28 40 + #define EDMA64_ERRL 0x2c 41 + 42 + #define EDMA_TCD 0x1000 43 + 44 + static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan) 45 + { 46 + void __iomem *addr = fsl_chan->edma->membase; 47 + u32 ch = fsl_chan->vchan.chan.chan_id; 48 + 49 + edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI); 50 + edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ); 51 + } 52 + 53 + void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) 54 + { 55 + void __iomem *addr = fsl_chan->edma->membase; 56 + u32 ch = fsl_chan->vchan.chan.chan_id; 57 + 58 + edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ); 59 + edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI); 60 + } 61 + EXPORT_SYMBOL_GPL(fsl_edma_disable_request); 62 + 63 + void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, 64 + unsigned int slot, bool enable) 65 + { 66 + u32 ch = fsl_chan->vchan.chan.chan_id; 
67 + void __iomem *muxaddr; 68 + unsigned int chans_per_mux, ch_off; 69 + 70 + chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; 71 + ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; 72 + muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; 73 + slot = EDMAMUX_CHCFG_SOURCE(slot); 74 + 75 + if (enable) 76 + iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off); 77 + else 78 + iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off); 79 + } 80 + EXPORT_SYMBOL_GPL(fsl_edma_chan_mux); 81 + 82 + static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width) 83 + { 84 + switch (addr_width) { 85 + case 1: 86 + return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT; 87 + case 2: 88 + return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT; 89 + case 4: 90 + return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT; 91 + case 8: 92 + return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT; 93 + default: 94 + return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT; 95 + } 96 + } 97 + 98 + void fsl_edma_free_desc(struct virt_dma_desc *vdesc) 99 + { 100 + struct fsl_edma_desc *fsl_desc; 101 + int i; 102 + 103 + fsl_desc = to_fsl_edma_desc(vdesc); 104 + for (i = 0; i < fsl_desc->n_tcds; i++) 105 + dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd, 106 + fsl_desc->tcd[i].ptcd); 107 + kfree(fsl_desc); 108 + } 109 + EXPORT_SYMBOL_GPL(fsl_edma_free_desc); 110 + 111 + int fsl_edma_terminate_all(struct dma_chan *chan) 112 + { 113 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 114 + unsigned long flags; 115 + LIST_HEAD(head); 116 + 117 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 118 + fsl_edma_disable_request(fsl_chan); 119 + fsl_chan->edesc = NULL; 120 + fsl_chan->idle = true; 121 + vchan_get_all_descriptors(&fsl_chan->vchan, &head); 122 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 123 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head); 124 + return 0; 125 + } 126 + 
EXPORT_SYMBOL_GPL(fsl_edma_terminate_all); 127 + 128 + int fsl_edma_pause(struct dma_chan *chan) 129 + { 130 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 131 + unsigned long flags; 132 + 133 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 134 + if (fsl_chan->edesc) { 135 + fsl_edma_disable_request(fsl_chan); 136 + fsl_chan->status = DMA_PAUSED; 137 + fsl_chan->idle = true; 138 + } 139 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 140 + return 0; 141 + } 142 + EXPORT_SYMBOL_GPL(fsl_edma_pause); 143 + 144 + int fsl_edma_resume(struct dma_chan *chan) 145 + { 146 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 147 + unsigned long flags; 148 + 149 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 150 + if (fsl_chan->edesc) { 151 + fsl_edma_enable_request(fsl_chan); 152 + fsl_chan->status = DMA_IN_PROGRESS; 153 + fsl_chan->idle = false; 154 + } 155 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 156 + return 0; 157 + } 158 + EXPORT_SYMBOL_GPL(fsl_edma_resume); 159 + 160 + int fsl_edma_slave_config(struct dma_chan *chan, 161 + struct dma_slave_config *cfg) 162 + { 163 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 164 + 165 + fsl_chan->fsc.dir = cfg->direction; 166 + if (cfg->direction == DMA_DEV_TO_MEM) { 167 + fsl_chan->fsc.dev_addr = cfg->src_addr; 168 + fsl_chan->fsc.addr_width = cfg->src_addr_width; 169 + fsl_chan->fsc.burst = cfg->src_maxburst; 170 + fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width); 171 + } else if (cfg->direction == DMA_MEM_TO_DEV) { 172 + fsl_chan->fsc.dev_addr = cfg->dst_addr; 173 + fsl_chan->fsc.addr_width = cfg->dst_addr_width; 174 + fsl_chan->fsc.burst = cfg->dst_maxburst; 175 + fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width); 176 + } else 177 + return -EINVAL; 178 + 179 + return 0; 180 + } 181 + EXPORT_SYMBOL_GPL(fsl_edma_slave_config); 182 + 183 + static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, 184 + struct virt_dma_desc *vdesc, bool 
in_progress) 185 + { 186 + struct fsl_edma_desc *edesc = fsl_chan->edesc; 187 + void __iomem *addr = fsl_chan->edma->membase; 188 + u32 ch = fsl_chan->vchan.chan.chan_id; 189 + enum dma_transfer_direction dir = fsl_chan->fsc.dir; 190 + dma_addr_t cur_addr, dma_addr; 191 + size_t len, size; 192 + int i; 193 + 194 + /* calculate the total size in this desc */ 195 + for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) 196 + len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes) 197 + * le16_to_cpu(edesc->tcd[i].vtcd->biter); 198 + 199 + if (!in_progress) 200 + return len; 201 + 202 + if (dir == DMA_MEM_TO_DEV) 203 + cur_addr = edma_readl( 204 + fsl_chan->edma, addr + EDMA_TCD_SADDR(ch)); 205 + else 206 + cur_addr = edma_readl( 207 + fsl_chan->edma, addr + EDMA_TCD_DADDR(ch)); 208 + 209 + /* figure out the finished and calculate the residue */ 210 + for (i = 0; i < fsl_chan->edesc->n_tcds; i++) { 211 + size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes) 212 + * le16_to_cpu(edesc->tcd[i].vtcd->biter); 213 + if (dir == DMA_MEM_TO_DEV) 214 + dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr); 215 + else 216 + dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr); 217 + 218 + len -= size; 219 + if (cur_addr >= dma_addr && cur_addr < dma_addr + size) { 220 + len += dma_addr + size - cur_addr; 221 + break; 222 + } 223 + } 224 + 225 + return len; 226 + } 227 + 228 + enum dma_status fsl_edma_tx_status(struct dma_chan *chan, 229 + dma_cookie_t cookie, struct dma_tx_state *txstate) 230 + { 231 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 232 + struct virt_dma_desc *vdesc; 233 + enum dma_status status; 234 + unsigned long flags; 235 + 236 + status = dma_cookie_status(chan, cookie, txstate); 237 + if (status == DMA_COMPLETE) 238 + return status; 239 + 240 + if (!txstate) 241 + return fsl_chan->status; 242 + 243 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 244 + vdesc = vchan_find_desc(&fsl_chan->vchan, cookie); 245 + if (fsl_chan->edesc && cookie == 
fsl_chan->edesc->vdesc.tx.cookie) 246 + txstate->residue = 247 + fsl_edma_desc_residue(fsl_chan, vdesc, true); 248 + else if (vdesc) 249 + txstate->residue = 250 + fsl_edma_desc_residue(fsl_chan, vdesc, false); 251 + else 252 + txstate->residue = 0; 253 + 254 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 255 + 256 + return fsl_chan->status; 257 + } 258 + EXPORT_SYMBOL_GPL(fsl_edma_tx_status); 259 + 260 + static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, 261 + struct fsl_edma_hw_tcd *tcd) 262 + { 263 + struct fsl_edma_engine *edma = fsl_chan->edma; 264 + void __iomem *addr = fsl_chan->edma->membase; 265 + u32 ch = fsl_chan->vchan.chan.chan_id; 266 + 267 + /* 268 + * TCD parameters are stored in struct fsl_edma_hw_tcd in little 269 + * endian format. However, we need to load the TCD registers in 270 + * big- or little-endian obeying the eDMA engine model endian. 271 + */ 272 + edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch)); 273 + edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch)); 274 + edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch)); 275 + 276 + edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch)); 277 + edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch)); 278 + 279 + edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch)); 280 + edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch)); 281 + 282 + edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch)); 283 + edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch)); 284 + edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch)); 285 + 286 + edma_writel(edma, 287 + le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch)); 288 + 289 + edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch)); 290 + } 291 + 292 + static inline 293 + void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst, 294 + u16 attr, u16 soff, u32 nbytes, u32 
slast, u16 citer, 295 + u16 biter, u16 doff, u32 dlast_sga, bool major_int, 296 + bool disable_req, bool enable_sg) 297 + { 298 + u16 csr = 0; 299 + 300 + /* 301 + * eDMA hardware SGs require the TCDs to be stored in little 302 + * endian format irrespective of the register endian model. 303 + * So we put the value in little endian in memory, waiting 304 + * for fsl_edma_set_tcd_regs doing the swap. 305 + */ 306 + tcd->saddr = cpu_to_le32(src); 307 + tcd->daddr = cpu_to_le32(dst); 308 + 309 + tcd->attr = cpu_to_le16(attr); 310 + 311 + tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff)); 312 + 313 + tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes)); 314 + tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast)); 315 + 316 + tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer)); 317 + tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff)); 318 + 319 + tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga)); 320 + 321 + tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter)); 322 + if (major_int) 323 + csr |= EDMA_TCD_CSR_INT_MAJOR; 324 + 325 + if (disable_req) 326 + csr |= EDMA_TCD_CSR_D_REQ; 327 + 328 + if (enable_sg) 329 + csr |= EDMA_TCD_CSR_E_SG; 330 + 331 + tcd->csr = cpu_to_le16(csr); 332 + } 333 + 334 + static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, 335 + int sg_len) 336 + { 337 + struct fsl_edma_desc *fsl_desc; 338 + int i; 339 + 340 + fsl_desc = kzalloc(sizeof(*fsl_desc) + 341 + sizeof(struct fsl_edma_sw_tcd) * 342 + sg_len, GFP_NOWAIT); 343 + if (!fsl_desc) 344 + return NULL; 345 + 346 + fsl_desc->echan = fsl_chan; 347 + fsl_desc->n_tcds = sg_len; 348 + for (i = 0; i < sg_len; i++) { 349 + fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool, 350 + GFP_NOWAIT, &fsl_desc->tcd[i].ptcd); 351 + if (!fsl_desc->tcd[i].vtcd) 352 + goto err; 353 + } 354 + return fsl_desc; 355 + 356 + err: 357 + while (--i >= 0) 358 + dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd, 359 + fsl_desc->tcd[i].ptcd); 360 + 
kfree(fsl_desc); 361 + return NULL; 362 + } 363 + 364 + struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( 365 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 366 + size_t period_len, enum dma_transfer_direction direction, 367 + unsigned long flags) 368 + { 369 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 370 + struct fsl_edma_desc *fsl_desc; 371 + dma_addr_t dma_buf_next; 372 + int sg_len, i; 373 + u32 src_addr, dst_addr, last_sg, nbytes; 374 + u16 soff, doff, iter; 375 + 376 + if (!is_slave_direction(fsl_chan->fsc.dir)) 377 + return NULL; 378 + 379 + sg_len = buf_len / period_len; 380 + fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); 381 + if (!fsl_desc) 382 + return NULL; 383 + fsl_desc->iscyclic = true; 384 + 385 + dma_buf_next = dma_addr; 386 + nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst; 387 + iter = period_len / nbytes; 388 + 389 + for (i = 0; i < sg_len; i++) { 390 + if (dma_buf_next >= dma_addr + buf_len) 391 + dma_buf_next = dma_addr; 392 + 393 + /* get next sg's physical address */ 394 + last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd; 395 + 396 + if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) { 397 + src_addr = dma_buf_next; 398 + dst_addr = fsl_chan->fsc.dev_addr; 399 + soff = fsl_chan->fsc.addr_width; 400 + doff = 0; 401 + } else { 402 + src_addr = fsl_chan->fsc.dev_addr; 403 + dst_addr = dma_buf_next; 404 + soff = 0; 405 + doff = fsl_chan->fsc.addr_width; 406 + } 407 + 408 + fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr, 409 + fsl_chan->fsc.attr, soff, nbytes, 0, iter, 410 + iter, doff, last_sg, true, false, true); 411 + dma_buf_next += period_len; 412 + } 413 + 414 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags); 415 + } 416 + EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic); 417 + 418 + struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( 419 + struct dma_chan *chan, struct scatterlist *sgl, 420 + unsigned int sg_len, enum dma_transfer_direction direction, 421 + unsigned long 
flags, void *context) 422 + { 423 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 424 + struct fsl_edma_desc *fsl_desc; 425 + struct scatterlist *sg; 426 + u32 src_addr, dst_addr, last_sg, nbytes; 427 + u16 soff, doff, iter; 428 + int i; 429 + 430 + if (!is_slave_direction(fsl_chan->fsc.dir)) 431 + return NULL; 432 + 433 + fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); 434 + if (!fsl_desc) 435 + return NULL; 436 + fsl_desc->iscyclic = false; 437 + 438 + nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst; 439 + for_each_sg(sgl, sg, sg_len, i) { 440 + /* get next sg's physical address */ 441 + last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd; 442 + 443 + if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) { 444 + src_addr = sg_dma_address(sg); 445 + dst_addr = fsl_chan->fsc.dev_addr; 446 + soff = fsl_chan->fsc.addr_width; 447 + doff = 0; 448 + } else { 449 + src_addr = fsl_chan->fsc.dev_addr; 450 + dst_addr = sg_dma_address(sg); 451 + soff = 0; 452 + doff = fsl_chan->fsc.addr_width; 453 + } 454 + 455 + iter = sg_dma_len(sg) / nbytes; 456 + if (i < sg_len - 1) { 457 + last_sg = fsl_desc->tcd[(i + 1)].ptcd; 458 + fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, 459 + dst_addr, fsl_chan->fsc.attr, soff, 460 + nbytes, 0, iter, iter, doff, last_sg, 461 + false, false, true); 462 + } else { 463 + last_sg = 0; 464 + fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, 465 + dst_addr, fsl_chan->fsc.attr, soff, 466 + nbytes, 0, iter, iter, doff, last_sg, 467 + true, true, false); 468 + } 469 + } 470 + 471 + return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags); 472 + } 473 + EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg); 474 + 475 + void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) 476 + { 477 + struct virt_dma_desc *vdesc; 478 + 479 + vdesc = vchan_next_desc(&fsl_chan->vchan); 480 + if (!vdesc) 481 + return; 482 + fsl_chan->edesc = to_fsl_edma_desc(vdesc); 483 + fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd); 484 + 
fsl_edma_enable_request(fsl_chan); 485 + fsl_chan->status = DMA_IN_PROGRESS; 486 + fsl_chan->idle = false; 487 + } 488 + EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc); 489 + 490 + void fsl_edma_issue_pending(struct dma_chan *chan) 491 + { 492 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 493 + unsigned long flags; 494 + 495 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 496 + 497 + if (unlikely(fsl_chan->pm_state != RUNNING)) { 498 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 499 + /* cannot submit due to suspend */ 500 + return; 501 + } 502 + 503 + if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc) 504 + fsl_edma_xfer_desc(fsl_chan); 505 + 506 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 507 + } 508 + EXPORT_SYMBOL_GPL(fsl_edma_issue_pending); 509 + 510 + int fsl_edma_alloc_chan_resources(struct dma_chan *chan) 511 + { 512 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 513 + 514 + fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev, 515 + sizeof(struct fsl_edma_hw_tcd), 516 + 32, 0); 517 + return 0; 518 + } 519 + EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources); 520 + 521 + void fsl_edma_free_chan_resources(struct dma_chan *chan) 522 + { 523 + struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 524 + unsigned long flags; 525 + LIST_HEAD(head); 526 + 527 + spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 528 + fsl_edma_disable_request(fsl_chan); 529 + fsl_edma_chan_mux(fsl_chan, 0, false); 530 + fsl_chan->edesc = NULL; 531 + vchan_get_all_descriptors(&fsl_chan->vchan, &head); 532 + spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 533 + 534 + vchan_dma_desc_free_list(&fsl_chan->vchan, &head); 535 + dma_pool_destroy(fsl_chan->tcd_pool); 536 + fsl_chan->tcd_pool = NULL; 537 + } 538 + EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources); 539 + 540 + void fsl_edma_cleanup_vchan(struct dma_device *dmadev) 541 + { 542 + struct fsl_edma_chan *chan, *_chan; 543 + 544 + list_for_each_entry_safe(chan, 
_chan, 545 + &dmadev->channels, vchan.chan.device_node) { 546 + list_del(&chan->vchan.chan.device_node); 547 + tasklet_kill(&chan->vchan.task); 548 + } 549 + } 550 + EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan); 551 + 552 + MODULE_LICENSE("GPL v2");
+241
drivers/dma/fsl-edma-common.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0+ */ 2 + /* 3 + * Copyright 2013-2014 Freescale Semiconductor, Inc. 4 + * Copyright 2018 Angelo Dureghello <angelo@sysam.it> 5 + */ 6 + #ifndef _FSL_EDMA_COMMON_H_ 7 + #define _FSL_EDMA_COMMON_H_ 8 + 9 + #include "virt-dma.h" 10 + 11 + #define EDMA_CR 0x00 12 + #define EDMA_ES 0x04 13 + #define EDMA_ERQ 0x0C 14 + #define EDMA_EEI 0x14 15 + #define EDMA_SERQ 0x1B 16 + #define EDMA_CERQ 0x1A 17 + #define EDMA_SEEI 0x19 18 + #define EDMA_CEEI 0x18 19 + #define EDMA_CINT 0x1F 20 + #define EDMA_CERR 0x1E 21 + #define EDMA_SSRT 0x1D 22 + #define EDMA_CDNE 0x1C 23 + #define EDMA_INTR 0x24 24 + #define EDMA_ERR 0x2C 25 + 26 + #define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x)) 27 + #define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x)) 28 + #define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x)) 29 + #define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x)) 30 + #define EDMA_TCD_SLAST(x) (0x100C + 32 * (x)) 31 + #define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x)) 32 + #define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x)) 33 + #define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x)) 34 + #define EDMA_TCD_CITER(x) (0x1016 + 32 * (x)) 35 + #define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x)) 36 + #define EDMA_TCD_CSR(x) (0x101C + 32 * (x)) 37 + #define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x)) 38 + #define EDMA_TCD_BITER(x) (0x101E + 32 * (x)) 39 + 40 + #define EDMA_CR_EDBG BIT(1) 41 + #define EDMA_CR_ERCA BIT(2) 42 + #define EDMA_CR_ERGA BIT(3) 43 + #define EDMA_CR_HOE BIT(4) 44 + #define EDMA_CR_HALT BIT(5) 45 + #define EDMA_CR_CLM BIT(6) 46 + #define EDMA_CR_EMLM BIT(7) 47 + #define EDMA_CR_ECX BIT(16) 48 + #define EDMA_CR_CX BIT(17) 49 + 50 + #define EDMA_SEEI_SEEI(x) ((x) & 0x1F) 51 + #define EDMA_CEEI_CEEI(x) ((x) & 0x1F) 52 + #define EDMA_CINT_CINT(x) ((x) & 0x1F) 53 + #define EDMA_CERR_CERR(x) ((x) & 0x1F) 54 + 55 + #define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007)) 56 + #define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3) 57 + #define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8) 58 + 
#define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11) 59 + #define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000) 60 + #define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100) 61 + #define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200) 62 + #define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300) 63 + #define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500) 64 + #define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000) 65 + #define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001) 66 + #define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002) 67 + #define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003) 68 + #define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005) 69 + 70 + #define EDMA_TCD_SOFF_SOFF(x) (x) 71 + #define EDMA_TCD_NBYTES_NBYTES(x) (x) 72 + #define EDMA_TCD_SLAST_SLAST(x) (x) 73 + #define EDMA_TCD_DADDR_DADDR(x) (x) 74 + #define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF) 75 + #define EDMA_TCD_DOFF_DOFF(x) (x) 76 + #define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x) 77 + #define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF) 78 + 79 + #define EDMA_TCD_CSR_START BIT(0) 80 + #define EDMA_TCD_CSR_INT_MAJOR BIT(1) 81 + #define EDMA_TCD_CSR_INT_HALF BIT(2) 82 + #define EDMA_TCD_CSR_D_REQ BIT(3) 83 + #define EDMA_TCD_CSR_E_SG BIT(4) 84 + #define EDMA_TCD_CSR_E_LINK BIT(5) 85 + #define EDMA_TCD_CSR_ACTIVE BIT(6) 86 + #define EDMA_TCD_CSR_DONE BIT(7) 87 + 88 + #define EDMAMUX_CHCFG_DIS 0x0 89 + #define EDMAMUX_CHCFG_ENBL 0x80 90 + #define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F) 91 + 92 + #define DMAMUX_NR 2 93 + 94 + #define FSL_EDMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 95 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 96 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 97 + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 98 + enum fsl_edma_pm_state { 99 + RUNNING = 0, 100 + SUSPENDED, 101 + }; 102 + 103 + struct fsl_edma_hw_tcd { 104 + __le32 saddr; 105 + __le16 soff; 106 + __le16 attr; 107 + __le32 nbytes; 108 + __le32 slast; 109 + __le32 daddr; 110 + __le16 doff; 111 + __le16 citer; 112 + __le32 dlast_sga; 113 + __le16 csr; 114 + __le16 biter; 115 + }; 116 + 117 + struct fsl_edma_sw_tcd { 118 + dma_addr_t ptcd; 119 + struct fsl_edma_hw_tcd 
*vtcd; 120 + }; 121 + 122 + struct fsl_edma_slave_config { 123 + enum dma_transfer_direction dir; 124 + enum dma_slave_buswidth addr_width; 125 + u32 dev_addr; 126 + u32 burst; 127 + u32 attr; 128 + }; 129 + 130 + struct fsl_edma_chan { 131 + struct virt_dma_chan vchan; 132 + enum dma_status status; 133 + enum fsl_edma_pm_state pm_state; 134 + bool idle; 135 + u32 slave_id; 136 + struct fsl_edma_engine *edma; 137 + struct fsl_edma_desc *edesc; 138 + struct fsl_edma_slave_config fsc; 139 + struct dma_pool *tcd_pool; 140 + }; 141 + 142 + struct fsl_edma_desc { 143 + struct virt_dma_desc vdesc; 144 + struct fsl_edma_chan *echan; 145 + bool iscyclic; 146 + unsigned int n_tcds; 147 + struct fsl_edma_sw_tcd tcd[]; 148 + }; 149 + 150 + struct fsl_edma_engine { 151 + struct dma_device dma_dev; 152 + void __iomem *membase; 153 + void __iomem *muxbase[DMAMUX_NR]; 154 + struct clk *muxclk[DMAMUX_NR]; 155 + struct mutex fsl_edma_mutex; 156 + u32 n_chans; 157 + int txirq; 158 + int errirq; 159 + bool big_endian; 160 + struct fsl_edma_chan chans[]; 161 + }; 162 + 163 + /* 164 + * R/W functions for big- or little-endian registers: 165 + * The eDMA controller's endian is independent of the CPU core's endian. 166 + * For the big-endian IP module, the offset for 8-bit or 16-bit registers 167 + * should also be swapped opposite to that in little-endian IP. 
168 + */ 169 + static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) 170 + { 171 + if (edma->big_endian) 172 + return ioread32be(addr); 173 + else 174 + return ioread32(addr); 175 + } 176 + 177 + static inline void edma_writeb(struct fsl_edma_engine *edma, 178 + u8 val, void __iomem *addr) 179 + { 180 + /* swap the reg offset for these in big-endian mode */ 181 + if (edma->big_endian) 182 + iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3)); 183 + else 184 + iowrite8(val, addr); 185 + } 186 + 187 + static inline void edma_writew(struct fsl_edma_engine *edma, 188 + u16 val, void __iomem *addr) 189 + { 190 + /* swap the reg offset for these in big-endian mode */ 191 + if (edma->big_endian) 192 + iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2)); 193 + else 194 + iowrite16(val, addr); 195 + } 196 + 197 + static inline void edma_writel(struct fsl_edma_engine *edma, 198 + u32 val, void __iomem *addr) 199 + { 200 + if (edma->big_endian) 201 + iowrite32be(val, addr); 202 + else 203 + iowrite32(val, addr); 204 + } 205 + 206 + static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) 207 + { 208 + return container_of(chan, struct fsl_edma_chan, vchan.chan); 209 + } 210 + 211 + static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd) 212 + { 213 + return container_of(vd, struct fsl_edma_desc, vdesc); 214 + } 215 + 216 + void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan); 217 + void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, 218 + unsigned int slot, bool enable); 219 + void fsl_edma_free_desc(struct virt_dma_desc *vdesc); 220 + int fsl_edma_terminate_all(struct dma_chan *chan); 221 + int fsl_edma_pause(struct dma_chan *chan); 222 + int fsl_edma_resume(struct dma_chan *chan); 223 + int fsl_edma_slave_config(struct dma_chan *chan, 224 + struct dma_slave_config *cfg); 225 + enum dma_status fsl_edma_tx_status(struct dma_chan *chan, 226 + dma_cookie_t cookie, struct dma_tx_state 
*txstate); 227 + struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic( 228 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 229 + size_t period_len, enum dma_transfer_direction direction, 230 + unsigned long flags); 231 + struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( 232 + struct dma_chan *chan, struct scatterlist *sgl, 233 + unsigned int sg_len, enum dma_transfer_direction direction, 234 + unsigned long flags, void *context); 235 + void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan); 236 + void fsl_edma_issue_pending(struct dma_chan *chan); 237 + int fsl_edma_alloc_chan_resources(struct dma_chan *chan); 238 + void fsl_edma_free_chan_resources(struct dma_chan *chan); 239 + void fsl_edma_cleanup_vchan(struct dma_device *dmadev); 240 + 241 + #endif /* _FSL_EDMA_COMMON_H_ */
+1 -696
drivers/dma/fsl-edma.c
··· 13 13 * option) any later version. 14 14 */ 15 15 16 - #include <linux/init.h> 17 16 #include <linux/module.h> 18 17 #include <linux/interrupt.h> 19 18 #include <linux/clk.h> 20 - #include <linux/dma-mapping.h> 21 - #include <linux/dmapool.h> 22 - #include <linux/slab.h> 23 - #include <linux/spinlock.h> 24 19 #include <linux/of.h> 25 20 #include <linux/of_device.h> 26 21 #include <linux/of_address.h> 27 22 #include <linux/of_irq.h> 28 23 #include <linux/of_dma.h> 29 24 30 - #include "virt-dma.h" 31 - 32 - #define EDMA_CR 0x00 33 - #define EDMA_ES 0x04 34 - #define EDMA_ERQ 0x0C 35 - #define EDMA_EEI 0x14 36 - #define EDMA_SERQ 0x1B 37 - #define EDMA_CERQ 0x1A 38 - #define EDMA_SEEI 0x19 39 - #define EDMA_CEEI 0x18 40 - #define EDMA_CINT 0x1F 41 - #define EDMA_CERR 0x1E 42 - #define EDMA_SSRT 0x1D 43 - #define EDMA_CDNE 0x1C 44 - #define EDMA_INTR 0x24 45 - #define EDMA_ERR 0x2C 46 - 47 - #define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x)) 48 - #define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x)) 49 - #define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x)) 50 - #define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x)) 51 - #define EDMA_TCD_SLAST(x) (0x100C + 32 * (x)) 52 - #define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x)) 53 - #define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x)) 54 - #define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x)) 55 - #define EDMA_TCD_CITER(x) (0x1016 + 32 * (x)) 56 - #define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x)) 57 - #define EDMA_TCD_CSR(x) (0x101C + 32 * (x)) 58 - #define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x)) 59 - #define EDMA_TCD_BITER(x) (0x101E + 32 * (x)) 60 - 61 - #define EDMA_CR_EDBG BIT(1) 62 - #define EDMA_CR_ERCA BIT(2) 63 - #define EDMA_CR_ERGA BIT(3) 64 - #define EDMA_CR_HOE BIT(4) 65 - #define EDMA_CR_HALT BIT(5) 66 - #define EDMA_CR_CLM BIT(6) 67 - #define EDMA_CR_EMLM BIT(7) 68 - #define EDMA_CR_ECX BIT(16) 69 - #define EDMA_CR_CX BIT(17) 70 - 71 - #define EDMA_SEEI_SEEI(x) ((x) & 0x1F) 72 - #define EDMA_CEEI_CEEI(x) ((x) & 0x1F) 73 - #define EDMA_CINT_CINT(x) 
((x) & 0x1F) 74 - #define EDMA_CERR_CERR(x) ((x) & 0x1F) 75 - 76 - #define EDMA_TCD_ATTR_DSIZE(x) (((x) & 0x0007)) 77 - #define EDMA_TCD_ATTR_DMOD(x) (((x) & 0x001F) << 3) 78 - #define EDMA_TCD_ATTR_SSIZE(x) (((x) & 0x0007) << 8) 79 - #define EDMA_TCD_ATTR_SMOD(x) (((x) & 0x001F) << 11) 80 - #define EDMA_TCD_ATTR_SSIZE_8BIT (0x0000) 81 - #define EDMA_TCD_ATTR_SSIZE_16BIT (0x0100) 82 - #define EDMA_TCD_ATTR_SSIZE_32BIT (0x0200) 83 - #define EDMA_TCD_ATTR_SSIZE_64BIT (0x0300) 84 - #define EDMA_TCD_ATTR_SSIZE_32BYTE (0x0500) 85 - #define EDMA_TCD_ATTR_DSIZE_8BIT (0x0000) 86 - #define EDMA_TCD_ATTR_DSIZE_16BIT (0x0001) 87 - #define EDMA_TCD_ATTR_DSIZE_32BIT (0x0002) 88 - #define EDMA_TCD_ATTR_DSIZE_64BIT (0x0003) 89 - #define EDMA_TCD_ATTR_DSIZE_32BYTE (0x0005) 90 - 91 - #define EDMA_TCD_SOFF_SOFF(x) (x) 92 - #define EDMA_TCD_NBYTES_NBYTES(x) (x) 93 - #define EDMA_TCD_SLAST_SLAST(x) (x) 94 - #define EDMA_TCD_DADDR_DADDR(x) (x) 95 - #define EDMA_TCD_CITER_CITER(x) ((x) & 0x7FFF) 96 - #define EDMA_TCD_DOFF_DOFF(x) (x) 97 - #define EDMA_TCD_DLAST_SGA_DLAST_SGA(x) (x) 98 - #define EDMA_TCD_BITER_BITER(x) ((x) & 0x7FFF) 99 - 100 - #define EDMA_TCD_CSR_START BIT(0) 101 - #define EDMA_TCD_CSR_INT_MAJOR BIT(1) 102 - #define EDMA_TCD_CSR_INT_HALF BIT(2) 103 - #define EDMA_TCD_CSR_D_REQ BIT(3) 104 - #define EDMA_TCD_CSR_E_SG BIT(4) 105 - #define EDMA_TCD_CSR_E_LINK BIT(5) 106 - #define EDMA_TCD_CSR_ACTIVE BIT(6) 107 - #define EDMA_TCD_CSR_DONE BIT(7) 108 - 109 - #define EDMAMUX_CHCFG_DIS 0x0 110 - #define EDMAMUX_CHCFG_ENBL 0x80 111 - #define EDMAMUX_CHCFG_SOURCE(n) ((n) & 0x3F) 112 - 113 - #define DMAMUX_NR 2 114 - 115 - #define FSL_EDMA_BUSWIDTHS BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 116 - BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 117 - BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 118 - BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) 119 - enum fsl_edma_pm_state { 120 - RUNNING = 0, 121 - SUSPENDED, 122 - }; 123 - 124 - struct fsl_edma_hw_tcd { 125 - __le32 saddr; 126 - __le16 soff; 127 - __le16 attr; 128 - 
__le32 nbytes; 129 - __le32 slast; 130 - __le32 daddr; 131 - __le16 doff; 132 - __le16 citer; 133 - __le32 dlast_sga; 134 - __le16 csr; 135 - __le16 biter; 136 - }; 137 - 138 - struct fsl_edma_sw_tcd { 139 - dma_addr_t ptcd; 140 - struct fsl_edma_hw_tcd *vtcd; 141 - }; 142 - 143 - struct fsl_edma_slave_config { 144 - enum dma_transfer_direction dir; 145 - enum dma_slave_buswidth addr_width; 146 - u32 dev_addr; 147 - u32 burst; 148 - u32 attr; 149 - }; 150 - 151 - struct fsl_edma_chan { 152 - struct virt_dma_chan vchan; 153 - enum dma_status status; 154 - enum fsl_edma_pm_state pm_state; 155 - bool idle; 156 - u32 slave_id; 157 - struct fsl_edma_engine *edma; 158 - struct fsl_edma_desc *edesc; 159 - struct fsl_edma_slave_config fsc; 160 - struct dma_pool *tcd_pool; 161 - }; 162 - 163 - struct fsl_edma_desc { 164 - struct virt_dma_desc vdesc; 165 - struct fsl_edma_chan *echan; 166 - bool iscyclic; 167 - unsigned int n_tcds; 168 - struct fsl_edma_sw_tcd tcd[]; 169 - }; 170 - 171 - struct fsl_edma_engine { 172 - struct dma_device dma_dev; 173 - void __iomem *membase; 174 - void __iomem *muxbase[DMAMUX_NR]; 175 - struct clk *muxclk[DMAMUX_NR]; 176 - struct mutex fsl_edma_mutex; 177 - u32 n_chans; 178 - int txirq; 179 - int errirq; 180 - bool big_endian; 181 - struct fsl_edma_chan chans[]; 182 - }; 183 - 184 - /* 185 - * R/W functions for big- or little-endian registers: 186 - * The eDMA controller's endian is independent of the CPU core's endian. 187 - * For the big-endian IP module, the offset for 8-bit or 16-bit registers 188 - * should also be swapped opposite to that in little-endian IP. 
189 - */ 190 - 191 - static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) 192 - { 193 - if (edma->big_endian) 194 - return ioread32be(addr); 195 - else 196 - return ioread32(addr); 197 - } 198 - 199 - static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr) 200 - { 201 - /* swap the reg offset for these in big-endian mode */ 202 - if (edma->big_endian) 203 - iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3)); 204 - else 205 - iowrite8(val, addr); 206 - } 207 - 208 - static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr) 209 - { 210 - /* swap the reg offset for these in big-endian mode */ 211 - if (edma->big_endian) 212 - iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2)); 213 - else 214 - iowrite16(val, addr); 215 - } 216 - 217 - static void edma_writel(struct fsl_edma_engine *edma, u32 val, void __iomem *addr) 218 - { 219 - if (edma->big_endian) 220 - iowrite32be(val, addr); 221 - else 222 - iowrite32(val, addr); 223 - } 224 - 225 - static struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) 226 - { 227 - return container_of(chan, struct fsl_edma_chan, vchan.chan); 228 - } 229 - 230 - static struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd) 231 - { 232 - return container_of(vd, struct fsl_edma_desc, vdesc); 233 - } 234 - 235 - static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan) 236 - { 237 - void __iomem *addr = fsl_chan->edma->membase; 238 - u32 ch = fsl_chan->vchan.chan.chan_id; 239 - 240 - edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI); 241 - edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ); 242 - } 243 - 244 - static void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) 245 - { 246 - void __iomem *addr = fsl_chan->edma->membase; 247 - u32 ch = fsl_chan->vchan.chan.chan_id; 248 - 249 - edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ); 250 - edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + 
EDMA_CEEI); 251 - } 252 - 253 - static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, 254 - unsigned int slot, bool enable) 255 - { 256 - u32 ch = fsl_chan->vchan.chan.chan_id; 257 - void __iomem *muxaddr; 258 - unsigned chans_per_mux, ch_off; 259 - 260 - chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR; 261 - ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; 262 - muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; 263 - slot = EDMAMUX_CHCFG_SOURCE(slot); 264 - 265 - if (enable) 266 - iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off); 267 - else 268 - iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off); 269 - } 270 - 271 - static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width) 272 - { 273 - switch (addr_width) { 274 - case 1: 275 - return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT; 276 - case 2: 277 - return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT; 278 - case 4: 279 - return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT; 280 - case 8: 281 - return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT; 282 - default: 283 - return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT; 284 - } 285 - } 286 - 287 - static void fsl_edma_free_desc(struct virt_dma_desc *vdesc) 288 - { 289 - struct fsl_edma_desc *fsl_desc; 290 - int i; 291 - 292 - fsl_desc = to_fsl_edma_desc(vdesc); 293 - for (i = 0; i < fsl_desc->n_tcds; i++) 294 - dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd, 295 - fsl_desc->tcd[i].ptcd); 296 - kfree(fsl_desc); 297 - } 298 - 299 - static int fsl_edma_terminate_all(struct dma_chan *chan) 300 - { 301 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 302 - unsigned long flags; 303 - LIST_HEAD(head); 304 - 305 - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 306 - fsl_edma_disable_request(fsl_chan); 307 - fsl_chan->edesc = NULL; 308 - fsl_chan->idle = true; 309 - vchan_get_all_descriptors(&fsl_chan->vchan, &head); 310 - 
spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 311 - vchan_dma_desc_free_list(&fsl_chan->vchan, &head); 312 - return 0; 313 - } 314 - 315 - static int fsl_edma_pause(struct dma_chan *chan) 316 - { 317 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 318 - unsigned long flags; 319 - 320 - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 321 - if (fsl_chan->edesc) { 322 - fsl_edma_disable_request(fsl_chan); 323 - fsl_chan->status = DMA_PAUSED; 324 - fsl_chan->idle = true; 325 - } 326 - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 327 - return 0; 328 - } 329 - 330 - static int fsl_edma_resume(struct dma_chan *chan) 331 - { 332 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 333 - unsigned long flags; 334 - 335 - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 336 - if (fsl_chan->edesc) { 337 - fsl_edma_enable_request(fsl_chan); 338 - fsl_chan->status = DMA_IN_PROGRESS; 339 - fsl_chan->idle = false; 340 - } 341 - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 342 - return 0; 343 - } 344 - 345 - static int fsl_edma_slave_config(struct dma_chan *chan, 346 - struct dma_slave_config *cfg) 347 - { 348 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 349 - 350 - fsl_chan->fsc.dir = cfg->direction; 351 - if (cfg->direction == DMA_DEV_TO_MEM) { 352 - fsl_chan->fsc.dev_addr = cfg->src_addr; 353 - fsl_chan->fsc.addr_width = cfg->src_addr_width; 354 - fsl_chan->fsc.burst = cfg->src_maxburst; 355 - fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->src_addr_width); 356 - } else if (cfg->direction == DMA_MEM_TO_DEV) { 357 - fsl_chan->fsc.dev_addr = cfg->dst_addr; 358 - fsl_chan->fsc.addr_width = cfg->dst_addr_width; 359 - fsl_chan->fsc.burst = cfg->dst_maxburst; 360 - fsl_chan->fsc.attr = fsl_edma_get_tcd_attr(cfg->dst_addr_width); 361 - } else { 362 - return -EINVAL; 363 - } 364 - return 0; 365 - } 366 - 367 - static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan, 368 - struct virt_dma_desc *vdesc, bool in_progress) 
369 - { 370 - struct fsl_edma_desc *edesc = fsl_chan->edesc; 371 - void __iomem *addr = fsl_chan->edma->membase; 372 - u32 ch = fsl_chan->vchan.chan.chan_id; 373 - enum dma_transfer_direction dir = fsl_chan->fsc.dir; 374 - dma_addr_t cur_addr, dma_addr; 375 - size_t len, size; 376 - int i; 377 - 378 - /* calculate the total size in this desc */ 379 - for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++) 380 - len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes) 381 - * le16_to_cpu(edesc->tcd[i].vtcd->biter); 382 - 383 - if (!in_progress) 384 - return len; 385 - 386 - if (dir == DMA_MEM_TO_DEV) 387 - cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_SADDR(ch)); 388 - else 389 - cur_addr = edma_readl(fsl_chan->edma, addr + EDMA_TCD_DADDR(ch)); 390 - 391 - /* figure out the finished and calculate the residue */ 392 - for (i = 0; i < fsl_chan->edesc->n_tcds; i++) { 393 - size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes) 394 - * le16_to_cpu(edesc->tcd[i].vtcd->biter); 395 - if (dir == DMA_MEM_TO_DEV) 396 - dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr); 397 - else 398 - dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr); 399 - 400 - len -= size; 401 - if (cur_addr >= dma_addr && cur_addr < dma_addr + size) { 402 - len += dma_addr + size - cur_addr; 403 - break; 404 - } 405 - } 406 - 407 - return len; 408 - } 409 - 410 - static enum dma_status fsl_edma_tx_status(struct dma_chan *chan, 411 - dma_cookie_t cookie, struct dma_tx_state *txstate) 412 - { 413 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 414 - struct virt_dma_desc *vdesc; 415 - enum dma_status status; 416 - unsigned long flags; 417 - 418 - status = dma_cookie_status(chan, cookie, txstate); 419 - if (status == DMA_COMPLETE) 420 - return status; 421 - 422 - if (!txstate) 423 - return fsl_chan->status; 424 - 425 - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 426 - vdesc = vchan_find_desc(&fsl_chan->vchan, cookie); 427 - if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie) 428 - 
txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, true); 429 - else if (vdesc) 430 - txstate->residue = fsl_edma_desc_residue(fsl_chan, vdesc, false); 431 - else 432 - txstate->residue = 0; 433 - 434 - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 435 - 436 - return fsl_chan->status; 437 - } 438 - 439 - static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan, 440 - struct fsl_edma_hw_tcd *tcd) 441 - { 442 - struct fsl_edma_engine *edma = fsl_chan->edma; 443 - void __iomem *addr = fsl_chan->edma->membase; 444 - u32 ch = fsl_chan->vchan.chan.chan_id; 445 - 446 - /* 447 - * TCD parameters are stored in struct fsl_edma_hw_tcd in little 448 - * endian format. However, we need to load the TCD registers in 449 - * big- or little-endian obeying the eDMA engine model endian. 450 - */ 451 - edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch)); 452 - edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch)); 453 - edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch)); 454 - 455 - edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch)); 456 - edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch)); 457 - 458 - edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch)); 459 - edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch)); 460 - 461 - edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch)); 462 - edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch)); 463 - edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch)); 464 - 465 - edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch)); 466 - 467 - edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch)); 468 - } 469 - 470 - static inline 471 - void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst, 472 - u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer, 473 - u16 biter, u16 doff, u32 dlast_sga, bool major_int, 474 - bool disable_req, bool 
enable_sg) 475 - { 476 - u16 csr = 0; 477 - 478 - /* 479 - * eDMA hardware SGs require the TCDs to be stored in little 480 - * endian format irrespective of the register endian model. 481 - * So we put the value in little endian in memory, waiting 482 - * for fsl_edma_set_tcd_regs doing the swap. 483 - */ 484 - tcd->saddr = cpu_to_le32(src); 485 - tcd->daddr = cpu_to_le32(dst); 486 - 487 - tcd->attr = cpu_to_le16(attr); 488 - 489 - tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff)); 490 - 491 - tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes)); 492 - tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast)); 493 - 494 - tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer)); 495 - tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff)); 496 - 497 - tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga)); 498 - 499 - tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter)); 500 - if (major_int) 501 - csr |= EDMA_TCD_CSR_INT_MAJOR; 502 - 503 - if (disable_req) 504 - csr |= EDMA_TCD_CSR_D_REQ; 505 - 506 - if (enable_sg) 507 - csr |= EDMA_TCD_CSR_E_SG; 508 - 509 - tcd->csr = cpu_to_le16(csr); 510 - } 511 - 512 - static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, 513 - int sg_len) 514 - { 515 - struct fsl_edma_desc *fsl_desc; 516 - int i; 517 - 518 - fsl_desc = kzalloc(sizeof(*fsl_desc) + sizeof(struct fsl_edma_sw_tcd) * sg_len, 519 - GFP_NOWAIT); 520 - if (!fsl_desc) 521 - return NULL; 522 - 523 - fsl_desc->echan = fsl_chan; 524 - fsl_desc->n_tcds = sg_len; 525 - for (i = 0; i < sg_len; i++) { 526 - fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool, 527 - GFP_NOWAIT, &fsl_desc->tcd[i].ptcd); 528 - if (!fsl_desc->tcd[i].vtcd) 529 - goto err; 530 - } 531 - return fsl_desc; 532 - 533 - err: 534 - while (--i >= 0) 535 - dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd, 536 - fsl_desc->tcd[i].ptcd); 537 - kfree(fsl_desc); 538 - return NULL; 539 - } 540 - 541 - static struct dma_async_tx_descriptor 
*fsl_edma_prep_dma_cyclic( 542 - struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 543 - size_t period_len, enum dma_transfer_direction direction, 544 - unsigned long flags) 545 - { 546 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 547 - struct fsl_edma_desc *fsl_desc; 548 - dma_addr_t dma_buf_next; 549 - int sg_len, i; 550 - u32 src_addr, dst_addr, last_sg, nbytes; 551 - u16 soff, doff, iter; 552 - 553 - if (!is_slave_direction(fsl_chan->fsc.dir)) 554 - return NULL; 555 - 556 - sg_len = buf_len / period_len; 557 - fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); 558 - if (!fsl_desc) 559 - return NULL; 560 - fsl_desc->iscyclic = true; 561 - 562 - dma_buf_next = dma_addr; 563 - nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst; 564 - iter = period_len / nbytes; 565 - 566 - for (i = 0; i < sg_len; i++) { 567 - if (dma_buf_next >= dma_addr + buf_len) 568 - dma_buf_next = dma_addr; 569 - 570 - /* get next sg's physical address */ 571 - last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd; 572 - 573 - if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) { 574 - src_addr = dma_buf_next; 575 - dst_addr = fsl_chan->fsc.dev_addr; 576 - soff = fsl_chan->fsc.addr_width; 577 - doff = 0; 578 - } else { 579 - src_addr = fsl_chan->fsc.dev_addr; 580 - dst_addr = dma_buf_next; 581 - soff = 0; 582 - doff = fsl_chan->fsc.addr_width; 583 - } 584 - 585 - fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr, 586 - fsl_chan->fsc.attr, soff, nbytes, 0, iter, 587 - iter, doff, last_sg, true, false, true); 588 - dma_buf_next += period_len; 589 - } 590 - 591 - return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags); 592 - } 593 - 594 - static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg( 595 - struct dma_chan *chan, struct scatterlist *sgl, 596 - unsigned int sg_len, enum dma_transfer_direction direction, 597 - unsigned long flags, void *context) 598 - { 599 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 600 - struct fsl_edma_desc *fsl_desc; 
601 - struct scatterlist *sg; 602 - u32 src_addr, dst_addr, last_sg, nbytes; 603 - u16 soff, doff, iter; 604 - int i; 605 - 606 - if (!is_slave_direction(fsl_chan->fsc.dir)) 607 - return NULL; 608 - 609 - fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len); 610 - if (!fsl_desc) 611 - return NULL; 612 - fsl_desc->iscyclic = false; 613 - 614 - nbytes = fsl_chan->fsc.addr_width * fsl_chan->fsc.burst; 615 - for_each_sg(sgl, sg, sg_len, i) { 616 - /* get next sg's physical address */ 617 - last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd; 618 - 619 - if (fsl_chan->fsc.dir == DMA_MEM_TO_DEV) { 620 - src_addr = sg_dma_address(sg); 621 - dst_addr = fsl_chan->fsc.dev_addr; 622 - soff = fsl_chan->fsc.addr_width; 623 - doff = 0; 624 - } else { 625 - src_addr = fsl_chan->fsc.dev_addr; 626 - dst_addr = sg_dma_address(sg); 627 - soff = 0; 628 - doff = fsl_chan->fsc.addr_width; 629 - } 630 - 631 - iter = sg_dma_len(sg) / nbytes; 632 - if (i < sg_len - 1) { 633 - last_sg = fsl_desc->tcd[(i + 1)].ptcd; 634 - fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, 635 - dst_addr, fsl_chan->fsc.attr, soff, 636 - nbytes, 0, iter, iter, doff, last_sg, 637 - false, false, true); 638 - } else { 639 - last_sg = 0; 640 - fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, 641 - dst_addr, fsl_chan->fsc.attr, soff, 642 - nbytes, 0, iter, iter, doff, last_sg, 643 - true, true, false); 644 - } 645 - } 646 - 647 - return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags); 648 - } 649 - 650 - static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan) 651 - { 652 - struct virt_dma_desc *vdesc; 653 - 654 - vdesc = vchan_next_desc(&fsl_chan->vchan); 655 - if (!vdesc) 656 - return; 657 - fsl_chan->edesc = to_fsl_edma_desc(vdesc); 658 - fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd); 659 - fsl_edma_enable_request(fsl_chan); 660 - fsl_chan->status = DMA_IN_PROGRESS; 661 - fsl_chan->idle = false; 662 - } 25 + #include "fsl-edma-common.h" 663 26 664 27 static irqreturn_t 
fsl_edma_tx_handler(int irq, void *dev_id) 665 28 { ··· 93 730 return fsl_edma_err_handler(irq, dev_id); 94 731 } 95 732 96 - static void fsl_edma_issue_pending(struct dma_chan *chan) 97 - { 98 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 99 - unsigned long flags; 100 - 101 - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 102 - 103 - if (unlikely(fsl_chan->pm_state != RUNNING)) { 104 - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 105 - /* cannot submit due to suspend */ 106 - return; 107 - } 108 - 109 - if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc) 110 - fsl_edma_xfer_desc(fsl_chan); 111 - 112 - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 113 - } 114 - 115 733 static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, 116 734 struct of_dma *ofdma) 117 735 { ··· 123 779 } 124 780 mutex_unlock(&fsl_edma->fsl_edma_mutex); 125 781 return NULL; 126 - } 127 - 128 - static int fsl_edma_alloc_chan_resources(struct dma_chan *chan) 129 - { 130 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 131 - 132 - fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev, 133 - sizeof(struct fsl_edma_hw_tcd), 134 - 32, 0); 135 - return 0; 136 - } 137 - 138 - static void fsl_edma_free_chan_resources(struct dma_chan *chan) 139 - { 140 - struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); 141 - unsigned long flags; 142 - LIST_HEAD(head); 143 - 144 - spin_lock_irqsave(&fsl_chan->vchan.lock, flags); 145 - fsl_edma_disable_request(fsl_chan); 146 - fsl_edma_chan_mux(fsl_chan, 0, false); 147 - fsl_chan->edesc = NULL; 148 - vchan_get_all_descriptors(&fsl_chan->vchan, &head); 149 - spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags); 150 - 151 - vchan_dma_desc_free_list(&fsl_chan->vchan, &head); 152 - dma_pool_destroy(fsl_chan->tcd_pool); 153 - fsl_chan->tcd_pool = NULL; 154 782 } 155 783 156 784 static int ··· 309 993 edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR); 310 994 
311 995 return 0; 312 - } 313 - 314 - static void fsl_edma_cleanup_vchan(struct dma_device *dmadev) 315 - { 316 - struct fsl_edma_chan *chan, *_chan; 317 - 318 - list_for_each_entry_safe(chan, _chan, 319 - &dmadev->channels, vchan.chan.device_node) { 320 - list_del(&chan->vchan.chan.device_node); 321 - tasklet_kill(&chan->vchan.task); 322 - } 323 996 } 324 997 325 998 static int fsl_edma_remove(struct platform_device *pdev)