Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: fsl-edma: add edma version and configurable registers

This patch adds configurable registers (using __iomem addresses)
to allow the use of fsl-edma-common code with slightly different
edma module versions, such as Vybrid (v1) and ColdFire (v2).

Signed-off-by: Angelo Dureghello <angelo@sysam.it>
Tested-by: Krzysztof Kozlowski <krzk@kernel.org>
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Angelo Dureghello; committed by Vinod Koul.
377eaf3b 9d831528

+126 -74
+76 -30
drivers/dma/fsl-edma-common.c
··· 43 43 44 44 static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan) 45 45 { 46 - void __iomem *addr = fsl_chan->edma->membase; 46 + struct edma_regs *regs = &fsl_chan->edma->regs; 47 47 u32 ch = fsl_chan->vchan.chan.chan_id; 48 48 49 - edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), addr + EDMA_SEEI); 50 - edma_writeb(fsl_chan->edma, ch, addr + EDMA_SERQ); 49 + edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei); 50 + edma_writeb(fsl_chan->edma, ch, regs->serq); 51 51 } 52 52 53 53 void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan) 54 54 { 55 - void __iomem *addr = fsl_chan->edma->membase; 55 + struct edma_regs *regs = &fsl_chan->edma->regs; 56 56 u32 ch = fsl_chan->vchan.chan.chan_id; 57 57 58 - edma_writeb(fsl_chan->edma, ch, addr + EDMA_CERQ); 59 - edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), addr + EDMA_CEEI); 58 + edma_writeb(fsl_chan->edma, ch, regs->cerq); 59 + edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei); 60 60 } 61 61 EXPORT_SYMBOL_GPL(fsl_edma_disable_request); 62 62 ··· 184 184 struct virt_dma_desc *vdesc, bool in_progress) 185 185 { 186 186 struct fsl_edma_desc *edesc = fsl_chan->edesc; 187 - void __iomem *addr = fsl_chan->edma->membase; 187 + struct edma_regs *regs = &fsl_chan->edma->regs; 188 188 u32 ch = fsl_chan->vchan.chan.chan_id; 189 189 enum dma_transfer_direction dir = fsl_chan->fsc.dir; 190 190 dma_addr_t cur_addr, dma_addr; ··· 200 200 return len; 201 201 202 202 if (dir == DMA_MEM_TO_DEV) 203 - cur_addr = edma_readl( 204 - fsl_chan->edma, addr + EDMA_TCD_SADDR(ch)); 203 + cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr); 205 204 else 206 - cur_addr = edma_readl( 207 - fsl_chan->edma, addr + EDMA_TCD_DADDR(ch)); 205 + cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr); 208 206 209 207 /* figure out the finished and calculate the residue */ 210 208 for (i = 0; i < fsl_chan->edesc->n_tcds; i++) { ··· 259 261 struct fsl_edma_hw_tcd *tcd) 260 262 { 261 263 struct 
fsl_edma_engine *edma = fsl_chan->edma; 262 - void __iomem *addr = fsl_chan->edma->membase; 264 + struct edma_regs *regs = &fsl_chan->edma->regs; 263 265 u32 ch = fsl_chan->vchan.chan.chan_id; 264 266 265 267 /* ··· 267 269 * endian format. However, we need to load the TCD registers in 268 270 * big- or little-endian obeying the eDMA engine model endian. 269 271 */ 270 - edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch)); 271 - edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch)); 272 - edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch)); 272 + edma_writew(edma, 0, &regs->tcd[ch].csr); 273 + edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr); 274 + edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr); 273 275 274 - edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch)); 275 - edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch)); 276 + edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr); 277 + edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff); 276 278 277 - edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch)); 278 - edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch)); 279 + edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes); 280 + edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast); 279 281 280 - edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch)); 281 - edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch)); 282 - edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch)); 282 + edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer); 283 + edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter); 284 + edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff); 283 285 284 - edma_writel(edma, 285 - le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch)); 286 + edma_writel(edma, le32_to_cpu(tcd->dlast_sga), 287 + 
&regs->tcd[ch].dlast_sga); 286 288 287 - edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch)); 289 + edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr); 288 290 } 289 291 290 292 static inline ··· 306 308 307 309 tcd->attr = cpu_to_le16(attr); 308 310 309 - tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff)); 311 + tcd->soff = cpu_to_le16(soff); 310 312 311 - tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes)); 312 - tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast)); 313 + tcd->nbytes = cpu_to_le32(nbytes); 314 + tcd->slast = cpu_to_le32(slast); 313 315 314 316 tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer)); 315 - tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff)); 317 + tcd->doff = cpu_to_le16(doff); 316 318 317 - tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga)); 319 + tcd->dlast_sga = cpu_to_le32(dlast_sga); 318 320 319 321 tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter)); 320 322 if (major_int) ··· 546 548 } 547 549 } 548 550 EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan); 551 + 552 + /* 553 + * On the 32 channels Vybrid/mpc577x edma version (here called "v1"), 554 + * register offsets are different compared to ColdFire mcf5441x 64 channels 555 + * edma (here called "v2"). 556 + * 557 + * This function sets up register offsets as per proper declared version 558 + * so must be called in xxx_edma_probe() just after setting the 559 + * edma "version" and "membase" appropriately. 560 + */ 561 + void fsl_edma_setup_regs(struct fsl_edma_engine *edma) 562 + { 563 + edma->regs.cr = edma->membase + EDMA_CR; 564 + edma->regs.es = edma->membase + EDMA_ES; 565 + edma->regs.erql = edma->membase + EDMA_ERQ; 566 + edma->regs.eeil = edma->membase + EDMA_EEI; 567 + 568 + edma->regs.serq = edma->membase + ((edma->version == v1) ? 569 + EDMA_SERQ : EDMA64_SERQ); 570 + edma->regs.cerq = edma->membase + ((edma->version == v1) ? 571 + EDMA_CERQ : EDMA64_CERQ); 572 + edma->regs.seei = edma->membase + ((edma->version == v1) ? 
573 + EDMA_SEEI : EDMA64_SEEI); 574 + edma->regs.ceei = edma->membase + ((edma->version == v1) ? 575 + EDMA_CEEI : EDMA64_CEEI); 576 + edma->regs.cint = edma->membase + ((edma->version == v1) ? 577 + EDMA_CINT : EDMA64_CINT); 578 + edma->regs.cerr = edma->membase + ((edma->version == v1) ? 579 + EDMA_CERR : EDMA64_CERR); 580 + edma->regs.ssrt = edma->membase + ((edma->version == v1) ? 581 + EDMA_SSRT : EDMA64_SSRT); 582 + edma->regs.cdne = edma->membase + ((edma->version == v1) ? 583 + EDMA_CDNE : EDMA64_CDNE); 584 + edma->regs.intl = edma->membase + ((edma->version == v1) ? 585 + EDMA_INTR : EDMA64_INTL); 586 + edma->regs.errl = edma->membase + ((edma->version == v1) ? 587 + EDMA_ERR : EDMA64_ERRL); 588 + 589 + if (edma->version == v2) { 590 + edma->regs.erqh = edma->membase + EDMA64_ERQH; 591 + edma->regs.eeih = edma->membase + EDMA64_EEIH; 592 + edma->regs.errh = edma->membase + EDMA64_ERRH; 593 + edma->regs.inth = edma->membase + EDMA64_INTH; 594 + } 595 + 596 + edma->regs.tcd = edma->membase + EDMA_TCD; 597 + } 598 + EXPORT_SYMBOL_GPL(fsl_edma_setup_regs); 549 599 550 600 MODULE_LICENSE("GPL v2");
+33 -29
drivers/dma/fsl-edma-common.h
··· 8 8 9 9 #include "virt-dma.h" 10 10 11 - #define EDMA_CR 0x00 12 - #define EDMA_ES 0x04 13 - #define EDMA_ERQ 0x0C 14 - #define EDMA_EEI 0x14 15 - #define EDMA_SERQ 0x1B 16 - #define EDMA_CERQ 0x1A 17 - #define EDMA_SEEI 0x19 18 - #define EDMA_CEEI 0x18 19 - #define EDMA_CINT 0x1F 20 - #define EDMA_CERR 0x1E 21 - #define EDMA_SSRT 0x1D 22 - #define EDMA_CDNE 0x1C 23 - #define EDMA_INTR 0x24 24 - #define EDMA_ERR 0x2C 25 - 26 - #define EDMA_TCD_SADDR(x) (0x1000 + 32 * (x)) 27 - #define EDMA_TCD_SOFF(x) (0x1004 + 32 * (x)) 28 - #define EDMA_TCD_ATTR(x) (0x1006 + 32 * (x)) 29 - #define EDMA_TCD_NBYTES(x) (0x1008 + 32 * (x)) 30 - #define EDMA_TCD_SLAST(x) (0x100C + 32 * (x)) 31 - #define EDMA_TCD_DADDR(x) (0x1010 + 32 * (x)) 32 - #define EDMA_TCD_DOFF(x) (0x1014 + 32 * (x)) 33 - #define EDMA_TCD_CITER_ELINK(x) (0x1016 + 32 * (x)) 34 - #define EDMA_TCD_CITER(x) (0x1016 + 32 * (x)) 35 - #define EDMA_TCD_DLAST_SGA(x) (0x1018 + 32 * (x)) 36 - #define EDMA_TCD_CSR(x) (0x101C + 32 * (x)) 37 - #define EDMA_TCD_BITER_ELINK(x) (0x101E + 32 * (x)) 38 - #define EDMA_TCD_BITER(x) (0x101E + 32 * (x)) 39 - 40 11 #define EDMA_CR_EDBG BIT(1) 41 12 #define EDMA_CR_ERCA BIT(2) 42 13 #define EDMA_CR_ERGA BIT(3) ··· 85 114 __le16 biter; 86 115 }; 87 116 117 + /* 118 + * These are iomem pointers, for both v32 and v64. 
119 + */ 120 + struct edma_regs { 121 + void __iomem *cr; 122 + void __iomem *es; 123 + void __iomem *erqh; 124 + void __iomem *erql; /* aka erq on v32 */ 125 + void __iomem *eeih; 126 + void __iomem *eeil; /* aka eei on v32 */ 127 + void __iomem *seei; 128 + void __iomem *ceei; 129 + void __iomem *serq; 130 + void __iomem *cerq; 131 + void __iomem *cint; 132 + void __iomem *cerr; 133 + void __iomem *ssrt; 134 + void __iomem *cdne; 135 + void __iomem *inth; 136 + void __iomem *intl; 137 + void __iomem *errh; 138 + void __iomem *errl; 139 + struct fsl_edma_hw_tcd __iomem *tcd; 140 + }; 141 + 88 142 struct fsl_edma_sw_tcd { 89 143 dma_addr_t ptcd; 90 144 struct fsl_edma_hw_tcd *vtcd; ··· 143 147 struct fsl_edma_sw_tcd tcd[]; 144 148 }; 145 149 150 + enum edma_version { 151 + v1, /* 32ch, Vybdir, mpc57x, etc */ 152 + v2, /* 64ch Coldfire */ 153 + }; 154 + 146 155 struct fsl_edma_engine { 147 156 struct dma_device dma_dev; 148 157 void __iomem *membase; ··· 158 157 int txirq; 159 158 int errirq; 160 159 bool big_endian; 160 + enum edma_version version; 161 + struct edma_regs regs; 161 162 struct fsl_edma_chan chans[]; 162 163 }; 163 164 ··· 240 237 int fsl_edma_alloc_chan_resources(struct dma_chan *chan); 241 238 void fsl_edma_free_chan_resources(struct dma_chan *chan); 242 239 void fsl_edma_cleanup_vchan(struct dma_device *dmadev); 240 + void fsl_edma_setup_regs(struct fsl_edma_engine *edma); 243 241 244 242 #endif /* _FSL_EDMA_COMMON_H_ */
+17 -15
drivers/dma/fsl-edma.c
··· 28 28 { 29 29 struct fsl_edma_engine *fsl_edma = dev_id; 30 30 unsigned int intr, ch; 31 - void __iomem *base_addr; 31 + struct edma_regs *regs = &fsl_edma->regs; 32 32 struct fsl_edma_chan *fsl_chan; 33 33 34 - base_addr = fsl_edma->membase; 35 - 36 - intr = edma_readl(fsl_edma, base_addr + EDMA_INTR); 34 + intr = edma_readl(fsl_edma, regs->intl); 37 35 if (!intr) 38 36 return IRQ_NONE; 39 37 40 38 for (ch = 0; ch < fsl_edma->n_chans; ch++) { 41 39 if (intr & (0x1 << ch)) { 42 - edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), 43 - base_addr + EDMA_CINT); 40 + edma_writeb(fsl_edma, EDMA_CINT_CINT(ch), regs->cint); 44 41 45 42 fsl_chan = &fsl_edma->chans[ch]; 46 43 ··· 65 68 { 66 69 struct fsl_edma_engine *fsl_edma = dev_id; 67 70 unsigned int err, ch; 71 + struct edma_regs *regs = &fsl_edma->regs; 68 72 69 - err = edma_readl(fsl_edma, fsl_edma->membase + EDMA_ERR); 73 + err = edma_readl(fsl_edma, regs->errl); 70 74 if (!err) 71 75 return IRQ_NONE; 72 76 73 77 for (ch = 0; ch < fsl_edma->n_chans; ch++) { 74 78 if (err & (0x1 << ch)) { 75 79 fsl_edma_disable_request(&fsl_edma->chans[ch]); 76 - edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), 77 - fsl_edma->membase + EDMA_CERR); 80 + edma_writeb(fsl_edma, EDMA_CERR_CERR(ch), regs->cerr); 78 81 fsl_edma->chans[ch].status = DMA_ERROR; 79 82 fsl_edma->chans[ch].idle = true; 80 83 } ··· 189 192 struct device_node *np = pdev->dev.of_node; 190 193 struct fsl_edma_engine *fsl_edma; 191 194 struct fsl_edma_chan *fsl_chan; 195 + struct edma_regs *regs; 192 196 struct resource *res; 193 197 int len, chans; 194 198 int ret, i; ··· 205 207 if (!fsl_edma) 206 208 return -ENOMEM; 207 209 210 + fsl_edma->version = v1; 208 211 fsl_edma->n_chans = chans; 209 212 mutex_init(&fsl_edma->fsl_edma_mutex); 210 213 ··· 213 214 fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res); 214 215 if (IS_ERR(fsl_edma->membase)) 215 216 return PTR_ERR(fsl_edma->membase); 217 + 218 + fsl_edma_setup_regs(fsl_edma); 219 + regs = &fsl_edma->regs; 216 220 217 
221 for (i = 0; i < DMAMUX_NR; i++) { 218 222 char clkname[32]; ··· 257 255 fsl_chan->vchan.desc_free = fsl_edma_free_desc; 258 256 vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); 259 257 260 - edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i)); 258 + edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr); 261 259 fsl_edma_chan_mux(fsl_chan, 0, false); 262 260 } 263 261 264 - edma_writel(fsl_edma, ~0, fsl_edma->membase + EDMA_INTR); 262 + edma_writel(fsl_edma, ~0, regs->intl); 265 263 ret = fsl_edma_irq_init(pdev, fsl_edma); 266 264 if (ret) 267 265 return ret; ··· 308 306 } 309 307 310 308 /* enable round robin arbitration */ 311 - edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, fsl_edma->membase + EDMA_CR); 309 + edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr); 312 310 313 311 return 0; 314 312 } ··· 355 353 { 356 354 struct fsl_edma_engine *fsl_edma = dev_get_drvdata(dev); 357 355 struct fsl_edma_chan *fsl_chan; 356 + struct edma_regs *regs = &fsl_edma->regs; 358 357 int i; 359 358 360 359 for (i = 0; i < fsl_edma->n_chans; i++) { 361 360 fsl_chan = &fsl_edma->chans[i]; 362 361 fsl_chan->pm_state = RUNNING; 363 - edma_writew(fsl_edma, 0x0, fsl_edma->membase + EDMA_TCD_CSR(i)); 362 + edma_writew(fsl_edma, 0x0, &regs->tcd[i].csr); 364 363 if (fsl_chan->slave_id != 0) 365 364 fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true); 366 365 } 367 366 368 - edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, 369 - fsl_edma->membase + EDMA_CR); 367 + edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr); 370 368 371 369 return 0; 372 370 }