Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine update from Vinod Koul:
"This time we have a new dmaengine driver from the tegra folks. Also
we have Guennadi's cleanup of sh drivers which includes a library for
sh drivers. And the usual odd fixes in a bunch of drivers and some nice
cleanup of dw_dmac from Andy."

Fix up conflicts in drivers/mmc/host/sh_mmcif.c

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (46 commits)
dmaengine: Cleanup logging messages
mmc: sh_mmcif: switch to the new DMA channel allocation and configuration
dma: sh: provide a migration path for slave drivers to stop using .private
dma: sh: use an integer slave ID to improve API compatibility
dmaengine: shdma: prepare to stop using struct dma_chan::private
sh: remove unused DMA device pointer from SIU platform data
ASoC: siu: don't use DMA device for channel filtering
dmaengine: shdma: (cosmetic) simplify a static function
dmaengine: at_hdmac: add a few const qualifiers
dw_dmac: use 'u32' for LLI structure members, not dma_addr_t
dw_dmac: mark dwc_dump_lli inline
dma: mxs-dma: Export missing symbols from mxs-dma.c
dma: shdma: convert to the shdma base library
ASoC: fsi: prepare for conversion to the shdma base library
usb: renesas_usbhs: prepare for conversion to the shdma base library
ASoC: siu: prepare for conversion to the shdma base library
serial: sh-sci: prepare for conversion to the shdma base library
mmc: sh_mobile_sdhi: prepare for conversion to the shdma base library
mmc: sh_mmcif: remove unneeded struct sh_mmcif_dma, prepare to shdma conversion
dma: shdma: prepare for conversion to the shdma base library
...

+4294 -1813
-1
arch/sh/include/asm/siu.h
··· 14 14 struct device; 15 15 16 16 struct siu_platform { 17 - struct device *dma_dev; 18 17 unsigned int dma_slave_tx_a; 19 18 unsigned int dma_slave_rx_a; 20 19 unsigned int dma_slave_tx_b;
-1
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
··· 512 512 }; 513 513 514 514 static struct siu_platform siu_platform_data = { 515 - .dma_dev = &dma_device.dev, 516 515 .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX, 517 516 .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX, 518 517 .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX,
+25 -1
drivers/dma/Kconfig
··· 148 148 Support the TXx9 SoC internal DMA controller. This can be 149 149 integrated in chips such as the Toshiba TX4927/38/39. 150 150 151 + config TEGRA20_APB_DMA 152 + bool "NVIDIA Tegra20 APB DMA support" 153 + depends on ARCH_TEGRA 154 + select DMA_ENGINE 155 + help 156 + Support for the NVIDIA Tegra20 APB DMA controller driver. The 157 + DMA controller is having multiple DMA channel which can be 158 + configured for different peripherals like audio, UART, SPI, 159 + I2C etc which is in APB bus. 160 + This DMA controller transfers data from memory to peripheral fifo 161 + or vice versa. It does not support memory to memory data transfer. 162 + 163 + 164 + 151 165 config SH_DMAE 152 166 tristate "Renesas SuperH DMAC support" 153 167 depends on (SUPERH && SH_DMA) || (ARM && ARCH_SHMOBILE) ··· 251 237 252 238 config MXS_DMA 253 239 bool "MXS DMA support" 254 - depends on SOC_IMX23 || SOC_IMX28 240 + depends on SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q 255 241 select STMP_DEVICE 256 242 select DMA_ENGINE 257 243 help ··· 273 259 Support the DMA engine found on Intel StrongARM SA-1100 and 274 260 SA-1110 SoCs. This DMA engine can only be used with on-chip 275 261 devices. 262 + 263 + config MMP_TDMA 264 + bool "MMP Two-Channel DMA support" 265 + depends on ARCH_MMP 266 + select DMA_ENGINE 267 + help 268 + Support the MMP Two-Channel DMA engine. 269 + This engine used for MMP Audio DMA and pxa910 SQU. 270 + 271 + Say Y here if you enabled MMP ADMA, otherwise say N. 276 272 277 273 config DMA_ENGINE 278 274 bool
+3 -1
drivers/dma/Makefile
··· 14 14 obj-$(CONFIG_AT_HDMAC) += at_hdmac.o 15 15 obj-$(CONFIG_MX3_IPU) += ipu/ 16 16 obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o 17 - obj-$(CONFIG_SH_DMAE) += shdma.o 17 + obj-$(CONFIG_SH_DMAE) += sh/ 18 18 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o 19 19 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 20 20 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o ··· 23 23 obj-$(CONFIG_TIMB_DMA) += timb_dma.o 24 24 obj-$(CONFIG_SIRF_DMA) += sirf-dma.o 25 25 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 26 + obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o 26 27 obj-$(CONFIG_PL330_DMA) += pl330.o 27 28 obj-$(CONFIG_PCH_DMA) += pch_dma.o 28 29 obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o 29 30 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o 30 31 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o 32 + obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+5 -6
drivers/dma/at_hdmac.c
··· 9 9 * (at your option) any later version. 10 10 * 11 11 * 12 - * This supports the Atmel AHB DMA Controller, 13 - * 14 - * The driver has currently been tested with the Atmel AT91SAM9RL 15 - * and AT91SAM9G45 series. 12 + * This supports the Atmel AHB DMA Controller found in several Atmel SoCs. 13 + * The only Atmel DMA Controller that is not covered by this driver is the one 14 + * found on AT91SAM9263. 16 15 */ 17 16 18 17 #include <linux/clk.h> ··· 1216 1217 } 1217 1218 }; 1218 1219 1219 - static inline struct at_dma_platform_data * __init at_dma_get_driver_data( 1220 + static inline const struct at_dma_platform_data * __init at_dma_get_driver_data( 1220 1221 struct platform_device *pdev) 1221 1222 { 1222 1223 if (pdev->dev.of_node) { ··· 1254 1255 int irq; 1255 1256 int err; 1256 1257 int i; 1257 - struct at_dma_platform_data *plat_dat; 1258 + const struct at_dma_platform_data *plat_dat; 1258 1259 1259 1260 /* setup platform data for each SoC */ 1260 1261 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+23 -47
drivers/dma/coh901318.c
··· 1438 1438 1439 1439 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1440 1440 if (!io) 1441 - goto err_get_resource; 1441 + return -ENODEV; 1442 1442 1443 1443 /* Map DMA controller registers to virtual memory */ 1444 - if (request_mem_region(io->start, 1445 - resource_size(io), 1446 - pdev->dev.driver->name) == NULL) { 1447 - err = -EBUSY; 1448 - goto err_request_mem; 1449 - } 1444 + if (devm_request_mem_region(&pdev->dev, 1445 + io->start, 1446 + resource_size(io), 1447 + pdev->dev.driver->name) == NULL) 1448 + return -ENOMEM; 1450 1449 1451 1450 pdata = pdev->dev.platform_data; 1452 1451 if (!pdata) 1453 - goto err_no_platformdata; 1452 + return -ENODEV; 1454 1453 1455 - base = kmalloc(ALIGN(sizeof(struct coh901318_base), 4) + 1456 - pdata->max_channels * 1457 - sizeof(struct coh901318_chan), 1458 - GFP_KERNEL); 1454 + base = devm_kzalloc(&pdev->dev, 1455 + ALIGN(sizeof(struct coh901318_base), 4) + 1456 + pdata->max_channels * 1457 + sizeof(struct coh901318_chan), 1458 + GFP_KERNEL); 1459 1459 if (!base) 1460 - goto err_alloc_coh_dma_channels; 1460 + return -ENOMEM; 1461 1461 1462 1462 base->chans = ((void *)base) + ALIGN(sizeof(struct coh901318_base), 4); 1463 1463 1464 - base->virtbase = ioremap(io->start, resource_size(io)); 1465 - if (!base->virtbase) { 1466 - err = -ENOMEM; 1467 - goto err_no_ioremap; 1468 - } 1464 + base->virtbase = devm_ioremap(&pdev->dev, io->start, resource_size(io)); 1465 + if (!base->virtbase) 1466 + return -ENOMEM; 1469 1467 1470 1468 base->dev = &pdev->dev; 1471 1469 base->platform = pdata; ··· 1472 1474 1473 1475 COH901318_DEBUGFS_ASSIGN(debugfs_dma_base, base); 1474 1476 1475 - platform_set_drvdata(pdev, base); 1476 - 1477 1477 irq = platform_get_irq(pdev, 0); 1478 1478 if (irq < 0) 1479 - goto err_no_irq; 1479 + return irq; 1480 1480 1481 - err = request_irq(irq, dma_irq_handler, IRQF_DISABLED, 1482 - "coh901318", base); 1483 - if (err) { 1484 - dev_crit(&pdev->dev, 1485 - "Cannot allocate IRQ for DMA controller!\n"); 1486 - goto err_request_irq; 1487 - } 1481 + err = devm_request_irq(&pdev->dev, irq, dma_irq_handler, IRQF_DISABLED, 1482 + "coh901318", base); 1483 + if (err) 1484 + return err; 1488 1485 1489 1486 err = coh901318_pool_create(&base->pool, &pdev->dev, 1490 1487 sizeof(struct coh901318_lli), 1491 1488 32); 1492 1489 if (err) 1493 - goto err_pool_create; 1490 + return err; 1494 1491 1495 1492 /* init channels for device transfers */ 1496 1493 coh901318_base_init(&base->dma_slave, base->platform->chans_slave, ··· 1531 1538 if (err) 1532 1539 goto err_register_memcpy; 1533 1540 1541 + platform_set_drvdata(pdev, base); 1534 1542 dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n", 1535 1543 (u32) base->virtbase); 1536 1544 ··· 1541 1547 dma_async_device_unregister(&base->dma_slave); 1542 1548 err_register_slave: 1543 1549 coh901318_pool_destroy(&base->pool); 1544 - err_pool_create: 1545 - free_irq(platform_get_irq(pdev, 0), base); 1546 - err_request_irq: 1547 - err_no_irq: 1548 - iounmap(base->virtbase); 1549 - err_no_ioremap: 1550 - kfree(base); 1551 - err_alloc_coh_dma_channels: 1552 - err_no_platformdata: 1553 - release_mem_region(pdev->resource->start, 1554 - resource_size(pdev->resource)); 1555 - err_request_mem: 1556 - err_get_resource: 1557 1550 return err; 1558 1551 } 1559 1552 ··· 1551 1570 dma_async_device_unregister(&base->dma_memcpy); 1552 1571 dma_async_device_unregister(&base->dma_slave); 1553 1572 coh901318_pool_destroy(&base->pool); 1554 - free_irq(platform_get_irq(pdev, 0), base); 1555 - 
iounmap(base->virtbase); 1556 - kfree(base); 1557 - release_mem_region(pdev->resource->start, 1558 - resource_size(pdev->resource)); 1559 1573 return 0; 1560 1574 } 1561 1575
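
The coh901318 probe above is converted to managed (devm_*) resources, which is what allows all of the error-unwind labels to be deleted: anything obtained through a devm_* call is released automatically when probe fails or the device is unbound. A minimal sketch of the same pattern, not taken from this pull; the foo_* names are placeholders:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static irqreturn_t foo_irq_handler(int irq, void *data);       /* placeholder ISR */

static int foo_probe(struct platform_device *pdev)
{
        struct resource *io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        void __iomem *base;
        int irq, err;

        if (!io)
                return -ENODEV;

        if (!devm_request_mem_region(&pdev->dev, io->start,
                                     resource_size(io), pdev->name))
                return -EBUSY;

        base = devm_ioremap(&pdev->dev, io->start, resource_size(io));
        if (!base)
                return -ENOMEM;

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        err = devm_request_irq(&pdev->dev, irq, foo_irq_handler, 0,
                               dev_name(&pdev->dev), base);
        if (err)
                return err;

        /* no explicit cleanup needed on any of the error paths above */
        return 0;
}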
+12 -8
drivers/dma/dmaengine.c
··· 45 45 * See Documentation/dmaengine.txt for more details 46 46 */ 47 47 48 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 49 + 48 50 #include <linux/dma-mapping.h> 49 51 #include <linux/init.h> 50 52 #include <linux/module.h> ··· 263 261 do { 264 262 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 265 263 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 266 - printk(KERN_ERR "dma_sync_wait_timeout!\n"); 264 + pr_err("%s: timeout!\n", __func__); 267 265 return DMA_ERROR; 268 266 } 269 267 } while (status == DMA_IN_PROGRESS); ··· 314 312 } 315 313 316 314 if (err) { 317 - pr_err("dmaengine: initialization failure\n"); 315 + pr_err("initialization failure\n"); 318 316 for_each_dma_cap_mask(cap, dma_cap_mask_all) 319 317 if (channel_table[cap]) 320 318 free_percpu(channel_table[cap]); ··· 522 520 err = dma_chan_get(chan); 523 521 524 522 if (err == -ENODEV) { 525 - pr_debug("%s: %s module removed\n", __func__, 526 - dma_chan_name(chan)); 523 + pr_debug("%s: %s module removed\n", 524 + __func__, dma_chan_name(chan)); 527 525 list_del_rcu(&device->global_node); 528 526 } else if (err) 529 527 pr_debug("%s: failed to get %s: (%d)\n", 530 - __func__, dma_chan_name(chan), err); 528 + __func__, dma_chan_name(chan), err); 531 529 else 532 530 break; 533 531 if (--device->privatecnt == 0) ··· 537 535 } 538 536 mutex_unlock(&dma_list_mutex); 539 537 540 - pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail", 538 + pr_debug("%s: %s (%s)\n", 539 + __func__, 540 + chan ? "success" : "fail", 541 541 chan ? dma_chan_name(chan) : NULL); 542 542 543 543 return chan; ··· 583 579 break; 584 580 } else if (err) 585 581 pr_err("%s: failed to get %s: (%d)\n", 586 - __func__, dma_chan_name(chan), err); 582 + __func__, dma_chan_name(chan), err); 587 583 } 588 584 } 589 585 ··· 1019 1015 while (tx->cookie == -EBUSY) { 1020 1016 if (time_after_eq(jiffies, dma_sync_wait_timeout)) { 1021 1017 pr_err("%s timeout waiting for descriptor submission\n", 1022 - __func__); 1018 + __func__); 1023 1019 return DMA_ERROR; 1024 1020 } 1025 1021 cpu_relax();
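The pr_fmt() definition added at the top of dmaengine.c is what lets the hand-written "dmaengine: " prefixes be dropped from the messages in this hunk. As an illustration of the existing pr_fmt/pr_err macros (not code from this pull), a plain

        pr_err("initialization failure\n");

now expands to

        printk(KERN_ERR KBUILD_MODNAME ": " "initialization failure\n");

so the log line still begins with "dmaengine: ".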
+82 -100
drivers/dma/dw_dmac.c
··· 105 105 106 106 spin_lock_irqsave(&dwc->lock, flags); 107 107 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { 108 + i++; 108 109 if (async_tx_test_ack(&desc->txd)) { 109 110 list_del(&desc->desc_node); 110 111 ret = desc; 111 112 break; 112 113 } 113 114 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); 114 - i++; 115 115 } 116 116 spin_unlock_irqrestore(&dwc->lock, flags); 117 117 ··· 191 191 192 192 /*----------------------------------------------------------------------*/ 193 193 194 + static inline unsigned int dwc_fast_fls(unsigned long long v) 195 + { 196 + /* 197 + * We can be a lot more clever here, but this should take care 198 + * of the most common optimization. 199 + */ 200 + if (!(v & 7)) 201 + return 3; 202 + else if (!(v & 3)) 203 + return 2; 204 + else if (!(v & 1)) 205 + return 1; 206 + return 0; 207 + } 208 + 209 + static void dwc_dump_chan_regs(struct dw_dma_chan *dwc) 210 + { 211 + dev_err(chan2dev(&dwc->chan), 212 + " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 213 + channel_readl(dwc, SAR), 214 + channel_readl(dwc, DAR), 215 + channel_readl(dwc, LLP), 216 + channel_readl(dwc, CTL_HI), 217 + channel_readl(dwc, CTL_LO)); 218 + } 219 + 220 + 221 + static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc) 222 + { 223 + channel_clear_bit(dw, CH_EN, dwc->mask); 224 + while (dma_readl(dw, CH_EN) & dwc->mask) 225 + cpu_relax(); 226 + } 227 + 228 + /*----------------------------------------------------------------------*/ 229 + 194 230 /* Called with dwc->lock held and bh disabled */ 195 231 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 196 232 { ··· 236 200 if (dma_readl(dw, CH_EN) & dwc->mask) { 237 201 dev_err(chan2dev(&dwc->chan), 238 202 "BUG: Attempted to start non-idle channel\n"); 239 - dev_err(chan2dev(&dwc->chan), 240 - " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 241 - channel_readl(dwc, SAR), 242 - channel_readl(dwc, DAR), 243 - channel_readl(dwc, LLP), 244 - channel_readl(dwc, CTL_HI), 245 - channel_readl(dwc, CTL_LO)); 203 + dwc_dump_chan_regs(dwc); 246 204 247 205 /* The tasklet will hopefully advance the queue... */ 248 206 return; ··· 320 290 "BUG: XFER bit set, but channel not idle!\n"); 321 291 322 292 /* Try to continue after resetting the channel... */ 323 - channel_clear_bit(dw, CH_EN, dwc->mask); 324 - while (dma_readl(dw, CH_EN) & dwc->mask) 325 - cpu_relax(); 293 + dwc_chan_disable(dw, dwc); 326 294 } 327 295 328 296 /* ··· 365 337 return; 366 338 } 367 339 368 - dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); 340 + dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__, 341 + (unsigned long long)llp); 369 342 370 343 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { 371 344 /* check first descriptors addr */ ··· 402 373 "BUG: All descriptors done, but channel not idle!\n"); 403 374 404 375 /* Try to continue after resetting the channel... 
*/ 405 - channel_clear_bit(dw, CH_EN, dwc->mask); 406 - while (dma_readl(dw, CH_EN) & dwc->mask) 407 - cpu_relax(); 376 + dwc_chan_disable(dw, dwc); 408 377 409 378 if (!list_empty(&dwc->queue)) { 410 379 list_move(dwc->queue.next, &dwc->active_list); ··· 411 384 spin_unlock_irqrestore(&dwc->lock, flags); 412 385 } 413 386 414 - static void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 387 + static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_lli *lli) 415 388 { 416 389 dev_printk(KERN_CRIT, chan2dev(&dwc->chan), 417 390 " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", 418 - lli->sar, lli->dar, lli->llp, 419 - lli->ctlhi, lli->ctllo); 391 + lli->sar, lli->dar, lli->llp, lli->ctlhi, lli->ctllo); 420 392 } 421 393 422 394 static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc) ··· 513 487 514 488 spin_lock_irqsave(&dwc->lock, flags); 515 489 516 - dev_err(chan2dev(&dwc->chan), 517 - " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 518 - channel_readl(dwc, SAR), 519 - channel_readl(dwc, DAR), 520 - channel_readl(dwc, LLP), 521 - channel_readl(dwc, CTL_HI), 522 - channel_readl(dwc, CTL_LO)); 490 + dwc_dump_chan_regs(dwc); 523 491 524 - channel_clear_bit(dw, CH_EN, dwc->mask); 525 - while (dma_readl(dw, CH_EN) & dwc->mask) 526 - cpu_relax(); 492 + dwc_chan_disable(dw, dwc); 527 493 528 494 /* make sure DMA does not restart by loading a new list */ 529 495 channel_writel(dwc, LLP, 0); ··· 545 527 status_xfer = dma_readl(dw, RAW.XFER); 546 528 status_err = dma_readl(dw, RAW.ERROR); 547 529 548 - dev_vdbg(dw->dma.dev, "tasklet: status_err=%x\n", status_err); 530 + dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err); 549 531 550 532 for (i = 0; i < dw->dma.chancnt; i++) { 551 533 dwc = &dw->chan[i]; ··· 569 551 struct dw_dma *dw = dev_id; 570 552 u32 status; 571 553 572 - dev_vdbg(dw->dma.dev, "interrupt: status=0x%x\n", 554 + dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, 573 555 dma_readl(dw, STATUS_INT)); 574 556 575 557 /* ··· 615 597 * for DMA. But this is hard to do in a race-free manner. 616 598 */ 617 599 if (list_empty(&dwc->active_list)) { 618 - dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 600 + dev_vdbg(chan2dev(tx->chan), "%s: started %u\n", __func__, 619 601 desc->txd.cookie); 620 602 list_add_tail(&desc->desc_node, &dwc->active_list); 621 603 dwc_dostart(dwc, dwc_first_active(dwc)); 622 604 } else { 623 - dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 605 + dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, 624 606 desc->txd.cookie); 625 607 626 608 list_add_tail(&desc->desc_node, &dwc->queue); ··· 645 627 unsigned int dst_width; 646 628 u32 ctllo; 647 629 648 - dev_vdbg(chan2dev(chan), "prep_dma_memcpy d0x%x s0x%x l0x%zx f0x%lx\n", 649 - dest, src, len, flags); 630 + dev_vdbg(chan2dev(chan), 631 + "%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__, 632 + (unsigned long long)dest, (unsigned long long)src, 633 + len, flags); 650 634 651 635 if (unlikely(!len)) { 652 - dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); 636 + dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); 653 637 return NULL; 654 638 } 655 639 656 - /* 657 - * We can be a lot more clever here, but this should take care 658 - * of the most common optimization. 
659 - */ 660 - if (!((src | dest | len) & 7)) 661 - src_width = dst_width = 3; 662 - else if (!((src | dest | len) & 3)) 663 - src_width = dst_width = 2; 664 - else if (!((src | dest | len) & 1)) 665 - src_width = dst_width = 1; 666 - else 667 - src_width = dst_width = 0; 640 + src_width = dst_width = dwc_fast_fls(src | dest | len); 668 641 669 642 ctllo = DWC_DEFAULT_CTLLO(chan) 670 643 | DWC_CTLL_DST_WIDTH(dst_width) ··· 729 720 struct scatterlist *sg; 730 721 size_t total_len = 0; 731 722 732 - dev_vdbg(chan2dev(chan), "prep_dma_slave\n"); 723 + dev_vdbg(chan2dev(chan), "%s\n", __func__); 733 724 734 725 if (unlikely(!dws || !sg_len)) 735 726 return NULL; ··· 755 746 mem = sg_dma_address(sg); 756 747 len = sg_dma_len(sg); 757 748 758 - if (!((mem | len) & 7)) 759 - mem_width = 3; 760 - else if (!((mem | len) & 3)) 761 - mem_width = 2; 762 - else if (!((mem | len) & 1)) 763 - mem_width = 1; 764 - else 765 - mem_width = 0; 749 + mem_width = dwc_fast_fls(mem | len); 766 750 767 751 slave_sg_todev_fill_desc: 768 752 desc = dwc_desc_get(dwc); ··· 815 813 mem = sg_dma_address(sg); 816 814 len = sg_dma_len(sg); 817 815 818 - if (!((mem | len) & 7)) 819 - mem_width = 3; 820 - else if (!((mem | len) & 3)) 821 - mem_width = 2; 822 - else if (!((mem | len) & 1)) 823 - mem_width = 1; 824 - else 825 - mem_width = 0; 816 + mem_width = dwc_fast_fls(mem | len); 826 817 827 818 slave_sg_fromdev_fill_desc: 828 819 desc = dwc_desc_get(dwc); ··· 945 950 } else if (cmd == DMA_TERMINATE_ALL) { 946 951 spin_lock_irqsave(&dwc->lock, flags); 947 952 948 - channel_clear_bit(dw, CH_EN, dwc->mask); 949 - while (dma_readl(dw, CH_EN) & dwc->mask) 950 - cpu_relax(); 953 + dwc_chan_disable(dw, dwc); 951 954 952 955 dwc->paused = false; 953 956 ··· 1007 1014 int i; 1008 1015 unsigned long flags; 1009 1016 1010 - dev_vdbg(chan2dev(chan), "alloc_chan_resources\n"); 1017 + dev_vdbg(chan2dev(chan), "%s\n", __func__); 1011 1018 1012 1019 /* ASSERT: channel is idle */ 1013 1020 if (dma_readl(dw, CH_EN) & dwc->mask) { ··· 1050 1057 1051 1058 spin_unlock_irqrestore(&dwc->lock, flags); 1052 1059 1053 - dev_dbg(chan2dev(chan), 1054 - "alloc_chan_resources allocated %d descriptors\n", i); 1060 + dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i); 1055 1061 1056 1062 return i; 1057 1063 } ··· 1063 1071 unsigned long flags; 1064 1072 LIST_HEAD(list); 1065 1073 1066 - dev_dbg(chan2dev(chan), "free_chan_resources (descs allocated=%u)\n", 1074 + dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__, 1067 1075 dwc->descs_allocated); 1068 1076 1069 1077 /* ASSERT: channel is idle */ ··· 1089 1097 kfree(desc); 1090 1098 } 1091 1099 1092 - dev_vdbg(chan2dev(chan), "free_chan_resources done\n"); 1100 + dev_vdbg(chan2dev(chan), "%s: done\n", __func__); 1093 1101 } 1094 1102 1095 1103 /* --------------------- Cyclic DMA API extensions -------------------- */ ··· 1118 1126 if (dma_readl(dw, CH_EN) & dwc->mask) { 1119 1127 dev_err(chan2dev(&dwc->chan), 1120 1128 "BUG: Attempted to start non-idle channel\n"); 1121 - dev_err(chan2dev(&dwc->chan), 1122 - " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", 1123 - channel_readl(dwc, SAR), 1124 - channel_readl(dwc, DAR), 1125 - channel_readl(dwc, LLP), 1126 - channel_readl(dwc, CTL_HI), 1127 - channel_readl(dwc, CTL_LO)); 1129 + dwc_dump_chan_regs(dwc); 1128 1130 spin_unlock_irqrestore(&dwc->lock, flags); 1129 1131 return -EBUSY; 1130 1132 } ··· 1153 1167 1154 1168 spin_lock_irqsave(&dwc->lock, flags); 1155 1169 1156 - channel_clear_bit(dw, CH_EN, dwc->mask); 1157 - while 
(dma_readl(dw, CH_EN) & dwc->mask) 1158 - cpu_relax(); 1170 + dwc_chan_disable(dw, dwc); 1159 1171 1160 1172 spin_unlock_irqrestore(&dwc->lock, flags); 1161 1173 } ··· 1292 1308 dma_sync_single_for_device(chan2parent(chan), last->txd.phys, 1293 1309 sizeof(last->lli), DMA_TO_DEVICE); 1294 1310 1295 - dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%08x len %zu " 1296 - "period %zu periods %d\n", buf_addr, buf_len, 1297 - period_len, periods); 1311 + dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu " 1312 + "period %zu periods %d\n", (unsigned long long)buf_addr, 1313 + buf_len, period_len, periods); 1298 1314 1299 1315 cdesc->periods = periods; 1300 1316 dwc->cdesc = cdesc; ··· 1324 1340 int i; 1325 1341 unsigned long flags; 1326 1342 1327 - dev_dbg(chan2dev(&dwc->chan), "cyclic free\n"); 1343 + dev_dbg(chan2dev(&dwc->chan), "%s\n", __func__); 1328 1344 1329 1345 if (!cdesc) 1330 1346 return; 1331 1347 1332 1348 spin_lock_irqsave(&dwc->lock, flags); 1333 1349 1334 - channel_clear_bit(dw, CH_EN, dwc->mask); 1335 - while (dma_readl(dw, CH_EN) & dwc->mask) 1336 - cpu_relax(); 1350 + dwc_chan_disable(dw, dwc); 1337 1351 1338 1352 dma_writel(dw, CLEAR.ERROR, dwc->mask); 1339 1353 dma_writel(dw, CLEAR.XFER, dwc->mask); ··· 1368 1386 dw->chan[i].initialized = false; 1369 1387 } 1370 1388 1371 - static int __init dw_probe(struct platform_device *pdev) 1389 + static int __devinit dw_probe(struct platform_device *pdev) 1372 1390 { 1373 1391 struct dw_dma_platform_data *pdata; 1374 1392 struct resource *io; ··· 1414 1432 } 1415 1433 clk_prepare_enable(dw->clk); 1416 1434 1435 + /* Calculate all channel mask before DMA setup */ 1436 + dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1437 + 1417 1438 /* force dma off, just in case */ 1418 1439 dw_dma_off(dw); 1440 + 1441 + /* disable BLOCK interrupts as well */ 1442 + channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1419 1443 1420 1444 err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); 1421 1445 if (err) ··· 1430 1442 platform_set_drvdata(pdev, dw); 1431 1443 1432 1444 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1433 - 1434 - dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1435 1445 1436 1446 INIT_LIST_HEAD(&dw->dma.channels); 1437 1447 for (i = 0; i < pdata->nr_channels; i++) { ··· 1460 1474 channel_clear_bit(dw, CH_EN, dwc->mask); 1461 1475 } 1462 1476 1463 - /* Clear/disable all interrupts on all channels. */ 1477 + /* Clear all interrupts on all channels. 
*/ 1464 1478 dma_writel(dw, CLEAR.XFER, dw->all_chan_mask); 1479 + dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask); 1465 1480 dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask); 1466 1481 dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); 1467 1482 dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); 1468 - 1469 - channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask); 1470 - channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask); 1471 - channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask); 1472 - channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask); 1473 1483 1474 1484 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1475 1485 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); ··· 1505 1523 return err; 1506 1524 } 1507 1525 1508 - static int __exit dw_remove(struct platform_device *pdev) 1526 + static int __devexit dw_remove(struct platform_device *pdev) 1509 1527 { 1510 1528 struct dw_dma *dw = platform_get_drvdata(pdev); 1511 1529 struct dw_dma_chan *dwc, *_dwc; ··· 1584 1602 #endif 1585 1603 1586 1604 static struct platform_driver dw_driver = { 1587 - .remove = __exit_p(dw_remove), 1605 + .remove = __devexit_p(dw_remove), 1588 1606 .shutdown = dw_shutdown, 1589 1607 .driver = { 1590 1608 .name = "dw_dmac",
+4 -4
drivers/dma/dw_dmac_regs.h
··· 82 82 DW_REG(ID); 83 83 DW_REG(TEST); 84 84 85 - /* optional encoded params, 0x3c8..0x3 */ 85 + /* optional encoded params, 0x3c8..0x3f7 */ 86 86 }; 87 87 88 88 /* Bitfields in CTL_LO */ ··· 219 219 /* LLI == Linked List Item; a.k.a. DMA block descriptor */ 220 220 struct dw_lli { 221 221 /* values that are not changed by hardware */ 222 - dma_addr_t sar; 223 - dma_addr_t dar; 224 - dma_addr_t llp; /* chain to next lli */ 222 + u32 sar; 223 + u32 dar; 224 + u32 llp; /* chain to next lli */ 225 225 u32 ctllo; 226 226 /* values that may get written back: */ 227 227 u32 ctlhi;
+610
drivers/dma/mmp_tdma.c
··· 1 + /* 2 + * Driver For Marvell Two-channel DMA Engine 3 + * 4 + * Copyright: Marvell International Ltd. 5 + * 6 + * The code contained herein is licensed under the GNU General Public 7 + * License. You may obtain a copy of the GNU General Public License 8 + * Version 2 or later at the following locations: 9 + * 10 + */ 11 + 12 + #include <linux/module.h> 13 + #include <linux/init.h> 14 + #include <linux/types.h> 15 + #include <linux/interrupt.h> 16 + #include <linux/dma-mapping.h> 17 + #include <linux/slab.h> 18 + #include <linux/dmaengine.h> 19 + #include <linux/platform_device.h> 20 + #include <linux/device.h> 21 + #include <mach/regs-icu.h> 22 + #include <mach/sram.h> 23 + 24 + #include "dmaengine.h" 25 + 26 + /* 27 + * Two-Channel DMA registers 28 + */ 29 + #define TDBCR 0x00 /* Byte Count */ 30 + #define TDSAR 0x10 /* Src Addr */ 31 + #define TDDAR 0x20 /* Dst Addr */ 32 + #define TDNDPR 0x30 /* Next Desc */ 33 + #define TDCR 0x40 /* Control */ 34 + #define TDCP 0x60 /* Priority*/ 35 + #define TDCDPR 0x70 /* Current Desc */ 36 + #define TDIMR 0x80 /* Int Mask */ 37 + #define TDISR 0xa0 /* Int Status */ 38 + 39 + /* Two-Channel DMA Control Register */ 40 + #define TDCR_SSZ_8_BITS (0x0 << 22) /* Sample Size */ 41 + #define TDCR_SSZ_12_BITS (0x1 << 22) 42 + #define TDCR_SSZ_16_BITS (0x2 << 22) 43 + #define TDCR_SSZ_20_BITS (0x3 << 22) 44 + #define TDCR_SSZ_24_BITS (0x4 << 22) 45 + #define TDCR_SSZ_32_BITS (0x5 << 22) 46 + #define TDCR_SSZ_SHIFT (0x1 << 22) 47 + #define TDCR_SSZ_MASK (0x7 << 22) 48 + #define TDCR_SSPMOD (0x1 << 21) /* SSP MOD */ 49 + #define TDCR_ABR (0x1 << 20) /* Channel Abort */ 50 + #define TDCR_CDE (0x1 << 17) /* Close Desc Enable */ 51 + #define TDCR_PACKMOD (0x1 << 16) /* Pack Mode (ADMA Only) */ 52 + #define TDCR_CHANACT (0x1 << 14) /* Channel Active */ 53 + #define TDCR_FETCHND (0x1 << 13) /* Fetch Next Desc */ 54 + #define TDCR_CHANEN (0x1 << 12) /* Channel Enable */ 55 + #define TDCR_INTMODE (0x1 << 10) /* Interrupt Mode */ 56 + #define TDCR_CHAINMOD (0x1 << 9) /* Chain Mode */ 57 + #define TDCR_BURSTSZ_MSK (0x7 << 6) /* Burst Size */ 58 + #define TDCR_BURSTSZ_4B (0x0 << 6) 59 + #define TDCR_BURSTSZ_8B (0x1 << 6) 60 + #define TDCR_BURSTSZ_16B (0x3 << 6) 61 + #define TDCR_BURSTSZ_32B (0x6 << 6) 62 + #define TDCR_BURSTSZ_64B (0x7 << 6) 63 + #define TDCR_BURSTSZ_SQU_32B (0x7 << 6) 64 + #define TDCR_BURSTSZ_128B (0x5 << 6) 65 + #define TDCR_DSTDIR_MSK (0x3 << 4) /* Dst Direction */ 66 + #define TDCR_DSTDIR_ADDR_HOLD (0x2 << 4) /* Dst Addr Hold */ 67 + #define TDCR_DSTDIR_ADDR_INC (0x0 << 4) /* Dst Addr Increment */ 68 + #define TDCR_SRCDIR_MSK (0x3 << 2) /* Src Direction */ 69 + #define TDCR_SRCDIR_ADDR_HOLD (0x2 << 2) /* Src Addr Hold */ 70 + #define TDCR_SRCDIR_ADDR_INC (0x0 << 2) /* Src Addr Increment */ 71 + #define TDCR_DSTDESCCONT (0x1 << 1) 72 + #define TDCR_SRCDESTCONT (0x1 << 0) 73 + 74 + /* Two-Channel DMA Int Mask Register */ 75 + #define TDIMR_COMP (0x1 << 0) 76 + 77 + /* Two-Channel DMA Int Status Register */ 78 + #define TDISR_COMP (0x1 << 0) 79 + 80 + /* 81 + * Two-Channel DMA Descriptor Struct 82 + * NOTE: desc's buf must be aligned to 16 bytes. 
83 + */ 84 + struct mmp_tdma_desc { 85 + u32 byte_cnt; 86 + u32 src_addr; 87 + u32 dst_addr; 88 + u32 nxt_desc; 89 + }; 90 + 91 + enum mmp_tdma_type { 92 + MMP_AUD_TDMA = 0, 93 + PXA910_SQU, 94 + }; 95 + 96 + #define TDMA_ALIGNMENT 3 97 + #define TDMA_MAX_XFER_BYTES SZ_64K 98 + 99 + struct mmp_tdma_chan { 100 + struct device *dev; 101 + struct dma_chan chan; 102 + struct dma_async_tx_descriptor desc; 103 + struct tasklet_struct tasklet; 104 + 105 + struct mmp_tdma_desc *desc_arr; 106 + phys_addr_t desc_arr_phys; 107 + int desc_num; 108 + enum dma_transfer_direction dir; 109 + dma_addr_t dev_addr; 110 + u32 burst_sz; 111 + enum dma_slave_buswidth buswidth; 112 + enum dma_status status; 113 + 114 + int idx; 115 + enum mmp_tdma_type type; 116 + int irq; 117 + unsigned long reg_base; 118 + 119 + size_t buf_len; 120 + size_t period_len; 121 + size_t pos; 122 + }; 123 + 124 + #define TDMA_CHANNEL_NUM 2 125 + struct mmp_tdma_device { 126 + struct device *dev; 127 + void __iomem *base; 128 + struct dma_device device; 129 + struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; 130 + int irq; 131 + }; 132 + 133 + #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) 134 + 135 + static void mmp_tdma_chan_set_desc(struct mmp_tdma_chan *tdmac, dma_addr_t phys) 136 + { 137 + writel(phys, tdmac->reg_base + TDNDPR); 138 + writel(readl(tdmac->reg_base + TDCR) | TDCR_FETCHND, 139 + tdmac->reg_base + TDCR); 140 + } 141 + 142 + static void mmp_tdma_enable_chan(struct mmp_tdma_chan *tdmac) 143 + { 144 + /* enable irq */ 145 + writel(TDIMR_COMP, tdmac->reg_base + TDIMR); 146 + /* enable dma chan */ 147 + writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, 148 + tdmac->reg_base + TDCR); 149 + tdmac->status = DMA_IN_PROGRESS; 150 + } 151 + 152 + static void mmp_tdma_disable_chan(struct mmp_tdma_chan *tdmac) 153 + { 154 + writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 155 + tdmac->reg_base + TDCR); 156 + tdmac->status = DMA_SUCCESS; 157 + } 158 + 159 + static void mmp_tdma_resume_chan(struct mmp_tdma_chan *tdmac) 160 + { 161 + writel(readl(tdmac->reg_base + TDCR) | TDCR_CHANEN, 162 + tdmac->reg_base + TDCR); 163 + tdmac->status = DMA_IN_PROGRESS; 164 + } 165 + 166 + static void mmp_tdma_pause_chan(struct mmp_tdma_chan *tdmac) 167 + { 168 + writel(readl(tdmac->reg_base + TDCR) & ~TDCR_CHANEN, 169 + tdmac->reg_base + TDCR); 170 + tdmac->status = DMA_PAUSED; 171 + } 172 + 173 + static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac) 174 + { 175 + unsigned int tdcr; 176 + 177 + mmp_tdma_disable_chan(tdmac); 178 + 179 + if (tdmac->dir == DMA_MEM_TO_DEV) 180 + tdcr = TDCR_DSTDIR_ADDR_HOLD | TDCR_SRCDIR_ADDR_INC; 181 + else if (tdmac->dir == DMA_DEV_TO_MEM) 182 + tdcr = TDCR_SRCDIR_ADDR_HOLD | TDCR_DSTDIR_ADDR_INC; 183 + 184 + if (tdmac->type == MMP_AUD_TDMA) { 185 + tdcr |= TDCR_PACKMOD; 186 + 187 + switch (tdmac->burst_sz) { 188 + case 4: 189 + tdcr |= TDCR_BURSTSZ_4B; 190 + break; 191 + case 8: 192 + tdcr |= TDCR_BURSTSZ_8B; 193 + break; 194 + case 16: 195 + tdcr |= TDCR_BURSTSZ_16B; 196 + break; 197 + case 32: 198 + tdcr |= TDCR_BURSTSZ_32B; 199 + break; 200 + case 64: 201 + tdcr |= TDCR_BURSTSZ_64B; 202 + break; 203 + case 128: 204 + tdcr |= TDCR_BURSTSZ_128B; 205 + break; 206 + default: 207 + dev_err(tdmac->dev, "mmp_tdma: unknown burst size.\n"); 208 + return -EINVAL; 209 + } 210 + 211 + switch (tdmac->buswidth) { 212 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 213 + tdcr |= TDCR_SSZ_8_BITS; 214 + break; 215 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 216 + tdcr |= TDCR_SSZ_16_BITS; 217 + break; 218 + 
case DMA_SLAVE_BUSWIDTH_4_BYTES: 219 + tdcr |= TDCR_SSZ_32_BITS; 220 + break; 221 + default: 222 + dev_err(tdmac->dev, "mmp_tdma: unknown bus size.\n"); 223 + return -EINVAL; 224 + } 225 + } else if (tdmac->type == PXA910_SQU) { 226 + tdcr |= TDCR_BURSTSZ_SQU_32B; 227 + tdcr |= TDCR_SSPMOD; 228 + } 229 + 230 + writel(tdcr, tdmac->reg_base + TDCR); 231 + return 0; 232 + } 233 + 234 + static int mmp_tdma_clear_chan_irq(struct mmp_tdma_chan *tdmac) 235 + { 236 + u32 reg = readl(tdmac->reg_base + TDISR); 237 + 238 + if (reg & TDISR_COMP) { 239 + /* clear irq */ 240 + reg &= ~TDISR_COMP; 241 + writel(reg, tdmac->reg_base + TDISR); 242 + 243 + return 0; 244 + } 245 + return -EAGAIN; 246 + } 247 + 248 + static irqreturn_t mmp_tdma_chan_handler(int irq, void *dev_id) 249 + { 250 + struct mmp_tdma_chan *tdmac = dev_id; 251 + 252 + if (mmp_tdma_clear_chan_irq(tdmac) == 0) { 253 + tdmac->pos = (tdmac->pos + tdmac->period_len) % tdmac->buf_len; 254 + tasklet_schedule(&tdmac->tasklet); 255 + return IRQ_HANDLED; 256 + } else 257 + return IRQ_NONE; 258 + } 259 + 260 + static irqreturn_t mmp_tdma_int_handler(int irq, void *dev_id) 261 + { 262 + struct mmp_tdma_device *tdev = dev_id; 263 + int i, ret; 264 + int irq_num = 0; 265 + 266 + for (i = 0; i < TDMA_CHANNEL_NUM; i++) { 267 + struct mmp_tdma_chan *tdmac = tdev->tdmac[i]; 268 + 269 + ret = mmp_tdma_chan_handler(irq, tdmac); 270 + if (ret == IRQ_HANDLED) 271 + irq_num++; 272 + } 273 + 274 + if (irq_num) 275 + return IRQ_HANDLED; 276 + else 277 + return IRQ_NONE; 278 + } 279 + 280 + static void dma_do_tasklet(unsigned long data) 281 + { 282 + struct mmp_tdma_chan *tdmac = (struct mmp_tdma_chan *)data; 283 + 284 + if (tdmac->desc.callback) 285 + tdmac->desc.callback(tdmac->desc.callback_param); 286 + 287 + } 288 + 289 + static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac) 290 + { 291 + struct gen_pool *gpool; 292 + int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 293 + 294 + gpool = sram_get_gpool("asram"); 295 + if (tdmac->desc_arr) 296 + gen_pool_free(gpool, (unsigned long)tdmac->desc_arr, 297 + size); 298 + tdmac->desc_arr = NULL; 299 + 300 + return; 301 + } 302 + 303 + static dma_cookie_t mmp_tdma_tx_submit(struct dma_async_tx_descriptor *tx) 304 + { 305 + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(tx->chan); 306 + 307 + mmp_tdma_chan_set_desc(tdmac, tdmac->desc_arr_phys); 308 + 309 + return 0; 310 + } 311 + 312 + static int mmp_tdma_alloc_chan_resources(struct dma_chan *chan) 313 + { 314 + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 315 + int ret; 316 + 317 + dma_async_tx_descriptor_init(&tdmac->desc, chan); 318 + tdmac->desc.tx_submit = mmp_tdma_tx_submit; 319 + 320 + if (tdmac->irq) { 321 + ret = devm_request_irq(tdmac->dev, tdmac->irq, 322 + mmp_tdma_chan_handler, IRQF_DISABLED, "tdma", tdmac); 323 + if (ret) 324 + return ret; 325 + } 326 + return 1; 327 + } 328 + 329 + static void mmp_tdma_free_chan_resources(struct dma_chan *chan) 330 + { 331 + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 332 + 333 + if (tdmac->irq) 334 + devm_free_irq(tdmac->dev, tdmac->irq, tdmac); 335 + mmp_tdma_free_descriptor(tdmac); 336 + return; 337 + } 338 + 339 + struct mmp_tdma_desc *mmp_tdma_alloc_descriptor(struct mmp_tdma_chan *tdmac) 340 + { 341 + struct gen_pool *gpool; 342 + int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc); 343 + 344 + gpool = sram_get_gpool("asram"); 345 + if (!gpool) 346 + return NULL; 347 + 348 + tdmac->desc_arr = (void *)gen_pool_alloc(gpool, size); 349 + if (!tdmac->desc_arr) 350 + 
return NULL; 351 + 352 + tdmac->desc_arr_phys = gen_pool_virt_to_phys(gpool, 353 + (unsigned long)tdmac->desc_arr); 354 + 355 + return tdmac->desc_arr; 356 + } 357 + 358 + static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic( 359 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 360 + size_t period_len, enum dma_transfer_direction direction, 361 + void *context) 362 + { 363 + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 364 + struct mmp_tdma_desc *desc; 365 + int num_periods = buf_len / period_len; 366 + int i = 0, buf = 0; 367 + 368 + if (tdmac->status != DMA_SUCCESS) 369 + return NULL; 370 + 371 + if (period_len > TDMA_MAX_XFER_BYTES) { 372 + dev_err(tdmac->dev, 373 + "maximum period size exceeded: %d > %d\n", 374 + period_len, TDMA_MAX_XFER_BYTES); 375 + goto err_out; 376 + } 377 + 378 + tdmac->status = DMA_IN_PROGRESS; 379 + tdmac->desc_num = num_periods; 380 + desc = mmp_tdma_alloc_descriptor(tdmac); 381 + if (!desc) 382 + goto err_out; 383 + 384 + while (buf < buf_len) { 385 + desc = &tdmac->desc_arr[i]; 386 + 387 + if (i + 1 == num_periods) 388 + desc->nxt_desc = tdmac->desc_arr_phys; 389 + else 390 + desc->nxt_desc = tdmac->desc_arr_phys + 391 + sizeof(*desc) * (i + 1); 392 + 393 + if (direction == DMA_MEM_TO_DEV) { 394 + desc->src_addr = dma_addr; 395 + desc->dst_addr = tdmac->dev_addr; 396 + } else { 397 + desc->src_addr = tdmac->dev_addr; 398 + desc->dst_addr = dma_addr; 399 + } 400 + desc->byte_cnt = period_len; 401 + dma_addr += period_len; 402 + buf += period_len; 403 + i++; 404 + } 405 + 406 + tdmac->buf_len = buf_len; 407 + tdmac->period_len = period_len; 408 + tdmac->pos = 0; 409 + 410 + return &tdmac->desc; 411 + 412 + err_out: 413 + tdmac->status = DMA_ERROR; 414 + return NULL; 415 + } 416 + 417 + static int mmp_tdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 418 + unsigned long arg) 419 + { 420 + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 421 + struct dma_slave_config *dmaengine_cfg = (void *)arg; 422 + int ret = 0; 423 + 424 + switch (cmd) { 425 + case DMA_TERMINATE_ALL: 426 + mmp_tdma_disable_chan(tdmac); 427 + break; 428 + case DMA_PAUSE: 429 + mmp_tdma_pause_chan(tdmac); 430 + break; 431 + case DMA_RESUME: 432 + mmp_tdma_resume_chan(tdmac); 433 + break; 434 + case DMA_SLAVE_CONFIG: 435 + if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) { 436 + tdmac->dev_addr = dmaengine_cfg->src_addr; 437 + tdmac->burst_sz = dmaengine_cfg->src_maxburst; 438 + tdmac->buswidth = dmaengine_cfg->src_addr_width; 439 + } else { 440 + tdmac->dev_addr = dmaengine_cfg->dst_addr; 441 + tdmac->burst_sz = dmaengine_cfg->dst_maxburst; 442 + tdmac->buswidth = dmaengine_cfg->dst_addr_width; 443 + } 444 + tdmac->dir = dmaengine_cfg->direction; 445 + return mmp_tdma_config_chan(tdmac); 446 + default: 447 + ret = -ENOSYS; 448 + } 449 + 450 + return ret; 451 + } 452 + 453 + static enum dma_status mmp_tdma_tx_status(struct dma_chan *chan, 454 + dma_cookie_t cookie, struct dma_tx_state *txstate) 455 + { 456 + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 457 + 458 + dma_set_residue(txstate, tdmac->buf_len - tdmac->pos); 459 + 460 + return tdmac->status; 461 + } 462 + 463 + static void mmp_tdma_issue_pending(struct dma_chan *chan) 464 + { 465 + struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan); 466 + 467 + mmp_tdma_enable_chan(tdmac); 468 + } 469 + 470 + static int __devexit mmp_tdma_remove(struct platform_device *pdev) 471 + { 472 + struct mmp_tdma_device *tdev = platform_get_drvdata(pdev); 473 + 474 + 
dma_async_device_unregister(&tdev->device); 475 + return 0; 476 + } 477 + 478 + static int __devinit mmp_tdma_chan_init(struct mmp_tdma_device *tdev, 479 + int idx, int irq, int type) 480 + { 481 + struct mmp_tdma_chan *tdmac; 482 + 483 + if (idx >= TDMA_CHANNEL_NUM) { 484 + dev_err(tdev->dev, "too many channels for device!\n"); 485 + return -EINVAL; 486 + } 487 + 488 + /* alloc channel */ 489 + tdmac = devm_kzalloc(tdev->dev, sizeof(*tdmac), GFP_KERNEL); 490 + if (!tdmac) { 491 + dev_err(tdev->dev, "no free memory for DMA channels!\n"); 492 + return -ENOMEM; 493 + } 494 + if (irq) 495 + tdmac->irq = irq + idx; 496 + tdmac->dev = tdev->dev; 497 + tdmac->chan.device = &tdev->device; 498 + tdmac->idx = idx; 499 + tdmac->type = type; 500 + tdmac->reg_base = (unsigned long)tdev->base + idx * 4; 501 + tdmac->status = DMA_SUCCESS; 502 + tdev->tdmac[tdmac->idx] = tdmac; 503 + tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac); 504 + 505 + /* add the channel to tdma_chan list */ 506 + list_add_tail(&tdmac->chan.device_node, 507 + &tdev->device.channels); 508 + 509 + return 0; 510 + } 511 + 512 + static int __devinit mmp_tdma_probe(struct platform_device *pdev) 513 + { 514 + const struct platform_device_id *id = platform_get_device_id(pdev); 515 + enum mmp_tdma_type type = id->driver_data; 516 + struct mmp_tdma_device *tdev; 517 + struct resource *iores; 518 + int i, ret; 519 + int irq = 0; 520 + int chan_num = TDMA_CHANNEL_NUM; 521 + 522 + /* always have couple channels */ 523 + tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); 524 + if (!tdev) 525 + return -ENOMEM; 526 + 527 + tdev->dev = &pdev->dev; 528 + iores = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 529 + if (!iores) 530 + return -EINVAL; 531 + 532 + if (resource_size(iores) != chan_num) 533 + tdev->irq = iores->start; 534 + else 535 + irq = iores->start; 536 + 537 + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 538 + if (!iores) 539 + return -EINVAL; 540 + 541 + tdev->base = devm_request_and_ioremap(&pdev->dev, iores); 542 + if (!tdev->base) 543 + return -EADDRNOTAVAIL; 544 + 545 + if (tdev->irq) { 546 + ret = devm_request_irq(&pdev->dev, tdev->irq, 547 + mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); 548 + if (ret) 549 + return ret; 550 + } 551 + 552 + dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); 553 + dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); 554 + 555 + INIT_LIST_HEAD(&tdev->device.channels); 556 + 557 + /* initialize channel parameters */ 558 + for (i = 0; i < chan_num; i++) { 559 + ret = mmp_tdma_chan_init(tdev, i, irq, type); 560 + if (ret) 561 + return ret; 562 + } 563 + 564 + tdev->device.dev = &pdev->dev; 565 + tdev->device.device_alloc_chan_resources = 566 + mmp_tdma_alloc_chan_resources; 567 + tdev->device.device_free_chan_resources = 568 + mmp_tdma_free_chan_resources; 569 + tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic; 570 + tdev->device.device_tx_status = mmp_tdma_tx_status; 571 + tdev->device.device_issue_pending = mmp_tdma_issue_pending; 572 + tdev->device.device_control = mmp_tdma_control; 573 + tdev->device.copy_align = TDMA_ALIGNMENT; 574 + 575 + dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); 576 + platform_set_drvdata(pdev, tdev); 577 + 578 + ret = dma_async_device_register(&tdev->device); 579 + if (ret) { 580 + dev_err(tdev->device.dev, "unable to register\n"); 581 + return ret; 582 + } 583 + 584 + dev_info(tdev->device.dev, "initialized\n"); 585 + return 0; 586 + } 587 + 588 + static const struct platform_device_id mmp_tdma_id_table[] = { 589 + { 
"mmp-adma", MMP_AUD_TDMA }, 590 + { "pxa910-squ", PXA910_SQU }, 591 + { }, 592 + }; 593 + 594 + static struct platform_driver mmp_tdma_driver = { 595 + .driver = { 596 + .name = "mmp-tdma", 597 + .owner = THIS_MODULE, 598 + }, 599 + .id_table = mmp_tdma_id_table, 600 + .probe = mmp_tdma_probe, 601 + .remove = __devexit_p(mmp_tdma_remove), 602 + }; 603 + 604 + module_platform_driver(mmp_tdma_driver); 605 + 606 + MODULE_LICENSE("GPL"); 607 + MODULE_DESCRIPTION("MMP Two-Channel DMA Driver"); 608 + MODULE_ALIAS("platform:mmp-tdma"); 609 + MODULE_AUTHOR("Leo Yan <leoy@marvell.com>"); 610 + MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>");
+2 -1
drivers/dma/mxs-dma.c
··· 29 29 #include <linux/of_device.h> 30 30 31 31 #include <asm/irq.h> 32 - #include <mach/mxs.h> 33 32 34 33 #include "dmaengine.h" 35 34 ··· 200 201 201 202 return dma_is_apbh(mxs_dma); 202 203 } 204 + EXPORT_SYMBOL_GPL(mxs_dma_is_apbh); 203 205 204 206 int mxs_dma_is_apbx(struct dma_chan *chan) 205 207 { ··· 209 209 210 210 return !dma_is_apbh(mxs_dma); 211 211 } 212 + EXPORT_SYMBOL_GPL(mxs_dma_is_apbx); 212 213 213 214 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 214 215 {
+2
drivers/dma/sh/Makefile
··· 1 + obj-$(CONFIG_SH_DMAE) += shdma-base.o 2 + obj-$(CONFIG_SH_DMAE) += shdma.o
+934
drivers/dma/sh/shdma-base.c
··· 1 + /* 2 + * Dmaengine driver base library for DMA controllers, found on SH-based SoCs 3 + * 4 + * extracted from shdma.c 5 + * 6 + * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 7 + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> 8 + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. 9 + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 10 + * 11 + * This is free software; you can redistribute it and/or modify 12 + * it under the terms of version 2 of the GNU General Public License as 13 + * published by the Free Software Foundation. 14 + */ 15 + 16 + #include <linux/delay.h> 17 + #include <linux/shdma-base.h> 18 + #include <linux/dmaengine.h> 19 + #include <linux/init.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/module.h> 22 + #include <linux/pm_runtime.h> 23 + #include <linux/slab.h> 24 + #include <linux/spinlock.h> 25 + 26 + #include "../dmaengine.h" 27 + 28 + /* DMA descriptor control */ 29 + enum shdma_desc_status { 30 + DESC_IDLE, 31 + DESC_PREPARED, 32 + DESC_SUBMITTED, 33 + DESC_COMPLETED, /* completed, have to call callback */ 34 + DESC_WAITING, /* callback called, waiting for ack / re-submit */ 35 + }; 36 + 37 + #define NR_DESCS_PER_CHANNEL 32 38 + 39 + #define to_shdma_chan(c) container_of(c, struct shdma_chan, dma_chan) 40 + #define to_shdma_dev(d) container_of(d, struct shdma_dev, dma_dev) 41 + 42 + /* 43 + * For slave DMA we assume, that there is a finite number of DMA slaves in the 44 + * system, and that each such slave can only use a finite number of channels. 45 + * We use slave channel IDs to make sure, that no such slave channel ID is 46 + * allocated more than once. 47 + */ 48 + static unsigned int slave_num = 256; 49 + module_param(slave_num, uint, 0444); 50 + 51 + /* A bitmask with slave_num bits */ 52 + static unsigned long *shdma_slave_used; 53 + 54 + /* Called under spin_lock_irq(&schan->chan_lock") */ 55 + static void shdma_chan_xfer_ld_queue(struct shdma_chan *schan) 56 + { 57 + struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 58 + const struct shdma_ops *ops = sdev->ops; 59 + struct shdma_desc *sdesc; 60 + 61 + /* DMA work check */ 62 + if (ops->channel_busy(schan)) 63 + return; 64 + 65 + /* Find the first not transferred descriptor */ 66 + list_for_each_entry(sdesc, &schan->ld_queue, node) 67 + if (sdesc->mark == DESC_SUBMITTED) { 68 + ops->start_xfer(schan, sdesc); 69 + break; 70 + } 71 + } 72 + 73 + static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) 74 + { 75 + struct shdma_desc *chunk, *c, *desc = 76 + container_of(tx, struct shdma_desc, async_tx), 77 + *last = desc; 78 + struct shdma_chan *schan = to_shdma_chan(tx->chan); 79 + dma_async_tx_callback callback = tx->callback; 80 + dma_cookie_t cookie; 81 + bool power_up; 82 + 83 + spin_lock_irq(&schan->chan_lock); 84 + 85 + power_up = list_empty(&schan->ld_queue); 86 + 87 + cookie = dma_cookie_assign(tx); 88 + 89 + /* Mark all chunks of this descriptor as submitted, move to the queue */ 90 + list_for_each_entry_safe(chunk, c, desc->node.prev, node) { 91 + /* 92 + * All chunks are on the global ld_free, so, we have to find 93 + * the end of the chain ourselves 94 + */ 95 + if (chunk != desc && (chunk->mark == DESC_IDLE || 96 + chunk->async_tx.cookie > 0 || 97 + chunk->async_tx.cookie == -EBUSY || 98 + &chunk->node == &schan->ld_free)) 99 + break; 100 + chunk->mark = DESC_SUBMITTED; 101 + /* Callback goes to the last chunk */ 102 + chunk->async_tx.callback = NULL; 103 + 
chunk->cookie = cookie; 104 + list_move_tail(&chunk->node, &schan->ld_queue); 105 + last = chunk; 106 + 107 + dev_dbg(schan->dev, "submit #%d@%p on %d\n", 108 + tx->cookie, &last->async_tx, schan->id); 109 + } 110 + 111 + last->async_tx.callback = callback; 112 + last->async_tx.callback_param = tx->callback_param; 113 + 114 + if (power_up) { 115 + int ret; 116 + schan->pm_state = SHDMA_PM_BUSY; 117 + 118 + ret = pm_runtime_get(schan->dev); 119 + 120 + spin_unlock_irq(&schan->chan_lock); 121 + if (ret < 0) 122 + dev_err(schan->dev, "%s(): GET = %d\n", __func__, ret); 123 + 124 + pm_runtime_barrier(schan->dev); 125 + 126 + spin_lock_irq(&schan->chan_lock); 127 + 128 + /* Have we been reset, while waiting? */ 129 + if (schan->pm_state != SHDMA_PM_ESTABLISHED) { 130 + struct shdma_dev *sdev = 131 + to_shdma_dev(schan->dma_chan.device); 132 + const struct shdma_ops *ops = sdev->ops; 133 + dev_dbg(schan->dev, "Bring up channel %d\n", 134 + schan->id); 135 + /* 136 + * TODO: .xfer_setup() might fail on some platforms. 137 + * Make it int then, on error remove chunks from the 138 + * queue again 139 + */ 140 + ops->setup_xfer(schan, schan->slave_id); 141 + 142 + if (schan->pm_state == SHDMA_PM_PENDING) 143 + shdma_chan_xfer_ld_queue(schan); 144 + schan->pm_state = SHDMA_PM_ESTABLISHED; 145 + } 146 + } else { 147 + /* 148 + * Tell .device_issue_pending() not to run the queue, interrupts 149 + * will do it anyway 150 + */ 151 + schan->pm_state = SHDMA_PM_PENDING; 152 + } 153 + 154 + spin_unlock_irq(&schan->chan_lock); 155 + 156 + return cookie; 157 + } 158 + 159 + /* Called with desc_lock held */ 160 + static struct shdma_desc *shdma_get_desc(struct shdma_chan *schan) 161 + { 162 + struct shdma_desc *sdesc; 163 + 164 + list_for_each_entry(sdesc, &schan->ld_free, node) 165 + if (sdesc->mark != DESC_PREPARED) { 166 + BUG_ON(sdesc->mark != DESC_IDLE); 167 + list_del(&sdesc->node); 168 + return sdesc; 169 + } 170 + 171 + return NULL; 172 + } 173 + 174 + static int shdma_setup_slave(struct shdma_chan *schan, int slave_id) 175 + { 176 + struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 177 + const struct shdma_ops *ops = sdev->ops; 178 + int ret; 179 + 180 + if (slave_id < 0 || slave_id >= slave_num) 181 + return -EINVAL; 182 + 183 + if (test_and_set_bit(slave_id, shdma_slave_used)) 184 + return -EBUSY; 185 + 186 + ret = ops->set_slave(schan, slave_id, false); 187 + if (ret < 0) { 188 + clear_bit(slave_id, shdma_slave_used); 189 + return ret; 190 + } 191 + 192 + schan->slave_id = slave_id; 193 + 194 + return 0; 195 + } 196 + 197 + /* 198 + * This is the standard shdma filter function to be used as a replacement to the 199 + * "old" method, using the .private pointer. If for some reason you allocate a 200 + * channel without slave data, use something like ERR_PTR(-EINVAL) as a filter 201 + * parameter. If this filter is used, the slave driver, after calling 202 + * dma_request_channel(), will also have to call dmaengine_slave_config() with 203 + * .slave_id, .direction, and either .src_addr or .dst_addr set. 204 + * NOTE: this filter doesn't support multiple DMAC drivers with the DMA_SLAVE 205 + * capability! If this becomes a requirement, hardware glue drivers, using this 206 + * services would have to provide their own filters, which first would check 207 + * the device driver, similar to how other DMAC drivers, e.g., sa11x0-dma.c, do 208 + * this, and only then, in case of a match, call this common filter. 
209 + */ 210 + bool shdma_chan_filter(struct dma_chan *chan, void *arg) 211 + { 212 + struct shdma_chan *schan = to_shdma_chan(chan); 213 + struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 214 + const struct shdma_ops *ops = sdev->ops; 215 + int slave_id = (int)arg; 216 + int ret; 217 + 218 + if (slave_id < 0) 219 + /* No slave requested - arbitrary channel */ 220 + return true; 221 + 222 + if (slave_id >= slave_num) 223 + return false; 224 + 225 + ret = ops->set_slave(schan, slave_id, true); 226 + if (ret < 0) 227 + return false; 228 + 229 + return true; 230 + } 231 + EXPORT_SYMBOL(shdma_chan_filter); 232 + 233 + static int shdma_alloc_chan_resources(struct dma_chan *chan) 234 + { 235 + struct shdma_chan *schan = to_shdma_chan(chan); 236 + struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 237 + const struct shdma_ops *ops = sdev->ops; 238 + struct shdma_desc *desc; 239 + struct shdma_slave *slave = chan->private; 240 + int ret, i; 241 + 242 + /* 243 + * This relies on the guarantee from dmaengine that alloc_chan_resources 244 + * never runs concurrently with itself or free_chan_resources. 245 + */ 246 + if (slave) { 247 + /* Legacy mode: .private is set in filter */ 248 + ret = shdma_setup_slave(schan, slave->slave_id); 249 + if (ret < 0) 250 + goto esetslave; 251 + } else { 252 + schan->slave_id = -EINVAL; 253 + } 254 + 255 + schan->desc = kcalloc(NR_DESCS_PER_CHANNEL, 256 + sdev->desc_size, GFP_KERNEL); 257 + if (!schan->desc) { 258 + ret = -ENOMEM; 259 + goto edescalloc; 260 + } 261 + schan->desc_num = NR_DESCS_PER_CHANNEL; 262 + 263 + for (i = 0; i < NR_DESCS_PER_CHANNEL; i++) { 264 + desc = ops->embedded_desc(schan->desc, i); 265 + dma_async_tx_descriptor_init(&desc->async_tx, 266 + &schan->dma_chan); 267 + desc->async_tx.tx_submit = shdma_tx_submit; 268 + desc->mark = DESC_IDLE; 269 + 270 + list_add(&desc->node, &schan->ld_free); 271 + } 272 + 273 + return NR_DESCS_PER_CHANNEL; 274 + 275 + edescalloc: 276 + if (slave) 277 + esetslave: 278 + clear_bit(slave->slave_id, shdma_slave_used); 279 + chan->private = NULL; 280 + return ret; 281 + } 282 + 283 + static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all) 284 + { 285 + struct shdma_desc *desc, *_desc; 286 + /* Is the "exposed" head of a chain acked? 
*/ 287 + bool head_acked = false; 288 + dma_cookie_t cookie = 0; 289 + dma_async_tx_callback callback = NULL; 290 + void *param = NULL; 291 + unsigned long flags; 292 + 293 + spin_lock_irqsave(&schan->chan_lock, flags); 294 + list_for_each_entry_safe(desc, _desc, &schan->ld_queue, node) { 295 + struct dma_async_tx_descriptor *tx = &desc->async_tx; 296 + 297 + BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); 298 + BUG_ON(desc->mark != DESC_SUBMITTED && 299 + desc->mark != DESC_COMPLETED && 300 + desc->mark != DESC_WAITING); 301 + 302 + /* 303 + * queue is ordered, and we use this loop to (1) clean up all 304 + * completed descriptors, and to (2) update descriptor flags of 305 + * any chunks in a (partially) completed chain 306 + */ 307 + if (!all && desc->mark == DESC_SUBMITTED && 308 + desc->cookie != cookie) 309 + break; 310 + 311 + if (tx->cookie > 0) 312 + cookie = tx->cookie; 313 + 314 + if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { 315 + if (schan->dma_chan.completed_cookie != desc->cookie - 1) 316 + dev_dbg(schan->dev, 317 + "Completing cookie %d, expected %d\n", 318 + desc->cookie, 319 + schan->dma_chan.completed_cookie + 1); 320 + schan->dma_chan.completed_cookie = desc->cookie; 321 + } 322 + 323 + /* Call callback on the last chunk */ 324 + if (desc->mark == DESC_COMPLETED && tx->callback) { 325 + desc->mark = DESC_WAITING; 326 + callback = tx->callback; 327 + param = tx->callback_param; 328 + dev_dbg(schan->dev, "descriptor #%d@%p on %d callback\n", 329 + tx->cookie, tx, schan->id); 330 + BUG_ON(desc->chunks != 1); 331 + break; 332 + } 333 + 334 + if (tx->cookie > 0 || tx->cookie == -EBUSY) { 335 + if (desc->mark == DESC_COMPLETED) { 336 + BUG_ON(tx->cookie < 0); 337 + desc->mark = DESC_WAITING; 338 + } 339 + head_acked = async_tx_test_ack(tx); 340 + } else { 341 + switch (desc->mark) { 342 + case DESC_COMPLETED: 343 + desc->mark = DESC_WAITING; 344 + /* Fall through */ 345 + case DESC_WAITING: 346 + if (head_acked) 347 + async_tx_ack(&desc->async_tx); 348 + } 349 + } 350 + 351 + dev_dbg(schan->dev, "descriptor %p #%d completed.\n", 352 + tx, tx->cookie); 353 + 354 + if (((desc->mark == DESC_COMPLETED || 355 + desc->mark == DESC_WAITING) && 356 + async_tx_test_ack(&desc->async_tx)) || all) { 357 + /* Remove from ld_queue list */ 358 + desc->mark = DESC_IDLE; 359 + 360 + list_move(&desc->node, &schan->ld_free); 361 + 362 + if (list_empty(&schan->ld_queue)) { 363 + dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); 364 + pm_runtime_put(schan->dev); 365 + schan->pm_state = SHDMA_PM_ESTABLISHED; 366 + } 367 + } 368 + } 369 + 370 + if (all && !callback) 371 + /* 372 + * Terminating and the loop completed normally: forgive 373 + * uncompleted cookies 374 + */ 375 + schan->dma_chan.completed_cookie = schan->dma_chan.cookie; 376 + 377 + spin_unlock_irqrestore(&schan->chan_lock, flags); 378 + 379 + if (callback) 380 + callback(param); 381 + 382 + return callback; 383 + } 384 + 385 + /* 386 + * shdma_chan_ld_cleanup - Clean up link descriptors 387 + * 388 + * Clean up the ld_queue of DMA channel. 389 + */ 390 + static void shdma_chan_ld_cleanup(struct shdma_chan *schan, bool all) 391 + { 392 + while (__ld_cleanup(schan, all)) 393 + ; 394 + } 395 + 396 + /* 397 + * shdma_free_chan_resources - Free all resources of the channel. 
398 + */ 399 + static void shdma_free_chan_resources(struct dma_chan *chan) 400 + { 401 + struct shdma_chan *schan = to_shdma_chan(chan); 402 + struct shdma_dev *sdev = to_shdma_dev(chan->device); 403 + const struct shdma_ops *ops = sdev->ops; 404 + LIST_HEAD(list); 405 + 406 + /* Protect against ISR */ 407 + spin_lock_irq(&schan->chan_lock); 408 + ops->halt_channel(schan); 409 + spin_unlock_irq(&schan->chan_lock); 410 + 411 + /* Now no new interrupts will occur */ 412 + 413 + /* Prepared and not submitted descriptors can still be on the queue */ 414 + if (!list_empty(&schan->ld_queue)) 415 + shdma_chan_ld_cleanup(schan, true); 416 + 417 + if (schan->slave_id >= 0) { 418 + /* The caller is holding dma_list_mutex */ 419 + clear_bit(schan->slave_id, shdma_slave_used); 420 + chan->private = NULL; 421 + } 422 + 423 + spin_lock_irq(&schan->chan_lock); 424 + 425 + list_splice_init(&schan->ld_free, &list); 426 + schan->desc_num = 0; 427 + 428 + spin_unlock_irq(&schan->chan_lock); 429 + 430 + kfree(schan->desc); 431 + } 432 + 433 + /** 434 + * shdma_add_desc - get, set up and return one transfer descriptor 435 + * @schan: DMA channel 436 + * @flags: DMA transfer flags 437 + * @dst: destination DMA address, incremented when direction equals 438 + * DMA_DEV_TO_MEM or DMA_MEM_TO_MEM 439 + * @src: source DMA address, incremented when direction equals 440 + * DMA_MEM_TO_DEV or DMA_MEM_TO_MEM 441 + * @len: DMA transfer length 442 + * @first: if NULL, set to the current descriptor and cookie set to -EBUSY 443 + * @direction: needed for slave DMA to decide which address to keep constant, 444 + * equals DMA_MEM_TO_MEM for MEMCPY 445 + * Returns 0 or an error 446 + * Locks: called with desc_lock held 447 + */ 448 + static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan, 449 + unsigned long flags, dma_addr_t *dst, dma_addr_t *src, size_t *len, 450 + struct shdma_desc **first, enum dma_transfer_direction direction) 451 + { 452 + struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 453 + const struct shdma_ops *ops = sdev->ops; 454 + struct shdma_desc *new; 455 + size_t copy_size = *len; 456 + 457 + if (!copy_size) 458 + return NULL; 459 + 460 + /* Allocate the link descriptor from the free list */ 461 + new = shdma_get_desc(schan); 462 + if (!new) { 463 + dev_err(schan->dev, "No free link descriptor available\n"); 464 + return NULL; 465 + } 466 + 467 + ops->desc_setup(schan, new, *src, *dst, &copy_size); 468 + 469 + if (!*first) { 470 + /* First desc */ 471 + new->async_tx.cookie = -EBUSY; 472 + *first = new; 473 + } else { 474 + /* Other desc - invisible to the user */ 475 + new->async_tx.cookie = -EINVAL; 476 + } 477 + 478 + dev_dbg(schan->dev, 479 + "chaining (%u/%u)@%x -> %x with %p, cookie %d\n", 480 + copy_size, *len, *src, *dst, &new->async_tx, 481 + new->async_tx.cookie); 482 + 483 + new->mark = DESC_PREPARED; 484 + new->async_tx.flags = flags; 485 + new->direction = direction; 486 + 487 + *len -= copy_size; 488 + if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) 489 + *src += copy_size; 490 + if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) 491 + *dst += copy_size; 492 + 493 + return new; 494 + } 495 + 496 + /* 497 + * shdma_prep_sg - prepare transfer descriptors from an SG list 498 + * 499 + * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also 500 + * converted to scatter-gather to guarantee consistent locking and a correct 501 + * list manipulation. 
For slave DMA direction carries the usual meaning, and, 502 + * logically, the SG list is RAM and the addr variable contains slave address, 503 + * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM 504 + * and the SG list contains only one element and points at the source buffer. 505 + */ 506 + static struct dma_async_tx_descriptor *shdma_prep_sg(struct shdma_chan *schan, 507 + struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, 508 + enum dma_transfer_direction direction, unsigned long flags) 509 + { 510 + struct scatterlist *sg; 511 + struct shdma_desc *first = NULL, *new = NULL /* compiler... */; 512 + LIST_HEAD(tx_list); 513 + int chunks = 0; 514 + unsigned long irq_flags; 515 + int i; 516 + 517 + for_each_sg(sgl, sg, sg_len, i) 518 + chunks += DIV_ROUND_UP(sg_dma_len(sg), schan->max_xfer_len); 519 + 520 + /* Have to lock the whole loop to protect against concurrent release */ 521 + spin_lock_irqsave(&schan->chan_lock, irq_flags); 522 + 523 + /* 524 + * Chaining: 525 + * first descriptor is what user is dealing with in all API calls, its 526 + * cookie is at first set to -EBUSY, at tx-submit to a positive 527 + * number 528 + * if more than one chunk is needed further chunks have cookie = -EINVAL 529 + * the last chunk, if not equal to the first, has cookie = -ENOSPC 530 + * all chunks are linked onto the tx_list head with their .node heads 531 + * only during this function, then they are immediately spliced 532 + * back onto the free list in form of a chain 533 + */ 534 + for_each_sg(sgl, sg, sg_len, i) { 535 + dma_addr_t sg_addr = sg_dma_address(sg); 536 + size_t len = sg_dma_len(sg); 537 + 538 + if (!len) 539 + goto err_get_desc; 540 + 541 + do { 542 + dev_dbg(schan->dev, "Add SG #%d@%p[%d], dma %llx\n", 543 + i, sg, len, (unsigned long long)sg_addr); 544 + 545 + if (direction == DMA_DEV_TO_MEM) 546 + new = shdma_add_desc(schan, flags, 547 + &sg_addr, addr, &len, &first, 548 + direction); 549 + else 550 + new = shdma_add_desc(schan, flags, 551 + addr, &sg_addr, &len, &first, 552 + direction); 553 + if (!new) 554 + goto err_get_desc; 555 + 556 + new->chunks = chunks--; 557 + list_add_tail(&new->node, &tx_list); 558 + } while (len); 559 + } 560 + 561 + if (new != first) 562 + new->async_tx.cookie = -ENOSPC; 563 + 564 + /* Put them back on the free list, so, they don't get lost */ 565 + list_splice_tail(&tx_list, &schan->ld_free); 566 + 567 + spin_unlock_irqrestore(&schan->chan_lock, irq_flags); 568 + 569 + return &first->async_tx; 570 + 571 + err_get_desc: 572 + list_for_each_entry(new, &tx_list, node) 573 + new->mark = DESC_IDLE; 574 + list_splice(&tx_list, &schan->ld_free); 575 + 576 + spin_unlock_irqrestore(&schan->chan_lock, irq_flags); 577 + 578 + return NULL; 579 + } 580 + 581 + static struct dma_async_tx_descriptor *shdma_prep_memcpy( 582 + struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 583 + size_t len, unsigned long flags) 584 + { 585 + struct shdma_chan *schan = to_shdma_chan(chan); 586 + struct scatterlist sg; 587 + 588 + if (!chan || !len) 589 + return NULL; 590 + 591 + BUG_ON(!schan->desc_num); 592 + 593 + sg_init_table(&sg, 1); 594 + sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, 595 + offset_in_page(dma_src)); 596 + sg_dma_address(&sg) = dma_src; 597 + sg_dma_len(&sg) = len; 598 + 599 + return shdma_prep_sg(schan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, flags); 600 + } 601 + 602 + static struct dma_async_tx_descriptor *shdma_prep_slave_sg( 603 + struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 604 
+ enum dma_transfer_direction direction, unsigned long flags, void *context) 605 + { 606 + struct shdma_chan *schan = to_shdma_chan(chan); 607 + struct shdma_dev *sdev = to_shdma_dev(schan->dma_chan.device); 608 + const struct shdma_ops *ops = sdev->ops; 609 + int slave_id = schan->slave_id; 610 + dma_addr_t slave_addr; 611 + 612 + if (!chan) 613 + return NULL; 614 + 615 + BUG_ON(!schan->desc_num); 616 + 617 + /* Someone calling slave DMA on a generic channel? */ 618 + if (slave_id < 0 || !sg_len) { 619 + dev_warn(schan->dev, "%s: bad parameter: len=%d, id=%d\n", 620 + __func__, sg_len, slave_id); 621 + return NULL; 622 + } 623 + 624 + slave_addr = ops->slave_addr(schan); 625 + 626 + return shdma_prep_sg(schan, sgl, sg_len, &slave_addr, 627 + direction, flags); 628 + } 629 + 630 + static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 631 + unsigned long arg) 632 + { 633 + struct shdma_chan *schan = to_shdma_chan(chan); 634 + struct shdma_dev *sdev = to_shdma_dev(chan->device); 635 + const struct shdma_ops *ops = sdev->ops; 636 + struct dma_slave_config *config; 637 + unsigned long flags; 638 + int ret; 639 + 640 + if (!chan) 641 + return -EINVAL; 642 + 643 + switch (cmd) { 644 + case DMA_TERMINATE_ALL: 645 + spin_lock_irqsave(&schan->chan_lock, flags); 646 + ops->halt_channel(schan); 647 + spin_unlock_irqrestore(&schan->chan_lock, flags); 648 + 649 + shdma_chan_ld_cleanup(schan, true); 650 + break; 651 + case DMA_SLAVE_CONFIG: 652 + /* 653 + * So far only .slave_id is used, but the slave drivers are 654 + * encouraged to also set a transfer direction and an address. 655 + */ 656 + if (!arg) 657 + return -EINVAL; 658 + /* 659 + * We could lock this, but you shouldn't be configuring the 660 + * channel, while using it... 661 + */ 662 + config = (struct dma_slave_config *)arg; 663 + ret = shdma_setup_slave(schan, config->slave_id); 664 + if (ret < 0) 665 + return ret; 666 + break; 667 + default: 668 + return -ENXIO; 669 + } 670 + 671 + return 0; 672 + } 673 + 674 + static void shdma_issue_pending(struct dma_chan *chan) 675 + { 676 + struct shdma_chan *schan = to_shdma_chan(chan); 677 + 678 + spin_lock_irq(&schan->chan_lock); 679 + if (schan->pm_state == SHDMA_PM_ESTABLISHED) 680 + shdma_chan_xfer_ld_queue(schan); 681 + else 682 + schan->pm_state = SHDMA_PM_PENDING; 683 + spin_unlock_irq(&schan->chan_lock); 684 + } 685 + 686 + static enum dma_status shdma_tx_status(struct dma_chan *chan, 687 + dma_cookie_t cookie, 688 + struct dma_tx_state *txstate) 689 + { 690 + struct shdma_chan *schan = to_shdma_chan(chan); 691 + enum dma_status status; 692 + unsigned long flags; 693 + 694 + shdma_chan_ld_cleanup(schan, false); 695 + 696 + spin_lock_irqsave(&schan->chan_lock, flags); 697 + 698 + status = dma_cookie_status(chan, cookie, txstate); 699 + 700 + /* 701 + * If we don't find cookie on the queue, it has been aborted and we have 702 + * to report error 703 + */ 704 + if (status != DMA_SUCCESS) { 705 + struct shdma_desc *sdesc; 706 + status = DMA_ERROR; 707 + list_for_each_entry(sdesc, &schan->ld_queue, node) 708 + if (sdesc->cookie == cookie) { 709 + status = DMA_IN_PROGRESS; 710 + break; 711 + } 712 + } 713 + 714 + spin_unlock_irqrestore(&schan->chan_lock, flags); 715 + 716 + return status; 717 + } 718 + 719 + /* Called from error IRQ or NMI */ 720 + bool shdma_reset(struct shdma_dev *sdev) 721 + { 722 + const struct shdma_ops *ops = sdev->ops; 723 + struct shdma_chan *schan; 724 + unsigned int handled = 0; 725 + int i; 726 + 727 + /* Reset all channels */ 728 + 
shdma_for_each_chan(schan, sdev, i) { 729 + struct shdma_desc *sdesc; 730 + LIST_HEAD(dl); 731 + 732 + if (!schan) 733 + continue; 734 + 735 + spin_lock(&schan->chan_lock); 736 + 737 + /* Stop the channel */ 738 + ops->halt_channel(schan); 739 + 740 + list_splice_init(&schan->ld_queue, &dl); 741 + 742 + if (!list_empty(&dl)) { 743 + dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); 744 + pm_runtime_put(schan->dev); 745 + } 746 + schan->pm_state = SHDMA_PM_ESTABLISHED; 747 + 748 + spin_unlock(&schan->chan_lock); 749 + 750 + /* Complete all */ 751 + list_for_each_entry(sdesc, &dl, node) { 752 + struct dma_async_tx_descriptor *tx = &sdesc->async_tx; 753 + sdesc->mark = DESC_IDLE; 754 + if (tx->callback) 755 + tx->callback(tx->callback_param); 756 + } 757 + 758 + spin_lock(&schan->chan_lock); 759 + list_splice(&dl, &schan->ld_free); 760 + spin_unlock(&schan->chan_lock); 761 + 762 + handled++; 763 + } 764 + 765 + return !!handled; 766 + } 767 + EXPORT_SYMBOL(shdma_reset); 768 + 769 + static irqreturn_t chan_irq(int irq, void *dev) 770 + { 771 + struct shdma_chan *schan = dev; 772 + const struct shdma_ops *ops = 773 + to_shdma_dev(schan->dma_chan.device)->ops; 774 + irqreturn_t ret; 775 + 776 + spin_lock(&schan->chan_lock); 777 + 778 + ret = ops->chan_irq(schan, irq) ? IRQ_WAKE_THREAD : IRQ_NONE; 779 + 780 + spin_unlock(&schan->chan_lock); 781 + 782 + return ret; 783 + } 784 + 785 + static irqreturn_t chan_irqt(int irq, void *dev) 786 + { 787 + struct shdma_chan *schan = dev; 788 + const struct shdma_ops *ops = 789 + to_shdma_dev(schan->dma_chan.device)->ops; 790 + struct shdma_desc *sdesc; 791 + 792 + spin_lock_irq(&schan->chan_lock); 793 + list_for_each_entry(sdesc, &schan->ld_queue, node) { 794 + if (sdesc->mark == DESC_SUBMITTED && 795 + ops->desc_completed(schan, sdesc)) { 796 + dev_dbg(schan->dev, "done #%d@%p\n", 797 + sdesc->async_tx.cookie, &sdesc->async_tx); 798 + sdesc->mark = DESC_COMPLETED; 799 + break; 800 + } 801 + } 802 + /* Next desc */ 803 + shdma_chan_xfer_ld_queue(schan); 804 + spin_unlock_irq(&schan->chan_lock); 805 + 806 + shdma_chan_ld_cleanup(schan, false); 807 + 808 + return IRQ_HANDLED; 809 + } 810 + 811 + int shdma_request_irq(struct shdma_chan *schan, int irq, 812 + unsigned long flags, const char *name) 813 + { 814 + int ret = request_threaded_irq(irq, chan_irq, chan_irqt, 815 + flags, name, schan); 816 + 817 + schan->irq = ret < 0 ? 
ret : irq; 818 + 819 + return ret; 820 + } 821 + EXPORT_SYMBOL(shdma_request_irq); 822 + 823 + void shdma_free_irq(struct shdma_chan *schan) 824 + { 825 + if (schan->irq >= 0) 826 + free_irq(schan->irq, schan); 827 + } 828 + EXPORT_SYMBOL(shdma_free_irq); 829 + 830 + void shdma_chan_probe(struct shdma_dev *sdev, 831 + struct shdma_chan *schan, int id) 832 + { 833 + schan->pm_state = SHDMA_PM_ESTABLISHED; 834 + 835 + /* reference struct dma_device */ 836 + schan->dma_chan.device = &sdev->dma_dev; 837 + dma_cookie_init(&schan->dma_chan); 838 + 839 + schan->dev = sdev->dma_dev.dev; 840 + schan->id = id; 841 + 842 + if (!schan->max_xfer_len) 843 + schan->max_xfer_len = PAGE_SIZE; 844 + 845 + spin_lock_init(&schan->chan_lock); 846 + 847 + /* Init descripter manage list */ 848 + INIT_LIST_HEAD(&schan->ld_queue); 849 + INIT_LIST_HEAD(&schan->ld_free); 850 + 851 + /* Add the channel to DMA device channel list */ 852 + list_add_tail(&schan->dma_chan.device_node, 853 + &sdev->dma_dev.channels); 854 + sdev->schan[sdev->dma_dev.chancnt++] = schan; 855 + } 856 + EXPORT_SYMBOL(shdma_chan_probe); 857 + 858 + void shdma_chan_remove(struct shdma_chan *schan) 859 + { 860 + list_del(&schan->dma_chan.device_node); 861 + } 862 + EXPORT_SYMBOL(shdma_chan_remove); 863 + 864 + int shdma_init(struct device *dev, struct shdma_dev *sdev, 865 + int chan_num) 866 + { 867 + struct dma_device *dma_dev = &sdev->dma_dev; 868 + 869 + /* 870 + * Require all call-backs for now, they can trivially be made optional 871 + * later as required 872 + */ 873 + if (!sdev->ops || 874 + !sdev->desc_size || 875 + !sdev->ops->embedded_desc || 876 + !sdev->ops->start_xfer || 877 + !sdev->ops->setup_xfer || 878 + !sdev->ops->set_slave || 879 + !sdev->ops->desc_setup || 880 + !sdev->ops->slave_addr || 881 + !sdev->ops->channel_busy || 882 + !sdev->ops->halt_channel || 883 + !sdev->ops->desc_completed) 884 + return -EINVAL; 885 + 886 + sdev->schan = kcalloc(chan_num, sizeof(*sdev->schan), GFP_KERNEL); 887 + if (!sdev->schan) 888 + return -ENOMEM; 889 + 890 + INIT_LIST_HEAD(&dma_dev->channels); 891 + 892 + /* Common and MEMCPY operations */ 893 + dma_dev->device_alloc_chan_resources 894 + = shdma_alloc_chan_resources; 895 + dma_dev->device_free_chan_resources = shdma_free_chan_resources; 896 + dma_dev->device_prep_dma_memcpy = shdma_prep_memcpy; 897 + dma_dev->device_tx_status = shdma_tx_status; 898 + dma_dev->device_issue_pending = shdma_issue_pending; 899 + 900 + /* Compulsory for DMA_SLAVE fields */ 901 + dma_dev->device_prep_slave_sg = shdma_prep_slave_sg; 902 + dma_dev->device_control = shdma_control; 903 + 904 + dma_dev->dev = dev; 905 + 906 + return 0; 907 + } 908 + EXPORT_SYMBOL(shdma_init); 909 + 910 + void shdma_cleanup(struct shdma_dev *sdev) 911 + { 912 + kfree(sdev->schan); 913 + } 914 + EXPORT_SYMBOL(shdma_cleanup); 915 + 916 + static int __init shdma_enter(void) 917 + { 918 + shdma_slave_used = kzalloc(DIV_ROUND_UP(slave_num, BITS_PER_LONG) * 919 + sizeof(long), GFP_KERNEL); 920 + if (!shdma_slave_used) 921 + return -ENOMEM; 922 + return 0; 923 + } 924 + module_init(shdma_enter); 925 + 926 + static void __exit shdma_exit(void) 927 + { 928 + kfree(shdma_slave_used); 929 + } 930 + module_exit(shdma_exit); 931 + 932 + MODULE_LICENSE("GPL v2"); 933 + MODULE_DESCRIPTION("SH-DMA driver base library"); 934 + MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
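A minimal usage sketch of the channel-allocation path this base library exposes: a slave driver requests a channel through shdma_chan_filter() with an integer slave ID and then binds it with dmaengine_slave_config(). The direction, error handling and cast below are illustrative placeholders, not taken from the diff:

/*
 * Sketch only: obtain and configure a slave channel via the shdma base
 * library. The slave ID is assumed to be a platform-defined integer.
 */
#include <linux/dmaengine.h>
#include <linux/shdma-base.h>

static struct dma_chan *example_get_slave_channel(int slave_id)
{
	struct dma_slave_config cfg = {
		.slave_id  = slave_id,
		.direction = DMA_MEM_TO_DEV,
	};
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* shdma_chan_filter() casts the argument back to an int slave ID */
	chan = dma_request_channel(mask, shdma_chan_filter,
				   (void *)(unsigned long)slave_id);
	if (!chan)
		return NULL;

	/* DMA_SLAVE_CONFIG is routed to shdma_control() -> shdma_setup_slave() */
	if (dmaengine_slave_config(chan, &cfg) < 0) {
		dma_release_channel(chan);
		return NULL;
	}

	return chan;
}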
+943
drivers/dma/sh/shdma.c
··· 1 + /* 2 + * Renesas SuperH DMA Engine support 3 + * 4 + * base is drivers/dma/flsdma.c 5 + * 6 + * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 7 + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> 8 + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. 9 + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 10 + * 11 + * This is free software; you can redistribute it and/or modify 12 + * it under the terms of the GNU General Public License as published by 13 + * the Free Software Foundation; either version 2 of the License, or 14 + * (at your option) any later version. 15 + * 16 + * - DMA of SuperH does not have Hardware DMA chain mode. 17 + * - MAX DMA size is 16MB. 18 + * 19 + */ 20 + 21 + #include <linux/init.h> 22 + #include <linux/module.h> 23 + #include <linux/slab.h> 24 + #include <linux/interrupt.h> 25 + #include <linux/dmaengine.h> 26 + #include <linux/delay.h> 27 + #include <linux/platform_device.h> 28 + #include <linux/pm_runtime.h> 29 + #include <linux/sh_dma.h> 30 + #include <linux/notifier.h> 31 + #include <linux/kdebug.h> 32 + #include <linux/spinlock.h> 33 + #include <linux/rculist.h> 34 + 35 + #include "../dmaengine.h" 36 + #include "shdma.h" 37 + 38 + #define SH_DMAE_DRV_NAME "sh-dma-engine" 39 + 40 + /* Default MEMCPY transfer size = 2^2 = 4 bytes */ 41 + #define LOG2_DEFAULT_XFER_SIZE 2 42 + #define SH_DMA_SLAVE_NUMBER 256 43 + #define SH_DMA_TCR_MAX (16 * 1024 * 1024 - 1) 44 + 45 + /* 46 + * Used for write-side mutual exclusion for the global device list, 47 + * read-side synchronization by way of RCU, and per-controller data. 48 + */ 49 + static DEFINE_SPINLOCK(sh_dmae_lock); 50 + static LIST_HEAD(sh_dmae_devices); 51 + 52 + static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) 53 + { 54 + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); 55 + 56 + __raw_writel(data, shdev->chan_reg + 57 + shdev->pdata->channel[sh_dc->shdma_chan.id].chclr_offset); 58 + } 59 + 60 + static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) 61 + { 62 + __raw_writel(data, sh_dc->base + reg / sizeof(u32)); 63 + } 64 + 65 + static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) 66 + { 67 + return __raw_readl(sh_dc->base + reg / sizeof(u32)); 68 + } 69 + 70 + static u16 dmaor_read(struct sh_dmae_device *shdev) 71 + { 72 + u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); 73 + 74 + if (shdev->pdata->dmaor_is_32bit) 75 + return __raw_readl(addr); 76 + else 77 + return __raw_readw(addr); 78 + } 79 + 80 + static void dmaor_write(struct sh_dmae_device *shdev, u16 data) 81 + { 82 + u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); 83 + 84 + if (shdev->pdata->dmaor_is_32bit) 85 + __raw_writel(data, addr); 86 + else 87 + __raw_writew(data, addr); 88 + } 89 + 90 + static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) 91 + { 92 + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); 93 + 94 + __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); 95 + } 96 + 97 + static u32 chcr_read(struct sh_dmae_chan *sh_dc) 98 + { 99 + struct sh_dmae_device *shdev = to_sh_dev(sh_dc); 100 + 101 + return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); 102 + } 103 + 104 + /* 105 + * Reset DMA controller 106 + * 107 + * SH7780 has two DMAOR register 108 + */ 109 + static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) 110 + { 111 + unsigned short dmaor; 112 + unsigned long flags; 113 + 114 + spin_lock_irqsave(&sh_dmae_lock, flags); 115 + 116 + dmaor = 
dmaor_read(shdev); 117 + dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); 118 + 119 + spin_unlock_irqrestore(&sh_dmae_lock, flags); 120 + } 121 + 122 + static int sh_dmae_rst(struct sh_dmae_device *shdev) 123 + { 124 + unsigned short dmaor; 125 + unsigned long flags; 126 + 127 + spin_lock_irqsave(&sh_dmae_lock, flags); 128 + 129 + dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); 130 + 131 + if (shdev->pdata->chclr_present) { 132 + int i; 133 + for (i = 0; i < shdev->pdata->channel_num; i++) { 134 + struct sh_dmae_chan *sh_chan = shdev->chan[i]; 135 + if (sh_chan) 136 + chclr_write(sh_chan, 0); 137 + } 138 + } 139 + 140 + dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); 141 + 142 + dmaor = dmaor_read(shdev); 143 + 144 + spin_unlock_irqrestore(&sh_dmae_lock, flags); 145 + 146 + if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { 147 + dev_warn(shdev->shdma_dev.dma_dev.dev, "Can't initialize DMAOR.\n"); 148 + return -EIO; 149 + } 150 + if (shdev->pdata->dmaor_init & ~dmaor) 151 + dev_warn(shdev->shdma_dev.dma_dev.dev, 152 + "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", 153 + dmaor, shdev->pdata->dmaor_init); 154 + return 0; 155 + } 156 + 157 + static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) 158 + { 159 + u32 chcr = chcr_read(sh_chan); 160 + 161 + if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) 162 + return true; /* working */ 163 + 164 + return false; /* waiting */ 165 + } 166 + 167 + static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) 168 + { 169 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 170 + struct sh_dmae_pdata *pdata = shdev->pdata; 171 + int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | 172 + ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); 173 + 174 + if (cnt >= pdata->ts_shift_num) 175 + cnt = 0; 176 + 177 + return pdata->ts_shift[cnt]; 178 + } 179 + 180 + static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) 181 + { 182 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 183 + struct sh_dmae_pdata *pdata = shdev->pdata; 184 + int i; 185 + 186 + for (i = 0; i < pdata->ts_shift_num; i++) 187 + if (pdata->ts_shift[i] == l2size) 188 + break; 189 + 190 + if (i == pdata->ts_shift_num) 191 + i = 0; 192 + 193 + return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | 194 + ((i << pdata->ts_high_shift) & pdata->ts_high_mask); 195 + } 196 + 197 + static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) 198 + { 199 + sh_dmae_writel(sh_chan, hw->sar, SAR); 200 + sh_dmae_writel(sh_chan, hw->dar, DAR); 201 + sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); 202 + } 203 + 204 + static void dmae_start(struct sh_dmae_chan *sh_chan) 205 + { 206 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 207 + u32 chcr = chcr_read(sh_chan); 208 + 209 + if (shdev->pdata->needs_tend_set) 210 + sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); 211 + 212 + chcr |= CHCR_DE | shdev->chcr_ie_bit; 213 + chcr_write(sh_chan, chcr & ~CHCR_TE); 214 + } 215 + 216 + static void dmae_init(struct sh_dmae_chan *sh_chan) 217 + { 218 + /* 219 + * Default configuration for dual address memory-memory transfer. 220 + * 0x400 represents auto-request. 221 + */ 222 + u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, 223 + LOG2_DEFAULT_XFER_SIZE); 224 + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); 225 + chcr_write(sh_chan, chcr); 226 + } 227 + 228 + static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) 229 + { 230 + /* If DMA is active, cannot set CHCR. 
TODO: remove this superfluous check */ 231 + if (dmae_is_busy(sh_chan)) 232 + return -EBUSY; 233 + 234 + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); 235 + chcr_write(sh_chan, val); 236 + 237 + return 0; 238 + } 239 + 240 + static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) 241 + { 242 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 243 + struct sh_dmae_pdata *pdata = shdev->pdata; 244 + const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->shdma_chan.id]; 245 + u16 __iomem *addr = shdev->dmars; 246 + unsigned int shift = chan_pdata->dmars_bit; 247 + 248 + if (dmae_is_busy(sh_chan)) 249 + return -EBUSY; 250 + 251 + if (pdata->no_dmars) 252 + return 0; 253 + 254 + /* in the case of a missing DMARS resource use first memory window */ 255 + if (!addr) 256 + addr = (u16 __iomem *)shdev->chan_reg; 257 + addr += chan_pdata->dmars / sizeof(u16); 258 + 259 + __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), 260 + addr); 261 + 262 + return 0; 263 + } 264 + 265 + static void sh_dmae_start_xfer(struct shdma_chan *schan, 266 + struct shdma_desc *sdesc) 267 + { 268 + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, 269 + shdma_chan); 270 + struct sh_dmae_desc *sh_desc = container_of(sdesc, 271 + struct sh_dmae_desc, shdma_desc); 272 + dev_dbg(sh_chan->shdma_chan.dev, "Queue #%d to %d: %u@%x -> %x\n", 273 + sdesc->async_tx.cookie, sh_chan->shdma_chan.id, 274 + sh_desc->hw.tcr, sh_desc->hw.sar, sh_desc->hw.dar); 275 + /* Get the ld start address from ld_queue */ 276 + dmae_set_reg(sh_chan, &sh_desc->hw); 277 + dmae_start(sh_chan); 278 + } 279 + 280 + static bool sh_dmae_channel_busy(struct shdma_chan *schan) 281 + { 282 + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, 283 + shdma_chan); 284 + return dmae_is_busy(sh_chan); 285 + } 286 + 287 + static void sh_dmae_setup_xfer(struct shdma_chan *schan, 288 + int slave_id) 289 + { 290 + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, 291 + shdma_chan); 292 + 293 + if (slave_id >= 0) { 294 + const struct sh_dmae_slave_config *cfg = 295 + sh_chan->config; 296 + 297 + dmae_set_dmars(sh_chan, cfg->mid_rid); 298 + dmae_set_chcr(sh_chan, cfg->chcr); 299 + } else { 300 + dmae_init(sh_chan); 301 + } 302 + } 303 + 304 + static const struct sh_dmae_slave_config *dmae_find_slave( 305 + struct sh_dmae_chan *sh_chan, int slave_id) 306 + { 307 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 308 + struct sh_dmae_pdata *pdata = shdev->pdata; 309 + const struct sh_dmae_slave_config *cfg; 310 + int i; 311 + 312 + if (slave_id >= SH_DMA_SLAVE_NUMBER) 313 + return NULL; 314 + 315 + for (i = 0, cfg = pdata->slave; i < pdata->slave_num; i++, cfg++) 316 + if (cfg->slave_id == slave_id) 317 + return cfg; 318 + 319 + return NULL; 320 + } 321 + 322 + static int sh_dmae_set_slave(struct shdma_chan *schan, 323 + int slave_id, bool try) 324 + { 325 + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, 326 + shdma_chan); 327 + const struct sh_dmae_slave_config *cfg = dmae_find_slave(sh_chan, slave_id); 328 + if (!cfg) 329 + return -ENODEV; 330 + 331 + if (!try) 332 + sh_chan->config = cfg; 333 + 334 + return 0; 335 + } 336 + 337 + static void dmae_halt(struct sh_dmae_chan *sh_chan) 338 + { 339 + struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 340 + u32 chcr = chcr_read(sh_chan); 341 + 342 + chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); 343 + chcr_write(sh_chan, chcr); 344 + } 345 + 346 + static int sh_dmae_desc_setup(struct 
shdma_chan *schan, 347 + struct shdma_desc *sdesc, 348 + dma_addr_t src, dma_addr_t dst, size_t *len) 349 + { 350 + struct sh_dmae_desc *sh_desc = container_of(sdesc, 351 + struct sh_dmae_desc, shdma_desc); 352 + 353 + if (*len > schan->max_xfer_len) 354 + *len = schan->max_xfer_len; 355 + 356 + sh_desc->hw.sar = src; 357 + sh_desc->hw.dar = dst; 358 + sh_desc->hw.tcr = *len; 359 + 360 + return 0; 361 + } 362 + 363 + static void sh_dmae_halt(struct shdma_chan *schan) 364 + { 365 + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, 366 + shdma_chan); 367 + dmae_halt(sh_chan); 368 + } 369 + 370 + static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq) 371 + { 372 + struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan, 373 + shdma_chan); 374 + 375 + if (!(chcr_read(sh_chan) & CHCR_TE)) 376 + return false; 377 + 378 + /* DMA stop */ 379 + dmae_halt(sh_chan); 380 + 381 + return true; 382 + } 383 + 384 + /* Called from error IRQ or NMI */ 385 + static bool sh_dmae_reset(struct sh_dmae_device *shdev) 386 + { 387 + bool ret; 388 + 389 + /* halt the dma controller */ 390 + sh_dmae_ctl_stop(shdev); 391 + 392 + /* We cannot detect, which channel caused the error, have to reset all */ 393 + ret = shdma_reset(&shdev->shdma_dev); 394 + 395 + sh_dmae_rst(shdev); 396 + 397 + return ret; 398 + } 399 + 400 + static irqreturn_t sh_dmae_err(int irq, void *data) 401 + { 402 + struct sh_dmae_device *shdev = data; 403 + 404 + if (!(dmaor_read(shdev) & DMAOR_AE)) 405 + return IRQ_NONE; 406 + 407 + sh_dmae_reset(shdev); 408 + return IRQ_HANDLED; 409 + } 410 + 411 + static bool sh_dmae_desc_completed(struct shdma_chan *schan, 412 + struct shdma_desc *sdesc) 413 + { 414 + struct sh_dmae_chan *sh_chan = container_of(schan, 415 + struct sh_dmae_chan, shdma_chan); 416 + struct sh_dmae_desc *sh_desc = container_of(sdesc, 417 + struct sh_dmae_desc, shdma_desc); 418 + u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 419 + u32 dar_buf = sh_dmae_readl(sh_chan, DAR); 420 + 421 + return (sdesc->direction == DMA_DEV_TO_MEM && 422 + (sh_desc->hw.dar + sh_desc->hw.tcr) == dar_buf) || 423 + (sdesc->direction != DMA_DEV_TO_MEM && 424 + (sh_desc->hw.sar + sh_desc->hw.tcr) == sar_buf); 425 + } 426 + 427 + static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) 428 + { 429 + /* Fast path out if NMIF is not asserted for this controller */ 430 + if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) 431 + return false; 432 + 433 + return sh_dmae_reset(shdev); 434 + } 435 + 436 + static int sh_dmae_nmi_handler(struct notifier_block *self, 437 + unsigned long cmd, void *data) 438 + { 439 + struct sh_dmae_device *shdev; 440 + int ret = NOTIFY_DONE; 441 + bool triggered; 442 + 443 + /* 444 + * Only concern ourselves with NMI events. 445 + * 446 + * Normally we would check the die chain value, but as this needs 447 + * to be architecture independent, check for NMI context instead. 448 + */ 449 + if (!in_nmi()) 450 + return NOTIFY_DONE; 451 + 452 + rcu_read_lock(); 453 + list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { 454 + /* 455 + * Only stop if one of the controllers has NMIF asserted, 456 + * we do not want to interfere with regular address error 457 + * handling or NMI events that don't concern the DMACs. 
458 + */ 459 + triggered = sh_dmae_nmi_notify(shdev); 460 + if (triggered == true) 461 + ret = NOTIFY_OK; 462 + } 463 + rcu_read_unlock(); 464 + 465 + return ret; 466 + } 467 + 468 + static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { 469 + .notifier_call = sh_dmae_nmi_handler, 470 + 471 + /* Run before NMI debug handler and KGDB */ 472 + .priority = 1, 473 + }; 474 + 475 + static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, 476 + int irq, unsigned long flags) 477 + { 478 + const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; 479 + struct shdma_dev *sdev = &shdev->shdma_dev; 480 + struct platform_device *pdev = to_platform_device(sdev->dma_dev.dev); 481 + struct sh_dmae_chan *sh_chan; 482 + struct shdma_chan *schan; 483 + int err; 484 + 485 + sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); 486 + if (!sh_chan) { 487 + dev_err(sdev->dma_dev.dev, 488 + "No free memory for allocating dma channels!\n"); 489 + return -ENOMEM; 490 + } 491 + 492 + schan = &sh_chan->shdma_chan; 493 + schan->max_xfer_len = SH_DMA_TCR_MAX + 1; 494 + 495 + shdma_chan_probe(sdev, schan, id); 496 + 497 + sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); 498 + 499 + /* set up channel irq */ 500 + if (pdev->id >= 0) 501 + snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), 502 + "sh-dmae%d.%d", pdev->id, id); 503 + else 504 + snprintf(sh_chan->dev_id, sizeof(sh_chan->dev_id), 505 + "sh-dma%d", id); 506 + 507 + err = shdma_request_irq(schan, irq, flags, sh_chan->dev_id); 508 + if (err) { 509 + dev_err(sdev->dma_dev.dev, 510 + "DMA channel %d request_irq error %d\n", 511 + id, err); 512 + goto err_no_irq; 513 + } 514 + 515 + shdev->chan[id] = sh_chan; 516 + return 0; 517 + 518 + err_no_irq: 519 + /* remove from dmaengine device node */ 520 + shdma_chan_remove(schan); 521 + kfree(sh_chan); 522 + return err; 523 + } 524 + 525 + static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) 526 + { 527 + struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; 528 + struct shdma_chan *schan; 529 + int i; 530 + 531 + shdma_for_each_chan(schan, &shdev->shdma_dev, i) { 532 + struct sh_dmae_chan *sh_chan = container_of(schan, 533 + struct sh_dmae_chan, shdma_chan); 534 + BUG_ON(!schan); 535 + 536 + shdma_free_irq(&sh_chan->shdma_chan); 537 + 538 + shdma_chan_remove(schan); 539 + kfree(sh_chan); 540 + } 541 + dma_dev->chancnt = 0; 542 + } 543 + 544 + static void sh_dmae_shutdown(struct platform_device *pdev) 545 + { 546 + struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 547 + sh_dmae_ctl_stop(shdev); 548 + } 549 + 550 + static int sh_dmae_runtime_suspend(struct device *dev) 551 + { 552 + return 0; 553 + } 554 + 555 + static int sh_dmae_runtime_resume(struct device *dev) 556 + { 557 + struct sh_dmae_device *shdev = dev_get_drvdata(dev); 558 + 559 + return sh_dmae_rst(shdev); 560 + } 561 + 562 + #ifdef CONFIG_PM 563 + static int sh_dmae_suspend(struct device *dev) 564 + { 565 + return 0; 566 + } 567 + 568 + static int sh_dmae_resume(struct device *dev) 569 + { 570 + struct sh_dmae_device *shdev = dev_get_drvdata(dev); 571 + int i, ret; 572 + 573 + ret = sh_dmae_rst(shdev); 574 + if (ret < 0) 575 + dev_err(dev, "Failed to reset!\n"); 576 + 577 + for (i = 0; i < shdev->pdata->channel_num; i++) { 578 + struct sh_dmae_chan *sh_chan = shdev->chan[i]; 579 + 580 + if (!sh_chan->shdma_chan.desc_num) 581 + continue; 582 + 583 + if (sh_chan->shdma_chan.slave_id >= 0) { 584 + const struct sh_dmae_slave_config *cfg = sh_chan->config; 585 + 
dmae_set_dmars(sh_chan, cfg->mid_rid); 586 + dmae_set_chcr(sh_chan, cfg->chcr); 587 + } else { 588 + dmae_init(sh_chan); 589 + } 590 + } 591 + 592 + return 0; 593 + } 594 + #else 595 + #define sh_dmae_suspend NULL 596 + #define sh_dmae_resume NULL 597 + #endif 598 + 599 + const struct dev_pm_ops sh_dmae_pm = { 600 + .suspend = sh_dmae_suspend, 601 + .resume = sh_dmae_resume, 602 + .runtime_suspend = sh_dmae_runtime_suspend, 603 + .runtime_resume = sh_dmae_runtime_resume, 604 + }; 605 + 606 + static dma_addr_t sh_dmae_slave_addr(struct shdma_chan *schan) 607 + { 608 + struct sh_dmae_chan *sh_chan = container_of(schan, 609 + struct sh_dmae_chan, shdma_chan); 610 + 611 + /* 612 + * Implicit BUG_ON(!sh_chan->config) 613 + * This is an exclusive slave DMA operation, may only be called after a 614 + * successful slave configuration. 615 + */ 616 + return sh_chan->config->addr; 617 + } 618 + 619 + static struct shdma_desc *sh_dmae_embedded_desc(void *buf, int i) 620 + { 621 + return &((struct sh_dmae_desc *)buf)[i].shdma_desc; 622 + } 623 + 624 + static const struct shdma_ops sh_dmae_shdma_ops = { 625 + .desc_completed = sh_dmae_desc_completed, 626 + .halt_channel = sh_dmae_halt, 627 + .channel_busy = sh_dmae_channel_busy, 628 + .slave_addr = sh_dmae_slave_addr, 629 + .desc_setup = sh_dmae_desc_setup, 630 + .set_slave = sh_dmae_set_slave, 631 + .setup_xfer = sh_dmae_setup_xfer, 632 + .start_xfer = sh_dmae_start_xfer, 633 + .embedded_desc = sh_dmae_embedded_desc, 634 + .chan_irq = sh_dmae_chan_irq, 635 + }; 636 + 637 + static int __devinit sh_dmae_probe(struct platform_device *pdev) 638 + { 639 + struct sh_dmae_pdata *pdata = pdev->dev.platform_data; 640 + unsigned long irqflags = IRQF_DISABLED, 641 + chan_flag[SH_DMAE_MAX_CHANNELS] = {}; 642 + int errirq, chan_irq[SH_DMAE_MAX_CHANNELS]; 643 + int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; 644 + struct sh_dmae_device *shdev; 645 + struct dma_device *dma_dev; 646 + struct resource *chan, *dmars, *errirq_res, *chanirq_res; 647 + 648 + /* get platform data */ 649 + if (!pdata || !pdata->channel_num) 650 + return -ENODEV; 651 + 652 + chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); 653 + /* DMARS area is optional */ 654 + dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); 655 + /* 656 + * IRQ resources: 657 + * 1. there always must be at least one IRQ IO-resource. On SH4 it is 658 + * the error IRQ, in which case it is the only IRQ in this resource: 659 + * start == end. If it is the only IRQ resource, all channels also 660 + * use the same IRQ. 661 + * 2. DMA channel IRQ resources can be specified one per resource or in 662 + * ranges (start != end) 663 + * 3. iff all events (channels and, optionally, error) on this 664 + * controller use the same IRQ, only one IRQ resource can be 665 + * specified, otherwise there must be one IRQ per channel, even if 666 + * some of them are equal 667 + * 4. 
if all IRQs on this controller are equal or if some specific IRQs 668 + * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be 669 + * requested with the IRQF_SHARED flag 670 + */ 671 + errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 672 + if (!chan || !errirq_res) 673 + return -ENODEV; 674 + 675 + if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { 676 + dev_err(&pdev->dev, "DMAC register region already claimed\n"); 677 + return -EBUSY; 678 + } 679 + 680 + if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { 681 + dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); 682 + err = -EBUSY; 683 + goto ermrdmars; 684 + } 685 + 686 + err = -ENOMEM; 687 + shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); 688 + if (!shdev) { 689 + dev_err(&pdev->dev, "Not enough memory\n"); 690 + goto ealloc; 691 + } 692 + 693 + dma_dev = &shdev->shdma_dev.dma_dev; 694 + 695 + shdev->chan_reg = ioremap(chan->start, resource_size(chan)); 696 + if (!shdev->chan_reg) 697 + goto emapchan; 698 + if (dmars) { 699 + shdev->dmars = ioremap(dmars->start, resource_size(dmars)); 700 + if (!shdev->dmars) 701 + goto emapdmars; 702 + } 703 + 704 + if (!pdata->slave_only) 705 + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); 706 + if (pdata->slave && pdata->slave_num) 707 + dma_cap_set(DMA_SLAVE, dma_dev->cap_mask); 708 + 709 + /* Default transfer size of 32 bytes requires 32-byte alignment */ 710 + dma_dev->copy_align = LOG2_DEFAULT_XFER_SIZE; 711 + 712 + shdev->shdma_dev.ops = &sh_dmae_shdma_ops; 713 + shdev->shdma_dev.desc_size = sizeof(struct sh_dmae_desc); 714 + err = shdma_init(&pdev->dev, &shdev->shdma_dev, 715 + pdata->channel_num); 716 + if (err < 0) 717 + goto eshdma; 718 + 719 + /* platform data */ 720 + shdev->pdata = pdev->dev.platform_data; 721 + 722 + if (pdata->chcr_offset) 723 + shdev->chcr_offset = pdata->chcr_offset; 724 + else 725 + shdev->chcr_offset = CHCR; 726 + 727 + if (pdata->chcr_ie_bit) 728 + shdev->chcr_ie_bit = pdata->chcr_ie_bit; 729 + else 730 + shdev->chcr_ie_bit = CHCR_IE; 731 + 732 + platform_set_drvdata(pdev, shdev); 733 + 734 + pm_runtime_enable(&pdev->dev); 735 + err = pm_runtime_get_sync(&pdev->dev); 736 + if (err < 0) 737 + dev_err(&pdev->dev, "%s(): GET = %d\n", __func__, err); 738 + 739 + spin_lock_irq(&sh_dmae_lock); 740 + list_add_tail_rcu(&shdev->node, &sh_dmae_devices); 741 + spin_unlock_irq(&sh_dmae_lock); 742 + 743 + /* reset dma controller - only needed as a test */ 744 + err = sh_dmae_rst(shdev); 745 + if (err) 746 + goto rst_err; 747 + 748 + #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 749 + chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 750 + 751 + if (!chanirq_res) 752 + chanirq_res = errirq_res; 753 + else 754 + irqres++; 755 + 756 + if (chanirq_res == errirq_res || 757 + (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) 758 + irqflags = IRQF_SHARED; 759 + 760 + errirq = errirq_res->start; 761 + 762 + err = request_irq(errirq, sh_dmae_err, irqflags, 763 + "DMAC Address Error", shdev); 764 + if (err) { 765 + dev_err(&pdev->dev, 766 + "DMA failed requesting irq #%d, error %d\n", 767 + errirq, err); 768 + goto eirq_err; 769 + } 770 + 771 + #else 772 + chanirq_res = errirq_res; 773 + #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ 774 + 775 + if (chanirq_res->start == chanirq_res->end && 776 + !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { 777 + /* Special case - all multiplexed */ 778 + for (; irq_cnt < pdata->channel_num; irq_cnt++) 
{ 779 + if (irq_cnt < SH_DMAE_MAX_CHANNELS) { 780 + chan_irq[irq_cnt] = chanirq_res->start; 781 + chan_flag[irq_cnt] = IRQF_SHARED; 782 + } else { 783 + irq_cap = 1; 784 + break; 785 + } 786 + } 787 + } else { 788 + do { 789 + for (i = chanirq_res->start; i <= chanirq_res->end; i++) { 790 + if (irq_cnt >= SH_DMAE_MAX_CHANNELS) { 791 + irq_cap = 1; 792 + break; 793 + } 794 + 795 + if ((errirq_res->flags & IORESOURCE_BITS) == 796 + IORESOURCE_IRQ_SHAREABLE) 797 + chan_flag[irq_cnt] = IRQF_SHARED; 798 + else 799 + chan_flag[irq_cnt] = IRQF_DISABLED; 800 + dev_dbg(&pdev->dev, 801 + "Found IRQ %d for channel %d\n", 802 + i, irq_cnt); 803 + chan_irq[irq_cnt++] = i; 804 + } 805 + 806 + if (irq_cnt >= SH_DMAE_MAX_CHANNELS) 807 + break; 808 + 809 + chanirq_res = platform_get_resource(pdev, 810 + IORESOURCE_IRQ, ++irqres); 811 + } while (irq_cnt < pdata->channel_num && chanirq_res); 812 + } 813 + 814 + /* Create DMA Channel */ 815 + for (i = 0; i < irq_cnt; i++) { 816 + err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); 817 + if (err) 818 + goto chan_probe_err; 819 + } 820 + 821 + if (irq_cap) 822 + dev_notice(&pdev->dev, "Attempting to register %d DMA " 823 + "channels when a maximum of %d are supported.\n", 824 + pdata->channel_num, SH_DMAE_MAX_CHANNELS); 825 + 826 + pm_runtime_put(&pdev->dev); 827 + 828 + err = dma_async_device_register(&shdev->shdma_dev.dma_dev); 829 + if (err < 0) 830 + goto edmadevreg; 831 + 832 + return err; 833 + 834 + edmadevreg: 835 + pm_runtime_get(&pdev->dev); 836 + 837 + chan_probe_err: 838 + sh_dmae_chan_remove(shdev); 839 + 840 + #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 841 + free_irq(errirq, shdev); 842 + eirq_err: 843 + #endif 844 + rst_err: 845 + spin_lock_irq(&sh_dmae_lock); 846 + list_del_rcu(&shdev->node); 847 + spin_unlock_irq(&sh_dmae_lock); 848 + 849 + pm_runtime_put(&pdev->dev); 850 + pm_runtime_disable(&pdev->dev); 851 + 852 + platform_set_drvdata(pdev, NULL); 853 + shdma_cleanup(&shdev->shdma_dev); 854 + eshdma: 855 + if (dmars) 856 + iounmap(shdev->dmars); 857 + emapdmars: 858 + iounmap(shdev->chan_reg); 859 + synchronize_rcu(); 860 + emapchan: 861 + kfree(shdev); 862 + ealloc: 863 + if (dmars) 864 + release_mem_region(dmars->start, resource_size(dmars)); 865 + ermrdmars: 866 + release_mem_region(chan->start, resource_size(chan)); 867 + 868 + return err; 869 + } 870 + 871 + static int __devexit sh_dmae_remove(struct platform_device *pdev) 872 + { 873 + struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 874 + struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev; 875 + struct resource *res; 876 + int errirq = platform_get_irq(pdev, 0); 877 + 878 + dma_async_device_unregister(dma_dev); 879 + 880 + if (errirq > 0) 881 + free_irq(errirq, shdev); 882 + 883 + spin_lock_irq(&sh_dmae_lock); 884 + list_del_rcu(&shdev->node); 885 + spin_unlock_irq(&sh_dmae_lock); 886 + 887 + pm_runtime_disable(&pdev->dev); 888 + 889 + sh_dmae_chan_remove(shdev); 890 + shdma_cleanup(&shdev->shdma_dev); 891 + 892 + if (shdev->dmars) 893 + iounmap(shdev->dmars); 894 + iounmap(shdev->chan_reg); 895 + 896 + platform_set_drvdata(pdev, NULL); 897 + 898 + synchronize_rcu(); 899 + kfree(shdev); 900 + 901 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 902 + if (res) 903 + release_mem_region(res->start, resource_size(res)); 904 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 905 + if (res) 906 + release_mem_region(res->start, resource_size(res)); 907 + 908 + return 0; 909 + } 910 + 911 + static struct platform_driver sh_dmae_driver = { 
912 + .driver = { 913 + .owner = THIS_MODULE, 914 + .pm = &sh_dmae_pm, 915 + .name = SH_DMAE_DRV_NAME, 916 + }, 917 + .remove = __devexit_p(sh_dmae_remove), 918 + .shutdown = sh_dmae_shutdown, 919 + }; 920 + 921 + static int __init sh_dmae_init(void) 922 + { 923 + /* Wire up NMI handling */ 924 + int err = register_die_notifier(&sh_dmae_nmi_notifier); 925 + if (err) 926 + return err; 927 + 928 + return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); 929 + } 930 + module_init(sh_dmae_init); 931 + 932 + static void __exit sh_dmae_exit(void) 933 + { 934 + platform_driver_unregister(&sh_dmae_driver); 935 + 936 + unregister_die_notifier(&sh_dmae_nmi_notifier); 937 + } 938 + module_exit(sh_dmae_exit); 939 + 940 + MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); 941 + MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); 942 + MODULE_LICENSE("GPL"); 943 + MODULE_ALIAS("platform:" SH_DMAE_DRV_NAME);
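The driver takes everything hardware-specific from struct sh_dmae_pdata supplied as platform data. A rough board-side sketch of the fields it reads follows; all IDs and register values are illustrative, and the SoC-specific CHCR resource-select/transfer-size bits are deliberately omitted:

#include <linux/kernel.h>
#include <linux/sh_dma.h>

/* Sketch only: dmae_find_slave() matches on .slave_id; .addr feeds
 * sh_dmae_slave_addr(), .mid_rid is written to DMARS and .chcr to CHCR
 * in sh_dmae_setup_xfer(). */
static const struct sh_dmae_slave_config example_slaves[] = {
	{
		.slave_id = 1,			/* platform-defined slave ID (example) */
		.addr	  = 0xa454c098,		/* peripheral FIFO address (example) */
		.chcr	  = DM_FIX | SM_INC,	/* plus SoC-specific RS/TS bits */
		.mid_rid  = 0xb1,		/* example MID/RID value */
	},
};

static struct sh_dmae_pdata example_pdata = {
	.slave	   = example_slaves,
	.slave_num = ARRAY_SIZE(example_slaves),
	/* .channel, .channel_num, .ts_shift[], .dmaor_init, ... omitted */
};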
-1524
drivers/dma/shdma.c
··· 1 - /* 2 - * Renesas SuperH DMA Engine support 3 - * 4 - * base is drivers/dma/flsdma.c 5 - * 6 - * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> 7 - * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. 8 - * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 9 - * 10 - * This is free software; you can redistribute it and/or modify 11 - * it under the terms of the GNU General Public License as published by 12 - * the Free Software Foundation; either version 2 of the License, or 13 - * (at your option) any later version. 14 - * 15 - * - DMA of SuperH does not have Hardware DMA chain mode. 16 - * - MAX DMA size is 16MB. 17 - * 18 - */ 19 - 20 - #include <linux/init.h> 21 - #include <linux/module.h> 22 - #include <linux/slab.h> 23 - #include <linux/interrupt.h> 24 - #include <linux/dmaengine.h> 25 - #include <linux/delay.h> 26 - #include <linux/platform_device.h> 27 - #include <linux/pm_runtime.h> 28 - #include <linux/sh_dma.h> 29 - #include <linux/notifier.h> 30 - #include <linux/kdebug.h> 31 - #include <linux/spinlock.h> 32 - #include <linux/rculist.h> 33 - 34 - #include "dmaengine.h" 35 - #include "shdma.h" 36 - 37 - /* DMA descriptor control */ 38 - enum sh_dmae_desc_status { 39 - DESC_IDLE, 40 - DESC_PREPARED, 41 - DESC_SUBMITTED, 42 - DESC_COMPLETED, /* completed, have to call callback */ 43 - DESC_WAITING, /* callback called, waiting for ack / re-submit */ 44 - }; 45 - 46 - #define NR_DESCS_PER_CHANNEL 32 47 - /* Default MEMCPY transfer size = 2^2 = 4 bytes */ 48 - #define LOG2_DEFAULT_XFER_SIZE 2 49 - 50 - /* 51 - * Used for write-side mutual exclusion for the global device list, 52 - * read-side synchronization by way of RCU, and per-controller data. 53 - */ 54 - static DEFINE_SPINLOCK(sh_dmae_lock); 55 - static LIST_HEAD(sh_dmae_devices); 56 - 57 - /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ 58 - static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)]; 59 - 60 - static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); 61 - static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); 62 - 63 - static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data) 64 - { 65 - struct sh_dmae_device *shdev = to_sh_dev(sh_dc); 66 - 67 - __raw_writel(data, shdev->chan_reg + 68 - shdev->pdata->channel[sh_dc->id].chclr_offset); 69 - } 70 - 71 - static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) 72 - { 73 - __raw_writel(data, sh_dc->base + reg / sizeof(u32)); 74 - } 75 - 76 - static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) 77 - { 78 - return __raw_readl(sh_dc->base + reg / sizeof(u32)); 79 - } 80 - 81 - static u16 dmaor_read(struct sh_dmae_device *shdev) 82 - { 83 - u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); 84 - 85 - if (shdev->pdata->dmaor_is_32bit) 86 - return __raw_readl(addr); 87 - else 88 - return __raw_readw(addr); 89 - } 90 - 91 - static void dmaor_write(struct sh_dmae_device *shdev, u16 data) 92 - { 93 - u32 __iomem *addr = shdev->chan_reg + DMAOR / sizeof(u32); 94 - 95 - if (shdev->pdata->dmaor_is_32bit) 96 - __raw_writel(data, addr); 97 - else 98 - __raw_writew(data, addr); 99 - } 100 - 101 - static void chcr_write(struct sh_dmae_chan *sh_dc, u32 data) 102 - { 103 - struct sh_dmae_device *shdev = to_sh_dev(sh_dc); 104 - 105 - __raw_writel(data, sh_dc->base + shdev->chcr_offset / sizeof(u32)); 106 - } 107 - 108 - static u32 chcr_read(struct sh_dmae_chan *sh_dc) 109 - { 110 - struct sh_dmae_device *shdev = 
to_sh_dev(sh_dc); 111 - 112 - return __raw_readl(sh_dc->base + shdev->chcr_offset / sizeof(u32)); 113 - } 114 - 115 - /* 116 - * Reset DMA controller 117 - * 118 - * SH7780 has two DMAOR register 119 - */ 120 - static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) 121 - { 122 - unsigned short dmaor; 123 - unsigned long flags; 124 - 125 - spin_lock_irqsave(&sh_dmae_lock, flags); 126 - 127 - dmaor = dmaor_read(shdev); 128 - dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); 129 - 130 - spin_unlock_irqrestore(&sh_dmae_lock, flags); 131 - } 132 - 133 - static int sh_dmae_rst(struct sh_dmae_device *shdev) 134 - { 135 - unsigned short dmaor; 136 - unsigned long flags; 137 - 138 - spin_lock_irqsave(&sh_dmae_lock, flags); 139 - 140 - dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME); 141 - 142 - if (shdev->pdata->chclr_present) { 143 - int i; 144 - for (i = 0; i < shdev->pdata->channel_num; i++) { 145 - struct sh_dmae_chan *sh_chan = shdev->chan[i]; 146 - if (sh_chan) 147 - chclr_write(sh_chan, 0); 148 - } 149 - } 150 - 151 - dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init); 152 - 153 - dmaor = dmaor_read(shdev); 154 - 155 - spin_unlock_irqrestore(&sh_dmae_lock, flags); 156 - 157 - if (dmaor & (DMAOR_AE | DMAOR_NMIF)) { 158 - dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n"); 159 - return -EIO; 160 - } 161 - if (shdev->pdata->dmaor_init & ~dmaor) 162 - dev_warn(shdev->common.dev, 163 - "DMAOR=0x%x hasn't latched the initial value 0x%x.\n", 164 - dmaor, shdev->pdata->dmaor_init); 165 - return 0; 166 - } 167 - 168 - static bool dmae_is_busy(struct sh_dmae_chan *sh_chan) 169 - { 170 - u32 chcr = chcr_read(sh_chan); 171 - 172 - if ((chcr & (CHCR_DE | CHCR_TE)) == CHCR_DE) 173 - return true; /* working */ 174 - 175 - return false; /* waiting */ 176 - } 177 - 178 - static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) 179 - { 180 - struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 181 - struct sh_dmae_pdata *pdata = shdev->pdata; 182 - int cnt = ((chcr & pdata->ts_low_mask) >> pdata->ts_low_shift) | 183 - ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); 184 - 185 - if (cnt >= pdata->ts_shift_num) 186 - cnt = 0; 187 - 188 - return pdata->ts_shift[cnt]; 189 - } 190 - 191 - static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) 192 - { 193 - struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 194 - struct sh_dmae_pdata *pdata = shdev->pdata; 195 - int i; 196 - 197 - for (i = 0; i < pdata->ts_shift_num; i++) 198 - if (pdata->ts_shift[i] == l2size) 199 - break; 200 - 201 - if (i == pdata->ts_shift_num) 202 - i = 0; 203 - 204 - return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | 205 - ((i << pdata->ts_high_shift) & pdata->ts_high_mask); 206 - } 207 - 208 - static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) 209 - { 210 - sh_dmae_writel(sh_chan, hw->sar, SAR); 211 - sh_dmae_writel(sh_chan, hw->dar, DAR); 212 - sh_dmae_writel(sh_chan, hw->tcr >> sh_chan->xmit_shift, TCR); 213 - } 214 - 215 - static void dmae_start(struct sh_dmae_chan *sh_chan) 216 - { 217 - struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 218 - u32 chcr = chcr_read(sh_chan); 219 - 220 - if (shdev->pdata->needs_tend_set) 221 - sh_dmae_writel(sh_chan, 0xFFFFFFFF, TEND); 222 - 223 - chcr |= CHCR_DE | shdev->chcr_ie_bit; 224 - chcr_write(sh_chan, chcr & ~CHCR_TE); 225 - } 226 - 227 - static void dmae_halt(struct sh_dmae_chan *sh_chan) 228 - { 229 - struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 230 - u32 chcr = 
chcr_read(sh_chan); 231 - 232 - chcr &= ~(CHCR_DE | CHCR_TE | shdev->chcr_ie_bit); 233 - chcr_write(sh_chan, chcr); 234 - } 235 - 236 - static void dmae_init(struct sh_dmae_chan *sh_chan) 237 - { 238 - /* 239 - * Default configuration for dual address memory-memory transfer. 240 - * 0x400 represents auto-request. 241 - */ 242 - u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, 243 - LOG2_DEFAULT_XFER_SIZE); 244 - sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); 245 - chcr_write(sh_chan, chcr); 246 - } 247 - 248 - static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val) 249 - { 250 - /* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */ 251 - if (dmae_is_busy(sh_chan)) 252 - return -EBUSY; 253 - 254 - sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); 255 - chcr_write(sh_chan, val); 256 - 257 - return 0; 258 - } 259 - 260 - static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) 261 - { 262 - struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 263 - struct sh_dmae_pdata *pdata = shdev->pdata; 264 - const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; 265 - u16 __iomem *addr = shdev->dmars; 266 - unsigned int shift = chan_pdata->dmars_bit; 267 - 268 - if (dmae_is_busy(sh_chan)) 269 - return -EBUSY; 270 - 271 - if (pdata->no_dmars) 272 - return 0; 273 - 274 - /* in the case of a missing DMARS resource use first memory window */ 275 - if (!addr) 276 - addr = (u16 __iomem *)shdev->chan_reg; 277 - addr += chan_pdata->dmars / sizeof(u16); 278 - 279 - __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), 280 - addr); 281 - 282 - return 0; 283 - } 284 - 285 - static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) 286 - { 287 - struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; 288 - struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); 289 - struct sh_dmae_slave *param = tx->chan->private; 290 - dma_async_tx_callback callback = tx->callback; 291 - dma_cookie_t cookie; 292 - bool power_up; 293 - 294 - spin_lock_irq(&sh_chan->desc_lock); 295 - 296 - if (list_empty(&sh_chan->ld_queue)) 297 - power_up = true; 298 - else 299 - power_up = false; 300 - 301 - cookie = dma_cookie_assign(tx); 302 - 303 - /* Mark all chunks of this descriptor as submitted, move to the queue */ 304 - list_for_each_entry_safe(chunk, c, desc->node.prev, node) { 305 - /* 306 - * All chunks are on the global ld_free, so, we have to find 307 - * the end of the chain ourselves 308 - */ 309 - if (chunk != desc && (chunk->mark == DESC_IDLE || 310 - chunk->async_tx.cookie > 0 || 311 - chunk->async_tx.cookie == -EBUSY || 312 - &chunk->node == &sh_chan->ld_free)) 313 - break; 314 - chunk->mark = DESC_SUBMITTED; 315 - /* Callback goes to the last chunk */ 316 - chunk->async_tx.callback = NULL; 317 - chunk->cookie = cookie; 318 - list_move_tail(&chunk->node, &sh_chan->ld_queue); 319 - last = chunk; 320 - } 321 - 322 - last->async_tx.callback = callback; 323 - last->async_tx.callback_param = tx->callback_param; 324 - 325 - dev_dbg(sh_chan->dev, "submit #%d@%p on %d: %x[%d] -> %x\n", 326 - tx->cookie, &last->async_tx, sh_chan->id, 327 - desc->hw.sar, desc->hw.tcr, desc->hw.dar); 328 - 329 - if (power_up) { 330 - sh_chan->pm_state = DMAE_PM_BUSY; 331 - 332 - pm_runtime_get(sh_chan->dev); 333 - 334 - spin_unlock_irq(&sh_chan->desc_lock); 335 - 336 - pm_runtime_barrier(sh_chan->dev); 337 - 338 - spin_lock_irq(&sh_chan->desc_lock); 339 - 340 - /* Have we been reset, while waiting? 
*/ 341 - if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) { 342 - dev_dbg(sh_chan->dev, "Bring up channel %d\n", 343 - sh_chan->id); 344 - if (param) { 345 - const struct sh_dmae_slave_config *cfg = 346 - param->config; 347 - 348 - dmae_set_dmars(sh_chan, cfg->mid_rid); 349 - dmae_set_chcr(sh_chan, cfg->chcr); 350 - } else { 351 - dmae_init(sh_chan); 352 - } 353 - 354 - if (sh_chan->pm_state == DMAE_PM_PENDING) 355 - sh_chan_xfer_ld_queue(sh_chan); 356 - sh_chan->pm_state = DMAE_PM_ESTABLISHED; 357 - } 358 - } else { 359 - sh_chan->pm_state = DMAE_PM_PENDING; 360 - } 361 - 362 - spin_unlock_irq(&sh_chan->desc_lock); 363 - 364 - return cookie; 365 - } 366 - 367 - /* Called with desc_lock held */ 368 - static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan) 369 - { 370 - struct sh_desc *desc; 371 - 372 - list_for_each_entry(desc, &sh_chan->ld_free, node) 373 - if (desc->mark != DESC_PREPARED) { 374 - BUG_ON(desc->mark != DESC_IDLE); 375 - list_del(&desc->node); 376 - return desc; 377 - } 378 - 379 - return NULL; 380 - } 381 - 382 - static const struct sh_dmae_slave_config *sh_dmae_find_slave( 383 - struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param) 384 - { 385 - struct sh_dmae_device *shdev = to_sh_dev(sh_chan); 386 - struct sh_dmae_pdata *pdata = shdev->pdata; 387 - int i; 388 - 389 - if (param->slave_id >= SH_DMA_SLAVE_NUMBER) 390 - return NULL; 391 - 392 - for (i = 0; i < pdata->slave_num; i++) 393 - if (pdata->slave[i].slave_id == param->slave_id) 394 - return pdata->slave + i; 395 - 396 - return NULL; 397 - } 398 - 399 - static int sh_dmae_alloc_chan_resources(struct dma_chan *chan) 400 - { 401 - struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 402 - struct sh_desc *desc; 403 - struct sh_dmae_slave *param = chan->private; 404 - int ret; 405 - 406 - /* 407 - * This relies on the guarantee from dmaengine that alloc_chan_resources 408 - * never runs concurrently with itself or free_chan_resources. 409 - */ 410 - if (param) { 411 - const struct sh_dmae_slave_config *cfg; 412 - 413 - cfg = sh_dmae_find_slave(sh_chan, param); 414 - if (!cfg) { 415 - ret = -EINVAL; 416 - goto efindslave; 417 - } 418 - 419 - if (test_and_set_bit(param->slave_id, sh_dmae_slave_used)) { 420 - ret = -EBUSY; 421 - goto etestused; 422 - } 423 - 424 - param->config = cfg; 425 - } 426 - 427 - while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { 428 - desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); 429 - if (!desc) 430 - break; 431 - dma_async_tx_descriptor_init(&desc->async_tx, 432 - &sh_chan->common); 433 - desc->async_tx.tx_submit = sh_dmae_tx_submit; 434 - desc->mark = DESC_IDLE; 435 - 436 - list_add(&desc->node, &sh_chan->ld_free); 437 - sh_chan->descs_allocated++; 438 - } 439 - 440 - if (!sh_chan->descs_allocated) { 441 - ret = -ENOMEM; 442 - goto edescalloc; 443 - } 444 - 445 - return sh_chan->descs_allocated; 446 - 447 - edescalloc: 448 - if (param) 449 - clear_bit(param->slave_id, sh_dmae_slave_used); 450 - etestused: 451 - efindslave: 452 - chan->private = NULL; 453 - return ret; 454 - } 455 - 456 - /* 457 - * sh_dma_free_chan_resources - Free all resources of the channel. 
458 - */ 459 - static void sh_dmae_free_chan_resources(struct dma_chan *chan) 460 - { 461 - struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 462 - struct sh_desc *desc, *_desc; 463 - LIST_HEAD(list); 464 - 465 - /* Protect against ISR */ 466 - spin_lock_irq(&sh_chan->desc_lock); 467 - dmae_halt(sh_chan); 468 - spin_unlock_irq(&sh_chan->desc_lock); 469 - 470 - /* Now no new interrupts will occur */ 471 - 472 - /* Prepared and not submitted descriptors can still be on the queue */ 473 - if (!list_empty(&sh_chan->ld_queue)) 474 - sh_dmae_chan_ld_cleanup(sh_chan, true); 475 - 476 - if (chan->private) { 477 - /* The caller is holding dma_list_mutex */ 478 - struct sh_dmae_slave *param = chan->private; 479 - clear_bit(param->slave_id, sh_dmae_slave_used); 480 - chan->private = NULL; 481 - } 482 - 483 - spin_lock_irq(&sh_chan->desc_lock); 484 - 485 - list_splice_init(&sh_chan->ld_free, &list); 486 - sh_chan->descs_allocated = 0; 487 - 488 - spin_unlock_irq(&sh_chan->desc_lock); 489 - 490 - list_for_each_entry_safe(desc, _desc, &list, node) 491 - kfree(desc); 492 - } 493 - 494 - /** 495 - * sh_dmae_add_desc - get, set up and return one transfer descriptor 496 - * @sh_chan: DMA channel 497 - * @flags: DMA transfer flags 498 - * @dest: destination DMA address, incremented when direction equals 499 - * DMA_DEV_TO_MEM 500 - * @src: source DMA address, incremented when direction equals 501 - * DMA_MEM_TO_DEV 502 - * @len: DMA transfer length 503 - * @first: if NULL, set to the current descriptor and cookie set to -EBUSY 504 - * @direction: needed for slave DMA to decide which address to keep constant, 505 - * equals DMA_MEM_TO_MEM for MEMCPY 506 - * Returns 0 or an error 507 - * Locks: called with desc_lock held 508 - */ 509 - static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan, 510 - unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len, 511 - struct sh_desc **first, enum dma_transfer_direction direction) 512 - { 513 - struct sh_desc *new; 514 - size_t copy_size; 515 - 516 - if (!*len) 517 - return NULL; 518 - 519 - /* Allocate the link descriptor from the free list */ 520 - new = sh_dmae_get_desc(sh_chan); 521 - if (!new) { 522 - dev_err(sh_chan->dev, "No free link descriptor available\n"); 523 - return NULL; 524 - } 525 - 526 - copy_size = min(*len, (size_t)SH_DMA_TCR_MAX + 1); 527 - 528 - new->hw.sar = *src; 529 - new->hw.dar = *dest; 530 - new->hw.tcr = copy_size; 531 - 532 - if (!*first) { 533 - /* First desc */ 534 - new->async_tx.cookie = -EBUSY; 535 - *first = new; 536 - } else { 537 - /* Other desc - invisible to the user */ 538 - new->async_tx.cookie = -EINVAL; 539 - } 540 - 541 - dev_dbg(sh_chan->dev, 542 - "chaining (%u/%u)@%x -> %x with %p, cookie %d, shift %d\n", 543 - copy_size, *len, *src, *dest, &new->async_tx, 544 - new->async_tx.cookie, sh_chan->xmit_shift); 545 - 546 - new->mark = DESC_PREPARED; 547 - new->async_tx.flags = flags; 548 - new->direction = direction; 549 - 550 - *len -= copy_size; 551 - if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV) 552 - *src += copy_size; 553 - if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM) 554 - *dest += copy_size; 555 - 556 - return new; 557 - } 558 - 559 - /* 560 - * sh_dmae_prep_sg - prepare transfer descriptors from an SG list 561 - * 562 - * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also 563 - * converted to scatter-gather to guarantee consistent locking and a correct 564 - * list manipulation. 
For slave DMA direction carries the usual meaning, and, 565 - * logically, the SG list is RAM and the addr variable contains slave address, 566 - * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM 567 - * and the SG list contains only one element and points at the source buffer. 568 - */ 569 - static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan, 570 - struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr, 571 - enum dma_transfer_direction direction, unsigned long flags) 572 - { 573 - struct scatterlist *sg; 574 - struct sh_desc *first = NULL, *new = NULL /* compiler... */; 575 - LIST_HEAD(tx_list); 576 - int chunks = 0; 577 - unsigned long irq_flags; 578 - int i; 579 - 580 - if (!sg_len) 581 - return NULL; 582 - 583 - for_each_sg(sgl, sg, sg_len, i) 584 - chunks += (sg_dma_len(sg) + SH_DMA_TCR_MAX) / 585 - (SH_DMA_TCR_MAX + 1); 586 - 587 - /* Have to lock the whole loop to protect against concurrent release */ 588 - spin_lock_irqsave(&sh_chan->desc_lock, irq_flags); 589 - 590 - /* 591 - * Chaining: 592 - * first descriptor is what user is dealing with in all API calls, its 593 - * cookie is at first set to -EBUSY, at tx-submit to a positive 594 - * number 595 - * if more than one chunk is needed further chunks have cookie = -EINVAL 596 - * the last chunk, if not equal to the first, has cookie = -ENOSPC 597 - * all chunks are linked onto the tx_list head with their .node heads 598 - * only during this function, then they are immediately spliced 599 - * back onto the free list in form of a chain 600 - */ 601 - for_each_sg(sgl, sg, sg_len, i) { 602 - dma_addr_t sg_addr = sg_dma_address(sg); 603 - size_t len = sg_dma_len(sg); 604 - 605 - if (!len) 606 - goto err_get_desc; 607 - 608 - do { 609 - dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n", 610 - i, sg, len, (unsigned long long)sg_addr); 611 - 612 - if (direction == DMA_DEV_TO_MEM) 613 - new = sh_dmae_add_desc(sh_chan, flags, 614 - &sg_addr, addr, &len, &first, 615 - direction); 616 - else 617 - new = sh_dmae_add_desc(sh_chan, flags, 618 - addr, &sg_addr, &len, &first, 619 - direction); 620 - if (!new) 621 - goto err_get_desc; 622 - 623 - new->chunks = chunks--; 624 - list_add_tail(&new->node, &tx_list); 625 - } while (len); 626 - } 627 - 628 - if (new != first) 629 - new->async_tx.cookie = -ENOSPC; 630 - 631 - /* Put them back on the free list, so, they don't get lost */ 632 - list_splice_tail(&tx_list, &sh_chan->ld_free); 633 - 634 - spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); 635 - 636 - return &first->async_tx; 637 - 638 - err_get_desc: 639 - list_for_each_entry(new, &tx_list, node) 640 - new->mark = DESC_IDLE; 641 - list_splice(&tx_list, &sh_chan->ld_free); 642 - 643 - spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); 644 - 645 - return NULL; 646 - } 647 - 648 - static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy( 649 - struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src, 650 - size_t len, unsigned long flags) 651 - { 652 - struct sh_dmae_chan *sh_chan; 653 - struct scatterlist sg; 654 - 655 - if (!chan || !len) 656 - return NULL; 657 - 658 - sh_chan = to_sh_chan(chan); 659 - 660 - sg_init_table(&sg, 1); 661 - sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_src)), len, 662 - offset_in_page(dma_src)); 663 - sg_dma_address(&sg) = dma_src; 664 - sg_dma_len(&sg) = len; 665 - 666 - return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM, 667 - flags); 668 - } 669 - 670 - static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg( 
671 - struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, 672 - enum dma_transfer_direction direction, unsigned long flags, 673 - void *context) 674 - { 675 - struct sh_dmae_slave *param; 676 - struct sh_dmae_chan *sh_chan; 677 - dma_addr_t slave_addr; 678 - 679 - if (!chan) 680 - return NULL; 681 - 682 - sh_chan = to_sh_chan(chan); 683 - param = chan->private; 684 - 685 - /* Someone calling slave DMA on a public channel? */ 686 - if (!param || !sg_len) { 687 - dev_warn(sh_chan->dev, "%s: bad parameter: %p, %d, %d\n", 688 - __func__, param, sg_len, param ? param->slave_id : -1); 689 - return NULL; 690 - } 691 - 692 - slave_addr = param->config->addr; 693 - 694 - /* 695 - * if (param != NULL), this is a successfully requested slave channel, 696 - * therefore param->config != NULL too. 697 - */ 698 - return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr, 699 - direction, flags); 700 - } 701 - 702 - static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 703 - unsigned long arg) 704 - { 705 - struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 706 - unsigned long flags; 707 - 708 - /* Only supports DMA_TERMINATE_ALL */ 709 - if (cmd != DMA_TERMINATE_ALL) 710 - return -ENXIO; 711 - 712 - if (!chan) 713 - return -EINVAL; 714 - 715 - spin_lock_irqsave(&sh_chan->desc_lock, flags); 716 - dmae_halt(sh_chan); 717 - 718 - if (!list_empty(&sh_chan->ld_queue)) { 719 - /* Record partial transfer */ 720 - struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, 721 - struct sh_desc, node); 722 - desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << 723 - sh_chan->xmit_shift; 724 - } 725 - spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 726 - 727 - sh_dmae_chan_ld_cleanup(sh_chan, true); 728 - 729 - return 0; 730 - } 731 - 732 - static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) 733 - { 734 - struct sh_desc *desc, *_desc; 735 - /* Is the "exposed" head of a chain acked? 
*/ 736 - bool head_acked = false; 737 - dma_cookie_t cookie = 0; 738 - dma_async_tx_callback callback = NULL; 739 - void *param = NULL; 740 - unsigned long flags; 741 - 742 - spin_lock_irqsave(&sh_chan->desc_lock, flags); 743 - list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { 744 - struct dma_async_tx_descriptor *tx = &desc->async_tx; 745 - 746 - BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); 747 - BUG_ON(desc->mark != DESC_SUBMITTED && 748 - desc->mark != DESC_COMPLETED && 749 - desc->mark != DESC_WAITING); 750 - 751 - /* 752 - * queue is ordered, and we use this loop to (1) clean up all 753 - * completed descriptors, and to (2) update descriptor flags of 754 - * any chunks in a (partially) completed chain 755 - */ 756 - if (!all && desc->mark == DESC_SUBMITTED && 757 - desc->cookie != cookie) 758 - break; 759 - 760 - if (tx->cookie > 0) 761 - cookie = tx->cookie; 762 - 763 - if (desc->mark == DESC_COMPLETED && desc->chunks == 1) { 764 - if (sh_chan->common.completed_cookie != desc->cookie - 1) 765 - dev_dbg(sh_chan->dev, 766 - "Completing cookie %d, expected %d\n", 767 - desc->cookie, 768 - sh_chan->common.completed_cookie + 1); 769 - sh_chan->common.completed_cookie = desc->cookie; 770 - } 771 - 772 - /* Call callback on the last chunk */ 773 - if (desc->mark == DESC_COMPLETED && tx->callback) { 774 - desc->mark = DESC_WAITING; 775 - callback = tx->callback; 776 - param = tx->callback_param; 777 - dev_dbg(sh_chan->dev, "descriptor #%d@%p on %d callback\n", 778 - tx->cookie, tx, sh_chan->id); 779 - BUG_ON(desc->chunks != 1); 780 - break; 781 - } 782 - 783 - if (tx->cookie > 0 || tx->cookie == -EBUSY) { 784 - if (desc->mark == DESC_COMPLETED) { 785 - BUG_ON(tx->cookie < 0); 786 - desc->mark = DESC_WAITING; 787 - } 788 - head_acked = async_tx_test_ack(tx); 789 - } else { 790 - switch (desc->mark) { 791 - case DESC_COMPLETED: 792 - desc->mark = DESC_WAITING; 793 - /* Fall through */ 794 - case DESC_WAITING: 795 - if (head_acked) 796 - async_tx_ack(&desc->async_tx); 797 - } 798 - } 799 - 800 - dev_dbg(sh_chan->dev, "descriptor %p #%d completed.\n", 801 - tx, tx->cookie); 802 - 803 - if (((desc->mark == DESC_COMPLETED || 804 - desc->mark == DESC_WAITING) && 805 - async_tx_test_ack(&desc->async_tx)) || all) { 806 - /* Remove from ld_queue list */ 807 - desc->mark = DESC_IDLE; 808 - 809 - list_move(&desc->node, &sh_chan->ld_free); 810 - 811 - if (list_empty(&sh_chan->ld_queue)) { 812 - dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); 813 - pm_runtime_put(sh_chan->dev); 814 - } 815 - } 816 - } 817 - 818 - if (all && !callback) 819 - /* 820 - * Terminating and the loop completed normally: forgive 821 - * uncompleted cookies 822 - */ 823 - sh_chan->common.completed_cookie = sh_chan->common.cookie; 824 - 825 - spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 826 - 827 - if (callback) 828 - callback(param); 829 - 830 - return callback; 831 - } 832 - 833 - /* 834 - * sh_chan_ld_cleanup - Clean up link descriptors 835 - * 836 - * This function cleans up the ld_queue of DMA channel. 
837 - */ 838 - static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) 839 - { 840 - while (__ld_cleanup(sh_chan, all)) 841 - ; 842 - } 843 - 844 - /* Called under spin_lock_irq(&sh_chan->desc_lock) */ 845 - static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 846 - { 847 - struct sh_desc *desc; 848 - 849 - /* DMA work check */ 850 - if (dmae_is_busy(sh_chan)) 851 - return; 852 - 853 - /* Find the first not transferred descriptor */ 854 - list_for_each_entry(desc, &sh_chan->ld_queue, node) 855 - if (desc->mark == DESC_SUBMITTED) { 856 - dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", 857 - desc->async_tx.cookie, sh_chan->id, 858 - desc->hw.tcr, desc->hw.sar, desc->hw.dar); 859 - /* Get the ld start address from ld_queue */ 860 - dmae_set_reg(sh_chan, &desc->hw); 861 - dmae_start(sh_chan); 862 - break; 863 - } 864 - } 865 - 866 - static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) 867 - { 868 - struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 869 - 870 - spin_lock_irq(&sh_chan->desc_lock); 871 - if (sh_chan->pm_state == DMAE_PM_ESTABLISHED) 872 - sh_chan_xfer_ld_queue(sh_chan); 873 - else 874 - sh_chan->pm_state = DMAE_PM_PENDING; 875 - spin_unlock_irq(&sh_chan->desc_lock); 876 - } 877 - 878 - static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, 879 - dma_cookie_t cookie, 880 - struct dma_tx_state *txstate) 881 - { 882 - struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 883 - enum dma_status status; 884 - unsigned long flags; 885 - 886 - sh_dmae_chan_ld_cleanup(sh_chan, false); 887 - 888 - spin_lock_irqsave(&sh_chan->desc_lock, flags); 889 - 890 - status = dma_cookie_status(chan, cookie, txstate); 891 - 892 - /* 893 - * If we don't find cookie on the queue, it has been aborted and we have 894 - * to report error 895 - */ 896 - if (status != DMA_SUCCESS) { 897 - struct sh_desc *desc; 898 - status = DMA_ERROR; 899 - list_for_each_entry(desc, &sh_chan->ld_queue, node) 900 - if (desc->cookie == cookie) { 901 - status = DMA_IN_PROGRESS; 902 - break; 903 - } 904 - } 905 - 906 - spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 907 - 908 - return status; 909 - } 910 - 911 - static irqreturn_t sh_dmae_interrupt(int irq, void *data) 912 - { 913 - irqreturn_t ret = IRQ_NONE; 914 - struct sh_dmae_chan *sh_chan = data; 915 - u32 chcr; 916 - 917 - spin_lock(&sh_chan->desc_lock); 918 - 919 - chcr = chcr_read(sh_chan); 920 - 921 - if (chcr & CHCR_TE) { 922 - /* DMA stop */ 923 - dmae_halt(sh_chan); 924 - 925 - ret = IRQ_HANDLED; 926 - tasklet_schedule(&sh_chan->tasklet); 927 - } 928 - 929 - spin_unlock(&sh_chan->desc_lock); 930 - 931 - return ret; 932 - } 933 - 934 - /* Called from error IRQ or NMI */ 935 - static bool sh_dmae_reset(struct sh_dmae_device *shdev) 936 - { 937 - unsigned int handled = 0; 938 - int i; 939 - 940 - /* halt the dma controller */ 941 - sh_dmae_ctl_stop(shdev); 942 - 943 - /* We cannot detect, which channel caused the error, have to reset all */ 944 - for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { 945 - struct sh_dmae_chan *sh_chan = shdev->chan[i]; 946 - struct sh_desc *desc; 947 - LIST_HEAD(dl); 948 - 949 - if (!sh_chan) 950 - continue; 951 - 952 - spin_lock(&sh_chan->desc_lock); 953 - 954 - /* Stop the channel */ 955 - dmae_halt(sh_chan); 956 - 957 - list_splice_init(&sh_chan->ld_queue, &dl); 958 - 959 - if (!list_empty(&dl)) { 960 - dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); 961 - pm_runtime_put(sh_chan->dev); 962 - } 963 - sh_chan->pm_state = DMAE_PM_ESTABLISHED; 964 - 965 - 
spin_unlock(&sh_chan->desc_lock); 966 - 967 - /* Complete all */ 968 - list_for_each_entry(desc, &dl, node) { 969 - struct dma_async_tx_descriptor *tx = &desc->async_tx; 970 - desc->mark = DESC_IDLE; 971 - if (tx->callback) 972 - tx->callback(tx->callback_param); 973 - } 974 - 975 - spin_lock(&sh_chan->desc_lock); 976 - list_splice(&dl, &sh_chan->ld_free); 977 - spin_unlock(&sh_chan->desc_lock); 978 - 979 - handled++; 980 - } 981 - 982 - sh_dmae_rst(shdev); 983 - 984 - return !!handled; 985 - } 986 - 987 - static irqreturn_t sh_dmae_err(int irq, void *data) 988 - { 989 - struct sh_dmae_device *shdev = data; 990 - 991 - if (!(dmaor_read(shdev) & DMAOR_AE)) 992 - return IRQ_NONE; 993 - 994 - sh_dmae_reset(data); 995 - return IRQ_HANDLED; 996 - } 997 - 998 - static void dmae_do_tasklet(unsigned long data) 999 - { 1000 - struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data; 1001 - struct sh_desc *desc; 1002 - u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 1003 - u32 dar_buf = sh_dmae_readl(sh_chan, DAR); 1004 - 1005 - spin_lock_irq(&sh_chan->desc_lock); 1006 - list_for_each_entry(desc, &sh_chan->ld_queue, node) { 1007 - if (desc->mark == DESC_SUBMITTED && 1008 - ((desc->direction == DMA_DEV_TO_MEM && 1009 - (desc->hw.dar + desc->hw.tcr) == dar_buf) || 1010 - (desc->hw.sar + desc->hw.tcr) == sar_buf)) { 1011 - dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n", 1012 - desc->async_tx.cookie, &desc->async_tx, 1013 - desc->hw.dar); 1014 - desc->mark = DESC_COMPLETED; 1015 - break; 1016 - } 1017 - } 1018 - /* Next desc */ 1019 - sh_chan_xfer_ld_queue(sh_chan); 1020 - spin_unlock_irq(&sh_chan->desc_lock); 1021 - 1022 - sh_dmae_chan_ld_cleanup(sh_chan, false); 1023 - } 1024 - 1025 - static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev) 1026 - { 1027 - /* Fast path out if NMIF is not asserted for this controller */ 1028 - if ((dmaor_read(shdev) & DMAOR_NMIF) == 0) 1029 - return false; 1030 - 1031 - return sh_dmae_reset(shdev); 1032 - } 1033 - 1034 - static int sh_dmae_nmi_handler(struct notifier_block *self, 1035 - unsigned long cmd, void *data) 1036 - { 1037 - struct sh_dmae_device *shdev; 1038 - int ret = NOTIFY_DONE; 1039 - bool triggered; 1040 - 1041 - /* 1042 - * Only concern ourselves with NMI events. 1043 - * 1044 - * Normally we would check the die chain value, but as this needs 1045 - * to be architecture independent, check for NMI context instead. 1046 - */ 1047 - if (!in_nmi()) 1048 - return NOTIFY_DONE; 1049 - 1050 - rcu_read_lock(); 1051 - list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) { 1052 - /* 1053 - * Only stop if one of the controllers has NMIF asserted, 1054 - * we do not want to interfere with regular address error 1055 - * handling or NMI events that don't concern the DMACs. 
1056 - */ 1057 - triggered = sh_dmae_nmi_notify(shdev); 1058 - if (triggered == true) 1059 - ret = NOTIFY_OK; 1060 - } 1061 - rcu_read_unlock(); 1062 - 1063 - return ret; 1064 - } 1065 - 1066 - static struct notifier_block sh_dmae_nmi_notifier __read_mostly = { 1067 - .notifier_call = sh_dmae_nmi_handler, 1068 - 1069 - /* Run before NMI debug handler and KGDB */ 1070 - .priority = 1, 1071 - }; 1072 - 1073 - static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, 1074 - int irq, unsigned long flags) 1075 - { 1076 - int err; 1077 - const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; 1078 - struct platform_device *pdev = to_platform_device(shdev->common.dev); 1079 - struct sh_dmae_chan *new_sh_chan; 1080 - 1081 - /* alloc channel */ 1082 - new_sh_chan = kzalloc(sizeof(struct sh_dmae_chan), GFP_KERNEL); 1083 - if (!new_sh_chan) { 1084 - dev_err(shdev->common.dev, 1085 - "No free memory for allocating dma channels!\n"); 1086 - return -ENOMEM; 1087 - } 1088 - 1089 - new_sh_chan->pm_state = DMAE_PM_ESTABLISHED; 1090 - 1091 - /* reference struct dma_device */ 1092 - new_sh_chan->common.device = &shdev->common; 1093 - dma_cookie_init(&new_sh_chan->common); 1094 - 1095 - new_sh_chan->dev = shdev->common.dev; 1096 - new_sh_chan->id = id; 1097 - new_sh_chan->irq = irq; 1098 - new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); 1099 - 1100 - /* Init DMA tasklet */ 1101 - tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, 1102 - (unsigned long)new_sh_chan); 1103 - 1104 - spin_lock_init(&new_sh_chan->desc_lock); 1105 - 1106 - /* Init descripter manage list */ 1107 - INIT_LIST_HEAD(&new_sh_chan->ld_queue); 1108 - INIT_LIST_HEAD(&new_sh_chan->ld_free); 1109 - 1110 - /* Add the channel to DMA device channel list */ 1111 - list_add_tail(&new_sh_chan->common.device_node, 1112 - &shdev->common.channels); 1113 - shdev->common.chancnt++; 1114 - 1115 - if (pdev->id >= 0) 1116 - snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), 1117 - "sh-dmae%d.%d", pdev->id, new_sh_chan->id); 1118 - else 1119 - snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), 1120 - "sh-dma%d", new_sh_chan->id); 1121 - 1122 - /* set up channel irq */ 1123 - err = request_irq(irq, &sh_dmae_interrupt, flags, 1124 - new_sh_chan->dev_id, new_sh_chan); 1125 - if (err) { 1126 - dev_err(shdev->common.dev, "DMA channel %d request_irq error " 1127 - "with return %d\n", id, err); 1128 - goto err_no_irq; 1129 - } 1130 - 1131 - shdev->chan[id] = new_sh_chan; 1132 - return 0; 1133 - 1134 - err_no_irq: 1135 - /* remove from dmaengine device node */ 1136 - list_del(&new_sh_chan->common.device_node); 1137 - kfree(new_sh_chan); 1138 - return err; 1139 - } 1140 - 1141 - static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) 1142 - { 1143 - int i; 1144 - 1145 - for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { 1146 - if (shdev->chan[i]) { 1147 - struct sh_dmae_chan *sh_chan = shdev->chan[i]; 1148 - 1149 - free_irq(sh_chan->irq, sh_chan); 1150 - 1151 - list_del(&sh_chan->common.device_node); 1152 - kfree(sh_chan); 1153 - shdev->chan[i] = NULL; 1154 - } 1155 - } 1156 - shdev->common.chancnt = 0; 1157 - } 1158 - 1159 - static int __init sh_dmae_probe(struct platform_device *pdev) 1160 - { 1161 - struct sh_dmae_pdata *pdata = pdev->dev.platform_data; 1162 - unsigned long irqflags = IRQF_DISABLED, 1163 - chan_flag[SH_DMAC_MAX_CHANNELS] = {}; 1164 - int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; 1165 - int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0; 1166 - struct sh_dmae_device 
*shdev; 1167 - struct resource *chan, *dmars, *errirq_res, *chanirq_res; 1168 - 1169 - /* get platform data */ 1170 - if (!pdata || !pdata->channel_num) 1171 - return -ENODEV; 1172 - 1173 - chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1174 - /* DMARS area is optional */ 1175 - dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1176 - /* 1177 - * IRQ resources: 1178 - * 1. there always must be at least one IRQ IO-resource. On SH4 it is 1179 - * the error IRQ, in which case it is the only IRQ in this resource: 1180 - * start == end. If it is the only IRQ resource, all channels also 1181 - * use the same IRQ. 1182 - * 2. DMA channel IRQ resources can be specified one per resource or in 1183 - * ranges (start != end) 1184 - * 3. iff all events (channels and, optionally, error) on this 1185 - * controller use the same IRQ, only one IRQ resource can be 1186 - * specified, otherwise there must be one IRQ per channel, even if 1187 - * some of them are equal 1188 - * 4. if all IRQs on this controller are equal or if some specific IRQs 1189 - * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be 1190 - * requested with the IRQF_SHARED flag 1191 - */ 1192 - errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1193 - if (!chan || !errirq_res) 1194 - return -ENODEV; 1195 - 1196 - if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { 1197 - dev_err(&pdev->dev, "DMAC register region already claimed\n"); 1198 - return -EBUSY; 1199 - } 1200 - 1201 - if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { 1202 - dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); 1203 - err = -EBUSY; 1204 - goto ermrdmars; 1205 - } 1206 - 1207 - err = -ENOMEM; 1208 - shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); 1209 - if (!shdev) { 1210 - dev_err(&pdev->dev, "Not enough memory\n"); 1211 - goto ealloc; 1212 - } 1213 - 1214 - shdev->chan_reg = ioremap(chan->start, resource_size(chan)); 1215 - if (!shdev->chan_reg) 1216 - goto emapchan; 1217 - if (dmars) { 1218 - shdev->dmars = ioremap(dmars->start, resource_size(dmars)); 1219 - if (!shdev->dmars) 1220 - goto emapdmars; 1221 - } 1222 - 1223 - /* platform data */ 1224 - shdev->pdata = pdata; 1225 - 1226 - if (pdata->chcr_offset) 1227 - shdev->chcr_offset = pdata->chcr_offset; 1228 - else 1229 - shdev->chcr_offset = CHCR; 1230 - 1231 - if (pdata->chcr_ie_bit) 1232 - shdev->chcr_ie_bit = pdata->chcr_ie_bit; 1233 - else 1234 - shdev->chcr_ie_bit = CHCR_IE; 1235 - 1236 - platform_set_drvdata(pdev, shdev); 1237 - 1238 - shdev->common.dev = &pdev->dev; 1239 - 1240 - pm_runtime_enable(&pdev->dev); 1241 - pm_runtime_get_sync(&pdev->dev); 1242 - 1243 - spin_lock_irq(&sh_dmae_lock); 1244 - list_add_tail_rcu(&shdev->node, &sh_dmae_devices); 1245 - spin_unlock_irq(&sh_dmae_lock); 1246 - 1247 - /* reset dma controller - only needed as a test */ 1248 - err = sh_dmae_rst(shdev); 1249 - if (err) 1250 - goto rst_err; 1251 - 1252 - INIT_LIST_HEAD(&shdev->common.channels); 1253 - 1254 - if (!pdata->slave_only) 1255 - dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); 1256 - if (pdata->slave && pdata->slave_num) 1257 - dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); 1258 - 1259 - shdev->common.device_alloc_chan_resources 1260 - = sh_dmae_alloc_chan_resources; 1261 - shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; 1262 - shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; 1263 - shdev->common.device_tx_status = sh_dmae_tx_status; 1264 - 
shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; 1265 - 1266 - /* Compulsory for DMA_SLAVE fields */ 1267 - shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; 1268 - shdev->common.device_control = sh_dmae_control; 1269 - 1270 - /* Default transfer size of 32 bytes requires 32-byte alignment */ 1271 - shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; 1272 - 1273 - #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 1274 - chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 1275 - 1276 - if (!chanirq_res) 1277 - chanirq_res = errirq_res; 1278 - else 1279 - irqres++; 1280 - 1281 - if (chanirq_res == errirq_res || 1282 - (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) 1283 - irqflags = IRQF_SHARED; 1284 - 1285 - errirq = errirq_res->start; 1286 - 1287 - err = request_irq(errirq, sh_dmae_err, irqflags, 1288 - "DMAC Address Error", shdev); 1289 - if (err) { 1290 - dev_err(&pdev->dev, 1291 - "DMA failed requesting irq #%d, error %d\n", 1292 - errirq, err); 1293 - goto eirq_err; 1294 - } 1295 - 1296 - #else 1297 - chanirq_res = errirq_res; 1298 - #endif /* CONFIG_CPU_SH4 || CONFIG_ARCH_SHMOBILE */ 1299 - 1300 - if (chanirq_res->start == chanirq_res->end && 1301 - !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { 1302 - /* Special case - all multiplexed */ 1303 - for (; irq_cnt < pdata->channel_num; irq_cnt++) { 1304 - if (irq_cnt < SH_DMAC_MAX_CHANNELS) { 1305 - chan_irq[irq_cnt] = chanirq_res->start; 1306 - chan_flag[irq_cnt] = IRQF_SHARED; 1307 - } else { 1308 - irq_cap = 1; 1309 - break; 1310 - } 1311 - } 1312 - } else { 1313 - do { 1314 - for (i = chanirq_res->start; i <= chanirq_res->end; i++) { 1315 - if (irq_cnt >= SH_DMAC_MAX_CHANNELS) { 1316 - irq_cap = 1; 1317 - break; 1318 - } 1319 - 1320 - if ((errirq_res->flags & IORESOURCE_BITS) == 1321 - IORESOURCE_IRQ_SHAREABLE) 1322 - chan_flag[irq_cnt] = IRQF_SHARED; 1323 - else 1324 - chan_flag[irq_cnt] = IRQF_DISABLED; 1325 - dev_dbg(&pdev->dev, 1326 - "Found IRQ %d for channel %d\n", 1327 - i, irq_cnt); 1328 - chan_irq[irq_cnt++] = i; 1329 - } 1330 - 1331 - if (irq_cnt >= SH_DMAC_MAX_CHANNELS) 1332 - break; 1333 - 1334 - chanirq_res = platform_get_resource(pdev, 1335 - IORESOURCE_IRQ, ++irqres); 1336 - } while (irq_cnt < pdata->channel_num && chanirq_res); 1337 - } 1338 - 1339 - /* Create DMA Channel */ 1340 - for (i = 0; i < irq_cnt; i++) { 1341 - err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); 1342 - if (err) 1343 - goto chan_probe_err; 1344 - } 1345 - 1346 - if (irq_cap) 1347 - dev_notice(&pdev->dev, "Attempting to register %d DMA " 1348 - "channels when a maximum of %d are supported.\n", 1349 - pdata->channel_num, SH_DMAC_MAX_CHANNELS); 1350 - 1351 - pm_runtime_put(&pdev->dev); 1352 - 1353 - dma_async_device_register(&shdev->common); 1354 - 1355 - return err; 1356 - 1357 - chan_probe_err: 1358 - sh_dmae_chan_remove(shdev); 1359 - 1360 - #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 1361 - free_irq(errirq, shdev); 1362 - eirq_err: 1363 - #endif 1364 - rst_err: 1365 - spin_lock_irq(&sh_dmae_lock); 1366 - list_del_rcu(&shdev->node); 1367 - spin_unlock_irq(&sh_dmae_lock); 1368 - 1369 - pm_runtime_put(&pdev->dev); 1370 - pm_runtime_disable(&pdev->dev); 1371 - 1372 - if (dmars) 1373 - iounmap(shdev->dmars); 1374 - 1375 - platform_set_drvdata(pdev, NULL); 1376 - emapdmars: 1377 - iounmap(shdev->chan_reg); 1378 - synchronize_rcu(); 1379 - emapchan: 1380 - kfree(shdev); 1381 - ealloc: 1382 - if (dmars) 1383 - release_mem_region(dmars->start, 
resource_size(dmars)); 1384 - ermrdmars: 1385 - release_mem_region(chan->start, resource_size(chan)); 1386 - 1387 - return err; 1388 - } 1389 - 1390 - static int __exit sh_dmae_remove(struct platform_device *pdev) 1391 - { 1392 - struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 1393 - struct resource *res; 1394 - int errirq = platform_get_irq(pdev, 0); 1395 - 1396 - dma_async_device_unregister(&shdev->common); 1397 - 1398 - if (errirq > 0) 1399 - free_irq(errirq, shdev); 1400 - 1401 - spin_lock_irq(&sh_dmae_lock); 1402 - list_del_rcu(&shdev->node); 1403 - spin_unlock_irq(&sh_dmae_lock); 1404 - 1405 - /* channel data remove */ 1406 - sh_dmae_chan_remove(shdev); 1407 - 1408 - pm_runtime_disable(&pdev->dev); 1409 - 1410 - if (shdev->dmars) 1411 - iounmap(shdev->dmars); 1412 - iounmap(shdev->chan_reg); 1413 - 1414 - platform_set_drvdata(pdev, NULL); 1415 - 1416 - synchronize_rcu(); 1417 - kfree(shdev); 1418 - 1419 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1420 - if (res) 1421 - release_mem_region(res->start, resource_size(res)); 1422 - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1423 - if (res) 1424 - release_mem_region(res->start, resource_size(res)); 1425 - 1426 - return 0; 1427 - } 1428 - 1429 - static void sh_dmae_shutdown(struct platform_device *pdev) 1430 - { 1431 - struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 1432 - sh_dmae_ctl_stop(shdev); 1433 - } 1434 - 1435 - static int sh_dmae_runtime_suspend(struct device *dev) 1436 - { 1437 - return 0; 1438 - } 1439 - 1440 - static int sh_dmae_runtime_resume(struct device *dev) 1441 - { 1442 - struct sh_dmae_device *shdev = dev_get_drvdata(dev); 1443 - 1444 - return sh_dmae_rst(shdev); 1445 - } 1446 - 1447 - #ifdef CONFIG_PM 1448 - static int sh_dmae_suspend(struct device *dev) 1449 - { 1450 - return 0; 1451 - } 1452 - 1453 - static int sh_dmae_resume(struct device *dev) 1454 - { 1455 - struct sh_dmae_device *shdev = dev_get_drvdata(dev); 1456 - int i, ret; 1457 - 1458 - ret = sh_dmae_rst(shdev); 1459 - if (ret < 0) 1460 - dev_err(dev, "Failed to reset!\n"); 1461 - 1462 - for (i = 0; i < shdev->pdata->channel_num; i++) { 1463 - struct sh_dmae_chan *sh_chan = shdev->chan[i]; 1464 - struct sh_dmae_slave *param = sh_chan->common.private; 1465 - 1466 - if (!sh_chan->descs_allocated) 1467 - continue; 1468 - 1469 - if (param) { 1470 - const struct sh_dmae_slave_config *cfg = param->config; 1471 - dmae_set_dmars(sh_chan, cfg->mid_rid); 1472 - dmae_set_chcr(sh_chan, cfg->chcr); 1473 - } else { 1474 - dmae_init(sh_chan); 1475 - } 1476 - } 1477 - 1478 - return 0; 1479 - } 1480 - #else 1481 - #define sh_dmae_suspend NULL 1482 - #define sh_dmae_resume NULL 1483 - #endif 1484 - 1485 - const struct dev_pm_ops sh_dmae_pm = { 1486 - .suspend = sh_dmae_suspend, 1487 - .resume = sh_dmae_resume, 1488 - .runtime_suspend = sh_dmae_runtime_suspend, 1489 - .runtime_resume = sh_dmae_runtime_resume, 1490 - }; 1491 - 1492 - static struct platform_driver sh_dmae_driver = { 1493 - .remove = __exit_p(sh_dmae_remove), 1494 - .shutdown = sh_dmae_shutdown, 1495 - .driver = { 1496 - .owner = THIS_MODULE, 1497 - .name = "sh-dma-engine", 1498 - .pm = &sh_dmae_pm, 1499 - }, 1500 - }; 1501 - 1502 - static int __init sh_dmae_init(void) 1503 - { 1504 - /* Wire up NMI handling */ 1505 - int err = register_die_notifier(&sh_dmae_nmi_notifier); 1506 - if (err) 1507 - return err; 1508 - 1509 - return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe); 1510 - } 1511 - module_init(sh_dmae_init); 1512 - 1513 - static void __exit 
sh_dmae_exit(void) 1514 - { 1515 - platform_driver_unregister(&sh_dmae_driver); 1516 - 1517 - unregister_die_notifier(&sh_dmae_nmi_notifier); 1518 - } 1519 - module_exit(sh_dmae_exit); 1520 - 1521 - MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>"); 1522 - MODULE_DESCRIPTION("Renesas SH DMA Engine driver"); 1523 - MODULE_LICENSE("GPL"); 1524 - MODULE_ALIAS("platform:sh-dma-engine");
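The driver removed above bound slave channels through the legacy chan->private mechanism: a consumer handed a struct sh_dmae_slave (carrying an integer slave_id) to the dmaengine core, and sh_dmae_alloc_chan_resources() looked that ID up in the platform data via sh_dmae_find_slave(). What follows is a rough, hypothetical client-side sketch of that pattern, not code from this merge; the slave ID value and the filter body are placeholders, and a real filter would also verify the channel belongs to the intended DMAC.

/*
 * Hypothetical sketch of the legacy slave-channel request that the
 * deleted sh_dmae code served.  SLAVE_ID_TX is a placeholder only.
 */
#include <linux/dmaengine.h>
#include <linux/sh_dma.h>

#define SLAVE_ID_TX 3	/* placeholder slave ID, for illustration */

static bool shdma_legacy_filter(struct dma_chan *chan, void *arg)
{
	/* Hand the slave descriptor to the DMAC through .private */
	chan->private = arg;
	return true;	/* a real filter would also check the DMAC */
}

static struct dma_chan *request_legacy_slave_channel(void)
{
	static struct sh_dmae_slave param = { .slave_id = SLAVE_ID_TX };
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The filter is called for each free channel until one accepts */
	return dma_request_channel(mask, shdma_legacy_filter, &param);
}

This is exactly the coupling the series loosens: the conversion to the shdma base library and the integer-slave-ID work let slave drivers stop populating chan->private directly.
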
+22 -24
drivers/dma/shdma.h → drivers/dma/sh/shdma.h
··· 13 13 #ifndef __DMA_SHDMA_H 14 14 #define __DMA_SHDMA_H 15 15 16 + #include <linux/sh_dma.h> 17 + #include <linux/shdma-base.h> 16 18 #include <linux/dmaengine.h> 17 19 #include <linux/interrupt.h> 18 20 #include <linux/list.h> 19 21 20 - #define SH_DMAC_MAX_CHANNELS 20 21 - #define SH_DMA_SLAVE_NUMBER 256 22 - #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ 22 + #define SH_DMAE_MAX_CHANNELS 20 23 + #define SH_DMAE_TCR_MAX 0x00FFFFFF /* 16MB */ 23 24 24 25 struct device; 25 26 26 - enum dmae_pm_state { 27 - DMAE_PM_ESTABLISHED, 28 - DMAE_PM_BUSY, 29 - DMAE_PM_PENDING, 30 - }; 31 - 32 27 struct sh_dmae_chan { 33 - spinlock_t desc_lock; /* Descriptor operation lock */ 34 - struct list_head ld_queue; /* Link descriptors queue */ 35 - struct list_head ld_free; /* Link descriptors free */ 36 - struct dma_chan common; /* DMA common channel */ 37 - struct device *dev; /* Channel device */ 38 - struct tasklet_struct tasklet; /* Tasklet */ 39 - int descs_allocated; /* desc count */ 28 + struct shdma_chan shdma_chan; 29 + const struct sh_dmae_slave_config *config; /* Slave DMA configuration */ 40 30 int xmit_shift; /* log_2(bytes_per_xfer) */ 41 - int irq; 42 - int id; /* Raw id of this channel */ 43 31 u32 __iomem *base; 44 32 char dev_id[16]; /* unique name per DMAC of channel */ 45 33 int pm_error; 46 - enum dmae_pm_state pm_state; 47 34 }; 48 35 49 36 struct sh_dmae_device { 50 - struct dma_device common; 51 - struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; 37 + struct shdma_dev shdma_dev; 38 + struct sh_dmae_chan *chan[SH_DMAE_MAX_CHANNELS]; 52 39 struct sh_dmae_pdata *pdata; 53 40 struct list_head node; 54 41 u32 __iomem *chan_reg; ··· 44 57 u32 chcr_ie_bit; 45 58 }; 46 59 47 - #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common) 60 + struct sh_dmae_regs { 61 + u32 sar; /* SAR / source address */ 62 + u32 dar; /* DAR / destination address */ 63 + u32 tcr; /* TCR / transfer count */ 64 + }; 65 + 66 + struct sh_dmae_desc { 67 + struct sh_dmae_regs hw; 68 + struct shdma_desc shdma_desc; 69 + }; 70 + 71 + #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, shdma_chan) 48 72 #define to_sh_desc(lh) container_of(lh, struct sh_desc, node) 49 73 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx) 50 - #define to_sh_dev(chan) container_of(chan->common.device,\ 51 - struct sh_dmae_device, common) 74 + #define to_sh_dev(chan) container_of(chan->shdma_chan.dma_chan.device,\ 75 + struct sh_dmae_device, shdma_dev.dma_dev) 52 76 53 77 #endif /* __DMA_SHDMA_H */
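The reworked header embeds struct shdma_chan inside struct sh_dmae_chan and recovers the driver-private objects with container_of(), as the new to_sh_chan()/to_sh_dev() macros show. Below is a minimal, self-contained illustration of that embedding pattern; the struct layouts are simplified stand-ins, not the real shdma-base definitions.

/*
 * Minimal illustration of the container_of() embedding used by
 * to_sh_chan()/to_sh_dev() above.  Types here are stand-ins only.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct shdma_chan { int id; };		/* stand-in for the base channel */

struct sh_dmae_chan {
	struct shdma_chan shdma_chan;	/* embedded base object */
	int xmit_shift;
};

int main(void)
{
	struct sh_dmae_chan ch = { .shdma_chan = { .id = 2 }, .xmit_shift = 5 };
	struct shdma_chan *base = &ch.shdma_chan;

	/* Recover the wrapping driver structure from the embedded member */
	struct sh_dmae_chan *sh_chan =
		container_of(base, struct sh_dmae_chan, shdma_chan);

	printf("channel %d, xmit_shift %d\n", base->id, sh_chan->xmit_shift);
	return 0;
}

The same two-step recovery appears in to_sh_dev(): from the generic dma_chan the code reaches the embedded shdma_chan, then the enclosing sh_dmae_device through shdma_dev.dma_dev.
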
+1415
drivers/dma/tegra20-apb-dma.c
··· 1 + /* 2 + * DMA driver for Nvidia's Tegra20 APB DMA controller. 3 + * 4 + * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include <linux/bitops.h> 20 + #include <linux/clk.h> 21 + #include <linux/delay.h> 22 + #include <linux/dmaengine.h> 23 + #include <linux/dma-mapping.h> 24 + #include <linux/init.h> 25 + #include <linux/interrupt.h> 26 + #include <linux/io.h> 27 + #include <linux/mm.h> 28 + #include <linux/module.h> 29 + #include <linux/of.h> 30 + #include <linux/of_device.h> 31 + #include <linux/platform_device.h> 32 + #include <linux/pm_runtime.h> 33 + #include <linux/slab.h> 34 + 35 + #include <mach/clk.h> 36 + #include "dmaengine.h" 37 + 38 + #define TEGRA_APBDMA_GENERAL 0x0 39 + #define TEGRA_APBDMA_GENERAL_ENABLE BIT(31) 40 + 41 + #define TEGRA_APBDMA_CONTROL 0x010 42 + #define TEGRA_APBDMA_IRQ_MASK 0x01c 43 + #define TEGRA_APBDMA_IRQ_MASK_SET 0x020 44 + 45 + /* CSR register */ 46 + #define TEGRA_APBDMA_CHAN_CSR 0x00 47 + #define TEGRA_APBDMA_CSR_ENB BIT(31) 48 + #define TEGRA_APBDMA_CSR_IE_EOC BIT(30) 49 + #define TEGRA_APBDMA_CSR_HOLD BIT(29) 50 + #define TEGRA_APBDMA_CSR_DIR BIT(28) 51 + #define TEGRA_APBDMA_CSR_ONCE BIT(27) 52 + #define TEGRA_APBDMA_CSR_FLOW BIT(21) 53 + #define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT 16 54 + #define TEGRA_APBDMA_CSR_WCOUNT_MASK 0xFFFC 55 + 56 + /* STATUS register */ 57 + #define TEGRA_APBDMA_CHAN_STATUS 0x004 58 + #define TEGRA_APBDMA_STATUS_BUSY BIT(31) 59 + #define TEGRA_APBDMA_STATUS_ISE_EOC BIT(30) 60 + #define TEGRA_APBDMA_STATUS_HALT BIT(29) 61 + #define TEGRA_APBDMA_STATUS_PING_PONG BIT(28) 62 + #define TEGRA_APBDMA_STATUS_COUNT_SHIFT 2 63 + #define TEGRA_APBDMA_STATUS_COUNT_MASK 0xFFFC 64 + 65 + /* AHB memory address */ 66 + #define TEGRA_APBDMA_CHAN_AHBPTR 0x010 67 + 68 + /* AHB sequence register */ 69 + #define TEGRA_APBDMA_CHAN_AHBSEQ 0x14 70 + #define TEGRA_APBDMA_AHBSEQ_INTR_ENB BIT(31) 71 + #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8 (0 << 28) 72 + #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16 (1 << 28) 73 + #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32 (2 << 28) 74 + #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64 (3 << 28) 75 + #define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128 (4 << 28) 76 + #define TEGRA_APBDMA_AHBSEQ_DATA_SWAP BIT(27) 77 + #define TEGRA_APBDMA_AHBSEQ_BURST_1 (4 << 24) 78 + #define TEGRA_APBDMA_AHBSEQ_BURST_4 (5 << 24) 79 + #define TEGRA_APBDMA_AHBSEQ_BURST_8 (6 << 24) 80 + #define TEGRA_APBDMA_AHBSEQ_DBL_BUF BIT(19) 81 + #define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT 16 82 + #define TEGRA_APBDMA_AHBSEQ_WRAP_NONE 0 83 + 84 + /* APB address */ 85 + #define TEGRA_APBDMA_CHAN_APBPTR 0x018 86 + 87 + /* APB sequence register */ 88 + #define TEGRA_APBDMA_CHAN_APBSEQ 0x01c 89 + #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8 (0 << 28) 90 + #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16 (1 << 28) 91 + #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32 (2 << 28) 92 + #define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64 (3 << 28) 93 + 
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128 (4 << 28) 94 + #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) 95 + #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) 96 + 97 + /* 98 + * If any burst is in flight and DMA paused then this is the time to complete 99 + * on-flight burst and update DMA status register. 100 + */ 101 + #define TEGRA_APBDMA_BURST_COMPLETE_TIME 20 102 + 103 + /* Channel base address offset from APBDMA base address */ 104 + #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 105 + 106 + /* DMA channel register space size */ 107 + #define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 108 + 109 + struct tegra_dma; 110 + 111 + /* 112 + * tegra_dma_chip_data Tegra chip specific DMA data 113 + * @nr_channels: Number of channels available in the controller. 114 + * @max_dma_count: Maximum DMA transfer count supported by DMA controller. 115 + */ 116 + struct tegra_dma_chip_data { 117 + int nr_channels; 118 + int max_dma_count; 119 + }; 120 + 121 + /* DMA channel registers */ 122 + struct tegra_dma_channel_regs { 123 + unsigned long csr; 124 + unsigned long ahb_ptr; 125 + unsigned long apb_ptr; 126 + unsigned long ahb_seq; 127 + unsigned long apb_seq; 128 + }; 129 + 130 + /* 131 + * tegra_dma_sg_req: Dma request details to configure hardware. This 132 + * contains the details for one transfer to configure DMA hw. 133 + * The client's request for data transfer can be broken into multiple 134 + * sub-transfer as per requester details and hw support. 135 + * This sub transfer get added in the list of transfer and point to Tegra 136 + * DMA descriptor which manages the transfer details. 137 + */ 138 + struct tegra_dma_sg_req { 139 + struct tegra_dma_channel_regs ch_regs; 140 + int req_len; 141 + bool configured; 142 + bool last_sg; 143 + bool half_done; 144 + struct list_head node; 145 + struct tegra_dma_desc *dma_desc; 146 + }; 147 + 148 + /* 149 + * tegra_dma_desc: Tegra DMA descriptors which manages the client requests. 150 + * This descriptor keep track of transfer status, callbacks and request 151 + * counts etc. 
152 + */ 153 + struct tegra_dma_desc { 154 + struct dma_async_tx_descriptor txd; 155 + int bytes_requested; 156 + int bytes_transferred; 157 + enum dma_status dma_status; 158 + struct list_head node; 159 + struct list_head tx_list; 160 + struct list_head cb_node; 161 + int cb_count; 162 + }; 163 + 164 + struct tegra_dma_channel; 165 + 166 + typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc, 167 + bool to_terminate); 168 + 169 + /* tegra_dma_channel: Channel specific information */ 170 + struct tegra_dma_channel { 171 + struct dma_chan dma_chan; 172 + bool config_init; 173 + int id; 174 + int irq; 175 + unsigned long chan_base_offset; 176 + spinlock_t lock; 177 + bool busy; 178 + struct tegra_dma *tdma; 179 + bool cyclic; 180 + 181 + /* Different lists for managing the requests */ 182 + struct list_head free_sg_req; 183 + struct list_head pending_sg_req; 184 + struct list_head free_dma_desc; 185 + struct list_head cb_desc; 186 + 187 + /* ISR handler and tasklet for bottom half of isr handling */ 188 + dma_isr_handler isr_handler; 189 + struct tasklet_struct tasklet; 190 + dma_async_tx_callback callback; 191 + void *callback_param; 192 + 193 + /* Channel-slave specific configuration */ 194 + struct dma_slave_config dma_sconfig; 195 + }; 196 + 197 + /* tegra_dma: Tegra DMA specific information */ 198 + struct tegra_dma { 199 + struct dma_device dma_dev; 200 + struct device *dev; 201 + struct clk *dma_clk; 202 + spinlock_t global_lock; 203 + void __iomem *base_addr; 204 + struct tegra_dma_chip_data *chip_data; 205 + 206 + /* Some register need to be cache before suspend */ 207 + u32 reg_gen; 208 + 209 + /* Last member of the structure */ 210 + struct tegra_dma_channel channels[0]; 211 + }; 212 + 213 + static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val) 214 + { 215 + writel(val, tdma->base_addr + reg); 216 + } 217 + 218 + static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg) 219 + { 220 + return readl(tdma->base_addr + reg); 221 + } 222 + 223 + static inline void tdc_write(struct tegra_dma_channel *tdc, 224 + u32 reg, u32 val) 225 + { 226 + writel(val, tdc->tdma->base_addr + tdc->chan_base_offset + reg); 227 + } 228 + 229 + static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg) 230 + { 231 + return readl(tdc->tdma->base_addr + tdc->chan_base_offset + reg); 232 + } 233 + 234 + static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc) 235 + { 236 + return container_of(dc, struct tegra_dma_channel, dma_chan); 237 + } 238 + 239 + static inline struct tegra_dma_desc *txd_to_tegra_dma_desc( 240 + struct dma_async_tx_descriptor *td) 241 + { 242 + return container_of(td, struct tegra_dma_desc, txd); 243 + } 244 + 245 + static inline struct device *tdc2dev(struct tegra_dma_channel *tdc) 246 + { 247 + return &tdc->dma_chan.dev->device; 248 + } 249 + 250 + static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx); 251 + static int tegra_dma_runtime_suspend(struct device *dev); 252 + static int tegra_dma_runtime_resume(struct device *dev); 253 + 254 + /* Get DMA desc from free list, if not there then allocate it. 
*/ 255 + static struct tegra_dma_desc *tegra_dma_desc_get( 256 + struct tegra_dma_channel *tdc) 257 + { 258 + struct tegra_dma_desc *dma_desc; 259 + unsigned long flags; 260 + 261 + spin_lock_irqsave(&tdc->lock, flags); 262 + 263 + /* Do not allocate if desc are waiting for ack */ 264 + list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { 265 + if (async_tx_test_ack(&dma_desc->txd)) { 266 + list_del(&dma_desc->node); 267 + spin_unlock_irqrestore(&tdc->lock, flags); 268 + return dma_desc; 269 + } 270 + } 271 + 272 + spin_unlock_irqrestore(&tdc->lock, flags); 273 + 274 + /* Allocate DMA desc */ 275 + dma_desc = kzalloc(sizeof(*dma_desc), GFP_ATOMIC); 276 + if (!dma_desc) { 277 + dev_err(tdc2dev(tdc), "dma_desc alloc failed\n"); 278 + return NULL; 279 + } 280 + 281 + dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan); 282 + dma_desc->txd.tx_submit = tegra_dma_tx_submit; 283 + dma_desc->txd.flags = 0; 284 + return dma_desc; 285 + } 286 + 287 + static void tegra_dma_desc_put(struct tegra_dma_channel *tdc, 288 + struct tegra_dma_desc *dma_desc) 289 + { 290 + unsigned long flags; 291 + 292 + spin_lock_irqsave(&tdc->lock, flags); 293 + if (!list_empty(&dma_desc->tx_list)) 294 + list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req); 295 + list_add_tail(&dma_desc->node, &tdc->free_dma_desc); 296 + spin_unlock_irqrestore(&tdc->lock, flags); 297 + } 298 + 299 + static struct tegra_dma_sg_req *tegra_dma_sg_req_get( 300 + struct tegra_dma_channel *tdc) 301 + { 302 + struct tegra_dma_sg_req *sg_req = NULL; 303 + unsigned long flags; 304 + 305 + spin_lock_irqsave(&tdc->lock, flags); 306 + if (!list_empty(&tdc->free_sg_req)) { 307 + sg_req = list_first_entry(&tdc->free_sg_req, 308 + typeof(*sg_req), node); 309 + list_del(&sg_req->node); 310 + spin_unlock_irqrestore(&tdc->lock, flags); 311 + return sg_req; 312 + } 313 + spin_unlock_irqrestore(&tdc->lock, flags); 314 + 315 + sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_ATOMIC); 316 + if (!sg_req) 317 + dev_err(tdc2dev(tdc), "sg_req alloc failed\n"); 318 + return sg_req; 319 + } 320 + 321 + static int tegra_dma_slave_config(struct dma_chan *dc, 322 + struct dma_slave_config *sconfig) 323 + { 324 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 325 + 326 + if (!list_empty(&tdc->pending_sg_req)) { 327 + dev_err(tdc2dev(tdc), "Configuration not allowed\n"); 328 + return -EBUSY; 329 + } 330 + 331 + memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig)); 332 + tdc->config_init = true; 333 + return 0; 334 + } 335 + 336 + static void tegra_dma_global_pause(struct tegra_dma_channel *tdc, 337 + bool wait_for_burst_complete) 338 + { 339 + struct tegra_dma *tdma = tdc->tdma; 340 + 341 + spin_lock(&tdma->global_lock); 342 + tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0); 343 + if (wait_for_burst_complete) 344 + udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); 345 + } 346 + 347 + static void tegra_dma_global_resume(struct tegra_dma_channel *tdc) 348 + { 349 + struct tegra_dma *tdma = tdc->tdma; 350 + 351 + tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); 352 + spin_unlock(&tdma->global_lock); 353 + } 354 + 355 + static void tegra_dma_stop(struct tegra_dma_channel *tdc) 356 + { 357 + u32 csr; 358 + u32 status; 359 + 360 + /* Disable interrupts */ 361 + csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR); 362 + csr &= ~TEGRA_APBDMA_CSR_IE_EOC; 363 + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); 364 + 365 + /* Disable DMA */ 366 + csr &= ~TEGRA_APBDMA_CSR_ENB; 367 + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr); 368 + 369 + /* Clear interrupt 
status if it is there */ 370 + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 371 + if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { 372 + dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__); 373 + tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); 374 + } 375 + tdc->busy = false; 376 + } 377 + 378 + static void tegra_dma_start(struct tegra_dma_channel *tdc, 379 + struct tegra_dma_sg_req *sg_req) 380 + { 381 + struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs; 382 + 383 + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr); 384 + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq); 385 + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); 386 + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); 387 + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); 388 + 389 + /* Start DMA */ 390 + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 391 + ch_regs->csr | TEGRA_APBDMA_CSR_ENB); 392 + } 393 + 394 + static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc, 395 + struct tegra_dma_sg_req *nsg_req) 396 + { 397 + unsigned long status; 398 + 399 + /* 400 + * The DMA controller reloads the new configuration for next transfer 401 + * after last burst of current transfer completes. 402 + * If there is no IEC status then this makes sure that last burst 403 + * has not be completed. There may be case that last burst is on 404 + * flight and so it can complete but because DMA is paused, it 405 + * will not generates interrupt as well as not reload the new 406 + * configuration. 407 + * If there is already IEC status then interrupt handler need to 408 + * load new configuration. 409 + */ 410 + tegra_dma_global_pause(tdc, false); 411 + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 412 + 413 + /* 414 + * If interrupt is pending then do nothing as the ISR will handle 415 + * the programing for new request. 
416 + */ 417 + if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { 418 + dev_err(tdc2dev(tdc), 419 + "Skipping new configuration as interrupt is pending\n"); 420 + tegra_dma_global_resume(tdc); 421 + return; 422 + } 423 + 424 + /* Safe to program new configuration */ 425 + tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); 426 + tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); 427 + tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 428 + nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); 429 + nsg_req->configured = true; 430 + 431 + tegra_dma_global_resume(tdc); 432 + } 433 + 434 + static void tdc_start_head_req(struct tegra_dma_channel *tdc) 435 + { 436 + struct tegra_dma_sg_req *sg_req; 437 + 438 + if (list_empty(&tdc->pending_sg_req)) 439 + return; 440 + 441 + sg_req = list_first_entry(&tdc->pending_sg_req, 442 + typeof(*sg_req), node); 443 + tegra_dma_start(tdc, sg_req); 444 + sg_req->configured = true; 445 + tdc->busy = true; 446 + } 447 + 448 + static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc) 449 + { 450 + struct tegra_dma_sg_req *hsgreq; 451 + struct tegra_dma_sg_req *hnsgreq; 452 + 453 + if (list_empty(&tdc->pending_sg_req)) 454 + return; 455 + 456 + hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); 457 + if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) { 458 + hnsgreq = list_first_entry(&hsgreq->node, 459 + typeof(*hnsgreq), node); 460 + tegra_dma_configure_for_next(tdc, hnsgreq); 461 + } 462 + } 463 + 464 + static inline int get_current_xferred_count(struct tegra_dma_channel *tdc, 465 + struct tegra_dma_sg_req *sg_req, unsigned long status) 466 + { 467 + return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4; 468 + } 469 + 470 + static void tegra_dma_abort_all(struct tegra_dma_channel *tdc) 471 + { 472 + struct tegra_dma_sg_req *sgreq; 473 + struct tegra_dma_desc *dma_desc; 474 + 475 + while (!list_empty(&tdc->pending_sg_req)) { 476 + sgreq = list_first_entry(&tdc->pending_sg_req, 477 + typeof(*sgreq), node); 478 + list_del(&sgreq->node); 479 + list_add_tail(&sgreq->node, &tdc->free_sg_req); 480 + if (sgreq->last_sg) { 481 + dma_desc = sgreq->dma_desc; 482 + dma_desc->dma_status = DMA_ERROR; 483 + list_add_tail(&dma_desc->node, &tdc->free_dma_desc); 484 + 485 + /* Add in cb list if it is not there. */ 486 + if (!dma_desc->cb_count) 487 + list_add_tail(&dma_desc->cb_node, 488 + &tdc->cb_desc); 489 + dma_desc->cb_count++; 490 + } 491 + } 492 + tdc->isr_handler = NULL; 493 + } 494 + 495 + static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, 496 + struct tegra_dma_sg_req *last_sg_req, bool to_terminate) 497 + { 498 + struct tegra_dma_sg_req *hsgreq = NULL; 499 + 500 + if (list_empty(&tdc->pending_sg_req)) { 501 + dev_err(tdc2dev(tdc), "Dma is running without req\n"); 502 + tegra_dma_stop(tdc); 503 + return false; 504 + } 505 + 506 + /* 507 + * Check that head req on list should be in flight. 508 + * If it is not in flight then abort transfer as 509 + * looping of transfer can not continue. 
510 + */ 511 + hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); 512 + if (!hsgreq->configured) { 513 + tegra_dma_stop(tdc); 514 + dev_err(tdc2dev(tdc), "Error in dma transfer, aborting dma\n"); 515 + tegra_dma_abort_all(tdc); 516 + return false; 517 + } 518 + 519 + /* Configure next request */ 520 + if (!to_terminate) 521 + tdc_configure_next_head_desc(tdc); 522 + return true; 523 + } 524 + 525 + static void handle_once_dma_done(struct tegra_dma_channel *tdc, 526 + bool to_terminate) 527 + { 528 + struct tegra_dma_sg_req *sgreq; 529 + struct tegra_dma_desc *dma_desc; 530 + 531 + tdc->busy = false; 532 + sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); 533 + dma_desc = sgreq->dma_desc; 534 + dma_desc->bytes_transferred += sgreq->req_len; 535 + 536 + list_del(&sgreq->node); 537 + if (sgreq->last_sg) { 538 + dma_desc->dma_status = DMA_SUCCESS; 539 + dma_cookie_complete(&dma_desc->txd); 540 + if (!dma_desc->cb_count) 541 + list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); 542 + dma_desc->cb_count++; 543 + list_add_tail(&dma_desc->node, &tdc->free_dma_desc); 544 + } 545 + list_add_tail(&sgreq->node, &tdc->free_sg_req); 546 + 547 + /* Do not start DMA if it is going to be terminate */ 548 + if (to_terminate || list_empty(&tdc->pending_sg_req)) 549 + return; 550 + 551 + tdc_start_head_req(tdc); 552 + return; 553 + } 554 + 555 + static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc, 556 + bool to_terminate) 557 + { 558 + struct tegra_dma_sg_req *sgreq; 559 + struct tegra_dma_desc *dma_desc; 560 + bool st; 561 + 562 + sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node); 563 + dma_desc = sgreq->dma_desc; 564 + dma_desc->bytes_transferred += sgreq->req_len; 565 + 566 + /* Callback need to be call */ 567 + if (!dma_desc->cb_count) 568 + list_add_tail(&dma_desc->cb_node, &tdc->cb_desc); 569 + dma_desc->cb_count++; 570 + 571 + /* If not last req then put at end of pending list */ 572 + if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { 573 + list_del(&sgreq->node); 574 + list_add_tail(&sgreq->node, &tdc->pending_sg_req); 575 + sgreq->configured = false; 576 + st = handle_continuous_head_request(tdc, sgreq, to_terminate); 577 + if (!st) 578 + dma_desc->dma_status = DMA_ERROR; 579 + } 580 + return; 581 + } 582 + 583 + static void tegra_dma_tasklet(unsigned long data) 584 + { 585 + struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data; 586 + dma_async_tx_callback callback = NULL; 587 + void *callback_param = NULL; 588 + struct tegra_dma_desc *dma_desc; 589 + unsigned long flags; 590 + int cb_count; 591 + 592 + spin_lock_irqsave(&tdc->lock, flags); 593 + while (!list_empty(&tdc->cb_desc)) { 594 + dma_desc = list_first_entry(&tdc->cb_desc, 595 + typeof(*dma_desc), cb_node); 596 + list_del(&dma_desc->cb_node); 597 + callback = dma_desc->txd.callback; 598 + callback_param = dma_desc->txd.callback_param; 599 + cb_count = dma_desc->cb_count; 600 + dma_desc->cb_count = 0; 601 + spin_unlock_irqrestore(&tdc->lock, flags); 602 + while (cb_count-- && callback) 603 + callback(callback_param); 604 + spin_lock_irqsave(&tdc->lock, flags); 605 + } 606 + spin_unlock_irqrestore(&tdc->lock, flags); 607 + } 608 + 609 + static irqreturn_t tegra_dma_isr(int irq, void *dev_id) 610 + { 611 + struct tegra_dma_channel *tdc = dev_id; 612 + unsigned long status; 613 + unsigned long flags; 614 + 615 + spin_lock_irqsave(&tdc->lock, flags); 616 + 617 + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 618 + if (status & 
TEGRA_APBDMA_STATUS_ISE_EOC) { 619 + tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status); 620 + tdc->isr_handler(tdc, false); 621 + tasklet_schedule(&tdc->tasklet); 622 + spin_unlock_irqrestore(&tdc->lock, flags); 623 + return IRQ_HANDLED; 624 + } 625 + 626 + spin_unlock_irqrestore(&tdc->lock, flags); 627 + dev_info(tdc2dev(tdc), 628 + "Interrupt already served status 0x%08lx\n", status); 629 + return IRQ_NONE; 630 + } 631 + 632 + static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd) 633 + { 634 + struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd); 635 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan); 636 + unsigned long flags; 637 + dma_cookie_t cookie; 638 + 639 + spin_lock_irqsave(&tdc->lock, flags); 640 + dma_desc->dma_status = DMA_IN_PROGRESS; 641 + cookie = dma_cookie_assign(&dma_desc->txd); 642 + list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req); 643 + spin_unlock_irqrestore(&tdc->lock, flags); 644 + return cookie; 645 + } 646 + 647 + static void tegra_dma_issue_pending(struct dma_chan *dc) 648 + { 649 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 650 + unsigned long flags; 651 + 652 + spin_lock_irqsave(&tdc->lock, flags); 653 + if (list_empty(&tdc->pending_sg_req)) { 654 + dev_err(tdc2dev(tdc), "No DMA request\n"); 655 + goto end; 656 + } 657 + if (!tdc->busy) { 658 + tdc_start_head_req(tdc); 659 + 660 + /* Continuous single mode: Configure next req */ 661 + if (tdc->cyclic) { 662 + /* 663 + * Wait for 1 burst time for configure DMA for 664 + * next transfer. 665 + */ 666 + udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME); 667 + tdc_configure_next_head_desc(tdc); 668 + } 669 + } 670 + end: 671 + spin_unlock_irqrestore(&tdc->lock, flags); 672 + return; 673 + } 674 + 675 + static void tegra_dma_terminate_all(struct dma_chan *dc) 676 + { 677 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 678 + struct tegra_dma_sg_req *sgreq; 679 + struct tegra_dma_desc *dma_desc; 680 + unsigned long flags; 681 + unsigned long status; 682 + bool was_busy; 683 + 684 + spin_lock_irqsave(&tdc->lock, flags); 685 + if (list_empty(&tdc->pending_sg_req)) { 686 + spin_unlock_irqrestore(&tdc->lock, flags); 687 + return; 688 + } 689 + 690 + if (!tdc->busy) 691 + goto skip_dma_stop; 692 + 693 + /* Pause DMA before checking the queue status */ 694 + tegra_dma_global_pause(tdc, true); 695 + 696 + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 697 + if (status & TEGRA_APBDMA_STATUS_ISE_EOC) { 698 + dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__); 699 + tdc->isr_handler(tdc, true); 700 + status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 701 + } 702 + 703 + was_busy = tdc->busy; 704 + tegra_dma_stop(tdc); 705 + 706 + if (!list_empty(&tdc->pending_sg_req) && was_busy) { 707 + sgreq = list_first_entry(&tdc->pending_sg_req, 708 + typeof(*sgreq), node); 709 + sgreq->dma_desc->bytes_transferred += 710 + get_current_xferred_count(tdc, sgreq, status); 711 + } 712 + tegra_dma_global_resume(tdc); 713 + 714 + skip_dma_stop: 715 + tegra_dma_abort_all(tdc); 716 + 717 + while (!list_empty(&tdc->cb_desc)) { 718 + dma_desc = list_first_entry(&tdc->cb_desc, 719 + typeof(*dma_desc), cb_node); 720 + list_del(&dma_desc->cb_node); 721 + dma_desc->cb_count = 0; 722 + } 723 + spin_unlock_irqrestore(&tdc->lock, flags); 724 + } 725 + 726 + static enum dma_status tegra_dma_tx_status(struct dma_chan *dc, 727 + dma_cookie_t cookie, struct dma_tx_state *txstate) 728 + { 729 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 730 + struct tegra_dma_desc 
*dma_desc; 731 + struct tegra_dma_sg_req *sg_req; 732 + enum dma_status ret; 733 + unsigned long flags; 734 + unsigned int residual; 735 + 736 + spin_lock_irqsave(&tdc->lock, flags); 737 + 738 + ret = dma_cookie_status(dc, cookie, txstate); 739 + if (ret == DMA_SUCCESS) { 740 + dma_set_residue(txstate, 0); 741 + spin_unlock_irqrestore(&tdc->lock, flags); 742 + return ret; 743 + } 744 + 745 + /* Check on wait_ack desc status */ 746 + list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) { 747 + if (dma_desc->txd.cookie == cookie) { 748 + residual = dma_desc->bytes_requested - 749 + (dma_desc->bytes_transferred % 750 + dma_desc->bytes_requested); 751 + dma_set_residue(txstate, residual); 752 + ret = dma_desc->dma_status; 753 + spin_unlock_irqrestore(&tdc->lock, flags); 754 + return ret; 755 + } 756 + } 757 + 758 + /* Check in pending list */ 759 + list_for_each_entry(sg_req, &tdc->pending_sg_req, node) { 760 + dma_desc = sg_req->dma_desc; 761 + if (dma_desc->txd.cookie == cookie) { 762 + residual = dma_desc->bytes_requested - 763 + (dma_desc->bytes_transferred % 764 + dma_desc->bytes_requested); 765 + dma_set_residue(txstate, residual); 766 + ret = dma_desc->dma_status; 767 + spin_unlock_irqrestore(&tdc->lock, flags); 768 + return ret; 769 + } 770 + } 771 + 772 + dev_dbg(tdc2dev(tdc), "cookie %d does not found\n", cookie); 773 + spin_unlock_irqrestore(&tdc->lock, flags); 774 + return ret; 775 + } 776 + 777 + static int tegra_dma_device_control(struct dma_chan *dc, enum dma_ctrl_cmd cmd, 778 + unsigned long arg) 779 + { 780 + switch (cmd) { 781 + case DMA_SLAVE_CONFIG: 782 + return tegra_dma_slave_config(dc, 783 + (struct dma_slave_config *)arg); 784 + 785 + case DMA_TERMINATE_ALL: 786 + tegra_dma_terminate_all(dc); 787 + return 0; 788 + 789 + default: 790 + break; 791 + } 792 + 793 + return -ENXIO; 794 + } 795 + 796 + static inline int get_bus_width(struct tegra_dma_channel *tdc, 797 + enum dma_slave_buswidth slave_bw) 798 + { 799 + switch (slave_bw) { 800 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 801 + return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8; 802 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 803 + return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16; 804 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 805 + return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; 806 + case DMA_SLAVE_BUSWIDTH_8_BYTES: 807 + return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64; 808 + default: 809 + dev_warn(tdc2dev(tdc), 810 + "slave bw is not supported, using 32bits\n"); 811 + return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32; 812 + } 813 + } 814 + 815 + static inline int get_burst_size(struct tegra_dma_channel *tdc, 816 + u32 burst_size, enum dma_slave_buswidth slave_bw, int len) 817 + { 818 + int burst_byte; 819 + int burst_ahb_width; 820 + 821 + /* 822 + * burst_size from client is in terms of the bus_width. 823 + * convert them into AHB memory width which is 4 byte. 
824 + */ 825 + burst_byte = burst_size * slave_bw; 826 + burst_ahb_width = burst_byte / 4; 827 + 828 + /* If burst size is 0 then calculate the burst size based on length */ 829 + if (!burst_ahb_width) { 830 + if (len & 0xF) 831 + return TEGRA_APBDMA_AHBSEQ_BURST_1; 832 + else if ((len >> 4) & 0x1) 833 + return TEGRA_APBDMA_AHBSEQ_BURST_4; 834 + else 835 + return TEGRA_APBDMA_AHBSEQ_BURST_8; 836 + } 837 + if (burst_ahb_width < 4) 838 + return TEGRA_APBDMA_AHBSEQ_BURST_1; 839 + else if (burst_ahb_width < 8) 840 + return TEGRA_APBDMA_AHBSEQ_BURST_4; 841 + else 842 + return TEGRA_APBDMA_AHBSEQ_BURST_8; 843 + } 844 + 845 + static int get_transfer_param(struct tegra_dma_channel *tdc, 846 + enum dma_transfer_direction direction, unsigned long *apb_addr, 847 + unsigned long *apb_seq, unsigned long *csr, unsigned int *burst_size, 848 + enum dma_slave_buswidth *slave_bw) 849 + { 850 + 851 + switch (direction) { 852 + case DMA_MEM_TO_DEV: 853 + *apb_addr = tdc->dma_sconfig.dst_addr; 854 + *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width); 855 + *burst_size = tdc->dma_sconfig.dst_maxburst; 856 + *slave_bw = tdc->dma_sconfig.dst_addr_width; 857 + *csr = TEGRA_APBDMA_CSR_DIR; 858 + return 0; 859 + 860 + case DMA_DEV_TO_MEM: 861 + *apb_addr = tdc->dma_sconfig.src_addr; 862 + *apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width); 863 + *burst_size = tdc->dma_sconfig.src_maxburst; 864 + *slave_bw = tdc->dma_sconfig.src_addr_width; 865 + *csr = 0; 866 + return 0; 867 + 868 + default: 869 + dev_err(tdc2dev(tdc), "Dma direction is not supported\n"); 870 + return -EINVAL; 871 + } 872 + return -EINVAL; 873 + } 874 + 875 + static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( 876 + struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, 877 + enum dma_transfer_direction direction, unsigned long flags, 878 + void *context) 879 + { 880 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 881 + struct tegra_dma_desc *dma_desc; 882 + unsigned int i; 883 + struct scatterlist *sg; 884 + unsigned long csr, ahb_seq, apb_ptr, apb_seq; 885 + struct list_head req_list; 886 + struct tegra_dma_sg_req *sg_req = NULL; 887 + u32 burst_size; 888 + enum dma_slave_buswidth slave_bw; 889 + int ret; 890 + 891 + if (!tdc->config_init) { 892 + dev_err(tdc2dev(tdc), "dma channel is not configured\n"); 893 + return NULL; 894 + } 895 + if (sg_len < 1) { 896 + dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len); 897 + return NULL; 898 + } 899 + 900 + ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, 901 + &burst_size, &slave_bw); 902 + if (ret < 0) 903 + return NULL; 904 + 905 + INIT_LIST_HEAD(&req_list); 906 + 907 + ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; 908 + ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << 909 + TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 910 + ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 911 + 912 + csr |= TEGRA_APBDMA_CSR_ONCE | TEGRA_APBDMA_CSR_FLOW; 913 + csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 914 + if (flags & DMA_PREP_INTERRUPT) 915 + csr |= TEGRA_APBDMA_CSR_IE_EOC; 916 + 917 + apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; 918 + 919 + dma_desc = tegra_dma_desc_get(tdc); 920 + if (!dma_desc) { 921 + dev_err(tdc2dev(tdc), "Dma descriptors not available\n"); 922 + return NULL; 923 + } 924 + INIT_LIST_HEAD(&dma_desc->tx_list); 925 + INIT_LIST_HEAD(&dma_desc->cb_node); 926 + dma_desc->cb_count = 0; 927 + dma_desc->bytes_requested = 0; 928 + dma_desc->bytes_transferred = 0; 929 + dma_desc->dma_status = DMA_IN_PROGRESS; 930 + 931 + /* 
Make transfer requests */ 932 + for_each_sg(sgl, sg, sg_len, i) { 933 + u32 len, mem; 934 + 935 + mem = sg_dma_address(sg); 936 + len = sg_dma_len(sg); 937 + 938 + if ((len & 3) || (mem & 3) || 939 + (len > tdc->tdma->chip_data->max_dma_count)) { 940 + dev_err(tdc2dev(tdc), 941 + "Dma length/memory address is not supported\n"); 942 + tegra_dma_desc_put(tdc, dma_desc); 943 + return NULL; 944 + } 945 + 946 + sg_req = tegra_dma_sg_req_get(tdc); 947 + if (!sg_req) { 948 + dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); 949 + tegra_dma_desc_put(tdc, dma_desc); 950 + return NULL; 951 + } 952 + 953 + ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 954 + dma_desc->bytes_requested += len; 955 + 956 + sg_req->ch_regs.apb_ptr = apb_ptr; 957 + sg_req->ch_regs.ahb_ptr = mem; 958 + sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); 959 + sg_req->ch_regs.apb_seq = apb_seq; 960 + sg_req->ch_regs.ahb_seq = ahb_seq; 961 + sg_req->configured = false; 962 + sg_req->last_sg = false; 963 + sg_req->dma_desc = dma_desc; 964 + sg_req->req_len = len; 965 + 966 + list_add_tail(&sg_req->node, &dma_desc->tx_list); 967 + } 968 + sg_req->last_sg = true; 969 + if (flags & DMA_CTRL_ACK) 970 + dma_desc->txd.flags = DMA_CTRL_ACK; 971 + 972 + /* 973 + * Make sure that mode should not be conflicting with currently 974 + * configured mode. 975 + */ 976 + if (!tdc->isr_handler) { 977 + tdc->isr_handler = handle_once_dma_done; 978 + tdc->cyclic = false; 979 + } else { 980 + if (tdc->cyclic) { 981 + dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n"); 982 + tegra_dma_desc_put(tdc, dma_desc); 983 + return NULL; 984 + } 985 + } 986 + 987 + return &dma_desc->txd; 988 + } 989 + 990 + struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic( 991 + struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len, 992 + size_t period_len, enum dma_transfer_direction direction, 993 + void *context) 994 + { 995 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 996 + struct tegra_dma_desc *dma_desc = NULL; 997 + struct tegra_dma_sg_req *sg_req = NULL; 998 + unsigned long csr, ahb_seq, apb_ptr, apb_seq; 999 + int len; 1000 + size_t remain_len; 1001 + dma_addr_t mem = buf_addr; 1002 + u32 burst_size; 1003 + enum dma_slave_buswidth slave_bw; 1004 + int ret; 1005 + 1006 + if (!buf_len || !period_len) { 1007 + dev_err(tdc2dev(tdc), "Invalid buffer/period len\n"); 1008 + return NULL; 1009 + } 1010 + 1011 + if (!tdc->config_init) { 1012 + dev_err(tdc2dev(tdc), "DMA slave is not configured\n"); 1013 + return NULL; 1014 + } 1015 + 1016 + /* 1017 + * We allow to take more number of requests till DMA is 1018 + * not started. The driver will loop over all requests. 1019 + * Once DMA is started then new requests can be queued only after 1020 + * terminating the DMA. 1021 + */ 1022 + if (tdc->busy) { 1023 + dev_err(tdc2dev(tdc), "Request not allowed when dma running\n"); 1024 + return NULL; 1025 + } 1026 + 1027 + /* 1028 + * We only support cycle transfer when buf_len is multiple of 1029 + * period_len. 
1030 + */ 1031 + if (buf_len % period_len) { 1032 + dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n"); 1033 + return NULL; 1034 + } 1035 + 1036 + len = period_len; 1037 + if ((len & 3) || (buf_addr & 3) || 1038 + (len > tdc->tdma->chip_data->max_dma_count)) { 1039 + dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n"); 1040 + return NULL; 1041 + } 1042 + 1043 + ret = get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr, 1044 + &burst_size, &slave_bw); 1045 + if (ret < 0) 1046 + return NULL; 1047 + 1048 + 1049 + ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB; 1050 + ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE << 1051 + TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT; 1052 + ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32; 1053 + 1054 + csr |= TEGRA_APBDMA_CSR_FLOW | TEGRA_APBDMA_CSR_IE_EOC; 1055 + csr |= tdc->dma_sconfig.slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT; 1056 + 1057 + apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1; 1058 + 1059 + dma_desc = tegra_dma_desc_get(tdc); 1060 + if (!dma_desc) { 1061 + dev_err(tdc2dev(tdc), "not enough descriptors available\n"); 1062 + return NULL; 1063 + } 1064 + 1065 + INIT_LIST_HEAD(&dma_desc->tx_list); 1066 + INIT_LIST_HEAD(&dma_desc->cb_node); 1067 + dma_desc->cb_count = 0; 1068 + 1069 + dma_desc->bytes_transferred = 0; 1070 + dma_desc->bytes_requested = buf_len; 1071 + remain_len = buf_len; 1072 + 1073 + /* Split transfer equal to period size */ 1074 + while (remain_len) { 1075 + sg_req = tegra_dma_sg_req_get(tdc); 1076 + if (!sg_req) { 1077 + dev_err(tdc2dev(tdc), "Dma sg-req not available\n"); 1078 + tegra_dma_desc_put(tdc, dma_desc); 1079 + return NULL; 1080 + } 1081 + 1082 + ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 1083 + sg_req->ch_regs.apb_ptr = apb_ptr; 1084 + sg_req->ch_regs.ahb_ptr = mem; 1085 + sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); 1086 + sg_req->ch_regs.apb_seq = apb_seq; 1087 + sg_req->ch_regs.ahb_seq = ahb_seq; 1088 + sg_req->configured = false; 1089 + sg_req->half_done = false; 1090 + sg_req->last_sg = false; 1091 + sg_req->dma_desc = dma_desc; 1092 + sg_req->req_len = len; 1093 + 1094 + list_add_tail(&sg_req->node, &dma_desc->tx_list); 1095 + remain_len -= len; 1096 + mem += len; 1097 + } 1098 + sg_req->last_sg = true; 1099 + dma_desc->txd.flags = 0; 1100 + 1101 + /* 1102 + * Make sure that mode should not be conflicting with currently 1103 + * configured mode. 
1104 + */ 1105 + if (!tdc->isr_handler) { 1106 + tdc->isr_handler = handle_cont_sngl_cycle_dma_done; 1107 + tdc->cyclic = true; 1108 + } else { 1109 + if (!tdc->cyclic) { 1110 + dev_err(tdc2dev(tdc), "DMA configuration conflict\n"); 1111 + tegra_dma_desc_put(tdc, dma_desc); 1112 + return NULL; 1113 + } 1114 + } 1115 + 1116 + return &dma_desc->txd; 1117 + } 1118 + 1119 + static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) 1120 + { 1121 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1122 + 1123 + dma_cookie_init(&tdc->dma_chan); 1124 + tdc->config_init = false; 1125 + return 0; 1126 + } 1127 + 1128 + static void tegra_dma_free_chan_resources(struct dma_chan *dc) 1129 + { 1130 + struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); 1131 + 1132 + struct tegra_dma_desc *dma_desc; 1133 + struct tegra_dma_sg_req *sg_req; 1134 + struct list_head dma_desc_list; 1135 + struct list_head sg_req_list; 1136 + unsigned long flags; 1137 + 1138 + INIT_LIST_HEAD(&dma_desc_list); 1139 + INIT_LIST_HEAD(&sg_req_list); 1140 + 1141 + dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id); 1142 + 1143 + if (tdc->busy) 1144 + tegra_dma_terminate_all(dc); 1145 + 1146 + spin_lock_irqsave(&tdc->lock, flags); 1147 + list_splice_init(&tdc->pending_sg_req, &sg_req_list); 1148 + list_splice_init(&tdc->free_sg_req, &sg_req_list); 1149 + list_splice_init(&tdc->free_dma_desc, &dma_desc_list); 1150 + INIT_LIST_HEAD(&tdc->cb_desc); 1151 + tdc->config_init = false; 1152 + spin_unlock_irqrestore(&tdc->lock, flags); 1153 + 1154 + while (!list_empty(&dma_desc_list)) { 1155 + dma_desc = list_first_entry(&dma_desc_list, 1156 + typeof(*dma_desc), node); 1157 + list_del(&dma_desc->node); 1158 + kfree(dma_desc); 1159 + } 1160 + 1161 + while (!list_empty(&sg_req_list)) { 1162 + sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node); 1163 + list_del(&sg_req->node); 1164 + kfree(sg_req); 1165 + } 1166 + } 1167 + 1168 + /* Tegra20 specific DMA controller information */ 1169 + static struct tegra_dma_chip_data tegra20_dma_chip_data = { 1170 + .nr_channels = 16, 1171 + .max_dma_count = 1024UL * 64, 1172 + }; 1173 + 1174 + #if defined(CONFIG_OF) 1175 + /* Tegra30 specific DMA controller information */ 1176 + static struct tegra_dma_chip_data tegra30_dma_chip_data = { 1177 + .nr_channels = 32, 1178 + .max_dma_count = 1024UL * 64, 1179 + }; 1180 + 1181 + static const struct of_device_id tegra_dma_of_match[] __devinitconst = { 1182 + { 1183 + .compatible = "nvidia,tegra30-apbdma", 1184 + .data = &tegra30_dma_chip_data, 1185 + }, { 1186 + .compatible = "nvidia,tegra20-apbdma", 1187 + .data = &tegra20_dma_chip_data, 1188 + }, { 1189 + }, 1190 + }; 1191 + MODULE_DEVICE_TABLE(of, tegra_dma_of_match); 1192 + #endif 1193 + 1194 + static int __devinit tegra_dma_probe(struct platform_device *pdev) 1195 + { 1196 + struct resource *res; 1197 + struct tegra_dma *tdma; 1198 + int ret; 1199 + int i; 1200 + struct tegra_dma_chip_data *cdata = NULL; 1201 + 1202 + if (pdev->dev.of_node) { 1203 + const struct of_device_id *match; 1204 + match = of_match_device(of_match_ptr(tegra_dma_of_match), 1205 + &pdev->dev); 1206 + if (!match) { 1207 + dev_err(&pdev->dev, "Error: No device match found\n"); 1208 + return -ENODEV; 1209 + } 1210 + cdata = match->data; 1211 + } else { 1212 + /* If no device tree then fallback to tegra20 */ 1213 + cdata = &tegra20_dma_chip_data; 1214 + } 1215 + 1216 + tdma = devm_kzalloc(&pdev->dev, sizeof(*tdma) + cdata->nr_channels * 1217 + sizeof(struct tegra_dma_channel), GFP_KERNEL); 1218 + if (!tdma) { 1219 + 
dev_err(&pdev->dev, "Error: memory allocation failed\n"); 1220 + return -ENOMEM; 1221 + } 1222 + 1223 + tdma->dev = &pdev->dev; 1224 + tdma->chip_data = cdata; 1225 + platform_set_drvdata(pdev, tdma); 1226 + 1227 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1228 + if (!res) { 1229 + dev_err(&pdev->dev, "No mem resource for DMA\n"); 1230 + return -EINVAL; 1231 + } 1232 + 1233 + tdma->base_addr = devm_request_and_ioremap(&pdev->dev, res); 1234 + if (!tdma->base_addr) { 1235 + dev_err(&pdev->dev, 1236 + "Cannot request memregion/iomap dma address\n"); 1237 + return -EADDRNOTAVAIL; 1238 + } 1239 + 1240 + tdma->dma_clk = devm_clk_get(&pdev->dev, NULL); 1241 + if (IS_ERR(tdma->dma_clk)) { 1242 + dev_err(&pdev->dev, "Error: Missing controller clock\n"); 1243 + return PTR_ERR(tdma->dma_clk); 1244 + } 1245 + 1246 + spin_lock_init(&tdma->global_lock); 1247 + 1248 + pm_runtime_enable(&pdev->dev); 1249 + if (!pm_runtime_enabled(&pdev->dev)) { 1250 + ret = tegra_dma_runtime_resume(&pdev->dev); 1251 + if (ret) { 1252 + dev_err(&pdev->dev, "dma_runtime_resume failed %d\n", 1253 + ret); 1254 + goto err_pm_disable; 1255 + } 1256 + } 1257 + 1258 + /* Reset DMA controller */ 1259 + tegra_periph_reset_assert(tdma->dma_clk); 1260 + udelay(2); 1261 + tegra_periph_reset_deassert(tdma->dma_clk); 1262 + 1263 + /* Enable global DMA registers */ 1264 + tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE); 1265 + tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0); 1266 + tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul); 1267 + 1268 + INIT_LIST_HEAD(&tdma->dma_dev.channels); 1269 + for (i = 0; i < cdata->nr_channels; i++) { 1270 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1271 + char irq_name[30]; 1272 + 1273 + tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + 1274 + i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; 1275 + 1276 + res = platform_get_resource(pdev, IORESOURCE_IRQ, i); 1277 + if (!res) { 1278 + ret = -EINVAL; 1279 + dev_err(&pdev->dev, "No irq resource for chan %d\n", i); 1280 + goto err_irq; 1281 + } 1282 + tdc->irq = res->start; 1283 + snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i); 1284 + ret = devm_request_irq(&pdev->dev, tdc->irq, 1285 + tegra_dma_isr, 0, irq_name, tdc); 1286 + if (ret) { 1287 + dev_err(&pdev->dev, 1288 + "request_irq failed with err %d channel %d\n", 1289 + i, ret); 1290 + goto err_irq; 1291 + } 1292 + 1293 + tdc->dma_chan.device = &tdma->dma_dev; 1294 + dma_cookie_init(&tdc->dma_chan); 1295 + list_add_tail(&tdc->dma_chan.device_node, 1296 + &tdma->dma_dev.channels); 1297 + tdc->tdma = tdma; 1298 + tdc->id = i; 1299 + 1300 + tasklet_init(&tdc->tasklet, tegra_dma_tasklet, 1301 + (unsigned long)tdc); 1302 + spin_lock_init(&tdc->lock); 1303 + 1304 + INIT_LIST_HEAD(&tdc->pending_sg_req); 1305 + INIT_LIST_HEAD(&tdc->free_sg_req); 1306 + INIT_LIST_HEAD(&tdc->free_dma_desc); 1307 + INIT_LIST_HEAD(&tdc->cb_desc); 1308 + } 1309 + 1310 + dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask); 1311 + dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask); 1312 + dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask); 1313 + 1314 + tdma->dma_dev.dev = &pdev->dev; 1315 + tdma->dma_dev.device_alloc_chan_resources = 1316 + tegra_dma_alloc_chan_resources; 1317 + tdma->dma_dev.device_free_chan_resources = 1318 + tegra_dma_free_chan_resources; 1319 + tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg; 1320 + tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic; 1321 + tdma->dma_dev.device_control = tegra_dma_device_control; 1322 + 
tdma->dma_dev.device_tx_status = tegra_dma_tx_status; 1323 + tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending; 1324 + 1325 + ret = dma_async_device_register(&tdma->dma_dev); 1326 + if (ret < 0) { 1327 + dev_err(&pdev->dev, 1328 + "Tegra20 APB DMA driver registration failed %d\n", ret); 1329 + goto err_irq; 1330 + } 1331 + 1332 + dev_info(&pdev->dev, "Tegra20 APB DMA driver register %d channels\n", 1333 + cdata->nr_channels); 1334 + return 0; 1335 + 1336 + err_irq: 1337 + while (--i >= 0) { 1338 + struct tegra_dma_channel *tdc = &tdma->channels[i]; 1339 + tasklet_kill(&tdc->tasklet); 1340 + } 1341 + 1342 + err_pm_disable: 1343 + pm_runtime_disable(&pdev->dev); 1344 + if (!pm_runtime_status_suspended(&pdev->dev)) 1345 + tegra_dma_runtime_suspend(&pdev->dev); 1346 + return ret; 1347 + } 1348 + 1349 + static int __devexit tegra_dma_remove(struct platform_device *pdev) 1350 + { 1351 + struct tegra_dma *tdma = platform_get_drvdata(pdev); 1352 + int i; 1353 + struct tegra_dma_channel *tdc; 1354 + 1355 + dma_async_device_unregister(&tdma->dma_dev); 1356 + 1357 + for (i = 0; i < tdma->chip_data->nr_channels; ++i) { 1358 + tdc = &tdma->channels[i]; 1359 + tasklet_kill(&tdc->tasklet); 1360 + } 1361 + 1362 + pm_runtime_disable(&pdev->dev); 1363 + if (!pm_runtime_status_suspended(&pdev->dev)) 1364 + tegra_dma_runtime_suspend(&pdev->dev); 1365 + 1366 + return 0; 1367 + } 1368 + 1369 + static int tegra_dma_runtime_suspend(struct device *dev) 1370 + { 1371 + struct platform_device *pdev = to_platform_device(dev); 1372 + struct tegra_dma *tdma = platform_get_drvdata(pdev); 1373 + 1374 + clk_disable_unprepare(tdma->dma_clk); 1375 + return 0; 1376 + } 1377 + 1378 + static int tegra_dma_runtime_resume(struct device *dev) 1379 + { 1380 + struct platform_device *pdev = to_platform_device(dev); 1381 + struct tegra_dma *tdma = platform_get_drvdata(pdev); 1382 + int ret; 1383 + 1384 + ret = clk_prepare_enable(tdma->dma_clk); 1385 + if (ret < 0) { 1386 + dev_err(dev, "clk_enable failed: %d\n", ret); 1387 + return ret; 1388 + } 1389 + return 0; 1390 + } 1391 + 1392 + static const struct dev_pm_ops tegra_dma_dev_pm_ops __devinitconst = { 1393 + #ifdef CONFIG_PM_RUNTIME 1394 + .runtime_suspend = tegra_dma_runtime_suspend, 1395 + .runtime_resume = tegra_dma_runtime_resume, 1396 + #endif 1397 + }; 1398 + 1399 + static struct platform_driver tegra_dmac_driver = { 1400 + .driver = { 1401 + .name = "tegra-apbdma", 1402 + .owner = THIS_MODULE, 1403 + .pm = &tegra_dma_dev_pm_ops, 1404 + .of_match_table = of_match_ptr(tegra_dma_of_match), 1405 + }, 1406 + .probe = tegra_dma_probe, 1407 + .remove = __devexit_p(tegra_dma_remove), 1408 + }; 1409 + 1410 + module_platform_driver(tegra_dmac_driver); 1411 + 1412 + MODULE_ALIAS("platform:tegra20-apbdma"); 1413 + MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver"); 1414 + MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); 1415 + MODULE_LICENSE("GPL v2");
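For context, a minimal sketch of how a slave peripheral driver might drive this new controller through the generic dmaengine API it registers (device_control/DMA_SLAVE_CONFIG, prep_slave_sg/single, issue_pending). This is not part of the patch; the FIFO address, requester ID and function name below are placeholders.

/* Hypothetical client of the Tegra20 APB DMA driver; all values are illustrative. */
#include <linux/dmaengine.h>
#include <linux/types.h>

static int example_start_rx(dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = { };
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* No filter callback: any free DMA_SLAVE channel of this controller will do. */
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* Matches what get_transfer_param()/get_bus_width() expect for DEV_TO_MEM. */
	cfg.direction      = DMA_DEV_TO_MEM;
	cfg.src_addr       = 0x70002000;		/* placeholder peripheral FIFO address */
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst   = 4;
	cfg.slave_id       = 5;				/* placeholder APB DMA requester ID */
	if (dmaengine_slave_config(chan, &cfg) < 0)
		goto release;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		goto release;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* tegra_dma_issue_pending() starts the head request */
	return 0;

release:
	dma_release_channel(chan);
	return -EIO;
}

Cyclic (audio-style) clients would call dmaengine_prep_dma_cyclic() instead, which maps onto handle_cont_sngl_cycle_dma_done() above; the two modes cannot be mixed on one channel, as the isr_handler/cyclic checks in both prep routines enforce.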
+48 -40
drivers/mmc/host/sh_mmcif.c
··· 213 213 struct mmc_host *mmc; 214 214 struct mmc_request *mrq; 215 215 struct platform_device *pd; 216 - struct sh_dmae_slave dma_slave_tx; 217 - struct sh_dmae_slave dma_slave_rx; 218 216 struct clk *hclk; 219 217 unsigned int clk; 220 218 int bus_width; ··· 371 373 desc, cookie); 372 374 } 373 375 374 - static bool sh_mmcif_filter(struct dma_chan *chan, void *arg) 375 - { 376 - dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); 377 - chan->private = arg; 378 - return true; 379 - } 380 - 381 376 static void sh_mmcif_request_dma(struct sh_mmcif_host *host, 382 377 struct sh_mmcif_plat_data *pdata) 383 378 { 384 - struct sh_dmae_slave *tx, *rx; 379 + struct resource *res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); 380 + struct dma_slave_config cfg; 381 + dma_cap_mask_t mask; 382 + int ret; 383 + 385 384 host->dma_active = false; 386 385 387 386 if (!pdata) 388 387 return; 389 388 389 + if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0) 390 + return; 391 + 390 392 /* We can only either use DMA for both Tx and Rx or not use it at all */ 391 - if (pdata->dma) { 392 - dev_warn(&host->pd->dev, 393 - "Update your platform to use embedded DMA slave IDs\n"); 394 - tx = &pdata->dma->chan_priv_tx; 395 - rx = &pdata->dma->chan_priv_rx; 396 - } else { 397 - tx = &host->dma_slave_tx; 398 - tx->slave_id = pdata->slave_id_tx; 399 - rx = &host->dma_slave_rx; 400 - rx->slave_id = pdata->slave_id_rx; 401 - } 402 - if (tx->slave_id > 0 && rx->slave_id > 0) { 403 - dma_cap_mask_t mask; 393 + dma_cap_zero(mask); 394 + dma_cap_set(DMA_SLAVE, mask); 404 395 405 - dma_cap_zero(mask); 406 - dma_cap_set(DMA_SLAVE, mask); 396 + host->chan_tx = dma_request_channel(mask, shdma_chan_filter, 397 + (void *)pdata->slave_id_tx); 398 + dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__, 399 + host->chan_tx); 407 400 408 - host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx); 409 - dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__, 410 - host->chan_tx); 401 + if (!host->chan_tx) 402 + return; 411 403 412 - if (!host->chan_tx) 413 - return; 404 + cfg.slave_id = pdata->slave_id_tx; 405 + cfg.direction = DMA_MEM_TO_DEV; 406 + cfg.dst_addr = res->start + MMCIF_CE_DATA; 407 + cfg.src_addr = 0; 408 + ret = dmaengine_slave_config(host->chan_tx, &cfg); 409 + if (ret < 0) 410 + goto ecfgtx; 414 411 415 - host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx); 416 - dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__, 417 - host->chan_rx); 412 + host->chan_rx = dma_request_channel(mask, shdma_chan_filter, 413 + (void *)pdata->slave_id_rx); 414 + dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__, 415 + host->chan_rx); 418 416 419 - if (!host->chan_rx) { 420 - dma_release_channel(host->chan_tx); 421 - host->chan_tx = NULL; 422 - return; 423 - } 417 + if (!host->chan_rx) 418 + goto erqrx; 424 419 425 - init_completion(&host->dma_complete); 426 - } 420 + cfg.slave_id = pdata->slave_id_rx; 421 + cfg.direction = DMA_DEV_TO_MEM; 422 + cfg.dst_addr = 0; 423 + cfg.src_addr = res->start + MMCIF_CE_DATA; 424 + ret = dmaengine_slave_config(host->chan_rx, &cfg); 425 + if (ret < 0) 426 + goto ecfgrx; 427 + 428 + init_completion(&host->dma_complete); 429 + 430 + return; 431 + 432 + ecfgrx: 433 + dma_release_channel(host->chan_rx); 434 + host->chan_rx = NULL; 435 + erqrx: 436 + ecfgtx: 437 + dma_release_channel(host->chan_tx); 438 + host->chan_tx = NULL; 427 439 } 428 440 429 441 static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
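The same pattern generalizes to any shdma client: pass the integer slave ID to shdma_chan_filter() at allocation time and repeat it (plus the FIFO address) in dma_slave_config, instead of hanging a private structure off the channel. A condensed sketch under those assumptions; the helper name and parameters are illustrative, not from this patch.

#include <linux/dmaengine.h>
#include <linux/sh_dma.h>

/* Hypothetical helper: slave_id and fifo_addr would come from platform data. */
static struct dma_chan *example_request_tx(unsigned int slave_id, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = { };
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The slave ID doubles as the filter argument, as in sh_mmcif_request_dma() above. */
	chan = dma_request_channel(mask, shdma_chan_filter, (void *)slave_id);
	if (!chan)
		return NULL;

	cfg.slave_id  = slave_id;
	cfg.direction = DMA_MEM_TO_DEV;
	cfg.dst_addr  = fifo_addr;		/* peripheral data register, placeholder */
	if (dmaengine_slave_config(chan, &cfg) < 0) {
		dma_release_channel(chan);
		return NULL;
	}
	return chan;
}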
+4 -4
drivers/mmc/host/sh_mobile_sdhi.c
··· 169 169 mmc_data->get_cd = sh_mobile_sdhi_get_cd; 170 170 171 171 if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { 172 - priv->param_tx.slave_id = p->dma_slave_tx; 173 - priv->param_rx.slave_id = p->dma_slave_rx; 174 - priv->dma_priv.chan_priv_tx = &priv->param_tx; 175 - priv->dma_priv.chan_priv_rx = &priv->param_rx; 172 + priv->param_tx.shdma_slave.slave_id = p->dma_slave_tx; 173 + priv->param_rx.shdma_slave.slave_id = p->dma_slave_rx; 174 + priv->dma_priv.chan_priv_tx = &priv->param_tx.shdma_slave; 175 + priv->dma_priv.chan_priv_rx = &priv->param_rx.shdma_slave; 176 176 priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ 177 177 mmc_data->dma = &priv->dma_priv; 178 178 }
+4 -4
drivers/tty/serial/sh-sci.c
··· 1615 1615 struct sh_dmae_slave *param = slave; 1616 1616 1617 1617 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, 1618 - param->slave_id); 1618 + param->shdma_slave.slave_id); 1619 1619 1620 - chan->private = param; 1620 + chan->private = &param->shdma_slave; 1621 1621 return true; 1622 1622 } 1623 1623 ··· 1656 1656 param = &s->param_tx; 1657 1657 1658 1658 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ 1659 - param->slave_id = s->cfg->dma_slave_tx; 1659 + param->shdma_slave.slave_id = s->cfg->dma_slave_tx; 1660 1660 1661 1661 s->cookie_tx = -EINVAL; 1662 1662 chan = dma_request_channel(mask, filter, param); ··· 1684 1684 param = &s->param_rx; 1685 1685 1686 1686 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ 1687 - param->slave_id = s->cfg->dma_slave_rx; 1687 + param->shdma_slave.slave_id = s->cfg->dma_slave_rx; 1688 1688 1689 1689 chan = dma_request_channel(mask, filter, param); 1690 1690 dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan);
+5 -5
drivers/usb/renesas_usbhs/fifo.c
··· 994 994 * 995 995 * usbhs doesn't recognize id = 0 as valid DMA 996 996 */ 997 - if (0 == slave->slave_id) 997 + if (0 == slave->shdma_slave.slave_id) 998 998 return false; 999 999 1000 1000 chan->private = slave; ··· 1173 1173 fifo->port = D0FIFO; 1174 1174 fifo->sel = D0FIFOSEL; 1175 1175 fifo->ctr = D0FIFOCTR; 1176 - fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id); 1177 - fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id); 1176 + fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_tx_id); 1177 + fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d0_rx_id); 1178 1178 1179 1179 /* D1FIFO */ 1180 1180 fifo = usbhsf_get_d1fifo(priv); ··· 1182 1182 fifo->port = D1FIFO; 1183 1183 fifo->sel = D1FIFOSEL; 1184 1184 fifo->ctr = D1FIFOCTR; 1185 - fifo->tx_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id); 1186 - fifo->rx_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id); 1185 + fifo->tx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_tx_id); 1186 + fifo->rx_slave.shdma_slave.slave_id = usbhs_get_dparam(priv, d1_rx_id); 1187 1187 1188 1188 return 0; 1189 1189 }
+4
include/linux/dmaengine.h
··· 338 338 * @device_fc: Flow Controller Settings. Only valid for slave channels. Fill 339 339 * with 'true' if peripheral should be flow controller. Direction will be 340 340 * selected at Runtime. 341 + * @slave_id: Slave requester id. Only valid for slave channels. The dma 342 + * slave peripheral will have unique id as dma requester which need to be 343 + * pass as slave config. 341 344 * 342 345 * This struct is passed in as configuration data to a DMA engine 343 346 * in order to set up a certain channel for DMA transport at runtime. ··· 368 365 u32 src_maxburst; 369 366 u32 dst_maxburst; 370 367 bool device_fc; 368 + unsigned int slave_id; 371 369 }; 372 370 373 371 static inline const char *dma_chan_name(struct dma_chan *chan)
+1 -7
include/linux/mmc/sh_mmcif.h
··· 32 32 * 1111 : Peripheral clock (sup_pclk set '1') 33 33 */ 34 34 35 - struct sh_mmcif_dma { 36 - struct sh_dmae_slave chan_priv_tx; 37 - struct sh_dmae_slave chan_priv_rx; 38 - }; 39 - 40 35 struct sh_mmcif_plat_data { 41 36 void (*set_pwr)(struct platform_device *pdev, int state); 42 37 void (*down_pwr)(struct platform_device *pdev); 43 38 int (*get_cd)(struct platform_device *pdef); 44 - struct sh_mmcif_dma *dma; /* Deprecated. Instead */ 45 - unsigned int slave_id_tx; /* use embedded slave_id_[tr]x */ 39 + unsigned int slave_id_tx; /* embedded slave_id_[tr]x */ 46 40 unsigned int slave_id_rx; 47 41 bool use_cd_gpio : 1; 48 42 unsigned int cd_gpio;
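With struct sh_mmcif_dma gone, a board only supplies the two integer IDs. A hedged board-file sketch; the EXAMPLE_* values stand in for whatever SHDMA_SLAVE_* constants the SoC actually defines, and all other platform-data fields are omitted.

#include <linux/mmc/sh_mmcif.h>

/* Placeholder IDs; a real board uses the SoC's SHDMA_SLAVE_* enum values. */
#define EXAMPLE_SLAVE_MMCIF_TX	20
#define EXAMPLE_SLAVE_MMCIF_RX	21

static struct sh_mmcif_plat_data example_mmcif_data = {
	/* integer slave IDs replace the old struct sh_mmcif_dma pointer */
	.slave_id_tx	= EXAMPLE_SLAVE_MMCIF_TX,
	.slave_id_rx	= EXAMPLE_SLAVE_MMCIF_RX,
};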
+16 -25
include/linux/sh_dma.h
··· 10 10 #ifndef SH_DMA_H 11 11 #define SH_DMA_H 12 12 13 - #include <linux/list.h> 14 13 #include <linux/dmaengine.h> 14 + #include <linux/list.h> 15 + #include <linux/shdma-base.h> 16 + #include <linux/types.h> 17 + 18 + struct device; 15 19 16 20 /* Used by slave DMA clients to request DMA to/from a specific peripheral */ 17 21 struct sh_dmae_slave { 18 - unsigned int slave_id; /* Set by the platform */ 19 - struct device *dma_dev; /* Set by the platform */ 20 - const struct sh_dmae_slave_config *config; /* Set by the driver */ 22 + struct shdma_slave shdma_slave; /* Set by the platform */ 21 23 }; 22 24 23 - struct sh_dmae_regs { 24 - u32 sar; /* SAR / source address */ 25 - u32 dar; /* DAR / destination address */ 26 - u32 tcr; /* TCR / transfer count */ 27 - }; 28 - 29 - struct sh_desc { 30 - struct sh_dmae_regs hw; 31 - struct list_head node; 32 - struct dma_async_tx_descriptor async_tx; 33 - enum dma_transfer_direction direction; 34 - dma_cookie_t cookie; 35 - size_t partial; 36 - int chunks; 37 - int mark; 38 - }; 39 - 25 + /* 26 + * Supplied by platforms to specify, how a DMA channel has to be configured for 27 + * a certain peripheral 28 + */ 40 29 struct sh_dmae_slave_config { 41 - unsigned int slave_id; 42 - dma_addr_t addr; 43 - u32 chcr; 44 - char mid_rid; 30 + int slave_id; 31 + dma_addr_t addr; 32 + u32 chcr; 33 + char mid_rid; 45 34 }; 46 35 47 36 struct sh_dmae_channel { ··· 98 109 #define CHCR_DE 0x00000001 99 110 #define CHCR_TE 0x00000002 100 111 #define CHCR_IE 0x00000004 112 + 113 + bool shdma_chan_filter(struct dma_chan *chan, void *arg); 101 114 102 115 #endif
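Drivers that still allocate channels through chan->private now hand the embedded shdma_slave to the core rather than the outer sh_dmae_slave, as the sh-sci, FSI and SIU conversions in this series do. A condensed sketch of that interim pattern; the function names and the way the slave ID is obtained are illustrative.

#include <linux/dmaengine.h>
#include <linux/sh_dma.h>

/* Interim pattern for drivers not yet converted to shdma_chan_filter(). */
static bool example_filter(struct dma_chan *chan, void *arg)
{
	struct sh_dmae_slave *param = arg;

	/* Hand the embedded shdma_slave, not the wrapper, to the dmaengine core. */
	chan->private = &param->shdma_slave;
	return true;
}

static struct dma_chan *example_request(struct sh_dmae_slave *param, int slave_id)
{
	dma_cap_mask_t mask;

	param->shdma_slave.slave_id = slave_id;	/* e.g. an SHDMA_SLAVE_* value */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	return dma_request_channel(mask, example_filter, param);
}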
+124
include/linux/shdma-base.h
··· 1 + /* 2 + * Dmaengine driver base library for DMA controllers, found on SH-based SoCs 3 + * 4 + * extracted from shdma.c and headers 5 + * 6 + * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 7 + * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com> 8 + * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved. 9 + * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. 10 + * 11 + * This is free software; you can redistribute it and/or modify 12 + * it under the terms of version 2 of the GNU General Public License as 13 + * published by the Free Software Foundation. 14 + */ 15 + 16 + #ifndef SHDMA_BASE_H 17 + #define SHDMA_BASE_H 18 + 19 + #include <linux/dmaengine.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/list.h> 22 + #include <linux/types.h> 23 + 24 + /** 25 + * shdma_pm_state - DMA channel PM state 26 + * SHDMA_PM_ESTABLISHED: either idle or during data transfer 27 + * SHDMA_PM_BUSY: during the transfer preparation, when we have to 28 + * drop the lock temporarily 29 + * SHDMA_PM_PENDING: transfers pending 30 + */ 31 + enum shdma_pm_state { 32 + SHDMA_PM_ESTABLISHED, 33 + SHDMA_PM_BUSY, 34 + SHDMA_PM_PENDING, 35 + }; 36 + 37 + struct device; 38 + 39 + /* 40 + * Drivers, using this library are expected to embed struct shdma_dev, 41 + * struct shdma_chan, struct shdma_desc, and struct shdma_slave 42 + * in their respective device, channel, descriptor and slave objects. 43 + */ 44 + 45 + struct shdma_slave { 46 + int slave_id; 47 + }; 48 + 49 + struct shdma_desc { 50 + struct list_head node; 51 + struct dma_async_tx_descriptor async_tx; 52 + enum dma_transfer_direction direction; 53 + dma_cookie_t cookie; 54 + int chunks; 55 + int mark; 56 + }; 57 + 58 + struct shdma_chan { 59 + spinlock_t chan_lock; /* Channel operation lock */ 60 + struct list_head ld_queue; /* Link descriptors queue */ 61 + struct list_head ld_free; /* Free link descriptors */ 62 + struct dma_chan dma_chan; /* DMA channel */ 63 + struct device *dev; /* Channel device */ 64 + void *desc; /* buffer for descriptor array */ 65 + int desc_num; /* desc count */ 66 + size_t max_xfer_len; /* max transfer length */ 67 + int id; /* Raw id of this channel */ 68 + int irq; /* Channel IRQ */ 69 + int slave_id; /* Client ID for slave DMA */ 70 + enum shdma_pm_state pm_state; 71 + }; 72 + 73 + /** 74 + * struct shdma_ops - simple DMA driver operations 75 + * desc_completed: return true, if this is the descriptor, that just has 76 + * completed (atomic) 77 + * halt_channel: stop DMA channel operation (atomic) 78 + * channel_busy: return true, if the channel is busy (atomic) 79 + * slave_addr: return slave DMA address 80 + * desc_setup: set up the hardware specific descriptor portion (atomic) 81 + * set_slave: bind channel to a slave 82 + * setup_xfer: configure channel hardware for operation (atomic) 83 + * start_xfer: start the DMA transfer (atomic) 84 + * embedded_desc: return Nth struct shdma_desc pointer from the 85 + * descriptor array 86 + * chan_irq: process channel IRQ, return true if a transfer has 87 + * completed (atomic) 88 + */ 89 + struct shdma_ops { 90 + bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *); 91 + void (*halt_channel)(struct shdma_chan *); 92 + bool (*channel_busy)(struct shdma_chan *); 93 + dma_addr_t (*slave_addr)(struct shdma_chan *); 94 + int (*desc_setup)(struct shdma_chan *, struct shdma_desc *, 95 + dma_addr_t, dma_addr_t, size_t *); 96 + int (*set_slave)(struct shdma_chan *, int, bool); 97 + void 
(*setup_xfer)(struct shdma_chan *, int); 98 + void (*start_xfer)(struct shdma_chan *, struct shdma_desc *); 99 + struct shdma_desc *(*embedded_desc)(void *, int); 100 + bool (*chan_irq)(struct shdma_chan *, int); 101 + }; 102 + 103 + struct shdma_dev { 104 + struct dma_device dma_dev; 105 + struct shdma_chan **schan; 106 + const struct shdma_ops *ops; 107 + size_t desc_size; 108 + }; 109 + 110 + #define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \ 111 + i < (d)->dma_dev.chancnt; c = (d)->schan[++i]) 112 + 113 + int shdma_request_irq(struct shdma_chan *, int, 114 + unsigned long, const char *); 115 + void shdma_free_irq(struct shdma_chan *); 116 + bool shdma_reset(struct shdma_dev *sdev); 117 + void shdma_chan_probe(struct shdma_dev *sdev, 118 + struct shdma_chan *schan, int id); 119 + void shdma_chan_remove(struct shdma_chan *schan); 120 + int shdma_init(struct device *dev, struct shdma_dev *sdev, 121 + int chan_num); 122 + void shdma_cleanup(struct shdma_dev *sdev); 123 + 124 + #endif
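On the controller side, a driver built on this library embeds the base structures, points struct shdma_ops at its hardware routines, and registers the channels before handing the dma_device to the core. A heavily abridged, hypothetical skeleton under those assumptions; the example_* names, channel count and empty ops table are placeholders, and real drivers also set desc_num, max_xfer_len and their own descriptor wrapper size.

#include <linux/dmaengine.h>
#include <linux/kernel.h>
#include <linux/shdma-base.h>

struct example_chan {
	struct shdma_chan shdma_chan;	/* base channel, embedded first */
	/* controller-specific channel state goes here */
};

struct example_dev {
	struct shdma_dev shdma_dev;	/* base device, embedded */
	struct example_chan chan[2];	/* placeholder channel count */
};

static const struct shdma_ops example_ops = {
	/* .desc_completed, .halt_channel, .channel_busy, .slave_addr,
	 * .desc_setup, .set_slave, .setup_xfer, .start_xfer,
	 * .embedded_desc and .chan_irq would point at hardware-specific code. */
};

static int example_register(struct device *dev, struct example_dev *edev)
{
	int i, ret;

	edev->shdma_dev.ops = &example_ops;
	/* In a real driver this is the size of the hardware descriptor wrapper. */
	edev->shdma_dev.desc_size = sizeof(struct shdma_desc);
	dma_cap_set(DMA_SLAVE, edev->shdma_dev.dma_dev.cap_mask);

	ret = shdma_init(dev, &edev->shdma_dev, ARRAY_SIZE(edev->chan));
	if (ret < 0)
		return ret;

	for (i = 0; i < ARRAY_SIZE(edev->chan); i++)
		shdma_chan_probe(&edev->shdma_dev, &edev->chan[i].shdma_chan, i);

	return dma_async_device_register(&edev->shdma_dev.dma_dev);
}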
+2 -2
sound/soc/sh/fsi.c
··· 1631 1631 fsi->capture.priv = fsi; 1632 1632 1633 1633 if (fsi->info->tx_id) { 1634 - fsi->playback.slave.slave_id = fsi->info->tx_id; 1635 - fsi->playback.handler = &fsi_dma_push_handler; 1634 + fsi->playback.slave.shdma_slave.slave_id = fsi->info->tx_id; 1635 + fsi->playback.handler = &fsi_dma_push_handler; 1636 1636 } 1637 1637 } 1638 1638
+4 -8
sound/soc/sh/siu_pcm.c
··· 330 330 { 331 331 struct sh_dmae_slave *param = slave; 332 332 333 - pr_debug("%s: slave ID %d\n", __func__, param->slave_id); 333 + pr_debug("%s: slave ID %d\n", __func__, param->shdma_slave.slave_id); 334 334 335 - if (unlikely(param->dma_dev != chan->device->dev)) 336 - return false; 337 - 338 - chan->private = param; 335 + chan->private = &param->shdma_slave; 339 336 return true; 340 337 } 341 338 ··· 357 360 if (ss->stream == SNDRV_PCM_STREAM_PLAYBACK) { 358 361 siu_stream = &port_info->playback; 359 362 param = &siu_stream->param; 360 - param->slave_id = port ? pdata->dma_slave_tx_b : 363 + param->shdma_slave.slave_id = port ? pdata->dma_slave_tx_b : 361 364 pdata->dma_slave_tx_a; 362 365 } else { 363 366 siu_stream = &port_info->capture; 364 367 param = &siu_stream->param; 365 - param->slave_id = port ? pdata->dma_slave_rx_b : 368 + param->shdma_slave.slave_id = port ? pdata->dma_slave_rx_b : 366 369 pdata->dma_slave_rx_a; 367 370 } 368 371 369 - param->dma_dev = pdata->dma_dev; 370 372 /* Get DMA channel */ 371 373 siu_stream->chan = dma_request_channel(mask, filter, param); 372 374 if (!siu_stream->chan) {