Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine updates from Vinod Koul:
"This time we have Andy updates on dw_dmac which is attempting to make
this IP block available as PCI and platform device though not fully
complete this time.

We also have TI EDMA moving the dma driver to use dmaengine APIs, also
have a new driver for mmp-tdma, along with a bunch of small updates.

Now for your excitement the merge is little unusual here, while
merging the auto merge on linux-next picks wrong choice for pl330
(drivers/dma/pl330.c) and this causes build failure. The correct
resolution is in linux-next. (DMA: PL330: Fix build error) I didn't
back merge your tree this time as you are better than me so no point
in doing that for me :)"

Fixed the pl330 conflict as in linux-next, along with trivial header
file conflicts due to changed includes.

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (29 commits)
dma: tegra: fix interrupt name issue with apb dma.
dw_dmac: fix a regression in dwc_prep_dma_memcpy
dw_dmac: introduce software emulation of LLP transfers
dw_dmac: autoconfigure data_width or get it via platform data
dw_dmac: autoconfigure block_size or use platform data
dw_dmac: get number of channels from hardware if possible
dw_dmac: fill optional encoded parameters in register structure
dw_dmac: mark dwc_dump_chan_regs as inline
DMA: PL330: return ENOMEM instead of 0 from pl330_alloc_chan_resources
DMA: PL330: Remove redundant runtime_suspend/resume functions
DMA: PL330: Remove controller clock enable/disable
dmaengine: use kmem_cache_zalloc instead of kmem_cache_alloc/memset
DMA: PL330: Set the capability of pdm0 and pdm1 as DMA_PRIVATE
ARM: EXYNOS: Set the capability of pdm0 and pdm1 as DMA_PRIVATE
dma: tegra: use list_move_tail instead of list_del/list_add_tail
mxs/dma: Enlarge the CCW descriptor area to 4 pages
dw_dmac: utilize slave_id to pass request line
dmaengine: mmp_tdma: add dt support
dmaengine: mmp-pdma support
spi: davici - make davinci select edma
...

+2088 -364
+2
arch/arm/mach-exynos/dma.c
··· 303 303 304 304 dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask); 305 305 dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask); 306 + dma_cap_set(DMA_PRIVATE, exynos_pdma0_pdata.cap_mask); 306 307 amba_device_register(&exynos_pdma0_device, &iomem_resource); 307 308 308 309 dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask); 309 310 dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask); 311 + dma_cap_set(DMA_PRIVATE, exynos_pdma1_pdata.cap_mask); 310 312 amba_device_register(&exynos_pdma1_device, &iomem_resource); 311 313 312 314 dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
+3
arch/arm/mach-spear13xx/spear13xx.c
··· 78 78 .nr_channels = 8, 79 79 .chan_allocation_order = CHAN_ALLOCATION_DESCENDING, 80 80 .chan_priority = CHAN_PRIORITY_DESCENDING, 81 + .block_size = 4095U, 82 + .nr_masters = 2, 83 + .data_width = { 3, 3, 0, 0 }, 81 84 }; 82 85 83 86 void __init spear13xx_l2x0_init(void)
+3
arch/avr32/mach-at32ap/at32ap700x.c
··· 605 605 606 606 static struct dw_dma_platform_data dw_dmac0_data = { 607 607 .nr_channels = 3, 608 + .block_size = 4095U, 609 + .nr_masters = 2, 610 + .data_width = { 2, 2, 0, 0 }, 608 611 }; 609 612 610 613 static struct resource dw_dmac0_resource[] = {
+17
drivers/dma/Kconfig
··· 208 208 help 209 209 Enable support for the CSR SiRFprimaII DMA engine. 210 210 211 + config TI_EDMA 212 + tristate "TI EDMA support" 213 + depends on ARCH_DAVINCI 214 + select DMA_ENGINE 215 + select DMA_VIRTUAL_CHANNELS 216 + default n 217 + help 218 + Enable support for the TI EDMA controller. This DMA 219 + engine is found on TI DaVinci and AM33xx parts. 220 + 211 221 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL 212 222 bool 213 223 ··· 301 291 depends on ARCH_OMAP 302 292 select DMA_ENGINE 303 293 select DMA_VIRTUAL_CHANNELS 294 + 295 + config MMP_PDMA 296 + bool "MMP PDMA support" 297 + depends on (ARCH_MMP || ARCH_PXA) 298 + select DMA_ENGINE 299 + help 300 + Support the MMP PDMA engine for PXA and MMP platfrom. 304 301 305 302 config DMA_ENGINE 306 303 bool
+2
drivers/dma/Makefile
··· 23 23 obj-$(CONFIG_MXS_DMA) += mxs-dma.o 24 24 obj-$(CONFIG_TIMB_DMA) += timb_dma.o 25 25 obj-$(CONFIG_SIRF_DMA) += sirf-dma.o 26 + obj-$(CONFIG_TI_EDMA) += edma.o 26 27 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 27 28 obj-$(CONFIG_TEGRA20_APB_DMA) += tegra20-apb-dma.o 28 29 obj-$(CONFIG_PL330_DMA) += pl330.o ··· 33 32 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o 34 33 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o 35 34 obj-$(CONFIG_DMA_OMAP) += omap-dma.o 35 + obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
+2
drivers/dma/amba-pl08x.c
··· 1892 1892 pl08x->pd = dev_get_platdata(&adev->dev); 1893 1893 if (!pl08x->pd) { 1894 1894 dev_err(&adev->dev, "no platform data supplied\n"); 1895 + ret = -EINVAL; 1895 1896 goto out_no_platdata; 1896 1897 } 1897 1898 ··· 1944 1943 dev_err(&adev->dev, "%s failed to allocate " 1945 1944 "physical channel holders\n", 1946 1945 __func__); 1946 + ret = -ENOMEM; 1947 1947 goto out_no_phychans; 1948 1948 } 1949 1949
+189 -73
drivers/dma/dw_dmac.c
··· 36 36 * which does not support descriptor writeback. 37 37 */ 38 38 39 + static inline unsigned int dwc_get_dms(struct dw_dma_slave *slave) 40 + { 41 + return slave ? slave->dst_master : 0; 42 + } 43 + 44 + static inline unsigned int dwc_get_sms(struct dw_dma_slave *slave) 45 + { 46 + return slave ? slave->src_master : 1; 47 + } 48 + 39 49 #define DWC_DEFAULT_CTLLO(_chan) ({ \ 40 50 struct dw_dma_slave *__slave = (_chan->private); \ 41 51 struct dw_dma_chan *_dwc = to_dw_dma_chan(_chan); \ 42 52 struct dma_slave_config *_sconfig = &_dwc->dma_sconfig; \ 43 - int _dms = __slave ? __slave->dst_master : 0; \ 44 - int _sms = __slave ? __slave->src_master : 1; \ 53 + int _dms = dwc_get_dms(__slave); \ 54 + int _sms = dwc_get_sms(__slave); \ 45 55 u8 _smsize = __slave ? _sconfig->src_maxburst : \ 46 56 DW_DMA_MSIZE_16; \ 47 57 u8 _dmsize = __slave ? _sconfig->dst_maxburst : \ ··· 64 54 | DWC_CTLL_DMS(_dms) \ 65 55 | DWC_CTLL_SMS(_sms)); \ 66 56 }) 67 - 68 - /* 69 - * This is configuration-dependent and usually a funny size like 4095. 70 - * 71 - * Note that this is a transfer count, i.e. if we transfer 32-bit 72 - * words, we can do 16380 bytes per descriptor. 73 - * 74 - * This parameter is also system-specific. 75 - */ 76 - #define DWC_MAX_COUNT 4095U 77 57 78 58 /* 79 59 * Number of descriptors to allocate for each channel. 
This should be ··· 177 177 178 178 cfghi = dws->cfg_hi; 179 179 cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; 180 + } else { 181 + if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) 182 + cfghi = DWC_CFGH_DST_PER(dwc->dma_sconfig.slave_id); 183 + else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) 184 + cfghi = DWC_CFGH_SRC_PER(dwc->dma_sconfig.slave_id); 180 185 } 181 186 182 187 channel_writel(dwc, CFG_LO, cfglo); ··· 211 206 return 0; 212 207 } 213 208 214 - static void dwc_dump_chan_regs(struct dw_dma_chan *dwc) 209 + static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc) 215 210 { 216 211 dev_err(chan2dev(&dwc->chan), 217 212 " SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n", ··· 232 227 233 228 /*----------------------------------------------------------------------*/ 234 229 230 + /* Perform single block transfer */ 231 + static inline void dwc_do_single_block(struct dw_dma_chan *dwc, 232 + struct dw_desc *desc) 233 + { 234 + struct dw_dma *dw = to_dw_dma(dwc->chan.device); 235 + u32 ctllo; 236 + 237 + /* Software emulation of LLP mode relies on interrupts to continue 238 + * multi block transfer. */ 239 + ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; 240 + 241 + channel_writel(dwc, SAR, desc->lli.sar); 242 + channel_writel(dwc, DAR, desc->lli.dar); 243 + channel_writel(dwc, CTL_LO, ctllo); 244 + channel_writel(dwc, CTL_HI, desc->lli.ctlhi); 245 + channel_set_bit(dw, CH_EN, dwc->mask); 246 + } 247 + 235 248 /* Called with dwc->lock held and bh disabled */ 236 249 static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first) 237 250 { 238 251 struct dw_dma *dw = to_dw_dma(dwc->chan.device); 252 + unsigned long was_soft_llp; 239 253 240 254 /* ASSERT: channel is idle */ 241 255 if (dma_readl(dw, CH_EN) & dwc->mask) { ··· 263 239 dwc_dump_chan_regs(dwc); 264 240 265 241 /* The tasklet will hopefully advance the queue... 
*/ 242 + return; 243 + } 244 + 245 + if (dwc->nollp) { 246 + was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP, 247 + &dwc->flags); 248 + if (was_soft_llp) { 249 + dev_err(chan2dev(&dwc->chan), 250 + "BUG: Attempted to start new LLP transfer " 251 + "inside ongoing one\n"); 252 + return; 253 + } 254 + 255 + dwc_initialize(dwc); 256 + 257 + dwc->tx_list = &first->tx_list; 258 + dwc->tx_node_active = first->tx_list.next; 259 + 260 + dwc_do_single_block(dwc, first); 261 + 266 262 return; 267 263 } 268 264 ··· 597 553 dwc_handle_cyclic(dw, dwc, status_err, status_xfer); 598 554 else if (status_err & (1 << i)) 599 555 dwc_handle_error(dw, dwc); 600 - else if (status_xfer & (1 << i)) 556 + else if (status_xfer & (1 << i)) { 557 + unsigned long flags; 558 + 559 + spin_lock_irqsave(&dwc->lock, flags); 560 + if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) { 561 + if (dwc->tx_node_active != dwc->tx_list) { 562 + struct dw_desc *desc = 563 + list_entry(dwc->tx_node_active, 564 + struct dw_desc, 565 + desc_node); 566 + 567 + dma_writel(dw, CLEAR.XFER, dwc->mask); 568 + 569 + /* move pointer to next descriptor */ 570 + dwc->tx_node_active = 571 + dwc->tx_node_active->next; 572 + 573 + dwc_do_single_block(dwc, desc); 574 + 575 + spin_unlock_irqrestore(&dwc->lock, flags); 576 + continue; 577 + } else { 578 + /* we are done here */ 579 + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 580 + } 581 + } 582 + spin_unlock_irqrestore(&dwc->lock, flags); 583 + 601 584 dwc_scan_descriptors(dw, dwc); 585 + } 602 586 } 603 587 604 588 /* ··· 708 636 size_t len, unsigned long flags) 709 637 { 710 638 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 639 + struct dw_dma_slave *dws = chan->private; 711 640 struct dw_desc *desc; 712 641 struct dw_desc *first; 713 642 struct dw_desc *prev; ··· 716 643 size_t offset; 717 644 unsigned int src_width; 718 645 unsigned int dst_width; 646 + unsigned int data_width; 719 647 u32 ctllo; 720 648 721 649 dev_vdbg(chan2dev(chan), ··· 729 655 return NULL; 730 656 
} 731 657 732 - src_width = dst_width = dwc_fast_fls(src | dest | len); 658 + data_width = min_t(unsigned int, dwc->dw->data_width[dwc_get_sms(dws)], 659 + dwc->dw->data_width[dwc_get_dms(dws)]); 660 + 661 + src_width = dst_width = min_t(unsigned int, data_width, 662 + dwc_fast_fls(src | dest | len)); 733 663 734 664 ctllo = DWC_DEFAULT_CTLLO(chan) 735 665 | DWC_CTLL_DST_WIDTH(dst_width) ··· 745 667 746 668 for (offset = 0; offset < len; offset += xfer_count << src_width) { 747 669 xfer_count = min_t(size_t, (len - offset) >> src_width, 748 - DWC_MAX_COUNT); 670 + dwc->block_size); 749 671 750 672 desc = dwc_desc_get(dwc); 751 673 if (!desc) ··· 803 725 dma_addr_t reg; 804 726 unsigned int reg_width; 805 727 unsigned int mem_width; 728 + unsigned int data_width; 806 729 unsigned int i; 807 730 struct scatterlist *sg; 808 731 size_t total_len = 0; ··· 827 748 ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) : 828 749 DWC_CTLL_FC(DW_DMA_FC_D_M2P); 829 750 751 + data_width = dwc->dw->data_width[dwc_get_sms(dws)]; 752 + 830 753 for_each_sg(sgl, sg, sg_len, i) { 831 754 struct dw_desc *desc; 832 755 u32 len, dlen, mem; ··· 836 755 mem = sg_dma_address(sg); 837 756 len = sg_dma_len(sg); 838 757 839 - mem_width = dwc_fast_fls(mem | len); 758 + mem_width = min_t(unsigned int, 759 + data_width, dwc_fast_fls(mem | len)); 840 760 841 761 slave_sg_todev_fill_desc: 842 762 desc = dwc_desc_get(dwc); ··· 850 768 desc->lli.sar = mem; 851 769 desc->lli.dar = reg; 852 770 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); 853 - if ((len >> mem_width) > DWC_MAX_COUNT) { 854 - dlen = DWC_MAX_COUNT << mem_width; 771 + if ((len >> mem_width) > dwc->block_size) { 772 + dlen = dwc->block_size << mem_width; 855 773 mem += dlen; 856 774 len -= dlen; 857 775 } else { ··· 890 808 ctllo |= sconfig->device_fc ? 
DWC_CTLL_FC(DW_DMA_FC_P_P2M) : 891 809 DWC_CTLL_FC(DW_DMA_FC_D_P2M); 892 810 811 + data_width = dwc->dw->data_width[dwc_get_dms(dws)]; 812 + 893 813 for_each_sg(sgl, sg, sg_len, i) { 894 814 struct dw_desc *desc; 895 815 u32 len, dlen, mem; ··· 899 815 mem = sg_dma_address(sg); 900 816 len = sg_dma_len(sg); 901 817 902 - mem_width = dwc_fast_fls(mem | len); 818 + mem_width = min_t(unsigned int, 819 + data_width, dwc_fast_fls(mem | len)); 903 820 904 821 slave_sg_fromdev_fill_desc: 905 822 desc = dwc_desc_get(dwc); ··· 913 828 desc->lli.sar = reg; 914 829 desc->lli.dar = mem; 915 830 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); 916 - if ((len >> reg_width) > DWC_MAX_COUNT) { 917 - dlen = DWC_MAX_COUNT << reg_width; 831 + if ((len >> reg_width) > dwc->block_size) { 832 + dlen = dwc->block_size << reg_width; 918 833 mem += dlen; 919 834 len -= dlen; 920 835 } else { ··· 1029 944 spin_unlock_irqrestore(&dwc->lock, flags); 1030 945 } else if (cmd == DMA_TERMINATE_ALL) { 1031 946 spin_lock_irqsave(&dwc->lock, flags); 947 + 948 + clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags); 1032 949 1033 950 dwc_chan_disable(dw, dwc); 1034 951 ··· 1274 1187 unsigned long flags; 1275 1188 1276 1189 spin_lock_irqsave(&dwc->lock, flags); 1190 + if (dwc->nollp) { 1191 + spin_unlock_irqrestore(&dwc->lock, flags); 1192 + dev_dbg(chan2dev(&dwc->chan), 1193 + "channel doesn't support LLP transfers\n"); 1194 + return ERR_PTR(-EINVAL); 1195 + } 1196 + 1277 1197 if (!list_empty(&dwc->queue) || !list_empty(&dwc->active_list)) { 1278 1198 spin_unlock_irqrestore(&dwc->lock, flags); 1279 1199 dev_dbg(chan2dev(&dwc->chan), ··· 1306 1212 periods = buf_len / period_len; 1307 1213 1308 1214 /* Check for too big/unaligned periods and unaligned DMA buffer. 
*/ 1309 - if (period_len > (DWC_MAX_COUNT << reg_width)) 1215 + if (period_len > (dwc->block_size << reg_width)) 1310 1216 goto out_err; 1311 1217 if (unlikely(period_len & ((1 << reg_width) - 1))) 1312 1218 goto out_err; ··· 1468 1374 struct resource *io; 1469 1375 struct dw_dma *dw; 1470 1376 size_t size; 1377 + void __iomem *regs; 1378 + bool autocfg; 1379 + unsigned int dw_params; 1380 + unsigned int nr_channels; 1381 + unsigned int max_blk_size = 0; 1471 1382 int irq; 1472 1383 int err; 1473 1384 int i; ··· 1489 1390 if (irq < 0) 1490 1391 return irq; 1491 1392 1492 - size = sizeof(struct dw_dma); 1493 - size += pdata->nr_channels * sizeof(struct dw_dma_chan); 1494 - dw = kzalloc(size, GFP_KERNEL); 1393 + regs = devm_request_and_ioremap(&pdev->dev, io); 1394 + if (!regs) 1395 + return -EBUSY; 1396 + 1397 + dw_params = dma_read_byaddr(regs, DW_PARAMS); 1398 + autocfg = dw_params >> DW_PARAMS_EN & 0x1; 1399 + 1400 + if (autocfg) 1401 + nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; 1402 + else 1403 + nr_channels = pdata->nr_channels; 1404 + 1405 + size = sizeof(struct dw_dma) + nr_channels * sizeof(struct dw_dma_chan); 1406 + dw = devm_kzalloc(&pdev->dev, size, GFP_KERNEL); 1495 1407 if (!dw) 1496 1408 return -ENOMEM; 1497 1409 1498 - if (!request_mem_region(io->start, DW_REGLEN, pdev->dev.driver->name)) { 1499 - err = -EBUSY; 1500 - goto err_kfree; 1501 - } 1502 - 1503 - dw->regs = ioremap(io->start, DW_REGLEN); 1504 - if (!dw->regs) { 1505 - err = -ENOMEM; 1506 - goto err_release_r; 1507 - } 1508 - 1509 - dw->clk = clk_get(&pdev->dev, "hclk"); 1510 - if (IS_ERR(dw->clk)) { 1511 - err = PTR_ERR(dw->clk); 1512 - goto err_clk; 1513 - } 1410 + dw->clk = devm_clk_get(&pdev->dev, "hclk"); 1411 + if (IS_ERR(dw->clk)) 1412 + return PTR_ERR(dw->clk); 1514 1413 clk_prepare_enable(dw->clk); 1515 1414 1415 + dw->regs = regs; 1416 + 1417 + /* get hardware configuration parameters */ 1418 + if (autocfg) { 1419 + max_blk_size = dma_readl(dw, MAX_BLK_SIZE); 1420 + 
1421 + dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; 1422 + for (i = 0; i < dw->nr_masters; i++) { 1423 + dw->data_width[i] = 1424 + (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; 1425 + } 1426 + } else { 1427 + dw->nr_masters = pdata->nr_masters; 1428 + memcpy(dw->data_width, pdata->data_width, 4); 1429 + } 1430 + 1516 1431 /* Calculate all channel mask before DMA setup */ 1517 - dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1432 + dw->all_chan_mask = (1 << nr_channels) - 1; 1518 1433 1519 1434 /* force dma off, just in case */ 1520 1435 dw_dma_off(dw); ··· 1536 1423 /* disable BLOCK interrupts as well */ 1537 1424 channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask); 1538 1425 1539 - err = request_irq(irq, dw_dma_interrupt, 0, "dw_dmac", dw); 1426 + err = devm_request_irq(&pdev->dev, irq, dw_dma_interrupt, 0, 1427 + "dw_dmac", dw); 1540 1428 if (err) 1541 - goto err_irq; 1429 + return err; 1542 1430 1543 1431 platform_set_drvdata(pdev, dw); 1544 1432 1545 1433 tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw); 1546 1434 1547 1435 INIT_LIST_HEAD(&dw->dma.channels); 1548 - for (i = 0; i < pdata->nr_channels; i++) { 1436 + for (i = 0; i < nr_channels; i++) { 1549 1437 struct dw_dma_chan *dwc = &dw->chan[i]; 1438 + int r = nr_channels - i - 1; 1550 1439 1551 1440 dwc->chan.device = &dw->dma; 1552 1441 dma_cookie_init(&dwc->chan); ··· 1560 1445 1561 1446 /* 7 is highest priority & 0 is lowest. 
*/ 1562 1447 if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1563 - dwc->priority = pdata->nr_channels - i - 1; 1448 + dwc->priority = r; 1564 1449 else 1565 1450 dwc->priority = i; 1566 1451 ··· 1573 1458 INIT_LIST_HEAD(&dwc->free_list); 1574 1459 1575 1460 channel_clear_bit(dw, CH_EN, dwc->mask); 1461 + 1462 + dwc->dw = dw; 1463 + 1464 + /* hardware configuration */ 1465 + if (autocfg) { 1466 + unsigned int dwc_params; 1467 + 1468 + dwc_params = dma_read_byaddr(regs + r * sizeof(u32), 1469 + DWC_PARAMS); 1470 + 1471 + /* Decode maximum block size for given channel. The 1472 + * stored 4 bit value represents blocks from 0x00 for 3 1473 + * up to 0x0a for 4095. */ 1474 + dwc->block_size = 1475 + (4 << ((max_blk_size >> 4 * i) & 0xf)) - 1; 1476 + dwc->nollp = 1477 + (dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0; 1478 + } else { 1479 + dwc->block_size = pdata->block_size; 1480 + 1481 + /* Check if channel supports multi block transfer */ 1482 + channel_writel(dwc, LLP, 0xfffffffc); 1483 + dwc->nollp = 1484 + (channel_readl(dwc, LLP) & 0xfffffffc) == 0; 1485 + channel_writel(dwc, LLP, 0); 1486 + } 1576 1487 } 1577 1488 1578 1489 /* Clear all interrupts on all channels. 
*/ ··· 1627 1486 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1628 1487 1629 1488 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", 1630 - dev_name(&pdev->dev), pdata->nr_channels); 1489 + dev_name(&pdev->dev), nr_channels); 1631 1490 1632 1491 dma_async_device_register(&dw->dma); 1633 1492 1634 1493 return 0; 1635 - 1636 - err_irq: 1637 - clk_disable_unprepare(dw->clk); 1638 - clk_put(dw->clk); 1639 - err_clk: 1640 - iounmap(dw->regs); 1641 - dw->regs = NULL; 1642 - err_release_r: 1643 - release_resource(io); 1644 - err_kfree: 1645 - kfree(dw); 1646 - return err; 1647 1494 } 1648 1495 1649 1496 static int __devexit dw_remove(struct platform_device *pdev) 1650 1497 { 1651 1498 struct dw_dma *dw = platform_get_drvdata(pdev); 1652 1499 struct dw_dma_chan *dwc, *_dwc; 1653 - struct resource *io; 1654 1500 1655 1501 dw_dma_off(dw); 1656 1502 dma_async_device_unregister(&dw->dma); 1657 1503 1658 - free_irq(platform_get_irq(pdev, 0), dw); 1659 1504 tasklet_kill(&dw->tasklet); 1660 1505 1661 1506 list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels, ··· 1649 1522 list_del(&dwc->chan.device_node); 1650 1523 channel_clear_bit(dw, CH_EN, dwc->mask); 1651 1524 } 1652 - 1653 - clk_disable_unprepare(dw->clk); 1654 - clk_put(dw->clk); 1655 - 1656 - iounmap(dw->regs); 1657 - dw->regs = NULL; 1658 - 1659 - io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1660 - release_mem_region(io->start, DW_REGLEN); 1661 - 1662 - kfree(dw); 1663 1525 1664 1526 return 0; 1665 1527 }
+46 -2
drivers/dma/dw_dmac_regs.h
··· 82 82 DW_REG(ID); 83 83 DW_REG(TEST); 84 84 85 + /* reserved */ 86 + DW_REG(__reserved0); 87 + DW_REG(__reserved1); 88 + 85 89 /* optional encoded params, 0x3c8..0x3f7 */ 90 + u32 __reserved; 91 + 92 + /* per-channel configuration registers */ 93 + u32 DWC_PARAMS[DW_DMA_MAX_NR_CHANNELS]; 94 + u32 MULTI_BLK_TYPE; 95 + u32 MAX_BLK_SIZE; 96 + 97 + /* top-level parameters */ 98 + u32 DW_PARAMS; 86 99 }; 100 + 101 + /* To access the registers in early stage of probe */ 102 + #define dma_read_byaddr(addr, name) \ 103 + readl((addr) + offsetof(struct dw_dma_regs, name)) 104 + 105 + /* Bitfields in DW_PARAMS */ 106 + #define DW_PARAMS_NR_CHAN 8 /* number of channels */ 107 + #define DW_PARAMS_NR_MASTER 11 /* number of AHB masters */ 108 + #define DW_PARAMS_DATA_WIDTH(n) (15 + 2 * (n)) 109 + #define DW_PARAMS_DATA_WIDTH1 15 /* master 1 data width */ 110 + #define DW_PARAMS_DATA_WIDTH2 17 /* master 2 data width */ 111 + #define DW_PARAMS_DATA_WIDTH3 19 /* master 3 data width */ 112 + #define DW_PARAMS_DATA_WIDTH4 21 /* master 4 data width */ 113 + #define DW_PARAMS_EN 28 /* encoded parameters */ 114 + 115 + /* Bitfields in DWC_PARAMS */ 116 + #define DWC_PARAMS_MBLK_EN 11 /* multi block transfer */ 87 117 88 118 /* Bitfields in CTL_LO */ 89 119 #define DWC_CTLL_INT_EN (1 << 0) /* irqs enabled? 
*/ ··· 170 140 /* Bitfields in CFG */ 171 141 #define DW_CFG_DMA_EN (1 << 0) 172 142 173 - #define DW_REGLEN 0x400 174 - 175 143 enum dw_dmac_flags { 176 144 DW_DMA_IS_CYCLIC = 0, 145 + DW_DMA_IS_SOFT_LLP = 1, 177 146 }; 178 147 179 148 struct dw_dma_chan { ··· 182 153 u8 priority; 183 154 bool paused; 184 155 bool initialized; 156 + 157 + /* software emulation of the LLP transfers */ 158 + struct list_head *tx_list; 159 + struct list_head *tx_node_active; 185 160 186 161 spinlock_t lock; 187 162 ··· 198 165 199 166 unsigned int descs_allocated; 200 167 168 + /* hardware configuration */ 169 + unsigned int block_size; 170 + bool nollp; 171 + 201 172 /* configuration passed via DMA_SLAVE_CONFIG */ 202 173 struct dma_slave_config dma_sconfig; 174 + 175 + /* backlink to dw_dma */ 176 + struct dw_dma *dw; 203 177 }; 204 178 205 179 static inline struct dw_dma_chan_regs __iomem * ··· 232 192 struct clk *clk; 233 193 234 194 u8 all_chan_mask; 195 + 196 + /* hardware configuration */ 197 + unsigned char nr_masters; 198 + unsigned char data_width[4]; 235 199 236 200 struct dw_dma_chan chan[0]; 237 201 };
+671
drivers/dma/edma.c
··· 1 + /* 2 + * TI EDMA DMA engine driver 3 + * 4 + * Copyright 2012 Texas Instruments 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License as 8 + * published by the Free Software Foundation version 2. 9 + * 10 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 11 + * kind, whether express or implied; without even the implied warranty 12 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + */ 15 + 16 + #include <linux/dmaengine.h> 17 + #include <linux/dma-mapping.h> 18 + #include <linux/err.h> 19 + #include <linux/init.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/list.h> 22 + #include <linux/module.h> 23 + #include <linux/platform_device.h> 24 + #include <linux/slab.h> 25 + #include <linux/spinlock.h> 26 + 27 + #include <mach/edma.h> 28 + 29 + #include "dmaengine.h" 30 + #include "virt-dma.h" 31 + 32 + /* 33 + * This will go away when the private EDMA API is folded 34 + * into this driver and the platform device(s) are 35 + * instantiated in the arch code. We can only get away 36 + * with this simplification because DA8XX may not be built 37 + * in the same kernel image with other DaVinci parts. This 38 + * avoids having to sprinkle dmaengine driver platform devices 39 + * and data throughout all the existing board files. 
40 + */ 41 + #ifdef CONFIG_ARCH_DAVINCI_DA8XX 42 + #define EDMA_CTLRS 2 43 + #define EDMA_CHANS 32 44 + #else 45 + #define EDMA_CTLRS 1 46 + #define EDMA_CHANS 64 47 + #endif /* CONFIG_ARCH_DAVINCI_DA8XX */ 48 + 49 + /* Max of 16 segments per channel to conserve PaRAM slots */ 50 + #define MAX_NR_SG 16 51 + #define EDMA_MAX_SLOTS MAX_NR_SG 52 + #define EDMA_DESCRIPTORS 16 53 + 54 + struct edma_desc { 55 + struct virt_dma_desc vdesc; 56 + struct list_head node; 57 + int absync; 58 + int pset_nr; 59 + struct edmacc_param pset[0]; 60 + }; 61 + 62 + struct edma_cc; 63 + 64 + struct edma_chan { 65 + struct virt_dma_chan vchan; 66 + struct list_head node; 67 + struct edma_desc *edesc; 68 + struct edma_cc *ecc; 69 + int ch_num; 70 + bool alloced; 71 + int slot[EDMA_MAX_SLOTS]; 72 + dma_addr_t addr; 73 + int addr_width; 74 + int maxburst; 75 + }; 76 + 77 + struct edma_cc { 78 + int ctlr; 79 + struct dma_device dma_slave; 80 + struct edma_chan slave_chans[EDMA_CHANS]; 81 + int num_slave_chans; 82 + int dummy_slot; 83 + }; 84 + 85 + static inline struct edma_cc *to_edma_cc(struct dma_device *d) 86 + { 87 + return container_of(d, struct edma_cc, dma_slave); 88 + } 89 + 90 + static inline struct edma_chan *to_edma_chan(struct dma_chan *c) 91 + { 92 + return container_of(c, struct edma_chan, vchan.chan); 93 + } 94 + 95 + static inline struct edma_desc 96 + *to_edma_desc(struct dma_async_tx_descriptor *tx) 97 + { 98 + return container_of(tx, struct edma_desc, vdesc.tx); 99 + } 100 + 101 + static void edma_desc_free(struct virt_dma_desc *vdesc) 102 + { 103 + kfree(container_of(vdesc, struct edma_desc, vdesc)); 104 + } 105 + 106 + /* Dispatch a queued descriptor to the controller (caller holds lock) */ 107 + static void edma_execute(struct edma_chan *echan) 108 + { 109 + struct virt_dma_desc *vdesc = vchan_next_desc(&echan->vchan); 110 + struct edma_desc *edesc; 111 + int i; 112 + 113 + if (!vdesc) { 114 + echan->edesc = NULL; 115 + return; 116 + } 117 + 118 + 
list_del(&vdesc->node); 119 + 120 + echan->edesc = edesc = to_edma_desc(&vdesc->tx); 121 + 122 + /* Write descriptor PaRAM set(s) */ 123 + for (i = 0; i < edesc->pset_nr; i++) { 124 + edma_write_slot(echan->slot[i], &edesc->pset[i]); 125 + dev_dbg(echan->vchan.chan.device->dev, 126 + "\n pset[%d]:\n" 127 + " chnum\t%d\n" 128 + " slot\t%d\n" 129 + " opt\t%08x\n" 130 + " src\t%08x\n" 131 + " dst\t%08x\n" 132 + " abcnt\t%08x\n" 133 + " ccnt\t%08x\n" 134 + " bidx\t%08x\n" 135 + " cidx\t%08x\n" 136 + " lkrld\t%08x\n", 137 + i, echan->ch_num, echan->slot[i], 138 + edesc->pset[i].opt, 139 + edesc->pset[i].src, 140 + edesc->pset[i].dst, 141 + edesc->pset[i].a_b_cnt, 142 + edesc->pset[i].ccnt, 143 + edesc->pset[i].src_dst_bidx, 144 + edesc->pset[i].src_dst_cidx, 145 + edesc->pset[i].link_bcntrld); 146 + /* Link to the previous slot if not the last set */ 147 + if (i != (edesc->pset_nr - 1)) 148 + edma_link(echan->slot[i], echan->slot[i+1]); 149 + /* Final pset links to the dummy pset */ 150 + else 151 + edma_link(echan->slot[i], echan->ecc->dummy_slot); 152 + } 153 + 154 + edma_start(echan->ch_num); 155 + } 156 + 157 + static int edma_terminate_all(struct edma_chan *echan) 158 + { 159 + unsigned long flags; 160 + LIST_HEAD(head); 161 + 162 + spin_lock_irqsave(&echan->vchan.lock, flags); 163 + 164 + /* 165 + * Stop DMA activity: we assume the callback will not be called 166 + * after edma_dma() returns (even if it does, it will see 167 + * echan->edesc is NULL and exit.) 
168 + */ 169 + if (echan->edesc) { 170 + echan->edesc = NULL; 171 + edma_stop(echan->ch_num); 172 + } 173 + 174 + vchan_get_all_descriptors(&echan->vchan, &head); 175 + spin_unlock_irqrestore(&echan->vchan.lock, flags); 176 + vchan_dma_desc_free_list(&echan->vchan, &head); 177 + 178 + return 0; 179 + } 180 + 181 + 182 + static int edma_slave_config(struct edma_chan *echan, 183 + struct dma_slave_config *config) 184 + { 185 + if ((config->src_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES) || 186 + (config->dst_addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) 187 + return -EINVAL; 188 + 189 + if (config->direction == DMA_MEM_TO_DEV) { 190 + if (config->dst_addr) 191 + echan->addr = config->dst_addr; 192 + if (config->dst_addr_width) 193 + echan->addr_width = config->dst_addr_width; 194 + if (config->dst_maxburst) 195 + echan->maxburst = config->dst_maxburst; 196 + } else if (config->direction == DMA_DEV_TO_MEM) { 197 + if (config->src_addr) 198 + echan->addr = config->src_addr; 199 + if (config->src_addr_width) 200 + echan->addr_width = config->src_addr_width; 201 + if (config->src_maxburst) 202 + echan->maxburst = config->src_maxburst; 203 + } 204 + 205 + return 0; 206 + } 207 + 208 + static int edma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 209 + unsigned long arg) 210 + { 211 + int ret = 0; 212 + struct dma_slave_config *config; 213 + struct edma_chan *echan = to_edma_chan(chan); 214 + 215 + switch (cmd) { 216 + case DMA_TERMINATE_ALL: 217 + edma_terminate_all(echan); 218 + break; 219 + case DMA_SLAVE_CONFIG: 220 + config = (struct dma_slave_config *)arg; 221 + ret = edma_slave_config(echan, config); 222 + break; 223 + default: 224 + ret = -ENOSYS; 225 + } 226 + 227 + return ret; 228 + } 229 + 230 + static struct dma_async_tx_descriptor *edma_prep_slave_sg( 231 + struct dma_chan *chan, struct scatterlist *sgl, 232 + unsigned int sg_len, enum dma_transfer_direction direction, 233 + unsigned long tx_flags, void *context) 234 + { 235 + struct edma_chan *echan = 
to_edma_chan(chan); 236 + struct device *dev = chan->device->dev; 237 + struct edma_desc *edesc; 238 + struct scatterlist *sg; 239 + int i; 240 + int acnt, bcnt, ccnt, src, dst, cidx; 241 + int src_bidx, dst_bidx, src_cidx, dst_cidx; 242 + 243 + if (unlikely(!echan || !sgl || !sg_len)) 244 + return NULL; 245 + 246 + if (echan->addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) { 247 + dev_err(dev, "Undefined slave buswidth\n"); 248 + return NULL; 249 + } 250 + 251 + if (sg_len > MAX_NR_SG) { 252 + dev_err(dev, "Exceeded max SG segments %d > %d\n", 253 + sg_len, MAX_NR_SG); 254 + return NULL; 255 + } 256 + 257 + edesc = kzalloc(sizeof(*edesc) + sg_len * 258 + sizeof(edesc->pset[0]), GFP_ATOMIC); 259 + if (!edesc) { 260 + dev_dbg(dev, "Failed to allocate a descriptor\n"); 261 + return NULL; 262 + } 263 + 264 + edesc->pset_nr = sg_len; 265 + 266 + for_each_sg(sgl, sg, sg_len, i) { 267 + /* Allocate a PaRAM slot, if needed */ 268 + if (echan->slot[i] < 0) { 269 + echan->slot[i] = 270 + edma_alloc_slot(EDMA_CTLR(echan->ch_num), 271 + EDMA_SLOT_ANY); 272 + if (echan->slot[i] < 0) { 273 + dev_err(dev, "Failed to allocate slot\n"); 274 + return NULL; 275 + } 276 + } 277 + 278 + acnt = echan->addr_width; 279 + 280 + /* 281 + * If the maxburst is equal to the fifo width, use 282 + * A-synced transfers. This allows for large contiguous 283 + * buffer transfers using only one PaRAM set. 284 + */ 285 + if (echan->maxburst == 1) { 286 + edesc->absync = false; 287 + ccnt = sg_dma_len(sg) / acnt / (SZ_64K - 1); 288 + bcnt = sg_dma_len(sg) / acnt - ccnt * (SZ_64K - 1); 289 + if (bcnt) 290 + ccnt++; 291 + else 292 + bcnt = SZ_64K - 1; 293 + cidx = acnt; 294 + /* 295 + * If maxburst is greater than the fifo address_width, 296 + * use AB-synced transfers where A count is the fifo 297 + * address_width and B count is the maxburst. In this 298 + * case, we are limited to transfers of C count frames 299 + * of (address_width * maxburst) where C count is limited 300 + * to SZ_64K-1. 
This places an upper bound on the length 301 + * of an SG segment that can be handled. 302 + */ 303 + } else { 304 + edesc->absync = true; 305 + bcnt = echan->maxburst; 306 + ccnt = sg_dma_len(sg) / (acnt * bcnt); 307 + if (ccnt > (SZ_64K - 1)) { 308 + dev_err(dev, "Exceeded max SG segment size\n"); 309 + return NULL; 310 + } 311 + cidx = acnt * bcnt; 312 + } 313 + 314 + if (direction == DMA_MEM_TO_DEV) { 315 + src = sg_dma_address(sg); 316 + dst = echan->addr; 317 + src_bidx = acnt; 318 + src_cidx = cidx; 319 + dst_bidx = 0; 320 + dst_cidx = 0; 321 + } else { 322 + src = echan->addr; 323 + dst = sg_dma_address(sg); 324 + src_bidx = 0; 325 + src_cidx = 0; 326 + dst_bidx = acnt; 327 + dst_cidx = cidx; 328 + } 329 + 330 + edesc->pset[i].opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); 331 + /* Configure A or AB synchronized transfers */ 332 + if (edesc->absync) 333 + edesc->pset[i].opt |= SYNCDIM; 334 + /* If this is the last set, enable completion interrupt flag */ 335 + if (i == sg_len - 1) 336 + edesc->pset[i].opt |= TCINTEN; 337 + 338 + edesc->pset[i].src = src; 339 + edesc->pset[i].dst = dst; 340 + 341 + edesc->pset[i].src_dst_bidx = (dst_bidx << 16) | src_bidx; 342 + edesc->pset[i].src_dst_cidx = (dst_cidx << 16) | src_cidx; 343 + 344 + edesc->pset[i].a_b_cnt = bcnt << 16 | acnt; 345 + edesc->pset[i].ccnt = ccnt; 346 + edesc->pset[i].link_bcntrld = 0xffffffff; 347 + 348 + } 349 + 350 + return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); 351 + } 352 + 353 + static void edma_callback(unsigned ch_num, u16 ch_status, void *data) 354 + { 355 + struct edma_chan *echan = data; 356 + struct device *dev = echan->vchan.chan.device->dev; 357 + struct edma_desc *edesc; 358 + unsigned long flags; 359 + 360 + /* Stop the channel */ 361 + edma_stop(echan->ch_num); 362 + 363 + switch (ch_status) { 364 + case DMA_COMPLETE: 365 + dev_dbg(dev, "transfer complete on channel %d\n", ch_num); 366 + 367 + spin_lock_irqsave(&echan->vchan.lock, flags); 368 + 369 + edesc = 
echan->edesc; 370 + if (edesc) { 371 + edma_execute(echan); 372 + vchan_cookie_complete(&edesc->vdesc); 373 + } 374 + 375 + spin_unlock_irqrestore(&echan->vchan.lock, flags); 376 + 377 + break; 378 + case DMA_CC_ERROR: 379 + dev_dbg(dev, "transfer error on channel %d\n", ch_num); 380 + break; 381 + default: 382 + break; 383 + } 384 + } 385 + 386 + /* Alloc channel resources */ 387 + static int edma_alloc_chan_resources(struct dma_chan *chan) 388 + { 389 + struct edma_chan *echan = to_edma_chan(chan); 390 + struct device *dev = chan->device->dev; 391 + int ret; 392 + int a_ch_num; 393 + LIST_HEAD(descs); 394 + 395 + a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, 396 + chan, EVENTQ_DEFAULT); 397 + 398 + if (a_ch_num < 0) { 399 + ret = -ENODEV; 400 + goto err_no_chan; 401 + } 402 + 403 + if (a_ch_num != echan->ch_num) { 404 + dev_err(dev, "failed to allocate requested channel %u:%u\n", 405 + EDMA_CTLR(echan->ch_num), 406 + EDMA_CHAN_SLOT(echan->ch_num)); 407 + ret = -ENODEV; 408 + goto err_wrong_chan; 409 + } 410 + 411 + echan->alloced = true; 412 + echan->slot[0] = echan->ch_num; 413 + 414 + dev_info(dev, "allocated channel for %u:%u\n", 415 + EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); 416 + 417 + return 0; 418 + 419 + err_wrong_chan: 420 + edma_free_channel(a_ch_num); 421 + err_no_chan: 422 + return ret; 423 + } 424 + 425 + /* Free channel resources */ 426 + static void edma_free_chan_resources(struct dma_chan *chan) 427 + { 428 + struct edma_chan *echan = to_edma_chan(chan); 429 + struct device *dev = chan->device->dev; 430 + int i; 431 + 432 + /* Terminate transfers */ 433 + edma_stop(echan->ch_num); 434 + 435 + vchan_free_chan_resources(&echan->vchan); 436 + 437 + /* Free EDMA PaRAM slots */ 438 + for (i = 1; i < EDMA_MAX_SLOTS; i++) { 439 + if (echan->slot[i] >= 0) { 440 + edma_free_slot(echan->slot[i]); 441 + echan->slot[i] = -1; 442 + } 443 + } 444 + 445 + /* Free EDMA channel */ 446 + if (echan->alloced) { 447 + 
edma_free_channel(echan->ch_num); 448 + echan->alloced = false; 449 + } 450 + 451 + dev_info(dev, "freeing channel for %u\n", echan->ch_num); 452 + } 453 + 454 + /* Send pending descriptor to hardware */ 455 + static void edma_issue_pending(struct dma_chan *chan) 456 + { 457 + struct edma_chan *echan = to_edma_chan(chan); 458 + unsigned long flags; 459 + 460 + spin_lock_irqsave(&echan->vchan.lock, flags); 461 + if (vchan_issue_pending(&echan->vchan) && !echan->edesc) 462 + edma_execute(echan); 463 + spin_unlock_irqrestore(&echan->vchan.lock, flags); 464 + } 465 + 466 + static size_t edma_desc_size(struct edma_desc *edesc) 467 + { 468 + int i; 469 + size_t size; 470 + 471 + if (edesc->absync) 472 + for (size = i = 0; i < edesc->pset_nr; i++) 473 + size += (edesc->pset[i].a_b_cnt & 0xffff) * 474 + (edesc->pset[i].a_b_cnt >> 16) * 475 + edesc->pset[i].ccnt; 476 + else 477 + size = (edesc->pset[0].a_b_cnt & 0xffff) * 478 + (edesc->pset[0].a_b_cnt >> 16) + 479 + (edesc->pset[0].a_b_cnt & 0xffff) * 480 + (SZ_64K - 1) * edesc->pset[0].ccnt; 481 + 482 + return size; 483 + } 484 + 485 + /* Check request completion status */ 486 + static enum dma_status edma_tx_status(struct dma_chan *chan, 487 + dma_cookie_t cookie, 488 + struct dma_tx_state *txstate) 489 + { 490 + struct edma_chan *echan = to_edma_chan(chan); 491 + struct virt_dma_desc *vdesc; 492 + enum dma_status ret; 493 + unsigned long flags; 494 + 495 + ret = dma_cookie_status(chan, cookie, txstate); 496 + if (ret == DMA_SUCCESS || !txstate) 497 + return ret; 498 + 499 + spin_lock_irqsave(&echan->vchan.lock, flags); 500 + vdesc = vchan_find_desc(&echan->vchan, cookie); 501 + if (vdesc) { 502 + txstate->residue = edma_desc_size(to_edma_desc(&vdesc->tx)); 503 + } else if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) { 504 + struct edma_desc *edesc = echan->edesc; 505 + txstate->residue = edma_desc_size(edesc); 506 + } else { 507 + txstate->residue = 0; 508 + } 509 + spin_unlock_irqrestore(&echan->vchan.lock, 
flags); 510 + 511 + return ret; 512 + } 513 + 514 + static void __init edma_chan_init(struct edma_cc *ecc, 515 + struct dma_device *dma, 516 + struct edma_chan *echans) 517 + { 518 + int i, j; 519 + 520 + for (i = 0; i < EDMA_CHANS; i++) { 521 + struct edma_chan *echan = &echans[i]; 522 + echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); 523 + echan->ecc = ecc; 524 + echan->vchan.desc_free = edma_desc_free; 525 + 526 + vchan_init(&echan->vchan, dma); 527 + 528 + INIT_LIST_HEAD(&echan->node); 529 + for (j = 0; j < EDMA_MAX_SLOTS; j++) 530 + echan->slot[j] = -1; 531 + } 532 + } 533 + 534 + static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, 535 + struct device *dev) 536 + { 537 + dma->device_prep_slave_sg = edma_prep_slave_sg; 538 + dma->device_alloc_chan_resources = edma_alloc_chan_resources; 539 + dma->device_free_chan_resources = edma_free_chan_resources; 540 + dma->device_issue_pending = edma_issue_pending; 541 + dma->device_tx_status = edma_tx_status; 542 + dma->device_control = edma_control; 543 + dma->dev = dev; 544 + 545 + INIT_LIST_HEAD(&dma->channels); 546 + } 547 + 548 + static int __devinit edma_probe(struct platform_device *pdev) 549 + { 550 + struct edma_cc *ecc; 551 + int ret; 552 + 553 + ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); 554 + if (!ecc) { 555 + dev_err(&pdev->dev, "Can't allocate controller\n"); 556 + return -ENOMEM; 557 + } 558 + 559 + ecc->ctlr = pdev->id; 560 + ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); 561 + if (ecc->dummy_slot < 0) { 562 + dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); 563 + return -EIO; 564 + } 565 + 566 + dma_cap_zero(ecc->dma_slave.cap_mask); 567 + dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); 568 + 569 + edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); 570 + 571 + edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); 572 + 573 + ret = dma_async_device_register(&ecc->dma_slave); 574 + if (ret) 575 + goto err_reg1; 576 + 577 + platform_set_drvdata(pdev, 
ecc); 578 + 579 + dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); 580 + 581 + return 0; 582 + 583 + err_reg1: 584 + edma_free_slot(ecc->dummy_slot); 585 + return ret; 586 + } 587 + 588 + static int __devexit edma_remove(struct platform_device *pdev) 589 + { 590 + struct device *dev = &pdev->dev; 591 + struct edma_cc *ecc = dev_get_drvdata(dev); 592 + 593 + dma_async_device_unregister(&ecc->dma_slave); 594 + edma_free_slot(ecc->dummy_slot); 595 + 596 + return 0; 597 + } 598 + 599 + static struct platform_driver edma_driver = { 600 + .probe = edma_probe, 601 + .remove = __devexit_p(edma_remove), 602 + .driver = { 603 + .name = "edma-dma-engine", 604 + .owner = THIS_MODULE, 605 + }, 606 + }; 607 + 608 + bool edma_filter_fn(struct dma_chan *chan, void *param) 609 + { 610 + if (chan->device->dev->driver == &edma_driver.driver) { 611 + struct edma_chan *echan = to_edma_chan(chan); 612 + unsigned ch_req = *(unsigned *)param; 613 + return ch_req == echan->ch_num; 614 + } 615 + return false; 616 + } 617 + EXPORT_SYMBOL(edma_filter_fn); 618 + 619 + static struct platform_device *pdev0, *pdev1; 620 + 621 + static const struct platform_device_info edma_dev_info0 = { 622 + .name = "edma-dma-engine", 623 + .id = 0, 624 + .dma_mask = DMA_BIT_MASK(32), 625 + }; 626 + 627 + static const struct platform_device_info edma_dev_info1 = { 628 + .name = "edma-dma-engine", 629 + .id = 1, 630 + .dma_mask = DMA_BIT_MASK(32), 631 + }; 632 + 633 + static int edma_init(void) 634 + { 635 + int ret = platform_driver_register(&edma_driver); 636 + 637 + if (ret == 0) { 638 + pdev0 = platform_device_register_full(&edma_dev_info0); 639 + if (IS_ERR(pdev0)) { 640 + platform_driver_unregister(&edma_driver); 641 + ret = PTR_ERR(pdev0); 642 + goto out; 643 + } 644 + } 645 + 646 + if (EDMA_CTLRS == 2) { 647 + pdev1 = platform_device_register_full(&edma_dev_info1); 648 + if (IS_ERR(pdev1)) { 649 + platform_driver_unregister(&edma_driver); 650 + platform_device_unregister(pdev0); 651 + ret = 
PTR_ERR(pdev1); 652 + } 653 + } 654 + 655 + out: 656 + return ret; 657 + } 658 + subsys_initcall(edma_init); 659 + 660 + static void __exit edma_exit(void) 661 + { 662 + platform_device_unregister(pdev0); 663 + if (pdev1) 664 + platform_device_unregister(pdev1); 665 + platform_driver_unregister(&edma_driver); 666 + } 667 + module_exit(edma_exit); 668 + 669 + MODULE_AUTHOR("Matt Porter <mporter@ti.com>"); 670 + MODULE_DESCRIPTION("TI EDMA DMA engine driver"); 671 + MODULE_LICENSE("GPL v2");
+1 -2
drivers/dma/ioat/dma_v2.c
··· 434 434 return NULL; 435 435 memset(hw, 0, sizeof(*hw)); 436 436 437 - desc = kmem_cache_alloc(ioat2_cache, flags); 437 + desc = kmem_cache_zalloc(ioat2_cache, flags); 438 438 if (!desc) { 439 439 pci_pool_free(dma->dma_pool, hw, phys); 440 440 return NULL; 441 441 } 442 - memset(desc, 0, sizeof(*desc)); 443 442 444 443 dma_async_tx_descriptor_init(&desc->txd, chan); 445 444 desc->txd.tx_submit = ioat2_tx_submit_unlock;
+22
drivers/dma/ioat/pci.c
··· 40 40 MODULE_LICENSE("Dual BSD/GPL"); 41 41 MODULE_AUTHOR("Intel Corporation"); 42 42 43 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB0 0x0e20 44 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB1 0x0e21 45 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB2 0x0e22 46 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB3 0x0e23 47 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB4 0x0e24 48 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB5 0x0e25 49 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB6 0x0e26 50 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB7 0x0e27 51 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB8 0x0e2e 52 + #define PCI_DEVICE_ID_INTEL_IOAT_IVB9 0x0e2f 53 + 43 54 static struct pci_device_id ioat_pci_tbl[] = { 44 55 /* I/OAT v1 platforms */ 45 56 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, ··· 93 82 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB7) }, 94 83 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB8) }, 95 84 { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB9) }, 85 + 86 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB0) }, 87 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB1) }, 88 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB2) }, 89 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB3) }, 90 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB4) }, 91 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB5) }, 92 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB6) }, 93 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB7) }, 94 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB8) }, 95 + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_IVB9) }, 96 96 97 97 { 0, } 98 98 };
+875
drivers/dma/mmp_pdma.c
··· 1 + /* 2 + * Copyright 2012 Marvell International Ltd. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + #include <linux/module.h> 9 + #include <linux/init.h> 10 + #include <linux/types.h> 11 + #include <linux/interrupt.h> 12 + #include <linux/dma-mapping.h> 13 + #include <linux/slab.h> 14 + #include <linux/dmaengine.h> 15 + #include <linux/platform_device.h> 16 + #include <linux/device.h> 17 + #include <linux/platform_data/mmp_dma.h> 18 + #include <linux/dmapool.h> 19 + #include <linux/of_device.h> 20 + #include <linux/of.h> 21 + 22 + #include "dmaengine.h" 23 + 24 + #define DCSR 0x0000 25 + #define DALGN 0x00a0 26 + #define DINT 0x00f0 27 + #define DDADR 0x0200 28 + #define DSADR 0x0204 29 + #define DTADR 0x0208 30 + #define DCMD 0x020c 31 + 32 + #define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ 33 + #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ 34 + #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ 35 + #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ 36 + #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ 37 + #define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ 38 + #define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ 39 + #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ 40 + 41 + #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ 42 + #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ 43 + #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ 44 + #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ 45 + #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ 46 + #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ 47 + #define DCSR_EORINTR (1 << 9) /* The end of Receive */ 48 + 49 + 
#define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ 50 + #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ 51 + 52 + #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ 53 + #define DDADR_STOP (1 << 0) /* Stop (read / write) */ 54 + 55 + #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ 56 + #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ 57 + #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ 58 + #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ 59 + #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ 60 + #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ 61 + #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ 62 + #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ 63 + #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ 64 + #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ 65 + #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */ 66 + #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ 67 + #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ 68 + #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ 69 + 70 + #define PDMA_ALIGNMENT 3 71 + #define PDMA_MAX_DESC_BYTES 0x1000 72 + 73 + struct mmp_pdma_desc_hw { 74 + u32 ddadr; /* Points to the next descriptor + flags */ 75 + u32 dsadr; /* DSADR value for the current transfer */ 76 + u32 dtadr; /* DTADR value for the current transfer */ 77 + u32 dcmd; /* DCMD value for the current transfer */ 78 + } __aligned(32); 79 + 80 + struct mmp_pdma_desc_sw { 81 + struct mmp_pdma_desc_hw desc; 82 + struct list_head node; 83 + struct list_head tx_list; 84 + struct dma_async_tx_descriptor async_tx; 85 + }; 86 + 87 + struct mmp_pdma_phy; 88 + 89 + struct mmp_pdma_chan { 90 + struct device *dev; 91 + struct dma_chan chan; 92 + struct dma_async_tx_descriptor desc; 93 + struct mmp_pdma_phy *phy; 94 + enum dma_transfer_direction dir; 95 + 96 + /* channel's basic 
info */ 97 + struct tasklet_struct tasklet; 98 + u32 dcmd; 99 + u32 drcmr; 100 + u32 dev_addr; 101 + 102 + /* list for desc */ 103 + spinlock_t desc_lock; /* Descriptor list lock */ 104 + struct list_head chain_pending; /* Link descriptors queue for pending */ 105 + struct list_head chain_running; /* Link descriptors queue for running */ 106 + bool idle; /* channel statue machine */ 107 + 108 + struct dma_pool *desc_pool; /* Descriptors pool */ 109 + }; 110 + 111 + struct mmp_pdma_phy { 112 + int idx; 113 + void __iomem *base; 114 + struct mmp_pdma_chan *vchan; 115 + }; 116 + 117 + struct mmp_pdma_device { 118 + int dma_channels; 119 + void __iomem *base; 120 + struct device *dev; 121 + struct dma_device device; 122 + struct mmp_pdma_phy *phy; 123 + }; 124 + 125 + #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) 126 + #define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) 127 + #define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) 128 + #define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) 129 + 130 + static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) 131 + { 132 + u32 reg = (phy->idx << 4) + DDADR; 133 + 134 + writel(addr, phy->base + reg); 135 + } 136 + 137 + static void enable_chan(struct mmp_pdma_phy *phy) 138 + { 139 + u32 reg; 140 + 141 + if (!phy->vchan) 142 + return; 143 + 144 + reg = phy->vchan->drcmr; 145 + reg = (((reg) < 64) ? 
0x0100 : 0x1100) + (((reg) & 0x3f) << 2); 146 + writel(DRCMR_MAPVLD | phy->idx, phy->base + reg); 147 + 148 + reg = (phy->idx << 2) + DCSR; 149 + writel(readl(phy->base + reg) | DCSR_RUN, 150 + phy->base + reg); 151 + } 152 + 153 + static void disable_chan(struct mmp_pdma_phy *phy) 154 + { 155 + u32 reg; 156 + 157 + if (phy) { 158 + reg = (phy->idx << 2) + DCSR; 159 + writel(readl(phy->base + reg) & ~DCSR_RUN, 160 + phy->base + reg); 161 + } 162 + } 163 + 164 + static int clear_chan_irq(struct mmp_pdma_phy *phy) 165 + { 166 + u32 dcsr; 167 + u32 dint = readl(phy->base + DINT); 168 + u32 reg = (phy->idx << 2) + DCSR; 169 + 170 + if (dint & BIT(phy->idx)) { 171 + /* clear irq */ 172 + dcsr = readl(phy->base + reg); 173 + writel(dcsr, phy->base + reg); 174 + if ((dcsr & DCSR_BUSERR) && (phy->vchan)) 175 + dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); 176 + return 0; 177 + } 178 + return -EAGAIN; 179 + } 180 + 181 + static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) 182 + { 183 + struct mmp_pdma_phy *phy = dev_id; 184 + 185 + if (clear_chan_irq(phy) == 0) { 186 + tasklet_schedule(&phy->vchan->tasklet); 187 + return IRQ_HANDLED; 188 + } else 189 + return IRQ_NONE; 190 + } 191 + 192 + static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) 193 + { 194 + struct mmp_pdma_device *pdev = dev_id; 195 + struct mmp_pdma_phy *phy; 196 + u32 dint = readl(pdev->base + DINT); 197 + int i, ret; 198 + int irq_num = 0; 199 + 200 + while (dint) { 201 + i = __ffs(dint); 202 + dint &= (dint - 1); 203 + phy = &pdev->phy[i]; 204 + ret = mmp_pdma_chan_handler(irq, phy); 205 + if (ret == IRQ_HANDLED) 206 + irq_num++; 207 + } 208 + 209 + if (irq_num) 210 + return IRQ_HANDLED; 211 + else 212 + return IRQ_NONE; 213 + } 214 + 215 + /* lookup free phy channel as descending priority */ 216 + static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) 217 + { 218 + int prio, i; 219 + struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device); 220 + struct 
mmp_pdma_phy *phy; 221 + 222 + /* 223 + * dma channel priorities 224 + * ch 0 - 3, 16 - 19 <--> (0) 225 + * ch 4 - 7, 20 - 23 <--> (1) 226 + * ch 8 - 11, 24 - 27 <--> (2) 227 + * ch 12 - 15, 28 - 31 <--> (3) 228 + */ 229 + for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { 230 + for (i = 0; i < pdev->dma_channels; i++) { 231 + if (prio != ((i & 0xf) >> 2)) 232 + continue; 233 + phy = &pdev->phy[i]; 234 + if (!phy->vchan) { 235 + phy->vchan = pchan; 236 + return phy; 237 + } 238 + } 239 + } 240 + 241 + return NULL; 242 + } 243 + 244 + /* desc->tx_list ==> pending list */ 245 + static void append_pending_queue(struct mmp_pdma_chan *chan, 246 + struct mmp_pdma_desc_sw *desc) 247 + { 248 + struct mmp_pdma_desc_sw *tail = 249 + to_mmp_pdma_desc(chan->chain_pending.prev); 250 + 251 + if (list_empty(&chan->chain_pending)) 252 + goto out_splice; 253 + 254 + /* one irq per queue, even appended */ 255 + tail->desc.ddadr = desc->async_tx.phys; 256 + tail->desc.dcmd &= ~DCMD_ENDIRQEN; 257 + 258 + /* softly link to pending list */ 259 + out_splice: 260 + list_splice_tail_init(&desc->tx_list, &chan->chain_pending); 261 + } 262 + 263 + /** 264 + * start_pending_queue - transfer any pending transactions 265 + * pending list ==> running list 266 + */ 267 + static void start_pending_queue(struct mmp_pdma_chan *chan) 268 + { 269 + struct mmp_pdma_desc_sw *desc; 270 + 271 + /* still in running, irq will start the pending list */ 272 + if (!chan->idle) { 273 + dev_dbg(chan->dev, "DMA controller still busy\n"); 274 + return; 275 + } 276 + 277 + if (list_empty(&chan->chain_pending)) { 278 + /* chance to re-fetch phy channel with higher prio */ 279 + if (chan->phy) { 280 + chan->phy->vchan = NULL; 281 + chan->phy = NULL; 282 + } 283 + dev_dbg(chan->dev, "no pending list\n"); 284 + return; 285 + } 286 + 287 + if (!chan->phy) { 288 + chan->phy = lookup_phy(chan); 289 + if (!chan->phy) { 290 + dev_dbg(chan->dev, "no free dma channel\n"); 291 + return; 292 + } 293 + } 
294 + 295 + /* 296 + * pending -> running 297 + * reintilize pending list 298 + */ 299 + desc = list_first_entry(&chan->chain_pending, 300 + struct mmp_pdma_desc_sw, node); 301 + list_splice_tail_init(&chan->chain_pending, &chan->chain_running); 302 + 303 + /* 304 + * Program the descriptor's address into the DMA controller, 305 + * then start the DMA transaction 306 + */ 307 + set_desc(chan->phy, desc->async_tx.phys); 308 + enable_chan(chan->phy); 309 + chan->idle = false; 310 + } 311 + 312 + 313 + /* desc->tx_list ==> pending list */ 314 + static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx) 315 + { 316 + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan); 317 + struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx); 318 + struct mmp_pdma_desc_sw *child; 319 + unsigned long flags; 320 + dma_cookie_t cookie = -EBUSY; 321 + 322 + spin_lock_irqsave(&chan->desc_lock, flags); 323 + 324 + list_for_each_entry(child, &desc->tx_list, node) { 325 + cookie = dma_cookie_assign(&child->async_tx); 326 + } 327 + 328 + append_pending_queue(chan, desc); 329 + 330 + spin_unlock_irqrestore(&chan->desc_lock, flags); 331 + 332 + return cookie; 333 + } 334 + 335 + struct mmp_pdma_desc_sw *mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan) 336 + { 337 + struct mmp_pdma_desc_sw *desc; 338 + dma_addr_t pdesc; 339 + 340 + desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); 341 + if (!desc) { 342 + dev_err(chan->dev, "out of memory for link descriptor\n"); 343 + return NULL; 344 + } 345 + 346 + memset(desc, 0, sizeof(*desc)); 347 + INIT_LIST_HEAD(&desc->tx_list); 348 + dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); 349 + /* each desc has submit */ 350 + desc->async_tx.tx_submit = mmp_pdma_tx_submit; 351 + desc->async_tx.phys = pdesc; 352 + 353 + return desc; 354 + } 355 + 356 + /** 357 + * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel. 358 + * 359 + * This function will create a dma pool for descriptor allocation. 
360 + * Request irq only when channel is requested 361 + * Return - The number of allocated descriptors. 362 + */ 363 + 364 + static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) 365 + { 366 + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 367 + 368 + if (chan->desc_pool) 369 + return 1; 370 + 371 + chan->desc_pool = 372 + dma_pool_create(dev_name(&dchan->dev->device), chan->dev, 373 + sizeof(struct mmp_pdma_desc_sw), 374 + __alignof__(struct mmp_pdma_desc_sw), 0); 375 + if (!chan->desc_pool) { 376 + dev_err(chan->dev, "unable to allocate descriptor pool\n"); 377 + return -ENOMEM; 378 + } 379 + if (chan->phy) { 380 + chan->phy->vchan = NULL; 381 + chan->phy = NULL; 382 + } 383 + chan->idle = true; 384 + chan->dev_addr = 0; 385 + return 1; 386 + } 387 + 388 + static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, 389 + struct list_head *list) 390 + { 391 + struct mmp_pdma_desc_sw *desc, *_desc; 392 + 393 + list_for_each_entry_safe(desc, _desc, list, node) { 394 + list_del(&desc->node); 395 + dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 396 + } 397 + } 398 + 399 + static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) 400 + { 401 + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 402 + unsigned long flags; 403 + 404 + spin_lock_irqsave(&chan->desc_lock, flags); 405 + mmp_pdma_free_desc_list(chan, &chan->chain_pending); 406 + mmp_pdma_free_desc_list(chan, &chan->chain_running); 407 + spin_unlock_irqrestore(&chan->desc_lock, flags); 408 + 409 + dma_pool_destroy(chan->desc_pool); 410 + chan->desc_pool = NULL; 411 + chan->idle = true; 412 + chan->dev_addr = 0; 413 + if (chan->phy) { 414 + chan->phy->vchan = NULL; 415 + chan->phy = NULL; 416 + } 417 + return; 418 + } 419 + 420 + static struct dma_async_tx_descriptor * 421 + mmp_pdma_prep_memcpy(struct dma_chan *dchan, 422 + dma_addr_t dma_dst, dma_addr_t dma_src, 423 + size_t len, unsigned long flags) 424 + { 425 + struct mmp_pdma_chan *chan; 426 + struct 
mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; 427 + size_t copy = 0; 428 + 429 + if (!dchan) 430 + return NULL; 431 + 432 + if (!len) 433 + return NULL; 434 + 435 + chan = to_mmp_pdma_chan(dchan); 436 + 437 + if (!chan->dir) { 438 + chan->dir = DMA_MEM_TO_MEM; 439 + chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR; 440 + chan->dcmd |= DCMD_BURST32; 441 + } 442 + 443 + do { 444 + /* Allocate the link descriptor from DMA pool */ 445 + new = mmp_pdma_alloc_descriptor(chan); 446 + if (!new) { 447 + dev_err(chan->dev, "no memory for desc\n"); 448 + goto fail; 449 + } 450 + 451 + copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); 452 + 453 + new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy); 454 + new->desc.dsadr = dma_src; 455 + new->desc.dtadr = dma_dst; 456 + 457 + if (!first) 458 + first = new; 459 + else 460 + prev->desc.ddadr = new->async_tx.phys; 461 + 462 + new->async_tx.cookie = 0; 463 + async_tx_ack(&new->async_tx); 464 + 465 + prev = new; 466 + len -= copy; 467 + 468 + if (chan->dir == DMA_MEM_TO_DEV) { 469 + dma_src += copy; 470 + } else if (chan->dir == DMA_DEV_TO_MEM) { 471 + dma_dst += copy; 472 + } else if (chan->dir == DMA_MEM_TO_MEM) { 473 + dma_src += copy; 474 + dma_dst += copy; 475 + } 476 + 477 + /* Insert the link descriptor to the LD ring */ 478 + list_add_tail(&new->node, &first->tx_list); 479 + } while (len); 480 + 481 + first->async_tx.flags = flags; /* client is in control of this ack */ 482 + first->async_tx.cookie = -EBUSY; 483 + 484 + /* last desc and fire IRQ */ 485 + new->desc.ddadr = DDADR_STOP; 486 + new->desc.dcmd |= DCMD_ENDIRQEN; 487 + 488 + return &first->async_tx; 489 + 490 + fail: 491 + if (first) 492 + mmp_pdma_free_desc_list(chan, &first->tx_list); 493 + return NULL; 494 + } 495 + 496 + static struct dma_async_tx_descriptor * 497 + mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, 498 + unsigned int sg_len, enum dma_transfer_direction dir, 499 + unsigned long flags, void *context) 500 + { 501 + struct 
mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 502 + struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; 503 + size_t len, avail; 504 + struct scatterlist *sg; 505 + dma_addr_t addr; 506 + int i; 507 + 508 + if ((sgl == NULL) || (sg_len == 0)) 509 + return NULL; 510 + 511 + for_each_sg(sgl, sg, sg_len, i) { 512 + addr = sg_dma_address(sg); 513 + avail = sg_dma_len(sgl); 514 + 515 + do { 516 + len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); 517 + 518 + /* allocate and populate the descriptor */ 519 + new = mmp_pdma_alloc_descriptor(chan); 520 + if (!new) { 521 + dev_err(chan->dev, "no memory for desc\n"); 522 + goto fail; 523 + } 524 + 525 + new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len); 526 + if (dir == DMA_MEM_TO_DEV) { 527 + new->desc.dsadr = addr; 528 + new->desc.dtadr = chan->dev_addr; 529 + } else { 530 + new->desc.dsadr = chan->dev_addr; 531 + new->desc.dtadr = addr; 532 + } 533 + 534 + if (!first) 535 + first = new; 536 + else 537 + prev->desc.ddadr = new->async_tx.phys; 538 + 539 + new->async_tx.cookie = 0; 540 + async_tx_ack(&new->async_tx); 541 + prev = new; 542 + 543 + /* Insert the link descriptor to the LD ring */ 544 + list_add_tail(&new->node, &first->tx_list); 545 + 546 + /* update metadata */ 547 + addr += len; 548 + avail -= len; 549 + } while (avail); 550 + } 551 + 552 + first->async_tx.cookie = -EBUSY; 553 + first->async_tx.flags = flags; 554 + 555 + /* last desc and fire IRQ */ 556 + new->desc.ddadr = DDADR_STOP; 557 + new->desc.dcmd |= DCMD_ENDIRQEN; 558 + 559 + return &first->async_tx; 560 + 561 + fail: 562 + if (first) 563 + mmp_pdma_free_desc_list(chan, &first->tx_list); 564 + return NULL; 565 + } 566 + 567 + static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 568 + unsigned long arg) 569 + { 570 + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 571 + struct dma_slave_config *cfg = (void *)arg; 572 + unsigned long flags; 573 + int ret = 0; 574 + u32 maxburst = 0, addr = 0; 575 + enum 
dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 576 + 577 + if (!dchan) 578 + return -EINVAL; 579 + 580 + switch (cmd) { 581 + case DMA_TERMINATE_ALL: 582 + disable_chan(chan->phy); 583 + if (chan->phy) { 584 + chan->phy->vchan = NULL; 585 + chan->phy = NULL; 586 + } 587 + spin_lock_irqsave(&chan->desc_lock, flags); 588 + mmp_pdma_free_desc_list(chan, &chan->chain_pending); 589 + mmp_pdma_free_desc_list(chan, &chan->chain_running); 590 + spin_unlock_irqrestore(&chan->desc_lock, flags); 591 + chan->idle = true; 592 + break; 593 + case DMA_SLAVE_CONFIG: 594 + if (cfg->direction == DMA_DEV_TO_MEM) { 595 + chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC; 596 + maxburst = cfg->src_maxburst; 597 + width = cfg->src_addr_width; 598 + addr = cfg->src_addr; 599 + } else if (cfg->direction == DMA_MEM_TO_DEV) { 600 + chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG; 601 + maxburst = cfg->dst_maxburst; 602 + width = cfg->dst_addr_width; 603 + addr = cfg->dst_addr; 604 + } 605 + 606 + if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) 607 + chan->dcmd |= DCMD_WIDTH1; 608 + else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) 609 + chan->dcmd |= DCMD_WIDTH2; 610 + else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) 611 + chan->dcmd |= DCMD_WIDTH4; 612 + 613 + if (maxburst == 8) 614 + chan->dcmd |= DCMD_BURST8; 615 + else if (maxburst == 16) 616 + chan->dcmd |= DCMD_BURST16; 617 + else if (maxburst == 32) 618 + chan->dcmd |= DCMD_BURST32; 619 + 620 + if (cfg) { 621 + chan->dir = cfg->direction; 622 + chan->drcmr = cfg->slave_id; 623 + } 624 + chan->dev_addr = addr; 625 + break; 626 + default: 627 + return -ENOSYS; 628 + } 629 + 630 + return ret; 631 + } 632 + 633 + static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, 634 + dma_cookie_t cookie, struct dma_tx_state *txstate) 635 + { 636 + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 637 + enum dma_status ret; 638 + unsigned long flags; 639 + 640 + spin_lock_irqsave(&chan->desc_lock, flags); 641 + ret = dma_cookie_status(dchan, cookie, 
txstate); 642 + spin_unlock_irqrestore(&chan->desc_lock, flags); 643 + 644 + return ret; 645 + } 646 + 647 + /** 648 + * mmp_pdma_issue_pending - Issue the DMA start command 649 + * pending list ==> running list 650 + */ 651 + static void mmp_pdma_issue_pending(struct dma_chan *dchan) 652 + { 653 + struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 654 + unsigned long flags; 655 + 656 + spin_lock_irqsave(&chan->desc_lock, flags); 657 + start_pending_queue(chan); 658 + spin_unlock_irqrestore(&chan->desc_lock, flags); 659 + } 660 + 661 + /* 662 + * dma_do_tasklet 663 + * Do call back 664 + * Start pending list 665 + */ 666 + static void dma_do_tasklet(unsigned long data) 667 + { 668 + struct mmp_pdma_chan *chan = (struct mmp_pdma_chan *)data; 669 + struct mmp_pdma_desc_sw *desc, *_desc; 670 + LIST_HEAD(chain_cleanup); 671 + unsigned long flags; 672 + 673 + /* submit pending list; callback for each desc; free desc */ 674 + 675 + spin_lock_irqsave(&chan->desc_lock, flags); 676 + 677 + /* update the cookie if we have some descriptors to cleanup */ 678 + if (!list_empty(&chan->chain_running)) { 679 + dma_cookie_t cookie; 680 + 681 + desc = to_mmp_pdma_desc(chan->chain_running.prev); 682 + cookie = desc->async_tx.cookie; 683 + dma_cookie_complete(&desc->async_tx); 684 + 685 + dev_dbg(chan->dev, "completed_cookie=%d\n", cookie); 686 + } 687 + 688 + /* 689 + * move the descriptors to a temporary list so we can drop the lock 690 + * during the entire cleanup operation 691 + */ 692 + list_splice_tail_init(&chan->chain_running, &chain_cleanup); 693 + 694 + /* the hardware is now idle and ready for more */ 695 + chan->idle = true; 696 + 697 + /* Start any pending transactions automatically */ 698 + start_pending_queue(chan); 699 + spin_unlock_irqrestore(&chan->desc_lock, flags); 700 + 701 + /* Run the callback for each descriptor, in order */ 702 + list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) { 703 + struct dma_async_tx_descriptor *txd = &desc->async_tx; 704 + 
705 + /* Remove from the list of transactions */ 706 + list_del(&desc->node); 707 + /* Run the link descriptor callback function */ 708 + if (txd->callback) 709 + txd->callback(txd->callback_param); 710 + 711 + dma_pool_free(chan->desc_pool, desc, txd->phys); 712 + } 713 + } 714 + 715 + static int __devexit mmp_pdma_remove(struct platform_device *op) 716 + { 717 + struct mmp_pdma_device *pdev = platform_get_drvdata(op); 718 + 719 + dma_async_device_unregister(&pdev->device); 720 + return 0; 721 + } 722 + 723 + static int __devinit mmp_pdma_chan_init(struct mmp_pdma_device *pdev, 724 + int idx, int irq) 725 + { 726 + struct mmp_pdma_phy *phy = &pdev->phy[idx]; 727 + struct mmp_pdma_chan *chan; 728 + int ret; 729 + 730 + chan = devm_kzalloc(pdev->dev, 731 + sizeof(struct mmp_pdma_chan), GFP_KERNEL); 732 + if (chan == NULL) 733 + return -ENOMEM; 734 + 735 + phy->idx = idx; 736 + phy->base = pdev->base; 737 + 738 + if (irq) { 739 + ret = devm_request_irq(pdev->dev, irq, 740 + mmp_pdma_chan_handler, IRQF_DISABLED, "pdma", phy); 741 + if (ret) { 742 + dev_err(pdev->dev, "channel request irq fail!\n"); 743 + return ret; 744 + } 745 + } 746 + 747 + spin_lock_init(&chan->desc_lock); 748 + chan->dev = pdev->dev; 749 + chan->chan.device = &pdev->device; 750 + tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); 751 + INIT_LIST_HEAD(&chan->chain_pending); 752 + INIT_LIST_HEAD(&chan->chain_running); 753 + 754 + /* register virt channel to dma engine */ 755 + list_add_tail(&chan->chan.device_node, 756 + &pdev->device.channels); 757 + 758 + return 0; 759 + } 760 + 761 + static struct of_device_id mmp_pdma_dt_ids[] = { 762 + { .compatible = "marvell,pdma-1.0", }, 763 + {} 764 + }; 765 + MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids); 766 + 767 + static int __devinit mmp_pdma_probe(struct platform_device *op) 768 + { 769 + struct mmp_pdma_device *pdev; 770 + const struct of_device_id *of_id; 771 + struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev); 772 + struct 
resource *iores; 773 + int i, ret, irq = 0; 774 + int dma_channels = 0, irq_num = 0; 775 + 776 + pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); 777 + if (!pdev) 778 + return -ENOMEM; 779 + pdev->dev = &op->dev; 780 + 781 + iores = platform_get_resource(op, IORESOURCE_MEM, 0); 782 + if (!iores) 783 + return -EINVAL; 784 + 785 + pdev->base = devm_request_and_ioremap(pdev->dev, iores); 786 + if (!pdev->base) 787 + return -EADDRNOTAVAIL; 788 + 789 + of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); 790 + if (of_id) 791 + of_property_read_u32(pdev->dev->of_node, 792 + "#dma-channels", &dma_channels); 793 + else if (pdata && pdata->dma_channels) 794 + dma_channels = pdata->dma_channels; 795 + else 796 + dma_channels = 32; /* default 32 channel */ 797 + pdev->dma_channels = dma_channels; 798 + 799 + for (i = 0; i < dma_channels; i++) { 800 + if (platform_get_irq(op, i) > 0) 801 + irq_num++; 802 + } 803 + 804 + pdev->phy = devm_kzalloc(pdev->dev, 805 + dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); 806 + if (pdev->phy == NULL) 807 + return -ENOMEM; 808 + 809 + INIT_LIST_HEAD(&pdev->device.channels); 810 + 811 + if (irq_num != dma_channels) { 812 + /* all chan share one irq, demux inside */ 813 + irq = platform_get_irq(op, 0); 814 + ret = devm_request_irq(pdev->dev, irq, 815 + mmp_pdma_int_handler, IRQF_DISABLED, "pdma", pdev); 816 + if (ret) 817 + return ret; 818 + } 819 + 820 + for (i = 0; i < dma_channels; i++) { 821 + irq = (irq_num != dma_channels) ? 
0 : platform_get_irq(op, i); 822 + ret = mmp_pdma_chan_init(pdev, i, irq); 823 + if (ret) 824 + return ret; 825 + } 826 + 827 + dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); 828 + dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask); 829 + dma_cap_set(DMA_SLAVE, pdev->device.cap_mask); 830 + pdev->device.dev = &op->dev; 831 + pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources; 832 + pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources; 833 + pdev->device.device_tx_status = mmp_pdma_tx_status; 834 + pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy; 835 + pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg; 836 + pdev->device.device_issue_pending = mmp_pdma_issue_pending; 837 + pdev->device.device_control = mmp_pdma_control; 838 + pdev->device.copy_align = PDMA_ALIGNMENT; 839 + 840 + if (pdev->dev->coherent_dma_mask) 841 + dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask); 842 + else 843 + dma_set_mask(pdev->dev, DMA_BIT_MASK(64)); 844 + 845 + ret = dma_async_device_register(&pdev->device); 846 + if (ret) { 847 + dev_err(pdev->device.dev, "unable to register\n"); 848 + return ret; 849 + } 850 + 851 + dev_info(pdev->device.dev, "initialized\n"); 852 + return 0; 853 + } 854 + 855 + static const struct platform_device_id mmp_pdma_id_table[] = { 856 + { "mmp-pdma", }, 857 + { }, 858 + }; 859 + 860 + static struct platform_driver mmp_pdma_driver = { 861 + .driver = { 862 + .name = "mmp-pdma", 863 + .owner = THIS_MODULE, 864 + .of_match_table = mmp_pdma_dt_ids, 865 + }, 866 + .id_table = mmp_pdma_id_table, 867 + .probe = mmp_pdma_probe, 868 + .remove = __devexit_p(mmp_pdma_remove), 869 + }; 870 + 871 + module_platform_driver(mmp_pdma_driver); 872 + 873 + MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); 874 + MODULE_AUTHOR("Marvell International Ltd."); 875 + MODULE_LICENSE("GPL v2");
+36 -25
drivers/dma/mmp_tdma.c
··· 20 20 #include <linux/device.h> 21 21 #include <mach/regs-icu.h> 22 22 #include <linux/platform_data/dma-mmp_tdma.h> 23 + #include <linux/of_device.h> 23 24 24 25 #include "dmaengine.h" 25 26 ··· 128 127 void __iomem *base; 129 128 struct dma_device device; 130 129 struct mmp_tdma_chan *tdmac[TDMA_CHANNEL_NUM]; 131 - int irq; 132 130 }; 133 131 134 132 #define to_mmp_tdma_chan(dchan) container_of(dchan, struct mmp_tdma_chan, chan) ··· 492 492 return -ENOMEM; 493 493 } 494 494 if (irq) 495 - tdmac->irq = irq + idx; 495 + tdmac->irq = irq; 496 496 tdmac->dev = tdev->dev; 497 497 tdmac->chan.device = &tdev->device; 498 498 tdmac->idx = idx; ··· 505 505 /* add the channel to tdma_chan list */ 506 506 list_add_tail(&tdmac->chan.device_node, 507 507 &tdev->device.channels); 508 - 509 508 return 0; 510 509 } 511 510 511 + static struct of_device_id mmp_tdma_dt_ids[] = { 512 + { .compatible = "marvell,adma-1.0", .data = (void *)MMP_AUD_TDMA}, 513 + { .compatible = "marvell,pxa910-squ", .data = (void *)PXA910_SQU}, 514 + {} 515 + }; 516 + MODULE_DEVICE_TABLE(of, mmp_tdma_dt_ids); 517 + 512 518 static int __devinit mmp_tdma_probe(struct platform_device *pdev) 513 519 { 514 - const struct platform_device_id *id = platform_get_device_id(pdev); 515 - enum mmp_tdma_type type = id->driver_data; 520 + enum mmp_tdma_type type; 521 + const struct of_device_id *of_id; 516 522 struct mmp_tdma_device *tdev; 517 523 struct resource *iores; 518 524 int i, ret; 519 - int irq = 0; 525 + int irq = 0, irq_num = 0; 520 526 int chan_num = TDMA_CHANNEL_NUM; 527 + 528 + of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev); 529 + if (of_id) 530 + type = (enum mmp_tdma_type) of_id->data; 531 + else 532 + type = platform_get_device_id(pdev)->driver_data; 521 533 522 534 /* always have couple channels */ 523 535 tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL); ··· 537 525 return -ENOMEM; 538 526 539 527 tdev->dev = &pdev->dev; 540 - iores = platform_get_resource(pdev, IORESOURCE_IRQ, 
0); 541 - if (!iores) 542 - return -EINVAL; 543 528 544 - if (resource_size(iores) != chan_num) 545 - tdev->irq = iores->start; 546 - else 547 - irq = iores->start; 529 + for (i = 0; i < chan_num; i++) { 530 + if (platform_get_irq(pdev, i) > 0) 531 + irq_num++; 532 + } 548 533 549 534 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 550 535 if (!iores) ··· 551 542 if (!tdev->base) 552 543 return -EADDRNOTAVAIL; 553 544 554 - if (tdev->irq) { 555 - ret = devm_request_irq(&pdev->dev, tdev->irq, 545 + INIT_LIST_HEAD(&tdev->device.channels); 546 + 547 + if (irq_num != chan_num) { 548 + irq = platform_get_irq(pdev, 0); 549 + ret = devm_request_irq(&pdev->dev, irq, 556 550 mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev); 551 + if (ret) 552 + return ret; 553 + } 554 + 555 + /* initialize channel parameters */ 556 + for (i = 0; i < chan_num; i++) { 557 + irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i); 558 + ret = mmp_tdma_chan_init(tdev, i, irq, type); 557 559 if (ret) 558 560 return ret; 559 561 } 560 562 561 563 dma_cap_set(DMA_SLAVE, tdev->device.cap_mask); 562 564 dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask); 563 - 564 - INIT_LIST_HEAD(&tdev->device.channels); 565 - 566 - /* initialize channel parameters */ 567 - for (i = 0; i < chan_num; i++) { 568 - ret = mmp_tdma_chan_init(tdev, i, irq, type); 569 - if (ret) 570 - return ret; 571 - } 572 - 573 565 tdev->device.dev = &pdev->dev; 574 566 tdev->device.device_alloc_chan_resources = 575 567 mmp_tdma_alloc_chan_resources; ··· 605 595 .driver = { 606 596 .name = "mmp-tdma", 607 597 .owner = THIS_MODULE, 598 + .of_match_table = mmp_tdma_dt_ids, 608 599 }, 609 600 .id_table = mmp_tdma_id_table, 610 601 .probe = mmp_tdma_probe,
+8 -6
drivers/dma/mxs-dma.c
··· 101 101 u32 pio_words[MXS_PIO_WORDS]; 102 102 }; 103 103 104 - #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) 104 + #define CCW_BLOCK_SIZE (4 * PAGE_SIZE) 105 + #define NUM_CCW (int)(CCW_BLOCK_SIZE / sizeof(struct mxs_dma_ccw)) 105 106 106 107 struct mxs_dma_chan { 107 108 struct mxs_dma_engine *mxs_dma; ··· 355 354 356 355 mxs_chan->chan_irq = data->chan_irq; 357 356 358 - mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 359 - &mxs_chan->ccw_phys, GFP_KERNEL); 357 + mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, 358 + CCW_BLOCK_SIZE, &mxs_chan->ccw_phys, 359 + GFP_KERNEL); 360 360 if (!mxs_chan->ccw) { 361 361 ret = -ENOMEM; 362 362 goto err_alloc; 363 363 } 364 364 365 - memset(mxs_chan->ccw, 0, PAGE_SIZE); 365 + memset(mxs_chan->ccw, 0, CCW_BLOCK_SIZE); 366 366 367 367 if (mxs_chan->chan_irq != NO_IRQ) { 368 368 ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, ··· 389 387 err_clk: 390 388 free_irq(mxs_chan->chan_irq, mxs_dma); 391 389 err_irq: 392 - dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 390 + dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, 393 391 mxs_chan->ccw, mxs_chan->ccw_phys); 394 392 err_alloc: 395 393 return ret; ··· 404 402 405 403 free_irq(mxs_chan->chan_irq, mxs_dma); 406 404 407 - dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 405 + dma_free_coherent(mxs_dma->dma_device.dev, CCW_BLOCK_SIZE, 408 406 mxs_chan->ccw, mxs_chan->ccw_phys); 409 407 410 408 clk_disable_unprepare(mxs_dma->clk);
+8 -70
drivers/dma/pl330.c
··· 23 23 #include <linux/dmaengine.h> 24 24 #include <linux/amba/bus.h> 25 25 #include <linux/amba/pl330.h> 26 - #include <linux/pm_runtime.h> 27 26 #include <linux/scatterlist.h> 28 27 #include <linux/of.h> 29 28 ··· 585 586 586 587 /* Peripheral channels connected to this DMAC */ 587 588 struct dma_pl330_chan *peripherals; /* keep at end */ 588 - 589 - struct clk *clk; 590 589 }; 591 590 592 591 struct dma_pl330_desc { ··· 2392 2395 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 2393 2396 if (!pch->pl330_chid) { 2394 2397 spin_unlock_irqrestore(&pch->lock, flags); 2395 - return 0; 2398 + return -ENOMEM; 2396 2399 } 2397 2400 2398 2401 tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); ··· 2886 2889 goto probe_err1; 2887 2890 } 2888 2891 2889 - pdmac->clk = clk_get(&adev->dev, "dma"); 2890 - if (IS_ERR(pdmac->clk)) { 2891 - dev_err(&adev->dev, "Cannot get operation clock.\n"); 2892 - ret = -EINVAL; 2893 - goto probe_err2; 2894 - } 2895 - 2896 2892 amba_set_drvdata(adev, pdmac); 2897 - 2898 - #ifndef CONFIG_PM_RUNTIME 2899 - /* enable dma clk */ 2900 - clk_enable(pdmac->clk); 2901 - #endif 2902 2893 2903 2894 irq = adev->irq[0]; 2904 2895 ret = request_irq(irq, pl330_irq_handler, 0, 2905 2896 dev_name(&adev->dev), pi); 2906 2897 if (ret) 2907 - goto probe_err3; 2898 + goto probe_err2; 2908 2899 2909 2900 ret = pl330_add(pi); 2910 2901 if (ret) 2911 - goto probe_err4; 2902 + goto probe_err3; 2912 2903 2913 2904 INIT_LIST_HEAD(&pdmac->desc_pool); 2914 2905 spin_lock_init(&pdmac->pool_lock); ··· 2918 2933 if (!pdmac->peripherals) { 2919 2934 ret = -ENOMEM; 2920 2935 dev_err(&adev->dev, "unable to allocate pdmac->peripherals\n"); 2921 - goto probe_err5; 2936 + goto probe_err4; 2922 2937 } 2923 2938 2924 2939 for (i = 0; i < num_chan; i++) { ··· 2946 2961 if (pi->pcfg.num_peri) { 2947 2962 dma_cap_set(DMA_SLAVE, pd->cap_mask); 2948 2963 dma_cap_set(DMA_CYCLIC, pd->cap_mask); 2964 + dma_cap_set(DMA_PRIVATE, pd->cap_mask); 2949 2965 } 2950 2966 } 2951 
2967 ··· 2962 2976 ret = dma_async_device_register(pd); 2963 2977 if (ret) { 2964 2978 dev_err(&adev->dev, "unable to register DMAC\n"); 2965 - goto probe_err5; 2979 + goto probe_err4; 2966 2980 } 2967 2981 2968 2982 dev_info(&adev->dev, ··· 2975 2989 2976 2990 return 0; 2977 2991 2978 - probe_err5: 2979 - pl330_del(pi); 2980 2992 probe_err4: 2981 - free_irq(irq, pi); 2993 + pl330_del(pi); 2982 2994 probe_err3: 2983 - #ifndef CONFIG_PM_RUNTIME 2984 - clk_disable(pdmac->clk); 2985 - #endif 2986 - clk_put(pdmac->clk); 2995 + free_irq(irq, pi); 2987 2996 probe_err2: 2988 2997 iounmap(pi->base); 2989 2998 probe_err1: ··· 3025 3044 res = &adev->res; 3026 3045 release_mem_region(res->start, resource_size(res)); 3027 3046 3028 - #ifndef CONFIG_PM_RUNTIME 3029 - clk_disable(pdmac->clk); 3030 - #endif 3031 - 3032 3047 kfree(pdmac); 3033 3048 3034 3049 return 0; ··· 3040 3063 3041 3064 MODULE_DEVICE_TABLE(amba, pl330_ids); 3042 3065 3043 - #ifdef CONFIG_PM_RUNTIME 3044 - static int pl330_runtime_suspend(struct device *dev) 3045 - { 3046 - struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); 3047 - 3048 - if (!pdmac) { 3049 - dev_err(dev, "failed to get dmac\n"); 3050 - return -ENODEV; 3051 - } 3052 - 3053 - clk_disable(pdmac->clk); 3054 - 3055 - return 0; 3056 - } 3057 - 3058 - static int pl330_runtime_resume(struct device *dev) 3059 - { 3060 - struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); 3061 - 3062 - if (!pdmac) { 3063 - dev_err(dev, "failed to get dmac\n"); 3064 - return -ENODEV; 3065 - } 3066 - 3067 - clk_enable(pdmac->clk); 3068 - 3069 - return 0; 3070 - } 3071 - #else 3072 - #define pl330_runtime_suspend NULL 3073 - #define pl330_runtime_resume NULL 3074 - #endif /* CONFIG_PM_RUNTIME */ 3075 - 3076 - static const struct dev_pm_ops pl330_pm_ops = { 3077 - .runtime_suspend = pl330_runtime_suspend, 3078 - .runtime_resume = pl330_runtime_resume, 3079 - }; 3080 - 3081 3066 static struct amba_driver pl330_driver = { 3082 3067 .drv = { 3083 3068 .owner = THIS_MODULE, 
3084 3069 .name = "dma-pl330", 3085 - .pm = &pl330_pm_ops, 3086 3070 }, 3087 3071 .id_table = pl330_ids, 3088 3072 .probe = pl330_probe,
+7 -16
drivers/dma/sirf-dma.c
··· 570 570 571 571 if (of_property_read_u32(dn, "cell-index", &id)) { 572 572 dev_err(dev, "Fail to get DMAC index\n"); 573 - ret = -ENODEV; 574 - goto free_mem; 573 + return -ENODEV; 575 574 } 576 575 577 576 sdma->irq = irq_of_parse_and_map(dn, 0); 578 577 if (sdma->irq == NO_IRQ) { 579 578 dev_err(dev, "Error mapping IRQ!\n"); 580 - ret = -EINVAL; 581 - goto free_mem; 579 + return -EINVAL; 582 580 } 583 581 584 582 ret = of_address_to_resource(dn, 0, &res); 585 583 if (ret) { 586 584 dev_err(dev, "Error parsing memory region!\n"); 587 - goto free_mem; 585 + goto irq_dispose; 588 586 } 589 587 590 588 regs_start = res.start; ··· 595 597 goto irq_dispose; 596 598 } 597 599 598 - ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, 599 - sdma); 600 + ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma); 600 601 if (ret) { 601 602 dev_err(dev, "Error requesting IRQ!\n"); 602 603 ret = -EINVAL; 603 - goto unmap_mem; 604 + goto irq_dispose; 604 605 } 605 606 606 607 dma = &sdma->dma; ··· 649 652 return 0; 650 653 651 654 free_irq: 652 - devm_free_irq(dev, sdma->irq, sdma); 655 + free_irq(sdma->irq, sdma); 653 656 irq_dispose: 654 657 irq_dispose_mapping(sdma->irq); 655 - unmap_mem: 656 - iounmap(sdma->base); 657 - free_mem: 658 - devm_kfree(dev, sdma); 659 658 return ret; 660 659 } 661 660 ··· 661 668 struct sirfsoc_dma *sdma = dev_get_drvdata(dev); 662 669 663 670 dma_async_device_unregister(&sdma->dma); 664 - devm_free_irq(dev, sdma->irq, sdma); 671 + free_irq(sdma->irq, sdma); 665 672 irq_dispose_mapping(sdma->irq); 666 - iounmap(sdma->base); 667 - devm_kfree(dev, sdma); 668 673 return 0; 669 674 } 670 675
+9 -5
drivers/dma/ste_dma40.c
··· 2921 2921 struct d40_base *base = NULL; 2922 2922 int num_log_chans = 0; 2923 2923 int num_phy_chans; 2924 + int clk_ret = -EINVAL; 2924 2925 int i; 2925 2926 u32 pid; 2926 2927 u32 cid; 2927 2928 u8 rev; 2928 2929 2929 2930 clk = clk_get(&pdev->dev, NULL); 2930 - 2931 2931 if (IS_ERR(clk)) { 2932 2932 d40_err(&pdev->dev, "No matching clock found\n"); 2933 2933 goto failure; 2934 2934 } 2935 2935 2936 - clk_enable(clk); 2936 + clk_ret = clk_prepare_enable(clk); 2937 + if (clk_ret) { 2938 + d40_err(&pdev->dev, "Failed to prepare/enable clock\n"); 2939 + goto failure; 2940 + } 2937 2941 2938 2942 /* Get IO for DMAC base address */ 2939 2943 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); ··· 3067 3063 return base; 3068 3064 3069 3065 failure: 3070 - if (!IS_ERR(clk)) { 3071 - clk_disable(clk); 3066 + if (!clk_ret) 3067 + clk_disable_unprepare(clk); 3068 + if (!IS_ERR(clk)) 3072 3069 clk_put(clk); 3073 - } 3074 3070 if (virtbase) 3075 3071 iounmap(virtbase); 3076 3072 if (res)
+5 -7
drivers/dma/tegra20-apb-dma.c
··· 169 169 /* tegra_dma_channel: Channel specific information */ 170 170 struct tegra_dma_channel { 171 171 struct dma_chan dma_chan; 172 + char name[30]; 172 173 bool config_init; 173 174 int id; 174 175 int irq; ··· 476 475 while (!list_empty(&tdc->pending_sg_req)) { 477 476 sgreq = list_first_entry(&tdc->pending_sg_req, 478 477 typeof(*sgreq), node); 479 - list_del(&sgreq->node); 480 - list_add_tail(&sgreq->node, &tdc->free_sg_req); 478 + list_move_tail(&sgreq->node, &tdc->free_sg_req); 481 479 if (sgreq->last_sg) { 482 480 dma_desc = sgreq->dma_desc; 483 481 dma_desc->dma_status = DMA_ERROR; ··· 570 570 571 571 /* If not last req then put at end of pending list */ 572 572 if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) { 573 - list_del(&sgreq->node); 574 - list_add_tail(&sgreq->node, &tdc->pending_sg_req); 573 + list_move_tail(&sgreq->node, &tdc->pending_sg_req); 575 574 sgreq->configured = false; 576 575 st = handle_continuous_head_request(tdc, sgreq, to_terminate); 577 576 if (!st) ··· 1283 1284 INIT_LIST_HEAD(&tdma->dma_dev.channels); 1284 1285 for (i = 0; i < cdata->nr_channels; i++) { 1285 1286 struct tegra_dma_channel *tdc = &tdma->channels[i]; 1286 - char irq_name[30]; 1287 1287 1288 1288 tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + 1289 1289 i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; ··· 1294 1296 goto err_irq; 1295 1297 } 1296 1298 tdc->irq = res->start; 1297 - snprintf(irq_name, sizeof(irq_name), "apbdma.%d", i); 1299 + snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i); 1298 1300 ret = devm_request_irq(&pdev->dev, tdc->irq, 1299 - tegra_dma_isr, 0, irq_name, tdc); 1301 + tegra_dma_isr, 0, tdc->name, tdc); 1300 1302 if (ret) { 1301 1303 dev_err(&pdev->dev, 1302 1304 "request_irq failed with err %d channel %d\n",
+1
drivers/spi/Kconfig
··· 134 134 tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller" 135 135 depends on ARCH_DAVINCI 136 136 select SPI_BITBANG 137 + select TI_EDMA 137 138 help 138 139 SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules. 139 140
+126 -158
drivers/spi/spi-davinci.c
··· 25 25 #include <linux/platform_device.h> 26 26 #include <linux/err.h> 27 27 #include <linux/clk.h> 28 + #include <linux/dmaengine.h> 28 29 #include <linux/dma-mapping.h> 30 + #include <linux/edma.h> 29 31 #include <linux/spi/spi.h> 30 32 #include <linux/spi/spi_bitbang.h> 31 33 #include <linux/slab.h> 32 34 33 35 #include <linux/platform_data/spi-davinci.h> 34 - #include <mach/edma.h> 35 36 36 37 #define SPI_NO_RESOURCE ((resource_size_t)-1) 37 38 ··· 114 113 #define SPIDEF 0x4c 115 114 #define SPIFMT0 0x50 116 115 117 - /* We have 2 DMA channels per CS, one for RX and one for TX */ 118 - struct davinci_spi_dma { 119 - int tx_channel; 120 - int rx_channel; 121 - int dummy_param_slot; 122 - enum dma_event_q eventq; 123 - }; 124 - 125 116 /* SPI Controller driver's private data. */ 126 117 struct davinci_spi { 127 118 struct spi_bitbang bitbang; ··· 127 134 128 135 const void *tx; 129 136 void *rx; 130 - #define SPI_TMP_BUFSZ (SMP_CACHE_BYTES + 1) 131 - u8 rx_tmp_buf[SPI_TMP_BUFSZ]; 132 137 int rcount; 133 138 int wcount; 134 - struct davinci_spi_dma dma; 139 + 140 + struct dma_chan *dma_rx; 141 + struct dma_chan *dma_tx; 142 + int dma_rx_chnum; 143 + int dma_tx_chnum; 144 + 135 145 struct davinci_spi_platform_data *pdata; 136 146 137 147 void (*get_rx)(u32 rx_data, struct davinci_spi *); ··· 492 496 return errors; 493 497 } 494 498 495 - static void davinci_spi_dma_callback(unsigned lch, u16 status, void *data) 499 + static void davinci_spi_dma_rx_callback(void *data) 496 500 { 497 - struct davinci_spi *dspi = data; 498 - struct davinci_spi_dma *dma = &dspi->dma; 501 + struct davinci_spi *dspi = (struct davinci_spi *)data; 499 502 500 - edma_stop(lch); 503 + dspi->rcount = 0; 501 504 502 - if (status == DMA_COMPLETE) { 503 - if (lch == dma->rx_channel) 504 - dspi->rcount = 0; 505 - if (lch == dma->tx_channel) 506 - dspi->wcount = 0; 507 - } 505 + if (!dspi->wcount && !dspi->rcount) 506 + complete(&dspi->done); 507 + } 508 508 509 - if ((!dspi->wcount && 
!dspi->rcount) || (status != DMA_COMPLETE)) 509 + static void davinci_spi_dma_tx_callback(void *data) 510 + { 511 + struct davinci_spi *dspi = (struct davinci_spi *)data; 512 + 513 + dspi->wcount = 0; 514 + 515 + if (!dspi->wcount && !dspi->rcount) 510 516 complete(&dspi->done); 511 517 } 512 518 ··· 524 526 static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t) 525 527 { 526 528 struct davinci_spi *dspi; 527 - int data_type, ret; 529 + int data_type, ret = -ENOMEM; 528 530 u32 tx_data, spidat1; 529 531 u32 errors = 0; 530 532 struct davinci_spi_config *spicfg; 531 533 struct davinci_spi_platform_data *pdata; 532 534 unsigned uninitialized_var(rx_buf_count); 533 - struct device *sdev; 535 + void *dummy_buf = NULL; 536 + struct scatterlist sg_rx, sg_tx; 534 537 535 538 dspi = spi_master_get_devdata(spi->master); 536 539 pdata = dspi->pdata; 537 540 spicfg = (struct davinci_spi_config *)spi->controller_data; 538 541 if (!spicfg) 539 542 spicfg = &davinci_spi_default_cfg; 540 - sdev = dspi->bitbang.master->dev.parent; 541 543 542 544 /* convert len to words based on bits_per_word */ 543 545 data_type = dspi->bytes_per_word[spi->chip_select]; ··· 565 567 spidat1 |= tx_data & 0xFFFF; 566 568 iowrite32(spidat1, dspi->base + SPIDAT1); 567 569 } else { 568 - struct davinci_spi_dma *dma; 569 - unsigned long tx_reg, rx_reg; 570 - struct edmacc_param param; 571 - void *rx_buf; 572 - int b, c; 570 + struct dma_slave_config dma_rx_conf = { 571 + .direction = DMA_DEV_TO_MEM, 572 + .src_addr = (unsigned long)dspi->pbase + SPIBUF, 573 + .src_addr_width = data_type, 574 + .src_maxburst = 1, 575 + }; 576 + struct dma_slave_config dma_tx_conf = { 577 + .direction = DMA_MEM_TO_DEV, 578 + .dst_addr = (unsigned long)dspi->pbase + SPIDAT1, 579 + .dst_addr_width = data_type, 580 + .dst_maxburst = 1, 581 + }; 582 + struct dma_async_tx_descriptor *rxdesc; 583 + struct dma_async_tx_descriptor *txdesc; 584 + void *buf; 573 585 574 - dma = &dspi->dma; 586 + dummy_buf = 
kzalloc(t->len, GFP_KERNEL); 587 + if (!dummy_buf) 588 + goto err_alloc_dummy_buf; 575 589 576 - tx_reg = (unsigned long)dspi->pbase + SPIDAT1; 577 - rx_reg = (unsigned long)dspi->pbase + SPIBUF; 590 + dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf); 591 + dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf); 578 592 579 - /* 580 - * Transmit DMA setup 581 - * 582 - * If there is transmit data, map the transmit buffer, set it 583 - * as the source of data and set the source B index to data 584 - * size. If there is no transmit data, set the transmit register 585 - * as the source of data, and set the source B index to zero. 586 - * 587 - * The destination is always the transmit register itself. And 588 - * the destination never increments. 589 - */ 590 - 591 - if (t->tx_buf) { 592 - t->tx_dma = dma_map_single(&spi->dev, (void *)t->tx_buf, 593 - t->len, DMA_TO_DEVICE); 594 - if (dma_mapping_error(&spi->dev, t->tx_dma)) { 595 - dev_dbg(sdev, "Unable to DMA map %d bytes" 596 - "TX buffer\n", t->len); 597 - return -ENOMEM; 598 - } 599 - } 600 - 601 - /* 602 - * If number of words is greater than 65535, then we need 603 - * to configure a 3 dimension transfer. Use the BCNTRLD 604 - * feature to allow for transfers that aren't even multiples 605 - * of 65535 (or any other possible b size) by first transferring 606 - * the remainder amount then grabbing the next N blocks of 607 - * 65535 words. 608 - */ 609 - 610 - c = dspi->wcount / (SZ_64K - 1); /* N 65535 Blocks */ 611 - b = dspi->wcount - c * (SZ_64K - 1); /* Remainder */ 612 - if (b) 613 - c++; 593 + sg_init_table(&sg_rx, 1); 594 + if (!t->rx_buf) 595 + buf = dummy_buf; 614 596 else 615 - b = SZ_64K - 1; 616 - 617 - param.opt = TCINTEN | EDMA_TCC(dma->tx_channel); 618 - param.src = t->tx_buf ? t->tx_dma : tx_reg; 619 - param.a_b_cnt = b << 16 | data_type; 620 - param.dst = tx_reg; 621 - param.src_dst_bidx = t->tx_buf ? data_type : 0; 622 - param.link_bcntrld = 0xffffffff; 623 - param.src_dst_cidx = t->tx_buf ? 
data_type : 0; 624 - param.ccnt = c; 625 - edma_write_slot(dma->tx_channel, &param); 626 - edma_link(dma->tx_channel, dma->dummy_param_slot); 627 - 628 - /* 629 - * Receive DMA setup 630 - * 631 - * If there is receive buffer, use it to receive data. If there 632 - * is none provided, use a temporary receive buffer. Set the 633 - * destination B index to 0 so effectively only one byte is used 634 - * in the temporary buffer (address does not increment). 635 - * 636 - * The source of receive data is the receive data register. The 637 - * source address never increments. 638 - */ 639 - 640 - if (t->rx_buf) { 641 - rx_buf = t->rx_buf; 642 - rx_buf_count = t->len; 643 - } else { 644 - rx_buf = dspi->rx_tmp_buf; 645 - rx_buf_count = sizeof(dspi->rx_tmp_buf); 597 + buf = t->rx_buf; 598 + t->rx_dma = dma_map_single(&spi->dev, buf, 599 + t->len, DMA_FROM_DEVICE); 600 + if (!t->rx_dma) { 601 + ret = -EFAULT; 602 + goto err_rx_map; 646 603 } 604 + sg_dma_address(&sg_rx) = t->rx_dma; 605 + sg_dma_len(&sg_rx) = t->len; 647 606 648 - t->rx_dma = dma_map_single(&spi->dev, rx_buf, rx_buf_count, 649 - DMA_FROM_DEVICE); 650 - if (dma_mapping_error(&spi->dev, t->rx_dma)) { 651 - dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n", 652 - rx_buf_count); 653 - if (t->tx_buf) 654 - dma_unmap_single(&spi->dev, t->tx_dma, t->len, 655 - DMA_TO_DEVICE); 656 - return -ENOMEM; 607 + sg_init_table(&sg_tx, 1); 608 + if (!t->tx_buf) 609 + buf = dummy_buf; 610 + else 611 + buf = (void *)t->tx_buf; 612 + t->tx_dma = dma_map_single(&spi->dev, buf, 613 + t->len, DMA_FROM_DEVICE); 614 + if (!t->tx_dma) { 615 + ret = -EFAULT; 616 + goto err_tx_map; 657 617 } 618 + sg_dma_address(&sg_tx) = t->tx_dma; 619 + sg_dma_len(&sg_tx) = t->len; 658 620 659 - param.opt = TCINTEN | EDMA_TCC(dma->rx_channel); 660 - param.src = rx_reg; 661 - param.a_b_cnt = b << 16 | data_type; 662 - param.dst = t->rx_dma; 663 - param.src_dst_bidx = (t->rx_buf ? 
data_type : 0) << 16; 664 - param.link_bcntrld = 0xffffffff; 665 - param.src_dst_cidx = (t->rx_buf ? data_type : 0) << 16; 666 - param.ccnt = c; 667 - edma_write_slot(dma->rx_channel, &param); 621 + rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx, 622 + &sg_rx, 1, DMA_DEV_TO_MEM, 623 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 624 + if (!rxdesc) 625 + goto err_desc; 626 + 627 + txdesc = dmaengine_prep_slave_sg(dspi->dma_tx, 628 + &sg_tx, 1, DMA_MEM_TO_DEV, 629 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 630 + if (!txdesc) 631 + goto err_desc; 632 + 633 + rxdesc->callback = davinci_spi_dma_rx_callback; 634 + rxdesc->callback_param = (void *)dspi; 635 + txdesc->callback = davinci_spi_dma_tx_callback; 636 + txdesc->callback_param = (void *)dspi; 668 637 669 638 if (pdata->cshold_bug) 670 639 iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2); 671 640 672 - edma_start(dma->rx_channel); 673 - edma_start(dma->tx_channel); 641 + dmaengine_submit(rxdesc); 642 + dmaengine_submit(txdesc); 643 + 644 + dma_async_issue_pending(dspi->dma_rx); 645 + dma_async_issue_pending(dspi->dma_tx); 646 + 674 647 set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); 675 648 } 676 649 ··· 659 690 660 691 clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL); 661 692 if (spicfg->io_type == SPI_IO_TYPE_DMA) { 662 - 663 - if (t->tx_buf) 664 - dma_unmap_single(&spi->dev, t->tx_dma, t->len, 665 - DMA_TO_DEVICE); 666 - 667 - dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count, 668 - DMA_FROM_DEVICE); 669 - 670 693 clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN); 694 + 695 + dma_unmap_single(&spi->dev, t->rx_dma, 696 + t->len, DMA_FROM_DEVICE); 697 + dma_unmap_single(&spi->dev, t->tx_dma, 698 + t->len, DMA_TO_DEVICE); 699 + kfree(dummy_buf); 671 700 } 672 701 673 702 clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK); ··· 683 716 } 684 717 685 718 if (dspi->rcount != 0 || dspi->wcount != 0) { 686 - dev_err(sdev, "SPI data transfer error\n"); 719 + dev_err(&spi->dev, "SPI data transfer error\n"); 687 720 
return -EIO; 688 721 } 689 722 690 723 return t->len; 724 + 725 + err_desc: 726 + dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE); 727 + err_tx_map: 728 + dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE); 729 + err_rx_map: 730 + kfree(dummy_buf); 731 + err_alloc_dummy_buf: 732 + return ret; 691 733 } 692 734 693 735 /** ··· 727 751 728 752 static int davinci_spi_request_dma(struct davinci_spi *dspi) 729 753 { 754 + dma_cap_mask_t mask; 755 + struct device *sdev = dspi->bitbang.master->dev.parent; 730 756 int r; 731 - struct davinci_spi_dma *dma = &dspi->dma; 732 757 733 - r = edma_alloc_channel(dma->rx_channel, davinci_spi_dma_callback, dspi, 734 - dma->eventq); 735 - if (r < 0) { 736 - pr_err("Unable to request DMA channel for SPI RX\n"); 737 - r = -EAGAIN; 758 + dma_cap_zero(mask); 759 + dma_cap_set(DMA_SLAVE, mask); 760 + 761 + dspi->dma_rx = dma_request_channel(mask, edma_filter_fn, 762 + &dspi->dma_rx_chnum); 763 + if (!dspi->dma_rx) { 764 + dev_err(sdev, "request RX DMA channel failed\n"); 765 + r = -ENODEV; 738 766 goto rx_dma_failed; 739 767 } 740 768 741 - r = edma_alloc_channel(dma->tx_channel, davinci_spi_dma_callback, dspi, 742 - dma->eventq); 743 - if (r < 0) { 744 - pr_err("Unable to request DMA channel for SPI TX\n"); 745 - r = -EAGAIN; 769 + dspi->dma_tx = dma_request_channel(mask, edma_filter_fn, 770 + &dspi->dma_tx_chnum); 771 + if (!dspi->dma_tx) { 772 + dev_err(sdev, "request TX DMA channel failed\n"); 773 + r = -ENODEV; 746 774 goto tx_dma_failed; 747 775 } 748 776 749 - r = edma_alloc_slot(EDMA_CTLR(dma->tx_channel), EDMA_SLOT_ANY); 750 - if (r < 0) { 751 - pr_err("Unable to request SPI TX DMA param slot\n"); 752 - r = -EAGAIN; 753 - goto param_failed; 754 - } 755 - dma->dummy_param_slot = r; 756 - edma_link(dma->dummy_param_slot, dma->dummy_param_slot); 757 - 758 777 return 0; 759 - param_failed: 760 - edma_free_channel(dma->tx_channel); 778 + 761 779 tx_dma_failed: 762 - edma_free_channel(dma->rx_channel); 780 + 
dma_release_channel(dspi->dma_rx); 763 781 rx_dma_failed: 764 782 return r; 765 783 } ··· 868 898 dspi->bitbang.txrx_bufs = davinci_spi_bufs; 869 899 if (dma_rx_chan != SPI_NO_RESOURCE && 870 900 dma_tx_chan != SPI_NO_RESOURCE) { 871 - dspi->dma.rx_channel = dma_rx_chan; 872 - dspi->dma.tx_channel = dma_tx_chan; 873 - dspi->dma.eventq = pdata->dma_event_q; 901 + dspi->dma_rx_chnum = dma_rx_chan; 902 + dspi->dma_tx_chnum = dma_tx_chan; 874 903 875 904 ret = davinci_spi_request_dma(dspi); 876 905 if (ret) ··· 924 955 return ret; 925 956 926 957 free_dma: 927 - edma_free_channel(dspi->dma.tx_channel); 928 - edma_free_channel(dspi->dma.rx_channel); 929 - edma_free_slot(dspi->dma.dummy_param_slot); 958 + dma_release_channel(dspi->dma_rx); 959 + dma_release_channel(dspi->dma_tx); 930 960 free_clk: 931 961 clk_disable(dspi->clk); 932 962 clk_put(dspi->clk);
+7
include/linux/dw_dmac.h
··· 19 19 * @nr_channels: Number of channels supported by hardware (max 8) 20 20 * @is_private: The device channels should be marked as private and not for 21 21 * by the general purpose DMA channel allocator. 22 + * @block_size: Maximum block size supported by the controller 23 + * @nr_masters: Number of AHB masters supported by the controller 24 + * @data_width: Maximum data width supported by hardware per AHB master 25 + * (0 - 8bits, 1 - 16bits, ..., 5 - 256bits) 22 26 */ 23 27 struct dw_dma_platform_data { 24 28 unsigned int nr_channels; ··· 33 29 #define CHAN_PRIORITY_ASCENDING 0 /* chan0 highest */ 34 30 #define CHAN_PRIORITY_DESCENDING 1 /* chan7 highest */ 35 31 unsigned char chan_priority; 32 + unsigned short block_size; 33 + unsigned char nr_masters; 34 + unsigned char data_width[4]; 36 35 }; 37 36 38 37 /* bursts size */
+29
include/linux/edma.h
··· 1 + /* 2 + * TI EDMA DMA engine driver 3 + * 4 + * Copyright 2012 Texas Instruments 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License as 8 + * published by the Free Software Foundation version 2. 9 + * 10 + * This program is distributed "as is" WITHOUT ANY WARRANTY of any 11 + * kind, whether express or implied; without even the implied warranty 12 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + */ 15 + #ifndef __LINUX_EDMA_H 16 + #define __LINUX_EDMA_H 17 + 18 + struct dma_chan; 19 + 20 + #if defined(CONFIG_TI_EDMA) || defined(CONFIG_TI_EDMA_MODULE) 21 + bool edma_filter_fn(struct dma_chan *, void *); 22 + #else 23 + static inline bool edma_filter_fn(struct dma_chan *chan, void *param) 24 + { 25 + return false; 26 + } 27 + #endif 28 + 29 + #endif
+19
include/linux/platform_data/mmp_dma.h
··· 1 + /* 2 + * MMP Platform DMA Management 3 + * 4 + * Copyright (c) 2011 Marvell Semiconductors Inc. 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + * 10 + */ 11 + 12 + #ifndef MMP_DMA_H 13 + #define MMP_DMA_H 14 + 15 + struct mmp_dma_platdata { 16 + int dma_channels; 17 + }; 18 + 19 + #endif /* MMP_DMA_H */