Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (66 commits)
avr32: at32ap700x: fix typo in DMA master configuration
dmaengine/dmatest: Pass timeout via module params
dma: let IMX_DMA depend on IMX_HAVE_DMA_V1 instead of an explicit list of SoCs
fsldma: make halt behave nicely on all supported controllers
fsldma: reduce locking during descriptor cleanup
fsldma: support async_tx dependencies and automatic unmapping
fsldma: fix controller lockups
fsldma: minor codingstyle and consistency fixes
fsldma: improve link descriptor debugging
fsldma: use channel name in printk output
fsldma: move related helper functions near each other
dmatest: fix automatic buffer unmap type
drivers, pch_dma: Fix warning when CONFIG_PM=n.
dmaengine/dw_dmac fix: use readl & writel instead of __raw_readl & __raw_writel
avr32: at32ap700x: Specify DMA Flow Controller, Src and Dst msize
dw_dmac: Setting Default Burst length for transfers as 16.
dw_dmac: Allow src/dst msize & flow controller to be configured at runtime
dw_dmac: Changing type of src_master and dest_master to u8.
dw_dmac: Pass Channel Priority from platform_data
dw_dmac: Pass Channel Allocation Order from platform_data
...
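
Several of the commits above (the dmatest unmap-type fix and the fsldma automatic-unmapping work in particular) revolve around the dmaengine DMA_COMPL_* descriptor flags. As context only, here is a minimal sketch of a client that maps its own buffers and therefore asks the engine to skip the automatic unmap on completion; the helper name is made up and the channel is assumed to have been requested elsewhere:

#include <linux/dmaengine.h>

/* Hypothetical helper, for illustration only */
static dma_cookie_t example_submit_copy(struct dma_chan *chan, dma_addr_t dst,
					dma_addr_t src, size_t len)
{
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
			| DMA_COMPL_SKIP_SRC_UNMAP
			| DMA_COMPL_SKIP_DEST_UNMAP;
	struct dma_async_tx_descriptor *tx;

	/* the caller did the dma_map_*() itself, so keep the engine away */
	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
	if (!tx)
		return -ENOMEM;

	return tx->tx_submit(tx);
}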

+2045 -1318
+26
arch/arm/mach-mxs/include/mach/dma.h
···
···
1 + /*
2 + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved.
3 + *
4 + * This program is free software; you can redistribute it and/or modify
5 + * it under the terms of the GNU General Public License version 2 as
6 + * published by the Free Software Foundation.
7 + */
8 +
9 + #ifndef __MACH_MXS_DMA_H__
10 + #define __MACH_MXS_DMA_H__
11 +
12 + struct mxs_dma_data {
13 + int chan_irq;
14 + };
15 +
16 + static inline int mxs_dma_is_apbh(struct dma_chan *chan)
17 + {
18 + return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
19 + }
20 +
21 + static inline int mxs_dma_is_apbx(struct dma_chan *chan)
22 + {
23 + return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
24 + }
25 +
26 + #endif /* __MACH_MXS_DMA_H__ */
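
The two inline helpers in this new header are meant to be called from a dma_request_channel() filter, so a client can tell the APBH and APBX instances of the mxs-dma driver apart. A minimal sketch of such a client follows; the filter function and the way it stashes the IRQ in chan->private are illustrative assumptions, not part of this merge:

#include <linux/dmaengine.h>
#include <mach/dma.h>

/* Hypothetical filter: accept only APBH channels, pass the IRQ along */
static bool example_apbh_filter(struct dma_chan *chan, void *param)
{
	struct mxs_dma_data *data = param;

	if (!mxs_dma_is_apbh(chan))
		return false;

	chan->private = data;
	return true;
}

/* data must outlive the channel, since the filter stores it in chan->private */
static struct dma_chan *example_request_apbh_chan(struct mxs_dma_data *data)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_apbh_filter, data);
}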
+3 -19
arch/arm/plat-nomadik/include/plat/ste_dma40.h
···
104 *
105 * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
106 * @high_priority: true if high-priority
107 * @mode: channel mode: physical, logical, or operation
108 * @mode_opt: options for the chosen channel mode
109 * @src_dev_type: Src device type
···
121 struct stedma40_chan_cfg {
122 enum stedma40_xfer_dir dir;
123 bool high_priority;
124 enum stedma40_mode mode;
125 enum stedma40_mode_opt mode_opt;
126 int src_dev_type;
···
170 */
171
172 bool stedma40_filter(struct dma_chan *chan, void *data);
173 -
174 - /**
175 - * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from
176 - * scattergatter lists.
177 - *
178 - * @chan: dmaengine handle
179 - * @sgl_dst: Destination scatter list
180 - * @sgl_src: Source scatter list
181 - * @sgl_len: The length of each scatterlist. Both lists must be of equal length
182 - * and each element must match the corresponding element in the other scatter
183 - * list.
184 - * @flags: is actually enum dma_ctrl_flags. See dmaengine.h
185 - */
186 -
187 - struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
188 - struct scatterlist *sgl_dst,
189 - struct scatterlist *sgl_src,
190 - unsigned int sgl_len,
191 - unsigned long flags);
192
193 /**
194 * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
···
104 *
105 * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
106 * @high_priority: true if high-priority
107 + * @realtime: true if realtime mode is to be enabled. Only available on DMA40
108 + * version 3+, i.e DB8500v2+
109 * @mode: channel mode: physical, logical, or operation
110 * @mode_opt: options for the chosen channel mode
111 * @src_dev_type: Src device type
···
119 struct stedma40_chan_cfg {
120 enum stedma40_xfer_dir dir;
121 bool high_priority;
122 + bool realtime;
123 enum stedma40_mode mode;
124 enum stedma40_mode_opt mode_opt;
125 int src_dev_type;
···
167 */
168
169 bool stedma40_filter(struct dma_chan *chan, void *data);
170
171 /**
172 * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave
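
The new realtime flag is just one more field a platform or client fills in before the configuration is handed to stedma40_filter() at channel-request time. A hedged sketch of a configuration that enables it; the direction, priority and mode values are examples only, not taken from this merge:

/* Illustrative channel configuration for a DMA40 v3+ (DB8500v2+) system */
static struct stedma40_chan_cfg example_rx_cfg = {
	.dir		= STEDMA40_PERIPH_TO_MEM,
	.high_priority	= true,
	.realtime	= true,		/* DMA40 version 3+ only, see above */
	.mode		= STEDMA40_MODE_LOGICAL,
};

Such a structure is typically passed as the filter parameter, e.g. dma_request_channel(mask, stedma40_filter, &example_rx_cfg).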
+15
arch/avr32/mach-at32ap/at32ap700x.c
···
2048 rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
2049 rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
2050 rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2051 }
2052
2053 /* Check if DMA slave interface for playback should be configured. */
···
2061 tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
2062 tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
2063 tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2064 }
2065
2066 if (platform_device_add_data(pdev, data,
···
2138 dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
2139 dws->cfg_hi = DWC_CFGH_DST_PER(2);
2140 dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2141
2142 if (platform_device_add_data(pdev, data,
2143 sizeof(struct atmel_abdac_pdata)))
···
2048 rx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
2049 rx_dws->cfg_hi = DWC_CFGH_SRC_PER(3);
2050 rx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2051 + rx_dws->src_master = 0;
2052 + rx_dws->dst_master = 1;
2053 + rx_dws->src_msize = DW_DMA_MSIZE_1;
2054 + rx_dws->dst_msize = DW_DMA_MSIZE_1;
2055 + rx_dws->fc = DW_DMA_FC_D_P2M;
2056 }
2057
2058 /* Check if DMA slave interface for playback should be configured. */
···
2056 tx_dws->reg_width = DW_DMA_SLAVE_WIDTH_16BIT;
2057 tx_dws->cfg_hi = DWC_CFGH_DST_PER(4);
2058 tx_dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2059 + tx_dws->src_master = 0;
2060 + tx_dws->dst_master = 1;
2061 + tx_dws->src_msize = DW_DMA_MSIZE_1;
2062 + tx_dws->dst_msize = DW_DMA_MSIZE_1;
2063 + tx_dws->fc = DW_DMA_FC_D_M2P;
2064 }
2065
2066 if (platform_device_add_data(pdev, data,
···
2128 dws->reg_width = DW_DMA_SLAVE_WIDTH_32BIT;
2129 dws->cfg_hi = DWC_CFGH_DST_PER(2);
2130 dws->cfg_lo &= ~(DWC_CFGL_HS_DST_POL | DWC_CFGL_HS_SRC_POL);
2131 + dws->src_master = 0;
2132 + dws->dst_master = 1;
2133 + dws->src_msize = DW_DMA_MSIZE_1;
2134 + dws->dst_msize = DW_DMA_MSIZE_1;
2135 + dws->fc = DW_DMA_FC_D_M2P;
2136
2137 if (platform_device_add_data(pdev, data,
2138 sizeof(struct atmel_abdac_pdata)))
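
Each dw_dma_slave handed to dw_dmac now carries the AHB masters, the burst (msize) values and the flow controller, rather than having them hard-coded in the driver. A sketch of what a comparable capture (peripheral-to-memory) slave entry might look like on another board; the controller device pointer and the handshake number are assumptions:

static struct dw_dma_slave example_capture_dws = {
	.dma_dev	= &dw_dmac0_device.dev,	/* assumed controller device */
	.reg_width	= DW_DMA_SLAVE_WIDTH_16BIT,
	.cfg_hi		= DWC_CFGH_SRC_PER(5),	/* placeholder handshake line */
	.src_master	= 0,			/* master wired to the peripheral */
	.dst_master	= 1,			/* master wired to memory */
	.src_msize	= DW_DMA_MSIZE_1,
	.dst_msize	= DW_DMA_MSIZE_1,
	.fc		= DW_DMA_FC_D_P2M,	/* DMAC flow-controls periph-to-mem */
};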
+10 -2
drivers/dma/Kconfig
···
82
83 config DW_DMAC
84 tristate "Synopsys DesignWare AHB DMA support"
85 - depends on AVR32
86 select DMA_ENGINE
87 default y if CPU_AT32AP7000
88 help
···
221
222 config IMX_DMA
223 tristate "i.MX DMA support"
224 - depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
225 select DMA_ENGINE
226 help
227 Support the i.MX DMA engine. This engine is integrated into
228 Freescale i.MX1/21/27 chips.
229
230 config DMA_ENGINE
231 bool
···
82
83 config DW_DMAC
84 tristate "Synopsys DesignWare AHB DMA support"
85 + depends on HAVE_CLK
86 select DMA_ENGINE
87 default y if CPU_AT32AP7000
88 help
···
221
222 config IMX_DMA
223 tristate "i.MX DMA support"
224 + depends on IMX_HAVE_DMA_V1
225 select DMA_ENGINE
226 help
227 Support the i.MX DMA engine. This engine is integrated into
228 Freescale i.MX1/21/27 chips.
229 +
230 + config MXS_DMA
231 + bool "MXS DMA support"
232 + depends on SOC_IMX23 || SOC_IMX28
233 + select DMA_ENGINE
234 + help
235 + Support the MXS DMA engine. This engine including APBH-DMA
236 + and APBX-DMA is integrated into Freescale i.MX23/28 chips.
237
238 config DMA_ENGINE
239 bool
+1
drivers/dma/Makefile
···
19 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
20 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
21 obj-$(CONFIG_IMX_DMA) += imx-dma.o
22 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
23 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
24 obj-$(CONFIG_PL330_DMA) += pl330.o
···
19 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
20 obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
21 obj-$(CONFIG_IMX_DMA) += imx-dma.o
22 + obj-$(CONFIG_MXS_DMA) += mxs-dma.o
23 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
24 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
25 obj-$(CONFIG_PL330_DMA) += pl330.o
+12 -2
drivers/dma/dmatest.c
···
54 MODULE_PARM_DESC(pq_sources,
55 "Number of p+q source buffers (default: 3)");
56
57 /*
58 * Initialization patterns. All bytes in the source buffer has bit 7
59 * set, all bytes in the destination buffer has bit 7 cleared.
···
290
291 set_user_nice(current, 10);
292
293 - flags = DMA_CTRL_ACK | DMA_COMPL_SKIP_DEST_UNMAP | DMA_PREP_INTERRUPT;
294
295 while (!kthread_should_stop()
296 && !(iterations && total_tests >= iterations)) {
···
304 dma_addr_t dma_srcs[src_cnt];
305 dma_addr_t dma_dsts[dst_cnt];
306 struct completion cmp;
307 - unsigned long tmo = msecs_to_jiffies(3000);
308 u8 align = 0;
309
310 total_tests++;
···
54 MODULE_PARM_DESC(pq_sources,
55 "Number of p+q source buffers (default: 3)");
56
57 + static int timeout = 3000;
58 + module_param(timeout, uint, S_IRUGO);
59 + MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), \
60 + Pass -1 for infinite timeout");
61 +
62 /*
63 * Initialization patterns. All bytes in the source buffer has bit 7
64 * set, all bytes in the destination buffer has bit 7 cleared.
···
285
286 set_user_nice(current, 10);
287
288 + /*
289 + * src buffers are freed by the DMAEngine code with dma_unmap_single()
290 + * dst buffers are freed by ourselves below
291 + */
292 + flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT
293 + | DMA_COMPL_SKIP_DEST_UNMAP | DMA_COMPL_SRC_UNMAP_SINGLE;
294
295 while (!kthread_should_stop()
296 && !(iterations && total_tests >= iterations)) {
···
294 dma_addr_t dma_srcs[src_cnt];
295 dma_addr_t dma_dsts[dst_cnt];
296 struct completion cmp;
297 + unsigned long tmo = msecs_to_jiffies(timeout);
298 u8 align = 0;
299
300 total_tests++;
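
The hard-coded three second wait becomes a module parameter, and at run time it still just feeds msecs_to_jiffies() and wait_for_completion_timeout(); a negative value saturates msecs_to_jiffies() at MAX_JIFFY_OFFSET, which is why -1 behaves as an effectively infinite timeout. A small self-contained sketch of that wait path, with a made-up helper name alongside the descriptor plumbing dmatest already has:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/jiffies.h>

static void example_xfer_done(void *arg)
{
	complete(arg);
}

/* Returns true if the transfer signalled completion within timeout_msec */
static bool example_wait_for_xfer(struct dma_async_tx_descriptor *tx,
				  unsigned int timeout_msec)
{
	unsigned long tmo = msecs_to_jiffies(timeout_msec);
	struct completion cmp;

	init_completion(&cmp);
	tx->callback = example_xfer_done;
	tx->callback_param = &cmp;
	tx->tx_submit(tx);

	return wait_for_completion_timeout(&cmp, tmo) != 0;
}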
+66 -37
drivers/dma/dw_dmac.c
··· 32 * which does not support descriptor writeback. 33 */ 34 35 - /* NOTE: DMS+SMS is system-specific. We should get this information 36 - * from the platform code somehow. 37 - */ 38 - #define DWC_DEFAULT_CTLLO (DWC_CTLL_DST_MSIZE(0) \ 39 - | DWC_CTLL_SRC_MSIZE(0) \ 40 - | DWC_CTLL_DMS(0) \ 41 - | DWC_CTLL_SMS(1) \ 42 - | DWC_CTLL_LLP_D_EN \ 43 - | DWC_CTLL_LLP_S_EN) 44 45 /* 46 * This is configuration-dependent and usually a funny size like 4095. 47 - * Let's round it down to the nearest power of two. 48 * 49 * Note that this is a transfer count, i.e. if we transfer 32-bit 50 - * words, we can do 8192 bytes per descriptor. 51 * 52 * This parameter is also system-specific. 53 */ 54 - #define DWC_MAX_COUNT 2048U 55 56 /* 57 * Number of descriptors to allocate for each channel. This should be ··· 86 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 87 { 88 return list_entry(dwc->active_list.next, struct dw_desc, desc_node); 89 - } 90 - 91 - static struct dw_desc *dwc_first_queued(struct dw_dma_chan *dwc) 92 - { 93 - return list_entry(dwc->queue.next, struct dw_desc, desc_node); 94 } 95 96 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) ··· 200 dma_async_tx_callback callback; 201 void *param; 202 struct dma_async_tx_descriptor *txd = &desc->txd; 203 204 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 205 ··· 209 param = txd->callback_param; 210 211 dwc_sync_desc_for_cpu(dwc, desc); 212 list_splice_init(&desc->tx_list, &dwc->free_list); 213 list_move(&desc->desc_node, &dwc->free_list); 214 ··· 265 * Submit queued descriptors ASAP, i.e. before we go through 266 * the completed ones. 267 */ 268 - if (!list_empty(&dwc->queue)) 269 - dwc_dostart(dwc, dwc_first_queued(dwc)); 270 list_splice_init(&dwc->active_list, &list); 271 - list_splice_init(&dwc->queue, &dwc->active_list); 272 273 list_for_each_entry_safe(desc, _desc, &list, desc_node) 274 dwc_descriptor_complete(dwc, desc); ··· 297 dwc_complete_all(dw, dwc); 298 return; 299 } 300 301 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); 302 ··· 329 cpu_relax(); 330 331 if (!list_empty(&dwc->queue)) { 332 - dwc_dostart(dwc, dwc_first_queued(dwc)); 333 - list_splice_init(&dwc->queue, &dwc->active_list); 334 } 335 } 336 ··· 356 */ 357 bad_desc = dwc_first_active(dwc); 358 list_del_init(&bad_desc->desc_node); 359 - list_splice_init(&dwc->queue, dwc->active_list.prev); 360 361 /* Clear the error flag and try to restart the controller */ 362 dma_writel(dw, CLEAR.ERROR, dwc->mask); ··· 551 if (list_empty(&dwc->active_list)) { 552 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 553 desc->txd.cookie); 554 - dwc_dostart(dwc, desc); 555 list_add_tail(&desc->desc_node, &dwc->active_list); 556 } else { 557 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 558 desc->txd.cookie); ··· 591 * We can be a lot more clever here, but this should take care 592 * of the most common optimization. 
593 */ 594 - if (!((src | dest | len) & 3)) 595 src_width = dst_width = 2; 596 else if (!((src | dest | len) & 1)) 597 src_width = dst_width = 1; 598 else 599 src_width = dst_width = 0; 600 601 - ctllo = DWC_DEFAULT_CTLLO 602 | DWC_CTLL_DST_WIDTH(dst_width) 603 | DWC_CTLL_SRC_WIDTH(src_width) 604 | DWC_CTLL_DST_INC ··· 681 682 switch (direction) { 683 case DMA_TO_DEVICE: 684 - ctllo = (DWC_DEFAULT_CTLLO 685 | DWC_CTLL_DST_WIDTH(reg_width) 686 | DWC_CTLL_DST_FIX 687 | DWC_CTLL_SRC_INC 688 - | DWC_CTLL_FC_M2P); 689 reg = dws->tx_reg; 690 for_each_sg(sgl, sg, sg_len, i) { 691 struct dw_desc *desc; ··· 726 } 727 break; 728 case DMA_FROM_DEVICE: 729 - ctllo = (DWC_DEFAULT_CTLLO 730 | DWC_CTLL_SRC_WIDTH(reg_width) 731 | DWC_CTLL_DST_INC 732 | DWC_CTLL_SRC_FIX 733 - | DWC_CTLL_FC_P2M); 734 735 reg = dws->rx_reg; 736 for_each_sg(sgl, sg, sg_len, i) { ··· 846 847 ret = dma_async_is_complete(cookie, last_complete, last_used); 848 if (ret != DMA_SUCCESS) { 849 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 850 851 last_complete = dwc->completed; 852 last_used = chan->cookie; ··· 903 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); 904 905 cfghi = dws->cfg_hi; 906 - cfglo = dws->cfg_lo; 907 } 908 channel_writel(dwc, CFG_LO, cfglo); 909 channel_writel(dwc, CFG_HI, cfghi); 910 ··· 1143 case DMA_TO_DEVICE: 1144 desc->lli.dar = dws->tx_reg; 1145 desc->lli.sar = buf_addr + (period_len * i); 1146 - desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1147 | DWC_CTLL_DST_WIDTH(reg_width) 1148 | DWC_CTLL_SRC_WIDTH(reg_width) 1149 | DWC_CTLL_DST_FIX 1150 | DWC_CTLL_SRC_INC 1151 - | DWC_CTLL_FC_M2P 1152 | DWC_CTLL_INT_EN); 1153 break; 1154 case DMA_FROM_DEVICE: 1155 desc->lli.dar = buf_addr + (period_len * i); 1156 desc->lli.sar = dws->rx_reg; 1157 - desc->lli.ctllo = (DWC_DEFAULT_CTLLO 1158 | DWC_CTLL_SRC_WIDTH(reg_width) 1159 | DWC_CTLL_DST_WIDTH(reg_width) 1160 | DWC_CTLL_DST_INC 1161 | DWC_CTLL_SRC_FIX 1162 - | DWC_CTLL_FC_P2M 1163 | DWC_CTLL_INT_EN); 1164 break; 1165 default: ··· 1324 dwc->chan.device = &dw->dma; 1325 dwc->chan.cookie = dwc->completed = 1; 1326 dwc->chan.chan_id = i; 1327 - list_add_tail(&dwc->chan.device_node, &dw->dma.channels); 1328 1329 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1330 spin_lock_init(&dwc->lock); ··· 1362 1363 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1364 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1365 dw->dma.dev = &pdev->dev; 1366 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1367 dw->dma.device_free_chan_resources = dwc_free_chan_resources; ··· 1476 { 1477 return platform_driver_probe(&dw_driver, dw_probe); 1478 } 1479 - module_init(dw_init); 1480 1481 static void __exit dw_exit(void) 1482 {
··· 32 * which does not support descriptor writeback. 33 */ 34 35 + #define DWC_DEFAULT_CTLLO(private) ({ \ 36 + struct dw_dma_slave *__slave = (private); \ 37 + int dms = __slave ? __slave->dst_master : 0; \ 38 + int sms = __slave ? __slave->src_master : 1; \ 39 + u8 smsize = __slave ? __slave->src_msize : DW_DMA_MSIZE_16; \ 40 + u8 dmsize = __slave ? __slave->dst_msize : DW_DMA_MSIZE_16; \ 41 + \ 42 + (DWC_CTLL_DST_MSIZE(dmsize) \ 43 + | DWC_CTLL_SRC_MSIZE(smsize) \ 44 + | DWC_CTLL_LLP_D_EN \ 45 + | DWC_CTLL_LLP_S_EN \ 46 + | DWC_CTLL_DMS(dms) \ 47 + | DWC_CTLL_SMS(sms)); \ 48 + }) 49 50 /* 51 * This is configuration-dependent and usually a funny size like 4095. 52 * 53 * Note that this is a transfer count, i.e. if we transfer 32-bit 54 + * words, we can do 16380 bytes per descriptor. 55 * 56 * This parameter is also system-specific. 57 */ 58 + #define DWC_MAX_COUNT 4095U 59 60 /* 61 * Number of descriptors to allocate for each channel. This should be ··· 82 static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc) 83 { 84 return list_entry(dwc->active_list.next, struct dw_desc, desc_node); 85 } 86 87 static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc) ··· 201 dma_async_tx_callback callback; 202 void *param; 203 struct dma_async_tx_descriptor *txd = &desc->txd; 204 + struct dw_desc *child; 205 206 dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie); 207 ··· 209 param = txd->callback_param; 210 211 dwc_sync_desc_for_cpu(dwc, desc); 212 + 213 + /* async_tx_ack */ 214 + list_for_each_entry(child, &desc->tx_list, desc_node) 215 + async_tx_ack(&child->txd); 216 + async_tx_ack(&desc->txd); 217 + 218 list_splice_init(&desc->tx_list, &dwc->free_list); 219 list_move(&desc->desc_node, &dwc->free_list); 220 ··· 259 * Submit queued descriptors ASAP, i.e. before we go through 260 * the completed ones. 261 */ 262 list_splice_init(&dwc->active_list, &list); 263 + if (!list_empty(&dwc->queue)) { 264 + list_move(dwc->queue.next, &dwc->active_list); 265 + dwc_dostart(dwc, dwc_first_active(dwc)); 266 + } 267 268 list_for_each_entry_safe(desc, _desc, &list, desc_node) 269 dwc_descriptor_complete(dwc, desc); ··· 290 dwc_complete_all(dw, dwc); 291 return; 292 } 293 + 294 + if (list_empty(&dwc->active_list)) 295 + return; 296 297 dev_vdbg(chan2dev(&dwc->chan), "scan_descriptors: llp=0x%x\n", llp); 298 ··· 319 cpu_relax(); 320 321 if (!list_empty(&dwc->queue)) { 322 + list_move(dwc->queue.next, &dwc->active_list); 323 + dwc_dostart(dwc, dwc_first_active(dwc)); 324 } 325 } 326 ··· 346 */ 347 bad_desc = dwc_first_active(dwc); 348 list_del_init(&bad_desc->desc_node); 349 + list_move(dwc->queue.next, dwc->active_list.prev); 350 351 /* Clear the error flag and try to restart the controller */ 352 dma_writel(dw, CLEAR.ERROR, dwc->mask); ··· 541 if (list_empty(&dwc->active_list)) { 542 dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", 543 desc->txd.cookie); 544 list_add_tail(&desc->desc_node, &dwc->active_list); 545 + dwc_dostart(dwc, dwc_first_active(dwc)); 546 } else { 547 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", 548 desc->txd.cookie); ··· 581 * We can be a lot more clever here, but this should take care 582 * of the most common optimization. 
583 */ 584 + if (!((src | dest | len) & 7)) 585 + src_width = dst_width = 3; 586 + else if (!((src | dest | len) & 3)) 587 src_width = dst_width = 2; 588 else if (!((src | dest | len) & 1)) 589 src_width = dst_width = 1; 590 else 591 src_width = dst_width = 0; 592 593 + ctllo = DWC_DEFAULT_CTLLO(chan->private) 594 | DWC_CTLL_DST_WIDTH(dst_width) 595 | DWC_CTLL_SRC_WIDTH(src_width) 596 | DWC_CTLL_DST_INC ··· 669 670 switch (direction) { 671 case DMA_TO_DEVICE: 672 + ctllo = (DWC_DEFAULT_CTLLO(chan->private) 673 | DWC_CTLL_DST_WIDTH(reg_width) 674 | DWC_CTLL_DST_FIX 675 | DWC_CTLL_SRC_INC 676 + | DWC_CTLL_FC(dws->fc)); 677 reg = dws->tx_reg; 678 for_each_sg(sgl, sg, sg_len, i) { 679 struct dw_desc *desc; ··· 714 } 715 break; 716 case DMA_FROM_DEVICE: 717 + ctllo = (DWC_DEFAULT_CTLLO(chan->private) 718 | DWC_CTLL_SRC_WIDTH(reg_width) 719 | DWC_CTLL_DST_INC 720 | DWC_CTLL_SRC_FIX 721 + | DWC_CTLL_FC(dws->fc)); 722 723 reg = dws->rx_reg; 724 for_each_sg(sgl, sg, sg_len, i) { ··· 834 835 ret = dma_async_is_complete(cookie, last_complete, last_used); 836 if (ret != DMA_SUCCESS) { 837 + spin_lock_bh(&dwc->lock); 838 dwc_scan_descriptors(to_dw_dma(chan->device), dwc); 839 + spin_unlock_bh(&dwc->lock); 840 841 last_complete = dwc->completed; 842 last_used = chan->cookie; ··· 889 BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev); 890 891 cfghi = dws->cfg_hi; 892 + cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK; 893 } 894 + 895 + cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority); 896 + 897 channel_writel(dwc, CFG_LO, cfglo); 898 channel_writel(dwc, CFG_HI, cfghi); 899 ··· 1126 case DMA_TO_DEVICE: 1127 desc->lli.dar = dws->tx_reg; 1128 desc->lli.sar = buf_addr + (period_len * i); 1129 + desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) 1130 | DWC_CTLL_DST_WIDTH(reg_width) 1131 | DWC_CTLL_SRC_WIDTH(reg_width) 1132 | DWC_CTLL_DST_FIX 1133 | DWC_CTLL_SRC_INC 1134 + | DWC_CTLL_FC(dws->fc) 1135 | DWC_CTLL_INT_EN); 1136 break; 1137 case DMA_FROM_DEVICE: 1138 desc->lli.dar = buf_addr + (period_len * i); 1139 desc->lli.sar = dws->rx_reg; 1140 + desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private) 1141 | DWC_CTLL_SRC_WIDTH(reg_width) 1142 | DWC_CTLL_DST_WIDTH(reg_width) 1143 | DWC_CTLL_DST_INC 1144 | DWC_CTLL_SRC_FIX 1145 + | DWC_CTLL_FC(dws->fc) 1146 | DWC_CTLL_INT_EN); 1147 break; 1148 default: ··· 1307 dwc->chan.device = &dw->dma; 1308 dwc->chan.cookie = dwc->completed = 1; 1309 dwc->chan.chan_id = i; 1310 + if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1311 + list_add_tail(&dwc->chan.device_node, 1312 + &dw->dma.channels); 1313 + else 1314 + list_add(&dwc->chan.device_node, &dw->dma.channels); 1315 + 1316 + /* 7 is highest priority & 0 is lowest. */ 1317 + if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) 1318 + dwc->priority = 7 - i; 1319 + else 1320 + dwc->priority = i; 1321 1322 dwc->ch_regs = &__dw_regs(dw)->CHAN[i]; 1323 spin_lock_init(&dwc->lock); ··· 1335 1336 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); 1337 dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); 1338 + if (pdata->is_private) 1339 + dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); 1340 dw->dma.dev = &pdev->dev; 1341 dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; 1342 dw->dma.device_free_chan_resources = dwc_free_chan_resources; ··· 1447 { 1448 return platform_driver_probe(&dw_driver, dw_probe); 1449 } 1450 + subsys_initcall(dw_init); 1451 1452 static void __exit dw_exit(void) 1453 {
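
With these dw_dmac changes the master/msize pair and the flow controller come from the dw_dma_slave in chan->private, while channel priority and allocation order come from platform data. A hedged sketch of the platform data a board file might now provide; the channel count is illustrative and the platform-device wiring is omitted:

static struct dw_dma_platform_data example_dw_dmac_pdata = {
	.nr_channels		= 3,
	.is_private		= true,	/* reserve channels for slave transfers */
	.chan_allocation_order	= CHAN_ALLOCATION_ASCENDING,
	.chan_priority		= CHAN_PRIORITY_ASCENDING,
};

When chan->private is NULL (the plain memcpy case), DWC_DEFAULT_CTLLO() above falls back to dst_master 0, src_master 1 and DW_DMA_MSIZE_16.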
+8 -4
drivers/dma/dw_dmac_regs.h
···
86 #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87 #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88 #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89 #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
90 #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
91 #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
···
102 #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
103
104 /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
105 #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
106 #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
107 #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
···
137 struct dma_chan chan;
138 void __iomem *ch_regs;
139 u8 mask;
140
141 spinlock_t lock;
142
···
159 }
160
161 #define channel_readl(dwc, name) \
162 - __raw_readl(&(__dwc_regs(dwc)->name))
163 #define channel_writel(dwc, name, val) \
164 - __raw_writel((val), &(__dwc_regs(dwc)->name))
165
166 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
167 {
···
185 }
186
187 #define dma_readl(dw, name) \
188 - __raw_readl(&(__dw_regs(dw)->name))
189 #define dma_writel(dw, name, val) \
190 - __raw_writel((val), &(__dw_regs(dw)->name))
191
192 #define channel_set_bit(dw, reg, mask) \
193 dma_writel(dw, reg, ((mask) << 8) | (mask))
···
86 #define DWC_CTLL_SRC_MSIZE(n) ((n)<<14)
87 #define DWC_CTLL_S_GATH_EN (1 << 17) /* src gather, !FIX */
88 #define DWC_CTLL_D_SCAT_EN (1 << 18) /* dst scatter, !FIX */
89 + #define DWC_CTLL_FC(n) ((n) << 20)
90 #define DWC_CTLL_FC_M2M (0 << 20) /* mem-to-mem */
91 #define DWC_CTLL_FC_M2P (1 << 20) /* mem-to-periph */
92 #define DWC_CTLL_FC_P2M (2 << 20) /* periph-to-mem */
···
101 #define DWC_CTLH_BLOCK_TS_MASK 0x00000fff
102
103 /* Bitfields in CFG_LO. Platform-configurable bits are in <linux/dw_dmac.h> */
104 + #define DWC_CFGL_CH_PRIOR_MASK (0x7 << 5) /* priority mask */
105 + #define DWC_CFGL_CH_PRIOR(x) ((x) << 5) /* priority */
106 #define DWC_CFGL_CH_SUSP (1 << 8) /* pause xfer */
107 #define DWC_CFGL_FIFO_EMPTY (1 << 9) /* pause xfer */
108 #define DWC_CFGL_HS_DST (1 << 10) /* handshake w/dst */
···
134 struct dma_chan chan;
135 void __iomem *ch_regs;
136 u8 mask;
137 + u8 priority;
138
139 spinlock_t lock;
140
···
155 }
156
157 #define channel_readl(dwc, name) \
158 + readl(&(__dwc_regs(dwc)->name))
159 #define channel_writel(dwc, name, val) \
160 + writel((val), &(__dwc_regs(dwc)->name))
161
162 static inline struct dw_dma_chan *to_dw_dma_chan(struct dma_chan *chan)
163 {
···
181 }
182
183 #define dma_readl(dw, name) \
184 + readl(&(__dw_regs(dw)->name))
185 #define dma_writel(dw, name, val) \
186 + writel((val), &(__dw_regs(dw)->name))
187
188 #define channel_set_bit(dw, reg, mask) \
189 dma_writel(dw, reg, ((mask) << 8) | (mask))
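
The accessors and CFG_LO bits defined here are what the driver composes at run time. As an illustration only (not code from this merge), one way the readl/writel based macros and the CH_SUSP/FIFO_EMPTY bits can be used to pause a channel and wait for its FIFO to drain:

/* Illustrative only: suspend a channel and wait until its FIFO is empty */
static void example_channel_suspend(struct dw_dma_chan *dwc)
{
	u32 cfglo = channel_readl(dwc, CFG_LO);

	channel_writel(dwc, CFG_LO, cfglo | DWC_CFGL_CH_SUSP);
	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY))
		cpu_relax();
}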
+348 -297
drivers/dma/fsldma.c
··· 37 38 #include "fsldma.h" 39 40 - static const char msg_ld_oom[] = "No free memory for link descriptor\n"; 41 42 - static void dma_init(struct fsldma_chan *chan) 43 - { 44 - /* Reset the channel */ 45 - DMA_OUT(chan, &chan->regs->mr, 0, 32); 46 47 - switch (chan->feature & FSL_DMA_IP_MASK) { 48 - case FSL_DMA_IP_85XX: 49 - /* Set the channel to below modes: 50 - * EIE - Error interrupt enable 51 - * EOSIE - End of segments interrupt enable (basic mode) 52 - * EOLNIE - End of links interrupt enable 53 - * BWC - Bandwidth sharing among channels 54 - */ 55 - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC 56 - | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE 57 - | FSL_DMA_MR_EOSIE, 32); 58 - break; 59 - case FSL_DMA_IP_83XX: 60 - /* Set the channel to below modes: 61 - * EOTIE - End-of-transfer interrupt enable 62 - * PRC_RM - PCI read multiple 63 - */ 64 - DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE 65 - | FSL_DMA_MR_PRC_RM, 32); 66 - break; 67 - } 68 - } 69 70 static void set_sr(struct fsldma_chan *chan, u32 val) 71 { ··· 56 static u32 get_sr(struct fsldma_chan *chan) 57 { 58 return DMA_IN(chan, &chan->regs->sr, 32); 59 - } 60 - 61 - static void set_desc_cnt(struct fsldma_chan *chan, 62 - struct fsl_dma_ld_hw *hw, u32 count) 63 - { 64 - hw->count = CPU_TO_DMA(chan, count, 32); 65 - } 66 - 67 - static void set_desc_src(struct fsldma_chan *chan, 68 - struct fsl_dma_ld_hw *hw, dma_addr_t src) 69 - { 70 - u64 snoop_bits; 71 - 72 - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 73 - ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; 74 - hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); 75 - } 76 - 77 - static void set_desc_dst(struct fsldma_chan *chan, 78 - struct fsl_dma_ld_hw *hw, dma_addr_t dst) 79 - { 80 - u64 snoop_bits; 81 - 82 - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 83 - ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; 84 - hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); 85 - } 86 - 87 - static void set_desc_next(struct fsldma_chan *chan, 88 - struct fsl_dma_ld_hw *hw, dma_addr_t next) 89 - { 90 - u64 snoop_bits; 91 - 92 - snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 93 - ? 
FSL_DMA_SNEN : 0; 94 - hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); 95 } 96 97 static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) ··· 68 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; 69 } 70 71 - static dma_addr_t get_ndar(struct fsldma_chan *chan) 72 - { 73 - return DMA_IN(chan, &chan->regs->ndar, 64); 74 - } 75 - 76 static u32 get_bcr(struct fsldma_chan *chan) 77 { 78 return DMA_IN(chan, &chan->regs->bcr, 32); 79 } 80 81 - static int dma_is_idle(struct fsldma_chan *chan) 82 { 83 - u32 sr = get_sr(chan); 84 - return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); 85 } 86 87 - static void dma_start(struct fsldma_chan *chan) 88 { 89 - u32 mode; 90 - 91 - mode = DMA_IN(chan, &chan->regs->mr, 32); 92 - 93 - if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 94 - if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { 95 - DMA_OUT(chan, &chan->regs->bcr, 0, 32); 96 - mode |= FSL_DMA_MR_EMP_EN; 97 - } else { 98 - mode &= ~FSL_DMA_MR_EMP_EN; 99 - } 100 - } 101 - 102 - if (chan->feature & FSL_DMA_CHAN_START_EXT) 103 - mode |= FSL_DMA_MR_EMS_EN; 104 - else 105 - mode |= FSL_DMA_MR_CS; 106 - 107 - DMA_OUT(chan, &chan->regs->mr, mode, 32); 108 } 109 110 - static void dma_halt(struct fsldma_chan *chan) 111 { 112 - u32 mode; 113 - int i; 114 115 - mode = DMA_IN(chan, &chan->regs->mr, 32); 116 - mode |= FSL_DMA_MR_CA; 117 - DMA_OUT(chan, &chan->regs->mr, mode, 32); 118 - 119 - mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA); 120 - DMA_OUT(chan, &chan->regs->mr, mode, 32); 121 - 122 - for (i = 0; i < 100; i++) { 123 - if (dma_is_idle(chan)) 124 - return; 125 - 126 - udelay(10); 127 - } 128 - 129 - if (!dma_is_idle(chan)) 130 - dev_err(chan->dev, "DMA halt timeout!\n"); 131 } 132 133 - static void set_ld_eol(struct fsldma_chan *chan, 134 - struct fsl_desc_sw *desc) 135 { 136 u64 snoop_bits; 137 ··· 148 desc->hw.next_ln_addr = CPU_TO_DMA(chan, 149 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL 150 | snoop_bits, 64); 151 } 152 153 /** ··· 375 chan->feature &= ~FSL_DMA_CHAN_START_EXT; 376 } 377 378 - static void append_ld_queue(struct fsldma_chan *chan, 379 - struct fsl_desc_sw *desc) 380 { 381 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); 382 ··· 416 cookie = chan->common.cookie; 417 list_for_each_entry(child, &desc->tx_list, node) { 418 cookie++; 419 - if (cookie < 0) 420 - cookie = 1; 421 422 child->async_tx.cookie = cookie; 423 } ··· 438 * 439 * Return - The descriptor allocated. NULL for failed. 440 */ 441 - static struct fsl_desc_sw *fsl_dma_alloc_descriptor( 442 - struct fsldma_chan *chan) 443 { 444 struct fsl_desc_sw *desc; 445 dma_addr_t pdesc; 446 447 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); 448 if (!desc) { 449 - dev_dbg(chan->dev, "out of memory for link desc\n"); 450 return NULL; 451 } 452 ··· 455 desc->async_tx.tx_submit = fsl_dma_tx_submit; 456 desc->async_tx.phys = pdesc; 457 458 return desc; 459 } 460 - 461 462 /** 463 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. ··· 482 * We need the descriptor to be aligned to 32bytes 483 * for meeting FSL DMA specification requirement. 
484 */ 485 - chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", 486 - chan->dev, 487 sizeof(struct fsl_desc_sw), 488 __alignof__(struct fsl_desc_sw), 0); 489 if (!chan->desc_pool) { 490 - dev_err(chan->dev, "unable to allocate channel %d " 491 - "descriptor pool\n", chan->id); 492 return -ENOMEM; 493 } 494 ··· 508 509 list_for_each_entry_safe(desc, _desc, list, node) { 510 list_del(&desc->node); 511 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 512 } 513 } ··· 522 523 list_for_each_entry_safe_reverse(desc, _desc, list, node) { 524 list_del(&desc->node); 525 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 526 } 527 } ··· 538 struct fsldma_chan *chan = to_fsl_chan(dchan); 539 unsigned long flags; 540 541 - dev_dbg(chan->dev, "Free all channel resources.\n"); 542 spin_lock_irqsave(&chan->desc_lock, flags); 543 fsldma_free_desc_list(chan, &chan->ld_pending); 544 fsldma_free_desc_list(chan, &chan->ld_running); ··· 561 562 new = fsl_dma_alloc_descriptor(chan); 563 if (!new) { 564 - dev_err(chan->dev, msg_ld_oom); 565 return NULL; 566 } 567 ··· 571 /* Insert the link descriptor to the LD ring */ 572 list_add_tail(&new->node, &new->tx_list); 573 574 - /* Set End-of-link to the last link descriptor of new list*/ 575 set_ld_eol(chan, new); 576 577 return &new->async_tx; 578 } 579 580 - static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy( 581 - struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src, 582 size_t len, unsigned long flags) 583 { 584 struct fsldma_chan *chan; ··· 599 /* Allocate the link descriptor from DMA pool */ 600 new = fsl_dma_alloc_descriptor(chan); 601 if (!new) { 602 - dev_err(chan->dev, msg_ld_oom); 603 goto fail; 604 } 605 - #ifdef FSL_DMA_LD_DEBUG 606 - dev_dbg(chan->dev, "new link desc alloc %p\n", new); 607 - #endif 608 609 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 610 ··· 629 new->async_tx.flags = flags; /* client is in control of this ack */ 630 new->async_tx.cookie = -EBUSY; 631 632 - /* Set End-of-link to the last link descriptor of new list*/ 633 set_ld_eol(chan, new); 634 635 return &first->async_tx; ··· 684 /* allocate and populate the descriptor */ 685 new = fsl_dma_alloc_descriptor(chan); 686 if (!new) { 687 - dev_err(chan->dev, msg_ld_oom); 688 goto fail; 689 } 690 - #ifdef FSL_DMA_LD_DEBUG 691 - dev_dbg(chan->dev, "new link desc alloc %p\n", new); 692 - #endif 693 694 set_desc_cnt(chan, &new->hw, len); 695 set_desc_src(chan, &new->hw, src); ··· 798 799 switch (cmd) { 800 case DMA_TERMINATE_ALL: 801 /* Halt the DMA engine */ 802 dma_halt(chan); 803 - 804 - spin_lock_irqsave(&chan->desc_lock, flags); 805 806 /* Remove and free all of the descriptors in the LD queue */ 807 fsldma_free_desc_list(chan, &chan->ld_pending); 808 fsldma_free_desc_list(chan, &chan->ld_running); 809 810 spin_unlock_irqrestore(&chan->desc_lock, flags); 811 return 0; ··· 844 } 845 846 /** 847 - * fsl_dma_update_completed_cookie - Update the completed cookie. 
848 - * @chan : Freescale DMA channel 849 - * 850 - * CONTEXT: hardirq 851 - */ 852 - static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan) 853 - { 854 - struct fsl_desc_sw *desc; 855 - unsigned long flags; 856 - dma_cookie_t cookie; 857 - 858 - spin_lock_irqsave(&chan->desc_lock, flags); 859 - 860 - if (list_empty(&chan->ld_running)) { 861 - dev_dbg(chan->dev, "no running descriptors\n"); 862 - goto out_unlock; 863 - } 864 - 865 - /* Get the last descriptor, update the cookie to that */ 866 - desc = to_fsl_desc(chan->ld_running.prev); 867 - if (dma_is_idle(chan)) 868 - cookie = desc->async_tx.cookie; 869 - else { 870 - cookie = desc->async_tx.cookie - 1; 871 - if (unlikely(cookie < DMA_MIN_COOKIE)) 872 - cookie = DMA_MAX_COOKIE; 873 - } 874 - 875 - chan->completed_cookie = cookie; 876 - 877 - out_unlock: 878 - spin_unlock_irqrestore(&chan->desc_lock, flags); 879 - } 880 - 881 - /** 882 - * fsldma_desc_status - Check the status of a descriptor 883 * @chan: Freescale DMA channel 884 - * @desc: DMA SW descriptor 885 * 886 - * This function will return the status of the given descriptor 887 */ 888 - static enum dma_status fsldma_desc_status(struct fsldma_chan *chan, 889 - struct fsl_desc_sw *desc) 890 { 891 - return dma_async_is_complete(desc->async_tx.cookie, 892 - chan->completed_cookie, 893 - chan->common.cookie); 894 - } 895 896 - /** 897 - * fsl_chan_ld_cleanup - Clean up link descriptors 898 - * @chan : Freescale DMA channel 899 - * 900 - * This function clean up the ld_queue of DMA channel. 901 - */ 902 - static void fsl_chan_ld_cleanup(struct fsldma_chan *chan) 903 - { 904 - struct fsl_desc_sw *desc, *_desc; 905 - unsigned long flags; 906 - 907 - spin_lock_irqsave(&chan->desc_lock, flags); 908 - 909 - dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie); 910 - list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) { 911 - dma_async_tx_callback callback; 912 - void *callback_param; 913 - 914 - if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS) 915 - break; 916 - 917 - /* Remove from the list of running transactions */ 918 - list_del(&desc->node); 919 - 920 - /* Run the link descriptor callback function */ 921 - callback = desc->async_tx.callback; 922 - callback_param = desc->async_tx.callback_param; 923 - if (callback) { 924 - spin_unlock_irqrestore(&chan->desc_lock, flags); 925 - dev_dbg(chan->dev, "LD %p callback\n", desc); 926 - callback(callback_param); 927 - spin_lock_irqsave(&chan->desc_lock, flags); 928 - } 929 - 930 - /* Run any dependencies, then free the descriptor */ 931 - dma_run_dependencies(&desc->async_tx); 932 - dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 933 } 934 935 - spin_unlock_irqrestore(&chan->desc_lock, flags); 936 } 937 938 /** 939 * fsl_chan_xfer_ld_queue - transfer any pending transactions 940 * @chan : Freescale DMA channel 941 * 942 - * This will make sure that any pending transactions will be run. 943 - * If the DMA controller is idle, it will be started. Otherwise, 944 - * the DMA controller's interrupt handler will start any pending 945 - * transactions when it becomes idle. 
946 */ 947 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) 948 { 949 struct fsl_desc_sw *desc; 950 - unsigned long flags; 951 - 952 - spin_lock_irqsave(&chan->desc_lock, flags); 953 954 /* 955 * If the list of pending descriptors is empty, then we 956 * don't need to do any work at all 957 */ 958 if (list_empty(&chan->ld_pending)) { 959 - dev_dbg(chan->dev, "no pending LDs\n"); 960 - goto out_unlock; 961 } 962 963 /* 964 - * The DMA controller is not idle, which means the interrupt 965 - * handler will start any queued transactions when it runs 966 - * at the end of the current transaction 967 */ 968 - if (!dma_is_idle(chan)) { 969 - dev_dbg(chan->dev, "DMA controller still busy\n"); 970 - goto out_unlock; 971 } 972 - 973 - /* 974 - * TODO: 975 - * make sure the dma_halt() function really un-wedges the 976 - * controller as much as possible 977 - */ 978 - dma_halt(chan); 979 980 /* 981 * If there are some link descriptors which have not been ··· 933 * Move all elements from the queue of pending transactions 934 * onto the list of running transactions 935 */ 936 desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); 937 list_splice_tail_init(&chan->ld_pending, &chan->ld_running); 938 939 /* 940 * Program the descriptor's address into the DMA controller, 941 * then start the DMA transaction 942 */ 943 set_cdar(chan, desc->async_tx.phys); 944 - dma_start(chan); 945 946 - out_unlock: 947 - spin_unlock_irqrestore(&chan->desc_lock, flags); 948 } 949 950 /** ··· 968 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) 969 { 970 struct fsldma_chan *chan = to_fsl_chan(dchan); 971 fsl_chan_xfer_ld_queue(chan); 972 } 973 974 /** ··· 984 struct dma_tx_state *txstate) 985 { 986 struct fsldma_chan *chan = to_fsl_chan(dchan); 987 - dma_cookie_t last_used; 988 dma_cookie_t last_complete; 989 990 - fsl_chan_ld_cleanup(chan); 991 992 - last_used = dchan->cookie; 993 last_complete = chan->completed_cookie; 994 995 dma_set_tx_state(txstate, last_complete, last_used, 0); 996 - 997 return dma_async_is_complete(cookie, last_complete, last_used); 998 } 999 ··· 1006 static irqreturn_t fsldma_chan_irq(int irq, void *data) 1007 { 1008 struct fsldma_chan *chan = data; 1009 - int update_cookie = 0; 1010 - int xfer_ld_q = 0; 1011 u32 stat; 1012 1013 /* save and clear the status register */ 1014 stat = get_sr(chan); 1015 set_sr(chan, stat); 1016 - dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat); 1017 1018 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); 1019 if (!stat) 1020 return IRQ_NONE; 1021 1022 if (stat & FSL_DMA_SR_TE) 1023 - dev_err(chan->dev, "Transfer Error!\n"); 1024 1025 /* 1026 * Programming Error ··· 1027 * triger a PE interrupt. 1028 */ 1029 if (stat & FSL_DMA_SR_PE) { 1030 - dev_dbg(chan->dev, "irq: Programming Error INT\n"); 1031 - if (get_bcr(chan) == 0) { 1032 - /* BCR register is 0, this is a DMA_INTERRUPT async_tx. 1033 - * Now, update the completed cookie, and continue the 1034 - * next uncompleted transfer. 1035 - */ 1036 - update_cookie = 1; 1037 - xfer_ld_q = 1; 1038 - } 1039 stat &= ~FSL_DMA_SR_PE; 1040 - } 1041 - 1042 - /* 1043 - * If the link descriptor segment transfer finishes, 1044 - * we will recycle the used descriptor. 
1045 - */ 1046 - if (stat & FSL_DMA_SR_EOSI) { 1047 - dev_dbg(chan->dev, "irq: End-of-segments INT\n"); 1048 - dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n", 1049 - (unsigned long long)get_cdar(chan), 1050 - (unsigned long long)get_ndar(chan)); 1051 - stat &= ~FSL_DMA_SR_EOSI; 1052 - update_cookie = 1; 1053 } 1054 1055 /* ··· 1038 * and start the next transfer if it exist. 1039 */ 1040 if (stat & FSL_DMA_SR_EOCDI) { 1041 - dev_dbg(chan->dev, "irq: End-of-Chain link INT\n"); 1042 stat &= ~FSL_DMA_SR_EOCDI; 1043 - update_cookie = 1; 1044 - xfer_ld_q = 1; 1045 } 1046 1047 /* ··· 1048 * prepare next transfer. 1049 */ 1050 if (stat & FSL_DMA_SR_EOLNI) { 1051 - dev_dbg(chan->dev, "irq: End-of-link INT\n"); 1052 stat &= ~FSL_DMA_SR_EOLNI; 1053 - xfer_ld_q = 1; 1054 } 1055 1056 - if (update_cookie) 1057 - fsl_dma_update_completed_cookie(chan); 1058 - if (xfer_ld_q) 1059 - fsl_chan_xfer_ld_queue(chan); 1060 - if (stat) 1061 - dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat); 1062 1063 - dev_dbg(chan->dev, "irq: Exit\n"); 1064 tasklet_schedule(&chan->tasklet); 1065 return IRQ_HANDLED; 1066 } 1067 1068 static void dma_do_tasklet(unsigned long data) 1069 { 1070 struct fsldma_chan *chan = (struct fsldma_chan *)data; 1071 - fsl_chan_ld_cleanup(chan); 1072 } 1073 1074 static irqreturn_t fsldma_ctrl_irq(int irq, void *data) ··· 1168 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1169 chan = fdev->chan[i]; 1170 if (chan && chan->irq != NO_IRQ) { 1171 - dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id); 1172 free_irq(chan->irq, chan); 1173 } 1174 } ··· 1195 continue; 1196 1197 if (chan->irq == NO_IRQ) { 1198 - dev_err(fdev->dev, "no interrupts property defined for " 1199 - "DMA channel %d. Please fix your " 1200 - "device tree\n", chan->id); 1201 ret = -ENODEV; 1202 goto out_unwind; 1203 } 1204 1205 - dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id); 1206 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, 1207 "fsldma-chan", chan); 1208 if (ret) { 1209 - dev_err(fdev->dev, "unable to request IRQ for DMA " 1210 - "channel %d\n", chan->id); 1211 goto out_unwind; 1212 } 1213 } ··· 1279 1280 fdev->chan[chan->id] = chan; 1281 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); 1282 1283 /* Initialize the channel */ 1284 dma_init(chan); ··· 1300 spin_lock_init(&chan->desc_lock); 1301 INIT_LIST_HEAD(&chan->ld_pending); 1302 INIT_LIST_HEAD(&chan->ld_running); 1303 1304 chan->common.device = &fdev->common; 1305
··· 37 38 #include "fsldma.h" 39 40 + #define chan_dbg(chan, fmt, arg...) \ 41 + dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg) 42 + #define chan_err(chan, fmt, arg...) \ 43 + dev_err(chan->dev, "%s: " fmt, chan->name, ##arg) 44 45 + static const char msg_ld_oom[] = "No free memory for link descriptor"; 46 47 + /* 48 + * Register Helpers 49 + */ 50 51 static void set_sr(struct fsldma_chan *chan, u32 val) 52 { ··· 75 static u32 get_sr(struct fsldma_chan *chan) 76 { 77 return DMA_IN(chan, &chan->regs->sr, 32); 78 } 79 80 static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr) ··· 123 return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN; 124 } 125 126 static u32 get_bcr(struct fsldma_chan *chan) 127 { 128 return DMA_IN(chan, &chan->regs->bcr, 32); 129 } 130 131 + /* 132 + * Descriptor Helpers 133 + */ 134 + 135 + static void set_desc_cnt(struct fsldma_chan *chan, 136 + struct fsl_dma_ld_hw *hw, u32 count) 137 { 138 + hw->count = CPU_TO_DMA(chan, count, 32); 139 } 140 141 + static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc) 142 { 143 + return DMA_TO_CPU(chan, desc->hw.count, 32); 144 } 145 146 + static void set_desc_src(struct fsldma_chan *chan, 147 + struct fsl_dma_ld_hw *hw, dma_addr_t src) 148 { 149 + u64 snoop_bits; 150 151 + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 152 + ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; 153 + hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64); 154 } 155 156 + static dma_addr_t get_desc_src(struct fsldma_chan *chan, 157 + struct fsl_desc_sw *desc) 158 + { 159 + u64 snoop_bits; 160 + 161 + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 162 + ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0; 163 + return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits; 164 + } 165 + 166 + static void set_desc_dst(struct fsldma_chan *chan, 167 + struct fsl_dma_ld_hw *hw, dma_addr_t dst) 168 + { 169 + u64 snoop_bits; 170 + 171 + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 172 + ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; 173 + hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64); 174 + } 175 + 176 + static dma_addr_t get_desc_dst(struct fsldma_chan *chan, 177 + struct fsl_desc_sw *desc) 178 + { 179 + u64 snoop_bits; 180 + 181 + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) 182 + ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0; 183 + return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits; 184 + } 185 + 186 + static void set_desc_next(struct fsldma_chan *chan, 187 + struct fsl_dma_ld_hw *hw, dma_addr_t next) 188 + { 189 + u64 snoop_bits; 190 + 191 + snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX) 192 + ? 
FSL_DMA_SNEN : 0; 193 + hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64); 194 + } 195 + 196 + static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc) 197 { 198 u64 snoop_bits; 199 ··· 196 desc->hw.next_ln_addr = CPU_TO_DMA(chan, 197 DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL 198 | snoop_bits, 64); 199 + } 200 + 201 + /* 202 + * DMA Engine Hardware Control Helpers 203 + */ 204 + 205 + static void dma_init(struct fsldma_chan *chan) 206 + { 207 + /* Reset the channel */ 208 + DMA_OUT(chan, &chan->regs->mr, 0, 32); 209 + 210 + switch (chan->feature & FSL_DMA_IP_MASK) { 211 + case FSL_DMA_IP_85XX: 212 + /* Set the channel to below modes: 213 + * EIE - Error interrupt enable 214 + * EOLNIE - End of links interrupt enable 215 + * BWC - Bandwidth sharing among channels 216 + */ 217 + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC 218 + | FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32); 219 + break; 220 + case FSL_DMA_IP_83XX: 221 + /* Set the channel to below modes: 222 + * EOTIE - End-of-transfer interrupt enable 223 + * PRC_RM - PCI read multiple 224 + */ 225 + DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE 226 + | FSL_DMA_MR_PRC_RM, 32); 227 + break; 228 + } 229 + } 230 + 231 + static int dma_is_idle(struct fsldma_chan *chan) 232 + { 233 + u32 sr = get_sr(chan); 234 + return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH); 235 + } 236 + 237 + /* 238 + * Start the DMA controller 239 + * 240 + * Preconditions: 241 + * - the CDAR register must point to the start descriptor 242 + * - the MRn[CS] bit must be cleared 243 + */ 244 + static void dma_start(struct fsldma_chan *chan) 245 + { 246 + u32 mode; 247 + 248 + mode = DMA_IN(chan, &chan->regs->mr, 32); 249 + 250 + if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) { 251 + DMA_OUT(chan, &chan->regs->bcr, 0, 32); 252 + mode |= FSL_DMA_MR_EMP_EN; 253 + } else { 254 + mode &= ~FSL_DMA_MR_EMP_EN; 255 + } 256 + 257 + if (chan->feature & FSL_DMA_CHAN_START_EXT) { 258 + mode |= FSL_DMA_MR_EMS_EN; 259 + } else { 260 + mode &= ~FSL_DMA_MR_EMS_EN; 261 + mode |= FSL_DMA_MR_CS; 262 + } 263 + 264 + DMA_OUT(chan, &chan->regs->mr, mode, 32); 265 + } 266 + 267 + static void dma_halt(struct fsldma_chan *chan) 268 + { 269 + u32 mode; 270 + int i; 271 + 272 + /* read the mode register */ 273 + mode = DMA_IN(chan, &chan->regs->mr, 32); 274 + 275 + /* 276 + * The 85xx controller supports channel abort, which will stop 277 + * the current transfer. On 83xx, this bit is the transfer error 278 + * mask bit, which should not be changed. 
279 + */ 280 + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 281 + mode |= FSL_DMA_MR_CA; 282 + DMA_OUT(chan, &chan->regs->mr, mode, 32); 283 + 284 + mode &= ~FSL_DMA_MR_CA; 285 + } 286 + 287 + /* stop the DMA controller */ 288 + mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN); 289 + DMA_OUT(chan, &chan->regs->mr, mode, 32); 290 + 291 + /* wait for the DMA controller to become idle */ 292 + for (i = 0; i < 100; i++) { 293 + if (dma_is_idle(chan)) 294 + return; 295 + 296 + udelay(10); 297 + } 298 + 299 + if (!dma_is_idle(chan)) 300 + chan_err(chan, "DMA halt timeout!\n"); 301 } 302 303 /** ··· 321 chan->feature &= ~FSL_DMA_CHAN_START_EXT; 322 } 323 324 + static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc) 325 { 326 struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev); 327 ··· 363 cookie = chan->common.cookie; 364 list_for_each_entry(child, &desc->tx_list, node) { 365 cookie++; 366 + if (cookie < DMA_MIN_COOKIE) 367 + cookie = DMA_MIN_COOKIE; 368 369 child->async_tx.cookie = cookie; 370 } ··· 385 * 386 * Return - The descriptor allocated. NULL for failed. 387 */ 388 + static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan) 389 { 390 struct fsl_desc_sw *desc; 391 dma_addr_t pdesc; 392 393 desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc); 394 if (!desc) { 395 + chan_dbg(chan, "out of memory for link descriptor\n"); 396 return NULL; 397 } 398 ··· 403 desc->async_tx.tx_submit = fsl_dma_tx_submit; 404 desc->async_tx.phys = pdesc; 405 406 + #ifdef FSL_DMA_LD_DEBUG 407 + chan_dbg(chan, "LD %p allocated\n", desc); 408 + #endif 409 + 410 return desc; 411 } 412 413 /** 414 * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. ··· 427 * We need the descriptor to be aligned to 32bytes 428 * for meeting FSL DMA specification requirement. 
429 */ 430 + chan->desc_pool = dma_pool_create(chan->name, chan->dev, 431 sizeof(struct fsl_desc_sw), 432 __alignof__(struct fsl_desc_sw), 0); 433 if (!chan->desc_pool) { 434 + chan_err(chan, "unable to allocate descriptor pool\n"); 435 return -ENOMEM; 436 } 437 ··· 455 456 list_for_each_entry_safe(desc, _desc, list, node) { 457 list_del(&desc->node); 458 + #ifdef FSL_DMA_LD_DEBUG 459 + chan_dbg(chan, "LD %p free\n", desc); 460 + #endif 461 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 462 } 463 } ··· 466 467 list_for_each_entry_safe_reverse(desc, _desc, list, node) { 468 list_del(&desc->node); 469 + #ifdef FSL_DMA_LD_DEBUG 470 + chan_dbg(chan, "LD %p free\n", desc); 471 + #endif 472 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys); 473 } 474 } ··· 479 struct fsldma_chan *chan = to_fsl_chan(dchan); 480 unsigned long flags; 481 482 + chan_dbg(chan, "free all channel resources\n"); 483 spin_lock_irqsave(&chan->desc_lock, flags); 484 fsldma_free_desc_list(chan, &chan->ld_pending); 485 fsldma_free_desc_list(chan, &chan->ld_running); ··· 502 503 new = fsl_dma_alloc_descriptor(chan); 504 if (!new) { 505 + chan_err(chan, "%s\n", msg_ld_oom); 506 return NULL; 507 } 508 ··· 512 /* Insert the link descriptor to the LD ring */ 513 list_add_tail(&new->node, &new->tx_list); 514 515 + /* Set End-of-link to the last link descriptor of new list */ 516 set_ld_eol(chan, new); 517 518 return &new->async_tx; 519 } 520 521 + static struct dma_async_tx_descriptor * 522 + fsl_dma_prep_memcpy(struct dma_chan *dchan, 523 + dma_addr_t dma_dst, dma_addr_t dma_src, 524 size_t len, unsigned long flags) 525 { 526 struct fsldma_chan *chan; ··· 539 /* Allocate the link descriptor from DMA pool */ 540 new = fsl_dma_alloc_descriptor(chan); 541 if (!new) { 542 + chan_err(chan, "%s\n", msg_ld_oom); 543 goto fail; 544 } 545 546 copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT); 547 ··· 572 new->async_tx.flags = flags; /* client is in control of this ack */ 573 new->async_tx.cookie = -EBUSY; 574 575 + /* Set End-of-link to the last link descriptor of new list */ 576 set_ld_eol(chan, new); 577 578 return &first->async_tx; ··· 627 /* allocate and populate the descriptor */ 628 new = fsl_dma_alloc_descriptor(chan); 629 if (!new) { 630 + chan_err(chan, "%s\n", msg_ld_oom); 631 goto fail; 632 } 633 634 set_desc_cnt(chan, &new->hw, len); 635 set_desc_src(chan, &new->hw, src); ··· 744 745 switch (cmd) { 746 case DMA_TERMINATE_ALL: 747 + spin_lock_irqsave(&chan->desc_lock, flags); 748 + 749 /* Halt the DMA engine */ 750 dma_halt(chan); 751 752 /* Remove and free all of the descriptors in the LD queue */ 753 fsldma_free_desc_list(chan, &chan->ld_pending); 754 fsldma_free_desc_list(chan, &chan->ld_running); 755 + chan->idle = true; 756 757 spin_unlock_irqrestore(&chan->desc_lock, flags); 758 return 0; ··· 789 } 790 791 /** 792 + * fsldma_cleanup_descriptor - cleanup and free a single link descriptor 793 * @chan: Freescale DMA channel 794 + * @desc: descriptor to cleanup and free 795 * 796 + * This function is used on a descriptor which has been executed by the DMA 797 + * controller. It will run any callbacks, submit any dependencies, and then 798 + * free the descriptor. 
799 */ 800 + static void fsldma_cleanup_descriptor(struct fsldma_chan *chan, 801 + struct fsl_desc_sw *desc) 802 { 803 + struct dma_async_tx_descriptor *txd = &desc->async_tx; 804 + struct device *dev = chan->common.device->dev; 805 + dma_addr_t src = get_desc_src(chan, desc); 806 + dma_addr_t dst = get_desc_dst(chan, desc); 807 + u32 len = get_desc_cnt(chan, desc); 808 809 + /* Run the link descriptor callback function */ 810 + if (txd->callback) { 811 + #ifdef FSL_DMA_LD_DEBUG 812 + chan_dbg(chan, "LD %p callback\n", desc); 813 + #endif 814 + txd->callback(txd->callback_param); 815 } 816 817 + /* Run any dependencies */ 818 + dma_run_dependencies(txd); 819 + 820 + /* Unmap the dst buffer, if requested */ 821 + if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 822 + if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE) 823 + dma_unmap_single(dev, dst, len, DMA_FROM_DEVICE); 824 + else 825 + dma_unmap_page(dev, dst, len, DMA_FROM_DEVICE); 826 + } 827 + 828 + /* Unmap the src buffer, if requested */ 829 + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 830 + if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE) 831 + dma_unmap_single(dev, src, len, DMA_TO_DEVICE); 832 + else 833 + dma_unmap_page(dev, src, len, DMA_TO_DEVICE); 834 + } 835 + 836 + #ifdef FSL_DMA_LD_DEBUG 837 + chan_dbg(chan, "LD %p free\n", desc); 838 + #endif 839 + dma_pool_free(chan->desc_pool, desc, txd->phys); 840 } 841 842 /** 843 * fsl_chan_xfer_ld_queue - transfer any pending transactions 844 * @chan : Freescale DMA channel 845 * 846 + * HARDWARE STATE: idle 847 + * LOCKING: must hold chan->desc_lock 848 */ 849 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan) 850 { 851 struct fsl_desc_sw *desc; 852 853 /* 854 * If the list of pending descriptors is empty, then we 855 * don't need to do any work at all 856 */ 857 if (list_empty(&chan->ld_pending)) { 858 + chan_dbg(chan, "no pending LDs\n"); 859 + return; 860 } 861 862 /* 863 + * The DMA controller is not idle, which means that the interrupt 864 + * handler will start any queued transactions when it runs after 865 + * this transaction finishes 866 */ 867 + if (!chan->idle) { 868 + chan_dbg(chan, "DMA controller still busy\n"); 869 + return; 870 } 871 872 /* 873 * If there are some link descriptors which have not been ··· 931 * Move all elements from the queue of pending transactions 932 * onto the list of running transactions 933 */ 934 + chan_dbg(chan, "idle, starting controller\n"); 935 desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node); 936 list_splice_tail_init(&chan->ld_pending, &chan->ld_running); 937 + 938 + /* 939 + * The 85xx DMA controller doesn't clear the channel start bit 940 + * automatically at the end of a transfer. Therefore we must clear 941 + * it in software before starting the transfer. 
942 + */ 943 + if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) { 944 + u32 mode; 945 + 946 + mode = DMA_IN(chan, &chan->regs->mr, 32); 947 + mode &= ~FSL_DMA_MR_CS; 948 + DMA_OUT(chan, &chan->regs->mr, mode, 32); 949 + } 950 951 /* 952 * Program the descriptor's address into the DMA controller, 953 * then start the DMA transaction 954 */ 955 set_cdar(chan, desc->async_tx.phys); 956 + get_cdar(chan); 957 958 + dma_start(chan); 959 + chan->idle = false; 960 } 961 962 /** ··· 952 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan) 953 { 954 struct fsldma_chan *chan = to_fsl_chan(dchan); 955 + unsigned long flags; 956 + 957 + spin_lock_irqsave(&chan->desc_lock, flags); 958 fsl_chan_xfer_ld_queue(chan); 959 + spin_unlock_irqrestore(&chan->desc_lock, flags); 960 } 961 962 /** ··· 964 struct dma_tx_state *txstate) 965 { 966 struct fsldma_chan *chan = to_fsl_chan(dchan); 967 dma_cookie_t last_complete; 968 + dma_cookie_t last_used; 969 + unsigned long flags; 970 971 + spin_lock_irqsave(&chan->desc_lock, flags); 972 973 last_complete = chan->completed_cookie; 974 + last_used = dchan->cookie; 975 + 976 + spin_unlock_irqrestore(&chan->desc_lock, flags); 977 978 dma_set_tx_state(txstate, last_complete, last_used, 0); 979 return dma_async_is_complete(cookie, last_complete, last_used); 980 } 981 ··· 984 static irqreturn_t fsldma_chan_irq(int irq, void *data) 985 { 986 struct fsldma_chan *chan = data; 987 u32 stat; 988 989 /* save and clear the status register */ 990 stat = get_sr(chan); 991 set_sr(chan, stat); 992 + chan_dbg(chan, "irq: stat = 0x%x\n", stat); 993 994 + /* check that this was really our device */ 995 stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH); 996 if (!stat) 997 return IRQ_NONE; 998 999 if (stat & FSL_DMA_SR_TE) 1000 + chan_err(chan, "Transfer Error!\n"); 1001 1002 /* 1003 * Programming Error ··· 1006 * triger a PE interrupt. 1007 */ 1008 if (stat & FSL_DMA_SR_PE) { 1009 + chan_dbg(chan, "irq: Programming Error INT\n"); 1010 stat &= ~FSL_DMA_SR_PE; 1011 + if (get_bcr(chan) != 0) 1012 + chan_err(chan, "Programming Error!\n"); 1013 } 1014 1015 /* ··· 1036 * and start the next transfer if it exist. 1037 */ 1038 if (stat & FSL_DMA_SR_EOCDI) { 1039 + chan_dbg(chan, "irq: End-of-Chain link INT\n"); 1040 stat &= ~FSL_DMA_SR_EOCDI; 1041 } 1042 1043 /* ··· 1048 * prepare next transfer. 1049 */ 1050 if (stat & FSL_DMA_SR_EOLNI) { 1051 + chan_dbg(chan, "irq: End-of-link INT\n"); 1052 stat &= ~FSL_DMA_SR_EOLNI; 1053 } 1054 1055 + /* check that the DMA controller is really idle */ 1056 + if (!dma_is_idle(chan)) 1057 + chan_err(chan, "irq: controller not idle!\n"); 1058 1059 + /* check that we handled all of the bits */ 1060 + if (stat) 1061 + chan_err(chan, "irq: unhandled sr 0x%08x\n", stat); 1062 + 1063 + /* 1064 + * Schedule the tasklet to handle all cleanup of the current 1065 + * transaction. It will start a new transaction if there is 1066 + * one pending. 
1067 + */ 1068 tasklet_schedule(&chan->tasklet); 1069 + chan_dbg(chan, "irq: Exit\n"); 1070 return IRQ_HANDLED; 1071 } 1072 1073 static void dma_do_tasklet(unsigned long data) 1074 { 1075 struct fsldma_chan *chan = (struct fsldma_chan *)data; 1076 + struct fsl_desc_sw *desc, *_desc; 1077 + LIST_HEAD(ld_cleanup); 1078 + unsigned long flags; 1079 + 1080 + chan_dbg(chan, "tasklet entry\n"); 1081 + 1082 + spin_lock_irqsave(&chan->desc_lock, flags); 1083 + 1084 + /* update the cookie if we have some descriptors to cleanup */ 1085 + if (!list_empty(&chan->ld_running)) { 1086 + dma_cookie_t cookie; 1087 + 1088 + desc = to_fsl_desc(chan->ld_running.prev); 1089 + cookie = desc->async_tx.cookie; 1090 + 1091 + chan->completed_cookie = cookie; 1092 + chan_dbg(chan, "completed_cookie=%d\n", cookie); 1093 + } 1094 + 1095 + /* 1096 + * move the descriptors to a temporary list so we can drop the lock 1097 + * during the entire cleanup operation 1098 + */ 1099 + list_splice_tail_init(&chan->ld_running, &ld_cleanup); 1100 + 1101 + /* the hardware is now idle and ready for more */ 1102 + chan->idle = true; 1103 + 1104 + /* 1105 + * Start any pending transactions automatically 1106 + * 1107 + * In the ideal case, we keep the DMA controller busy while we go 1108 + * ahead and free the descriptors below. 1109 + */ 1110 + fsl_chan_xfer_ld_queue(chan); 1111 + spin_unlock_irqrestore(&chan->desc_lock, flags); 1112 + 1113 + /* Run the callback for each descriptor, in order */ 1114 + list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) { 1115 + 1116 + /* Remove from the list of transactions */ 1117 + list_del(&desc->node); 1118 + 1119 + /* Run all cleanup for this descriptor */ 1120 + fsldma_cleanup_descriptor(chan, desc); 1121 + } 1122 + 1123 + chan_dbg(chan, "tasklet exit\n"); 1124 } 1125 1126 static irqreturn_t fsldma_ctrl_irq(int irq, void *data) ··· 1116 for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) { 1117 chan = fdev->chan[i]; 1118 if (chan && chan->irq != NO_IRQ) { 1119 + chan_dbg(chan, "free per-channel IRQ\n"); 1120 free_irq(chan->irq, chan); 1121 } 1122 } ··· 1143 continue; 1144 1145 if (chan->irq == NO_IRQ) { 1146 + chan_err(chan, "interrupts property missing in device tree\n"); 1147 ret = -ENODEV; 1148 goto out_unwind; 1149 } 1150 1151 + chan_dbg(chan, "request per-channel IRQ\n"); 1152 ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED, 1153 "fsldma-chan", chan); 1154 if (ret) { 1155 + chan_err(chan, "unable to request per-channel IRQ\n"); 1156 goto out_unwind; 1157 } 1158 } ··· 1230 1231 fdev->chan[chan->id] = chan; 1232 tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan); 1233 + snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id); 1234 1235 /* Initialize the channel */ 1236 dma_init(chan); ··· 1250 spin_lock_init(&chan->desc_lock); 1251 INIT_LIST_HEAD(&chan->ld_pending); 1252 INIT_LIST_HEAD(&chan->ld_running); 1253 + chan->idle = true; 1254 1255 chan->common.device = &fdev->common; 1256
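With the changes above, fsldma_cleanup_descriptor() honours the generic async_tx flags: dependencies are kicked via dma_run_dependencies() and the source/destination buffers are unmapped unless the submitter opted out. A minimal client-side sketch of that contract follows; the function and variable names are illustrative, while the dmaengine calls and DMA_COMPL_* flags are the standard API this driver implements.

#include <linux/dmaengine.h>

/* Illustrative only: submit one memcpy on a DMA channel while keeping
 * responsibility for unmapping in the caller, so the driver's cleanup
 * path (fsldma_cleanup_descriptor above) skips dma_unmap_page/single. */
static int example_issue_memcpy(struct dma_chan *chan, dma_addr_t dst,
				dma_addr_t src, size_t len,
				dma_async_tx_callback done, void *ctx)
{
	struct dma_async_tx_descriptor *tx;
	unsigned long flags = DMA_CTRL_ACK |
			      DMA_COMPL_SKIP_SRC_UNMAP |
			      DMA_COMPL_SKIP_DEST_UNMAP;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
	if (!tx)
		return -ENOMEM;

	tx->callback = done;		/* runs from dma_do_tasklet() */
	tx->callback_param = ctx;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}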
+4 -2
drivers/dma/fsldma.h
··· 102 } __attribute__((aligned(32))); 103 104 struct fsldma_chan_regs { 105 - u32 mr; /* 0x00 - Mode Register */ 106 - u32 sr; /* 0x04 - Status Register */ 107 u64 cdar; /* 0x08 - Current descriptor address register */ 108 u64 sar; /* 0x10 - Source Address Register */ 109 u64 dar; /* 0x18 - Destination Address Register */ ··· 135 #define FSL_DMA_CHAN_START_EXT 0x00002000 136 137 struct fsldma_chan { 138 struct fsldma_chan_regs __iomem *regs; 139 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 140 spinlock_t desc_lock; /* Descriptor operation lock */ ··· 148 int id; /* Raw id of this channel */ 149 struct tasklet_struct tasklet; 150 u32 feature; 151 152 void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); 153 void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
··· 102 } __attribute__((aligned(32))); 103 104 struct fsldma_chan_regs { 105 + u32 mr; /* 0x00 - Mode Register */ 106 + u32 sr; /* 0x04 - Status Register */ 107 u64 cdar; /* 0x08 - Current descriptor address register */ 108 u64 sar; /* 0x10 - Source Address Register */ 109 u64 dar; /* 0x18 - Destination Address Register */ ··· 135 #define FSL_DMA_CHAN_START_EXT 0x00002000 136 137 struct fsldma_chan { 138 + char name[8]; /* Channel name */ 139 struct fsldma_chan_regs __iomem *regs; 140 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 141 spinlock_t desc_lock; /* Descriptor operation lock */ ··· 147 int id; /* Raw id of this channel */ 148 struct tasklet_struct tasklet; 149 u32 feature; 150 + bool idle; /* DMA controller is idle */ 151 152 void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable); 153 void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
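The new name field in struct fsldma_chan backs the chan_dbg()/chan_err() calls used throughout the fsldma.c hunks; the helpers themselves are defined outside the lines shown here. A plausible sketch, assuming the channel still keeps its struct device pointer in chan->dev:

/* Per-channel print helpers implied by chan->name (sketch, not the
 * verbatim upstream definition). */
#define chan_dbg(chan, fmt, arg...)					\
	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
#define chan_err(chan, fmt, arg...)					\
	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)

The 8-byte buffer comfortably holds names of the form "chan%d" for this controller's channel ids, and snprintf() truncates rather than overflows if that ever changes.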
+724
drivers/dma/mxs-dma.c
···
··· 1 + /* 2 + * Copyright 2011 Freescale Semiconductor, Inc. All Rights Reserved. 3 + * 4 + * Refer to drivers/dma/imx-sdma.c 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + 11 + #include <linux/init.h> 12 + #include <linux/types.h> 13 + #include <linux/mm.h> 14 + #include <linux/interrupt.h> 15 + #include <linux/clk.h> 16 + #include <linux/wait.h> 17 + #include <linux/sched.h> 18 + #include <linux/semaphore.h> 19 + #include <linux/device.h> 20 + #include <linux/dma-mapping.h> 21 + #include <linux/slab.h> 22 + #include <linux/platform_device.h> 23 + #include <linux/dmaengine.h> 24 + #include <linux/delay.h> 25 + 26 + #include <asm/irq.h> 27 + #include <mach/mxs.h> 28 + #include <mach/dma.h> 29 + #include <mach/common.h> 30 + 31 + /* 32 + * NOTE: The term "PIO" throughout the mxs-dma implementation means 33 + * PIO mode of mxs apbh-dma and apbx-dma. With this working mode, 34 + * dma can program the controller registers of peripheral devices. 35 + */ 36 + 37 + #define MXS_DMA_APBH 0 38 + #define MXS_DMA_APBX 1 39 + #define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH) 40 + 41 + #define APBH_VERSION_LATEST 3 42 + #define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST) 43 + 44 + #define HW_APBHX_CTRL0 0x000 45 + #define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) 46 + #define BM_APBH_CTRL0_APB_BURST_EN (1 << 28) 47 + #define BP_APBH_CTRL0_CLKGATE_CHANNEL 8 48 + #define BP_APBH_CTRL0_RESET_CHANNEL 16 49 + #define HW_APBHX_CTRL1 0x010 50 + #define HW_APBHX_CTRL2 0x020 51 + #define HW_APBHX_CHANNEL_CTRL 0x030 52 + #define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 53 + #define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800) 54 + #define HW_APBX_VERSION 0x800 55 + #define BP_APBHX_VERSION_MAJOR 24 56 + #define HW_APBHX_CHn_NXTCMDAR(n) \ 57 + (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70) 58 + #define HW_APBHX_CHn_SEMA(n) \ 59 + (((dma_is_apbh() && apbh_is_old()) ? 
0x080 : 0x140) + (n) * 0x70) 60 + 61 + /* 62 + * ccw bits definitions 63 + * 64 + * COMMAND: 0..1 (2) 65 + * CHAIN: 2 (1) 66 + * IRQ: 3 (1) 67 + * NAND_LOCK: 4 (1) - not implemented 68 + * NAND_WAIT4READY: 5 (1) - not implemented 69 + * DEC_SEM: 6 (1) 70 + * WAIT4END: 7 (1) 71 + * HALT_ON_TERMINATE: 8 (1) 72 + * TERMINATE_FLUSH: 9 (1) 73 + * RESERVED: 10..11 (2) 74 + * PIO_NUM: 12..15 (4) 75 + */ 76 + #define BP_CCW_COMMAND 0 77 + #define BM_CCW_COMMAND (3 << 0) 78 + #define CCW_CHAIN (1 << 2) 79 + #define CCW_IRQ (1 << 3) 80 + #define CCW_DEC_SEM (1 << 6) 81 + #define CCW_WAIT4END (1 << 7) 82 + #define CCW_HALT_ON_TERM (1 << 8) 83 + #define CCW_TERM_FLUSH (1 << 9) 84 + #define BP_CCW_PIO_NUM 12 85 + #define BM_CCW_PIO_NUM (0xf << 12) 86 + 87 + #define BF_CCW(value, field) (((value) << BP_CCW_##field) & BM_CCW_##field) 88 + 89 + #define MXS_DMA_CMD_NO_XFER 0 90 + #define MXS_DMA_CMD_WRITE 1 91 + #define MXS_DMA_CMD_READ 2 92 + #define MXS_DMA_CMD_DMA_SENSE 3 /* not implemented */ 93 + 94 + struct mxs_dma_ccw { 95 + u32 next; 96 + u16 bits; 97 + u16 xfer_bytes; 98 + #define MAX_XFER_BYTES 0xff00 99 + u32 bufaddr; 100 + #define MXS_PIO_WORDS 16 101 + u32 pio_words[MXS_PIO_WORDS]; 102 + }; 103 + 104 + #define NUM_CCW (int)(PAGE_SIZE / sizeof(struct mxs_dma_ccw)) 105 + 106 + struct mxs_dma_chan { 107 + struct mxs_dma_engine *mxs_dma; 108 + struct dma_chan chan; 109 + struct dma_async_tx_descriptor desc; 110 + struct tasklet_struct tasklet; 111 + int chan_irq; 112 + struct mxs_dma_ccw *ccw; 113 + dma_addr_t ccw_phys; 114 + dma_cookie_t last_completed; 115 + enum dma_status status; 116 + unsigned int flags; 117 + #define MXS_DMA_SG_LOOP (1 << 0) 118 + }; 119 + 120 + #define MXS_DMA_CHANNELS 16 121 + #define MXS_DMA_CHANNELS_MASK 0xffff 122 + 123 + struct mxs_dma_engine { 124 + int dev_id; 125 + unsigned int version; 126 + void __iomem *base; 127 + struct clk *clk; 128 + struct dma_device dma_device; 129 + struct device_dma_parameters dma_parms; 130 + struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 131 + }; 132 + 133 + static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 134 + { 135 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 136 + int chan_id = mxs_chan->chan.chan_id; 137 + 138 + if (dma_is_apbh() && apbh_is_old()) 139 + writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), 140 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 141 + else 142 + writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), 143 + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); 144 + } 145 + 146 + static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) 147 + { 148 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 149 + int chan_id = mxs_chan->chan.chan_id; 150 + 151 + /* set cmd_addr up */ 152 + writel(mxs_chan->ccw_phys, 153 + mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); 154 + 155 + /* enable apbh channel clock */ 156 + if (dma_is_apbh()) { 157 + if (apbh_is_old()) 158 + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), 159 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); 160 + else 161 + writel(1 << chan_id, 162 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); 163 + } 164 + 165 + /* write 1 to SEMA to kick off the channel */ 166 + writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); 167 + } 168 + 169 + static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 170 + { 171 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 172 + int chan_id = mxs_chan->chan.chan_id; 173 + 174 + /* disable apbh channel clock */ 175 + if (dma_is_apbh()) { 176 + if 
(apbh_is_old()) 177 + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), 178 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 179 + else 180 + writel(1 << chan_id, 181 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 182 + } 183 + 184 + mxs_chan->status = DMA_SUCCESS; 185 + } 186 + 187 + static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan) 188 + { 189 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 190 + int chan_id = mxs_chan->chan.chan_id; 191 + 192 + /* freeze the channel */ 193 + if (dma_is_apbh() && apbh_is_old()) 194 + writel(1 << chan_id, 195 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 196 + else 197 + writel(1 << chan_id, 198 + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); 199 + 200 + mxs_chan->status = DMA_PAUSED; 201 + } 202 + 203 + static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan) 204 + { 205 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 206 + int chan_id = mxs_chan->chan.chan_id; 207 + 208 + /* unfreeze the channel */ 209 + if (dma_is_apbh() && apbh_is_old()) 210 + writel(1 << chan_id, 211 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); 212 + else 213 + writel(1 << chan_id, 214 + mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR); 215 + 216 + mxs_chan->status = DMA_IN_PROGRESS; 217 + } 218 + 219 + static dma_cookie_t mxs_dma_assign_cookie(struct mxs_dma_chan *mxs_chan) 220 + { 221 + dma_cookie_t cookie = mxs_chan->chan.cookie; 222 + 223 + if (++cookie < 0) 224 + cookie = 1; 225 + 226 + mxs_chan->chan.cookie = cookie; 227 + mxs_chan->desc.cookie = cookie; 228 + 229 + return cookie; 230 + } 231 + 232 + static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan) 233 + { 234 + return container_of(chan, struct mxs_dma_chan, chan); 235 + } 236 + 237 + static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) 238 + { 239 + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(tx->chan); 240 + 241 + mxs_dma_enable_chan(mxs_chan); 242 + 243 + return mxs_dma_assign_cookie(mxs_chan); 244 + } 245 + 246 + static void mxs_dma_tasklet(unsigned long data) 247 + { 248 + struct mxs_dma_chan *mxs_chan = (struct mxs_dma_chan *) data; 249 + 250 + if (mxs_chan->desc.callback) 251 + mxs_chan->desc.callback(mxs_chan->desc.callback_param); 252 + } 253 + 254 + static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id) 255 + { 256 + struct mxs_dma_engine *mxs_dma = dev_id; 257 + u32 stat1, stat2; 258 + 259 + /* completion status */ 260 + stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); 261 + stat1 &= MXS_DMA_CHANNELS_MASK; 262 + writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR); 263 + 264 + /* error status */ 265 + stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); 266 + writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR); 267 + 268 + /* 269 + * When both the completion and error-of-termination bits are set at 270 + * the same time, we do not take it as an error. IOW, it only becomes 271 + * an error we need to handle here if either it is (1) a bus 272 + * error or (2) a termination error with no completion. 
273 + */ 274 + stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */ 275 + (~(stat2 >> MXS_DMA_CHANNELS) & stat2 & ~stat1); /* (2) */ 276 + 277 + /* combine error and completion status for checking */ 278 + stat1 = (stat2 << MXS_DMA_CHANNELS) | stat1; 279 + while (stat1) { 280 + int channel = fls(stat1) - 1; 281 + struct mxs_dma_chan *mxs_chan = 282 + &mxs_dma->mxs_chans[channel % MXS_DMA_CHANNELS]; 283 + 284 + if (channel >= MXS_DMA_CHANNELS) { 285 + dev_dbg(mxs_dma->dma_device.dev, 286 + "%s: error in channel %d\n", __func__, 287 + channel - MXS_DMA_CHANNELS); 288 + mxs_chan->status = DMA_ERROR; 289 + mxs_dma_reset_chan(mxs_chan); 290 + } else { 291 + if (mxs_chan->flags & MXS_DMA_SG_LOOP) 292 + mxs_chan->status = DMA_IN_PROGRESS; 293 + else 294 + mxs_chan->status = DMA_SUCCESS; 295 + } 296 + 297 + stat1 &= ~(1 << channel); 298 + 299 + if (mxs_chan->status == DMA_SUCCESS) 300 + mxs_chan->last_completed = mxs_chan->desc.cookie; 301 + 302 + /* schedule tasklet on this channel */ 303 + tasklet_schedule(&mxs_chan->tasklet); 304 + } 305 + 306 + return IRQ_HANDLED; 307 + } 308 + 309 + static int mxs_dma_alloc_chan_resources(struct dma_chan *chan) 310 + { 311 + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 312 + struct mxs_dma_data *data = chan->private; 313 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 314 + int ret; 315 + 316 + if (!data) 317 + return -EINVAL; 318 + 319 + mxs_chan->chan_irq = data->chan_irq; 320 + 321 + mxs_chan->ccw = dma_alloc_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 322 + &mxs_chan->ccw_phys, GFP_KERNEL); 323 + if (!mxs_chan->ccw) { 324 + ret = -ENOMEM; 325 + goto err_alloc; 326 + } 327 + 328 + memset(mxs_chan->ccw, 0, PAGE_SIZE); 329 + 330 + ret = request_irq(mxs_chan->chan_irq, mxs_dma_int_handler, 331 + 0, "mxs-dma", mxs_dma); 332 + if (ret) 333 + goto err_irq; 334 + 335 + ret = clk_enable(mxs_dma->clk); 336 + if (ret) 337 + goto err_clk; 338 + 339 + mxs_dma_reset_chan(mxs_chan); 340 + 341 + dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 342 + mxs_chan->desc.tx_submit = mxs_dma_tx_submit; 343 + 344 + /* the descriptor is ready */ 345 + async_tx_ack(&mxs_chan->desc); 346 + 347 + return 0; 348 + 349 + err_clk: 350 + free_irq(mxs_chan->chan_irq, mxs_dma); 351 + err_irq: 352 + dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 353 + mxs_chan->ccw, mxs_chan->ccw_phys); 354 + err_alloc: 355 + return ret; 356 + } 357 + 358 + static void mxs_dma_free_chan_resources(struct dma_chan *chan) 359 + { 360 + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 361 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 362 + 363 + mxs_dma_disable_chan(mxs_chan); 364 + 365 + free_irq(mxs_chan->chan_irq, mxs_dma); 366 + 367 + dma_free_coherent(mxs_dma->dma_device.dev, PAGE_SIZE, 368 + mxs_chan->ccw, mxs_chan->ccw_phys); 369 + 370 + clk_disable(mxs_dma->clk); 371 + } 372 + 373 + static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg( 374 + struct dma_chan *chan, struct scatterlist *sgl, 375 + unsigned int sg_len, enum dma_data_direction direction, 376 + unsigned long append) 377 + { 378 + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 379 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 380 + struct mxs_dma_ccw *ccw; 381 + struct scatterlist *sg; 382 + int i, j; 383 + u32 *pio; 384 + static int idx; 385 + 386 + if (mxs_chan->status == DMA_IN_PROGRESS && !append) 387 + return NULL; 388 + 389 + if (sg_len + (append ? 
idx : 0) > NUM_CCW) { 390 + dev_err(mxs_dma->dma_device.dev, 391 + "maximum number of sg exceeded: %d > %d\n", 392 + sg_len, NUM_CCW); 393 + goto err_out; 394 + } 395 + 396 + mxs_chan->status = DMA_IN_PROGRESS; 397 + mxs_chan->flags = 0; 398 + 399 + /* 400 + * If the sg is prepared with append flag set, the sg 401 + * will be appended to the last prepared sg. 402 + */ 403 + if (append) { 404 + BUG_ON(idx < 1); 405 + ccw = &mxs_chan->ccw[idx - 1]; 406 + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; 407 + ccw->bits |= CCW_CHAIN; 408 + ccw->bits &= ~CCW_IRQ; 409 + ccw->bits &= ~CCW_DEC_SEM; 410 + ccw->bits &= ~CCW_WAIT4END; 411 + } else { 412 + idx = 0; 413 + } 414 + 415 + if (direction == DMA_NONE) { 416 + ccw = &mxs_chan->ccw[idx++]; 417 + pio = (u32 *) sgl; 418 + 419 + for (j = 0; j < sg_len;) 420 + ccw->pio_words[j++] = *pio++; 421 + 422 + ccw->bits = 0; 423 + ccw->bits |= CCW_IRQ; 424 + ccw->bits |= CCW_DEC_SEM; 425 + ccw->bits |= CCW_WAIT4END; 426 + ccw->bits |= CCW_HALT_ON_TERM; 427 + ccw->bits |= CCW_TERM_FLUSH; 428 + ccw->bits |= BF_CCW(sg_len, PIO_NUM); 429 + ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); 430 + } else { 431 + for_each_sg(sgl, sg, sg_len, i) { 432 + if (sg->length > MAX_XFER_BYTES) { 433 + dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", 434 + sg->length, MAX_XFER_BYTES); 435 + goto err_out; 436 + } 437 + 438 + ccw = &mxs_chan->ccw[idx++]; 439 + 440 + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; 441 + ccw->bufaddr = sg->dma_address; 442 + ccw->xfer_bytes = sg->length; 443 + 444 + ccw->bits = 0; 445 + ccw->bits |= CCW_CHAIN; 446 + ccw->bits |= CCW_HALT_ON_TERM; 447 + ccw->bits |= CCW_TERM_FLUSH; 448 + ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ? 449 + MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, 450 + COMMAND); 451 + 452 + if (i + 1 == sg_len) { 453 + ccw->bits &= ~CCW_CHAIN; 454 + ccw->bits |= CCW_IRQ; 455 + ccw->bits |= CCW_DEC_SEM; 456 + ccw->bits |= CCW_WAIT4END; 457 + } 458 + } 459 + } 460 + 461 + return &mxs_chan->desc; 462 + 463 + err_out: 464 + mxs_chan->status = DMA_ERROR; 465 + return NULL; 466 + } 467 + 468 + static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic( 469 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 470 + size_t period_len, enum dma_data_direction direction) 471 + { 472 + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 473 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 474 + int num_periods = buf_len / period_len; 475 + int i = 0, buf = 0; 476 + 477 + if (mxs_chan->status == DMA_IN_PROGRESS) 478 + return NULL; 479 + 480 + mxs_chan->status = DMA_IN_PROGRESS; 481 + mxs_chan->flags |= MXS_DMA_SG_LOOP; 482 + 483 + if (num_periods > NUM_CCW) { 484 + dev_err(mxs_dma->dma_device.dev, 485 + "maximum number of sg exceeded: %d > %d\n", 486 + num_periods, NUM_CCW); 487 + goto err_out; 488 + } 489 + 490 + if (period_len > MAX_XFER_BYTES) { 491 + dev_err(mxs_dma->dma_device.dev, 492 + "maximum period size exceeded: %d > %d\n", 493 + period_len, MAX_XFER_BYTES); 494 + goto err_out; 495 + } 496 + 497 + while (buf < buf_len) { 498 + struct mxs_dma_ccw *ccw = &mxs_chan->ccw[i]; 499 + 500 + if (i + 1 == num_periods) 501 + ccw->next = mxs_chan->ccw_phys; 502 + else 503 + ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * (i + 1); 504 + 505 + ccw->bufaddr = dma_addr; 506 + ccw->xfer_bytes = period_len; 507 + 508 + ccw->bits = 0; 509 + ccw->bits |= CCW_CHAIN; 510 + ccw->bits |= CCW_IRQ; 511 + ccw->bits |= CCW_HALT_ON_TERM; 512 + ccw->bits |= CCW_TERM_FLUSH; 513 + ccw->bits |= 
BF_CCW(direction == DMA_FROM_DEVICE ? 514 + MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND); 515 + 516 + dma_addr += period_len; 517 + buf += period_len; 518 + 519 + i++; 520 + } 521 + 522 + return &mxs_chan->desc; 523 + 524 + err_out: 525 + mxs_chan->status = DMA_ERROR; 526 + return NULL; 527 + } 528 + 529 + static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 530 + unsigned long arg) 531 + { 532 + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 533 + int ret = 0; 534 + 535 + switch (cmd) { 536 + case DMA_TERMINATE_ALL: 537 + mxs_dma_disable_chan(mxs_chan); 538 + break; 539 + case DMA_PAUSE: 540 + mxs_dma_pause_chan(mxs_chan); 541 + break; 542 + case DMA_RESUME: 543 + mxs_dma_resume_chan(mxs_chan); 544 + break; 545 + default: 546 + ret = -ENOSYS; 547 + } 548 + 549 + return ret; 550 + } 551 + 552 + static enum dma_status mxs_dma_tx_status(struct dma_chan *chan, 553 + dma_cookie_t cookie, struct dma_tx_state *txstate) 554 + { 555 + struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan); 556 + dma_cookie_t last_used; 557 + 558 + last_used = chan->cookie; 559 + dma_set_tx_state(txstate, mxs_chan->last_completed, last_used, 0); 560 + 561 + return mxs_chan->status; 562 + } 563 + 564 + static void mxs_dma_issue_pending(struct dma_chan *chan) 565 + { 566 + /* 567 + * Nothing to do. We only have a single descriptor. 568 + */ 569 + } 570 + 571 + static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma) 572 + { 573 + int ret; 574 + 575 + ret = clk_enable(mxs_dma->clk); 576 + if (ret) 577 + goto err_out; 578 + 579 + ret = mxs_reset_block(mxs_dma->base); 580 + if (ret) 581 + goto err_out; 582 + 583 + /* only major version matters */ 584 + mxs_dma->version = readl(mxs_dma->base + 585 + ((mxs_dma->dev_id == MXS_DMA_APBX) ? 586 + HW_APBX_VERSION : HW_APBH_VERSION)) >> 587 + BP_APBHX_VERSION_MAJOR; 588 + 589 + /* enable apbh burst */ 590 + if (dma_is_apbh()) { 591 + writel(BM_APBH_CTRL0_APB_BURST_EN, 592 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 593 + writel(BM_APBH_CTRL0_APB_BURST8_EN, 594 + mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 595 + } 596 + 597 + /* enable irq for all the channels */ 598 + writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, 599 + mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); 600 + 601 + clk_disable(mxs_dma->clk); 602 + 603 + return 0; 604 + 605 + err_out: 606 + return ret; 607 + } 608 + 609 + static int __init mxs_dma_probe(struct platform_device *pdev) 610 + { 611 + const struct platform_device_id *id_entry = 612 + platform_get_device_id(pdev); 613 + struct mxs_dma_engine *mxs_dma; 614 + struct resource *iores; 615 + int ret, i; 616 + 617 + mxs_dma = kzalloc(sizeof(*mxs_dma), GFP_KERNEL); 618 + if (!mxs_dma) 619 + return -ENOMEM; 620 + 621 + mxs_dma->dev_id = id_entry->driver_data; 622 + 623 + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 624 + 625 + if (!request_mem_region(iores->start, resource_size(iores), 626 + pdev->name)) { 627 + ret = -EBUSY; 628 + goto err_request_region; 629 + } 630 + 631 + mxs_dma->base = ioremap(iores->start, resource_size(iores)); 632 + if (!mxs_dma->base) { 633 + ret = -ENOMEM; 634 + goto err_ioremap; 635 + } 636 + 637 + mxs_dma->clk = clk_get(&pdev->dev, NULL); 638 + if (IS_ERR(mxs_dma->clk)) { 639 + ret = PTR_ERR(mxs_dma->clk); 640 + goto err_clk; 641 + } 642 + 643 + dma_cap_set(DMA_SLAVE, mxs_dma->dma_device.cap_mask); 644 + dma_cap_set(DMA_CYCLIC, mxs_dma->dma_device.cap_mask); 645 + 646 + INIT_LIST_HEAD(&mxs_dma->dma_device.channels); 647 + 648 + /* Initialize channel parameters */ 649 + for 
(i = 0; i < MXS_DMA_CHANNELS; i++) { 650 + struct mxs_dma_chan *mxs_chan = &mxs_dma->mxs_chans[i]; 651 + 652 + mxs_chan->mxs_dma = mxs_dma; 653 + mxs_chan->chan.device = &mxs_dma->dma_device; 654 + 655 + tasklet_init(&mxs_chan->tasklet, mxs_dma_tasklet, 656 + (unsigned long) mxs_chan); 657 + 658 + 659 + /* Add the channel to mxs_chan list */ 660 + list_add_tail(&mxs_chan->chan.device_node, 661 + &mxs_dma->dma_device.channels); 662 + } 663 + 664 + ret = mxs_dma_init(mxs_dma); 665 + if (ret) 666 + goto err_init; 667 + 668 + mxs_dma->dma_device.dev = &pdev->dev; 669 + 670 + /* mxs_dma gets 65535 bytes maximum sg size */ 671 + mxs_dma->dma_device.dev->dma_parms = &mxs_dma->dma_parms; 672 + dma_set_max_seg_size(mxs_dma->dma_device.dev, MAX_XFER_BYTES); 673 + 674 + mxs_dma->dma_device.device_alloc_chan_resources = mxs_dma_alloc_chan_resources; 675 + mxs_dma->dma_device.device_free_chan_resources = mxs_dma_free_chan_resources; 676 + mxs_dma->dma_device.device_tx_status = mxs_dma_tx_status; 677 + mxs_dma->dma_device.device_prep_slave_sg = mxs_dma_prep_slave_sg; 678 + mxs_dma->dma_device.device_prep_dma_cyclic = mxs_dma_prep_dma_cyclic; 679 + mxs_dma->dma_device.device_control = mxs_dma_control; 680 + mxs_dma->dma_device.device_issue_pending = mxs_dma_issue_pending; 681 + 682 + ret = dma_async_device_register(&mxs_dma->dma_device); 683 + if (ret) { 684 + dev_err(mxs_dma->dma_device.dev, "unable to register\n"); 685 + goto err_init; 686 + } 687 + 688 + dev_info(mxs_dma->dma_device.dev, "initialized\n"); 689 + 690 + return 0; 691 + 692 + err_init: 693 + clk_put(mxs_dma->clk); 694 + err_clk: 695 + iounmap(mxs_dma->base); 696 + err_ioremap: 697 + release_mem_region(iores->start, resource_size(iores)); 698 + err_request_region: 699 + kfree(mxs_dma); 700 + return ret; 701 + } 702 + 703 + static struct platform_device_id mxs_dma_type[] = { 704 + { 705 + .name = "mxs-dma-apbh", 706 + .driver_data = MXS_DMA_APBH, 707 + }, { 708 + .name = "mxs-dma-apbx", 709 + .driver_data = MXS_DMA_APBX, 710 + } 711 + }; 712 + 713 + static struct platform_driver mxs_dma_driver = { 714 + .driver = { 715 + .name = "mxs-dma", 716 + }, 717 + .id_table = mxs_dma_type, 718 + }; 719 + 720 + static int __init mxs_dma_module_init(void) 721 + { 722 + return platform_driver_probe(&mxs_dma_driver, mxs_dma_probe); 723 + } 724 + subsys_initcall(mxs_dma_module_init);
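mxs_dma_alloc_chan_resources() takes the per-channel IRQ from chan->private, which it expects to point at a struct mxs_dma_data. A client therefore has to install that structure from its dma_request_channel() filter, before the channel is actually grabbed. The sketch below shows the hand-off; the filter and wrapper names are assumptions, while dma_request_channel() and the capability helpers are the standard dmaengine API.

#include <linux/dmaengine.h>
#include <mach/dma.h>

/* Illustrative client-side filter: accept the offered channel and hand it
 * the per-channel IRQ via chan->private, as the driver expects. */
static bool example_mxs_filter(struct dma_chan *chan, void *param)
{
	chan->private = param;		/* struct mxs_dma_data */
	return true;
}

static struct dma_chan *example_request_mxs_chan(int chan_irq)
{
	static struct mxs_dma_data data;	/* read when the channel is allocated */
	dma_cap_mask_t mask;

	data.chan_irq = chan_irq;
	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_mxs_filter, &data);
}

A real client would normally also check dev_name() in the filter to pick the apbh or apbx instance rather than accepting the first slave-capable channel.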
+18 -17
drivers/dma/pch_dma.c
··· 82 u32 dma_sts1; 83 u32 reserved2; 84 u32 reserved3; 85 - struct pch_dma_desc_regs desc[0]; 86 }; 87 88 struct pch_dma_desc { ··· 124 struct pci_pool *pool; 125 struct pch_dma_regs regs; 126 struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; 127 - struct pch_dma_chan channels[0]; 128 }; 129 130 #define PCH_DMA_CTL0 0x00 ··· 366 struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); 367 dma_cookie_t cookie; 368 369 - spin_lock_bh(&pd_chan->lock); 370 cookie = pdc_assign_cookie(pd_chan, desc); 371 372 if (list_empty(&pd_chan->active_list)) { ··· 376 list_add_tail(&desc->desc_node, &pd_chan->queue); 377 } 378 379 - spin_unlock_bh(&pd_chan->lock); 380 return 0; 381 } 382 ··· 386 struct pch_dma *pd = to_pd(chan->device); 387 dma_addr_t addr; 388 389 - desc = pci_pool_alloc(pd->pool, GFP_KERNEL, &addr); 390 if (desc) { 391 memset(desc, 0, sizeof(struct pch_dma_desc)); 392 INIT_LIST_HEAD(&desc->tx_list); ··· 405 struct pch_dma_desc *ret = NULL; 406 int i; 407 408 - spin_lock_bh(&pd_chan->lock); 409 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { 410 i++; 411 if (async_tx_test_ack(&desc->txd)) { ··· 415 } 416 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); 417 } 418 - spin_unlock_bh(&pd_chan->lock); 419 dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); 420 421 if (!ret) { 422 ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); 423 if (ret) { 424 - spin_lock_bh(&pd_chan->lock); 425 pd_chan->descs_allocated++; 426 - spin_unlock_bh(&pd_chan->lock); 427 } else { 428 dev_err(chan2dev(&pd_chan->chan), 429 "failed to alloc desc\n"); ··· 437 struct pch_dma_desc *desc) 438 { 439 if (desc) { 440 - spin_lock_bh(&pd_chan->lock); 441 list_splice_init(&desc->tx_list, &pd_chan->free_list); 442 list_add(&desc->desc_node, &pd_chan->free_list); 443 - spin_unlock_bh(&pd_chan->lock); 444 } 445 } 446 ··· 530 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 531 532 if (pdc_is_idle(pd_chan)) { 533 - spin_lock_bh(&pd_chan->lock); 534 pdc_advance_work(pd_chan); 535 - spin_unlock_bh(&pd_chan->lock); 536 } 537 } 538 ··· 592 goto err_desc_get; 593 } 594 595 - 596 if (!first) { 597 first = desc; 598 } else { ··· 640 641 spin_unlock_bh(&pd_chan->lock); 642 643 - 644 return 0; 645 } 646 647 static void pdc_tasklet(unsigned long data) 648 { 649 struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; 650 651 if (!pdc_is_idle(pd_chan)) { 652 dev_err(chan2dev(&pd_chan->chan), ··· 654 return; 655 } 656 657 - spin_lock_bh(&pd_chan->lock); 658 if (test_and_clear_bit(0, &pd_chan->err_status)) 659 pdc_handle_error(pd_chan); 660 else 661 pdc_advance_work(pd_chan); 662 - spin_unlock_bh(&pd_chan->lock); 663 } 664 665 static irqreturn_t pd_irq(int irq, void *devid) ··· 693 return ret; 694 } 695 696 static void pch_dma_save_regs(struct pch_dma *pd) 697 { 698 struct pch_dma_chan *pd_chan; ··· 771 772 return 0; 773 } 774 775 static int __devinit pch_dma_probe(struct pci_dev *pdev, 776 const struct pci_device_id *id)
··· 82 u32 dma_sts1; 83 u32 reserved2; 84 u32 reserved3; 85 + struct pch_dma_desc_regs desc[MAX_CHAN_NR]; 86 }; 87 88 struct pch_dma_desc { ··· 124 struct pci_pool *pool; 125 struct pch_dma_regs regs; 126 struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR]; 127 + struct pch_dma_chan channels[MAX_CHAN_NR]; 128 }; 129 130 #define PCH_DMA_CTL0 0x00 ··· 366 struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan); 367 dma_cookie_t cookie; 368 369 + spin_lock(&pd_chan->lock); 370 cookie = pdc_assign_cookie(pd_chan, desc); 371 372 if (list_empty(&pd_chan->active_list)) { ··· 376 list_add_tail(&desc->desc_node, &pd_chan->queue); 377 } 378 379 + spin_unlock(&pd_chan->lock); 380 return 0; 381 } 382 ··· 386 struct pch_dma *pd = to_pd(chan->device); 387 dma_addr_t addr; 388 389 + desc = pci_pool_alloc(pd->pool, flags, &addr); 390 if (desc) { 391 memset(desc, 0, sizeof(struct pch_dma_desc)); 392 INIT_LIST_HEAD(&desc->tx_list); ··· 405 struct pch_dma_desc *ret = NULL; 406 int i; 407 408 + spin_lock(&pd_chan->lock); 409 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { 410 i++; 411 if (async_tx_test_ack(&desc->txd)) { ··· 415 } 416 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); 417 } 418 + spin_unlock(&pd_chan->lock); 419 dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); 420 421 if (!ret) { 422 ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); 423 if (ret) { 424 + spin_lock(&pd_chan->lock); 425 pd_chan->descs_allocated++; 426 + spin_unlock(&pd_chan->lock); 427 } else { 428 dev_err(chan2dev(&pd_chan->chan), 429 "failed to alloc desc\n"); ··· 437 struct pch_dma_desc *desc) 438 { 439 if (desc) { 440 + spin_lock(&pd_chan->lock); 441 list_splice_init(&desc->tx_list, &pd_chan->free_list); 442 list_add(&desc->desc_node, &pd_chan->free_list); 443 + spin_unlock(&pd_chan->lock); 444 } 445 } 446 ··· 530 struct pch_dma_chan *pd_chan = to_pd_chan(chan); 531 532 if (pdc_is_idle(pd_chan)) { 533 + spin_lock(&pd_chan->lock); 534 pdc_advance_work(pd_chan); 535 + spin_unlock(&pd_chan->lock); 536 } 537 } 538 ··· 592 goto err_desc_get; 593 } 594 595 if (!first) { 596 first = desc; 597 } else { ··· 641 642 spin_unlock_bh(&pd_chan->lock); 643 644 return 0; 645 } 646 647 static void pdc_tasklet(unsigned long data) 648 { 649 struct pch_dma_chan *pd_chan = (struct pch_dma_chan *)data; 650 + unsigned long flags; 651 652 if (!pdc_is_idle(pd_chan)) { 653 dev_err(chan2dev(&pd_chan->chan), ··· 655 return; 656 } 657 658 + spin_lock_irqsave(&pd_chan->lock, flags); 659 if (test_and_clear_bit(0, &pd_chan->err_status)) 660 pdc_handle_error(pd_chan); 661 else 662 pdc_advance_work(pd_chan); 663 + spin_unlock_irqrestore(&pd_chan->lock, flags); 664 } 665 666 static irqreturn_t pd_irq(int irq, void *devid) ··· 694 return ret; 695 } 696 697 + #ifdef CONFIG_PM 698 static void pch_dma_save_regs(struct pch_dma *pd) 699 { 700 struct pch_dma_chan *pd_chan; ··· 771 772 return 0; 773 } 774 + #endif 775 776 static int __devinit pch_dma_probe(struct pci_dev *pdev, 777 const struct pci_device_id *id)
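The new #ifdef CONFIG_PM around pch_dma_save_regs()/pch_dma_restore_regs() is what silences the "defined but not used" warning: their only callers are the PCI power-management hooks, which are themselves built only when CONFIG_PM is set. A sketch of that caller side, with the hook body assumed rather than taken from the hunks above:

#ifdef CONFIG_PM
/* Sketch of the legacy PCI suspend hook that consumes pch_dma_save_regs();
 * it is compiled out together with the helpers when CONFIG_PM is unset. */
static int pch_dma_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);

	if (pd)
		pch_dma_save_regs(pd);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
#endif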
+659 -761
drivers/dma/ste_dma40.c
··· 68 * @base: Pointer to memory area when the pre_alloc_lli's are not large 69 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if 70 * pre_alloc_lli is used. 71 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 72 * @pre_alloc_lli: Pre allocated area for the most common case of transfers, 73 * one buffer to one buffer. ··· 76 struct d40_lli_pool { 77 void *base; 78 int size; 79 /* Space for dst and src, plus an extra for padding */ 80 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; 81 }; ··· 96 * during a transfer. 97 * @node: List entry. 98 * @is_in_client_list: true if the client owns this descriptor. 99 - * @is_hw_linked: true if this job will automatically be continued for 100 * the previous one. 101 * 102 * This descriptor is used for both logical and physical transfers. ··· 115 struct list_head node; 116 117 bool is_in_client_list; 118 - bool is_hw_linked; 119 }; 120 121 /** ··· 131 */ 132 struct d40_lcla_pool { 133 void *base; 134 void *base_unaligned; 135 int pages; 136 spinlock_t lock; ··· 305 unsigned int val; 306 }; 307 308 - static int d40_pool_lli_alloc(struct d40_desc *d40d, 309 - int lli_len, bool is_log) 310 { 311 u32 align; 312 void *base; 313 ··· 349 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 350 d40d->lli_pool.base = NULL; 351 } else { 352 - d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); 353 354 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 355 d40d->lli_pool.base = base; ··· 359 } 360 361 if (is_log) { 362 - d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, 363 - align); 364 - d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, 365 - align); 366 } else { 367 - d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, 368 - align); 369 - d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, 370 - align); 371 } 372 373 return 0; 374 } 375 376 - static void d40_pool_lli_free(struct d40_desc *d40d) 377 { 378 kfree(d40d->lli_pool.base); 379 d40d->lli_pool.base = NULL; 380 d40d->lli_pool.size = 0; ··· 436 int i; 437 int ret = -EINVAL; 438 439 - if (d40c->log_num == D40_PHY_CHAN) 440 return 0; 441 442 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); ··· 475 476 list_for_each_entry_safe(d, _d, &d40c->client, node) 477 if (async_tx_test_ack(&d->txd)) { 478 - d40_pool_lli_free(d); 479 d40_desc_remove(d); 480 desc = d; 481 memset(desc, 0, sizeof(*desc)); ··· 495 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 496 { 497 498 d40_lcla_free_all(d40c, d40d); 499 kmem_cache_free(d40c->base->desc_slab, d40d); 500 } ··· 505 list_add_tail(&desc->node, &d40c->active); 506 } 507 508 - static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 509 { 510 - int curr_lcla = -EINVAL, next_lcla; 511 512 - if (d40c->log_num == D40_PHY_CHAN) { 513 - d40_phy_lli_write(d40c->base->virtbase, 514 - d40c->phy_chan->num, 515 - d40d->lli_phy.dst, 516 - d40d->lli_phy.src); 517 - d40d->lli_current = d40d->lli_len; 518 - } else { 519 520 - if ((d40d->lli_len - d40d->lli_current) > 1) 521 - curr_lcla = d40_lcla_alloc_one(d40c, d40d); 522 523 - d40_log_lli_lcpa_write(d40c->lcpa, 524 - &d40d->lli_log.dst[d40d->lli_current], 525 - &d40d->lli_log.src[d40d->lli_current], 526 - curr_lcla); 527 528 - d40d->lli_current++; 529 - for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { 530 - struct d40_log_lli *lcla; 531 532 - if (d40d->lli_current + 1 < d40d->lli_len) 533 - next_lcla = d40_lcla_alloc_one(d40c, d40d); 534 - else 535 - next_lcla = -EINVAL; 536 
537 - lcla = d40c->base->lcla_pool.base + 538 - d40c->phy_chan->num * 1024 + 539 - 8 * curr_lcla * 2; 540 541 - d40_log_lli_lcla_write(lcla, 542 - &d40d->lli_log.dst[d40d->lli_current], 543 - &d40d->lli_log.src[d40d->lli_current], 544 - next_lcla); 545 546 - (void) dma_map_single(d40c->base->dev, lcla, 547 - 2 * sizeof(struct d40_log_lli), 548 - DMA_TO_DEVICE); 549 550 - curr_lcla = next_lcla; 551 552 - if (curr_lcla == -EINVAL) { 553 - d40d->lli_current++; 554 - break; 555 - } 556 557 } 558 } 559 } 560 561 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) ··· 657 d = list_first_entry(&d40c->queue, 658 struct d40_desc, 659 node); 660 - return d; 661 - } 662 - 663 - static struct d40_desc *d40_last_queued(struct d40_chan *d40c) 664 - { 665 - struct d40_desc *d; 666 - 667 - if (list_empty(&d40c->queue)) 668 - return NULL; 669 - list_for_each_entry(d, &d40c->queue, node) 670 - if (list_is_last(&d->node, &d40c->queue)) 671 - break; 672 return d; 673 } 674 ··· 771 } 772 773 if (i == D40_SUSPEND_MAX_IT) { 774 - dev_err(&d40c->chan.dev->device, 775 - "[%s]: unable to suspend the chl %d (log: %d) status %x\n", 776 - __func__, d40c->phy_chan->num, d40c->log_num, 777 status); 778 dump_stack(); 779 ret = -EBUSY; ··· 806 d40c->busy = false; 807 } 808 809 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 810 { 811 - u32 val; 812 unsigned long flags; 813 - 814 - /* Notice, that disable requires the physical channel to be stopped */ 815 - if (do_enable) 816 - val = D40_ACTIVATE_EVENTLINE; 817 - else 818 - val = D40_DEACTIVATE_EVENTLINE; 819 820 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 821 ··· 852 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 853 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 854 855 - writel((val << D40_EVENTLINE_POS(event)) | 856 - ~D40_EVENTLINE_MASK(event), 857 - d40c->base->virtbase + D40_DREG_PCBASE + 858 - d40c->phy_chan->num * D40_DREG_PCDELTA + 859 - D40_CHAN_REG_SSLNK); 860 } 861 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 862 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 863 864 - writel((val << D40_EVENTLINE_POS(event)) | 865 - ~D40_EVENTLINE_MASK(event), 866 - d40c->base->virtbase + D40_DREG_PCBASE + 867 - d40c->phy_chan->num * D40_DREG_PCDELTA + 868 - D40_CHAN_REG_SDLNK); 869 } 870 871 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); ··· 868 869 static u32 d40_chan_has_events(struct d40_chan *d40c) 870 { 871 u32 val; 872 873 - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 874 - d40c->phy_chan->num * D40_DREG_PCDELTA + 875 - D40_CHAN_REG_SSLNK); 876 877 - val |= readl(d40c->base->virtbase + D40_DREG_PCBASE + 878 - d40c->phy_chan->num * D40_DREG_PCDELTA + 879 - D40_CHAN_REG_SDLNK); 880 return val; 881 } 882 ··· 896 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, 897 }; 898 899 - if (d40c->log_num == D40_PHY_CHAN) 900 return phy_map[d40c->dma_cfg.mode_opt]; 901 else 902 return log_map[d40c->dma_cfg.mode_opt]; ··· 910 /* Odd addresses are even addresses + 4 */ 911 addr_base = (d40c->phy_chan->num % 2) * 4; 912 /* Setup channel mode to logical or physical */ 913 - var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << 914 D40_CHAN_POS(d40c->phy_chan->num); 915 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 916 ··· 919 920 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 921 922 - if (d40c->log_num != D40_PHY_CHAN) { 923 /* Set default config for CFG reg */ 924 - writel(d40c->src_def_cfg, 925 - d40c->base->virtbase + D40_DREG_PCBASE + 926 - d40c->phy_chan->num * 
D40_DREG_PCDELTA + 927 - D40_CHAN_REG_SSCFG); 928 - writel(d40c->dst_def_cfg, 929 - d40c->base->virtbase + D40_DREG_PCBASE + 930 - d40c->phy_chan->num * D40_DREG_PCDELTA + 931 - D40_CHAN_REG_SDCFG); 932 933 /* Set LIDX for lcla */ 934 - writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 935 - D40_SREG_ELEM_LOG_LIDX_MASK, 936 - d40c->base->virtbase + D40_DREG_PCBASE + 937 - d40c->phy_chan->num * D40_DREG_PCDELTA + 938 - D40_CHAN_REG_SDELT); 939 - 940 - writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 941 - D40_SREG_ELEM_LOG_LIDX_MASK, 942 - d40c->base->virtbase + D40_DREG_PCBASE + 943 - d40c->phy_chan->num * D40_DREG_PCDELTA + 944 - D40_CHAN_REG_SSELT); 945 - 946 } 947 } 948 ··· 938 { 939 u32 num_elt; 940 941 - if (d40c->log_num != D40_PHY_CHAN) 942 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 943 >> D40_MEM_LCSP2_ECNT_POS; 944 - else 945 - num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + 946 - d40c->phy_chan->num * D40_DREG_PCDELTA + 947 - D40_CHAN_REG_SDELT) & 948 - D40_SREG_ELEM_PHY_ECNT_MASK) >> 949 - D40_SREG_ELEM_PHY_ECNT_POS; 950 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 951 } 952 ··· 954 { 955 bool is_link; 956 957 - if (d40c->log_num != D40_PHY_CHAN) 958 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 959 else 960 - is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + 961 - d40c->phy_chan->num * D40_DREG_PCDELTA + 962 - D40_CHAN_REG_SDLNK) & 963 - D40_SREG_LNK_PHYS_LNK_MASK; 964 return is_link; 965 } 966 967 - static int d40_pause(struct dma_chan *chan) 968 { 969 - struct d40_chan *d40c = 970 - container_of(chan, struct d40_chan, chan); 971 int res = 0; 972 unsigned long flags; 973 ··· 975 976 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 977 if (res == 0) { 978 - if (d40c->log_num != D40_PHY_CHAN) { 979 d40_config_set_event(d40c, false); 980 /* Resume the other logical channels if any */ 981 if (d40_chan_has_events(d40c)) ··· 988 return res; 989 } 990 991 - static int d40_resume(struct dma_chan *chan) 992 { 993 - struct d40_chan *d40c = 994 - container_of(chan, struct d40_chan, chan); 995 int res = 0; 996 unsigned long flags; 997 ··· 999 spin_lock_irqsave(&d40c->lock, flags); 1000 1001 if (d40c->base->rev == 0) 1002 - if (d40c->log_num != D40_PHY_CHAN) { 1003 res = d40_channel_execute_command(d40c, 1004 D40_DMA_SUSPEND_REQ); 1005 goto no_suspend; ··· 1008 /* If bytes left to transfer or linked tx resume job */ 1009 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 1010 1011 - if (d40c->log_num != D40_PHY_CHAN) 1012 d40_config_set_event(d40c, true); 1013 1014 res = d40_channel_execute_command(d40c, D40_DMA_RUN); ··· 1019 return res; 1020 } 1021 1022 - static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) 1023 { 1024 - /* TODO: Write */ 1025 - } 1026 1027 - static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d) 1028 - { 1029 - struct d40_desc *d40d_prev = NULL; 1030 - int i; 1031 - u32 val; 1032 1033 - if (!list_empty(&d40c->queue)) 1034 - d40d_prev = d40_last_queued(d40c); 1035 - else if (!list_empty(&d40c->active)) 1036 - d40d_prev = d40_first_active_get(d40c); 1037 1038 - if (!d40d_prev) 1039 - return; 1040 - 1041 - /* Here we try to join this job with previous jobs */ 1042 - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1043 - d40c->phy_chan->num * D40_DREG_PCDELTA + 1044 - D40_CHAN_REG_SSLNK); 1045 - 1046 - /* Figure out which link we're currently transmitting */ 1047 - for (i = 0; i < d40d_prev->lli_len; i++) 1048 - if (val == 
d40d_prev->lli_phy.src[i].reg_lnk) 1049 - break; 1050 - 1051 - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1052 - d40c->phy_chan->num * D40_DREG_PCDELTA + 1053 - D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS; 1054 - 1055 - if (i == (d40d_prev->lli_len - 1) && val > 0) { 1056 - /* Change the current one */ 1057 - writel(virt_to_phys(d40d->lli_phy.src), 1058 - d40c->base->virtbase + D40_DREG_PCBASE + 1059 - d40c->phy_chan->num * D40_DREG_PCDELTA + 1060 - D40_CHAN_REG_SSLNK); 1061 - writel(virt_to_phys(d40d->lli_phy.dst), 1062 - d40c->base->virtbase + D40_DREG_PCBASE + 1063 - d40c->phy_chan->num * D40_DREG_PCDELTA + 1064 - D40_CHAN_REG_SDLNK); 1065 - 1066 - d40d->is_hw_linked = true; 1067 - 1068 - } else if (i < d40d_prev->lli_len) { 1069 - (void) dma_unmap_single(d40c->base->dev, 1070 - virt_to_phys(d40d_prev->lli_phy.src), 1071 - d40d_prev->lli_pool.size, 1072 - DMA_TO_DEVICE); 1073 - 1074 - /* Keep the settings */ 1075 - val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & 1076 - ~D40_SREG_LNK_PHYS_LNK_MASK; 1077 - d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk = 1078 - val | virt_to_phys(d40d->lli_phy.src); 1079 - 1080 - val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & 1081 - ~D40_SREG_LNK_PHYS_LNK_MASK; 1082 - d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = 1083 - val | virt_to_phys(d40d->lli_phy.dst); 1084 - 1085 - (void) dma_map_single(d40c->base->dev, 1086 - d40d_prev->lli_phy.src, 1087 - d40d_prev->lli_pool.size, 1088 - DMA_TO_DEVICE); 1089 - d40d->is_hw_linked = true; 1090 - } 1091 } 1092 1093 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) ··· 1043 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 1044 unsigned long flags; 1045 1046 - (void) d40_pause(&d40c->chan); 1047 - 1048 spin_lock_irqsave(&d40c->lock, flags); 1049 1050 d40c->chan.cookie++; ··· 1052 1053 d40d->txd.cookie = d40c->chan.cookie; 1054 1055 - if (d40c->log_num == D40_PHY_CHAN) 1056 - d40_tx_submit_phy(d40c, d40d); 1057 - else 1058 - d40_tx_submit_log(d40c, d40d); 1059 - 1060 d40_desc_queue(d40c, d40d); 1061 1062 spin_unlock_irqrestore(&d40c->lock, flags); 1063 - 1064 - (void) d40_resume(&d40c->chan); 1065 1066 return tx->cookie; 1067 } ··· 1064 if (d40c->base->rev == 0) { 1065 int err; 1066 1067 - if (d40c->log_num != D40_PHY_CHAN) { 1068 err = d40_channel_execute_command(d40c, 1069 D40_DMA_SUSPEND_REQ); 1070 if (err) ··· 1072 } 1073 } 1074 1075 - if (d40c->log_num != D40_PHY_CHAN) 1076 d40_config_set_event(d40c, true); 1077 1078 return d40_channel_execute_command(d40c, D40_DMA_RUN); ··· 1095 /* Add to active queue */ 1096 d40_desc_submit(d40c, d40d); 1097 1098 - /* 1099 - * If this job is already linked in hw, 1100 - * do not submit it. 
1101 - */ 1102 1103 - if (!d40d->is_hw_linked) { 1104 - /* Initiate DMA job */ 1105 - d40_desc_load(d40c, d40d); 1106 1107 - /* Start dma job */ 1108 - err = d40_start(d40c); 1109 - 1110 - if (err) 1111 - return NULL; 1112 - } 1113 } 1114 1115 return d40d; ··· 1119 if (d40d == NULL) 1120 return; 1121 1122 - d40_lcla_free_all(d40c, d40d); 1123 1124 - if (d40d->lli_current < d40d->lli_len) { 1125 - d40_desc_load(d40c, d40d); 1126 - /* Start dma job */ 1127 - (void) d40_start(d40c); 1128 - return; 1129 } 1130 - 1131 - if (d40_queue_start(d40c) == NULL) 1132 - d40c->busy = false; 1133 1134 d40c->pending_tx++; 1135 tasklet_schedule(&d40c->tasklet); ··· 1167 1168 /* Get first active entry from list */ 1169 d40d = d40_first_active_get(d40c); 1170 - 1171 if (d40d == NULL) 1172 goto err; 1173 1174 - d40c->completed = d40d->txd.cookie; 1175 1176 /* 1177 * If terminating a channel pending_tx is set to zero. ··· 1186 callback = d40d->txd.callback; 1187 callback_param = d40d->txd.callback_param; 1188 1189 - if (async_tx_test_ack(&d40d->txd)) { 1190 - d40_pool_lli_free(d40d); 1191 - d40_desc_remove(d40d); 1192 - d40_desc_free(d40c, d40d); 1193 - } else { 1194 - if (!d40d->is_in_client_list) { 1195 d40_desc_remove(d40d); 1196 - d40_lcla_free_all(d40c, d40d); 1197 - list_add_tail(&d40d->node, &d40c->client); 1198 - d40d->is_in_client_list = true; 1199 } 1200 } 1201 ··· 1274 if (!il[row].is_error) 1275 dma_tc_handle(d40c); 1276 else 1277 - dev_err(base->dev, 1278 - "[%s] IRQ chan: %ld offset %d idx %d\n", 1279 - __func__, chan, il[row].offset, idx); 1280 1281 spin_unlock(&d40c->lock); 1282 } ··· 1294 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1295 1296 if (!conf->dir) { 1297 - dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", 1298 - __func__); 1299 res = -EINVAL; 1300 } 1301 ··· 1302 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && 1303 d40c->runtime_addr == 0) { 1304 1305 - dev_err(&d40c->chan.dev->device, 1306 - "[%s] Invalid TX channel address (%d)\n", 1307 - __func__, conf->dst_dev_type); 1308 res = -EINVAL; 1309 } 1310 1311 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && 1312 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && 1313 d40c->runtime_addr == 0) { 1314 - dev_err(&d40c->chan.dev->device, 1315 - "[%s] Invalid RX channel address (%d)\n", 1316 - __func__, conf->src_dev_type); 1317 res = -EINVAL; 1318 } 1319 1320 if (conf->dir == STEDMA40_MEM_TO_PERIPH && 1321 dst_event_group == STEDMA40_DEV_DST_MEMORY) { 1322 - dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", 1323 - __func__); 1324 res = -EINVAL; 1325 } 1326 1327 if (conf->dir == STEDMA40_PERIPH_TO_MEM && 1328 src_event_group == STEDMA40_DEV_SRC_MEMORY) { 1329 - dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", 1330 - __func__); 1331 res = -EINVAL; 1332 } 1333 1334 if (src_event_group == STEDMA40_DEV_SRC_MEMORY && 1335 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { 1336 - dev_err(&d40c->chan.dev->device, 1337 - "[%s] No event line\n", __func__); 1338 res = -EINVAL; 1339 } 1340 1341 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && 1342 (src_event_group != dst_event_group)) { 1343 - dev_err(&d40c->chan.dev->device, 1344 - "[%s] Invalid event group\n", __func__); 1345 res = -EINVAL; 1346 } 1347 ··· 1344 * DMAC HW supports it. Will be added to this driver, 1345 * in case any dma client requires it. 
1346 */ 1347 - dev_err(&d40c->chan.dev->device, 1348 - "[%s] periph to periph not supported\n", 1349 - __func__); 1350 res = -EINVAL; 1351 } 1352 ··· 1357 * src (burst x width) == dst (burst x width) 1358 */ 1359 1360 - dev_err(&d40c->chan.dev->device, 1361 - "[%s] src (burst x width) != dst (burst x width)\n", 1362 - __func__); 1363 res = -EINVAL; 1364 } 1365 ··· 1560 dma_has_cap(DMA_SLAVE, cap)) { 1561 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 1562 } else { 1563 - dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", 1564 - __func__); 1565 return -EINVAL; 1566 } 1567 ··· 1585 /* Release client owned descriptors */ 1586 if (!list_empty(&d40c->client)) 1587 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1588 - d40_pool_lli_free(d); 1589 d40_desc_remove(d); 1590 d40_desc_free(d40c, d); 1591 } 1592 1593 if (phy == NULL) { 1594 - dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", 1595 - __func__); 1596 return -EINVAL; 1597 } 1598 1599 if (phy->allocated_src == D40_ALLOC_FREE && 1600 phy->allocated_dst == D40_ALLOC_FREE) { 1601 - dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", 1602 - __func__); 1603 return -EINVAL; 1604 } 1605 ··· 1609 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1610 is_src = true; 1611 } else { 1612 - dev_err(&d40c->chan.dev->device, 1613 - "[%s] Unknown direction\n", __func__); 1614 return -EINVAL; 1615 } 1616 1617 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1618 if (res) { 1619 - dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", 1620 - __func__); 1621 return res; 1622 } 1623 1624 - if (d40c->log_num != D40_PHY_CHAN) { 1625 /* Release logical channel, deactivate the event line */ 1626 1627 d40_config_set_event(d40c, false); ··· 1635 res = d40_channel_execute_command(d40c, 1636 D40_DMA_RUN); 1637 if (res) { 1638 - dev_err(&d40c->chan.dev->device, 1639 - "[%s] Executing RUN command\n", 1640 - __func__); 1641 return res; 1642 } 1643 } ··· 1649 /* Release physical channel */ 1650 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 1651 if (res) { 1652 - dev_err(&d40c->chan.dev->device, 1653 - "[%s] Failed to stop channel\n", __func__); 1654 return res; 1655 } 1656 d40c->phy_chan = NULL; ··· 1661 1662 static bool d40_is_paused(struct d40_chan *d40c) 1663 { 1664 bool is_paused = false; 1665 unsigned long flags; 1666 void __iomem *active_reg; ··· 1670 1671 spin_lock_irqsave(&d40c->lock, flags); 1672 1673 - if (d40c->log_num == D40_PHY_CHAN) { 1674 if (d40c->phy_chan->num % 2 == 0) 1675 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1676 else ··· 1688 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1689 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1690 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1691 - status = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1692 - d40c->phy_chan->num * D40_DREG_PCDELTA + 1693 - D40_CHAN_REG_SDLNK); 1694 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1695 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1696 - status = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1697 - d40c->phy_chan->num * D40_DREG_PCDELTA + 1698 - D40_CHAN_REG_SSLNK); 1699 } else { 1700 - dev_err(&d40c->chan.dev->device, 1701 - "[%s] Unknown direction\n", __func__); 1702 goto _exit; 1703 } 1704 ··· 1723 return bytes_left; 1724 } 1725 1726 - struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 1727 - struct scatterlist *sgl_dst, 1728 - struct scatterlist *sgl_src, 1729 - unsigned int sgl_len, 1730 - unsigned long dma_flags) 1731 { 1732 - int 
res; 1733 - struct d40_desc *d40d; 1734 - struct d40_chan *d40c = container_of(chan, struct d40_chan, 1735 - chan); 1736 - unsigned long flags; 1737 1738 - if (d40c->phy_chan == NULL) { 1739 - dev_err(&d40c->chan.dev->device, 1740 - "[%s] Unallocated channel.\n", __func__); 1741 - return ERR_PTR(-EINVAL); 1742 - } 1743 1744 - spin_lock_irqsave(&d40c->lock, flags); 1745 - d40d = d40_desc_get(d40c); 1746 1747 - if (d40d == NULL) 1748 goto err; 1749 1750 - d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len, 1751 - d40c->dma_cfg.src_info.data_width, 1752 - d40c->dma_cfg.dst_info.data_width); 1753 - if (d40d->lli_len < 0) { 1754 - dev_err(&d40c->chan.dev->device, 1755 - "[%s] Unaligned size\n", __func__); 1756 goto err; 1757 } 1758 1759 - d40d->lli_current = 0; 1760 - d40d->txd.flags = dma_flags; 1761 1762 - if (d40c->log_num != D40_PHY_CHAN) { 1763 1764 - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { 1765 - dev_err(&d40c->chan.dev->device, 1766 - "[%s] Out of memory\n", __func__); 1767 - goto err; 1768 - } 1769 1770 - (void) d40_log_sg_to_lli(sgl_src, 1771 - sgl_len, 1772 - d40d->lli_log.src, 1773 - d40c->log_def.lcsp1, 1774 - d40c->dma_cfg.src_info.data_width, 1775 - d40c->dma_cfg.dst_info.data_width); 1776 1777 - (void) d40_log_sg_to_lli(sgl_dst, 1778 - sgl_len, 1779 - d40d->lli_log.dst, 1780 - d40c->log_def.lcsp3, 1781 - d40c->dma_cfg.dst_info.data_width, 1782 - d40c->dma_cfg.src_info.data_width); 1783 - } else { 1784 - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { 1785 - dev_err(&d40c->chan.dev->device, 1786 - "[%s] Out of memory\n", __func__); 1787 - goto err; 1788 - } 1789 - 1790 - res = d40_phy_sg_to_lli(sgl_src, 1791 - sgl_len, 1792 - 0, 1793 - d40d->lli_phy.src, 1794 - virt_to_phys(d40d->lli_phy.src), 1795 - d40c->src_def_cfg, 1796 - d40c->dma_cfg.src_info.data_width, 1797 - d40c->dma_cfg.dst_info.data_width, 1798 - d40c->dma_cfg.src_info.psize); 1799 - 1800 - if (res < 0) 1801 - goto err; 1802 - 1803 - res = d40_phy_sg_to_lli(sgl_dst, 1804 - sgl_len, 1805 - 0, 1806 - d40d->lli_phy.dst, 1807 - virt_to_phys(d40d->lli_phy.dst), 1808 - d40c->dst_def_cfg, 1809 - d40c->dma_cfg.dst_info.data_width, 1810 - d40c->dma_cfg.src_info.data_width, 1811 - d40c->dma_cfg.dst_info.psize); 1812 - 1813 - if (res < 0) 1814 - goto err; 1815 - 1816 - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1817 - d40d->lli_pool.size, DMA_TO_DEVICE); 1818 - } 1819 - 1820 - dma_async_tx_descriptor_init(&d40d->txd, chan); 1821 - 1822 - d40d->txd.tx_submit = d40_tx_submit; 1823 - 1824 - spin_unlock_irqrestore(&d40c->lock, flags); 1825 - 1826 - return &d40d->txd; 1827 err: 1828 - if (d40d) 1829 - d40_desc_free(d40c, d40d); 1830 - spin_unlock_irqrestore(&d40c->lock, flags); 1831 return NULL; 1832 } 1833 - EXPORT_SYMBOL(stedma40_memcpy_sg); 1834 1835 bool stedma40_filter(struct dma_chan *chan, void *data) 1836 { ··· 1923 } 1924 EXPORT_SYMBOL(stedma40_filter); 1925 1926 /* DMA ENGINE functions */ 1927 static int d40_alloc_chan_resources(struct dma_chan *chan) 1928 { ··· 1971 if (!d40c->configured) { 1972 err = d40_config_memcpy(d40c); 1973 if (err) { 1974 - dev_err(&d40c->chan.dev->device, 1975 - "[%s] Failed to configure memcpy channel\n", 1976 - __func__); 1977 goto fail; 1978 } 1979 } ··· 1979 1980 err = d40_allocate_channel(d40c); 1981 if (err) { 1982 - dev_err(&d40c->chan.dev->device, 1983 - "[%s] Failed to allocate channel\n", __func__); 1984 goto fail; 1985 } 1986 1987 /* Fill in basic CFG register values */ 1988 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, 1989 - &d40c->dst_def_cfg, 
d40c->log_num != D40_PHY_CHAN); 1990 1991 - if (d40c->log_num != D40_PHY_CHAN) { 1992 d40_log_cfg(&d40c->dma_cfg, 1993 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1994 ··· 2022 unsigned long flags; 2023 2024 if (d40c->phy_chan == NULL) { 2025 - dev_err(&d40c->chan.dev->device, 2026 - "[%s] Cannot free unallocated channel\n", __func__); 2027 return; 2028 } 2029 ··· 2032 err = d40_free_dma(d40c); 2033 2034 if (err) 2035 - dev_err(&d40c->chan.dev->device, 2036 - "[%s] Failed to free channel\n", __func__); 2037 spin_unlock_irqrestore(&d40c->lock, flags); 2038 } 2039 ··· 2042 size_t size, 2043 unsigned long dma_flags) 2044 { 2045 - struct d40_desc *d40d; 2046 - struct d40_chan *d40c = container_of(chan, struct d40_chan, 2047 - chan); 2048 - unsigned long flags; 2049 2050 - if (d40c->phy_chan == NULL) { 2051 - dev_err(&d40c->chan.dev->device, 2052 - "[%s] Channel is not allocated.\n", __func__); 2053 - return ERR_PTR(-EINVAL); 2054 - } 2055 2056 - spin_lock_irqsave(&d40c->lock, flags); 2057 - d40d = d40_desc_get(d40c); 2058 2059 - if (d40d == NULL) { 2060 - dev_err(&d40c->chan.dev->device, 2061 - "[%s] Descriptor is NULL\n", __func__); 2062 - goto err; 2063 - } 2064 2065 - d40d->txd.flags = dma_flags; 2066 - d40d->lli_len = d40_size_2_dmalen(size, 2067 - d40c->dma_cfg.src_info.data_width, 2068 - d40c->dma_cfg.dst_info.data_width); 2069 - if (d40d->lli_len < 0) { 2070 - dev_err(&d40c->chan.dev->device, 2071 - "[%s] Unaligned size\n", __func__); 2072 - goto err; 2073 - } 2074 - 2075 - 2076 - dma_async_tx_descriptor_init(&d40d->txd, chan); 2077 - 2078 - d40d->txd.tx_submit = d40_tx_submit; 2079 - 2080 - if (d40c->log_num != D40_PHY_CHAN) { 2081 - 2082 - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { 2083 - dev_err(&d40c->chan.dev->device, 2084 - "[%s] Out of memory\n", __func__); 2085 - goto err; 2086 - } 2087 - d40d->lli_current = 0; 2088 - 2089 - if (d40_log_buf_to_lli(d40d->lli_log.src, 2090 - src, 2091 - size, 2092 - d40c->log_def.lcsp1, 2093 - d40c->dma_cfg.src_info.data_width, 2094 - d40c->dma_cfg.dst_info.data_width, 2095 - true) == NULL) 2096 - goto err; 2097 - 2098 - if (d40_log_buf_to_lli(d40d->lli_log.dst, 2099 - dst, 2100 - size, 2101 - d40c->log_def.lcsp3, 2102 - d40c->dma_cfg.dst_info.data_width, 2103 - d40c->dma_cfg.src_info.data_width, 2104 - true) == NULL) 2105 - goto err; 2106 - 2107 - } else { 2108 - 2109 - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { 2110 - dev_err(&d40c->chan.dev->device, 2111 - "[%s] Out of memory\n", __func__); 2112 - goto err; 2113 - } 2114 - 2115 - if (d40_phy_buf_to_lli(d40d->lli_phy.src, 2116 - src, 2117 - size, 2118 - d40c->dma_cfg.src_info.psize, 2119 - 0, 2120 - d40c->src_def_cfg, 2121 - true, 2122 - d40c->dma_cfg.src_info.data_width, 2123 - d40c->dma_cfg.dst_info.data_width, 2124 - false) == NULL) 2125 - goto err; 2126 - 2127 - if (d40_phy_buf_to_lli(d40d->lli_phy.dst, 2128 - dst, 2129 - size, 2130 - d40c->dma_cfg.dst_info.psize, 2131 - 0, 2132 - d40c->dst_def_cfg, 2133 - true, 2134 - d40c->dma_cfg.dst_info.data_width, 2135 - d40c->dma_cfg.src_info.data_width, 2136 - false) == NULL) 2137 - goto err; 2138 - 2139 - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 2140 - d40d->lli_pool.size, DMA_TO_DEVICE); 2141 - } 2142 - 2143 - spin_unlock_irqrestore(&d40c->lock, flags); 2144 - return &d40d->txd; 2145 - 2146 - err: 2147 - if (d40d) 2148 - d40_desc_free(d40c, d40d); 2149 - spin_unlock_irqrestore(&d40c->lock, flags); 2150 - return NULL; 2151 } 2152 2153 static struct dma_async_tx_descriptor * 2154 - d40_prep_sg(struct 
dma_chan *chan, 2155 - struct scatterlist *dst_sg, unsigned int dst_nents, 2156 - struct scatterlist *src_sg, unsigned int src_nents, 2157 - unsigned long dma_flags) 2158 { 2159 if (dst_nents != src_nents) 2160 return NULL; 2161 2162 - return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); 2163 - } 2164 - 2165 - static int d40_prep_slave_sg_log(struct d40_desc *d40d, 2166 - struct d40_chan *d40c, 2167 - struct scatterlist *sgl, 2168 - unsigned int sg_len, 2169 - enum dma_data_direction direction, 2170 - unsigned long dma_flags) 2171 - { 2172 - dma_addr_t dev_addr = 0; 2173 - int total_size; 2174 - 2175 - d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len, 2176 - d40c->dma_cfg.src_info.data_width, 2177 - d40c->dma_cfg.dst_info.data_width); 2178 - if (d40d->lli_len < 0) { 2179 - dev_err(&d40c->chan.dev->device, 2180 - "[%s] Unaligned size\n", __func__); 2181 - return -EINVAL; 2182 - } 2183 - 2184 - if (d40_pool_lli_alloc(d40d, d40d->lli_len, true) < 0) { 2185 - dev_err(&d40c->chan.dev->device, 2186 - "[%s] Out of memory\n", __func__); 2187 - return -ENOMEM; 2188 - } 2189 - 2190 - d40d->lli_current = 0; 2191 - 2192 - if (direction == DMA_FROM_DEVICE) 2193 - if (d40c->runtime_addr) 2194 - dev_addr = d40c->runtime_addr; 2195 - else 2196 - dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 2197 - else if (direction == DMA_TO_DEVICE) 2198 - if (d40c->runtime_addr) 2199 - dev_addr = d40c->runtime_addr; 2200 - else 2201 - dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 2202 - 2203 - else 2204 - return -EINVAL; 2205 - 2206 - total_size = d40_log_sg_to_dev(sgl, sg_len, 2207 - &d40d->lli_log, 2208 - &d40c->log_def, 2209 - d40c->dma_cfg.src_info.data_width, 2210 - d40c->dma_cfg.dst_info.data_width, 2211 - direction, 2212 - dev_addr); 2213 - 2214 - if (total_size < 0) 2215 - return -EINVAL; 2216 - 2217 - return 0; 2218 - } 2219 - 2220 - static int d40_prep_slave_sg_phy(struct d40_desc *d40d, 2221 - struct d40_chan *d40c, 2222 - struct scatterlist *sgl, 2223 - unsigned int sgl_len, 2224 - enum dma_data_direction direction, 2225 - unsigned long dma_flags) 2226 - { 2227 - dma_addr_t src_dev_addr; 2228 - dma_addr_t dst_dev_addr; 2229 - int res; 2230 - 2231 - d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len, 2232 - d40c->dma_cfg.src_info.data_width, 2233 - d40c->dma_cfg.dst_info.data_width); 2234 - if (d40d->lli_len < 0) { 2235 - dev_err(&d40c->chan.dev->device, 2236 - "[%s] Unaligned size\n", __func__); 2237 - return -EINVAL; 2238 - } 2239 - 2240 - if (d40_pool_lli_alloc(d40d, d40d->lli_len, false) < 0) { 2241 - dev_err(&d40c->chan.dev->device, 2242 - "[%s] Out of memory\n", __func__); 2243 - return -ENOMEM; 2244 - } 2245 - 2246 - d40d->lli_current = 0; 2247 - 2248 - if (direction == DMA_FROM_DEVICE) { 2249 - dst_dev_addr = 0; 2250 - if (d40c->runtime_addr) 2251 - src_dev_addr = d40c->runtime_addr; 2252 - else 2253 - src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 2254 - } else if (direction == DMA_TO_DEVICE) { 2255 - if (d40c->runtime_addr) 2256 - dst_dev_addr = d40c->runtime_addr; 2257 - else 2258 - dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 2259 - src_dev_addr = 0; 2260 - } else 2261 - return -EINVAL; 2262 - 2263 - res = d40_phy_sg_to_lli(sgl, 2264 - sgl_len, 2265 - src_dev_addr, 2266 - d40d->lli_phy.src, 2267 - virt_to_phys(d40d->lli_phy.src), 2268 - d40c->src_def_cfg, 2269 - d40c->dma_cfg.src_info.data_width, 2270 - d40c->dma_cfg.dst_info.data_width, 2271 - d40c->dma_cfg.src_info.psize); 2272 - if 
(res < 0) 2273 - return res; 2274 - 2275 - res = d40_phy_sg_to_lli(sgl, 2276 - sgl_len, 2277 - dst_dev_addr, 2278 - d40d->lli_phy.dst, 2279 - virt_to_phys(d40d->lli_phy.dst), 2280 - d40c->dst_def_cfg, 2281 - d40c->dma_cfg.dst_info.data_width, 2282 - d40c->dma_cfg.src_info.data_width, 2283 - d40c->dma_cfg.dst_info.psize); 2284 - if (res < 0) 2285 - return res; 2286 - 2287 - (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 2288 - d40d->lli_pool.size, DMA_TO_DEVICE); 2289 - return 0; 2290 } 2291 2292 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, ··· 2075 enum dma_data_direction direction, 2076 unsigned long dma_flags) 2077 { 2078 - struct d40_desc *d40d; 2079 - struct d40_chan *d40c = container_of(chan, struct d40_chan, 2080 - chan); 2081 - unsigned long flags; 2082 - int err; 2083 2084 - if (d40c->phy_chan == NULL) { 2085 - dev_err(&d40c->chan.dev->device, 2086 - "[%s] Cannot prepare unallocated channel\n", __func__); 2087 - return ERR_PTR(-EINVAL); 2088 } 2089 2090 - spin_lock_irqsave(&d40c->lock, flags); 2091 - d40d = d40_desc_get(d40c); 2092 2093 - if (d40d == NULL) 2094 - goto err; 2095 2096 - if (d40c->log_num != D40_PHY_CHAN) 2097 - err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, 2098 - direction, dma_flags); 2099 - else 2100 - err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, 2101 - direction, dma_flags); 2102 - if (err) { 2103 - dev_err(&d40c->chan.dev->device, 2104 - "[%s] Failed to prepare %s slave sg job: %d\n", 2105 - __func__, 2106 - d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); 2107 - goto err; 2108 - } 2109 2110 - d40d->txd.flags = dma_flags; 2111 - 2112 - dma_async_tx_descriptor_init(&d40d->txd, chan); 2113 - 2114 - d40d->txd.tx_submit = d40_tx_submit; 2115 - 2116 - spin_unlock_irqrestore(&d40c->lock, flags); 2117 - return &d40d->txd; 2118 - 2119 - err: 2120 - if (d40d) 2121 - d40_desc_free(d40c, d40d); 2122 - spin_unlock_irqrestore(&d40c->lock, flags); 2123 - return NULL; 2124 } 2125 2126 static enum dma_status d40_tx_status(struct dma_chan *chan, ··· 2121 int ret; 2122 2123 if (d40c->phy_chan == NULL) { 2124 - dev_err(&d40c->chan.dev->device, 2125 - "[%s] Cannot read status of unallocated channel\n", 2126 - __func__); 2127 return -EINVAL; 2128 } 2129 ··· 2145 unsigned long flags; 2146 2147 if (d40c->phy_chan == NULL) { 2148 - dev_err(&d40c->chan.dev->device, 2149 - "[%s] Channel is not allocated!\n", __func__); 2150 return; 2151 } 2152 ··· 2238 return; 2239 } 2240 2241 - if (d40c->log_num != D40_PHY_CHAN) { 2242 if (config_maxburst >= 16) 2243 psize = STEDMA40_PSIZE_LOG_16; 2244 else if (config_maxburst >= 8) ··· 2271 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2272 2273 /* Fill in register values */ 2274 - if (d40c->log_num != D40_PHY_CHAN) 2275 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2276 else 2277 d40_phy_cfg(cfg, &d40c->src_def_cfg, ··· 2292 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2293 unsigned long arg) 2294 { 2295 - unsigned long flags; 2296 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2297 2298 if (d40c->phy_chan == NULL) { 2299 - dev_err(&d40c->chan.dev->device, 2300 - "[%s] Channel is not allocated!\n", __func__); 2301 return -EINVAL; 2302 } 2303 2304 switch (cmd) { 2305 case DMA_TERMINATE_ALL: 2306 - spin_lock_irqsave(&d40c->lock, flags); 2307 - d40_term_all(d40c); 2308 - spin_unlock_irqrestore(&d40c->lock, flags); 2309 - return 0; 2310 case DMA_PAUSE: 2311 - return d40_pause(chan); 2312 case DMA_RESUME: 2313 - return 
d40_resume(chan); 2314 case DMA_SLAVE_CONFIG: 2315 d40_set_runtime_config(chan, 2316 (struct dma_slave_config *) arg); ··· 2350 } 2351 } 2352 2353 static int __init d40_dmaengine_init(struct d40_base *base, 2354 int num_reserved_chans) 2355 { ··· 2389 2390 dma_cap_zero(base->dma_slave.cap_mask); 2391 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2392 2393 - base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; 2394 - base->dma_slave.device_free_chan_resources = d40_free_chan_resources; 2395 - base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; 2396 - base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2397 - base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; 2398 - base->dma_slave.device_tx_status = d40_tx_status; 2399 - base->dma_slave.device_issue_pending = d40_issue_pending; 2400 - base->dma_slave.device_control = d40_control; 2401 - base->dma_slave.dev = base->dev; 2402 2403 err = dma_async_device_register(&base->dma_slave); 2404 2405 if (err) { 2406 - dev_err(base->dev, 2407 - "[%s] Failed to register slave channels\n", 2408 - __func__); 2409 goto failure1; 2410 } 2411 ··· 2405 2406 dma_cap_zero(base->dma_memcpy.cap_mask); 2407 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2408 - dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2409 2410 - base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; 2411 - base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; 2412 - base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; 2413 - base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2414 - base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; 2415 - base->dma_memcpy.device_tx_status = d40_tx_status; 2416 - base->dma_memcpy.device_issue_pending = d40_issue_pending; 2417 - base->dma_memcpy.device_control = d40_control; 2418 - base->dma_memcpy.dev = base->dev; 2419 - /* 2420 - * This controller can only access address at even 2421 - * 32bit boundaries, i.e. 2^2 2422 - */ 2423 - base->dma_memcpy.copy_align = 2; 2424 2425 err = dma_async_device_register(&base->dma_memcpy); 2426 2427 if (err) { 2428 - dev_err(base->dev, 2429 - "[%s] Failed to regsiter memcpy only channels\n", 2430 - __func__); 2431 goto failure2; 2432 } 2433 ··· 2423 dma_cap_zero(base->dma_both.cap_mask); 2424 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2425 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2426 - dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2427 2428 - base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; 2429 - base->dma_both.device_free_chan_resources = d40_free_chan_resources; 2430 - base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; 2431 - base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2432 - base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; 2433 - base->dma_both.device_tx_status = d40_tx_status; 2434 - base->dma_both.device_issue_pending = d40_issue_pending; 2435 - base->dma_both.device_control = d40_control; 2436 - base->dma_both.dev = base->dev; 2437 - base->dma_both.copy_align = 2; 2438 err = dma_async_device_register(&base->dma_both); 2439 2440 if (err) { 2441 - dev_err(base->dev, 2442 - "[%s] Failed to register logical and physical capable channels\n", 2443 - __func__); 2444 goto failure3; 2445 } 2446 return 0; ··· 2507 { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, 2508 /* 2509 * D40_DREG_PERIPHID2 Depends on HW revision: 2510 - * MOP500/HREF ED has 0x0008, 2511 * ? 
has 0x0018, 2512 - * HREF V1 has 0x0028 2513 */ 2514 { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, 2515 ··· 2534 clk = clk_get(&pdev->dev, NULL); 2535 2536 if (IS_ERR(clk)) { 2537 - dev_err(&pdev->dev, "[%s] No matching clock found\n", 2538 - __func__); 2539 goto failure; 2540 } 2541 ··· 2557 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2558 if (dma_id_regs[i].val != 2559 readl(virtbase + dma_id_regs[i].reg)) { 2560 - dev_err(&pdev->dev, 2561 - "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", 2562 - __func__, 2563 dma_id_regs[i].val, 2564 dma_id_regs[i].reg, 2565 readl(virtbase + dma_id_regs[i].reg)); ··· 2571 2572 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != 2573 D40_HW_DESIGNER) { 2574 - dev_err(&pdev->dev, 2575 - "[%s] Unknown designer! Got %x wanted %x\n", 2576 - __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK, 2577 D40_HW_DESIGNER); 2578 goto failure; 2579 } ··· 2602 sizeof(struct d40_chan), GFP_KERNEL); 2603 2604 if (base == NULL) { 2605 - dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); 2606 goto failure; 2607 } 2608 ··· 2749 2750 static int __init d40_lcla_allocate(struct d40_base *base) 2751 { 2752 unsigned long *page_list; 2753 int i, j; 2754 int ret = 0; ··· 2775 base->lcla_pool.pages); 2776 if (!page_list[i]) { 2777 2778 - dev_err(base->dev, 2779 - "[%s] Failed to allocate %d pages.\n", 2780 - __func__, base->lcla_pool.pages); 2781 2782 for (j = 0; j < i; j++) 2783 free_pages(page_list[j], base->lcla_pool.pages); ··· 2814 LCLA_ALIGNMENT); 2815 } 2816 2817 writel(virt_to_phys(base->lcla_pool.base), 2818 base->virtbase + D40_DREG_LCLA); 2819 failure: ··· 2855 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 2856 if (!res) { 2857 ret = -ENOENT; 2858 - dev_err(&pdev->dev, 2859 - "[%s] No \"lcpa\" memory resource\n", 2860 - __func__); 2861 goto failure; 2862 } 2863 base->lcpa_size = resource_size(res); ··· 2864 if (request_mem_region(res->start, resource_size(res), 2865 D40_NAME " I/O lcpa") == NULL) { 2866 ret = -EBUSY; 2867 - dev_err(&pdev->dev, 2868 - "[%s] Failed to request LCPA region 0x%x-0x%x\n", 2869 - __func__, res->start, res->end); 2870 goto failure; 2871 } 2872 ··· 2882 base->lcpa_base = ioremap(res->start, resource_size(res)); 2883 if (!base->lcpa_base) { 2884 ret = -ENOMEM; 2885 - dev_err(&pdev->dev, 2886 - "[%s] Failed to ioremap LCPA region\n", 2887 - __func__); 2888 goto failure; 2889 } 2890 2891 ret = d40_lcla_allocate(base); 2892 if (ret) { 2893 - dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", 2894 - __func__); 2895 goto failure; 2896 } 2897 ··· 2897 base->irq = platform_get_irq(pdev, 0); 2898 2899 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 2900 - 2901 if (ret) { 2902 - dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); 2903 goto failure; 2904 } 2905 ··· 2917 kmem_cache_destroy(base->desc_slab); 2918 if (base->virtbase) 2919 iounmap(base->virtbase); 2920 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 2921 free_pages((unsigned long)base->lcla_pool.base, 2922 base->lcla_pool.pages); ··· 2947 kfree(base); 2948 } 2949 2950 - dev_err(&pdev->dev, "[%s] probe failed\n", __func__); 2951 return ret; 2952 } 2953 ··· 2958 }, 2959 }; 2960 2961 - int __init stedma40_init(void) 2962 { 2963 return platform_driver_probe(&d40_driver, d40_probe); 2964 }
··· 68 * @base: Pointer to memory area when the pre_alloc_lli's are not large 69 * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if 70 * pre_alloc_lli is used. 71 + * @dma_addr: DMA address, if mapped 72 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 73 * @pre_alloc_lli: Pre allocated area for the most common case of transfers, 74 * one buffer to one buffer. ··· 75 struct d40_lli_pool { 76 void *base; 77 int size; 78 + dma_addr_t dma_addr; 79 /* Space for dst and src, plus an extra for padding */ 80 u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; 81 }; ··· 94 * during a transfer. 95 * @node: List entry. 96 * @is_in_client_list: true if the client owns this descriptor. 97 * the previous one. 98 * 99 * This descriptor is used for both logical and physical transfers. ··· 114 struct list_head node; 115 116 bool is_in_client_list; 117 + bool cyclic; 118 }; 119 120 /** ··· 130 */ 131 struct d40_lcla_pool { 132 void *base; 133 + dma_addr_t dma_addr; 134 void *base_unaligned; 135 int pages; 136 spinlock_t lock; ··· 303 unsigned int val; 304 }; 305 306 + static struct device *chan2dev(struct d40_chan *d40c) 307 { 308 + return &d40c->chan.dev->device; 309 + } 310 + 311 + static bool chan_is_physical(struct d40_chan *chan) 312 + { 313 + return chan->log_num == D40_PHY_CHAN; 314 + } 315 + 316 + static bool chan_is_logical(struct d40_chan *chan) 317 + { 318 + return !chan_is_physical(chan); 319 + } 320 + 321 + static void __iomem *chan_base(struct d40_chan *chan) 322 + { 323 + return chan->base->virtbase + D40_DREG_PCBASE + 324 + chan->phy_chan->num * D40_DREG_PCDELTA; 325 + } 326 + 327 + #define d40_err(dev, format, arg...) \ 328 + dev_err(dev, "[%s] " format, __func__, ## arg) 329 + 330 + #define chan_err(d40c, format, arg...) 
\ 331 + d40_err(chan2dev(d40c), format, ## arg) 332 + 333 + static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d, 334 + int lli_len) 335 + { 336 + bool is_log = chan_is_logical(d40c); 337 u32 align; 338 void *base; 339 ··· 319 d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 320 d40d->lli_pool.base = NULL; 321 } else { 322 + d40d->lli_pool.size = lli_len * 2 * align; 323 324 base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 325 d40d->lli_pool.base = base; ··· 329 } 330 331 if (is_log) { 332 + d40d->lli_log.src = PTR_ALIGN(base, align); 333 + d40d->lli_log.dst = d40d->lli_log.src + lli_len; 334 + 335 + d40d->lli_pool.dma_addr = 0; 336 } else { 337 + d40d->lli_phy.src = PTR_ALIGN(base, align); 338 + d40d->lli_phy.dst = d40d->lli_phy.src + lli_len; 339 + 340 + d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev, 341 + d40d->lli_phy.src, 342 + d40d->lli_pool.size, 343 + DMA_TO_DEVICE); 344 + 345 + if (dma_mapping_error(d40c->base->dev, 346 + d40d->lli_pool.dma_addr)) { 347 + kfree(d40d->lli_pool.base); 348 + d40d->lli_pool.base = NULL; 349 + d40d->lli_pool.dma_addr = 0; 350 + return -ENOMEM; 351 + } 352 } 353 354 return 0; 355 } 356 357 + static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d) 358 { 359 + if (d40d->lli_pool.dma_addr) 360 + dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr, 361 + d40d->lli_pool.size, DMA_TO_DEVICE); 362 + 363 kfree(d40d->lli_pool.base); 364 d40d->lli_pool.base = NULL; 365 d40d->lli_pool.size = 0; ··· 391 int i; 392 int ret = -EINVAL; 393 394 + if (chan_is_physical(d40c)) 395 return 0; 396 397 spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); ··· 430 431 list_for_each_entry_safe(d, _d, &d40c->client, node) 432 if (async_tx_test_ack(&d->txd)) { 433 + d40_pool_lli_free(d40c, d); 434 d40_desc_remove(d); 435 desc = d; 436 memset(desc, 0, sizeof(*desc)); ··· 450 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 451 { 452 453 + d40_pool_lli_free(d40c, d40d); 454 d40_lcla_free_all(d40c, d40d); 455 kmem_cache_free(d40c->base->desc_slab, d40d); 456 } ··· 459 list_add_tail(&desc->node, &d40c->active); 460 } 461 462 + static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc) 463 { 464 + struct d40_phy_lli *lli_dst = desc->lli_phy.dst; 465 + struct d40_phy_lli *lli_src = desc->lli_phy.src; 466 + void __iomem *base = chan_base(chan); 467 468 + writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG); 469 + writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT); 470 + writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR); 471 + writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK); 472 473 + writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG); 474 + writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT); 475 + writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR); 476 + writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK); 477 + } 478 479 + static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc) 480 + { 481 + struct d40_lcla_pool *pool = &chan->base->lcla_pool; 482 + struct d40_log_lli_bidir *lli = &desc->lli_log; 483 + int lli_current = desc->lli_current; 484 + int lli_len = desc->lli_len; 485 + bool cyclic = desc->cyclic; 486 + int curr_lcla = -EINVAL; 487 + int first_lcla = 0; 488 + bool linkback; 489 490 + /* 491 + * We may have partially running cyclic transfers, in case we did't get 492 + * enough LCLA entries. 
493 + */ 494 + linkback = cyclic && lli_current == 0; 495 496 + /* 497 + * For linkback, we need one LCLA even with only one link, because we 498 + * can't link back to the one in LCPA space 499 + */ 500 + if (linkback || (lli_len - lli_current > 1)) { 501 + curr_lcla = d40_lcla_alloc_one(chan, desc); 502 + first_lcla = curr_lcla; 503 + } 504 505 + /* 506 + * For linkback, we normally load the LCPA in the loop since we need to 507 + * link it to the second LCLA and not the first. However, if we 508 + * couldn't even get a first LCLA, then we have to run in LCPA and 509 + * reload manually. 510 + */ 511 + if (!linkback || curr_lcla == -EINVAL) { 512 + unsigned int flags = 0; 513 514 + if (curr_lcla == -EINVAL) 515 + flags |= LLI_TERM_INT; 516 517 + d40_log_lli_lcpa_write(chan->lcpa, 518 + &lli->dst[lli_current], 519 + &lli->src[lli_current], 520 + curr_lcla, 521 + flags); 522 + lli_current++; 523 + } 524 525 + if (curr_lcla < 0) 526 + goto out; 527 528 + for (; lli_current < lli_len; lli_current++) { 529 + unsigned int lcla_offset = chan->phy_chan->num * 1024 + 530 + 8 * curr_lcla * 2; 531 + struct d40_log_lli *lcla = pool->base + lcla_offset; 532 + unsigned int flags = 0; 533 + int next_lcla; 534 535 + if (lli_current + 1 < lli_len) 536 + next_lcla = d40_lcla_alloc_one(chan, desc); 537 + else 538 + next_lcla = linkback ? first_lcla : -EINVAL; 539 + 540 + if (cyclic || next_lcla == -EINVAL) 541 + flags |= LLI_TERM_INT; 542 + 543 + if (linkback && curr_lcla == first_lcla) { 544 + /* First link goes in both LCPA and LCLA */ 545 + d40_log_lli_lcpa_write(chan->lcpa, 546 + &lli->dst[lli_current], 547 + &lli->src[lli_current], 548 + next_lcla, flags); 549 + } 550 + 551 + /* 552 + * One unused LCLA in the cyclic case if the very first 553 + * next_lcla fails... 554 + */ 555 + d40_log_lli_lcla_write(lcla, 556 + &lli->dst[lli_current], 557 + &lli->src[lli_current], 558 + next_lcla, flags); 559 + 560 + dma_sync_single_range_for_device(chan->base->dev, 561 + pool->dma_addr, lcla_offset, 562 + 2 * sizeof(struct d40_log_lli), 563 + DMA_TO_DEVICE); 564 + 565 + curr_lcla = next_lcla; 566 + 567 + if (curr_lcla == -EINVAL || curr_lcla == first_lcla) { 568 + lli_current++; 569 + break; 570 } 571 } 572 + 573 + out: 574 + desc->lli_current = lli_current; 575 + } 576 + 577 + static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 578 + { 579 + if (chan_is_physical(d40c)) { 580 + d40_phy_lli_load(d40c, d40d); 581 + d40d->lli_current = d40d->lli_len; 582 + } else 583 + d40_log_lli_to_lcxa(d40c, d40d); 584 } 585 586 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) ··· 540 d = list_first_entry(&d40c->queue, 541 struct d40_desc, 542 node); 543 return d; 544 } 545 ··· 666 } 667 668 if (i == D40_SUSPEND_MAX_IT) { 669 + chan_err(d40c, 670 + "unable to suspend the chl %d (log: %d) status %x\n", 671 + d40c->phy_chan->num, d40c->log_num, 672 status); 673 dump_stack(); 674 ret = -EBUSY; ··· 701 d40c->busy = false; 702 } 703 704 + static void __d40_config_set_event(struct d40_chan *d40c, bool enable, 705 + u32 event, int reg) 706 + { 707 + void __iomem *addr = chan_base(d40c) + reg; 708 + int tries; 709 + 710 + if (!enable) { 711 + writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 712 + | ~D40_EVENTLINE_MASK(event), addr); 713 + return; 714 + } 715 + 716 + /* 717 + * The hardware sometimes doesn't register the enable when src and dst 718 + * event lines are active on the same logical channel. Retry to ensure 719 + * it does. Usually only one retry is sufficient. 
720 + */ 721 + tries = 100; 722 + while (--tries) { 723 + writel((D40_ACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) 724 + | ~D40_EVENTLINE_MASK(event), addr); 725 + 726 + if (readl(addr) & D40_EVENTLINE_MASK(event)) 727 + break; 728 + } 729 + 730 + if (tries != 99) 731 + dev_dbg(chan2dev(d40c), 732 + "[%s] workaround enable S%cLNK (%d tries)\n", 733 + __func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D', 734 + 100 - tries); 735 + 736 + WARN_ON(!tries); 737 + } 738 + 739 static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 740 { 741 unsigned long flags; 742 743 spin_lock_irqsave(&d40c->phy_chan->lock, flags); 744 ··· 719 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 720 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 721 722 + __d40_config_set_event(d40c, do_enable, event, 723 + D40_CHAN_REG_SSLNK); 724 } 725 + 726 if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 727 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 728 729 + __d40_config_set_event(d40c, do_enable, event, 730 + D40_CHAN_REG_SDLNK); 731 } 732 733 spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); ··· 740 741 static u32 d40_chan_has_events(struct d40_chan *d40c) 742 { 743 + void __iomem *chanbase = chan_base(d40c); 744 u32 val; 745 746 + val = readl(chanbase + D40_CHAN_REG_SSLNK); 747 + val |= readl(chanbase + D40_CHAN_REG_SDLNK); 748 749 return val; 750 } 751 ··· 771 = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, 772 }; 773 774 + if (chan_is_physical(d40c)) 775 return phy_map[d40c->dma_cfg.mode_opt]; 776 else 777 return log_map[d40c->dma_cfg.mode_opt]; ··· 785 /* Odd addresses are even addresses + 4 */ 786 addr_base = (d40c->phy_chan->num % 2) * 4; 787 /* Setup channel mode to logical or physical */ 788 + var = ((u32)(chan_is_logical(d40c)) + 1) << 789 D40_CHAN_POS(d40c->phy_chan->num); 790 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 791 ··· 794 795 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 796 797 + if (chan_is_logical(d40c)) { 798 + int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) 799 + & D40_SREG_ELEM_LOG_LIDX_MASK; 800 + void __iomem *chanbase = chan_base(d40c); 801 + 802 /* Set default config for CFG reg */ 803 + writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG); 804 + writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG); 805 806 /* Set LIDX for lcla */ 807 + writel(lidx, chanbase + D40_CHAN_REG_SSELT); 808 + writel(lidx, chanbase + D40_CHAN_REG_SDELT); 809 } 810 } 811 ··· 825 { 826 u32 num_elt; 827 828 + if (chan_is_logical(d40c)) 829 num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 830 >> D40_MEM_LCSP2_ECNT_POS; 831 + else { 832 + u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT); 833 + num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK) 834 + >> D40_SREG_ELEM_PHY_ECNT_POS; 835 + } 836 + 837 return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 838 } 839 ··· 841 { 842 bool is_link; 843 844 + if (chan_is_logical(d40c)) 845 is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 846 else 847 + is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK) 848 + & D40_SREG_LNK_PHYS_LNK_MASK; 849 + 850 return is_link; 851 } 852 853 + static int d40_pause(struct d40_chan *d40c) 854 { 855 int res = 0; 856 unsigned long flags; 857 ··· 865 866 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 867 if (res == 0) { 868 + if (chan_is_logical(d40c)) { 869 d40_config_set_event(d40c, false); 870 /* Resume the other logical channels if any */ 871 if (d40_chan_has_events(d40c)) ··· 878 return res; 879 } 
880 881 + static int d40_resume(struct d40_chan *d40c) 882 { 883 int res = 0; 884 unsigned long flags; 885 ··· 891 spin_lock_irqsave(&d40c->lock, flags); 892 893 if (d40c->base->rev == 0) 894 + if (chan_is_logical(d40c)) { 895 res = d40_channel_execute_command(d40c, 896 D40_DMA_SUSPEND_REQ); 897 goto no_suspend; ··· 900 /* If bytes left to transfer or linked tx resume job */ 901 if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 902 903 + if (chan_is_logical(d40c)) 904 d40_config_set_event(d40c, true); 905 906 res = d40_channel_execute_command(d40c, D40_DMA_RUN); ··· 911 return res; 912 } 913 914 + static int d40_terminate_all(struct d40_chan *chan) 915 { 916 + unsigned long flags; 917 + int ret = 0; 918 919 + ret = d40_pause(chan); 920 + if (!ret && chan_is_physical(chan)) 921 + ret = d40_channel_execute_command(chan, D40_DMA_STOP); 922 923 + spin_lock_irqsave(&chan->lock, flags); 924 + d40_term_all(chan); 925 + spin_unlock_irqrestore(&chan->lock, flags); 926 927 + return ret; 928 } 929 930 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) ··· 990 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 991 unsigned long flags; 992 993 spin_lock_irqsave(&d40c->lock, flags); 994 995 d40c->chan.cookie++; ··· 1001 1002 d40d->txd.cookie = d40c->chan.cookie; 1003 1004 d40_desc_queue(d40c, d40d); 1005 1006 spin_unlock_irqrestore(&d40c->lock, flags); 1007 1008 return tx->cookie; 1009 } ··· 1020 if (d40c->base->rev == 0) { 1021 int err; 1022 1023 + if (chan_is_logical(d40c)) { 1024 err = d40_channel_execute_command(d40c, 1025 D40_DMA_SUSPEND_REQ); 1026 if (err) ··· 1028 } 1029 } 1030 1031 + if (chan_is_logical(d40c)) 1032 d40_config_set_event(d40c, true); 1033 1034 return d40_channel_execute_command(d40c, D40_DMA_RUN); ··· 1051 /* Add to active queue */ 1052 d40_desc_submit(d40c, d40d); 1053 1054 + /* Initiate DMA job */ 1055 + d40_desc_load(d40c, d40d); 1056 1057 + /* Start dma job */ 1058 + err = d40_start(d40c); 1059 1060 + if (err) 1061 + return NULL; 1062 } 1063 1064 return d40d; ··· 1082 if (d40d == NULL) 1083 return; 1084 1085 + if (d40d->cyclic) { 1086 + /* 1087 + * If this was a paritially loaded list, we need to reloaded 1088 + * it, and only when the list is completed. We need to check 1089 + * for done because the interrupt will hit for every link, and 1090 + * not just the last one. 1091 + */ 1092 + if (d40d->lli_current < d40d->lli_len 1093 + && !d40_tx_is_linked(d40c) 1094 + && !d40_residue(d40c)) { 1095 + d40_lcla_free_all(d40c, d40d); 1096 + d40_desc_load(d40c, d40d); 1097 + (void) d40_start(d40c); 1098 1099 + if (d40d->lli_current == d40d->lli_len) 1100 + d40d->lli_current = 0; 1101 + } 1102 + } else { 1103 + d40_lcla_free_all(d40c, d40d); 1104 + 1105 + if (d40d->lli_current < d40d->lli_len) { 1106 + d40_desc_load(d40c, d40d); 1107 + /* Start dma job */ 1108 + (void) d40_start(d40c); 1109 + return; 1110 + } 1111 + 1112 + if (d40_queue_start(d40c) == NULL) 1113 + d40c->busy = false; 1114 } 1115 1116 d40c->pending_tx++; 1117 tasklet_schedule(&d40c->tasklet); ··· 1111 1112 /* Get first active entry from list */ 1113 d40d = d40_first_active_get(d40c); 1114 if (d40d == NULL) 1115 goto err; 1116 1117 + if (!d40d->cyclic) 1118 + d40c->completed = d40d->txd.cookie; 1119 1120 /* 1121 * If terminating a channel pending_tx is set to zero. 
··· 1130 callback = d40d->txd.callback; 1131 callback_param = d40d->txd.callback_param; 1132 1133 + if (!d40d->cyclic) { 1134 + if (async_tx_test_ack(&d40d->txd)) { 1135 + d40_pool_lli_free(d40c, d40d); 1136 d40_desc_remove(d40d); 1137 + d40_desc_free(d40c, d40d); 1138 + } else { 1139 + if (!d40d->is_in_client_list) { 1140 + d40_desc_remove(d40d); 1141 + d40_lcla_free_all(d40c, d40d); 1142 + list_add_tail(&d40d->node, &d40c->client); 1143 + d40d->is_in_client_list = true; 1144 + } 1145 } 1146 } 1147 ··· 1216 if (!il[row].is_error) 1217 dma_tc_handle(d40c); 1218 else 1219 + d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n", 1220 + chan, il[row].offset, idx); 1221 1222 spin_unlock(&d40c->lock); 1223 } ··· 1237 bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1238 1239 if (!conf->dir) { 1240 + chan_err(d40c, "Invalid direction.\n"); 1241 res = -EINVAL; 1242 } 1243 ··· 1246 d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && 1247 d40c->runtime_addr == 0) { 1248 1249 + chan_err(d40c, "Invalid TX channel address (%d)\n", 1250 + conf->dst_dev_type); 1251 res = -EINVAL; 1252 } 1253 1254 if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && 1255 d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && 1256 d40c->runtime_addr == 0) { 1257 + chan_err(d40c, "Invalid RX channel address (%d)\n", 1258 + conf->src_dev_type); 1259 res = -EINVAL; 1260 } 1261 1262 if (conf->dir == STEDMA40_MEM_TO_PERIPH && 1263 dst_event_group == STEDMA40_DEV_DST_MEMORY) { 1264 + chan_err(d40c, "Invalid dst\n"); 1265 res = -EINVAL; 1266 } 1267 1268 if (conf->dir == STEDMA40_PERIPH_TO_MEM && 1269 src_event_group == STEDMA40_DEV_SRC_MEMORY) { 1270 + chan_err(d40c, "Invalid src\n"); 1271 res = -EINVAL; 1272 } 1273 1274 if (src_event_group == STEDMA40_DEV_SRC_MEMORY && 1275 dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { 1276 + chan_err(d40c, "No event line\n"); 1277 res = -EINVAL; 1278 } 1279 1280 if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && 1281 (src_event_group != dst_event_group)) { 1282 + chan_err(d40c, "Invalid event group\n"); 1283 res = -EINVAL; 1284 } 1285 ··· 1294 * DMAC HW supports it. Will be added to this driver, 1295 * in case any dma client requires it. 
1296 */ 1297 + chan_err(d40c, "periph to periph not supported\n"); 1298 res = -EINVAL; 1299 } 1300 ··· 1309 * src (burst x width) == dst (burst x width) 1310 */ 1311 1312 + chan_err(d40c, "src (burst x width) != dst (burst x width)\n"); 1313 res = -EINVAL; 1314 } 1315 ··· 1514 dma_has_cap(DMA_SLAVE, cap)) { 1515 d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 1516 } else { 1517 + chan_err(d40c, "No memcpy\n"); 1518 return -EINVAL; 1519 } 1520 ··· 1540 /* Release client owned descriptors */ 1541 if (!list_empty(&d40c->client)) 1542 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1543 + d40_pool_lli_free(d40c, d); 1544 d40_desc_remove(d); 1545 d40_desc_free(d40c, d); 1546 } 1547 1548 if (phy == NULL) { 1549 + chan_err(d40c, "phy == null\n"); 1550 return -EINVAL; 1551 } 1552 1553 if (phy->allocated_src == D40_ALLOC_FREE && 1554 phy->allocated_dst == D40_ALLOC_FREE) { 1555 + chan_err(d40c, "channel already free\n"); 1556 return -EINVAL; 1557 } 1558 ··· 1566 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1567 is_src = true; 1568 } else { 1569 + chan_err(d40c, "Unknown direction\n"); 1570 return -EINVAL; 1571 } 1572 1573 res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1574 if (res) { 1575 + chan_err(d40c, "suspend failed\n"); 1576 return res; 1577 } 1578 1579 + if (chan_is_logical(d40c)) { 1580 /* Release logical channel, deactivate the event line */ 1581 1582 d40_config_set_event(d40c, false); ··· 1594 res = d40_channel_execute_command(d40c, 1595 D40_DMA_RUN); 1596 if (res) { 1597 + chan_err(d40c, 1598 + "Executing RUN command\n"); 1599 return res; 1600 } 1601 } ··· 1609 /* Release physical channel */ 1610 res = d40_channel_execute_command(d40c, D40_DMA_STOP); 1611 if (res) { 1612 + chan_err(d40c, "Failed to stop channel\n"); 1613 return res; 1614 } 1615 d40c->phy_chan = NULL; ··· 1622 1623 static bool d40_is_paused(struct d40_chan *d40c) 1624 { 1625 + void __iomem *chanbase = chan_base(d40c); 1626 bool is_paused = false; 1627 unsigned long flags; 1628 void __iomem *active_reg; ··· 1630 1631 spin_lock_irqsave(&d40c->lock, flags); 1632 1633 + if (chan_is_physical(d40c)) { 1634 if (d40c->phy_chan->num % 2 == 0) 1635 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1636 else ··· 1648 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1649 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1650 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1651 + status = readl(chanbase + D40_CHAN_REG_SDLNK); 1652 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1653 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1654 + status = readl(chanbase + D40_CHAN_REG_SSLNK); 1655 } else { 1656 + chan_err(d40c, "Unknown direction\n"); 1657 goto _exit; 1658 } 1659 ··· 1688 return bytes_left; 1689 } 1690 1691 + static int 1692 + d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc, 1693 + struct scatterlist *sg_src, struct scatterlist *sg_dst, 1694 + unsigned int sg_len, dma_addr_t src_dev_addr, 1695 + dma_addr_t dst_dev_addr) 1696 { 1697 + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 1698 + struct stedma40_half_channel_info *src_info = &cfg->src_info; 1699 + struct stedma40_half_channel_info *dst_info = &cfg->dst_info; 1700 + int ret; 1701 1702 + ret = d40_log_sg_to_lli(sg_src, sg_len, 1703 + src_dev_addr, 1704 + desc->lli_log.src, 1705 + chan->log_def.lcsp1, 1706 + src_info->data_width, 1707 + dst_info->data_width); 1708 1709 + ret = d40_log_sg_to_lli(sg_dst, sg_len, 1710 + dst_dev_addr, 1711 + desc->lli_log.dst, 1712 + chan->log_def.lcsp3, 1713 + 
dst_info->data_width, 1714 + src_info->data_width); 1715 1716 + return ret < 0 ? ret : 0; 1717 + } 1718 + 1719 + static int 1720 + d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc, 1721 + struct scatterlist *sg_src, struct scatterlist *sg_dst, 1722 + unsigned int sg_len, dma_addr_t src_dev_addr, 1723 + dma_addr_t dst_dev_addr) 1724 + { 1725 + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 1726 + struct stedma40_half_channel_info *src_info = &cfg->src_info; 1727 + struct stedma40_half_channel_info *dst_info = &cfg->dst_info; 1728 + unsigned long flags = 0; 1729 + int ret; 1730 + 1731 + if (desc->cyclic) 1732 + flags |= LLI_CYCLIC | LLI_TERM_INT; 1733 + 1734 + ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr, 1735 + desc->lli_phy.src, 1736 + virt_to_phys(desc->lli_phy.src), 1737 + chan->src_def_cfg, 1738 + src_info, dst_info, flags); 1739 + 1740 + ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr, 1741 + desc->lli_phy.dst, 1742 + virt_to_phys(desc->lli_phy.dst), 1743 + chan->dst_def_cfg, 1744 + dst_info, src_info, flags); 1745 + 1746 + dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr, 1747 + desc->lli_pool.size, DMA_TO_DEVICE); 1748 + 1749 + return ret < 0 ? ret : 0; 1750 + } 1751 + 1752 + 1753 + static struct d40_desc * 1754 + d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg, 1755 + unsigned int sg_len, unsigned long dma_flags) 1756 + { 1757 + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 1758 + struct d40_desc *desc; 1759 + int ret; 1760 + 1761 + desc = d40_desc_get(chan); 1762 + if (!desc) 1763 + return NULL; 1764 + 1765 + desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width, 1766 + cfg->dst_info.data_width); 1767 + if (desc->lli_len < 0) { 1768 + chan_err(chan, "Unaligned size\n"); 1769 goto err; 1770 + } 1771 1772 + ret = d40_pool_lli_alloc(chan, desc, desc->lli_len); 1773 + if (ret < 0) { 1774 + chan_err(chan, "Could not allocate lli\n"); 1775 goto err; 1776 } 1777 1778 1779 + desc->lli_current = 0; 1780 + desc->txd.flags = dma_flags; 1781 + desc->txd.tx_submit = d40_tx_submit; 1782 1783 + dma_async_tx_descriptor_init(&desc->txd, &chan->chan); 1784 1785 + return desc; 1786 1787 err: 1788 + d40_desc_free(chan, desc); 1789 return NULL; 1790 } 1791 + 1792 + static dma_addr_t 1793 + d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction) 1794 + { 1795 + struct stedma40_platform_data *plat = chan->base->plat_data; 1796 + struct stedma40_chan_cfg *cfg = &chan->dma_cfg; 1797 + dma_addr_t addr; 1798 + 1799 + if (chan->runtime_addr) 1800 + return chan->runtime_addr; 1801 + 1802 + if (direction == DMA_FROM_DEVICE) 1803 + addr = plat->dev_rx[cfg->src_dev_type]; 1804 + else if (direction == DMA_TO_DEVICE) 1805 + addr = plat->dev_tx[cfg->dst_dev_type]; 1806 + 1807 + return addr; 1808 + } 1809 + 1810 + static struct dma_async_tx_descriptor * 1811 + d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src, 1812 + struct scatterlist *sg_dst, unsigned int sg_len, 1813 + enum dma_data_direction direction, unsigned long dma_flags) 1814 + { 1815 + struct d40_chan *chan = container_of(dchan, struct d40_chan, chan); 1816 + dma_addr_t src_dev_addr = 0; 1817 + dma_addr_t dst_dev_addr = 0; 1818 + struct d40_desc *desc; 1819 + unsigned long flags; 1820 + int ret; 1821 + 1822 + if (!chan->phy_chan) { 1823 + chan_err(chan, "Cannot prepare unallocated channel\n"); 1824 + return NULL; 1825 + } 1826 + 1827 + 1828 + spin_lock_irqsave(&chan->lock, flags); 1829 + 1830 + desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags); 
1831 + if (desc == NULL) 1832 + goto err; 1833 + 1834 + if (sg_next(&sg_src[sg_len - 1]) == sg_src) 1835 + desc->cyclic = true; 1836 + 1837 + if (direction != DMA_NONE) { 1838 + dma_addr_t dev_addr = d40_get_dev_addr(chan, direction); 1839 + 1840 + if (direction == DMA_FROM_DEVICE) 1841 + src_dev_addr = dev_addr; 1842 + else if (direction == DMA_TO_DEVICE) 1843 + dst_dev_addr = dev_addr; 1844 + } 1845 + 1846 + if (chan_is_logical(chan)) 1847 + ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst, 1848 + sg_len, src_dev_addr, dst_dev_addr); 1849 + else 1850 + ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst, 1851 + sg_len, src_dev_addr, dst_dev_addr); 1852 + 1853 + if (ret) { 1854 + chan_err(chan, "Failed to prepare %s sg job: %d\n", 1855 + chan_is_logical(chan) ? "log" : "phy", ret); 1856 + goto err; 1857 + } 1858 + 1859 + spin_unlock_irqrestore(&chan->lock, flags); 1860 + 1861 + return &desc->txd; 1862 + 1863 + err: 1864 + if (desc) 1865 + d40_desc_free(chan, desc); 1866 + spin_unlock_irqrestore(&chan->lock, flags); 1867 + return NULL; 1868 + } 1869 1870 bool stedma40_filter(struct dma_chan *chan, void *data) 1871 { ··· 1818 } 1819 EXPORT_SYMBOL(stedma40_filter); 1820 1821 + static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src) 1822 + { 1823 + bool realtime = d40c->dma_cfg.realtime; 1824 + bool highprio = d40c->dma_cfg.high_priority; 1825 + u32 prioreg = highprio ? D40_DREG_PSEG1 : D40_DREG_PCEG1; 1826 + u32 rtreg = realtime ? D40_DREG_RSEG1 : D40_DREG_RCEG1; 1827 + u32 event = D40_TYPE_TO_EVENT(dev_type); 1828 + u32 group = D40_TYPE_TO_GROUP(dev_type); 1829 + u32 bit = 1 << event; 1830 + 1831 + /* Destination event lines are stored in the upper halfword */ 1832 + if (!src) 1833 + bit <<= 16; 1834 + 1835 + writel(bit, d40c->base->virtbase + prioreg + group * 4); 1836 + writel(bit, d40c->base->virtbase + rtreg + group * 4); 1837 + } 1838 + 1839 + static void d40_set_prio_realtime(struct d40_chan *d40c) 1840 + { 1841 + if (d40c->base->rev < 3) 1842 + return; 1843 + 1844 + if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 1845 + (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 1846 + __d40_set_prio_rt(d40c, d40c->dma_cfg.src_dev_type, true); 1847 + 1848 + if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || 1849 + (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 1850 + __d40_set_prio_rt(d40c, d40c->dma_cfg.dst_dev_type, false); 1851 + } 1852 + 1853 /* DMA ENGINE functions */ 1854 static int d40_alloc_chan_resources(struct dma_chan *chan) 1855 { ··· 1834 if (!d40c->configured) { 1835 err = d40_config_memcpy(d40c); 1836 if (err) { 1837 + chan_err(d40c, "Failed to configure memcpy channel\n"); 1838 goto fail; 1839 } 1840 } ··· 1844 1845 err = d40_allocate_channel(d40c); 1846 if (err) { 1847 + chan_err(d40c, "Failed to allocate channel\n"); 1848 goto fail; 1849 } 1850 1851 /* Fill in basic CFG register values */ 1852 d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, 1853 + &d40c->dst_def_cfg, chan_is_logical(d40c)); 1854 1855 + d40_set_prio_realtime(d40c); 1856 + 1857 + if (chan_is_logical(d40c)) { 1858 d40_log_cfg(&d40c->dma_cfg, 1859 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1860 ··· 1886 unsigned long flags; 1887 1888 if (d40c->phy_chan == NULL) { 1889 + chan_err(d40c, "Cannot free unallocated channel\n"); 1890 return; 1891 } 1892 ··· 1897 err = d40_free_dma(d40c); 1898 1899 if (err) 1900 + chan_err(d40c, "Failed to free channel\n"); 1901 spin_unlock_irqrestore(&d40c->lock, flags); 1902 } 1903 ··· 1908 size_t size, 1909 unsigned long dma_flags) 1910 { 1911 + struct 
scatterlist dst_sg; 1912 + struct scatterlist src_sg; 1913 1914 + sg_init_table(&dst_sg, 1); 1915 + sg_init_table(&src_sg, 1); 1916 1917 + sg_dma_address(&dst_sg) = dst; 1918 + sg_dma_address(&src_sg) = src; 1919 1920 + sg_dma_len(&dst_sg) = size; 1921 + sg_dma_len(&src_sg) = size; 1922 1923 + return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags); 1924 } 1925 1926 static struct dma_async_tx_descriptor * 1927 + d40_prep_memcpy_sg(struct dma_chan *chan, 1928 + struct scatterlist *dst_sg, unsigned int dst_nents, 1929 + struct scatterlist *src_sg, unsigned int src_nents, 1930 + unsigned long dma_flags) 1931 { 1932 if (dst_nents != src_nents) 1933 return NULL; 1934 1935 + return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags); 1936 } 1937 1938 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, ··· 2161 enum dma_data_direction direction, 2162 unsigned long dma_flags) 2163 { 2164 + if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) 2165 + return NULL; 2166 2167 + return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags); 2168 + } 2169 + 2170 + static struct dma_async_tx_descriptor * 2171 + dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr, 2172 + size_t buf_len, size_t period_len, 2173 + enum dma_data_direction direction) 2174 + { 2175 + unsigned int periods = buf_len / period_len; 2176 + struct dma_async_tx_descriptor *txd; 2177 + struct scatterlist *sg; 2178 + int i; 2179 + 2180 + sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_KERNEL); 2181 + for (i = 0; i < periods; i++) { 2182 + sg_dma_address(&sg[i]) = dma_addr; 2183 + sg_dma_len(&sg[i]) = period_len; 2184 + dma_addr += period_len; 2185 } 2186 2187 + sg[periods].offset = 0; 2188 + sg[periods].length = 0; 2189 + sg[periods].page_link = 2190 + ((unsigned long)sg | 0x01) & ~0x02; 2191 2192 + txd = d40_prep_sg(chan, sg, sg, periods, direction, 2193 + DMA_PREP_INTERRUPT); 2194 2195 + kfree(sg); 2196 2197 + return txd; 2198 } 2199 2200 static enum dma_status d40_tx_status(struct dma_chan *chan, ··· 2219 int ret; 2220 2221 if (d40c->phy_chan == NULL) { 2222 + chan_err(d40c, "Cannot read status of unallocated channel\n"); 2223 return -EINVAL; 2224 } 2225 ··· 2245 unsigned long flags; 2246 2247 if (d40c->phy_chan == NULL) { 2248 + chan_err(d40c, "Channel is not allocated!\n"); 2249 return; 2250 } 2251 ··· 2339 return; 2340 } 2341 2342 + if (chan_is_logical(d40c)) { 2343 if (config_maxburst >= 16) 2344 psize = STEDMA40_PSIZE_LOG_16; 2345 else if (config_maxburst >= 8) ··· 2372 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2373 2374 /* Fill in register values */ 2375 + if (chan_is_logical(d40c)) 2376 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2377 else 2378 d40_phy_cfg(cfg, &d40c->src_def_cfg, ··· 2393 static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2394 unsigned long arg) 2395 { 2396 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2397 2398 if (d40c->phy_chan == NULL) { 2399 + chan_err(d40c, "Channel is not allocated!\n"); 2400 return -EINVAL; 2401 } 2402 2403 switch (cmd) { 2404 case DMA_TERMINATE_ALL: 2405 + return d40_terminate_all(d40c); 2406 case DMA_PAUSE: 2407 + return d40_pause(d40c); 2408 case DMA_RESUME: 2409 + return d40_resume(d40c); 2410 case DMA_SLAVE_CONFIG: 2411 d40_set_runtime_config(chan, 2412 (struct dma_slave_config *) arg); ··· 2456 } 2457 } 2458 2459 + static void d40_ops_init(struct d40_base *base, struct dma_device *dev) 2460 + { 2461 + if (dma_has_cap(DMA_SLAVE, 
dev->cap_mask)) 2462 + dev->device_prep_slave_sg = d40_prep_slave_sg; 2463 + 2464 + if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) { 2465 + dev->device_prep_dma_memcpy = d40_prep_memcpy; 2466 + 2467 + /* 2468 + * This controller can only access address at even 2469 + * 32bit boundaries, i.e. 2^2 2470 + */ 2471 + dev->copy_align = 2; 2472 + } 2473 + 2474 + if (dma_has_cap(DMA_SG, dev->cap_mask)) 2475 + dev->device_prep_dma_sg = d40_prep_memcpy_sg; 2476 + 2477 + if (dma_has_cap(DMA_CYCLIC, dev->cap_mask)) 2478 + dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic; 2479 + 2480 + dev->device_alloc_chan_resources = d40_alloc_chan_resources; 2481 + dev->device_free_chan_resources = d40_free_chan_resources; 2482 + dev->device_issue_pending = d40_issue_pending; 2483 + dev->device_tx_status = d40_tx_status; 2484 + dev->device_control = d40_control; 2485 + dev->dev = base->dev; 2486 + } 2487 + 2488 static int __init d40_dmaengine_init(struct d40_base *base, 2489 int num_reserved_chans) 2490 { ··· 2466 2467 dma_cap_zero(base->dma_slave.cap_mask); 2468 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2469 + dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); 2470 2471 + d40_ops_init(base, &base->dma_slave); 2472 2473 err = dma_async_device_register(&base->dma_slave); 2474 2475 if (err) { 2476 + d40_err(base->dev, "Failed to register slave channels\n"); 2477 goto failure1; 2478 } 2479 ··· 2491 2492 dma_cap_zero(base->dma_memcpy.cap_mask); 2493 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2494 + dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask); 2495 2496 + d40_ops_init(base, &base->dma_memcpy); 2497 2498 err = dma_async_device_register(&base->dma_memcpy); 2499 2500 if (err) { 2501 + d40_err(base->dev, 2502 + "Failed to regsiter memcpy only channels\n"); 2503 goto failure2; 2504 } 2505 ··· 2523 dma_cap_zero(base->dma_both.cap_mask); 2524 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2525 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2526 + dma_cap_set(DMA_SG, base->dma_both.cap_mask); 2527 + dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask); 2528 2529 + d40_ops_init(base, &base->dma_both); 2530 err = dma_async_device_register(&base->dma_both); 2531 2532 if (err) { 2533 + d40_err(base->dev, 2534 + "Failed to register logical and physical capable channels\n"); 2535 goto failure3; 2536 } 2537 return 0; ··· 2616 { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, 2617 /* 2618 * D40_DREG_PERIPHID2 Depends on HW revision: 2619 + * DB8500ed has 0x0008, 2620 * ? has 0x0018, 2621 + * DB8500v1 has 0x0028 2622 + * DB8500v2 has 0x0038 2623 */ 2624 { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, 2625 ··· 2642 clk = clk_get(&pdev->dev, NULL); 2643 2644 if (IS_ERR(clk)) { 2645 + d40_err(&pdev->dev, "No matching clock found\n"); 2646 goto failure; 2647 } 2648 ··· 2666 for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2667 if (dma_id_regs[i].val != 2668 readl(virtbase + dma_id_regs[i].reg)) { 2669 + d40_err(&pdev->dev, 2670 + "Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", 2671 dma_id_regs[i].val, 2672 dma_id_regs[i].reg, 2673 readl(virtbase + dma_id_regs[i].reg)); ··· 2681 2682 if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != 2683 D40_HW_DESIGNER) { 2684 + d40_err(&pdev->dev, "Unknown designer! 
Got %x wanted %x\n", 2685 + val & D40_DREG_PERIPHID2_DESIGNER_MASK, 2686 D40_HW_DESIGNER); 2687 goto failure; 2688 } ··· 2713 sizeof(struct d40_chan), GFP_KERNEL); 2714 2715 if (base == NULL) { 2716 + d40_err(&pdev->dev, "Out of memory\n"); 2717 goto failure; 2718 } 2719 ··· 2860 2861 static int __init d40_lcla_allocate(struct d40_base *base) 2862 { 2863 + struct d40_lcla_pool *pool = &base->lcla_pool; 2864 unsigned long *page_list; 2865 int i, j; 2866 int ret = 0; ··· 2885 base->lcla_pool.pages); 2886 if (!page_list[i]) { 2887 2888 + d40_err(base->dev, "Failed to allocate %d pages.\n", 2889 + base->lcla_pool.pages); 2890 2891 for (j = 0; j < i; j++) 2892 free_pages(page_list[j], base->lcla_pool.pages); ··· 2925 LCLA_ALIGNMENT); 2926 } 2927 2928 + pool->dma_addr = dma_map_single(base->dev, pool->base, 2929 + SZ_1K * base->num_phy_chans, 2930 + DMA_TO_DEVICE); 2931 + if (dma_mapping_error(base->dev, pool->dma_addr)) { 2932 + pool->dma_addr = 0; 2933 + ret = -ENOMEM; 2934 + goto failure; 2935 + } 2936 + 2937 writel(virt_to_phys(base->lcla_pool.base), 2938 base->virtbase + D40_DREG_LCLA); 2939 failure: ··· 2957 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 2958 if (!res) { 2959 ret = -ENOENT; 2960 + d40_err(&pdev->dev, "No \"lcpa\" memory resource\n"); 2961 goto failure; 2962 } 2963 base->lcpa_size = resource_size(res); ··· 2968 if (request_mem_region(res->start, resource_size(res), 2969 D40_NAME " I/O lcpa") == NULL) { 2970 ret = -EBUSY; 2971 + d40_err(&pdev->dev, 2972 + "Failed to request LCPA region 0x%x-0x%x\n", 2973 + res->start, res->end); 2974 goto failure; 2975 } 2976 ··· 2986 base->lcpa_base = ioremap(res->start, resource_size(res)); 2987 if (!base->lcpa_base) { 2988 ret = -ENOMEM; 2989 + d40_err(&pdev->dev, "Failed to ioremap LCPA region\n"); 2990 goto failure; 2991 } 2992 2993 ret = d40_lcla_allocate(base); 2994 if (ret) { 2995 + d40_err(&pdev->dev, "Failed to allocate LCLA area\n"); 2996 goto failure; 2997 } 2998 ··· 3004 base->irq = platform_get_irq(pdev, 0); 3005 3006 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 3007 if (ret) { 3008 + d40_err(&pdev->dev, "No IRQ defined\n"); 3009 goto failure; 3010 } 3011 ··· 3025 kmem_cache_destroy(base->desc_slab); 3026 if (base->virtbase) 3027 iounmap(base->virtbase); 3028 + 3029 + if (base->lcla_pool.dma_addr) 3030 + dma_unmap_single(base->dev, base->lcla_pool.dma_addr, 3031 + SZ_1K * base->num_phy_chans, 3032 + DMA_TO_DEVICE); 3033 + 3034 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 3035 free_pages((unsigned long)base->lcla_pool.base, 3036 base->lcla_pool.pages); ··· 3049 kfree(base); 3050 } 3051 3052 + d40_err(&pdev->dev, "probe failed\n"); 3053 return ret; 3054 } 3055 ··· 3060 }, 3061 }; 3062 3063 + static int __init stedma40_init(void) 3064 { 3065 return platform_driver_probe(&d40_driver, d40_probe); 3066 }
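The ste_dma40.c rework above funnels memcpy, scatter/gather, slave and the new cyclic transfers through a single d40_prep_sg() path and advertises DMA_CYCLIC on the slave-capable devices. A minimal client-side sketch of how the cyclic path could be exercised through the generic dmaengine API follows; it is not part of the patch, and the my_start_cyclic_rx()/my_period_done() names and the omitted channel configuration are illustrative assumptions only.

/*
 * Hedged sketch, not from this patch: request a DMA40 channel and start a
 * circular receive into an already DMA-mapped buffer.  A real client would
 * normally also issue DMA_SLAVE_CONFIG (burst size, data width, device
 * address) before preparing the transfer.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <plat/ste_dma40.h>

static void my_period_done(void *data)
{
	/* one period of the circular buffer has completed; consume it here */
}

static int my_start_cyclic_rx(struct stedma40_chan_cfg *cfg, void *cb_data,
			      dma_addr_t buf, size_t buf_len, size_t period_len)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* stedma40_filter() matches a channel against the passed channel config */
	chan = dma_request_channel(mask, stedma40_filter, cfg);
	if (!chan)
		return -ENODEV;

	/* buf_len must be a whole number of period_len periods */
	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						   period_len, DMA_FROM_DEVICE);
	if (!txd) {
		dma_release_channel(chan);
		return -EBUSY;
	}

	txd->callback = my_period_done;
	txd->callback_param = cb_data;

	txd->tx_submit(txd);		/* ends up in d40_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks d40_issue_pending() */

	return 0;
}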
+79 -139
drivers/dma/ste_dma40_ll.c
··· 125 static int d40_phy_fill_lli(struct d40_phy_lli *lli, 126 dma_addr_t data, 127 u32 data_size, 128 - int psize, 129 dma_addr_t next_lli, 130 u32 reg_cfg, 131 - bool term_int, 132 - u32 data_width, 133 - bool is_device) 134 { 135 int num_elems; 136 137 if (psize == STEDMA40_PSIZE_PHY_1) ··· 156 * Distance to next element sized entry. 157 * Usually the size of the element unless you want gaps. 158 */ 159 - if (!is_device) 160 lli->reg_elt |= (0x1 << data_width) << 161 D40_SREG_ELEM_PHY_EIDX_POS; 162 ··· 200 return seg_max; 201 } 202 203 - struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli, 204 - dma_addr_t addr, 205 - u32 size, 206 - int psize, 207 - dma_addr_t lli_phys, 208 - u32 reg_cfg, 209 - bool term_int, 210 - u32 data_width1, 211 - u32 data_width2, 212 - bool is_device) 213 { 214 int err; 215 dma_addr_t next = lli_phys; 216 int size_rest = size; 217 int size_seg = 0; 218 219 do { 220 - size_seg = d40_seg_size(size_rest, data_width1, data_width2); 221 size_rest -= size_seg; 222 223 - if (term_int && size_rest == 0) 224 - next = 0; 225 else 226 next = ALIGN(next + sizeof(struct d40_phy_lli), 227 D40_LLI_ALIGN); 228 229 - err = d40_phy_fill_lli(lli, 230 - addr, 231 - size_seg, 232 - psize, 233 - next, 234 - reg_cfg, 235 - !next, 236 - data_width1, 237 - is_device); 238 239 if (err) 240 goto err; 241 242 lli++; 243 - if (!is_device) 244 addr += size_seg; 245 } while (size_rest); 246 ··· 260 struct d40_phy_lli *lli_sg, 261 dma_addr_t lli_phys, 262 u32 reg_cfg, 263 - u32 data_width1, 264 - u32 data_width2, 265 - int psize) 266 { 267 int total_size = 0; 268 int i; 269 struct scatterlist *current_sg = sg; 270 - dma_addr_t dst; 271 struct d40_phy_lli *lli = lli_sg; 272 dma_addr_t l_phys = lli_phys; 273 274 for_each_sg(sg, current_sg, sg_len, i) { 275 276 total_size += sg_dma_len(current_sg); 277 278 - if (target) 279 - dst = target; 280 - else 281 - dst = sg_phys(current_sg); 282 283 l_phys = ALIGN(lli_phys + (lli - lli_sg) * 284 sizeof(struct d40_phy_lli), D40_LLI_ALIGN); 285 286 - lli = d40_phy_buf_to_lli(lli, 287 - dst, 288 - sg_dma_len(current_sg), 289 - psize, 290 - l_phys, 291 - reg_cfg, 292 - sg_len - 1 == i, 293 - data_width1, 294 - data_width2, 295 - target == dst); 296 if (lli == NULL) 297 return -EINVAL; 298 } ··· 297 } 298 299 300 - void d40_phy_lli_write(void __iomem *virtbase, 301 - u32 phy_chan_num, 302 - struct d40_phy_lli *lli_dst, 303 - struct d40_phy_lli *lli_src) 304 - { 305 - 306 - writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE + 307 - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG); 308 - writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE + 309 - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); 310 - writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE + 311 - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR); 312 - writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE + 313 - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK); 314 - 315 - writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE + 316 - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG); 317 - writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE + 318 - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); 319 - writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE + 320 - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR); 321 - writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE + 322 - phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK); 323 - 324 - } 325 - 326 /* DMA logical lli operations */ 327 328 static void d40_log_lli_link(struct d40_log_lli *lli_dst, 329 
struct d40_log_lli *lli_src, 330 - int next) 331 { 332 u32 slos = 0; 333 u32 dlos = 0; 334 335 if (next != -EINVAL) { 336 slos = next * 2; 337 dlos = next * 2 + 1; 338 - } else { 339 lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; 340 lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; 341 } ··· 327 void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa, 328 struct d40_log_lli *lli_dst, 329 struct d40_log_lli *lli_src, 330 - int next) 331 { 332 - d40_log_lli_link(lli_dst, lli_src, next); 333 334 writel(lli_src->lcsp02, &lcpa[0].lcsp0); 335 writel(lli_src->lcsp13, &lcpa[0].lcsp1); ··· 340 void d40_log_lli_lcla_write(struct d40_log_lli *lcla, 341 struct d40_log_lli *lli_dst, 342 struct d40_log_lli *lli_src, 343 - int next) 344 { 345 - d40_log_lli_link(lli_dst, lli_src, next); 346 347 writel(lli_src->lcsp02, &lcla[0].lcsp02); 348 writel(lli_src->lcsp13, &lcla[0].lcsp13); ··· 354 dma_addr_t data, u32 data_size, 355 u32 reg_cfg, 356 u32 data_width, 357 - bool addr_inc) 358 { 359 lli->lcsp13 = reg_cfg; 360 361 /* The number of elements to transfer */ ··· 376 377 } 378 379 - int d40_log_sg_to_dev(struct scatterlist *sg, 380 - int sg_len, 381 - struct d40_log_lli_bidir *lli, 382 - struct d40_def_lcsp *lcsp, 383 - u32 src_data_width, 384 - u32 dst_data_width, 385 - enum dma_data_direction direction, 386 - dma_addr_t dev_addr) 387 - { 388 - int total_size = 0; 389 - struct scatterlist *current_sg = sg; 390 - int i; 391 - struct d40_log_lli *lli_src = lli->src; 392 - struct d40_log_lli *lli_dst = lli->dst; 393 - 394 - for_each_sg(sg, current_sg, sg_len, i) { 395 - total_size += sg_dma_len(current_sg); 396 - 397 - if (direction == DMA_TO_DEVICE) { 398 - lli_src = 399 - d40_log_buf_to_lli(lli_src, 400 - sg_phys(current_sg), 401 - sg_dma_len(current_sg), 402 - lcsp->lcsp1, src_data_width, 403 - dst_data_width, 404 - true); 405 - lli_dst = 406 - d40_log_buf_to_lli(lli_dst, 407 - dev_addr, 408 - sg_dma_len(current_sg), 409 - lcsp->lcsp3, dst_data_width, 410 - src_data_width, 411 - false); 412 - } else { 413 - lli_dst = 414 - d40_log_buf_to_lli(lli_dst, 415 - sg_phys(current_sg), 416 - sg_dma_len(current_sg), 417 - lcsp->lcsp3, dst_data_width, 418 - src_data_width, 419 - true); 420 - lli_src = 421 - d40_log_buf_to_lli(lli_src, 422 - dev_addr, 423 - sg_dma_len(current_sg), 424 - lcsp->lcsp1, src_data_width, 425 - dst_data_width, 426 - false); 427 - } 428 - } 429 - return total_size; 430 - } 431 - 432 - struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg, 433 dma_addr_t addr, 434 int size, 435 u32 lcsp13, /* src or dst*/ 436 u32 data_width1, 437 u32 data_width2, 438 - bool addr_inc) 439 { 440 struct d40_log_lli *lli = lli_sg; 441 int size_rest = size; 442 int size_seg = 0; ··· 397 addr, 398 size_seg, 399 lcsp13, data_width1, 400 - addr_inc); 401 if (addr_inc) 402 addr += size_seg; 403 lli++; ··· 408 409 int d40_log_sg_to_lli(struct scatterlist *sg, 410 int sg_len, 411 struct d40_log_lli *lli_sg, 412 u32 lcsp13, /* src or dst*/ 413 u32 data_width1, u32 data_width2) ··· 417 struct scatterlist *current_sg = sg; 418 int i; 419 struct d40_log_lli *lli = lli_sg; 420 421 for_each_sg(sg, current_sg, sg_len, i) { 422 total_size += sg_dma_len(current_sg); 423 - lli = d40_log_buf_to_lli(lli, 424 - sg_phys(current_sg), 425 - sg_dma_len(current_sg), 426 lcsp13, 427 - data_width1, data_width2, true); 428 } 429 return total_size; 430 }
···
125        static int d40_phy_fill_lli(struct d40_phy_lli *lli,
126                                    dma_addr_t data,
127                                    u32 data_size,
128                                    dma_addr_t next_lli,
129                                    u32 reg_cfg,
130  +                                 struct stedma40_half_channel_info *info,
131  +                                 unsigned int flags)
132        {
133  +             bool addr_inc = flags & LLI_ADDR_INC;
134  +             bool term_int = flags & LLI_TERM_INT;
135  +             unsigned int data_width = info->data_width;
136  +             int psize = info->psize;
137                int num_elems;
138
139                if (psize == STEDMA40_PSIZE_PHY_1)
···
154                 * Distance to next element sized entry.
155                 * Usually the size of the element unless you want gaps.
156                 */
157  +             if (addr_inc)
158                        lli->reg_elt |= (0x1 << data_width) <<
159                                D40_SREG_ELEM_PHY_EIDX_POS;
160
···
198                return seg_max;
199        }
200
201  +     static struct d40_phy_lli *
202  +     d40_phy_buf_to_lli(struct d40_phy_lli *lli, dma_addr_t addr, u32 size,
203  +                        dma_addr_t lli_phys, dma_addr_t first_phys, u32 reg_cfg,
204  +                        struct stedma40_half_channel_info *info,
205  +                        struct stedma40_half_channel_info *otherinfo,
206  +                        unsigned long flags)
207        {
208  +             bool lastlink = flags & LLI_LAST_LINK;
209  +             bool addr_inc = flags & LLI_ADDR_INC;
210  +             bool term_int = flags & LLI_TERM_INT;
211  +             bool cyclic = flags & LLI_CYCLIC;
212                int err;
213                dma_addr_t next = lli_phys;
214                int size_rest = size;
215                int size_seg = 0;
216
217  +             /*
218  +              * This piece may be split up based on d40_seg_size(); we only want the
219  +              * term int on the last part.
220  +              */
221  +             if (term_int)
222  +                     flags &= ~LLI_TERM_INT;
223  +
224                do {
225  +                     size_seg = d40_seg_size(size_rest, info->data_width,
226  +                                             otherinfo->data_width);
227                        size_rest -= size_seg;
228
229  +                     if (size_rest == 0 && term_int)
230  +                             flags |= LLI_TERM_INT;
231  +
232  +                     if (size_rest == 0 && lastlink)
233  +                             next = cyclic ? first_phys : 0;
234                        else
235                                next = ALIGN(next + sizeof(struct d40_phy_lli),
236                                             D40_LLI_ALIGN);
237
238  +                     err = d40_phy_fill_lli(lli, addr, size_seg, next,
239  +                                            reg_cfg, info, flags);
240
241                        if (err)
242                                goto err;
243
244                        lli++;
245  +                     if (addr_inc)
246                                addr += size_seg;
247                } while (size_rest);
248
···
254                               struct d40_phy_lli *lli_sg,
255                               dma_addr_t lli_phys,
256                               u32 reg_cfg,
257  +                            struct stedma40_half_channel_info *info,
258  +                            struct stedma40_half_channel_info *otherinfo,
259  +                            unsigned long flags)
260        {
261                int total_size = 0;
262                int i;
263                struct scatterlist *current_sg = sg;
264                struct d40_phy_lli *lli = lli_sg;
265                dma_addr_t l_phys = lli_phys;
266
267  +             if (!target)
268  +                     flags |= LLI_ADDR_INC;
269  +
270                for_each_sg(sg, current_sg, sg_len, i) {
271  +                     dma_addr_t sg_addr = sg_dma_address(current_sg);
272  +                     unsigned int len = sg_dma_len(current_sg);
273  +                     dma_addr_t dst = target ?: sg_addr;
274
275                        total_size += sg_dma_len(current_sg);
276
277  +                     if (i == sg_len - 1)
278  +                             flags |= LLI_TERM_INT | LLI_LAST_LINK;
279
280                        l_phys = ALIGN(lli_phys + (lli - lli_sg) *
281                                       sizeof(struct d40_phy_lli), D40_LLI_ALIGN);
282
283  +                     lli = d40_phy_buf_to_lli(lli, dst, len, l_phys, lli_phys,
284  +                                              reg_cfg, info, otherinfo, flags);
285  +
286                        if (lli == NULL)
287                                return -EINVAL;
288                }
···
295        }
296
297
298        /* DMA logical lli operations */
299
300        static void d40_log_lli_link(struct d40_log_lli *lli_dst,
301                                     struct d40_log_lli *lli_src,
302  +                                  int next, unsigned int flags)
303        {
304  +             bool interrupt = flags & LLI_TERM_INT;
305                u32 slos = 0;
306                u32 dlos = 0;
307
308                if (next != -EINVAL) {
309                        slos = next * 2;
310                        dlos = next * 2 + 1;
311  +             }
312  +
313  +             if (interrupt) {
314                        lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
315                        lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
316                }
···
348        void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
349                                    struct d40_log_lli *lli_dst,
350                                    struct d40_log_lli *lli_src,
351  +                                 int next, unsigned int flags)
352        {
353  +             d40_log_lli_link(lli_dst, lli_src, next, flags);
354
355                writel(lli_src->lcsp02, &lcpa[0].lcsp0);
356                writel(lli_src->lcsp13, &lcpa[0].lcsp1);
···
361        void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
362                                    struct d40_log_lli *lli_dst,
363                                    struct d40_log_lli *lli_src,
364  +                                 int next, unsigned int flags)
365        {
366  +             d40_log_lli_link(lli_dst, lli_src, next, flags);
367
368                writel(lli_src->lcsp02, &lcla[0].lcsp02);
369                writel(lli_src->lcsp13, &lcla[0].lcsp13);
···
375                                   dma_addr_t data, u32 data_size,
376                                   u32 reg_cfg,
377                                   u32 data_width,
378  +                                unsigned int flags)
379        {
380  +             bool addr_inc = flags & LLI_ADDR_INC;
381  +
382                lli->lcsp13 = reg_cfg;
383
384                /* The number of elements to transfer */
···
395
396        }
397
398  +     static struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
399                                                      dma_addr_t addr,
400                                                      int size,
401                                                      u32 lcsp13, /* src or dst*/
402                                                      u32 data_width1,
403                                                      u32 data_width2,
404  +                                                   unsigned int flags)
405        {
406  +             bool addr_inc = flags & LLI_ADDR_INC;
407                struct d40_log_lli *lli = lli_sg;
408                int size_rest = size;
409                int size_seg = 0;
···
468                                                addr,
469                                                size_seg,
470                                                lcsp13, data_width1,
471  +                                             flags);
472                        if (addr_inc)
473                                addr += size_seg;
474                        lli++;
···
479
480        int d40_log_sg_to_lli(struct scatterlist *sg,
481                              int sg_len,
482  +                           dma_addr_t dev_addr,
483                              struct d40_log_lli *lli_sg,
484                              u32 lcsp13, /* src or dst*/
485                              u32 data_width1, u32 data_width2)
···
487                struct scatterlist *current_sg = sg;
488                int i;
489                struct d40_log_lli *lli = lli_sg;
490  +             unsigned long flags = 0;
491  +
492  +             if (!dev_addr)
493  +                     flags |= LLI_ADDR_INC;
494
495                for_each_sg(sg, current_sg, sg_len, i) {
496  +                     dma_addr_t sg_addr = sg_dma_address(current_sg);
497  +                     unsigned int len = sg_dma_len(current_sg);
498  +                     dma_addr_t addr = dev_addr ?: sg_addr;
499
500                        total_size += sg_dma_len(current_sg);
501
502  +                     lli = d40_log_buf_to_lli(lli, addr, len,
503                                                 lcsp13,
504  +                                              data_width1,
505  +                                              data_width2,
506  +                                              flags);
507                }
508
509                return total_size;
510        }
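Note: with d40_log_sg_to_dev() removed, both halves of a logical transfer now go through d40_log_sg_to_lli(), and the new dev_addr argument decides whether the address increments across the scatterlist. A minimal caller-side sketch for the memory-to-device case, not part of this merge; sg, sg_len, dev_addr, lcsp, the width values and the lli->src/lli->dst names are borrowed from the removed helper purely for illustration:

    /* Memory side: dev_addr == 0, so the helper sets LLI_ADDR_INC itself. */
    total = d40_log_sg_to_lli(sg, sg_len, 0, lli->src,
                              lcsp->lcsp1, src_data_width, dst_data_width);

    /* Device side: a fixed FIFO address, so no address increment. */
    total = d40_log_sg_to_lli(sg, sg_len, dev_addr, lli->dst,
                              lcsp->lcsp3, dst_data_width, src_data_width);

The direction-specific branching that d40_log_sg_to_dev() used to do is thereby pushed up to the caller, which simply swaps which half gets the fixed device address.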
+29 -37
drivers/dma/ste_dma40_ll.h
···
163        #define D40_DREG_LCEIS1		0x0B4
164        #define D40_DREG_LCEIS2		0x0B8
165        #define D40_DREG_LCEIS3		0x0BC
166        #define D40_DREG_STFU		0xFC8
167        #define D40_DREG_ICFG		0xFCC
168        #define D40_DREG_PERIPHID0	0xFE0
···
293
294        /* Physical channels */
295
296        void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
297                         u32 *src_cfg,
298                         u32 *dst_cfg,
···
315                           struct d40_phy_lli *lli,
316                           dma_addr_t lli_phys,
317                           u32 reg_cfg,
318  -                        u32 data_width1,
319  -                        u32 data_width2,
320  -                        int psize);
321  -
322  -     struct d40_phy_lli *d40_phy_buf_to_lli(struct d40_phy_lli *lli,
323  -                                            dma_addr_t data,
324  -                                            u32 data_size,
325  -                                            int psize,
326  -                                            dma_addr_t next_lli,
327  -                                            u32 reg_cfg,
328  -                                            bool term_int,
329  -                                            u32 data_width1,
330  -                                            u32 data_width2,
331  -                                            bool is_device);
332  -
333  -     void d40_phy_lli_write(void __iomem *virtbase,
334  -                            u32 phy_chan_num,
335  -                            struct d40_phy_lli *lli_dst,
336  -                            struct d40_phy_lli *lli_src);
337
338        /* Logical channels */
339
340  -     struct d40_log_lli *d40_log_buf_to_lli(struct d40_log_lli *lli_sg,
341  -                                            dma_addr_t addr,
342  -                                            int size,
343  -                                            u32 lcsp13, /* src or dst*/
344  -                                            u32 data_width1, u32 data_width2,
345  -                                            bool addr_inc);
346  -
347  -     int d40_log_sg_to_dev(struct scatterlist *sg,
348  -                           int sg_len,
349  -                           struct d40_log_lli_bidir *lli,
350  -                           struct d40_def_lcsp *lcsp,
351  -                           u32 src_data_width,
352  -                           u32 dst_data_width,
353  -                           enum dma_data_direction direction,
354  -                           dma_addr_t dev_addr);
355  -
356        int d40_log_sg_to_lli(struct scatterlist *sg,
357                              int sg_len,
358                              struct d40_log_lli *lli_sg,
359                              u32 lcsp13, /* src or dst*/
360                              u32 data_width1, u32 data_width2);
···
331        void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
332                                    struct d40_log_lli *lli_dst,
333                                    struct d40_log_lli *lli_src,
334  -                                 int next);
335
336        void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
337                                    struct d40_log_lli *lli_dst,
338                                    struct d40_log_lli *lli_src,
339  -                                 int next);
340
341        #endif /* STE_DMA40_LLI_H */
···
163        #define D40_DREG_LCEIS1		0x0B4
164        #define D40_DREG_LCEIS2		0x0B8
165        #define D40_DREG_LCEIS3		0x0BC
166  +     #define D40_DREG_PSEG1		0x110
167  +     #define D40_DREG_PSEG2		0x114
168  +     #define D40_DREG_PSEG3		0x118
169  +     #define D40_DREG_PSEG4		0x11C
170  +     #define D40_DREG_PCEG1		0x120
171  +     #define D40_DREG_PCEG2		0x124
172  +     #define D40_DREG_PCEG3		0x128
173  +     #define D40_DREG_PCEG4		0x12C
174  +     #define D40_DREG_RSEG1		0x130
175  +     #define D40_DREG_RSEG2		0x134
176  +     #define D40_DREG_RSEG3		0x138
177  +     #define D40_DREG_RSEG4		0x13C
178  +     #define D40_DREG_RCEG1		0x140
179  +     #define D40_DREG_RCEG2		0x144
180  +     #define D40_DREG_RCEG3		0x148
181  +     #define D40_DREG_RCEG4		0x14C
182        #define D40_DREG_STFU		0xFC8
183        #define D40_DREG_ICFG		0xFCC
184        #define D40_DREG_PERIPHID0	0xFE0
···
277
278        /* Physical channels */
279
280  +     enum d40_lli_flags {
281  +             LLI_ADDR_INC	= 1 << 0,
282  +             LLI_TERM_INT	= 1 << 1,
283  +             LLI_CYCLIC	= 1 << 2,
284  +             LLI_LAST_LINK	= 1 << 3,
285  +     };
286  +
287        void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
288                         u32 *src_cfg,
289                         u32 *dst_cfg,
···
292                           struct d40_phy_lli *lli,
293                           dma_addr_t lli_phys,
294                           u32 reg_cfg,
295  +                        struct stedma40_half_channel_info *info,
296  +                        struct stedma40_half_channel_info *otherinfo,
297  +                        unsigned long flags);
298
299        /* Logical channels */
300
301        int d40_log_sg_to_lli(struct scatterlist *sg,
302                              int sg_len,
303  +                           dma_addr_t dev_addr,
304                              struct d40_log_lli *lli_sg,
305                              u32 lcsp13, /* src or dst*/
306                              u32 data_width1, u32 data_width2);
···
339        void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
340                                    struct d40_log_lli *lli_dst,
341                                    struct d40_log_lli *lli_src,
342  +                                 int next, unsigned int flags);
343
344        void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
345                                    struct d40_log_lli *lli_dst,
346                                    struct d40_log_lli *lli_src,
347  +                                 int next, unsigned int flags);
348
349        #endif /* STE_DMA40_LLI_H */
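The d40_lli_flags bits replace the old bool parameters and are OR'd together by the LLI builders. A rough sketch of the intended combinations, based on how the ste_dma40_ll.c hunks above set them (illustrative only; the target/cyclic variables are assumed caller state):

    unsigned long flags = 0;

    if (!target)                    /* memory side: step through the scatterlist */
            flags |= LLI_ADDR_INC;

    /* On the final scatterlist entry the builders add: */
    flags |= LLI_TERM_INT | LLI_LAST_LINK;

    if (cyclic)                     /* wrap the last link back to the first LLI */
            flags |= LLI_CYCLIC;    /* otherwise the last link's next pointer is 0 */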
+43 -1
include/linux/dw_dmac.h
···
16         /**
17          * struct dw_dma_platform_data - Controller configuration parameters
18          * @nr_channels: Number of channels supported by hardware (max 8)
19          */
20         struct dw_dma_platform_data {
21                 unsigned int	nr_channels;
22         };
23
24         /**
···
42                 DW_DMA_SLAVE_WIDTH_32BIT,
43         };
44
45         /**
46          * struct dw_dma_slave - Controller-specific information about a slave
47          *
···
77          * @reg_width: peripheral register width
78          * @cfg_hi: Platform-specific initializer for the CFG_HI register
79          * @cfg_lo: Platform-specific initializer for the CFG_LO register
80          */
81         struct dw_dma_slave {
82                 struct device		*dma_dev;
···
90                 enum dw_dma_slave_width	reg_width;
91                 u32			cfg_hi;
92                 u32			cfg_lo;
93         };
94
95         /* Platform-configurable bits in CFG_HI */
···
105        #define DWC_CFGH_DST_PER(x)	((x) << 11)
106
107        /* Platform-configurable bits in CFG_LO */
108  -     #define DWC_CFGL_PRIO(x)	((x) << 5)	/* priority */
109        #define DWC_CFGL_LOCK_CH_XFER	(0 << 12)	/* scope of LOCK_CH */
110        #define DWC_CFGL_LOCK_CH_BLOCK	(1 << 12)
111        #define DWC_CFGL_LOCK_CH_XACT	(2 << 12)
···
16         /**
17          * struct dw_dma_platform_data - Controller configuration parameters
18          * @nr_channels: Number of channels supported by hardware (max 8)
19   +      * @is_private: The device channels should be marked as private and not for
20   +      * by the general purpose DMA channel allocator.
21          */
22         struct dw_dma_platform_data {
23                 unsigned int	nr_channels;
24   +             bool		is_private;
25   +     #define CHAN_ALLOCATION_ASCENDING	0	/* zero to seven */
26   +     #define CHAN_ALLOCATION_DESCENDING	1	/* seven to zero */
27   +             unsigned char	chan_allocation_order;
28   +     #define CHAN_PRIORITY_ASCENDING		0	/* chan0 highest */
29   +     #define CHAN_PRIORITY_DESCENDING	1	/* chan7 highest */
30   +             unsigned char	chan_priority;
31         };
32
33         /**
···
33                 DW_DMA_SLAVE_WIDTH_32BIT,
34         };
35
36   +     /* bursts size */
37   +     enum dw_dma_msize {
38   +             DW_DMA_MSIZE_1,
39   +             DW_DMA_MSIZE_4,
40   +             DW_DMA_MSIZE_8,
41   +             DW_DMA_MSIZE_16,
42   +             DW_DMA_MSIZE_32,
43   +             DW_DMA_MSIZE_64,
44   +             DW_DMA_MSIZE_128,
45   +             DW_DMA_MSIZE_256,
46   +     };
47   +
48   +     /* flow controller */
49   +     enum dw_dma_fc {
50   +             DW_DMA_FC_D_M2M,
51   +             DW_DMA_FC_D_M2P,
52   +             DW_DMA_FC_D_P2M,
53   +             DW_DMA_FC_D_P2P,
54   +             DW_DMA_FC_P_P2M,
55   +             DW_DMA_FC_SP_P2P,
56   +             DW_DMA_FC_P_M2P,
57   +             DW_DMA_FC_DP_P2P,
58   +     };
59   +
60         /**
61          * struct dw_dma_slave - Controller-specific information about a slave
62          *
···
44          * @reg_width: peripheral register width
45          * @cfg_hi: Platform-specific initializer for the CFG_HI register
46          * @cfg_lo: Platform-specific initializer for the CFG_LO register
47   +      * @src_master: src master for transfers on allocated channel.
48   +      * @dst_master: dest master for transfers on allocated channel.
49   +      * @src_msize: src burst size.
50   +      * @dst_msize: dest burst size.
51   +      * @fc: flow controller for DMA transfer
52          */
53         struct dw_dma_slave {
54                 struct device		*dma_dev;
···
52                 enum dw_dma_slave_width	reg_width;
53                 u32			cfg_hi;
54                 u32			cfg_lo;
55   +             u8			src_master;
56   +             u8			dst_master;
57   +             u8			src_msize;
58   +             u8			dst_msize;
59   +             u8			fc;
60         };
61
62         /* Platform-configurable bits in CFG_HI */
···
62         #define DWC_CFGH_DST_PER(x)	((x) << 11)
63
64         /* Platform-configurable bits in CFG_LO */
65         #define DWC_CFGL_LOCK_CH_XFER	(0 << 12)	/* scope of LOCK_CH */
66         #define DWC_CFGL_LOCK_CH_BLOCK	(1 << 12)
67         #define DWC_CFGL_LOCK_CH_XACT	(2 << 12)
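For orientation, a rough sketch of how board or platform code might fill in the extended dw_dmac structures; the structure names ("board_..."), the peripheral request line, the master numbers, and the burst/flow-controller choices are illustrative assumptions, not values taken from this merge:

    #include <linux/dw_dmac.h>

    /* Hypothetical platform data: 8 private channels, allocated chan7-first. */
    static struct dw_dma_platform_data board_dw_dma_pdata = {
            .nr_channels            = 8,
            .is_private             = true,
            .chan_allocation_order  = CHAN_ALLOCATION_DESCENDING,
            .chan_priority          = CHAN_PRIORITY_DESCENDING,
    };

    /* Hypothetical slave configuration for a memory-to-peripheral channel. */
    static struct dw_dma_slave board_periph_tx = {
            /* .dma_dev is set to the DMA controller's struct device */
            .reg_width      = DW_DMA_SLAVE_WIDTH_32BIT,
            .cfg_hi         = DWC_CFGH_DST_PER(1),  /* peripheral request line 1 */
            .cfg_lo         = 0,
            .src_master     = 0,                    /* memory-side AHB master */
            .dst_master     = 1,                    /* peripheral-side AHB master */
            .src_msize      = DW_DMA_MSIZE_16,      /* burst of 16 data items */
            .dst_msize      = DW_DMA_MSIZE_16,
            .fc             = DW_DMA_FC_D_M2P,      /* DMAC flow control, mem->periph */
    };

With DWC_CFGL_PRIO() gone, channel priority is no longer encoded per-slave in cfg_lo; it follows the controller-wide chan_priority policy in the platform data.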