Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branches 'dma40', 'pl08x', 'fsldma', 'imx' and 'intel-mid' into dmaengine

+5001 -437
+2 -6
arch/arm/mach-imx/include/mach/dma-v1.h
··· 27 27 28 28 #define imx_has_dma_v1() (cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27()) 29 29 30 + #include <mach/dma.h> 31 + 30 32 #define IMX_DMA_CHANNELS 16 31 33 32 34 #define DMA_MODE_READ 0 ··· 97 95 int imx_dma_request(int channel, const char *name); 98 96 99 97 void imx_dma_free(int channel); 100 - 101 - enum imx_dma_prio { 102 - DMA_PRIO_HIGH = 0, 103 - DMA_PRIO_MEDIUM = 1, 104 - DMA_PRIO_LOW = 2 105 - }; 106 98 107 99 int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio); 108 100
+67
arch/arm/plat-mxc/include/mach/dma.h
··· 1 + /* 2 + * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. 3 + * 4 + * This program is free software; you can redistribute it and/or modify 5 + * it under the terms of the GNU General Public License version 2 as 6 + * published by the Free Software Foundation. 7 + */ 8 + 9 + #ifndef __ASM_ARCH_MXC_DMA_H__ 10 + #define __ASM_ARCH_MXC_DMA_H__ 11 + 12 + #include <linux/scatterlist.h> 13 + #include <linux/device.h> 14 + #include <linux/dmaengine.h> 15 + 16 + /* 17 + * This enumerates peripheral types. Used for SDMA. 18 + */ 19 + enum sdma_peripheral_type { 20 + IMX_DMATYPE_SSI, /* MCU domain SSI */ 21 + IMX_DMATYPE_SSI_SP, /* Shared SSI */ 22 + IMX_DMATYPE_MMC, /* MMC */ 23 + IMX_DMATYPE_SDHC, /* SDHC */ 24 + IMX_DMATYPE_UART, /* MCU domain UART */ 25 + IMX_DMATYPE_UART_SP, /* Shared UART */ 26 + IMX_DMATYPE_FIRI, /* FIRI */ 27 + IMX_DMATYPE_CSPI, /* MCU domain CSPI */ 28 + IMX_DMATYPE_CSPI_SP, /* Shared CSPI */ 29 + IMX_DMATYPE_SIM, /* SIM */ 30 + IMX_DMATYPE_ATA, /* ATA */ 31 + IMX_DMATYPE_CCM, /* CCM */ 32 + IMX_DMATYPE_EXT, /* External peripheral */ 33 + IMX_DMATYPE_MSHC, /* Memory Stick Host Controller */ 34 + IMX_DMATYPE_MSHC_SP, /* Shared Memory Stick Host Controller */ 35 + IMX_DMATYPE_DSP, /* DSP */ 36 + IMX_DMATYPE_MEMORY, /* Memory */ 37 + IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */ 38 + IMX_DMATYPE_SPDIF, /* SPDIF */ 39 + IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */ 40 + IMX_DMATYPE_ASRC, /* ASRC */ 41 + IMX_DMATYPE_ESAI, /* ESAI */ 42 + }; 43 + 44 + enum imx_dma_prio { 45 + DMA_PRIO_HIGH = 0, 46 + DMA_PRIO_MEDIUM = 1, 47 + DMA_PRIO_LOW = 2 48 + }; 49 + 50 + struct imx_dma_data { 51 + int dma_request; /* DMA request line */ 52 + enum sdma_peripheral_type peripheral_type; 53 + int priority; 54 + }; 55 + 56 + static inline int imx_dma_is_ipu(struct dma_chan *chan) 57 + { 58 + return !strcmp(dev_name(chan->device->dev), "ipu-core"); 59 + } 60 + 61 + static inline int imx_dma_is_general_purpose(struct dma_chan *chan) 62 + { 63 + 
return !strcmp(dev_name(chan->device->dev), "imx-sdma") || 64 + !strcmp(dev_name(chan->device->dev), "imx-dma"); 65 + } 66 + 67 + #endif
+17
arch/arm/plat-mxc/include/mach/sdma.h
··· 1 + #ifndef __MACH_MXC_SDMA_H__ 2 + #define __MACH_MXC_SDMA_H__ 3 + 4 + /** 5 + * struct sdma_platform_data - platform specific data for SDMA engine 6 + * 7 + * @sdma_version The version of this SDMA engine 8 + * @cpu_name used to generate the firmware name 9 + * @to_version CPU Tape out version 10 + */ 11 + struct sdma_platform_data { 12 + int sdma_version; 13 + char *cpu_name; 14 + int to_version; 15 + }; 16 + 17 + #endif /* __MACH_MXC_SDMA_H__ */
-137
arch/powerpc/include/asm/fsldma.h
··· 1 - /* 2 - * Freescale MPC83XX / MPC85XX DMA Controller 3 - * 4 - * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu> 5 - * 6 - * This file is licensed under the terms of the GNU General Public License 7 - * version 2. This program is licensed "as is" without any warranty of any 8 - * kind, whether express or implied. 9 - */ 10 - 11 - #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ 12 - #define __ARCH_POWERPC_ASM_FSLDMA_H__ 13 - 14 - #include <linux/slab.h> 15 - #include <linux/dmaengine.h> 16 - 17 - /* 18 - * Definitions for the Freescale DMA controller's DMA_SLAVE implemention 19 - * 20 - * The Freescale DMA_SLAVE implementation was designed to handle many-to-many 21 - * transfers. An example usage would be an accelerated copy between two 22 - * scatterlists. Another example use would be an accelerated copy from 23 - * multiple non-contiguous device buffers into a single scatterlist. 24 - * 25 - * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This 26 - * structure contains a list of hardware addresses that should be copied 27 - * to/from the scatterlist passed into device_prep_slave_sg(). The structure 28 - * also has some fields to enable hardware-specific features. 29 - */ 30 - 31 - /** 32 - * struct fsl_dma_hw_addr 33 - * @entry: linked list entry 34 - * @address: the hardware address 35 - * @length: length to transfer 36 - * 37 - * Holds a single physical hardware address / length pair for use 38 - * with the DMAEngine DMA_SLAVE API. 
39 - */ 40 - struct fsl_dma_hw_addr { 41 - struct list_head entry; 42 - 43 - dma_addr_t address; 44 - size_t length; 45 - }; 46 - 47 - /** 48 - * struct fsl_dma_slave 49 - * @addresses: a linked list of struct fsl_dma_hw_addr structures 50 - * @request_count: value for DMA request count 51 - * @src_loop_size: setup and enable constant source-address DMA transfers 52 - * @dst_loop_size: setup and enable constant destination address DMA transfers 53 - * @external_start: enable externally started DMA transfers 54 - * @external_pause: enable externally paused DMA transfers 55 - * 56 - * Holds a list of address / length pairs for use with the DMAEngine 57 - * DMA_SLAVE API implementation for the Freescale DMA controller. 58 - */ 59 - struct fsl_dma_slave { 60 - 61 - /* List of hardware address/length pairs */ 62 - struct list_head addresses; 63 - 64 - /* Support for extra controller features */ 65 - unsigned int request_count; 66 - unsigned int src_loop_size; 67 - unsigned int dst_loop_size; 68 - bool external_start; 69 - bool external_pause; 70 - }; 71 - 72 - /** 73 - * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave 74 - * @slave: the &struct fsl_dma_slave to add to 75 - * @address: the hardware address to add 76 - * @length: the length of bytes to transfer from @address 77 - * 78 - * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on 79 - * success, -ERRNO otherwise. 
80 - */ 81 - static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave, 82 - dma_addr_t address, size_t length) 83 - { 84 - struct fsl_dma_hw_addr *addr; 85 - 86 - addr = kzalloc(sizeof(*addr), GFP_ATOMIC); 87 - if (!addr) 88 - return -ENOMEM; 89 - 90 - INIT_LIST_HEAD(&addr->entry); 91 - addr->address = address; 92 - addr->length = length; 93 - 94 - list_add_tail(&addr->entry, &slave->addresses); 95 - return 0; 96 - } 97 - 98 - /** 99 - * fsl_dma_slave_free - free a struct fsl_dma_slave 100 - * @slave: the struct fsl_dma_slave to free 101 - * 102 - * Free a struct fsl_dma_slave and all associated address/length pairs 103 - */ 104 - static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave) 105 - { 106 - struct fsl_dma_hw_addr *addr, *tmp; 107 - 108 - if (slave) { 109 - list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) { 110 - list_del(&addr->entry); 111 - kfree(addr); 112 - } 113 - 114 - kfree(slave); 115 - } 116 - } 117 - 118 - /** 119 - * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave 120 - * @gfp: the flags to pass to kmalloc when allocating this structure 121 - * 122 - * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new 123 - * struct fsl_dma_slave on success, or NULL on failure. 124 - */ 125 - static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp) 126 - { 127 - struct fsl_dma_slave *slave; 128 - 129 - slave = kzalloc(sizeof(*slave), gfp); 130 - if (!slave) 131 - return NULL; 132 - 133 - INIT_LIST_HEAD(&slave->addresses); 134 - return slave; 135 - } 136 - 137 - #endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
+24
drivers/dma/Kconfig
··· 49 49 config ASYNC_TX_DISABLE_CHANNEL_SWITCH 50 50 bool 51 51 52 + config AMBA_PL08X 53 + bool "ARM PrimeCell PL080 or PL081 support" 54 + depends on ARM_AMBA && EXPERIMENTAL 55 + select DMA_ENGINE 56 + help 57 + Platform has a PL08x DMAC device 58 + which can provide DMA engine support 59 + 52 60 config INTEL_IOATDMA 53 61 tristate "Intel I/OAT DMA support" 54 62 depends on PCI && X86 ··· 202 194 select DMA_ENGINE 203 195 help 204 196 Enable support for the Topcliff PCH DMA engine. 197 + 198 + config IMX_SDMA 199 + tristate "i.MX SDMA support" 200 + depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5 201 + select DMA_ENGINE 202 + help 203 + Support the i.MX SDMA engine. This engine is integrated into 204 + Freescale i.MX25/31/35/51 chips. 205 + 206 + config IMX_DMA 207 + tristate "i.MX DMA support" 208 + depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27 209 + select DMA_ENGINE 210 + help 211 + Support the i.MX DMA engine. This engine is integrated into 212 + Freescale i.MX1/21/27 chips. 205 213 206 214 config DMA_ENGINE 207 215 bool
+3
drivers/dma/Makefile
··· 21 21 obj-$(CONFIG_SH_DMAE) += shdma.o 22 22 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o 23 23 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 24 + obj-$(CONFIG_IMX_SDMA) += imx-sdma.o 25 + obj-$(CONFIG_IMX_DMA) += imx-dma.o 24 26 obj-$(CONFIG_TIMB_DMA) += timb_dma.o 25 27 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 26 28 obj-$(CONFIG_PL330_DMA) += pl330.o 27 29 obj-$(CONFIG_PCH_DMA) += pch_dma.o 30 + obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+2167
drivers/dma/amba-pl08x.c
··· 1 + /* 2 + * Copyright (c) 2006 ARM Ltd. 3 + * Copyright (c) 2010 ST-Ericsson SA 4 + * 5 + * Author: Peter Pearse <peter.pearse@arm.com> 6 + * Author: Linus Walleij <linus.walleij@stericsson.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the Free 10 + * Software Foundation; either version 2 of the License, or (at your option) 11 + * any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, but WITHOUT 14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 + * more details. 17 + * 18 + * You should have received a copy of the GNU General Public License along with 19 + * this program; if not, write to the Free Software Foundation, Inc., 59 20 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 + * 22 + * The full GNU General Public License is iin this distribution in the 23 + * file called COPYING. 24 + * 25 + * Documentation: ARM DDI 0196G == PL080 26 + * Documentation: ARM DDI 0218E == PL081 27 + * 28 + * PL080 & PL081 both have 16 sets of DMA signals that can be routed to 29 + * any channel. 30 + * 31 + * The PL080 has 8 channels available for simultaneous use, and the PL081 32 + * has only two channels. So on these DMA controllers the number of channels 33 + * and the number of incoming DMA signals are two totally different things. 34 + * It is usually not possible to theoretically handle all physical signals, 35 + * so a multiplexing scheme with possible denial of use is necessary. 36 + * 37 + * The PL080 has a dual bus master, PL081 has a single master. 
38 + * 39 + * Memory to peripheral transfer may be visualized as 40 + * Get data from memory to DMAC 41 + * Until no data left 42 + * On burst request from peripheral 43 + * Destination burst from DMAC to peripheral 44 + * Clear burst request 45 + * Raise terminal count interrupt 46 + * 47 + * For peripherals with a FIFO: 48 + * Source burst size == half the depth of the peripheral FIFO 49 + * Destination burst size == the depth of the peripheral FIFO 50 + * 51 + * (Bursts are irrelevant for mem to mem transfers - there are no burst 52 + * signals, the DMA controller will simply facilitate its AHB master.) 53 + * 54 + * ASSUMES default (little) endianness for DMA transfers 55 + * 56 + * Only DMAC flow control is implemented 57 + * 58 + * Global TODO: 59 + * - Break out common code from arch/arm/mach-s3c64xx and share 60 + */ 61 + #include <linux/device.h> 62 + #include <linux/init.h> 63 + #include <linux/module.h> 64 + #include <linux/pci.h> 65 + #include <linux/interrupt.h> 66 + #include <linux/slab.h> 67 + #include <linux/dmapool.h> 68 + #include <linux/amba/bus.h> 69 + #include <linux/dmaengine.h> 70 + #include <linux/amba/pl08x.h> 71 + #include <linux/debugfs.h> 72 + #include <linux/seq_file.h> 73 + 74 + #include <asm/hardware/pl080.h> 75 + #include <asm/dma.h> 76 + #include <asm/mach/dma.h> 77 + #include <asm/atomic.h> 78 + #include <asm/processor.h> 79 + #include <asm/cacheflush.h> 80 + 81 + #define DRIVER_NAME "pl08xdmac" 82 + 83 + /** 84 + * struct vendor_data - vendor-specific config parameters 85 + * for PL08x derivates 86 + * @name: the name of this specific variant 87 + * @channels: the number of channels available in this variant 88 + * @dualmaster: whether this version supports dual AHB masters 89 + * or not. 
90 + */ 91 + struct vendor_data { 92 + char *name; 93 + u8 channels; 94 + bool dualmaster; 95 + }; 96 + 97 + /* 98 + * PL08X private data structures 99 + * An LLI struct - see pl08x TRM 100 + * Note that next uses bit[0] as a bus bit, 101 + * start & end do not - their bus bit info 102 + * is in cctl 103 + */ 104 + struct lli { 105 + dma_addr_t src; 106 + dma_addr_t dst; 107 + dma_addr_t next; 108 + u32 cctl; 109 + }; 110 + 111 + /** 112 + * struct pl08x_driver_data - the local state holder for the PL08x 113 + * @slave: slave engine for this instance 114 + * @memcpy: memcpy engine for this instance 115 + * @base: virtual memory base (remapped) for the PL08x 116 + * @adev: the corresponding AMBA (PrimeCell) bus entry 117 + * @vd: vendor data for this PL08x variant 118 + * @pd: platform data passed in from the platform/machine 119 + * @phy_chans: array of data for the physical channels 120 + * @pool: a pool for the LLI descriptors 121 + * @pool_ctr: counter of LLIs in the pool 122 + * @lock: a spinlock for this struct 123 + */ 124 + struct pl08x_driver_data { 125 + struct dma_device slave; 126 + struct dma_device memcpy; 127 + void __iomem *base; 128 + struct amba_device *adev; 129 + struct vendor_data *vd; 130 + struct pl08x_platform_data *pd; 131 + struct pl08x_phy_chan *phy_chans; 132 + struct dma_pool *pool; 133 + int pool_ctr; 134 + spinlock_t lock; 135 + }; 136 + 137 + /* 138 + * PL08X specific defines 139 + */ 140 + 141 + /* 142 + * Memory boundaries: the manual for PL08x says that the controller 143 + * cannot read past a 1KiB boundary, so these defines are used to 144 + * create transfer LLIs that do not cross such boundaries. 
145 + */ 146 + #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ 147 + #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) 148 + 149 + /* Minimum period between work queue runs */ 150 + #define PL08X_WQ_PERIODMIN 20 151 + 152 + /* Size (bytes) of each LLI buffer allocated for one transfer */ 153 + # define PL08X_LLI_TSFR_SIZE 0x2000 154 + 155 + /* Maximimum times we call dma_pool_alloc on this pool without freeing */ 156 + #define PL08X_MAX_ALLOCS 0x40 157 + #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli)) 158 + #define PL08X_ALIGN 8 159 + 160 + static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 161 + { 162 + return container_of(chan, struct pl08x_dma_chan, chan); 163 + } 164 + 165 + /* 166 + * Physical channel handling 167 + */ 168 + 169 + /* Whether a certain channel is busy or not */ 170 + static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) 171 + { 172 + unsigned int val; 173 + 174 + val = readl(ch->base + PL080_CH_CONFIG); 175 + return val & PL080_CONFIG_ACTIVE; 176 + } 177 + 178 + /* 179 + * Set the initial DMA register values i.e. 
those for the first LLI 180 + * The next lli pointer and the configuration interrupt bit have 181 + * been set when the LLIs were constructed 182 + */ 183 + static void pl08x_set_cregs(struct pl08x_driver_data *pl08x, 184 + struct pl08x_phy_chan *ch) 185 + { 186 + /* Wait for channel inactive */ 187 + while (pl08x_phy_channel_busy(ch)) 188 + ; 189 + 190 + dev_vdbg(&pl08x->adev->dev, 191 + "WRITE channel %d: csrc=%08x, cdst=%08x, " 192 + "cctl=%08x, clli=%08x, ccfg=%08x\n", 193 + ch->id, 194 + ch->csrc, 195 + ch->cdst, 196 + ch->cctl, 197 + ch->clli, 198 + ch->ccfg); 199 + 200 + writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR); 201 + writel(ch->cdst, ch->base + PL080_CH_DST_ADDR); 202 + writel(ch->clli, ch->base + PL080_CH_LLI); 203 + writel(ch->cctl, ch->base + PL080_CH_CONTROL); 204 + writel(ch->ccfg, ch->base + PL080_CH_CONFIG); 205 + } 206 + 207 + static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan) 208 + { 209 + struct pl08x_channel_data *cd = plchan->cd; 210 + struct pl08x_phy_chan *phychan = plchan->phychan; 211 + struct pl08x_txd *txd = plchan->at; 212 + 213 + /* Copy the basic control register calculated at transfer config */ 214 + phychan->csrc = txd->csrc; 215 + phychan->cdst = txd->cdst; 216 + phychan->clli = txd->clli; 217 + phychan->cctl = txd->cctl; 218 + 219 + /* Assign the signal to the proper control registers */ 220 + phychan->ccfg = cd->ccfg; 221 + phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK; 222 + phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK; 223 + /* If it wasn't set from AMBA, ignore it */ 224 + if (txd->direction == DMA_TO_DEVICE) 225 + /* Select signal as destination */ 226 + phychan->ccfg |= 227 + (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT); 228 + else if (txd->direction == DMA_FROM_DEVICE) 229 + /* Select signal as source */ 230 + phychan->ccfg |= 231 + (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT); 232 + /* Always enable error interrupts */ 233 + phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK; 234 + /* Always 
enable terminal interrupts */ 235 + phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK; 236 + } 237 + 238 + /* 239 + * Enable the DMA channel 240 + * Assumes all other configuration bits have been set 241 + * as desired before this code is called 242 + */ 243 + static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x, 244 + struct pl08x_phy_chan *ch) 245 + { 246 + u32 val; 247 + 248 + /* 249 + * Do not access config register until channel shows as disabled 250 + */ 251 + while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id)) 252 + ; 253 + 254 + /* 255 + * Do not access config register until channel shows as inactive 256 + */ 257 + val = readl(ch->base + PL080_CH_CONFIG); 258 + while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 259 + val = readl(ch->base + PL080_CH_CONFIG); 260 + 261 + writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG); 262 + } 263 + 264 + /* 265 + * Overall DMAC remains enabled always. 266 + * 267 + * Disabling individual channels could lose data. 
268 + * 269 + * Disable the peripheral DMA after disabling the DMAC 270 + * in order to allow the DMAC FIFO to drain, and 271 + * hence allow the channel to show inactive 272 + * 273 + */ 274 + static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 275 + { 276 + u32 val; 277 + 278 + /* Set the HALT bit and wait for the FIFO to drain */ 279 + val = readl(ch->base + PL080_CH_CONFIG); 280 + val |= PL080_CONFIG_HALT; 281 + writel(val, ch->base + PL080_CH_CONFIG); 282 + 283 + /* Wait for channel inactive */ 284 + while (pl08x_phy_channel_busy(ch)) 285 + ; 286 + } 287 + 288 + static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 289 + { 290 + u32 val; 291 + 292 + /* Clear the HALT bit */ 293 + val = readl(ch->base + PL080_CH_CONFIG); 294 + val &= ~PL080_CONFIG_HALT; 295 + writel(val, ch->base + PL080_CH_CONFIG); 296 + } 297 + 298 + 299 + /* Stops the channel */ 300 + static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) 301 + { 302 + u32 val; 303 + 304 + pl08x_pause_phy_chan(ch); 305 + 306 + /* Disable channel */ 307 + val = readl(ch->base + PL080_CH_CONFIG); 308 + val &= ~PL080_CONFIG_ENABLE; 309 + val &= ~PL080_CONFIG_ERR_IRQ_MASK; 310 + val &= ~PL080_CONFIG_TC_IRQ_MASK; 311 + writel(val, ch->base + PL080_CH_CONFIG); 312 + } 313 + 314 + static inline u32 get_bytes_in_cctl(u32 cctl) 315 + { 316 + /* The source width defines the number of bytes */ 317 + u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 318 + 319 + switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 320 + case PL080_WIDTH_8BIT: 321 + break; 322 + case PL080_WIDTH_16BIT: 323 + bytes *= 2; 324 + break; 325 + case PL080_WIDTH_32BIT: 326 + bytes *= 4; 327 + break; 328 + } 329 + return bytes; 330 + } 331 + 332 + /* The channel should be paused when calling this */ 333 + static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 334 + { 335 + struct pl08x_phy_chan *ch; 336 + struct pl08x_txd *txdi = NULL; 337 + struct pl08x_txd *txd; 338 + unsigned long flags; 339 + u32 bytes = 0; 340 + 341 + 
spin_lock_irqsave(&plchan->lock, flags); 342 + 343 + ch = plchan->phychan; 344 + txd = plchan->at; 345 + 346 + /* 347 + * Next follow the LLIs to get the number of pending bytes in the 348 + * currently active transaction. 349 + */ 350 + if (ch && txd) { 351 + struct lli *llis_va = txd->llis_va; 352 + struct lli *llis_bus = (struct lli *) txd->llis_bus; 353 + u32 clli = readl(ch->base + PL080_CH_LLI); 354 + 355 + /* First get the bytes in the current active LLI */ 356 + bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 357 + 358 + if (clli) { 359 + int i = 0; 360 + 361 + /* Forward to the LLI pointed to by clli */ 362 + while ((clli != (u32) &(llis_bus[i])) && 363 + (i < MAX_NUM_TSFR_LLIS)) 364 + i++; 365 + 366 + while (clli) { 367 + bytes += get_bytes_in_cctl(llis_va[i].cctl); 368 + /* 369 + * A clli of 0x00000000 will terminate the 370 + * LLI list 371 + */ 372 + clli = llis_va[i].next; 373 + i++; 374 + } 375 + } 376 + } 377 + 378 + /* Sum up all queued transactions */ 379 + if (!list_empty(&plchan->desc_list)) { 380 + list_for_each_entry(txdi, &plchan->desc_list, node) { 381 + bytes += txdi->len; 382 + } 383 + 384 + } 385 + 386 + spin_unlock_irqrestore(&plchan->lock, flags); 387 + 388 + return bytes; 389 + } 390 + 391 + /* 392 + * Allocate a physical channel for a virtual channel 393 + */ 394 + static struct pl08x_phy_chan * 395 + pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 396 + struct pl08x_dma_chan *virt_chan) 397 + { 398 + struct pl08x_phy_chan *ch = NULL; 399 + unsigned long flags; 400 + int i; 401 + 402 + /* 403 + * Try to locate a physical channel to be used for 404 + * this transfer. If all are taken return NULL and 405 + * the requester will have to cope by using some fallback 406 + * PIO mode or retrying later. 
407 + */ 408 + for (i = 0; i < pl08x->vd->channels; i++) { 409 + ch = &pl08x->phy_chans[i]; 410 + 411 + spin_lock_irqsave(&ch->lock, flags); 412 + 413 + if (!ch->serving) { 414 + ch->serving = virt_chan; 415 + ch->signal = -1; 416 + spin_unlock_irqrestore(&ch->lock, flags); 417 + break; 418 + } 419 + 420 + spin_unlock_irqrestore(&ch->lock, flags); 421 + } 422 + 423 + if (i == pl08x->vd->channels) { 424 + /* No physical channel available, cope with it */ 425 + return NULL; 426 + } 427 + 428 + return ch; 429 + } 430 + 431 + static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 432 + struct pl08x_phy_chan *ch) 433 + { 434 + unsigned long flags; 435 + 436 + /* Stop the channel and clear its interrupts */ 437 + pl08x_stop_phy_chan(ch); 438 + writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR); 439 + writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR); 440 + 441 + /* Mark it as free */ 442 + spin_lock_irqsave(&ch->lock, flags); 443 + ch->serving = NULL; 444 + spin_unlock_irqrestore(&ch->lock, flags); 445 + } 446 + 447 + /* 448 + * LLI handling 449 + */ 450 + 451 + static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) 452 + { 453 + switch (coded) { 454 + case PL080_WIDTH_8BIT: 455 + return 1; 456 + case PL080_WIDTH_16BIT: 457 + return 2; 458 + case PL080_WIDTH_32BIT: 459 + return 4; 460 + default: 461 + break; 462 + } 463 + BUG(); 464 + return 0; 465 + } 466 + 467 + static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 468 + u32 tsize) 469 + { 470 + u32 retbits = cctl; 471 + 472 + /* Remove all src, dst and transfersize bits */ 473 + retbits &= ~PL080_CONTROL_DWIDTH_MASK; 474 + retbits &= ~PL080_CONTROL_SWIDTH_MASK; 475 + retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 476 + 477 + /* Then set the bits according to the parameters */ 478 + switch (srcwidth) { 479 + case 1: 480 + retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; 481 + break; 482 + case 2: 483 + retbits |= PL080_WIDTH_16BIT << 
PL080_CONTROL_SWIDTH_SHIFT; 484 + break; 485 + case 4: 486 + retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; 487 + break; 488 + default: 489 + BUG(); 490 + break; 491 + } 492 + 493 + switch (dstwidth) { 494 + case 1: 495 + retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 496 + break; 497 + case 2: 498 + retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 499 + break; 500 + case 4: 501 + retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 502 + break; 503 + default: 504 + BUG(); 505 + break; 506 + } 507 + 508 + retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 509 + return retbits; 510 + } 511 + 512 + /* 513 + * Autoselect a master bus to use for the transfer 514 + * this prefers the destination bus if both available 515 + * if fixed address on one bus the other will be chosen 516 + */ 517 + void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus, 518 + struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus, 519 + struct pl08x_bus_data **sbus, u32 cctl) 520 + { 521 + if (!(cctl & PL080_CONTROL_DST_INCR)) { 522 + *mbus = src_bus; 523 + *sbus = dst_bus; 524 + } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 525 + *mbus = dst_bus; 526 + *sbus = src_bus; 527 + } else { 528 + if (dst_bus->buswidth == 4) { 529 + *mbus = dst_bus; 530 + *sbus = src_bus; 531 + } else if (src_bus->buswidth == 4) { 532 + *mbus = src_bus; 533 + *sbus = dst_bus; 534 + } else if (dst_bus->buswidth == 2) { 535 + *mbus = dst_bus; 536 + *sbus = src_bus; 537 + } else if (src_bus->buswidth == 2) { 538 + *mbus = src_bus; 539 + *sbus = dst_bus; 540 + } else { 541 + /* src_bus->buswidth == 1 */ 542 + *mbus = dst_bus; 543 + *sbus = src_bus; 544 + } 545 + } 546 + } 547 + 548 + /* 549 + * Fills in one LLI for a certain transfer descriptor 550 + * and advance the counter 551 + */ 552 + int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 553 + struct pl08x_txd *txd, int num_llis, int len, 554 + u32 cctl, u32 *remainder) 555 + { 556 + struct lli 
*llis_va = txd->llis_va; 557 + struct lli *llis_bus = (struct lli *) txd->llis_bus; 558 + 559 + BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 560 + 561 + llis_va[num_llis].cctl = cctl; 562 + llis_va[num_llis].src = txd->srcbus.addr; 563 + llis_va[num_llis].dst = txd->dstbus.addr; 564 + 565 + /* 566 + * On versions with dual masters, you can optionally AND on 567 + * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read 568 + * in new LLIs with that controller, but we always try to 569 + * choose AHB1 to point into memory. The idea is to have AHB2 570 + * fixed on the peripheral and AHB1 messing around in the 571 + * memory. So we don't manipulate this bit currently. 572 + */ 573 + 574 + llis_va[num_llis].next = 575 + (dma_addr_t)((u32) &(llis_bus[num_llis + 1])); 576 + 577 + if (cctl & PL080_CONTROL_SRC_INCR) 578 + txd->srcbus.addr += len; 579 + if (cctl & PL080_CONTROL_DST_INCR) 580 + txd->dstbus.addr += len; 581 + 582 + *remainder -= len; 583 + 584 + return num_llis + 1; 585 + } 586 + 587 + /* 588 + * Return number of bytes to fill to boundary, or len 589 + */ 590 + static inline u32 pl08x_pre_boundary(u32 addr, u32 len) 591 + { 592 + u32 boundary; 593 + 594 + boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1) 595 + << PL08X_BOUNDARY_SHIFT; 596 + 597 + if (boundary < addr + len) 598 + return boundary - addr; 599 + else 600 + return len; 601 + } 602 + 603 + /* 604 + * This fills in the table of LLIs for the transfer descriptor 605 + * Note that we assume we never have to change the burst sizes 606 + * Return 0 for error 607 + */ 608 + static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 609 + struct pl08x_txd *txd) 610 + { 611 + struct pl08x_channel_data *cd = txd->cd; 612 + struct pl08x_bus_data *mbus, *sbus; 613 + u32 remainder; 614 + int num_llis = 0; 615 + u32 cctl; 616 + int max_bytes_per_lli; 617 + int total_bytes = 0; 618 + struct lli *llis_va; 619 + struct lli *llis_bus; 620 + 621 + if (!txd) { 622 + dev_err(&pl08x->adev->dev, "%s no 
descriptor\n", __func__); 623 + return 0; 624 + } 625 + 626 + txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 627 + &txd->llis_bus); 628 + if (!txd->llis_va) { 629 + dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 630 + return 0; 631 + } 632 + 633 + pl08x->pool_ctr++; 634 + 635 + /* 636 + * Initialize bus values for this transfer 637 + * from the passed optimal values 638 + */ 639 + if (!cd) { 640 + dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__); 641 + return 0; 642 + } 643 + 644 + /* Get the default CCTL from the platform data */ 645 + cctl = cd->cctl; 646 + 647 + /* 648 + * On the PL080 we have two bus masters and we 649 + * should select one for source and one for 650 + * destination. We try to use AHB2 for the 651 + * bus which does not increment (typically the 652 + * peripheral) else we just choose something. 653 + */ 654 + cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 655 + if (pl08x->vd->dualmaster) { 656 + if (cctl & PL080_CONTROL_SRC_INCR) 657 + /* Source increments, use AHB2 for destination */ 658 + cctl |= PL080_CONTROL_DST_AHB2; 659 + else if (cctl & PL080_CONTROL_DST_INCR) 660 + /* Destination increments, use AHB2 for source */ 661 + cctl |= PL080_CONTROL_SRC_AHB2; 662 + else 663 + /* Just pick something, source AHB1 dest AHB2 */ 664 + cctl |= PL080_CONTROL_DST_AHB2; 665 + } 666 + 667 + /* Find maximum width of the source bus */ 668 + txd->srcbus.maxwidth = 669 + pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 670 + PL080_CONTROL_SWIDTH_SHIFT); 671 + 672 + /* Find maximum width of the destination bus */ 673 + txd->dstbus.maxwidth = 674 + pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 675 + PL080_CONTROL_DWIDTH_SHIFT); 676 + 677 + /* Set up the bus widths to the maximum */ 678 + txd->srcbus.buswidth = txd->srcbus.maxwidth; 679 + txd->dstbus.buswidth = txd->dstbus.maxwidth; 680 + dev_vdbg(&pl08x->adev->dev, 681 + "%s source bus is %d bytes wide, dest bus is %d bytes 
wide\n", 682 + __func__, txd->srcbus.buswidth, txd->dstbus.buswidth); 683 + 684 + 685 + /* 686 + * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 687 + */ 688 + max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) * 689 + PL080_CONTROL_TRANSFER_SIZE_MASK; 690 + dev_vdbg(&pl08x->adev->dev, 691 + "%s max bytes per lli = %d\n", 692 + __func__, max_bytes_per_lli); 693 + 694 + /* We need to count this down to zero */ 695 + remainder = txd->len; 696 + dev_vdbg(&pl08x->adev->dev, 697 + "%s remainder = %d\n", 698 + __func__, remainder); 699 + 700 + /* 701 + * Choose bus to align to 702 + * - prefers destination bus if both available 703 + * - if fixed address on one bus chooses other 704 + * - modifies cctl to choose an apropriate master 705 + */ 706 + pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus, 707 + &mbus, &sbus, cctl); 708 + 709 + 710 + /* 711 + * The lowest bit of the LLI register 712 + * is also used to indicate which master to 713 + * use for reading the LLIs. 
714 + */ 715 + 716 + if (txd->len < mbus->buswidth) { 717 + /* 718 + * Less than a bus width available 719 + * - send as single bytes 720 + */ 721 + while (remainder) { 722 + dev_vdbg(&pl08x->adev->dev, 723 + "%s single byte LLIs for a transfer of " 724 + "less than a bus width (remain %08x)\n", 725 + __func__, remainder); 726 + cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 727 + num_llis = 728 + pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1, 729 + cctl, &remainder); 730 + total_bytes++; 731 + } 732 + } else { 733 + /* 734 + * Make one byte LLIs until master bus is aligned 735 + * - slave will then be aligned also 736 + */ 737 + while ((mbus->addr) % (mbus->buswidth)) { 738 + dev_vdbg(&pl08x->adev->dev, 739 + "%s adjustment lli for less than bus width " 740 + "(remain %08x)\n", 741 + __func__, remainder); 742 + cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 743 + num_llis = pl08x_fill_lli_for_desc 744 + (pl08x, txd, num_llis, 1, cctl, &remainder); 745 + total_bytes++; 746 + } 747 + 748 + /* 749 + * Master now aligned 750 + * - if slave is not then we must set its width down 751 + */ 752 + if (sbus->addr % sbus->buswidth) { 753 + dev_dbg(&pl08x->adev->dev, 754 + "%s set down bus width to one byte\n", 755 + __func__); 756 + 757 + sbus->buswidth = 1; 758 + } 759 + 760 + /* 761 + * Make largest possible LLIs until less than one bus 762 + * width left 763 + */ 764 + while (remainder > (mbus->buswidth - 1)) { 765 + int lli_len, target_len; 766 + int tsize; 767 + int odd_bytes; 768 + 769 + /* 770 + * If enough left try to send max possible, 771 + * otherwise try to send the remainder 772 + */ 773 + target_len = remainder; 774 + if (remainder > max_bytes_per_lli) 775 + target_len = max_bytes_per_lli; 776 + 777 + /* 778 + * Set bus lengths for incrementing busses 779 + * to number of bytes which fill to next memory 780 + * boundary 781 + */ 782 + if (cctl & PL080_CONTROL_SRC_INCR) 783 + txd->srcbus.fill_bytes = 784 + pl08x_pre_boundary( 785 + txd->srcbus.addr, 786 + remainder); 787 + 
else 788 + txd->srcbus.fill_bytes = 789 + max_bytes_per_lli; 790 + 791 + if (cctl & PL080_CONTROL_DST_INCR) 792 + txd->dstbus.fill_bytes = 793 + pl08x_pre_boundary( 794 + txd->dstbus.addr, 795 + remainder); 796 + else 797 + txd->dstbus.fill_bytes = 798 + max_bytes_per_lli; 799 + 800 + /* 801 + * Find the nearest 802 + */ 803 + lli_len = min(txd->srcbus.fill_bytes, 804 + txd->dstbus.fill_bytes); 805 + 806 + BUG_ON(lli_len > remainder); 807 + 808 + if (lli_len <= 0) { 809 + dev_err(&pl08x->adev->dev, 810 + "%s lli_len is %d, <= 0\n", 811 + __func__, lli_len); 812 + return 0; 813 + } 814 + 815 + if (lli_len == target_len) { 816 + /* 817 + * Can send what we wanted 818 + */ 819 + /* 820 + * Maintain alignment 821 + */ 822 + lli_len = (lli_len/mbus->buswidth) * 823 + mbus->buswidth; 824 + odd_bytes = 0; 825 + } else { 826 + /* 827 + * So now we know how many bytes to transfer 828 + * to get to the nearest boundary 829 + * The next lli will past the boundary 830 + * - however we may be working to a boundary 831 + * on the slave bus 832 + * We need to ensure the master stays aligned 833 + */ 834 + odd_bytes = lli_len % mbus->buswidth; 835 + /* 836 + * - and that we are working in multiples 837 + * of the bus widths 838 + */ 839 + lli_len -= odd_bytes; 840 + 841 + } 842 + 843 + if (lli_len) { 844 + /* 845 + * Check against minimum bus alignment: 846 + * Calculate actual transfer size in relation 847 + * to bus width an get a maximum remainder of 848 + * the smallest bus width - 1 849 + */ 850 + /* FIXME: use round_down()? */ 851 + tsize = lli_len / min(mbus->buswidth, 852 + sbus->buswidth); 853 + lli_len = tsize * min(mbus->buswidth, 854 + sbus->buswidth); 855 + 856 + if (target_len != lli_len) { 857 + dev_vdbg(&pl08x->adev->dev, 858 + "%s can't send what we want. 
Desired %08x, lli of %08x bytes in txd of %08x\n", 859 + __func__, target_len, lli_len, txd->len); 860 + } 861 + 862 + cctl = pl08x_cctl_bits(cctl, 863 + txd->srcbus.buswidth, 864 + txd->dstbus.buswidth, 865 + tsize); 866 + 867 + dev_vdbg(&pl08x->adev->dev, 868 + "%s fill lli with single lli chunk of size %08x (remainder %08x)\n", 869 + __func__, lli_len, remainder); 870 + num_llis = pl08x_fill_lli_for_desc(pl08x, txd, 871 + num_llis, lli_len, cctl, 872 + &remainder); 873 + total_bytes += lli_len; 874 + } 875 + 876 + 877 + if (odd_bytes) { 878 + /* 879 + * Creep past the boundary, 880 + * maintaining master alignment 881 + */ 882 + int j; 883 + for (j = 0; (j < mbus->buswidth) 884 + && (remainder); j++) { 885 + cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 886 + dev_vdbg(&pl08x->adev->dev, 887 + "%s align with boundardy, single byte (remain %08x)\n", 888 + __func__, remainder); 889 + num_llis = 890 + pl08x_fill_lli_for_desc(pl08x, 891 + txd, num_llis, 1, 892 + cctl, &remainder); 893 + total_bytes++; 894 + } 895 + } 896 + } 897 + 898 + /* 899 + * Send any odd bytes 900 + */ 901 + if (remainder < 0) { 902 + dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n", 903 + __func__, remainder); 904 + return 0; 905 + } 906 + 907 + while (remainder) { 908 + cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 909 + dev_vdbg(&pl08x->adev->dev, 910 + "%s align with boundardy, single odd byte (remain %d)\n", 911 + __func__, remainder); 912 + num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 913 + 1, cctl, &remainder); 914 + total_bytes++; 915 + } 916 + } 917 + if (total_bytes != txd->len) { 918 + dev_err(&pl08x->adev->dev, 919 + "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n", 920 + __func__, total_bytes, txd->len); 921 + return 0; 922 + } 923 + 924 + if (num_llis >= MAX_NUM_TSFR_LLIS) { 925 + dev_err(&pl08x->adev->dev, 926 + "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 927 + __func__, (u32) MAX_NUM_TSFR_LLIS); 928 + return 0; 
929 + } 930 + /* 931 + * Decide whether this is a loop or a terminated transfer 932 + */ 933 + llis_va = txd->llis_va; 934 + llis_bus = (struct lli *) txd->llis_bus; 935 + 936 + if (cd->circular_buffer) { 937 + /* 938 + * Loop the circular buffer so that the next element 939 + * points back to the beginning of the LLI. 940 + */ 941 + llis_va[num_llis - 1].next = 942 + (dma_addr_t)((unsigned int)&(llis_bus[0])); 943 + } else { 944 + /* 945 + * On non-circular buffers, the final LLI terminates 946 + * the LLI. 947 + */ 948 + llis_va[num_llis - 1].next = 0; 949 + /* 950 + * The final LLI element shall also fire an interrupt 951 + */ 952 + llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; 953 + } 954 + 955 + /* Now store the channel register values */ 956 + txd->csrc = llis_va[0].src; 957 + txd->cdst = llis_va[0].dst; 958 + if (num_llis > 1) 959 + txd->clli = llis_va[0].next; 960 + else 961 + txd->clli = 0; 962 + 963 + txd->cctl = llis_va[0].cctl; 964 + /* ccfg will be set at physical channel allocation time */ 965 + 966 + #ifdef VERBOSE_DEBUG 967 + { 968 + int i; 969 + 970 + for (i = 0; i < num_llis; i++) { 971 + dev_vdbg(&pl08x->adev->dev, 972 + "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n", 973 + i, 974 + &llis_va[i], 975 + llis_va[i].src, 976 + llis_va[i].dst, 977 + llis_va[i].cctl, 978 + llis_va[i].next 979 + ); 980 + } 981 + } 982 + #endif 983 + 984 + return num_llis; 985 + } 986 + 987 + /* You should call this with the struct pl08x lock held */ 988 + static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 989 + struct pl08x_txd *txd) 990 + { 991 + if (!txd) 992 + dev_err(&pl08x->adev->dev, 993 + "%s no descriptor to free\n", 994 + __func__); 995 + 996 + /* Free the LLI */ 997 + dma_pool_free(pl08x->pool, txd->llis_va, 998 + txd->llis_bus); 999 + 1000 + pl08x->pool_ctr--; 1001 + 1002 + kfree(txd); 1003 + } 1004 + 1005 + static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 1006 + struct pl08x_dma_chan *plchan) 1007 + { 1008 + 
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	/* Unlink each queued descriptor, then free it and its LLIs */
	if (!list_empty(&plchan->desc_list)) {
		list_for_each_entry_safe(txdi,
				next, &plchan->desc_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}

	}
}

/*
 * The DMA ENGINE API
 */
/* No per-channel resources are pre-allocated; descriptors come per-prep */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

/* Nothing to release: see pl08x_alloc_chan_resources() */
static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * Attach a physical DMA channel to this virtual channel, and for slave
 * channels also claim the muxed request signal from the platform.
 * Returns 0 on success or -EBUSY if no physical channel/signal is free.
 * NOTE(review): the txd parameter is not used in this body — presumably
 * kept for symmetry with callers; confirm before removing.
 *
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
		struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan)
		return 0;

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave &&
	    ch->signal < 0 &&
	    pl08x->pd->get_signal) {
		ret = pl08x->pd->get_signal(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
		ch->signal = ret;
	}

	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 ch->signal,
		 plchan->name);

	plchan->phychan = ch;

	return 0;
}

/*
 * Assign the next cookie and release the channel lock which was taken
 * in the prep() call — see the "leave plchan->lock locked on purpose"
 * comment in pl08x_prep_channel_resources(); prep() and tx_submit()
 * form a lock/unlock pair across two API calls.
 */
static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);

	atomic_inc(&plchan->last_issued);
	tx->cookie = atomic_read(&plchan->last_issued);
	/* This unlock follows the lock in the prep() function */
	spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);

	return tx->cookie;
}

/* Interrupt descriptors are not implemented: always returns NULL */
static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop
 * may give problems - could schedule where indicated.
 * If slaves are relying on interrupts to signal completion this
 * function must not be called with interrupts disabled
 */
static enum dma_status
pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	dma_cookie_t last_used;
	dma_cookie_t last_complete;
	enum dma_status ret;
	u32 bytesleft = 0;

	last_used = atomic_read(&plchan->last_issued);
	last_complete = plchan->lc;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret == DMA_SUCCESS) {
		dma_set_tx_state(txstate, last_complete, last_used, 0);
		return ret;
	}

	/*
	 * schedule(); could be inserted here
	 */

	/*
	 * This cookie not complete yet
	 */
	last_used = atomic_read(&plchan->last_issued);
	last_complete = plchan->lc;

	/* Get number of bytes left in the active transactions and queue */
	bytesleft = pl08x_getbytes_chan(plchan);

	dma_set_tx_state(txstate, last_complete, last_used,
			 bytesleft);

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
/* Maps a burst length in words to the PL080 CCTL SB/DB size field values */
struct burst_table {
	int burstwords;
	u32 reg;
};

/* Ordered largest-first: the lookup below picks the first entry <= maxburst */
static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 128,
		.reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 64,
		.reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_64 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 32,
		.reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 16,
		.reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 8,
		.reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 4,
		.reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
	{
		.burstwords = 1,
		.reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT),
	},
};

/*
 * Translate a dma_slave_config into CCTL/CCFG register values and store
 * them back into the channel's platform data (cd->cctl / cd->ccfg).
 * Note this WRITES the shared channel data, so the config sticks for
 * subsequent transfers on this channel.
 */
static void dma_set_runtime_config(struct dma_chan *chan,
		struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_channel_data *cd = plchan->cd;
	enum dma_slave_buswidth addr_width;
	u32 maxburst;
	u32 cctl = 0;
	/* Mask out all except src and dst channel */
	u32 ccfg = cd->ccfg & 0x000003DEU;
	int i = 0;

	/* Transfer direction */
	plchan->runtime_direction = config->direction;
	if (config->direction == DMA_TO_DEVICE) {
		plchan->runtime_addr = config->dst_addr;
		cctl |= PL080_CONTROL_SRC_INCR;
		ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		addr_width = config->dst_addr_width;
		maxburst = config->dst_maxburst;
	} else if (config->direction == DMA_FROM_DEVICE) {
		plchan->runtime_addr = config->src_addr;
		cctl |= PL080_CONTROL_DST_INCR;
		ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
		addr_width = config->src_addr_width;
		maxburst = config->src_maxburst;
	} else {
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien transfer direction\n");
		return;
	}

	/* Same width is programmed for both source and destination */
	switch (addr_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) |
			(PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT);
		break;
	default:
		dev_err(&pl08x->adev->dev,
			"bad runtime_config: alien address width\n");
		return;
	}

	/*
	 * Now decide on a maxburst:
	 * If this channel will only request single transfers, set
	 * this down to ONE element.
	 */
	if (plchan->cd->single) {
		cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) |
			(PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT);
	} else {
		/*
		 * NOTE(review): if maxburst is 0, no entry matches and i
		 * ends up == ARRAY_SIZE(burst_sizes), reading one past the
		 * end of the table — verify callers always set maxburst >= 1.
		 */
		while (i < ARRAY_SIZE(burst_sizes)) {
			if (burst_sizes[i].burstwords <= maxburst)
				break;
			i++;
		}
		cctl |= burst_sizes[i].reg;
	}

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	cctl &= ~PL080_CONTROL_PROT_MASK;
	cctl |= PL080_CONTROL_PROT_SYS;

	/* Modify the default channel data to fit PrimeCell request */
	cd->cctl = cctl;
	cd->ccfg = ccfg;

	dev_dbg(&pl08x->adev->dev,
		"configured channel %s (%s) for %s, data width %d, "
		"maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n",
		dma_chan_name(chan), plchan->name,
		(config->direction == DMA_FROM_DEVICE) ?
"RX" : "TX", 1284 + addr_width, 1285 + maxburst, 1286 + cctl, ccfg); 1287 + } 1288 + 1289 + /* 1290 + * Slave transactions callback to the slave device to allow 1291 + * synchronization of slave DMA signals with the DMAC enable 1292 + */ 1293 + static void pl08x_issue_pending(struct dma_chan *chan) 1294 + { 1295 + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1296 + struct pl08x_driver_data *pl08x = plchan->host; 1297 + unsigned long flags; 1298 + 1299 + spin_lock_irqsave(&plchan->lock, flags); 1300 + /* Something is already active */ 1301 + if (plchan->at) { 1302 + spin_unlock_irqrestore(&plchan->lock, flags); 1303 + return; 1304 + } 1305 + 1306 + /* Didn't get a physical channel so waiting for it ... */ 1307 + if (plchan->state == PL08X_CHAN_WAITING) 1308 + return; 1309 + 1310 + /* Take the first element in the queue and execute it */ 1311 + if (!list_empty(&plchan->desc_list)) { 1312 + struct pl08x_txd *next; 1313 + 1314 + next = list_first_entry(&plchan->desc_list, 1315 + struct pl08x_txd, 1316 + node); 1317 + list_del(&next->node); 1318 + plchan->at = next; 1319 + plchan->state = PL08X_CHAN_RUNNING; 1320 + 1321 + /* Configure the physical channel for the active txd */ 1322 + pl08x_config_phychan_for_txd(plchan); 1323 + pl08x_set_cregs(pl08x, plchan->phychan); 1324 + pl08x_enable_phy_chan(pl08x, plchan->phychan); 1325 + } 1326 + 1327 + spin_unlock_irqrestore(&plchan->lock, flags); 1328 + } 1329 + 1330 + static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, 1331 + struct pl08x_txd *txd) 1332 + { 1333 + int num_llis; 1334 + struct pl08x_driver_data *pl08x = plchan->host; 1335 + int ret; 1336 + 1337 + num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1338 + 1339 + if (!num_llis) 1340 + return -EINVAL; 1341 + 1342 + spin_lock_irqsave(&plchan->lock, plchan->lockflags); 1343 + 1344 + /* 1345 + * If this device is not using a circular buffer then 1346 + * queue this new descriptor for transfer. 
	 * The descriptor for a circular buffer continues
	 * to be used until the channel is freed.
	 */
	if (txd->cd->circular_buffer)
		dev_err(&pl08x->adev->dev,
			"%s attempting to queue a circular buffer\n",
			__func__);
	else
		list_add_tail(&txd->node,
			      &plchan->desc_list);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel available, we will
		 * stack up the memcpy channels until there is a channel
		 * available to handle it whereas slave transfers may
		 * have been denied due to platform channel muxing restrictions
		 * and since there is no guarantee that this will ever be
		 * resolved, and since the signal must be acquired AFTER
		 * acquiring the physical channel, we will let them be NACK:ed
		 * with -EBUSY here. The drivers can always retry the prep()
		 * call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
			return -EBUSY;
		}
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else
		/*
		 * Else we're all set, paused and ready to roll,
		 * status will switch to PL08X_CHAN_RUNNING when
		 * we call issue_pending(). If there is something
		 * running on the channel already we don't change
		 * its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	/*
	 * Notice that we leave plchan->lock locked on purpose:
	 * it will be unlocked in the subsequent tx_submit()
	 * call. This is a consequence of the current API.
	 */

	return 0;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 * NOTE(review): on pl08x_prep_channel_resources() failure the txd
 * allocated below is not freed — looks like a leak; confirm and fix.
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dma_async_tx_descriptor_init(&txd->tx, chan);
	txd->direction = DMA_NONE;
	txd->srcbus.addr = src;
	txd->dstbus.addr = dest;

	/* Set platform data for m2m */
	txd->cd = &pl08x->pd->memcpy_channel;
	/*
	 * Both to be incremented or the code will break
	 * NOTE(review): this writes into the shared platform
	 * memcpy_channel data, affecting every memcpy channel — verify
	 * this is intentional rather than a per-txd setting.
	 */
	txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
	txd->tx.tx_submit = pl08x_tx_submit;
	txd->tx.callback = NULL;
	txd->tx.callback_param = NULL;
	txd->len = len;

	INIT_LIST_HEAD(&txd->node);
	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;
	/*
	 * NB: the channel lock is held at this point so tx_submit()
	 * must be called in direct succession.
	 */

	return &txd->tx;
}

/*
 * Prepare a slave scatter/gather transfer. Only a single-entry sglist
 * is supported (BUG()s otherwise). Addresses configured at runtime via
 * dma_set_runtime_config() take precedence over static platform data.
 */
struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_data_direction direction,
		unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	int ret;

	/*
	 * Current implementation ASSUMES only one sg
	 */
	if (sg_len != 1) {
		dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n",
			__func__);
		BUG();
	}

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
		__func__, sgl->length, plchan->name);

	txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	dma_async_tx_descriptor_init(&txd->tx, chan);

	if (direction != plchan->runtime_direction)
		dev_err(&pl08x->adev->dev, "%s DMA setup does not match "
			"the direction configured for the PrimeCell\n",
			__func__);

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;
	if (direction == DMA_TO_DEVICE) {
		txd->srcbus.addr = sgl->dma_address;
		if (plchan->runtime_addr)
			txd->dstbus.addr = plchan->runtime_addr;
		else
			txd->dstbus.addr = plchan->cd->addr;
	} else if (direction == DMA_FROM_DEVICE) {
		if (plchan->runtime_addr)
			txd->srcbus.addr = plchan->runtime_addr;
		else
			txd->srcbus.addr = plchan->cd->addr;
		txd->dstbus.addr = sgl->dma_address;
	} else {
		/* NOTE(review): txd is leaked on this error path — confirm */
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}
	txd->cd = plchan->cd;
	txd->tx.tx_submit = pl08x_tx_submit;
	txd->tx.callback = NULL;
	txd->tx.callback_param = NULL;
	txd->len = sgl->length;
	INIT_LIST_HEAD(&txd->node);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;
	/*
	 * NB: the channel lock is held at this point so tx_submit()
	 * must be called in direct succession.
	 */

	return &txd->tx;
}

/*
 * Generic dmaengine control: DMA_SLAVE_CONFIG, DMA_TERMINATE_ALL,
 * DMA_PAUSE and DMA_RESUME. Returns 0 on success or -ENXIO for an
 * unknown command.
 */
static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		dma_set_runtime_config(chan,
				       (struct dma_slave_config *)
				       arg);
		return 0;
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_stop_phy_chan(plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			if ((plchan->phychan->signal >= 0) &&
			    pl08x->pd->put_signal) {
				pl08x->pd->put_signal(plchan);
				plchan->phychan->signal = -1;
			}
			pl08x_put_phy_channel(pl08x, plchan->phychan);
			plchan->phychan = NULL;
		}
		/*
		 * Stop any pending tasklet
		 * NOTE(review): tasklet_disable() is never paired with a
		 * tasklet_enable(), leaving the tasklet disabled for good
		 * after a terminate — verify intended.
		 */
		tasklet_disable(&plchan->tasklet);
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		/*
		 * NOTE(review): phychan may be NULL here if only plchan->at
		 * was set above (the guard tests !phychan && !at) — confirm
		 * PAUSE/RESUME can't race with channel release.
		 */
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

/* dmaengine filter function: match a virtual channel by its bus_id name */
bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	char *name = chan_id;

	/* Check that the channel is not taken!
*/ 1604 + if (!strcmp(plchan->name, name)) 1605 + return true; 1606 + 1607 + return false; 1608 + } 1609 + 1610 + /* 1611 + * Just check that the device is there and active 1612 + * TODO: turn this bit on/off depending on the number of 1613 + * physical channels actually used, if it is zero... well 1614 + * shut it off. That will save some power. Cut the clock 1615 + * at the same time. 1616 + */ 1617 + static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1618 + { 1619 + u32 val; 1620 + 1621 + val = readl(pl08x->base + PL080_CONFIG); 1622 + val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); 1623 + /* We implictly clear bit 1 and that means little-endian mode */ 1624 + val |= PL080_CONFIG_ENABLE; 1625 + writel(val, pl08x->base + PL080_CONFIG); 1626 + } 1627 + 1628 + static void pl08x_tasklet(unsigned long data) 1629 + { 1630 + struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; 1631 + struct pl08x_phy_chan *phychan = plchan->phychan; 1632 + struct pl08x_driver_data *pl08x = plchan->host; 1633 + 1634 + if (!plchan) 1635 + BUG(); 1636 + 1637 + spin_lock(&plchan->lock); 1638 + 1639 + if (plchan->at) { 1640 + dma_async_tx_callback callback = 1641 + plchan->at->tx.callback; 1642 + void *callback_param = 1643 + plchan->at->tx.callback_param; 1644 + 1645 + /* 1646 + * Update last completed 1647 + */ 1648 + plchan->lc = 1649 + (plchan->at->tx.cookie); 1650 + 1651 + /* 1652 + * Callback to signal completion 1653 + */ 1654 + if (callback) 1655 + callback(callback_param); 1656 + 1657 + /* 1658 + * Device callbacks should NOT clear 1659 + * the current transaction on the channel 1660 + * Linus: sometimes they should? 
1661 + */ 1662 + if (!plchan->at) 1663 + BUG(); 1664 + 1665 + /* 1666 + * Free the descriptor if it's not for a device 1667 + * using a circular buffer 1668 + */ 1669 + if (!plchan->at->cd->circular_buffer) { 1670 + pl08x_free_txd(pl08x, plchan->at); 1671 + plchan->at = NULL; 1672 + } 1673 + /* 1674 + * else descriptor for circular 1675 + * buffers only freed when 1676 + * client has disabled dma 1677 + */ 1678 + } 1679 + /* 1680 + * If a new descriptor is queued, set it up 1681 + * plchan->at is NULL here 1682 + */ 1683 + if (!list_empty(&plchan->desc_list)) { 1684 + struct pl08x_txd *next; 1685 + 1686 + next = list_first_entry(&plchan->desc_list, 1687 + struct pl08x_txd, 1688 + node); 1689 + list_del(&next->node); 1690 + plchan->at = next; 1691 + /* Configure the physical channel for the next txd */ 1692 + pl08x_config_phychan_for_txd(plchan); 1693 + pl08x_set_cregs(pl08x, plchan->phychan); 1694 + pl08x_enable_phy_chan(pl08x, plchan->phychan); 1695 + } else { 1696 + struct pl08x_dma_chan *waiting = NULL; 1697 + 1698 + /* 1699 + * No more jobs, so free up the physical channel 1700 + * Free any allocated signal on slave transfers too 1701 + */ 1702 + if ((phychan->signal >= 0) && pl08x->pd->put_signal) { 1703 + pl08x->pd->put_signal(plchan); 1704 + phychan->signal = -1; 1705 + } 1706 + pl08x_put_phy_channel(pl08x, phychan); 1707 + plchan->phychan = NULL; 1708 + plchan->state = PL08X_CHAN_IDLE; 1709 + 1710 + /* 1711 + * And NOW before anyone else can grab that free:d 1712 + * up physical channel, see if there is some memcpy 1713 + * pending that seriously needs to start because of 1714 + * being stacked up while we were choking the 1715 + * physical channels with data. 
1716 + */ 1717 + list_for_each_entry(waiting, &pl08x->memcpy.channels, 1718 + chan.device_node) { 1719 + if (waiting->state == PL08X_CHAN_WAITING && 1720 + waiting->waiting != NULL) { 1721 + int ret; 1722 + 1723 + /* This should REALLY not fail now */ 1724 + ret = prep_phy_channel(waiting, 1725 + waiting->waiting); 1726 + BUG_ON(ret); 1727 + waiting->state = PL08X_CHAN_RUNNING; 1728 + waiting->waiting = NULL; 1729 + pl08x_issue_pending(&waiting->chan); 1730 + break; 1731 + } 1732 + } 1733 + } 1734 + 1735 + spin_unlock(&plchan->lock); 1736 + } 1737 + 1738 + static irqreturn_t pl08x_irq(int irq, void *dev) 1739 + { 1740 + struct pl08x_driver_data *pl08x = dev; 1741 + u32 mask = 0; 1742 + u32 val; 1743 + int i; 1744 + 1745 + val = readl(pl08x->base + PL080_ERR_STATUS); 1746 + if (val) { 1747 + /* 1748 + * An error interrupt (on one or more channels) 1749 + */ 1750 + dev_err(&pl08x->adev->dev, 1751 + "%s error interrupt, register value 0x%08x\n", 1752 + __func__, val); 1753 + /* 1754 + * Simply clear ALL PL08X error interrupts, 1755 + * regardless of channel and cause 1756 + * FIXME: should be 0x00000003 on PL081 really. 1757 + */ 1758 + writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1759 + } 1760 + val = readl(pl08x->base + PL080_INT_STATUS); 1761 + for (i = 0; i < pl08x->vd->channels; i++) { 1762 + if ((1 << i) & val) { 1763 + /* Locate physical channel */ 1764 + struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1765 + struct pl08x_dma_chan *plchan = phychan->serving; 1766 + 1767 + /* Schedule tasklet on this channel */ 1768 + tasklet_schedule(&plchan->tasklet); 1769 + 1770 + mask |= (1 << i); 1771 + } 1772 + } 1773 + /* 1774 + * Clear only the terminal interrupts on channels we processed 1775 + */ 1776 + writel(mask, pl08x->base + PL080_TC_CLEAR); 1777 + 1778 + return mask ? IRQ_HANDLED : IRQ_NONE; 1779 + } 1780 + 1781 + /* 1782 + * Initialise the DMAC memcpy/slave channels. 
1783 + * Make a local wrapper to hold required data 1784 + */ 1785 + static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1786 + struct dma_device *dmadev, 1787 + unsigned int channels, 1788 + bool slave) 1789 + { 1790 + struct pl08x_dma_chan *chan; 1791 + int i; 1792 + 1793 + INIT_LIST_HEAD(&dmadev->channels); 1794 + /* 1795 + * Register as many many memcpy as we have physical channels, 1796 + * we won't always be able to use all but the code will have 1797 + * to cope with that situation. 1798 + */ 1799 + for (i = 0; i < channels; i++) { 1800 + chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); 1801 + if (!chan) { 1802 + dev_err(&pl08x->adev->dev, 1803 + "%s no memory for channel\n", __func__); 1804 + return -ENOMEM; 1805 + } 1806 + 1807 + chan->host = pl08x; 1808 + chan->state = PL08X_CHAN_IDLE; 1809 + 1810 + if (slave) { 1811 + chan->slave = true; 1812 + chan->name = pl08x->pd->slave_channels[i].bus_id; 1813 + chan->cd = &pl08x->pd->slave_channels[i]; 1814 + } else { 1815 + chan->cd = &pl08x->pd->memcpy_channel; 1816 + chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1817 + if (!chan->name) { 1818 + kfree(chan); 1819 + return -ENOMEM; 1820 + } 1821 + } 1822 + dev_info(&pl08x->adev->dev, 1823 + "initialize virtual channel \"%s\"\n", 1824 + chan->name); 1825 + 1826 + chan->chan.device = dmadev; 1827 + atomic_set(&chan->last_issued, 0); 1828 + chan->lc = atomic_read(&chan->last_issued); 1829 + 1830 + spin_lock_init(&chan->lock); 1831 + INIT_LIST_HEAD(&chan->desc_list); 1832 + tasklet_init(&chan->tasklet, pl08x_tasklet, 1833 + (unsigned long) chan); 1834 + 1835 + list_add_tail(&chan->chan.device_node, &dmadev->channels); 1836 + } 1837 + dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1838 + i, slave ? 
"slave" : "memcpy"); 1839 + return i; 1840 + } 1841 + 1842 + static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1843 + { 1844 + struct pl08x_dma_chan *chan = NULL; 1845 + struct pl08x_dma_chan *next; 1846 + 1847 + list_for_each_entry_safe(chan, 1848 + next, &dmadev->channels, chan.device_node) { 1849 + list_del(&chan->chan.device_node); 1850 + kfree(chan); 1851 + } 1852 + } 1853 + 1854 + #ifdef CONFIG_DEBUG_FS 1855 + static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1856 + { 1857 + switch (state) { 1858 + case PL08X_CHAN_IDLE: 1859 + return "idle"; 1860 + case PL08X_CHAN_RUNNING: 1861 + return "running"; 1862 + case PL08X_CHAN_PAUSED: 1863 + return "paused"; 1864 + case PL08X_CHAN_WAITING: 1865 + return "waiting"; 1866 + default: 1867 + break; 1868 + } 1869 + return "UNKNOWN STATE"; 1870 + } 1871 + 1872 + static int pl08x_debugfs_show(struct seq_file *s, void *data) 1873 + { 1874 + struct pl08x_driver_data *pl08x = s->private; 1875 + struct pl08x_dma_chan *chan; 1876 + struct pl08x_phy_chan *ch; 1877 + unsigned long flags; 1878 + int i; 1879 + 1880 + seq_printf(s, "PL08x physical channels:\n"); 1881 + seq_printf(s, "CHANNEL:\tUSER:\n"); 1882 + seq_printf(s, "--------\t-----\n"); 1883 + for (i = 0; i < pl08x->vd->channels; i++) { 1884 + struct pl08x_dma_chan *virt_chan; 1885 + 1886 + ch = &pl08x->phy_chans[i]; 1887 + 1888 + spin_lock_irqsave(&ch->lock, flags); 1889 + virt_chan = ch->serving; 1890 + 1891 + seq_printf(s, "%d\t\t%s\n", 1892 + ch->id, virt_chan ? 
virt_chan->name : "(none)"); 1893 + 1894 + spin_unlock_irqrestore(&ch->lock, flags); 1895 + } 1896 + 1897 + seq_printf(s, "\nPL08x virtual memcpy channels:\n"); 1898 + seq_printf(s, "CHANNEL:\tSTATE:\n"); 1899 + seq_printf(s, "--------\t------\n"); 1900 + list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { 1901 + seq_printf(s, "%s\t\t\%s\n", chan->name, 1902 + pl08x_state_str(chan->state)); 1903 + } 1904 + 1905 + seq_printf(s, "\nPL08x virtual slave channels:\n"); 1906 + seq_printf(s, "CHANNEL:\tSTATE:\n"); 1907 + seq_printf(s, "--------\t------\n"); 1908 + list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { 1909 + seq_printf(s, "%s\t\t\%s\n", chan->name, 1910 + pl08x_state_str(chan->state)); 1911 + } 1912 + 1913 + return 0; 1914 + } 1915 + 1916 + static int pl08x_debugfs_open(struct inode *inode, struct file *file) 1917 + { 1918 + return single_open(file, pl08x_debugfs_show, inode->i_private); 1919 + } 1920 + 1921 + static const struct file_operations pl08x_debugfs_operations = { 1922 + .open = pl08x_debugfs_open, 1923 + .read = seq_read, 1924 + .llseek = seq_lseek, 1925 + .release = single_release, 1926 + }; 1927 + 1928 + static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1929 + { 1930 + /* Expose a simple debugfs interface to view all clocks */ 1931 + (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, 1932 + NULL, pl08x, 1933 + &pl08x_debugfs_operations); 1934 + } 1935 + 1936 + #else 1937 + static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1938 + { 1939 + } 1940 + #endif 1941 + 1942 + static int pl08x_probe(struct amba_device *adev, struct amba_id *id) 1943 + { 1944 + struct pl08x_driver_data *pl08x; 1945 + struct vendor_data *vd = id->data; 1946 + int ret = 0; 1947 + int i; 1948 + 1949 + ret = amba_request_regions(adev, NULL); 1950 + if (ret) 1951 + return ret; 1952 + 1953 + /* Create the driver state holder */ 1954 + pl08x = kzalloc(sizeof(struct pl08x_driver_data), 
GFP_KERNEL); 1955 + if (!pl08x) { 1956 + ret = -ENOMEM; 1957 + goto out_no_pl08x; 1958 + } 1959 + 1960 + /* Initialize memcpy engine */ 1961 + dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); 1962 + pl08x->memcpy.dev = &adev->dev; 1963 + pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources; 1964 + pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources; 1965 + pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy; 1966 + pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 1967 + pl08x->memcpy.device_tx_status = pl08x_dma_tx_status; 1968 + pl08x->memcpy.device_issue_pending = pl08x_issue_pending; 1969 + pl08x->memcpy.device_control = pl08x_control; 1970 + 1971 + /* Initialize slave engine */ 1972 + dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask); 1973 + pl08x->slave.dev = &adev->dev; 1974 + pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources; 1975 + pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources; 1976 + pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt; 1977 + pl08x->slave.device_tx_status = pl08x_dma_tx_status; 1978 + pl08x->slave.device_issue_pending = pl08x_issue_pending; 1979 + pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg; 1980 + pl08x->slave.device_control = pl08x_control; 1981 + 1982 + /* Get the platform data */ 1983 + pl08x->pd = dev_get_platdata(&adev->dev); 1984 + if (!pl08x->pd) { 1985 + dev_err(&adev->dev, "no platform data supplied\n"); 1986 + goto out_no_platdata; 1987 + } 1988 + 1989 + /* Assign useful pointers to the driver state */ 1990 + pl08x->adev = adev; 1991 + pl08x->vd = vd; 1992 + 1993 + /* A DMA memory pool for LLIs, align on 1-byte boundary */ 1994 + pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 1995 + PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); 1996 + if (!pl08x->pool) { 1997 + ret = -ENOMEM; 1998 + goto out_no_lli_pool; 1999 + } 2000 + 2001 + spin_lock_init(&pl08x->lock); 2002 + 2003 + pl08x->base = 
ioremap(adev->res.start, resource_size(&adev->res)); 2004 + if (!pl08x->base) { 2005 + ret = -ENOMEM; 2006 + goto out_no_ioremap; 2007 + } 2008 + 2009 + /* Turn on the PL08x */ 2010 + pl08x_ensure_on(pl08x); 2011 + 2012 + /* 2013 + * Attach the interrupt handler 2014 + */ 2015 + writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 2016 + writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 2017 + 2018 + ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, 2019 + vd->name, pl08x); 2020 + if (ret) { 2021 + dev_err(&adev->dev, "%s failed to request interrupt %d\n", 2022 + __func__, adev->irq[0]); 2023 + goto out_no_irq; 2024 + } 2025 + 2026 + /* Initialize physical channels */ 2027 + pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), 2028 + GFP_KERNEL); 2029 + if (!pl08x->phy_chans) { 2030 + dev_err(&adev->dev, "%s failed to allocate " 2031 + "physical channel holders\n", 2032 + __func__); 2033 + goto out_no_phychans; 2034 + } 2035 + 2036 + for (i = 0; i < vd->channels; i++) { 2037 + struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; 2038 + 2039 + ch->id = i; 2040 + ch->base = pl08x->base + PL080_Cx_BASE(i); 2041 + spin_lock_init(&ch->lock); 2042 + ch->serving = NULL; 2043 + ch->signal = -1; 2044 + dev_info(&adev->dev, 2045 + "physical channel %d is %s\n", i, 2046 + pl08x_phy_channel_busy(ch) ? 
"BUSY" : "FREE"); 2047 + } 2048 + 2049 + /* Register as many memcpy channels as there are physical channels */ 2050 + ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2051 + pl08x->vd->channels, false); 2052 + if (ret <= 0) { 2053 + dev_warn(&pl08x->adev->dev, 2054 + "%s failed to enumerate memcpy channels - %d\n", 2055 + __func__, ret); 2056 + goto out_no_memcpy; 2057 + } 2058 + pl08x->memcpy.chancnt = ret; 2059 + 2060 + /* Register slave channels */ 2061 + ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2062 + pl08x->pd->num_slave_channels, 2063 + true); 2064 + if (ret <= 0) { 2065 + dev_warn(&pl08x->adev->dev, 2066 + "%s failed to enumerate slave channels - %d\n", 2067 + __func__, ret); 2068 + goto out_no_slave; 2069 + } 2070 + pl08x->slave.chancnt = ret; 2071 + 2072 + ret = dma_async_device_register(&pl08x->memcpy); 2073 + if (ret) { 2074 + dev_warn(&pl08x->adev->dev, 2075 + "%s failed to register memcpy as an async device - %d\n", 2076 + __func__, ret); 2077 + goto out_no_memcpy_reg; 2078 + } 2079 + 2080 + ret = dma_async_device_register(&pl08x->slave); 2081 + if (ret) { 2082 + dev_warn(&pl08x->adev->dev, 2083 + "%s failed to register slave as an async device - %d\n", 2084 + __func__, ret); 2085 + goto out_no_slave_reg; 2086 + } 2087 + 2088 + amba_set_drvdata(adev, pl08x); 2089 + init_pl08x_debugfs(pl08x); 2090 + dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", 2091 + vd->name, adev->res.start); 2092 + return 0; 2093 + 2094 + out_no_slave_reg: 2095 + dma_async_device_unregister(&pl08x->memcpy); 2096 + out_no_memcpy_reg: 2097 + pl08x_free_virtual_channels(&pl08x->slave); 2098 + out_no_slave: 2099 + pl08x_free_virtual_channels(&pl08x->memcpy); 2100 + out_no_memcpy: 2101 + kfree(pl08x->phy_chans); 2102 + out_no_phychans: 2103 + free_irq(adev->irq[0], pl08x); 2104 + out_no_irq: 2105 + iounmap(pl08x->base); 2106 + out_no_ioremap: 2107 + dma_pool_destroy(pl08x->pool); 2108 + out_no_lli_pool: 2109 + out_no_platdata: 2110 
+ kfree(pl08x); 2111 + out_no_pl08x: 2112 + amba_release_regions(adev); 2113 + return ret; 2114 + } 2115 + 2116 + /* PL080 has 8 channels and the PL080 have just 2 */ 2117 + static struct vendor_data vendor_pl080 = { 2118 + .name = "PL080", 2119 + .channels = 8, 2120 + .dualmaster = true, 2121 + }; 2122 + 2123 + static struct vendor_data vendor_pl081 = { 2124 + .name = "PL081", 2125 + .channels = 2, 2126 + .dualmaster = false, 2127 + }; 2128 + 2129 + static struct amba_id pl08x_ids[] = { 2130 + /* PL080 */ 2131 + { 2132 + .id = 0x00041080, 2133 + .mask = 0x000fffff, 2134 + .data = &vendor_pl080, 2135 + }, 2136 + /* PL081 */ 2137 + { 2138 + .id = 0x00041081, 2139 + .mask = 0x000fffff, 2140 + .data = &vendor_pl081, 2141 + }, 2142 + /* Nomadik 8815 PL080 variant */ 2143 + { 2144 + .id = 0x00280880, 2145 + .mask = 0x00ffffff, 2146 + .data = &vendor_pl080, 2147 + }, 2148 + { 0, 0 }, 2149 + }; 2150 + 2151 + static struct amba_driver pl08x_amba_driver = { 2152 + .drv.name = DRIVER_NAME, 2153 + .id_table = pl08x_ids, 2154 + .probe = pl08x_probe, 2155 + }; 2156 + 2157 + static int __init pl08x_init(void) 2158 + { 2159 + int retval; 2160 + retval = amba_driver_register(&pl08x_amba_driver); 2161 + if (retval) 2162 + printk(KERN_WARNING DRIVER_NAME 2163 + "failed to register as an amba device (%d)\n", 2164 + retval); 2165 + return retval; 2166 + } 2167 + subsys_initcall(pl08x_init);
+4
drivers/dma/dmaengine.c
··· 690 690 !device->device_prep_dma_memset); 691 691 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 692 692 !device->device_prep_dma_interrupt); 693 + BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && 694 + !device->device_prep_dma_sg); 693 695 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 694 696 !device->device_prep_slave_sg); 697 + BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && 698 + !device->device_prep_dma_cyclic); 695 699 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 696 700 !device->device_control); 697 701
+169 -185
drivers/dma/fsldma.c
··· 35 35 #include <linux/dmapool.h> 36 36 #include <linux/of_platform.h> 37 37 38 - #include <asm/fsldma.h> 39 38 #include "fsldma.h" 39 + 40 + static const char msg_ld_oom[] = "No free memory for link descriptor\n"; 40 41 41 42 static void dma_init(struct fsldma_chan *chan) 42 43 { ··· 500 499 501 500 new = fsl_dma_alloc_descriptor(chan); 502 501 if (!new) { 503 - dev_err(chan->dev, "No free memory for link descriptor\n"); 502 + dev_err(chan->dev, msg_ld_oom); 504 503 return NULL; 505 504 } 506 505 ··· 537 536 /* Allocate the link descriptor from DMA pool */ 538 537 new = fsl_dma_alloc_descriptor(chan); 539 538 if (!new) { 540 - dev_err(chan->dev, 541 - "No free memory for link descriptor\n"); 539 + dev_err(chan->dev, msg_ld_oom); 542 540 goto fail; 543 541 } 544 542 #ifdef FSL_DMA_LD_DEBUG ··· 583 583 return NULL; 584 584 } 585 585 586 + static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, 587 + struct scatterlist *dst_sg, unsigned int dst_nents, 588 + struct scatterlist *src_sg, unsigned int src_nents, 589 + unsigned long flags) 590 + { 591 + struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; 592 + struct fsldma_chan *chan = to_fsl_chan(dchan); 593 + size_t dst_avail, src_avail; 594 + dma_addr_t dst, src; 595 + size_t len; 596 + 597 + /* basic sanity checks */ 598 + if (dst_nents == 0 || src_nents == 0) 599 + return NULL; 600 + 601 + if (dst_sg == NULL || src_sg == NULL) 602 + return NULL; 603 + 604 + /* 605 + * TODO: should we check that both scatterlists have the same 606 + * TODO: number of bytes in total? Is that really an error? 
607 + */ 608 + 609 + /* get prepared for the loop */ 610 + dst_avail = sg_dma_len(dst_sg); 611 + src_avail = sg_dma_len(src_sg); 612 + 613 + /* run until we are out of scatterlist entries */ 614 + while (true) { 615 + 616 + /* create the largest transaction possible */ 617 + len = min_t(size_t, src_avail, dst_avail); 618 + len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); 619 + if (len == 0) 620 + goto fetch; 621 + 622 + dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; 623 + src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; 624 + 625 + /* allocate and populate the descriptor */ 626 + new = fsl_dma_alloc_descriptor(chan); 627 + if (!new) { 628 + dev_err(chan->dev, msg_ld_oom); 629 + goto fail; 630 + } 631 + #ifdef FSL_DMA_LD_DEBUG 632 + dev_dbg(chan->dev, "new link desc alloc %p\n", new); 633 + #endif 634 + 635 + set_desc_cnt(chan, &new->hw, len); 636 + set_desc_src(chan, &new->hw, src); 637 + set_desc_dst(chan, &new->hw, dst); 638 + 639 + if (!first) 640 + first = new; 641 + else 642 + set_desc_next(chan, &prev->hw, new->async_tx.phys); 643 + 644 + new->async_tx.cookie = 0; 645 + async_tx_ack(&new->async_tx); 646 + prev = new; 647 + 648 + /* Insert the link descriptor to the LD ring */ 649 + list_add_tail(&new->node, &first->tx_list); 650 + 651 + /* update metadata */ 652 + dst_avail -= len; 653 + src_avail -= len; 654 + 655 + fetch: 656 + /* fetch the next dst scatterlist entry */ 657 + if (dst_avail == 0) { 658 + 659 + /* no more entries: we're done */ 660 + if (dst_nents == 0) 661 + break; 662 + 663 + /* fetch the next entry: if there are no more: done */ 664 + dst_sg = sg_next(dst_sg); 665 + if (dst_sg == NULL) 666 + break; 667 + 668 + dst_nents--; 669 + dst_avail = sg_dma_len(dst_sg); 670 + } 671 + 672 + /* fetch the next src scatterlist entry */ 673 + if (src_avail == 0) { 674 + 675 + /* no more entries: we're done */ 676 + if (src_nents == 0) 677 + break; 678 + 679 + /* fetch the next entry: if there are no more: done */ 680 + src_sg 
= sg_next(src_sg); 681 + if (src_sg == NULL) 682 + break; 683 + 684 + src_nents--; 685 + src_avail = sg_dma_len(src_sg); 686 + } 687 + } 688 + 689 + new->async_tx.flags = flags; /* client is in control of this ack */ 690 + new->async_tx.cookie = -EBUSY; 691 + 692 + /* Set End-of-link to the last link descriptor of new list */ 693 + set_ld_eol(chan, new); 694 + 695 + return &first->async_tx; 696 + 697 + fail: 698 + if (!first) 699 + return NULL; 700 + 701 + fsldma_free_desc_list_reverse(chan, &first->tx_list); 702 + return NULL; 703 + } 704 + 586 705 /** 587 706 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 588 707 * @chan: DMA channel ··· 718 599 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 719 600 enum dma_data_direction direction, unsigned long flags) 720 601 { 721 - struct fsldma_chan *chan; 722 - struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; 723 - struct fsl_dma_slave *slave; 724 - size_t copy; 725 - 726 - int i; 727 - struct scatterlist *sg; 728 - size_t sg_used; 729 - size_t hw_used; 730 - struct fsl_dma_hw_addr *hw; 731 - dma_addr_t dma_dst, dma_src; 732 - 733 - if (!dchan) 734 - return NULL; 735 - 736 - if (!dchan->private) 737 - return NULL; 738 - 739 - chan = to_fsl_chan(dchan); 740 - slave = dchan->private; 741 - 742 - if (list_empty(&slave->addresses)) 743 - return NULL; 744 - 745 - hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); 746 - hw_used = 0; 747 - 748 602 /* 749 - * Build the hardware transaction to copy from the scatterlist to 750 - * the hardware, or from the hardware to the scatterlist 603 + * This operation is not supported on the Freescale DMA controller 751 604 * 752 - * If you are copying from the hardware to the scatterlist and it 753 - * takes two hardware entries to fill an entire page, then both 754 - * hardware entries will be coalesced into the same page 755 - * 756 - * If you are copying from the scatterlist to the hardware and a 757 - * 
single page can fill two hardware entries, then the data will 758 - * be read out of the page into the first hardware entry, and so on 605 + * However, we need to provide the function pointer to allow the 606 + * device_control() method to work. 759 607 */ 760 - for_each_sg(sgl, sg, sg_len, i) { 761 - sg_used = 0; 762 - 763 - /* Loop until the entire scatterlist entry is used */ 764 - while (sg_used < sg_dma_len(sg)) { 765 - 766 - /* 767 - * If we've used up the current hardware address/length 768 - * pair, we need to load a new one 769 - * 770 - * This is done in a while loop so that descriptors with 771 - * length == 0 will be skipped 772 - */ 773 - while (hw_used >= hw->length) { 774 - 775 - /* 776 - * If the current hardware entry is the last 777 - * entry in the list, we're finished 778 - */ 779 - if (list_is_last(&hw->entry, &slave->addresses)) 780 - goto finished; 781 - 782 - /* Get the next hardware address/length pair */ 783 - hw = list_entry(hw->entry.next, 784 - struct fsl_dma_hw_addr, entry); 785 - hw_used = 0; 786 - } 787 - 788 - /* Allocate the link descriptor from DMA pool */ 789 - new = fsl_dma_alloc_descriptor(chan); 790 - if (!new) { 791 - dev_err(chan->dev, "No free memory for " 792 - "link descriptor\n"); 793 - goto fail; 794 - } 795 - #ifdef FSL_DMA_LD_DEBUG 796 - dev_dbg(chan->dev, "new link desc alloc %p\n", new); 797 - #endif 798 - 799 - /* 800 - * Calculate the maximum number of bytes to transfer, 801 - * making sure it is less than the DMA controller limit 802 - */ 803 - copy = min_t(size_t, sg_dma_len(sg) - sg_used, 804 - hw->length - hw_used); 805 - copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT); 806 - 807 - /* 808 - * DMA_FROM_DEVICE 809 - * from the hardware to the scatterlist 810 - * 811 - * DMA_TO_DEVICE 812 - * from the scatterlist to the hardware 813 - */ 814 - if (direction == DMA_FROM_DEVICE) { 815 - dma_src = hw->address + hw_used; 816 - dma_dst = sg_dma_address(sg) + sg_used; 817 - } else { 818 - dma_src = sg_dma_address(sg) + 
sg_used; 819 - dma_dst = hw->address + hw_used; 820 - } 821 - 822 - /* Fill in the descriptor */ 823 - set_desc_cnt(chan, &new->hw, copy); 824 - set_desc_src(chan, &new->hw, dma_src); 825 - set_desc_dst(chan, &new->hw, dma_dst); 826 - 827 - /* 828 - * If this is not the first descriptor, chain the 829 - * current descriptor after the previous descriptor 830 - */ 831 - if (!first) { 832 - first = new; 833 - } else { 834 - set_desc_next(chan, &prev->hw, 835 - new->async_tx.phys); 836 - } 837 - 838 - new->async_tx.cookie = 0; 839 - async_tx_ack(&new->async_tx); 840 - 841 - prev = new; 842 - sg_used += copy; 843 - hw_used += copy; 844 - 845 - /* Insert the link descriptor into the LD ring */ 846 - list_add_tail(&new->node, &first->tx_list); 847 - } 848 - } 849 - 850 - finished: 851 - 852 - /* All of the hardware address/length pairs had length == 0 */ 853 - if (!first || !new) 854 - return NULL; 855 - 856 - new->async_tx.flags = flags; 857 - new->async_tx.cookie = -EBUSY; 858 - 859 - /* Set End-of-link to the last link descriptor of new list */ 860 - set_ld_eol(chan, new); 861 - 862 - /* Enable extra controller features */ 863 - if (chan->set_src_loop_size) 864 - chan->set_src_loop_size(chan, slave->src_loop_size); 865 - 866 - if (chan->set_dst_loop_size) 867 - chan->set_dst_loop_size(chan, slave->dst_loop_size); 868 - 869 - if (chan->toggle_ext_start) 870 - chan->toggle_ext_start(chan, slave->external_start); 871 - 872 - if (chan->toggle_ext_pause) 873 - chan->toggle_ext_pause(chan, slave->external_pause); 874 - 875 - if (chan->set_request_count) 876 - chan->set_request_count(chan, slave->request_count); 877 - 878 - return &first->async_tx; 879 - 880 - fail: 881 - /* If first was not set, then we failed to allocate the very first 882 - * descriptor, and we're done */ 883 - if (!first) 884 - return NULL; 885 - 886 - /* 887 - * First is set, so all of the descriptors we allocated have been added 888 - * to first->tx_list, INCLUDING "first" itself. 
Therefore we 889 - * must traverse the list backwards freeing each descriptor in turn 890 - * 891 - * We're re-using variables for the loop, oh well 892 - */ 893 - fsldma_free_desc_list_reverse(chan, &first->tx_list); 894 608 return NULL; 895 609 } 896 610 897 611 static int fsl_dma_device_control(struct dma_chan *dchan, 898 612 enum dma_ctrl_cmd cmd, unsigned long arg) 899 613 { 614 + struct dma_slave_config *config; 900 615 struct fsldma_chan *chan; 901 616 unsigned long flags; 902 - 903 - /* Only supports DMA_TERMINATE_ALL */ 904 - if (cmd != DMA_TERMINATE_ALL) 905 - return -ENXIO; 617 + int size; 906 618 907 619 if (!dchan) 908 620 return -EINVAL; 909 621 910 622 chan = to_fsl_chan(dchan); 911 623 912 - /* Halt the DMA engine */ 913 - dma_halt(chan); 624 + switch (cmd) { 625 + case DMA_TERMINATE_ALL: 626 + /* Halt the DMA engine */ 627 + dma_halt(chan); 914 628 915 - spin_lock_irqsave(&chan->desc_lock, flags); 629 + spin_lock_irqsave(&chan->desc_lock, flags); 916 630 917 - /* Remove and free all of the descriptors in the LD queue */ 918 - fsldma_free_desc_list(chan, &chan->ld_pending); 919 - fsldma_free_desc_list(chan, &chan->ld_running); 631 + /* Remove and free all of the descriptors in the LD queue */ 632 + fsldma_free_desc_list(chan, &chan->ld_pending); 633 + fsldma_free_desc_list(chan, &chan->ld_running); 920 634 921 - spin_unlock_irqrestore(&chan->desc_lock, flags); 635 + spin_unlock_irqrestore(&chan->desc_lock, flags); 636 + return 0; 637 + 638 + case DMA_SLAVE_CONFIG: 639 + config = (struct dma_slave_config *)arg; 640 + 641 + /* make sure the channel supports setting burst size */ 642 + if (!chan->set_request_count) 643 + return -ENXIO; 644 + 645 + /* we set the controller burst size depending on direction */ 646 + if (config->direction == DMA_TO_DEVICE) 647 + size = config->dst_addr_width * config->dst_maxburst; 648 + else 649 + size = config->src_addr_width * config->src_maxburst; 650 + 651 + chan->set_request_count(chan, size); 652 + return 0; 653 + 
654 + case FSLDMA_EXTERNAL_START: 655 + 656 + /* make sure the channel supports external start */ 657 + if (!chan->toggle_ext_start) 658 + return -ENXIO; 659 + 660 + chan->toggle_ext_start(chan, arg); 661 + return 0; 662 + 663 + default: 664 + return -ENXIO; 665 + } 922 666 923 667 return 0; 924 668 } ··· 1309 1327 1310 1328 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); 1311 1329 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1330 + dma_cap_set(DMA_SG, fdev->common.cap_mask); 1312 1331 dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); 1313 1332 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; 1314 1333 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1315 1334 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; 1316 1335 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1336 + fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; 1317 1337 fdev->common.device_tx_status = fsl_tx_status; 1318 1338 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1319 1339 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
+422
drivers/dma/imx-dma.c
··· 1 + /* 2 + * drivers/dma/imx-dma.c 3 + * 4 + * This file contains a driver for the Freescale i.MX DMA engine 5 + * found on i.MX1/21/27 6 + * 7 + * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 8 + * 9 + * The code contained herein is licensed under the GNU General Public 10 + * License. You may obtain a copy of the GNU General Public License 11 + * Version 2 or later at the following locations: 12 + * 13 + * http://www.opensource.org/licenses/gpl-license.html 14 + * http://www.gnu.org/copyleft/gpl.html 15 + */ 16 + #include <linux/init.h> 17 + #include <linux/types.h> 18 + #include <linux/mm.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/spinlock.h> 21 + #include <linux/device.h> 22 + #include <linux/dma-mapping.h> 23 + #include <linux/slab.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/dmaengine.h> 26 + 27 + #include <asm/irq.h> 28 + #include <mach/dma-v1.h> 29 + #include <mach/hardware.h> 30 + 31 + struct imxdma_channel { 32 + struct imxdma_engine *imxdma; 33 + unsigned int channel; 34 + unsigned int imxdma_channel; 35 + 36 + enum dma_slave_buswidth word_size; 37 + dma_addr_t per_address; 38 + u32 watermark_level; 39 + struct dma_chan chan; 40 + spinlock_t lock; 41 + struct dma_async_tx_descriptor desc; 42 + dma_cookie_t last_completed; 43 + enum dma_status status; 44 + int dma_request; 45 + struct scatterlist *sg_list; 46 + }; 47 + 48 + #define MAX_DMA_CHANNELS 8 49 + 50 + struct imxdma_engine { 51 + struct device *dev; 52 + struct dma_device dma_device; 53 + struct imxdma_channel channel[MAX_DMA_CHANNELS]; 54 + }; 55 + 56 + static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) 57 + { 58 + return container_of(chan, struct imxdma_channel, chan); 59 + } 60 + 61 + static void imxdma_handle(struct imxdma_channel *imxdmac) 62 + { 63 + if (imxdmac->desc.callback) 64 + imxdmac->desc.callback(imxdmac->desc.callback_param); 65 + imxdmac->last_completed = imxdmac->desc.cookie; 66 + } 67 + 68 + static void 
imxdma_irq_handler(int channel, void *data) 69 + { 70 + struct imxdma_channel *imxdmac = data; 71 + 72 + imxdmac->status = DMA_SUCCESS; 73 + imxdma_handle(imxdmac); 74 + } 75 + 76 + static void imxdma_err_handler(int channel, void *data, int error) 77 + { 78 + struct imxdma_channel *imxdmac = data; 79 + 80 + imxdmac->status = DMA_ERROR; 81 + imxdma_handle(imxdmac); 82 + } 83 + 84 + static void imxdma_progression(int channel, void *data, 85 + struct scatterlist *sg) 86 + { 87 + struct imxdma_channel *imxdmac = data; 88 + 89 + imxdmac->status = DMA_SUCCESS; 90 + imxdma_handle(imxdmac); 91 + } 92 + 93 + static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 94 + unsigned long arg) 95 + { 96 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 97 + struct dma_slave_config *dmaengine_cfg = (void *)arg; 98 + int ret; 99 + unsigned int mode = 0; 100 + 101 + switch (cmd) { 102 + case DMA_TERMINATE_ALL: 103 + imxdmac->status = DMA_ERROR; 104 + imx_dma_disable(imxdmac->imxdma_channel); 105 + return 0; 106 + case DMA_SLAVE_CONFIG: 107 + if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { 108 + imxdmac->per_address = dmaengine_cfg->src_addr; 109 + imxdmac->watermark_level = dmaengine_cfg->src_maxburst; 110 + imxdmac->word_size = dmaengine_cfg->src_addr_width; 111 + } else { 112 + imxdmac->per_address = dmaengine_cfg->dst_addr; 113 + imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; 114 + imxdmac->word_size = dmaengine_cfg->dst_addr_width; 115 + } 116 + 117 + switch (imxdmac->word_size) { 118 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 119 + mode = IMX_DMA_MEMSIZE_8; 120 + break; 121 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 122 + mode = IMX_DMA_MEMSIZE_16; 123 + break; 124 + default: 125 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 126 + mode = IMX_DMA_MEMSIZE_32; 127 + break; 128 + } 129 + ret = imx_dma_config_channel(imxdmac->imxdma_channel, 130 + mode | IMX_DMA_TYPE_FIFO, 131 + IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, 132 + imxdmac->dma_request, 1); 133 + 134 + if (ret) 135 
+ return ret; 136 + 137 + imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level); 138 + 139 + return 0; 140 + default: 141 + return -ENOSYS; 142 + } 143 + 144 + return -EINVAL; 145 + } 146 + 147 + static enum dma_status imxdma_tx_status(struct dma_chan *chan, 148 + dma_cookie_t cookie, 149 + struct dma_tx_state *txstate) 150 + { 151 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 152 + dma_cookie_t last_used; 153 + enum dma_status ret; 154 + 155 + last_used = chan->cookie; 156 + 157 + ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); 158 + dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); 159 + 160 + return ret; 161 + } 162 + 163 + static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) 164 + { 165 + dma_cookie_t cookie = imxdma->chan.cookie; 166 + 167 + if (++cookie < 0) 168 + cookie = 1; 169 + 170 + imxdma->chan.cookie = cookie; 171 + imxdma->desc.cookie = cookie; 172 + 173 + return cookie; 174 + } 175 + 176 + static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) 177 + { 178 + struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); 179 + dma_cookie_t cookie; 180 + 181 + spin_lock_irq(&imxdmac->lock); 182 + 183 + cookie = imxdma_assign_cookie(imxdmac); 184 + 185 + imx_dma_enable(imxdmac->imxdma_channel); 186 + 187 + spin_unlock_irq(&imxdmac->lock); 188 + 189 + return cookie; 190 + } 191 + 192 + static int imxdma_alloc_chan_resources(struct dma_chan *chan) 193 + { 194 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 195 + struct imx_dma_data *data = chan->private; 196 + 197 + imxdmac->dma_request = data->dma_request; 198 + 199 + dma_async_tx_descriptor_init(&imxdmac->desc, chan); 200 + imxdmac->desc.tx_submit = imxdma_tx_submit; 201 + /* txd.flags will be overwritten in prep funcs */ 202 + imxdmac->desc.flags = DMA_CTRL_ACK; 203 + 204 + imxdmac->status = DMA_SUCCESS; 205 + 206 + return 0; 207 + } 208 + 209 + static void imxdma_free_chan_resources(struct 
dma_chan *chan) 210 + { 211 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 212 + 213 + imx_dma_disable(imxdmac->imxdma_channel); 214 + 215 + if (imxdmac->sg_list) { 216 + kfree(imxdmac->sg_list); 217 + imxdmac->sg_list = NULL; 218 + } 219 + } 220 + 221 + static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( 222 + struct dma_chan *chan, struct scatterlist *sgl, 223 + unsigned int sg_len, enum dma_data_direction direction, 224 + unsigned long flags) 225 + { 226 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 227 + struct scatterlist *sg; 228 + int i, ret, dma_length = 0; 229 + unsigned int dmamode; 230 + 231 + if (imxdmac->status == DMA_IN_PROGRESS) 232 + return NULL; 233 + 234 + imxdmac->status = DMA_IN_PROGRESS; 235 + 236 + for_each_sg(sgl, sg, sg_len, i) { 237 + dma_length += sg->length; 238 + } 239 + 240 + if (direction == DMA_FROM_DEVICE) 241 + dmamode = DMA_MODE_READ; 242 + else 243 + dmamode = DMA_MODE_WRITE; 244 + 245 + ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, 246 + dma_length, imxdmac->per_address, dmamode); 247 + if (ret) 248 + return NULL; 249 + 250 + return &imxdmac->desc; 251 + } 252 + 253 + static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( 254 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 255 + size_t period_len, enum dma_data_direction direction) 256 + { 257 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 258 + struct imxdma_engine *imxdma = imxdmac->imxdma; 259 + int i, ret; 260 + unsigned int periods = buf_len / period_len; 261 + unsigned int dmamode; 262 + 263 + dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", 264 + __func__, imxdmac->channel, buf_len, period_len); 265 + 266 + if (imxdmac->status == DMA_IN_PROGRESS) 267 + return NULL; 268 + imxdmac->status = DMA_IN_PROGRESS; 269 + 270 + ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, 271 + imxdma_progression); 272 + if (ret) { 273 + dev_err(imxdma->dev, "Failed to setup the DMA 
handler\n"); 274 + return NULL; 275 + } 276 + 277 + if (imxdmac->sg_list) 278 + kfree(imxdmac->sg_list); 279 + 280 + imxdmac->sg_list = kcalloc(periods + 1, 281 + sizeof(struct scatterlist), GFP_KERNEL); 282 + if (!imxdmac->sg_list) 283 + return NULL; 284 + 285 + sg_init_table(imxdmac->sg_list, periods); 286 + 287 + for (i = 0; i < periods; i++) { 288 + imxdmac->sg_list[i].page_link = 0; 289 + imxdmac->sg_list[i].offset = 0; 290 + imxdmac->sg_list[i].dma_address = dma_addr; 291 + imxdmac->sg_list[i].length = period_len; 292 + dma_addr += period_len; 293 + } 294 + 295 + /* close the loop */ 296 + imxdmac->sg_list[periods].offset = 0; 297 + imxdmac->sg_list[periods].length = 0; 298 + imxdmac->sg_list[periods].page_link = 299 + ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; 300 + 301 + if (direction == DMA_FROM_DEVICE) 302 + dmamode = DMA_MODE_READ; 303 + else 304 + dmamode = DMA_MODE_WRITE; 305 + 306 + ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods, 307 + IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode); 308 + if (ret) 309 + return NULL; 310 + 311 + return &imxdmac->desc; 312 + } 313 + 314 + static void imxdma_issue_pending(struct dma_chan *chan) 315 + { 316 + /* 317 + * Nothing to do. 
We only have a single descriptor 318 + */ 319 + } 320 + 321 + static int __init imxdma_probe(struct platform_device *pdev) 322 + { 323 + struct imxdma_engine *imxdma; 324 + int ret, i; 325 + 326 + imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL); 327 + if (!imxdma) 328 + return -ENOMEM; 329 + 330 + INIT_LIST_HEAD(&imxdma->dma_device.channels); 331 + 332 + /* Initialize channel parameters */ 333 + for (i = 0; i < MAX_DMA_CHANNELS; i++) { 334 + struct imxdma_channel *imxdmac = &imxdma->channel[i]; 335 + 336 + imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine", 337 + DMA_PRIO_MEDIUM); 338 + if (imxdmac->channel < 0) 339 + goto err_init; 340 + 341 + imx_dma_setup_handlers(imxdmac->imxdma_channel, 342 + imxdma_irq_handler, imxdma_err_handler, imxdmac); 343 + 344 + imxdmac->imxdma = imxdma; 345 + spin_lock_init(&imxdmac->lock); 346 + 347 + dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask); 348 + dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask); 349 + 350 + imxdmac->chan.device = &imxdma->dma_device; 351 + imxdmac->chan.chan_id = i; 352 + imxdmac->channel = i; 353 + 354 + /* Add the channel to the DMAC list */ 355 + list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels); 356 + } 357 + 358 + imxdma->dev = &pdev->dev; 359 + imxdma->dma_device.dev = &pdev->dev; 360 + 361 + imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources; 362 + imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources; 363 + imxdma->dma_device.device_tx_status = imxdma_tx_status; 364 + imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg; 365 + imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic; 366 + imxdma->dma_device.device_control = imxdma_control; 367 + imxdma->dma_device.device_issue_pending = imxdma_issue_pending; 368 + 369 + platform_set_drvdata(pdev, imxdma); 370 + 371 + ret = dma_async_device_register(&imxdma->dma_device); 372 + if (ret) { 373 + dev_err(&pdev->dev, "unable to register\n"); 374 + 
goto err_init; 375 + } 376 + 377 + return 0; 378 + 379 + err_init: 380 + while (i-- >= 0) { 381 + struct imxdma_channel *imxdmac = &imxdma->channel[i]; 382 + imx_dma_free(imxdmac->imxdma_channel); 383 + } 384 + 385 + kfree(imxdma); 386 + return ret; 387 + } 388 + 389 + static int __exit imxdma_remove(struct platform_device *pdev) 390 + { 391 + struct imxdma_engine *imxdma = platform_get_drvdata(pdev); 392 + int i; 393 + 394 + dma_async_device_unregister(&imxdma->dma_device); 395 + 396 + for (i = 0; i < MAX_DMA_CHANNELS; i++) { 397 + struct imxdma_channel *imxdmac = &imxdma->channel[i]; 398 + 399 + imx_dma_free(imxdmac->imxdma_channel); 400 + } 401 + 402 + kfree(imxdma); 403 + 404 + return 0; 405 + } 406 + 407 + static struct platform_driver imxdma_driver = { 408 + .driver = { 409 + .name = "imx-dma", 410 + }, 411 + .remove = __exit_p(imxdma_remove), 412 + }; 413 + 414 + static int __init imxdma_module_init(void) 415 + { 416 + return platform_driver_probe(&imxdma_driver, imxdma_probe); 417 + } 418 + subsys_initcall(imxdma_module_init); 419 + 420 + MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 421 + MODULE_DESCRIPTION("i.MX dma driver"); 422 + MODULE_LICENSE("GPL");
+1392
drivers/dma/imx-sdma.c
··· 1 + /* 2 + * drivers/dma/imx-sdma.c 3 + * 4 + * This file contains a driver for the Freescale Smart DMA engine 5 + * 6 + * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 7 + * 8 + * Based on code from Freescale: 9 + * 10 + * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. 11 + * 12 + * The code contained herein is licensed under the GNU General Public 13 + * License. You may obtain a copy of the GNU General Public License 14 + * Version 2 or later at the following locations: 15 + * 16 + * http://www.opensource.org/licenses/gpl-license.html 17 + * http://www.gnu.org/copyleft/gpl.html 18 + */ 19 + 20 + #include <linux/init.h> 21 + #include <linux/types.h> 22 + #include <linux/mm.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/clk.h> 25 + #include <linux/wait.h> 26 + #include <linux/sched.h> 27 + #include <linux/semaphore.h> 28 + #include <linux/spinlock.h> 29 + #include <linux/device.h> 30 + #include <linux/dma-mapping.h> 31 + #include <linux/firmware.h> 32 + #include <linux/slab.h> 33 + #include <linux/platform_device.h> 34 + #include <linux/dmaengine.h> 35 + 36 + #include <asm/irq.h> 37 + #include <mach/sdma.h> 38 + #include <mach/dma.h> 39 + #include <mach/hardware.h> 40 + 41 + /* SDMA registers */ 42 + #define SDMA_H_C0PTR 0x000 43 + #define SDMA_H_INTR 0x004 44 + #define SDMA_H_STATSTOP 0x008 45 + #define SDMA_H_START 0x00c 46 + #define SDMA_H_EVTOVR 0x010 47 + #define SDMA_H_DSPOVR 0x014 48 + #define SDMA_H_HOSTOVR 0x018 49 + #define SDMA_H_EVTPEND 0x01c 50 + #define SDMA_H_DSPENBL 0x020 51 + #define SDMA_H_RESET 0x024 52 + #define SDMA_H_EVTERR 0x028 53 + #define SDMA_H_INTRMSK 0x02c 54 + #define SDMA_H_PSW 0x030 55 + #define SDMA_H_EVTERRDBG 0x034 56 + #define SDMA_H_CONFIG 0x038 57 + #define SDMA_ONCE_ENB 0x040 58 + #define SDMA_ONCE_DATA 0x044 59 + #define SDMA_ONCE_INSTR 0x048 60 + #define SDMA_ONCE_STAT 0x04c 61 + #define SDMA_ONCE_CMD 0x050 62 + #define SDMA_EVT_MIRROR 0x054 63 + #define 
SDMA_ILLINSTADDR 0x058 64 + #define SDMA_CHN0ADDR 0x05c 65 + #define SDMA_ONCE_RTB 0x060 66 + #define SDMA_XTRIG_CONF1 0x070 67 + #define SDMA_XTRIG_CONF2 0x074 68 + #define SDMA_CHNENBL0_V2 0x200 69 + #define SDMA_CHNENBL0_V1 0x080 70 + #define SDMA_CHNPRI_0 0x100 71 + 72 + /* 73 + * Buffer descriptor status values. 74 + */ 75 + #define BD_DONE 0x01 76 + #define BD_WRAP 0x02 77 + #define BD_CONT 0x04 78 + #define BD_INTR 0x08 79 + #define BD_RROR 0x10 80 + #define BD_LAST 0x20 81 + #define BD_EXTD 0x80 82 + 83 + /* 84 + * Data Node descriptor status values. 85 + */ 86 + #define DND_END_OF_FRAME 0x80 87 + #define DND_END_OF_XFER 0x40 88 + #define DND_DONE 0x20 89 + #define DND_UNUSED 0x01 90 + 91 + /* 92 + * IPCV2 descriptor status values. 93 + */ 94 + #define BD_IPCV2_END_OF_FRAME 0x40 95 + 96 + #define IPCV2_MAX_NODES 50 97 + /* 98 + * Error bit set in the CCB status field by the SDMA, 99 + * in setbd routine, in case of a transfer error 100 + */ 101 + #define DATA_ERROR 0x10000000 102 + 103 + /* 104 + * Buffer descriptor commands. 
105 + */ 106 + #define C0_ADDR 0x01 107 + #define C0_LOAD 0x02 108 + #define C0_DUMP 0x03 109 + #define C0_SETCTX 0x07 110 + #define C0_GETCTX 0x03 111 + #define C0_SETDM 0x01 112 + #define C0_SETPM 0x04 113 + #define C0_GETDM 0x02 114 + #define C0_GETPM 0x08 115 + /* 116 + * Change endianness indicator in the BD command field 117 + */ 118 + #define CHANGE_ENDIANNESS 0x80 119 + 120 + /* 121 + * Mode/Count of data node descriptors - IPCv2 122 + */ 123 + struct sdma_mode_count { 124 + u32 count : 16; /* size of the buffer pointed by this BD */ 125 + u32 status : 8; /* E,R,I,C,W,D status bits stored here */ 126 + u32 command : 8; /* command mostlky used for channel 0 */ 127 + }; 128 + 129 + /* 130 + * Buffer descriptor 131 + */ 132 + struct sdma_buffer_descriptor { 133 + struct sdma_mode_count mode; 134 + u32 buffer_addr; /* address of the buffer described */ 135 + u32 ext_buffer_addr; /* extended buffer address */ 136 + } __attribute__ ((packed)); 137 + 138 + /** 139 + * struct sdma_channel_control - Channel control Block 140 + * 141 + * @current_bd_ptr current buffer descriptor processed 142 + * @base_bd_ptr first element of buffer descriptor array 143 + * @unused padding. 
The SDMA engine expects an array of 128 byte 144 + * control blocks 145 + */ 146 + struct sdma_channel_control { 147 + u32 current_bd_ptr; 148 + u32 base_bd_ptr; 149 + u32 unused[2]; 150 + } __attribute__ ((packed)); 151 + 152 + /** 153 + * struct sdma_state_registers - SDMA context for a channel 154 + * 155 + * @pc: program counter 156 + * @t: test bit: status of arithmetic & test instruction 157 + * @rpc: return program counter 158 + * @sf: source fault while loading data 159 + * @spc: loop start program counter 160 + * @df: destination fault while storing data 161 + * @epc: loop end program counter 162 + * @lm: loop mode 163 + */ 164 + struct sdma_state_registers { 165 + u32 pc :14; 166 + u32 unused1: 1; 167 + u32 t : 1; 168 + u32 rpc :14; 169 + u32 unused0: 1; 170 + u32 sf : 1; 171 + u32 spc :14; 172 + u32 unused2: 1; 173 + u32 df : 1; 174 + u32 epc :14; 175 + u32 lm : 2; 176 + } __attribute__ ((packed)); 177 + 178 + /** 179 + * struct sdma_context_data - sdma context specific to a channel 180 + * 181 + * @channel_state: channel state bits 182 + * @gReg: general registers 183 + * @mda: burst dma destination address register 184 + * @msa: burst dma source address register 185 + * @ms: burst dma status register 186 + * @md: burst dma data register 187 + * @pda: peripheral dma destination address register 188 + * @psa: peripheral dma source address register 189 + * @ps: peripheral dma status register 190 + * @pd: peripheral dma data register 191 + * @ca: CRC polynomial register 192 + * @cs: CRC accumulator register 193 + * @dda: dedicated core destination address register 194 + * @dsa: dedicated core source address register 195 + * @ds: dedicated core status register 196 + * @dd: dedicated core data register 197 + */ 198 + struct sdma_context_data { 199 + struct sdma_state_registers channel_state; 200 + u32 gReg[8]; 201 + u32 mda; 202 + u32 msa; 203 + u32 ms; 204 + u32 md; 205 + u32 pda; 206 + u32 psa; 207 + u32 ps; 208 + u32 pd; 209 + u32 ca; 210 + u32 cs; 211 + 
u32 dda; 212 + u32 dsa; 213 + u32 ds; 214 + u32 dd; 215 + u32 scratch0; 216 + u32 scratch1; 217 + u32 scratch2; 218 + u32 scratch3; 219 + u32 scratch4; 220 + u32 scratch5; 221 + u32 scratch6; 222 + u32 scratch7; 223 + } __attribute__ ((packed)); 224 + 225 + #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor)) 226 + 227 + struct sdma_engine; 228 + 229 + /** 230 + * struct sdma_channel - housekeeping for a SDMA channel 231 + * 232 + * @sdma pointer to the SDMA engine for this channel 233 + * @channel the channel number, matches dmaengine chan_id 234 + * @direction transfer type. Needed for setting SDMA script 235 + * @peripheral_type Peripheral type. Needed for setting SDMA script 236 + * @event_id0 aka dma request line 237 + * @event_id1 for channels that use 2 events 238 + * @word_size peripheral access size 239 + * @buf_tail ID of the buffer that was processed 240 + * @done channel completion 241 + * @num_bd max NUM_BD. number of descriptors currently handling 242 + */ 243 + struct sdma_channel { 244 + struct sdma_engine *sdma; 245 + unsigned int channel; 246 + enum dma_data_direction direction; 247 + enum sdma_peripheral_type peripheral_type; 248 + unsigned int event_id0; 249 + unsigned int event_id1; 250 + enum dma_slave_buswidth word_size; 251 + unsigned int buf_tail; 252 + struct completion done; 253 + unsigned int num_bd; 254 + struct sdma_buffer_descriptor *bd; 255 + dma_addr_t bd_phys; 256 + unsigned int pc_from_device, pc_to_device; 257 + unsigned long flags; 258 + dma_addr_t per_address; 259 + u32 event_mask0, event_mask1; 260 + u32 watermark_level; 261 + u32 shp_addr, per_addr; 262 + struct dma_chan chan; 263 + spinlock_t lock; 264 + struct dma_async_tx_descriptor desc; 265 + dma_cookie_t last_completed; 266 + enum dma_status status; 267 + }; 268 + 269 + #define IMX_DMA_SG_LOOP (1 << 0) 270 + 271 + #define MAX_DMA_CHANNELS 32 272 + #define MXC_SDMA_DEFAULT_PRIORITY 1 273 + #define MXC_SDMA_MIN_PRIORITY 1 274 + #define 
MXC_SDMA_MAX_PRIORITY 7 275 + 276 + /** 277 + * struct sdma_script_start_addrs - SDMA script start pointers 278 + * 279 + * start addresses of the different functions in the physical 280 + * address space of the SDMA engine. 281 + */ 282 + struct sdma_script_start_addrs { 283 + u32 ap_2_ap_addr; 284 + u32 ap_2_bp_addr; 285 + u32 ap_2_ap_fixed_addr; 286 + u32 bp_2_ap_addr; 287 + u32 loopback_on_dsp_side_addr; 288 + u32 mcu_interrupt_only_addr; 289 + u32 firi_2_per_addr; 290 + u32 firi_2_mcu_addr; 291 + u32 per_2_firi_addr; 292 + u32 mcu_2_firi_addr; 293 + u32 uart_2_per_addr; 294 + u32 uart_2_mcu_addr; 295 + u32 per_2_app_addr; 296 + u32 mcu_2_app_addr; 297 + u32 per_2_per_addr; 298 + u32 uartsh_2_per_addr; 299 + u32 uartsh_2_mcu_addr; 300 + u32 per_2_shp_addr; 301 + u32 mcu_2_shp_addr; 302 + u32 ata_2_mcu_addr; 303 + u32 mcu_2_ata_addr; 304 + u32 app_2_per_addr; 305 + u32 app_2_mcu_addr; 306 + u32 shp_2_per_addr; 307 + u32 shp_2_mcu_addr; 308 + u32 mshc_2_mcu_addr; 309 + u32 mcu_2_mshc_addr; 310 + u32 spdif_2_mcu_addr; 311 + u32 mcu_2_spdif_addr; 312 + u32 asrc_2_mcu_addr; 313 + u32 ext_mem_2_ipu_addr; 314 + u32 descrambler_addr; 315 + u32 dptc_dvfs_addr; 316 + u32 utra_addr; 317 + u32 ram_code_start_addr; 318 + }; 319 + 320 + #define SDMA_FIRMWARE_MAGIC 0x414d4453 321 + 322 + /** 323 + * struct sdma_firmware_header - Layout of the firmware image 324 + * 325 + * @magic "SDMA" 326 + * @version_major increased whenever layout of struct sdma_script_start_addrs 327 + * changes. 
328 + * @version_minor firmware minor version (for binary compatible changes) 329 + * @script_addrs_start offset of struct sdma_script_start_addrs in this image 330 + * @num_script_addrs Number of script addresses in this image 331 + * @ram_code_start offset of SDMA ram image in this firmware image 332 + * @ram_code_size size of SDMA ram image 333 + * @script_addrs Stores the start address of the SDMA scripts 334 + * (in SDMA memory space) 335 + */ 336 + struct sdma_firmware_header { 337 + u32 magic; 338 + u32 version_major; 339 + u32 version_minor; 340 + u32 script_addrs_start; 341 + u32 num_script_addrs; 342 + u32 ram_code_start; 343 + u32 ram_code_size; 344 + }; 345 + 346 + struct sdma_engine { 347 + struct device *dev; 348 + struct sdma_channel channel[MAX_DMA_CHANNELS]; 349 + struct sdma_channel_control *channel_control; 350 + void __iomem *regs; 351 + unsigned int version; 352 + unsigned int num_events; 353 + struct sdma_context_data *context; 354 + dma_addr_t context_phys; 355 + struct dma_device dma_device; 356 + struct clk *clk; 357 + struct sdma_script_start_addrs *script_addrs; 358 + }; 359 + 360 + #define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ 361 + #define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ 362 + #define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ 363 + #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ 364 + 365 + static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) 366 + { 367 + u32 chnenbl0 = (sdma->version == 2 ? 
SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1); 368 + 369 + return chnenbl0 + event * 4; 370 + } 371 + 372 + static int sdma_config_ownership(struct sdma_channel *sdmac, 373 + bool event_override, bool mcu_override, bool dsp_override) 374 + { 375 + struct sdma_engine *sdma = sdmac->sdma; 376 + int channel = sdmac->channel; 377 + u32 evt, mcu, dsp; 378 + 379 + if (event_override && mcu_override && dsp_override) 380 + return -EINVAL; 381 + 382 + evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR); 383 + mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR); 384 + dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR); 385 + 386 + if (dsp_override) 387 + dsp &= ~(1 << channel); 388 + else 389 + dsp |= (1 << channel); 390 + 391 + if (event_override) 392 + evt &= ~(1 << channel); 393 + else 394 + evt |= (1 << channel); 395 + 396 + if (mcu_override) 397 + mcu &= ~(1 << channel); 398 + else 399 + mcu |= (1 << channel); 400 + 401 + __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR); 402 + __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR); 403 + __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR); 404 + 405 + return 0; 406 + } 407 + 408 + /* 409 + * sdma_run_channel - run a channel and wait till it's done 410 + */ 411 + static int sdma_run_channel(struct sdma_channel *sdmac) 412 + { 413 + struct sdma_engine *sdma = sdmac->sdma; 414 + int channel = sdmac->channel; 415 + int ret; 416 + 417 + init_completion(&sdmac->done); 418 + 419 + __raw_writel(1 << channel, sdma->regs + SDMA_H_START); 420 + 421 + ret = wait_for_completion_timeout(&sdmac->done, HZ); 422 + 423 + return ret ? 
0 : -ETIMEDOUT; 424 + } 425 + 426 + static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, 427 + u32 address) 428 + { 429 + struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 430 + void *buf_virt; 431 + dma_addr_t buf_phys; 432 + int ret; 433 + 434 + buf_virt = dma_alloc_coherent(NULL, 435 + size, 436 + &buf_phys, GFP_KERNEL); 437 + if (!buf_virt) 438 + return -ENOMEM; 439 + 440 + bd0->mode.command = C0_SETPM; 441 + bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 442 + bd0->mode.count = size / 2; 443 + bd0->buffer_addr = buf_phys; 444 + bd0->ext_buffer_addr = address; 445 + 446 + memcpy(buf_virt, buf, size); 447 + 448 + ret = sdma_run_channel(&sdma->channel[0]); 449 + 450 + dma_free_coherent(NULL, size, buf_virt, buf_phys); 451 + 452 + return ret; 453 + } 454 + 455 + static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) 456 + { 457 + struct sdma_engine *sdma = sdmac->sdma; 458 + int channel = sdmac->channel; 459 + u32 val; 460 + u32 chnenbl = chnenbl_ofs(sdma, event); 461 + 462 + val = __raw_readl(sdma->regs + chnenbl); 463 + val |= (1 << channel); 464 + __raw_writel(val, sdma->regs + chnenbl); 465 + } 466 + 467 + static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) 468 + { 469 + struct sdma_engine *sdma = sdmac->sdma; 470 + int channel = sdmac->channel; 471 + u32 chnenbl = chnenbl_ofs(sdma, event); 472 + u32 val; 473 + 474 + val = __raw_readl(sdma->regs + chnenbl); 475 + val &= ~(1 << channel); 476 + __raw_writel(val, sdma->regs + chnenbl); 477 + } 478 + 479 + static void sdma_handle_channel_loop(struct sdma_channel *sdmac) 480 + { 481 + struct sdma_buffer_descriptor *bd; 482 + 483 + /* 484 + * loop mode. Iterate over descriptors, re-setup them and 485 + * call callback function. 
486 + */ 487 + while (1) { 488 + bd = &sdmac->bd[sdmac->buf_tail]; 489 + 490 + if (bd->mode.status & BD_DONE) 491 + break; 492 + 493 + if (bd->mode.status & BD_RROR) 494 + sdmac->status = DMA_ERROR; 495 + else 496 + sdmac->status = DMA_SUCCESS; 497 + 498 + bd->mode.status |= BD_DONE; 499 + sdmac->buf_tail++; 500 + sdmac->buf_tail %= sdmac->num_bd; 501 + 502 + if (sdmac->desc.callback) 503 + sdmac->desc.callback(sdmac->desc.callback_param); 504 + } 505 + } 506 + 507 + static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) 508 + { 509 + struct sdma_buffer_descriptor *bd; 510 + int i, error = 0; 511 + 512 + /* 513 + * non loop mode. Iterate over all descriptors, collect 514 + * errors and call callback function 515 + */ 516 + for (i = 0; i < sdmac->num_bd; i++) { 517 + bd = &sdmac->bd[i]; 518 + 519 + if (bd->mode.status & (BD_DONE | BD_RROR)) 520 + error = -EIO; 521 + } 522 + 523 + if (error) 524 + sdmac->status = DMA_ERROR; 525 + else 526 + sdmac->status = DMA_SUCCESS; 527 + 528 + if (sdmac->desc.callback) 529 + sdmac->desc.callback(sdmac->desc.callback_param); 530 + sdmac->last_completed = sdmac->desc.cookie; 531 + } 532 + 533 + static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) 534 + { 535 + complete(&sdmac->done); 536 + 537 + /* not interested in channel 0 interrupts */ 538 + if (sdmac->channel == 0) 539 + return; 540 + 541 + if (sdmac->flags & IMX_DMA_SG_LOOP) 542 + sdma_handle_channel_loop(sdmac); 543 + else 544 + mxc_sdma_handle_channel_normal(sdmac); 545 + } 546 + 547 + static irqreturn_t sdma_int_handler(int irq, void *dev_id) 548 + { 549 + struct sdma_engine *sdma = dev_id; 550 + u32 stat; 551 + 552 + stat = __raw_readl(sdma->regs + SDMA_H_INTR); 553 + __raw_writel(stat, sdma->regs + SDMA_H_INTR); 554 + 555 + while (stat) { 556 + int channel = fls(stat) - 1; 557 + struct sdma_channel *sdmac = &sdma->channel[channel]; 558 + 559 + mxc_sdma_handle_channel(sdmac); 560 + 561 + stat &= ~(1 << channel); 562 + } 563 + 564 + return 
IRQ_HANDLED; 565 + } 566 + 567 + /* 568 + * sets the pc of SDMA script according to the peripheral type 569 + */ 570 + static void sdma_get_pc(struct sdma_channel *sdmac, 571 + enum sdma_peripheral_type peripheral_type) 572 + { 573 + struct sdma_engine *sdma = sdmac->sdma; 574 + int per_2_emi = 0, emi_2_per = 0; 575 + /* 576 + * These are needed once we start to support transfers between 577 + * two peripherals or memory-to-memory transfers 578 + */ 579 + int per_2_per = 0, emi_2_emi = 0; 580 + 581 + sdmac->pc_from_device = 0; 582 + sdmac->pc_to_device = 0; 583 + 584 + switch (peripheral_type) { 585 + case IMX_DMATYPE_MEMORY: 586 + emi_2_emi = sdma->script_addrs->ap_2_ap_addr; 587 + break; 588 + case IMX_DMATYPE_DSP: 589 + emi_2_per = sdma->script_addrs->bp_2_ap_addr; 590 + per_2_emi = sdma->script_addrs->ap_2_bp_addr; 591 + break; 592 + case IMX_DMATYPE_FIRI: 593 + per_2_emi = sdma->script_addrs->firi_2_mcu_addr; 594 + emi_2_per = sdma->script_addrs->mcu_2_firi_addr; 595 + break; 596 + case IMX_DMATYPE_UART: 597 + per_2_emi = sdma->script_addrs->uart_2_mcu_addr; 598 + emi_2_per = sdma->script_addrs->mcu_2_app_addr; 599 + break; 600 + case IMX_DMATYPE_UART_SP: 601 + per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr; 602 + emi_2_per = sdma->script_addrs->mcu_2_shp_addr; 603 + break; 604 + case IMX_DMATYPE_ATA: 605 + per_2_emi = sdma->script_addrs->ata_2_mcu_addr; 606 + emi_2_per = sdma->script_addrs->mcu_2_ata_addr; 607 + break; 608 + case IMX_DMATYPE_CSPI: 609 + case IMX_DMATYPE_EXT: 610 + case IMX_DMATYPE_SSI: 611 + per_2_emi = sdma->script_addrs->app_2_mcu_addr; 612 + emi_2_per = sdma->script_addrs->mcu_2_app_addr; 613 + break; 614 + case IMX_DMATYPE_SSI_SP: 615 + case IMX_DMATYPE_MMC: 616 + case IMX_DMATYPE_SDHC: 617 + case IMX_DMATYPE_CSPI_SP: 618 + case IMX_DMATYPE_ESAI: 619 + case IMX_DMATYPE_MSHC_SP: 620 + per_2_emi = sdma->script_addrs->shp_2_mcu_addr; 621 + emi_2_per = sdma->script_addrs->mcu_2_shp_addr; 622 + break; 623 + case IMX_DMATYPE_ASRC: 624 + 
per_2_emi = sdma->script_addrs->asrc_2_mcu_addr; 625 + emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; 626 + per_2_per = sdma->script_addrs->per_2_per_addr; 627 + break; 628 + case IMX_DMATYPE_MSHC: 629 + per_2_emi = sdma->script_addrs->mshc_2_mcu_addr; 630 + emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; 631 + break; 632 + case IMX_DMATYPE_CCM: 633 + per_2_emi = sdma->script_addrs->dptc_dvfs_addr; 634 + break; 635 + case IMX_DMATYPE_SPDIF: 636 + per_2_emi = sdma->script_addrs->spdif_2_mcu_addr; 637 + emi_2_per = sdma->script_addrs->mcu_2_spdif_addr; 638 + break; 639 + case IMX_DMATYPE_IPU_MEMORY: 640 + emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr; 641 + break; 642 + default: 643 + break; 644 + } 645 + 646 + sdmac->pc_from_device = per_2_emi; 647 + sdmac->pc_to_device = emi_2_per; 648 + } 649 + 650 + static int sdma_load_context(struct sdma_channel *sdmac) 651 + { 652 + struct sdma_engine *sdma = sdmac->sdma; 653 + int channel = sdmac->channel; 654 + int load_address; 655 + struct sdma_context_data *context = sdma->context; 656 + struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 657 + int ret; 658 + 659 + if (sdmac->direction == DMA_FROM_DEVICE) { 660 + load_address = sdmac->pc_from_device; 661 + } else { 662 + load_address = sdmac->pc_to_device; 663 + } 664 + 665 + if (load_address < 0) 666 + return load_address; 667 + 668 + dev_dbg(sdma->dev, "load_address = %d\n", load_address); 669 + dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level); 670 + dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); 671 + dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); 672 + dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); 673 + dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); 674 + 675 + memset(context, 0, sizeof(*context)); 676 + context->channel_state.pc = load_address; 677 + 678 + /* Send by context the event mask,base address for peripheral 679 + * and watermark level 680 + */ 681 + context->gReg[0] = 
sdmac->event_mask1; 682 + context->gReg[1] = sdmac->event_mask0; 683 + context->gReg[2] = sdmac->per_addr; 684 + context->gReg[6] = sdmac->shp_addr; 685 + context->gReg[7] = sdmac->watermark_level; 686 + 687 + bd0->mode.command = C0_SETDM; 688 + bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 689 + bd0->mode.count = sizeof(*context) / 4; 690 + bd0->buffer_addr = sdma->context_phys; 691 + bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; 692 + 693 + ret = sdma_run_channel(&sdma->channel[0]); 694 + 695 + return ret; 696 + } 697 + 698 + static void sdma_disable_channel(struct sdma_channel *sdmac) 699 + { 700 + struct sdma_engine *sdma = sdmac->sdma; 701 + int channel = sdmac->channel; 702 + 703 + __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP); 704 + sdmac->status = DMA_ERROR; 705 + } 706 + 707 + static int sdma_config_channel(struct sdma_channel *sdmac) 708 + { 709 + int ret; 710 + 711 + sdma_disable_channel(sdmac); 712 + 713 + sdmac->event_mask0 = 0; 714 + sdmac->event_mask1 = 0; 715 + sdmac->shp_addr = 0; 716 + sdmac->per_addr = 0; 717 + 718 + if (sdmac->event_id0) { 719 + if (sdmac->event_id0 > 32) 720 + return -EINVAL; 721 + sdma_event_enable(sdmac, sdmac->event_id0); 722 + } 723 + 724 + switch (sdmac->peripheral_type) { 725 + case IMX_DMATYPE_DSP: 726 + sdma_config_ownership(sdmac, false, true, true); 727 + break; 728 + case IMX_DMATYPE_MEMORY: 729 + sdma_config_ownership(sdmac, false, true, false); 730 + break; 731 + default: 732 + sdma_config_ownership(sdmac, true, true, false); 733 + break; 734 + } 735 + 736 + sdma_get_pc(sdmac, sdmac->peripheral_type); 737 + 738 + if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) && 739 + (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { 740 + /* Handle multiple event channels differently */ 741 + if (sdmac->event_id1) { 742 + sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32); 743 + if (sdmac->event_id1 > 31) 744 + sdmac->watermark_level |= 1 << 31; 745 + sdmac->event_mask0 = 1 << 
(sdmac->event_id0 % 32); 746 + if (sdmac->event_id0 > 31) 747 + sdmac->watermark_level |= 1 << 30; 748 + } else { 749 + sdmac->event_mask0 = 1 << sdmac->event_id0; 750 + sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32); 751 + } 752 + /* Watermark Level */ 753 + sdmac->watermark_level |= sdmac->watermark_level; 754 + /* Address */ 755 + sdmac->shp_addr = sdmac->per_address; 756 + } else { 757 + sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ 758 + } 759 + 760 + ret = sdma_load_context(sdmac); 761 + 762 + return ret; 763 + } 764 + 765 + static int sdma_set_channel_priority(struct sdma_channel *sdmac, 766 + unsigned int priority) 767 + { 768 + struct sdma_engine *sdma = sdmac->sdma; 769 + int channel = sdmac->channel; 770 + 771 + if (priority < MXC_SDMA_MIN_PRIORITY 772 + || priority > MXC_SDMA_MAX_PRIORITY) { 773 + return -EINVAL; 774 + } 775 + 776 + __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); 777 + 778 + return 0; 779 + } 780 + 781 + static int sdma_request_channel(struct sdma_channel *sdmac) 782 + { 783 + struct sdma_engine *sdma = sdmac->sdma; 784 + int channel = sdmac->channel; 785 + int ret = -EBUSY; 786 + 787 + sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL); 788 + if (!sdmac->bd) { 789 + ret = -ENOMEM; 790 + goto out; 791 + } 792 + 793 + memset(sdmac->bd, 0, PAGE_SIZE); 794 + 795 + sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; 796 + sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 797 + 798 + clk_enable(sdma->clk); 799 + 800 + sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); 801 + 802 + init_completion(&sdmac->done); 803 + 804 + sdmac->buf_tail = 0; 805 + 806 + return 0; 807 + out: 808 + 809 + return ret; 810 + } 811 + 812 + static void sdma_enable_channel(struct sdma_engine *sdma, int channel) 813 + { 814 + __raw_writel(1 << channel, sdma->regs + SDMA_H_START); 815 + } 816 + 817 + static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) 818 + { 819 
+ dma_cookie_t cookie = sdma->chan.cookie; 820 + 821 + if (++cookie < 0) 822 + cookie = 1; 823 + 824 + sdma->chan.cookie = cookie; 825 + sdma->desc.cookie = cookie; 826 + 827 + return cookie; 828 + } 829 + 830 + static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) 831 + { 832 + return container_of(chan, struct sdma_channel, chan); 833 + } 834 + 835 + static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) 836 + { 837 + struct sdma_channel *sdmac = to_sdma_chan(tx->chan); 838 + struct sdma_engine *sdma = sdmac->sdma; 839 + dma_cookie_t cookie; 840 + 841 + spin_lock_irq(&sdmac->lock); 842 + 843 + cookie = sdma_assign_cookie(sdmac); 844 + 845 + sdma_enable_channel(sdma, tx->chan->chan_id); 846 + 847 + spin_unlock_irq(&sdmac->lock); 848 + 849 + return cookie; 850 + } 851 + 852 + static int sdma_alloc_chan_resources(struct dma_chan *chan) 853 + { 854 + struct sdma_channel *sdmac = to_sdma_chan(chan); 855 + struct imx_dma_data *data = chan->private; 856 + int prio, ret; 857 + 858 + /* No need to execute this for internal channel 0 */ 859 + if (chan->chan_id == 0) 860 + return 0; 861 + 862 + if (!data) 863 + return -EINVAL; 864 + 865 + switch (data->priority) { 866 + case DMA_PRIO_HIGH: 867 + prio = 3; 868 + break; 869 + case DMA_PRIO_MEDIUM: 870 + prio = 2; 871 + break; 872 + case DMA_PRIO_LOW: 873 + default: 874 + prio = 1; 875 + break; 876 + } 877 + 878 + sdmac->peripheral_type = data->peripheral_type; 879 + sdmac->event_id0 = data->dma_request; 880 + ret = sdma_set_channel_priority(sdmac, prio); 881 + if (ret) 882 + return ret; 883 + 884 + ret = sdma_request_channel(sdmac); 885 + if (ret) 886 + return ret; 887 + 888 + dma_async_tx_descriptor_init(&sdmac->desc, chan); 889 + sdmac->desc.tx_submit = sdma_tx_submit; 890 + /* txd.flags will be overwritten in prep funcs */ 891 + sdmac->desc.flags = DMA_CTRL_ACK; 892 + 893 + return 0; 894 + } 895 + 896 + static void sdma_free_chan_resources(struct dma_chan *chan) 897 + { 898 + struct sdma_channel 
*sdmac = to_sdma_chan(chan); 899 + struct sdma_engine *sdma = sdmac->sdma; 900 + 901 + sdma_disable_channel(sdmac); 902 + 903 + if (sdmac->event_id0) 904 + sdma_event_disable(sdmac, sdmac->event_id0); 905 + if (sdmac->event_id1) 906 + sdma_event_disable(sdmac, sdmac->event_id1); 907 + 908 + sdmac->event_id0 = 0; 909 + sdmac->event_id1 = 0; 910 + 911 + sdma_set_channel_priority(sdmac, 0); 912 + 913 + dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); 914 + 915 + clk_disable(sdma->clk); 916 + } 917 + 918 + static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 919 + struct dma_chan *chan, struct scatterlist *sgl, 920 + unsigned int sg_len, enum dma_data_direction direction, 921 + unsigned long flags) 922 + { 923 + struct sdma_channel *sdmac = to_sdma_chan(chan); 924 + struct sdma_engine *sdma = sdmac->sdma; 925 + int ret, i, count; 926 + int channel = chan->chan_id; 927 + struct scatterlist *sg; 928 + 929 + if (sdmac->status == DMA_IN_PROGRESS) 930 + return NULL; 931 + sdmac->status = DMA_IN_PROGRESS; 932 + 933 + sdmac->flags = 0; 934 + 935 + dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 936 + sg_len, channel); 937 + 938 + sdmac->direction = direction; 939 + ret = sdma_load_context(sdmac); 940 + if (ret) 941 + goto err_out; 942 + 943 + if (sg_len > NUM_BD) { 944 + dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 945 + channel, sg_len, NUM_BD); 946 + ret = -EINVAL; 947 + goto err_out; 948 + } 949 + 950 + for_each_sg(sgl, sg, sg_len, i) { 951 + struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 952 + int param; 953 + 954 + bd->buffer_addr = sgl->dma_address; 955 + 956 + count = sg->length; 957 + 958 + if (count > 0xffff) { 959 + dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 960 + channel, count, 0xffff); 961 + ret = -EINVAL; 962 + goto err_out; 963 + } 964 + 965 + bd->mode.count = count; 966 + 967 + if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { 968 + ret = 
-EINVAL; 969 + goto err_out; 970 + } 971 + if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 972 + bd->mode.command = 0; 973 + else 974 + bd->mode.command = sdmac->word_size; 975 + 976 + param = BD_DONE | BD_EXTD | BD_CONT; 977 + 978 + if (sdmac->flags & IMX_DMA_SG_LOOP) { 979 + param |= BD_INTR; 980 + if (i + 1 == sg_len) 981 + param |= BD_WRAP; 982 + } 983 + 984 + if (i + 1 == sg_len) 985 + param |= BD_INTR; 986 + 987 + dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 988 + i, count, sg->dma_address, 989 + param & BD_WRAP ? "wrap" : "", 990 + param & BD_INTR ? " intr" : ""); 991 + 992 + bd->mode.status = param; 993 + } 994 + 995 + sdmac->num_bd = sg_len; 996 + sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 997 + 998 + return &sdmac->desc; 999 + err_out: 1000 + return NULL; 1001 + } 1002 + 1003 + static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 1004 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1005 + size_t period_len, enum dma_data_direction direction) 1006 + { 1007 + struct sdma_channel *sdmac = to_sdma_chan(chan); 1008 + struct sdma_engine *sdma = sdmac->sdma; 1009 + int num_periods = buf_len / period_len; 1010 + int channel = chan->chan_id; 1011 + int ret, i = 0, buf = 0; 1012 + 1013 + dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 1014 + 1015 + if (sdmac->status == DMA_IN_PROGRESS) 1016 + return NULL; 1017 + 1018 + sdmac->status = DMA_IN_PROGRESS; 1019 + 1020 + sdmac->flags |= IMX_DMA_SG_LOOP; 1021 + sdmac->direction = direction; 1022 + ret = sdma_load_context(sdmac); 1023 + if (ret) 1024 + goto err_out; 1025 + 1026 + if (num_periods > NUM_BD) { 1027 + dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 1028 + channel, num_periods, NUM_BD); 1029 + goto err_out; 1030 + } 1031 + 1032 + if (period_len > 0xffff) { 1033 + dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n", 1034 + channel, period_len, 0xffff); 1035 + goto err_out; 1036 + } 
1037 + 1038 + while (buf < buf_len) { 1039 + struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1040 + int param; 1041 + 1042 + bd->buffer_addr = dma_addr; 1043 + 1044 + bd->mode.count = period_len; 1045 + 1046 + if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 1047 + goto err_out; 1048 + if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 1049 + bd->mode.command = 0; 1050 + else 1051 + bd->mode.command = sdmac->word_size; 1052 + 1053 + param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR; 1054 + if (i + 1 == num_periods) 1055 + param |= BD_WRAP; 1056 + 1057 + dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 1058 + i, period_len, dma_addr, 1059 + param & BD_WRAP ? "wrap" : "", 1060 + param & BD_INTR ? " intr" : ""); 1061 + 1062 + bd->mode.status = param; 1063 + 1064 + dma_addr += period_len; 1065 + buf += period_len; 1066 + 1067 + i++; 1068 + } 1069 + 1070 + sdmac->num_bd = num_periods; 1071 + sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1072 + 1073 + return &sdmac->desc; 1074 + err_out: 1075 + sdmac->status = DMA_ERROR; 1076 + return NULL; 1077 + } 1078 + 1079 + static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1080 + unsigned long arg) 1081 + { 1082 + struct sdma_channel *sdmac = to_sdma_chan(chan); 1083 + struct dma_slave_config *dmaengine_cfg = (void *)arg; 1084 + 1085 + switch (cmd) { 1086 + case DMA_TERMINATE_ALL: 1087 + sdma_disable_channel(sdmac); 1088 + return 0; 1089 + case DMA_SLAVE_CONFIG: 1090 + if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { 1091 + sdmac->per_address = dmaengine_cfg->src_addr; 1092 + sdmac->watermark_level = dmaengine_cfg->src_maxburst; 1093 + sdmac->word_size = dmaengine_cfg->src_addr_width; 1094 + } else { 1095 + sdmac->per_address = dmaengine_cfg->dst_addr; 1096 + sdmac->watermark_level = dmaengine_cfg->dst_maxburst; 1097 + sdmac->word_size = dmaengine_cfg->dst_addr_width; 1098 + } 1099 + return sdma_config_channel(sdmac); 1100 + default: 1101 + return -ENOSYS; 1102 + } 1103 + 1104 + 
return -EINVAL; 1105 + } 1106 + 1107 + static enum dma_status sdma_tx_status(struct dma_chan *chan, 1108 + dma_cookie_t cookie, 1109 + struct dma_tx_state *txstate) 1110 + { 1111 + struct sdma_channel *sdmac = to_sdma_chan(chan); 1112 + dma_cookie_t last_used; 1113 + enum dma_status ret; 1114 + 1115 + last_used = chan->cookie; 1116 + 1117 + ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used); 1118 + dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); 1119 + 1120 + return ret; 1121 + } 1122 + 1123 + static void sdma_issue_pending(struct dma_chan *chan) 1124 + { 1125 + /* 1126 + * Nothing to do. We only have a single descriptor 1127 + */ 1128 + } 1129 + 1130 + static int __init sdma_init(struct sdma_engine *sdma, 1131 + void *ram_code, int ram_code_size) 1132 + { 1133 + int i, ret; 1134 + dma_addr_t ccb_phys; 1135 + 1136 + switch (sdma->version) { 1137 + case 1: 1138 + sdma->num_events = 32; 1139 + break; 1140 + case 2: 1141 + sdma->num_events = 48; 1142 + break; 1143 + default: 1144 + dev_err(sdma->dev, "Unknown version %d. 
aborting\n", sdma->version); 1145 + return -ENODEV; 1146 + } 1147 + 1148 + clk_enable(sdma->clk); 1149 + 1150 + /* Be sure SDMA has not started yet */ 1151 + __raw_writel(0, sdma->regs + SDMA_H_C0PTR); 1152 + 1153 + sdma->channel_control = dma_alloc_coherent(NULL, 1154 + MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + 1155 + sizeof(struct sdma_context_data), 1156 + &ccb_phys, GFP_KERNEL); 1157 + 1158 + if (!sdma->channel_control) { 1159 + ret = -ENOMEM; 1160 + goto err_dma_alloc; 1161 + } 1162 + 1163 + sdma->context = (void *)sdma->channel_control + 1164 + MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 1165 + sdma->context_phys = ccb_phys + 1166 + MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 1167 + 1168 + /* Zero-out the CCB structures array just allocated */ 1169 + memset(sdma->channel_control, 0, 1170 + MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); 1171 + 1172 + /* disable all channels */ 1173 + for (i = 0; i < sdma->num_events; i++) 1174 + __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i)); 1175 + 1176 + /* All channels have priority 0 */ 1177 + for (i = 0; i < MAX_DMA_CHANNELS; i++) 1178 + __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 1179 + 1180 + ret = sdma_request_channel(&sdma->channel[0]); 1181 + if (ret) 1182 + goto err_dma_alloc; 1183 + 1184 + sdma_config_ownership(&sdma->channel[0], false, true, false); 1185 + 1186 + /* Set Command Channel (Channel Zero) */ 1187 + __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR); 1188 + 1189 + /* Set bits of CONFIG register but with static context switching */ 1190 + /* FIXME: Check whether to set ACR bit depending on clock ratios */ 1191 + __raw_writel(0, sdma->regs + SDMA_H_CONFIG); 1192 + 1193 + __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); 1194 + 1195 + /* download the RAM image for SDMA */ 1196 + sdma_load_script(sdma, ram_code, 1197 + ram_code_size, 1198 + sdma->script_addrs->ram_code_start_addr); 1199 + 1200 + /* Set bits of CONFIG register with given 
context switching mode */ 1201 + __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); 1202 + 1203 + /* Initializes channel's priorities */ 1204 + sdma_set_channel_priority(&sdma->channel[0], 7); 1205 + 1206 + clk_disable(sdma->clk); 1207 + 1208 + return 0; 1209 + 1210 + err_dma_alloc: 1211 + clk_disable(sdma->clk); 1212 + dev_err(sdma->dev, "initialisation failed with %d\n", ret); 1213 + return ret; 1214 + } 1215 + 1216 + static int __init sdma_probe(struct platform_device *pdev) 1217 + { 1218 + int ret; 1219 + const struct firmware *fw; 1220 + const struct sdma_firmware_header *header; 1221 + const struct sdma_script_start_addrs *addr; 1222 + int irq; 1223 + unsigned short *ram_code; 1224 + struct resource *iores; 1225 + struct sdma_platform_data *pdata = pdev->dev.platform_data; 1226 + char *fwname; 1227 + int i; 1228 + dma_cap_mask_t mask; 1229 + struct sdma_engine *sdma; 1230 + 1231 + sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1232 + if (!sdma) 1233 + return -ENOMEM; 1234 + 1235 + sdma->dev = &pdev->dev; 1236 + 1237 + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1238 + irq = platform_get_irq(pdev, 0); 1239 + if (!iores || irq < 0 || !pdata) { 1240 + ret = -EINVAL; 1241 + goto err_irq; 1242 + } 1243 + 1244 + if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { 1245 + ret = -EBUSY; 1246 + goto err_request_region; 1247 + } 1248 + 1249 + sdma->clk = clk_get(&pdev->dev, NULL); 1250 + if (IS_ERR(sdma->clk)) { 1251 + ret = PTR_ERR(sdma->clk); 1252 + goto err_clk; 1253 + } 1254 + 1255 + sdma->regs = ioremap(iores->start, resource_size(iores)); 1256 + if (!sdma->regs) { 1257 + ret = -ENOMEM; 1258 + goto err_ioremap; 1259 + } 1260 + 1261 + ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma); 1262 + if (ret) 1263 + goto err_request_irq; 1264 + 1265 + fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", 1266 + pdata->cpu_name, pdata->to_version); 1267 + if (!fwname) { 1268 + ret = -ENOMEM; 1269 + goto err_cputype; 1270 + } 1271 
+ 1272 + ret = request_firmware(&fw, fwname, &pdev->dev); 1273 + if (ret) { 1274 + dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n", 1275 + fwname, ret); 1276 + kfree(fwname); 1277 + goto err_cputype; 1278 + } 1279 + kfree(fwname); 1280 + 1281 + if (fw->size < sizeof(*header)) 1282 + goto err_firmware; 1283 + 1284 + header = (struct sdma_firmware_header *)fw->data; 1285 + 1286 + if (header->magic != SDMA_FIRMWARE_MAGIC) 1287 + goto err_firmware; 1288 + if (header->ram_code_start + header->ram_code_size > fw->size) 1289 + goto err_firmware; 1290 + 1291 + addr = (void *)header + header->script_addrs_start; 1292 + ram_code = (void *)header + header->ram_code_start; 1293 + sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL); 1294 + if (!sdma->script_addrs) 1295 + goto err_firmware; 1296 + memcpy(sdma->script_addrs, addr, sizeof(*addr)); 1297 + 1298 + sdma->version = pdata->sdma_version; 1299 + 1300 + INIT_LIST_HEAD(&sdma->dma_device.channels); 1301 + /* Initialize channel parameters */ 1302 + for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1303 + struct sdma_channel *sdmac = &sdma->channel[i]; 1304 + 1305 + sdmac->sdma = sdma; 1306 + spin_lock_init(&sdmac->lock); 1307 + 1308 + dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 1309 + dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 1310 + 1311 + sdmac->chan.device = &sdma->dma_device; 1312 + sdmac->chan.chan_id = i; 1313 + sdmac->channel = i; 1314 + 1315 + /* Add the channel to the DMAC list */ 1316 + list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); 1317 + } 1318 + 1319 + ret = sdma_init(sdma, ram_code, header->ram_code_size); 1320 + if (ret) 1321 + goto err_init; 1322 + 1323 + sdma->dma_device.dev = &pdev->dev; 1324 + 1325 + sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; 1326 + sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources; 1327 + sdma->dma_device.device_tx_status = sdma_tx_status; 1328 + sdma->dma_device.device_prep_slave_sg = 
sdma_prep_slave_sg; 1329 + sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 1330 + sdma->dma_device.device_control = sdma_control; 1331 + sdma->dma_device.device_issue_pending = sdma_issue_pending; 1332 + 1333 + ret = dma_async_device_register(&sdma->dma_device); 1334 + if (ret) { 1335 + dev_err(&pdev->dev, "unable to register\n"); 1336 + goto err_init; 1337 + } 1338 + 1339 + dev_info(&pdev->dev, "initialized (firmware %d.%d)\n", 1340 + header->version_major, 1341 + header->version_minor); 1342 + 1343 + /* request channel 0. This is an internal control channel 1344 + * to the SDMA engine and not available to clients. 1345 + */ 1346 + dma_cap_zero(mask); 1347 + dma_cap_set(DMA_SLAVE, mask); 1348 + dma_request_channel(mask, NULL, NULL); 1349 + 1350 + release_firmware(fw); 1351 + 1352 + return 0; 1353 + 1354 + err_init: 1355 + kfree(sdma->script_addrs); 1356 + err_firmware: 1357 + release_firmware(fw); 1358 + err_cputype: 1359 + free_irq(irq, sdma); 1360 + err_request_irq: 1361 + iounmap(sdma->regs); 1362 + err_ioremap: 1363 + clk_put(sdma->clk); 1364 + err_clk: 1365 + release_mem_region(iores->start, resource_size(iores)); 1366 + err_request_region: 1367 + err_irq: 1368 + kfree(sdma); 1369 + return 0; 1370 + } 1371 + 1372 + static int __exit sdma_remove(struct platform_device *pdev) 1373 + { 1374 + return -EBUSY; 1375 + } 1376 + 1377 + static struct platform_driver sdma_driver = { 1378 + .driver = { 1379 + .name = "imx-sdma", 1380 + }, 1381 + .remove = __exit_p(sdma_remove), 1382 + }; 1383 + 1384 + static int __init sdma_module_init(void) 1385 + { 1386 + return platform_driver_probe(&sdma_driver, sdma_probe); 1387 + } 1388 + subsys_initcall(sdma_module_init); 1389 + 1390 + MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 1391 + MODULE_DESCRIPTION("i.MX SDMA driver"); 1392 + MODULE_LICENSE("GPL");
+390 -86
drivers/dma/intel_mid_dma.c
··· 25 25 */ 26 26 #include <linux/pci.h> 27 27 #include <linux/interrupt.h> 28 + #include <linux/pm_runtime.h> 28 29 #include <linux/intel_mid_dma.h> 29 30 30 31 #define MAX_CHAN 4 /*max ch across controllers*/ ··· 92 91 int byte_width = 0, block_ts = 0; 93 92 94 93 switch (tx_width) { 95 - case LNW_DMA_WIDTH_8BIT: 94 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 96 95 byte_width = 1; 97 96 break; 98 - case LNW_DMA_WIDTH_16BIT: 97 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 99 98 byte_width = 2; 100 99 break; 101 - case LNW_DMA_WIDTH_32BIT: 100 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 102 101 default: 103 102 byte_width = 4; 104 103 break; ··· 248 247 struct middma_device *mid = to_middma_device(midc->chan.device); 249 248 250 249 /* channel is idle */ 251 - if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) { 250 + if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { 252 251 /*error*/ 253 252 pr_err("ERR_MDMA: channel is busy in start\n"); 254 253 /* The tasklet will hopefully advance the queue... 
*/ 255 254 return; 256 255 } 257 - 256 + midc->busy = true; 258 257 /*write registers and en*/ 259 258 iowrite32(first->sar, midc->ch_regs + SAR); 260 259 iowrite32(first->dar, midc->ch_regs + DAR); 260 + iowrite32(first->lli_phys, midc->ch_regs + LLP); 261 261 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 262 262 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 263 263 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); ··· 266 264 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 267 265 (int)first->sar, (int)first->dar, first->cfg_hi, 268 266 first->cfg_lo, first->ctl_hi, first->ctl_lo); 267 + first->status = DMA_IN_PROGRESS; 269 268 270 269 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 271 - first->status = DMA_IN_PROGRESS; 272 270 } 273 271 274 272 /** ··· 285 283 { 286 284 struct dma_async_tx_descriptor *txd = &desc->txd; 287 285 dma_async_tx_callback callback_txd = NULL; 286 + struct intel_mid_dma_lli *llitem; 288 287 void *param_txd = NULL; 289 288 290 289 midc->completed = txd->cookie; 291 290 callback_txd = txd->callback; 292 291 param_txd = txd->callback_param; 293 292 294 - list_move(&desc->desc_node, &midc->free_list); 295 - 293 + if (desc->lli != NULL) { 294 + /*clear the DONE bit of completed LLI in memory*/ 295 + llitem = desc->lli + desc->current_lli; 296 + llitem->ctl_hi &= CLEAR_DONE; 297 + if (desc->current_lli < desc->lli_length-1) 298 + (desc->current_lli)++; 299 + else 300 + desc->current_lli = 0; 301 + } 296 302 spin_unlock_bh(&midc->lock); 297 303 if (callback_txd) { 298 304 pr_debug("MDMA: TXD callback set ... 
calling\n"); 299 305 callback_txd(param_txd); 300 - spin_lock_bh(&midc->lock); 301 - return; 306 + } 307 + if (midc->raw_tfr) { 308 + desc->status = DMA_SUCCESS; 309 + if (desc->lli != NULL) { 310 + pci_pool_free(desc->lli_pool, desc->lli, 311 + desc->lli_phys); 312 + pci_pool_destroy(desc->lli_pool); 313 + } 314 + list_move(&desc->desc_node, &midc->free_list); 315 + midc->busy = false; 302 316 } 303 317 spin_lock_bh(&midc->lock); 304 318 ··· 335 317 336 318 /*tx is complete*/ 337 319 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 338 - if (desc->status == DMA_IN_PROGRESS) { 339 - desc->status = DMA_SUCCESS; 320 + if (desc->status == DMA_IN_PROGRESS) 340 321 midc_descriptor_complete(midc, desc); 341 - } 342 322 } 343 323 return; 344 - } 324 + } 325 + /** 326 + * midc_lli_fill_sg - Helper function to convert 327 + * SG list to Linked List Items. 328 + *@midc: Channel 329 + *@desc: DMA descriptor 330 + *@sglist: Pointer to SG list 331 + *@sglen: SG list length 332 + *@flags: DMA transaction flags 333 + * 334 + * Walk through the SG list and convert the SG list into Linked 335 + * List Items (LLI). 
336 + */ 337 + static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, 338 + struct intel_mid_dma_desc *desc, 339 + struct scatterlist *sglist, 340 + unsigned int sglen, 341 + unsigned int flags) 342 + { 343 + struct intel_mid_dma_slave *mids; 344 + struct scatterlist *sg; 345 + dma_addr_t lli_next, sg_phy_addr; 346 + struct intel_mid_dma_lli *lli_bloc_desc; 347 + union intel_mid_dma_ctl_lo ctl_lo; 348 + union intel_mid_dma_ctl_hi ctl_hi; 349 + int i; 345 350 351 + pr_debug("MDMA: Entered midc_lli_fill_sg\n"); 352 + mids = midc->mid_slave; 353 + 354 + lli_bloc_desc = desc->lli; 355 + lli_next = desc->lli_phys; 356 + 357 + ctl_lo.ctl_lo = desc->ctl_lo; 358 + ctl_hi.ctl_hi = desc->ctl_hi; 359 + for_each_sg(sglist, sg, sglen, i) { 360 + /*Populate CTL_LOW and LLI values*/ 361 + if (i != sglen - 1) { 362 + lli_next = lli_next + 363 + sizeof(struct intel_mid_dma_lli); 364 + } else { 365 + /*Check for circular list, otherwise terminate LLI to ZERO*/ 366 + if (flags & DMA_PREP_CIRCULAR_LIST) { 367 + pr_debug("MDMA: LLI is configured in circular mode\n"); 368 + lli_next = desc->lli_phys; 369 + } else { 370 + lli_next = 0; 371 + ctl_lo.ctlx.llp_dst_en = 0; 372 + ctl_lo.ctlx.llp_src_en = 0; 373 + } 374 + } 375 + /*Populate CTL_HI values*/ 376 + ctl_hi.ctlx.block_ts = get_block_ts(sg->length, 377 + desc->width, 378 + midc->dma->block_size); 379 + /*Populate SAR and DAR values*/ 380 + sg_phy_addr = sg_phys(sg); 381 + if (desc->dirn == DMA_TO_DEVICE) { 382 + lli_bloc_desc->sar = sg_phy_addr; 383 + lli_bloc_desc->dar = mids->dma_slave.dst_addr; 384 + } else if (desc->dirn == DMA_FROM_DEVICE) { 385 + lli_bloc_desc->sar = mids->dma_slave.src_addr; 386 + lli_bloc_desc->dar = sg_phy_addr; 387 + } 388 + /*Copy values into block descriptor in system memroy*/ 389 + lli_bloc_desc->llp = lli_next; 390 + lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; 391 + lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; 392 + 393 + lli_bloc_desc++; 394 + } 395 + /*Copy very first LLI values to descriptor*/ 396 + 
desc->ctl_lo = desc->lli->ctl_lo; 397 + desc->ctl_hi = desc->lli->ctl_hi; 398 + desc->sar = desc->lli->sar; 399 + desc->dar = desc->lli->dar; 400 + 401 + return 0; 402 + } 346 403 /***************************************************************************** 347 404 DMA engine callback Functions*/ 348 405 /** ··· 442 349 desc->txd.cookie = cookie; 443 350 444 351 445 - if (list_empty(&midc->active_list)) { 446 - midc_dostart(midc, desc); 352 + if (list_empty(&midc->active_list)) 447 353 list_add_tail(&desc->desc_node, &midc->active_list); 448 - } else { 354 + else 449 355 list_add_tail(&desc->desc_node, &midc->queue); 450 - } 356 + 357 + midc_dostart(midc, desc); 451 358 spin_unlock_bh(&midc->lock); 452 359 453 360 return cookie; ··· 507 414 return ret; 508 415 } 509 416 417 + static int dma_slave_control(struct dma_chan *chan, unsigned long arg) 418 + { 419 + struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 420 + struct dma_slave_config *slave = (struct dma_slave_config *)arg; 421 + struct intel_mid_dma_slave *mid_slave; 422 + 423 + BUG_ON(!midc); 424 + BUG_ON(!slave); 425 + pr_debug("MDMA: slave control called\n"); 426 + 427 + mid_slave = to_intel_mid_dma_slave(slave); 428 + 429 + BUG_ON(!mid_slave); 430 + 431 + midc->mid_slave = mid_slave; 432 + return 0; 433 + } 510 434 /** 511 435 * intel_mid_dma_device_control - DMA device control 512 436 * @chan: chan for DMA control ··· 538 428 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 539 429 struct middma_device *mid = to_middma_device(chan->device); 540 430 struct intel_mid_dma_desc *desc, *_desc; 541 - LIST_HEAD(list); 431 + union intel_mid_dma_cfg_lo cfg_lo; 432 + 433 + if (cmd == DMA_SLAVE_CONFIG) 434 + return dma_slave_control(chan, arg); 542 435 543 436 if (cmd != DMA_TERMINATE_ALL) 544 437 return -ENXIO; 545 438 546 439 spin_lock_bh(&midc->lock); 547 - if (midc->in_use == false) { 440 + if (midc->busy == false) { 548 441 spin_unlock_bh(&midc->lock); 549 442 return 0; 550 443 } 551 
- list_splice_init(&midc->free_list, &list); 552 - midc->descs_allocated = 0; 553 - midc->slave = NULL; 554 - 444 + /*Suspend and disable the channel*/ 445 + cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); 446 + cfg_lo.cfgx.ch_susp = 1; 447 + iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); 448 + iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 449 + midc->busy = false; 555 450 /* Disable interrupts */ 556 451 disable_dma_interrupt(midc); 452 + midc->descs_allocated = 0; 557 453 558 454 spin_unlock_bh(&midc->lock); 559 - list_for_each_entry_safe(desc, _desc, &list, desc_node) { 560 - pr_debug("MDMA: freeing descriptor %p\n", desc); 561 - pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 455 + list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 456 + if (desc->lli != NULL) { 457 + pci_pool_free(desc->lli_pool, desc->lli, 458 + desc->lli_phys); 459 + pci_pool_destroy(desc->lli_pool); 460 + } 461 + list_move(&desc->desc_node, &midc->free_list); 562 462 } 563 463 return 0; 564 464 } 565 465 566 - /** 567 - * intel_mid_dma_prep_slave_sg - Prep slave sg txn 568 - * @chan: chan for DMA transfer 569 - * @sgl: scatter gather list 570 - * @sg_len: length of sg txn 571 - * @direction: DMA transfer dirtn 572 - * @flags: DMA flags 573 - * 574 - * Do DMA sg txn: NOT supported now 575 - */ 576 - static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 577 - struct dma_chan *chan, struct scatterlist *sgl, 578 - unsigned int sg_len, enum dma_data_direction direction, 579 - unsigned long flags) 580 - { 581 - /*not supported now*/ 582 - return NULL; 583 - } 584 466 585 467 /** 586 468 * intel_mid_dma_prep_memcpy - Prep memcpy txn ··· 597 495 union intel_mid_dma_ctl_hi ctl_hi; 598 496 union intel_mid_dma_cfg_lo cfg_lo; 599 497 union intel_mid_dma_cfg_hi cfg_hi; 600 - enum intel_mid_dma_width width = 0; 498 + enum dma_slave_buswidth width; 601 499 602 500 pr_debug("MDMA: Prep for memcpy\n"); 603 - WARN_ON(!chan); 501 + 
BUG_ON(!chan); 604 502 if (!len) 605 503 return NULL; 606 504 607 - mids = chan->private; 608 - WARN_ON(!mids); 609 - 610 505 midc = to_intel_mid_dma_chan(chan); 611 - WARN_ON(!midc); 506 + BUG_ON(!midc); 507 + 508 + mids = midc->mid_slave; 509 + BUG_ON(!mids); 612 510 613 511 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 614 512 midc->dma->pci_id, midc->ch_id, len); 615 513 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 616 - mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); 514 + mids->cfg_mode, mids->dma_slave.direction, 515 + mids->hs_mode, mids->dma_slave.src_addr_width); 617 516 618 517 /*calculate CFG_LO*/ 619 518 if (mids->hs_mode == LNW_DMA_SW_HS) { ··· 633 530 if (midc->dma->pimr_mask) { 634 531 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 635 532 cfg_hi.cfgx.fifo_mode = 1; 636 - if (mids->dirn == DMA_TO_DEVICE) { 533 + if (mids->dma_slave.direction == DMA_TO_DEVICE) { 637 534 cfg_hi.cfgx.src_per = 0; 638 535 if (mids->device_instance == 0) 639 536 cfg_hi.cfgx.dst_per = 3; 640 537 if (mids->device_instance == 1) 641 538 cfg_hi.cfgx.dst_per = 1; 642 - } else if (mids->dirn == DMA_FROM_DEVICE) { 539 + } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 643 540 if (mids->device_instance == 0) 644 541 cfg_hi.cfgx.src_per = 2; 645 542 if (mids->device_instance == 1) ··· 655 552 656 553 /*calculate CTL_HI*/ 657 554 ctl_hi.ctlx.reser = 0; 658 - width = mids->src_width; 555 + ctl_hi.ctlx.done = 0; 556 + width = mids->dma_slave.src_addr_width; 659 557 660 558 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 661 559 pr_debug("MDMA:calc len %d for block size %d\n", ··· 664 560 /*calculate CTL_LO*/ 665 561 ctl_lo.ctl_lo = 0; 666 562 ctl_lo.ctlx.int_en = 1; 667 - ctl_lo.ctlx.dst_tr_width = mids->dst_width; 668 - ctl_lo.ctlx.src_tr_width = mids->src_width; 669 - ctl_lo.ctlx.dst_msize = mids->src_msize; 670 - ctl_lo.ctlx.src_msize = mids->dst_msize; 563 + ctl_lo.ctlx.dst_tr_width = 
mids->dma_slave.dst_addr_width; 564 + ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width; 565 + ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; 566 + ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; 671 567 672 568 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 673 569 ctl_lo.ctlx.tt_fc = 0; 674 570 ctl_lo.ctlx.sinc = 0; 675 571 ctl_lo.ctlx.dinc = 0; 676 572 } else { 677 - if (mids->dirn == DMA_TO_DEVICE) { 573 + if (mids->dma_slave.direction == DMA_TO_DEVICE) { 678 574 ctl_lo.ctlx.sinc = 0; 679 575 ctl_lo.ctlx.dinc = 2; 680 576 ctl_lo.ctlx.tt_fc = 1; 681 - } else if (mids->dirn == DMA_FROM_DEVICE) { 577 + } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 682 578 ctl_lo.ctlx.sinc = 2; 683 579 ctl_lo.ctlx.dinc = 0; 684 580 ctl_lo.ctlx.tt_fc = 2; ··· 701 597 desc->ctl_lo = ctl_lo.ctl_lo; 702 598 desc->ctl_hi = ctl_hi.ctl_hi; 703 599 desc->width = width; 704 - desc->dirn = mids->dirn; 600 + desc->dirn = mids->dma_slave.direction; 601 + desc->lli_phys = 0; 602 + desc->lli = NULL; 603 + desc->lli_pool = NULL; 705 604 return &desc->txd; 706 605 707 606 err_desc_get: 708 607 pr_err("ERR_MDMA: Failed to get desc\n"); 709 608 midc_desc_put(midc, desc); 710 609 return NULL; 610 + } 611 + /** 612 + * intel_mid_dma_prep_slave_sg - Prep slave sg txn 613 + * @chan: chan for DMA transfer 614 + * @sgl: scatter gather list 615 + * @sg_len: length of sg txn 616 + * @direction: DMA transfer dirtn 617 + * @flags: DMA flags 618 + * 619 + * Prepares LLI based periphral transfer 620 + */ 621 + static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 622 + struct dma_chan *chan, struct scatterlist *sgl, 623 + unsigned int sg_len, enum dma_data_direction direction, 624 + unsigned long flags) 625 + { 626 + struct intel_mid_dma_chan *midc = NULL; 627 + struct intel_mid_dma_slave *mids = NULL; 628 + struct intel_mid_dma_desc *desc = NULL; 629 + struct dma_async_tx_descriptor *txd = NULL; 630 + union intel_mid_dma_ctl_lo ctl_lo; 631 + 632 + pr_debug("MDMA: 
Prep for slave SG\n"); 633 + 634 + if (!sg_len) { 635 + pr_err("MDMA: Invalid SG length\n"); 636 + return NULL; 637 + } 638 + midc = to_intel_mid_dma_chan(chan); 639 + BUG_ON(!midc); 640 + 641 + mids = midc->mid_slave; 642 + BUG_ON(!mids); 643 + 644 + if (!midc->dma->pimr_mask) { 645 + pr_debug("MDMA: SG list is not supported by this controller\n"); 646 + return NULL; 647 + } 648 + 649 + pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 650 + sg_len, direction, flags); 651 + 652 + txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); 653 + if (NULL == txd) { 654 + pr_err("MDMA: Prep memcpy failed\n"); 655 + return NULL; 656 + } 657 + desc = to_intel_mid_dma_desc(txd); 658 + desc->dirn = direction; 659 + ctl_lo.ctl_lo = desc->ctl_lo; 660 + ctl_lo.ctlx.llp_dst_en = 1; 661 + ctl_lo.ctlx.llp_src_en = 1; 662 + desc->ctl_lo = ctl_lo.ctl_lo; 663 + desc->lli_length = sg_len; 664 + desc->current_lli = 0; 665 + /* DMA coherent memory pool for LLI descriptors*/ 666 + desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 667 + midc->dma->pdev, 668 + (sizeof(struct intel_mid_dma_lli)*sg_len), 669 + 32, 0); 670 + if (NULL == desc->lli_pool) { 671 + pr_err("MID_DMA:LLI pool create failed\n"); 672 + return NULL; 673 + } 674 + 675 + desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 676 + if (!desc->lli) { 677 + pr_err("MID_DMA: LLI alloc failed\n"); 678 + pci_pool_destroy(desc->lli_pool); 679 + return NULL; 680 + } 681 + 682 + midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 683 + if (flags & DMA_PREP_INTERRUPT) { 684 + iowrite32(UNMASK_INTR_REG(midc->ch_id), 685 + midc->dma_base + MASK_BLOCK); 686 + pr_debug("MDMA:Enabled Block interrupt\n"); 687 + } 688 + return &desc->txd; 711 689 } 712 690 713 691 /** ··· 804 618 struct middma_device *mid = to_middma_device(chan->device); 805 619 struct intel_mid_dma_desc *desc, *_desc; 806 620 807 - if (true == midc->in_use) { 621 + if (true == midc->busy) { 808 622 /*trying to free ch in 
use!!!!!*/ 809 623 pr_err("ERR_MDMA: trying to free ch in use\n"); 810 624 } 811 - 625 + pm_runtime_put(&mid->pdev->dev); 812 626 spin_lock_bh(&midc->lock); 813 627 midc->descs_allocated = 0; 814 628 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { ··· 825 639 } 826 640 spin_unlock_bh(&midc->lock); 827 641 midc->in_use = false; 642 + midc->busy = false; 828 643 /* Disable CH interrupts */ 829 644 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 830 645 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); ··· 846 659 dma_addr_t phys; 847 660 int i = 0; 848 661 662 + pm_runtime_get_sync(&mid->pdev->dev); 663 + 664 + if (mid->state == SUSPENDED) { 665 + if (dma_resume(mid->pdev)) { 666 + pr_err("ERR_MDMA: resume failed"); 667 + return -EFAULT; 668 + } 669 + } 849 670 850 671 /* ASSERT: channel is idle */ 851 672 if (test_ch_en(mid->dma_base, midc->ch_id)) { 852 673 /*ch is not idle*/ 853 674 pr_err("ERR_MDMA: ch not idle\n"); 675 + pm_runtime_put(&mid->pdev->dev); 854 676 return -EIO; 855 677 } 856 678 midc->completed = chan->cookie = 1; ··· 870 674 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 871 675 if (!desc) { 872 676 pr_err("ERR_MDMA: desc failed\n"); 677 + pm_runtime_put(&mid->pdev->dev); 873 678 return -ENOMEM; 874 679 /*check*/ 875 680 } ··· 883 686 list_add_tail(&desc->desc_node, &midc->free_list); 884 687 } 885 688 spin_unlock_bh(&midc->lock); 886 - midc->in_use = false; 689 + midc->in_use = true; 690 + midc->busy = false; 887 691 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 888 692 return i; 889 693 } ··· 913 715 { 914 716 struct middma_device *mid = NULL; 915 717 struct intel_mid_dma_chan *midc = NULL; 916 - u32 status; 718 + u32 status, raw_tfr, raw_block; 917 719 int i; 918 720 919 721 mid = (struct middma_device *)data; ··· 922 724 return; 923 725 } 924 726 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 925 - status = ioread32(mid->dma_base + RAW_TFR); 926 - 
pr_debug("MDMA:RAW_TFR %x\n", status); 727 + raw_tfr = ioread32(mid->dma_base + RAW_TFR); 728 + raw_block = ioread32(mid->dma_base + RAW_BLOCK); 729 + status = raw_tfr | raw_block; 927 730 status &= mid->intr_mask; 928 731 while (status) { 929 732 /*txn interrupt*/ ··· 940 741 } 941 742 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 942 743 status, midc->ch_id, i); 744 + midc->raw_tfr = raw_tfr; 745 + midc->raw_block = raw_block; 746 + spin_lock_bh(&midc->lock); 943 747 /*clearing this interrupts first*/ 944 748 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 945 - iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); 946 - 947 - spin_lock_bh(&midc->lock); 749 + if (raw_block) { 750 + iowrite32((1 << midc->ch_id), 751 + mid->dma_base + CLEAR_BLOCK); 752 + } 948 753 midc_scan_descriptors(mid, midc); 949 754 pr_debug("MDMA:Scan of desc... complete, unmasking\n"); 950 755 iowrite32(UNMASK_INTR_REG(midc->ch_id), 951 756 mid->dma_base + MASK_TFR); 757 + if (raw_block) { 758 + iowrite32(UNMASK_INTR_REG(midc->ch_id), 759 + mid->dma_base + MASK_BLOCK); 760 + } 952 761 spin_unlock_bh(&midc->lock); 953 762 } 954 763 ··· 1011 804 static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 1012 805 { 1013 806 struct middma_device *mid = data; 1014 - u32 status; 807 + u32 tfr_status, err_status; 1015 808 int call_tasklet = 0; 809 + 810 + tfr_status = ioread32(mid->dma_base + RAW_TFR); 811 + err_status = ioread32(mid->dma_base + RAW_ERR); 812 + if (!tfr_status && !err_status) 813 + return IRQ_NONE; 1016 814 1017 815 /*DMA Interrupt*/ 1018 816 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); ··· 1026 814 return -EINVAL; 1027 815 } 1028 816 1029 - status = ioread32(mid->dma_base + RAW_TFR); 1030 - pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask); 1031 - status &= mid->intr_mask; 1032 - if (status) { 817 + pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 818 + tfr_status &= mid->intr_mask; 819 + if (tfr_status) 
{ 1033 820 /*need to disable intr*/ 1034 - iowrite32((status << 8), mid->dma_base + MASK_TFR); 1035 - pr_debug("MDMA: Calling tasklet %x\n", status); 821 + iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); 822 + iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); 823 + pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 1036 824 call_tasklet = 1; 1037 825 } 1038 - status = ioread32(mid->dma_base + RAW_ERR); 1039 - status &= mid->intr_mask; 1040 - if (status) { 1041 - iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR); 826 + err_status &= mid->intr_mask; 827 + if (err_status) { 828 + iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); 1042 829 call_tasklet = 1; 1043 830 } 1044 831 if (call_tasklet) ··· 1067 856 { 1068 857 struct middma_device *dma = pci_get_drvdata(pdev); 1069 858 int err, i; 1070 - unsigned int irq_level; 1071 859 1072 860 /* DMA coherent memory pool for DMA descriptor allocations */ 1073 861 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, ··· 1094 884 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 1095 885 /*init CH structures*/ 1096 886 dma->intr_mask = 0; 887 + dma->state = RUNNING; 1097 888 for (i = 0; i < dma->max_chan; i++) { 1098 889 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1099 890 ··· 1154 943 1155 944 /*register irq */ 1156 945 if (dma->pimr_mask) { 1157 - irq_level = IRQF_SHARED; 1158 946 pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 1159 947 err = request_irq(pdev->irq, intel_mid_dma_interrupt1, 1160 948 IRQF_SHARED, "INTEL_MID_DMAC1", dma); ··· 1161 951 goto err_irq; 1162 952 } else { 1163 953 dma->intr_mask = 0x03; 1164 - irq_level = 0; 1165 954 pr_debug("MDMA:Requesting irq for DMAC2\n"); 1166 955 err = request_irq(pdev->irq, intel_mid_dma_interrupt2, 1167 - 0, "INTEL_MID_DMAC2", dma); 956 + IRQF_SHARED, "INTEL_MID_DMAC2", dma); 1168 957 if (0 != err) 1169 958 goto err_irq; 1170 959 } ··· 1279 1070 if (err) 1280 1071 goto 
err_dma; 1281 1072 1073 + pm_runtime_set_active(&pdev->dev); 1074 + pm_runtime_enable(&pdev->dev); 1075 + pm_runtime_allow(&pdev->dev); 1282 1076 return 0; 1283 1077 1284 1078 err_dma: ··· 1316 1104 pci_disable_device(pdev); 1317 1105 } 1318 1106 1107 + /* Power Management */ 1108 + /* 1109 + * dma_suspend - PCI suspend function 1110 + * 1111 + * @pci: PCI device structure 1112 + * @state: PM message 1113 + * 1114 + * This function is called by OS when a power event occurs 1115 + */ 1116 + int dma_suspend(struct pci_dev *pci, pm_message_t state) 1117 + { 1118 + int i; 1119 + struct middma_device *device = pci_get_drvdata(pci); 1120 + pr_debug("MDMA: dma_suspend called\n"); 1121 + 1122 + for (i = 0; i < device->max_chan; i++) { 1123 + if (device->ch[i].in_use) 1124 + return -EAGAIN; 1125 + } 1126 + device->state = SUSPENDED; 1127 + pci_set_drvdata(pci, device); 1128 + pci_save_state(pci); 1129 + pci_disable_device(pci); 1130 + pci_set_power_state(pci, PCI_D3hot); 1131 + return 0; 1132 + } 1133 + 1134 + /** 1135 + * dma_resume - PCI resume function 1136 + * 1137 + * @pci: PCI device structure 1138 + * 1139 + * This function is called by OS when a power event occurs 1140 + */ 1141 + int dma_resume(struct pci_dev *pci) 1142 + { 1143 + int ret; 1144 + struct middma_device *device = pci_get_drvdata(pci); 1145 + 1146 + pr_debug("MDMA: dma_resume called\n"); 1147 + pci_set_power_state(pci, PCI_D0); 1148 + pci_restore_state(pci); 1149 + ret = pci_enable_device(pci); 1150 + if (ret) { 1151 + pr_err("MDMA: device cant be enabled for %x\n", pci->device); 1152 + return ret; 1153 + } 1154 + device->state = RUNNING; 1155 + iowrite32(REG_BIT0, device->dma_base + DMA_CFG); 1156 + pci_set_drvdata(pci, device); 1157 + return 0; 1158 + } 1159 + 1160 + static int dma_runtime_suspend(struct device *dev) 1161 + { 1162 + struct pci_dev *pci_dev = to_pci_dev(dev); 1163 + return dma_suspend(pci_dev, PMSG_SUSPEND); 1164 + } 1165 + 1166 + static int dma_runtime_resume(struct device *dev) 1167 
+ { 1168 + struct pci_dev *pci_dev = to_pci_dev(dev); 1169 + return dma_resume(pci_dev); 1170 + } 1171 + 1172 + static int dma_runtime_idle(struct device *dev) 1173 + { 1174 + struct pci_dev *pdev = to_pci_dev(dev); 1175 + struct middma_device *device = pci_get_drvdata(pdev); 1176 + int i; 1177 + 1178 + for (i = 0; i < device->max_chan; i++) { 1179 + if (device->ch[i].in_use) 1180 + return -EAGAIN; 1181 + } 1182 + 1183 + return pm_schedule_suspend(dev, 0); 1184 + } 1185 + 1319 1186 /****************************************************************************** 1320 1187 * PCI stuff 1321 1188 */ ··· 1407 1116 }; 1408 1117 MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids); 1409 1118 1119 + static const struct dev_pm_ops intel_mid_dma_pm = { 1120 + .runtime_suspend = dma_runtime_suspend, 1121 + .runtime_resume = dma_runtime_resume, 1122 + .runtime_idle = dma_runtime_idle, 1123 + }; 1124 + 1410 1125 static struct pci_driver intel_mid_dma_pci = { 1411 1126 .name = "Intel MID DMA", 1412 1127 .id_table = intel_mid_dma_ids, 1413 1128 .probe = intel_mid_dma_probe, 1414 1129 .remove = __devexit_p(intel_mid_dma_remove), 1130 + #ifdef CONFIG_PM 1131 + .suspend = dma_suspend, 1132 + .resume = dma_resume, 1133 + .driver = { 1134 + .pm = &intel_mid_dma_pm, 1135 + }, 1136 + #endif 1415 1137 }; 1416 1138 1417 1139 static int __init intel_mid_dma_init(void)
+47 -6
drivers/dma/intel_mid_dma_regs.h
··· 29 29 #include <linux/dmapool.h> 30 30 #include <linux/pci_ids.h> 31 31 32 - #define INTEL_MID_DMA_DRIVER_VERSION "1.0.5" 32 + #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0" 33 33 34 34 #define REG_BIT0 0x00000001 35 35 #define REG_BIT8 0x00000100 36 - 36 + #define INT_MASK_WE 0x8 37 + #define CLEAR_DONE 0xFFFFEFFF 37 38 #define UNMASK_INTR_REG(chan_num) \ 38 39 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) 39 40 #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num) 40 41 41 42 #define ENABLE_CHANNEL(chan_num) \ 42 43 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num)) 44 + 45 + #define DISABLE_CHANNEL(chan_num) \ 46 + (REG_BIT8 << chan_num) 43 47 44 48 #define DESCS_PER_CHANNEL 16 45 49 /*DMA Registers*/ ··· 54 50 /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/ 55 51 #define SAR 0x00 /* Source Address Register*/ 56 52 #define DAR 0x08 /* Destination Address Register*/ 53 + #define LLP 0x10 /* Linked List Pointer Register*/ 57 54 #define CTL_LOW 0x18 /* Control Register*/ 58 55 #define CTL_HIGH 0x1C /* Control Register*/ 59 56 #define CFG_LOW 0x40 /* Configuration Register Low*/ ··· 117 112 union intel_mid_dma_ctl_hi { 118 113 struct { 119 114 u32 block_ts:12; /*block transfer size*/ 120 - /*configured by DMAC*/ 121 - u32 reser:20; 115 + u32 done:1; /*Done - updated by DMAC*/ 116 + u32 reser:19; /*configured by DMAC*/ 122 117 } ctlx; 123 118 u32 ctl_hi; 124 119 ··· 157 152 u32 cfg_hi; 158 153 }; 159 154 155 + 160 156 /** 161 157 * struct intel_mid_dma_chan - internal mid representation of a DMA channel 162 158 * @chan: dma_chan strcture represetation for mid chan ··· 172 166 * @slave: dma slave struture 173 167 * @descs_allocated: total number of decsiptors allocated 174 168 * @dma: dma device struture pointer 169 + * @busy: bool representing if ch is busy (active txn) or not 175 170 * @in_use: bool representing if ch is in use or not 171 + * @raw_tfr: raw trf interrupt recieved 172 + * @raw_block: raw block interrupt recieved 176 173 */ 177 174 struct 
intel_mid_dma_chan { 178 175 struct dma_chan chan; ··· 187 178 struct list_head active_list; 188 179 struct list_head queue; 189 180 struct list_head free_list; 190 - struct intel_mid_dma_slave *slave; 191 181 unsigned int descs_allocated; 192 182 struct middma_device *dma; 183 + bool busy; 193 184 bool in_use; 185 + u32 raw_tfr; 186 + u32 raw_block; 187 + struct intel_mid_dma_slave *mid_slave; 194 188 }; 195 189 196 190 static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan( ··· 202 190 return container_of(chan, struct intel_mid_dma_chan, chan); 203 191 } 204 192 193 + enum intel_mid_dma_state { 194 + RUNNING = 0, 195 + SUSPENDED, 196 + }; 205 197 /** 206 198 * struct middma_device - internal representation of a DMA device 207 199 * @pdev: PCI device ··· 221 205 * @max_chan: max number of chs supported (from drv_data) 222 206 * @block_size: Block size of DMA transfer supported (from drv_data) 223 207 * @pimr_mask: MMIO register addr for periphral interrupt (from drv_data) 208 + * @state: dma PM device state 224 209 */ 225 210 struct middma_device { 226 211 struct pci_dev *pdev; ··· 237 220 int max_chan; 238 221 int block_size; 239 222 unsigned int pimr_mask; 223 + enum intel_mid_dma_state state; 240 224 }; 241 225 242 226 static inline struct middma_device *to_middma_device(struct dma_device *common) ··· 256 238 u32 cfg_lo; 257 239 u32 ctl_lo; 258 240 u32 ctl_hi; 241 + struct pci_pool *lli_pool; 242 + struct intel_mid_dma_lli *lli; 243 + dma_addr_t lli_phys; 244 + unsigned int lli_length; 245 + unsigned int current_lli; 259 246 dma_addr_t next; 260 247 enum dma_data_direction dirn; 261 248 enum dma_status status; 262 - enum intel_mid_dma_width width; /*width of DMA txn*/ 249 + enum dma_slave_buswidth width; /*width of DMA txn*/ 263 250 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ 264 251 265 252 }; 253 + 254 + struct intel_mid_dma_lli { 255 + dma_addr_t sar; 256 + dma_addr_t dar; 257 + dma_addr_t llp; 258 + u32 ctl_lo; 259 + u32 ctl_hi; 260 + } 
__attribute__ ((packed)); 266 261 267 262 static inline int test_ch_en(void __iomem *dma, u32 ch_no) 268 263 { ··· 288 257 { 289 258 return container_of(txd, struct intel_mid_dma_desc, txd); 290 259 } 260 + 261 + static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave 262 + (struct dma_slave_config *slave) 263 + { 264 + return container_of(slave, struct intel_mid_dma_slave, dma_slave); 265 + } 266 + 267 + 268 + int dma_resume(struct pci_dev *pci); 269 + 291 270 #endif /*__INTEL_MID_DMAC_REGS_H__*/
+1 -1
drivers/dma/mv_xor.c
··· 162 162 163 163 static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan) 164 164 { 165 - u32 val = (1 << (1 + (chan->idx * 16))); 165 + u32 val = ~(1 << (chan->idx * 16)); 166 166 dev_dbg(chan->device->common.dev, "%s, val 0x%08x\n", __func__, val); 167 167 __raw_writel(val, XOR_INTR_CAUSE(chan)); 168 168 }
+2 -1
drivers/dma/shdma.c
··· 580 580 581 581 sh_chan = to_sh_chan(chan); 582 582 param = chan->private; 583 - slave_addr = param->config->addr; 584 583 585 584 /* Someone calling slave DMA on a public channel? */ 586 585 if (!param || !sg_len) { ··· 587 588 __func__, param, sg_len, param ? param->slave_id : -1); 588 589 return NULL; 589 590 } 591 + 592 + slave_addr = param->config->addr; 590 593 591 594 /* 592 595 * if (param != NULL), this is a successfully requested slave channel,
+17
drivers/dma/ste_dma40.c
··· 1903 1903 return NULL; 1904 1904 } 1905 1905 1906 + static struct dma_async_tx_descriptor * 1907 + d40_prep_sg(struct dma_chan *chan, 1908 + struct scatterlist *dst_sg, unsigned int dst_nents, 1909 + struct scatterlist *src_sg, unsigned int src_nents, 1910 + unsigned long dma_flags) 1911 + { 1912 + if (dst_nents != src_nents) 1913 + return NULL; 1914 + 1915 + return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); 1916 + } 1917 + 1906 1918 static int d40_prep_slave_sg_log(struct d40_desc *d40d, 1907 1919 struct d40_chan *d40c, 1908 1920 struct scatterlist *sgl, ··· 2337 2325 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; 2338 2326 base->dma_slave.device_free_chan_resources = d40_free_chan_resources; 2339 2327 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; 2328 + base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2340 2329 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; 2341 2330 base->dma_slave.device_tx_status = d40_tx_status; 2342 2331 base->dma_slave.device_issue_pending = d40_issue_pending; ··· 2358 2345 2359 2346 dma_cap_zero(base->dma_memcpy.cap_mask); 2360 2347 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2348 + dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2361 2349 2362 2350 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; 2363 2351 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; 2364 2352 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; 2353 + base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2365 2354 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; 2366 2355 base->dma_memcpy.device_tx_status = d40_tx_status; 2367 2356 base->dma_memcpy.device_issue_pending = d40_issue_pending; ··· 2390 2375 dma_cap_zero(base->dma_both.cap_mask); 2391 2376 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2392 2377 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2378 + dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2393 2379 2394 2380 
base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; 2395 2381 base->dma_both.device_free_chan_resources = d40_free_chan_resources; 2396 2382 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; 2383 + base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2397 2384 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; 2398 2385 base->dma_both.device_tx_status = d40_tx_status; 2399 2386 base->dma_both.device_issue_pending = d40_issue_pending;
+222
include/linux/amba/pl08x.h
··· 1 + /* 2 + * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver 3 + * 4 + * Copyright (C) 2005 ARM Ltd 5 + * Copyright (C) 2010 ST-Ericsson SA 6 + * 7 + * This program is free software; you can redistribute it and/or modify 8 + * it under the terms of the GNU General Public License version 2 as 9 + * published by the Free Software Foundation. 10 + * 11 + * pl08x information required by platform code 12 + * 13 + * Please credit ARM.com 14 + * Documentation: ARM DDI 0196D 15 + * 16 + */ 17 + 18 + #ifndef AMBA_PL08X_H 19 + #define AMBA_PL08X_H 20 + 21 + /* We need sizes of structs from this header */ 22 + #include <linux/dmaengine.h> 23 + #include <linux/interrupt.h> 24 + 25 + /** 26 + * struct pl08x_channel_data - data structure to pass info between 27 + * platform and PL08x driver regarding channel configuration 28 + * @bus_id: name of this device channel, not just a device name since 29 + * devices may have more than one channel e.g. "foo_tx" 30 + * @min_signal: the minimum DMA signal number to be muxed in for this 31 + * channel (for platforms supporting muxed signals). If you have 32 + * static assignments, make sure this is set to the assigned signal 33 + * number, PL08x have 16 possible signals in number 0 thru 15 so 34 + * when these are not enough they often get muxed (in hardware) 35 + * disabling simultaneous use of the same channel for two devices. 36 + * @max_signal: the maximum DMA signal number to be muxed in for 37 + * the channel. Set to the same as min_signal for 38 + * devices with static assignments 39 + * @muxval: a number usually used to poke into some mux regiser to 40 + * mux in the signal to this channel 41 + * @cctl_opt: default options for the channel control register 42 + * @addr: source/target address in physical memory for this DMA channel, 43 + * can be the address of a FIFO register for burst requests for example. 44 + * This can be left undefined if the PrimeCell API is used for configuring 45 + * this. 
46 + * @circular_buffer: whether the buffer passed in is circular and 47 + * shall simply be looped round round (like a record baby round 48 + * round round round) 49 + * @single: the device connected to this channel will request single 50 + * DMA transfers, not bursts. (Bursts are default.) 51 + */ 52 + struct pl08x_channel_data { 53 + char *bus_id; 54 + int min_signal; 55 + int max_signal; 56 + u32 muxval; 57 + u32 cctl; 58 + u32 ccfg; 59 + dma_addr_t addr; 60 + bool circular_buffer; 61 + bool single; 62 + }; 63 + 64 + /** 65 + * Struct pl08x_bus_data - information of source or destination 66 + * busses for a transfer 67 + * @addr: current address 68 + * @maxwidth: the maximum width of a transfer on this bus 69 + * @buswidth: the width of this bus in bytes: 1, 2 or 4 70 + * @fill_bytes: bytes required to fill to the next bus memory 71 + * boundary 72 + */ 73 + struct pl08x_bus_data { 74 + dma_addr_t addr; 75 + u8 maxwidth; 76 + u8 buswidth; 77 + u32 fill_bytes; 78 + }; 79 + 80 + /** 81 + * struct pl08x_phy_chan - holder for the physical channels 82 + * @id: physical index to this channel 83 + * @lock: a lock to use when altering an instance of this struct 84 + * @signal: the physical signal (aka channel) serving this 85 + * physical channel right now 86 + * @serving: the virtual channel currently being served by this 87 + * physical channel 88 + */ 89 + struct pl08x_phy_chan { 90 + unsigned int id; 91 + void __iomem *base; 92 + spinlock_t lock; 93 + int signal; 94 + struct pl08x_dma_chan *serving; 95 + u32 csrc; 96 + u32 cdst; 97 + u32 clli; 98 + u32 cctl; 99 + u32 ccfg; 100 + }; 101 + 102 + /** 103 + * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor 104 + * @llis_bus: DMA memory address (physical) start for the LLIs 105 + * @llis_va: virtual memory address start for the LLIs 106 + */ 107 + struct pl08x_txd { 108 + struct dma_async_tx_descriptor tx; 109 + struct list_head node; 110 + enum dma_data_direction direction; 111 + struct pl08x_bus_data 
srcbus; 112 + struct pl08x_bus_data dstbus; 113 + int len; 114 + dma_addr_t llis_bus; 115 + void *llis_va; 116 + struct pl08x_channel_data *cd; 117 + bool active; 118 + /* 119 + * Settings to be put into the physical channel when we 120 + * trigger this txd 121 + */ 122 + u32 csrc; 123 + u32 cdst; 124 + u32 clli; 125 + u32 cctl; 126 + }; 127 + 128 + /** 129 + * struct pl08x_dma_chan_state - holds the PL08x specific virtual 130 + * channel states 131 + * @PL08X_CHAN_IDLE: the channel is idle 132 + * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport 133 + * channel and is running a transfer on it 134 + * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport 135 + * channel, but the transfer is currently paused 136 + * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport 137 + * channel to become available (only pertains to memcpy channels) 138 + */ 139 + enum pl08x_dma_chan_state { 140 + PL08X_CHAN_IDLE, 141 + PL08X_CHAN_RUNNING, 142 + PL08X_CHAN_PAUSED, 143 + PL08X_CHAN_WAITING, 144 + }; 145 + 146 + /** 147 + * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel 148 + * @chan: wrappped abstract channel 149 + * @phychan: the physical channel utilized by this channel, if there is one 150 + * @tasklet: tasklet scheduled by the IRQ to handle actual work etc 151 + * @name: name of channel 152 + * @cd: channel platform data 153 + * @runtime_addr: address for RX/TX according to the runtime config 154 + * @runtime_direction: current direction of this channel according to 155 + * runtime config 156 + * @lc: last completed transaction on this channel 157 + * @desc_list: queued transactions pending on this channel 158 + * @at: active transaction on this channel 159 + * @lockflags: sometimes we let a lock last between two function calls, 160 + * especially prep/submit, and then we need to store the IRQ flags 161 + * in the channel state, here 162 + * @lock: a lock for this channel data 163 + * @host: a pointer to the 
host (internal use) 164 + * @state: whether the channel is idle, paused, running etc 165 + * @slave: whether this channel is a device (slave) or for memcpy 166 + * @waiting: a TX descriptor on this channel which is waiting for 167 + * a physical channel to become available 168 + */ 169 + struct pl08x_dma_chan { 170 + struct dma_chan chan; 171 + struct pl08x_phy_chan *phychan; 172 + struct tasklet_struct tasklet; 173 + char *name; 174 + struct pl08x_channel_data *cd; 175 + dma_addr_t runtime_addr; 176 + enum dma_data_direction runtime_direction; 177 + atomic_t last_issued; 178 + dma_cookie_t lc; 179 + struct list_head desc_list; 180 + struct pl08x_txd *at; 181 + unsigned long lockflags; 182 + spinlock_t lock; 183 + void *host; 184 + enum pl08x_dma_chan_state state; 185 + bool slave; 186 + struct pl08x_txd *waiting; 187 + }; 188 + 189 + /** 190 + * struct pl08x_platform_data - the platform configuration for the 191 + * PL08x PrimeCells. 192 + * @slave_channels: the channels defined for the different devices on the 193 + * platform, all inclusive, including multiplexed channels. The available 194 + * physical channels will be multiplexed around these signals as they 195 + * are requested, just enumerate all possible channels. 196 + * @get_signal: request a physical signal to be used for a DMA 197 + * transfer immediately: if there is some multiplexing or similar blocking 198 + * the use of the channel the transfer can be denied by returning 199 + * less than zero, else it returns the allocated signal number 200 + * @put_signal: indicate to the platform that this physical signal is not 201 + * running any DMA transfer and multiplexing can be recycled 202 + * @bus_bit_lli: Bit[0] of the address indicated which AHB bus master the 203 + * LLI addresses are on 0/1 Master 1/2. 
204 + */ 205 + struct pl08x_platform_data { 206 + struct pl08x_channel_data *slave_channels; 207 + unsigned int num_slave_channels; 208 + struct pl08x_channel_data memcpy_channel; 209 + int (*get_signal)(struct pl08x_dma_chan *); 210 + void (*put_signal)(struct pl08x_dma_chan *); 211 + }; 212 + 213 + #ifdef CONFIG_AMBA_PL08X 214 + bool pl08x_filter_id(struct dma_chan *chan, void *chan_id); 215 + #else 216 + static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 217 + { 218 + return false; 219 + } 220 + #endif 221 + 222 + #endif /* AMBA_PL08X_H */
+52 -2
include/linux/dmaengine.h
··· 64 64 DMA_PQ_VAL, 65 65 DMA_MEMSET, 66 66 DMA_INTERRUPT, 67 + DMA_SG, 67 68 DMA_PRIVATE, 68 69 DMA_ASYNC_TX, 69 70 DMA_SLAVE, 71 + DMA_CYCLIC, 70 72 }; 71 73 72 74 /* last transaction type for creation of the capabilities mask */ 73 - #define DMA_TX_TYPE_END (DMA_SLAVE + 1) 75 + #define DMA_TX_TYPE_END (DMA_CYCLIC + 1) 74 76 75 77 76 78 /** ··· 121 119 * configuration data in statically from the platform). An additional 122 120 * argument of struct dma_slave_config must be passed in with this 123 121 * command. 122 + * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller 123 + * into external start mode. 124 124 */ 125 125 enum dma_ctrl_cmd { 126 126 DMA_TERMINATE_ALL, 127 127 DMA_PAUSE, 128 128 DMA_RESUME, 129 129 DMA_SLAVE_CONFIG, 130 + FSLDMA_EXTERNAL_START, 130 131 }; 131 132 132 133 /** ··· 427 422 * @device_prep_dma_memset: prepares a memset operation 428 423 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 429 424 * @device_prep_slave_sg: prepares a slave dma operation 425 + * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio. 426 + * The function takes a buffer of size buf_len. The callback function will 427 + * be called after period_len bytes have been transferred. 
430 428 * @device_control: manipulate all pending operations on a channel, returns 431 429 * zero or error code 432 430 * @device_tx_status: poll for transaction completion, the optional ··· 481 473 unsigned long flags); 482 474 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)( 483 475 struct dma_chan *chan, unsigned long flags); 476 + struct dma_async_tx_descriptor *(*device_prep_dma_sg)( 477 + struct dma_chan *chan, 478 + struct scatterlist *dst_sg, unsigned int dst_nents, 479 + struct scatterlist *src_sg, unsigned int src_nents, 480 + unsigned long flags); 484 481 485 482 struct dma_async_tx_descriptor *(*device_prep_slave_sg)( 486 483 struct dma_chan *chan, struct scatterlist *sgl, 487 484 unsigned int sg_len, enum dma_data_direction direction, 488 485 unsigned long flags); 486 + struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)( 487 + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 488 + size_t period_len, enum dma_data_direction direction); 489 489 int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 490 490 unsigned long arg); 491 491 ··· 502 486 struct dma_tx_state *txstate); 503 487 void (*device_issue_pending)(struct dma_chan *chan); 504 488 }; 489 + 490 + static inline int dmaengine_device_control(struct dma_chan *chan, 491 + enum dma_ctrl_cmd cmd, 492 + unsigned long arg) 493 + { 494 + return chan->device->device_control(chan, cmd, arg); 495 + } 496 + 497 + static inline int dmaengine_slave_config(struct dma_chan *chan, 498 + struct dma_slave_config *config) 499 + { 500 + return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, 501 + (unsigned long)config); 502 + } 503 + 504 + static inline int dmaengine_terminate_all(struct dma_chan *chan) 505 + { 506 + return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0); 507 + } 508 + 509 + static inline int dmaengine_pause(struct dma_chan *chan) 510 + { 511 + return dmaengine_device_control(chan, DMA_PAUSE, 0); 512 + } 513 + 514 + static inline int 
dmaengine_resume(struct dma_chan *chan) 515 + { 516 + return dmaengine_device_control(chan, DMA_RESUME, 0); 517 + } 518 + 519 + static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc) 520 + { 521 + return desc->tx_submit(desc); 522 + } 505 523 506 524 static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len) 507 525 { ··· 598 548 return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; 599 549 } 600 550 601 - static unsigned short dma_dev_to_maxpq(struct dma_device *dma) 551 + static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) 602 552 { 603 553 return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; 604 554 }
+3 -13
include/linux/intel_mid_dma.h
··· 27 27 28 28 #include <linux/dmaengine.h> 29 29 30 - /*DMA transaction width, src and dstn width would be same 31 - The DMA length must be width aligned, 32 - for 32 bit width the length must be 32 bit (4bytes) aligned only*/ 33 - enum intel_mid_dma_width { 34 - LNW_DMA_WIDTH_8BIT = 0x0, 35 - LNW_DMA_WIDTH_16BIT = 0x1, 36 - LNW_DMA_WIDTH_32BIT = 0x2, 37 - }; 30 + #define DMA_PREP_CIRCULAR_LIST (1 << 10) 38 31 39 32 /*DMA mode configurations*/ 40 33 enum intel_mid_dma_mode { ··· 62 69 * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem) 63 70 * @src_msize: Source DMA burst size 64 71 * @dst_msize: Dst DMA burst size 72 + * @per_addr: Periphral address 65 73 * @device_instance: DMA peripheral device instance, we can have multiple 66 74 * peripheral device connected to single DMAC 67 75 */ 68 76 struct intel_mid_dma_slave { 69 - enum dma_data_direction dirn; 70 - enum intel_mid_dma_width src_width; /*width of DMA src txn*/ 71 - enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/ 72 77 enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/ 73 78 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ 74 - enum intel_mid_dma_msize src_msize; /*size if src burst*/ 75 - enum intel_mid_dma_msize dst_msize; /*size of dst burst*/ 76 79 unsigned int device_instance; /*0, 1 for periphral instance*/ 80 + struct dma_slave_config dma_slave; 77 81 }; 78 82 79 83 #endif /*__INTEL_MID_DMA_H__*/