Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx: (48 commits)
DMAENGINE: move COH901318 to arch_initcall
dma: imx-dma: fix signedness bug
dma/timberdale: simplify conditional
ste_dma40: remove channel_type
ste_dma40: remove enum for endianess
ste_dma40: remove TIM_FOR_LINK option
ste_dma40: move mode_opt to separate config
ste_dma40: move channel mode to a separate field
ste_dma40: move priority to separate field
ste_dma40: add variable to indicate valid dma_cfg
async_tx: make async_tx channel switching opt-in
move async raid6 test to lib/Kconfig.debug
dmaengine: Add Freescale i.MX1/21/27 DMA driver
intel_mid_dma: change the slave interface
intel_mid_dma: fix the WARN_ONs
intel_mid_dma: Add sg list support to DMA driver
intel_mid_dma: Allow DMAC2 to share interrupt
intel_mid_dma: Allow IRQ sharing
intel_mid_dma: Add runtime PM support
DMAENGINE: define a dummy filter function for ste_dma40
...

+5723 -1166
+2 -6
arch/arm/mach-imx/include/mach/dma-v1.h
···
 
 #define imx_has_dma_v1()	(cpu_is_mx1() || cpu_is_mx21() || cpu_is_mx27())
 
+#include <mach/dma.h>
+
 #define IMX_DMA_CHANNELS  16
 
 #define DMA_MODE_READ		0
···
 int imx_dma_request(int channel, const char *name);
 
 void imx_dma_free(int channel);
-
-enum imx_dma_prio {
-	DMA_PRIO_HIGH = 0,
-	DMA_PRIO_MEDIUM = 1,
-	DMA_PRIO_LOW = 2
-};
 
 int imx_dma_request_by_prio(const char *name, enum imx_dma_prio prio);
 
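The priority enum now lives in <mach/dma.h> (added below), which dma-v1.h pulls in, so existing callers keep compiling. As an illustration only, a minimal sketch of the legacy request-by-priority path; the device name and error handling are invented for the example:

#include <linux/errno.h>
#include <mach/dma.h>
#include <mach/dma-v1.h>

/* Hypothetical probe helper: grab any free v1 DMA channel */
static int example_acquire_channel(void)
{
	int channel;

	channel = imx_dma_request_by_prio("example-dev", DMA_PRIO_MEDIUM);
	if (channel < 0)
		return channel;	/* no free channel */

	/* ... set up and run transfers on 'channel' ... */

	imx_dma_free(channel);
	return 0;
}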
+1 -12
arch/arm/mach-ux500/devices-db8500.c
···
 
 /* Default configuration for physcial memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
-	.channel_type = (STEDMA40_CHANNEL_IN_PHY_MODE |
-			 STEDMA40_LOW_PRIORITY_CHANNEL |
-			 STEDMA40_PCHAN_BASIC_MODE),
+	.mode = STEDMA40_MODE_PHYSICAL,
 	.dir = STEDMA40_MEM_TO_MEM,
 
-	.src_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.src_info.data_width = STEDMA40_BYTE_WIDTH,
 	.src_info.psize = STEDMA40_PSIZE_PHY_1,
 	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 
-	.dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
 	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
 	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 };
 /* Default configuration for logical memcpy */
 struct stedma40_chan_cfg dma40_memcpy_conf_log = {
-	.channel_type = (STEDMA40_CHANNEL_IN_LOG_MODE |
-			 STEDMA40_LOW_PRIORITY_CHANNEL |
-			 STEDMA40_LCHAN_SRC_LOG_DST_LOG |
-			 STEDMA40_NO_TIM_FOR_LINK),
 	.dir = STEDMA40_MEM_TO_MEM,
 
-	.src_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.src_info.data_width = STEDMA40_BYTE_WIDTH,
 	.src_info.psize = STEDMA40_PSIZE_LOG_1,
 	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
 
-	.dst_info.endianess = STEDMA40_LITTLE_ENDIAN,
 	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
 	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
 	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
···
 	.memcpy_len = ARRAY_SIZE(dma40_memcpy_event),
 	.memcpy_conf_phy = &dma40_memcpy_conf_phy,
 	.memcpy_conf_log = &dma40_memcpy_conf_log,
-	.llis_per_log = 8,
 	.disabled_channels = {-1},
 };
 
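For comparison, a hypothetical peripheral-to-memory client configuration written against the new discrete fields; only the field and constant names come from the new ste_dma40.h below, the event line and widths are invented:

#include <plat/ste_dma40.h>

/* Sketch: RX channel config for an imaginary peripheral */
static struct stedma40_chan_cfg example_rx_cfg = {
	.dir		= STEDMA40_PERIPH_TO_MEM,
	.high_priority	= false,		 /* was STEDMA40_LOW_PRIORITY_CHANNEL */
	.mode		= STEDMA40_MODE_LOGICAL, /* was STEDMA40_CHANNEL_IN_LOG_MODE */
	.mode_opt	= STEDMA40_LCHAN_SRC_LOG_DST_LOG,
	.src_dev_type	= 0,			 /* platform event line, made up */
	.dst_dev_type	= STEDMA40_DEV_DST_MEMORY,
	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize	= STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize	= STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

Note that the removed endianess fields need no replacement in such an initializer: the new big_endian flag defaults to false (little endian) in a static initialization.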
+67
arch/arm/plat-mxc/include/mach/dma.h
···
+/*
+ * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_ARCH_MXC_DMA_H__
+#define __ASM_ARCH_MXC_DMA_H__
+
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+
+/*
+ * This enumerates peripheral types. Used for SDMA.
+ */
+enum sdma_peripheral_type {
+	IMX_DMATYPE_SSI,	/* MCU domain SSI */
+	IMX_DMATYPE_SSI_SP,	/* Shared SSI */
+	IMX_DMATYPE_MMC,	/* MMC */
+	IMX_DMATYPE_SDHC,	/* SDHC */
+	IMX_DMATYPE_UART,	/* MCU domain UART */
+	IMX_DMATYPE_UART_SP,	/* Shared UART */
+	IMX_DMATYPE_FIRI,	/* FIRI */
+	IMX_DMATYPE_CSPI,	/* MCU domain CSPI */
+	IMX_DMATYPE_CSPI_SP,	/* Shared CSPI */
+	IMX_DMATYPE_SIM,	/* SIM */
+	IMX_DMATYPE_ATA,	/* ATA */
+	IMX_DMATYPE_CCM,	/* CCM */
+	IMX_DMATYPE_EXT,	/* External peripheral */
+	IMX_DMATYPE_MSHC,	/* Memory Stick Host Controller */
+	IMX_DMATYPE_MSHC_SP,	/* Shared Memory Stick Host Controller */
+	IMX_DMATYPE_DSP,	/* DSP */
+	IMX_DMATYPE_MEMORY,	/* Memory */
+	IMX_DMATYPE_FIFO_MEMORY,/* FIFO type Memory */
+	IMX_DMATYPE_SPDIF,	/* SPDIF */
+	IMX_DMATYPE_IPU_MEMORY,	/* IPU Memory */
+	IMX_DMATYPE_ASRC,	/* ASRC */
+	IMX_DMATYPE_ESAI,	/* ESAI */
+};
+
+enum imx_dma_prio {
+	DMA_PRIO_HIGH = 0,
+	DMA_PRIO_MEDIUM = 1,
+	DMA_PRIO_LOW = 2
+};
+
+struct imx_dma_data {
+	int dma_request;	/* DMA request line */
+	enum sdma_peripheral_type peripheral_type;
+	int priority;
+};
+
+static inline int imx_dma_is_ipu(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "ipu-core");
+}
+
+static inline int imx_dma_is_general_purpose(struct dma_chan *chan)
+{
+	return !strcmp(dev_name(chan->device->dev), "imx-sdma") ||
+		!strcmp(dev_name(chan->device->dev), "imx-dma");
+}
+
+#endif
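A hedged sketch of how a client driver might use these helpers in a dmaengine filter function; the request line number and peripheral type are placeholders, only imx_dma_data, imx_dma_is_general_purpose() and the priority enum come from the header above:

#include <linux/dmaengine.h>
#include <mach/dma.h>

static bool example_dma_filter(struct dma_chan *chan, void *param)
{
	struct imx_dma_data *data = param;

	/* Only accept channels from the generic i.MX DMA engines */
	if (!imx_dma_is_general_purpose(chan))
		return false;

	chan->private = data;	/* hand the request info to the driver */
	return true;
}

static struct dma_chan *example_request_channel(void)
{
	static struct imx_dma_data data = {
		.dma_request	 = 1,			/* placeholder line */
		.peripheral_type = IMX_DMATYPE_SSI,
		.priority	 = DMA_PRIO_HIGH,
	};
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return dma_request_channel(mask, example_dma_filter, &data);
}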
+17
arch/arm/plat-mxc/include/mach/sdma.h
···
+#ifndef __MACH_MXC_SDMA_H__
+#define __MACH_MXC_SDMA_H__
+
+/**
+ * struct sdma_platform_data - platform specific data for SDMA engine
+ *
+ * @sdma_version	The version of this SDMA engine
+ * @cpu_name		used to generate the firmware name
+ * @to_version		CPU Tape out version
+ */
+struct sdma_platform_data {
+	int sdma_version;
+	char *cpu_name;
+	int to_version;
+};
+
+#endif /* __MACH_MXC_SDMA_H__ */
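A machine file would hand one of these to the SDMA platform device. The values below are illustrative only, not taken from the patch set:

#include <mach/sdma.h>

static struct sdma_platform_data example_sdma_pdata = {
	.sdma_version	= 2,		/* engine revision on this SoC (made up) */
	.cpu_name	= "imx51",	/* used to build the firmware file name */
	.to_version	= 3,		/* CPU tape-out revision (made up) */
};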
+60 -74
arch/arm/plat-nomadik/include/plat/ste_dma40.h
···
 /*
- * arch/arm/plat-nomadik/include/plat/ste_dma40.h
- *
- * Copyright (C) ST-Ericsson 2007-2010
+ * Copyright (C) ST-Ericsson SA 2007-2010
+ * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
+ * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
  * License terms: GNU General Public License (GPL) version 2
- * Author: Per Friden <per.friden@stericsson.com>
- * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
  */
 
 
···
 #define STEDMA40_DEV_DST_MEMORY (-1)
 #define STEDMA40_DEV_SRC_MEMORY (-1)
 
-/*
- * Description of bitfields of channel_type variable is available in
- * the info structure.
- */
+enum stedma40_mode {
+	STEDMA40_MODE_LOGICAL = 0,
+	STEDMA40_MODE_PHYSICAL,
+	STEDMA40_MODE_OPERATION,
+};
 
-/* Priority */
-#define STEDMA40_INFO_PRIO_TYPE_POS 2
-#define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS)
-#define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS)
-
-/* Mode */
-#define STEDMA40_INFO_CH_MODE_TYPE_POS 6
-#define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-#define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS)
-
-/* Mode options */
-#define STEDMA40_INFO_CH_MODE_OPT_POS 8
-#define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS)
-#define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS)
-
-/* Interrupt */
-#define STEDMA40_INFO_TIM_POS 10
-#define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS)
-#define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS)
-
-/* End of channel_type configuration */
+enum stedma40_mode_opt {
+	STEDMA40_PCHAN_BASIC_MODE = 0,
+	STEDMA40_LCHAN_SRC_LOG_DST_LOG = 0,
+	STEDMA40_PCHAN_MODULO_MODE,
+	STEDMA40_PCHAN_DOUBLE_DST_MODE,
+	STEDMA40_LCHAN_SRC_PHY_DST_LOG,
+	STEDMA40_LCHAN_SRC_LOG_DST_PHY,
+};
 
 #define STEDMA40_ESIZE_8_BIT  0x0
 #define STEDMA40_ESIZE_16_BIT 0x1
···
 #define STEDMA40_PSIZE_LOG_8  STEDMA40_PSIZE_PHY_8
 #define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16
 
+/* Maximum number of possible physical channels */
+#define STEDMA40_MAX_PHYS 32
+
 enum stedma40_flow_ctrl {
 	STEDMA40_NO_FLOW_CTRL,
 	STEDMA40_FLOW_CTRL,
-};
-
-enum stedma40_endianess {
-	STEDMA40_LITTLE_ENDIAN,
-	STEDMA40_BIG_ENDIAN
 };
 
 enum stedma40_periph_data_width {
···
 	STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
 };
 
-struct stedma40_half_channel_info {
-	enum stedma40_endianess endianess;
-	enum stedma40_periph_data_width data_width;
-	int psize;
-	enum stedma40_flow_ctrl flow_ctrl;
-};
-
 enum stedma40_xfer_dir {
-	STEDMA40_MEM_TO_MEM,
+	STEDMA40_MEM_TO_MEM = 1,
 	STEDMA40_MEM_TO_PERIPH,
 	STEDMA40_PERIPH_TO_MEM,
 	STEDMA40_PERIPH_TO_PERIPH
···
 
 
 /**
+ * struct stedma40_chan_cfg - dst/src channel configuration
+ *
+ * @big_endian: true if the src/dst should be read as big endian
+ * @data_width: Data width of the src/dst hardware
+ * @p_size: Burst size
+ * @flow_ctrl: Flow control on/off.
+ */
+struct stedma40_half_channel_info {
+	bool big_endian;
+	enum stedma40_periph_data_width data_width;
+	int psize;
+	enum stedma40_flow_ctrl flow_ctrl;
+};
+
+/**
  * struct stedma40_chan_cfg - Structure to be filled by client drivers.
  *
  * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH
- * @channel_type: priority, mode, mode options and interrupt configuration.
+ * @high_priority: true if high-priority
+ * @mode: channel mode: physical, logical, or operation
+ * @mode_opt: options for the chosen channel mode
  * @src_dev_type: Src device type
  * @dst_dev_type: Dst device type
  * @src_info: Parameters for dst half channel
  * @dst_info: Parameters for dst half channel
- * @pre_transfer_data: Data to be passed on to the pre_transfer() function.
- * @pre_transfer: Callback used if needed before preparation of transfer.
- * Only called if device is set. size of bytes to transfer
- * (in case of multiple element transfer size is size of the first element).
  *
  *
  * This structure has to be filled by the client drivers.
···
  */
 struct stedma40_chan_cfg {
 	enum stedma40_xfer_dir dir;
-	unsigned int channel_type;
+	bool high_priority;
+	enum stedma40_mode mode;
+	enum stedma40_mode_opt mode_opt;
 	int src_dev_type;
 	int dst_dev_type;
 	struct stedma40_half_channel_info src_info;
 	struct stedma40_half_channel_info dst_info;
-	void *pre_transfer_data;
-	int (*pre_transfer) (struct dma_chan *chan,
-			     void *data,
-			     int size);
 };
 
 /**
···
  * @memcpy_len: length of memcpy
  * @memcpy_conf_phy: default configuration of physical channel memcpy
  * @memcpy_conf_log: default configuration of logical channel memcpy
- * @llis_per_log: number of max linked list items per logical channel
  * @disabled_channels: A vector, ending with -1, that marks physical channels
  *	that are for different reasons not available for the driver.
  */
···
 	u32 memcpy_len;
 	struct stedma40_chan_cfg *memcpy_conf_phy;
 	struct stedma40_chan_cfg *memcpy_conf_log;
-	unsigned int llis_per_log;
-	int disabled_channels[8];
+	int disabled_channels[STEDMA40_MAX_PHYS];
 };
 
-/**
- * setdma40_set_psize() - Used for changing the package size of an
- * already configured dma channel.
- *
- * @chan: dmaengine handle
- * @src_psize: new package side for src. (STEDMA40_PSIZE*)
- * @src_psize: new package side for dst. (STEDMA40_PSIZE*)
- *
- * returns 0 on ok, otherwise negative error number.
- */
-int stedma40_set_psize(struct dma_chan *chan,
-		       int src_psize,
-		       int dst_psize);
+#ifdef CONFIG_STE_DMA40
 
 /**
  * stedma40_filter() - Provides stedma40_chan_cfg to the
···
 	return chan->device->device_prep_slave_sg(chan, &sg, 1,
 						  direction, flags);
 }
+
+#else
+static inline bool stedma40_filter(struct dma_chan *chan, void *data)
+{
+	return false;
+}
+
+static inline struct
+dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
+					    dma_addr_t addr,
+					    unsigned int size,
+					    enum dma_data_direction direction,
+					    unsigned long flags)
+{
+	return NULL;
+}
+#endif
 
 #endif
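With CONFIG_STE_DMA40 enabled, the declared filter can be handed straight to dma_request_channel(); a sketch, where cfg would be a configuration like the one shown for devices-db8500.c above and error handling is elided:

#include <linux/dmaengine.h>
#include <plat/ste_dma40.h>

static struct dma_chan *example_get_dma40_chan(struct stedma40_chan_cfg *cfg)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * stedma40_filter() matches only DMA40 channels and attaches
	 * the configuration; the !CONFIG_STE_DMA40 stub always returns
	 * false, so no channel can ever match in that case.
	 */
	return dma_request_channel(mask, stedma40_filter, cfg);
}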
-137
arch/powerpc/include/asm/fsldma.h
···
-/*
- * Freescale MPC83XX / MPC85XX DMA Controller
- *
- * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef __ARCH_POWERPC_ASM_FSLDMA_H__
-#define __ARCH_POWERPC_ASM_FSLDMA_H__
-
-#include <linux/slab.h>
-#include <linux/dmaengine.h>
-
-/*
- * Definitions for the Freescale DMA controller's DMA_SLAVE implemention
- *
- * The Freescale DMA_SLAVE implementation was designed to handle many-to-many
- * transfers. An example usage would be an accelerated copy between two
- * scatterlists. Another example use would be an accelerated copy from
- * multiple non-contiguous device buffers into a single scatterlist.
- *
- * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This
- * structure contains a list of hardware addresses that should be copied
- * to/from the scatterlist passed into device_prep_slave_sg(). The structure
- * also has some fields to enable hardware-specific features.
- */
-
-/**
- * struct fsl_dma_hw_addr
- * @entry: linked list entry
- * @address: the hardware address
- * @length: length to transfer
- *
- * Holds a single physical hardware address / length pair for use
- * with the DMAEngine DMA_SLAVE API.
- */
-struct fsl_dma_hw_addr {
-	struct list_head entry;
-
-	dma_addr_t address;
-	size_t length;
-};
-
-/**
- * struct fsl_dma_slave
- * @addresses: a linked list of struct fsl_dma_hw_addr structures
- * @request_count: value for DMA request count
- * @src_loop_size: setup and enable constant source-address DMA transfers
- * @dst_loop_size: setup and enable constant destination address DMA transfers
- * @external_start: enable externally started DMA transfers
- * @external_pause: enable externally paused DMA transfers
- *
- * Holds a list of address / length pairs for use with the DMAEngine
- * DMA_SLAVE API implementation for the Freescale DMA controller.
- */
-struct fsl_dma_slave {
-
-	/* List of hardware address/length pairs */
-	struct list_head addresses;
-
-	/* Support for extra controller features */
-	unsigned int request_count;
-	unsigned int src_loop_size;
-	unsigned int dst_loop_size;
-	bool external_start;
-	bool external_pause;
-};
-
-/**
- * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave
- * @slave: the &struct fsl_dma_slave to add to
- * @address: the hardware address to add
- * @length: the length of bytes to transfer from @address
- *
- * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on
- * success, -ERRNO otherwise.
- */
-static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave,
-				       dma_addr_t address, size_t length)
-{
-	struct fsl_dma_hw_addr *addr;
-
-	addr = kzalloc(sizeof(*addr), GFP_ATOMIC);
-	if (!addr)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&addr->entry);
-	addr->address = address;
-	addr->length = length;
-
-	list_add_tail(&addr->entry, &slave->addresses);
-	return 0;
-}
-
-/**
- * fsl_dma_slave_free - free a struct fsl_dma_slave
- * @slave: the struct fsl_dma_slave to free
- *
- * Free a struct fsl_dma_slave and all associated address/length pairs
- */
-static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave)
-{
-	struct fsl_dma_hw_addr *addr, *tmp;
-
-	if (slave) {
-		list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) {
-			list_del(&addr->entry);
-			kfree(addr);
-		}
-
-		kfree(slave);
-	}
-}
-
-/**
- * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave
- * @gfp: the flags to pass to kmalloc when allocating this structure
- *
- * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new
- * struct fsl_dma_slave on success, or NULL on failure.
- */
-static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp)
-{
-	struct fsl_dma_slave *slave;
-
-	slave = kzalloc(sizeof(*slave), gfp);
-	if (!slave)
-		return NULL;
-
-	INIT_LIST_HEAD(&slave->addresses);
-	return slave;
-}
-
-#endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */
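The removed header was a driver-private address-list interface layered on top of DMA_SLAVE. For contrast, a sketch of the generic dmaengine slave path available in this tree (struct dma_slave_config is visible in the amba-pl08x.c diff below); the example_ names, width and burst values are invented, and whether a given controller honours them is driver-specific:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static struct dma_async_tx_descriptor *
example_prep_rx(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, dma_addr_t dev_addr)
{
	struct dma_slave_config config = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= dev_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,	/* made-up burst size */
	};
	int ret;

	/* Program the slave-side parameters through the generic hook */
	ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					   (unsigned long)&config);
	if (ret)
		return NULL;

	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_FROM_DEVICE,
						  DMA_PREP_INTERRUPT);
}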
-13
crypto/async_tx/Kconfig
···
 	select ASYNC_PQ
 	select ASYNC_XOR
 
-config ASYNC_RAID6_TEST
-	tristate "Self test for hardware accelerated raid6 recovery"
-	depends on ASYNC_RAID6_RECOV
-	select ASYNC_MEMCPY
-	---help---
-	  This is a one-shot self test that permutes through the
-	  recovery of all the possible two disk failure scenarios for a
-	  N-disk array. Recovery is performed with the asynchronous
-	  raid6 recovery routines, and will optionally use an offload
-	  engine if one is available.
-
-	  If unsure, say N.
-
 config ASYNC_TX_DISABLE_PQ_VAL_DMA
 	bool
 
+29 -2
drivers/dma/Kconfig
···
 
 	  If unsure, say N.
 
-config ASYNC_TX_DISABLE_CHANNEL_SWITCH
+config ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	bool
+
+config AMBA_PL08X
+	bool "ARM PrimeCell PL080 or PL081 support"
+	depends on ARM_AMBA && EXPERIMENTAL
+	select DMA_ENGINE
+	help
+	  Platform has a PL08x DMAC device
+	  which can provide DMA engine support
 
 config INTEL_IOATDMA
 	tristate "Intel I/OAT DMA support"
 	depends on PCI && X86
 	select DMA_ENGINE
 	select DCA
-	select ASYNC_TX_DISABLE_CHANNEL_SWITCH
 	select ASYNC_TX_DISABLE_PQ_VAL_DMA
 	select ASYNC_TX_DISABLE_XOR_VAL_DMA
 	help
···
 	tristate "Intel IOP ADMA support"
 	depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_IOP13XX
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the Intel(R) IOP Series RAID engines.
 
···
 	tristate "Freescale Elo and Elo Plus DMA support"
 	depends on FSL_SOC
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Freescale Elo and Elo Plus DMA controllers.
 	  The Elo is the DMA controller on some 82xx and 83xx parts, and the
···
 	bool "Marvell XOR engine support"
 	depends on PLAT_ORION
 	select DMA_ENGINE
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	---help---
 	  Enable support for the Marvell XOR engine.
 
···
 	depends on 440SPe || 440SP
 	select DMA_ENGINE
 	select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
+	select ASYNC_TX_ENABLE_CHANNEL_SWITCH
 	help
 	  Enable support for the AMCC PPC440SPe RAID engines.
 
···
 	select DMA_ENGINE
 	help
 	  Enable support for the Topcliff PCH DMA engine.
+
+config IMX_SDMA
+	tristate "i.MX SDMA support"
+	depends on ARCH_MX25 || ARCH_MX3 || ARCH_MX5
+	select DMA_ENGINE
+	help
+	  Support the i.MX SDMA engine. This engine is integrated into
+	  Freescale i.MX25/31/35/51 chips.
+
+config IMX_DMA
+	tristate "i.MX DMA support"
+	depends on ARCH_MX1 || ARCH_MX21 || MACH_MX27
+	select DMA_ENGINE
+	help
+	  Support the i.MX DMA engine. This engine is integrated into
+	  Freescale i.MX1/21/27 chips.
 
 config DMA_ENGINE
 	bool
+3
drivers/dma/Makefile
···
 obj-$(CONFIG_SH_DMAE) += shdma.o
 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o
 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
+obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
+obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
+obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
+2167
drivers/dma/amba-pl08x.c
··· 1 + /* 2 + * Copyright (c) 2006 ARM Ltd. 3 + * Copyright (c) 2010 ST-Ericsson SA 4 + * 5 + * Author: Peter Pearse <peter.pearse@arm.com> 6 + * Author: Linus Walleij <linus.walleij@stericsson.com> 7 + * 8 + * This program is free software; you can redistribute it and/or modify it 9 + * under the terms of the GNU General Public License as published by the Free 10 + * Software Foundation; either version 2 of the License, or (at your option) 11 + * any later version. 12 + * 13 + * This program is distributed in the hope that it will be useful, but WITHOUT 14 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 15 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 16 + * more details. 17 + * 18 + * You should have received a copy of the GNU General Public License along with 19 + * this program; if not, write to the Free Software Foundation, Inc., 59 20 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 21 + * 22 + * The full GNU General Public License is iin this distribution in the 23 + * file called COPYING. 24 + * 25 + * Documentation: ARM DDI 0196G == PL080 26 + * Documentation: ARM DDI 0218E == PL081 27 + * 28 + * PL080 & PL081 both have 16 sets of DMA signals that can be routed to 29 + * any channel. 30 + * 31 + * The PL080 has 8 channels available for simultaneous use, and the PL081 32 + * has only two channels. So on these DMA controllers the number of channels 33 + * and the number of incoming DMA signals are two totally different things. 34 + * It is usually not possible to theoretically handle all physical signals, 35 + * so a multiplexing scheme with possible denial of use is necessary. 36 + * 37 + * The PL080 has a dual bus master, PL081 has a single master. 38 + * 39 + * Memory to peripheral transfer may be visualized as 40 + * Get data from memory to DMAC 41 + * Until no data left 42 + * On burst request from peripheral 43 + * Destination burst from DMAC to peripheral 44 + * Clear burst request 45 + * Raise terminal count interrupt 46 + * 47 + * For peripherals with a FIFO: 48 + * Source burst size == half the depth of the peripheral FIFO 49 + * Destination burst size == the depth of the peripheral FIFO 50 + * 51 + * (Bursts are irrelevant for mem to mem transfers - there are no burst 52 + * signals, the DMA controller will simply facilitate its AHB master.) 53 + * 54 + * ASSUMES default (little) endianness for DMA transfers 55 + * 56 + * Only DMAC flow control is implemented 57 + * 58 + * Global TODO: 59 + * - Break out common code from arch/arm/mach-s3c64xx and share 60 + */ 61 + #include <linux/device.h> 62 + #include <linux/init.h> 63 + #include <linux/module.h> 64 + #include <linux/pci.h> 65 + #include <linux/interrupt.h> 66 + #include <linux/slab.h> 67 + #include <linux/dmapool.h> 68 + #include <linux/amba/bus.h> 69 + #include <linux/dmaengine.h> 70 + #include <linux/amba/pl08x.h> 71 + #include <linux/debugfs.h> 72 + #include <linux/seq_file.h> 73 + 74 + #include <asm/hardware/pl080.h> 75 + #include <asm/dma.h> 76 + #include <asm/mach/dma.h> 77 + #include <asm/atomic.h> 78 + #include <asm/processor.h> 79 + #include <asm/cacheflush.h> 80 + 81 + #define DRIVER_NAME "pl08xdmac" 82 + 83 + /** 84 + * struct vendor_data - vendor-specific config parameters 85 + * for PL08x derivates 86 + * @name: the name of this specific variant 87 + * @channels: the number of channels available in this variant 88 + * @dualmaster: whether this version supports dual AHB masters 89 + * or not. 
90 + */ 91 + struct vendor_data { 92 + char *name; 93 + u8 channels; 94 + bool dualmaster; 95 + }; 96 + 97 + /* 98 + * PL08X private data structures 99 + * An LLI struct - see pl08x TRM 100 + * Note that next uses bit[0] as a bus bit, 101 + * start & end do not - their bus bit info 102 + * is in cctl 103 + */ 104 + struct lli { 105 + dma_addr_t src; 106 + dma_addr_t dst; 107 + dma_addr_t next; 108 + u32 cctl; 109 + }; 110 + 111 + /** 112 + * struct pl08x_driver_data - the local state holder for the PL08x 113 + * @slave: slave engine for this instance 114 + * @memcpy: memcpy engine for this instance 115 + * @base: virtual memory base (remapped) for the PL08x 116 + * @adev: the corresponding AMBA (PrimeCell) bus entry 117 + * @vd: vendor data for this PL08x variant 118 + * @pd: platform data passed in from the platform/machine 119 + * @phy_chans: array of data for the physical channels 120 + * @pool: a pool for the LLI descriptors 121 + * @pool_ctr: counter of LLIs in the pool 122 + * @lock: a spinlock for this struct 123 + */ 124 + struct pl08x_driver_data { 125 + struct dma_device slave; 126 + struct dma_device memcpy; 127 + void __iomem *base; 128 + struct amba_device *adev; 129 + struct vendor_data *vd; 130 + struct pl08x_platform_data *pd; 131 + struct pl08x_phy_chan *phy_chans; 132 + struct dma_pool *pool; 133 + int pool_ctr; 134 + spinlock_t lock; 135 + }; 136 + 137 + /* 138 + * PL08X specific defines 139 + */ 140 + 141 + /* 142 + * Memory boundaries: the manual for PL08x says that the controller 143 + * cannot read past a 1KiB boundary, so these defines are used to 144 + * create transfer LLIs that do not cross such boundaries. 145 + */ 146 + #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ 147 + #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) 148 + 149 + /* Minimum period between work queue runs */ 150 + #define PL08X_WQ_PERIODMIN 20 151 + 152 + /* Size (bytes) of each LLI buffer allocated for one transfer */ 153 + # define PL08X_LLI_TSFR_SIZE 0x2000 154 + 155 + /* Maximimum times we call dma_pool_alloc on this pool without freeing */ 156 + #define PL08X_MAX_ALLOCS 0x40 157 + #define MAX_NUM_TSFR_LLIS (PL08X_LLI_TSFR_SIZE/sizeof(struct lli)) 158 + #define PL08X_ALIGN 8 159 + 160 + static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan) 161 + { 162 + return container_of(chan, struct pl08x_dma_chan, chan); 163 + } 164 + 165 + /* 166 + * Physical channel handling 167 + */ 168 + 169 + /* Whether a certain channel is busy or not */ 170 + static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch) 171 + { 172 + unsigned int val; 173 + 174 + val = readl(ch->base + PL080_CH_CONFIG); 175 + return val & PL080_CONFIG_ACTIVE; 176 + } 177 + 178 + /* 179 + * Set the initial DMA register values i.e. 
those for the first LLI 180 + * The next lli pointer and the configuration interrupt bit have 181 + * been set when the LLIs were constructed 182 + */ 183 + static void pl08x_set_cregs(struct pl08x_driver_data *pl08x, 184 + struct pl08x_phy_chan *ch) 185 + { 186 + /* Wait for channel inactive */ 187 + while (pl08x_phy_channel_busy(ch)) 188 + ; 189 + 190 + dev_vdbg(&pl08x->adev->dev, 191 + "WRITE channel %d: csrc=%08x, cdst=%08x, " 192 + "cctl=%08x, clli=%08x, ccfg=%08x\n", 193 + ch->id, 194 + ch->csrc, 195 + ch->cdst, 196 + ch->cctl, 197 + ch->clli, 198 + ch->ccfg); 199 + 200 + writel(ch->csrc, ch->base + PL080_CH_SRC_ADDR); 201 + writel(ch->cdst, ch->base + PL080_CH_DST_ADDR); 202 + writel(ch->clli, ch->base + PL080_CH_LLI); 203 + writel(ch->cctl, ch->base + PL080_CH_CONTROL); 204 + writel(ch->ccfg, ch->base + PL080_CH_CONFIG); 205 + } 206 + 207 + static inline void pl08x_config_phychan_for_txd(struct pl08x_dma_chan *plchan) 208 + { 209 + struct pl08x_channel_data *cd = plchan->cd; 210 + struct pl08x_phy_chan *phychan = plchan->phychan; 211 + struct pl08x_txd *txd = plchan->at; 212 + 213 + /* Copy the basic control register calculated at transfer config */ 214 + phychan->csrc = txd->csrc; 215 + phychan->cdst = txd->cdst; 216 + phychan->clli = txd->clli; 217 + phychan->cctl = txd->cctl; 218 + 219 + /* Assign the signal to the proper control registers */ 220 + phychan->ccfg = cd->ccfg; 221 + phychan->ccfg &= ~PL080_CONFIG_SRC_SEL_MASK; 222 + phychan->ccfg &= ~PL080_CONFIG_DST_SEL_MASK; 223 + /* If it wasn't set from AMBA, ignore it */ 224 + if (txd->direction == DMA_TO_DEVICE) 225 + /* Select signal as destination */ 226 + phychan->ccfg |= 227 + (phychan->signal << PL080_CONFIG_DST_SEL_SHIFT); 228 + else if (txd->direction == DMA_FROM_DEVICE) 229 + /* Select signal as source */ 230 + phychan->ccfg |= 231 + (phychan->signal << PL080_CONFIG_SRC_SEL_SHIFT); 232 + /* Always enable error interrupts */ 233 + phychan->ccfg |= PL080_CONFIG_ERR_IRQ_MASK; 234 + /* Always enable terminal interrupts */ 235 + phychan->ccfg |= PL080_CONFIG_TC_IRQ_MASK; 236 + } 237 + 238 + /* 239 + * Enable the DMA channel 240 + * Assumes all other configuration bits have been set 241 + * as desired before this code is called 242 + */ 243 + static void pl08x_enable_phy_chan(struct pl08x_driver_data *pl08x, 244 + struct pl08x_phy_chan *ch) 245 + { 246 + u32 val; 247 + 248 + /* 249 + * Do not access config register until channel shows as disabled 250 + */ 251 + while (readl(pl08x->base + PL080_EN_CHAN) & (1 << ch->id)) 252 + ; 253 + 254 + /* 255 + * Do not access config register until channel shows as inactive 256 + */ 257 + val = readl(ch->base + PL080_CH_CONFIG); 258 + while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE)) 259 + val = readl(ch->base + PL080_CH_CONFIG); 260 + 261 + writel(val | PL080_CONFIG_ENABLE, ch->base + PL080_CH_CONFIG); 262 + } 263 + 264 + /* 265 + * Overall DMAC remains enabled always. 266 + * 267 + * Disabling individual channels could lose data. 
268 + * 269 + * Disable the peripheral DMA after disabling the DMAC 270 + * in order to allow the DMAC FIFO to drain, and 271 + * hence allow the channel to show inactive 272 + * 273 + */ 274 + static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch) 275 + { 276 + u32 val; 277 + 278 + /* Set the HALT bit and wait for the FIFO to drain */ 279 + val = readl(ch->base + PL080_CH_CONFIG); 280 + val |= PL080_CONFIG_HALT; 281 + writel(val, ch->base + PL080_CH_CONFIG); 282 + 283 + /* Wait for channel inactive */ 284 + while (pl08x_phy_channel_busy(ch)) 285 + ; 286 + } 287 + 288 + static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch) 289 + { 290 + u32 val; 291 + 292 + /* Clear the HALT bit */ 293 + val = readl(ch->base + PL080_CH_CONFIG); 294 + val &= ~PL080_CONFIG_HALT; 295 + writel(val, ch->base + PL080_CH_CONFIG); 296 + } 297 + 298 + 299 + /* Stops the channel */ 300 + static void pl08x_stop_phy_chan(struct pl08x_phy_chan *ch) 301 + { 302 + u32 val; 303 + 304 + pl08x_pause_phy_chan(ch); 305 + 306 + /* Disable channel */ 307 + val = readl(ch->base + PL080_CH_CONFIG); 308 + val &= ~PL080_CONFIG_ENABLE; 309 + val &= ~PL080_CONFIG_ERR_IRQ_MASK; 310 + val &= ~PL080_CONFIG_TC_IRQ_MASK; 311 + writel(val, ch->base + PL080_CH_CONFIG); 312 + } 313 + 314 + static inline u32 get_bytes_in_cctl(u32 cctl) 315 + { 316 + /* The source width defines the number of bytes */ 317 + u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK; 318 + 319 + switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) { 320 + case PL080_WIDTH_8BIT: 321 + break; 322 + case PL080_WIDTH_16BIT: 323 + bytes *= 2; 324 + break; 325 + case PL080_WIDTH_32BIT: 326 + bytes *= 4; 327 + break; 328 + } 329 + return bytes; 330 + } 331 + 332 + /* The channel should be paused when calling this */ 333 + static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan) 334 + { 335 + struct pl08x_phy_chan *ch; 336 + struct pl08x_txd *txdi = NULL; 337 + struct pl08x_txd *txd; 338 + unsigned long flags; 339 + u32 bytes = 0; 340 + 341 + spin_lock_irqsave(&plchan->lock, flags); 342 + 343 + ch = plchan->phychan; 344 + txd = plchan->at; 345 + 346 + /* 347 + * Next follow the LLIs to get the number of pending bytes in the 348 + * currently active transaction. 
349 + */ 350 + if (ch && txd) { 351 + struct lli *llis_va = txd->llis_va; 352 + struct lli *llis_bus = (struct lli *) txd->llis_bus; 353 + u32 clli = readl(ch->base + PL080_CH_LLI); 354 + 355 + /* First get the bytes in the current active LLI */ 356 + bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL)); 357 + 358 + if (clli) { 359 + int i = 0; 360 + 361 + /* Forward to the LLI pointed to by clli */ 362 + while ((clli != (u32) &(llis_bus[i])) && 363 + (i < MAX_NUM_TSFR_LLIS)) 364 + i++; 365 + 366 + while (clli) { 367 + bytes += get_bytes_in_cctl(llis_va[i].cctl); 368 + /* 369 + * A clli of 0x00000000 will terminate the 370 + * LLI list 371 + */ 372 + clli = llis_va[i].next; 373 + i++; 374 + } 375 + } 376 + } 377 + 378 + /* Sum up all queued transactions */ 379 + if (!list_empty(&plchan->desc_list)) { 380 + list_for_each_entry(txdi, &plchan->desc_list, node) { 381 + bytes += txdi->len; 382 + } 383 + 384 + } 385 + 386 + spin_unlock_irqrestore(&plchan->lock, flags); 387 + 388 + return bytes; 389 + } 390 + 391 + /* 392 + * Allocate a physical channel for a virtual channel 393 + */ 394 + static struct pl08x_phy_chan * 395 + pl08x_get_phy_channel(struct pl08x_driver_data *pl08x, 396 + struct pl08x_dma_chan *virt_chan) 397 + { 398 + struct pl08x_phy_chan *ch = NULL; 399 + unsigned long flags; 400 + int i; 401 + 402 + /* 403 + * Try to locate a physical channel to be used for 404 + * this transfer. If all are taken return NULL and 405 + * the requester will have to cope by using some fallback 406 + * PIO mode or retrying later. 407 + */ 408 + for (i = 0; i < pl08x->vd->channels; i++) { 409 + ch = &pl08x->phy_chans[i]; 410 + 411 + spin_lock_irqsave(&ch->lock, flags); 412 + 413 + if (!ch->serving) { 414 + ch->serving = virt_chan; 415 + ch->signal = -1; 416 + spin_unlock_irqrestore(&ch->lock, flags); 417 + break; 418 + } 419 + 420 + spin_unlock_irqrestore(&ch->lock, flags); 421 + } 422 + 423 + if (i == pl08x->vd->channels) { 424 + /* No physical channel available, cope with it */ 425 + return NULL; 426 + } 427 + 428 + return ch; 429 + } 430 + 431 + static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x, 432 + struct pl08x_phy_chan *ch) 433 + { 434 + unsigned long flags; 435 + 436 + /* Stop the channel and clear its interrupts */ 437 + pl08x_stop_phy_chan(ch); 438 + writel((1 << ch->id), pl08x->base + PL080_ERR_CLEAR); 439 + writel((1 << ch->id), pl08x->base + PL080_TC_CLEAR); 440 + 441 + /* Mark it as free */ 442 + spin_lock_irqsave(&ch->lock, flags); 443 + ch->serving = NULL; 444 + spin_unlock_irqrestore(&ch->lock, flags); 445 + } 446 + 447 + /* 448 + * LLI handling 449 + */ 450 + 451 + static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded) 452 + { 453 + switch (coded) { 454 + case PL080_WIDTH_8BIT: 455 + return 1; 456 + case PL080_WIDTH_16BIT: 457 + return 2; 458 + case PL080_WIDTH_32BIT: 459 + return 4; 460 + default: 461 + break; 462 + } 463 + BUG(); 464 + return 0; 465 + } 466 + 467 + static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth, 468 + u32 tsize) 469 + { 470 + u32 retbits = cctl; 471 + 472 + /* Remove all src, dst and transfersize bits */ 473 + retbits &= ~PL080_CONTROL_DWIDTH_MASK; 474 + retbits &= ~PL080_CONTROL_SWIDTH_MASK; 475 + retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK; 476 + 477 + /* Then set the bits according to the parameters */ 478 + switch (srcwidth) { 479 + case 1: 480 + retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT; 481 + break; 482 + case 2: 483 + retbits |= PL080_WIDTH_16BIT << 
PL080_CONTROL_SWIDTH_SHIFT; 484 + break; 485 + case 4: 486 + retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT; 487 + break; 488 + default: 489 + BUG(); 490 + break; 491 + } 492 + 493 + switch (dstwidth) { 494 + case 1: 495 + retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT; 496 + break; 497 + case 2: 498 + retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT; 499 + break; 500 + case 4: 501 + retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT; 502 + break; 503 + default: 504 + BUG(); 505 + break; 506 + } 507 + 508 + retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT; 509 + return retbits; 510 + } 511 + 512 + /* 513 + * Autoselect a master bus to use for the transfer 514 + * this prefers the destination bus if both available 515 + * if fixed address on one bus the other will be chosen 516 + */ 517 + void pl08x_choose_master_bus(struct pl08x_bus_data *src_bus, 518 + struct pl08x_bus_data *dst_bus, struct pl08x_bus_data **mbus, 519 + struct pl08x_bus_data **sbus, u32 cctl) 520 + { 521 + if (!(cctl & PL080_CONTROL_DST_INCR)) { 522 + *mbus = src_bus; 523 + *sbus = dst_bus; 524 + } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 525 + *mbus = dst_bus; 526 + *sbus = src_bus; 527 + } else { 528 + if (dst_bus->buswidth == 4) { 529 + *mbus = dst_bus; 530 + *sbus = src_bus; 531 + } else if (src_bus->buswidth == 4) { 532 + *mbus = src_bus; 533 + *sbus = dst_bus; 534 + } else if (dst_bus->buswidth == 2) { 535 + *mbus = dst_bus; 536 + *sbus = src_bus; 537 + } else if (src_bus->buswidth == 2) { 538 + *mbus = src_bus; 539 + *sbus = dst_bus; 540 + } else { 541 + /* src_bus->buswidth == 1 */ 542 + *mbus = dst_bus; 543 + *sbus = src_bus; 544 + } 545 + } 546 + } 547 + 548 + /* 549 + * Fills in one LLI for a certain transfer descriptor 550 + * and advance the counter 551 + */ 552 + int pl08x_fill_lli_for_desc(struct pl08x_driver_data *pl08x, 553 + struct pl08x_txd *txd, int num_llis, int len, 554 + u32 cctl, u32 *remainder) 555 + { 556 + struct lli *llis_va = txd->llis_va; 557 + struct lli *llis_bus = (struct lli *) txd->llis_bus; 558 + 559 + BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS); 560 + 561 + llis_va[num_llis].cctl = cctl; 562 + llis_va[num_llis].src = txd->srcbus.addr; 563 + llis_va[num_llis].dst = txd->dstbus.addr; 564 + 565 + /* 566 + * On versions with dual masters, you can optionally AND on 567 + * PL080_LLI_LM_AHB2 to the LLI to tell the hardware to read 568 + * in new LLIs with that controller, but we always try to 569 + * choose AHB1 to point into memory. The idea is to have AHB2 570 + * fixed on the peripheral and AHB1 messing around in the 571 + * memory. So we don't manipulate this bit currently. 
572 + */ 573 + 574 + llis_va[num_llis].next = 575 + (dma_addr_t)((u32) &(llis_bus[num_llis + 1])); 576 + 577 + if (cctl & PL080_CONTROL_SRC_INCR) 578 + txd->srcbus.addr += len; 579 + if (cctl & PL080_CONTROL_DST_INCR) 580 + txd->dstbus.addr += len; 581 + 582 + *remainder -= len; 583 + 584 + return num_llis + 1; 585 + } 586 + 587 + /* 588 + * Return number of bytes to fill to boundary, or len 589 + */ 590 + static inline u32 pl08x_pre_boundary(u32 addr, u32 len) 591 + { 592 + u32 boundary; 593 + 594 + boundary = ((addr >> PL08X_BOUNDARY_SHIFT) + 1) 595 + << PL08X_BOUNDARY_SHIFT; 596 + 597 + if (boundary < addr + len) 598 + return boundary - addr; 599 + else 600 + return len; 601 + } 602 + 603 + /* 604 + * This fills in the table of LLIs for the transfer descriptor 605 + * Note that we assume we never have to change the burst sizes 606 + * Return 0 for error 607 + */ 608 + static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x, 609 + struct pl08x_txd *txd) 610 + { 611 + struct pl08x_channel_data *cd = txd->cd; 612 + struct pl08x_bus_data *mbus, *sbus; 613 + u32 remainder; 614 + int num_llis = 0; 615 + u32 cctl; 616 + int max_bytes_per_lli; 617 + int total_bytes = 0; 618 + struct lli *llis_va; 619 + struct lli *llis_bus; 620 + 621 + if (!txd) { 622 + dev_err(&pl08x->adev->dev, "%s no descriptor\n", __func__); 623 + return 0; 624 + } 625 + 626 + txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 627 + &txd->llis_bus); 628 + if (!txd->llis_va) { 629 + dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 630 + return 0; 631 + } 632 + 633 + pl08x->pool_ctr++; 634 + 635 + /* 636 + * Initialize bus values for this transfer 637 + * from the passed optimal values 638 + */ 639 + if (!cd) { 640 + dev_err(&pl08x->adev->dev, "%s no channel data\n", __func__); 641 + return 0; 642 + } 643 + 644 + /* Get the default CCTL from the platform data */ 645 + cctl = cd->cctl; 646 + 647 + /* 648 + * On the PL080 we have two bus masters and we 649 + * should select one for source and one for 650 + * destination. We try to use AHB2 for the 651 + * bus which does not increment (typically the 652 + * peripheral) else we just choose something. 
653 + */ 654 + cctl &= ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); 655 + if (pl08x->vd->dualmaster) { 656 + if (cctl & PL080_CONTROL_SRC_INCR) 657 + /* Source increments, use AHB2 for destination */ 658 + cctl |= PL080_CONTROL_DST_AHB2; 659 + else if (cctl & PL080_CONTROL_DST_INCR) 660 + /* Destination increments, use AHB2 for source */ 661 + cctl |= PL080_CONTROL_SRC_AHB2; 662 + else 663 + /* Just pick something, source AHB1 dest AHB2 */ 664 + cctl |= PL080_CONTROL_DST_AHB2; 665 + } 666 + 667 + /* Find maximum width of the source bus */ 668 + txd->srcbus.maxwidth = 669 + pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >> 670 + PL080_CONTROL_SWIDTH_SHIFT); 671 + 672 + /* Find maximum width of the destination bus */ 673 + txd->dstbus.maxwidth = 674 + pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 675 + PL080_CONTROL_DWIDTH_SHIFT); 676 + 677 + /* Set up the bus widths to the maximum */ 678 + txd->srcbus.buswidth = txd->srcbus.maxwidth; 679 + txd->dstbus.buswidth = txd->dstbus.maxwidth; 680 + dev_vdbg(&pl08x->adev->dev, 681 + "%s source bus is %d bytes wide, dest bus is %d bytes wide\n", 682 + __func__, txd->srcbus.buswidth, txd->dstbus.buswidth); 683 + 684 + 685 + /* 686 + * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 687 + */ 688 + max_bytes_per_lli = min(txd->srcbus.buswidth, txd->dstbus.buswidth) * 689 + PL080_CONTROL_TRANSFER_SIZE_MASK; 690 + dev_vdbg(&pl08x->adev->dev, 691 + "%s max bytes per lli = %d\n", 692 + __func__, max_bytes_per_lli); 693 + 694 + /* We need to count this down to zero */ 695 + remainder = txd->len; 696 + dev_vdbg(&pl08x->adev->dev, 697 + "%s remainder = %d\n", 698 + __func__, remainder); 699 + 700 + /* 701 + * Choose bus to align to 702 + * - prefers destination bus if both available 703 + * - if fixed address on one bus chooses other 704 + * - modifies cctl to choose an apropriate master 705 + */ 706 + pl08x_choose_master_bus(&txd->srcbus, &txd->dstbus, 707 + &mbus, &sbus, cctl); 708 + 709 + 710 + /* 711 + * The lowest bit of the LLI register 712 + * is also used to indicate which master to 713 + * use for reading the LLIs. 
714 + */ 715 + 716 + if (txd->len < mbus->buswidth) { 717 + /* 718 + * Less than a bus width available 719 + * - send as single bytes 720 + */ 721 + while (remainder) { 722 + dev_vdbg(&pl08x->adev->dev, 723 + "%s single byte LLIs for a transfer of " 724 + "less than a bus width (remain %08x)\n", 725 + __func__, remainder); 726 + cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 727 + num_llis = 728 + pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 1, 729 + cctl, &remainder); 730 + total_bytes++; 731 + } 732 + } else { 733 + /* 734 + * Make one byte LLIs until master bus is aligned 735 + * - slave will then be aligned also 736 + */ 737 + while ((mbus->addr) % (mbus->buswidth)) { 738 + dev_vdbg(&pl08x->adev->dev, 739 + "%s adjustment lli for less than bus width " 740 + "(remain %08x)\n", 741 + __func__, remainder); 742 + cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 743 + num_llis = pl08x_fill_lli_for_desc 744 + (pl08x, txd, num_llis, 1, cctl, &remainder); 745 + total_bytes++; 746 + } 747 + 748 + /* 749 + * Master now aligned 750 + * - if slave is not then we must set its width down 751 + */ 752 + if (sbus->addr % sbus->buswidth) { 753 + dev_dbg(&pl08x->adev->dev, 754 + "%s set down bus width to one byte\n", 755 + __func__); 756 + 757 + sbus->buswidth = 1; 758 + } 759 + 760 + /* 761 + * Make largest possible LLIs until less than one bus 762 + * width left 763 + */ 764 + while (remainder > (mbus->buswidth - 1)) { 765 + int lli_len, target_len; 766 + int tsize; 767 + int odd_bytes; 768 + 769 + /* 770 + * If enough left try to send max possible, 771 + * otherwise try to send the remainder 772 + */ 773 + target_len = remainder; 774 + if (remainder > max_bytes_per_lli) 775 + target_len = max_bytes_per_lli; 776 + 777 + /* 778 + * Set bus lengths for incrementing busses 779 + * to number of bytes which fill to next memory 780 + * boundary 781 + */ 782 + if (cctl & PL080_CONTROL_SRC_INCR) 783 + txd->srcbus.fill_bytes = 784 + pl08x_pre_boundary( 785 + txd->srcbus.addr, 786 + remainder); 787 + else 788 + txd->srcbus.fill_bytes = 789 + max_bytes_per_lli; 790 + 791 + if (cctl & PL080_CONTROL_DST_INCR) 792 + txd->dstbus.fill_bytes = 793 + pl08x_pre_boundary( 794 + txd->dstbus.addr, 795 + remainder); 796 + else 797 + txd->dstbus.fill_bytes = 798 + max_bytes_per_lli; 799 + 800 + /* 801 + * Find the nearest 802 + */ 803 + lli_len = min(txd->srcbus.fill_bytes, 804 + txd->dstbus.fill_bytes); 805 + 806 + BUG_ON(lli_len > remainder); 807 + 808 + if (lli_len <= 0) { 809 + dev_err(&pl08x->adev->dev, 810 + "%s lli_len is %d, <= 0\n", 811 + __func__, lli_len); 812 + return 0; 813 + } 814 + 815 + if (lli_len == target_len) { 816 + /* 817 + * Can send what we wanted 818 + */ 819 + /* 820 + * Maintain alignment 821 + */ 822 + lli_len = (lli_len/mbus->buswidth) * 823 + mbus->buswidth; 824 + odd_bytes = 0; 825 + } else { 826 + /* 827 + * So now we know how many bytes to transfer 828 + * to get to the nearest boundary 829 + * The next lli will past the boundary 830 + * - however we may be working to a boundary 831 + * on the slave bus 832 + * We need to ensure the master stays aligned 833 + */ 834 + odd_bytes = lli_len % mbus->buswidth; 835 + /* 836 + * - and that we are working in multiples 837 + * of the bus widths 838 + */ 839 + lli_len -= odd_bytes; 840 + 841 + } 842 + 843 + if (lli_len) { 844 + /* 845 + * Check against minimum bus alignment: 846 + * Calculate actual transfer size in relation 847 + * to bus width an get a maximum remainder of 848 + * the smallest bus width - 1 849 + */ 850 + /* FIXME: use round_down()? 
*/ 851 + tsize = lli_len / min(mbus->buswidth, 852 + sbus->buswidth); 853 + lli_len = tsize * min(mbus->buswidth, 854 + sbus->buswidth); 855 + 856 + if (target_len != lli_len) { 857 + dev_vdbg(&pl08x->adev->dev, 858 + "%s can't send what we want. Desired %08x, lli of %08x bytes in txd of %08x\n", 859 + __func__, target_len, lli_len, txd->len); 860 + } 861 + 862 + cctl = pl08x_cctl_bits(cctl, 863 + txd->srcbus.buswidth, 864 + txd->dstbus.buswidth, 865 + tsize); 866 + 867 + dev_vdbg(&pl08x->adev->dev, 868 + "%s fill lli with single lli chunk of size %08x (remainder %08x)\n", 869 + __func__, lli_len, remainder); 870 + num_llis = pl08x_fill_lli_for_desc(pl08x, txd, 871 + num_llis, lli_len, cctl, 872 + &remainder); 873 + total_bytes += lli_len; 874 + } 875 + 876 + 877 + if (odd_bytes) { 878 + /* 879 + * Creep past the boundary, 880 + * maintaining master alignment 881 + */ 882 + int j; 883 + for (j = 0; (j < mbus->buswidth) 884 + && (remainder); j++) { 885 + cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 886 + dev_vdbg(&pl08x->adev->dev, 887 + "%s align with boundardy, single byte (remain %08x)\n", 888 + __func__, remainder); 889 + num_llis = 890 + pl08x_fill_lli_for_desc(pl08x, 891 + txd, num_llis, 1, 892 + cctl, &remainder); 893 + total_bytes++; 894 + } 895 + } 896 + } 897 + 898 + /* 899 + * Send any odd bytes 900 + */ 901 + if (remainder < 0) { 902 + dev_err(&pl08x->adev->dev, "%s remainder not fitted 0x%08x bytes\n", 903 + __func__, remainder); 904 + return 0; 905 + } 906 + 907 + while (remainder) { 908 + cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 909 + dev_vdbg(&pl08x->adev->dev, 910 + "%s align with boundardy, single odd byte (remain %d)\n", 911 + __func__, remainder); 912 + num_llis = pl08x_fill_lli_for_desc(pl08x, txd, num_llis, 913 + 1, cctl, &remainder); 914 + total_bytes++; 915 + } 916 + } 917 + if (total_bytes != txd->len) { 918 + dev_err(&pl08x->adev->dev, 919 + "%s size of encoded lli:s don't match total txd, transferred 0x%08x from size 0x%08x\n", 920 + __func__, total_bytes, txd->len); 921 + return 0; 922 + } 923 + 924 + if (num_llis >= MAX_NUM_TSFR_LLIS) { 925 + dev_err(&pl08x->adev->dev, 926 + "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 927 + __func__, (u32) MAX_NUM_TSFR_LLIS); 928 + return 0; 929 + } 930 + /* 931 + * Decide whether this is a loop or a terminated transfer 932 + */ 933 + llis_va = txd->llis_va; 934 + llis_bus = (struct lli *) txd->llis_bus; 935 + 936 + if (cd->circular_buffer) { 937 + /* 938 + * Loop the circular buffer so that the next element 939 + * points back to the beginning of the LLI. 940 + */ 941 + llis_va[num_llis - 1].next = 942 + (dma_addr_t)((unsigned int)&(llis_bus[0])); 943 + } else { 944 + /* 945 + * On non-circular buffers, the final LLI terminates 946 + * the LLI. 
947 + */ 948 + llis_va[num_llis - 1].next = 0; 949 + /* 950 + * The final LLI element shall also fire an interrupt 951 + */ 952 + llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN; 953 + } 954 + 955 + /* Now store the channel register values */ 956 + txd->csrc = llis_va[0].src; 957 + txd->cdst = llis_va[0].dst; 958 + if (num_llis > 1) 959 + txd->clli = llis_va[0].next; 960 + else 961 + txd->clli = 0; 962 + 963 + txd->cctl = llis_va[0].cctl; 964 + /* ccfg will be set at physical channel allocation time */ 965 + 966 + #ifdef VERBOSE_DEBUG 967 + { 968 + int i; 969 + 970 + for (i = 0; i < num_llis; i++) { 971 + dev_vdbg(&pl08x->adev->dev, 972 + "lli %d @%p: csrc=%08x, cdst=%08x, cctl=%08x, clli=%08x\n", 973 + i, 974 + &llis_va[i], 975 + llis_va[i].src, 976 + llis_va[i].dst, 977 + llis_va[i].cctl, 978 + llis_va[i].next 979 + ); 980 + } 981 + } 982 + #endif 983 + 984 + return num_llis; 985 + } 986 + 987 + /* You should call this with the struct pl08x lock held */ 988 + static void pl08x_free_txd(struct pl08x_driver_data *pl08x, 989 + struct pl08x_txd *txd) 990 + { 991 + if (!txd) 992 + dev_err(&pl08x->adev->dev, 993 + "%s no descriptor to free\n", 994 + __func__); 995 + 996 + /* Free the LLI */ 997 + dma_pool_free(pl08x->pool, txd->llis_va, 998 + txd->llis_bus); 999 + 1000 + pl08x->pool_ctr--; 1001 + 1002 + kfree(txd); 1003 + } 1004 + 1005 + static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x, 1006 + struct pl08x_dma_chan *plchan) 1007 + { 1008 + struct pl08x_txd *txdi = NULL; 1009 + struct pl08x_txd *next; 1010 + 1011 + if (!list_empty(&plchan->desc_list)) { 1012 + list_for_each_entry_safe(txdi, 1013 + next, &plchan->desc_list, node) { 1014 + list_del(&txdi->node); 1015 + pl08x_free_txd(pl08x, txdi); 1016 + } 1017 + 1018 + } 1019 + } 1020 + 1021 + /* 1022 + * The DMA ENGINE API 1023 + */ 1024 + static int pl08x_alloc_chan_resources(struct dma_chan *chan) 1025 + { 1026 + return 0; 1027 + } 1028 + 1029 + static void pl08x_free_chan_resources(struct dma_chan *chan) 1030 + { 1031 + } 1032 + 1033 + /* 1034 + * This should be called with the channel plchan->lock held 1035 + */ 1036 + static int prep_phy_channel(struct pl08x_dma_chan *plchan, 1037 + struct pl08x_txd *txd) 1038 + { 1039 + struct pl08x_driver_data *pl08x = plchan->host; 1040 + struct pl08x_phy_chan *ch; 1041 + int ret; 1042 + 1043 + /* Check if we already have a channel */ 1044 + if (plchan->phychan) 1045 + return 0; 1046 + 1047 + ch = pl08x_get_phy_channel(pl08x, plchan); 1048 + if (!ch) { 1049 + /* No physical channel available, cope with it */ 1050 + dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name); 1051 + return -EBUSY; 1052 + } 1053 + 1054 + /* 1055 + * OK we have a physical channel: for memcpy() this is all we 1056 + * need, but for slaves the physical signals may be muxed! 1057 + * Can the platform allow us to use this channel? 
1058 + */ 1059 + if (plchan->slave && 1060 + ch->signal < 0 && 1061 + pl08x->pd->get_signal) { 1062 + ret = pl08x->pd->get_signal(plchan); 1063 + if (ret < 0) { 1064 + dev_dbg(&pl08x->adev->dev, 1065 + "unable to use physical channel %d for transfer on %s due to platform restrictions\n", 1066 + ch->id, plchan->name); 1067 + /* Release physical channel & return */ 1068 + pl08x_put_phy_channel(pl08x, ch); 1069 + return -EBUSY; 1070 + } 1071 + ch->signal = ret; 1072 + } 1073 + 1074 + dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n", 1075 + ch->id, 1076 + ch->signal, 1077 + plchan->name); 1078 + 1079 + plchan->phychan = ch; 1080 + 1081 + return 0; 1082 + } 1083 + 1084 + static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx) 1085 + { 1086 + struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan); 1087 + 1088 + atomic_inc(&plchan->last_issued); 1089 + tx->cookie = atomic_read(&plchan->last_issued); 1090 + /* This unlock follows the lock in the prep() function */ 1091 + spin_unlock_irqrestore(&plchan->lock, plchan->lockflags); 1092 + 1093 + return tx->cookie; 1094 + } 1095 + 1096 + static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt( 1097 + struct dma_chan *chan, unsigned long flags) 1098 + { 1099 + struct dma_async_tx_descriptor *retval = NULL; 1100 + 1101 + return retval; 1102 + } 1103 + 1104 + /* 1105 + * Code accessing dma_async_is_complete() in a tight loop 1106 + * may give problems - could schedule where indicated. 1107 + * If slaves are relying on interrupts to signal completion this 1108 + * function must not be called with interrupts disabled 1109 + */ 1110 + static enum dma_status 1111 + pl08x_dma_tx_status(struct dma_chan *chan, 1112 + dma_cookie_t cookie, 1113 + struct dma_tx_state *txstate) 1114 + { 1115 + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1116 + dma_cookie_t last_used; 1117 + dma_cookie_t last_complete; 1118 + enum dma_status ret; 1119 + u32 bytesleft = 0; 1120 + 1121 + last_used = atomic_read(&plchan->last_issued); 1122 + last_complete = plchan->lc; 1123 + 1124 + ret = dma_async_is_complete(cookie, last_complete, last_used); 1125 + if (ret == DMA_SUCCESS) { 1126 + dma_set_tx_state(txstate, last_complete, last_used, 0); 1127 + return ret; 1128 + } 1129 + 1130 + /* 1131 + * schedule(); could be inserted here 1132 + */ 1133 + 1134 + /* 1135 + * This cookie not complete yet 1136 + */ 1137 + last_used = atomic_read(&plchan->last_issued); 1138 + last_complete = plchan->lc; 1139 + 1140 + /* Get number of bytes left in the active transactions and queue */ 1141 + bytesleft = pl08x_getbytes_chan(plchan); 1142 + 1143 + dma_set_tx_state(txstate, last_complete, last_used, 1144 + bytesleft); 1145 + 1146 + if (plchan->state == PL08X_CHAN_PAUSED) 1147 + return DMA_PAUSED; 1148 + 1149 + /* Whether waiting or running, we're in progress */ 1150 + return DMA_IN_PROGRESS; 1151 + } 1152 + 1153 + /* PrimeCell DMA extension */ 1154 + struct burst_table { 1155 + int burstwords; 1156 + u32 reg; 1157 + }; 1158 + 1159 + static const struct burst_table burst_sizes[] = { 1160 + { 1161 + .burstwords = 256, 1162 + .reg = (PL080_BSIZE_256 << PL080_CONTROL_SB_SIZE_SHIFT) | 1163 + (PL080_BSIZE_256 << PL080_CONTROL_DB_SIZE_SHIFT), 1164 + }, 1165 + { 1166 + .burstwords = 128, 1167 + .reg = (PL080_BSIZE_128 << PL080_CONTROL_SB_SIZE_SHIFT) | 1168 + (PL080_BSIZE_128 << PL080_CONTROL_DB_SIZE_SHIFT), 1169 + }, 1170 + { 1171 + .burstwords = 64, 1172 + .reg = (PL080_BSIZE_64 << PL080_CONTROL_SB_SIZE_SHIFT) | 1173 + (PL080_BSIZE_64 << 
PL080_CONTROL_DB_SIZE_SHIFT), 1174 + }, 1175 + { 1176 + .burstwords = 32, 1177 + .reg = (PL080_BSIZE_32 << PL080_CONTROL_SB_SIZE_SHIFT) | 1178 + (PL080_BSIZE_32 << PL080_CONTROL_DB_SIZE_SHIFT), 1179 + }, 1180 + { 1181 + .burstwords = 16, 1182 + .reg = (PL080_BSIZE_16 << PL080_CONTROL_SB_SIZE_SHIFT) | 1183 + (PL080_BSIZE_16 << PL080_CONTROL_DB_SIZE_SHIFT), 1184 + }, 1185 + { 1186 + .burstwords = 8, 1187 + .reg = (PL080_BSIZE_8 << PL080_CONTROL_SB_SIZE_SHIFT) | 1188 + (PL080_BSIZE_8 << PL080_CONTROL_DB_SIZE_SHIFT), 1189 + }, 1190 + { 1191 + .burstwords = 4, 1192 + .reg = (PL080_BSIZE_4 << PL080_CONTROL_SB_SIZE_SHIFT) | 1193 + (PL080_BSIZE_4 << PL080_CONTROL_DB_SIZE_SHIFT), 1194 + }, 1195 + { 1196 + .burstwords = 1, 1197 + .reg = (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | 1198 + (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT), 1199 + }, 1200 + }; 1201 + 1202 + static void dma_set_runtime_config(struct dma_chan *chan, 1203 + struct dma_slave_config *config) 1204 + { 1205 + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1206 + struct pl08x_driver_data *pl08x = plchan->host; 1207 + struct pl08x_channel_data *cd = plchan->cd; 1208 + enum dma_slave_buswidth addr_width; 1209 + u32 maxburst; 1210 + u32 cctl = 0; 1211 + /* Mask out all except src and dst channel */ 1212 + u32 ccfg = cd->ccfg & 0x000003DEU; 1213 + int i = 0; 1214 + 1215 + /* Transfer direction */ 1216 + plchan->runtime_direction = config->direction; 1217 + if (config->direction == DMA_TO_DEVICE) { 1218 + plchan->runtime_addr = config->dst_addr; 1219 + cctl |= PL080_CONTROL_SRC_INCR; 1220 + ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1221 + addr_width = config->dst_addr_width; 1222 + maxburst = config->dst_maxburst; 1223 + } else if (config->direction == DMA_FROM_DEVICE) { 1224 + plchan->runtime_addr = config->src_addr; 1225 + cctl |= PL080_CONTROL_DST_INCR; 1226 + ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1227 + addr_width = config->src_addr_width; 1228 + maxburst = config->src_maxburst; 1229 + } else { 1230 + dev_err(&pl08x->adev->dev, 1231 + "bad runtime_config: alien transfer direction\n"); 1232 + return; 1233 + } 1234 + 1235 + switch (addr_width) { 1236 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 1237 + cctl |= (PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT) | 1238 + (PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT); 1239 + break; 1240 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 1241 + cctl |= (PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT) | 1242 + (PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT); 1243 + break; 1244 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 1245 + cctl |= (PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT) | 1246 + (PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT); 1247 + break; 1248 + default: 1249 + dev_err(&pl08x->adev->dev, 1250 + "bad runtime_config: alien address width\n"); 1251 + return; 1252 + } 1253 + 1254 + /* 1255 + * Now decide on a maxburst: 1256 + * If this channel will only request single transfers, set 1257 + * this down to ONE element. 
1258 + */ 1259 + if (plchan->cd->single) { 1260 + cctl |= (PL080_BSIZE_1 << PL080_CONTROL_SB_SIZE_SHIFT) | 1261 + (PL080_BSIZE_1 << PL080_CONTROL_DB_SIZE_SHIFT); 1262 + } else { 1263 + while (i < ARRAY_SIZE(burst_sizes)) { 1264 + if (burst_sizes[i].burstwords <= maxburst) 1265 + break; 1266 + i++; 1267 + } 1268 + cctl |= burst_sizes[i].reg; 1269 + } 1270 + 1271 + /* Access the cell in privileged mode, non-bufferable, non-cacheable */ 1272 + cctl &= ~PL080_CONTROL_PROT_MASK; 1273 + cctl |= PL080_CONTROL_PROT_SYS; 1274 + 1275 + /* Modify the default channel data to fit PrimeCell request */ 1276 + cd->cctl = cctl; 1277 + cd->ccfg = ccfg; 1278 + 1279 + dev_dbg(&pl08x->adev->dev, 1280 + "configured channel %s (%s) for %s, data width %d, " 1281 + "maxburst %d words, LE, CCTL=%08x, CCFG=%08x\n", 1282 + dma_chan_name(chan), plchan->name, 1283 + (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX", 1284 + addr_width, 1285 + maxburst, 1286 + cctl, ccfg); 1287 + } 1288 + 1289 + /* 1290 + * Slave transactions callback to the slave device to allow 1291 + * synchronization of slave DMA signals with the DMAC enable 1292 + */ 1293 + static void pl08x_issue_pending(struct dma_chan *chan) 1294 + { 1295 + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1296 + struct pl08x_driver_data *pl08x = plchan->host; 1297 + unsigned long flags; 1298 + 1299 + spin_lock_irqsave(&plchan->lock, flags); 1300 + /* Something is already active */ 1301 + if (plchan->at) { 1302 + spin_unlock_irqrestore(&plchan->lock, flags); 1303 + return; 1304 + } 1305 + 1306 + /* Didn't get a physical channel so waiting for it ... */ 1307 + if (plchan->state == PL08X_CHAN_WAITING) 1308 + return; 1309 + 1310 + /* Take the first element in the queue and execute it */ 1311 + if (!list_empty(&plchan->desc_list)) { 1312 + struct pl08x_txd *next; 1313 + 1314 + next = list_first_entry(&plchan->desc_list, 1315 + struct pl08x_txd, 1316 + node); 1317 + list_del(&next->node); 1318 + plchan->at = next; 1319 + plchan->state = PL08X_CHAN_RUNNING; 1320 + 1321 + /* Configure the physical channel for the active txd */ 1322 + pl08x_config_phychan_for_txd(plchan); 1323 + pl08x_set_cregs(pl08x, plchan->phychan); 1324 + pl08x_enable_phy_chan(pl08x, plchan->phychan); 1325 + } 1326 + 1327 + spin_unlock_irqrestore(&plchan->lock, flags); 1328 + } 1329 + 1330 + static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan, 1331 + struct pl08x_txd *txd) 1332 + { 1333 + int num_llis; 1334 + struct pl08x_driver_data *pl08x = plchan->host; 1335 + int ret; 1336 + 1337 + num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1338 + 1339 + if (!num_llis) 1340 + return -EINVAL; 1341 + 1342 + spin_lock_irqsave(&plchan->lock, plchan->lockflags); 1343 + 1344 + /* 1345 + * If this device is not using a circular buffer then 1346 + * queue this new descriptor for transfer. 1347 + * The descriptor for a circular buffer continues 1348 + * to be used until the channel is freed. 1349 + */ 1350 + if (txd->cd->circular_buffer) 1351 + dev_err(&pl08x->adev->dev, 1352 + "%s attempting to queue a circular buffer\n", 1353 + __func__); 1354 + else 1355 + list_add_tail(&txd->node, 1356 + &plchan->desc_list); 1357 + 1358 + /* 1359 + * See if we already have a physical channel allocated, 1360 + * else this is the time to try to get one. 
1361 + */
1362 + ret = prep_phy_channel(plchan, txd);
1363 + if (ret) {
1364 + /*
1365 + * No physical channel available: memcpy transfers will
1366 + * be stacked up until there is a channel available to
1367 + * handle them, whereas slave transfers may have been
1368 + * denied due to platform channel muxing restrictions.
1369 + * Since there is no guarantee that this will ever be
1370 + * resolved, and since the signal must be acquired AFTER
1371 + * acquiring the physical channel, we will let them be
1372 + * NACKed with -EBUSY here. The drivers can always retry
1373 + * the prep() call if they are eager to do this using DMA.
1374 + */
1375 + if (plchan->slave) {
1376 + pl08x_free_txd_list(pl08x, plchan);
1377 + spin_unlock_irqrestore(&plchan->lock, plchan->lockflags);
1378 + return -EBUSY;
1379 + }
1380 + /* Do this memcpy whenever there is a channel ready */
1381 + plchan->state = PL08X_CHAN_WAITING;
1382 + plchan->waiting = txd;
1383 + } else
1384 + /*
1385 + * Else we're all set, paused and ready to roll, the
1386 + * status will switch to PL08X_CHAN_RUNNING when
1387 + * we call issue_pending(). If there is something
1388 + * already running on the channel we don't change
1389 + * its state.
1390 + */
1391 + if (plchan->state == PL08X_CHAN_IDLE)
1392 + plchan->state = PL08X_CHAN_PAUSED;
1393 + 
1394 + /*
1395 + * Notice that we leave plchan->lock locked on purpose:
1396 + * it will be unlocked in the subsequent tx_submit()
1397 + * call. This is a consequence of the current API.
1398 + */
1399 + 
1400 + return 0;
1401 + }
1402 + 
1403 + /*
1404 + * Initialize a descriptor to be used by memcpy submit
1405 + */
1406 + static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
1407 + struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1408 + size_t len, unsigned long flags)
1409 + {
1410 + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1411 + struct pl08x_driver_data *pl08x = plchan->host;
1412 + struct pl08x_txd *txd;
1413 + int ret;
1414 + 
1415 + txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT);
1416 + if (!txd) {
1417 + dev_err(&pl08x->adev->dev,
1418 + "%s no memory for descriptor\n", __func__);
1419 + return NULL;
1420 + }
1421 + 
1422 + dma_async_tx_descriptor_init(&txd->tx, chan);
1423 + txd->direction = DMA_NONE;
1424 + txd->srcbus.addr = src;
1425 + txd->dstbus.addr = dest;
1426 + 
1427 + /* Set platform data for m2m */
1428 + txd->cd = &pl08x->pd->memcpy_channel;
1429 + /* Both to be incremented or the code will break */
1430 + txd->cd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;
1431 + txd->tx.tx_submit = pl08x_tx_submit;
1432 + txd->tx.callback = NULL;
1433 + txd->tx.callback_param = NULL;
1434 + txd->len = len;
1435 + 
1436 + INIT_LIST_HEAD(&txd->node);
1437 + ret = pl08x_prep_channel_resources(plchan, txd);
1438 + if (ret)
1439 + return NULL;
1440 + /*
1441 + * NB: the channel lock is held at this point so tx_submit()
1442 + * must be called in direct succession. 
1443 + */ 1444 + 1445 + return &txd->tx; 1446 + } 1447 + 1448 + struct dma_async_tx_descriptor *pl08x_prep_slave_sg( 1449 + struct dma_chan *chan, struct scatterlist *sgl, 1450 + unsigned int sg_len, enum dma_data_direction direction, 1451 + unsigned long flags) 1452 + { 1453 + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1454 + struct pl08x_driver_data *pl08x = plchan->host; 1455 + struct pl08x_txd *txd; 1456 + int ret; 1457 + 1458 + /* 1459 + * Current implementation ASSUMES only one sg 1460 + */ 1461 + if (sg_len != 1) { 1462 + dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n", 1463 + __func__); 1464 + BUG(); 1465 + } 1466 + 1467 + dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1468 + __func__, sgl->length, plchan->name); 1469 + 1470 + txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1471 + if (!txd) { 1472 + dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); 1473 + return NULL; 1474 + } 1475 + 1476 + dma_async_tx_descriptor_init(&txd->tx, chan); 1477 + 1478 + if (direction != plchan->runtime_direction) 1479 + dev_err(&pl08x->adev->dev, "%s DMA setup does not match " 1480 + "the direction configured for the PrimeCell\n", 1481 + __func__); 1482 + 1483 + /* 1484 + * Set up addresses, the PrimeCell configured address 1485 + * will take precedence since this may configure the 1486 + * channel target address dynamically at runtime. 1487 + */ 1488 + txd->direction = direction; 1489 + if (direction == DMA_TO_DEVICE) { 1490 + txd->srcbus.addr = sgl->dma_address; 1491 + if (plchan->runtime_addr) 1492 + txd->dstbus.addr = plchan->runtime_addr; 1493 + else 1494 + txd->dstbus.addr = plchan->cd->addr; 1495 + } else if (direction == DMA_FROM_DEVICE) { 1496 + if (plchan->runtime_addr) 1497 + txd->srcbus.addr = plchan->runtime_addr; 1498 + else 1499 + txd->srcbus.addr = plchan->cd->addr; 1500 + txd->dstbus.addr = sgl->dma_address; 1501 + } else { 1502 + dev_err(&pl08x->adev->dev, 1503 + "%s direction unsupported\n", __func__); 1504 + return NULL; 1505 + } 1506 + txd->cd = plchan->cd; 1507 + txd->tx.tx_submit = pl08x_tx_submit; 1508 + txd->tx.callback = NULL; 1509 + txd->tx.callback_param = NULL; 1510 + txd->len = sgl->length; 1511 + INIT_LIST_HEAD(&txd->node); 1512 + 1513 + ret = pl08x_prep_channel_resources(plchan, txd); 1514 + if (ret) 1515 + return NULL; 1516 + /* 1517 + * NB: the channel lock is held at this point so tx_submit() 1518 + * must be called in direct succession. 1519 + */ 1520 + 1521 + return &txd->tx; 1522 + } 1523 + 1524 + static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1525 + unsigned long arg) 1526 + { 1527 + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1528 + struct pl08x_driver_data *pl08x = plchan->host; 1529 + unsigned long flags; 1530 + int ret = 0; 1531 + 1532 + /* Controls applicable to inactive channels */ 1533 + if (cmd == DMA_SLAVE_CONFIG) { 1534 + dma_set_runtime_config(chan, 1535 + (struct dma_slave_config *) 1536 + arg); 1537 + return 0; 1538 + } 1539 + 1540 + /* 1541 + * Anything succeeds on channels with no physical allocation and 1542 + * no queued transfers. 
1543 + */
1544 + spin_lock_irqsave(&plchan->lock, flags);
1545 + if (!plchan->phychan && !plchan->at) {
1546 + spin_unlock_irqrestore(&plchan->lock, flags);
1547 + return 0;
1548 + }
1549 + 
1550 + switch (cmd) {
1551 + case DMA_TERMINATE_ALL:
1552 + plchan->state = PL08X_CHAN_IDLE;
1553 + 
1554 + if (plchan->phychan) {
1555 + pl08x_stop_phy_chan(plchan->phychan);
1556 + 
1557 + /*
1558 + * Mark physical channel as free and free any slave
1559 + * signal
1560 + */
1561 + if ((plchan->phychan->signal >= 0) &&
1562 + pl08x->pd->put_signal) {
1563 + pl08x->pd->put_signal(plchan);
1564 + plchan->phychan->signal = -1;
1565 + }
1566 + pl08x_put_phy_channel(pl08x, plchan->phychan);
1567 + plchan->phychan = NULL;
1568 + }
1569 + /* Stop any pending tasklet */
1570 + tasklet_disable(&plchan->tasklet);
1571 + /* Dequeue jobs and free LLIs */
1572 + if (plchan->at) {
1573 + pl08x_free_txd(pl08x, plchan->at);
1574 + plchan->at = NULL;
1575 + }
1576 + /* Dequeue jobs not yet fired as well */
1577 + pl08x_free_txd_list(pl08x, plchan);
1578 + break;
1579 + case DMA_PAUSE:
1580 + pl08x_pause_phy_chan(plchan->phychan);
1581 + plchan->state = PL08X_CHAN_PAUSED;
1582 + break;
1583 + case DMA_RESUME:
1584 + pl08x_resume_phy_chan(plchan->phychan);
1585 + plchan->state = PL08X_CHAN_RUNNING;
1586 + break;
1587 + default:
1588 + /* Unknown command */
1589 + ret = -ENXIO;
1590 + break;
1591 + }
1592 + 
1593 + spin_unlock_irqrestore(&plchan->lock, flags);
1594 + 
1595 + return ret;
1596 + }
1597 + 
1598 + bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1599 + {
1600 + struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
1601 + char *name = chan_id;
1602 + 
1603 + /* Check that the channel is not taken! */
1604 + if (!strcmp(plchan->name, name))
1605 + return true;
1606 + 
1607 + return false;
1608 + }
1609 + 
1610 + /*
1611 + * Just check that the device is there and active
1612 + * TODO: turn this bit on/off depending on the number of
1613 + * physical channels actually used, if it is zero... well
1614 + * shut it off. That will save some power. Cut the clock
1615 + * at the same time.
1616 + */
1617 + static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1618 + {
1619 + u32 val;
1620 + 
1621 + val = readl(pl08x->base + PL080_CONFIG);
1622 + val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE);
1623 + /* We implicitly clear bit 1 and that means little-endian mode */
1624 + val |= PL080_CONFIG_ENABLE;
1625 + writel(val, pl08x->base + PL080_CONFIG);
1626 + }
1627 + 
1628 + static void pl08x_tasklet(unsigned long data)
1629 + {
1630 + struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
1631 + struct pl08x_phy_chan *phychan;
1632 + struct pl08x_driver_data *pl08x;
1633 + 
1634 + BUG_ON(!plchan);
1635 + phychan = plchan->phychan;
1636 + pl08x = plchan->host;
1637 + 
1638 + spin_lock(&plchan->lock);
1639 + if (plchan->at) {
1640 + dma_async_tx_callback callback =
1641 + plchan->at->tx.callback;
1642 + void *callback_param =
1643 + plchan->at->tx.callback_param;
1644 + 
1645 + /*
1646 + * Update last completed
1647 + */
1648 + plchan->lc =
1649 + (plchan->at->tx.cookie);
1650 + 
1651 + /*
1652 + * Callback to signal completion
1653 + */
1654 + if (callback)
1655 + callback(callback_param);
1656 + 
1657 + /*
1658 + * Device callbacks should NOT clear
1659 + * the current transaction on the channel
1660 + * Linus: sometimes they should? 
1661 + */ 1662 + if (!plchan->at) 1663 + BUG(); 1664 + 1665 + /* 1666 + * Free the descriptor if it's not for a device 1667 + * using a circular buffer 1668 + */ 1669 + if (!plchan->at->cd->circular_buffer) { 1670 + pl08x_free_txd(pl08x, plchan->at); 1671 + plchan->at = NULL; 1672 + } 1673 + /* 1674 + * else descriptor for circular 1675 + * buffers only freed when 1676 + * client has disabled dma 1677 + */ 1678 + } 1679 + /* 1680 + * If a new descriptor is queued, set it up 1681 + * plchan->at is NULL here 1682 + */ 1683 + if (!list_empty(&plchan->desc_list)) { 1684 + struct pl08x_txd *next; 1685 + 1686 + next = list_first_entry(&plchan->desc_list, 1687 + struct pl08x_txd, 1688 + node); 1689 + list_del(&next->node); 1690 + plchan->at = next; 1691 + /* Configure the physical channel for the next txd */ 1692 + pl08x_config_phychan_for_txd(plchan); 1693 + pl08x_set_cregs(pl08x, plchan->phychan); 1694 + pl08x_enable_phy_chan(pl08x, plchan->phychan); 1695 + } else { 1696 + struct pl08x_dma_chan *waiting = NULL; 1697 + 1698 + /* 1699 + * No more jobs, so free up the physical channel 1700 + * Free any allocated signal on slave transfers too 1701 + */ 1702 + if ((phychan->signal >= 0) && pl08x->pd->put_signal) { 1703 + pl08x->pd->put_signal(plchan); 1704 + phychan->signal = -1; 1705 + } 1706 + pl08x_put_phy_channel(pl08x, phychan); 1707 + plchan->phychan = NULL; 1708 + plchan->state = PL08X_CHAN_IDLE; 1709 + 1710 + /* 1711 + * And NOW before anyone else can grab that free:d 1712 + * up physical channel, see if there is some memcpy 1713 + * pending that seriously needs to start because of 1714 + * being stacked up while we were choking the 1715 + * physical channels with data. 1716 + */ 1717 + list_for_each_entry(waiting, &pl08x->memcpy.channels, 1718 + chan.device_node) { 1719 + if (waiting->state == PL08X_CHAN_WAITING && 1720 + waiting->waiting != NULL) { 1721 + int ret; 1722 + 1723 + /* This should REALLY not fail now */ 1724 + ret = prep_phy_channel(waiting, 1725 + waiting->waiting); 1726 + BUG_ON(ret); 1727 + waiting->state = PL08X_CHAN_RUNNING; 1728 + waiting->waiting = NULL; 1729 + pl08x_issue_pending(&waiting->chan); 1730 + break; 1731 + } 1732 + } 1733 + } 1734 + 1735 + spin_unlock(&plchan->lock); 1736 + } 1737 + 1738 + static irqreturn_t pl08x_irq(int irq, void *dev) 1739 + { 1740 + struct pl08x_driver_data *pl08x = dev; 1741 + u32 mask = 0; 1742 + u32 val; 1743 + int i; 1744 + 1745 + val = readl(pl08x->base + PL080_ERR_STATUS); 1746 + if (val) { 1747 + /* 1748 + * An error interrupt (on one or more channels) 1749 + */ 1750 + dev_err(&pl08x->adev->dev, 1751 + "%s error interrupt, register value 0x%08x\n", 1752 + __func__, val); 1753 + /* 1754 + * Simply clear ALL PL08X error interrupts, 1755 + * regardless of channel and cause 1756 + * FIXME: should be 0x00000003 on PL081 really. 1757 + */ 1758 + writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1759 + } 1760 + val = readl(pl08x->base + PL080_INT_STATUS); 1761 + for (i = 0; i < pl08x->vd->channels; i++) { 1762 + if ((1 << i) & val) { 1763 + /* Locate physical channel */ 1764 + struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1765 + struct pl08x_dma_chan *plchan = phychan->serving; 1766 + 1767 + /* Schedule tasklet on this channel */ 1768 + tasklet_schedule(&plchan->tasklet); 1769 + 1770 + mask |= (1 << i); 1771 + } 1772 + } 1773 + /* 1774 + * Clear only the terminal interrupts on channels we processed 1775 + */ 1776 + writel(mask, pl08x->base + PL080_TC_CLEAR); 1777 + 1778 + return mask ? 
IRQ_HANDLED : IRQ_NONE; 1779 + } 1780 + 1781 + /* 1782 + * Initialise the DMAC memcpy/slave channels. 1783 + * Make a local wrapper to hold required data 1784 + */ 1785 + static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1786 + struct dma_device *dmadev, 1787 + unsigned int channels, 1788 + bool slave) 1789 + { 1790 + struct pl08x_dma_chan *chan; 1791 + int i; 1792 + 1793 + INIT_LIST_HEAD(&dmadev->channels); 1794 + /* 1795 + * Register as many many memcpy as we have physical channels, 1796 + * we won't always be able to use all but the code will have 1797 + * to cope with that situation. 1798 + */ 1799 + for (i = 0; i < channels; i++) { 1800 + chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); 1801 + if (!chan) { 1802 + dev_err(&pl08x->adev->dev, 1803 + "%s no memory for channel\n", __func__); 1804 + return -ENOMEM; 1805 + } 1806 + 1807 + chan->host = pl08x; 1808 + chan->state = PL08X_CHAN_IDLE; 1809 + 1810 + if (slave) { 1811 + chan->slave = true; 1812 + chan->name = pl08x->pd->slave_channels[i].bus_id; 1813 + chan->cd = &pl08x->pd->slave_channels[i]; 1814 + } else { 1815 + chan->cd = &pl08x->pd->memcpy_channel; 1816 + chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i); 1817 + if (!chan->name) { 1818 + kfree(chan); 1819 + return -ENOMEM; 1820 + } 1821 + } 1822 + dev_info(&pl08x->adev->dev, 1823 + "initialize virtual channel \"%s\"\n", 1824 + chan->name); 1825 + 1826 + chan->chan.device = dmadev; 1827 + atomic_set(&chan->last_issued, 0); 1828 + chan->lc = atomic_read(&chan->last_issued); 1829 + 1830 + spin_lock_init(&chan->lock); 1831 + INIT_LIST_HEAD(&chan->desc_list); 1832 + tasklet_init(&chan->tasklet, pl08x_tasklet, 1833 + (unsigned long) chan); 1834 + 1835 + list_add_tail(&chan->chan.device_node, &dmadev->channels); 1836 + } 1837 + dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", 1838 + i, slave ? "slave" : "memcpy"); 1839 + return i; 1840 + } 1841 + 1842 + static void pl08x_free_virtual_channels(struct dma_device *dmadev) 1843 + { 1844 + struct pl08x_dma_chan *chan = NULL; 1845 + struct pl08x_dma_chan *next; 1846 + 1847 + list_for_each_entry_safe(chan, 1848 + next, &dmadev->channels, chan.device_node) { 1849 + list_del(&chan->chan.device_node); 1850 + kfree(chan); 1851 + } 1852 + } 1853 + 1854 + #ifdef CONFIG_DEBUG_FS 1855 + static const char *pl08x_state_str(enum pl08x_dma_chan_state state) 1856 + { 1857 + switch (state) { 1858 + case PL08X_CHAN_IDLE: 1859 + return "idle"; 1860 + case PL08X_CHAN_RUNNING: 1861 + return "running"; 1862 + case PL08X_CHAN_PAUSED: 1863 + return "paused"; 1864 + case PL08X_CHAN_WAITING: 1865 + return "waiting"; 1866 + default: 1867 + break; 1868 + } 1869 + return "UNKNOWN STATE"; 1870 + } 1871 + 1872 + static int pl08x_debugfs_show(struct seq_file *s, void *data) 1873 + { 1874 + struct pl08x_driver_data *pl08x = s->private; 1875 + struct pl08x_dma_chan *chan; 1876 + struct pl08x_phy_chan *ch; 1877 + unsigned long flags; 1878 + int i; 1879 + 1880 + seq_printf(s, "PL08x physical channels:\n"); 1881 + seq_printf(s, "CHANNEL:\tUSER:\n"); 1882 + seq_printf(s, "--------\t-----\n"); 1883 + for (i = 0; i < pl08x->vd->channels; i++) { 1884 + struct pl08x_dma_chan *virt_chan; 1885 + 1886 + ch = &pl08x->phy_chans[i]; 1887 + 1888 + spin_lock_irqsave(&ch->lock, flags); 1889 + virt_chan = ch->serving; 1890 + 1891 + seq_printf(s, "%d\t\t%s\n", 1892 + ch->id, virt_chan ? 
virt_chan->name : "(none)");
1893 + 
1894 + spin_unlock_irqrestore(&ch->lock, flags);
1895 + }
1896 + 
1897 + seq_printf(s, "\nPL08x virtual memcpy channels:\n");
1898 + seq_printf(s, "CHANNEL:\tSTATE:\n");
1899 + seq_printf(s, "--------\t------\n");
1900 + list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
1901 + seq_printf(s, "%s\t\t%s\n", chan->name,
1902 + pl08x_state_str(chan->state));
1903 + }
1904 + 
1905 + seq_printf(s, "\nPL08x virtual slave channels:\n");
1906 + seq_printf(s, "CHANNEL:\tSTATE:\n");
1907 + seq_printf(s, "--------\t------\n");
1908 + list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
1909 + seq_printf(s, "%s\t\t%s\n", chan->name,
1910 + pl08x_state_str(chan->state));
1911 + }
1912 + 
1913 + return 0;
1914 + }
1915 + 
1916 + static int pl08x_debugfs_open(struct inode *inode, struct file *file)
1917 + {
1918 + return single_open(file, pl08x_debugfs_show, inode->i_private);
1919 + }
1920 + 
1921 + static const struct file_operations pl08x_debugfs_operations = {
1922 + .open = pl08x_debugfs_open,
1923 + .read = seq_read,
1924 + .llseek = seq_lseek,
1925 + .release = single_release,
1926 + };
1927 + 
1928 + static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1929 + {
1930 + /* Expose a simple debugfs interface to view the channel state */
1931 + (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
1932 + NULL, pl08x,
1933 + &pl08x_debugfs_operations);
1934 + }
1935 + 
1936 + #else
1937 + static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
1938 + {
1939 + }
1940 + #endif
1941 + 
1942 + static int pl08x_probe(struct amba_device *adev, struct amba_id *id)
1943 + {
1944 + struct pl08x_driver_data *pl08x;
1945 + struct vendor_data *vd = id->data;
1946 + int ret = 0;
1947 + int i;
1948 + 
1949 + ret = amba_request_regions(adev, NULL);
1950 + if (ret)
1951 + return ret;
1952 + 
1953 + /* Create the driver state holder */
1954 + pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL);
1955 + if (!pl08x) {
1956 + ret = -ENOMEM;
1957 + goto out_no_pl08x;
1958 + }
1959 + 
1960 + /* Initialize memcpy engine */
1961 + dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
1962 + pl08x->memcpy.dev = &adev->dev;
1963 + pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1964 + pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
1965 + pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
1966 + pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1967 + pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
1968 + pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
1969 + pl08x->memcpy.device_control = pl08x_control;
1970 + 
1971 + /* Initialize slave engine */
1972 + dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
1973 + pl08x->slave.dev = &adev->dev;
1974 + pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
1975 + pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
1976 + pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
1977 + pl08x->slave.device_tx_status = pl08x_dma_tx_status;
1978 + pl08x->slave.device_issue_pending = pl08x_issue_pending;
1979 + pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
1980 + pl08x->slave.device_control = pl08x_control;
1981 + 
1982 + /* Get the platform data */
1983 + pl08x->pd = dev_get_platdata(&adev->dev);
1984 + if (!pl08x->pd) {
1985 + dev_err(&adev->dev, "no platform data supplied\n");
1986 + ret = -EINVAL;
1987 + goto out_no_platdata;
1988 + }
1989 + /* Assign 
useful pointers to the driver state */ 1990 + pl08x->adev = adev; 1991 + pl08x->vd = vd; 1992 + 1993 + /* A DMA memory pool for LLIs, align on 1-byte boundary */ 1994 + pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev, 1995 + PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0); 1996 + if (!pl08x->pool) { 1997 + ret = -ENOMEM; 1998 + goto out_no_lli_pool; 1999 + } 2000 + 2001 + spin_lock_init(&pl08x->lock); 2002 + 2003 + pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); 2004 + if (!pl08x->base) { 2005 + ret = -ENOMEM; 2006 + goto out_no_ioremap; 2007 + } 2008 + 2009 + /* Turn on the PL08x */ 2010 + pl08x_ensure_on(pl08x); 2011 + 2012 + /* 2013 + * Attach the interrupt handler 2014 + */ 2015 + writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 2016 + writel(0x000000FF, pl08x->base + PL080_TC_CLEAR); 2017 + 2018 + ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED, 2019 + vd->name, pl08x); 2020 + if (ret) { 2021 + dev_err(&adev->dev, "%s failed to request interrupt %d\n", 2022 + __func__, adev->irq[0]); 2023 + goto out_no_irq; 2024 + } 2025 + 2026 + /* Initialize physical channels */ 2027 + pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), 2028 + GFP_KERNEL); 2029 + if (!pl08x->phy_chans) { 2030 + dev_err(&adev->dev, "%s failed to allocate " 2031 + "physical channel holders\n", 2032 + __func__); 2033 + goto out_no_phychans; 2034 + } 2035 + 2036 + for (i = 0; i < vd->channels; i++) { 2037 + struct pl08x_phy_chan *ch = &pl08x->phy_chans[i]; 2038 + 2039 + ch->id = i; 2040 + ch->base = pl08x->base + PL080_Cx_BASE(i); 2041 + spin_lock_init(&ch->lock); 2042 + ch->serving = NULL; 2043 + ch->signal = -1; 2044 + dev_info(&adev->dev, 2045 + "physical channel %d is %s\n", i, 2046 + pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 2047 + } 2048 + 2049 + /* Register as many memcpy channels as there are physical channels */ 2050 + ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy, 2051 + pl08x->vd->channels, false); 2052 + if (ret <= 0) { 2053 + dev_warn(&pl08x->adev->dev, 2054 + "%s failed to enumerate memcpy channels - %d\n", 2055 + __func__, ret); 2056 + goto out_no_memcpy; 2057 + } 2058 + pl08x->memcpy.chancnt = ret; 2059 + 2060 + /* Register slave channels */ 2061 + ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2062 + pl08x->pd->num_slave_channels, 2063 + true); 2064 + if (ret <= 0) { 2065 + dev_warn(&pl08x->adev->dev, 2066 + "%s failed to enumerate slave channels - %d\n", 2067 + __func__, ret); 2068 + goto out_no_slave; 2069 + } 2070 + pl08x->slave.chancnt = ret; 2071 + 2072 + ret = dma_async_device_register(&pl08x->memcpy); 2073 + if (ret) { 2074 + dev_warn(&pl08x->adev->dev, 2075 + "%s failed to register memcpy as an async device - %d\n", 2076 + __func__, ret); 2077 + goto out_no_memcpy_reg; 2078 + } 2079 + 2080 + ret = dma_async_device_register(&pl08x->slave); 2081 + if (ret) { 2082 + dev_warn(&pl08x->adev->dev, 2083 + "%s failed to register slave as an async device - %d\n", 2084 + __func__, ret); 2085 + goto out_no_slave_reg; 2086 + } 2087 + 2088 + amba_set_drvdata(adev, pl08x); 2089 + init_pl08x_debugfs(pl08x); 2090 + dev_info(&pl08x->adev->dev, "ARM(R) %s DMA block initialized @%08x\n", 2091 + vd->name, adev->res.start); 2092 + return 0; 2093 + 2094 + out_no_slave_reg: 2095 + dma_async_device_unregister(&pl08x->memcpy); 2096 + out_no_memcpy_reg: 2097 + pl08x_free_virtual_channels(&pl08x->slave); 2098 + out_no_slave: 2099 + pl08x_free_virtual_channels(&pl08x->memcpy); 2100 + out_no_memcpy: 2101 + kfree(pl08x->phy_chans); 2102 + 
out_no_phychans:
2103 + free_irq(adev->irq[0], pl08x);
2104 + out_no_irq:
2105 + iounmap(pl08x->base);
2106 + out_no_ioremap:
2107 + dma_pool_destroy(pl08x->pool);
2108 + out_no_lli_pool:
2109 + out_no_platdata:
2110 + kfree(pl08x);
2111 + out_no_pl08x:
2112 + amba_release_regions(adev);
2113 + return ret;
2114 + }
2115 + 
2116 + /* PL080 has 8 channels and the PL081 has just 2 */
2117 + static struct vendor_data vendor_pl080 = {
2118 + .name = "PL080",
2119 + .channels = 8,
2120 + .dualmaster = true,
2121 + };
2122 + 
2123 + static struct vendor_data vendor_pl081 = {
2124 + .name = "PL081",
2125 + .channels = 2,
2126 + .dualmaster = false,
2127 + };
2128 + 
2129 + static struct amba_id pl08x_ids[] = {
2130 + /* PL080 */
2131 + {
2132 + .id = 0x00041080,
2133 + .mask = 0x000fffff,
2134 + .data = &vendor_pl080,
2135 + },
2136 + /* PL081 */
2137 + {
2138 + .id = 0x00041081,
2139 + .mask = 0x000fffff,
2140 + .data = &vendor_pl081,
2141 + },
2142 + /* Nomadik 8815 PL080 variant */
2143 + {
2144 + .id = 0x00280880,
2145 + .mask = 0x00ffffff,
2146 + .data = &vendor_pl080,
2147 + },
2148 + { 0, 0 },
2149 + };
2150 + 
2151 + static struct amba_driver pl08x_amba_driver = {
2152 + .drv.name = DRIVER_NAME,
2153 + .id_table = pl08x_ids,
2154 + .probe = pl08x_probe,
2155 + };
2156 + 
2157 + static int __init pl08x_init(void)
2158 + {
2159 + int retval;
2160 + retval = amba_driver_register(&pl08x_amba_driver);
2161 + if (retval)
2162 + printk(KERN_WARNING DRIVER_NAME
2163 + " failed to register as an amba device (%d)\n",
2164 + retval);
2165 + return retval;
2166 + }
2167 + subsys_initcall(pl08x_init);
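
For context on the API this driver exposes, here is a minimal, hedged sketch of how a slave peripheral driver might claim and configure one of these channels. The channel name "uart0_tx" and the FIFO address are illustrative assumptions, not values taken from the patch:

	#include <linux/dmaengine.h>
	#include <linux/amba/pl08x.h>

	static struct dma_chan *example_request_pl08x_chan(void)
	{
		struct dma_slave_config cfg = {
			.direction = DMA_TO_DEVICE,
			.dst_addr = 0x80101000,	/* hypothetical peripheral FIFO */
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
			.dst_maxburst = 4,	/* matched against burst_sizes[] above */
		};
		dma_cap_mask_t mask;
		struct dma_chan *chan;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		/* pl08x_filter_id() matches the channel name from platform data */
		chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
		if (!chan)
			return NULL;

		/* reaches dma_set_runtime_config() via the device_control hook */
		chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					     (unsigned long)&cfg);
		return chan;
	}

Note that with this driver a prep() call must be followed by tx_submit() in direct succession, since the channel lock is held between the two.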
+1 -1
drivers/dma/coh901318.c
··· 1610 1610 { 1611 1611 return platform_driver_probe(&coh901318_driver, coh901318_probe); 1612 1612 } 1613 - subsys_initcall(coh901318_init); 1613 + arch_initcall(coh901318_init); 1614 1614 1615 1615 void __exit coh901318_exit(void) 1616 1616 {
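
The initcall move matters because the DMAC must be running before subsystem-level drivers that depend on it initialize. For reference, the ordering comes from the level numbers in include/linux/init.h, where lower levels run earlier at boot:

	#define arch_initcall(fn)	__define_initcall("3", fn, 3)
	#define subsys_initcall(fn)	__define_initcall("4", fn, 4)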
+6 -2
drivers/dma/dmaengine.c
··· 690 690 !device->device_prep_dma_memset); 691 691 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && 692 692 !device->device_prep_dma_interrupt); 693 + BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && 694 + !device->device_prep_dma_sg); 693 695 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 694 696 !device->device_prep_slave_sg); 697 + BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && 698 + !device->device_prep_dma_cyclic); 695 699 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 696 700 !device->device_control); 697 701 ··· 706 702 BUG_ON(!device->dev); 707 703 708 704 /* note: this only matters in the 709 - * CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH=y case 705 + * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case 710 706 */ 711 707 if (device_has_all_tx_types(device)) 712 708 dma_cap_set(DMA_ASYNC_TX, device->cap_mask); ··· 980 976 struct dma_chan *chan) 981 977 { 982 978 tx->chan = chan; 983 - #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH 979 + #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH 984 980 spin_lock_init(&tx->lock); 985 981 #endif 986 982 }
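
A hedged sketch of what the new checks require of a controller driver at registration time; foo_prep_dma_sg and foo_prep_dma_cyclic stand in for a driver's actual prep implementations:

	static int foo_dma_register(struct dma_device *dev)
	{
		/* each advertised capability needs its matching prep hook ... */
		dma_cap_set(DMA_SG, dev->cap_mask);
		dev->device_prep_dma_sg = foo_prep_dma_sg;

		dma_cap_set(DMA_CYCLIC, dev->cap_mask);
		dev->device_prep_dma_cyclic = foo_prep_dma_cyclic;

		/* ... or the BUG_ON()s above fire here */
		return dma_async_device_register(dev);
	}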
+169 -185
drivers/dma/fsldma.c
··· 35 35 #include <linux/dmapool.h> 36 36 #include <linux/of_platform.h> 37 37 38 - #include <asm/fsldma.h> 39 38 #include "fsldma.h" 39 + 40 + static const char msg_ld_oom[] = "No free memory for link descriptor\n"; 40 41 41 42 static void dma_init(struct fsldma_chan *chan) 42 43 { ··· 500 499 501 500 new = fsl_dma_alloc_descriptor(chan); 502 501 if (!new) { 503 - dev_err(chan->dev, "No free memory for link descriptor\n"); 502 + dev_err(chan->dev, msg_ld_oom); 504 503 return NULL; 505 504 } 506 505 ··· 537 536 /* Allocate the link descriptor from DMA pool */ 538 537 new = fsl_dma_alloc_descriptor(chan); 539 538 if (!new) { 540 - dev_err(chan->dev, 541 - "No free memory for link descriptor\n"); 539 + dev_err(chan->dev, msg_ld_oom); 542 540 goto fail; 543 541 } 544 542 #ifdef FSL_DMA_LD_DEBUG ··· 583 583 return NULL; 584 584 } 585 585 586 + static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan, 587 + struct scatterlist *dst_sg, unsigned int dst_nents, 588 + struct scatterlist *src_sg, unsigned int src_nents, 589 + unsigned long flags) 590 + { 591 + struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; 592 + struct fsldma_chan *chan = to_fsl_chan(dchan); 593 + size_t dst_avail, src_avail; 594 + dma_addr_t dst, src; 595 + size_t len; 596 + 597 + /* basic sanity checks */ 598 + if (dst_nents == 0 || src_nents == 0) 599 + return NULL; 600 + 601 + if (dst_sg == NULL || src_sg == NULL) 602 + return NULL; 603 + 604 + /* 605 + * TODO: should we check that both scatterlists have the same 606 + * TODO: number of bytes in total? Is that really an error? 607 + */ 608 + 609 + /* get prepared for the loop */ 610 + dst_avail = sg_dma_len(dst_sg); 611 + src_avail = sg_dma_len(src_sg); 612 + 613 + /* run until we are out of scatterlist entries */ 614 + while (true) { 615 + 616 + /* create the largest transaction possible */ 617 + len = min_t(size_t, src_avail, dst_avail); 618 + len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT); 619 + if (len == 0) 620 + goto fetch; 621 + 622 + dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail; 623 + src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail; 624 + 625 + /* allocate and populate the descriptor */ 626 + new = fsl_dma_alloc_descriptor(chan); 627 + if (!new) { 628 + dev_err(chan->dev, msg_ld_oom); 629 + goto fail; 630 + } 631 + #ifdef FSL_DMA_LD_DEBUG 632 + dev_dbg(chan->dev, "new link desc alloc %p\n", new); 633 + #endif 634 + 635 + set_desc_cnt(chan, &new->hw, len); 636 + set_desc_src(chan, &new->hw, src); 637 + set_desc_dst(chan, &new->hw, dst); 638 + 639 + if (!first) 640 + first = new; 641 + else 642 + set_desc_next(chan, &prev->hw, new->async_tx.phys); 643 + 644 + new->async_tx.cookie = 0; 645 + async_tx_ack(&new->async_tx); 646 + prev = new; 647 + 648 + /* Insert the link descriptor to the LD ring */ 649 + list_add_tail(&new->node, &first->tx_list); 650 + 651 + /* update metadata */ 652 + dst_avail -= len; 653 + src_avail -= len; 654 + 655 + fetch: 656 + /* fetch the next dst scatterlist entry */ 657 + if (dst_avail == 0) { 658 + 659 + /* no more entries: we're done */ 660 + if (dst_nents == 0) 661 + break; 662 + 663 + /* fetch the next entry: if there are no more: done */ 664 + dst_sg = sg_next(dst_sg); 665 + if (dst_sg == NULL) 666 + break; 667 + 668 + dst_nents--; 669 + dst_avail = sg_dma_len(dst_sg); 670 + } 671 + 672 + /* fetch the next src scatterlist entry */ 673 + if (src_avail == 0) { 674 + 675 + /* no more entries: we're done */ 676 + if (src_nents == 0) 677 + break; 678 + 679 + /* fetch the next 
entry: if there are no more: done */ 680 + src_sg = sg_next(src_sg); 681 + if (src_sg == NULL) 682 + break; 683 + 684 + src_nents--; 685 + src_avail = sg_dma_len(src_sg); 686 + } 687 + } 688 + 689 + new->async_tx.flags = flags; /* client is in control of this ack */ 690 + new->async_tx.cookie = -EBUSY; 691 + 692 + /* Set End-of-link to the last link descriptor of new list */ 693 + set_ld_eol(chan, new); 694 + 695 + return &first->async_tx; 696 + 697 + fail: 698 + if (!first) 699 + return NULL; 700 + 701 + fsldma_free_desc_list_reverse(chan, &first->tx_list); 702 + return NULL; 703 + } 704 + 586 705 /** 587 706 * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction 588 707 * @chan: DMA channel ··· 718 599 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len, 719 600 enum dma_data_direction direction, unsigned long flags) 720 601 { 721 - struct fsldma_chan *chan; 722 - struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL; 723 - struct fsl_dma_slave *slave; 724 - size_t copy; 725 - 726 - int i; 727 - struct scatterlist *sg; 728 - size_t sg_used; 729 - size_t hw_used; 730 - struct fsl_dma_hw_addr *hw; 731 - dma_addr_t dma_dst, dma_src; 732 - 733 - if (!dchan) 734 - return NULL; 735 - 736 - if (!dchan->private) 737 - return NULL; 738 - 739 - chan = to_fsl_chan(dchan); 740 - slave = dchan->private; 741 - 742 - if (list_empty(&slave->addresses)) 743 - return NULL; 744 - 745 - hw = list_first_entry(&slave->addresses, struct fsl_dma_hw_addr, entry); 746 - hw_used = 0; 747 - 748 602 /* 749 - * Build the hardware transaction to copy from the scatterlist to 750 - * the hardware, or from the hardware to the scatterlist 603 + * This operation is not supported on the Freescale DMA controller 751 604 * 752 - * If you are copying from the hardware to the scatterlist and it 753 - * takes two hardware entries to fill an entire page, then both 754 - * hardware entries will be coalesced into the same page 755 - * 756 - * If you are copying from the scatterlist to the hardware and a 757 - * single page can fill two hardware entries, then the data will 758 - * be read out of the page into the first hardware entry, and so on 605 + * However, we need to provide the function pointer to allow the 606 + * device_control() method to work. 
759 607 */ 760 - for_each_sg(sgl, sg, sg_len, i) { 761 - sg_used = 0; 762 - 763 - /* Loop until the entire scatterlist entry is used */ 764 - while (sg_used < sg_dma_len(sg)) { 765 - 766 - /* 767 - * If we've used up the current hardware address/length 768 - * pair, we need to load a new one 769 - * 770 - * This is done in a while loop so that descriptors with 771 - * length == 0 will be skipped 772 - */ 773 - while (hw_used >= hw->length) { 774 - 775 - /* 776 - * If the current hardware entry is the last 777 - * entry in the list, we're finished 778 - */ 779 - if (list_is_last(&hw->entry, &slave->addresses)) 780 - goto finished; 781 - 782 - /* Get the next hardware address/length pair */ 783 - hw = list_entry(hw->entry.next, 784 - struct fsl_dma_hw_addr, entry); 785 - hw_used = 0; 786 - } 787 - 788 - /* Allocate the link descriptor from DMA pool */ 789 - new = fsl_dma_alloc_descriptor(chan); 790 - if (!new) { 791 - dev_err(chan->dev, "No free memory for " 792 - "link descriptor\n"); 793 - goto fail; 794 - } 795 - #ifdef FSL_DMA_LD_DEBUG 796 - dev_dbg(chan->dev, "new link desc alloc %p\n", new); 797 - #endif 798 - 799 - /* 800 - * Calculate the maximum number of bytes to transfer, 801 - * making sure it is less than the DMA controller limit 802 - */ 803 - copy = min_t(size_t, sg_dma_len(sg) - sg_used, 804 - hw->length - hw_used); 805 - copy = min_t(size_t, copy, FSL_DMA_BCR_MAX_CNT); 806 - 807 - /* 808 - * DMA_FROM_DEVICE 809 - * from the hardware to the scatterlist 810 - * 811 - * DMA_TO_DEVICE 812 - * from the scatterlist to the hardware 813 - */ 814 - if (direction == DMA_FROM_DEVICE) { 815 - dma_src = hw->address + hw_used; 816 - dma_dst = sg_dma_address(sg) + sg_used; 817 - } else { 818 - dma_src = sg_dma_address(sg) + sg_used; 819 - dma_dst = hw->address + hw_used; 820 - } 821 - 822 - /* Fill in the descriptor */ 823 - set_desc_cnt(chan, &new->hw, copy); 824 - set_desc_src(chan, &new->hw, dma_src); 825 - set_desc_dst(chan, &new->hw, dma_dst); 826 - 827 - /* 828 - * If this is not the first descriptor, chain the 829 - * current descriptor after the previous descriptor 830 - */ 831 - if (!first) { 832 - first = new; 833 - } else { 834 - set_desc_next(chan, &prev->hw, 835 - new->async_tx.phys); 836 - } 837 - 838 - new->async_tx.cookie = 0; 839 - async_tx_ack(&new->async_tx); 840 - 841 - prev = new; 842 - sg_used += copy; 843 - hw_used += copy; 844 - 845 - /* Insert the link descriptor into the LD ring */ 846 - list_add_tail(&new->node, &first->tx_list); 847 - } 848 - } 849 - 850 - finished: 851 - 852 - /* All of the hardware address/length pairs had length == 0 */ 853 - if (!first || !new) 854 - return NULL; 855 - 856 - new->async_tx.flags = flags; 857 - new->async_tx.cookie = -EBUSY; 858 - 859 - /* Set End-of-link to the last link descriptor of new list */ 860 - set_ld_eol(chan, new); 861 - 862 - /* Enable extra controller features */ 863 - if (chan->set_src_loop_size) 864 - chan->set_src_loop_size(chan, slave->src_loop_size); 865 - 866 - if (chan->set_dst_loop_size) 867 - chan->set_dst_loop_size(chan, slave->dst_loop_size); 868 - 869 - if (chan->toggle_ext_start) 870 - chan->toggle_ext_start(chan, slave->external_start); 871 - 872 - if (chan->toggle_ext_pause) 873 - chan->toggle_ext_pause(chan, slave->external_pause); 874 - 875 - if (chan->set_request_count) 876 - chan->set_request_count(chan, slave->request_count); 877 - 878 - return &first->async_tx; 879 - 880 - fail: 881 - /* If first was not set, then we failed to allocate the very first 882 - * descriptor, and we're done */ 883 - 
if (!first) 884 - return NULL; 885 - 886 - /* 887 - * First is set, so all of the descriptors we allocated have been added 888 - * to first->tx_list, INCLUDING "first" itself. Therefore we 889 - * must traverse the list backwards freeing each descriptor in turn 890 - * 891 - * We're re-using variables for the loop, oh well 892 - */ 893 - fsldma_free_desc_list_reverse(chan, &first->tx_list); 894 608 return NULL; 895 609 } 896 610 897 611 static int fsl_dma_device_control(struct dma_chan *dchan, 898 612 enum dma_ctrl_cmd cmd, unsigned long arg) 899 613 { 614 + struct dma_slave_config *config; 900 615 struct fsldma_chan *chan; 901 616 unsigned long flags; 902 - 903 - /* Only supports DMA_TERMINATE_ALL */ 904 - if (cmd != DMA_TERMINATE_ALL) 905 - return -ENXIO; 617 + int size; 906 618 907 619 if (!dchan) 908 620 return -EINVAL; 909 621 910 622 chan = to_fsl_chan(dchan); 911 623 912 - /* Halt the DMA engine */ 913 - dma_halt(chan); 624 + switch (cmd) { 625 + case DMA_TERMINATE_ALL: 626 + /* Halt the DMA engine */ 627 + dma_halt(chan); 914 628 915 - spin_lock_irqsave(&chan->desc_lock, flags); 629 + spin_lock_irqsave(&chan->desc_lock, flags); 916 630 917 - /* Remove and free all of the descriptors in the LD queue */ 918 - fsldma_free_desc_list(chan, &chan->ld_pending); 919 - fsldma_free_desc_list(chan, &chan->ld_running); 631 + /* Remove and free all of the descriptors in the LD queue */ 632 + fsldma_free_desc_list(chan, &chan->ld_pending); 633 + fsldma_free_desc_list(chan, &chan->ld_running); 920 634 921 - spin_unlock_irqrestore(&chan->desc_lock, flags); 635 + spin_unlock_irqrestore(&chan->desc_lock, flags); 636 + return 0; 637 + 638 + case DMA_SLAVE_CONFIG: 639 + config = (struct dma_slave_config *)arg; 640 + 641 + /* make sure the channel supports setting burst size */ 642 + if (!chan->set_request_count) 643 + return -ENXIO; 644 + 645 + /* we set the controller burst size depending on direction */ 646 + if (config->direction == DMA_TO_DEVICE) 647 + size = config->dst_addr_width * config->dst_maxburst; 648 + else 649 + size = config->src_addr_width * config->src_maxburst; 650 + 651 + chan->set_request_count(chan, size); 652 + return 0; 653 + 654 + case FSLDMA_EXTERNAL_START: 655 + 656 + /* make sure the channel supports external start */ 657 + if (!chan->toggle_ext_start) 658 + return -ENXIO; 659 + 660 + chan->toggle_ext_start(chan, arg); 661 + return 0; 662 + 663 + default: 664 + return -ENXIO; 665 + } 922 666 923 667 return 0; 924 668 } ··· 1309 1327 1310 1328 dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); 1311 1329 dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); 1330 + dma_cap_set(DMA_SG, fdev->common.cap_mask); 1312 1331 dma_cap_set(DMA_SLAVE, fdev->common.cap_mask); 1313 1332 fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; 1314 1333 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1315 1334 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; 1316 1335 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1336 + fdev->common.device_prep_dma_sg = fsl_dma_prep_sg; 1317 1337 fdev->common.device_tx_status = fsl_tx_status; 1318 1338 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1319 1339 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
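
A hedged sketch of the client side of the reworked fsldma device_control(): DMA_SLAVE_CONFIG derives the controller request count as addr_width * maxburst (4-byte words with a maxburst of 16 gives 64 bytes here), and FSLDMA_EXTERNAL_START is the controller-specific command handled in the switch above. The argument value 1 for external start is an assumption:

	static int example_fsldma_config(struct dma_chan *chan)
	{
		struct dma_slave_config cfg = {
			.direction = DMA_TO_DEVICE,
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst = 16,
		};
		int ret;

		/* programs set_request_count() with 4 * 16 = 64 bytes */
		ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
						   (unsigned long)&cfg);
		if (ret)
			return ret;

		/* returns -ENXIO if the channel lacks toggle_ext_start() */
		return chan->device->device_control(chan,
						    FSLDMA_EXTERNAL_START, 1);
	}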
+424
drivers/dma/imx-dma.c
··· 1 + /* 2 + * drivers/dma/imx-dma.c 3 + * 4 + * This file contains a driver for the Freescale i.MX DMA engine 5 + * found on i.MX1/21/27 6 + * 7 + * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 8 + * 9 + * The code contained herein is licensed under the GNU General Public 10 + * License. You may obtain a copy of the GNU General Public License 11 + * Version 2 or later at the following locations: 12 + * 13 + * http://www.opensource.org/licenses/gpl-license.html 14 + * http://www.gnu.org/copyleft/gpl.html 15 + */ 16 + #include <linux/init.h> 17 + #include <linux/types.h> 18 + #include <linux/mm.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/spinlock.h> 21 + #include <linux/device.h> 22 + #include <linux/dma-mapping.h> 23 + #include <linux/slab.h> 24 + #include <linux/platform_device.h> 25 + #include <linux/dmaengine.h> 26 + 27 + #include <asm/irq.h> 28 + #include <mach/dma-v1.h> 29 + #include <mach/hardware.h> 30 + 31 + struct imxdma_channel { 32 + struct imxdma_engine *imxdma; 33 + unsigned int channel; 34 + unsigned int imxdma_channel; 35 + 36 + enum dma_slave_buswidth word_size; 37 + dma_addr_t per_address; 38 + u32 watermark_level; 39 + struct dma_chan chan; 40 + spinlock_t lock; 41 + struct dma_async_tx_descriptor desc; 42 + dma_cookie_t last_completed; 43 + enum dma_status status; 44 + int dma_request; 45 + struct scatterlist *sg_list; 46 + }; 47 + 48 + #define MAX_DMA_CHANNELS 8 49 + 50 + struct imxdma_engine { 51 + struct device *dev; 52 + struct dma_device dma_device; 53 + struct imxdma_channel channel[MAX_DMA_CHANNELS]; 54 + }; 55 + 56 + static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan) 57 + { 58 + return container_of(chan, struct imxdma_channel, chan); 59 + } 60 + 61 + static void imxdma_handle(struct imxdma_channel *imxdmac) 62 + { 63 + if (imxdmac->desc.callback) 64 + imxdmac->desc.callback(imxdmac->desc.callback_param); 65 + imxdmac->last_completed = imxdmac->desc.cookie; 66 + } 67 + 68 + static void imxdma_irq_handler(int channel, void *data) 69 + { 70 + struct imxdma_channel *imxdmac = data; 71 + 72 + imxdmac->status = DMA_SUCCESS; 73 + imxdma_handle(imxdmac); 74 + } 75 + 76 + static void imxdma_err_handler(int channel, void *data, int error) 77 + { 78 + struct imxdma_channel *imxdmac = data; 79 + 80 + imxdmac->status = DMA_ERROR; 81 + imxdma_handle(imxdmac); 82 + } 83 + 84 + static void imxdma_progression(int channel, void *data, 85 + struct scatterlist *sg) 86 + { 87 + struct imxdma_channel *imxdmac = data; 88 + 89 + imxdmac->status = DMA_SUCCESS; 90 + imxdma_handle(imxdmac); 91 + } 92 + 93 + static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 94 + unsigned long arg) 95 + { 96 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 97 + struct dma_slave_config *dmaengine_cfg = (void *)arg; 98 + int ret; 99 + unsigned int mode = 0; 100 + 101 + switch (cmd) { 102 + case DMA_TERMINATE_ALL: 103 + imxdmac->status = DMA_ERROR; 104 + imx_dma_disable(imxdmac->imxdma_channel); 105 + return 0; 106 + case DMA_SLAVE_CONFIG: 107 + if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { 108 + imxdmac->per_address = dmaengine_cfg->src_addr; 109 + imxdmac->watermark_level = dmaengine_cfg->src_maxburst; 110 + imxdmac->word_size = dmaengine_cfg->src_addr_width; 111 + } else { 112 + imxdmac->per_address = dmaengine_cfg->dst_addr; 113 + imxdmac->watermark_level = dmaengine_cfg->dst_maxburst; 114 + imxdmac->word_size = dmaengine_cfg->dst_addr_width; 115 + } 116 + 117 + switch (imxdmac->word_size) { 118 + case 
DMA_SLAVE_BUSWIDTH_1_BYTE: 119 + mode = IMX_DMA_MEMSIZE_8; 120 + break; 121 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 122 + mode = IMX_DMA_MEMSIZE_16; 123 + break; 124 + default: 125 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 126 + mode = IMX_DMA_MEMSIZE_32; 127 + break; 128 + } 129 + ret = imx_dma_config_channel(imxdmac->imxdma_channel, 130 + mode | IMX_DMA_TYPE_FIFO, 131 + IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, 132 + imxdmac->dma_request, 1); 133 + 134 + if (ret) 135 + return ret; 136 + 137 + imx_dma_config_burstlen(imxdmac->imxdma_channel, imxdmac->watermark_level); 138 + 139 + return 0; 140 + default: 141 + return -ENOSYS; 142 + } 143 + 144 + return -EINVAL; 145 + } 146 + 147 + static enum dma_status imxdma_tx_status(struct dma_chan *chan, 148 + dma_cookie_t cookie, 149 + struct dma_tx_state *txstate) 150 + { 151 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 152 + dma_cookie_t last_used; 153 + enum dma_status ret; 154 + 155 + last_used = chan->cookie; 156 + 157 + ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used); 158 + dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0); 159 + 160 + return ret; 161 + } 162 + 163 + static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma) 164 + { 165 + dma_cookie_t cookie = imxdma->chan.cookie; 166 + 167 + if (++cookie < 0) 168 + cookie = 1; 169 + 170 + imxdma->chan.cookie = cookie; 171 + imxdma->desc.cookie = cookie; 172 + 173 + return cookie; 174 + } 175 + 176 + static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx) 177 + { 178 + struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan); 179 + dma_cookie_t cookie; 180 + 181 + spin_lock_irq(&imxdmac->lock); 182 + 183 + cookie = imxdma_assign_cookie(imxdmac); 184 + 185 + imx_dma_enable(imxdmac->imxdma_channel); 186 + 187 + spin_unlock_irq(&imxdmac->lock); 188 + 189 + return cookie; 190 + } 191 + 192 + static int imxdma_alloc_chan_resources(struct dma_chan *chan) 193 + { 194 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 195 + struct imx_dma_data *data = chan->private; 196 + 197 + imxdmac->dma_request = data->dma_request; 198 + 199 + dma_async_tx_descriptor_init(&imxdmac->desc, chan); 200 + imxdmac->desc.tx_submit = imxdma_tx_submit; 201 + /* txd.flags will be overwritten in prep funcs */ 202 + imxdmac->desc.flags = DMA_CTRL_ACK; 203 + 204 + imxdmac->status = DMA_SUCCESS; 205 + 206 + return 0; 207 + } 208 + 209 + static void imxdma_free_chan_resources(struct dma_chan *chan) 210 + { 211 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 212 + 213 + imx_dma_disable(imxdmac->imxdma_channel); 214 + 215 + if (imxdmac->sg_list) { 216 + kfree(imxdmac->sg_list); 217 + imxdmac->sg_list = NULL; 218 + } 219 + } 220 + 221 + static struct dma_async_tx_descriptor *imxdma_prep_slave_sg( 222 + struct dma_chan *chan, struct scatterlist *sgl, 223 + unsigned int sg_len, enum dma_data_direction direction, 224 + unsigned long flags) 225 + { 226 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 227 + struct scatterlist *sg; 228 + int i, ret, dma_length = 0; 229 + unsigned int dmamode; 230 + 231 + if (imxdmac->status == DMA_IN_PROGRESS) 232 + return NULL; 233 + 234 + imxdmac->status = DMA_IN_PROGRESS; 235 + 236 + for_each_sg(sgl, sg, sg_len, i) { 237 + dma_length += sg->length; 238 + } 239 + 240 + if (direction == DMA_FROM_DEVICE) 241 + dmamode = DMA_MODE_READ; 242 + else 243 + dmamode = DMA_MODE_WRITE; 244 + 245 + ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len, 246 + dma_length, imxdmac->per_address, dmamode); 247 + if (ret) 
248 + return NULL; 249 + 250 + return &imxdmac->desc; 251 + } 252 + 253 + static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic( 254 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 255 + size_t period_len, enum dma_data_direction direction) 256 + { 257 + struct imxdma_channel *imxdmac = to_imxdma_chan(chan); 258 + struct imxdma_engine *imxdma = imxdmac->imxdma; 259 + int i, ret; 260 + unsigned int periods = buf_len / period_len; 261 + unsigned int dmamode; 262 + 263 + dev_dbg(imxdma->dev, "%s channel: %d buf_len=%d period_len=%d\n", 264 + __func__, imxdmac->channel, buf_len, period_len); 265 + 266 + if (imxdmac->status == DMA_IN_PROGRESS) 267 + return NULL; 268 + imxdmac->status = DMA_IN_PROGRESS; 269 + 270 + ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel, 271 + imxdma_progression); 272 + if (ret) { 273 + dev_err(imxdma->dev, "Failed to setup the DMA handler\n"); 274 + return NULL; 275 + } 276 + 277 + if (imxdmac->sg_list) 278 + kfree(imxdmac->sg_list); 279 + 280 + imxdmac->sg_list = kcalloc(periods + 1, 281 + sizeof(struct scatterlist), GFP_KERNEL); 282 + if (!imxdmac->sg_list) 283 + return NULL; 284 + 285 + sg_init_table(imxdmac->sg_list, periods); 286 + 287 + for (i = 0; i < periods; i++) { 288 + imxdmac->sg_list[i].page_link = 0; 289 + imxdmac->sg_list[i].offset = 0; 290 + imxdmac->sg_list[i].dma_address = dma_addr; 291 + imxdmac->sg_list[i].length = period_len; 292 + dma_addr += period_len; 293 + } 294 + 295 + /* close the loop */ 296 + imxdmac->sg_list[periods].offset = 0; 297 + imxdmac->sg_list[periods].length = 0; 298 + imxdmac->sg_list[periods].page_link = 299 + ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; 300 + 301 + if (direction == DMA_FROM_DEVICE) 302 + dmamode = DMA_MODE_READ; 303 + else 304 + dmamode = DMA_MODE_WRITE; 305 + 306 + ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods, 307 + IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode); 308 + if (ret) 309 + return NULL; 310 + 311 + return &imxdmac->desc; 312 + } 313 + 314 + static void imxdma_issue_pending(struct dma_chan *chan) 315 + { 316 + /* 317 + * Nothing to do. 
We only have a single descriptor
318 + */
319 + }
320 + 
321 + static int __init imxdma_probe(struct platform_device *pdev)
322 + {
323 + struct imxdma_engine *imxdma;
324 + int ret, i;
325 + 
326 + imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
327 + if (!imxdma)
328 + return -ENOMEM;
329 + 
330 + INIT_LIST_HEAD(&imxdma->dma_device.channels);
331 + 
332 + /* Initialize channel parameters */
333 + for (i = 0; i < MAX_DMA_CHANNELS; i++) {
334 + struct imxdma_channel *imxdmac = &imxdma->channel[i];
335 + 
336 + imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
337 + DMA_PRIO_MEDIUM);
338 + if ((int)imxdmac->imxdma_channel < 0) {
339 + ret = -ENODEV;
340 + goto err_init;
341 + }
342 + 
343 + imx_dma_setup_handlers(imxdmac->imxdma_channel,
344 + imxdma_irq_handler, imxdma_err_handler, imxdmac);
345 + 
346 + imxdmac->imxdma = imxdma;
347 + spin_lock_init(&imxdmac->lock);
348 + 
349 + dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
350 + dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);
351 + 
352 + imxdmac->chan.device = &imxdma->dma_device;
353 + imxdmac->chan.chan_id = i;
354 + imxdmac->channel = i;
355 + 
356 + /* Add the channel to the DMAC list */
357 + list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
358 + }
359 + 
360 + imxdma->dev = &pdev->dev;
361 + imxdma->dma_device.dev = &pdev->dev;
362 + 
363 + imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
364 + imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
365 + imxdma->dma_device.device_tx_status = imxdma_tx_status;
366 + imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
367 + imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
368 + imxdma->dma_device.device_control = imxdma_control;
369 + imxdma->dma_device.device_issue_pending = imxdma_issue_pending;
370 + 
371 + platform_set_drvdata(pdev, imxdma);
372 + 
373 + ret = dma_async_device_register(&imxdma->dma_device);
374 + if (ret) {
375 + dev_err(&pdev->dev, "unable to register\n");
376 + goto err_init;
377 + }
378 + 
379 + return 0;
380 + 
381 + err_init:
382 + while (--i >= 0) {
383 + struct imxdma_channel *imxdmac = &imxdma->channel[i];
384 + imx_dma_free(imxdmac->imxdma_channel);
385 + }
386 + 
387 + kfree(imxdma);
388 + return ret;
389 + }
390 + 
391 + static int __exit imxdma_remove(struct platform_device *pdev)
392 + {
393 + struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
394 + int i;
395 + 
396 + dma_async_device_unregister(&imxdma->dma_device);
397 + 
398 + for (i = 0; i < MAX_DMA_CHANNELS; i++) {
399 + struct imxdma_channel *imxdmac = &imxdma->channel[i];
400 + 
401 + imx_dma_free(imxdmac->imxdma_channel);
402 + }
403 + 
404 + kfree(imxdma);
405 + 
406 + return 0;
407 + }
408 + 
409 + static struct platform_driver imxdma_driver = {
410 + .driver = {
411 + .name = "imx-dma",
412 + },
413 + .remove = __exit_p(imxdma_remove),
414 + };
415 + 
416 + static int __init imxdma_module_init(void)
417 + {
418 + return platform_driver_probe(&imxdma_driver, imxdma_probe);
419 + }
420 + subsys_initcall(imxdma_module_init);
421 + 
422 + MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
423 + MODULE_DESCRIPTION("i.MX DMA driver");
424 + MODULE_LICENSE("GPL");
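
A hedged sketch of how a client might obtain one of these channels: the filter stashes the struct imx_dma_data in chan->private, which imxdma_alloc_chan_resources() above reads for the request line. The request number 42 is a made-up example:

	static bool example_imxdma_filter(struct dma_chan *chan, void *param)
	{
		if (!imx_dma_is_general_purpose(chan))
			return false;
		chan->private = param;
		return true;
	}

	static struct dma_chan *example_imxdma_request(void)
	{
		/* static: chan->private keeps pointing here after the filter */
		static struct imx_dma_data data = {
			.dma_request = 42,	/* hypothetical request line */
			.peripheral_type = IMX_DMATYPE_SSI,
			.priority = DMA_PRIO_HIGH,
		};
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		return dma_request_channel(mask, example_imxdma_filter, &data);
	}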
+1392
drivers/dma/imx-sdma.c
··· 1 + /* 2 + * drivers/dma/imx-sdma.c 3 + * 4 + * This file contains a driver for the Freescale Smart DMA engine 5 + * 6 + * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de> 7 + * 8 + * Based on code from Freescale: 9 + * 10 + * Copyright 2004-2009 Freescale Semiconductor, Inc. All Rights Reserved. 11 + * 12 + * The code contained herein is licensed under the GNU General Public 13 + * License. You may obtain a copy of the GNU General Public License 14 + * Version 2 or later at the following locations: 15 + * 16 + * http://www.opensource.org/licenses/gpl-license.html 17 + * http://www.gnu.org/copyleft/gpl.html 18 + */ 19 + 20 + #include <linux/init.h> 21 + #include <linux/types.h> 22 + #include <linux/mm.h> 23 + #include <linux/interrupt.h> 24 + #include <linux/clk.h> 25 + #include <linux/wait.h> 26 + #include <linux/sched.h> 27 + #include <linux/semaphore.h> 28 + #include <linux/spinlock.h> 29 + #include <linux/device.h> 30 + #include <linux/dma-mapping.h> 31 + #include <linux/firmware.h> 32 + #include <linux/slab.h> 33 + #include <linux/platform_device.h> 34 + #include <linux/dmaengine.h> 35 + 36 + #include <asm/irq.h> 37 + #include <mach/sdma.h> 38 + #include <mach/dma.h> 39 + #include <mach/hardware.h> 40 + 41 + /* SDMA registers */ 42 + #define SDMA_H_C0PTR 0x000 43 + #define SDMA_H_INTR 0x004 44 + #define SDMA_H_STATSTOP 0x008 45 + #define SDMA_H_START 0x00c 46 + #define SDMA_H_EVTOVR 0x010 47 + #define SDMA_H_DSPOVR 0x014 48 + #define SDMA_H_HOSTOVR 0x018 49 + #define SDMA_H_EVTPEND 0x01c 50 + #define SDMA_H_DSPENBL 0x020 51 + #define SDMA_H_RESET 0x024 52 + #define SDMA_H_EVTERR 0x028 53 + #define SDMA_H_INTRMSK 0x02c 54 + #define SDMA_H_PSW 0x030 55 + #define SDMA_H_EVTERRDBG 0x034 56 + #define SDMA_H_CONFIG 0x038 57 + #define SDMA_ONCE_ENB 0x040 58 + #define SDMA_ONCE_DATA 0x044 59 + #define SDMA_ONCE_INSTR 0x048 60 + #define SDMA_ONCE_STAT 0x04c 61 + #define SDMA_ONCE_CMD 0x050 62 + #define SDMA_EVT_MIRROR 0x054 63 + #define SDMA_ILLINSTADDR 0x058 64 + #define SDMA_CHN0ADDR 0x05c 65 + #define SDMA_ONCE_RTB 0x060 66 + #define SDMA_XTRIG_CONF1 0x070 67 + #define SDMA_XTRIG_CONF2 0x074 68 + #define SDMA_CHNENBL0_V2 0x200 69 + #define SDMA_CHNENBL0_V1 0x080 70 + #define SDMA_CHNPRI_0 0x100 71 + 72 + /* 73 + * Buffer descriptor status values. 74 + */ 75 + #define BD_DONE 0x01 76 + #define BD_WRAP 0x02 77 + #define BD_CONT 0x04 78 + #define BD_INTR 0x08 79 + #define BD_RROR 0x10 80 + #define BD_LAST 0x20 81 + #define BD_EXTD 0x80 82 + 83 + /* 84 + * Data Node descriptor status values. 85 + */ 86 + #define DND_END_OF_FRAME 0x80 87 + #define DND_END_OF_XFER 0x40 88 + #define DND_DONE 0x20 89 + #define DND_UNUSED 0x01 90 + 91 + /* 92 + * IPCV2 descriptor status values. 93 + */ 94 + #define BD_IPCV2_END_OF_FRAME 0x40 95 + 96 + #define IPCV2_MAX_NODES 50 97 + /* 98 + * Error bit set in the CCB status field by the SDMA, 99 + * in setbd routine, in case of a transfer error 100 + */ 101 + #define DATA_ERROR 0x10000000 102 + 103 + /* 104 + * Buffer descriptor commands. 
105 + */ 106 + #define C0_ADDR 0x01 107 + #define C0_LOAD 0x02 108 + #define C0_DUMP 0x03 109 + #define C0_SETCTX 0x07 110 + #define C0_GETCTX 0x03 111 + #define C0_SETDM 0x01 112 + #define C0_SETPM 0x04 113 + #define C0_GETDM 0x02 114 + #define C0_GETPM 0x08 115 + /* 116 + * Change endianness indicator in the BD command field 117 + */ 118 + #define CHANGE_ENDIANNESS 0x80 119 + 120 + /* 121 + * Mode/Count of data node descriptors - IPCv2 122 + */ 123 + struct sdma_mode_count { 124 + u32 count : 16; /* size of the buffer pointed by this BD */ 125 + u32 status : 8; /* E,R,I,C,W,D status bits stored here */ 126 + u32 command : 8; /* command mostly used for channel 0 */ 127 + }; 128 + 129 + /* 130 + * Buffer descriptor 131 + */ 132 + struct sdma_buffer_descriptor { 133 + struct sdma_mode_count mode; 134 + u32 buffer_addr; /* address of the buffer described */ 135 + u32 ext_buffer_addr; /* extended buffer address */ 136 + } __attribute__ ((packed)); 137 + 138 + /** 139 + * struct sdma_channel_control - Channel control Block 140 + * 141 + * @current_bd_ptr current buffer descriptor processed 142 + * @base_bd_ptr first element of buffer descriptor array 143 + * @unused padding. The SDMA engine expects an array of 128 byte 144 + * control blocks 145 + */ 146 + struct sdma_channel_control { 147 + u32 current_bd_ptr; 148 + u32 base_bd_ptr; 149 + u32 unused[2]; 150 + } __attribute__ ((packed)); 151 + 152 + /** 153 + * struct sdma_state_registers - SDMA context for a channel 154 + * 155 + * @pc: program counter 156 + * @t: test bit: status of arithmetic & test instruction 157 + * @rpc: return program counter 158 + * @sf: source fault while loading data 159 + * @spc: loop start program counter 160 + * @df: destination fault while storing data 161 + * @epc: loop end program counter 162 + * @lm: loop mode 163 + */ 164 + struct sdma_state_registers { 165 + u32 pc :14; 166 + u32 unused1: 1; 167 + u32 t : 1; 168 + u32 rpc :14; 169 + u32 unused0: 1; 170 + u32 sf : 1; 171 + u32 spc :14; 172 + u32 unused2: 1; 173 + u32 df : 1; 174 + u32 epc :14; 175 + u32 lm : 2; 176 + } __attribute__ ((packed)); 177 + 178 + /** 179 + * struct sdma_context_data - sdma context specific to a channel 180 + * 181 + * @channel_state: channel state bits 182 + * @gReg: general registers 183 + * @mda: burst dma destination address register 184 + * @msa: burst dma source address register 185 + * @ms: burst dma status register 186 + * @md: burst dma data register 187 + * @pda: peripheral dma destination address register 188 + * @psa: peripheral dma source address register 189 + * @ps: peripheral dma status register 190 + * @pd: peripheral dma data register 191 + * @ca: CRC polynomial register 192 + * @cs: CRC accumulator register 193 + * @dda: dedicated core destination address register 194 + * @dsa: dedicated core source address register 195 + * @ds: dedicated core status register 196 + * @dd: dedicated core data register 197 + */ 198 + struct sdma_context_data { 199 + struct sdma_state_registers channel_state; 200 + u32 gReg[8]; 201 + u32 mda; 202 + u32 msa; 203 + u32 ms; 204 + u32 md; 205 + u32 pda; 206 + u32 psa; 207 + u32 ps; 208 + u32 pd; 209 + u32 ca; 210 + u32 cs; 211 + u32 dda; 212 + u32 dsa; 213 + u32 ds; 214 + u32 dd; 215 + u32 scratch0; 216 + u32 scratch1; 217 + u32 scratch2; 218 + u32 scratch3; 219 + u32 scratch4; 220 + u32 scratch5; 221 + u32 scratch6; 222 + u32 scratch7; 223 + } __attribute__ ((packed)); 224 + 225 + #define NUM_BD (int)(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor)) 226 + 227 + struct sdma_engine;
228 + 229 + /** 230 + * struct sdma_channel - housekeeping for a SDMA channel 231 + * 232 + * @sdma pointer to the SDMA engine for this channel 233 + * @channel the channel number, matches dmaengine chan_id 234 + * @direction transfer type. Needed for setting SDMA script 235 + * @peripheral_type Peripheral type. Needed for setting SDMA script 236 + * @event_id0 aka dma request line 237 + * @event_id1 for channels that use 2 events 238 + * @word_size peripheral access size 239 + * @buf_tail ID of the buffer that was processed 240 + * @done channel completion 241 + * @num_bd max NUM_BD. number of descriptors currently handling 242 + */ 243 + struct sdma_channel { 244 + struct sdma_engine *sdma; 245 + unsigned int channel; 246 + enum dma_data_direction direction; 247 + enum sdma_peripheral_type peripheral_type; 248 + unsigned int event_id0; 249 + unsigned int event_id1; 250 + enum dma_slave_buswidth word_size; 251 + unsigned int buf_tail; 252 + struct completion done; 253 + unsigned int num_bd; 254 + struct sdma_buffer_descriptor *bd; 255 + dma_addr_t bd_phys; 256 + unsigned int pc_from_device, pc_to_device; 257 + unsigned long flags; 258 + dma_addr_t per_address; 259 + u32 event_mask0, event_mask1; 260 + u32 watermark_level; 261 + u32 shp_addr, per_addr; 262 + struct dma_chan chan; 263 + spinlock_t lock; 264 + struct dma_async_tx_descriptor desc; 265 + dma_cookie_t last_completed; 266 + enum dma_status status; 267 + }; 268 + 269 + #define IMX_DMA_SG_LOOP (1 << 0) 270 + 271 + #define MAX_DMA_CHANNELS 32 272 + #define MXC_SDMA_DEFAULT_PRIORITY 1 273 + #define MXC_SDMA_MIN_PRIORITY 1 274 + #define MXC_SDMA_MAX_PRIORITY 7 275 + 276 + /** 277 + * struct sdma_script_start_addrs - SDMA script start pointers 278 + * 279 + * start addresses of the different functions in the physical 280 + * address space of the SDMA engine. 281 + */ 282 + struct sdma_script_start_addrs { 283 + u32 ap_2_ap_addr; 284 + u32 ap_2_bp_addr; 285 + u32 ap_2_ap_fixed_addr; 286 + u32 bp_2_ap_addr; 287 + u32 loopback_on_dsp_side_addr; 288 + u32 mcu_interrupt_only_addr; 289 + u32 firi_2_per_addr; 290 + u32 firi_2_mcu_addr; 291 + u32 per_2_firi_addr; 292 + u32 mcu_2_firi_addr; 293 + u32 uart_2_per_addr; 294 + u32 uart_2_mcu_addr; 295 + u32 per_2_app_addr; 296 + u32 mcu_2_app_addr; 297 + u32 per_2_per_addr; 298 + u32 uartsh_2_per_addr; 299 + u32 uartsh_2_mcu_addr; 300 + u32 per_2_shp_addr; 301 + u32 mcu_2_shp_addr; 302 + u32 ata_2_mcu_addr; 303 + u32 mcu_2_ata_addr; 304 + u32 app_2_per_addr; 305 + u32 app_2_mcu_addr; 306 + u32 shp_2_per_addr; 307 + u32 shp_2_mcu_addr; 308 + u32 mshc_2_mcu_addr; 309 + u32 mcu_2_mshc_addr; 310 + u32 spdif_2_mcu_addr; 311 + u32 mcu_2_spdif_addr; 312 + u32 asrc_2_mcu_addr; 313 + u32 ext_mem_2_ipu_addr; 314 + u32 descrambler_addr; 315 + u32 dptc_dvfs_addr; 316 + u32 utra_addr; 317 + u32 ram_code_start_addr; 318 + }; 319 + 320 + #define SDMA_FIRMWARE_MAGIC 0x414d4453 321 + 322 + /** 323 + * struct sdma_firmware_header - Layout of the firmware image 324 + * 325 + * @magic "SDMA" 326 + * @version_major increased whenever layout of struct sdma_script_start_addrs 327 + * changes. 
328 + * @version_minor firmware minor version (for binary compatible changes) 329 + * @script_addrs_start offset of struct sdma_script_start_addrs in this image 330 + * @num_script_addrs Number of script addresses in this image 331 + * @ram_code_start offset of SDMA ram image in this firmware image 332 + * @ram_code_size size of SDMA ram image 333 + * @script_addrs Stores the start address of the SDMA scripts 334 + * (in SDMA memory space) 335 + */ 336 + struct sdma_firmware_header { 337 + u32 magic; 338 + u32 version_major; 339 + u32 version_minor; 340 + u32 script_addrs_start; 341 + u32 num_script_addrs; 342 + u32 ram_code_start; 343 + u32 ram_code_size; 344 + }; 345 + 346 + struct sdma_engine { 347 + struct device *dev; 348 + struct sdma_channel channel[MAX_DMA_CHANNELS]; 349 + struct sdma_channel_control *channel_control; 350 + void __iomem *regs; 351 + unsigned int version; 352 + unsigned int num_events; 353 + struct sdma_context_data *context; 354 + dma_addr_t context_phys; 355 + struct dma_device dma_device; 356 + struct clk *clk; 357 + struct sdma_script_start_addrs *script_addrs; 358 + }; 359 + 360 + #define SDMA_H_CONFIG_DSPDMA (1 << 12) /* indicates if the DSPDMA is used */ 361 + #define SDMA_H_CONFIG_RTD_PINS (1 << 11) /* indicates if Real-Time Debug pins are enabled */ 362 + #define SDMA_H_CONFIG_ACR (1 << 4) /* indicates if AHB freq /core freq = 2 or 1 */ 363 + #define SDMA_H_CONFIG_CSM (3) /* indicates which context switch mode is selected*/ 364 + 365 + static inline u32 chnenbl_ofs(struct sdma_engine *sdma, unsigned int event) 366 + { 367 + u32 chnenbl0 = (sdma->version == 2 ? SDMA_CHNENBL0_V2 : SDMA_CHNENBL0_V1); 368 + 369 + return chnenbl0 + event * 4; 370 + } 371 + 372 + static int sdma_config_ownership(struct sdma_channel *sdmac, 373 + bool event_override, bool mcu_override, bool dsp_override) 374 + { 375 + struct sdma_engine *sdma = sdmac->sdma; 376 + int channel = sdmac->channel; 377 + u32 evt, mcu, dsp; 378 + 379 + if (event_override && mcu_override && dsp_override) 380 + return -EINVAL; 381 + 382 + evt = __raw_readl(sdma->regs + SDMA_H_EVTOVR); 383 + mcu = __raw_readl(sdma->regs + SDMA_H_HOSTOVR); 384 + dsp = __raw_readl(sdma->regs + SDMA_H_DSPOVR); 385 + 386 + if (dsp_override) 387 + dsp &= ~(1 << channel); 388 + else 389 + dsp |= (1 << channel); 390 + 391 + if (event_override) 392 + evt &= ~(1 << channel); 393 + else 394 + evt |= (1 << channel); 395 + 396 + if (mcu_override) 397 + mcu &= ~(1 << channel); 398 + else 399 + mcu |= (1 << channel); 400 + 401 + __raw_writel(evt, sdma->regs + SDMA_H_EVTOVR); 402 + __raw_writel(mcu, sdma->regs + SDMA_H_HOSTOVR); 403 + __raw_writel(dsp, sdma->regs + SDMA_H_DSPOVR); 404 + 405 + return 0; 406 + } 407 + 408 + /* 409 + * sdma_run_channel - run a channel and wait till it's done 410 + */ 411 + static int sdma_run_channel(struct sdma_channel *sdmac) 412 + { 413 + struct sdma_engine *sdma = sdmac->sdma; 414 + int channel = sdmac->channel; 415 + int ret; 416 + 417 + init_completion(&sdmac->done); 418 + 419 + __raw_writel(1 << channel, sdma->regs + SDMA_H_START); 420 + 421 + ret = wait_for_completion_timeout(&sdmac->done, HZ); 422 + 423 + return ret ? 
0 : -ETIMEDOUT; 424 + } 425 + 426 + static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size, 427 + u32 address) 428 + { 429 + struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 430 + void *buf_virt; 431 + dma_addr_t buf_phys; 432 + int ret; 433 + 434 + buf_virt = dma_alloc_coherent(NULL, 435 + size, 436 + &buf_phys, GFP_KERNEL); 437 + if (!buf_virt) 438 + return -ENOMEM; 439 + 440 + bd0->mode.command = C0_SETPM; 441 + bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 442 + bd0->mode.count = size / 2; 443 + bd0->buffer_addr = buf_phys; 444 + bd0->ext_buffer_addr = address; 445 + 446 + memcpy(buf_virt, buf, size); 447 + 448 + ret = sdma_run_channel(&sdma->channel[0]); 449 + 450 + dma_free_coherent(NULL, size, buf_virt, buf_phys); 451 + 452 + return ret; 453 + } 454 + 455 + static void sdma_event_enable(struct sdma_channel *sdmac, unsigned int event) 456 + { 457 + struct sdma_engine *sdma = sdmac->sdma; 458 + int channel = sdmac->channel; 459 + u32 val; 460 + u32 chnenbl = chnenbl_ofs(sdma, event); 461 + 462 + val = __raw_readl(sdma->regs + chnenbl); 463 + val |= (1 << channel); 464 + __raw_writel(val, sdma->regs + chnenbl); 465 + } 466 + 467 + static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event) 468 + { 469 + struct sdma_engine *sdma = sdmac->sdma; 470 + int channel = sdmac->channel; 471 + u32 chnenbl = chnenbl_ofs(sdma, event); 472 + u32 val; 473 + 474 + val = __raw_readl(sdma->regs + chnenbl); 475 + val &= ~(1 << channel); 476 + __raw_writel(val, sdma->regs + chnenbl); 477 + } 478 + 479 + static void sdma_handle_channel_loop(struct sdma_channel *sdmac) 480 + { 481 + struct sdma_buffer_descriptor *bd; 482 + 483 + /* 484 + * loop mode. Iterate over descriptors, re-setup them and 485 + * call callback function. 486 + */ 487 + while (1) { 488 + bd = &sdmac->bd[sdmac->buf_tail]; 489 + 490 + if (bd->mode.status & BD_DONE) 491 + break; 492 + 493 + if (bd->mode.status & BD_RROR) 494 + sdmac->status = DMA_ERROR; 495 + else 496 + sdmac->status = DMA_SUCCESS; 497 + 498 + bd->mode.status |= BD_DONE; 499 + sdmac->buf_tail++; 500 + sdmac->buf_tail %= sdmac->num_bd; 501 + 502 + if (sdmac->desc.callback) 503 + sdmac->desc.callback(sdmac->desc.callback_param); 504 + } 505 + } 506 + 507 + static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac) 508 + { 509 + struct sdma_buffer_descriptor *bd; 510 + int i, error = 0; 511 + 512 + /* 513 + * non loop mode. 
Iterate over all descriptors, collect 514 + * errors and call callback function 515 + */ 516 + for (i = 0; i < sdmac->num_bd; i++) { 517 + bd = &sdmac->bd[i]; 518 + 519 + if (bd->mode.status & (BD_DONE | BD_RROR)) 520 + error = -EIO; 521 + } 522 + 523 + if (error) 524 + sdmac->status = DMA_ERROR; 525 + else 526 + sdmac->status = DMA_SUCCESS; 527 + 528 + if (sdmac->desc.callback) 529 + sdmac->desc.callback(sdmac->desc.callback_param); 530 + sdmac->last_completed = sdmac->desc.cookie; 531 + } 532 + 533 + static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) 534 + { 535 + complete(&sdmac->done); 536 + 537 + /* not interested in channel 0 interrupts */ 538 + if (sdmac->channel == 0) 539 + return; 540 + 541 + if (sdmac->flags & IMX_DMA_SG_LOOP) 542 + sdma_handle_channel_loop(sdmac); 543 + else 544 + mxc_sdma_handle_channel_normal(sdmac); 545 + } 546 + 547 + static irqreturn_t sdma_int_handler(int irq, void *dev_id) 548 + { 549 + struct sdma_engine *sdma = dev_id; 550 + u32 stat; 551 + 552 + stat = __raw_readl(sdma->regs + SDMA_H_INTR); 553 + __raw_writel(stat, sdma->regs + SDMA_H_INTR); 554 + 555 + while (stat) { 556 + int channel = fls(stat) - 1; 557 + struct sdma_channel *sdmac = &sdma->channel[channel]; 558 + 559 + mxc_sdma_handle_channel(sdmac); 560 + 561 + stat &= ~(1 << channel); 562 + } 563 + 564 + return IRQ_HANDLED; 565 + } 566 + 567 + /* 568 + * sets the pc of SDMA script according to the peripheral type 569 + */ 570 + static void sdma_get_pc(struct sdma_channel *sdmac, 571 + enum sdma_peripheral_type peripheral_type) 572 + { 573 + struct sdma_engine *sdma = sdmac->sdma; 574 + int per_2_emi = 0, emi_2_per = 0; 575 + /* 576 + * These are needed once we start to support transfers between 577 + * two peripherals or memory-to-memory transfers 578 + */ 579 + int per_2_per = 0, emi_2_emi = 0; 580 + 581 + sdmac->pc_from_device = 0; 582 + sdmac->pc_to_device = 0; 583 + 584 + switch (peripheral_type) { 585 + case IMX_DMATYPE_MEMORY: 586 + emi_2_emi = sdma->script_addrs->ap_2_ap_addr; 587 + break; 588 + case IMX_DMATYPE_DSP: 589 + emi_2_per = sdma->script_addrs->bp_2_ap_addr; 590 + per_2_emi = sdma->script_addrs->ap_2_bp_addr; 591 + break; 592 + case IMX_DMATYPE_FIRI: 593 + per_2_emi = sdma->script_addrs->firi_2_mcu_addr; 594 + emi_2_per = sdma->script_addrs->mcu_2_firi_addr; 595 + break; 596 + case IMX_DMATYPE_UART: 597 + per_2_emi = sdma->script_addrs->uart_2_mcu_addr; 598 + emi_2_per = sdma->script_addrs->mcu_2_app_addr; 599 + break; 600 + case IMX_DMATYPE_UART_SP: 601 + per_2_emi = sdma->script_addrs->uartsh_2_mcu_addr; 602 + emi_2_per = sdma->script_addrs->mcu_2_shp_addr; 603 + break; 604 + case IMX_DMATYPE_ATA: 605 + per_2_emi = sdma->script_addrs->ata_2_mcu_addr; 606 + emi_2_per = sdma->script_addrs->mcu_2_ata_addr; 607 + break; 608 + case IMX_DMATYPE_CSPI: 609 + case IMX_DMATYPE_EXT: 610 + case IMX_DMATYPE_SSI: 611 + per_2_emi = sdma->script_addrs->app_2_mcu_addr; 612 + emi_2_per = sdma->script_addrs->mcu_2_app_addr; 613 + break; 614 + case IMX_DMATYPE_SSI_SP: 615 + case IMX_DMATYPE_MMC: 616 + case IMX_DMATYPE_SDHC: 617 + case IMX_DMATYPE_CSPI_SP: 618 + case IMX_DMATYPE_ESAI: 619 + case IMX_DMATYPE_MSHC_SP: 620 + per_2_emi = sdma->script_addrs->shp_2_mcu_addr; 621 + emi_2_per = sdma->script_addrs->mcu_2_shp_addr; 622 + break; 623 + case IMX_DMATYPE_ASRC: 624 + per_2_emi = sdma->script_addrs->asrc_2_mcu_addr; 625 + emi_2_per = sdma->script_addrs->asrc_2_mcu_addr; 626 + per_2_per = sdma->script_addrs->per_2_per_addr; 627 + break; 628 + case IMX_DMATYPE_MSHC: 629 + per_2_emi = 
sdma->script_addrs->mshc_2_mcu_addr; 630 + emi_2_per = sdma->script_addrs->mcu_2_mshc_addr; 631 + break; 632 + case IMX_DMATYPE_CCM: 633 + per_2_emi = sdma->script_addrs->dptc_dvfs_addr; 634 + break; 635 + case IMX_DMATYPE_SPDIF: 636 + per_2_emi = sdma->script_addrs->spdif_2_mcu_addr; 637 + emi_2_per = sdma->script_addrs->mcu_2_spdif_addr; 638 + break; 639 + case IMX_DMATYPE_IPU_MEMORY: 640 + emi_2_per = sdma->script_addrs->ext_mem_2_ipu_addr; 641 + break; 642 + default: 643 + break; 644 + } 645 + 646 + sdmac->pc_from_device = per_2_emi; 647 + sdmac->pc_to_device = emi_2_per; 648 + } 649 + 650 + static int sdma_load_context(struct sdma_channel *sdmac) 651 + { 652 + struct sdma_engine *sdma = sdmac->sdma; 653 + int channel = sdmac->channel; 654 + int load_address; 655 + struct sdma_context_data *context = sdma->context; 656 + struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 657 + int ret; 658 + 659 + if (sdmac->direction == DMA_FROM_DEVICE) { 660 + load_address = sdmac->pc_from_device; 661 + } else { 662 + load_address = sdmac->pc_to_device; 663 + } 664 + 665 + if (load_address < 0) 666 + return load_address; 667 + 668 + dev_dbg(sdma->dev, "load_address = %d\n", load_address); 669 + dev_dbg(sdma->dev, "wml = 0x%08x\n", sdmac->watermark_level); 670 + dev_dbg(sdma->dev, "shp_addr = 0x%08x\n", sdmac->shp_addr); 671 + dev_dbg(sdma->dev, "per_addr = 0x%08x\n", sdmac->per_addr); 672 + dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); 673 + dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); 674 + 675 + memset(context, 0, sizeof(*context)); 676 + context->channel_state.pc = load_address; 677 + 678 + /* Send by context the event mask,base address for peripheral 679 + * and watermark level 680 + */ 681 + context->gReg[0] = sdmac->event_mask1; 682 + context->gReg[1] = sdmac->event_mask0; 683 + context->gReg[2] = sdmac->per_addr; 684 + context->gReg[6] = sdmac->shp_addr; 685 + context->gReg[7] = sdmac->watermark_level; 686 + 687 + bd0->mode.command = C0_SETDM; 688 + bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 689 + bd0->mode.count = sizeof(*context) / 4; 690 + bd0->buffer_addr = sdma->context_phys; 691 + bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; 692 + 693 + ret = sdma_run_channel(&sdma->channel[0]); 694 + 695 + return ret; 696 + } 697 + 698 + static void sdma_disable_channel(struct sdma_channel *sdmac) 699 + { 700 + struct sdma_engine *sdma = sdmac->sdma; 701 + int channel = sdmac->channel; 702 + 703 + __raw_writel(1 << channel, sdma->regs + SDMA_H_STATSTOP); 704 + sdmac->status = DMA_ERROR; 705 + } 706 + 707 + static int sdma_config_channel(struct sdma_channel *sdmac) 708 + { 709 + int ret; 710 + 711 + sdma_disable_channel(sdmac); 712 + 713 + sdmac->event_mask0 = 0; 714 + sdmac->event_mask1 = 0; 715 + sdmac->shp_addr = 0; 716 + sdmac->per_addr = 0; 717 + 718 + if (sdmac->event_id0) { 719 + if (sdmac->event_id0 > 32) 720 + return -EINVAL; 721 + sdma_event_enable(sdmac, sdmac->event_id0); 722 + } 723 + 724 + switch (sdmac->peripheral_type) { 725 + case IMX_DMATYPE_DSP: 726 + sdma_config_ownership(sdmac, false, true, true); 727 + break; 728 + case IMX_DMATYPE_MEMORY: 729 + sdma_config_ownership(sdmac, false, true, false); 730 + break; 731 + default: 732 + sdma_config_ownership(sdmac, true, true, false); 733 + break; 734 + } 735 + 736 + sdma_get_pc(sdmac, sdmac->peripheral_type); 737 + 738 + if ((sdmac->peripheral_type != IMX_DMATYPE_MEMORY) && 739 + (sdmac->peripheral_type != IMX_DMATYPE_DSP)) { 740 + /* Handle multiple event 
channels differently */ 741 + if (sdmac->event_id1) { 742 + sdmac->event_mask1 = 1 << (sdmac->event_id1 % 32); 743 + if (sdmac->event_id1 > 31) 744 + sdmac->watermark_level |= 1 << 31; 745 + sdmac->event_mask0 = 1 << (sdmac->event_id0 % 32); 746 + if (sdmac->event_id0 > 31) 747 + sdmac->watermark_level |= 1 << 30; 748 + } else { 749 + sdmac->event_mask0 = 1 << sdmac->event_id0; 750 + sdmac->event_mask1 = 1 << (sdmac->event_id0 - 32); 751 + } 752 + /* Watermark Level */ 753 + sdmac->watermark_level |= sdmac->watermark_level; 754 + /* Address */ 755 + sdmac->shp_addr = sdmac->per_address; 756 + } else { 757 + sdmac->watermark_level = 0; /* FIXME: M3_BASE_ADDRESS */ 758 + } 759 + 760 + ret = sdma_load_context(sdmac); 761 + 762 + return ret; 763 + } 764 + 765 + static int sdma_set_channel_priority(struct sdma_channel *sdmac, 766 + unsigned int priority) 767 + { 768 + struct sdma_engine *sdma = sdmac->sdma; 769 + int channel = sdmac->channel; 770 + 771 + if (priority < MXC_SDMA_MIN_PRIORITY 772 + || priority > MXC_SDMA_MAX_PRIORITY) { 773 + return -EINVAL; 774 + } 775 + 776 + __raw_writel(priority, sdma->regs + SDMA_CHNPRI_0 + 4 * channel); 777 + 778 + return 0; 779 + } 780 + 781 + static int sdma_request_channel(struct sdma_channel *sdmac) 782 + { 783 + struct sdma_engine *sdma = sdmac->sdma; 784 + int channel = sdmac->channel; 785 + int ret = -EBUSY; 786 + 787 + sdmac->bd = dma_alloc_coherent(NULL, PAGE_SIZE, &sdmac->bd_phys, GFP_KERNEL); 788 + if (!sdmac->bd) { 789 + ret = -ENOMEM; 790 + goto out; 791 + } 792 + 793 + memset(sdmac->bd, 0, PAGE_SIZE); 794 + 795 + sdma->channel_control[channel].base_bd_ptr = sdmac->bd_phys; 796 + sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 797 + 798 + clk_enable(sdma->clk); 799 + 800 + sdma_set_channel_priority(sdmac, MXC_SDMA_DEFAULT_PRIORITY); 801 + 802 + init_completion(&sdmac->done); 803 + 804 + sdmac->buf_tail = 0; 805 + 806 + return 0; 807 + out: 808 + 809 + return ret; 810 + } 811 + 812 + static void sdma_enable_channel(struct sdma_engine *sdma, int channel) 813 + { 814 + __raw_writel(1 << channel, sdma->regs + SDMA_H_START); 815 + } 816 + 817 + static dma_cookie_t sdma_assign_cookie(struct sdma_channel *sdma) 818 + { 819 + dma_cookie_t cookie = sdma->chan.cookie; 820 + 821 + if (++cookie < 0) 822 + cookie = 1; 823 + 824 + sdma->chan.cookie = cookie; 825 + sdma->desc.cookie = cookie; 826 + 827 + return cookie; 828 + } 829 + 830 + static struct sdma_channel *to_sdma_chan(struct dma_chan *chan) 831 + { 832 + return container_of(chan, struct sdma_channel, chan); 833 + } 834 + 835 + static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx) 836 + { 837 + struct sdma_channel *sdmac = to_sdma_chan(tx->chan); 838 + struct sdma_engine *sdma = sdmac->sdma; 839 + dma_cookie_t cookie; 840 + 841 + spin_lock_irq(&sdmac->lock); 842 + 843 + cookie = sdma_assign_cookie(sdmac); 844 + 845 + sdma_enable_channel(sdma, tx->chan->chan_id); 846 + 847 + spin_unlock_irq(&sdmac->lock); 848 + 849 + return cookie; 850 + } 851 + 852 + static int sdma_alloc_chan_resources(struct dma_chan *chan) 853 + { 854 + struct sdma_channel *sdmac = to_sdma_chan(chan); 855 + struct imx_dma_data *data = chan->private; 856 + int prio, ret; 857 + 858 + /* No need to execute this for internal channel 0 */ 859 + if (chan->chan_id == 0) 860 + return 0; 861 + 862 + if (!data) 863 + return -EINVAL; 864 + 865 + switch (data->priority) { 866 + case DMA_PRIO_HIGH: 867 + prio = 3; 868 + break; 869 + case DMA_PRIO_MEDIUM: 870 + prio = 2; 871 + break; 872 + case DMA_PRIO_LOW: 
873 + default: 874 + prio = 1; 875 + break; 876 + } 877 + 878 + sdmac->peripheral_type = data->peripheral_type; 879 + sdmac->event_id0 = data->dma_request; 880 + ret = sdma_set_channel_priority(sdmac, prio); 881 + if (ret) 882 + return ret; 883 + 884 + ret = sdma_request_channel(sdmac); 885 + if (ret) 886 + return ret; 887 + 888 + dma_async_tx_descriptor_init(&sdmac->desc, chan); 889 + sdmac->desc.tx_submit = sdma_tx_submit; 890 + /* txd.flags will be overwritten in prep funcs */ 891 + sdmac->desc.flags = DMA_CTRL_ACK; 892 + 893 + return 0; 894 + } 895 + 896 + static void sdma_free_chan_resources(struct dma_chan *chan) 897 + { 898 + struct sdma_channel *sdmac = to_sdma_chan(chan); 899 + struct sdma_engine *sdma = sdmac->sdma; 900 + 901 + sdma_disable_channel(sdmac); 902 + 903 + if (sdmac->event_id0) 904 + sdma_event_disable(sdmac, sdmac->event_id0); 905 + if (sdmac->event_id1) 906 + sdma_event_disable(sdmac, sdmac->event_id1); 907 + 908 + sdmac->event_id0 = 0; 909 + sdmac->event_id1 = 0; 910 + 911 + sdma_set_channel_priority(sdmac, 0); 912 + 913 + dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); 914 + 915 + clk_disable(sdma->clk); 916 + } 917 + 918 + static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 919 + struct dma_chan *chan, struct scatterlist *sgl, 920 + unsigned int sg_len, enum dma_data_direction direction, 921 + unsigned long flags) 922 + { 923 + struct sdma_channel *sdmac = to_sdma_chan(chan); 924 + struct sdma_engine *sdma = sdmac->sdma; 925 + int ret, i, count; 926 + int channel = chan->chan_id; 927 + struct scatterlist *sg; 928 + 929 + if (sdmac->status == DMA_IN_PROGRESS) 930 + return NULL; 931 + sdmac->status = DMA_IN_PROGRESS; 932 + 933 + sdmac->flags = 0; 934 + 935 + dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 936 + sg_len, channel); 937 + 938 + sdmac->direction = direction; 939 + ret = sdma_load_context(sdmac); 940 + if (ret) 941 + goto err_out; 942 + 943 + if (sg_len > NUM_BD) { 944 + dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 945 + channel, sg_len, NUM_BD); 946 + ret = -EINVAL; 947 + goto err_out; 948 + } 949 + 950 + for_each_sg(sgl, sg, sg_len, i) { 951 + struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 952 + int param; 953 + 954 + bd->buffer_addr = sg->dma_address; 955 + 956 + count = sg->length; 957 + 958 + if (count > 0xffff) { 959 + dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 960 + channel, count, 0xffff); 961 + ret = -EINVAL; 962 + goto err_out; 963 + } 964 + 965 + bd->mode.count = count; 966 + 967 + if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) { 968 + ret = -EINVAL; 969 + goto err_out; 970 + } 971 + if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 972 + bd->mode.command = 0; 973 + else 974 + bd->mode.command = sdmac->word_size; 975 + 976 + param = BD_DONE | BD_EXTD | BD_CONT; 977 + 978 + if (sdmac->flags & IMX_DMA_SG_LOOP) { 979 + param |= BD_INTR; 980 + if (i + 1 == sg_len) 981 + param |= BD_WRAP; 982 + } 983 + 984 + if (i + 1 == sg_len) 985 + param |= BD_INTR; 986 + 987 + dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 988 + i, count, sg->dma_address, 989 + param & BD_WRAP ? "wrap" : "", 990 + param & BD_INTR ? 
" intr" : ""); 991 + 992 + bd->mode.status = param; 993 + } 994 + 995 + sdmac->num_bd = sg_len; 996 + sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 997 + 998 + return &sdmac->desc; 999 + err_out: 1000 + return NULL; 1001 + } 1002 + 1003 + static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic( 1004 + struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len, 1005 + size_t period_len, enum dma_data_direction direction) 1006 + { 1007 + struct sdma_channel *sdmac = to_sdma_chan(chan); 1008 + struct sdma_engine *sdma = sdmac->sdma; 1009 + int num_periods = buf_len / period_len; 1010 + int channel = chan->chan_id; 1011 + int ret, i = 0, buf = 0; 1012 + 1013 + dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel); 1014 + 1015 + if (sdmac->status == DMA_IN_PROGRESS) 1016 + return NULL; 1017 + 1018 + sdmac->status = DMA_IN_PROGRESS; 1019 + 1020 + sdmac->flags |= IMX_DMA_SG_LOOP; 1021 + sdmac->direction = direction; 1022 + ret = sdma_load_context(sdmac); 1023 + if (ret) 1024 + goto err_out; 1025 + 1026 + if (num_periods > NUM_BD) { 1027 + dev_err(sdma->dev, "SDMA channel %d: maximum number of sg exceeded: %d > %d\n", 1028 + channel, num_periods, NUM_BD); 1029 + goto err_out; 1030 + } 1031 + 1032 + if (period_len > 0xffff) { 1033 + dev_err(sdma->dev, "SDMA channel %d: maximum period size exceeded: %d > %d\n", 1034 + channel, period_len, 0xffff); 1035 + goto err_out; 1036 + } 1037 + 1038 + while (buf < buf_len) { 1039 + struct sdma_buffer_descriptor *bd = &sdmac->bd[i]; 1040 + int param; 1041 + 1042 + bd->buffer_addr = dma_addr; 1043 + 1044 + bd->mode.count = period_len; 1045 + 1046 + if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) 1047 + goto err_out; 1048 + if (sdmac->word_size == DMA_SLAVE_BUSWIDTH_4_BYTES) 1049 + bd->mode.command = 0; 1050 + else 1051 + bd->mode.command = sdmac->word_size; 1052 + 1053 + param = BD_DONE | BD_EXTD | BD_CONT | BD_INTR; 1054 + if (i + 1 == num_periods) 1055 + param |= BD_WRAP; 1056 + 1057 + dev_dbg(sdma->dev, "entry %d: count: %d dma: 0x%08x %s%s\n", 1058 + i, period_len, dma_addr, 1059 + param & BD_WRAP ? "wrap" : "", 1060 + param & BD_INTR ? 
" intr" : ""); 1061 + 1062 + bd->mode.status = param; 1063 + 1064 + dma_addr += period_len; 1065 + buf += period_len; 1066 + 1067 + i++; 1068 + } 1069 + 1070 + sdmac->num_bd = num_periods; 1071 + sdma->channel_control[channel].current_bd_ptr = sdmac->bd_phys; 1072 + 1073 + return &sdmac->desc; 1074 + err_out: 1075 + sdmac->status = DMA_ERROR; 1076 + return NULL; 1077 + } 1078 + 1079 + static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1080 + unsigned long arg) 1081 + { 1082 + struct sdma_channel *sdmac = to_sdma_chan(chan); 1083 + struct dma_slave_config *dmaengine_cfg = (void *)arg; 1084 + 1085 + switch (cmd) { 1086 + case DMA_TERMINATE_ALL: 1087 + sdma_disable_channel(sdmac); 1088 + return 0; 1089 + case DMA_SLAVE_CONFIG: 1090 + if (dmaengine_cfg->direction == DMA_FROM_DEVICE) { 1091 + sdmac->per_address = dmaengine_cfg->src_addr; 1092 + sdmac->watermark_level = dmaengine_cfg->src_maxburst; 1093 + sdmac->word_size = dmaengine_cfg->src_addr_width; 1094 + } else { 1095 + sdmac->per_address = dmaengine_cfg->dst_addr; 1096 + sdmac->watermark_level = dmaengine_cfg->dst_maxburst; 1097 + sdmac->word_size = dmaengine_cfg->dst_addr_width; 1098 + } 1099 + return sdma_config_channel(sdmac); 1100 + default: 1101 + return -ENOSYS; 1102 + } 1103 + 1104 + return -EINVAL; 1105 + } 1106 + 1107 + static enum dma_status sdma_tx_status(struct dma_chan *chan, 1108 + dma_cookie_t cookie, 1109 + struct dma_tx_state *txstate) 1110 + { 1111 + struct sdma_channel *sdmac = to_sdma_chan(chan); 1112 + dma_cookie_t last_used; 1113 + enum dma_status ret; 1114 + 1115 + last_used = chan->cookie; 1116 + 1117 + ret = dma_async_is_complete(cookie, sdmac->last_completed, last_used); 1118 + dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0); 1119 + 1120 + return ret; 1121 + } 1122 + 1123 + static void sdma_issue_pending(struct dma_chan *chan) 1124 + { 1125 + /* 1126 + * Nothing to do. We only have a single descriptor 1127 + */ 1128 + } 1129 + 1130 + static int __init sdma_init(struct sdma_engine *sdma, 1131 + void *ram_code, int ram_code_size) 1132 + { 1133 + int i, ret; 1134 + dma_addr_t ccb_phys; 1135 + 1136 + switch (sdma->version) { 1137 + case 1: 1138 + sdma->num_events = 32; 1139 + break; 1140 + case 2: 1141 + sdma->num_events = 48; 1142 + break; 1143 + default: 1144 + dev_err(sdma->dev, "Unknown version %d. 
aborting\n", sdma->version); 1145 + return -ENODEV; 1146 + } 1147 + 1148 + clk_enable(sdma->clk); 1149 + 1150 + /* Be sure SDMA has not started yet */ 1151 + __raw_writel(0, sdma->regs + SDMA_H_C0PTR); 1152 + 1153 + sdma->channel_control = dma_alloc_coherent(NULL, 1154 + MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control) + 1155 + sizeof(struct sdma_context_data), 1156 + &ccb_phys, GFP_KERNEL); 1157 + 1158 + if (!sdma->channel_control) { 1159 + ret = -ENOMEM; 1160 + goto err_dma_alloc; 1161 + } 1162 + 1163 + sdma->context = (void *)sdma->channel_control + 1164 + MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 1165 + sdma->context_phys = ccb_phys + 1166 + MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control); 1167 + 1168 + /* Zero-out the CCB structures array just allocated */ 1169 + memset(sdma->channel_control, 0, 1170 + MAX_DMA_CHANNELS * sizeof (struct sdma_channel_control)); 1171 + 1172 + /* disable all channels */ 1173 + for (i = 0; i < sdma->num_events; i++) 1174 + __raw_writel(0, sdma->regs + chnenbl_ofs(sdma, i)); 1175 + 1176 + /* All channels have priority 0 */ 1177 + for (i = 0; i < MAX_DMA_CHANNELS; i++) 1178 + __raw_writel(0, sdma->regs + SDMA_CHNPRI_0 + i * 4); 1179 + 1180 + ret = sdma_request_channel(&sdma->channel[0]); 1181 + if (ret) 1182 + goto err_dma_alloc; 1183 + 1184 + sdma_config_ownership(&sdma->channel[0], false, true, false); 1185 + 1186 + /* Set Command Channel (Channel Zero) */ 1187 + __raw_writel(0x4050, sdma->regs + SDMA_CHN0ADDR); 1188 + 1189 + /* Set bits of CONFIG register but with static context switching */ 1190 + /* FIXME: Check whether to set ACR bit depending on clock ratios */ 1191 + __raw_writel(0, sdma->regs + SDMA_H_CONFIG); 1192 + 1193 + __raw_writel(ccb_phys, sdma->regs + SDMA_H_C0PTR); 1194 + 1195 + /* download the RAM image for SDMA */ 1196 + sdma_load_script(sdma, ram_code, 1197 + ram_code_size, 1198 + sdma->script_addrs->ram_code_start_addr); 1199 + 1200 + /* Set bits of CONFIG register with given context switching mode */ 1201 + __raw_writel(SDMA_H_CONFIG_CSM, sdma->regs + SDMA_H_CONFIG); 1202 + 1203 + /* Initializes channel's priorities */ 1204 + sdma_set_channel_priority(&sdma->channel[0], 7); 1205 + 1206 + clk_disable(sdma->clk); 1207 + 1208 + return 0; 1209 + 1210 + err_dma_alloc: 1211 + clk_disable(sdma->clk); 1212 + dev_err(sdma->dev, "initialisation failed with %d\n", ret); 1213 + return ret; 1214 + } 1215 + 1216 + static int __init sdma_probe(struct platform_device *pdev) 1217 + { 1218 + int ret; 1219 + const struct firmware *fw; 1220 + const struct sdma_firmware_header *header; 1221 + const struct sdma_script_start_addrs *addr; 1222 + int irq; 1223 + unsigned short *ram_code; 1224 + struct resource *iores; 1225 + struct sdma_platform_data *pdata = pdev->dev.platform_data; 1226 + char *fwname; 1227 + int i; 1228 + dma_cap_mask_t mask; 1229 + struct sdma_engine *sdma; 1230 + 1231 + sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1232 + if (!sdma) 1233 + return -ENOMEM; 1234 + 1235 + sdma->dev = &pdev->dev; 1236 + 1237 + iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1238 + irq = platform_get_irq(pdev, 0); 1239 + if (!iores || irq < 0 || !pdata) { 1240 + ret = -EINVAL; 1241 + goto err_irq; 1242 + } 1243 + 1244 + if (!request_mem_region(iores->start, resource_size(iores), pdev->name)) { 1245 + ret = -EBUSY; 1246 + goto err_request_region; 1247 + } 1248 + 1249 + sdma->clk = clk_get(&pdev->dev, NULL); 1250 + if (IS_ERR(sdma->clk)) { 1251 + ret = PTR_ERR(sdma->clk); 1252 + goto err_clk; 1253 + } 1254 + 1255 + sdma->regs = 
ioremap(iores->start, resource_size(iores)); 1256 + if (!sdma->regs) { 1257 + ret = -ENOMEM; 1258 + goto err_ioremap; 1259 + } 1260 + 1261 + ret = request_irq(irq, sdma_int_handler, 0, "sdma", sdma); 1262 + if (ret) 1263 + goto err_request_irq; 1264 + 1265 + fwname = kasprintf(GFP_KERNEL, "sdma-%s-to%d.bin", 1266 + pdata->cpu_name, pdata->to_version); 1267 + if (!fwname) { 1268 + ret = -ENOMEM; 1269 + goto err_cputype; 1270 + } 1271 + 1272 + ret = request_firmware(&fw, fwname, &pdev->dev); 1273 + if (ret) { 1274 + dev_err(&pdev->dev, "request firmware \"%s\" failed with %d\n", 1275 + fwname, ret); 1276 + kfree(fwname); 1277 + goto err_cputype; 1278 + } 1279 + kfree(fwname); 1280 + 1281 + if (fw->size < sizeof(*header)) 1282 + { ret = -EINVAL; goto err_firmware; } 1283 + 1284 + header = (struct sdma_firmware_header *)fw->data; 1285 + 1286 + if (header->magic != SDMA_FIRMWARE_MAGIC) 1287 + { ret = -EINVAL; goto err_firmware; } 1288 + if (header->ram_code_start + header->ram_code_size > fw->size) 1289 + { ret = -EINVAL; goto err_firmware; } 1290 + 1291 + addr = (void *)header + header->script_addrs_start; 1292 + ram_code = (void *)header + header->ram_code_start; 1293 + sdma->script_addrs = kmalloc(sizeof(*addr), GFP_KERNEL); 1294 + if (!sdma->script_addrs) 1295 + { ret = -ENOMEM; goto err_firmware; } 1296 + memcpy(sdma->script_addrs, addr, sizeof(*addr)); 1297 + 1298 + sdma->version = pdata->sdma_version; 1299 + 1300 + INIT_LIST_HEAD(&sdma->dma_device.channels); 1301 + /* Initialize channel parameters */ 1302 + for (i = 0; i < MAX_DMA_CHANNELS; i++) { 1303 + struct sdma_channel *sdmac = &sdma->channel[i]; 1304 + 1305 + sdmac->sdma = sdma; 1306 + spin_lock_init(&sdmac->lock); 1307 + 1308 + dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask); 1309 + dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask); 1310 + 1311 + sdmac->chan.device = &sdma->dma_device; 1312 + sdmac->chan.chan_id = i; 1313 + sdmac->channel = i; 1314 + 1315 + /* Add the channel to the DMAC list */ 1316 + list_add_tail(&sdmac->chan.device_node, &sdma->dma_device.channels); 1317 + } 1318 + 1319 + ret = sdma_init(sdma, ram_code, header->ram_code_size); 1320 + if (ret) 1321 + goto err_init; 1322 + 1323 + sdma->dma_device.dev = &pdev->dev; 1324 + 1325 + sdma->dma_device.device_alloc_chan_resources = sdma_alloc_chan_resources; 1326 + sdma->dma_device.device_free_chan_resources = sdma_free_chan_resources; 1327 + sdma->dma_device.device_tx_status = sdma_tx_status; 1328 + sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; 1329 + sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; 1330 + sdma->dma_device.device_control = sdma_control; 1331 + sdma->dma_device.device_issue_pending = sdma_issue_pending; 1332 + 1333 + ret = dma_async_device_register(&sdma->dma_device); 1334 + if (ret) { 1335 + dev_err(&pdev->dev, "unable to register\n"); 1336 + goto err_init; 1337 + } 1338 + 1339 + dev_info(&pdev->dev, "initialized (firmware %d.%d)\n", 1340 + header->version_major, 1341 + header->version_minor); 1342 + 1343 + /* request channel 0. This is an internal control channel 1344 + * to the SDMA engine and not available to clients. 
1345 + */ 1346 + dma_cap_zero(mask); 1347 + dma_cap_set(DMA_SLAVE, mask); 1348 + dma_request_channel(mask, NULL, NULL); 1349 + 1350 + release_firmware(fw); 1351 + 1352 + return 0; 1353 + 1354 + err_init: 1355 + kfree(sdma->script_addrs); 1356 + err_firmware: 1357 + release_firmware(fw); 1358 + err_cputype: 1359 + free_irq(irq, sdma); 1360 + err_request_irq: 1361 + iounmap(sdma->regs); 1362 + err_ioremap: 1363 + clk_put(sdma->clk); 1364 + err_clk: 1365 + release_mem_region(iores->start, resource_size(iores)); 1366 + err_request_region: 1367 + err_irq: 1368 + kfree(sdma); 1369 + return ret; 1370 + } 1371 + 1372 + static int __exit sdma_remove(struct platform_device *pdev) 1373 + { 1374 + return -EBUSY; 1375 + } 1376 + 1377 + static struct platform_driver sdma_driver = { 1378 + .driver = { 1379 + .name = "imx-sdma", 1380 + }, 1381 + .remove = __exit_p(sdma_remove), 1382 + }; 1383 + 1384 + static int __init sdma_module_init(void) 1385 + { 1386 + return platform_driver_probe(&sdma_driver, sdma_probe); 1387 + } 1388 + subsys_initcall(sdma_module_init); 1389 + 1390 + MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>"); 1391 + MODULE_DESCRIPTION("i.MX SDMA driver"); 1392 + MODULE_LICENSE("GPL");
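Two pieces of the file above define its user-visible contract: sdma_control() maps DMA_SLAVE_CONFIG onto the channel's peripheral address, word size and watermark level, and sdma_prep_dma_cyclic() builds a descriptor ring whose last entry carries BD_WRAP, so the engine loops with an interrupt per period. A hedged sketch of a client driving that path follows; this kernel generation invokes the device_control and prep hooks directly, and fifo_phys, buf_phys, BUF_LEN, PERIOD_LEN, period_done() and ctx are all hypothetical (BUF_LEN/PERIOD_LEN must not exceed NUM_BD periods).

	/* Editor's sketch, not part of the patch. */
	struct dma_slave_config cfg = {
		.direction	= DMA_FROM_DEVICE,
		.src_addr	= fifo_phys,	/* peripheral FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst	= 4,		/* becomes the watermark level */
	};
	struct dma_async_tx_descriptor *desc;

	if (chan->device->device_control(chan, DMA_SLAVE_CONFIG,
					 (unsigned long)&cfg))
		return -EINVAL;

	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys, BUF_LEN,
						    PERIOD_LEN, DMA_FROM_DEVICE);
	if (!desc)
		return -EINVAL;

	desc->callback = period_done;	/* called once per elapsed period */
	desc->callback_param = ctx;
	desc->tx_submit(desc);		/* sdma_tx_submit() starts the channel */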
+390 -86
drivers/dma/intel_mid_dma.c
··· 25 25 */ 26 26 #include <linux/pci.h> 27 27 #include <linux/interrupt.h> 28 + #include <linux/pm_runtime.h> 28 29 #include <linux/intel_mid_dma.h> 29 30 30 31 #define MAX_CHAN 4 /*max ch across controllers*/ ··· 92 91 int byte_width = 0, block_ts = 0; 93 92 94 93 switch (tx_width) { 95 - case LNW_DMA_WIDTH_8BIT: 94 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 96 95 byte_width = 1; 97 96 break; 98 - case LNW_DMA_WIDTH_16BIT: 97 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 99 98 byte_width = 2; 100 99 break; 101 - case LNW_DMA_WIDTH_32BIT: 100 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 102 101 default: 103 102 byte_width = 4; 104 103 break; ··· 248 247 struct middma_device *mid = to_middma_device(midc->chan.device); 249 248 250 249 /* channel is idle */ 251 - if (midc->in_use && test_ch_en(midc->dma_base, midc->ch_id)) { 250 + if (midc->busy && test_ch_en(midc->dma_base, midc->ch_id)) { 252 251 /*error*/ 253 252 pr_err("ERR_MDMA: channel is busy in start\n"); 254 253 /* The tasklet will hopefully advance the queue... */ 255 254 return; 256 255 } 257 - 256 + midc->busy = true; 258 257 /*write registers and en*/ 259 258 iowrite32(first->sar, midc->ch_regs + SAR); 260 259 iowrite32(first->dar, midc->ch_regs + DAR); 260 + iowrite32(first->lli_phys, midc->ch_regs + LLP); 261 261 iowrite32(first->cfg_hi, midc->ch_regs + CFG_HIGH); 262 262 iowrite32(first->cfg_lo, midc->ch_regs + CFG_LOW); 263 263 iowrite32(first->ctl_lo, midc->ch_regs + CTL_LOW); ··· 266 264 pr_debug("MDMA:TX SAR %x,DAR %x,CFGL %x,CFGH %x,CTLH %x, CTLL %x\n", 267 265 (int)first->sar, (int)first->dar, first->cfg_hi, 268 266 first->cfg_lo, first->ctl_hi, first->ctl_lo); 267 + first->status = DMA_IN_PROGRESS; 269 268 270 269 iowrite32(ENABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 271 - first->status = DMA_IN_PROGRESS; 272 270 } 273 271 274 272 /** ··· 285 283 { 286 284 struct dma_async_tx_descriptor *txd = &desc->txd; 287 285 dma_async_tx_callback callback_txd = NULL; 286 + struct intel_mid_dma_lli *llitem; 288 287 void *param_txd = NULL; 289 288 290 289 midc->completed = txd->cookie; 291 290 callback_txd = txd->callback; 292 291 param_txd = txd->callback_param; 293 292 294 - list_move(&desc->desc_node, &midc->free_list); 295 - 293 + if (desc->lli != NULL) { 294 + /*clear the DONE bit of completed LLI in memory*/ 295 + llitem = desc->lli + desc->current_lli; 296 + llitem->ctl_hi &= CLEAR_DONE; 297 + if (desc->current_lli < desc->lli_length-1) 298 + (desc->current_lli)++; 299 + else 300 + desc->current_lli = 0; 301 + } 296 302 spin_unlock_bh(&midc->lock); 297 303 if (callback_txd) { 298 304 pr_debug("MDMA: TXD callback set ... calling\n"); 299 305 callback_txd(param_txd); 300 - spin_lock_bh(&midc->lock); 301 - return; 306 + } 307 + if (midc->raw_tfr) { 308 + desc->status = DMA_SUCCESS; 309 + if (desc->lli != NULL) { 310 + pci_pool_free(desc->lli_pool, desc->lli, 311 + desc->lli_phys); 312 + pci_pool_destroy(desc->lli_pool); 313 + } 314 + list_move(&desc->desc_node, &midc->free_list); 315 + midc->busy = false; 302 316 } 303 317 spin_lock_bh(&midc->lock); 304 318 ··· 335 317 336 318 /*tx is complete*/ 337 319 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 338 - if (desc->status == DMA_IN_PROGRESS) { 339 - desc->status = DMA_SUCCESS; 320 + if (desc->status == DMA_IN_PROGRESS) 340 321 midc_descriptor_complete(midc, desc); 341 - } 342 322 } 343 323 return; 344 - } 324 + } 325 + /** 326 + * midc_lli_fill_sg - Helper function to convert 327 + * SG list to Linked List Items. 
328 + *@midc: Channel 329 + *@desc: DMA descriptor 330 + *@sglist: Pointer to SG list 331 + *@sglen: SG list length 332 + *@flags: DMA transaction flags 333 + * 334 + * Walk through the SG list and convert the SG list into Linked 335 + * List Items (LLI). 336 + */ 337 + static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc, 338 + struct intel_mid_dma_desc *desc, 339 + struct scatterlist *sglist, 340 + unsigned int sglen, 341 + unsigned int flags) 342 + { 343 + struct intel_mid_dma_slave *mids; 344 + struct scatterlist *sg; 345 + dma_addr_t lli_next, sg_phy_addr; 346 + struct intel_mid_dma_lli *lli_bloc_desc; 347 + union intel_mid_dma_ctl_lo ctl_lo; 348 + union intel_mid_dma_ctl_hi ctl_hi; 349 + int i; 345 350 351 + pr_debug("MDMA: Entered midc_lli_fill_sg\n"); 352 + mids = midc->mid_slave; 353 + 354 + lli_bloc_desc = desc->lli; 355 + lli_next = desc->lli_phys; 356 + 357 + ctl_lo.ctl_lo = desc->ctl_lo; 358 + ctl_hi.ctl_hi = desc->ctl_hi; 359 + for_each_sg(sglist, sg, sglen, i) { 360 + /*Populate CTL_LOW and LLI values*/ 361 + if (i != sglen - 1) { 362 + lli_next = lli_next + 363 + sizeof(struct intel_mid_dma_lli); 364 + } else { 365 + /*Check for circular list, otherwise terminate LLI to ZERO*/ 366 + if (flags & DMA_PREP_CIRCULAR_LIST) { 367 + pr_debug("MDMA: LLI is configured in circular mode\n"); 368 + lli_next = desc->lli_phys; 369 + } else { 370 + lli_next = 0; 371 + ctl_lo.ctlx.llp_dst_en = 0; 372 + ctl_lo.ctlx.llp_src_en = 0; 373 + } 374 + } 375 + /*Populate CTL_HI values*/ 376 + ctl_hi.ctlx.block_ts = get_block_ts(sg->length, 377 + desc->width, 378 + midc->dma->block_size); 379 + /*Populate SAR and DAR values*/ 380 + sg_phy_addr = sg_phys(sg); 381 + if (desc->dirn == DMA_TO_DEVICE) { 382 + lli_bloc_desc->sar = sg_phy_addr; 383 + lli_bloc_desc->dar = mids->dma_slave.dst_addr; 384 + } else if (desc->dirn == DMA_FROM_DEVICE) { 385 + lli_bloc_desc->sar = mids->dma_slave.src_addr; 386 + lli_bloc_desc->dar = sg_phy_addr; 387 + } 388 + /*Copy values into block descriptor in system memory*/ 389 + lli_bloc_desc->llp = lli_next; 390 + lli_bloc_desc->ctl_lo = ctl_lo.ctl_lo; 391 + lli_bloc_desc->ctl_hi = ctl_hi.ctl_hi; 392 + 393 + lli_bloc_desc++; 394 + } 395 + /*Copy very first LLI values to descriptor*/ 396 + desc->ctl_lo = desc->lli->ctl_lo; 397 + desc->ctl_hi = desc->lli->ctl_hi; 398 + desc->sar = desc->lli->sar; 399 + desc->dar = desc->lli->dar; 400 + 401 + return 0; 402 + } 346 403 /***************************************************************************** 347 404 DMA engine callback Functions*/ 348 405 /** ··· 442 349 desc->txd.cookie = cookie; 443 350 444 351 445 - if (list_empty(&midc->active_list)) 352 + if (list_empty(&midc->active_list)) 446 - midc_dostart(midc, desc); 447 353 list_add_tail(&desc->desc_node, &midc->active_list); 448 - } else { 354 + else 449 355 list_add_tail(&desc->desc_node, &midc->queue); 450 - } 356 + 357 + midc_dostart(midc, desc); 451 358 spin_unlock_bh(&midc->lock); 452 359 453 360 return cookie; ··· 507 414 return ret; 508 415 } 509 416 417 + static int dma_slave_control(struct dma_chan *chan, unsigned long arg) 418 + { 419 + struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 420 + struct dma_slave_config *slave = (struct dma_slave_config *)arg; 421 + struct intel_mid_dma_slave *mid_slave; 422 + 423 + BUG_ON(!midc); 424 + BUG_ON(!slave); 425 + pr_debug("MDMA: slave control called\n"); 426 + 427 + mid_slave = to_intel_mid_dma_slave(slave); 428 + 429 + BUG_ON(!mid_slave); 430 + 431 + midc->mid_slave = mid_slave; 432 + return 0; 433 + }
510 434 /** 511 435 * intel_mid_dma_device_control - DMA device control 512 436 * @chan: chan for DMA control ··· 538 428 struct intel_mid_dma_chan *midc = to_intel_mid_dma_chan(chan); 539 429 struct middma_device *mid = to_middma_device(chan->device); 540 430 struct intel_mid_dma_desc *desc, *_desc; 541 - LIST_HEAD(list); 431 + union intel_mid_dma_cfg_lo cfg_lo; 432 + 433 + if (cmd == DMA_SLAVE_CONFIG) 434 + return dma_slave_control(chan, arg); 542 435 543 436 if (cmd != DMA_TERMINATE_ALL) 544 437 return -ENXIO; 545 438 546 439 spin_lock_bh(&midc->lock); 547 - if (midc->in_use == false) { 440 + if (midc->busy == false) { 548 441 spin_unlock_bh(&midc->lock); 549 442 return 0; 550 443 } 551 - list_splice_init(&midc->free_list, &list); 552 - midc->descs_allocated = 0; 553 - midc->slave = NULL; 554 - 444 + /*Suspend and disable the channel*/ 445 + cfg_lo.cfg_lo = ioread32(midc->ch_regs + CFG_LOW); 446 + cfg_lo.cfgx.ch_susp = 1; 447 + iowrite32(cfg_lo.cfg_lo, midc->ch_regs + CFG_LOW); 448 + iowrite32(DISABLE_CHANNEL(midc->ch_id), mid->dma_base + DMA_CHAN_EN); 449 + midc->busy = false; 555 450 /* Disable interrupts */ 556 451 disable_dma_interrupt(midc); 452 + midc->descs_allocated = 0; 557 453 558 454 spin_unlock_bh(&midc->lock); 559 - list_for_each_entry_safe(desc, _desc, &list, desc_node) { 560 - pr_debug("MDMA: freeing descriptor %p\n", desc); 561 - pci_pool_free(mid->dma_pool, desc, desc->txd.phys); 455 + list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { 456 + if (desc->lli != NULL) { 457 + pci_pool_free(desc->lli_pool, desc->lli, 458 + desc->lli_phys); 459 + pci_pool_destroy(desc->lli_pool); 460 + } 461 + list_move(&desc->desc_node, &midc->free_list); 562 462 } 563 463 return 0; 564 464 } 565 465 566 - /** 567 - * intel_mid_dma_prep_slave_sg - Prep slave sg txn 568 - * @chan: chan for DMA transfer 569 - * @sgl: scatter gather list 570 - * @sg_len: length of sg txn 571 - * @direction: DMA transfer dirtn 572 - * @flags: DMA flags 573 - * 574 - * Do DMA sg txn: NOT supported now 575 - */ 576 - static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 577 - struct dma_chan *chan, struct scatterlist *sgl, 578 - unsigned int sg_len, enum dma_data_direction direction, 579 - unsigned long flags) 580 - { 581 - /*not supported now*/ 582 - return NULL; 583 - } 584 466 585 467 /** 586 468 * intel_mid_dma_prep_memcpy - Prep memcpy txn ··· 597 495 union intel_mid_dma_ctl_hi ctl_hi; 598 496 union intel_mid_dma_cfg_lo cfg_lo; 599 497 union intel_mid_dma_cfg_hi cfg_hi; 600 - enum intel_mid_dma_width width = 0; 498 + enum dma_slave_buswidth width; 601 499 602 500 pr_debug("MDMA: Prep for memcpy\n"); 603 - WARN_ON(!chan); 501 + BUG_ON(!chan); 604 502 if (!len) 605 503 return NULL; 606 504 607 - mids = chan->private; 608 - WARN_ON(!mids); 609 - 610 505 midc = to_intel_mid_dma_chan(chan); 611 - WARN_ON(!midc); 506 + BUG_ON(!midc); 507 + 508 + mids = midc->mid_slave; 509 + BUG_ON(!mids); 612 510 613 511 pr_debug("MDMA:called for DMA %x CH %d Length %zu\n", 614 512 midc->dma->pci_id, midc->ch_id, len); 615 513 pr_debug("MDMA:Cfg passed Mode %x, Dirn %x, HS %x, Width %x\n", 616 - mids->cfg_mode, mids->dirn, mids->hs_mode, mids->src_width); 514 + mids->cfg_mode, mids->dma_slave.direction, 515 + mids->hs_mode, mids->dma_slave.src_addr_width); 617 516 618 517 /*calculate CFG_LO*/ 619 518 if (mids->hs_mode == LNW_DMA_SW_HS) { ··· 633 530 if (midc->dma->pimr_mask) { 634 531 cfg_hi.cfgx.protctl = 0x0; /*default value*/ 635 532 cfg_hi.cfgx.fifo_mode = 1; 636 - if (mids->dirn == 
DMA_TO_DEVICE) { 533 + if (mids->dma_slave.direction == DMA_TO_DEVICE) { 637 534 cfg_hi.cfgx.src_per = 0; 638 535 if (mids->device_instance == 0) 639 536 cfg_hi.cfgx.dst_per = 3; 640 537 if (mids->device_instance == 1) 641 538 cfg_hi.cfgx.dst_per = 1; 642 - } else if (mids->dirn == DMA_FROM_DEVICE) { 539 + } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 643 540 if (mids->device_instance == 0) 644 541 cfg_hi.cfgx.src_per = 2; 645 542 if (mids->device_instance == 1) ··· 655 552 656 553 /*calculate CTL_HI*/ 657 554 ctl_hi.ctlx.reser = 0; 658 - width = mids->src_width; 555 + ctl_hi.ctlx.done = 0; 556 + width = mids->dma_slave.src_addr_width; 659 557 660 558 ctl_hi.ctlx.block_ts = get_block_ts(len, width, midc->dma->block_size); 661 559 pr_debug("MDMA:calc len %d for block size %d\n", ··· 664 560 /*calculate CTL_LO*/ 665 561 ctl_lo.ctl_lo = 0; 666 562 ctl_lo.ctlx.int_en = 1; 667 - ctl_lo.ctlx.dst_tr_width = mids->dst_width; 668 - ctl_lo.ctlx.src_tr_width = mids->src_width; 669 - ctl_lo.ctlx.dst_msize = mids->src_msize; 670 - ctl_lo.ctlx.src_msize = mids->dst_msize; 563 + ctl_lo.ctlx.dst_tr_width = mids->dma_slave.dst_addr_width; 564 + ctl_lo.ctlx.src_tr_width = mids->dma_slave.src_addr_width; 565 + ctl_lo.ctlx.dst_msize = mids->dma_slave.src_maxburst; 566 + ctl_lo.ctlx.src_msize = mids->dma_slave.dst_maxburst; 671 567 672 568 if (mids->cfg_mode == LNW_DMA_MEM_TO_MEM) { 673 569 ctl_lo.ctlx.tt_fc = 0; 674 570 ctl_lo.ctlx.sinc = 0; 675 571 ctl_lo.ctlx.dinc = 0; 676 572 } else { 677 - if (mids->dirn == DMA_TO_DEVICE) { 573 + if (mids->dma_slave.direction == DMA_TO_DEVICE) { 678 574 ctl_lo.ctlx.sinc = 0; 679 575 ctl_lo.ctlx.dinc = 2; 680 576 ctl_lo.ctlx.tt_fc = 1; 681 - } else if (mids->dirn == DMA_FROM_DEVICE) { 577 + } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) { 682 578 ctl_lo.ctlx.sinc = 2; 683 579 ctl_lo.ctlx.dinc = 0; 684 580 ctl_lo.ctlx.tt_fc = 2; ··· 701 597 desc->ctl_lo = ctl_lo.ctl_lo; 702 598 desc->ctl_hi = ctl_hi.ctl_hi; 703 599 desc->width = width; 704 - desc->dirn = mids->dirn; 600 + desc->dirn = mids->dma_slave.direction; 601 + desc->lli_phys = 0; 602 + desc->lli = NULL; 603 + desc->lli_pool = NULL; 705 604 return &desc->txd; 706 605 707 606 err_desc_get: 708 607 pr_err("ERR_MDMA: Failed to get desc\n"); 709 608 midc_desc_put(midc, desc); 710 609 return NULL; 610 + } 611 + /** 612 + * intel_mid_dma_prep_slave_sg - Prep slave sg txn 613 + * @chan: chan for DMA transfer 614 + * @sgl: scatter gather list 615 + * @sg_len: length of sg txn 616 + * @direction: DMA transfer dirtn 617 + * @flags: DMA flags 618 + * 619 + * Prepares LLI based periphral transfer 620 + */ 621 + static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg( 622 + struct dma_chan *chan, struct scatterlist *sgl, 623 + unsigned int sg_len, enum dma_data_direction direction, 624 + unsigned long flags) 625 + { 626 + struct intel_mid_dma_chan *midc = NULL; 627 + struct intel_mid_dma_slave *mids = NULL; 628 + struct intel_mid_dma_desc *desc = NULL; 629 + struct dma_async_tx_descriptor *txd = NULL; 630 + union intel_mid_dma_ctl_lo ctl_lo; 631 + 632 + pr_debug("MDMA: Prep for slave SG\n"); 633 + 634 + if (!sg_len) { 635 + pr_err("MDMA: Invalid SG length\n"); 636 + return NULL; 637 + } 638 + midc = to_intel_mid_dma_chan(chan); 639 + BUG_ON(!midc); 640 + 641 + mids = midc->mid_slave; 642 + BUG_ON(!mids); 643 + 644 + if (!midc->dma->pimr_mask) { 645 + pr_debug("MDMA: SG list is not supported by this controller\n"); 646 + return NULL; 647 + } 648 + 649 + pr_debug("MDMA: SG Length = %d, direction = 
%d, Flags = %#lx\n", 650 + sg_len, direction, flags); 651 + 652 + txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); 653 + if (NULL == txd) { 654 + pr_err("MDMA: Prep memcpy failed\n"); 655 + return NULL; 656 + } 657 + desc = to_intel_mid_dma_desc(txd); 658 + desc->dirn = direction; 659 + ctl_lo.ctl_lo = desc->ctl_lo; 660 + ctl_lo.ctlx.llp_dst_en = 1; 661 + ctl_lo.ctlx.llp_src_en = 1; 662 + desc->ctl_lo = ctl_lo.ctl_lo; 663 + desc->lli_length = sg_len; 664 + desc->current_lli = 0; 665 + /* DMA coherent memory pool for LLI descriptors*/ 666 + desc->lli_pool = pci_pool_create("intel_mid_dma_lli_pool", 667 + midc->dma->pdev, 668 + (sizeof(struct intel_mid_dma_lli)*sg_len), 669 + 32, 0); 670 + if (NULL == desc->lli_pool) { 671 + pr_err("MID_DMA:LLI pool create failed\n"); 672 + return NULL; 673 + } 674 + 675 + desc->lli = pci_pool_alloc(desc->lli_pool, GFP_KERNEL, &desc->lli_phys); 676 + if (!desc->lli) { 677 + pr_err("MID_DMA: LLI alloc failed\n"); 678 + pci_pool_destroy(desc->lli_pool); 679 + return NULL; 680 + } 681 + 682 + midc_lli_fill_sg(midc, desc, sgl, sg_len, flags); 683 + if (flags & DMA_PREP_INTERRUPT) { 684 + iowrite32(UNMASK_INTR_REG(midc->ch_id), 685 + midc->dma_base + MASK_BLOCK); 686 + pr_debug("MDMA:Enabled Block interrupt\n"); 687 + } 688 + return &desc->txd; 711 689 } 712 690 713 691 /** ··· 804 618 struct middma_device *mid = to_middma_device(chan->device); 805 619 struct intel_mid_dma_desc *desc, *_desc; 806 620 807 - if (true == midc->in_use) { 621 + if (true == midc->busy) { 808 622 /*trying to free ch in use!!!!!*/ 809 623 pr_err("ERR_MDMA: trying to free ch in use\n"); 810 624 } 811 - 625 + pm_runtime_put(&mid->pdev->dev); 812 626 spin_lock_bh(&midc->lock); 813 627 midc->descs_allocated = 0; 814 628 list_for_each_entry_safe(desc, _desc, &midc->active_list, desc_node) { ··· 825 639 } 826 640 spin_unlock_bh(&midc->lock); 827 641 midc->in_use = false; 642 + midc->busy = false; 828 643 /* Disable CH interrupts */ 829 644 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_BLOCK); 830 645 iowrite32(MASK_INTR_REG(midc->ch_id), mid->dma_base + MASK_ERR); ··· 846 659 dma_addr_t phys; 847 660 int i = 0; 848 661 662 + pm_runtime_get_sync(&mid->pdev->dev); 663 + 664 + if (mid->state == SUSPENDED) { 665 + if (dma_resume(mid->pdev)) { 666 + pr_err("ERR_MDMA: resume failed"); 667 + return -EFAULT; 668 + } 669 + } 849 670 850 671 /* ASSERT: channel is idle */ 851 672 if (test_ch_en(mid->dma_base, midc->ch_id)) { 852 673 /*ch is not idle*/ 853 674 pr_err("ERR_MDMA: ch not idle\n"); 675 + pm_runtime_put(&mid->pdev->dev); 854 676 return -EIO; 855 677 } 856 678 midc->completed = chan->cookie = 1; ··· 870 674 desc = pci_pool_alloc(mid->dma_pool, GFP_KERNEL, &phys); 871 675 if (!desc) { 872 676 pr_err("ERR_MDMA: desc failed\n"); 677 + pm_runtime_put(&mid->pdev->dev); 873 678 return -ENOMEM; 874 679 /*check*/ 875 680 } ··· 883 686 list_add_tail(&desc->desc_node, &midc->free_list); 884 687 } 885 688 spin_unlock_bh(&midc->lock); 886 - midc->in_use = false; 689 + midc->in_use = true; 690 + midc->busy = false; 887 691 pr_debug("MID_DMA: Desc alloc done ret: %d desc\n", i); 888 692 return i; 889 693 } ··· 913 715 { 914 716 struct middma_device *mid = NULL; 915 717 struct intel_mid_dma_chan *midc = NULL; 916 - u32 status; 718 + u32 status, raw_tfr, raw_block; 917 719 int i; 918 720 919 721 mid = (struct middma_device *)data; ··· 922 724 return; 923 725 } 924 726 pr_debug("MDMA: in tasklet for device %x\n", mid->pci_id); 925 - status = ioread32(mid->dma_base + RAW_TFR); 926 - 
pr_debug("MDMA:RAW_TFR %x\n", status); 727 + raw_tfr = ioread32(mid->dma_base + RAW_TFR); 728 + raw_block = ioread32(mid->dma_base + RAW_BLOCK); 729 + status = raw_tfr | raw_block; 927 730 status &= mid->intr_mask; 928 731 while (status) { 929 732 /*txn interrupt*/ ··· 940 741 } 941 742 pr_debug("MDMA:Tx complete interrupt %x, Ch No %d Index %d\n", 942 743 status, midc->ch_id, i); 744 + midc->raw_tfr = raw_tfr; 745 + midc->raw_block = raw_block; 746 + spin_lock_bh(&midc->lock); 943 747 /*clearing this interrupts first*/ 944 748 iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_TFR); 945 - iowrite32((1 << midc->ch_id), mid->dma_base + CLEAR_BLOCK); 946 - 947 - spin_lock_bh(&midc->lock); 749 + if (raw_block) { 750 + iowrite32((1 << midc->ch_id), 751 + mid->dma_base + CLEAR_BLOCK); 752 + } 948 753 midc_scan_descriptors(mid, midc); 949 754 pr_debug("MDMA:Scan of desc... complete, unmasking\n"); 950 755 iowrite32(UNMASK_INTR_REG(midc->ch_id), 951 756 mid->dma_base + MASK_TFR); 757 + if (raw_block) { 758 + iowrite32(UNMASK_INTR_REG(midc->ch_id), 759 + mid->dma_base + MASK_BLOCK); 760 + } 952 761 spin_unlock_bh(&midc->lock); 953 762 } 954 763 ··· 1011 804 static irqreturn_t intel_mid_dma_interrupt(int irq, void *data) 1012 805 { 1013 806 struct middma_device *mid = data; 1014 - u32 status; 807 + u32 tfr_status, err_status; 1015 808 int call_tasklet = 0; 809 + 810 + tfr_status = ioread32(mid->dma_base + RAW_TFR); 811 + err_status = ioread32(mid->dma_base + RAW_ERR); 812 + if (!tfr_status && !err_status) 813 + return IRQ_NONE; 1016 814 1017 815 /*DMA Interrupt*/ 1018 816 pr_debug("MDMA:Got an interrupt on irq %d\n", irq); ··· 1026 814 return -EINVAL; 1027 815 } 1028 816 1029 - status = ioread32(mid->dma_base + RAW_TFR); 1030 - pr_debug("MDMA: Status %x, Mask %x\n", status, mid->intr_mask); 1031 - status &= mid->intr_mask; 1032 - if (status) { 817 + pr_debug("MDMA: Status %x, Mask %x\n", tfr_status, mid->intr_mask); 818 + tfr_status &= mid->intr_mask; 819 + if (tfr_status) { 1033 820 /*need to disable intr*/ 1034 - iowrite32((status << 8), mid->dma_base + MASK_TFR); 1035 - pr_debug("MDMA: Calling tasklet %x\n", status); 821 + iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_TFR); 822 + iowrite32((tfr_status << INT_MASK_WE), mid->dma_base + MASK_BLOCK); 823 + pr_debug("MDMA: Calling tasklet %x\n", tfr_status); 1036 824 call_tasklet = 1; 1037 825 } 1038 - status = ioread32(mid->dma_base + RAW_ERR); 1039 - status &= mid->intr_mask; 1040 - if (status) { 1041 - iowrite32(MASK_INTR_REG(status), mid->dma_base + MASK_ERR); 826 + err_status &= mid->intr_mask; 827 + if (err_status) { 828 + iowrite32(MASK_INTR_REG(err_status), mid->dma_base + MASK_ERR); 1042 829 call_tasklet = 1; 1043 830 } 1044 831 if (call_tasklet) ··· 1067 856 { 1068 857 struct middma_device *dma = pci_get_drvdata(pdev); 1069 858 int err, i; 1070 - unsigned int irq_level; 1071 859 1072 860 /* DMA coherent memory pool for DMA descriptor allocations */ 1073 861 dma->dma_pool = pci_pool_create("intel_mid_dma_desc_pool", pdev, ··· 1094 884 pr_debug("MDMA:Adding %d channel for this controller\n", dma->max_chan); 1095 885 /*init CH structures*/ 1096 886 dma->intr_mask = 0; 887 + dma->state = RUNNING; 1097 888 for (i = 0; i < dma->max_chan; i++) { 1098 889 struct intel_mid_dma_chan *midch = &dma->ch[i]; 1099 890 ··· 1154 943 1155 944 /*register irq */ 1156 945 if (dma->pimr_mask) { 1157 - irq_level = IRQF_SHARED; 1158 946 pr_debug("MDMA:Requesting irq shared for DMAC1\n"); 1159 947 err = request_irq(pdev->irq, 
intel_mid_dma_interrupt1, 1160 948 IRQF_SHARED, "INTEL_MID_DMAC1", dma);
··· 1161 951 goto err_irq;
1162 952 } else {
1163 953 dma->intr_mask = 0x03;
1164 - irq_level = 0;
1165 954 pr_debug("MDMA:Requesting irq for DMAC2\n");
1166 955 err = request_irq(pdev->irq, intel_mid_dma_interrupt2,
1167 - 0, "INTEL_MID_DMAC2", dma);
956 + IRQF_SHARED, "INTEL_MID_DMAC2", dma);
1168 957 if (0 != err)
1169 958 goto err_irq;
1170 959 }
··· 1279 1070 if (err)
1280 1071 goto err_dma;
1281 1072
1073 + pm_runtime_set_active(&pdev->dev);
1074 + pm_runtime_enable(&pdev->dev);
1075 + pm_runtime_allow(&pdev->dev);
1282 1076 return 0;
1283 1077
1284 1078 err_dma:
··· 1316 1104 pci_disable_device(pdev);
1317 1105 }
1318 1106
1107 + /* Power Management */
1108 + /**
1109 + * dma_suspend - PCI suspend function
1110 + *
1111 + * @pci: PCI device structure
1112 + * @state: PM message
1113 + *
1114 + * This function is called by the OS when a power event occurs
1115 + */
1116 + int dma_suspend(struct pci_dev *pci, pm_message_t state)
1117 + {
1118 + int i;
1119 + struct middma_device *device = pci_get_drvdata(pci);
1120 + pr_debug("MDMA: dma_suspend called\n");
1121 +
1122 + for (i = 0; i < device->max_chan; i++) {
1123 + if (device->ch[i].in_use)
1124 + return -EAGAIN;
1125 + }
1126 + device->state = SUSPENDED;
1127 + pci_set_drvdata(pci, device);
1128 + pci_save_state(pci);
1129 + pci_disable_device(pci);
1130 + pci_set_power_state(pci, PCI_D3hot);
1131 + return 0;
1132 + }
1133 +
1134 + /**
1135 + * dma_resume - PCI resume function
1136 + *
1137 + * @pci: PCI device structure
1138 + *
1139 + * This function is called by the OS when a power event occurs
1140 + */
1141 + int dma_resume(struct pci_dev *pci)
1142 + {
1143 + int ret;
1144 + struct middma_device *device = pci_get_drvdata(pci);
1145 +
1146 + pr_debug("MDMA: dma_resume called\n");
1147 + pci_set_power_state(pci, PCI_D0);
1148 + pci_restore_state(pci);
1149 + ret = pci_enable_device(pci);
1150 + if (ret) {
1151 + pr_err("MDMA: device can't be enabled for %x\n", pci->device);
1152 + return ret;
1153 + }
1154 + device->state = RUNNING;
1155 + iowrite32(REG_BIT0, device->dma_base + DMA_CFG);
1156 + pci_set_drvdata(pci, device);
1157 + return 0;
1158 + }
1159 +
1160 + static int dma_runtime_suspend(struct device *dev)
1161 + {
1162 + struct pci_dev *pci_dev = to_pci_dev(dev);
1163 + return dma_suspend(pci_dev, PMSG_SUSPEND);
1164 + }
1165 +
1166 + static int dma_runtime_resume(struct device *dev)
1167 + {
1168 + struct pci_dev *pci_dev = to_pci_dev(dev);
1169 + return dma_resume(pci_dev);
1170 + }
1171 +
1172 + static int dma_runtime_idle(struct device *dev)
1173 + {
1174 + struct pci_dev *pdev = to_pci_dev(dev);
1175 + struct middma_device *device = pci_get_drvdata(pdev);
1176 + int i;
1177 +
1178 + for (i = 0; i < device->max_chan; i++) {
1179 + if (device->ch[i].in_use)
1180 + return -EAGAIN;
1181 + }
1182 +
1183 + return pm_schedule_suspend(dev, 0);
1184 + }
1185 +
1319 1186 /******************************************************************************
1320 1187 * PCI stuff
1321 1188 */
··· 1407 1116 };
1408 1117 MODULE_DEVICE_TABLE(pci, intel_mid_dma_ids);
1409 1118
1119 + static const struct dev_pm_ops intel_mid_dma_pm = {
1120 + .runtime_suspend = dma_runtime_suspend,
1121 + .runtime_resume = dma_runtime_resume,
1122 + .runtime_idle = dma_runtime_idle,
1123 + };
1124 +
1410 1125 static struct pci_driver intel_mid_dma_pci = {
1411 1126 .name = "Intel MID DMA",
1412 1127 .id_table = intel_mid_dma_ids,
1413 1128 .probe = intel_mid_dma_probe,
1414 1129 .remove =
__devexit_p(intel_mid_dma_remove), 1130 + #ifdef CONFIG_PM 1131 + .suspend = dma_suspend, 1132 + .resume = dma_resume, 1133 + .driver = { 1134 + .pm = &intel_mid_dma_pm, 1135 + }, 1136 + #endif 1415 1137 }; 1416 1138 1417 1139 static int __init intel_mid_dma_init(void)
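
A note on the runtime-PM wiring in the intel_mid_dma.c hunks above: probe marks the device active and enables runtime PM, channel allocation takes a reference with pm_runtime_get_sync(), channel free drops it with pm_runtime_put(), and the runtime_idle callback vetoes suspend while any channel is still in use. Below is a minimal sketch of that reference pattern, using hypothetical my_dma_dev/my_dma_chan types rather than the driver's real structs:

#include <linux/pci.h>
#include <linux/pm_runtime.h>

struct my_dma_chan { bool in_use; };

struct my_dma_dev {
	struct pci_dev *pdev;
	struct my_dma_chan ch[8];
	int max_chan;
};

static int my_alloc_chan(struct my_dma_dev *dma, struct my_dma_chan *chan)
{
	pm_runtime_get_sync(&dma->pdev->dev);	/* wake the controller */
	chan->in_use = true;
	return 0;
}

static void my_free_chan(struct my_dma_dev *dma, struct my_dma_chan *chan)
{
	chan->in_use = false;
	pm_runtime_put(&dma->pdev->dev);	/* drop our reference */
}

static int my_runtime_idle(struct device *dev)
{
	struct my_dma_dev *dma = pci_get_drvdata(to_pci_dev(dev));
	int i;

	for (i = 0; i < dma->max_chan; i++)
		if (dma->ch[i].in_use)
			return -EAGAIN;		/* busy: veto suspend */

	return pm_schedule_suspend(dev, 0);	/* idle: suspend now */
}

The idle walk over the channel table mirrors what dma_runtime_idle() above does; the get/put pairing is what lets the PM core know when the controller may power down.
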
+47 -6
drivers/dma/intel_mid_dma_regs.h
··· 29 29 #include <linux/dmapool.h>
30 30 #include <linux/pci_ids.h>
31 31
32 - #define INTEL_MID_DMA_DRIVER_VERSION "1.0.5"
32 + #define INTEL_MID_DMA_DRIVER_VERSION "1.1.0"
33 33
34 34 #define REG_BIT0 0x00000001
35 35 #define REG_BIT8 0x00000100
36 - 36 + #define INT_MASK_WE 0x8
37 + #define CLEAR_DONE 0xFFFFEFFF
37 38 #define UNMASK_INTR_REG(chan_num) \
38 39 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
39 40 #define MASK_INTR_REG(chan_num) (REG_BIT8 << chan_num)
40 41
41 42 #define ENABLE_CHANNEL(chan_num) \
42 43 ((REG_BIT0 << chan_num) | (REG_BIT8 << chan_num))
44 +
45 + #define DISABLE_CHANNEL(chan_num) \
46 + (REG_BIT8 << chan_num)
43 47
44 48 #define DESCS_PER_CHANNEL 16
45 49 /*DMA Registers*/
··· 54 50 /*CH X REG = (DMA_CH_SIZE)*CH_NO + REG*/
55 51 #define SAR 0x00 /* Source Address Register*/
56 52 #define DAR 0x08 /* Destination Address Register*/
53 + #define LLP 0x10 /* Linked List Pointer Register*/
57 54 #define CTL_LOW 0x18 /* Control Register*/
58 55 #define CTL_HIGH 0x1C /* Control Register*/
59 56 #define CFG_LOW 0x40 /* Configuration Register Low*/
··· 117 112 union intel_mid_dma_ctl_hi {
118 113 struct {
119 114 u32 block_ts:12; /*block transfer size*/
120 - /*configured by DMAC*/
121 - u32 reser:20;
115 + u32 done:1; /*Done - updated by DMAC*/
116 + u32 reser:19; /*configured by DMAC*/
122 117 } ctlx;
123 118 u32 ctl_hi;
124 119
··· 157 152 u32 cfg_hi;
158 153 };
159 154
155 +
160 156 /**
161 157 * struct intel_mid_dma_chan - internal mid representation of a DMA channel
162 158 * @chan: dma_chan structure representation for mid chan
··· 172 166 * @slave: dma slave structure
173 167 * @descs_allocated: total number of descriptors allocated
174 168 * @dma: dma device structure pointer
169 + * @busy: bool representing if ch is busy (active txn) or not
175 170 * @in_use: bool representing if ch is in use or not
171 + * @raw_tfr: raw tfr interrupt received
172 + * @raw_block: raw block interrupt received
176 173 */
177 174 struct intel_mid_dma_chan {
178 175 struct dma_chan chan;
··· 187 178 struct list_head active_list;
188 179 struct list_head queue;
189 180 struct list_head free_list;
190 - struct intel_mid_dma_slave *slave;
191 181 unsigned int descs_allocated;
192 182 struct middma_device *dma;
183 + bool busy;
193 184 bool in_use;
185 + u32 raw_tfr;
186 + u32 raw_block;
187 + struct intel_mid_dma_slave *mid_slave;
194 188 };
195 189
196 190 static inline struct intel_mid_dma_chan *to_intel_mid_dma_chan(
··· 202 190 return container_of(chan, struct intel_mid_dma_chan, chan);
203 191 }
204 192
193 + enum intel_mid_dma_state {
194 + RUNNING = 0,
195 + SUSPENDED,
196 + };
205 197 /**
206 198 * struct middma_device - internal representation of a DMA device
207 199 * @pdev: PCI device
··· 221 205 * @max_chan: max number of chs supported (from drv_data)
222 206 * @block_size: Block size of DMA transfer supported (from drv_data)
223 207 * @pimr_mask: MMIO register addr for peripheral interrupt (from drv_data)
208 + * @state: dma PM device state
224 209 */
225 210 struct middma_device {
226 211 struct pci_dev *pdev;
··· 237 220 int max_chan;
238 221 int block_size;
239 222 unsigned int pimr_mask;
223 + enum intel_mid_dma_state state;
240 224 };
241 225
242 226 static inline struct middma_device *to_middma_device(struct dma_device *common)
··· 256 238 u32 cfg_lo;
257 239 u32 ctl_lo;
258 240 u32 ctl_hi;
241 + struct pci_pool *lli_pool;
242 + struct intel_mid_dma_lli *lli;
243 + dma_addr_t lli_phys;
244 + unsigned int lli_length;
245 + unsigned int current_lli;
259 246
dma_addr_t next; 260 247 enum dma_data_direction dirn; 261 248 enum dma_status status; 262 - enum intel_mid_dma_width width; /*width of DMA txn*/ 249 + enum dma_slave_buswidth width; /*width of DMA txn*/ 263 250 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/ 264 251 265 252 }; 253 + 254 + struct intel_mid_dma_lli { 255 + dma_addr_t sar; 256 + dma_addr_t dar; 257 + dma_addr_t llp; 258 + u32 ctl_lo; 259 + u32 ctl_hi; 260 + } __attribute__ ((packed)); 266 261 267 262 static inline int test_ch_en(void __iomem *dma, u32 ch_no) 268 263 { ··· 288 257 { 289 258 return container_of(txd, struct intel_mid_dma_desc, txd); 290 259 } 260 + 261 + static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave 262 + (struct dma_slave_config *slave) 263 + { 264 + return container_of(slave, struct intel_mid_dma_slave, dma_slave); 265 + } 266 + 267 + 268 + int dma_resume(struct pci_dev *pci); 269 + 291 270 #endif /*__INTEL_MID_DMAC_REGS_H__*/
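
The header changes above add the hardware scatter-gather plumbing: an LLP register offset, per-descriptor LLI pool fields, and a packed struct intel_mid_dma_lli whose llp member holds the bus address of the next list item (the controller walks the chain until llp is 0). A sketch of how such a chain could be filled from a scatterlist, assuming a mem-to-device transfer into a fixed destination FIFO; fill_lli_chain is a hypothetical helper, not the driver's midc_lli_fill_sg:

#include <linux/scatterlist.h>

/* struct intel_mid_dma_lli is the packed descriptor from the header above */
static void fill_lli_chain(struct intel_mid_dma_lli *lli, dma_addr_t lli_phys,
			   struct scatterlist *sgl, unsigned int sg_len,
			   dma_addr_t dst_fifo, u32 ctl_lo, u32 ctl_hi)
{
	struct scatterlist *sg;
	unsigned int i;

	for_each_sg(sgl, sg, sg_len, i) {
		lli[i].sar = sg_dma_address(sg);	/* per-segment source */
		lli[i].dar = dst_fifo;			/* same FIFO each block */
		lli[i].ctl_lo = ctl_lo;
		lli[i].ctl_hi = ctl_hi;
		/* bus address of the next entry; 0 terminates the chain */
		lli[i].llp = (i == sg_len - 1) ? 0 :
			     lli_phys + (i + 1) * sizeof(*lli);
	}
}

Because the entries live in a DMA-coherent pci_pool (lli_pool above), their bus addresses are contiguous, which is what makes the lli_phys + (i + 1) * sizeof(*lli) link arithmetic valid.
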
+553 -460
drivers/dma/ste_dma40.c
··· 1 1 /*
2 - * driver/dma/ste_dma40.c
3 - *
4 - * Copyright (C) ST-Ericsson 2007-2010
2 + * Copyright (C) ST-Ericsson SA 2007-2010
3 + * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
4 + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 5 * License terms: GNU General Public License (GPL) version 2
6 - * Author: Per Friden <per.friden@stericsson.com>
7 - * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 - *
9 6 */
10 7
11 8 #include <linux/kernel.h>
··· 11 14 #include <linux/platform_device.h>
12 15 #include <linux/clk.h>
13 16 #include <linux/delay.h>
17 + #include <linux/err.h>
14 18
15 19 #include <plat/ste_dma40.h>
16 20
··· 30 32
31 33 /* Hardware requirement on LCLA alignment */
32 34 #define LCLA_ALIGNMENT 0x40000
35 +
36 + /* Max number of links per event group */
37 + #define D40_LCLA_LINK_PER_EVENT_GRP 128
38 + #define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP
39 +
33 40 /* Attempts before giving up on trying to get pages that are aligned */
34 41 #define MAX_LCLA_ALLOC_ATTEMPTS 256
··· 44 41 #define D40_ALLOC_LOG_FREE 0
45 42
46 43 /* Hardware designer of the block */
47 - #define D40_PERIPHID2_DESIGNER 0x8
44 + #define D40_HW_DESIGNER 0x8
48 45
49 46 /**
50 47 * enum d40_command - The different commands and/or statuses.
··· 87 84 * @lli_log: Same as above but for logical channels.
88 85 * @lli_pool: The pool with two entries pre-allocated.
89 86 * @lli_len: Number of llis of current descriptor.
90 - * @lli_count: Number of transfered llis.
91 - * @lli_tx_len: Max number of LLIs per transfer, there can be
92 - * many transfer for one descriptor.
87 + * @lli_current: Number of transferred llis.
88 + * @lcla_alloc: Number of LCLA entries allocated.
93 89 * @txd: DMA engine struct. Used, among other things, for communication
94 90 * during a transfer.
95 91 * @node: List entry.
96 - * @dir: The transfer direction of this job.
97 92 * @is_in_client_list: true if the client owns this descriptor.
93 + * @is_hw_linked: true if this job will automatically be continued after
94 + * the previous one.
98 95 *
99 96 * This descriptor is used for both logical and physical transfers.
100 97 */
101 - 102 98 struct d40_desc {
103 99 /* LLI physical */
104 100 struct d40_phy_lli_bidir lli_phy;
··· 106 104
107 105 struct d40_lli_pool lli_pool;
108 106 int lli_len;
109 - int lli_count;
110 - u32 lli_tx_len;
107 + int lli_current;
108 + int lcla_alloc;
111 109
112 110 struct dma_async_tx_descriptor txd;
113 111 struct list_head node;
114 112
115 - enum dma_data_direction dir;
116 113 bool is_in_client_list;
114 + bool is_hw_linked;
117 115 };
118 116
119 117 /**
··· 125 123 * @pages: The number of pages needed for all physical channels.
126 124 * Only used later for clean-up on error
127 125 * @lock: Lock to protect the content in this struct.
128 - * @alloc_map: Bitmap mapping between physical channel and LCLA entries.
129 - * @num_blocks: The number of entries of alloc_map. Equals to the
130 - * number of physical channels.
126 + * @alloc_map: big map of which LCLA entry is owned by which job.
131 127 */
132 128 struct d40_lcla_pool {
133 129 void *base;
134 130 void *base_unaligned;
135 131 int pages;
136 132 spinlock_t lock;
137 - u32 *alloc_map;
138 - int num_blocks;
133 + struct d40_desc **alloc_map;
139 134 };
140 135
141 136 /**
··· 145 146 * this physical channel. Can also be free or physically allocated.
146 147 * @allocated_dst: Same as for src but is dst.
147 148 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
148 - * event line number. Both allocated_src and allocated_dst can not be
149 - * allocated to a physical channel, since the interrupt handler has then
150 - * no way of figure out which one the interrupt belongs to.
149 + * event line number.
151 150 */
152 151 struct d40_phy_res {
153 152 spinlock_t lock;
··· 175 178 * @active: Active descriptor.
176 179 * @queue: Queued jobs.
177 180 * @dma_cfg: The client configuration of this dma channel.
181 + * @configured: whether the dma_cfg configuration is valid
178 182 * @base: Pointer to the device instance struct.
179 183 * @src_def_cfg: Default cfg register setting for src.
180 184 * @dst_def_cfg: Default cfg register setting for dst.
··· 199 201 struct list_head active;
200 202 struct list_head queue;
201 203 struct stedma40_chan_cfg dma_cfg;
204 + bool configured;
202 205 struct d40_base *base;
203 206 /* Default register configurations */
204 207 u32 src_def_cfg;
205 208 u32 dst_def_cfg;
206 209 struct d40_def_lcsp log_def;
207 - struct d40_lcla_elem lcla;
208 210 struct d40_log_lli_full *lcpa;
209 211 /* Runtime reconfiguration */
210 212 dma_addr_t runtime_addr;
··· 232 234 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
233 235 * @dma_slave: dma_device channels that can only do slave transfers.
234 236 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
235 - * @phy_chans: Room for all possible physical channels in system.
236 237 * @log_chans: Room for all possible logical channels in system.
237 238 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
238 239 * to log_chans entries.
··· 337 340 align);
338 341 d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len,
339 342 align);
340 -
341 - d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src);
342 - d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst);
343 343 }
344 344
345 345 return 0;
··· 351 357 d40d->lli_log.dst = NULL;
352 358 d40d->lli_phy.src = NULL;
353 359 d40d->lli_phy.dst = NULL;
354 - d40d->lli_phy.src_addr = 0;
355 - d40d->lli_phy.dst_addr = 0;
356 360 }
357 361
358 - static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c,
359 - struct d40_desc *desc)
362 + static int d40_lcla_alloc_one(struct d40_chan *d40c,
363 + struct d40_desc *d40d)
360 364 {
361 - dma_cookie_t cookie = d40c->chan.cookie;
365 + unsigned long flags;
366 + int i;
367 + int ret = -EINVAL;
368 + int p;
362 369
363 - if (++cookie < 0)
364 - cookie = 1;
370 + spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);
365 371
366 - d40c->chan.cookie = cookie;
367 - desc->txd.cookie = cookie;
372 + p = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP;
368 373
369 - return cookie;
374 + /*
375 + * Allocate both src and dst at the same time, so only half the group
376 + * is walked; start at 1, since 0 is reserved as the end marker.
377 + */ 378 + for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { 379 + if (!d40c->base->lcla_pool.alloc_map[p + i]) { 380 + d40c->base->lcla_pool.alloc_map[p + i] = d40d; 381 + d40d->lcla_alloc++; 382 + ret = i; 383 + break; 384 + } 385 + } 386 + 387 + spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 388 + 389 + return ret; 390 + } 391 + 392 + static int d40_lcla_free_all(struct d40_chan *d40c, 393 + struct d40_desc *d40d) 394 + { 395 + unsigned long flags; 396 + int i; 397 + int ret = -EINVAL; 398 + 399 + if (d40c->log_num == D40_PHY_CHAN) 400 + return 0; 401 + 402 + spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 403 + 404 + for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) { 405 + if (d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * 406 + D40_LCLA_LINK_PER_EVENT_GRP + i] == d40d) { 407 + d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num * 408 + D40_LCLA_LINK_PER_EVENT_GRP + i] = NULL; 409 + d40d->lcla_alloc--; 410 + if (d40d->lcla_alloc == 0) { 411 + ret = 0; 412 + break; 413 + } 414 + } 415 + } 416 + 417 + spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 418 + 419 + return ret; 420 + 370 421 } 371 422 372 423 static void d40_desc_remove(struct d40_desc *d40d) ··· 421 382 422 383 static struct d40_desc *d40_desc_get(struct d40_chan *d40c) 423 384 { 424 - struct d40_desc *d; 425 - struct d40_desc *_d; 385 + struct d40_desc *desc = NULL; 426 386 427 387 if (!list_empty(&d40c->client)) { 388 + struct d40_desc *d; 389 + struct d40_desc *_d; 390 + 428 391 list_for_each_entry_safe(d, _d, &d40c->client, node) 429 392 if (async_tx_test_ack(&d->txd)) { 430 393 d40_pool_lli_free(d); 431 394 d40_desc_remove(d); 395 + desc = d; 396 + memset(desc, 0, sizeof(*desc)); 432 397 break; 433 398 } 434 - } else { 435 - d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); 436 - if (d != NULL) { 437 - memset(d, 0, sizeof(struct d40_desc)); 438 - INIT_LIST_HEAD(&d->node); 439 - } 440 399 } 441 - return d; 400 + 401 + if (!desc) 402 + desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT); 403 + 404 + if (desc) 405 + INIT_LIST_HEAD(&desc->node); 406 + 407 + return desc; 442 408 } 443 409 444 410 static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 445 411 { 412 + 413 + d40_lcla_free_all(d40c, d40d); 446 414 kmem_cache_free(d40c->base->desc_slab, d40d); 447 415 } 448 416 449 417 static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) 450 418 { 451 419 list_add_tail(&desc->node, &d40c->active); 420 + } 421 + 422 + static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 423 + { 424 + int curr_lcla = -EINVAL, next_lcla; 425 + 426 + if (d40c->log_num == D40_PHY_CHAN) { 427 + d40_phy_lli_write(d40c->base->virtbase, 428 + d40c->phy_chan->num, 429 + d40d->lli_phy.dst, 430 + d40d->lli_phy.src); 431 + d40d->lli_current = d40d->lli_len; 432 + } else { 433 + 434 + if ((d40d->lli_len - d40d->lli_current) > 1) 435 + curr_lcla = d40_lcla_alloc_one(d40c, d40d); 436 + 437 + d40_log_lli_lcpa_write(d40c->lcpa, 438 + &d40d->lli_log.dst[d40d->lli_current], 439 + &d40d->lli_log.src[d40d->lli_current], 440 + curr_lcla); 441 + 442 + d40d->lli_current++; 443 + for (; d40d->lli_current < d40d->lli_len; d40d->lli_current++) { 444 + struct d40_log_lli *lcla; 445 + 446 + if (d40d->lli_current + 1 < d40d->lli_len) 447 + next_lcla = d40_lcla_alloc_one(d40c, d40d); 448 + else 449 + next_lcla = -EINVAL; 450 + 451 + lcla = d40c->base->lcla_pool.base + 452 + d40c->phy_chan->num * 1024 + 453 + 8 * curr_lcla * 2; 454 + 455 + 
d40_log_lli_lcla_write(lcla, 456 + &d40d->lli_log.dst[d40d->lli_current], 457 + &d40d->lli_log.src[d40d->lli_current], 458 + next_lcla); 459 + 460 + (void) dma_map_single(d40c->base->dev, lcla, 461 + 2 * sizeof(struct d40_log_lli), 462 + DMA_TO_DEVICE); 463 + 464 + curr_lcla = next_lcla; 465 + 466 + if (curr_lcla == -EINVAL) { 467 + d40d->lli_current++; 468 + break; 469 + } 470 + 471 + } 472 + } 452 473 } 453 474 454 475 static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) ··· 542 443 return d; 543 444 } 544 445 545 - /* Support functions for logical channels */ 546 - 547 - static int d40_lcla_id_get(struct d40_chan *d40c) 446 + static struct d40_desc *d40_last_queued(struct d40_chan *d40c) 548 447 { 549 - int src_id = 0; 550 - int dst_id = 0; 551 - struct d40_log_lli *lcla_lidx_base = 552 - d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; 553 - int i; 554 - int lli_per_log = d40c->base->plat_data->llis_per_log; 555 - unsigned long flags; 448 + struct d40_desc *d; 556 449 557 - if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) 558 - return 0; 559 - 560 - if (d40c->base->lcla_pool.num_blocks > 32) 561 - return -EINVAL; 562 - 563 - spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 564 - 565 - for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { 566 - if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & 567 - (0x1 << i))) { 568 - d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= 569 - (0x1 << i); 450 + if (list_empty(&d40c->queue)) 451 + return NULL; 452 + list_for_each_entry(d, &d40c->queue, node) 453 + if (list_is_last(&d->node, &d40c->queue)) 570 454 break; 571 - } 572 - } 573 - src_id = i; 574 - if (src_id >= d40c->base->lcla_pool.num_blocks) 575 - goto err; 576 - 577 - for (; i < d40c->base->lcla_pool.num_blocks; i++) { 578 - if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & 579 - (0x1 << i))) { 580 - d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= 581 - (0x1 << i); 582 - break; 583 - } 584 - } 585 - 586 - dst_id = i; 587 - if (dst_id == src_id) 588 - goto err; 589 - 590 - d40c->lcla.src_id = src_id; 591 - d40c->lcla.dst_id = dst_id; 592 - d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; 593 - d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; 594 - 595 - spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 596 - return 0; 597 - err: 598 - spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 599 - return -EINVAL; 455 + return d; 600 456 } 457 + 458 + /* Support functions for logical channels */ 601 459 602 460 603 461 static int d40_channel_execute_command(struct d40_chan *d40c, 604 462 enum d40_command command) 605 463 { 606 - int status, i; 464 + u32 status; 465 + int i; 607 466 void __iomem *active_reg; 608 467 int ret = 0; 609 468 unsigned long flags; ··· 624 567 static void d40_term_all(struct d40_chan *d40c) 625 568 { 626 569 struct d40_desc *d40d; 627 - unsigned long flags; 628 570 629 571 /* Release active descriptors */ 630 572 while ((d40d = d40_first_active_get(d40c))) { 631 573 d40_desc_remove(d40d); 632 - 633 - /* Return desc to free-list */ 634 574 d40_desc_free(d40c, d40d); 635 575 } 636 576 637 577 /* Release queued descriptors waiting for transfer */ 638 578 while ((d40d = d40_first_queued(d40c))) { 639 579 d40_desc_remove(d40d); 640 - 641 - /* Return desc to free-list */ 642 580 d40_desc_free(d40c, d40d); 643 581 } 644 582 645 - spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); 646 - 647 - d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= 648 - (~(0x1 << 
d40c->lcla.dst_id)); 649 - d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= 650 - (~(0x1 << d40c->lcla.src_id)); 651 - 652 - d40c->lcla.src_id = -1; 653 - d40c->lcla.dst_id = -1; 654 - 655 - spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); 656 583 657 584 d40c->pending_tx = 0; 658 585 d40c->busy = false; ··· 681 640 682 641 static u32 d40_chan_has_events(struct d40_chan *d40c) 683 642 { 684 - u32 val = 0; 643 + u32 val; 685 644 686 - /* If SSLNK or SDLNK is zero all events are disabled */ 687 - if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 688 - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 689 - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 690 - d40c->phy_chan->num * D40_DREG_PCDELTA + 691 - D40_CHAN_REG_SSLNK); 645 + val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 646 + d40c->phy_chan->num * D40_DREG_PCDELTA + 647 + D40_CHAN_REG_SSLNK); 692 648 693 - if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) 694 - val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 695 - d40c->phy_chan->num * D40_DREG_PCDELTA + 696 - D40_CHAN_REG_SDLNK); 649 + val |= readl(d40c->base->virtbase + D40_DREG_PCBASE + 650 + d40c->phy_chan->num * D40_DREG_PCDELTA + 651 + D40_CHAN_REG_SDLNK); 697 652 return val; 698 653 } 699 654 700 - static void d40_config_enable_lidx(struct d40_chan *d40c) 655 + static u32 d40_get_prmo(struct d40_chan *d40c) 701 656 { 702 - /* Set LIDX for lcla */ 703 - writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 704 - D40_SREG_ELEM_LOG_LIDX_MASK, 705 - d40c->base->virtbase + D40_DREG_PCBASE + 706 - d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); 657 + static const unsigned int phy_map[] = { 658 + [STEDMA40_PCHAN_BASIC_MODE] 659 + = D40_DREG_PRMO_PCHAN_BASIC, 660 + [STEDMA40_PCHAN_MODULO_MODE] 661 + = D40_DREG_PRMO_PCHAN_MODULO, 662 + [STEDMA40_PCHAN_DOUBLE_DST_MODE] 663 + = D40_DREG_PRMO_PCHAN_DOUBLE_DST, 664 + }; 665 + static const unsigned int log_map[] = { 666 + [STEDMA40_LCHAN_SRC_PHY_DST_LOG] 667 + = D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG, 668 + [STEDMA40_LCHAN_SRC_LOG_DST_PHY] 669 + = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY, 670 + [STEDMA40_LCHAN_SRC_LOG_DST_LOG] 671 + = D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG, 672 + }; 707 673 708 - writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 709 - D40_SREG_ELEM_LOG_LIDX_MASK, 710 - d40c->base->virtbase + D40_DREG_PCBASE + 711 - d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); 674 + if (d40c->log_num == D40_PHY_CHAN) 675 + return phy_map[d40c->dma_cfg.mode_opt]; 676 + else 677 + return log_map[d40c->dma_cfg.mode_opt]; 712 678 } 713 679 714 - static int d40_config_write(struct d40_chan *d40c) 680 + static void d40_config_write(struct d40_chan *d40c) 715 681 { 716 682 u32 addr_base; 717 683 u32 var; 718 - int res; 719 - 720 - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 721 - if (res) 722 - return res; 723 684 724 685 /* Odd addresses are even addresses + 4 */ 725 686 addr_base = (d40c->phy_chan->num % 2) * 4; ··· 731 688 writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 732 689 733 690 /* Setup operational mode option register */ 734 - var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & 735 - 0x3) << D40_CHAN_POS(d40c->phy_chan->num); 691 + var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num); 736 692 737 693 writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 738 694 ··· 746 704 d40c->phy_chan->num * D40_DREG_PCDELTA + 747 705 D40_CHAN_REG_SDCFG); 748 706 749 - d40_config_enable_lidx(d40c); 707 + /* 
Set LIDX for lcla */ 708 + writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 709 + D40_SREG_ELEM_LOG_LIDX_MASK, 710 + d40c->base->virtbase + D40_DREG_PCBASE + 711 + d40c->phy_chan->num * D40_DREG_PCDELTA + 712 + D40_CHAN_REG_SDELT); 713 + 714 + writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 715 + D40_SREG_ELEM_LOG_LIDX_MASK, 716 + d40c->base->virtbase + D40_DREG_PCBASE + 717 + d40c->phy_chan->num * D40_DREG_PCDELTA + 718 + D40_CHAN_REG_SSELT); 719 + 750 720 } 721 + } 722 + 723 + static u32 d40_residue(struct d40_chan *d40c) 724 + { 725 + u32 num_elt; 726 + 727 + if (d40c->log_num != D40_PHY_CHAN) 728 + num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 729 + >> D40_MEM_LCSP2_ECNT_POS; 730 + else 731 + num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + 732 + d40c->phy_chan->num * D40_DREG_PCDELTA + 733 + D40_CHAN_REG_SDELT) & 734 + D40_SREG_ELEM_PHY_ECNT_MASK) >> 735 + D40_SREG_ELEM_PHY_ECNT_POS; 736 + return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 737 + } 738 + 739 + static bool d40_tx_is_linked(struct d40_chan *d40c) 740 + { 741 + bool is_link; 742 + 743 + if (d40c->log_num != D40_PHY_CHAN) 744 + is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 745 + else 746 + is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + 747 + d40c->phy_chan->num * D40_DREG_PCDELTA + 748 + D40_CHAN_REG_SDLNK) & 749 + D40_SREG_LNK_PHYS_LNK_MASK; 750 + return is_link; 751 + } 752 + 753 + static int d40_pause(struct dma_chan *chan) 754 + { 755 + struct d40_chan *d40c = 756 + container_of(chan, struct d40_chan, chan); 757 + int res = 0; 758 + unsigned long flags; 759 + 760 + if (!d40c->busy) 761 + return 0; 762 + 763 + spin_lock_irqsave(&d40c->lock, flags); 764 + 765 + res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 766 + if (res == 0) { 767 + if (d40c->log_num != D40_PHY_CHAN) { 768 + d40_config_set_event(d40c, false); 769 + /* Resume the other logical channels if any */ 770 + if (d40_chan_has_events(d40c)) 771 + res = d40_channel_execute_command(d40c, 772 + D40_DMA_RUN); 773 + } 774 + } 775 + 776 + spin_unlock_irqrestore(&d40c->lock, flags); 751 777 return res; 752 778 } 753 779 754 - static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 780 + static int d40_resume(struct dma_chan *chan) 755 781 { 756 - if (d40d->lli_phy.dst && d40d->lli_phy.src) { 757 - d40_phy_lli_write(d40c->base->virtbase, 758 - d40c->phy_chan->num, 759 - d40d->lli_phy.dst, 760 - d40d->lli_phy.src); 761 - } else if (d40d->lli_log.dst && d40d->lli_log.src) { 762 - struct d40_log_lli *src = d40d->lli_log.src; 763 - struct d40_log_lli *dst = d40d->lli_log.dst; 764 - int s; 782 + struct d40_chan *d40c = 783 + container_of(chan, struct d40_chan, chan); 784 + int res = 0; 785 + unsigned long flags; 765 786 766 - src += d40d->lli_count; 767 - dst += d40d->lli_count; 768 - s = d40_log_lli_write(d40c->lcpa, 769 - d40c->lcla.src, d40c->lcla.dst, 770 - dst, src, 771 - d40c->base->plat_data->llis_per_log); 787 + if (!d40c->busy) 788 + return 0; 772 789 773 - /* If s equals to zero, the job is not linked */ 774 - if (s > 0) { 775 - (void) dma_map_single(d40c->base->dev, d40c->lcla.src, 776 - s * sizeof(struct d40_log_lli), 777 - DMA_TO_DEVICE); 778 - (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, 779 - s * sizeof(struct d40_log_lli), 780 - DMA_TO_DEVICE); 790 + spin_lock_irqsave(&d40c->lock, flags); 791 + 792 + if (d40c->base->rev == 0) 793 + if (d40c->log_num != D40_PHY_CHAN) { 794 + res = d40_channel_execute_command(d40c, 795 + 
D40_DMA_SUSPEND_REQ); 796 + goto no_suspend; 781 797 } 798 + 799 + /* If bytes left to transfer or linked tx resume job */ 800 + if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 801 + 802 + if (d40c->log_num != D40_PHY_CHAN) 803 + d40_config_set_event(d40c, true); 804 + 805 + res = d40_channel_execute_command(d40c, D40_DMA_RUN); 782 806 } 783 - d40d->lli_count += d40d->lli_tx_len; 807 + 808 + no_suspend: 809 + spin_unlock_irqrestore(&d40c->lock, flags); 810 + return res; 811 + } 812 + 813 + static void d40_tx_submit_log(struct d40_chan *d40c, struct d40_desc *d40d) 814 + { 815 + /* TODO: Write */ 816 + } 817 + 818 + static void d40_tx_submit_phy(struct d40_chan *d40c, struct d40_desc *d40d) 819 + { 820 + struct d40_desc *d40d_prev = NULL; 821 + int i; 822 + u32 val; 823 + 824 + if (!list_empty(&d40c->queue)) 825 + d40d_prev = d40_last_queued(d40c); 826 + else if (!list_empty(&d40c->active)) 827 + d40d_prev = d40_first_active_get(d40c); 828 + 829 + if (!d40d_prev) 830 + return; 831 + 832 + /* Here we try to join this job with previous jobs */ 833 + val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 834 + d40c->phy_chan->num * D40_DREG_PCDELTA + 835 + D40_CHAN_REG_SSLNK); 836 + 837 + /* Figure out which link we're currently transmitting */ 838 + for (i = 0; i < d40d_prev->lli_len; i++) 839 + if (val == d40d_prev->lli_phy.src[i].reg_lnk) 840 + break; 841 + 842 + val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 843 + d40c->phy_chan->num * D40_DREG_PCDELTA + 844 + D40_CHAN_REG_SSELT) >> D40_SREG_ELEM_LOG_ECNT_POS; 845 + 846 + if (i == (d40d_prev->lli_len - 1) && val > 0) { 847 + /* Change the current one */ 848 + writel(virt_to_phys(d40d->lli_phy.src), 849 + d40c->base->virtbase + D40_DREG_PCBASE + 850 + d40c->phy_chan->num * D40_DREG_PCDELTA + 851 + D40_CHAN_REG_SSLNK); 852 + writel(virt_to_phys(d40d->lli_phy.dst), 853 + d40c->base->virtbase + D40_DREG_PCBASE + 854 + d40c->phy_chan->num * D40_DREG_PCDELTA + 855 + D40_CHAN_REG_SDLNK); 856 + 857 + d40d->is_hw_linked = true; 858 + 859 + } else if (i < d40d_prev->lli_len) { 860 + (void) dma_unmap_single(d40c->base->dev, 861 + virt_to_phys(d40d_prev->lli_phy.src), 862 + d40d_prev->lli_pool.size, 863 + DMA_TO_DEVICE); 864 + 865 + /* Keep the settings */ 866 + val = d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk & 867 + ~D40_SREG_LNK_PHYS_LNK_MASK; 868 + d40d_prev->lli_phy.src[d40d_prev->lli_len - 1].reg_lnk = 869 + val | virt_to_phys(d40d->lli_phy.src); 870 + 871 + val = d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk & 872 + ~D40_SREG_LNK_PHYS_LNK_MASK; 873 + d40d_prev->lli_phy.dst[d40d_prev->lli_len - 1].reg_lnk = 874 + val | virt_to_phys(d40d->lli_phy.dst); 875 + 876 + (void) dma_map_single(d40c->base->dev, 877 + d40d_prev->lli_phy.src, 878 + d40d_prev->lli_pool.size, 879 + DMA_TO_DEVICE); 880 + d40d->is_hw_linked = true; 881 + } 784 882 } 785 883 786 884 static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) ··· 931 749 struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); 932 750 unsigned long flags; 933 751 752 + (void) d40_pause(&d40c->chan); 753 + 934 754 spin_lock_irqsave(&d40c->lock, flags); 935 755 936 - tx->cookie = d40_assign_cookie(d40c, d40d); 756 + d40c->chan.cookie++; 757 + 758 + if (d40c->chan.cookie < 0) 759 + d40c->chan.cookie = 1; 760 + 761 + d40d->txd.cookie = d40c->chan.cookie; 762 + 763 + if (d40c->log_num == D40_PHY_CHAN) 764 + d40_tx_submit_phy(d40c, d40d); 765 + else 766 + d40_tx_submit_log(d40c, d40d); 937 767 938 768 d40_desc_queue(d40c, d40d); 939 769 940 770 
spin_unlock_irqrestore(&d40c->lock, flags); 771 + 772 + (void) d40_resume(&d40c->chan); 941 773 942 774 return tx->cookie; 943 775 } ··· 992 796 /* Add to active queue */ 993 797 d40_desc_submit(d40c, d40d); 994 798 995 - /* Initiate DMA job */ 996 - d40_desc_load(d40c, d40d); 799 + /* 800 + * If this job is already linked in hw, 801 + * do not submit it. 802 + */ 997 803 998 - /* Start dma job */ 999 - err = d40_start(d40c); 804 + if (!d40d->is_hw_linked) { 805 + /* Initiate DMA job */ 806 + d40_desc_load(d40c, d40d); 1000 807 1001 - if (err) 1002 - return NULL; 808 + /* Start dma job */ 809 + err = d40_start(d40c); 810 + 811 + if (err) 812 + return NULL; 813 + } 1003 814 } 1004 815 1005 816 return d40d; ··· 1017 814 { 1018 815 struct d40_desc *d40d; 1019 816 1020 - if (!d40c->phy_chan) 1021 - return; 1022 - 1023 817 /* Get first active entry from list */ 1024 818 d40d = d40_first_active_get(d40c); 1025 819 1026 820 if (d40d == NULL) 1027 821 return; 1028 822 1029 - if (d40d->lli_count < d40d->lli_len) { 823 + d40_lcla_free_all(d40c, d40d); 1030 824 825 + if (d40d->lli_current < d40d->lli_len) { 1031 826 d40_desc_load(d40c, d40d); 1032 827 /* Start dma job */ 1033 828 (void) d40_start(d40c); ··· 1043 842 static void dma_tasklet(unsigned long data) 1044 843 { 1045 844 struct d40_chan *d40c = (struct d40_chan *) data; 1046 - struct d40_desc *d40d_fin; 845 + struct d40_desc *d40d; 1047 846 unsigned long flags; 1048 847 dma_async_tx_callback callback; 1049 848 void *callback_param; ··· 1051 850 spin_lock_irqsave(&d40c->lock, flags); 1052 851 1053 852 /* Get first active entry from list */ 1054 - d40d_fin = d40_first_active_get(d40c); 853 + d40d = d40_first_active_get(d40c); 1055 854 1056 - if (d40d_fin == NULL) 855 + if (d40d == NULL) 1057 856 goto err; 1058 857 1059 - d40c->completed = d40d_fin->txd.cookie; 858 + d40c->completed = d40d->txd.cookie; 1060 859 1061 860 /* 1062 861 * If terminating a channel pending_tx is set to zero. 
··· 1068 867 } 1069 868 1070 869 /* Callback to client */ 1071 - callback = d40d_fin->txd.callback; 1072 - callback_param = d40d_fin->txd.callback_param; 870 + callback = d40d->txd.callback; 871 + callback_param = d40d->txd.callback_param; 1073 872 1074 - if (async_tx_test_ack(&d40d_fin->txd)) { 1075 - d40_pool_lli_free(d40d_fin); 1076 - d40_desc_remove(d40d_fin); 1077 - /* Return desc to free-list */ 1078 - d40_desc_free(d40c, d40d_fin); 873 + if (async_tx_test_ack(&d40d->txd)) { 874 + d40_pool_lli_free(d40d); 875 + d40_desc_remove(d40d); 876 + d40_desc_free(d40c, d40d); 1079 877 } else { 1080 - if (!d40d_fin->is_in_client_list) { 1081 - d40_desc_remove(d40d_fin); 1082 - list_add_tail(&d40d_fin->node, &d40c->client); 1083 - d40d_fin->is_in_client_list = true; 878 + if (!d40d->is_in_client_list) { 879 + d40_desc_remove(d40d); 880 + d40_lcla_free_all(d40c, d40d); 881 + list_add_tail(&d40d->node, &d40c->client); 882 + d40d->is_in_client_list = true; 1084 883 } 1085 884 } 1086 885 ··· 1091 890 1092 891 spin_unlock_irqrestore(&d40c->lock, flags); 1093 892 1094 - if (callback) 893 + if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT)) 1095 894 callback(callback_param); 1096 895 1097 896 return; ··· 1120 919 1121 920 int i; 1122 921 u32 regs[ARRAY_SIZE(il)]; 1123 - u32 tmp; 1124 922 u32 idx; 1125 923 u32 row; 1126 924 long chan = -1; ··· 1146 946 idx = chan & (BITS_PER_LONG - 1); 1147 947 1148 948 /* ACK interrupt */ 1149 - tmp = readl(base->virtbase + il[row].clr); 1150 - tmp |= 1 << idx; 1151 - writel(tmp, base->virtbase + il[row].clr); 949 + writel(1 << idx, base->virtbase + il[row].clr); 1152 950 1153 951 if (il[row].offset == D40_PHY_CHAN) 1154 952 d40c = base->lookup_phy_chans[idx]; ··· 1169 971 return IRQ_HANDLED; 1170 972 } 1171 973 1172 - 1173 974 static int d40_validate_conf(struct d40_chan *d40c, 1174 975 struct stedma40_chan_cfg *conf) 1175 976 { 1176 977 int res = 0; 1177 978 u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); 1178 979 u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); 1179 - bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) 1180 - == STEDMA40_CHANNEL_IN_LOG_MODE; 980 + bool is_log = conf->mode == STEDMA40_MODE_LOGICAL; 1181 981 1182 - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH && 982 + if (!conf->dir) { 983 + dev_err(&d40c->chan.dev->device, "[%s] Invalid direction.\n", 984 + __func__); 985 + res = -EINVAL; 986 + } 987 + 988 + if (conf->dst_dev_type != STEDMA40_DEV_DST_MEMORY && 989 + d40c->base->plat_data->dev_tx[conf->dst_dev_type] == 0 && 990 + d40c->runtime_addr == 0) { 991 + 992 + dev_err(&d40c->chan.dev->device, 993 + "[%s] Invalid TX channel address (%d)\n", 994 + __func__, conf->dst_dev_type); 995 + res = -EINVAL; 996 + } 997 + 998 + if (conf->src_dev_type != STEDMA40_DEV_SRC_MEMORY && 999 + d40c->base->plat_data->dev_rx[conf->src_dev_type] == 0 && 1000 + d40c->runtime_addr == 0) { 1001 + dev_err(&d40c->chan.dev->device, 1002 + "[%s] Invalid RX channel address (%d)\n", 1003 + __func__, conf->src_dev_type); 1004 + res = -EINVAL; 1005 + } 1006 + 1007 + if (conf->dir == STEDMA40_MEM_TO_PERIPH && 1183 1008 dst_event_group == STEDMA40_DEV_DST_MEMORY) { 1184 1009 dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", 1185 1010 __func__); 1186 1011 res = -EINVAL; 1187 1012 } 1188 1013 1189 - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM && 1014 + if (conf->dir == STEDMA40_PERIPH_TO_MEM && 1190 1015 src_event_group == STEDMA40_DEV_SRC_MEMORY) { 1191 1016 dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", 1192 1017 
__func__); ··· 1303 1082 1304 1083 spin_lock_irqsave(&phy->lock, flags); 1305 1084 if (!log_event_line) { 1306 - /* Physical interrupts are masked per physical full channel */ 1307 1085 phy->allocated_dst = D40_ALLOC_FREE; 1308 1086 phy->allocated_src = D40_ALLOC_FREE; 1309 1087 is_free = true; ··· 1339 1119 int j; 1340 1120 int log_num; 1341 1121 bool is_src; 1342 - bool is_log = (d40c->dma_cfg.channel_type & 1343 - STEDMA40_CHANNEL_IN_OPER_MODE) 1344 - == STEDMA40_CHANNEL_IN_LOG_MODE; 1345 - 1122 + bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL; 1346 1123 1347 1124 phys = d40c->base->phy_res; 1348 1125 ··· 1468 1251 list_for_each_entry_safe(d, _d, &d40c->client, node) { 1469 1252 d40_pool_lli_free(d); 1470 1253 d40_desc_remove(d); 1471 - /* Return desc to free-list */ 1472 1254 d40_desc_free(d40c, d); 1473 1255 } 1474 1256 ··· 1540 1324 return res; 1541 1325 } 1542 1326 d40c->phy_chan = NULL; 1543 - /* Invalidate channel type */ 1544 - d40c->dma_cfg.channel_type = 0; 1327 + d40c->configured = false; 1545 1328 d40c->base->lookup_phy_chans[phy->num] = NULL; 1546 1329 1547 1330 return 0; 1548 - } 1549 - 1550 - static int d40_pause(struct dma_chan *chan) 1551 - { 1552 - struct d40_chan *d40c = 1553 - container_of(chan, struct d40_chan, chan); 1554 - int res; 1555 - unsigned long flags; 1556 - 1557 - spin_lock_irqsave(&d40c->lock, flags); 1558 - 1559 - res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1560 - if (res == 0) { 1561 - if (d40c->log_num != D40_PHY_CHAN) { 1562 - d40_config_set_event(d40c, false); 1563 - /* Resume the other logical channels if any */ 1564 - if (d40_chan_has_events(d40c)) 1565 - res = d40_channel_execute_command(d40c, 1566 - D40_DMA_RUN); 1567 - } 1568 - } 1569 - 1570 - spin_unlock_irqrestore(&d40c->lock, flags); 1571 - return res; 1572 1331 } 1573 1332 1574 1333 static bool d40_is_paused(struct d40_chan *d40c) ··· 1572 1381 } 1573 1382 1574 1383 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1575 - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) 1384 + d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1576 1385 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1577 - else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 1386 + status = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1387 + d40c->phy_chan->num * D40_DREG_PCDELTA + 1388 + D40_CHAN_REG_SDLNK); 1389 + } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1578 1390 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1579 - else { 1391 + status = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1392 + d40c->phy_chan->num * D40_DREG_PCDELTA + 1393 + D40_CHAN_REG_SSLNK); 1394 + } else { 1580 1395 dev_err(&d40c->chan.dev->device, 1581 1396 "[%s] Unknown direction\n", __func__); 1582 1397 goto _exit; 1583 1398 } 1584 - status = d40_chan_has_events(d40c); 1399 + 1585 1400 status = (status & D40_EVENTLINE_MASK(event)) >> 1586 1401 D40_EVENTLINE_POS(event); 1587 1402 ··· 1599 1402 1600 1403 } 1601 1404 1602 - 1603 - static bool d40_tx_is_linked(struct d40_chan *d40c) 1604 - { 1605 - bool is_link; 1606 - 1607 - if (d40c->log_num != D40_PHY_CHAN) 1608 - is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 1609 - else 1610 - is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1611 - d40c->phy_chan->num * D40_DREG_PCDELTA + 1612 - D40_CHAN_REG_SDLNK) & 1613 - D40_SREG_LNK_PHYS_LNK_MASK; 1614 - return is_link; 1615 - } 1616 - 1617 - static u32 d40_residue(struct d40_chan *d40c) 1618 - { 1619 - u32 num_elt; 1620 - 1621 - if (d40c->log_num != D40_PHY_CHAN) 1622 - num_elt = 
(readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 1623 - >> D40_MEM_LCSP2_ECNT_POS; 1624 - else 1625 - num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + 1626 - d40c->phy_chan->num * D40_DREG_PCDELTA + 1627 - D40_CHAN_REG_SDELT) & 1628 - D40_SREG_ELEM_PHY_ECNT_MASK) >> 1629 - D40_SREG_ELEM_PHY_ECNT_POS; 1630 - return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 1631 - } 1632 - 1633 - static int d40_resume(struct dma_chan *chan) 1634 - { 1635 - struct d40_chan *d40c = 1636 - container_of(chan, struct d40_chan, chan); 1637 - int res = 0; 1638 - unsigned long flags; 1639 - 1640 - spin_lock_irqsave(&d40c->lock, flags); 1641 - 1642 - if (d40c->base->rev == 0) 1643 - if (d40c->log_num != D40_PHY_CHAN) { 1644 - res = d40_channel_execute_command(d40c, 1645 - D40_DMA_SUSPEND_REQ); 1646 - goto no_suspend; 1647 - } 1648 - 1649 - /* If bytes left to transfer or linked tx resume job */ 1650 - if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 1651 - if (d40c->log_num != D40_PHY_CHAN) 1652 - d40_config_set_event(d40c, true); 1653 - res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1654 - } 1655 - 1656 - no_suspend: 1657 - spin_unlock_irqrestore(&d40c->lock, flags); 1658 - return res; 1659 - } 1660 1405 1661 1406 static u32 stedma40_residue(struct dma_chan *chan) 1662 1407 { ··· 1613 1474 1614 1475 return bytes_left; 1615 1476 } 1616 - 1617 - /* Public DMA functions in addition to the DMA engine framework */ 1618 - 1619 - int stedma40_set_psize(struct dma_chan *chan, 1620 - int src_psize, 1621 - int dst_psize) 1622 - { 1623 - struct d40_chan *d40c = 1624 - container_of(chan, struct d40_chan, chan); 1625 - unsigned long flags; 1626 - 1627 - spin_lock_irqsave(&d40c->lock, flags); 1628 - 1629 - if (d40c->log_num != D40_PHY_CHAN) { 1630 - d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; 1631 - d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; 1632 - d40c->log_def.lcsp1 |= src_psize << 1633 - D40_MEM_LCSP1_SCFG_PSIZE_POS; 1634 - d40c->log_def.lcsp3 |= dst_psize << 1635 - D40_MEM_LCSP1_SCFG_PSIZE_POS; 1636 - goto out; 1637 - } 1638 - 1639 - if (src_psize == STEDMA40_PSIZE_PHY_1) 1640 - d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); 1641 - else { 1642 - d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; 1643 - d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << 1644 - D40_SREG_CFG_PSIZE_POS); 1645 - d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; 1646 - } 1647 - 1648 - if (dst_psize == STEDMA40_PSIZE_PHY_1) 1649 - d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); 1650 - else { 1651 - d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; 1652 - d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << 1653 - D40_SREG_CFG_PSIZE_POS); 1654 - d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; 1655 - } 1656 - out: 1657 - spin_unlock_irqrestore(&d40c->lock, flags); 1658 - return 0; 1659 - } 1660 - EXPORT_SYMBOL(stedma40_set_psize); 1661 1477 1662 1478 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 1663 1479 struct scatterlist *sgl_dst, ··· 1639 1545 goto err; 1640 1546 1641 1547 d40d->lli_len = sgl_len; 1642 - d40d->lli_tx_len = d40d->lli_len; 1548 + d40d->lli_current = 0; 1643 1549 d40d->txd.flags = dma_flags; 1644 1550 1645 1551 if (d40c->log_num != D40_PHY_CHAN) { 1646 - if (d40d->lli_len > d40c->base->plat_data->llis_per_log) 1647 - d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; 1648 - 1649 - if (sgl_len > 1) 1650 - /* 1651 - * Check if there is space available in lcla. 
If not, 1652 - * split list into 1-length and run only in lcpa 1653 - * space. 1654 - */ 1655 - if (d40_lcla_id_get(d40c) != 0) 1656 - d40d->lli_tx_len = 1; 1657 1552 1658 1553 if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { 1659 1554 dev_err(&d40c->chan.dev->device, ··· 1650 1567 goto err; 1651 1568 } 1652 1569 1653 - (void) d40_log_sg_to_lli(d40c->lcla.src_id, 1654 - sgl_src, 1570 + (void) d40_log_sg_to_lli(sgl_src, 1655 1571 sgl_len, 1656 1572 d40d->lli_log.src, 1657 1573 d40c->log_def.lcsp1, 1658 - d40c->dma_cfg.src_info.data_width, 1659 - dma_flags & DMA_PREP_INTERRUPT, 1660 - d40d->lli_tx_len, 1661 - d40c->base->plat_data->llis_per_log); 1574 + d40c->dma_cfg.src_info.data_width); 1662 1575 1663 - (void) d40_log_sg_to_lli(d40c->lcla.dst_id, 1664 - sgl_dst, 1576 + (void) d40_log_sg_to_lli(sgl_dst, 1665 1577 sgl_len, 1666 1578 d40d->lli_log.dst, 1667 1579 d40c->log_def.lcsp3, 1668 - d40c->dma_cfg.dst_info.data_width, 1669 - dma_flags & DMA_PREP_INTERRUPT, 1670 - d40d->lli_tx_len, 1671 - d40c->base->plat_data->llis_per_log); 1672 - 1673 - 1580 + d40c->dma_cfg.dst_info.data_width); 1674 1581 } else { 1675 1582 if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1676 1583 dev_err(&d40c->chan.dev->device, ··· 1672 1599 sgl_len, 1673 1600 0, 1674 1601 d40d->lli_phy.src, 1675 - d40d->lli_phy.src_addr, 1602 + virt_to_phys(d40d->lli_phy.src), 1676 1603 d40c->src_def_cfg, 1677 1604 d40c->dma_cfg.src_info.data_width, 1678 - d40c->dma_cfg.src_info.psize, 1679 - true); 1605 + d40c->dma_cfg.src_info.psize); 1680 1606 1681 1607 if (res < 0) 1682 1608 goto err; ··· 1684 1612 sgl_len, 1685 1613 0, 1686 1614 d40d->lli_phy.dst, 1687 - d40d->lli_phy.dst_addr, 1615 + virt_to_phys(d40d->lli_phy.dst), 1688 1616 d40c->dst_def_cfg, 1689 1617 d40c->dma_cfg.dst_info.data_width, 1690 - d40c->dma_cfg.dst_info.psize, 1691 - true); 1618 + d40c->dma_cfg.dst_info.psize); 1692 1619 1693 1620 if (res < 0) 1694 1621 goto err; ··· 1704 1633 1705 1634 return &d40d->txd; 1706 1635 err: 1636 + if (d40d) 1637 + d40_desc_free(d40c, d40d); 1707 1638 spin_unlock_irqrestore(&d40c->lock, flags); 1708 1639 return NULL; 1709 1640 } ··· 1725 1652 } else 1726 1653 err = d40_config_memcpy(d40c); 1727 1654 1655 + if (!err) 1656 + d40c->configured = true; 1657 + 1728 1658 return err == 0; 1729 1659 } 1730 1660 EXPORT_SYMBOL(stedma40_filter); ··· 1744 1668 1745 1669 d40c->completed = chan->cookie = 1; 1746 1670 1747 - /* 1748 - * If no dma configuration is set (channel_type == 0) 1749 - * use default configuration (memcpy) 1750 - */ 1751 - if (d40c->dma_cfg.channel_type == 0) { 1671 + /* If no dma configuration is set use default configuration (memcpy) */ 1672 + if (!d40c->configured) { 1752 1673 err = d40_config_memcpy(d40c); 1753 1674 if (err) { 1754 1675 dev_err(&d40c->chan.dev->device, ··· 1785 1712 * resource is free. In case of multiple logical channels 1786 1713 * on the same physical resource, only the first write is necessary. 
1787 1714 */ 1788 - if (is_free_phy) { 1789 - err = d40_config_write(d40c); 1790 - if (err) { 1791 - dev_err(&d40c->chan.dev->device, 1792 - "[%s] Failed to configure channel\n", 1793 - __func__); 1794 - } 1795 - } 1715 + if (is_free_phy) 1716 + d40_config_write(d40c); 1796 1717 fail: 1797 1718 spin_unlock_irqrestore(&d40c->lock, flags); 1798 1719 return err; ··· 1857 1790 goto err; 1858 1791 } 1859 1792 d40d->lli_len = 1; 1860 - d40d->lli_tx_len = 1; 1793 + d40d->lli_current = 0; 1861 1794 1862 1795 d40_log_fill_lli(d40d->lli_log.src, 1863 1796 src, 1864 1797 size, 1865 - 0, 1866 1798 d40c->log_def.lcsp1, 1867 1799 d40c->dma_cfg.src_info.data_width, 1868 - false, true); 1800 + true); 1869 1801 1870 1802 d40_log_fill_lli(d40d->lli_log.dst, 1871 1803 dst, 1872 1804 size, 1873 - 0, 1874 1805 d40c->log_def.lcsp3, 1875 1806 d40c->dma_cfg.dst_info.data_width, 1876 - true, true); 1807 + true); 1877 1808 1878 1809 } else { 1879 1810 ··· 1916 1851 err_fill_lli: 1917 1852 dev_err(&d40c->chan.dev->device, 1918 1853 "[%s] Failed filling in PHY LLI\n", __func__); 1919 - d40_pool_lli_free(d40d); 1920 1854 err: 1855 + if (d40d) 1856 + d40_desc_free(d40c, d40d); 1921 1857 spin_unlock_irqrestore(&d40c->lock, flags); 1922 1858 return NULL; 1859 + } 1860 + 1861 + static struct dma_async_tx_descriptor * 1862 + d40_prep_sg(struct dma_chan *chan, 1863 + struct scatterlist *dst_sg, unsigned int dst_nents, 1864 + struct scatterlist *src_sg, unsigned int src_nents, 1865 + unsigned long dma_flags) 1866 + { 1867 + if (dst_nents != src_nents) 1868 + return NULL; 1869 + 1870 + return stedma40_memcpy_sg(chan, dst_sg, src_sg, dst_nents, dma_flags); 1923 1871 } 1924 1872 1925 1873 static int d40_prep_slave_sg_log(struct d40_desc *d40d, ··· 1952 1874 } 1953 1875 1954 1876 d40d->lli_len = sg_len; 1955 - if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) 1956 - d40d->lli_tx_len = d40d->lli_len; 1957 - else 1958 - d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; 1959 - 1960 - if (sg_len > 1) 1961 - /* 1962 - * Check if there is space available in lcla. 1963 - * If not, split list into 1-length and run only 1964 - * in lcpa space. 
1965 - */ 1966 - if (d40_lcla_id_get(d40c) != 0) 1967 - d40d->lli_tx_len = 1; 1877 + d40d->lli_current = 0; 1968 1878 1969 1879 if (direction == DMA_FROM_DEVICE) 1970 1880 if (d40c->runtime_addr) ··· 1968 1902 else 1969 1903 return -EINVAL; 1970 1904 1971 - total_size = d40_log_sg_to_dev(&d40c->lcla, 1972 - sgl, sg_len, 1905 + total_size = d40_log_sg_to_dev(sgl, sg_len, 1973 1906 &d40d->lli_log, 1974 1907 &d40c->log_def, 1975 1908 d40c->dma_cfg.src_info.data_width, 1976 1909 d40c->dma_cfg.dst_info.data_width, 1977 1910 direction, 1978 - dma_flags & DMA_PREP_INTERRUPT, 1979 - dev_addr, d40d->lli_tx_len, 1980 - d40c->base->plat_data->llis_per_log); 1911 + dev_addr); 1981 1912 1982 1913 if (total_size < 0) 1983 1914 return -EINVAL; ··· 2000 1937 } 2001 1938 2002 1939 d40d->lli_len = sgl_len; 2003 - d40d->lli_tx_len = sgl_len; 1940 + d40d->lli_current = 0; 2004 1941 2005 1942 if (direction == DMA_FROM_DEVICE) { 2006 1943 dst_dev_addr = 0; ··· 2021 1958 sgl_len, 2022 1959 src_dev_addr, 2023 1960 d40d->lli_phy.src, 2024 - d40d->lli_phy.src_addr, 1961 + virt_to_phys(d40d->lli_phy.src), 2025 1962 d40c->src_def_cfg, 2026 1963 d40c->dma_cfg.src_info.data_width, 2027 - d40c->dma_cfg.src_info.psize, 2028 - true); 1964 + d40c->dma_cfg.src_info.psize); 2029 1965 if (res < 0) 2030 1966 return res; 2031 1967 ··· 2032 1970 sgl_len, 2033 1971 dst_dev_addr, 2034 1972 d40d->lli_phy.dst, 2035 - d40d->lli_phy.dst_addr, 1973 + virt_to_phys(d40d->lli_phy.dst), 2036 1974 d40c->dst_def_cfg, 2037 1975 d40c->dma_cfg.dst_info.data_width, 2038 - d40c->dma_cfg.dst_info.psize, 2039 - true); 1976 + d40c->dma_cfg.dst_info.psize); 2040 1977 if (res < 0) 2041 1978 return res; 2042 1979 ··· 2062 2001 return ERR_PTR(-EINVAL); 2063 2002 } 2064 2003 2065 - if (d40c->dma_cfg.pre_transfer) 2066 - d40c->dma_cfg.pre_transfer(chan, 2067 - d40c->dma_cfg.pre_transfer_data, 2068 - sg_dma_len(sgl)); 2069 - 2070 2004 spin_lock_irqsave(&d40c->lock, flags); 2071 2005 d40d = d40_desc_get(d40c); 2072 - spin_unlock_irqrestore(&d40c->lock, flags); 2073 2006 2074 2007 if (d40d == NULL) 2075 - return NULL; 2008 + goto err; 2076 2009 2077 2010 if (d40c->log_num != D40_PHY_CHAN) 2078 2011 err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, ··· 2079 2024 "[%s] Failed to prepare %s slave sg job: %d\n", 2080 2025 __func__, 2081 2026 d40c->log_num != D40_PHY_CHAN ? 
"log" : "phy", err); 2082 - return NULL; 2027 + goto err; 2083 2028 } 2084 2029 2085 2030 d40d->txd.flags = dma_flags; ··· 2088 2033 2089 2034 d40d->txd.tx_submit = d40_tx_submit; 2090 2035 2036 + spin_unlock_irqrestore(&d40c->lock, flags); 2091 2037 return &d40d->txd; 2038 + 2039 + err: 2040 + if (d40d) 2041 + d40_desc_free(d40c, d40d); 2042 + spin_unlock_irqrestore(&d40c->lock, flags); 2043 + return NULL; 2092 2044 } 2093 2045 2094 2046 static enum dma_status d40_tx_status(struct dma_chan *chan, ··· 2228 2166 return; 2229 2167 } 2230 2168 2231 - if (config_maxburst >= 16) 2232 - psize = STEDMA40_PSIZE_LOG_16; 2233 - else if (config_maxburst >= 8) 2234 - psize = STEDMA40_PSIZE_LOG_8; 2235 - else if (config_maxburst >= 4) 2236 - psize = STEDMA40_PSIZE_LOG_4; 2237 - else 2238 - psize = STEDMA40_PSIZE_LOG_1; 2169 + if (d40c->log_num != D40_PHY_CHAN) { 2170 + if (config_maxburst >= 16) 2171 + psize = STEDMA40_PSIZE_LOG_16; 2172 + else if (config_maxburst >= 8) 2173 + psize = STEDMA40_PSIZE_LOG_8; 2174 + else if (config_maxburst >= 4) 2175 + psize = STEDMA40_PSIZE_LOG_4; 2176 + else 2177 + psize = STEDMA40_PSIZE_LOG_1; 2178 + } else { 2179 + if (config_maxburst >= 16) 2180 + psize = STEDMA40_PSIZE_PHY_16; 2181 + else if (config_maxburst >= 8) 2182 + psize = STEDMA40_PSIZE_PHY_8; 2183 + else if (config_maxburst >= 4) 2184 + psize = STEDMA40_PSIZE_PHY_4; 2185 + else 2186 + psize = STEDMA40_PSIZE_PHY_1; 2187 + } 2239 2188 2240 2189 /* Set up all the endpoint configs */ 2241 2190 cfg->src_info.data_width = addr_width; 2242 2191 cfg->src_info.psize = psize; 2243 - cfg->src_info.endianess = STEDMA40_LITTLE_ENDIAN; 2192 + cfg->src_info.big_endian = false; 2244 2193 cfg->src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2245 2194 cfg->dst_info.data_width = addr_width; 2246 2195 cfg->dst_info.psize = psize; 2247 - cfg->dst_info.endianess = STEDMA40_LITTLE_ENDIAN; 2196 + cfg->dst_info.big_endian = false; 2248 2197 cfg->dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2198 + 2199 + /* Fill in register values */ 2200 + if (d40c->log_num != D40_PHY_CHAN) 2201 + d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 2202 + else 2203 + d40_phy_cfg(cfg, &d40c->src_def_cfg, 2204 + &d40c->dst_def_cfg, false); 2249 2205 2250 2206 /* These settings will take precedence later */ 2251 2207 d40c->runtime_addr = config_addr; ··· 2327 2247 d40c->base = base; 2328 2248 d40c->chan.device = dma; 2329 2249 2330 - /* Invalidate lcla element */ 2331 - d40c->lcla.src_id = -1; 2332 - d40c->lcla.dst_id = -1; 2333 - 2334 2250 spin_lock_init(&d40c->lock); 2335 2251 2336 2252 d40c->log_num = D40_PHY_CHAN; ··· 2357 2281 base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; 2358 2282 base->dma_slave.device_free_chan_resources = d40_free_chan_resources; 2359 2283 base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; 2284 + base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2360 2285 base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; 2361 2286 base->dma_slave.device_tx_status = d40_tx_status; 2362 2287 base->dma_slave.device_issue_pending = d40_issue_pending; ··· 2378 2301 2379 2302 dma_cap_zero(base->dma_memcpy.cap_mask); 2380 2303 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2304 + dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2381 2305 2382 2306 base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; 2383 2307 base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; 2384 2308 base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; 2309 + 
base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2385 2310 base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; 2386 2311 base->dma_memcpy.device_tx_status = d40_tx_status; 2387 2312 base->dma_memcpy.device_issue_pending = d40_issue_pending; ··· 2410 2331 dma_cap_zero(base->dma_both.cap_mask); 2411 2332 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2412 2333 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2334 + dma_cap_set(DMA_SG, base->dma_slave.cap_mask); 2413 2335 2414 2336 base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; 2415 2337 base->dma_both.device_free_chan_resources = d40_free_chan_resources; 2416 2338 base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; 2339 + base->dma_slave.device_prep_dma_sg = d40_prep_sg; 2417 2340 base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; 2418 2341 base->dma_both.device_tx_status = d40_tx_status; 2419 2342 base->dma_both.device_issue_pending = d40_issue_pending; ··· 2468 2387 2469 2388 /* Mark disabled channels as occupied */ 2470 2389 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) { 2471 - base->phy_res[i].allocated_src = D40_ALLOC_PHY; 2472 - base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 2473 - num_phy_chans_avail--; 2390 + int chan = base->plat_data->disabled_channels[i]; 2391 + 2392 + base->phy_res[chan].allocated_src = D40_ALLOC_PHY; 2393 + base->phy_res[chan].allocated_dst = D40_ALLOC_PHY; 2394 + num_phy_chans_avail--; 2474 2395 } 2475 2396 2476 2397 dev_info(base->dev, "%d of %d physical DMA channels available\n", ··· 2524 2441 int num_phy_chans; 2525 2442 int i; 2526 2443 u32 val; 2444 + u32 rev; 2527 2445 2528 2446 clk = clk_get(&pdev->dev, NULL); 2529 2447 ··· 2563 2479 } 2564 2480 } 2565 2481 2566 - /* Get silicon revision */ 2482 + /* Get silicon revision and designer */ 2567 2483 val = readl(virtbase + D40_DREG_PERIPHID2); 2568 2484 2569 - if ((val & 0xf) != D40_PERIPHID2_DESIGNER) { 2485 + if ((val & D40_DREG_PERIPHID2_DESIGNER_MASK) != 2486 + D40_HW_DESIGNER) { 2570 2487 dev_err(&pdev->dev, 2571 2488 "[%s] Unknown designer! 
Got %x wanted %x\n", 2572 - __func__, val & 0xf, D40_PERIPHID2_DESIGNER); 2489 + __func__, val & D40_DREG_PERIPHID2_DESIGNER_MASK, 2490 + D40_HW_DESIGNER); 2573 2491 goto failure; 2574 2492 } 2493 + 2494 + rev = (val & D40_DREG_PERIPHID2_REV_MASK) >> 2495 + D40_DREG_PERIPHID2_REV_POS; 2575 2496 2576 2497 /* The number of physical channels on this HW */ 2577 2498 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 2578 2499 2579 2500 dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", 2580 - (val >> 4) & 0xf, res->start); 2501 + rev, res->start); 2581 2502 2582 2503 plat_data = pdev->dev.platform_data; 2583 2504 ··· 2604 2515 goto failure; 2605 2516 } 2606 2517 2607 - base->rev = (val >> 4) & 0xf; 2518 + base->rev = rev; 2608 2519 base->clk = clk; 2609 2520 base->num_phy_chans = num_phy_chans; 2610 2521 base->num_log_chans = num_log_chans; ··· 2638 2549 if (!base->lookup_log_chans) 2639 2550 goto failure; 2640 2551 } 2641 - base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), 2552 + 2553 + base->lcla_pool.alloc_map = kzalloc(num_phy_chans * 2554 + sizeof(struct d40_desc *) * 2555 + D40_LCLA_LINK_PER_EVENT_GRP, 2642 2556 GFP_KERNEL); 2643 2557 if (!base->lcla_pool.alloc_map) 2644 2558 goto failure; ··· 2655 2563 return base; 2656 2564 2657 2565 failure: 2658 - if (clk) { 2566 + if (!IS_ERR(clk)) { 2659 2567 clk_disable(clk); 2660 2568 clk_put(clk); 2661 2569 } ··· 2792 2700 if (i < MAX_LCLA_ALLOC_ATTEMPTS) { 2793 2701 base->lcla_pool.base = (void *)page_list[i]; 2794 2702 } else { 2795 - /* After many attempts, no succees with finding the correct 2796 - * alignment try with allocating a big buffer */ 2703 + /* 2704 + * After many attempts and no succees with finding the correct 2705 + * alignment, try with allocating a big buffer. 2706 + */ 2797 2707 dev_warn(base->dev, 2798 2708 "[%s] Failed to get %d pages @ 18 bit align.\n", 2799 2709 __func__, base->lcla_pool.pages); ··· 2888 2794 2889 2795 spin_lock_init(&base->lcla_pool.lock); 2890 2796 2891 - base->lcla_pool.num_blocks = base->num_phy_chans; 2892 - 2893 2797 base->irq = platform_get_irq(pdev, 0); 2894 2798 2895 2799 ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); ··· 2915 2823 if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) 2916 2824 free_pages((unsigned long)base->lcla_pool.base, 2917 2825 base->lcla_pool.pages); 2918 - if (base->lcla_pool.base_unaligned) 2919 - kfree(base->lcla_pool.base_unaligned); 2826 + 2827 + kfree(base->lcla_pool.base_unaligned); 2828 + 2920 2829 if (base->phy_lcpa) 2921 2830 release_mem_region(base->phy_lcpa, 2922 2831 base->lcpa_size);
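The runtime-configuration hunk above now derives a physical-channel burst size (STEDMA40_PSIZE_PHY_*) as well as the logical one, and fills in the default channel registers up front, so a slave client can retune either channel type on the fly through the standard dmaengine control path. A minimal sketch of a client driving this; the channel is assumed to come from dma_request_channel(), and the FIFO address and burst size are made-up example values:

#include <linux/dmaengine.h>

/*
 * Hedged sketch: configure an RX slave channel at runtime. This ends
 * up in the ste_dma40 runtime-config handler shown above via
 * device_control(DMA_SLAVE_CONFIG, ...).
 */
static int example_configure_rx(struct dma_chan *chan, dma_addr_t fifo_addr)
{
        struct dma_slave_config rx_conf = {
                .direction = DMA_FROM_DEVICE,
                .src_addr = fifo_addr,  /* hypothetical FIFO address */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = 8,      /* maps to STEDMA40_PSIZE_{LOG,PHY}_8 */
        };

        return dmaengine_slave_config(chan, &rx_conf);
}

dmaengine_slave_config() is one of the new inline helpers added to include/linux/dmaengine.h later in this merge.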
+64 -116
drivers/dma/ste_dma40_ll.c
··· 1 1 /*
2 - * driver/dma/ste_dma40_ll.c
3 - *
4 - * Copyright (C) ST-Ericsson 2007-2010
2 + * Copyright (C) ST-Ericsson SA 2007-2010
3 + * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson
4 + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
5 5 * License terms: GNU General Public License (GPL) version 2
6 - * Author: Per Friden <per.friden@stericsson.com>
7 - * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 6 */
9 7
10 8 #include <linux/kernel.h>
··· 37 39 cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
38 40 l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
39 41
40 - l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS;
41 42 l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
42 43 l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
43 44 l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
44 - l3 |= 1 << D40_MEM_LCSP3_DTCP_POS;
45 45
46 46 l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
47 47 l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
48 48 l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
49 - l1 |= 1 << D40_MEM_LCSP1_STCP_POS;
50 49
51 50 *lcsp1 = l1;
52 51 *lcsp3 = l3;
··· 108 113 src |= 1 << D40_SREG_CFG_LOG_GIM_POS;
109 114 }
110 115
111 - if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) {
116 + if (cfg->high_priority) {
112 117 src |= 1 << D40_SREG_CFG_PRI_POS;
113 118 dst |= 1 << D40_SREG_CFG_PRI_POS;
114 119 }
115 120
116 - src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS;
117 - dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS;
121 + if (cfg->src_info.big_endian)
122 + src |= 1 << D40_SREG_CFG_LBE_POS;
123 + if (cfg->dst_info.big_endian)
124 + dst |= 1 << D40_SREG_CFG_LBE_POS;
118 125
119 126 *src_cfg = src;
120 127 *dst_cfg = dst;
··· 194 197 dma_addr_t lli_phys,
195 198 u32 reg_cfg,
196 199 u32 data_width,
197 - int psize,
198 - bool term_int)
200 + int psize)
199 201 {
200 202 int total_size = 0;
201 203 int i;
··· 234 238 }
235 239
236 240 return total_size;
237 - err:
241 + err:
238 242 return err;
239 243 }
··· 267 271
268 272 /* DMA logical lli operations */
269 273
274 + static void d40_log_lli_link(struct d40_log_lli *lli_dst,
275 + struct d40_log_lli *lli_src,
276 + int next)
277 + {
278 + u32 slos = 0;
279 + u32 dlos = 0;
280 +
281 + if (next != -EINVAL) {
282 + slos = next * 2;
283 + dlos = next * 2 + 1;
284 + } else {
285 + lli_dst->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
286 + lli_dst->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
287 + }
288 +
289 + lli_src->lcsp13 = (lli_src->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
290 + (slos << D40_MEM_LCSP1_SLOS_POS);
291 +
292 + lli_dst->lcsp13 = (lli_dst->lcsp13 & ~D40_MEM_LCSP1_SLOS_MASK) |
293 + (dlos << D40_MEM_LCSP1_SLOS_POS);
294 + }
295 +
296 + void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
297 + struct d40_log_lli *lli_dst,
298 + struct d40_log_lli *lli_src,
299 + int next)
300 + {
301 + d40_log_lli_link(lli_dst, lli_src, next);
302 +
303 + writel(lli_src->lcsp02, &lcpa[0].lcsp0);
304 + writel(lli_src->lcsp13, &lcpa[0].lcsp1);
305 + writel(lli_dst->lcsp02, &lcpa[0].lcsp2);
306 + writel(lli_dst->lcsp13, &lcpa[0].lcsp3);
307 + }
308 +
309 + void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
310 + struct d40_log_lli *lli_dst,
311 + struct d40_log_lli *lli_src,
312 + int next)
313 + {
314 + d40_log_lli_link(lli_dst, lli_src, next);
315 +
316 + writel(lli_src->lcsp02, &lcla[0].lcsp02);
317 + writel(lli_src->lcsp13, &lcla[0].lcsp13);
318 + writel(lli_dst->lcsp02, &lcla[1].lcsp02);
319 + writel(lli_dst->lcsp13, &lcla[1].lcsp13);
320 + }
321 +
270 322 void d40_log_fill_lli(struct d40_log_lli *lli,
271 323 dma_addr_t data, u32 data_size,
272 - u32 lli_next_off, u32 reg_cfg,
324 + u32 reg_cfg,
273 325 u32 data_width,
274 - bool term_int, bool addr_inc)
326 + bool addr_inc)
275 327 {
276 328 lli->lcsp13 = reg_cfg;
277 329
··· 334 290 if (addr_inc)
335 291 lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK;
336 292
337 - lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK;
338 - /* If this scatter list entry is the last one, no next link */
339 - lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) &
340 - D40_MEM_LCSP1_SLOS_MASK;
341 -
342 - if (term_int)
343 - lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK;
344 - else
345 - lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK;
346 293 }
347 294
348 - int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
349 - struct scatterlist *sg,
295 + int d40_log_sg_to_dev(struct scatterlist *sg,
350 296 int sg_len,
351 297 struct d40_log_lli_bidir *lli,
352 298 struct d40_def_lcsp *lcsp,
353 299 u32 src_data_width,
354 300 u32 dst_data_width,
355 301 enum dma_data_direction direction,
356 - bool term_int, dma_addr_t dev_addr, int max_len,
357 - int llis_per_log)
302 + dma_addr_t dev_addr)
358 303 {
359 304 int total_size = 0;
360 305 struct scatterlist *current_sg = sg;
361 306 int i;
362 - u32 next_lli_off_dst = 0;
363 - u32 next_lli_off_src = 0;
364 307
365 308 for_each_sg(sg, current_sg, sg_len, i) {
366 309 total_size += sg_dma_len(current_sg);
367 -
368 - /*
369 - * If this scatter list entry is the last one or
370 - * max length, terminate link.
371 - */
372 - if (sg_len - 1 == i || ((i+1) % max_len == 0)) {
373 - next_lli_off_src = 0;
374 - next_lli_off_dst = 0;
375 - } else {
376 - if (next_lli_off_dst == 0 &&
377 - next_lli_off_src == 0) {
378 - /* The first lli will be at next_lli_off */
379 - next_lli_off_dst = (lcla->dst_id *
380 - llis_per_log + 1);
381 - next_lli_off_src = (lcla->src_id *
382 - llis_per_log + 1);
383 - } else {
384 - next_lli_off_dst++;
385 - next_lli_off_src++;
386 - }
387 - }
388 310
389 311 if (direction == DMA_TO_DEVICE) {
390 312 d40_log_fill_lli(&lli->src[i],
391 313 sg_phys(current_sg),
392 314 sg_dma_len(current_sg),
393 - next_lli_off_src,
394 315 lcsp->lcsp1, src_data_width,
395 - false,
396 316 true);
397 317 d40_log_fill_lli(&lli->dst[i],
398 318 dev_addr,
399 319 sg_dma_len(current_sg),
400 - next_lli_off_dst,
401 320 lcsp->lcsp3, dst_data_width,
402 - /* No next == terminal interrupt */
403 - term_int && !next_lli_off_dst,
404 321 false);
405 322 } else {
406 323 d40_log_fill_lli(&lli->dst[i],
407 324 sg_phys(current_sg),
408 325 sg_dma_len(current_sg),
409 - next_lli_off_dst,
410 326 lcsp->lcsp3, dst_data_width,
411 - /* No next == terminal interrupt */
412 - term_int && !next_lli_off_dst,
413 327 true);
414 328 d40_log_fill_lli(&lli->src[i],
415 329 dev_addr,
416 330 sg_dma_len(current_sg),
417 - next_lli_off_src,
418 331 lcsp->lcsp1, src_data_width,
419 - false,
420 332 false);
421 333 }
422 334 }
423 335 return total_size;
424 336 }
425 337
426 - int d40_log_sg_to_lli(int lcla_id,
427 - struct scatterlist *sg,
338 + int d40_log_sg_to_lli(struct scatterlist *sg,
428 339 int sg_len,
429 340 struct d40_log_lli *lli_sg,
430 341 u32 lcsp13, /* src or dst*/
431 - u32 data_width,
432 - bool term_int, int max_len, int llis_per_log)
342 + u32 data_width)
433 343 {
434 344 int total_size = 0;
435 345 struct scatterlist *current_sg = sg;
436 346 int i;
437 - u32 next_lli_off = 0;
438 347
439 348 for_each_sg(sg, current_sg, sg_len, i) {
440 349 total_size += sg_dma_len(current_sg);
441 350
442 - /*
443 - * If this scatter list entry is the last one or
444 - * max length, terminate link.
445 - */
446 - if (sg_len - 1 == i || ((i+1) % max_len == 0))
447 - next_lli_off = 0;
448 - else {
449 - if (next_lli_off == 0)
450 - /* The first lli will be at next_lli_off */
451 - next_lli_off = lcla_id * llis_per_log + 1;
452 - else
453 - next_lli_off++;
454 - }
455 -
456 351 d40_log_fill_lli(&lli_sg[i],
457 352 sg_phys(current_sg),
458 353 sg_dma_len(current_sg),
459 - next_lli_off,
460 354 lcsp13, data_width,
461 - term_int && !next_lli_off,
462 355 true);
463 356 }
464 357 return total_size;
465 - }
466 -
467 - int d40_log_lli_write(struct d40_log_lli_full *lcpa,
468 - struct d40_log_lli *lcla_src,
469 - struct d40_log_lli *lcla_dst,
470 - struct d40_log_lli *lli_dst,
471 - struct d40_log_lli *lli_src,
472 - int llis_per_log)
473 - {
474 - u32 slos;
475 - u32 dlos;
476 - int i;
477 -
478 - writel(lli_src->lcsp02, &lcpa->lcsp0);
479 - writel(lli_src->lcsp13, &lcpa->lcsp1);
480 - writel(lli_dst->lcsp02, &lcpa->lcsp2);
481 - writel(lli_dst->lcsp13, &lcpa->lcsp3);
482 -
483 - slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
484 - dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
485 -
486 - for (i = 0; (i < llis_per_log) && slos && dlos; i++) {
487 - writel(lli_src[i + 1].lcsp02, &lcla_src[i].lcsp02);
488 - writel(lli_src[i + 1].lcsp13, &lcla_src[i].lcsp13);
489 - writel(lli_dst[i + 1].lcsp02, &lcla_dst[i].lcsp02);
490 - writel(lli_dst[i + 1].lcsp13, &lcla_dst[i].lcsp13);
491 -
492 - slos = lli_src[i + 1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK;
493 - dlos = lli_dst[i + 1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK;
494 - }
495 -
496 - return i;
497 -
498 358 }
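The removed next_lli_off bookkeeping is replaced by d40_log_lli_link(), which either points SLOS/DLOS at LCLA slots 2*next and 2*next + 1 or, when next is -EINVAL, sets the TIM/DTCP terminate bits instead. A rough, hypothetical sketch of how a caller could chain prepared src/dst LLI pairs with the two new write helpers; the slot arithmetic follows the 2*next convention above, and this is not the driver's actual caller:

#include <linux/errno.h>
/* assumes "ste_dma40_ll.h" for the types and helpers */

static void example_write_chain(struct d40_log_lli_full *lcpa,
                                struct d40_log_lli *lcla,
                                struct d40_log_lli *src,
                                struct d40_log_lli *dst,
                                int nr_llis)
{
        int i;

        for (i = 0; i < nr_llis; i++) {
                /* the last link gets -EINVAL -> TIM/DTCP terminate bits */
                int next = (i == nr_llis - 1) ? -EINVAL : i + 1;

                if (i == 0)
                        /* the first pair lives in the LCPA */
                        d40_log_lli_lcpa_write(lcpa, &dst[i], &src[i], next);
                else
                        /* pair i occupies LCLA slots 2*i and 2*i + 1 */
                        d40_log_lli_lcla_write(&lcla[2 * i], &dst[i],
                                               &src[i], next);
        }
}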
+37 -47
drivers/dma/ste_dma40_ll.h
··· 1 1 /*
2 - * driver/dma/ste_dma40_ll.h
3 -
4 - * Copyright (C) ST-Ericsson 2007-2010
2 + * Copyright (C) ST-Ericsson SA 2007-2010
3 + * Author: Per Friden <per.friden@stericsson.com> for ST-Ericsson SA
4 + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson SA
5 5 * License terms: GNU General Public License (GPL) version 2
6 - * Author: Per Friden <per.friden@stericsson.com>
7 - * Author: Jonas Aaberg <jonas.aberg@stericsson.com>
8 6 */
9 7 #ifndef STE_DMA40_LL_H
10 8 #define STE_DMA40_LL_H
··· 130 132 #define D40_DREG_PRMSO 0x014
131 133 #define D40_DREG_PRMOE 0x018
132 134 #define D40_DREG_PRMOO 0x01C
135 + #define D40_DREG_PRMO_PCHAN_BASIC 0x1
136 + #define D40_DREG_PRMO_PCHAN_MODULO 0x2
137 + #define D40_DREG_PRMO_PCHAN_DOUBLE_DST 0x3
138 + #define D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG 0x1
139 + #define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY 0x2
140 + #define D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG 0x3
141 +
133 142 #define D40_DREG_LCPA 0x020
134 143 #define D40_DREG_LCLA 0x024
135 144 #define D40_DREG_ACTIVE 0x050
··· 168 163 #define D40_DREG_PERIPHID0 0xFE0
169 164 #define D40_DREG_PERIPHID1 0xFE4
170 165 #define D40_DREG_PERIPHID2 0xFE8
166 + #define D40_DREG_PERIPHID2_REV_POS 4
167 + #define D40_DREG_PERIPHID2_REV_MASK (0xf << D40_DREG_PERIPHID2_REV_POS)
168 + #define D40_DREG_PERIPHID2_DESIGNER_MASK 0xf
171 169 #define D40_DREG_PERIPHID3 0xFEC
172 170 #define D40_DREG_CELLID0 0xFF0
173 171 #define D40_DREG_CELLID1 0xFF4
··· 207 199 *
208 200 * @src: Register settings for src channel.
209 201 * @dst: Register settings for dst channel.
210 - * @dst_addr: Physical destination address.
211 - * @src_addr: Physical source address.
212 202 *
213 203 * All DMA transfers have a source and a destination.
214 204 */
··· 214 208 struct d40_phy_lli_bidir {
215 209 struct d40_phy_lli *src;
216 210 struct d40_phy_lli *dst;
217 - dma_addr_t dst_addr;
218 - dma_addr_t src_addr;
219 211 };
220 212
221 213
··· 275 271 u32 lcsp1;
276 272 };
277 273
278 - /**
279 - * struct d40_lcla_elem - Info for one LCA element.
280 - *
281 - * @src_id: logical channel src id
282 - * @dst_id: logical channel dst id
283 - * @src: LCPA formated src parameters
284 - * @dst: LCPA formated dst parameters
285 - *
286 - */
287 - struct d40_lcla_elem {
288 - int src_id;
289 - int dst_id;
290 - struct d40_log_lli *src;
291 - struct d40_log_lli *dst;
292 - };
293 -
294 274 /* Physical channels */
295 275
296 276 void d40_phy_cfg(struct stedma40_chan_cfg *cfg,
297 - u32 *src_cfg, u32 *dst_cfg, bool is_log);
277 + u32 *src_cfg,
278 + u32 *dst_cfg,
279 + bool is_log);
298 280
299 281 void d40_log_cfg(struct stedma40_chan_cfg *cfg,
300 - u32 *lcsp1, u32 *lcsp2);
282 + u32 *lcsp1,
283 + u32 *lcsp2);
301 284
302 285 int d40_phy_sg_to_lli(struct scatterlist *sg,
303 286 int sg_len,
··· 293 302 dma_addr_t lli_phys,
294 303 u32 reg_cfg,
295 304 u32 data_width,
296 - int psize,
297 - bool term_int);
305 + int psize);
298 306
299 307 int d40_phy_fill_lli(struct d40_phy_lli *lli,
300 308 dma_addr_t data,
··· 313 323 /* Logical channels */
314 324
315 325 void d40_log_fill_lli(struct d40_log_lli *lli,
316 - dma_addr_t data, u32 data_size,
317 - u32 lli_next_off, u32 reg_cfg,
326 + dma_addr_t data,
327 + u32 data_size,
328 + u32 reg_cfg,
318 329 u32 data_width,
319 - bool term_int, bool addr_inc);
330 + bool addr_inc);
320 331
321 - int d40_log_sg_to_dev(struct d40_lcla_elem *lcla,
322 - struct scatterlist *sg,
332 + int d40_log_sg_to_dev(struct scatterlist *sg,
323 333 int sg_len,
324 334 struct d40_log_lli_bidir *lli,
325 335 struct d40_def_lcsp *lcsp,
326 336 u32 src_data_width,
327 337 u32 dst_data_width,
328 338 enum dma_data_direction direction,
329 - bool term_int, dma_addr_t dev_addr, int max_len,
330 - int llis_per_log);
339 + dma_addr_t dev_addr);
331 340
332 - int d40_log_lli_write(struct d40_log_lli_full *lcpa,
333 - struct d40_log_lli *lcla_src,
334 - struct d40_log_lli *lcla_dst,
335 - struct d40_log_lli *lli_dst,
336 - struct d40_log_lli *lli_src,
337 - int llis_per_log);
338 -
339 - int d40_log_sg_to_lli(int lcla_id,
340 - struct scatterlist *sg,
341 + int d40_log_sg_to_lli(struct scatterlist *sg,
341 342 int sg_len,
342 343 struct d40_log_lli *lli_sg,
343 344 u32 lcsp13, /* src or dst*/
344 - u32 data_width,
345 - bool term_int, int max_len, int llis_per_log);
345 + u32 data_width);
346 +
347 + void d40_log_lli_lcpa_write(struct d40_log_lli_full *lcpa,
348 + struct d40_log_lli *lli_dst,
349 + struct d40_log_lli *lli_src,
350 + int next);
351 +
352 + void d40_log_lli_lcla_write(struct d40_log_lli *lcla,
353 + struct d40_log_lli *lli_dst,
354 + struct d40_log_lli *lli_src,
355 + int next);
346 356
347 357 #endif /* STE_DMA40_LLI_H */
+1 -1
drivers/dma/timb_dma.c
··· 759 759 pdata->channels + i;
760 760
761 761 /* even channels are RX, odd are TX */
762 - if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) {
762 + if ((i % 2) == pchan->rx) {
763 763 dev_err(&pdev->dev, "Wrong channel configuration\n");
764 764 err = -EINVAL;
765 765 goto err_tasklet_kill;
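The rewritten condition is equivalent to the old one provided pchan->rx only takes the values 0 or 1 (it is assumed to be a bool in the timberdale platform data): the old form errors out for (odd && rx) || (even && !rx), which is exactly "channel parity equals rx". A standalone user-space check of that equivalence:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
        int i, rx;

        for (i = 0; i < 8; i++)
                for (rx = 0; rx <= 1; rx++) {
                        bool old = ((i % 2) && rx) || (!(i % 2) && !rx);
                        bool simplified = (i % 2) == rx;

                        assert(old == simplified); /* holds for all i, rx */
                }
        return 0;
}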
+222
include/linux/amba/pl08x.h
··· 1 + /*
2 + * linux/amba/pl08x.h - ARM PrimeCell DMA Controller driver
3 + *
4 + * Copyright (C) 2005 ARM Ltd
5 + * Copyright (C) 2010 ST-Ericsson SA
6 + *
7 + * This program is free software; you can redistribute it and/or modify
8 + * it under the terms of the GNU General Public License version 2 as
9 + * published by the Free Software Foundation.
10 + *
11 + * pl08x information required by platform code
12 + *
13 + * Please credit ARM.com
14 + * Documentation: ARM DDI 0196D
15 + *
16 + */
17 +
18 + #ifndef AMBA_PL08X_H
19 + #define AMBA_PL08X_H
20 +
21 + /* We need sizes of structs from this header */
22 + #include <linux/dmaengine.h>
23 + #include <linux/interrupt.h>
24 +
25 + /**
26 + * struct pl08x_channel_data - data structure to pass info between
27 + * platform and PL08x driver regarding channel configuration
28 + * @bus_id: name of this device channel, not just a device name since
29 + * devices may have more than one channel e.g. "foo_tx"
30 + * @min_signal: the minimum DMA signal number to be muxed in for this
31 + * channel (for platforms supporting muxed signals). If you have
32 + * static assignments, make sure this is set to the assigned signal
33 + * number; the PL08x has 16 possible signals, numbered 0 thru 15, so
34 + * when these are not enough they often get muxed (in hardware)
35 + * disabling simultaneous use of the same channel for two devices.
36 + * @max_signal: the maximum DMA signal number to be muxed in for
37 + * the channel. Set to the same as min_signal for
38 + * devices with static assignments
39 + * @muxval: a number usually used to poke into some mux register to
40 + * mux in the signal to this channel
41 + * @cctl_opt: default options for the channel control register
42 + * @addr: source/target address in physical memory for this DMA channel,
43 + * can be the address of a FIFO register for burst requests for example.
44 + * This can be left undefined if the PrimeCell API is used for configuring
45 + * this.
46 + * @circular_buffer: whether the buffer passed in is circular and
47 + * shall simply be looped round round (like a record baby round
48 + * round round round)
49 + * @single: the device connected to this channel will request single
50 + * DMA transfers, not bursts. (Bursts are default.)
51 + */
52 + struct pl08x_channel_data {
53 + char *bus_id;
54 + int min_signal;
55 + int max_signal;
56 + u32 muxval;
57 + u32 cctl;
58 + u32 ccfg;
59 + dma_addr_t addr;
60 + bool circular_buffer;
61 + bool single;
62 + };
63 +
64 + /**
65 + * struct pl08x_bus_data - information about source or destination
66 + * busses for a transfer
67 + * @addr: current address
68 + * @maxwidth: the maximum width of a transfer on this bus
69 + * @buswidth: the width of this bus in bytes: 1, 2 or 4
70 + * @fill_bytes: bytes required to fill to the next bus memory
71 + * boundary
72 + */
73 + struct pl08x_bus_data {
74 + dma_addr_t addr;
75 + u8 maxwidth;
76 + u8 buswidth;
77 + u32 fill_bytes;
78 + };
79 +
80 + /**
81 + * struct pl08x_phy_chan - holder for the physical channels
82 + * @id: physical index to this channel
83 + * @lock: a lock to use when altering an instance of this struct
84 + * @signal: the physical signal (aka channel) serving this
85 + * physical channel right now
86 + * @serving: the virtual channel currently being served by this
87 + * physical channel
88 + */
89 + struct pl08x_phy_chan {
90 + unsigned int id;
91 + void __iomem *base;
92 + spinlock_t lock;
93 + int signal;
94 + struct pl08x_dma_chan *serving;
95 + u32 csrc;
96 + u32 cdst;
97 + u32 clli;
98 + u32 cctl;
99 + u32 ccfg;
100 + };
101 +
102 + /**
103 + * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
104 + * @llis_bus: DMA memory address (physical) start for the LLIs
105 + * @llis_va: virtual memory address start for the LLIs
106 + */
107 + struct pl08x_txd {
108 + struct dma_async_tx_descriptor tx;
109 + struct list_head node;
110 + enum dma_data_direction direction;
111 + struct pl08x_bus_data srcbus;
112 + struct pl08x_bus_data dstbus;
113 + int len;
114 + dma_addr_t llis_bus;
115 + void *llis_va;
116 + struct pl08x_channel_data *cd;
117 + bool active;
118 + /*
119 + * Settings to be put into the physical channel when we
120 + * trigger this txd
121 + */
122 + u32 csrc;
123 + u32 cdst;
124 + u32 clli;
125 + u32 cctl;
126 + };
127 +
128 + /**
129 + * struct pl08x_dma_chan_state - holds the PL08x specific virtual
130 + * channel states
131 + * @PL08X_CHAN_IDLE: the channel is idle
132 + * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
133 + * channel and is running a transfer on it
134 + * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
135 + * channel, but the transfer is currently paused
136 + * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
137 + * channel to become available (only pertains to memcpy channels)
138 + */
139 + enum pl08x_dma_chan_state {
140 + PL08X_CHAN_IDLE,
141 + PL08X_CHAN_RUNNING,
142 + PL08X_CHAN_PAUSED,
143 + PL08X_CHAN_WAITING,
144 + };
145 +
146 + /**
147 + * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
148 + * @chan: wrapped abstract channel
149 + * @phychan: the physical channel utilized by this channel, if there is one
150 + * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
151 + * @name: name of channel
152 + * @cd: channel platform data
153 + * @runtime_addr: address for RX/TX according to the runtime config
154 + * @runtime_direction: current direction of this channel according to
155 + * runtime config
156 + * @lc: last completed transaction on this channel
157 + * @desc_list: queued transactions pending on this channel
158 + * @at: active transaction on this channel
159 + * @lockflags: sometimes we let a lock last between two function calls,
160 + * especially prep/submit, and then we need to store the IRQ flags
161 + * in the channel state, here
162 + * @lock: a lock for this channel data
163 + * @host: a pointer to the host (internal use)
164 + * @state: whether the channel is idle, paused, running etc
165 + * @slave: whether this channel is a device (slave) or for memcpy
166 + * @waiting: a TX descriptor on this channel which is waiting for
167 + * a physical channel to become available
168 + */
169 + struct pl08x_dma_chan {
170 + struct dma_chan chan;
171 + struct pl08x_phy_chan *phychan;
172 + struct tasklet_struct tasklet;
173 + char *name;
174 + struct pl08x_channel_data *cd;
175 + dma_addr_t runtime_addr;
176 + enum dma_data_direction runtime_direction;
177 + atomic_t last_issued;
178 + dma_cookie_t lc;
179 + struct list_head desc_list;
180 + struct pl08x_txd *at;
181 + unsigned long lockflags;
182 + spinlock_t lock;
183 + void *host;
184 + enum pl08x_dma_chan_state state;
185 + bool slave;
186 + struct pl08x_txd *waiting;
187 + };
188 +
189 + /**
190 + * struct pl08x_platform_data - the platform configuration for the
191 + * PL08x PrimeCells.
192 + * @slave_channels: the channels defined for the different devices on the
193 + * platform, all inclusive, including multiplexed channels. The available
194 + * physical channels will be multiplexed around these signals as they
195 + * are requested, just enumerate all possible channels.
196 + * @get_signal: request a physical signal to be used for a DMA
197 + * transfer immediately: if there is some multiplexing or similar blocking
198 + * the use of the channel the transfer can be denied by returning
199 + * less than zero, else it returns the allocated signal number
200 + * @put_signal: indicate to the platform that this physical signal is not
201 + * running any DMA transfer and multiplexing can be recycled
202 + * @bus_bit_lli: Bit[0] of the address indicates which AHB bus master the
203 + * LLI addresses are on 0/1 Master 1/2.
204 + */
205 + struct pl08x_platform_data {
206 + struct pl08x_channel_data *slave_channels;
207 + unsigned int num_slave_channels;
208 + struct pl08x_channel_data memcpy_channel;
209 + int (*get_signal)(struct pl08x_dma_chan *);
210 + void (*put_signal)(struct pl08x_dma_chan *);
211 + };
212 +
213 + #ifdef CONFIG_AMBA_PL08X
214 + bool pl08x_filter_id(struct dma_chan *chan, void *chan_id);
215 + #else
216 + static inline bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
217 + {
218 + return false;
219 + }
220 + #endif
221 +
222 + #endif /* AMBA_PL08X_H */
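Clients pick up one of the channels declared in pl08x_platform_data by name, using the exported filter with the generic channel request API. A hedged sketch; "uart0_tx" is a made-up bus_id and must match a pl08x_channel_data entry on the platform:

#include <linux/amba/pl08x.h>
#include <linux/dmaengine.h>

static struct dma_chan *example_get_uart_tx_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        /* pl08x_filter_id() matches chan_id against each channel's bus_id */
        return dma_request_channel(mask, pl08x_filter_id, (void *)"uart0_tx");
}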
+55 -5
include/linux/dmaengine.h
··· 64 64 DMA_PQ_VAL,
65 65 DMA_MEMSET,
66 66 DMA_INTERRUPT,
67 + DMA_SG,
67 68 DMA_PRIVATE,
68 69 DMA_ASYNC_TX,
69 70 DMA_SLAVE,
71 + DMA_CYCLIC,
70 72 };
71 73
72 74 /* last transaction type for creation of the capabilities mask */
73 - #define DMA_TX_TYPE_END (DMA_SLAVE + 1)
75 + #define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
74 76
75 77
76 78 /**
··· 121 119 * configuration data in statically from the platform). An additional
122 120 * argument of struct dma_slave_config must be passed in with this
123 121 * command.
122 + * @FSLDMA_EXTERNAL_START: this command will put the Freescale DMA controller
123 + * into external start mode.
124 124 */
125 125 enum dma_ctrl_cmd {
126 126 DMA_TERMINATE_ALL,
127 127 DMA_PAUSE,
128 128 DMA_RESUME,
129 129 DMA_SLAVE_CONFIG,
130 + FSLDMA_EXTERNAL_START,
130 131 };
131 132
132 133 /**
··· 321 316 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx);
322 317 dma_async_tx_callback callback;
323 318 void *callback_param;
324 - #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
319 + #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
325 320 struct dma_async_tx_descriptor *next;
326 321 struct dma_async_tx_descriptor *parent;
327 322 spinlock_t lock;
328 323 #endif
329 324 };
330 325
331 - #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
326 + #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
332 327 static inline void txd_lock(struct dma_async_tx_descriptor *txd)
333 328 {
334 329 }
··· 427 422 * @device_prep_dma_memset: prepares a memset operation
428 423 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
429 424 * @device_prep_slave_sg: prepares a slave dma operation
425 + * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
426 + * The function takes a buffer of size buf_len. The callback function will
427 + * be called after period_len bytes have been transferred.
430 428 * @device_control: manipulate all pending operations on a channel, returns
431 429 * zero or error code
432 430 * @device_tx_status: poll for transaction completion, the optional
··· 481 473 unsigned long flags);
482 474 struct dma_async_tx_descriptor *(*device_prep_dma_interrupt)(
483 475 struct dma_chan *chan, unsigned long flags);
476 + struct dma_async_tx_descriptor *(*device_prep_dma_sg)(
477 + struct dma_chan *chan,
478 + struct scatterlist *dst_sg, unsigned int dst_nents,
479 + struct scatterlist *src_sg, unsigned int src_nents,
480 + unsigned long flags);
484 481
485 482 struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
486 483 struct dma_chan *chan, struct scatterlist *sgl,
487 484 unsigned int sg_len, enum dma_data_direction direction,
488 485 unsigned long flags);
486 + struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
487 + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
488 + size_t period_len, enum dma_data_direction direction);
489 489 int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
490 490 unsigned long arg);
··· 502 486 struct dma_tx_state *txstate);
503 487 void (*device_issue_pending)(struct dma_chan *chan);
504 488 };
489 +
490 + static inline int dmaengine_device_control(struct dma_chan *chan,
491 + enum dma_ctrl_cmd cmd,
492 + unsigned long arg)
493 + {
494 + return chan->device->device_control(chan, cmd, arg);
495 + }
496 +
497 + static inline int dmaengine_slave_config(struct dma_chan *chan,
498 + struct dma_slave_config *config)
499 + {
500 + return dmaengine_device_control(chan, DMA_SLAVE_CONFIG,
501 + (unsigned long)config);
502 + }
503 +
504 + static inline int dmaengine_terminate_all(struct dma_chan *chan)
505 + {
506 + return dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
507 + }
508 +
509 + static inline int dmaengine_pause(struct dma_chan *chan)
510 + {
511 + return dmaengine_device_control(chan, DMA_PAUSE, 0);
512 + }
513 +
514 + static inline int dmaengine_resume(struct dma_chan *chan)
515 + {
516 + return dmaengine_device_control(chan, DMA_RESUME, 0);
517 + }
518 +
519 + static inline int dmaengine_submit(struct dma_async_tx_descriptor *desc)
520 + {
521 + return desc->tx_submit(desc);
522 + }
505 523
506 524 static inline bool dmaengine_check_align(u8 align, size_t off1, size_t off2, size_t len)
507 525 {
··· 656 606 #ifdef CONFIG_ASYNC_TX_DMA
657 607 #define async_dmaengine_get() dmaengine_get()
658 608 #define async_dmaengine_put() dmaengine_put()
659 - #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH
609 + #ifndef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
660 610 #define async_dma_find_channel(type) dma_find_channel(DMA_ASYNC_TX)
661 611 #else
662 612 #define async_dma_find_channel(type) dma_find_channel(type)
663 - #endif /* CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH */
613 + #endif /* CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH */
664 614 #else
665 615 static inline void async_dmaengine_get(void)
666 616 {
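The new static inlines give slave drivers a uniform front end to device_control(), and the prep hooks grow scatter/gather-copy (DMA_SG) and cyclic (DMA_CYCLIC) variants. A hedged sketch of a client starting a cyclic RX transfer with them; there is no generic wrapper for the cyclic prep in this header yet, so the call goes through the ops table, and setup and error handling are trimmed:

#include <linux/dmaengine.h>

static int example_start_cyclic_rx(struct dma_chan *chan,
                                   struct dma_slave_config *cfg,
                                   dma_addr_t buf, size_t buf_len,
                                   size_t period_len)
{
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = dmaengine_slave_config(chan, cfg);
        if (ret)
                return ret;

        /* the callback fires every period_len bytes, per the kerneldoc above */
        desc = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
                                                    period_len,
                                                    DMA_FROM_DEVICE);
        if (!desc)
                return -EBUSY;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);

        return 0;
}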
+3 -13
include/linux/intel_mid_dma.h
··· 27 27
28 28 #include <linux/dmaengine.h>
29 29
30 - /*DMA transaction width, src and dstn width would be same
31 - The DMA length must be width aligned,
32 - for 32 bit width the length must be 32 bit (4bytes) aligned only*/
33 - enum intel_mid_dma_width {
34 - LNW_DMA_WIDTH_8BIT = 0x0,
35 - LNW_DMA_WIDTH_16BIT = 0x1,
36 - LNW_DMA_WIDTH_32BIT = 0x2,
37 - };
30 + #define DMA_PREP_CIRCULAR_LIST (1 << 10)
38 31
39 32 /*DMA mode configurations*/
40 33 enum intel_mid_dma_mode {
··· 62 69 * @cfg_mode: DMA data transfer mode (per-per/mem-per/mem-mem)
63 70 * @src_msize: Source DMA burst size
64 71 * @dst_msize: Dst DMA burst size
72 + * @per_addr: Peripheral address
65 73 * @device_instance: DMA peripheral device instance, we can have multiple
66 74 * peripheral device connected to single DMAC
67 75 */
68 76 struct intel_mid_dma_slave {
69 - enum dma_data_direction dirn;
70 - enum intel_mid_dma_width src_width; /*width of DMA src txn*/
71 - enum intel_mid_dma_width dst_width; /*width of DMA dst txn*/
72 77 enum intel_mid_dma_hs_mode hs_mode; /*handshaking*/
73 78 enum intel_mid_dma_mode cfg_mode; /*mode configuration*/
74 - enum intel_mid_dma_msize src_msize; /*size if src burst*/
75 - enum intel_mid_dma_msize dst_msize; /*size of dst burst*/
76 79 unsigned int device_instance; /*0, 1 for periphral instance*/
80 + struct dma_slave_config dma_slave;
77 81 };
78 82
79 83 #endif /*__INTEL_MID_DMA_H__*/
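With the slave interface change the generic transfer parameters move into the embedded struct dma_slave_config, and only the Intel MID specifics remain. A hedged sketch of filling the new structure; the LNW_* enum values are assumed from the unchanged parts of this header, and the FIFO address and burst size are examples only:

#include <linux/intel_mid_dma.h>

static void example_fill_mid_slave(struct intel_mid_dma_slave *mid,
                                   dma_addr_t fifo_addr)
{
        mid->hs_mode = LNW_DMA_HW_HS;           /* assumed enum value */
        mid->cfg_mode = LNW_DMA_PER_TO_MEM;     /* assumed enum value */
        mid->device_instance = 0;

        /* generic fields now live in the embedded dma_slave_config */
        mid->dma_slave.direction = DMA_FROM_DEVICE;
        mid->dma_slave.src_addr = fifo_addr;    /* hypothetical FIFO address */
        mid->dma_slave.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        mid->dma_slave.src_maxburst = 8;
}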
+13
lib/Kconfig.debug
··· 1217 1217
1218 1218 If unsure, say N.
1219 1219
1220 + config ASYNC_RAID6_TEST
1221 + tristate "Self test for hardware accelerated raid6 recovery"
1222 + depends on ASYNC_RAID6_RECOV
1223 + select ASYNC_MEMCPY
1224 + ---help---
1225 + This is a one-shot self test that permutes through the
1226 + recovery of all the possible two disk failure scenarios for
1227 + an N-disk array. Recovery is performed with the asynchronous
1228 + raid6 recovery routines, and will optionally use an offload
1229 + engine if one is available.
1230 +
1231 + If unsure, say N.
1232 +
1220 1233 source "samples/Kconfig"
1221 1234
1222 1235 source "lib/Kconfig.kgdb"