Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx

* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/djbw/async_tx:
DMAENGINE: extend the control command to include an arg
async_tx: trim dma_async_tx_descriptor in 'no channel switch' case
DMAENGINE: DMA40 fix for allocation of logical channel 0
DMAENGINE: DMA40 support paused channel status
dmaengine: mpc512x: Use resource_size
DMA ENGINE: Do not reset 'private' of channel
ioat: Remove duplicated devm_kzalloc() calls for ioatdma_device
ioat3: disable cacheline-unaligned transfers for raid operations
ioat2,3: convert to producer/consumer locking
ioat: convert to circ_buf
DMAENGINE: Support for ST-Ericssons DMA40 block v3
async_tx: use of kzalloc/kfree requires the include of slab.h
dmaengine: provide helper for setting txstate
DMAENGINE: generic channel status v2
DMAENGINE: generic slave control v2
dma: timb-dma: Update comment and fix compiler warning
dma: Add timb-dma
DMAENGINE: COH 901 318 fix bytesleft
DMAENGINE: COH 901 318 rename confusing vars

+5286 -511
-21
arch/arm/mach-u300/include/mach/coh901318.h
··· 103 }; 104 105 /** 106 - * coh901318_get_bytes_left() - Get number of bytes left on a current transfer 107 - * @chan: dma channel handle 108 - * return number of bytes left, or negative on error 109 - */ 110 - u32 coh901318_get_bytes_left(struct dma_chan *chan); 111 - 112 - /** 113 - * coh901318_stop() - Stops dma transfer 114 - * @chan: dma channel handle 115 - * return 0 on success otherwise negative value 116 - */ 117 - void coh901318_stop(struct dma_chan *chan); 118 - 119 - /** 120 - * coh901318_continue() - Resumes a stopped dma transfer 121 - * @chan: dma channel handle 122 - * return 0 on success otherwise negative value 123 - */ 124 - void coh901318_continue(struct dma_chan *chan); 125 - 126 - /** 127 * coh901318_filter_id() - DMA channel filter function 128 * @chan: dma channel handle 129 * @chan_id: id of dma channel to be filter out
··· 103 }; 104 105 /** 106 * coh901318_filter_id() - DMA channel filter function 107 * @chan: dma channel handle 108 * @chan_id: id of dma channel to be filter out
+239
arch/arm/plat-nomadik/include/plat/ste_dma40.h
··· 1 + /* 2 + * arch/arm/plat-nomadik/include/plat/ste_dma40.h 3 + * 4 + * Copyright (C) ST-Ericsson 2007-2010 5 + * License terms: GNU General Public License (GPL) version 2 6 + * Author: Per Friden <per.friden@stericsson.com> 7 + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> 8 + */ 9 + 10 + 11 + #ifndef STE_DMA40_H 12 + #define STE_DMA40_H 13 + 14 + #include <linux/dmaengine.h> 15 + #include <linux/workqueue.h> 16 + #include <linux/interrupt.h> 17 + #include <linux/dmaengine.h> 18 + 19 + /* dev types for memcpy */ 20 + #define STEDMA40_DEV_DST_MEMORY (-1) 21 + #define STEDMA40_DEV_SRC_MEMORY (-1) 22 + 23 + /* 24 + * Description of bitfields of channel_type variable is available in 25 + * the info structure. 26 + */ 27 + 28 + /* Priority */ 29 + #define STEDMA40_INFO_PRIO_TYPE_POS 2 30 + #define STEDMA40_HIGH_PRIORITY_CHANNEL (0x1 << STEDMA40_INFO_PRIO_TYPE_POS) 31 + #define STEDMA40_LOW_PRIORITY_CHANNEL (0x2 << STEDMA40_INFO_PRIO_TYPE_POS) 32 + 33 + /* Mode */ 34 + #define STEDMA40_INFO_CH_MODE_TYPE_POS 6 35 + #define STEDMA40_CHANNEL_IN_PHY_MODE (0x1 << STEDMA40_INFO_CH_MODE_TYPE_POS) 36 + #define STEDMA40_CHANNEL_IN_LOG_MODE (0x2 << STEDMA40_INFO_CH_MODE_TYPE_POS) 37 + #define STEDMA40_CHANNEL_IN_OPER_MODE (0x3 << STEDMA40_INFO_CH_MODE_TYPE_POS) 38 + 39 + /* Mode options */ 40 + #define STEDMA40_INFO_CH_MODE_OPT_POS 8 41 + #define STEDMA40_PCHAN_BASIC_MODE (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS) 42 + #define STEDMA40_PCHAN_MODULO_MODE (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS) 43 + #define STEDMA40_PCHAN_DOUBLE_DST_MODE (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS) 44 + #define STEDMA40_LCHAN_SRC_PHY_DST_LOG (0x1 << STEDMA40_INFO_CH_MODE_OPT_POS) 45 + #define STEDMA40_LCHAN_SRC_LOG_DST_PHS (0x2 << STEDMA40_INFO_CH_MODE_OPT_POS) 46 + #define STEDMA40_LCHAN_SRC_LOG_DST_LOG (0x3 << STEDMA40_INFO_CH_MODE_OPT_POS) 47 + 48 + /* Interrupt */ 49 + #define STEDMA40_INFO_TIM_POS 10 50 + #define STEDMA40_NO_TIM_FOR_LINK (0x0 << STEDMA40_INFO_TIM_POS) 51 + #define STEDMA40_TIM_FOR_LINK (0x1 << STEDMA40_INFO_TIM_POS) 52 + 53 + /* End of channel_type configuration */ 54 + 55 + #define STEDMA40_ESIZE_8_BIT 0x0 56 + #define STEDMA40_ESIZE_16_BIT 0x1 57 + #define STEDMA40_ESIZE_32_BIT 0x2 58 + #define STEDMA40_ESIZE_64_BIT 0x3 59 + 60 + /* The value 4 indicates that PEN-reg shall be set to 0 */ 61 + #define STEDMA40_PSIZE_PHY_1 0x4 62 + #define STEDMA40_PSIZE_PHY_2 0x0 63 + #define STEDMA40_PSIZE_PHY_4 0x1 64 + #define STEDMA40_PSIZE_PHY_8 0x2 65 + #define STEDMA40_PSIZE_PHY_16 0x3 66 + 67 + /* 68 + * The number of elements differ in logical and 69 + * physical mode 70 + */ 71 + #define STEDMA40_PSIZE_LOG_1 STEDMA40_PSIZE_PHY_2 72 + #define STEDMA40_PSIZE_LOG_4 STEDMA40_PSIZE_PHY_4 73 + #define STEDMA40_PSIZE_LOG_8 STEDMA40_PSIZE_PHY_8 74 + #define STEDMA40_PSIZE_LOG_16 STEDMA40_PSIZE_PHY_16 75 + 76 + enum stedma40_flow_ctrl { 77 + STEDMA40_NO_FLOW_CTRL, 78 + STEDMA40_FLOW_CTRL, 79 + }; 80 + 81 + enum stedma40_endianess { 82 + STEDMA40_LITTLE_ENDIAN, 83 + STEDMA40_BIG_ENDIAN 84 + }; 85 + 86 + enum stedma40_periph_data_width { 87 + STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT, 88 + STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT, 89 + STEDMA40_WORD_WIDTH = STEDMA40_ESIZE_32_BIT, 90 + STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT 91 + }; 92 + 93 + struct stedma40_half_channel_info { 94 + enum stedma40_endianess endianess; 95 + enum stedma40_periph_data_width data_width; 96 + int psize; 97 + enum stedma40_flow_ctrl flow_ctrl; 98 + }; 99 + 100 + enum stedma40_xfer_dir { 101 + STEDMA40_MEM_TO_MEM, 102 + 
STEDMA40_MEM_TO_PERIPH, 103 + STEDMA40_PERIPH_TO_MEM, 104 + STEDMA40_PERIPH_TO_PERIPH 105 + }; 106 + 107 + 108 + /** 109 + * struct stedma40_chan_cfg - Structure to be filled by client drivers. 110 + * 111 + * @dir: MEM 2 MEM, PERIPH 2 MEM , MEM 2 PERIPH, PERIPH 2 PERIPH 112 + * @channel_type: priority, mode, mode options and interrupt configuration. 113 + * @src_dev_type: Src device type 114 + * @dst_dev_type: Dst device type 115 + * @src_info: Parameters for dst half channel 116 + * @dst_info: Parameters for dst half channel 117 + * @pre_transfer_data: Data to be passed on to the pre_transfer() function. 118 + * @pre_transfer: Callback used if needed before preparation of transfer. 119 + * Only called if device is set. size of bytes to transfer 120 + * (in case of multiple element transfer size is size of the first element). 121 + * 122 + * 123 + * This structure has to be filled by the client drivers. 124 + * It is recommended to do all dma configurations for clients in the machine. 125 + * 126 + */ 127 + struct stedma40_chan_cfg { 128 + enum stedma40_xfer_dir dir; 129 + unsigned int channel_type; 130 + int src_dev_type; 131 + int dst_dev_type; 132 + struct stedma40_half_channel_info src_info; 133 + struct stedma40_half_channel_info dst_info; 134 + void *pre_transfer_data; 135 + int (*pre_transfer) (struct dma_chan *chan, 136 + void *data, 137 + int size); 138 + }; 139 + 140 + /** 141 + * struct stedma40_platform_data - Configuration struct for the dma device. 142 + * 143 + * @dev_len: length of dev_tx and dev_rx 144 + * @dev_tx: mapping between destination event line and io address 145 + * @dev_rx: mapping between source event line and io address 146 + * @memcpy: list of memcpy event lines 147 + * @memcpy_len: length of memcpy 148 + * @memcpy_conf_phy: default configuration of physical channel memcpy 149 + * @memcpy_conf_log: default configuration of logical channel memcpy 150 + * @llis_per_log: number of max linked list items per logical channel 151 + * 152 + */ 153 + struct stedma40_platform_data { 154 + u32 dev_len; 155 + const dma_addr_t *dev_tx; 156 + const dma_addr_t *dev_rx; 157 + int *memcpy; 158 + u32 memcpy_len; 159 + struct stedma40_chan_cfg *memcpy_conf_phy; 160 + struct stedma40_chan_cfg *memcpy_conf_log; 161 + unsigned int llis_per_log; 162 + }; 163 + 164 + /** 165 + * setdma40_set_psize() - Used for changing the package size of an 166 + * already configured dma channel. 167 + * 168 + * @chan: dmaengine handle 169 + * @src_psize: new package side for src. (STEDMA40_PSIZE*) 170 + * @src_psize: new package side for dst. (STEDMA40_PSIZE*) 171 + * 172 + * returns 0 on ok, otherwise negative error number. 173 + */ 174 + int stedma40_set_psize(struct dma_chan *chan, 175 + int src_psize, 176 + int dst_psize); 177 + 178 + /** 179 + * stedma40_filter() - Provides stedma40_chan_cfg to the 180 + * ste_dma40 dma driver via the dmaengine framework. 181 + * does some checking of what's provided. 182 + * 183 + * Never directly called by client. It used by dmaengine. 184 + * @chan: dmaengine handle. 185 + * @data: Must be of type: struct stedma40_chan_cfg and is 186 + * the configuration of the framework. 187 + * 188 + * 189 + */ 190 + 191 + bool stedma40_filter(struct dma_chan *chan, void *data); 192 + 193 + /** 194 + * stedma40_memcpy_sg() - extension of the dma framework, memcpy to/from 195 + * scattergatter lists. 196 + * 197 + * @chan: dmaengine handle 198 + * @sgl_dst: Destination scatter list 199 + * @sgl_src: Source scatter list 200 + * @sgl_len: The length of each scatterlist. 
Both lists must be of equal length 201 + * and each element must match the corresponding element in the other scatter 202 + * list. 203 + * @flags: is actually enum dma_ctrl_flags. See dmaengine.h 204 + */ 205 + 206 + struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 207 + struct scatterlist *sgl_dst, 208 + struct scatterlist *sgl_src, 209 + unsigned int sgl_len, 210 + unsigned long flags); 211 + 212 + /** 213 + * stedma40_slave_mem() - Transfers a raw data buffer to or from a slave 214 + * (=device) 215 + * 216 + * @chan: dmaengine handle 217 + * @addr: source or destination physicall address. 218 + * @size: bytes to transfer 219 + * @direction: direction of transfer 220 + * @flags: is actually enum dma_ctrl_flags. See dmaengine.h 221 + */ 222 + 223 + static inline struct 224 + dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan, 225 + dma_addr_t addr, 226 + unsigned int size, 227 + enum dma_data_direction direction, 228 + unsigned long flags) 229 + { 230 + struct scatterlist sg; 231 + sg_init_table(&sg, 1); 232 + sg.dma_address = addr; 233 + sg.length = size; 234 + 235 + return chan->device->device_prep_slave_sg(chan, &sg, 1, 236 + direction, flags); 237 + } 238 + 239 + #endif
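The header above is the entire client-facing contract of the new DMA40 driver: a client fills in a struct stedma40_chan_cfg and lets stedma40_filter() hand it to the driver through the normal dmaengine channel-request path. A minimal sketch of that usage follows; the channel_type flag combination and the src_dev_type value are illustrative placeholders, not values taken from a real U8500 board file.

    /* sketch: request a peripheral-to-memory DMA40 slave channel */
    static struct stedma40_chan_cfg example_rx_cfg = {
            .dir            = STEDMA40_PERIPH_TO_MEM,
            .channel_type   = STEDMA40_CHANNEL_IN_LOG_MODE |
                              STEDMA40_LCHAN_SRC_PHY_DST_LOG |
                              STEDMA40_HIGH_PRIORITY_CHANNEL,
            .src_dev_type   = 0,    /* platform event line, placeholder */
            .dst_dev_type   = STEDMA40_DEV_DST_MEMORY,
            .src_info.data_width = STEDMA40_WORD_WIDTH,
            .dst_info.data_width = STEDMA40_WORD_WIDTH,
    };

    static struct dma_chan *example_request_channel(void)
    {
            dma_cap_mask_t mask;

            dma_cap_zero(mask);
            dma_cap_set(DMA_SLAVE, mask);

            /* stedma40_filter() checks and copies the config */
            return dma_request_channel(mask, stedma40_filter,
                                       &example_rx_cfg);
    }

As the kerneldoc in the header recommends, the configuration itself normally lives in the machine/board code rather than in the client driver.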
+19 -27
crypto/async_tx/async_tx.c
··· 81 struct dma_device *device = chan->device; 82 struct dma_async_tx_descriptor *intr_tx = (void *) ~0; 83 84 - #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH 85 - BUG(); 86 - #endif 87 - 88 /* first check to see if we can still append to depend_tx */ 89 - spin_lock_bh(&depend_tx->lock); 90 - if (depend_tx->parent && depend_tx->chan == tx->chan) { 91 - tx->parent = depend_tx; 92 - depend_tx->next = tx; 93 intr_tx = NULL; 94 } 95 - spin_unlock_bh(&depend_tx->lock); 96 97 /* attached dependency, flush the parent channel */ 98 if (!intr_tx) { ··· 106 if (intr_tx) { 107 intr_tx->callback = NULL; 108 intr_tx->callback_param = NULL; 109 - tx->parent = intr_tx; 110 - /* safe to set ->next outside the lock since we know we are 111 * not submitted yet 112 */ 113 - intr_tx->next = tx; 114 115 /* check if we need to append */ 116 - spin_lock_bh(&depend_tx->lock); 117 - if (depend_tx->parent) { 118 - intr_tx->parent = depend_tx; 119 - depend_tx->next = intr_tx; 120 async_tx_ack(intr_tx); 121 intr_tx = NULL; 122 } 123 - spin_unlock_bh(&depend_tx->lock); 124 125 if (intr_tx) { 126 - intr_tx->parent = NULL; 127 intr_tx->tx_submit(intr_tx); 128 async_tx_ack(intr_tx); 129 } ··· 169 * 2/ dependencies are 1:1 i.e. two transactions can 170 * not depend on the same parent 171 */ 172 - BUG_ON(async_tx_test_ack(depend_tx) || depend_tx->next || 173 - tx->parent); 174 175 /* the lock prevents async_tx_run_dependencies from missing 176 * the setting of ->next when ->parent != NULL 177 */ 178 - spin_lock_bh(&depend_tx->lock); 179 - if (depend_tx->parent) { 180 /* we have a parent so we can not submit directly 181 * if we are staying on the same channel: append 182 * else: channel switch 183 */ 184 if (depend_tx->chan == chan) { 185 - tx->parent = depend_tx; 186 - depend_tx->next = tx; 187 s = ASYNC_TX_SUBMITTED; 188 } else 189 s = ASYNC_TX_CHANNEL_SWITCH; ··· 195 else 196 s = ASYNC_TX_CHANNEL_SWITCH; 197 } 198 - spin_unlock_bh(&depend_tx->lock); 199 200 switch (s) { 201 case ASYNC_TX_SUBMITTED: ··· 204 async_tx_channel_switch(depend_tx, tx); 205 break; 206 case ASYNC_TX_DIRECT_SUBMIT: 207 - tx->parent = NULL; 208 tx->tx_submit(tx); 209 break; 210 } 211 } else { 212 - tx->parent = NULL; 213 tx->tx_submit(tx); 214 } 215
··· 81 struct dma_device *device = chan->device; 82 struct dma_async_tx_descriptor *intr_tx = (void *) ~0; 83 84 /* first check to see if we can still append to depend_tx */ 85 + txd_lock(depend_tx); 86 + if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { 87 + txd_chain(depend_tx, tx); 88 intr_tx = NULL; 89 } 90 + txd_unlock(depend_tx); 91 92 /* attached dependency, flush the parent channel */ 93 if (!intr_tx) { ··· 111 if (intr_tx) { 112 intr_tx->callback = NULL; 113 intr_tx->callback_param = NULL; 114 + /* safe to chain outside the lock since we know we are 115 * not submitted yet 116 */ 117 + txd_chain(intr_tx, tx); 118 119 /* check if we need to append */ 120 + txd_lock(depend_tx); 121 + if (txd_parent(depend_tx)) { 122 + txd_chain(depend_tx, intr_tx); 123 async_tx_ack(intr_tx); 124 intr_tx = NULL; 125 } 126 + txd_unlock(depend_tx); 127 128 if (intr_tx) { 129 + txd_clear_parent(intr_tx); 130 intr_tx->tx_submit(intr_tx); 131 async_tx_ack(intr_tx); 132 } ··· 176 * 2/ dependencies are 1:1 i.e. two transactions can 177 * not depend on the same parent 178 */ 179 + BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) || 180 + txd_parent(tx)); 181 182 /* the lock prevents async_tx_run_dependencies from missing 183 * the setting of ->next when ->parent != NULL 184 */ 185 + txd_lock(depend_tx); 186 + if (txd_parent(depend_tx)) { 187 /* we have a parent so we can not submit directly 188 * if we are staying on the same channel: append 189 * else: channel switch 190 */ 191 if (depend_tx->chan == chan) { 192 + txd_chain(depend_tx, tx); 193 s = ASYNC_TX_SUBMITTED; 194 } else 195 s = ASYNC_TX_CHANNEL_SWITCH; ··· 203 else 204 s = ASYNC_TX_CHANNEL_SWITCH; 205 } 206 + txd_unlock(depend_tx); 207 208 switch (s) { 209 case ASYNC_TX_SUBMITTED: ··· 212 async_tx_channel_switch(depend_tx, tx); 213 break; 214 case ASYNC_TX_DIRECT_SUBMIT: 215 + txd_clear_parent(tx); 216 tx->tx_submit(tx); 217 break; 218 } 219 } else { 220 + txd_clear_parent(tx); 221 tx->tx_submit(tx); 222 } 223
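The txd_lock()/txd_chain()/txd_parent()/txd_clear_parent() calls above come from the 'trim dma_async_tx_descriptor in no channel switch case' patch: instead of touching ->lock, ->parent and ->next directly, async_tx now goes through small accessors that turn into no-ops when CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH is set, so those fields can be dropped from the descriptor entirely. A rough sketch of the non-trimmed variants (assumed shape; dmaengine.h has the authoritative definitions):

    static inline void txd_lock(struct dma_async_tx_descriptor *txd)
    {
            spin_lock_bh(&txd->lock);
    }

    static inline void txd_unlock(struct dma_async_tx_descriptor *txd)
    {
            spin_unlock_bh(&txd->lock);
    }

    /* txd is the parent; next depends on it and runs afterwards */
    static inline void txd_chain(struct dma_async_tx_descriptor *txd,
                                 struct dma_async_tx_descriptor *next)
    {
            txd->next = next;
            next->parent = txd;
    }

    static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd)
    {
            txd->parent = NULL;
    }

    static inline struct dma_async_tx_descriptor *
    txd_next(struct dma_async_tx_descriptor *txd)
    {
            return txd->next;
    }

In the CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH case the same helpers compile to empty stubs (and txd_chain() to a BUG()), which is also why dma_async_tx_descriptor_init() in the dmaengine.c hunk below only initializes the lock when channel switching is possible.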
+14
drivers/dma/Kconfig
··· 141 help 142 Enable support for ST-Ericsson COH 901 318 DMA. 143 144 config AMCC_PPC440SPE_ADMA 145 tristate "AMCC PPC440SPe ADMA support" 146 depends on 440SPe || 440SP ··· 155 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL 156 help 157 Enable support for the AMCC PPC440SPe RAID engines. 158 159 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL 160 bool
··· 141 help 142 Enable support for ST-Ericsson COH 901 318 DMA. 143 144 + config STE_DMA40 145 + bool "ST-Ericsson DMA40 support" 146 + depends on ARCH_U8500 147 + select DMA_ENGINE 148 + help 149 + Support for ST-Ericsson DMA40 controller 150 + 151 config AMCC_PPC440SPE_ADMA 152 tristate "AMCC PPC440SPe ADMA support" 153 depends on 440SPe || 440SP ··· 148 select ARCH_HAS_ASYNC_TX_FIND_CHANNEL 149 help 150 Enable support for the AMCC PPC440SPe RAID engines. 151 + 152 + config TIMB_DMA 153 + tristate "Timberdale FPGA DMA support" 154 + depends on MFD_TIMBERDALE || HAS_IOMEM 155 + select DMA_ENGINE 156 + help 157 + Enable support for the Timberdale FPGA DMA engine. 158 159 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL 160 bool
+2
drivers/dma/Makefile
··· 20 obj-$(CONFIG_SH_DMAE) += shdma.o 21 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o 22 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
··· 20 obj-$(CONFIG_SH_DMAE) += shdma.o 21 obj-$(CONFIG_COH901318) += coh901318.o coh901318_lli.o 22 obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ 23 + obj-$(CONFIG_TIMB_DMA) += timb_dma.o 24 + obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
+19 -16
drivers/dma/at_hdmac.c
··· 760 return NULL; 761 } 762 763 - static void atc_terminate_all(struct dma_chan *chan) 764 { 765 struct at_dma_chan *atchan = to_at_dma_chan(chan); 766 struct at_dma *atdma = to_at_dma(chan->device); 767 struct at_desc *desc, *_desc; 768 LIST_HEAD(list); 769 770 /* 771 * This is only called when something went wrong elsewhere, so ··· 795 /* Flush all pending and queued descriptors */ 796 list_for_each_entry_safe(desc, _desc, &list, desc_node) 797 atc_chain_complete(atchan, desc); 798 } 799 800 /** 801 - * atc_is_tx_complete - poll for transaction completion 802 * @chan: DMA channel 803 * @cookie: transaction identifier to check status of 804 - * @done: if not %NULL, updated with last completed transaction 805 - * @used: if not %NULL, updated with last used transaction 806 * 807 - * If @done and @used are passed in, upon return they reflect the driver 808 * internal state and can be used with dma_async_is_complete() to check 809 * the status of multiple cookies without re-checking hardware state. 810 */ 811 static enum dma_status 812 - atc_is_tx_complete(struct dma_chan *chan, 813 dma_cookie_t cookie, 814 - dma_cookie_t *done, dma_cookie_t *used) 815 { 816 struct at_dma_chan *atchan = to_at_dma_chan(chan); 817 dma_cookie_t last_used; 818 dma_cookie_t last_complete; 819 enum dma_status ret; 820 - 821 - dev_vdbg(chan2dev(chan), "is_tx_complete: %d (d%d, u%d)\n", 822 - cookie, done ? *done : 0, used ? *used : 0); 823 824 spin_lock_bh(&atchan->lock); 825 ··· 836 837 spin_unlock_bh(&atchan->lock); 838 839 - if (done) 840 - *done = last_complete; 841 - if (used) 842 - *used = last_used; 843 844 return ret; 845 } ··· 1085 /* set base routines */ 1086 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; 1087 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; 1088 - atdma->dma_common.device_is_tx_complete = atc_is_tx_complete; 1089 atdma->dma_common.device_issue_pending = atc_issue_pending; 1090 atdma->dma_common.dev = &pdev->dev; 1091 ··· 1095 1096 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1097 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1098 - atdma->dma_common.device_terminate_all = atc_terminate_all; 1099 } 1100 1101 dma_writel(atdma, EN, AT_DMA_ENABLE);
··· 760 return NULL; 761 } 762 763 + static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 764 + unsigned long arg) 765 { 766 struct at_dma_chan *atchan = to_at_dma_chan(chan); 767 struct at_dma *atdma = to_at_dma(chan->device); 768 struct at_desc *desc, *_desc; 769 LIST_HEAD(list); 770 + 771 + /* Only supports DMA_TERMINATE_ALL */ 772 + if (cmd != DMA_TERMINATE_ALL) 773 + return -ENXIO; 774 775 /* 776 * This is only called when something went wrong elsewhere, so ··· 790 /* Flush all pending and queued descriptors */ 791 list_for_each_entry_safe(desc, _desc, &list, desc_node) 792 atc_chain_complete(atchan, desc); 793 + 794 + return 0; 795 } 796 797 /** 798 + * atc_tx_status - poll for transaction completion 799 * @chan: DMA channel 800 * @cookie: transaction identifier to check status of 801 + * @txstate: if not %NULL updated with transaction state 802 * 803 + * If @txstate is passed in, upon return it reflect the driver 804 * internal state and can be used with dma_async_is_complete() to check 805 * the status of multiple cookies without re-checking hardware state. 806 */ 807 static enum dma_status 808 + atc_tx_status(struct dma_chan *chan, 809 dma_cookie_t cookie, 810 + struct dma_tx_state *txstate) 811 { 812 struct at_dma_chan *atchan = to_at_dma_chan(chan); 813 dma_cookie_t last_used; 814 dma_cookie_t last_complete; 815 enum dma_status ret; 816 817 spin_lock_bh(&atchan->lock); 818 ··· 833 834 spin_unlock_bh(&atchan->lock); 835 836 + dma_set_tx_state(txstate, last_complete, last_used, 0); 837 + dev_vdbg(chan2dev(chan), "tx_status: %d (d%d, u%d)\n", 838 + cookie, last_complete ? last_complete : 0, 839 + last_used ? last_used : 0); 840 841 return ret; 842 } ··· 1082 /* set base routines */ 1083 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources; 1084 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources; 1085 + atdma->dma_common.device_tx_status = atc_tx_status; 1086 atdma->dma_common.device_issue_pending = atc_issue_pending; 1087 atdma->dma_common.dev = &pdev->dev; 1088 ··· 1092 1093 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1094 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1095 + atdma->dma_common.device_control = atc_control; 1096 } 1097 1098 dma_writel(atdma, EN, AT_DMA_ENABLE);
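atc_tx_status() above leans on the new dma_set_tx_state() helper from the 'provide helper for setting txstate' patch, which tolerates a NULL txstate so callers that only care about the returned enum dma_status need not pass a state struct. Its shape is roughly the following (a sketch of the dmaengine.h additions, paraphrased from this series):

    struct dma_tx_state {
            dma_cookie_t last;      /* last completed cookie */
            dma_cookie_t used;      /* last issued cookie */
            u32 residue;            /* bytes left in the current transfer */
    };

    static inline void dma_set_tx_state(struct dma_tx_state *st,
                    dma_cookie_t last, dma_cookie_t used, u32 residue)
    {
            if (st) {
                    st->last = last;
                    st->used = used;
                    st->residue = residue;
            }
    }

Drivers with no cheap way to compute the residue, like at_hdmac here, simply report 0; coh901318 below fills in a real byte count.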
+193 -74
drivers/dma/coh901318.c
··· 37 struct list_head node; 38 struct scatterlist *sg; 39 unsigned int sg_len; 40 - struct coh901318_lli *data; 41 enum dma_data_direction dir; 42 unsigned long flags; 43 }; ··· 283 } 284 285 static int coh901318_prep_linked_list(struct coh901318_chan *cohc, 286 - struct coh901318_lli *data) 287 { 288 int channel = cohc->id; 289 void __iomem *virtbase = cohc->base->virtbase; ··· 292 COH901318_CX_STAT_SPACING*channel) & 293 COH901318_CX_STAT_ACTIVE); 294 295 - writel(data->src_addr, 296 virtbase + COH901318_CX_SRC_ADDR + 297 COH901318_CX_SRC_ADDR_SPACING * channel); 298 299 - writel(data->dst_addr, virtbase + 300 COH901318_CX_DST_ADDR + 301 COH901318_CX_DST_ADDR_SPACING * channel); 302 303 - writel(data->link_addr, virtbase + COH901318_CX_LNK_ADDR + 304 COH901318_CX_LNK_ADDR_SPACING * channel); 305 306 - writel(data->control, virtbase + COH901318_CX_CTRL + 307 COH901318_CX_CTRL_SPACING * channel); 308 309 return 0; ··· 408 return d; 409 } 410 411 - /* 412 - * DMA start/stop controls 413 - */ 414 - u32 coh901318_get_bytes_left(struct dma_chan *chan) 415 { 416 - unsigned long flags; 417 - u32 ret; 418 struct coh901318_chan *cohc = to_coh901318_chan(chan); 419 420 spin_lock_irqsave(&cohc->lock, flags); 421 422 - /* Read transfer count value */ 423 - ret = readl(cohc->base->virtbase + 424 - COH901318_CX_CTRL+COH901318_CX_CTRL_SPACING * 425 - cohc->id) & COH901318_CX_CTRL_TC_VALUE_MASK; 426 427 spin_unlock_irqrestore(&cohc->lock, flags); 428 429 - return ret; 430 } 431 - EXPORT_SYMBOL(coh901318_get_bytes_left); 432 433 - 434 - /* Stops a transfer without losing data. Enables power save. 435 - Use this function in conjunction with coh901318_continue(..) 436 - */ 437 - void coh901318_stop(struct dma_chan *chan) 438 { 439 u32 val; 440 unsigned long flags; ··· 549 550 spin_unlock_irqrestore(&cohc->lock, flags); 551 } 552 - EXPORT_SYMBOL(coh901318_stop); 553 554 - /* Continues a transfer that has been stopped via 300_dma_stop(..). 555 Power save is handled. 
556 */ 557 - void coh901318_continue(struct dma_chan *chan) 558 { 559 u32 val; 560 unsigned long flags; ··· 579 580 spin_unlock_irqrestore(&cohc->lock, flags); 581 } 582 - EXPORT_SYMBOL(coh901318_continue); 583 584 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) 585 { ··· 637 */ 638 static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) 639 { 640 - struct coh901318_desc *cohd_que; 641 642 - /* start queued jobs, if any 643 * TODO: transmit all queued jobs in one go 644 */ 645 - cohd_que = coh901318_first_queued(cohc); 646 647 - if (cohd_que != NULL) { 648 /* Remove from queue */ 649 - coh901318_desc_remove(cohd_que); 650 /* initiate DMA job */ 651 cohc->busy = 1; 652 653 - coh901318_desc_submit(cohc, cohd_que); 654 655 - coh901318_prep_linked_list(cohc, cohd_que->data); 656 657 - /* start dma job */ 658 coh901318_start(cohc); 659 660 } 661 662 - return cohd_que; 663 } 664 665 /* ··· 695 cohc->completed = cohd_fin->desc.cookie; 696 697 /* release the lli allocation and remove the descriptor */ 698 - coh901318_lli_free(&cohc->base->pool, &cohd_fin->data); 699 700 /* return desc to free-list */ 701 coh901318_desc_remove(cohd_fin); ··· 739 /* called from interrupt context */ 740 static void dma_tc_handle(struct coh901318_chan *cohc) 741 { 742 - BUG_ON(!cohc->allocated && (list_empty(&cohc->active) || 743 - list_empty(&cohc->queue))); 744 - 745 - if (!cohc->allocated) 746 return; 747 748 spin_lock(&cohc->lock); 749 750 cohc->nbr_active_done++; 751 752 if (coh901318_queue_start(cohc) == NULL) 753 cohc->busy = 0; 754 755 - BUG_ON(list_empty(&cohc->active)); 756 - 757 spin_unlock(&cohc->lock); 758 759 if (cohc_chan_conf(cohc)->priority_high) 760 tasklet_hi_schedule(&cohc->tasklet); 761 else ··· 903 static int coh901318_alloc_chan_resources(struct dma_chan *chan) 904 { 905 struct coh901318_chan *cohc = to_coh901318_chan(chan); 906 907 dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n", 908 __func__, cohc->id); ··· 911 if (chan->client_count > 1) 912 return -EBUSY; 913 914 coh901318_config(cohc, NULL); 915 916 cohc->allocated = 1; 917 cohc->completed = chan->cookie = 1; 918 919 return 1; 920 } ··· 942 943 spin_unlock_irqrestore(&cohc->lock, flags); 944 945 - chan->device->device_terminate_all(chan); 946 } 947 948 ··· 969 coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 970 size_t size, unsigned long flags) 971 { 972 - struct coh901318_lli *data; 973 struct coh901318_desc *cohd; 974 unsigned long flg; 975 struct coh901318_chan *cohc = to_coh901318_chan(chan); ··· 991 if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size) 992 lli_len++; 993 994 - data = coh901318_lli_alloc(&cohc->base->pool, lli_len); 995 996 - if (data == NULL) 997 goto err; 998 999 ret = coh901318_lli_fill_memcpy( 1000 - &cohc->base->pool, data, src, size, dest, 1001 cohc_chan_param(cohc)->ctrl_lli_chained, 1002 ctrl_last); 1003 if (ret) 1004 goto err; 1005 1006 - COH_DBG(coh901318_list_print(cohc, data)); 1007 1008 /* Pick a descriptor to handle this transfer */ 1009 cohd = coh901318_desc_get(cohc); 1010 - cohd->data = data; 1011 cohd->flags = flags; 1012 cohd->desc.tx_submit = coh901318_tx_submit; 1013 ··· 1025 unsigned long flags) 1026 { 1027 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1028 - struct coh901318_lli *data; 1029 struct coh901318_desc *cohd; 1030 const struct coh901318_params *params; 1031 struct scatterlist *sg; ··· 1098 } 1099 1100 pr_debug("Allocate %d lli:s for this transfer\n", len); 1101 - data = coh901318_lli_alloc(&cohc->base->pool, 
len); 1102 1103 - if (data == NULL) 1104 goto err_dma_alloc; 1105 1106 - /* initiate allocated data list */ 1107 - ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len, 1108 cohc_dev_addr(cohc), 1109 ctrl_chained, 1110 ctrl, ··· 1113 if (ret) 1114 goto err_lli_fill; 1115 1116 - COH_DBG(coh901318_list_print(cohc, data)); 1117 1118 /* Pick a descriptor to handle this transfer */ 1119 cohd = coh901318_desc_get(cohc); 1120 cohd->dir = direction; 1121 cohd->flags = flags; 1122 cohd->desc.tx_submit = coh901318_tx_submit; 1123 - cohd->data = data; 1124 1125 spin_unlock_irqrestore(&cohc->lock, flg); 1126 ··· 1134 } 1135 1136 static enum dma_status 1137 - coh901318_is_tx_complete(struct dma_chan *chan, 1138 - dma_cookie_t cookie, dma_cookie_t *done, 1139 - dma_cookie_t *used) 1140 { 1141 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1142 dma_cookie_t last_used; ··· 1147 1148 ret = dma_async_is_complete(cookie, last_complete, last_used); 1149 1150 - if (done) 1151 - *done = last_complete; 1152 - if (used) 1153 - *used = last_used; 1154 1155 return ret; 1156 } ··· 1163 1164 spin_lock_irqsave(&cohc->lock, flags); 1165 1166 - /* Busy means that pending jobs are already being processed */ 1167 if (!cohc->busy) 1168 coh901318_queue_start(cohc); 1169 1170 spin_unlock_irqrestore(&cohc->lock, flags); 1171 } 1172 1173 - static void 1174 - coh901318_terminate_all(struct dma_chan *chan) 1175 { 1176 unsigned long flags; 1177 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1178 struct coh901318_desc *cohd; 1179 void __iomem *virtbase = cohc->base->virtbase; 1180 1181 - coh901318_stop(chan); 1182 1183 spin_lock_irqsave(&cohc->lock, flags); 1184 1185 /* Clear any pending BE or TC interrupt */ ··· 1216 1217 while ((cohd = coh901318_first_active_get(cohc))) { 1218 /* release the lli allocation*/ 1219 - coh901318_lli_free(&cohc->base->pool, &cohd->data); 1220 1221 /* return desc to free-list */ 1222 coh901318_desc_remove(cohd); ··· 1225 1226 while ((cohd = coh901318_first_queued(cohc))) { 1227 /* release the lli allocation*/ 1228 - coh901318_lli_free(&cohc->base->pool, &cohd->data); 1229 1230 /* return desc to free-list */ 1231 coh901318_desc_remove(cohd); ··· 1237 cohc->busy = 0; 1238 1239 spin_unlock_irqrestore(&cohc->lock, flags); 1240 } 1241 void coh901318_base_init(struct dma_device *dma, const int *pick_chans, 1242 struct coh901318_base *base) ··· 1354 base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; 1355 base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; 1356 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; 1357 - base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete; 1358 base->dma_slave.device_issue_pending = coh901318_issue_pending; 1359 - base->dma_slave.device_terminate_all = coh901318_terminate_all; 1360 base->dma_slave.dev = &pdev->dev; 1361 1362 err = dma_async_device_register(&base->dma_slave); ··· 1374 base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; 1375 base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; 1376 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; 1377 - base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete; 1378 base->dma_memcpy.device_issue_pending = coh901318_issue_pending; 1379 - base->dma_memcpy.device_terminate_all = coh901318_terminate_all; 1380 base->dma_memcpy.dev = &pdev->dev; 1381 /* 1382 * This controller can only access address at even 32bit boundaries,
··· 37 struct list_head node; 38 struct scatterlist *sg; 39 unsigned int sg_len; 40 + struct coh901318_lli *lli; 41 enum dma_data_direction dir; 42 unsigned long flags; 43 }; ··· 283 } 284 285 static int coh901318_prep_linked_list(struct coh901318_chan *cohc, 286 + struct coh901318_lli *lli) 287 { 288 int channel = cohc->id; 289 void __iomem *virtbase = cohc->base->virtbase; ··· 292 COH901318_CX_STAT_SPACING*channel) & 293 COH901318_CX_STAT_ACTIVE); 294 295 + writel(lli->src_addr, 296 virtbase + COH901318_CX_SRC_ADDR + 297 COH901318_CX_SRC_ADDR_SPACING * channel); 298 299 + writel(lli->dst_addr, virtbase + 300 COH901318_CX_DST_ADDR + 301 COH901318_CX_DST_ADDR_SPACING * channel); 302 303 + writel(lli->link_addr, virtbase + COH901318_CX_LNK_ADDR + 304 COH901318_CX_LNK_ADDR_SPACING * channel); 305 306 + writel(lli->control, virtbase + COH901318_CX_CTRL + 307 COH901318_CX_CTRL_SPACING * channel); 308 309 return 0; ··· 408 return d; 409 } 410 411 + static inline u32 coh901318_get_bytes_in_lli(struct coh901318_lli *in_lli) 412 { 413 + struct coh901318_lli *lli = in_lli; 414 + u32 bytes = 0; 415 + 416 + while (lli) { 417 + bytes += lli->control & COH901318_CX_CTRL_TC_VALUE_MASK; 418 + lli = lli->virt_link_addr; 419 + } 420 + return bytes; 421 + } 422 + 423 + /* 424 + * Get the number of bytes left to transfer on this channel, 425 + * it is unwise to call this before stopping the channel for 426 + * absolute measures, but for a rough guess you can still call 427 + * it. 428 + */ 429 + static u32 coh901318_get_bytes_left(struct dma_chan *chan) 430 + { 431 struct coh901318_chan *cohc = to_coh901318_chan(chan); 432 + struct coh901318_desc *cohd; 433 + struct list_head *pos; 434 + unsigned long flags; 435 + u32 left = 0; 436 + int i = 0; 437 438 spin_lock_irqsave(&cohc->lock, flags); 439 440 + /* 441 + * If there are many queued jobs, we iterate and add the 442 + * size of them all. We take a special look on the first 443 + * job though, since it is probably active. 444 + */ 445 + list_for_each(pos, &cohc->active) { 446 + /* 447 + * The first job in the list will be working on the 448 + * hardware. The job can be stopped but still active, 449 + * so that the transfer counter is somewhere inside 450 + * the buffer. 451 + */ 452 + cohd = list_entry(pos, struct coh901318_desc, node); 453 + 454 + if (i == 0) { 455 + struct coh901318_lli *lli; 456 + dma_addr_t ladd; 457 + 458 + /* Read current transfer count value */ 459 + left = readl(cohc->base->virtbase + 460 + COH901318_CX_CTRL + 461 + COH901318_CX_CTRL_SPACING * cohc->id) & 462 + COH901318_CX_CTRL_TC_VALUE_MASK; 463 + 464 + /* See if the transfer is linked... 
*/ 465 + ladd = readl(cohc->base->virtbase + 466 + COH901318_CX_LNK_ADDR + 467 + COH901318_CX_LNK_ADDR_SPACING * 468 + cohc->id) & 469 + ~COH901318_CX_LNK_LINK_IMMEDIATE; 470 + /* Single transaction */ 471 + if (!ladd) 472 + continue; 473 + 474 + /* 475 + * Linked transaction, follow the lli, find the 476 + * currently processing lli, and proceed to the next 477 + */ 478 + lli = cohd->lli; 479 + while (lli && lli->link_addr != ladd) 480 + lli = lli->virt_link_addr; 481 + 482 + if (lli) 483 + lli = lli->virt_link_addr; 484 + 485 + /* 486 + * Follow remaining lli links around to count the total 487 + * number of bytes left 488 + */ 489 + left += coh901318_get_bytes_in_lli(lli); 490 + } else { 491 + left += coh901318_get_bytes_in_lli(cohd->lli); 492 + } 493 + i++; 494 + } 495 + 496 + /* Also count bytes in the queued jobs */ 497 + list_for_each(pos, &cohc->queue) { 498 + cohd = list_entry(pos, struct coh901318_desc, node); 499 + left += coh901318_get_bytes_in_lli(cohd->lli); 500 + } 501 502 spin_unlock_irqrestore(&cohc->lock, flags); 503 504 + return left; 505 } 506 507 + /* 508 + * Pauses a transfer without losing data. Enables power save. 509 + * Use this function in conjunction with coh901318_resume. 510 + */ 511 + static void coh901318_pause(struct dma_chan *chan) 512 { 513 u32 val; 514 unsigned long flags; ··· 475 476 spin_unlock_irqrestore(&cohc->lock, flags); 477 } 478 479 + /* Resumes a transfer that has been stopped via 300_dma_stop(..). 480 Power save is handled. 481 */ 482 + static void coh901318_resume(struct dma_chan *chan) 483 { 484 u32 val; 485 unsigned long flags; ··· 506 507 spin_unlock_irqrestore(&cohc->lock, flags); 508 } 509 510 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id) 511 { ··· 565 */ 566 static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc) 567 { 568 + struct coh901318_desc *cohd; 569 570 + /* 571 + * start queued jobs, if any 572 * TODO: transmit all queued jobs in one go 573 */ 574 + cohd = coh901318_first_queued(cohc); 575 576 + if (cohd != NULL) { 577 /* Remove from queue */ 578 + coh901318_desc_remove(cohd); 579 /* initiate DMA job */ 580 cohc->busy = 1; 581 582 + coh901318_desc_submit(cohc, cohd); 583 584 + coh901318_prep_linked_list(cohc, cohd->lli); 585 586 + /* start dma job on this channel */ 587 coh901318_start(cohc); 588 589 } 590 591 + return cohd; 592 } 593 594 /* ··· 622 cohc->completed = cohd_fin->desc.cookie; 623 624 /* release the lli allocation and remove the descriptor */ 625 + coh901318_lli_free(&cohc->base->pool, &cohd_fin->lli); 626 627 /* return desc to free-list */ 628 coh901318_desc_remove(cohd_fin); ··· 666 /* called from interrupt context */ 667 static void dma_tc_handle(struct coh901318_chan *cohc) 668 { 669 + /* 670 + * If the channel is not allocated, then we shouldn't have 671 + * any TC interrupts on it. 672 + */ 673 + if (!cohc->allocated) { 674 + dev_err(COHC_2_DEV(cohc), "spurious interrupt from " 675 + "unallocated channel\n"); 676 return; 677 + } 678 679 spin_lock(&cohc->lock); 680 681 + /* 682 + * When we reach this point, at least one queue item 683 + * should have been moved over from cohc->queue to 684 + * cohc->active and run to completion, that is why we're 685 + * getting a terminal count interrupt is it not? 686 + * If you get this BUG() the most probable cause is that 687 + * the individual nodes in the lli chain have IRQ enabled, 688 + * so check your platform config for lli chain ctrl. 
689 + */ 690 + BUG_ON(list_empty(&cohc->active)); 691 + 692 cohc->nbr_active_done++; 693 694 + /* 695 + * This attempt to take a job from cohc->queue, put it 696 + * into cohc->active and start it. 697 + */ 698 if (coh901318_queue_start(cohc) == NULL) 699 cohc->busy = 0; 700 701 spin_unlock(&cohc->lock); 702 703 + /* 704 + * This tasklet will remove items from cohc->active 705 + * and thus terminates them. 706 + */ 707 if (cohc_chan_conf(cohc)->priority_high) 708 tasklet_hi_schedule(&cohc->tasklet); 709 else ··· 809 static int coh901318_alloc_chan_resources(struct dma_chan *chan) 810 { 811 struct coh901318_chan *cohc = to_coh901318_chan(chan); 812 + unsigned long flags; 813 814 dev_vdbg(COHC_2_DEV(cohc), "[%s] DMA channel %d\n", 815 __func__, cohc->id); ··· 816 if (chan->client_count > 1) 817 return -EBUSY; 818 819 + spin_lock_irqsave(&cohc->lock, flags); 820 + 821 coh901318_config(cohc, NULL); 822 823 cohc->allocated = 1; 824 cohc->completed = chan->cookie = 1; 825 + 826 + spin_unlock_irqrestore(&cohc->lock, flags); 827 828 return 1; 829 } ··· 843 844 spin_unlock_irqrestore(&cohc->lock, flags); 845 846 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 847 } 848 849 ··· 870 coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, 871 size_t size, unsigned long flags) 872 { 873 + struct coh901318_lli *lli; 874 struct coh901318_desc *cohd; 875 unsigned long flg; 876 struct coh901318_chan *cohc = to_coh901318_chan(chan); ··· 892 if ((lli_len << MAX_DMA_PACKET_SIZE_SHIFT) < size) 893 lli_len++; 894 895 + lli = coh901318_lli_alloc(&cohc->base->pool, lli_len); 896 897 + if (lli == NULL) 898 goto err; 899 900 ret = coh901318_lli_fill_memcpy( 901 + &cohc->base->pool, lli, src, size, dest, 902 cohc_chan_param(cohc)->ctrl_lli_chained, 903 ctrl_last); 904 if (ret) 905 goto err; 906 907 + COH_DBG(coh901318_list_print(cohc, lli)); 908 909 /* Pick a descriptor to handle this transfer */ 910 cohd = coh901318_desc_get(cohc); 911 + cohd->lli = lli; 912 cohd->flags = flags; 913 cohd->desc.tx_submit = coh901318_tx_submit; 914 ··· 926 unsigned long flags) 927 { 928 struct coh901318_chan *cohc = to_coh901318_chan(chan); 929 + struct coh901318_lli *lli; 930 struct coh901318_desc *cohd; 931 const struct coh901318_params *params; 932 struct scatterlist *sg; ··· 999 } 1000 1001 pr_debug("Allocate %d lli:s for this transfer\n", len); 1002 + lli = coh901318_lli_alloc(&cohc->base->pool, len); 1003 1004 + if (lli == NULL) 1005 goto err_dma_alloc; 1006 1007 + /* initiate allocated lli list */ 1008 + ret = coh901318_lli_fill_sg(&cohc->base->pool, lli, sgl, sg_len, 1009 cohc_dev_addr(cohc), 1010 ctrl_chained, 1011 ctrl, ··· 1014 if (ret) 1015 goto err_lli_fill; 1016 1017 + COH_DBG(coh901318_list_print(cohc, lli)); 1018 1019 /* Pick a descriptor to handle this transfer */ 1020 cohd = coh901318_desc_get(cohc); 1021 cohd->dir = direction; 1022 cohd->flags = flags; 1023 cohd->desc.tx_submit = coh901318_tx_submit; 1024 + cohd->lli = lli; 1025 1026 spin_unlock_irqrestore(&cohc->lock, flg); 1027 ··· 1035 } 1036 1037 static enum dma_status 1038 + coh901318_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 1039 + struct dma_tx_state *txstate) 1040 { 1041 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1042 dma_cookie_t last_used; ··· 1049 1050 ret = dma_async_is_complete(cookie, last_complete, last_used); 1051 1052 + dma_set_tx_state(txstate, last_complete, last_used, 1053 + coh901318_get_bytes_left(chan)); 1054 + if (ret == DMA_IN_PROGRESS && cohc->stopped) 1055 + ret = DMA_PAUSED; 1056 
1057 return ret; 1058 } ··· 1065 1066 spin_lock_irqsave(&cohc->lock, flags); 1067 1068 + /* 1069 + * Busy means that pending jobs are already being processed, 1070 + * and then there is no point in starting the queue: the 1071 + * terminal count interrupt on the channel will take the next 1072 + * job on the queue and execute it anyway. 1073 + */ 1074 if (!cohc->busy) 1075 coh901318_queue_start(cohc); 1076 1077 spin_unlock_irqrestore(&cohc->lock, flags); 1078 } 1079 1080 + static int 1081 + coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1082 + unsigned long arg) 1083 { 1084 unsigned long flags; 1085 struct coh901318_chan *cohc = to_coh901318_chan(chan); 1086 struct coh901318_desc *cohd; 1087 void __iomem *virtbase = cohc->base->virtbase; 1088 1089 + if (cmd == DMA_PAUSE) { 1090 + coh901318_pause(chan); 1091 + return 0; 1092 + } 1093 1094 + if (cmd == DMA_RESUME) { 1095 + coh901318_resume(chan); 1096 + return 0; 1097 + } 1098 + 1099 + if (cmd != DMA_TERMINATE_ALL) 1100 + return -ENXIO; 1101 + 1102 + /* The remainder of this function terminates the transfer */ 1103 + coh901318_pause(chan); 1104 spin_lock_irqsave(&cohc->lock, flags); 1105 1106 /* Clear any pending BE or TC interrupt */ ··· 1099 1100 while ((cohd = coh901318_first_active_get(cohc))) { 1101 /* release the lli allocation*/ 1102 + coh901318_lli_free(&cohc->base->pool, &cohd->lli); 1103 1104 /* return desc to free-list */ 1105 coh901318_desc_remove(cohd); ··· 1108 1109 while ((cohd = coh901318_first_queued(cohc))) { 1110 /* release the lli allocation*/ 1111 + coh901318_lli_free(&cohc->base->pool, &cohd->lli); 1112 1113 /* return desc to free-list */ 1114 coh901318_desc_remove(cohd); ··· 1120 cohc->busy = 0; 1121 1122 spin_unlock_irqrestore(&cohc->lock, flags); 1123 + 1124 + return 0; 1125 } 1126 void coh901318_base_init(struct dma_device *dma, const int *pick_chans, 1127 struct coh901318_base *base) ··· 1235 base->dma_slave.device_alloc_chan_resources = coh901318_alloc_chan_resources; 1236 base->dma_slave.device_free_chan_resources = coh901318_free_chan_resources; 1237 base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg; 1238 + base->dma_slave.device_tx_status = coh901318_tx_status; 1239 base->dma_slave.device_issue_pending = coh901318_issue_pending; 1240 + base->dma_slave.device_control = coh901318_control; 1241 base->dma_slave.dev = &pdev->dev; 1242 1243 err = dma_async_device_register(&base->dma_slave); ··· 1255 base->dma_memcpy.device_alloc_chan_resources = coh901318_alloc_chan_resources; 1256 base->dma_memcpy.device_free_chan_resources = coh901318_free_chan_resources; 1257 base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy; 1258 + base->dma_memcpy.device_tx_status = coh901318_tx_status; 1259 base->dma_memcpy.device_issue_pending = coh901318_issue_pending; 1260 + base->dma_memcpy.device_control = coh901318_control; 1261 base->dma_memcpy.dev = &pdev->dev; 1262 /* 1263 * This controller can only access address at even 32bit boundaries,
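With pause/resume folded into coh901318_control() and the byte count reported through coh901318_get_bytes_left(), a client no longer needs the driver-private exports that the coh901318.h hunk above removes; it can pause, inspect and resume a channel entirely through the generic interface. A hedged client-side sketch (error handling omitted):

    /* pause the channel, read how much is left, then resume */
    static u32 example_paused_residue(struct dma_chan *chan,
                                      dma_cookie_t cookie)
    {
            struct dma_tx_state state;

            chan->device->device_control(chan, DMA_PAUSE, 0);

            /* while paused, status is DMA_PAUSED and state.residue
             * holds the number of bytes left on the transfer */
            chan->device->device_tx_status(chan, cookie, &state);

            chan->device->device_control(chan, DMA_RESUME, 0);

            return state.residue;
    }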
+11 -11
drivers/dma/dmaengine.c
··· 515 break; 516 if (--device->privatecnt == 0) 517 dma_cap_clear(DMA_PRIVATE, device->cap_mask); 518 - chan->private = NULL; 519 chan = NULL; 520 } 521 } ··· 536 /* drop PRIVATE cap enabled by __dma_request_channel() */ 537 if (--chan->device->privatecnt == 0) 538 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); 539 - chan->private = NULL; 540 mutex_unlock(&dma_list_mutex); 541 } 542 EXPORT_SYMBOL_GPL(dma_release_channel); ··· 693 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 694 !device->device_prep_slave_sg); 695 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 696 - !device->device_terminate_all); 697 698 BUG_ON(!device->device_alloc_chan_resources); 699 BUG_ON(!device->device_free_chan_resources); 700 - BUG_ON(!device->device_is_tx_complete); 701 BUG_ON(!device->device_issue_pending); 702 BUG_ON(!device->dev); 703 ··· 976 struct dma_chan *chan) 977 { 978 tx->chan = chan; 979 spin_lock_init(&tx->lock); 980 } 981 EXPORT_SYMBOL(dma_async_tx_descriptor_init); 982 ··· 1011 */ 1012 void dma_run_dependencies(struct dma_async_tx_descriptor *tx) 1013 { 1014 - struct dma_async_tx_descriptor *dep = tx->next; 1015 struct dma_async_tx_descriptor *dep_next; 1016 struct dma_chan *chan; 1017 ··· 1019 return; 1020 1021 /* we'll submit tx->next now, so clear the link */ 1022 - tx->next = NULL; 1023 chan = dep->chan; 1024 1025 /* keep submitting up until a channel switch is detected ··· 1027 * processing the interrupt from async_tx_channel_switch 1028 */ 1029 for (; dep; dep = dep_next) { 1030 - spin_lock_bh(&dep->lock); 1031 - dep->parent = NULL; 1032 - dep_next = dep->next; 1033 if (dep_next && dep_next->chan == chan) 1034 - dep->next = NULL; /* ->next will be submitted */ 1035 else 1036 dep_next = NULL; /* submit current dep and terminate */ 1037 - spin_unlock_bh(&dep->lock); 1038 1039 dep->tx_submit(dep); 1040 }
··· 515 break; 516 if (--device->privatecnt == 0) 517 dma_cap_clear(DMA_PRIVATE, device->cap_mask); 518 chan = NULL; 519 } 520 } ··· 537 /* drop PRIVATE cap enabled by __dma_request_channel() */ 538 if (--chan->device->privatecnt == 0) 539 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); 540 mutex_unlock(&dma_list_mutex); 541 } 542 EXPORT_SYMBOL_GPL(dma_release_channel); ··· 695 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 696 !device->device_prep_slave_sg); 697 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) && 698 + !device->device_control); 699 700 BUG_ON(!device->device_alloc_chan_resources); 701 BUG_ON(!device->device_free_chan_resources); 702 + BUG_ON(!device->device_tx_status); 703 BUG_ON(!device->device_issue_pending); 704 BUG_ON(!device->dev); 705 ··· 978 struct dma_chan *chan) 979 { 980 tx->chan = chan; 981 + #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH 982 spin_lock_init(&tx->lock); 983 + #endif 984 } 985 EXPORT_SYMBOL(dma_async_tx_descriptor_init); 986 ··· 1011 */ 1012 void dma_run_dependencies(struct dma_async_tx_descriptor *tx) 1013 { 1014 + struct dma_async_tx_descriptor *dep = txd_next(tx); 1015 struct dma_async_tx_descriptor *dep_next; 1016 struct dma_chan *chan; 1017 ··· 1019 return; 1020 1021 /* we'll submit tx->next now, so clear the link */ 1022 + txd_clear_next(tx); 1023 chan = dep->chan; 1024 1025 /* keep submitting up until a channel switch is detected ··· 1027 * processing the interrupt from async_tx_channel_switch 1028 */ 1029 for (; dep; dep = dep_next) { 1030 + txd_lock(dep); 1031 + txd_clear_parent(dep); 1032 + dep_next = txd_next(dep); 1033 if (dep_next && dep_next->chan == chan) 1034 + txd_clear_next(dep); /* ->next will be submitted */ 1035 else 1036 dep_next = NULL; /* submit current dep and terminate */ 1037 + txd_unlock(dep); 1038 1039 dep->tx_submit(dep); 1040 }
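The two reworked BUG_ON() checks above spell out the contract introduced by the 'generic slave control v2' and 'generic channel status v2' patches: slave-capable drivers must provide device_control(), and every driver must provide device_tx_status(). At this merge the corresponding dmaengine.h pieces look roughly like this (sketch; the header has the authoritative definitions):

    enum dma_ctrl_cmd {
            DMA_TERMINATE_ALL,
            DMA_PAUSE,
            DMA_RESUME,
    };

    struct dma_device {
            /* ... */
            int (*device_control)(struct dma_chan *chan,
                                  enum dma_ctrl_cmd cmd,
                                  unsigned long arg);
            enum dma_status (*device_tx_status)(struct dma_chan *chan,
                                                dma_cookie_t cookie,
                                                struct dma_tx_state *txstate);
            /* ... */
    };

Drivers that only support termination (at_hdmac, dw_dmac, fsldma in this merge) return -ENXIO for any other command; coh901318 additionally implements DMA_PAUSE and DMA_RESUME.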
+14 -10
drivers/dma/dw_dmac.c
··· 781 return NULL; 782 } 783 784 - static void dwc_terminate_all(struct dma_chan *chan) 785 { 786 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 787 struct dw_dma *dw = to_dw_dma(chan->device); 788 struct dw_desc *desc, *_desc; 789 LIST_HEAD(list); 790 791 /* 792 * This is only called when something went wrong elsewhere, so ··· 815 /* Flush all pending and queued descriptors */ 816 list_for_each_entry_safe(desc, _desc, &list, desc_node) 817 dwc_descriptor_complete(dwc, desc); 818 } 819 820 static enum dma_status 821 - dwc_is_tx_complete(struct dma_chan *chan, 822 - dma_cookie_t cookie, 823 - dma_cookie_t *done, dma_cookie_t *used) 824 { 825 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 826 dma_cookie_t last_used; ··· 842 ret = dma_async_is_complete(cookie, last_complete, last_used); 843 } 844 845 - if (done) 846 - *done = last_complete; 847 - if (used) 848 - *used = last_used; 849 850 return ret; 851 } ··· 1342 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1343 1344 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1345 - dw->dma.device_terminate_all = dwc_terminate_all; 1346 1347 - dw->dma.device_is_tx_complete = dwc_is_tx_complete; 1348 dw->dma.device_issue_pending = dwc_issue_pending; 1349 1350 dma_writel(dw, CFG, DW_CFG_DMA_EN);
··· 781 return NULL; 782 } 783 784 + static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 785 + unsigned long arg) 786 { 787 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 788 struct dw_dma *dw = to_dw_dma(chan->device); 789 struct dw_desc *desc, *_desc; 790 LIST_HEAD(list); 791 + 792 + /* Only supports DMA_TERMINATE_ALL */ 793 + if (cmd != DMA_TERMINATE_ALL) 794 + return -ENXIO; 795 796 /* 797 * This is only called when something went wrong elsewhere, so ··· 810 /* Flush all pending and queued descriptors */ 811 list_for_each_entry_safe(desc, _desc, &list, desc_node) 812 dwc_descriptor_complete(dwc, desc); 813 + 814 + return 0; 815 } 816 817 static enum dma_status 818 + dwc_tx_status(struct dma_chan *chan, 819 + dma_cookie_t cookie, 820 + struct dma_tx_state *txstate) 821 { 822 struct dw_dma_chan *dwc = to_dw_dma_chan(chan); 823 dma_cookie_t last_used; ··· 835 ret = dma_async_is_complete(cookie, last_complete, last_used); 836 } 837 838 + dma_set_tx_state(txstate, last_complete, last_used, 0); 839 840 return ret; 841 } ··· 1338 dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy; 1339 1340 dw->dma.device_prep_slave_sg = dwc_prep_slave_sg; 1341 + dw->dma.device_control = dwc_control; 1342 1343 + dw->dma.device_tx_status = dwc_tx_status; 1344 dw->dma.device_issue_pending = dwc_issue_pending; 1345 1346 dma_writel(dw, CFG, DW_CFG_DMA_EN);
+15 -13
drivers/dma/fsldma.c
··· 775 return NULL; 776 } 777 778 - static void fsl_dma_device_terminate_all(struct dma_chan *dchan) 779 { 780 struct fsldma_chan *chan; 781 unsigned long flags; 782 783 if (!dchan) 784 - return; 785 786 chan = to_fsl_chan(dchan); 787 ··· 800 fsldma_free_desc_list(chan, &chan->ld_running); 801 802 spin_unlock_irqrestore(&chan->desc_lock, flags); 803 } 804 805 /** ··· 972 } 973 974 /** 975 - * fsl_dma_is_complete - Determine the DMA status 976 * @chan : Freescale DMA channel 977 */ 978 - static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan, 979 dma_cookie_t cookie, 980 - dma_cookie_t *done, 981 - dma_cookie_t *used) 982 { 983 struct fsldma_chan *chan = to_fsl_chan(dchan); 984 dma_cookie_t last_used; ··· 988 last_used = dchan->cookie; 989 last_complete = chan->completed_cookie; 990 991 - if (done) 992 - *done = last_complete; 993 - 994 - if (used) 995 - *used = last_used; 996 997 return dma_async_is_complete(cookie, last_complete, last_used); 998 } ··· 1332 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1333 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; 1334 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1335 - fdev->common.device_is_tx_complete = fsl_dma_is_complete; 1336 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1337 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; 1338 - fdev->common.device_terminate_all = fsl_dma_device_terminate_all; 1339 fdev->common.dev = &op->dev; 1340 1341 dev_set_drvdata(&op->dev, fdev);
··· 775 return NULL; 776 } 777 778 + static int fsl_dma_device_control(struct dma_chan *dchan, 779 + enum dma_ctrl_cmd cmd, unsigned long arg) 780 { 781 struct fsldma_chan *chan; 782 unsigned long flags; 783 784 + /* Only supports DMA_TERMINATE_ALL */ 785 + if (cmd != DMA_TERMINATE_ALL) 786 + return -ENXIO; 787 + 788 if (!dchan) 789 + return -EINVAL; 790 791 chan = to_fsl_chan(dchan); 792 ··· 795 fsldma_free_desc_list(chan, &chan->ld_running); 796 797 spin_unlock_irqrestore(&chan->desc_lock, flags); 798 + 799 + return 0; 800 } 801 802 /** ··· 965 } 966 967 /** 968 + * fsl_tx_status - Determine the DMA status 969 * @chan : Freescale DMA channel 970 */ 971 + static enum dma_status fsl_tx_status(struct dma_chan *dchan, 972 dma_cookie_t cookie, 973 + struct dma_tx_state *txstate) 974 { 975 struct fsldma_chan *chan = to_fsl_chan(dchan); 976 dma_cookie_t last_used; ··· 982 last_used = dchan->cookie; 983 last_complete = chan->completed_cookie; 984 985 + dma_set_tx_state(txstate, last_complete, last_used, 0); 986 987 return dma_async_is_complete(cookie, last_complete, last_used); 988 } ··· 1330 fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; 1331 fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; 1332 fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; 1333 + fdev->common.device_tx_status = fsl_tx_status; 1334 fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; 1335 fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg; 1336 + fdev->common.device_control = fsl_dma_device_control; 1337 fdev->common.dev = &op->dev; 1338 1339 dev_set_drvdata(&op->dev, fdev);
+6 -6
drivers/dma/ioat/dma.c
··· 727 } 728 729 enum dma_status 730 - ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, 731 - dma_cookie_t *done, dma_cookie_t *used) 732 { 733 struct ioat_chan_common *chan = to_chan_common(c); 734 struct ioatdma_device *device = chan->device; 735 736 - if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) 737 return DMA_SUCCESS; 738 739 device->cleanup_fn((unsigned long) c); 740 741 - return ioat_is_complete(c, cookie, done, used); 742 } 743 744 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) ··· 858 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 859 860 if (tmo == 0 || 861 - dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) 862 != DMA_SUCCESS) { 863 dev_err(dev, "Self-test copy timed out, disabling\n"); 864 err = -ENODEV; ··· 1199 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; 1200 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; 1201 dma->device_free_chan_resources = ioat1_dma_free_chan_resources; 1202 - dma->device_is_tx_complete = ioat_is_dma_complete; 1203 1204 err = ioat_probe(device); 1205 if (err)
··· 727 } 728 729 enum dma_status 730 + ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, 731 + struct dma_tx_state *txstate) 732 { 733 struct ioat_chan_common *chan = to_chan_common(c); 734 struct ioatdma_device *device = chan->device; 735 736 + if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) 737 return DMA_SUCCESS; 738 739 device->cleanup_fn((unsigned long) c); 740 741 + return ioat_tx_status(c, cookie, txstate); 742 } 743 744 static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) ··· 858 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 859 860 if (tmo == 0 || 861 + dma->device_tx_status(dma_chan, cookie, NULL) 862 != DMA_SUCCESS) { 863 dev_err(dev, "Self-test copy timed out, disabling\n"); 864 err = -ENODEV; ··· 1199 dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; 1200 dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; 1201 dma->device_free_chan_resources = ioat1_dma_free_chan_resources; 1202 + dma->device_tx_status = ioat_dma_tx_status; 1203 1204 err = ioat_probe(device); 1205 if (err)
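Note that the self-test above passes a NULL txstate, which dma_set_tx_state() accepts. Callers that still want the old done/used cookies go through dma_async_is_tx_complete(), which after this series is just a thin unpacking wrapper, roughly (sketch of the dmaengine.h wrapper):

    static inline enum dma_status
    dma_async_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
                             dma_cookie_t *last, dma_cookie_t *used)
    {
            struct dma_tx_state state;
            enum dma_status status;

            status = chan->device->device_tx_status(chan, cookie, &state);
            if (last)
                    *last = state.last;
            if (used)
                    *used = state.used;

            return status;
    }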
+8 -11
drivers/dma/ioat/dma.h
··· 96 #define IOAT_COMPLETION_ACK 1 97 #define IOAT_RESET_PENDING 2 98 #define IOAT_KOBJ_INIT_FAIL 3 99 struct timer_list timer; 100 #define COMPLETION_TIMEOUT msecs_to_jiffies(100) 101 #define IDLE_TIMEOUT msecs_to_jiffies(2000) ··· 143 } 144 145 /** 146 - * ioat_is_complete - poll the status of an ioat transaction 147 * @c: channel handle 148 * @cookie: transaction identifier 149 - * @done: if set, updated with last completed transaction 150 - * @used: if set, updated with last used transaction 151 */ 152 static inline enum dma_status 153 - ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, 154 - dma_cookie_t *done, dma_cookie_t *used) 155 { 156 struct ioat_chan_common *chan = to_chan_common(c); 157 dma_cookie_t last_used; ··· 159 last_used = c->cookie; 160 last_complete = chan->completed_cookie; 161 162 - if (done) 163 - *done = last_complete; 164 - if (used) 165 - *used = last_used; 166 167 return dma_async_is_complete(cookie, last_complete, last_used); 168 } ··· 335 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); 336 void ioat_init_channel(struct ioatdma_device *device, 337 struct ioat_chan_common *chan, int idx); 338 - enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, 339 - dma_cookie_t *done, dma_cookie_t *used); 340 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, 341 size_t len, struct ioat_dma_descriptor *hw); 342 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
··· 96 #define IOAT_COMPLETION_ACK 1 97 #define IOAT_RESET_PENDING 2 98 #define IOAT_KOBJ_INIT_FAIL 3 99 + #define IOAT_RESHAPE_PENDING 4 100 struct timer_list timer; 101 #define COMPLETION_TIMEOUT msecs_to_jiffies(100) 102 #define IDLE_TIMEOUT msecs_to_jiffies(2000) ··· 142 } 143 144 /** 145 + * ioat_tx_status - poll the status of an ioat transaction 146 * @c: channel handle 147 * @cookie: transaction identifier 148 + * @txstate: if set, updated with the transaction state 149 */ 150 static inline enum dma_status 151 + ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, 152 + struct dma_tx_state *txstate) 153 { 154 struct ioat_chan_common *chan = to_chan_common(c); 155 dma_cookie_t last_used; ··· 159 last_used = c->cookie; 160 last_complete = chan->completed_cookie; 161 162 + dma_set_tx_state(txstate, last_complete, last_used, 0); 163 164 return dma_async_is_complete(cookie, last_complete, last_used); 165 } ··· 338 unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); 339 void ioat_init_channel(struct ioatdma_device *device, 340 struct ioat_chan_common *chan, int idx); 341 + enum dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, 342 + struct dma_tx_state *txstate); 343 void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, 344 size_t len, struct ioat_dma_descriptor *hw); 345 bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
+93 -89
drivers/dma/ioat/dma_v2.c
··· 56 57 ioat->dmacount += ioat2_ring_pending(ioat); 58 ioat->issued = ioat->head; 59 - /* make descriptor updates globally visible before notifying channel */ 60 - wmb(); 61 writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); 62 dev_dbg(to_dev(chan), 63 "%s: head: %#x tail: %#x issued: %#x count: %#x\n", ··· 67 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 68 69 if (ioat2_ring_pending(ioat)) { 70 - spin_lock_bh(&ioat->ring_lock); 71 __ioat2_issue_pending(ioat); 72 - spin_unlock_bh(&ioat->ring_lock); 73 } 74 } 75 ··· 78 * @ioat: ioat2+ channel 79 * 80 * Check if the number of unsubmitted descriptors has exceeded the 81 - * watermark. Called with ring_lock held 82 */ 83 static void ioat2_update_pending(struct ioat2_dma_chan *ioat) 84 { ··· 90 { 91 struct ioat_ring_ent *desc; 92 struct ioat_dma_descriptor *hw; 93 - int idx; 94 95 if (ioat2_ring_space(ioat) < 1) { 96 dev_err(to_dev(&ioat->base), ··· 99 100 dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", 101 __func__, ioat->head, ioat->tail, ioat->issued); 102 - idx = ioat2_desc_alloc(ioat, 1); 103 - desc = ioat2_get_ring_ent(ioat, idx); 104 105 hw = desc->hw; 106 hw->ctl = 0; ··· 113 async_tx_ack(&desc->txd); 114 ioat2_set_chainaddr(ioat, desc->txd.phys); 115 dump_desc_dbg(ioat, desc); 116 __ioat2_issue_pending(ioat); 117 } 118 119 static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) 120 { 121 - spin_lock_bh(&ioat->ring_lock); 122 __ioat2_start_null_desc(ioat); 123 - spin_unlock_bh(&ioat->ring_lock); 124 } 125 126 static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) ··· 132 struct ioat_ring_ent *desc; 133 bool seen_current = false; 134 u16 active; 135 - int i; 136 137 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 138 __func__, ioat->head, ioat->tail, ioat->issued); 139 140 active = ioat2_ring_active(ioat); 141 for (i = 0; i < active && !seen_current; i++) { 142 - prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); 143 - desc = ioat2_get_ring_ent(ioat, ioat->tail + i); 144 tx = &desc->txd; 145 dump_desc_dbg(ioat, desc); 146 if (tx->cookie) { ··· 157 if (tx->phys == phys_complete) 158 seen_current = true; 159 } 160 - ioat->tail += i; 161 BUG_ON(active && !seen_current); /* no active descs have written a completion? 
*/ 162 163 chan->last_completion = phys_complete; 164 - if (ioat->head == ioat->tail) { 165 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 166 __func__); 167 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); ··· 179 struct ioat_chan_common *chan = &ioat->base; 180 unsigned long phys_complete; 181 182 - prefetch(chan->completion); 183 - 184 - if (!spin_trylock_bh(&chan->cleanup_lock)) 185 - return; 186 - 187 - if (!ioat_cleanup_preamble(chan, &phys_complete)) { 188 - spin_unlock_bh(&chan->cleanup_lock); 189 - return; 190 - } 191 - 192 - if (!spin_trylock_bh(&ioat->ring_lock)) { 193 - spin_unlock_bh(&chan->cleanup_lock); 194 - return; 195 - } 196 - 197 - __cleanup(ioat, phys_complete); 198 - 199 - spin_unlock_bh(&ioat->ring_lock); 200 spin_unlock_bh(&chan->cleanup_lock); 201 } 202 ··· 272 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 273 struct ioat_chan_common *chan = &ioat->base; 274 275 - spin_lock_bh(&chan->cleanup_lock); 276 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 277 unsigned long phys_complete; 278 u64 status; 279 280 - spin_lock_bh(&ioat->ring_lock); 281 status = ioat_chansts(chan); 282 283 /* when halted due to errors check for channel ··· 294 * acknowledged a pending completion once, then be more 295 * forceful with a restart 296 */ 297 - if (ioat_cleanup_preamble(chan, &phys_complete)) 298 __cleanup(ioat, phys_complete); 299 - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) 300 ioat2_restart_channel(ioat); 301 - else { 302 set_bit(IOAT_COMPLETION_ACK, &chan->state); 303 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 304 } 305 - spin_unlock_bh(&ioat->ring_lock); 306 } else { 307 u16 active; 308 309 /* if the ring is idle, empty, and oversized try to step 310 * down the size 311 */ 312 - spin_lock_bh(&ioat->ring_lock); 313 active = ioat2_ring_active(ioat); 314 if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) 315 reshape_ring(ioat, ioat->alloc_order-1); 316 - spin_unlock_bh(&ioat->ring_lock); 317 318 /* keep shrinking until we get back to our minimum 319 * default size ··· 326 if (ioat->alloc_order > ioat_get_alloc_order()) 327 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 328 } 329 - spin_unlock_bh(&chan->cleanup_lock); 330 } 331 332 static int ioat2_reset_hw(struct ioat_chan_common *chan) ··· 379 380 ioat_init_channel(device, &ioat->base, i); 381 ioat->xfercap_log = xfercap_log; 382 - spin_lock_init(&ioat->ring_lock); 383 if (device->reset_hw(&ioat->base)) { 384 i = 0; 385 break; ··· 405 406 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) 407 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 408 ioat2_update_pending(ioat); 409 - spin_unlock_bh(&ioat->ring_lock); 410 411 return cookie; 412 } ··· 527 if (!ring) 528 return -ENOMEM; 529 530 - spin_lock_bh(&ioat->ring_lock); 531 ioat->ring = ring; 532 ioat->head = 0; 533 ioat->issued = 0; 534 ioat->tail = 0; 535 ioat->alloc_order = order; 536 - spin_unlock_bh(&ioat->ring_lock); 537 538 tasklet_enable(&chan->cleanup_task); 539 ioat2_start_null_desc(ioat); ··· 551 */ 552 struct ioat_chan_common *chan = &ioat->base; 553 struct dma_chan *c = &chan->common; 554 - const u16 curr_size = ioat2_ring_mask(ioat) + 1; 555 const u16 active = ioat2_ring_active(ioat); 556 const u16 new_size = 1 << order; 557 struct ioat_ring_ent **ring; ··· 651 } 652 653 /** 654 - * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops 655 - * @idx: gets starting descriptor index on successful allocation 656 * @ioat: ioat2,3 channel (ring) to operate on 657 * 
@num_descs: allocation length 658 */ 659 - int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) 660 { 661 struct ioat_chan_common *chan = &ioat->base; 662 663 - spin_lock_bh(&ioat->ring_lock); 664 /* never allow the last descriptor to be consumed, we need at 665 * least one free at all times to allow for on-the-fly ring 666 * resizing. 667 */ 668 - while (unlikely(ioat2_ring_space(ioat) <= num_descs)) { 669 - if (reshape_ring(ioat, ioat->alloc_order + 1) && 670 - ioat2_ring_space(ioat) > num_descs) 671 - break; 672 673 - if (printk_ratelimit()) 674 - dev_dbg(to_dev(chan), 675 - "%s: ring full! num_descs: %d (%x:%x:%x)\n", 676 - __func__, num_descs, ioat->head, ioat->tail, 677 - ioat->issued); 678 - spin_unlock_bh(&ioat->ring_lock); 679 680 - /* progress reclaim in the allocation failure case we 681 - * may be called under bh_disabled so we need to trigger 682 - * the timer event directly 683 - */ 684 - spin_lock_bh(&chan->cleanup_lock); 685 - if (jiffies > chan->timer.expires && 686 - timer_pending(&chan->timer)) { 687 - struct ioatdma_device *device = chan->device; 688 689 - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 690 - spin_unlock_bh(&chan->cleanup_lock); 691 - device->timer_fn((unsigned long) &chan->common); 692 - } else 693 - spin_unlock_bh(&chan->cleanup_lock); 694 - return -ENOMEM; 695 } 696 697 - dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", 698 - __func__, num_descs, ioat->head, ioat->tail, ioat->issued); 699 - 700 - *idx = ioat2_desc_alloc(ioat, num_descs); 701 - return 0; /* with ioat->ring_lock held */ 702 } 703 704 struct dma_async_tx_descriptor * ··· 718 dma_addr_t dst = dma_dest; 719 dma_addr_t src = dma_src; 720 size_t total_len = len; 721 - int num_descs; 722 - u16 idx; 723 - int i; 724 725 num_descs = ioat2_xferlen_to_descs(ioat, len); 726 - if (likely(num_descs) && 727 - ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) 728 - /* pass */; 729 else 730 return NULL; 731 i = 0; ··· 779 device->cleanup_fn((unsigned long) c); 780 device->reset_hw(chan); 781 782 - spin_lock_bh(&ioat->ring_lock); 783 descs = ioat2_ring_space(ioat); 784 dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); 785 for (i = 0; i < descs; i++) { ··· 803 ioat->alloc_order = 0; 804 pci_pool_free(device->completion_pool, chan->completion, 805 chan->completion_dma); 806 - spin_unlock_bh(&ioat->ring_lock); 807 808 chan->last_completion = 0; 809 chan->completion_dma = 0; ··· 859 dma->device_issue_pending = ioat2_issue_pending; 860 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 861 dma->device_free_chan_resources = ioat2_free_chan_resources; 862 - dma->device_is_tx_complete = ioat_is_dma_complete; 863 864 err = ioat_probe(device); 865 if (err)
··· 56 57 ioat->dmacount += ioat2_ring_pending(ioat); 58 ioat->issued = ioat->head; 59 writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); 60 dev_dbg(to_dev(chan), 61 "%s: head: %#x tail: %#x issued: %#x count: %#x\n", ··· 69 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 70 71 if (ioat2_ring_pending(ioat)) { 72 + spin_lock_bh(&ioat->prep_lock); 73 __ioat2_issue_pending(ioat); 74 + spin_unlock_bh(&ioat->prep_lock); 75 } 76 } 77 ··· 80 * @ioat: ioat2+ channel 81 * 82 * Check if the number of unsubmitted descriptors has exceeded the 83 + * watermark. Called with prep_lock held 84 */ 85 static void ioat2_update_pending(struct ioat2_dma_chan *ioat) 86 { ··· 92 { 93 struct ioat_ring_ent *desc; 94 struct ioat_dma_descriptor *hw; 95 96 if (ioat2_ring_space(ioat) < 1) { 97 dev_err(to_dev(&ioat->base), ··· 102 103 dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", 104 __func__, ioat->head, ioat->tail, ioat->issued); 105 + desc = ioat2_get_ring_ent(ioat, ioat->head); 106 107 hw = desc->hw; 108 hw->ctl = 0; ··· 117 async_tx_ack(&desc->txd); 118 ioat2_set_chainaddr(ioat, desc->txd.phys); 119 dump_desc_dbg(ioat, desc); 120 + wmb(); 121 + ioat->head += 1; 122 __ioat2_issue_pending(ioat); 123 } 124 125 static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) 126 { 127 + spin_lock_bh(&ioat->prep_lock); 128 __ioat2_start_null_desc(ioat); 129 + spin_unlock_bh(&ioat->prep_lock); 130 } 131 132 static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) ··· 134 struct ioat_ring_ent *desc; 135 bool seen_current = false; 136 u16 active; 137 + int idx = ioat->tail, i; 138 139 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 140 __func__, ioat->head, ioat->tail, ioat->issued); 141 142 active = ioat2_ring_active(ioat); 143 for (i = 0; i < active && !seen_current; i++) { 144 + smp_read_barrier_depends(); 145 + prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); 146 + desc = ioat2_get_ring_ent(ioat, idx + i); 147 tx = &desc->txd; 148 dump_desc_dbg(ioat, desc); 149 if (tx->cookie) { ··· 158 if (tx->phys == phys_complete) 159 seen_current = true; 160 } 161 + smp_mb(); /* finish all descriptor reads before incrementing tail */ 162 + ioat->tail = idx + i; 163 BUG_ON(active && !seen_current); /* no active descs have written a completion? 
*/ 164 165 chan->last_completion = phys_complete; 166 + if (active - i == 0) { 167 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 168 __func__); 169 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); ··· 179 struct ioat_chan_common *chan = &ioat->base; 180 unsigned long phys_complete; 181 182 + spin_lock_bh(&chan->cleanup_lock); 183 + if (ioat_cleanup_preamble(chan, &phys_complete)) 184 + __cleanup(ioat, phys_complete); 185 spin_unlock_bh(&chan->cleanup_lock); 186 } 187 ··· 287 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 288 struct ioat_chan_common *chan = &ioat->base; 289 290 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 291 unsigned long phys_complete; 292 u64 status; 293 294 status = ioat_chansts(chan); 295 296 /* when halted due to errors check for channel ··· 311 * acknowledged a pending completion once, then be more 312 * forceful with a restart 313 */ 314 + spin_lock_bh(&chan->cleanup_lock); 315 + if (ioat_cleanup_preamble(chan, &phys_complete)) { 316 __cleanup(ioat, phys_complete); 317 + } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { 318 + spin_lock_bh(&ioat->prep_lock); 319 ioat2_restart_channel(ioat); 320 + spin_unlock_bh(&ioat->prep_lock); 321 + } else { 322 set_bit(IOAT_COMPLETION_ACK, &chan->state); 323 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 324 } 325 + spin_unlock_bh(&chan->cleanup_lock); 326 } else { 327 u16 active; 328 329 /* if the ring is idle, empty, and oversized try to step 330 * down the size 331 */ 332 + spin_lock_bh(&chan->cleanup_lock); 333 + spin_lock_bh(&ioat->prep_lock); 334 active = ioat2_ring_active(ioat); 335 if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) 336 reshape_ring(ioat, ioat->alloc_order-1); 337 + spin_unlock_bh(&ioat->prep_lock); 338 + spin_unlock_bh(&chan->cleanup_lock); 339 340 /* keep shrinking until we get back to our minimum 341 * default size ··· 338 if (ioat->alloc_order > ioat_get_alloc_order()) 339 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 340 } 341 } 342 343 static int ioat2_reset_hw(struct ioat_chan_common *chan) ··· 392 393 ioat_init_channel(device, &ioat->base, i); 394 ioat->xfercap_log = xfercap_log; 395 + spin_lock_init(&ioat->prep_lock); 396 if (device->reset_hw(&ioat->base)) { 397 i = 0; 398 break; ··· 418 419 if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) 420 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 421 + 422 + /* make descriptor updates visible before advancing ioat->head, 423 + * this is purposefully not smp_wmb() since we are also 424 + * publishing the descriptor updates to a dma device 425 + */ 426 + wmb(); 427 + 428 + ioat->head += ioat->produce; 429 + 430 ioat2_update_pending(ioat); 431 + spin_unlock_bh(&ioat->prep_lock); 432 433 return cookie; 434 } ··· 531 if (!ring) 532 return -ENOMEM; 533 534 + spin_lock_bh(&chan->cleanup_lock); 535 + spin_lock_bh(&ioat->prep_lock); 536 ioat->ring = ring; 537 ioat->head = 0; 538 ioat->issued = 0; 539 ioat->tail = 0; 540 ioat->alloc_order = order; 541 + spin_unlock_bh(&ioat->prep_lock); 542 + spin_unlock_bh(&chan->cleanup_lock); 543 544 tasklet_enable(&chan->cleanup_task); 545 ioat2_start_null_desc(ioat); ··· 553 */ 554 struct ioat_chan_common *chan = &ioat->base; 555 struct dma_chan *c = &chan->common; 556 + const u16 curr_size = ioat2_ring_size(ioat); 557 const u16 active = ioat2_ring_active(ioat); 558 const u16 new_size = 1 << order; 559 struct ioat_ring_ent **ring; ··· 653 } 654 655 /** 656 + * ioat2_check_space_lock - verify space and grab ring producer lock 657 * @ioat: 
ioat2,3 channel (ring) to operate on 658 * @num_descs: allocation length 659 */ 660 + int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs) 661 { 662 struct ioat_chan_common *chan = &ioat->base; 663 + bool retry; 664 665 + retry: 666 + spin_lock_bh(&ioat->prep_lock); 667 /* never allow the last descriptor to be consumed, we need at 668 * least one free at all times to allow for on-the-fly ring 669 * resizing. 670 */ 671 + if (likely(ioat2_ring_space(ioat) > num_descs)) { 672 + dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", 673 + __func__, num_descs, ioat->head, ioat->tail, ioat->issued); 674 + ioat->produce = num_descs; 675 + return 0; /* with ioat->prep_lock held */ 676 + } 677 + retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state); 678 + spin_unlock_bh(&ioat->prep_lock); 679 680 + /* is another cpu already trying to expand the ring? */ 681 + if (retry) 682 + goto retry; 683 684 + spin_lock_bh(&chan->cleanup_lock); 685 + spin_lock_bh(&ioat->prep_lock); 686 + retry = reshape_ring(ioat, ioat->alloc_order + 1); 687 + clear_bit(IOAT_RESHAPE_PENDING, &chan->state); 688 + spin_unlock_bh(&ioat->prep_lock); 689 + spin_unlock_bh(&chan->cleanup_lock); 690 691 + /* if we were able to expand the ring retry the allocation */ 692 + if (retry) 693 + goto retry; 694 + 695 + if (printk_ratelimit()) 696 + dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n", 697 + __func__, num_descs, ioat->head, ioat->tail, ioat->issued); 698 + 699 + /* progress reclaim in the allocation failure case we may be 700 + * called under bh_disabled so we need to trigger the timer 701 + * event directly 702 + */ 703 + if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) { 704 + struct ioatdma_device *device = chan->device; 705 + 706 + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 707 + device->timer_fn((unsigned long) &chan->common); 708 } 709 710 + return -ENOMEM; 711 } 712 713 struct dma_async_tx_descriptor * ··· 713 dma_addr_t dst = dma_dest; 714 dma_addr_t src = dma_src; 715 size_t total_len = len; 716 + int num_descs, idx, i; 717 718 num_descs = ioat2_xferlen_to_descs(ioat, len); 719 + if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) 720 + idx = ioat->head; 721 else 722 return NULL; 723 i = 0; ··· 777 device->cleanup_fn((unsigned long) c); 778 device->reset_hw(chan); 779 780 + spin_lock_bh(&chan->cleanup_lock); 781 + spin_lock_bh(&ioat->prep_lock); 782 descs = ioat2_ring_space(ioat); 783 dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); 784 for (i = 0; i < descs; i++) { ··· 800 ioat->alloc_order = 0; 801 pci_pool_free(device->completion_pool, chan->completion, 802 chan->completion_dma); 803 + spin_unlock_bh(&ioat->prep_lock); 804 + spin_unlock_bh(&chan->cleanup_lock); 805 806 chan->last_completion = 0; 807 chan->completion_dma = 0; ··· 855 dma->device_issue_pending = ioat2_issue_pending; 856 dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; 857 dma->device_free_chan_resources = ioat2_free_chan_resources; 858 + dma->device_tx_status = ioat_tx_status; 859 860 err = ioat_probe(device); 861 if (err)
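
The dma_v2.c rework above replaces the single ring_lock with a producer/consumer split: ioat->prep_lock serializes descriptor producers (space check, descriptor fill, head advance in tx_submit), chan->cleanup_lock serializes the consumer (tail advance in __cleanup), and ring reshaping takes both, gated by the new IOAT_RESHAPE_PENDING bit so only one CPU grows the ring at a time. ioat2_check_space_lock() returns 0 with prep_lock held, which gives every prep routine the same pattern (mirroring the memcpy path above, error handling trimmed):

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0)
		idx = ioat->head;	/* prep_lock is now held */
	else
		return NULL;

	/* ... fill descriptors idx .. idx + num_descs - 1 ... */

	/* tx_submit later issues wmb(), does ioat->head += ioat->produce,
	 * and drops prep_lock, so cleanup never observes a half-written
	 * descriptor.
	 */
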
+12 -21
drivers/dma/ioat/dma_v2.h
··· 22 #define IOATDMA_V2_H 23 24 #include <linux/dmaengine.h> 25 #include "dma.h" 26 #include "hw.h" 27 ··· 50 * @tail: cleanup index 51 * @dmacount: identical to 'head' except for occasionally resetting to zero 52 * @alloc_order: log2 of the number of allocated descriptors 53 * @ring: software ring buffer implementation of hardware ring 54 - * @ring_lock: protects ring attributes 55 */ 56 struct ioat2_dma_chan { 57 struct ioat_chan_common base; ··· 62 u16 tail; 63 u16 dmacount; 64 u16 alloc_order; 65 struct ioat_ring_ent **ring; 66 - spinlock_t ring_lock; 67 }; 68 69 static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) ··· 74 return container_of(chan, struct ioat2_dma_chan, base); 75 } 76 77 - static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat) 78 { 79 - return (1 << ioat->alloc_order) - 1; 80 } 81 82 /* count of descriptors in flight with the engine */ 83 static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) 84 { 85 - return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat); 86 } 87 88 /* count of descriptors pending submission to hardware */ 89 static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) 90 { 91 - return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat); 92 } 93 94 static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) 95 { 96 - u16 num_descs = ioat2_ring_mask(ioat) + 1; 97 - u16 active = ioat2_ring_active(ioat); 98 - 99 - BUG_ON(active > num_descs); 100 - 101 - return num_descs - active; 102 - } 103 - 104 - /* assumes caller already checked space */ 105 - static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len) 106 - { 107 - ioat->head += len; 108 - return ioat->head - len; 109 } 110 111 static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) ··· 142 static inline struct ioat_ring_ent * 143 ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) 144 { 145 - return ioat->ring[idx & ioat2_ring_mask(ioat)]; 146 } 147 148 static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) ··· 159 int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); 160 struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 161 struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); 162 - int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs); 163 int ioat2_enumerate_channels(struct ioatdma_device *device); 164 struct dma_async_tx_descriptor * 165 ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
··· 22 #define IOATDMA_V2_H 23 24 #include <linux/dmaengine.h> 25 + #include <linux/circ_buf.h> 26 #include "dma.h" 27 #include "hw.h" 28 ··· 49 * @tail: cleanup index 50 * @dmacount: identical to 'head' except for occasionally resetting to zero 51 * @alloc_order: log2 of the number of allocated descriptors 52 + * @produce: number of descriptors to produce at submit time 53 * @ring: software ring buffer implementation of hardware ring 54 + * @prep_lock: serializes descriptor preparation (producers) 55 */ 56 struct ioat2_dma_chan { 57 struct ioat_chan_common base; ··· 60 u16 tail; 61 u16 dmacount; 62 u16 alloc_order; 63 + u16 produce; 64 struct ioat_ring_ent **ring; 65 + spinlock_t prep_lock; 66 }; 67 68 static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) ··· 71 return container_of(chan, struct ioat2_dma_chan, base); 72 } 73 74 + static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat) 75 { 76 + return 1 << ioat->alloc_order; 77 } 78 79 /* count of descriptors in flight with the engine */ 80 static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) 81 { 82 + return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat)); 83 } 84 85 /* count of descriptors pending submission to hardware */ 86 static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) 87 { 88 + return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat)); 89 } 90 91 static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) 92 { 93 + return ioat2_ring_size(ioat) - ioat2_ring_active(ioat); 94 } 95 96 static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) ··· 151 static inline struct ioat_ring_ent * 152 ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) 153 { 154 + return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)]; 155 } 156 157 static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) ··· 168 int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); 169 struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); 170 struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); 171 + int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); 172 int ioat2_enumerate_channels(struct ioatdma_device *device); 173 struct dma_async_tx_descriptor * 174 ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
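
dma_v2.h drops the explicit ring mask in favour of CIRC_CNT() from <linux/circ_buf.h>, so head, tail and issued become free-running u16 counters and the ring size is recomputed from alloc_order; the producer side (ioat2_check_space_lock) still keeps one descriptor in reserve by requiring space strictly greater than the request. For reference, the macro is plain masked subtraction on a power-of-two ring:

	#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

	/* e.g. with size = 16, head = 0x0003 (wrapped) and tail = 0xfffe:
	 * CIRC_CNT() = (0x0003 - 0xfffe) & 0xf = 5 descriptors active.
	 */
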
+48 -95
drivers/dma/ioat/dma_v3.c
··· 260 struct ioat_chan_common *chan = &ioat->base; 261 struct ioat_ring_ent *desc; 262 bool seen_current = false; 263 u16 active; 264 - int i; 265 266 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 267 __func__, ioat->head, ioat->tail, ioat->issued); ··· 270 for (i = 0; i < active && !seen_current; i++) { 271 struct dma_async_tx_descriptor *tx; 272 273 - prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); 274 - desc = ioat2_get_ring_ent(ioat, ioat->tail + i); 275 dump_desc_dbg(ioat, desc); 276 tx = &desc->txd; 277 if (tx->cookie) { 278 chan->completed_cookie = tx->cookie; 279 - ioat3_dma_unmap(ioat, desc, ioat->tail + i); 280 tx->cookie = 0; 281 if (tx->callback) { 282 tx->callback(tx->callback_param); ··· 294 i++; 295 } 296 } 297 - ioat->tail += i; 298 BUG_ON(active && !seen_current); /* no active descs have written a completion? */ 299 chan->last_completion = phys_complete; 300 301 - active = ioat2_ring_active(ioat); 302 - if (active == 0) { 303 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 304 __func__); 305 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 306 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 307 } 308 /* 5 microsecond delay per pending descriptor */ 309 - writew(min((5 * active), IOAT_INTRDELAY_MASK), 310 chan->device->reg_base + IOAT_INTRDELAY_OFFSET); 311 } 312 313 - /* try to cleanup, but yield (via spin_trylock) to incoming submissions 314 - * with the expectation that we will immediately poll again shortly 315 - */ 316 - static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat) 317 { 318 struct ioat_chan_common *chan = &ioat->base; 319 unsigned long phys_complete; 320 - 321 - prefetch(chan->completion); 322 - 323 - if (!spin_trylock_bh(&chan->cleanup_lock)) 324 - return; 325 - 326 - if (!ioat_cleanup_preamble(chan, &phys_complete)) { 327 - spin_unlock_bh(&chan->cleanup_lock); 328 - return; 329 - } 330 - 331 - if (!spin_trylock_bh(&ioat->ring_lock)) { 332 - spin_unlock_bh(&chan->cleanup_lock); 333 - return; 334 - } 335 - 336 - __cleanup(ioat, phys_complete); 337 - 338 - spin_unlock_bh(&ioat->ring_lock); 339 - spin_unlock_bh(&chan->cleanup_lock); 340 - } 341 - 342 - /* run cleanup now because we already delayed the interrupt via INTRDELAY */ 343 - static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat) 344 - { 345 - struct ioat_chan_common *chan = &ioat->base; 346 - unsigned long phys_complete; 347 - 348 - prefetch(chan->completion); 349 350 spin_lock_bh(&chan->cleanup_lock); 351 - if (!ioat_cleanup_preamble(chan, &phys_complete)) { 352 - spin_unlock_bh(&chan->cleanup_lock); 353 - return; 354 - } 355 - spin_lock_bh(&ioat->ring_lock); 356 - 357 - __cleanup(ioat, phys_complete); 358 - 359 - spin_unlock_bh(&ioat->ring_lock); 360 spin_unlock_bh(&chan->cleanup_lock); 361 } 362 ··· 325 { 326 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 327 328 - ioat3_cleanup_sync(ioat); 329 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 330 } 331 ··· 346 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 347 struct ioat_chan_common *chan = &ioat->base; 348 349 - spin_lock_bh(&chan->cleanup_lock); 350 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 351 unsigned long phys_complete; 352 u64 status; 353 354 - spin_lock_bh(&ioat->ring_lock); 355 status = ioat_chansts(chan); 356 357 /* when halted due to errors check for channel ··· 368 * acknowledged a pending completion once, then be more 369 * forceful with a restart 370 */ 371 if (ioat_cleanup_preamble(chan, &phys_complete)) 372 __cleanup(ioat, 
phys_complete); 373 - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) 374 ioat3_restart_channel(ioat); 375 - else { 376 set_bit(IOAT_COMPLETION_ACK, &chan->state); 377 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 378 } 379 - spin_unlock_bh(&ioat->ring_lock); 380 } else { 381 u16 active; 382 383 /* if the ring is idle, empty, and oversized try to step 384 * down the size 385 */ 386 - spin_lock_bh(&ioat->ring_lock); 387 active = ioat2_ring_active(ioat); 388 if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) 389 reshape_ring(ioat, ioat->alloc_order-1); 390 - spin_unlock_bh(&ioat->ring_lock); 391 392 /* keep shrinking until we get back to our minimum 393 * default size ··· 400 if (ioat->alloc_order > ioat_get_alloc_order()) 401 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 402 } 403 - spin_unlock_bh(&chan->cleanup_lock); 404 } 405 406 static enum dma_status 407 - ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, 408 - dma_cookie_t *done, dma_cookie_t *used) 409 { 410 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 411 412 - if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) 413 return DMA_SUCCESS; 414 415 - ioat3_cleanup_poll(ioat); 416 417 - return ioat_is_complete(c, cookie, done, used); 418 } 419 420 static struct dma_async_tx_descriptor * ··· 424 struct ioat_ring_ent *desc; 425 size_t total_len = len; 426 struct ioat_fill_descriptor *fill; 427 - int num_descs; 428 u64 src_data = (0x0101010101010101ULL) * (value & 0xff); 429 - u16 idx; 430 - int i; 431 432 num_descs = ioat2_xferlen_to_descs(ioat, len); 433 - if (likely(num_descs) && 434 - ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) 435 - /* pass */; 436 else 437 return NULL; 438 i = 0; ··· 474 struct ioat_xor_descriptor *xor; 475 struct ioat_xor_ext_descriptor *xor_ex = NULL; 476 struct ioat_dma_descriptor *hw; 477 u32 offset = 0; 478 - int num_descs; 479 - int with_ext; 480 - int i; 481 - u16 idx; 482 u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; 483 484 BUG_ON(src_cnt < 2); ··· 495 * (legacy) descriptor to ensure all completion writes arrive in 496 * order. 497 */ 498 - if (likely(num_descs) && 499 - ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) 500 - /* pass */; 501 else 502 return NULL; 503 i = 0; ··· 614 struct ioat_pq_ext_descriptor *pq_ex = NULL; 615 struct ioat_dma_descriptor *hw; 616 u32 offset = 0; 617 - int num_descs; 618 - int with_ext; 619 - int i, s; 620 - u16 idx; 621 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; 622 623 dev_dbg(to_dev(chan), "%s\n", __func__); 624 /* the engine requires at least two sources (we provide ··· 641 * order. 
642 */ 643 if (likely(num_descs) && 644 - ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) 645 - /* pass */; 646 else 647 return NULL; 648 i = 0; ··· 805 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 806 struct ioat_ring_ent *desc; 807 struct ioat_dma_descriptor *hw; 808 - u16 idx; 809 810 - if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0) 811 - desc = ioat2_get_ring_ent(ioat, idx); 812 else 813 return NULL; 814 ··· 930 931 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 932 933 - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 934 dev_err(dev, "Self-test xor timed out\n"); 935 err = -ENODEV; 936 goto free_resources; ··· 984 985 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 986 987 - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 988 dev_err(dev, "Self-test validate timed out\n"); 989 err = -ENODEV; 990 goto free_resources; ··· 1025 1026 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1027 1028 - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1029 dev_err(dev, "Self-test memset timed out\n"); 1030 err = -ENODEV; 1031 goto free_resources; ··· 1068 1069 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1070 1071 - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1072 dev_err(dev, "Self-test 2nd validate timed out\n"); 1073 err = -ENODEV; 1074 goto free_resources; ··· 1175 if (cap & IOAT_CAP_XOR) { 1176 is_raid_device = true; 1177 dma->max_xor = 8; 1178 - dma->xor_align = 2; 1179 1180 dma_cap_set(DMA_XOR, dma->cap_mask); 1181 dma->device_prep_dma_xor = ioat3_prep_xor; ··· 1186 if (cap & IOAT_CAP_PQ) { 1187 is_raid_device = true; 1188 dma_set_maxpq(dma, 8, 0); 1189 - dma->pq_align = 2; 1190 1191 dma_cap_set(DMA_PQ, dma->cap_mask); 1192 dma->device_prep_dma_pq = ioat3_prep_pq; ··· 1196 1197 if (!(cap & IOAT_CAP_XOR)) { 1198 dma->max_xor = 8; 1199 - dma->xor_align = 2; 1200 1201 dma_cap_set(DMA_XOR, dma->cap_mask); 1202 dma->device_prep_dma_xor = ioat3_prep_pqxor; ··· 1212 1213 1214 if (is_raid_device) { 1215 - dma->device_is_tx_complete = ioat3_is_complete; 1216 device->cleanup_fn = ioat3_cleanup_event; 1217 device->timer_fn = ioat3_timer_event; 1218 } else { 1219 - dma->device_is_tx_complete = ioat_is_dma_complete; 1220 device->cleanup_fn = ioat2_cleanup_event; 1221 device->timer_fn = ioat2_timer_event; 1222 }
··· 260 struct ioat_chan_common *chan = &ioat->base; 261 struct ioat_ring_ent *desc; 262 bool seen_current = false; 263 + int idx = ioat->tail, i; 264 u16 active; 265 266 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", 267 __func__, ioat->head, ioat->tail, ioat->issued); ··· 270 for (i = 0; i < active && !seen_current; i++) { 271 struct dma_async_tx_descriptor *tx; 272 273 + smp_read_barrier_depends(); 274 + prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); 275 + desc = ioat2_get_ring_ent(ioat, idx + i); 276 dump_desc_dbg(ioat, desc); 277 tx = &desc->txd; 278 if (tx->cookie) { 279 chan->completed_cookie = tx->cookie; 280 + ioat3_dma_unmap(ioat, desc, idx + i); 281 tx->cookie = 0; 282 if (tx->callback) { 283 tx->callback(tx->callback_param); ··· 293 i++; 294 } 295 } 296 + smp_mb(); /* finish all descriptor reads before incrementing tail */ 297 + ioat->tail = idx + i; 298 BUG_ON(active && !seen_current); /* no active descs have written a completion? */ 299 chan->last_completion = phys_complete; 300 301 + if (active - i == 0) { 302 dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", 303 __func__); 304 clear_bit(IOAT_COMPLETION_PENDING, &chan->state); 305 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 306 } 307 /* 5 microsecond delay per pending descriptor */ 308 + writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), 309 chan->device->reg_base + IOAT_INTRDELAY_OFFSET); 310 } 311 312 + static void ioat3_cleanup(struct ioat2_dma_chan *ioat) 313 { 314 struct ioat_chan_common *chan = &ioat->base; 315 unsigned long phys_complete; 316 317 spin_lock_bh(&chan->cleanup_lock); 318 + if (ioat_cleanup_preamble(chan, &phys_complete)) 319 + __cleanup(ioat, phys_complete); 320 spin_unlock_bh(&chan->cleanup_lock); 321 } 322 ··· 363 { 364 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 365 366 + ioat3_cleanup(ioat); 367 writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); 368 } 369 ··· 384 struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); 385 struct ioat_chan_common *chan = &ioat->base; 386 387 if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { 388 unsigned long phys_complete; 389 u64 status; 390 391 status = ioat_chansts(chan); 392 393 /* when halted due to errors check for channel ··· 408 * acknowledged a pending completion once, then be more 409 * forceful with a restart 410 */ 411 + spin_lock_bh(&chan->cleanup_lock); 412 if (ioat_cleanup_preamble(chan, &phys_complete)) 413 __cleanup(ioat, phys_complete); 414 + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { 415 + spin_lock_bh(&ioat->prep_lock); 416 ioat3_restart_channel(ioat); 417 + spin_unlock_bh(&ioat->prep_lock); 418 + } else { 419 set_bit(IOAT_COMPLETION_ACK, &chan->state); 420 mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); 421 } 422 + spin_unlock_bh(&chan->cleanup_lock); 423 } else { 424 u16 active; 425 426 /* if the ring is idle, empty, and oversized try to step 427 * down the size 428 */ 429 + spin_lock_bh(&chan->cleanup_lock); 430 + spin_lock_bh(&ioat->prep_lock); 431 active = ioat2_ring_active(ioat); 432 if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) 433 reshape_ring(ioat, ioat->alloc_order-1); 434 + spin_unlock_bh(&ioat->prep_lock); 435 + spin_unlock_bh(&chan->cleanup_lock); 436 437 /* keep shrinking until we get back to our minimum 438 * default size ··· 435 if (ioat->alloc_order > ioat_get_alloc_order()) 436 mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); 437 } 438 } 439 440 static enum dma_status 441 + ioat3_tx_status(struct dma_chan *c, 
dma_cookie_t cookie, 442 + struct dma_tx_state *txstate) 443 { 444 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 445 446 + if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) 447 return DMA_SUCCESS; 448 449 + ioat3_cleanup(ioat); 450 451 + return ioat_tx_status(c, cookie, txstate); 452 } 453 454 static struct dma_async_tx_descriptor * ··· 460 struct ioat_ring_ent *desc; 461 size_t total_len = len; 462 struct ioat_fill_descriptor *fill; 463 u64 src_data = (0x0101010101010101ULL) * (value & 0xff); 464 + int num_descs, idx, i; 465 466 num_descs = ioat2_xferlen_to_descs(ioat, len); 467 + if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) 468 + idx = ioat->head; 469 else 470 return NULL; 471 i = 0; ··· 513 struct ioat_xor_descriptor *xor; 514 struct ioat_xor_ext_descriptor *xor_ex = NULL; 515 struct ioat_dma_descriptor *hw; 516 + int num_descs, with_ext, idx, i; 517 u32 offset = 0; 518 u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; 519 520 BUG_ON(src_cnt < 2); ··· 537 * (legacy) descriptor to ensure all completion writes arrive in 538 * order. 539 */ 540 + if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0) 541 + idx = ioat->head; 542 else 543 return NULL; 544 i = 0; ··· 657 struct ioat_pq_ext_descriptor *pq_ex = NULL; 658 struct ioat_dma_descriptor *hw; 659 u32 offset = 0; 660 u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; 661 + int i, s, idx, with_ext, num_descs; 662 663 dev_dbg(to_dev(chan), "%s\n", __func__); 664 /* the engine requires at least two sources (we provide ··· 687 * order. 688 */ 689 if (likely(num_descs) && 690 + ioat2_check_space_lock(ioat, num_descs+1) == 0) 691 + idx = ioat->head; 692 else 693 return NULL; 694 i = 0; ··· 851 struct ioat2_dma_chan *ioat = to_ioat2_chan(c); 852 struct ioat_ring_ent *desc; 853 struct ioat_dma_descriptor *hw; 854 855 + if (ioat2_check_space_lock(ioat, 1) == 0) 856 + desc = ioat2_get_ring_ent(ioat, ioat->head); 857 else 858 return NULL; 859 ··· 977 978 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 979 980 + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 981 dev_err(dev, "Self-test xor timed out\n"); 982 err = -ENODEV; 983 goto free_resources; ··· 1031 1032 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1033 1034 + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1035 dev_err(dev, "Self-test validate timed out\n"); 1036 err = -ENODEV; 1037 goto free_resources; ··· 1072 1073 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1074 1075 + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1076 dev_err(dev, "Self-test memset timed out\n"); 1077 err = -ENODEV; 1078 goto free_resources; ··· 1115 1116 tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); 1117 1118 + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1119 dev_err(dev, "Self-test 2nd validate timed out\n"); 1120 err = -ENODEV; 1121 goto free_resources; ··· 1222 if (cap & IOAT_CAP_XOR) { 1223 is_raid_device = true; 1224 dma->max_xor = 8; 1225 + dma->xor_align = 6; 1226 1227 dma_cap_set(DMA_XOR, dma->cap_mask); 1228 dma->device_prep_dma_xor = ioat3_prep_xor; ··· 1233 if (cap & IOAT_CAP_PQ) { 1234 is_raid_device = true; 1235 dma_set_maxpq(dma, 8, 0); 1236 + dma->pq_align = 6; 1237 1238 dma_cap_set(DMA_PQ, dma->cap_mask); 1239 dma->device_prep_dma_pq = ioat3_prep_pq; ··· 1243 1244 if (!(cap & IOAT_CAP_XOR)) { 1245 dma->max_xor = 8; 1246 + dma->xor_align = 6; 1247 1248 dma_cap_set(DMA_XOR, dma->cap_mask); 1249 
dma->device_prep_dma_xor = ioat3_prep_pqxor; ··· 1259 1260 1261 if (is_raid_device) { 1262 + dma->device_tx_status = ioat3_tx_status; 1263 device->cleanup_fn = ioat3_cleanup_event; 1264 device->timer_fn = ioat3_timer_event; 1265 } else { 1266 + dma->device_tx_status = ioat_dma_tx_status; 1267 device->cleanup_fn = ioat2_cleanup_event; 1268 device->timer_fn = ioat2_timer_event; 1269 }
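
Two separate changes meet in the dma_v3.c hunk: the tx_status conversion (ioat3_tx_status for the raid-capable channels, ioat_dma_tx_status otherwise) and the "disable cacheline-unaligned transfers for raid operations" fix, which raises xor_align/pq_align from 2 to 6, i.e. from 4-byte to 64-byte alignment, since those fields are log2 shifts consulted before a transfer is offloaded. A sketch of what align = 6 implies (an illustrative helper, not the core's exact one):

	static bool aligned_for_offload(u8 align, size_t src_off,
					size_t dst_off, size_t len)
	{
		size_t mask = ((size_t)1 << align) - 1;	/* align 6 -> mask 63 */

		/* offsets and length must all be cacheline multiples */
		return ((src_off | dst_off | len) & mask) == 0;
	}
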
+1 -6
drivers/dma/ioat/pci.c
··· 138 if (err) 139 return err; 140 141 - device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL); 142 - if (!device) 143 - return -ENOMEM; 144 - 145 - pci_set_master(pdev); 146 - 147 device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); 148 if (!device) 149 return -ENOMEM; 150 pci_set_drvdata(pdev, device); 151 152 device->version = readb(device->reg_base + IOAT_VER_OFFSET);
··· 138 if (err) 139 return err; 140 141 device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]); 142 if (!device) 143 return -ENOMEM; 144 + pci_set_master(pdev); 145 pci_set_drvdata(pdev, device); 146 147 device->version = readb(device->reg_base + IOAT_VER_OFFSET);
+15 -24
drivers/dma/iop-adma.c
··· 894 } 895 896 /** 897 - * iop_adma_is_complete - poll the status of an ADMA transaction 898 * @chan: ADMA channel handle 899 * @cookie: ADMA transaction identifier 900 */ 901 - static enum dma_status iop_adma_is_complete(struct dma_chan *chan, 902 dma_cookie_t cookie, 903 - dma_cookie_t *done, 904 - dma_cookie_t *used) 905 { 906 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 907 dma_cookie_t last_used; ··· 910 911 last_used = chan->cookie; 912 last_complete = iop_chan->completed_cookie; 913 - 914 - if (done) 915 - *done = last_complete; 916 - if (used) 917 - *used = last_used; 918 - 919 ret = dma_async_is_complete(cookie, last_complete, last_used); 920 if (ret == DMA_SUCCESS) 921 return ret; ··· 919 920 last_used = chan->cookie; 921 last_complete = iop_chan->completed_cookie; 922 - 923 - if (done) 924 - *done = last_complete; 925 - if (used) 926 - *used = last_used; 927 928 return dma_async_is_complete(cookie, last_complete, last_used); 929 } ··· 1034 iop_adma_issue_pending(dma_chan); 1035 msleep(1); 1036 1037 - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1038 DMA_SUCCESS) { 1039 dev_printk(KERN_ERR, dma_chan->device->dev, 1040 "Self-test copy timed out, disabling\n"); ··· 1134 iop_adma_issue_pending(dma_chan); 1135 msleep(8); 1136 1137 - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1138 DMA_SUCCESS) { 1139 dev_printk(KERN_ERR, dma_chan->device->dev, 1140 "Self-test xor timed out, disabling\n"); ··· 1181 iop_adma_issue_pending(dma_chan); 1182 msleep(8); 1183 1184 - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1185 dev_printk(KERN_ERR, dma_chan->device->dev, 1186 "Self-test zero sum timed out, disabling\n"); 1187 err = -ENODEV; ··· 1205 iop_adma_issue_pending(dma_chan); 1206 msleep(8); 1207 1208 - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1209 dev_printk(KERN_ERR, dma_chan->device->dev, 1210 "Self-test memset timed out, disabling\n"); 1211 err = -ENODEV; ··· 1237 iop_adma_issue_pending(dma_chan); 1238 msleep(8); 1239 1240 - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { 1241 dev_printk(KERN_ERR, dma_chan->device->dev, 1242 "Self-test non-zero sum timed out, disabling\n"); 1243 err = -ENODEV; ··· 1332 iop_adma_issue_pending(dma_chan); 1333 msleep(8); 1334 1335 - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1336 DMA_SUCCESS) { 1337 dev_err(dev, "Self-test pq timed out, disabling\n"); 1338 err = -ENODEV; ··· 1369 iop_adma_issue_pending(dma_chan); 1370 msleep(8); 1371 1372 - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1373 DMA_SUCCESS) { 1374 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); 1375 err = -ENODEV; ··· 1401 iop_adma_issue_pending(dma_chan); 1402 msleep(8); 1403 1404 - if (iop_adma_is_complete(dma_chan, cookie, NULL, NULL) != 1405 DMA_SUCCESS) { 1406 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); 1407 err = -ENODEV; ··· 1499 /* set base routines */ 1500 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources; 1501 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; 1502 - dma_dev->device_is_tx_complete = iop_adma_is_complete; 1503 dma_dev->device_issue_pending = iop_adma_issue_pending; 1504 dma_dev->dev = &pdev->dev; 1505
··· 894 } 895 896 /** 897 + * iop_adma_status - poll the status of an ADMA transaction 898 * @chan: ADMA channel handle 899 * @cookie: ADMA transaction identifier 900 + * @txstate: a holder for the current state of the channel or NULL 901 */ 902 + static enum dma_status iop_adma_status(struct dma_chan *chan, 903 dma_cookie_t cookie, 904 + struct dma_tx_state *txstate) 905 { 906 struct iop_adma_chan *iop_chan = to_iop_adma_chan(chan); 907 dma_cookie_t last_used; ··· 910 911 last_used = chan->cookie; 912 last_complete = iop_chan->completed_cookie; 913 + dma_set_tx_state(txstate, last_complete, last_used, 0); 914 ret = dma_async_is_complete(cookie, last_complete, last_used); 915 if (ret == DMA_SUCCESS) 916 return ret; ··· 924 925 last_used = chan->cookie; 926 last_complete = iop_chan->completed_cookie; 927 + dma_set_tx_state(txstate, last_complete, last_used, 0); 928 929 return dma_async_is_complete(cookie, last_complete, last_used); 930 } ··· 1043 iop_adma_issue_pending(dma_chan); 1044 msleep(1); 1045 1046 + if (iop_adma_status(dma_chan, cookie, NULL) != 1047 DMA_SUCCESS) { 1048 dev_printk(KERN_ERR, dma_chan->device->dev, 1049 "Self-test copy timed out, disabling\n"); ··· 1143 iop_adma_issue_pending(dma_chan); 1144 msleep(8); 1145 1146 + if (iop_adma_status(dma_chan, cookie, NULL) != 1147 DMA_SUCCESS) { 1148 dev_printk(KERN_ERR, dma_chan->device->dev, 1149 "Self-test xor timed out, disabling\n"); ··· 1190 iop_adma_issue_pending(dma_chan); 1191 msleep(8); 1192 1193 + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1194 dev_printk(KERN_ERR, dma_chan->device->dev, 1195 "Self-test zero sum timed out, disabling\n"); 1196 err = -ENODEV; ··· 1214 iop_adma_issue_pending(dma_chan); 1215 msleep(8); 1216 1217 + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1218 dev_printk(KERN_ERR, dma_chan->device->dev, 1219 "Self-test memset timed out, disabling\n"); 1220 err = -ENODEV; ··· 1246 iop_adma_issue_pending(dma_chan); 1247 msleep(8); 1248 1249 + if (iop_adma_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { 1250 dev_printk(KERN_ERR, dma_chan->device->dev, 1251 "Self-test non-zero sum timed out, disabling\n"); 1252 err = -ENODEV; ··· 1341 iop_adma_issue_pending(dma_chan); 1342 msleep(8); 1343 1344 + if (iop_adma_status(dma_chan, cookie, NULL) != 1345 DMA_SUCCESS) { 1346 dev_err(dev, "Self-test pq timed out, disabling\n"); 1347 err = -ENODEV; ··· 1378 iop_adma_issue_pending(dma_chan); 1379 msleep(8); 1380 1381 + if (iop_adma_status(dma_chan, cookie, NULL) != 1382 DMA_SUCCESS) { 1383 dev_err(dev, "Self-test pq-zero-sum timed out, disabling\n"); 1384 err = -ENODEV; ··· 1410 iop_adma_issue_pending(dma_chan); 1411 msleep(8); 1412 1413 + if (iop_adma_status(dma_chan, cookie, NULL) != 1414 DMA_SUCCESS) { 1415 dev_err(dev, "Self-test !pq-zero-sum timed out, disabling\n"); 1416 err = -ENODEV; ··· 1508 /* set base routines */ 1509 dma_dev->device_alloc_chan_resources = iop_adma_alloc_chan_resources; 1510 dma_dev->device_free_chan_resources = iop_adma_free_chan_resources; 1511 + dma_dev->device_tx_status = iop_adma_status; 1512 dma_dev->device_issue_pending = iop_adma_issue_pending; 1513 dma_dev->dev = &pdev->dev; 1514
+21 -13
drivers/dma/ipu/ipu_idmac.c
··· 1472 */ 1473 } 1474 1475 - static void __idmac_terminate_all(struct dma_chan *chan) 1476 { 1477 struct idmac_channel *ichan = to_idmac_chan(chan); 1478 struct idmac *idmac = to_idmac(chan->device); 1479 unsigned long flags; 1480 int i; 1481 1482 ipu_disable_channel(idmac, ichan, 1483 ichan->status >= IPU_CHANNEL_ENABLED); ··· 1510 tasklet_enable(&to_ipu(idmac)->tasklet); 1511 1512 ichan->status = IPU_CHANNEL_INITIALIZED; 1513 } 1514 1515 - static void idmac_terminate_all(struct dma_chan *chan) 1516 { 1517 struct idmac_channel *ichan = to_idmac_chan(chan); 1518 1519 mutex_lock(&ichan->chan_mutex); 1520 1521 - __idmac_terminate_all(chan); 1522 1523 mutex_unlock(&ichan->chan_mutex); 1524 } 1525 1526 #ifdef DEBUG ··· 1618 1619 mutex_lock(&ichan->chan_mutex); 1620 1621 - __idmac_terminate_all(chan); 1622 1623 if (ichan->status > IPU_CHANNEL_FREE) { 1624 #ifdef DEBUG ··· 1648 tasklet_schedule(&to_ipu(idmac)->tasklet); 1649 } 1650 1651 - static enum dma_status idmac_is_tx_complete(struct dma_chan *chan, 1652 - dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) 1653 { 1654 struct idmac_channel *ichan = to_idmac_chan(chan); 1655 1656 - if (done) 1657 - *done = ichan->completed; 1658 - if (used) 1659 - *used = chan->cookie; 1660 if (cookie != chan->cookie) 1661 return DMA_ERROR; 1662 return DMA_SUCCESS; ··· 1672 dma->dev = ipu->dev; 1673 dma->device_alloc_chan_resources = idmac_alloc_chan_resources; 1674 dma->device_free_chan_resources = idmac_free_chan_resources; 1675 - dma->device_is_tx_complete = idmac_is_tx_complete; 1676 dma->device_issue_pending = idmac_issue_pending; 1677 1678 /* Compulsory for DMA_SLAVE fields */ 1679 dma->device_prep_slave_sg = idmac_prep_slave_sg; 1680 - dma->device_terminate_all = idmac_terminate_all; 1681 1682 INIT_LIST_HEAD(&dma->channels); 1683 for (i = 0; i < IPU_CHANNELS_NUM; i++) { ··· 1711 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1712 struct idmac_channel *ichan = ipu->channel + i; 1713 1714 - idmac_terminate_all(&ichan->dma_chan); 1715 idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0); 1716 } 1717
··· 1472 */ 1473 } 1474 1475 + static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1476 + unsigned long arg) 1477 { 1478 struct idmac_channel *ichan = to_idmac_chan(chan); 1479 struct idmac *idmac = to_idmac(chan->device); 1480 unsigned long flags; 1481 int i; 1482 + 1483 + /* Only supports DMA_TERMINATE_ALL */ 1484 + if (cmd != DMA_TERMINATE_ALL) 1485 + return -ENXIO; 1486 1487 ipu_disable_channel(idmac, ichan, 1488 ichan->status >= IPU_CHANNEL_ENABLED); ··· 1505 tasklet_enable(&to_ipu(idmac)->tasklet); 1506 1507 ichan->status = IPU_CHANNEL_INITIALIZED; 1508 + 1509 + return 0; 1510 } 1511 1512 + static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 1513 + unsigned long arg) 1514 { 1515 struct idmac_channel *ichan = to_idmac_chan(chan); 1516 + int ret; 1517 1518 mutex_lock(&ichan->chan_mutex); 1519 1520 + ret = __idmac_control(chan, cmd, arg); 1521 1522 mutex_unlock(&ichan->chan_mutex); 1523 + 1524 + return ret; 1525 } 1526 1527 #ifdef DEBUG ··· 1607 1608 mutex_lock(&ichan->chan_mutex); 1609 1610 + __idmac_control(chan, DMA_TERMINATE_ALL, 0); 1611 1612 if (ichan->status > IPU_CHANNEL_FREE) { 1613 #ifdef DEBUG ··· 1637 tasklet_schedule(&to_ipu(idmac)->tasklet); 1638 } 1639 1640 + static enum dma_status idmac_tx_status(struct dma_chan *chan, 1641 + dma_cookie_t cookie, struct dma_tx_state *txstate) 1642 { 1643 struct idmac_channel *ichan = to_idmac_chan(chan); 1644 1645 + dma_set_tx_state(txstate, ichan->completed, chan->cookie, 0); 1646 if (cookie != chan->cookie) 1647 return DMA_ERROR; 1648 return DMA_SUCCESS; ··· 1664 dma->dev = ipu->dev; 1665 dma->device_alloc_chan_resources = idmac_alloc_chan_resources; 1666 dma->device_free_chan_resources = idmac_free_chan_resources; 1667 + dma->device_tx_status = idmac_tx_status; 1668 dma->device_issue_pending = idmac_issue_pending; 1669 1670 /* Compulsory for DMA_SLAVE fields */ 1671 dma->device_prep_slave_sg = idmac_prep_slave_sg; 1672 + dma->device_control = idmac_control; 1673 1674 INIT_LIST_HEAD(&dma->channels); 1675 for (i = 0; i < IPU_CHANNELS_NUM; i++) { ··· 1703 for (i = 0; i < IPU_CHANNELS_NUM; i++) { 1704 struct idmac_channel *ichan = ipu->channel + i; 1705 1706 + idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL, 0); 1707 idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0); 1708 } 1709
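
ipu_idmac is converted to the multiplexed control hook introduced by "generic slave control v2" plus "extend the control command to include an arg": device_terminate_all becomes device_control(chan, cmd, arg), the driver rejects commands it does not implement with -ENXIO, and internal callers (free_chan_resources, module exit) go through the same entry point. Client-side, termination now looks roughly like this (illustrative names; the arg is unused for DMA_TERMINATE_ALL here, so 0 is passed):

	int ret;

	ret = chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
	if (ret == -ENXIO)
		; /* the channel does not implement this command */
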
+5 -10
drivers/dma/mpc512x_dma.c
··· 541 542 /* Check request completion status */ 543 static enum dma_status 544 - mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie, 545 - dma_cookie_t *done, dma_cookie_t *used) 546 { 547 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 548 unsigned long flags; ··· 554 last_complete = mchan->completed_cookie; 555 spin_unlock_irqrestore(&mchan->lock, flags); 556 557 - if (done) 558 - *done = last_complete; 559 - 560 - if (used) 561 - *used = last_used; 562 - 563 return dma_async_is_complete(cookie, last_complete, last_used); 564 } 565 ··· 658 } 659 660 regs_start = res.start; 661 - regs_size = res.end - res.start + 1; 662 663 if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { 664 dev_err(dev, "Error requesting memory region!\n"); ··· 689 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; 690 dma->device_free_chan_resources = mpc_dma_free_chan_resources; 691 dma->device_issue_pending = mpc_dma_issue_pending; 692 - dma->device_is_tx_complete = mpc_dma_is_tx_complete; 693 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; 694 695 INIT_LIST_HEAD(&dma->channels);
··· 541 542 /* Check request completion status */ 543 static enum dma_status 544 + mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 545 + struct dma_tx_state *txstate) 546 { 547 struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan); 548 unsigned long flags; ··· 554 last_complete = mchan->completed_cookie; 555 spin_unlock_irqrestore(&mchan->lock, flags); 556 557 + dma_set_tx_state(txstate, last_complete, last_used, 0); 558 return dma_async_is_complete(cookie, last_complete, last_used); 559 } 560 ··· 663 } 664 665 regs_start = res.start; 666 + regs_size = resource_size(&res); 667 668 if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) { 669 dev_err(dev, "Error requesting memory region!\n"); ··· 694 dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources; 695 dma->device_free_chan_resources = mpc_dma_free_chan_resources; 696 dma->device_issue_pending = mpc_dma_issue_pending; 697 + dma->device_tx_status = mpc_dma_tx_status; 698 dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy; 699 700 INIT_LIST_HEAD(&dma->channels);
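
Besides the tx_status conversion, mpc512x_dma now derives the register window length with resource_size() instead of open-coding end - start + 1, which is easy to get wrong because resource end addresses are inclusive. For reference, the helper in <linux/ioport.h> is simply:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}
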
+9 -16
drivers/dma/mv_xor.c
··· 810 } 811 812 /** 813 - * mv_xor_is_complete - poll the status of an XOR transaction 814 * @chan: XOR channel handle 815 * @cookie: XOR transaction identifier 816 */ 817 - static enum dma_status mv_xor_is_complete(struct dma_chan *chan, 818 dma_cookie_t cookie, 819 - dma_cookie_t *done, 820 - dma_cookie_t *used) 821 { 822 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 823 dma_cookie_t last_used; ··· 827 last_used = chan->cookie; 828 last_complete = mv_chan->completed_cookie; 829 mv_chan->is_complete_cookie = cookie; 830 - if (done) 831 - *done = last_complete; 832 - if (used) 833 - *used = last_used; 834 835 ret = dma_async_is_complete(cookie, last_complete, last_used); 836 if (ret == DMA_SUCCESS) { ··· 839 last_used = chan->cookie; 840 last_complete = mv_chan->completed_cookie; 841 842 - if (done) 843 - *done = last_complete; 844 - if (used) 845 - *used = last_used; 846 - 847 return dma_async_is_complete(cookie, last_complete, last_used); 848 } 849 ··· 968 async_tx_ack(tx); 969 msleep(1); 970 971 - if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != 972 DMA_SUCCESS) { 973 dev_printk(KERN_ERR, dma_chan->device->dev, 974 "Self-test copy timed out, disabling\n"); ··· 1066 async_tx_ack(tx); 1067 msleep(8); 1068 1069 - if (mv_xor_is_complete(dma_chan, cookie, NULL, NULL) != 1070 DMA_SUCCESS) { 1071 dev_printk(KERN_ERR, dma_chan->device->dev, 1072 "Self-test xor timed out, disabling\n"); ··· 1161 /* set base routines */ 1162 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; 1163 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1164 - dma_dev->device_is_tx_complete = mv_xor_is_complete; 1165 dma_dev->device_issue_pending = mv_xor_issue_pending; 1166 dma_dev->dev = &pdev->dev; 1167
··· 810 } 811 812 /** 813 + * mv_xor_status - poll the status of an XOR transaction 814 * @chan: XOR channel handle 815 * @cookie: XOR transaction identifier 816 + * @txstate: XOR transactions state holder (or NULL) 817 */ 818 + static enum dma_status mv_xor_status(struct dma_chan *chan, 819 dma_cookie_t cookie, 820 + struct dma_tx_state *txstate) 821 { 822 struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan); 823 dma_cookie_t last_used; ··· 827 last_used = chan->cookie; 828 last_complete = mv_chan->completed_cookie; 829 mv_chan->is_complete_cookie = cookie; 830 + dma_set_tx_state(txstate, last_complete, last_used, 0); 831 832 ret = dma_async_is_complete(cookie, last_complete, last_used); 833 if (ret == DMA_SUCCESS) { ··· 842 last_used = chan->cookie; 843 last_complete = mv_chan->completed_cookie; 844 845 + dma_set_tx_state(txstate, last_complete, last_used, 0); 846 return dma_async_is_complete(cookie, last_complete, last_used); 847 } 848 ··· 975 async_tx_ack(tx); 976 msleep(1); 977 978 + if (mv_xor_status(dma_chan, cookie, NULL) != 979 DMA_SUCCESS) { 980 dev_printk(KERN_ERR, dma_chan->device->dev, 981 "Self-test copy timed out, disabling\n"); ··· 1073 async_tx_ack(tx); 1074 msleep(8); 1075 1076 + if (mv_xor_status(dma_chan, cookie, NULL) != 1077 DMA_SUCCESS) { 1078 dev_printk(KERN_ERR, dma_chan->device->dev, 1079 "Self-test xor timed out, disabling\n"); ··· 1168 /* set base routines */ 1169 dma_dev->device_alloc_chan_resources = mv_xor_alloc_chan_resources; 1170 dma_dev->device_free_chan_resources = mv_xor_free_chan_resources; 1171 + dma_dev->device_tx_status = mv_xor_status; 1172 dma_dev->device_issue_pending = mv_xor_issue_pending; 1173 dma_dev->dev = &pdev->dev; 1174
+7 -12
drivers/dma/ppc4xx/adma.c
··· 3935 } 3936 3937 /** 3938 - * ppc440spe_adma_is_complete - poll the status of an ADMA transaction 3939 * @chan: ADMA channel handle 3940 * @cookie: ADMA transaction identifier 3941 */ 3942 - static enum dma_status ppc440spe_adma_is_complete(struct dma_chan *chan, 3943 - dma_cookie_t cookie, dma_cookie_t *done, dma_cookie_t *used) 3944 { 3945 struct ppc440spe_adma_chan *ppc440spe_chan; 3946 dma_cookie_t last_used; ··· 3952 last_used = chan->cookie; 3953 last_complete = ppc440spe_chan->completed_cookie; 3954 3955 - if (done) 3956 - *done = last_complete; 3957 - if (used) 3958 - *used = last_used; 3959 3960 ret = dma_async_is_complete(cookie, last_complete, last_used); 3961 if (ret == DMA_SUCCESS) ··· 3963 last_used = chan->cookie; 3964 last_complete = ppc440spe_chan->completed_cookie; 3965 3966 - if (done) 3967 - *done = last_complete; 3968 - if (used) 3969 - *used = last_used; 3970 3971 return dma_async_is_complete(cookie, last_complete, last_used); 3972 } ··· 4175 ppc440spe_adma_alloc_chan_resources; 4176 adev->common.device_free_chan_resources = 4177 ppc440spe_adma_free_chan_resources; 4178 - adev->common.device_is_tx_complete = ppc440spe_adma_is_complete; 4179 adev->common.device_issue_pending = ppc440spe_adma_issue_pending; 4180 4181 /* Set prep routines based on capability */
··· 3935 } 3936 3937 /** 3938 + * ppc440spe_adma_tx_status - poll the status of an ADMA transaction 3939 * @chan: ADMA channel handle 3940 * @cookie: ADMA transaction identifier 3941 + * @txstate: a holder for the current state of the channel 3942 */ 3943 + static enum dma_status ppc440spe_adma_tx_status(struct dma_chan *chan, 3944 + dma_cookie_t cookie, struct dma_tx_state *txstate) 3945 { 3946 struct ppc440spe_adma_chan *ppc440spe_chan; 3947 dma_cookie_t last_used; ··· 3951 last_used = chan->cookie; 3952 last_complete = ppc440spe_chan->completed_cookie; 3953 3954 + dma_set_tx_state(txstate, last_complete, last_used, 0); 3955 3956 ret = dma_async_is_complete(cookie, last_complete, last_used); 3957 if (ret == DMA_SUCCESS) ··· 3965 last_used = chan->cookie; 3966 last_complete = ppc440spe_chan->completed_cookie; 3967 3968 + dma_set_tx_state(txstate, last_complete, last_used, 0); 3969 3970 return dma_async_is_complete(cookie, last_complete, last_used); 3971 } ··· 4180 ppc440spe_adma_alloc_chan_resources; 4181 adev->common.device_free_chan_resources = 4182 ppc440spe_adma_free_chan_resources; 4183 + adev->common.device_tx_status = ppc440spe_adma_tx_status; 4184 adev->common.device_issue_pending = ppc440spe_adma_issue_pending; 4185 4186 /* Set prep routines based on capability */
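The ppc4xx ADMA conversion is identical in shape, and it also shows why clients need no immediate changes: the old done/used out-parameter interface can be kept alive by a wrapper that calls device_tx_status and unpacks the state. The sketch below illustrates the expected compatibility shim; it is not a hunk from this merge.

static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan,
	dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used)
{
	struct dma_tx_state state;
	enum dma_status status;

	/* One driver entry point now serves both old and new callers. */
	status = chan->device->device_tx_status(chan, cookie, &state);
	if (last)
		*last = state.last;
	if (used)
		*used = state.used;
	return status;
}

This is also why every converted driver keeps calling dma_async_is_complete() internally: the cookie bookkeeping is unchanged, only the way the cookies travel back to the caller is.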
+14 -13
drivers/dma/shdma.c
··· 597 direction, flags); 598 } 599 600 - static void sh_dmae_terminate_all(struct dma_chan *chan) 601 { 602 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 603 604 if (!chan) 605 - return; 606 607 dmae_halt(sh_chan); 608 ··· 623 spin_unlock_bh(&sh_chan->desc_lock); 624 625 sh_dmae_chan_ld_cleanup(sh_chan, true); 626 } 627 628 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) ··· 756 sh_chan_xfer_ld_queue(sh_chan); 757 } 758 759 - static enum dma_status sh_dmae_is_complete(struct dma_chan *chan, 760 dma_cookie_t cookie, 761 - dma_cookie_t *done, 762 - dma_cookie_t *used) 763 { 764 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 765 dma_cookie_t last_used; ··· 770 last_used = chan->cookie; 771 last_complete = sh_chan->completed_cookie; 772 BUG_ON(last_complete < 0); 773 - 774 - if (done) 775 - *done = last_complete; 776 - 777 - if (used) 778 - *used = last_used; 779 780 spin_lock_bh(&sh_chan->desc_lock); 781 ··· 1042 = sh_dmae_alloc_chan_resources; 1043 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; 1044 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; 1045 - shdev->common.device_is_tx_complete = sh_dmae_is_complete; 1046 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; 1047 1048 /* Compulsory for DMA_SLAVE fields */ 1049 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; 1050 - shdev->common.device_terminate_all = sh_dmae_terminate_all; 1051 1052 shdev->common.dev = &pdev->dev; 1053 /* Default transfer size of 32 bytes requires 32-byte alignment */
··· 597 direction, flags); 598 } 599 600 + static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 601 + unsigned long arg) 602 { 603 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 604 605 + /* Only supports DMA_TERMINATE_ALL */ 606 + if (cmd != DMA_TERMINATE_ALL) 607 + return -ENXIO; 608 + 609 if (!chan) 610 + return -EINVAL; 611 612 dmae_halt(sh_chan); 613 ··· 618 spin_unlock_bh(&sh_chan->desc_lock); 619 620 sh_dmae_chan_ld_cleanup(sh_chan, true); 621 + 622 + return 0; 623 } 624 625 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all) ··· 749 sh_chan_xfer_ld_queue(sh_chan); 750 } 751 752 + static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, 753 dma_cookie_t cookie, 754 + struct dma_tx_state *txstate) 755 { 756 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 757 dma_cookie_t last_used; ··· 764 last_used = chan->cookie; 765 last_complete = sh_chan->completed_cookie; 766 BUG_ON(last_complete < 0); 767 + dma_set_tx_state(txstate, last_complete, last_used, 0); 768 769 spin_lock_bh(&sh_chan->desc_lock); 770 ··· 1041 = sh_dmae_alloc_chan_resources; 1042 shdev->common.device_free_chan_resources = sh_dmae_free_chan_resources; 1043 shdev->common.device_prep_dma_memcpy = sh_dmae_prep_memcpy; 1044 + shdev->common.device_tx_status = sh_dmae_tx_status; 1045 shdev->common.device_issue_pending = sh_dmae_memcpy_issue_pending; 1046 1047 /* Compulsory for DMA_SLAVE fields */ 1048 shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg; 1049 + shdev->common.device_control = sh_dmae_control; 1050 1051 shdev->common.dev = &pdev->dev; 1052 /* Default transfer size of 32 bytes requires 32-byte alignment */
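shdma additionally picks up the other half of the rework: the single-purpose device_terminate_all callback becomes the multiplexed device_control hook, taking an enum dma_ctrl_cmd plus an unsigned long argument (the argument comes from the "extend the control command to include an arg" patch in this merge). A slave client that only needs to tear a channel down would call it roughly as below; my_stop_dma() is a hypothetical wrapper for illustration, not something added by this merge.

#include <linux/dmaengine.h>

static int my_stop_dma(struct dma_chan *chan)
{
	/*
	 * The argument is unused for DMA_TERMINATE_ALL, so pass 0;
	 * commands that need configuration data are expected to carry
	 * it through this argument.
	 */
	return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}

Note that the hunk above returns -ENXIO for any command other than DMA_TERMINATE_ALL, so callers should treat that value as "not supported" rather than a hard failure.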
+2657
drivers/dma/ste_dma40.c
···
··· 1 + /* 2 + * driver/dma/ste_dma40.c 3 + * 4 + * Copyright (C) ST-Ericsson 2007-2010 5 + * License terms: GNU General Public License (GPL) version 2 6 + * Author: Per Friden <per.friden@stericsson.com> 7 + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> 8 + * 9 + */ 10 + 11 + #include <linux/kernel.h> 12 + #include <linux/slab.h> 13 + #include <linux/dmaengine.h> 14 + #include <linux/platform_device.h> 15 + #include <linux/clk.h> 16 + #include <linux/delay.h> 17 + 18 + #include <plat/ste_dma40.h> 19 + 20 + #include "ste_dma40_ll.h" 21 + 22 + #define D40_NAME "dma40" 23 + 24 + #define D40_PHY_CHAN -1 25 + 26 + /* For masking out/in 2 bit channel positions */ 27 + #define D40_CHAN_POS(chan) (2 * (chan / 2)) 28 + #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) 29 + 30 + /* Maximum iterations taken before giving up suspending a channel */ 31 + #define D40_SUSPEND_MAX_IT 500 32 + 33 + #define D40_ALLOC_FREE (1 << 31) 34 + #define D40_ALLOC_PHY (1 << 30) 35 + #define D40_ALLOC_LOG_FREE 0 36 + 37 + /* The number of free d40_desc to keep in memory before starting 38 + * to kfree() them */ 39 + #define D40_DESC_CACHE_SIZE 50 40 + 41 + /* Hardware designer of the block */ 42 + #define D40_PERIPHID2_DESIGNER 0x8 43 + 44 + /** 45 + * enum 40_command - The different commands and/or statuses. 46 + * 47 + * @D40_DMA_STOP: DMA channel command STOP or status STOPPED, 48 + * @D40_DMA_RUN: The DMA channel is RUNNING of the command RUN. 49 + * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. 50 + * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. 51 + */ 52 + enum d40_command { 53 + D40_DMA_STOP = 0, 54 + D40_DMA_RUN = 1, 55 + D40_DMA_SUSPEND_REQ = 2, 56 + D40_DMA_SUSPENDED = 3 57 + }; 58 + 59 + /** 60 + * struct d40_lli_pool - Structure for keeping LLIs in memory 61 + * 62 + * @base: Pointer to memory area when the pre_alloc_lli's are not large 63 + * enough, IE bigger than the most common case, 1 dst and 1 src. NULL if 64 + * pre_alloc_lli is used. 65 + * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. 66 + * @pre_alloc_lli: Pre allocated area for the most common case of transfers, 67 + * one buffer to one buffer. 68 + */ 69 + struct d40_lli_pool { 70 + void *base; 71 + int size; 72 + /* Space for dst and src, plus an extra for padding */ 73 + u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; 74 + }; 75 + 76 + /** 77 + * struct d40_desc - A descriptor is one DMA job. 78 + * 79 + * @lli_phy: LLI settings for physical channel. Both src and dst= 80 + * points into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if 81 + * lli_len equals one. 82 + * @lli_log: Same as above but for logical channels. 83 + * @lli_pool: The pool with two entries pre-allocated. 84 + * @lli_len: Number of LLI's in lli_pool 85 + * @lli_tcount: Number of LLIs processed in the transfer. When equals lli_len 86 + * then this transfer job is done. 87 + * @txd: DMA engine struct. Used for among other things for communication 88 + * during a transfer. 89 + * @node: List entry. 90 + * @dir: The transfer direction of this job. 91 + * @is_in_client_list: true if the client owns this descriptor. 92 + * 93 + * This descriptor is used for both logical and physical transfers. 
94 + */ 95 + 96 + struct d40_desc { 97 + /* LLI physical */ 98 + struct d40_phy_lli_bidir lli_phy; 99 + /* LLI logical */ 100 + struct d40_log_lli_bidir lli_log; 101 + 102 + struct d40_lli_pool lli_pool; 103 + u32 lli_len; 104 + u32 lli_tcount; 105 + 106 + struct dma_async_tx_descriptor txd; 107 + struct list_head node; 108 + 109 + enum dma_data_direction dir; 110 + bool is_in_client_list; 111 + }; 112 + 113 + /** 114 + * struct d40_lcla_pool - LCLA pool settings and data. 115 + * 116 + * @base: The virtual address of LCLA. 117 + * @phy: Physical base address of LCLA. 118 + * @base_size: size of lcla. 119 + * @lock: Lock to protect the content in this struct. 120 + * @alloc_map: Mapping between physical channel and LCLA entries. 121 + * @num_blocks: The number of entries of alloc_map. Equals to the 122 + * number of physical channels. 123 + */ 124 + struct d40_lcla_pool { 125 + void *base; 126 + dma_addr_t phy; 127 + resource_size_t base_size; 128 + spinlock_t lock; 129 + u32 *alloc_map; 130 + int num_blocks; 131 + }; 132 + 133 + /** 134 + * struct d40_phy_res - struct for handling eventlines mapped to physical 135 + * channels. 136 + * 137 + * @lock: A lock protection this entity. 138 + * @num: The physical channel number of this entity. 139 + * @allocated_src: Bit mapped to show which src event line's are mapped to 140 + * this physical channel. Can also be free or physically allocated. 141 + * @allocated_dst: Same as for src but is dst. 142 + * allocated_dst and allocated_src uses the D40_ALLOC* defines as well as 143 + * event line number. Both allocated_src and allocated_dst can not be 144 + * allocated to a physical channel, since the interrupt handler has then 145 + * no way of figure out which one the interrupt belongs to. 146 + */ 147 + struct d40_phy_res { 148 + spinlock_t lock; 149 + int num; 150 + u32 allocated_src; 151 + u32 allocated_dst; 152 + }; 153 + 154 + struct d40_base; 155 + 156 + /** 157 + * struct d40_chan - Struct that describes a channel. 158 + * 159 + * @lock: A spinlock to protect this struct. 160 + * @log_num: The logical number, if any of this channel. 161 + * @completed: Starts with 1, after first interrupt it is set to dma engine's 162 + * current cookie. 163 + * @pending_tx: The number of pending transfers. Used between interrupt handler 164 + * and tasklet. 165 + * @busy: Set to true when transfer is ongoing on this channel. 166 + * @phy_chan: Pointer to physical channel which this instance runs on. 167 + * @chan: DMA engine handle. 168 + * @tasklet: Tasklet that gets scheduled from interrupt context to complete a 169 + * transfer and call client callback. 170 + * @client: Cliented owned descriptor list. 171 + * @active: Active descriptor. 172 + * @queue: Queued jobs. 173 + * @free: List of free descripts, ready to be reused. 174 + * @free_len: Number of descriptors in the free list. 175 + * @dma_cfg: The client configuration of this dma channel. 176 + * @base: Pointer to the device instance struct. 177 + * @src_def_cfg: Default cfg register setting for src. 178 + * @dst_def_cfg: Default cfg register setting for dst. 179 + * @log_def: Default logical channel settings. 180 + * @lcla: Space for one dst src pair for logical channel transfers. 181 + * @lcpa: Pointer to dst and src lcpa settings. 182 + * 183 + * This struct can either "be" a logical or a physical channel. 
184 + */ 185 + struct d40_chan { 186 + spinlock_t lock; 187 + int log_num; 188 + /* ID of the most recent completed transfer */ 189 + int completed; 190 + int pending_tx; 191 + bool busy; 192 + struct d40_phy_res *phy_chan; 193 + struct dma_chan chan; 194 + struct tasklet_struct tasklet; 195 + struct list_head client; 196 + struct list_head active; 197 + struct list_head queue; 198 + struct list_head free; 199 + int free_len; 200 + struct stedma40_chan_cfg dma_cfg; 201 + struct d40_base *base; 202 + /* Default register configurations */ 203 + u32 src_def_cfg; 204 + u32 dst_def_cfg; 205 + struct d40_def_lcsp log_def; 206 + struct d40_lcla_elem lcla; 207 + struct d40_log_lli_full *lcpa; 208 + }; 209 + 210 + /** 211 + * struct d40_base - The big global struct, one for each probe'd instance. 212 + * 213 + * @interrupt_lock: Lock used to make sure one interrupt is handle a time. 214 + * @execmd_lock: Lock for execute command usage since several channels share 215 + * the same physical register. 216 + * @dev: The device structure. 217 + * @virtbase: The virtual base address of the DMA's register. 218 + * @clk: Pointer to the DMA clock structure. 219 + * @phy_start: Physical memory start of the DMA registers. 220 + * @phy_size: Size of the DMA register map. 221 + * @irq: The IRQ number. 222 + * @num_phy_chans: The number of physical channels. Read from HW. This 223 + * is the number of available channels for this driver, not counting "Secure 224 + * mode" allocated physical channels. 225 + * @num_log_chans: The number of logical channels. Calculated from 226 + * num_phy_chans. 227 + * @dma_both: dma_device channels that can do both memcpy and slave transfers. 228 + * @dma_slave: dma_device channels that can do only do slave transfers. 229 + * @dma_memcpy: dma_device channels that can do only do memcpy transfers. 230 + * @phy_chans: Room for all possible physical channels in system. 231 + * @log_chans: Room for all possible logical channels in system. 232 + * @lookup_log_chans: Used to map interrupt number to logical channel. Points 233 + * to log_chans entries. 234 + * @lookup_phy_chans: Used to map interrupt number to physical channel. Points 235 + * to phy_chans entries. 236 + * @plat_data: Pointer to provided platform_data which is the driver 237 + * configuration. 238 + * @phy_res: Vector containing all physical channels. 239 + * @lcla_pool: lcla pool settings and data. 240 + * @lcpa_base: The virtual mapped address of LCPA. 241 + * @phy_lcpa: The physical address of the LCPA. 242 + * @lcpa_size: The size of the LCPA area. 243 + */ 244 + struct d40_base { 245 + spinlock_t interrupt_lock; 246 + spinlock_t execmd_lock; 247 + struct device *dev; 248 + void __iomem *virtbase; 249 + struct clk *clk; 250 + phys_addr_t phy_start; 251 + resource_size_t phy_size; 252 + int irq; 253 + int num_phy_chans; 254 + int num_log_chans; 255 + struct dma_device dma_both; 256 + struct dma_device dma_slave; 257 + struct dma_device dma_memcpy; 258 + struct d40_chan *phy_chans; 259 + struct d40_chan *log_chans; 260 + struct d40_chan **lookup_log_chans; 261 + struct d40_chan **lookup_phy_chans; 262 + struct stedma40_platform_data *plat_data; 263 + /* Physical half channels */ 264 + struct d40_phy_res *phy_res; 265 + struct d40_lcla_pool lcla_pool; 266 + void *lcpa_base; 267 + dma_addr_t phy_lcpa; 268 + resource_size_t lcpa_size; 269 + }; 270 + 271 + /** 272 + * struct d40_interrupt_lookup - lookup table for interrupt handler 273 + * 274 + * @src: Interrupt mask register. 275 + * @clr: Interrupt clear register. 
276 + * @is_error: true if this is an error interrupt. 277 + * @offset: start delta in the lookup_log_chans in d40_base. If equals to 278 + * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. 279 + */ 280 + struct d40_interrupt_lookup { 281 + u32 src; 282 + u32 clr; 283 + bool is_error; 284 + int offset; 285 + }; 286 + 287 + /** 288 + * struct d40_reg_val - simple lookup struct 289 + * 290 + * @reg: The register. 291 + * @val: The value that belongs to the register in reg. 292 + */ 293 + struct d40_reg_val { 294 + unsigned int reg; 295 + unsigned int val; 296 + }; 297 + 298 + static int d40_pool_lli_alloc(struct d40_desc *d40d, 299 + int lli_len, bool is_log) 300 + { 301 + u32 align; 302 + void *base; 303 + 304 + if (is_log) 305 + align = sizeof(struct d40_log_lli); 306 + else 307 + align = sizeof(struct d40_phy_lli); 308 + 309 + if (lli_len == 1) { 310 + base = d40d->lli_pool.pre_alloc_lli; 311 + d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); 312 + d40d->lli_pool.base = NULL; 313 + } else { 314 + d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); 315 + 316 + base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); 317 + d40d->lli_pool.base = base; 318 + 319 + if (d40d->lli_pool.base == NULL) 320 + return -ENOMEM; 321 + } 322 + 323 + if (is_log) { 324 + d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, 325 + align); 326 + d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, 327 + align); 328 + } else { 329 + d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, 330 + align); 331 + d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, 332 + align); 333 + 334 + d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src); 335 + d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst); 336 + } 337 + 338 + return 0; 339 + } 340 + 341 + static void d40_pool_lli_free(struct d40_desc *d40d) 342 + { 343 + kfree(d40d->lli_pool.base); 344 + d40d->lli_pool.base = NULL; 345 + d40d->lli_pool.size = 0; 346 + d40d->lli_log.src = NULL; 347 + d40d->lli_log.dst = NULL; 348 + d40d->lli_phy.src = NULL; 349 + d40d->lli_phy.dst = NULL; 350 + d40d->lli_phy.src_addr = 0; 351 + d40d->lli_phy.dst_addr = 0; 352 + } 353 + 354 + static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, 355 + struct d40_desc *desc) 356 + { 357 + dma_cookie_t cookie = d40c->chan.cookie; 358 + 359 + if (++cookie < 0) 360 + cookie = 1; 361 + 362 + d40c->chan.cookie = cookie; 363 + desc->txd.cookie = cookie; 364 + 365 + return cookie; 366 + } 367 + 368 + static void d40_desc_reset(struct d40_desc *d40d) 369 + { 370 + d40d->lli_tcount = 0; 371 + } 372 + 373 + static void d40_desc_remove(struct d40_desc *d40d) 374 + { 375 + list_del(&d40d->node); 376 + } 377 + 378 + static struct d40_desc *d40_desc_get(struct d40_chan *d40c) 379 + { 380 + struct d40_desc *desc; 381 + struct d40_desc *d; 382 + struct d40_desc *_d; 383 + 384 + if (!list_empty(&d40c->client)) { 385 + list_for_each_entry_safe(d, _d, &d40c->client, node) 386 + if (async_tx_test_ack(&d->txd)) { 387 + d40_pool_lli_free(d); 388 + d40_desc_remove(d); 389 + desc = d; 390 + goto out; 391 + } 392 + } 393 + 394 + if (list_empty(&d40c->free)) { 395 + /* Alloc new desc because we're out of used ones */ 396 + desc = kzalloc(sizeof(struct d40_desc), GFP_NOWAIT); 397 + if (desc == NULL) 398 + goto out; 399 + INIT_LIST_HEAD(&desc->node); 400 + } else { 401 + /* Reuse an old desc. 
*/ 402 + desc = list_first_entry(&d40c->free, 403 + struct d40_desc, 404 + node); 405 + list_del(&desc->node); 406 + d40c->free_len--; 407 + } 408 + out: 409 + return desc; 410 + } 411 + 412 + static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) 413 + { 414 + if (d40c->free_len < D40_DESC_CACHE_SIZE) { 415 + list_add_tail(&d40d->node, &d40c->free); 416 + d40c->free_len++; 417 + } else 418 + kfree(d40d); 419 + } 420 + 421 + static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) 422 + { 423 + list_add_tail(&desc->node, &d40c->active); 424 + } 425 + 426 + static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) 427 + { 428 + struct d40_desc *d; 429 + 430 + if (list_empty(&d40c->active)) 431 + return NULL; 432 + 433 + d = list_first_entry(&d40c->active, 434 + struct d40_desc, 435 + node); 436 + return d; 437 + } 438 + 439 + static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) 440 + { 441 + list_add_tail(&desc->node, &d40c->queue); 442 + } 443 + 444 + static struct d40_desc *d40_first_queued(struct d40_chan *d40c) 445 + { 446 + struct d40_desc *d; 447 + 448 + if (list_empty(&d40c->queue)) 449 + return NULL; 450 + 451 + d = list_first_entry(&d40c->queue, 452 + struct d40_desc, 453 + node); 454 + return d; 455 + } 456 + 457 + /* Support functions for logical channels */ 458 + 459 + static int d40_lcla_id_get(struct d40_chan *d40c, 460 + struct d40_lcla_pool *pool) 461 + { 462 + int src_id = 0; 463 + int dst_id = 0; 464 + struct d40_log_lli *lcla_lidx_base = 465 + pool->base + d40c->phy_chan->num * 1024; 466 + int i; 467 + int lli_per_log = d40c->base->plat_data->llis_per_log; 468 + 469 + if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) 470 + return 0; 471 + 472 + if (pool->num_blocks > 32) 473 + return -EINVAL; 474 + 475 + spin_lock(&pool->lock); 476 + 477 + for (i = 0; i < pool->num_blocks; i++) { 478 + if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { 479 + pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); 480 + break; 481 + } 482 + } 483 + src_id = i; 484 + if (src_id >= pool->num_blocks) 485 + goto err; 486 + 487 + for (; i < pool->num_blocks; i++) { 488 + if (!(pool->alloc_map[d40c->phy_chan->num] & (0x1 << i))) { 489 + pool->alloc_map[d40c->phy_chan->num] |= (0x1 << i); 490 + break; 491 + } 492 + } 493 + 494 + dst_id = i; 495 + if (dst_id == src_id) 496 + goto err; 497 + 498 + d40c->lcla.src_id = src_id; 499 + d40c->lcla.dst_id = dst_id; 500 + d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; 501 + d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; 502 + 503 + 504 + spin_unlock(&pool->lock); 505 + return 0; 506 + err: 507 + spin_unlock(&pool->lock); 508 + return -EINVAL; 509 + } 510 + 511 + static void d40_lcla_id_put(struct d40_chan *d40c, 512 + struct d40_lcla_pool *pool, 513 + int id) 514 + { 515 + if (id < 0) 516 + return; 517 + 518 + d40c->lcla.src_id = -1; 519 + d40c->lcla.dst_id = -1; 520 + 521 + spin_lock(&pool->lock); 522 + pool->alloc_map[d40c->phy_chan->num] &= (~(0x1 << id)); 523 + spin_unlock(&pool->lock); 524 + } 525 + 526 + static int d40_channel_execute_command(struct d40_chan *d40c, 527 + enum d40_command command) 528 + { 529 + int status, i; 530 + void __iomem *active_reg; 531 + int ret = 0; 532 + unsigned long flags; 533 + 534 + spin_lock_irqsave(&d40c->base->execmd_lock, flags); 535 + 536 + if (d40c->phy_chan->num % 2 == 0) 537 + active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 538 + else 539 + active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 540 + 541 + if 
(command == D40_DMA_SUSPEND_REQ) { 542 + status = (readl(active_reg) & 543 + D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 544 + D40_CHAN_POS(d40c->phy_chan->num); 545 + 546 + if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 547 + goto done; 548 + } 549 + 550 + writel(command << D40_CHAN_POS(d40c->phy_chan->num), active_reg); 551 + 552 + if (command == D40_DMA_SUSPEND_REQ) { 553 + 554 + for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { 555 + status = (readl(active_reg) & 556 + D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 557 + D40_CHAN_POS(d40c->phy_chan->num); 558 + 559 + cpu_relax(); 560 + /* 561 + * Reduce the number of bus accesses while 562 + * waiting for the DMA to suspend. 563 + */ 564 + udelay(3); 565 + 566 + if (status == D40_DMA_STOP || 567 + status == D40_DMA_SUSPENDED) 568 + break; 569 + } 570 + 571 + if (i == D40_SUSPEND_MAX_IT) { 572 + dev_err(&d40c->chan.dev->device, 573 + "[%s]: unable to suspend the chl %d (log: %d) status %x\n", 574 + __func__, d40c->phy_chan->num, d40c->log_num, 575 + status); 576 + dump_stack(); 577 + ret = -EBUSY; 578 + } 579 + 580 + } 581 + done: 582 + spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); 583 + return ret; 584 + } 585 + 586 + static void d40_term_all(struct d40_chan *d40c) 587 + { 588 + struct d40_desc *d40d; 589 + struct d40_desc *d; 590 + struct d40_desc *_d; 591 + 592 + /* Release active descriptors */ 593 + while ((d40d = d40_first_active_get(d40c))) { 594 + d40_desc_remove(d40d); 595 + 596 + /* Return desc to free-list */ 597 + d40_desc_free(d40c, d40d); 598 + } 599 + 600 + /* Release queued descriptors waiting for transfer */ 601 + while ((d40d = d40_first_queued(d40c))) { 602 + d40_desc_remove(d40d); 603 + 604 + /* Return desc to free-list */ 605 + d40_desc_free(d40c, d40d); 606 + } 607 + 608 + /* Release client owned descriptors */ 609 + if (!list_empty(&d40c->client)) 610 + list_for_each_entry_safe(d, _d, &d40c->client, node) { 611 + d40_pool_lli_free(d); 612 + d40_desc_remove(d); 613 + /* Return desc to free-list */ 614 + d40_desc_free(d40c, d40d); 615 + } 616 + 617 + d40_lcla_id_put(d40c, &d40c->base->lcla_pool, 618 + d40c->lcla.src_id); 619 + d40_lcla_id_put(d40c, &d40c->base->lcla_pool, 620 + d40c->lcla.dst_id); 621 + 622 + d40c->pending_tx = 0; 623 + d40c->busy = false; 624 + } 625 + 626 + static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) 627 + { 628 + u32 val; 629 + unsigned long flags; 630 + 631 + if (do_enable) 632 + val = D40_ACTIVATE_EVENTLINE; 633 + else 634 + val = D40_DEACTIVATE_EVENTLINE; 635 + 636 + spin_lock_irqsave(&d40c->phy_chan->lock, flags); 637 + 638 + /* Enable event line connected to device (or memcpy) */ 639 + if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 640 + (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { 641 + u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 642 + 643 + writel((val << D40_EVENTLINE_POS(event)) | 644 + ~D40_EVENTLINE_MASK(event), 645 + d40c->base->virtbase + D40_DREG_PCBASE + 646 + d40c->phy_chan->num * D40_DREG_PCDELTA + 647 + D40_CHAN_REG_SSLNK); 648 + } 649 + if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { 650 + u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 651 + 652 + writel((val << D40_EVENTLINE_POS(event)) | 653 + ~D40_EVENTLINE_MASK(event), 654 + d40c->base->virtbase + D40_DREG_PCBASE + 655 + d40c->phy_chan->num * D40_DREG_PCDELTA + 656 + D40_CHAN_REG_SDLNK); 657 + } 658 + 659 + spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); 660 + } 661 + 662 + static u32 d40_chan_has_events(struct d40_chan *d40c) 663 + { 
664 + u32 val = 0; 665 + 666 + /* If SSLNK or SDLNK is zero all events are disabled */ 667 + if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 668 + (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 669 + val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 670 + d40c->phy_chan->num * D40_DREG_PCDELTA + 671 + D40_CHAN_REG_SSLNK); 672 + 673 + if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) 674 + val = readl(d40c->base->virtbase + D40_DREG_PCBASE + 675 + d40c->phy_chan->num * D40_DREG_PCDELTA + 676 + D40_CHAN_REG_SDLNK); 677 + return val; 678 + } 679 + 680 + static void d40_config_enable_lidx(struct d40_chan *d40c) 681 + { 682 + /* Set LIDX for lcla */ 683 + writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 684 + D40_SREG_ELEM_LOG_LIDX_MASK, 685 + d40c->base->virtbase + D40_DREG_PCBASE + 686 + d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); 687 + 688 + writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & 689 + D40_SREG_ELEM_LOG_LIDX_MASK, 690 + d40c->base->virtbase + D40_DREG_PCBASE + 691 + d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); 692 + } 693 + 694 + static int d40_config_write(struct d40_chan *d40c) 695 + { 696 + u32 addr_base; 697 + u32 var; 698 + int res; 699 + 700 + res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 701 + if (res) 702 + return res; 703 + 704 + /* Odd addresses are even addresses + 4 */ 705 + addr_base = (d40c->phy_chan->num % 2) * 4; 706 + /* Setup channel mode to logical or physical */ 707 + var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << 708 + D40_CHAN_POS(d40c->phy_chan->num); 709 + writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); 710 + 711 + /* Setup operational mode option register */ 712 + var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & 713 + 0x3) << D40_CHAN_POS(d40c->phy_chan->num); 714 + 715 + writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); 716 + 717 + if (d40c->log_num != D40_PHY_CHAN) { 718 + /* Set default config for CFG reg */ 719 + writel(d40c->src_def_cfg, 720 + d40c->base->virtbase + D40_DREG_PCBASE + 721 + d40c->phy_chan->num * D40_DREG_PCDELTA + 722 + D40_CHAN_REG_SSCFG); 723 + writel(d40c->dst_def_cfg, 724 + d40c->base->virtbase + D40_DREG_PCBASE + 725 + d40c->phy_chan->num * D40_DREG_PCDELTA + 726 + D40_CHAN_REG_SDCFG); 727 + 728 + d40_config_enable_lidx(d40c); 729 + } 730 + return res; 731 + } 732 + 733 + static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) 734 + { 735 + 736 + if (d40d->lli_phy.dst && d40d->lli_phy.src) { 737 + d40_phy_lli_write(d40c->base->virtbase, 738 + d40c->phy_chan->num, 739 + d40d->lli_phy.dst, 740 + d40d->lli_phy.src); 741 + d40d->lli_tcount = d40d->lli_len; 742 + } else if (d40d->lli_log.dst && d40d->lli_log.src) { 743 + u32 lli_len; 744 + struct d40_log_lli *src = d40d->lli_log.src; 745 + struct d40_log_lli *dst = d40d->lli_log.dst; 746 + 747 + src += d40d->lli_tcount; 748 + dst += d40d->lli_tcount; 749 + 750 + if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) 751 + lli_len = d40d->lli_len; 752 + else 753 + lli_len = d40c->base->plat_data->llis_per_log; 754 + d40d->lli_tcount += lli_len; 755 + d40_log_lli_write(d40c->lcpa, d40c->lcla.src, 756 + d40c->lcla.dst, 757 + dst, src, 758 + d40c->base->plat_data->llis_per_log); 759 + } 760 + } 761 + 762 + static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) 763 + { 764 + struct d40_chan *d40c = container_of(tx->chan, 765 + struct d40_chan, 766 + chan); 767 + struct d40_desc *d40d = container_of(tx, struct d40_desc, 
txd); 768 + unsigned long flags; 769 + 770 + spin_lock_irqsave(&d40c->lock, flags); 771 + 772 + tx->cookie = d40_assign_cookie(d40c, d40d); 773 + 774 + d40_desc_queue(d40c, d40d); 775 + 776 + spin_unlock_irqrestore(&d40c->lock, flags); 777 + 778 + return tx->cookie; 779 + } 780 + 781 + static int d40_start(struct d40_chan *d40c) 782 + { 783 + int err; 784 + 785 + if (d40c->log_num != D40_PHY_CHAN) { 786 + err = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 787 + if (err) 788 + return err; 789 + d40_config_set_event(d40c, true); 790 + } 791 + 792 + err = d40_channel_execute_command(d40c, D40_DMA_RUN); 793 + 794 + return err; 795 + } 796 + 797 + static struct d40_desc *d40_queue_start(struct d40_chan *d40c) 798 + { 799 + struct d40_desc *d40d; 800 + int err; 801 + 802 + /* Start queued jobs, if any */ 803 + d40d = d40_first_queued(d40c); 804 + 805 + if (d40d != NULL) { 806 + d40c->busy = true; 807 + 808 + /* Remove from queue */ 809 + d40_desc_remove(d40d); 810 + 811 + /* Add to active queue */ 812 + d40_desc_submit(d40c, d40d); 813 + 814 + /* Initiate DMA job */ 815 + d40_desc_load(d40c, d40d); 816 + 817 + /* Start dma job */ 818 + err = d40_start(d40c); 819 + 820 + if (err) 821 + return NULL; 822 + } 823 + 824 + return d40d; 825 + } 826 + 827 + /* called from interrupt context */ 828 + static void dma_tc_handle(struct d40_chan *d40c) 829 + { 830 + struct d40_desc *d40d; 831 + 832 + if (!d40c->phy_chan) 833 + return; 834 + 835 + /* Get first active entry from list */ 836 + d40d = d40_first_active_get(d40c); 837 + 838 + if (d40d == NULL) 839 + return; 840 + 841 + if (d40d->lli_tcount < d40d->lli_len) { 842 + 843 + d40_desc_load(d40c, d40d); 844 + /* Start dma job */ 845 + (void) d40_start(d40c); 846 + return; 847 + } 848 + 849 + if (d40_queue_start(d40c) == NULL) 850 + d40c->busy = false; 851 + 852 + d40c->pending_tx++; 853 + tasklet_schedule(&d40c->tasklet); 854 + 855 + } 856 + 857 + static void dma_tasklet(unsigned long data) 858 + { 859 + struct d40_chan *d40c = (struct d40_chan *) data; 860 + struct d40_desc *d40d_fin; 861 + unsigned long flags; 862 + dma_async_tx_callback callback; 863 + void *callback_param; 864 + 865 + spin_lock_irqsave(&d40c->lock, flags); 866 + 867 + /* Get first active entry from list */ 868 + d40d_fin = d40_first_active_get(d40c); 869 + 870 + if (d40d_fin == NULL) 871 + goto err; 872 + 873 + d40c->completed = d40d_fin->txd.cookie; 874 + 875 + /* 876 + * If terminating a channel pending_tx is set to zero. 877 + * This prevents any finished active jobs to return to the client. 
878 + */ 879 + if (d40c->pending_tx == 0) { 880 + spin_unlock_irqrestore(&d40c->lock, flags); 881 + return; 882 + } 883 + 884 + /* Callback to client */ 885 + callback = d40d_fin->txd.callback; 886 + callback_param = d40d_fin->txd.callback_param; 887 + 888 + if (async_tx_test_ack(&d40d_fin->txd)) { 889 + d40_pool_lli_free(d40d_fin); 890 + d40_desc_remove(d40d_fin); 891 + /* Return desc to free-list */ 892 + d40_desc_free(d40c, d40d_fin); 893 + } else { 894 + d40_desc_reset(d40d_fin); 895 + if (!d40d_fin->is_in_client_list) { 896 + d40_desc_remove(d40d_fin); 897 + list_add_tail(&d40d_fin->node, &d40c->client); 898 + d40d_fin->is_in_client_list = true; 899 + } 900 + } 901 + 902 + d40c->pending_tx--; 903 + 904 + if (d40c->pending_tx) 905 + tasklet_schedule(&d40c->tasklet); 906 + 907 + spin_unlock_irqrestore(&d40c->lock, flags); 908 + 909 + if (callback) 910 + callback(callback_param); 911 + 912 + return; 913 + 914 + err: 915 + /* Rescue manouver if receiving double interrupts */ 916 + if (d40c->pending_tx > 0) 917 + d40c->pending_tx--; 918 + spin_unlock_irqrestore(&d40c->lock, flags); 919 + } 920 + 921 + static irqreturn_t d40_handle_interrupt(int irq, void *data) 922 + { 923 + static const struct d40_interrupt_lookup il[] = { 924 + {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, 925 + {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, 926 + {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, 927 + {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, 928 + {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, 929 + {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, 930 + {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, 931 + {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, 932 + {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, 933 + {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, 934 + }; 935 + 936 + int i; 937 + u32 regs[ARRAY_SIZE(il)]; 938 + u32 tmp; 939 + u32 idx; 940 + u32 row; 941 + long chan = -1; 942 + struct d40_chan *d40c; 943 + unsigned long flags; 944 + struct d40_base *base = data; 945 + 946 + spin_lock_irqsave(&base->interrupt_lock, flags); 947 + 948 + /* Read interrupt status of both logical and physical channels */ 949 + for (i = 0; i < ARRAY_SIZE(il); i++) 950 + regs[i] = readl(base->virtbase + il[i].src); 951 + 952 + for (;;) { 953 + 954 + chan = find_next_bit((unsigned long *)regs, 955 + BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); 956 + 957 + /* No more set bits found? 
*/ 958 + if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) 959 + break; 960 + 961 + row = chan / BITS_PER_LONG; 962 + idx = chan & (BITS_PER_LONG - 1); 963 + 964 + /* ACK interrupt */ 965 + tmp = readl(base->virtbase + il[row].clr); 966 + tmp |= 1 << idx; 967 + writel(tmp, base->virtbase + il[row].clr); 968 + 969 + if (il[row].offset == D40_PHY_CHAN) 970 + d40c = base->lookup_phy_chans[idx]; 971 + else 972 + d40c = base->lookup_log_chans[il[row].offset + idx]; 973 + spin_lock(&d40c->lock); 974 + 975 + if (!il[row].is_error) 976 + dma_tc_handle(d40c); 977 + else 978 + dev_err(base->dev, "[%s] IRQ chan: %ld offset %d idx %d\n", 979 + __func__, chan, il[row].offset, idx); 980 + 981 + spin_unlock(&d40c->lock); 982 + } 983 + 984 + spin_unlock_irqrestore(&base->interrupt_lock, flags); 985 + 986 + return IRQ_HANDLED; 987 + } 988 + 989 + 990 + static int d40_validate_conf(struct d40_chan *d40c, 991 + struct stedma40_chan_cfg *conf) 992 + { 993 + int res = 0; 994 + u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); 995 + u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); 996 + bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) 997 + == STEDMA40_CHANNEL_IN_LOG_MODE; 998 + 999 + if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH && 1000 + dst_event_group == STEDMA40_DEV_DST_MEMORY) { 1001 + dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", 1002 + __func__); 1003 + res = -EINVAL; 1004 + } 1005 + 1006 + if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM && 1007 + src_event_group == STEDMA40_DEV_SRC_MEMORY) { 1008 + dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", 1009 + __func__); 1010 + res = -EINVAL; 1011 + } 1012 + 1013 + if (src_event_group == STEDMA40_DEV_SRC_MEMORY && 1014 + dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { 1015 + dev_err(&d40c->chan.dev->device, 1016 + "[%s] No event line\n", __func__); 1017 + res = -EINVAL; 1018 + } 1019 + 1020 + if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && 1021 + (src_event_group != dst_event_group)) { 1022 + dev_err(&d40c->chan.dev->device, 1023 + "[%s] Invalid event group\n", __func__); 1024 + res = -EINVAL; 1025 + } 1026 + 1027 + if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { 1028 + /* 1029 + * DMAC HW supports it. Will be added to this driver, 1030 + * in case any dma client requires it. 
1031 + */ 1032 + dev_err(&d40c->chan.dev->device, 1033 + "[%s] periph to periph not supported\n", 1034 + __func__); 1035 + res = -EINVAL; 1036 + } 1037 + 1038 + return res; 1039 + } 1040 + 1041 + static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, 1042 + int log_event_line, bool is_log) 1043 + { 1044 + unsigned long flags; 1045 + spin_lock_irqsave(&phy->lock, flags); 1046 + if (!is_log) { 1047 + /* Physical interrupts are masked per physical full channel */ 1048 + if (phy->allocated_src == D40_ALLOC_FREE && 1049 + phy->allocated_dst == D40_ALLOC_FREE) { 1050 + phy->allocated_dst = D40_ALLOC_PHY; 1051 + phy->allocated_src = D40_ALLOC_PHY; 1052 + goto found; 1053 + } else 1054 + goto not_found; 1055 + } 1056 + 1057 + /* Logical channel */ 1058 + if (is_src) { 1059 + if (phy->allocated_src == D40_ALLOC_PHY) 1060 + goto not_found; 1061 + 1062 + if (phy->allocated_src == D40_ALLOC_FREE) 1063 + phy->allocated_src = D40_ALLOC_LOG_FREE; 1064 + 1065 + if (!(phy->allocated_src & (1 << log_event_line))) { 1066 + phy->allocated_src |= 1 << log_event_line; 1067 + goto found; 1068 + } else 1069 + goto not_found; 1070 + } else { 1071 + if (phy->allocated_dst == D40_ALLOC_PHY) 1072 + goto not_found; 1073 + 1074 + if (phy->allocated_dst == D40_ALLOC_FREE) 1075 + phy->allocated_dst = D40_ALLOC_LOG_FREE; 1076 + 1077 + if (!(phy->allocated_dst & (1 << log_event_line))) { 1078 + phy->allocated_dst |= 1 << log_event_line; 1079 + goto found; 1080 + } else 1081 + goto not_found; 1082 + } 1083 + 1084 + not_found: 1085 + spin_unlock_irqrestore(&phy->lock, flags); 1086 + return false; 1087 + found: 1088 + spin_unlock_irqrestore(&phy->lock, flags); 1089 + return true; 1090 + } 1091 + 1092 + static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, 1093 + int log_event_line) 1094 + { 1095 + unsigned long flags; 1096 + bool is_free = false; 1097 + 1098 + spin_lock_irqsave(&phy->lock, flags); 1099 + if (!log_event_line) { 1100 + /* Physical interrupts are masked per physical full channel */ 1101 + phy->allocated_dst = D40_ALLOC_FREE; 1102 + phy->allocated_src = D40_ALLOC_FREE; 1103 + is_free = true; 1104 + goto out; 1105 + } 1106 + 1107 + /* Logical channel */ 1108 + if (is_src) { 1109 + phy->allocated_src &= ~(1 << log_event_line); 1110 + if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1111 + phy->allocated_src = D40_ALLOC_FREE; 1112 + } else { 1113 + phy->allocated_dst &= ~(1 << log_event_line); 1114 + if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1115 + phy->allocated_dst = D40_ALLOC_FREE; 1116 + } 1117 + 1118 + is_free = ((phy->allocated_src | phy->allocated_dst) == 1119 + D40_ALLOC_FREE); 1120 + 1121 + out: 1122 + spin_unlock_irqrestore(&phy->lock, flags); 1123 + 1124 + return is_free; 1125 + } 1126 + 1127 + static int d40_allocate_channel(struct d40_chan *d40c) 1128 + { 1129 + int dev_type; 1130 + int event_group; 1131 + int event_line; 1132 + struct d40_phy_res *phys; 1133 + int i; 1134 + int j; 1135 + int log_num; 1136 + bool is_src; 1137 + bool is_log = (d40c->dma_cfg.channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) 1138 + == STEDMA40_CHANNEL_IN_LOG_MODE; 1139 + 1140 + 1141 + phys = d40c->base->phy_res; 1142 + 1143 + if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1144 + dev_type = d40c->dma_cfg.src_dev_type; 1145 + log_num = 2 * dev_type; 1146 + is_src = true; 1147 + } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1148 + d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1149 + /* dst event lines are used for logical memcpy */ 1150 + dev_type = d40c->dma_cfg.dst_dev_type; 1151 + 
log_num = 2 * dev_type + 1; 1152 + is_src = false; 1153 + } else 1154 + return -EINVAL; 1155 + 1156 + event_group = D40_TYPE_TO_GROUP(dev_type); 1157 + event_line = D40_TYPE_TO_EVENT(dev_type); 1158 + 1159 + if (!is_log) { 1160 + if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1161 + /* Find physical half channel */ 1162 + for (i = 0; i < d40c->base->num_phy_chans; i++) { 1163 + 1164 + if (d40_alloc_mask_set(&phys[i], is_src, 1165 + 0, is_log)) 1166 + goto found_phy; 1167 + } 1168 + } else 1169 + for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1170 + int phy_num = j + event_group * 2; 1171 + for (i = phy_num; i < phy_num + 2; i++) { 1172 + if (d40_alloc_mask_set(&phys[i], is_src, 1173 + 0, is_log)) 1174 + goto found_phy; 1175 + } 1176 + } 1177 + return -EINVAL; 1178 + found_phy: 1179 + d40c->phy_chan = &phys[i]; 1180 + d40c->log_num = D40_PHY_CHAN; 1181 + goto out; 1182 + } 1183 + if (dev_type == -1) 1184 + return -EINVAL; 1185 + 1186 + /* Find logical channel */ 1187 + for (j = 0; j < d40c->base->num_phy_chans; j += 8) { 1188 + int phy_num = j + event_group * 2; 1189 + /* 1190 + * Spread logical channels across all available physical rather 1191 + * than pack every logical channel at the first available phy 1192 + * channels. 1193 + */ 1194 + if (is_src) { 1195 + for (i = phy_num; i < phy_num + 2; i++) { 1196 + if (d40_alloc_mask_set(&phys[i], is_src, 1197 + event_line, is_log)) 1198 + goto found_log; 1199 + } 1200 + } else { 1201 + for (i = phy_num + 1; i >= phy_num; i--) { 1202 + if (d40_alloc_mask_set(&phys[i], is_src, 1203 + event_line, is_log)) 1204 + goto found_log; 1205 + } 1206 + } 1207 + } 1208 + return -EINVAL; 1209 + 1210 + found_log: 1211 + d40c->phy_chan = &phys[i]; 1212 + d40c->log_num = log_num; 1213 + out: 1214 + 1215 + if (is_log) 1216 + d40c->base->lookup_log_chans[d40c->log_num] = d40c; 1217 + else 1218 + d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; 1219 + 1220 + return 0; 1221 + 1222 + } 1223 + 1224 + static int d40_config_chan(struct d40_chan *d40c, 1225 + struct stedma40_chan_cfg *info) 1226 + { 1227 + 1228 + /* Fill in basic CFG register values */ 1229 + d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, 1230 + &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); 1231 + 1232 + if (d40c->log_num != D40_PHY_CHAN) { 1233 + d40_log_cfg(&d40c->dma_cfg, 1234 + &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); 1235 + 1236 + if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 1237 + d40c->lcpa = d40c->base->lcpa_base + 1238 + d40c->dma_cfg.src_dev_type * 32; 1239 + else 1240 + d40c->lcpa = d40c->base->lcpa_base + 1241 + d40c->dma_cfg.dst_dev_type * 32 + 16; 1242 + } 1243 + 1244 + /* Write channel configuration to the DMA */ 1245 + return d40_config_write(d40c); 1246 + } 1247 + 1248 + static int d40_config_memcpy(struct d40_chan *d40c) 1249 + { 1250 + dma_cap_mask_t cap = d40c->chan.device->cap_mask; 1251 + 1252 + if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { 1253 + d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; 1254 + d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; 1255 + d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> 1256 + memcpy[d40c->chan.chan_id]; 1257 + 1258 + } else if (dma_has_cap(DMA_MEMCPY, cap) && 1259 + dma_has_cap(DMA_SLAVE, cap)) { 1260 + d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; 1261 + } else { 1262 + dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", 1263 + __func__); 1264 + return -EINVAL; 1265 + } 1266 + 1267 + return 0; 1268 + } 1269 + 1270 + 1271 + static int d40_free_dma(struct d40_chan 
*d40c) 1272 + { 1273 + 1274 + int res = 0; 1275 + u32 event, dir; 1276 + struct d40_phy_res *phy = d40c->phy_chan; 1277 + bool is_src; 1278 + 1279 + /* Terminate all queued and active transfers */ 1280 + d40_term_all(d40c); 1281 + 1282 + if (phy == NULL) { 1283 + dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", 1284 + __func__); 1285 + return -EINVAL; 1286 + } 1287 + 1288 + if (phy->allocated_src == D40_ALLOC_FREE && 1289 + phy->allocated_dst == D40_ALLOC_FREE) { 1290 + dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", 1291 + __func__); 1292 + return -EINVAL; 1293 + } 1294 + 1295 + 1296 + res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1297 + if (res) { 1298 + dev_err(&d40c->chan.dev->device, "[%s] suspend\n", 1299 + __func__); 1300 + return res; 1301 + } 1302 + 1303 + if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1304 + d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1305 + event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1306 + dir = D40_CHAN_REG_SDLNK; 1307 + is_src = false; 1308 + } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1309 + event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1310 + dir = D40_CHAN_REG_SSLNK; 1311 + is_src = true; 1312 + } else { 1313 + dev_err(&d40c->chan.dev->device, 1314 + "[%s] Unknown direction\n", __func__); 1315 + return -EINVAL; 1316 + } 1317 + 1318 + if (d40c->log_num != D40_PHY_CHAN) { 1319 + /* 1320 + * Release logical channel, deactivate the event line during 1321 + * the time physical res is suspended. 1322 + */ 1323 + writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) & 1324 + D40_EVENTLINE_MASK(event), 1325 + d40c->base->virtbase + D40_DREG_PCBASE + 1326 + phy->num * D40_DREG_PCDELTA + dir); 1327 + 1328 + d40c->base->lookup_log_chans[d40c->log_num] = NULL; 1329 + 1330 + /* 1331 + * Check if there are more logical allocation 1332 + * on this phy channel. 
1333 + */ 1334 + if (!d40_alloc_mask_free(phy, is_src, event)) { 1335 + /* Resume the other logical channels if any */ 1336 + if (d40_chan_has_events(d40c)) { 1337 + res = d40_channel_execute_command(d40c, 1338 + D40_DMA_RUN); 1339 + if (res) { 1340 + dev_err(&d40c->chan.dev->device, 1341 + "[%s] Executing RUN command\n", 1342 + __func__); 1343 + return res; 1344 + } 1345 + } 1346 + return 0; 1347 + } 1348 + } else 1349 + d40_alloc_mask_free(phy, is_src, 0); 1350 + 1351 + /* Release physical channel */ 1352 + res = d40_channel_execute_command(d40c, D40_DMA_STOP); 1353 + if (res) { 1354 + dev_err(&d40c->chan.dev->device, 1355 + "[%s] Failed to stop channel\n", __func__); 1356 + return res; 1357 + } 1358 + d40c->phy_chan = NULL; 1359 + /* Invalidate channel type */ 1360 + d40c->dma_cfg.channel_type = 0; 1361 + d40c->base->lookup_phy_chans[phy->num] = NULL; 1362 + 1363 + return 0; 1364 + 1365 + 1366 + } 1367 + 1368 + static int d40_pause(struct dma_chan *chan) 1369 + { 1370 + struct d40_chan *d40c = 1371 + container_of(chan, struct d40_chan, chan); 1372 + int res; 1373 + 1374 + unsigned long flags; 1375 + 1376 + spin_lock_irqsave(&d40c->lock, flags); 1377 + 1378 + res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1379 + if (res == 0) { 1380 + if (d40c->log_num != D40_PHY_CHAN) { 1381 + d40_config_set_event(d40c, false); 1382 + /* Resume the other logical channels if any */ 1383 + if (d40_chan_has_events(d40c)) 1384 + res = d40_channel_execute_command(d40c, 1385 + D40_DMA_RUN); 1386 + } 1387 + } 1388 + 1389 + spin_unlock_irqrestore(&d40c->lock, flags); 1390 + return res; 1391 + } 1392 + 1393 + static bool d40_is_paused(struct d40_chan *d40c) 1394 + { 1395 + bool is_paused = false; 1396 + unsigned long flags; 1397 + void __iomem *active_reg; 1398 + u32 status; 1399 + u32 event; 1400 + int res; 1401 + 1402 + spin_lock_irqsave(&d40c->lock, flags); 1403 + 1404 + if (d40c->log_num == D40_PHY_CHAN) { 1405 + if (d40c->phy_chan->num % 2 == 0) 1406 + active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; 1407 + else 1408 + active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; 1409 + 1410 + status = (readl(active_reg) & 1411 + D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> 1412 + D40_CHAN_POS(d40c->phy_chan->num); 1413 + if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) 1414 + is_paused = true; 1415 + 1416 + goto _exit; 1417 + } 1418 + 1419 + res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1420 + if (res != 0) 1421 + goto _exit; 1422 + 1423 + if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1424 + d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) 1425 + event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); 1426 + else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 1427 + event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); 1428 + else { 1429 + dev_err(&d40c->chan.dev->device, 1430 + "[%s] Unknown direction\n", __func__); 1431 + goto _exit; 1432 + } 1433 + status = d40_chan_has_events(d40c); 1434 + status = (status & D40_EVENTLINE_MASK(event)) >> 1435 + D40_EVENTLINE_POS(event); 1436 + 1437 + if (status != D40_DMA_RUN) 1438 + is_paused = true; 1439 + 1440 + /* Resume the other logical channels if any */ 1441 + if (d40_chan_has_events(d40c)) 1442 + res = d40_channel_execute_command(d40c, 1443 + D40_DMA_RUN); 1444 + 1445 + _exit: 1446 + spin_unlock_irqrestore(&d40c->lock, flags); 1447 + return is_paused; 1448 + 1449 + } 1450 + 1451 + 1452 + static bool d40_tx_is_linked(struct d40_chan *d40c) 1453 + { 1454 + bool is_link; 1455 + 1456 + if (d40c->log_num != D40_PHY_CHAN) 1457 + 
is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; 1458 + else 1459 + is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + 1460 + d40c->phy_chan->num * D40_DREG_PCDELTA + 1461 + D40_CHAN_REG_SDLNK) & 1462 + D40_SREG_LNK_PHYS_LNK_MASK; 1463 + return is_link; 1464 + } 1465 + 1466 + static u32 d40_residue(struct d40_chan *d40c) 1467 + { 1468 + u32 num_elt; 1469 + 1470 + if (d40c->log_num != D40_PHY_CHAN) 1471 + num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) 1472 + >> D40_MEM_LCSP2_ECNT_POS; 1473 + else 1474 + num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + 1475 + d40c->phy_chan->num * D40_DREG_PCDELTA + 1476 + D40_CHAN_REG_SDELT) & 1477 + D40_SREG_ELEM_PHY_ECNT_MASK) >> D40_SREG_ELEM_PHY_ECNT_POS; 1478 + return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 1479 + } 1480 + 1481 + static int d40_resume(struct dma_chan *chan) 1482 + { 1483 + struct d40_chan *d40c = 1484 + container_of(chan, struct d40_chan, chan); 1485 + int res = 0; 1486 + unsigned long flags; 1487 + 1488 + spin_lock_irqsave(&d40c->lock, flags); 1489 + 1490 + if (d40c->log_num != D40_PHY_CHAN) { 1491 + res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); 1492 + if (res) 1493 + goto out; 1494 + 1495 + /* If bytes left to transfer or linked tx resume job */ 1496 + if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { 1497 + d40_config_set_event(d40c, true); 1498 + res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1499 + } 1500 + } else if (d40_residue(d40c) || d40_tx_is_linked(d40c)) 1501 + res = d40_channel_execute_command(d40c, D40_DMA_RUN); 1502 + 1503 + out: 1504 + spin_unlock_irqrestore(&d40c->lock, flags); 1505 + return res; 1506 + } 1507 + 1508 + static u32 stedma40_residue(struct dma_chan *chan) 1509 + { 1510 + struct d40_chan *d40c = 1511 + container_of(chan, struct d40_chan, chan); 1512 + u32 bytes_left; 1513 + unsigned long flags; 1514 + 1515 + spin_lock_irqsave(&d40c->lock, flags); 1516 + bytes_left = d40_residue(d40c); 1517 + spin_unlock_irqrestore(&d40c->lock, flags); 1518 + 1519 + return bytes_left; 1520 + } 1521 + 1522 + /* Public DMA functions in addition to the DMA engine framework */ 1523 + 1524 + int stedma40_set_psize(struct dma_chan *chan, 1525 + int src_psize, 1526 + int dst_psize) 1527 + { 1528 + struct d40_chan *d40c = 1529 + container_of(chan, struct d40_chan, chan); 1530 + unsigned long flags; 1531 + 1532 + spin_lock_irqsave(&d40c->lock, flags); 1533 + 1534 + if (d40c->log_num != D40_PHY_CHAN) { 1535 + d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; 1536 + d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; 1537 + d40c->log_def.lcsp1 |= src_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; 1538 + d40c->log_def.lcsp3 |= dst_psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; 1539 + goto out; 1540 + } 1541 + 1542 + if (src_psize == STEDMA40_PSIZE_PHY_1) 1543 + d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); 1544 + else { 1545 + d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; 1546 + d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << 1547 + D40_SREG_CFG_PSIZE_POS); 1548 + d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; 1549 + } 1550 + 1551 + if (dst_psize == STEDMA40_PSIZE_PHY_1) 1552 + d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); 1553 + else { 1554 + d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; 1555 + d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << 1556 + D40_SREG_CFG_PSIZE_POS); 1557 + d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; 1558 + } 1559 + out: 1560 + spin_unlock_irqrestore(&d40c->lock, flags); 1561 + return 
0; 1562 + } 1563 + EXPORT_SYMBOL(stedma40_set_psize); 1564 + 1565 + struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, 1566 + struct scatterlist *sgl_dst, 1567 + struct scatterlist *sgl_src, 1568 + unsigned int sgl_len, 1569 + unsigned long flags) 1570 + { 1571 + int res; 1572 + struct d40_desc *d40d; 1573 + struct d40_chan *d40c = container_of(chan, struct d40_chan, 1574 + chan); 1575 + unsigned long flg; 1576 + int lli_max = d40c->base->plat_data->llis_per_log; 1577 + 1578 + 1579 + spin_lock_irqsave(&d40c->lock, flg); 1580 + d40d = d40_desc_get(d40c); 1581 + 1582 + if (d40d == NULL) 1583 + goto err; 1584 + 1585 + memset(d40d, 0, sizeof(struct d40_desc)); 1586 + d40d->lli_len = sgl_len; 1587 + 1588 + d40d->txd.flags = flags; 1589 + 1590 + if (d40c->log_num != D40_PHY_CHAN) { 1591 + if (sgl_len > 1) 1592 + /* 1593 + * Check if there is space available in lcla. If not, 1594 + * split list into 1-length and run only in lcpa 1595 + * space. 1596 + */ 1597 + if (d40_lcla_id_get(d40c, 1598 + &d40c->base->lcla_pool) != 0) 1599 + lli_max = 1; 1600 + 1601 + if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { 1602 + dev_err(&d40c->chan.dev->device, 1603 + "[%s] Out of memory\n", __func__); 1604 + goto err; 1605 + } 1606 + 1607 + (void) d40_log_sg_to_lli(d40c->lcla.src_id, 1608 + sgl_src, 1609 + sgl_len, 1610 + d40d->lli_log.src, 1611 + d40c->log_def.lcsp1, 1612 + d40c->dma_cfg.src_info.data_width, 1613 + flags & DMA_PREP_INTERRUPT, lli_max, 1614 + d40c->base->plat_data->llis_per_log); 1615 + 1616 + (void) d40_log_sg_to_lli(d40c->lcla.dst_id, 1617 + sgl_dst, 1618 + sgl_len, 1619 + d40d->lli_log.dst, 1620 + d40c->log_def.lcsp3, 1621 + d40c->dma_cfg.dst_info.data_width, 1622 + flags & DMA_PREP_INTERRUPT, lli_max, 1623 + d40c->base->plat_data->llis_per_log); 1624 + 1625 + 1626 + } else { 1627 + if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1628 + dev_err(&d40c->chan.dev->device, 1629 + "[%s] Out of memory\n", __func__); 1630 + goto err; 1631 + } 1632 + 1633 + res = d40_phy_sg_to_lli(sgl_src, 1634 + sgl_len, 1635 + 0, 1636 + d40d->lli_phy.src, 1637 + d40d->lli_phy.src_addr, 1638 + d40c->src_def_cfg, 1639 + d40c->dma_cfg.src_info.data_width, 1640 + d40c->dma_cfg.src_info.psize, 1641 + true); 1642 + 1643 + if (res < 0) 1644 + goto err; 1645 + 1646 + res = d40_phy_sg_to_lli(sgl_dst, 1647 + sgl_len, 1648 + 0, 1649 + d40d->lli_phy.dst, 1650 + d40d->lli_phy.dst_addr, 1651 + d40c->dst_def_cfg, 1652 + d40c->dma_cfg.dst_info.data_width, 1653 + d40c->dma_cfg.dst_info.psize, 1654 + true); 1655 + 1656 + if (res < 0) 1657 + goto err; 1658 + 1659 + (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1660 + d40d->lli_pool.size, DMA_TO_DEVICE); 1661 + } 1662 + 1663 + dma_async_tx_descriptor_init(&d40d->txd, chan); 1664 + 1665 + d40d->txd.tx_submit = d40_tx_submit; 1666 + 1667 + spin_unlock_irqrestore(&d40c->lock, flg); 1668 + 1669 + return &d40d->txd; 1670 + err: 1671 + spin_unlock_irqrestore(&d40c->lock, flg); 1672 + return NULL; 1673 + } 1674 + EXPORT_SYMBOL(stedma40_memcpy_sg); 1675 + 1676 + bool stedma40_filter(struct dma_chan *chan, void *data) 1677 + { 1678 + struct stedma40_chan_cfg *info = data; 1679 + struct d40_chan *d40c = 1680 + container_of(chan, struct d40_chan, chan); 1681 + int err; 1682 + 1683 + if (data) { 1684 + err = d40_validate_conf(d40c, info); 1685 + if (!err) 1686 + d40c->dma_cfg = *info; 1687 + } else 1688 + err = d40_config_memcpy(d40c); 1689 + 1690 + return err == 0; 1691 + } 1692 + EXPORT_SYMBOL(stedma40_filter); 1693 + 1694 + /* DMA ENGINE functions */ 
1695 + static int d40_alloc_chan_resources(struct dma_chan *chan) 1696 + { 1697 + int err; 1698 + unsigned long flags; 1699 + struct d40_chan *d40c = 1700 + container_of(chan, struct d40_chan, chan); 1701 + 1702 + spin_lock_irqsave(&d40c->lock, flags); 1703 + 1704 + d40c->completed = chan->cookie = 1; 1705 + 1706 + /* 1707 + * If no dma configuration is set (channel_type == 0) 1708 + * use default configuration 1709 + */ 1710 + if (d40c->dma_cfg.channel_type == 0) { 1711 + err = d40_config_memcpy(d40c); 1712 + if (err) 1713 + goto err_alloc; 1714 + } 1715 + 1716 + err = d40_allocate_channel(d40c); 1717 + if (err) { 1718 + dev_err(&d40c->chan.dev->device, 1719 + "[%s] Failed to allocate channel\n", __func__); 1720 + goto err_alloc; 1721 + } 1722 + 1723 + err = d40_config_chan(d40c, &d40c->dma_cfg); 1724 + if (err) { 1725 + dev_err(&d40c->chan.dev->device, 1726 + "[%s] Failed to configure channel\n", 1727 + __func__); 1728 + goto err_config; 1729 + } 1730 + 1731 + spin_unlock_irqrestore(&d40c->lock, flags); 1732 + return 0; 1733 + 1734 + err_config: 1735 + (void) d40_free_dma(d40c); 1736 + err_alloc: 1737 + spin_unlock_irqrestore(&d40c->lock, flags); 1738 + dev_err(&d40c->chan.dev->device, 1739 + "[%s] Channel allocation failed\n", __func__); 1740 + return -EINVAL; 1741 + } 1742 + 1743 + static void d40_free_chan_resources(struct dma_chan *chan) 1744 + { 1745 + struct d40_chan *d40c = 1746 + container_of(chan, struct d40_chan, chan); 1747 + int err; 1748 + unsigned long flags; 1749 + 1750 + spin_lock_irqsave(&d40c->lock, flags); 1751 + 1752 + err = d40_free_dma(d40c); 1753 + 1754 + if (err) 1755 + dev_err(&d40c->chan.dev->device, 1756 + "[%s] Failed to free channel\n", __func__); 1757 + spin_unlock_irqrestore(&d40c->lock, flags); 1758 + } 1759 + 1760 + static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, 1761 + dma_addr_t dst, 1762 + dma_addr_t src, 1763 + size_t size, 1764 + unsigned long flags) 1765 + { 1766 + struct d40_desc *d40d; 1767 + struct d40_chan *d40c = container_of(chan, struct d40_chan, 1768 + chan); 1769 + unsigned long flg; 1770 + int err = 0; 1771 + 1772 + spin_lock_irqsave(&d40c->lock, flg); 1773 + d40d = d40_desc_get(d40c); 1774 + 1775 + if (d40d == NULL) { 1776 + dev_err(&d40c->chan.dev->device, 1777 + "[%s] Descriptor is NULL\n", __func__); 1778 + goto err; 1779 + } 1780 + 1781 + memset(d40d, 0, sizeof(struct d40_desc)); 1782 + 1783 + d40d->txd.flags = flags; 1784 + 1785 + dma_async_tx_descriptor_init(&d40d->txd, chan); 1786 + 1787 + d40d->txd.tx_submit = d40_tx_submit; 1788 + 1789 + if (d40c->log_num != D40_PHY_CHAN) { 1790 + 1791 + if (d40_pool_lli_alloc(d40d, 1, true) < 0) { 1792 + dev_err(&d40c->chan.dev->device, 1793 + "[%s] Out of memory\n", __func__); 1794 + goto err; 1795 + } 1796 + d40d->lli_len = 1; 1797 + 1798 + d40_log_fill_lli(d40d->lli_log.src, 1799 + src, 1800 + size, 1801 + 0, 1802 + d40c->log_def.lcsp1, 1803 + d40c->dma_cfg.src_info.data_width, 1804 + true, true); 1805 + 1806 + d40_log_fill_lli(d40d->lli_log.dst, 1807 + dst, 1808 + size, 1809 + 0, 1810 + d40c->log_def.lcsp3, 1811 + d40c->dma_cfg.dst_info.data_width, 1812 + true, true); 1813 + 1814 + } else { 1815 + 1816 + if (d40_pool_lli_alloc(d40d, 1, false) < 0) { 1817 + dev_err(&d40c->chan.dev->device, 1818 + "[%s] Out of memory\n", __func__); 1819 + goto err; 1820 + } 1821 + 1822 + err = d40_phy_fill_lli(d40d->lli_phy.src, 1823 + src, 1824 + size, 1825 + d40c->dma_cfg.src_info.psize, 1826 + 0, 1827 + d40c->src_def_cfg, 1828 + true, 1829 + d40c->dma_cfg.src_info.data_width, 
1830 + false); 1831 + if (err) 1832 + goto err_fill_lli; 1833 + 1834 + err = d40_phy_fill_lli(d40d->lli_phy.dst, 1835 + dst, 1836 + size, 1837 + d40c->dma_cfg.dst_info.psize, 1838 + 0, 1839 + d40c->dst_def_cfg, 1840 + true, 1841 + d40c->dma_cfg.dst_info.data_width, 1842 + false); 1843 + 1844 + if (err) 1845 + goto err_fill_lli; 1846 + 1847 + (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1848 + d40d->lli_pool.size, DMA_TO_DEVICE); 1849 + } 1850 + 1851 + spin_unlock_irqrestore(&d40c->lock, flg); 1852 + return &d40d->txd; 1853 + 1854 + err_fill_lli: 1855 + dev_err(&d40c->chan.dev->device, 1856 + "[%s] Failed filling in PHY LLI\n", __func__); 1857 + d40_pool_lli_free(d40d); 1858 + err: 1859 + spin_unlock_irqrestore(&d40c->lock, flg); 1860 + return NULL; 1861 + } 1862 + 1863 + static int d40_prep_slave_sg_log(struct d40_desc *d40d, 1864 + struct d40_chan *d40c, 1865 + struct scatterlist *sgl, 1866 + unsigned int sg_len, 1867 + enum dma_data_direction direction, 1868 + unsigned long flags) 1869 + { 1870 + dma_addr_t dev_addr = 0; 1871 + int total_size; 1872 + int lli_max = d40c->base->plat_data->llis_per_log; 1873 + 1874 + if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { 1875 + dev_err(&d40c->chan.dev->device, 1876 + "[%s] Out of memory\n", __func__); 1877 + return -ENOMEM; 1878 + } 1879 + 1880 + d40d->lli_len = sg_len; 1881 + d40d->lli_tcount = 0; 1882 + 1883 + if (sg_len > 1) 1884 + /* 1885 + * Check if there is space available in lcla. 1886 + * If not, split list into 1-length and run only 1887 + * in lcpa space. 1888 + */ 1889 + if (d40_lcla_id_get(d40c, &d40c->base->lcla_pool) != 0) 1890 + lli_max = 1; 1891 + 1892 + if (direction == DMA_FROM_DEVICE) { 1893 + dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 1894 + total_size = d40_log_sg_to_dev(&d40c->lcla, 1895 + sgl, sg_len, 1896 + &d40d->lli_log, 1897 + &d40c->log_def, 1898 + d40c->dma_cfg.src_info.data_width, 1899 + d40c->dma_cfg.dst_info.data_width, 1900 + direction, 1901 + flags & DMA_PREP_INTERRUPT, 1902 + dev_addr, lli_max, 1903 + d40c->base->plat_data->llis_per_log); 1904 + } else if (direction == DMA_TO_DEVICE) { 1905 + dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 1906 + total_size = d40_log_sg_to_dev(&d40c->lcla, 1907 + sgl, sg_len, 1908 + &d40d->lli_log, 1909 + &d40c->log_def, 1910 + d40c->dma_cfg.src_info.data_width, 1911 + d40c->dma_cfg.dst_info.data_width, 1912 + direction, 1913 + flags & DMA_PREP_INTERRUPT, 1914 + dev_addr, lli_max, 1915 + d40c->base->plat_data->llis_per_log); 1916 + } else 1917 + return -EINVAL; 1918 + if (total_size < 0) 1919 + return -EINVAL; 1920 + 1921 + return 0; 1922 + } 1923 + 1924 + static int d40_prep_slave_sg_phy(struct d40_desc *d40d, 1925 + struct d40_chan *d40c, 1926 + struct scatterlist *sgl, 1927 + unsigned int sgl_len, 1928 + enum dma_data_direction direction, 1929 + unsigned long flags) 1930 + { 1931 + dma_addr_t src_dev_addr; 1932 + dma_addr_t dst_dev_addr; 1933 + int res; 1934 + 1935 + if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { 1936 + dev_err(&d40c->chan.dev->device, 1937 + "[%s] Out of memory\n", __func__); 1938 + return -ENOMEM; 1939 + } 1940 + 1941 + d40d->lli_len = sgl_len; 1942 + d40d->lli_tcount = 0; 1943 + 1944 + if (direction == DMA_FROM_DEVICE) { 1945 + dst_dev_addr = 0; 1946 + src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; 1947 + } else if (direction == DMA_TO_DEVICE) { 1948 + dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; 1949 + src_dev_addr = 0; 1950 + } 
else 1951 + return -EINVAL; 1952 + 1953 + res = d40_phy_sg_to_lli(sgl, 1954 + sgl_len, 1955 + src_dev_addr, 1956 + d40d->lli_phy.src, 1957 + d40d->lli_phy.src_addr, 1958 + d40c->src_def_cfg, 1959 + d40c->dma_cfg.src_info.data_width, 1960 + d40c->dma_cfg.src_info.psize, 1961 + true); 1962 + if (res < 0) 1963 + return res; 1964 + 1965 + res = d40_phy_sg_to_lli(sgl, 1966 + sgl_len, 1967 + dst_dev_addr, 1968 + d40d->lli_phy.dst, 1969 + d40d->lli_phy.dst_addr, 1970 + d40c->dst_def_cfg, 1971 + d40c->dma_cfg.dst_info.data_width, 1972 + d40c->dma_cfg.dst_info.psize, 1973 + true); 1974 + if (res < 0) 1975 + return res; 1976 + 1977 + (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, 1978 + d40d->lli_pool.size, DMA_TO_DEVICE); 1979 + return 0; 1980 + } 1981 + 1982 + static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, 1983 + struct scatterlist *sgl, 1984 + unsigned int sg_len, 1985 + enum dma_data_direction direction, 1986 + unsigned long flags) 1987 + { 1988 + struct d40_desc *d40d; 1989 + struct d40_chan *d40c = container_of(chan, struct d40_chan, 1990 + chan); 1991 + unsigned long flg; 1992 + int err; 1993 + 1994 + if (d40c->dma_cfg.pre_transfer) 1995 + d40c->dma_cfg.pre_transfer(chan, 1996 + d40c->dma_cfg.pre_transfer_data, 1997 + sg_dma_len(sgl)); 1998 + 1999 + spin_lock_irqsave(&d40c->lock, flg); 2000 + d40d = d40_desc_get(d40c); 2001 + spin_unlock_irqrestore(&d40c->lock, flg); 2002 + 2003 + if (d40d == NULL) 2004 + return NULL; 2005 + 2006 + memset(d40d, 0, sizeof(struct d40_desc)); 2007 + 2008 + if (d40c->log_num != D40_PHY_CHAN) 2009 + err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, 2010 + direction, flags); 2011 + else 2012 + err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, 2013 + direction, flags); 2014 + if (err) { 2015 + dev_err(&d40c->chan.dev->device, 2016 + "[%s] Failed to prepare %s slave sg job: %d\n", 2017 + __func__, 2018 + d40c->log_num != D40_PHY_CHAN ? 
"log" : "phy", err); 2019 + return NULL; 2020 + } 2021 + 2022 + d40d->txd.flags = flags; 2023 + 2024 + dma_async_tx_descriptor_init(&d40d->txd, chan); 2025 + 2026 + d40d->txd.tx_submit = d40_tx_submit; 2027 + 2028 + return &d40d->txd; 2029 + } 2030 + 2031 + static enum dma_status d40_tx_status(struct dma_chan *chan, 2032 + dma_cookie_t cookie, 2033 + struct dma_tx_state *txstate) 2034 + { 2035 + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2036 + dma_cookie_t last_used; 2037 + dma_cookie_t last_complete; 2038 + int ret; 2039 + 2040 + last_complete = d40c->completed; 2041 + last_used = chan->cookie; 2042 + 2043 + if (d40_is_paused(d40c)) 2044 + ret = DMA_PAUSED; 2045 + else 2046 + ret = dma_async_is_complete(cookie, last_complete, last_used); 2047 + 2048 + dma_set_tx_state(txstate, last_complete, last_used, 2049 + stedma40_residue(chan)); 2050 + 2051 + return ret; 2052 + } 2053 + 2054 + static void d40_issue_pending(struct dma_chan *chan) 2055 + { 2056 + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2057 + unsigned long flags; 2058 + 2059 + spin_lock_irqsave(&d40c->lock, flags); 2060 + 2061 + /* Busy means that pending jobs are already being processed */ 2062 + if (!d40c->busy) 2063 + (void) d40_queue_start(d40c); 2064 + 2065 + spin_unlock_irqrestore(&d40c->lock, flags); 2066 + } 2067 + 2068 + static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 2069 + unsigned long arg) 2070 + { 2071 + unsigned long flags; 2072 + struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); 2073 + 2074 + switch (cmd) { 2075 + case DMA_TERMINATE_ALL: 2076 + spin_lock_irqsave(&d40c->lock, flags); 2077 + d40_term_all(d40c); 2078 + spin_unlock_irqrestore(&d40c->lock, flags); 2079 + return 0; 2080 + case DMA_PAUSE: 2081 + return d40_pause(chan); 2082 + case DMA_RESUME: 2083 + return d40_resume(chan); 2084 + } 2085 + 2086 + /* Other commands are unimplemented */ 2087 + return -ENXIO; 2088 + } 2089 + 2090 + /* Initialization functions */ 2091 + 2092 + static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, 2093 + struct d40_chan *chans, int offset, 2094 + int num_chans) 2095 + { 2096 + int i = 0; 2097 + struct d40_chan *d40c; 2098 + 2099 + INIT_LIST_HEAD(&dma->channels); 2100 + 2101 + for (i = offset; i < offset + num_chans; i++) { 2102 + d40c = &chans[i]; 2103 + d40c->base = base; 2104 + d40c->chan.device = dma; 2105 + 2106 + /* Invalidate lcla element */ 2107 + d40c->lcla.src_id = -1; 2108 + d40c->lcla.dst_id = -1; 2109 + 2110 + spin_lock_init(&d40c->lock); 2111 + 2112 + d40c->log_num = D40_PHY_CHAN; 2113 + 2114 + INIT_LIST_HEAD(&d40c->free); 2115 + INIT_LIST_HEAD(&d40c->active); 2116 + INIT_LIST_HEAD(&d40c->queue); 2117 + INIT_LIST_HEAD(&d40c->client); 2118 + 2119 + d40c->free_len = 0; 2120 + 2121 + tasklet_init(&d40c->tasklet, dma_tasklet, 2122 + (unsigned long) d40c); 2123 + 2124 + list_add_tail(&d40c->chan.device_node, 2125 + &dma->channels); 2126 + } 2127 + } 2128 + 2129 + static int __init d40_dmaengine_init(struct d40_base *base, 2130 + int num_reserved_chans) 2131 + { 2132 + int err ; 2133 + 2134 + d40_chan_init(base, &base->dma_slave, base->log_chans, 2135 + 0, base->num_log_chans); 2136 + 2137 + dma_cap_zero(base->dma_slave.cap_mask); 2138 + dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); 2139 + 2140 + base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; 2141 + base->dma_slave.device_free_chan_resources = d40_free_chan_resources; 2142 + base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; 
2143 + base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; 2144 + base->dma_slave.device_tx_status = d40_tx_status; 2145 + base->dma_slave.device_issue_pending = d40_issue_pending; 2146 + base->dma_slave.device_control = d40_control; 2147 + base->dma_slave.dev = base->dev; 2148 + 2149 + err = dma_async_device_register(&base->dma_slave); 2150 + 2151 + if (err) { 2152 + dev_err(base->dev, 2153 + "[%s] Failed to register slave channels\n", 2154 + __func__); 2155 + goto failure1; 2156 + } 2157 + 2158 + d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2159 + base->num_log_chans, base->plat_data->memcpy_len); 2160 + 2161 + dma_cap_zero(base->dma_memcpy.cap_mask); 2162 + dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); 2163 + 2164 + base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; 2165 + base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; 2166 + base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; 2167 + base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; 2168 + base->dma_memcpy.device_tx_status = d40_tx_status; 2169 + base->dma_memcpy.device_issue_pending = d40_issue_pending; 2170 + base->dma_memcpy.device_control = d40_control; 2171 + base->dma_memcpy.dev = base->dev; 2172 + /* 2173 + * This controller can only access address at even 2174 + * 32bit boundaries, i.e. 2^2 2175 + */ 2176 + base->dma_memcpy.copy_align = 2; 2177 + 2178 + err = dma_async_device_register(&base->dma_memcpy); 2179 + 2180 + if (err) { 2181 + dev_err(base->dev, 2182 + "[%s] Failed to regsiter memcpy only channels\n", 2183 + __func__); 2184 + goto failure2; 2185 + } 2186 + 2187 + d40_chan_init(base, &base->dma_both, base->phy_chans, 2188 + 0, num_reserved_chans); 2189 + 2190 + dma_cap_zero(base->dma_both.cap_mask); 2191 + dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); 2192 + dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); 2193 + 2194 + base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; 2195 + base->dma_both.device_free_chan_resources = d40_free_chan_resources; 2196 + base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; 2197 + base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; 2198 + base->dma_both.device_tx_status = d40_tx_status; 2199 + base->dma_both.device_issue_pending = d40_issue_pending; 2200 + base->dma_both.device_control = d40_control; 2201 + base->dma_both.dev = base->dev; 2202 + base->dma_both.copy_align = 2; 2203 + err = dma_async_device_register(&base->dma_both); 2204 + 2205 + if (err) { 2206 + dev_err(base->dev, 2207 + "[%s] Failed to register logical and physical capable channels\n", 2208 + __func__); 2209 + goto failure3; 2210 + } 2211 + return 0; 2212 + failure3: 2213 + dma_async_device_unregister(&base->dma_memcpy); 2214 + failure2: 2215 + dma_async_device_unregister(&base->dma_slave); 2216 + failure1: 2217 + return err; 2218 + } 2219 + 2220 + /* Initialization functions. 
*/ 2221 + 2222 + static int __init d40_phy_res_init(struct d40_base *base) 2223 + { 2224 + int i; 2225 + int num_phy_chans_avail = 0; 2226 + u32 val[2]; 2227 + int odd_even_bit = -2; 2228 + 2229 + val[0] = readl(base->virtbase + D40_DREG_PRSME); 2230 + val[1] = readl(base->virtbase + D40_DREG_PRSMO); 2231 + 2232 + for (i = 0; i < base->num_phy_chans; i++) { 2233 + base->phy_res[i].num = i; 2234 + odd_even_bit += 2 * ((i % 2) == 0); 2235 + if (((val[i % 2] >> odd_even_bit) & 3) == 1) { 2236 + /* Mark security only channels as occupied */ 2237 + base->phy_res[i].allocated_src = D40_ALLOC_PHY; 2238 + base->phy_res[i].allocated_dst = D40_ALLOC_PHY; 2239 + } else { 2240 + base->phy_res[i].allocated_src = D40_ALLOC_FREE; 2241 + base->phy_res[i].allocated_dst = D40_ALLOC_FREE; 2242 + num_phy_chans_avail++; 2243 + } 2244 + spin_lock_init(&base->phy_res[i].lock); 2245 + } 2246 + dev_info(base->dev, "%d of %d physical DMA channels available\n", 2247 + num_phy_chans_avail, base->num_phy_chans); 2248 + 2249 + /* Verify settings extended vs standard */ 2250 + val[0] = readl(base->virtbase + D40_DREG_PRTYP); 2251 + 2252 + for (i = 0; i < base->num_phy_chans; i++) { 2253 + 2254 + if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && 2255 + (val[0] & 0x3) != 1) 2256 + dev_info(base->dev, 2257 + "[%s] INFO: channel %d is misconfigured (%d)\n", 2258 + __func__, i, val[0] & 0x3); 2259 + 2260 + val[0] = val[0] >> 2; 2261 + } 2262 + 2263 + return num_phy_chans_avail; 2264 + } 2265 + 2266 + static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) 2267 + { 2268 + static const struct d40_reg_val dma_id_regs[] = { 2269 + /* Peripheral Id */ 2270 + { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, 2271 + { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, 2272 + /* 2273 + * D40_DREG_PERIPHID2 Depends on HW revision: 2274 + * MOP500/HREF ED has 0x0008, 2275 + * ? has 0x0018, 2276 + * HREF V1 has 0x0028 2277 + */ 2278 + { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, 2279 + 2280 + /* PCell Id */ 2281 + { .reg = D40_DREG_CELLID0, .val = 0x000d}, 2282 + { .reg = D40_DREG_CELLID1, .val = 0x00f0}, 2283 + { .reg = D40_DREG_CELLID2, .val = 0x0005}, 2284 + { .reg = D40_DREG_CELLID3, .val = 0x00b1} 2285 + }; 2286 + struct stedma40_platform_data *plat_data; 2287 + struct clk *clk = NULL; 2288 + void __iomem *virtbase = NULL; 2289 + struct resource *res = NULL; 2290 + struct d40_base *base = NULL; 2291 + int num_log_chans = 0; 2292 + int num_phy_chans; 2293 + int i; 2294 + 2295 + clk = clk_get(&pdev->dev, NULL); 2296 + 2297 + if (IS_ERR(clk)) { 2298 + dev_err(&pdev->dev, "[%s] No matching clock found\n", 2299 + __func__); 2300 + goto failure; 2301 + } 2302 + 2303 + clk_enable(clk); 2304 + 2305 + /* Get IO for DMAC base address */ 2306 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); 2307 + if (!res) 2308 + goto failure; 2309 + 2310 + if (request_mem_region(res->start, resource_size(res), 2311 + D40_NAME " I/O base") == NULL) 2312 + goto failure; 2313 + 2314 + virtbase = ioremap(res->start, resource_size(res)); 2315 + if (!virtbase) 2316 + goto failure; 2317 + 2318 + /* HW version check */ 2319 + for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { 2320 + if (dma_id_regs[i].val != 2321 + readl(virtbase + dma_id_regs[i].reg)) { 2322 + dev_err(&pdev->dev, 2323 + "[%s] Unknown hardware! 
Expected 0x%x at 0x%x but got 0x%x\n", 2324 + __func__, 2325 + dma_id_regs[i].val, 2326 + dma_id_regs[i].reg, 2327 + readl(virtbase + dma_id_regs[i].reg)); 2328 + goto failure; 2329 + } 2330 + } 2331 + 2332 + i = readl(virtbase + D40_DREG_PERIPHID2); 2333 + 2334 + if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { 2335 + dev_err(&pdev->dev, 2336 + "[%s] Unknown designer! Got %x wanted %x\n", 2337 + __func__, i & 0xf, D40_PERIPHID2_DESIGNER); 2338 + goto failure; 2339 + } 2340 + 2341 + /* The number of physical channels on this HW */ 2342 + num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 2343 + 2344 + dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", 2345 + (i >> 4) & 0xf, res->start); 2346 + 2347 + plat_data = pdev->dev.platform_data; 2348 + 2349 + /* Count the number of logical channels in use */ 2350 + for (i = 0; i < plat_data->dev_len; i++) 2351 + if (plat_data->dev_rx[i] != 0) 2352 + num_log_chans++; 2353 + 2354 + for (i = 0; i < plat_data->dev_len; i++) 2355 + if (plat_data->dev_tx[i] != 0) 2356 + num_log_chans++; 2357 + 2358 + base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 2359 + (num_phy_chans + num_log_chans + plat_data->memcpy_len) * 2360 + sizeof(struct d40_chan), GFP_KERNEL); 2361 + 2362 + if (base == NULL) { 2363 + dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); 2364 + goto failure; 2365 + } 2366 + 2367 + base->clk = clk; 2368 + base->num_phy_chans = num_phy_chans; 2369 + base->num_log_chans = num_log_chans; 2370 + base->phy_start = res->start; 2371 + base->phy_size = resource_size(res); 2372 + base->virtbase = virtbase; 2373 + base->plat_data = plat_data; 2374 + base->dev = &pdev->dev; 2375 + base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); 2376 + base->log_chans = &base->phy_chans[num_phy_chans]; 2377 + 2378 + base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), 2379 + GFP_KERNEL); 2380 + if (!base->phy_res) 2381 + goto failure; 2382 + 2383 + base->lookup_phy_chans = kzalloc(num_phy_chans * 2384 + sizeof(struct d40_chan *), 2385 + GFP_KERNEL); 2386 + if (!base->lookup_phy_chans) 2387 + goto failure; 2388 + 2389 + if (num_log_chans + plat_data->memcpy_len) { 2390 + /* 2391 + * The max number of logical channels are event lines for all 2392 + * src devices and dst devices 2393 + */ 2394 + base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * 2395 + sizeof(struct d40_chan *), 2396 + GFP_KERNEL); 2397 + if (!base->lookup_log_chans) 2398 + goto failure; 2399 + } 2400 + base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), 2401 + GFP_KERNEL); 2402 + if (!base->lcla_pool.alloc_map) 2403 + goto failure; 2404 + 2405 + return base; 2406 + 2407 + failure: 2408 + if (clk) { 2409 + clk_disable(clk); 2410 + clk_put(clk); 2411 + } 2412 + if (virtbase) 2413 + iounmap(virtbase); 2414 + if (res) 2415 + release_mem_region(res->start, 2416 + resource_size(res)); 2417 + if (virtbase) 2418 + iounmap(virtbase); 2419 + 2420 + if (base) { 2421 + kfree(base->lcla_pool.alloc_map); 2422 + kfree(base->lookup_log_chans); 2423 + kfree(base->lookup_phy_chans); 2424 + kfree(base->phy_res); 2425 + kfree(base); 2426 + } 2427 + 2428 + return NULL; 2429 + } 2430 + 2431 + static void __init d40_hw_init(struct d40_base *base) 2432 + { 2433 + 2434 + static const struct d40_reg_val dma_init_reg[] = { 2435 + /* Clock every part of the DMA block from start */ 2436 + { .reg = D40_DREG_GCC, .val = 0x0000ff01}, 2437 + 2438 + /* Interrupts on all logical channels */ 2439 + { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, 2440 + { .reg = 
D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, 2441 + { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, 2442 + { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, 2443 + { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, 2444 + { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, 2445 + { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, 2446 + { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, 2447 + { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, 2448 + { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, 2449 + { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, 2450 + { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} 2451 + }; 2452 + int i; 2453 + u32 prmseo[2] = {0, 0}; 2454 + u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; 2455 + u32 pcmis = 0; 2456 + u32 pcicr = 0; 2457 + 2458 + for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) 2459 + writel(dma_init_reg[i].val, 2460 + base->virtbase + dma_init_reg[i].reg); 2461 + 2462 + /* Configure all our dma channels to default settings */ 2463 + for (i = 0; i < base->num_phy_chans; i++) { 2464 + 2465 + activeo[i % 2] = activeo[i % 2] << 2; 2466 + 2467 + if (base->phy_res[base->num_phy_chans - i - 1].allocated_src 2468 + == D40_ALLOC_PHY) { 2469 + activeo[i % 2] |= 3; 2470 + continue; 2471 + } 2472 + 2473 + /* Enable interrupt # */ 2474 + pcmis = (pcmis << 1) | 1; 2475 + 2476 + /* Clear interrupt # */ 2477 + pcicr = (pcicr << 1) | 1; 2478 + 2479 + /* Set channel to physical mode */ 2480 + prmseo[i % 2] = prmseo[i % 2] << 2; 2481 + prmseo[i % 2] |= 1; 2482 + 2483 + } 2484 + 2485 + writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); 2486 + writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); 2487 + writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); 2488 + writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); 2489 + 2490 + /* Write which interrupt to enable */ 2491 + writel(pcmis, base->virtbase + D40_DREG_PCMIS); 2492 + 2493 + /* Write which interrupt to clear */ 2494 + writel(pcicr, base->virtbase + D40_DREG_PCICR); 2495 + 2496 + } 2497 + 2498 + static int __init d40_probe(struct platform_device *pdev) 2499 + { 2500 + int err; 2501 + int ret = -ENOENT; 2502 + struct d40_base *base; 2503 + struct resource *res = NULL; 2504 + int num_reserved_chans; 2505 + u32 val; 2506 + 2507 + base = d40_hw_detect_init(pdev); 2508 + 2509 + if (!base) 2510 + goto failure; 2511 + 2512 + num_reserved_chans = d40_phy_res_init(base); 2513 + 2514 + platform_set_drvdata(pdev, base); 2515 + 2516 + spin_lock_init(&base->interrupt_lock); 2517 + spin_lock_init(&base->execmd_lock); 2518 + 2519 + /* Get IO for logical channel parameter address */ 2520 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); 2521 + if (!res) { 2522 + ret = -ENOENT; 2523 + dev_err(&pdev->dev, 2524 + "[%s] No \"lcpa\" memory resource\n", 2525 + __func__); 2526 + goto failure; 2527 + } 2528 + base->lcpa_size = resource_size(res); 2529 + base->phy_lcpa = res->start; 2530 + 2531 + if (request_mem_region(res->start, resource_size(res), 2532 + D40_NAME " I/O lcpa") == NULL) { 2533 + ret = -EBUSY; 2534 + dev_err(&pdev->dev, 2535 + "[%s] Failed to request LCPA region 0x%x-0x%x\n", 2536 + __func__, res->start, res->end); 2537 + goto failure; 2538 + } 2539 + 2540 + /* We make use of ESRAM memory for this. 
*/ 2541 + val = readl(base->virtbase + D40_DREG_LCPA); 2542 + if (res->start != val && val != 0) { 2543 + dev_warn(&pdev->dev, 2544 + "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", 2545 + __func__, val, res->start); 2546 + } else 2547 + writel(res->start, base->virtbase + D40_DREG_LCPA); 2548 + 2549 + base->lcpa_base = ioremap(res->start, resource_size(res)); 2550 + if (!base->lcpa_base) { 2551 + ret = -ENOMEM; 2552 + dev_err(&pdev->dev, 2553 + "[%s] Failed to ioremap LCPA region\n", 2554 + __func__); 2555 + goto failure; 2556 + } 2557 + /* Get IO for logical channel link address */ 2558 + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcla"); 2559 + if (!res) { 2560 + ret = -ENOENT; 2561 + dev_err(&pdev->dev, 2562 + "[%s] No \"lcla\" resource defined\n", 2563 + __func__); 2564 + goto failure; 2565 + } 2566 + 2567 + base->lcla_pool.base_size = resource_size(res); 2568 + base->lcla_pool.phy = res->start; 2569 + 2570 + if (request_mem_region(res->start, resource_size(res), 2571 + D40_NAME " I/O lcla") == NULL) { 2572 + ret = -EBUSY; 2573 + dev_err(&pdev->dev, 2574 + "[%s] Failed to request LCLA region 0x%x-0x%x\n", 2575 + __func__, res->start, res->end); 2576 + goto failure; 2577 + } 2578 + val = readl(base->virtbase + D40_DREG_LCLA); 2579 + if (res->start != val && val != 0) { 2580 + dev_warn(&pdev->dev, 2581 + "[%s] Mismatch LCLA dma 0x%x, def 0x%x\n", 2582 + __func__, val, res->start); 2583 + } else 2584 + writel(res->start, base->virtbase + D40_DREG_LCLA); 2585 + 2586 + base->lcla_pool.base = ioremap(res->start, resource_size(res)); 2587 + if (!base->lcla_pool.base) { 2588 + ret = -ENOMEM; 2589 + dev_err(&pdev->dev, 2590 + "[%s] Failed to ioremap LCLA 0x%x-0x%x\n", 2591 + __func__, res->start, res->end); 2592 + goto failure; 2593 + } 2594 + 2595 + spin_lock_init(&base->lcla_pool.lock); 2596 + 2597 + base->lcla_pool.num_blocks = base->num_phy_chans; 2598 + 2599 + base->irq = platform_get_irq(pdev, 0); 2600 + 2601 + ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); 2602 + 2603 + if (ret) { 2604 + dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); 2605 + goto failure; 2606 + } 2607 + 2608 + err = d40_dmaengine_init(base, num_reserved_chans); 2609 + if (err) 2610 + goto failure; 2611 + 2612 + d40_hw_init(base); 2613 + 2614 + dev_info(base->dev, "initialized\n"); 2615 + return 0; 2616 + 2617 + failure: 2618 + if (base) { 2619 + if (base->virtbase) 2620 + iounmap(base->virtbase); 2621 + if (base->lcla_pool.phy) 2622 + release_mem_region(base->lcla_pool.phy, 2623 + base->lcla_pool.base_size); 2624 + if (base->phy_lcpa) 2625 + release_mem_region(base->phy_lcpa, 2626 + base->lcpa_size); 2627 + if (base->phy_start) 2628 + release_mem_region(base->phy_start, 2629 + base->phy_size); 2630 + if (base->clk) { 2631 + clk_disable(base->clk); 2632 + clk_put(base->clk); 2633 + } 2634 + 2635 + kfree(base->lcla_pool.alloc_map); 2636 + kfree(base->lookup_log_chans); 2637 + kfree(base->lookup_phy_chans); 2638 + kfree(base->phy_res); 2639 + kfree(base); 2640 + } 2641 + 2642 + dev_err(&pdev->dev, "[%s] probe failed\n", __func__); 2643 + return ret; 2644 + } 2645 + 2646 + static struct platform_driver d40_driver = { 2647 + .driver = { 2648 + .owner = THIS_MODULE, 2649 + .name = D40_NAME, 2650 + }, 2651 + }; 2652 + 2653 + int __init stedma40_init(void) 2654 + { 2655 + return platform_driver_probe(&d40_driver, d40_probe); 2656 + } 2657 + arch_initcall(stedma40_init);
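d40_control() and d40_tx_status() in the file above implement the generic control and status hooks this merge moves the dmaengine API towards, including the ability to report DMA_PAUSED. A hedged sketch of how a client is expected to drive them; the helper is illustrative only, and 'chan'/'cookie' are assumed to come from an earlier prep/submit:

    /*
     * Illustration only: exercising device_control()/device_tx_status()
     * as implemented by d40_control() and d40_tx_status().
     */
    #include <linux/kernel.h>
    #include <linux/dmaengine.h>

    static void my_pause_resume(struct dma_chan *chan, dma_cookie_t cookie)
    {
            struct dma_tx_state state;
            enum dma_status status;

            /* third argument is the newly added 'arg'; unused for these commands */
            chan->device->device_control(chan, DMA_PAUSE, 0);

            status = chan->device->device_tx_status(chan, cookie, &state);
            if (status == DMA_PAUSED)
                    pr_info("paused, residue %u bytes\n", state.residue);

            chan->device->device_control(chan, DMA_RESUME, 0);

            /* or abort everything still queued on the channel */
            chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
    }

Note that the residue reported here comes from stedma40_residue(), so a paused channel can tell its client how much of the current job is still outstanding.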
+454
drivers/dma/ste_dma40_ll.c
···
··· 1 + /* 2 + * driver/dma/ste_dma40_ll.c 3 + * 4 + * Copyright (C) ST-Ericsson 2007-2010 5 + * License terms: GNU General Public License (GPL) version 2 6 + * Author: Per Friden <per.friden@stericsson.com> 7 + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> 8 + */ 9 + 10 + #include <linux/kernel.h> 11 + #include <plat/ste_dma40.h> 12 + 13 + #include "ste_dma40_ll.h" 14 + 15 + /* Sets up proper LCSP1 and LCSP3 register for a logical channel */ 16 + void d40_log_cfg(struct stedma40_chan_cfg *cfg, 17 + u32 *lcsp1, u32 *lcsp3) 18 + { 19 + u32 l3 = 0; /* dst */ 20 + u32 l1 = 0; /* src */ 21 + 22 + /* src is mem? -> increase address pos */ 23 + if (cfg->dir == STEDMA40_MEM_TO_PERIPH || 24 + cfg->dir == STEDMA40_MEM_TO_MEM) 25 + l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS; 26 + 27 + /* dst is mem? -> increase address pos */ 28 + if (cfg->dir == STEDMA40_PERIPH_TO_MEM || 29 + cfg->dir == STEDMA40_MEM_TO_MEM) 30 + l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS; 31 + 32 + /* src is hw? -> master port 1 */ 33 + if (cfg->dir == STEDMA40_PERIPH_TO_MEM || 34 + cfg->dir == STEDMA40_PERIPH_TO_PERIPH) 35 + l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS; 36 + 37 + /* dst is hw? -> master port 1 */ 38 + if (cfg->dir == STEDMA40_MEM_TO_PERIPH || 39 + cfg->dir == STEDMA40_PERIPH_TO_PERIPH) 40 + l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; 41 + 42 + l3 |= 1 << D40_MEM_LCSP3_DCFG_TIM_POS; 43 + l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; 44 + l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; 45 + l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; 46 + l3 |= 1 << D40_MEM_LCSP3_DTCP_POS; 47 + 48 + l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; 49 + l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; 50 + l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; 51 + l1 |= 1 << D40_MEM_LCSP1_STCP_POS; 52 + 53 + *lcsp1 = l1; 54 + *lcsp3 = l3; 55 + 56 + } 57 + 58 + /* Sets up SRC and DST CFG register for both logical and physical channels */ 59 + void d40_phy_cfg(struct stedma40_chan_cfg *cfg, 60 + u32 *src_cfg, u32 *dst_cfg, bool is_log) 61 + { 62 + u32 src = 0; 63 + u32 dst = 0; 64 + 65 + if (!is_log) { 66 + /* Physical channel */ 67 + if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) || 68 + (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { 69 + /* Set master port to 1 */ 70 + src |= 1 << D40_SREG_CFG_MST_POS; 71 + src |= D40_TYPE_TO_EVENT(cfg->src_dev_type); 72 + 73 + if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) 74 + src |= 1 << D40_SREG_CFG_PHY_TM_POS; 75 + else 76 + src |= 3 << D40_SREG_CFG_PHY_TM_POS; 77 + } 78 + if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) || 79 + (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { 80 + /* Set master port to 1 */ 81 + dst |= 1 << D40_SREG_CFG_MST_POS; 82 + dst |= D40_TYPE_TO_EVENT(cfg->dst_dev_type); 83 + 84 + if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) 85 + dst |= 1 << D40_SREG_CFG_PHY_TM_POS; 86 + else 87 + dst |= 3 << D40_SREG_CFG_PHY_TM_POS; 88 + } 89 + /* Interrupt on end of transfer for destination */ 90 + dst |= 1 << D40_SREG_CFG_TIM_POS; 91 + 92 + /* Generate interrupt on error */ 93 + src |= 1 << D40_SREG_CFG_EIM_POS; 94 + dst |= 1 << D40_SREG_CFG_EIM_POS; 95 + 96 + /* PSIZE */ 97 + if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) { 98 + src |= 1 << D40_SREG_CFG_PHY_PEN_POS; 99 + src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS; 100 + } 101 + if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) { 102 + dst |= 1 << D40_SREG_CFG_PHY_PEN_POS; 103 + dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS; 104 + } 105 + 106 + /* Element size */ 107 + src |= cfg->src_info.data_width << 
D40_SREG_CFG_ESIZE_POS; 108 + dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; 109 + 110 + } else { 111 + /* Logical channel */ 112 + dst |= 1 << D40_SREG_CFG_LOG_GIM_POS; 113 + src |= 1 << D40_SREG_CFG_LOG_GIM_POS; 114 + } 115 + 116 + if (cfg->channel_type & STEDMA40_HIGH_PRIORITY_CHANNEL) { 117 + src |= 1 << D40_SREG_CFG_PRI_POS; 118 + dst |= 1 << D40_SREG_CFG_PRI_POS; 119 + } 120 + 121 + src |= cfg->src_info.endianess << D40_SREG_CFG_LBE_POS; 122 + dst |= cfg->dst_info.endianess << D40_SREG_CFG_LBE_POS; 123 + 124 + *src_cfg = src; 125 + *dst_cfg = dst; 126 + } 127 + 128 + int d40_phy_fill_lli(struct d40_phy_lli *lli, 129 + dma_addr_t data, 130 + u32 data_size, 131 + int psize, 132 + dma_addr_t next_lli, 133 + u32 reg_cfg, 134 + bool term_int, 135 + u32 data_width, 136 + bool is_device) 137 + { 138 + int num_elems; 139 + 140 + if (psize == STEDMA40_PSIZE_PHY_1) 141 + num_elems = 1; 142 + else 143 + num_elems = 2 << psize; 144 + 145 + /* 146 + * Size is 16bit. data_width is 8, 16, 32 or 64 bit 147 + * Block large than 64 KiB must be split. 148 + */ 149 + if (data_size > (0xffff << data_width)) 150 + return -EINVAL; 151 + 152 + /* Must be aligned */ 153 + if (!IS_ALIGNED(data, 0x1 << data_width)) 154 + return -EINVAL; 155 + 156 + /* Transfer size can't be smaller than (num_elms * elem_size) */ 157 + if (data_size < num_elems * (0x1 << data_width)) 158 + return -EINVAL; 159 + 160 + /* The number of elements. IE now many chunks */ 161 + lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS; 162 + 163 + /* 164 + * Distance to next element sized entry. 165 + * Usually the size of the element unless you want gaps. 166 + */ 167 + if (!is_device) 168 + lli->reg_elt |= (0x1 << data_width) << 169 + D40_SREG_ELEM_PHY_EIDX_POS; 170 + 171 + /* Where the data is */ 172 + lli->reg_ptr = data; 173 + lli->reg_cfg = reg_cfg; 174 + 175 + /* If this scatter list entry is the last one, no next link */ 176 + if (next_lli == 0) 177 + lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS; 178 + else 179 + lli->reg_lnk = next_lli; 180 + 181 + /* Set/clear interrupt generation on this link item.*/ 182 + if (term_int) 183 + lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS; 184 + else 185 + lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS); 186 + 187 + /* Post link */ 188 + lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS; 189 + 190 + return 0; 191 + } 192 + 193 + int d40_phy_sg_to_lli(struct scatterlist *sg, 194 + int sg_len, 195 + dma_addr_t target, 196 + struct d40_phy_lli *lli, 197 + dma_addr_t lli_phys, 198 + u32 reg_cfg, 199 + u32 data_width, 200 + int psize, 201 + bool term_int) 202 + { 203 + int total_size = 0; 204 + int i; 205 + struct scatterlist *current_sg = sg; 206 + dma_addr_t next_lli_phys; 207 + dma_addr_t dst; 208 + int err = 0; 209 + 210 + for_each_sg(sg, current_sg, sg_len, i) { 211 + 212 + total_size += sg_dma_len(current_sg); 213 + 214 + /* If this scatter list entry is the last one, no next link */ 215 + if (sg_len - 1 == i) 216 + next_lli_phys = 0; 217 + else 218 + next_lli_phys = ALIGN(lli_phys + (i + 1) * 219 + sizeof(struct d40_phy_lli), 220 + D40_LLI_ALIGN); 221 + 222 + if (target) 223 + dst = target; 224 + else 225 + dst = sg_phys(current_sg); 226 + 227 + err = d40_phy_fill_lli(&lli[i], 228 + dst, 229 + sg_dma_len(current_sg), 230 + psize, 231 + next_lli_phys, 232 + reg_cfg, 233 + !next_lli_phys, 234 + data_width, 235 + target == dst); 236 + if (err) 237 + goto err; 238 + } 239 + 240 + return total_size; 241 + err: 242 + return err; 243 + } 244 + 245 + 246 + void 
d40_phy_lli_write(void __iomem *virtbase, 247 + u32 phy_chan_num, 248 + struct d40_phy_lli *lli_dst, 249 + struct d40_phy_lli *lli_src) 250 + { 251 + 252 + writel(lli_src->reg_cfg, virtbase + D40_DREG_PCBASE + 253 + phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSCFG); 254 + writel(lli_src->reg_elt, virtbase + D40_DREG_PCBASE + 255 + phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); 256 + writel(lli_src->reg_ptr, virtbase + D40_DREG_PCBASE + 257 + phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSPTR); 258 + writel(lli_src->reg_lnk, virtbase + D40_DREG_PCBASE + 259 + phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SSLNK); 260 + 261 + writel(lli_dst->reg_cfg, virtbase + D40_DREG_PCBASE + 262 + phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDCFG); 263 + writel(lli_dst->reg_elt, virtbase + D40_DREG_PCBASE + 264 + phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); 265 + writel(lli_dst->reg_ptr, virtbase + D40_DREG_PCBASE + 266 + phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDPTR); 267 + writel(lli_dst->reg_lnk, virtbase + D40_DREG_PCBASE + 268 + phy_chan_num * D40_DREG_PCDELTA + D40_CHAN_REG_SDLNK); 269 + 270 + } 271 + 272 + /* DMA logical lli operations */ 273 + 274 + void d40_log_fill_lli(struct d40_log_lli *lli, 275 + dma_addr_t data, u32 data_size, 276 + u32 lli_next_off, u32 reg_cfg, 277 + u32 data_width, 278 + bool term_int, bool addr_inc) 279 + { 280 + lli->lcsp13 = reg_cfg; 281 + 282 + /* The number of elements to transfer */ 283 + lli->lcsp02 = ((data_size >> data_width) << 284 + D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; 285 + /* 16 LSBs address of the current element */ 286 + lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK; 287 + /* 16 MSBs address of the current element */ 288 + lli->lcsp13 |= data & D40_MEM_LCSP1_SPTR_MASK; 289 + 290 + if (addr_inc) 291 + lli->lcsp13 |= D40_MEM_LCSP1_SCFG_INCR_MASK; 292 + 293 + lli->lcsp13 |= D40_MEM_LCSP3_DTCP_MASK; 294 + /* If this scatter list entry is the last one, no next link */ 295 + lli->lcsp13 |= (lli_next_off << D40_MEM_LCSP1_SLOS_POS) & 296 + D40_MEM_LCSP1_SLOS_MASK; 297 + 298 + if (term_int) 299 + lli->lcsp13 |= D40_MEM_LCSP1_SCFG_TIM_MASK; 300 + else 301 + lli->lcsp13 &= ~D40_MEM_LCSP1_SCFG_TIM_MASK; 302 + } 303 + 304 + int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, 305 + struct scatterlist *sg, 306 + int sg_len, 307 + struct d40_log_lli_bidir *lli, 308 + struct d40_def_lcsp *lcsp, 309 + u32 src_data_width, 310 + u32 dst_data_width, 311 + enum dma_data_direction direction, 312 + bool term_int, dma_addr_t dev_addr, int max_len, 313 + int llis_per_log) 314 + { 315 + int total_size = 0; 316 + struct scatterlist *current_sg = sg; 317 + int i; 318 + u32 next_lli_off_dst; 319 + u32 next_lli_off_src; 320 + 321 + next_lli_off_src = 0; 322 + next_lli_off_dst = 0; 323 + 324 + for_each_sg(sg, current_sg, sg_len, i) { 325 + total_size += sg_dma_len(current_sg); 326 + 327 + /* 328 + * If this scatter list entry is the last one or 329 + * max length, terminate link. 
330 + */ 331 + if (sg_len - 1 == i || ((i+1) % max_len == 0)) { 332 + next_lli_off_src = 0; 333 + next_lli_off_dst = 0; 334 + } else { 335 + if (next_lli_off_dst == 0 && 336 + next_lli_off_src == 0) { 337 + /* The first lli will be at next_lli_off */ 338 + next_lli_off_dst = (lcla->dst_id * 339 + llis_per_log + 1); 340 + next_lli_off_src = (lcla->src_id * 341 + llis_per_log + 1); 342 + } else { 343 + next_lli_off_dst++; 344 + next_lli_off_src++; 345 + } 346 + } 347 + 348 + if (direction == DMA_TO_DEVICE) { 349 + d40_log_fill_lli(&lli->src[i], 350 + sg_phys(current_sg), 351 + sg_dma_len(current_sg), 352 + next_lli_off_src, 353 + lcsp->lcsp1, src_data_width, 354 + term_int && !next_lli_off_src, 355 + true); 356 + d40_log_fill_lli(&lli->dst[i], 357 + dev_addr, 358 + sg_dma_len(current_sg), 359 + next_lli_off_dst, 360 + lcsp->lcsp3, dst_data_width, 361 + /* No next == terminal interrupt */ 362 + term_int && !next_lli_off_dst, 363 + false); 364 + } else { 365 + d40_log_fill_lli(&lli->dst[i], 366 + sg_phys(current_sg), 367 + sg_dma_len(current_sg), 368 + next_lli_off_dst, 369 + lcsp->lcsp3, dst_data_width, 370 + /* No next == terminal interrupt */ 371 + term_int && !next_lli_off_dst, 372 + true); 373 + d40_log_fill_lli(&lli->src[i], 374 + dev_addr, 375 + sg_dma_len(current_sg), 376 + next_lli_off_src, 377 + lcsp->lcsp1, src_data_width, 378 + term_int && !next_lli_off_src, 379 + false); 380 + } 381 + } 382 + return total_size; 383 + } 384 + 385 + int d40_log_sg_to_lli(int lcla_id, 386 + struct scatterlist *sg, 387 + int sg_len, 388 + struct d40_log_lli *lli_sg, 389 + u32 lcsp13, /* src or dst*/ 390 + u32 data_width, 391 + bool term_int, int max_len, int llis_per_log) 392 + { 393 + int total_size = 0; 394 + struct scatterlist *current_sg = sg; 395 + int i; 396 + u32 next_lli_off = 0; 397 + 398 + for_each_sg(sg, current_sg, sg_len, i) { 399 + total_size += sg_dma_len(current_sg); 400 + 401 + /* 402 + * If this scatter list entry is the last one or 403 + * max length, terminate link. 404 + */ 405 + if (sg_len - 1 == i || ((i+1) % max_len == 0)) 406 + next_lli_off = 0; 407 + else { 408 + if (next_lli_off == 0) 409 + /* The first lli will be at next_lli_off */ 410 + next_lli_off = lcla_id * llis_per_log + 1; 411 + else 412 + next_lli_off++; 413 + } 414 + 415 + d40_log_fill_lli(&lli_sg[i], 416 + sg_phys(current_sg), 417 + sg_dma_len(current_sg), 418 + next_lli_off, 419 + lcsp13, data_width, 420 + term_int && !next_lli_off, 421 + true); 422 + } 423 + return total_size; 424 + } 425 + 426 + void d40_log_lli_write(struct d40_log_lli_full *lcpa, 427 + struct d40_log_lli *lcla_src, 428 + struct d40_log_lli *lcla_dst, 429 + struct d40_log_lli *lli_dst, 430 + struct d40_log_lli *lli_src, 431 + int llis_per_log) 432 + { 433 + u32 slos = 0; 434 + u32 dlos = 0; 435 + int i; 436 + 437 + lcpa->lcsp0 = lli_src->lcsp02; 438 + lcpa->lcsp1 = lli_src->lcsp13; 439 + lcpa->lcsp2 = lli_dst->lcsp02; 440 + lcpa->lcsp3 = lli_dst->lcsp13; 441 + 442 + slos = lli_src->lcsp13 & D40_MEM_LCSP1_SLOS_MASK; 443 + dlos = lli_dst->lcsp13 & D40_MEM_LCSP3_DLOS_MASK; 444 + 445 + for (i = 0; (i < llis_per_log) && slos && dlos; i++) { 446 + writel(lli_src[i+1].lcsp02, &lcla_src[i].lcsp02); 447 + writel(lli_src[i+1].lcsp13, &lcla_src[i].lcsp13); 448 + writel(lli_dst[i+1].lcsp02, &lcla_dst[i].lcsp02); 449 + writel(lli_dst[i+1].lcsp13, &lcla_dst[i].lcsp13); 450 + 451 + slos = lli_src[i+1].lcsp13 & D40_MEM_LCSP1_SLOS_MASK; 452 + dlos = lli_dst[i+1].lcsp13 & D40_MEM_LCSP3_DLOS_MASK; 453 + } 454 + }
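For orientation, the per-LLI limits that d40_phy_fill_lli() enforces can be written out as a stand-alone check. This is an illustrative simplification: it ignores the psize-dependent minimum (num_elems) that the real code also requires.

    /*
     * Simplified restatement of d40_phy_fill_lli()'s parameter checks.
     * The element counter is 16 bits and each element is 1 << data_width
     * bytes, so with STEDMA40_ESIZE_32_BIT (data_width == 2) one LLI moves
     * at most 0xffff << 2 bytes and the buffer must be 4-byte aligned.
     */
    #include <linux/kernel.h>
    #include <linux/types.h>

    static bool d40_lli_params_ok(dma_addr_t addr, u32 size, u32 data_width)
    {
            if (size > (0xffff << data_width))      /* 16-bit ECNT field */
                    return false;
            if (!IS_ALIGNED(addr, 1 << data_width)) /* element-sized alignment */
                    return false;
            return (size >> data_width) != 0;       /* at least one whole element */
    }

Larger transfers are handled by chaining LLIs, which is exactly what d40_phy_sg_to_lli() and d40_log_sg_to_lli() above do per scatterlist entry.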
+354
drivers/dma/ste_dma40_ll.h
···
··· 1 + /* 2 + * driver/dma/ste_dma40_ll.h 3 + * 4 + * Copyright (C) ST-Ericsson 2007-2010 5 + * License terms: GNU General Public License (GPL) version 2 6 + * Author: Per Friden <per.friden@stericsson.com> 7 + * Author: Jonas Aaberg <jonas.aberg@stericsson.com> 8 + */ 9 + #ifndef STE_DMA40_LL_H 10 + #define STE_DMA40_LL_H 11 + 12 + #define D40_DREG_PCBASE 0x400 13 + #define D40_DREG_PCDELTA (8 * 4) 14 + #define D40_LLI_ALIGN 16 /* LLI alignment must be 16 bytes. */ 15 + 16 + #define D40_TYPE_TO_GROUP(type) (type / 16) 17 + #define D40_TYPE_TO_EVENT(type) (type % 16) 18 + 19 + /* Most bits of the CFG register are the same in log as in phy mode */ 20 + #define D40_SREG_CFG_MST_POS 15 21 + #define D40_SREG_CFG_TIM_POS 14 22 + #define D40_SREG_CFG_EIM_POS 13 23 + #define D40_SREG_CFG_LOG_INCR_POS 12 24 + #define D40_SREG_CFG_PHY_PEN_POS 12 25 + #define D40_SREG_CFG_PSIZE_POS 10 26 + #define D40_SREG_CFG_ESIZE_POS 8 27 + #define D40_SREG_CFG_PRI_POS 7 28 + #define D40_SREG_CFG_LBE_POS 6 29 + #define D40_SREG_CFG_LOG_GIM_POS 5 30 + #define D40_SREG_CFG_LOG_MFU_POS 4 31 + #define D40_SREG_CFG_PHY_TM_POS 4 32 + #define D40_SREG_CFG_PHY_EVTL_POS 0 33 + 34 + 35 + /* Standard channel parameters - basic mode (element register) */ 36 + #define D40_SREG_ELEM_PHY_ECNT_POS 16 37 + #define D40_SREG_ELEM_PHY_EIDX_POS 0 38 + 39 + #define D40_SREG_ELEM_PHY_ECNT_MASK (0xFFFF << D40_SREG_ELEM_PHY_ECNT_POS) 40 + 41 + /* Standard channel parameters - basic mode (Link register) */ 42 + #define D40_SREG_LNK_PHY_TCP_POS 0 43 + #define D40_SREG_LNK_PHY_LMP_POS 1 44 + #define D40_SREG_LNK_PHY_PRE_POS 2 45 + /* 46 + * Source destination link address. Contains the 47 + * 29-bit byte word aligned address of the reload area. 48 + */ 49 + #define D40_SREG_LNK_PHYS_LNK_MASK 0xFFFFFFF8UL 50 + 51 + /* Standard basic channel logical mode */ 52 + 53 + /* Element register */ 54 + #define D40_SREG_ELEM_LOG_ECNT_POS 16 55 + #define D40_SREG_ELEM_LOG_LIDX_POS 8 56 + #define D40_SREG_ELEM_LOG_LOS_POS 1 57 + #define D40_SREG_ELEM_LOG_TCP_POS 0 58 + 59 + #define D40_SREG_ELEM_LOG_LIDX_MASK (0xFF << D40_SREG_ELEM_LOG_LIDX_POS) 60 + 61 + /* Link register */ 62 + #define D40_DEACTIVATE_EVENTLINE 0x0 63 + #define D40_ACTIVATE_EVENTLINE 0x1 64 + #define D40_EVENTLINE_POS(i) (2 * i) 65 + #define D40_EVENTLINE_MASK(i) (0x3 << D40_EVENTLINE_POS(i)) 66 + 67 + /* Standard basic channel logical params in memory */ 68 + 69 + /* LCSP0 */ 70 + #define D40_MEM_LCSP0_ECNT_POS 16 71 + #define D40_MEM_LCSP0_SPTR_POS 0 72 + 73 + #define D40_MEM_LCSP0_ECNT_MASK (0xFFFF << D40_MEM_LCSP0_ECNT_POS) 74 + #define D40_MEM_LCSP0_SPTR_MASK (0xFFFF << D40_MEM_LCSP0_SPTR_POS) 75 + 76 + /* LCSP1 */ 77 + #define D40_MEM_LCSP1_SPTR_POS 16 78 + #define D40_MEM_LCSP1_SCFG_MST_POS 15 79 + #define D40_MEM_LCSP1_SCFG_TIM_POS 14 80 + #define D40_MEM_LCSP1_SCFG_EIM_POS 13 81 + #define D40_MEM_LCSP1_SCFG_INCR_POS 12 82 + #define D40_MEM_LCSP1_SCFG_PSIZE_POS 10 83 + #define D40_MEM_LCSP1_SCFG_ESIZE_POS 8 84 + #define D40_MEM_LCSP1_SLOS_POS 1 85 + #define D40_MEM_LCSP1_STCP_POS 0 86 + 87 + #define D40_MEM_LCSP1_SPTR_MASK (0xFFFF << D40_MEM_LCSP1_SPTR_POS) 88 + #define D40_MEM_LCSP1_SCFG_TIM_MASK (0x1 << D40_MEM_LCSP1_SCFG_TIM_POS) 89 + #define D40_MEM_LCSP1_SCFG_INCR_MASK (0x1 << D40_MEM_LCSP1_SCFG_INCR_POS) 90 + #define D40_MEM_LCSP1_SCFG_PSIZE_MASK (0x3 << D40_MEM_LCSP1_SCFG_PSIZE_POS) 91 + #define D40_MEM_LCSP1_SLOS_MASK (0x7F << D40_MEM_LCSP1_SLOS_POS) 92 + #define D40_MEM_LCSP1_STCP_MASK (0x1 << D40_MEM_LCSP1_STCP_POS) 93 + 94 + /* LCSP2 */ 95 + #define 
D40_MEM_LCSP2_ECNT_POS 16 96 + 97 + #define D40_MEM_LCSP2_ECNT_MASK (0xFFFF << D40_MEM_LCSP2_ECNT_POS) 98 + 99 + /* LCSP3 */ 100 + #define D40_MEM_LCSP3_DCFG_MST_POS 15 101 + #define D40_MEM_LCSP3_DCFG_TIM_POS 14 102 + #define D40_MEM_LCSP3_DCFG_EIM_POS 13 103 + #define D40_MEM_LCSP3_DCFG_INCR_POS 12 104 + #define D40_MEM_LCSP3_DCFG_PSIZE_POS 10 105 + #define D40_MEM_LCSP3_DCFG_ESIZE_POS 8 106 + #define D40_MEM_LCSP3_DLOS_POS 1 107 + #define D40_MEM_LCSP3_DTCP_POS 0 108 + 109 + #define D40_MEM_LCSP3_DLOS_MASK (0x7F << D40_MEM_LCSP3_DLOS_POS) 110 + #define D40_MEM_LCSP3_DTCP_MASK (0x1 << D40_MEM_LCSP3_DTCP_POS) 111 + 112 + 113 + /* Standard channel parameter register offsets */ 114 + #define D40_CHAN_REG_SSCFG 0x00 115 + #define D40_CHAN_REG_SSELT 0x04 116 + #define D40_CHAN_REG_SSPTR 0x08 117 + #define D40_CHAN_REG_SSLNK 0x0C 118 + #define D40_CHAN_REG_SDCFG 0x10 119 + #define D40_CHAN_REG_SDELT 0x14 120 + #define D40_CHAN_REG_SDPTR 0x18 121 + #define D40_CHAN_REG_SDLNK 0x1C 122 + 123 + /* DMA Register Offsets */ 124 + #define D40_DREG_GCC 0x000 125 + #define D40_DREG_PRTYP 0x004 126 + #define D40_DREG_PRSME 0x008 127 + #define D40_DREG_PRSMO 0x00C 128 + #define D40_DREG_PRMSE 0x010 129 + #define D40_DREG_PRMSO 0x014 130 + #define D40_DREG_PRMOE 0x018 131 + #define D40_DREG_PRMOO 0x01C 132 + #define D40_DREG_LCPA 0x020 133 + #define D40_DREG_LCLA 0x024 134 + #define D40_DREG_ACTIVE 0x050 135 + #define D40_DREG_ACTIVO 0x054 136 + #define D40_DREG_FSEB1 0x058 137 + #define D40_DREG_FSEB2 0x05C 138 + #define D40_DREG_PCMIS 0x060 139 + #define D40_DREG_PCICR 0x064 140 + #define D40_DREG_PCTIS 0x068 141 + #define D40_DREG_PCEIS 0x06C 142 + #define D40_DREG_LCMIS0 0x080 143 + #define D40_DREG_LCMIS1 0x084 144 + #define D40_DREG_LCMIS2 0x088 145 + #define D40_DREG_LCMIS3 0x08C 146 + #define D40_DREG_LCICR0 0x090 147 + #define D40_DREG_LCICR1 0x094 148 + #define D40_DREG_LCICR2 0x098 149 + #define D40_DREG_LCICR3 0x09C 150 + #define D40_DREG_LCTIS0 0x0A0 151 + #define D40_DREG_LCTIS1 0x0A4 152 + #define D40_DREG_LCTIS2 0x0A8 153 + #define D40_DREG_LCTIS3 0x0AC 154 + #define D40_DREG_LCEIS0 0x0B0 155 + #define D40_DREG_LCEIS1 0x0B4 156 + #define D40_DREG_LCEIS2 0x0B8 157 + #define D40_DREG_LCEIS3 0x0BC 158 + #define D40_DREG_STFU 0xFC8 159 + #define D40_DREG_ICFG 0xFCC 160 + #define D40_DREG_PERIPHID0 0xFE0 161 + #define D40_DREG_PERIPHID1 0xFE4 162 + #define D40_DREG_PERIPHID2 0xFE8 163 + #define D40_DREG_PERIPHID3 0xFEC 164 + #define D40_DREG_CELLID0 0xFF0 165 + #define D40_DREG_CELLID1 0xFF4 166 + #define D40_DREG_CELLID2 0xFF8 167 + #define D40_DREG_CELLID3 0xFFC 168 + 169 + /* LLI related structures */ 170 + 171 + /** 172 + * struct d40_phy_lli - The basic configration register for each physical 173 + * channel. 174 + * 175 + * @reg_cfg: The configuration register. 176 + * @reg_elt: The element register. 177 + * @reg_ptr: The pointer register. 178 + * @reg_lnk: The link register. 179 + * 180 + * These registers are set up for both physical and logical transfers 181 + * Note that the bit in each register means differently in logical and 182 + * physical(standard) mode. 183 + * 184 + * This struct must be 16 bytes aligned, and only contain physical registers 185 + * since it will be directly accessed by the DMA. 186 + */ 187 + struct d40_phy_lli { 188 + u32 reg_cfg; 189 + u32 reg_elt; 190 + u32 reg_ptr; 191 + u32 reg_lnk; 192 + }; 193 + 194 + /** 195 + * struct d40_phy_lli_bidir - struct for a transfer. 196 + * 197 + * @src: Register settings for src channel. 
198 + * @dst: Register settings for dst channel. 199 + * @dst_addr: Physical destination address. 200 + * @src_addr: Physical source address. 201 + * 202 + * All DMA transfers have a source and a destination. 203 + */ 204 + 205 + struct d40_phy_lli_bidir { 206 + struct d40_phy_lli *src; 207 + struct d40_phy_lli *dst; 208 + dma_addr_t dst_addr; 209 + dma_addr_t src_addr; 210 + }; 211 + 212 + 213 + /** 214 + * struct d40_log_lli - logical lli configuration 215 + * 216 + * @lcsp02: Either maps to register lcsp0 if src or lcsp2 if dst. 217 + * @lcsp13: Either maps to register lcsp1 if src or lcsp3 if dst. 218 + * 219 + * This struct must be 8 bytes aligned since it will be accessed directy by 220 + * the DMA. Never add any none hw mapped registers to this struct. 221 + */ 222 + 223 + struct d40_log_lli { 224 + u32 lcsp02; 225 + u32 lcsp13; 226 + }; 227 + 228 + /** 229 + * struct d40_log_lli_bidir - For both src and dst 230 + * 231 + * @src: pointer to src lli configuration. 232 + * @dst: pointer to dst lli configuration. 233 + * 234 + * You always have a src and a dst when doing DMA transfers. 235 + */ 236 + 237 + struct d40_log_lli_bidir { 238 + struct d40_log_lli *src; 239 + struct d40_log_lli *dst; 240 + }; 241 + 242 + /** 243 + * struct d40_log_lli_full - LCPA layout 244 + * 245 + * @lcsp0: Logical Channel Standard Param 0 - Src. 246 + * @lcsp1: Logical Channel Standard Param 1 - Src. 247 + * @lcsp2: Logical Channel Standard Param 2 - Dst. 248 + * @lcsp3: Logical Channel Standard Param 3 - Dst. 249 + * 250 + * This struct maps to LCPA physical memory layout. Must map to 251 + * the hw. 252 + */ 253 + struct d40_log_lli_full { 254 + u32 lcsp0; 255 + u32 lcsp1; 256 + u32 lcsp2; 257 + u32 lcsp3; 258 + }; 259 + 260 + /** 261 + * struct d40_def_lcsp - Default LCSP1 and LCSP3 settings 262 + * 263 + * @lcsp3: The default configuration for dst. 264 + * @lcsp1: The default configuration for src. 265 + */ 266 + struct d40_def_lcsp { 267 + u32 lcsp3; 268 + u32 lcsp1; 269 + }; 270 + 271 + /** 272 + * struct d40_lcla_elem - Info for one LCA element. 
273 + * 274 + * @src_id: logical channel src id 275 + * @dst_id: logical channel dst id 276 + * @src: LCPA formated src parameters 277 + * @dst: LCPA formated dst parameters 278 + * 279 + */ 280 + struct d40_lcla_elem { 281 + int src_id; 282 + int dst_id; 283 + struct d40_log_lli *src; 284 + struct d40_log_lli *dst; 285 + }; 286 + 287 + /* Physical channels */ 288 + 289 + void d40_phy_cfg(struct stedma40_chan_cfg *cfg, 290 + u32 *src_cfg, u32 *dst_cfg, bool is_log); 291 + 292 + void d40_log_cfg(struct stedma40_chan_cfg *cfg, 293 + u32 *lcsp1, u32 *lcsp2); 294 + 295 + int d40_phy_sg_to_lli(struct scatterlist *sg, 296 + int sg_len, 297 + dma_addr_t target, 298 + struct d40_phy_lli *lli, 299 + dma_addr_t lli_phys, 300 + u32 reg_cfg, 301 + u32 data_width, 302 + int psize, 303 + bool term_int); 304 + 305 + int d40_phy_fill_lli(struct d40_phy_lli *lli, 306 + dma_addr_t data, 307 + u32 data_size, 308 + int psize, 309 + dma_addr_t next_lli, 310 + u32 reg_cfg, 311 + bool term_int, 312 + u32 data_width, 313 + bool is_device); 314 + 315 + void d40_phy_lli_write(void __iomem *virtbase, 316 + u32 phy_chan_num, 317 + struct d40_phy_lli *lli_dst, 318 + struct d40_phy_lli *lli_src); 319 + 320 + /* Logical channels */ 321 + 322 + void d40_log_fill_lli(struct d40_log_lli *lli, 323 + dma_addr_t data, u32 data_size, 324 + u32 lli_next_off, u32 reg_cfg, 325 + u32 data_width, 326 + bool term_int, bool addr_inc); 327 + 328 + int d40_log_sg_to_dev(struct d40_lcla_elem *lcla, 329 + struct scatterlist *sg, 330 + int sg_len, 331 + struct d40_log_lli_bidir *lli, 332 + struct d40_def_lcsp *lcsp, 333 + u32 src_data_width, 334 + u32 dst_data_width, 335 + enum dma_data_direction direction, 336 + bool term_int, dma_addr_t dev_addr, int max_len, 337 + int llis_per_log); 338 + 339 + void d40_log_lli_write(struct d40_log_lli_full *lcpa, 340 + struct d40_log_lli *lcla_src, 341 + struct d40_log_lli *lcla_dst, 342 + struct d40_log_lli *lli_dst, 343 + struct d40_log_lli *lli_src, 344 + int llis_per_log); 345 + 346 + int d40_log_sg_to_lli(int lcla_id, 347 + struct scatterlist *sg, 348 + int sg_len, 349 + struct d40_log_lli *lli_sg, 350 + u32 lcsp13, /* src or dst*/ 351 + u32 data_width, 352 + bool term_int, int max_len, int llis_per_log); 353 + 354 + #endif /* STE_DMA40_LLI_H */
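The D40_DREG_PCBASE/D40_DREG_PCDELTA/D40_CHAN_REG_* constants describe one parameter bank of eight 32-bit registers (0x20 bytes) per physical channel; d40_phy_lli_write() in ste_dma40_ll.c open-codes the arithmetic. A small helper, for illustration only (the function name is not part of the patch):

    /*
     * Illustration of the per-channel register addressing, e.g. channel 5's
     * SSLNK register sits at 0x400 + 5 * 0x20 + 0x0C = 0x4AC.
     */
    #include <linux/io.h>
    #include "ste_dma40_ll.h"    /* the header above */

    static void __iomem *d40_chan_reg_addr(void __iomem *virtbase,
                                           u32 phy_chan_num, u32 reg)
    {
            return virtbase + D40_DREG_PCBASE +
                   phy_chan_num * D40_DREG_PCDELTA + reg;
    }

    /* usage: writel(lli_src->reg_lnk,
     *               d40_chan_reg_addr(virtbase, 5, D40_CHAN_REG_SSLNK)); */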
+860
drivers/dma/timb_dma.c
···
··· 1 + /* 2 + * timb_dma.c timberdale FPGA DMA driver 3 + * Copyright (c) 2010 Intel Corporation 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 + */ 18 + 19 + /* Supports: 20 + * Timberdale FPGA DMA engine 21 + */ 22 + 23 + #include <linux/dmaengine.h> 24 + #include <linux/dma-mapping.h> 25 + #include <linux/init.h> 26 + #include <linux/interrupt.h> 27 + #include <linux/io.h> 28 + #include <linux/module.h> 29 + #include <linux/platform_device.h> 30 + #include <linux/slab.h> 31 + 32 + #include <linux/timb_dma.h> 33 + 34 + #define DRIVER_NAME "timb-dma" 35 + 36 + /* Global DMA registers */ 37 + #define TIMBDMA_ACR 0x34 38 + #define TIMBDMA_32BIT_ADDR 0x01 39 + 40 + #define TIMBDMA_ISR 0x080000 41 + #define TIMBDMA_IPR 0x080004 42 + #define TIMBDMA_IER 0x080008 43 + 44 + /* Channel specific registers */ 45 + /* RX instances base addresses are 0x00, 0x40, 0x80 ... 46 + * TX instances base addresses are 0x18, 0x58, 0x98 ... 47 + */ 48 + #define TIMBDMA_INSTANCE_OFFSET 0x40 49 + #define TIMBDMA_INSTANCE_TX_OFFSET 0x18 50 + 51 + /* RX registers, relative the instance base */ 52 + #define TIMBDMA_OFFS_RX_DHAR 0x00 53 + #define TIMBDMA_OFFS_RX_DLAR 0x04 54 + #define TIMBDMA_OFFS_RX_LR 0x0C 55 + #define TIMBDMA_OFFS_RX_BLR 0x10 56 + #define TIMBDMA_OFFS_RX_ER 0x14 57 + #define TIMBDMA_RX_EN 0x01 58 + /* bytes per Row, video specific register 59 + * which is placed after the TX registers... 
60 + */ 61 + #define TIMBDMA_OFFS_RX_BPRR 0x30 62 + 63 + /* TX registers, relative the instance base */ 64 + #define TIMBDMA_OFFS_TX_DHAR 0x00 65 + #define TIMBDMA_OFFS_TX_DLAR 0x04 66 + #define TIMBDMA_OFFS_TX_BLR 0x0C 67 + #define TIMBDMA_OFFS_TX_LR 0x14 68 + 69 + 70 + #define TIMB_DMA_DESC_SIZE 8 71 + 72 + struct timb_dma_desc { 73 + struct list_head desc_node; 74 + struct dma_async_tx_descriptor txd; 75 + u8 *desc_list; 76 + unsigned int desc_list_len; 77 + bool interrupt; 78 + }; 79 + 80 + struct timb_dma_chan { 81 + struct dma_chan chan; 82 + void __iomem *membase; 83 + spinlock_t lock; /* Used to protect data structures, 84 + especially the lists and descriptors, 85 + from races between the tasklet and calls 86 + from above */ 87 + dma_cookie_t last_completed_cookie; 88 + bool ongoing; 89 + struct list_head active_list; 90 + struct list_head queue; 91 + struct list_head free_list; 92 + unsigned int bytes_per_line; 93 + enum dma_data_direction direction; 94 + unsigned int descs; /* Descriptors to allocate */ 95 + unsigned int desc_elems; /* number of elems per descriptor */ 96 + }; 97 + 98 + struct timb_dma { 99 + struct dma_device dma; 100 + void __iomem *membase; 101 + struct tasklet_struct tasklet; 102 + struct timb_dma_chan channels[0]; 103 + }; 104 + 105 + static struct device *chan2dev(struct dma_chan *chan) 106 + { 107 + return &chan->dev->device; 108 + } 109 + static struct device *chan2dmadev(struct dma_chan *chan) 110 + { 111 + return chan2dev(chan)->parent->parent; 112 + } 113 + 114 + static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan) 115 + { 116 + int id = td_chan->chan.chan_id; 117 + return (struct timb_dma *)((u8 *)td_chan - 118 + id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma)); 119 + } 120 + 121 + /* Must be called with the spinlock held */ 122 + static void __td_enable_chan_irq(struct timb_dma_chan *td_chan) 123 + { 124 + int id = td_chan->chan.chan_id; 125 + struct timb_dma *td = tdchantotd(td_chan); 126 + u32 ier; 127 + 128 + /* enable interrupt for this channel */ 129 + ier = ioread32(td->membase + TIMBDMA_IER); 130 + ier |= 1 << id; 131 + dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id, 132 + ier); 133 + iowrite32(ier, td->membase + TIMBDMA_IER); 134 + } 135 + 136 + /* Should be called with the spinlock held */ 137 + static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) 138 + { 139 + int id = td_chan->chan.chan_id; 140 + struct timb_dma *td = (struct timb_dma *)((u8 *)td_chan - 141 + id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma)); 142 + u32 isr; 143 + bool done = false; 144 + 145 + dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td); 146 + 147 + isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id); 148 + if (isr) { 149 + iowrite32(isr, td->membase + TIMBDMA_ISR); 150 + done = true; 151 + } 152 + 153 + return done; 154 + } 155 + 156 + static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, 157 + bool single) 158 + { 159 + dma_addr_t addr; 160 + int len; 161 + 162 + addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | 163 + dma_desc[4]; 164 + 165 + len = (dma_desc[3] << 8) | dma_desc[2]; 166 + 167 + if (single) 168 + dma_unmap_single(chan2dev(&td_chan->chan), addr, len, 169 + td_chan->direction); 170 + else 171 + dma_unmap_page(chan2dev(&td_chan->chan), addr, len, 172 + td_chan->direction); 173 + } 174 + 175 + static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) 176 + { 177 + struct timb_dma_chan *td_chan = 
container_of(td_desc->txd.chan, 178 + struct timb_dma_chan, chan); 179 + u8 *descs; 180 + 181 + for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { 182 + __td_unmap_desc(td_chan, descs, single); 183 + if (descs[0] & 0x02) 184 + break; 185 + } 186 + } 187 + 188 + static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, 189 + struct scatterlist *sg, bool last) 190 + { 191 + if (sg_dma_len(sg) > USHORT_MAX) { 192 + dev_err(chan2dev(&td_chan->chan), "Too big sg element\n"); 193 + return -EINVAL; 194 + } 195 + 196 + /* length must be word aligned */ 197 + if (sg_dma_len(sg) % sizeof(u32)) { 198 + dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n", 199 + sg_dma_len(sg)); 200 + return -EINVAL; 201 + } 202 + 203 + dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: %p\n", 204 + dma_desc, (void *)sg_dma_address(sg)); 205 + 206 + dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff; 207 + dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff; 208 + dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff; 209 + dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff; 210 + 211 + dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff; 212 + dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff; 213 + 214 + dma_desc[1] = 0x00; 215 + dma_desc[0] = 0x21 | (last ? 0x02 : 0); /* tran, valid */ 216 + 217 + return 0; 218 + } 219 + 220 + /* Must be called with the spinlock held */ 221 + static void __td_start_dma(struct timb_dma_chan *td_chan) 222 + { 223 + struct timb_dma_desc *td_desc; 224 + 225 + if (td_chan->ongoing) { 226 + dev_err(chan2dev(&td_chan->chan), 227 + "Transfer already ongoing\n"); 228 + return; 229 + } 230 + 231 + td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, 232 + desc_node); 233 + 234 + dev_dbg(chan2dev(&td_chan->chan), 235 + "td_chan: %p, chan: %d, membase: %p\n", 236 + td_chan, td_chan->chan.chan_id, td_chan->membase); 237 + 238 + if (td_chan->direction == DMA_FROM_DEVICE) { 239 + 240 + /* descriptor address */ 241 + iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); 242 + iowrite32(td_desc->txd.phys, td_chan->membase + 243 + TIMBDMA_OFFS_RX_DLAR); 244 + /* Bytes per line */ 245 + iowrite32(td_chan->bytes_per_line, td_chan->membase + 246 + TIMBDMA_OFFS_RX_BPRR); 247 + /* enable RX */ 248 + iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER); 249 + } else { 250 + /* address high */ 251 + iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR); 252 + iowrite32(td_desc->txd.phys, td_chan->membase + 253 + TIMBDMA_OFFS_TX_DLAR); 254 + } 255 + 256 + td_chan->ongoing = true; 257 + 258 + if (td_desc->interrupt) 259 + __td_enable_chan_irq(td_chan); 260 + } 261 + 262 + static void __td_finish(struct timb_dma_chan *td_chan) 263 + { 264 + dma_async_tx_callback callback; 265 + void *param; 266 + struct dma_async_tx_descriptor *txd; 267 + struct timb_dma_desc *td_desc; 268 + 269 + /* can happen if the descriptor is canceled */ 270 + if (list_empty(&td_chan->active_list)) 271 + return; 272 + 273 + td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, 274 + desc_node); 275 + txd = &td_desc->txd; 276 + 277 + dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n", 278 + txd->cookie); 279 + 280 + /* make sure to stop the transfer */ 281 + if (td_chan->direction == DMA_FROM_DEVICE) 282 + iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); 283 + /* Currently no support for stopping DMA transfers 284 + else 285 + iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); 286 + */ 287 + td_chan->last_completed_cookie = txd->cookie; 288 + td_chan->ongoing = false; 289 + 290 
+ callback = txd->callback; 291 + param = txd->callback_param; 292 + 293 + list_move(&td_desc->desc_node, &td_chan->free_list); 294 + 295 + if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) 296 + __td_unmap_descs(td_desc, 297 + txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); 298 + 299 + /* 300 + * The API requires that no submissions are done from a 301 + * callback, so we don't need to drop the lock here 302 + */ 303 + if (callback) 304 + callback(param); 305 + } 306 + 307 + static u32 __td_ier_mask(struct timb_dma *td) 308 + { 309 + int i; 310 + u32 ret = 0; 311 + 312 + for (i = 0; i < td->dma.chancnt; i++) { 313 + struct timb_dma_chan *td_chan = td->channels + i; 314 + if (td_chan->ongoing) { 315 + struct timb_dma_desc *td_desc = 316 + list_entry(td_chan->active_list.next, 317 + struct timb_dma_desc, desc_node); 318 + if (td_desc->interrupt) 319 + ret |= 1 << i; 320 + } 321 + } 322 + 323 + return ret; 324 + } 325 + 326 + static void __td_start_next(struct timb_dma_chan *td_chan) 327 + { 328 + struct timb_dma_desc *td_desc; 329 + 330 + BUG_ON(list_empty(&td_chan->queue)); 331 + BUG_ON(td_chan->ongoing); 332 + 333 + td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc, 334 + desc_node); 335 + 336 + dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n", 337 + __func__, td_desc->txd.cookie); 338 + 339 + list_move(&td_desc->desc_node, &td_chan->active_list); 340 + __td_start_dma(td_chan); 341 + } 342 + 343 + static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd) 344 + { 345 + struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc, 346 + txd); 347 + struct timb_dma_chan *td_chan = container_of(txd->chan, 348 + struct timb_dma_chan, chan); 349 + dma_cookie_t cookie; 350 + 351 + spin_lock_bh(&td_chan->lock); 352 + 353 + cookie = txd->chan->cookie; 354 + if (++cookie < 0) 355 + cookie = 1; 356 + txd->chan->cookie = cookie; 357 + txd->cookie = cookie; 358 + 359 + if (list_empty(&td_chan->active_list)) { 360 + dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, 361 + txd->cookie); 362 + list_add_tail(&td_desc->desc_node, &td_chan->active_list); 363 + __td_start_dma(td_chan); 364 + } else { 365 + dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n", 366 + txd->cookie); 367 + 368 + list_add_tail(&td_desc->desc_node, &td_chan->queue); 369 + } 370 + 371 + spin_unlock_bh(&td_chan->lock); 372 + 373 + return cookie; 374 + } 375 + 376 + static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) 377 + { 378 + struct dma_chan *chan = &td_chan->chan; 379 + struct timb_dma_desc *td_desc; 380 + int err; 381 + 382 + td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL); 383 + if (!td_desc) { 384 + dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); 385 + goto err; 386 + } 387 + 388 + td_desc->desc_list_len = td_chan->desc_elems * TIMB_DMA_DESC_SIZE; 389 + 390 + td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL); 391 + if (!td_desc->desc_list) { 392 + dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); 393 + goto err; 394 + } 395 + 396 + dma_async_tx_descriptor_init(&td_desc->txd, chan); 397 + td_desc->txd.tx_submit = td_tx_submit; 398 + td_desc->txd.flags = DMA_CTRL_ACK; 399 + 400 + td_desc->txd.phys = dma_map_single(chan2dmadev(chan), 401 + td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE); 402 + 403 + err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys); 404 + if (err) { 405 + dev_err(chan2dev(chan), "DMA mapping error: %d\n", err); 406 + goto err; 407 + } 408 + 409 + return td_desc; 410 + err: 411 + 
kfree(td_desc->desc_list); 412 + kfree(td_desc); 413 + 414 + return NULL; 415 + 416 + } 417 + 418 + static void td_free_desc(struct timb_dma_desc *td_desc) 419 + { 420 + dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc); 421 + dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys, 422 + td_desc->desc_list_len, DMA_TO_DEVICE); 423 + 424 + kfree(td_desc->desc_list); 425 + kfree(td_desc); 426 + } 427 + 428 + static void td_desc_put(struct timb_dma_chan *td_chan, 429 + struct timb_dma_desc *td_desc) 430 + { 431 + dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc); 432 + 433 + spin_lock_bh(&td_chan->lock); 434 + list_add(&td_desc->desc_node, &td_chan->free_list); 435 + spin_unlock_bh(&td_chan->lock); 436 + } 437 + 438 + static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan) 439 + { 440 + struct timb_dma_desc *td_desc, *_td_desc; 441 + struct timb_dma_desc *ret = NULL; 442 + 443 + spin_lock_bh(&td_chan->lock); 444 + list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list, 445 + desc_node) { 446 + if (async_tx_test_ack(&td_desc->txd)) { 447 + list_del(&td_desc->desc_node); 448 + ret = td_desc; 449 + break; 450 + } 451 + dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n", 452 + td_desc); 453 + } 454 + spin_unlock_bh(&td_chan->lock); 455 + 456 + return ret; 457 + } 458 + 459 + static int td_alloc_chan_resources(struct dma_chan *chan) 460 + { 461 + struct timb_dma_chan *td_chan = 462 + container_of(chan, struct timb_dma_chan, chan); 463 + int i; 464 + 465 + dev_dbg(chan2dev(chan), "%s: entry\n", __func__); 466 + 467 + BUG_ON(!list_empty(&td_chan->free_list)); 468 + for (i = 0; i < td_chan->descs; i++) { 469 + struct timb_dma_desc *td_desc = td_alloc_init_desc(td_chan); 470 + if (!td_desc) { 471 + if (i) 472 + break; 473 + else { 474 + dev_err(chan2dev(chan), 475 + "Couldnt allocate any descriptors\n"); 476 + return -ENOMEM; 477 + } 478 + } 479 + 480 + td_desc_put(td_chan, td_desc); 481 + } 482 + 483 + spin_lock_bh(&td_chan->lock); 484 + td_chan->last_completed_cookie = 1; 485 + chan->cookie = 1; 486 + spin_unlock_bh(&td_chan->lock); 487 + 488 + return 0; 489 + } 490 + 491 + static void td_free_chan_resources(struct dma_chan *chan) 492 + { 493 + struct timb_dma_chan *td_chan = 494 + container_of(chan, struct timb_dma_chan, chan); 495 + struct timb_dma_desc *td_desc, *_td_desc; 496 + LIST_HEAD(list); 497 + 498 + dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 499 + 500 + /* check that all descriptors are free */ 501 + BUG_ON(!list_empty(&td_chan->active_list)); 502 + BUG_ON(!list_empty(&td_chan->queue)); 503 + 504 + spin_lock_bh(&td_chan->lock); 505 + list_splice_init(&td_chan->free_list, &list); 506 + spin_unlock_bh(&td_chan->lock); 507 + 508 + list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) { 509 + dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__, 510 + td_desc); 511 + td_free_desc(td_desc); 512 + } 513 + } 514 + 515 + static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 516 + struct dma_tx_state *txstate) 517 + { 518 + struct timb_dma_chan *td_chan = 519 + container_of(chan, struct timb_dma_chan, chan); 520 + dma_cookie_t last_used; 521 + dma_cookie_t last_complete; 522 + int ret; 523 + 524 + dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 525 + 526 + last_complete = td_chan->last_completed_cookie; 527 + last_used = chan->cookie; 528 + 529 + ret = dma_async_is_complete(cookie, last_complete, last_used); 530 + 531 + dma_set_tx_state(txstate, last_complete, last_used, 0); 
532 + 533 + dev_dbg(chan2dev(chan), 534 + "%s: exit, ret: %d, last_complete: %d, last_used: %d\n", 535 + __func__, ret, last_complete, last_used); 536 + 537 + return ret; 538 + } 539 + 540 + static void td_issue_pending(struct dma_chan *chan) 541 + { 542 + struct timb_dma_chan *td_chan = 543 + container_of(chan, struct timb_dma_chan, chan); 544 + 545 + dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 546 + spin_lock_bh(&td_chan->lock); 547 + 548 + if (!list_empty(&td_chan->active_list)) 549 + /* transfer ongoing */ 550 + if (__td_dma_done_ack(td_chan)) 551 + __td_finish(td_chan); 552 + 553 + if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue)) 554 + __td_start_next(td_chan); 555 + 556 + spin_unlock_bh(&td_chan->lock); 557 + } 558 + 559 + static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, 560 + struct scatterlist *sgl, unsigned int sg_len, 561 + enum dma_data_direction direction, unsigned long flags) 562 + { 563 + struct timb_dma_chan *td_chan = 564 + container_of(chan, struct timb_dma_chan, chan); 565 + struct timb_dma_desc *td_desc; 566 + struct scatterlist *sg; 567 + unsigned int i; 568 + unsigned int desc_usage = 0; 569 + 570 + if (!sgl || !sg_len) { 571 + dev_err(chan2dev(chan), "%s: No SG list\n", __func__); 572 + return NULL; 573 + } 574 + 575 + /* even channels are for RX, odd for TX */ 576 + if (td_chan->direction != direction) { 577 + dev_err(chan2dev(chan), 578 + "Requesting channel in wrong direction\n"); 579 + return NULL; 580 + } 581 + 582 + td_desc = td_desc_get(td_chan); 583 + if (!td_desc) { 584 + dev_err(chan2dev(chan), "Not enough descriptors available\n"); 585 + return NULL; 586 + } 587 + 588 + td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0; 589 + 590 + for_each_sg(sgl, sg, sg_len, i) { 591 + int err; 592 + if (desc_usage > td_desc->desc_list_len) { 593 + dev_err(chan2dev(chan), "No descriptor space\n"); 594 + return NULL; 595 + } 596 + 597 + err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg, 598 + i == (sg_len - 1)); 599 + if (err) { 600 + dev_err(chan2dev(chan), "Failed to update desc: %d\n", 601 + err); 602 + td_desc_put(td_chan, td_desc); 603 + return NULL; 604 + } 605 + desc_usage += TIMB_DMA_DESC_SIZE; 606 + } 607 + 608 + dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, 609 + td_desc->desc_list_len, DMA_TO_DEVICE); 610 + 611 + return &td_desc->txd; 612 + } 613 + 614 + static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 615 + unsigned long arg) 616 + { 617 + struct timb_dma_chan *td_chan = 618 + container_of(chan, struct timb_dma_chan, chan); 619 + struct timb_dma_desc *td_desc, *_td_desc; 620 + 621 + dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); 622 + 623 + if (cmd != DMA_TERMINATE_ALL) 624 + return -ENXIO; 625 + 626 + /* first the easy part, put the queue into the free list */ 627 + spin_lock_bh(&td_chan->lock); 628 + list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue, 629 + desc_node) 630 + list_move(&td_desc->desc_node, &td_chan->free_list); 631 + 632 + /* now tear down the runnning */ 633 + __td_finish(td_chan); 634 + spin_unlock_bh(&td_chan->lock); 635 + 636 + return 0; 637 + } 638 + 639 + static void td_tasklet(unsigned long data) 640 + { 641 + struct timb_dma *td = (struct timb_dma *)data; 642 + u32 isr; 643 + u32 ipr; 644 + u32 ier; 645 + int i; 646 + 647 + isr = ioread32(td->membase + TIMBDMA_ISR); 648 + ipr = isr & __td_ier_mask(td); 649 + 650 + /* ack the interrupts */ 651 + iowrite32(ipr, td->membase + TIMBDMA_ISR); 652 + 653 + for (i = 0; i 
< td->dma.chancnt; i++) 654 + if (ipr & (1 << i)) { 655 + struct timb_dma_chan *td_chan = td->channels + i; 656 + spin_lock(&td_chan->lock); 657 + __td_finish(td_chan); 658 + if (!list_empty(&td_chan->queue)) 659 + __td_start_next(td_chan); 660 + spin_unlock(&td_chan->lock); 661 + } 662 + 663 + ier = __td_ier_mask(td); 664 + iowrite32(ier, td->membase + TIMBDMA_IER); 665 + } 666 + 667 + 668 + static irqreturn_t td_irq(int irq, void *devid) 669 + { 670 + struct timb_dma *td = devid; 671 + u32 ipr = ioread32(td->membase + TIMBDMA_IPR); 672 + 673 + if (ipr) { 674 + /* disable interrupts, will be re-enabled in tasklet */ 675 + iowrite32(0, td->membase + TIMBDMA_IER); 676 + 677 + tasklet_schedule(&td->tasklet); 678 + 679 + return IRQ_HANDLED; 680 + } else 681 + return IRQ_NONE; 682 + } 683 + 684 + 685 + static int __devinit td_probe(struct platform_device *pdev) 686 + { 687 + struct timb_dma_platform_data *pdata = pdev->dev.platform_data; 688 + struct timb_dma *td; 689 + struct resource *iomem; 690 + int irq; 691 + int err; 692 + int i; 693 + 694 + if (!pdata) { 695 + dev_err(&pdev->dev, "No platform data\n"); 696 + return -EINVAL; 697 + } 698 + 699 + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 700 + if (!iomem) 701 + return -EINVAL; 702 + 703 + irq = platform_get_irq(pdev, 0); 704 + if (irq < 0) 705 + return irq; 706 + 707 + if (!request_mem_region(iomem->start, resource_size(iomem), 708 + DRIVER_NAME)) 709 + return -EBUSY; 710 + 711 + td = kzalloc(sizeof(struct timb_dma) + 712 + sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL); 713 + if (!td) { 714 + err = -ENOMEM; 715 + goto err_release_region; 716 + } 717 + 718 + dev_dbg(&pdev->dev, "Allocated TD: %p\n", td); 719 + 720 + td->membase = ioremap(iomem->start, resource_size(iomem)); 721 + if (!td->membase) { 722 + dev_err(&pdev->dev, "Failed to remap I/O memory\n"); 723 + err = -ENOMEM; 724 + goto err_free_mem; 725 + } 726 + 727 + /* 32bit addressing */ 728 + iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR); 729 + 730 + /* disable and clear any interrupts */ 731 + iowrite32(0x0, td->membase + TIMBDMA_IER); 732 + iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR); 733 + 734 + tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td); 735 + 736 + err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td); 737 + if (err) { 738 + dev_err(&pdev->dev, "Failed to request IRQ\n"); 739 + goto err_tasklet_kill; 740 + } 741 + 742 + td->dma.device_alloc_chan_resources = td_alloc_chan_resources; 743 + td->dma.device_free_chan_resources = td_free_chan_resources; 744 + td->dma.device_tx_status = td_tx_status; 745 + td->dma.device_issue_pending = td_issue_pending; 746 + 747 + dma_cap_set(DMA_SLAVE, td->dma.cap_mask); 748 + dma_cap_set(DMA_PRIVATE, td->dma.cap_mask); 749 + td->dma.device_prep_slave_sg = td_prep_slave_sg; 750 + td->dma.device_control = td_control; 751 + 752 + td->dma.dev = &pdev->dev; 753 + 754 + INIT_LIST_HEAD(&td->dma.channels); 755 + 756 + for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) { 757 + struct timb_dma_chan *td_chan = &td->channels[i]; 758 + struct timb_dma_platform_data_channel *pchan = 759 + pdata->channels + i; 760 + 761 + /* even channels are RX, odd are TX */ 762 + if (((i % 2) && pchan->rx) || (!(i % 2) && !pchan->rx)) { 763 + dev_err(&pdev->dev, "Wrong channel configuration\n"); 764 + err = -EINVAL; 765 + goto err_tasklet_kill; 766 + } 767 + 768 + td_chan->chan.device = &td->dma; 769 + td_chan->chan.cookie = 1; 770 + td_chan->chan.chan_id = i; 771 + spin_lock_init(&td_chan->lock); 
772 + INIT_LIST_HEAD(&td_chan->active_list); 773 + INIT_LIST_HEAD(&td_chan->queue); 774 + INIT_LIST_HEAD(&td_chan->free_list); 775 + 776 + td_chan->descs = pchan->descriptors; 777 + td_chan->desc_elems = pchan->descriptor_elements; 778 + td_chan->bytes_per_line = pchan->bytes_per_line; 779 + td_chan->direction = pchan->rx ? DMA_FROM_DEVICE : 780 + DMA_TO_DEVICE; 781 + 782 + td_chan->membase = td->membase + 783 + (i / 2) * TIMBDMA_INSTANCE_OFFSET + 784 + (pchan->rx ? 0 : TIMBDMA_INSTANCE_TX_OFFSET); 785 + 786 + dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n", 787 + i, td_chan->membase); 788 + 789 + list_add_tail(&td_chan->chan.device_node, &td->dma.channels); 790 + } 791 + 792 + err = dma_async_device_register(&td->dma); 793 + if (err) { 794 + dev_err(&pdev->dev, "Failed to register async device\n"); 795 + goto err_free_irq; 796 + } 797 + 798 + platform_set_drvdata(pdev, td); 799 + 800 + dev_dbg(&pdev->dev, "Probe result: %d\n", err); 801 + return err; 802 + 803 + err_free_irq: 804 + free_irq(irq, td); 805 + err_tasklet_kill: 806 + tasklet_kill(&td->tasklet); 807 + iounmap(td->membase); 808 + err_free_mem: 809 + kfree(td); 810 + err_release_region: 811 + release_mem_region(iomem->start, resource_size(iomem)); 812 + 813 + return err; 814 + 815 + } 816 + 817 + static int __devexit td_remove(struct platform_device *pdev) 818 + { 819 + struct timb_dma *td = platform_get_drvdata(pdev); 820 + struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 821 + int irq = platform_get_irq(pdev, 0); 822 + 823 + dma_async_device_unregister(&td->dma); 824 + free_irq(irq, td); 825 + tasklet_kill(&td->tasklet); 826 + iounmap(td->membase); 827 + kfree(td); 828 + release_mem_region(iomem->start, resource_size(iomem)); 829 + 830 + platform_set_drvdata(pdev, NULL); 831 + 832 + dev_dbg(&pdev->dev, "Removed...\n"); 833 + return 0; 834 + } 835 + 836 + static struct platform_driver td_driver = { 837 + .driver = { 838 + .name = DRIVER_NAME, 839 + .owner = THIS_MODULE, 840 + }, 841 + .probe = td_probe, 842 + .remove = __exit_p(td_remove), 843 + }; 844 + 845 + static int __init td_init(void) 846 + { 847 + return platform_driver_register(&td_driver); 848 + } 849 + module_init(td_init); 850 + 851 + static void __exit td_exit(void) 852 + { 853 + platform_driver_unregister(&td_driver); 854 + } 855 + module_exit(td_exit); 856 + 857 + MODULE_LICENSE("GPL v2"); 858 + MODULE_DESCRIPTION("Timberdale DMA controller driver"); 859 + MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>"); 860 + MODULE_ALIAS("platform:"DRIVER_NAME);
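The new timb-dma driver above only advertises DMA_SLAVE and DMA_PRIVATE capabilities, so a peripheral driver is expected to obtain a channel through the generic dmaengine filter interface and then build transfers with device_prep_slave_sg(). The sketch below shows one plausible client using only generic dmaengine calls; the filter function, the chan_id match and the callback names are illustrative assumptions and are not part of this merge.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical filter: pick the channel with a given chan_id. A real
 * client would also check chan->device to make sure the channel really
 * belongs to the timb-dma controller.
 */
static bool my_timbdma_filter(struct dma_chan *chan, void *filter_param)
{
	int wanted_id = *(int *)filter_param;

	return chan->chan_id == wanted_id;
}

static void my_rx_complete(void *arg)
{
	/* called from the timb-dma tasklet when the descriptor finishes */
}

static int my_start_rx(struct scatterlist *sgl, unsigned int sg_len)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int rx_chan_id = 0;	/* even channels are RX in this driver */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, my_timbdma_filter, &rx_chan_id);
	if (!chan)
		return -ENODEV;

	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_FROM_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	desc->callback = my_rx_complete;
	desc->callback_param = NULL;
	cookie = desc->tx_submit(desc);
	if (cookie < 0) {
		dma_release_channel(chan);
		return -EIO;
	}

	/* td_issue_pending() will start or queue the transfer */
	dma_async_issue_pending(chan);

	return 0;
}

Completion can then be polled with dma_async_is_tx_complete(), which after this merge forwards to td_tx_status() through the new device_tx_status hook.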
+13 -10
drivers/dma/txx9dmac.c
··· 938 return &first->txd; 939 } 940 941 - static void txx9dmac_terminate_all(struct dma_chan *chan) 942 { 943 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 944 struct txx9dmac_desc *desc, *_desc; 945 LIST_HEAD(list); 946 947 dev_vdbg(chan2dev(chan), "terminate_all\n"); 948 spin_lock_bh(&dc->lock); ··· 963 /* Flush all pending and queued descriptors */ 964 list_for_each_entry_safe(desc, _desc, &list, desc_node) 965 txx9dmac_descriptor_complete(dc, desc); 966 } 967 968 static enum dma_status 969 - txx9dmac_is_tx_complete(struct dma_chan *chan, 970 - dma_cookie_t cookie, 971 - dma_cookie_t *done, dma_cookie_t *used) 972 { 973 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 974 dma_cookie_t last_used; ··· 991 ret = dma_async_is_complete(cookie, last_complete, last_used); 992 } 993 994 - if (done) 995 - *done = last_complete; 996 - if (used) 997 - *used = last_used; 998 999 return ret; 1000 } ··· 1156 dc->dma.dev = &pdev->dev; 1157 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; 1158 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; 1159 - dc->dma.device_terminate_all = txx9dmac_terminate_all; 1160 - dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete; 1161 dc->dma.device_issue_pending = txx9dmac_issue_pending; 1162 if (pdata && pdata->memcpy_chan == ch) { 1163 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
··· 938 return &first->txd; 939 } 940 941 + static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 942 + unsigned long arg) 943 { 944 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 945 struct txx9dmac_desc *desc, *_desc; 946 LIST_HEAD(list); 947 + 948 + /* Only supports DMA_TERMINATE_ALL */ 949 + if (cmd != DMA_TERMINATE_ALL) 950 + return -EINVAL; 951 952 dev_vdbg(chan2dev(chan), "terminate_all\n"); 953 spin_lock_bh(&dc->lock); ··· 958 /* Flush all pending and queued descriptors */ 959 list_for_each_entry_safe(desc, _desc, &list, desc_node) 960 txx9dmac_descriptor_complete(dc, desc); 961 + 962 + return 0; 963 } 964 965 static enum dma_status 966 + txx9dmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 967 + struct dma_tx_state *txstate) 968 { 969 struct txx9dmac_chan *dc = to_txx9dmac_chan(chan); 970 dma_cookie_t last_used; ··· 985 ret = dma_async_is_complete(cookie, last_complete, last_used); 986 } 987 988 + dma_set_tx_state(txstate, last_complete, last_used, 0); 989 990 return ret; 991 } ··· 1153 dc->dma.dev = &pdev->dev; 1154 dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources; 1155 dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources; 1156 + dc->dma.device_control = txx9dmac_control; 1157 + dc->dma.device_tx_status = txx9dmac_tx_status; 1158 dc->dma.device_issue_pending = txx9dmac_issue_pending; 1159 if (pdata && pdata->memcpy_chan == ch) { 1160 dc->dma.device_prep_dma_memcpy = txx9dmac_prep_dma_memcpy;
+1 -1
drivers/mmc/host/atmel-mci.c
··· 580 struct dma_chan *chan = host->data_chan; 581 582 if (chan) { 583 - chan->device->device_terminate_all(chan); 584 atmci_dma_cleanup(host); 585 } else { 586 /* Data transfer was stopped by the interrupt handler */
··· 580 struct dma_chan *chan = host->data_chan; 581 582 if (chan) { 583 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 584 atmci_dma_cleanup(host); 585 } else { 586 /* Data transfer was stopped by the interrupt handler */
+1 -1
drivers/serial/sh-sci.c
··· 1091 unsigned long flags; 1092 int count; 1093 1094 - chan->device->device_terminate_all(chan); 1095 dev_dbg(port->dev, "Read %u bytes with cookie %d\n", 1096 sh_desc->partial, sh_desc->cookie); 1097
··· 1091 unsigned long flags; 1092 int count; 1093 1094 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 1095 dev_dbg(port->dev, "Read %u bytes with cookie %d\n", 1096 sh_desc->partial, sh_desc->cookie); 1097
+2 -1
drivers/video/mx3fb.c
··· 387 388 spin_unlock_irqrestore(&mx3fb->lock, flags); 389 390 - mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan); 391 mx3_fbi->txd = NULL; 392 mx3_fbi->cookie = -EINVAL; 393 }
··· 387 388 spin_unlock_irqrestore(&mx3fb->lock, flags); 389 390 + mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan, 391 + DMA_TERMINATE_ALL, 0); 392 mx3_fbi->txd = NULL; 393 mx3_fbi->cookie = -EINVAL; 394 }
+120 -7
include/linux/dmaengine.h
··· 40 * enum dma_status - DMA transaction status 41 * @DMA_SUCCESS: transaction completed successfully 42 * @DMA_IN_PROGRESS: transaction not yet processed 43 * @DMA_ERROR: transaction failed 44 */ 45 enum dma_status { 46 DMA_SUCCESS, 47 DMA_IN_PROGRESS, 48 DMA_ERROR, 49 }; 50 ··· 106 DMA_PREP_PQ_DISABLE_Q = (1 << 7), 107 DMA_PREP_CONTINUE = (1 << 8), 108 DMA_PREP_FENCE = (1 << 9), 109 }; 110 111 /** ··· 245 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 246 dma_async_tx_callback callback; 247 void *callback_param; 248 struct dma_async_tx_descriptor *next; 249 struct dma_async_tx_descriptor *parent; 250 spinlock_t lock; 251 }; 252 253 /** ··· 351 * @device_prep_dma_memset: prepares a memset operation 352 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 353 * @device_prep_slave_sg: prepares a slave dma operation 354 - * @device_terminate_all: terminate all pending operations 355 - * @device_is_tx_complete: poll for transaction completion 356 * @device_issue_pending: push pending transactions to hardware 357 */ 358 struct dma_device { ··· 407 struct dma_chan *chan, struct scatterlist *sgl, 408 unsigned int sg_len, enum dma_data_direction direction, 409 unsigned long flags); 410 - void (*device_terminate_all)(struct dma_chan *chan); 411 412 - enum dma_status (*device_is_tx_complete)(struct dma_chan *chan, 413 - dma_cookie_t cookie, dma_cookie_t *last, 414 - dma_cookie_t *used); 415 void (*device_issue_pending)(struct dma_chan *chan); 416 }; 417 ··· 653 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, 654 dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) 655 { 656 - return chan->device->device_is_tx_complete(chan, cookie, last, used); 657 } 658 659 #define dma_async_memcpy_complete(chan, cookie, last, used)\ ··· 687 return DMA_SUCCESS; 688 } 689 return DMA_IN_PROGRESS; 690 } 691 692 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
··· 40 * enum dma_status - DMA transaction status 41 * @DMA_SUCCESS: transaction completed successfully 42 * @DMA_IN_PROGRESS: transaction not yet processed 43 + * @DMA_PAUSED: transaction is paused 44 * @DMA_ERROR: transaction failed 45 */ 46 enum dma_status { 47 DMA_SUCCESS, 48 DMA_IN_PROGRESS, 49 + DMA_PAUSED, 50 DMA_ERROR, 51 }; 52 ··· 104 DMA_PREP_PQ_DISABLE_Q = (1 << 7), 105 DMA_PREP_CONTINUE = (1 << 8), 106 DMA_PREP_FENCE = (1 << 9), 107 + }; 108 + 109 + /** 110 + * enum dma_ctrl_cmd - DMA operations that can optionally be exercised 111 + * on a running channel. 112 + * @DMA_TERMINATE_ALL: terminate all ongoing transfers 113 + * @DMA_PAUSE: pause ongoing transfers 114 + * @DMA_RESUME: resume paused transfer 115 + */ 116 + enum dma_ctrl_cmd { 117 + DMA_TERMINATE_ALL, 118 + DMA_PAUSE, 119 + DMA_RESUME, 120 }; 121 122 /** ··· 230 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 231 dma_async_tx_callback callback; 232 void *callback_param; 233 + #ifndef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH 234 struct dma_async_tx_descriptor *next; 235 struct dma_async_tx_descriptor *parent; 236 spinlock_t lock; 237 + #endif 238 + }; 239 + 240 + #ifdef CONFIG_ASYNC_TX_DISABLE_CHANNEL_SWITCH 241 + static inline void txd_lock(struct dma_async_tx_descriptor *txd) 242 + { 243 + } 244 + static inline void txd_unlock(struct dma_async_tx_descriptor *txd) 245 + { 246 + } 247 + static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) 248 + { 249 + BUG(); 250 + } 251 + static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) 252 + { 253 + } 254 + static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) 255 + { 256 + } 257 + static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) 258 + { 259 + return NULL; 260 + } 261 + static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) 262 + { 263 + return NULL; 264 + } 265 + 266 + #else 267 + static inline void txd_lock(struct dma_async_tx_descriptor *txd) 268 + { 269 + spin_lock_bh(&txd->lock); 270 + } 271 + static inline void txd_unlock(struct dma_async_tx_descriptor *txd) 272 + { 273 + spin_unlock_bh(&txd->lock); 274 + } 275 + static inline void txd_chain(struct dma_async_tx_descriptor *txd, struct dma_async_tx_descriptor *next) 276 + { 277 + txd->next = next; 278 + next->parent = txd; 279 + } 280 + static inline void txd_clear_parent(struct dma_async_tx_descriptor *txd) 281 + { 282 + txd->parent = NULL; 283 + } 284 + static inline void txd_clear_next(struct dma_async_tx_descriptor *txd) 285 + { 286 + txd->next = NULL; 287 + } 288 + static inline struct dma_async_tx_descriptor *txd_parent(struct dma_async_tx_descriptor *txd) 289 + { 290 + return txd->parent; 291 + } 292 + static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descriptor *txd) 293 + { 294 + return txd->next; 295 + } 296 + #endif 297 + 298 + /** 299 + * struct dma_tx_state - filled in to report the status of 300 + * a transfer. 301 + * @last: last completed DMA cookie 302 + * @used: last issued DMA cookie (i.e. 
the one in progress) 303 + * @residue: the remaining number of bytes left to transmit 304 + * on the selected transfer for states DMA_IN_PROGRESS and 305 + * DMA_PAUSED if this is implemented in the driver, else 0 306 + */ 307 + struct dma_tx_state { 308 + dma_cookie_t last; 309 + dma_cookie_t used; 310 + u32 residue; 311 }; 312 313 /** ··· 261 * @device_prep_dma_memset: prepares a memset operation 262 * @device_prep_dma_interrupt: prepares an end of chain interrupt operation 263 * @device_prep_slave_sg: prepares a slave dma operation 264 + * @device_control: manipulate all pending operations on a channel, returns 265 + * zero or error code 266 + * @device_tx_status: poll for transaction completion, the optional 267 + * txstate parameter can be supplied with a pointer to get a 268 + * struct with auxilary transfer status information, otherwise the call 269 + * will just return a simple status code 270 * @device_issue_pending: push pending transactions to hardware 271 */ 272 struct dma_device { ··· 313 struct dma_chan *chan, struct scatterlist *sgl, 314 unsigned int sg_len, enum dma_data_direction direction, 315 unsigned long flags); 316 + int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 317 + unsigned long arg); 318 319 + enum dma_status (*device_tx_status)(struct dma_chan *chan, 320 + dma_cookie_t cookie, 321 + struct dma_tx_state *txstate); 322 void (*device_issue_pending)(struct dma_chan *chan); 323 }; 324 ··· 558 static inline enum dma_status dma_async_is_tx_complete(struct dma_chan *chan, 559 dma_cookie_t cookie, dma_cookie_t *last, dma_cookie_t *used) 560 { 561 + struct dma_tx_state state; 562 + enum dma_status status; 563 + 564 + status = chan->device->device_tx_status(chan, cookie, &state); 565 + if (last) 566 + *last = state.last; 567 + if (used) 568 + *used = state.used; 569 + return status; 570 } 571 572 #define dma_async_memcpy_complete(chan, cookie, last, used)\ ··· 584 return DMA_SUCCESS; 585 } 586 return DMA_IN_PROGRESS; 587 + } 588 + 589 + static inline void 590 + dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last, dma_cookie_t used, u32 residue) 591 + { 592 + if (st) { 593 + st->last = last; 594 + st->used = used; 595 + st->residue = residue; 596 + } 597 } 598 599 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie);
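For driver writers, the two new hooks replace device_terminate_all and device_is_tx_complete. Below is a minimal sketch of how a driver might wire them up, modelled on the conversions in this merge; struct foo_chan, to_foo_chan() and the completed_cookie field are hypothetical names, not anything in the tree.

#include <linux/dmaengine.h>
#include <linux/errno.h>

struct foo_chan {
	struct dma_chan		chan;
	dma_cookie_t		completed_cookie;
	/* ... hardware state, descriptor lists, lock ... */
};

static inline struct foo_chan *to_foo_chan(struct dma_chan *chan)
{
	return container_of(chan, struct foo_chan, chan);
}

static int foo_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	if (cmd != DMA_TERMINATE_ALL)
		return -ENXIO;	/* DMA_PAUSE/DMA_RESUME not supported here */

	/* stop the hardware and recycle active/queued descriptors ... */
	return 0;
}

static enum dma_status foo_tx_status(struct dma_chan *chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	struct foo_chan *fc = to_foo_chan(chan);
	dma_cookie_t last_complete = fc->completed_cookie;
	dma_cookie_t last_used = chan->cookie;

	/* txstate may be NULL; dma_set_tx_state() checks for that */
	dma_set_tx_state(txstate, last_complete, last_used, 0);

	return dma_async_is_complete(cookie, last_complete, last_used);
}

/*
 * In probe():
 *	dd->dma.device_control = foo_control;
 *	dd->dma.device_tx_status = foo_tx_status;
 */

A driver that knows how much of the in-flight descriptor remains should pass that byte count as the residue argument instead of 0, so that DMA_IN_PROGRESS and DMA_PAUSED states carry useful information.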
+55
include/linux/timb_dma.h
···
··· 1 + /* 2 + * timb_dma.h timberdale FPGA DMA driver defines 3 + * Copyright (c) 2010 Intel Corporation 4 + * 5 + * This program is free software; you can redistribute it and/or modify 6 + * it under the terms of the GNU General Public License version 2 as 7 + * published by the Free Software Foundation. 8 + * 9 + * This program is distributed in the hope that it will be useful, 10 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 + * GNU General Public License for more details. 13 + * 14 + * You should have received a copy of the GNU General Public License 15 + * along with this program; if not, write to the Free Software 16 + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 17 + */ 18 + 19 + /* Supports: 20 + * Timberdale FPGA DMA engine 21 + */ 22 + 23 + #ifndef _LINUX_TIMB_DMA_H 24 + #define _LINUX_TIMB_DMA_H 25 + 26 + /** 27 + * struct timb_dma_platform_data_channel - Description of each individual 28 + * DMA channel for the timberdale DMA driver 29 + * @rx: true if this channel handles data in the direction to 30 + * the CPU. 31 + * @bytes_per_line: Number of bytes per line, this is specific for channels 32 + * handling video data. For other channels this shall be left to 0. 33 + * @descriptors: Number of descriptors to allocate for this channel. 34 + * @descriptor_elements: Number of elements in each descriptor. 35 + * 36 + */ 37 + struct timb_dma_platform_data_channel { 38 + bool rx; 39 + unsigned int bytes_per_line; 40 + unsigned int descriptors; 41 + unsigned int descriptor_elements; 42 + }; 43 + 44 + /** 45 + * struct timb_dma_platform_data - Platform data of the timberdale DMA driver 46 + * @nr_channels: Number of defined channels in the channels array. 47 + * @channels: Definition of the each channel. 48 + * 49 + */ 50 + struct timb_dma_platform_data { 51 + unsigned nr_channels; 52 + struct timb_dma_platform_data_channel channels[32]; 53 + }; 54 + 55 + #endif
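Because td_probe() above requires even-numbered channels to be RX and odd-numbered channels to be TX, the platform data must be laid out accordingly. A rough board-file example follows; the device name, the bytes_per_line value and the descriptor counts are invented for illustration and would come from the actual FPGA configuration, and the registration details (in practice done by the timberdale MFD code) are elided.

#include <linux/platform_device.h>
#include <linux/timb_dma.h>

static struct timb_dma_platform_data example_dma_pdata = {
	.nr_channels = 2,
	.channels = {
		{
			/* channel 0: RX, e.g. video capture data */
			.rx = true,
			.bytes_per_line = 1440,	/* example value only */
			.descriptors = 2,
			.descriptor_elements = 16,
		},
		{
			/* channel 1: TX, bytes_per_line left at 0 */
			.rx = false,
			.descriptors = 2,
			.descriptor_elements = 16,
		},
	},
};

static struct platform_device example_dma_device = {
	.name = "timb-dma",	/* assumed to match the driver's DRIVER_NAME */
	.id = -1,
	.dev = {
		.platform_data = &example_dma_pdata,
	},
};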
+4 -3
sound/soc/txx9/txx9aclc.c
··· 160 void __iomem *base = drvdata->base; 161 162 spin_unlock_irqrestore(&dmadata->dma_lock, flags); 163 - chan->device->device_terminate_all(chan); 164 /* first time */ 165 for (i = 0; i < NR_DMA_CHAIN; i++) { 166 desc = txx9aclc_dma_submit(dmadata, ··· 268 struct dma_chan *chan = dmadata->dma_chan; 269 270 dmadata->frag_count = -1; 271 - chan->device->device_terminate_all(chan); 272 return 0; 273 } 274 ··· 397 struct dma_chan *chan = dmadata->dma_chan; 398 if (chan) { 399 dmadata->frag_count = -1; 400 - chan->device->device_terminate_all(chan); 401 dma_release_channel(chan); 402 } 403 dev->dmadata[i].dma_chan = NULL;
··· 160 void __iomem *base = drvdata->base; 161 162 spin_unlock_irqrestore(&dmadata->dma_lock, flags); 163 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 164 /* first time */ 165 for (i = 0; i < NR_DMA_CHAIN; i++) { 166 desc = txx9aclc_dma_submit(dmadata, ··· 268 struct dma_chan *chan = dmadata->dma_chan; 269 270 dmadata->frag_count = -1; 271 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 272 return 0; 273 } 274 ··· 397 struct dma_chan *chan = dmadata->dma_chan; 398 if (chan) { 399 dmadata->frag_count = -1; 400 + chan->device->device_control(chan, 401 + DMA_TERMINATE_ALL, 0); 402 dma_release_channel(chan); 403 } 404 dev->dmadata[i].dma_chan = NULL;
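All of the converted call sites above now repeat the same chan->device->device_control(chan, DMA_TERMINATE_ALL, 0) incantation. Client code could hide that behind small inline wrappers such as the ones sketched here; these helpers are purely illustrative and are not added by this merge.

#include <linux/dmaengine.h>

/* Hypothetical convenience wrappers; not part of this merge. */
static inline int my_dma_terminate_all(struct dma_chan *chan)
{
	return chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}

static inline int my_dma_pause(struct dma_chan *chan)
{
	return chan->device->device_control(chan, DMA_PAUSE, 0);
}

static inline int my_dma_resume(struct dma_chan *chan)
{
	return chan->device->device_control(chan, DMA_RESUME, 0);
}

Callers should still check the return value, since a controller that does not implement a given command reports an error (-ENXIO or -EINVAL in the drivers converted here) rather than silently ignoring it.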