Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dma updates from Vinod Koul:
- new driver for BCM2835 used in R-pi
- new driver for MOXA ART
- dma_get_any_slave_channel API for DT based systems
- minor fixes and updates spread across drivers

[ The fsl-ssi dual fifo mode support addition clashed badly with the
other changes to fsl-ssi that came in through the sound merge. I did
a very rough cut at fixing up the conflict, but Nicolin Chen (author
of both sides) will need to verify and check things ]
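
For context, dma_get_any_slave_channel() lets a DT xlate callback hand out
whichever channel of the controller is still free and then program the request
line from the phandle arguments; both the bcm2835 and mmp_pdma drivers in this
pull use it that way. A minimal sketch of such a callback, with hypothetical
foo_* names:

	#include <linux/dmaengine.h>
	#include <linux/of_dma.h>

	/* Hypothetical driver state: controller plus per-channel bookkeeping. */
	struct foo_dmadev {
		struct dma_device ddev;
	};

	struct foo_chan {
		struct dma_chan chan;	/* channel handed out to clients */
		unsigned int dreq;	/* peripheral request line to program */
	};

	static struct dma_chan *foo_dma_xlate(struct of_phandle_args *spec,
					      struct of_dma *ofdma)
	{
		struct foo_dmadev *d = ofdma->of_dma_data;
		struct dma_chan *chan;

		/* Grab any channel of this controller that is still free... */
		chan = dma_get_any_slave_channel(&d->ddev);
		if (!chan)
			return NULL;

		/* ...and bind it to the request line from the DT specifier. */
		container_of(chan, struct foo_chan, chan)->dreq = spec->args[0];

		return chan;
	}

The callback would be registered with of_dma_controller_register() at probe
time, as the drivers below do.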

* 'for-linus' of git://git.infradead.org/users/vkoul/slave-dma: (36 commits)
dmaengine: mmp_pdma: fix mismerge
dma: pl08x: Export pl08x_filter_id
acpi-dma: align documentation with kernel-doc format
dma: fix vchan_cookie_complete() debug print
DMA: dmatest: extend the "device" module parameter to 32 characters
drivers/dma: fix error return code
dma: omap: Set debug level to debugging messages
dmaengine: fix kernel-doc style typos for few comments
dma: tegra: add support for Tegra148/124
dma: dw: use %pad instead of casting dma_addr_t
dma: dw: join split up messages
dma: dw: fix style of multiline comment
dmaengine: k3dma: fix sparse warnings
dma: pl330: Use dma_get_slave_channel() in the of xlate callback
dma: pl330: Differentiate between submitted and issued descriptors
dmaengine: sirf: Add device_slave_caps interface
DMA: Freescale: change BWC from 256 bytes to 1024 bytes
dmaengine: Add MOXA ART DMA engine driver
dmaengine: Add DMA_PRIVATE to BCM2835 driver
dma: imx-sdma: Assign a default script number for ROM firmware cases
...

+1882 -199
+57
Documentation/devicetree/bindings/dma/bcm2835-dma.txt
···
+ * BCM2835 DMA controller
+
+ The BCM2835 DMA controller has 16 channels in total.
+ Only the lower 13 channels have an associated IRQ.
+ Some arbitrary channels are used by the firmware
+ (1,3,6,7 in the current firmware version).
+ The channels 0,2 and 3 have special functionality
+ and should not be used by the driver.
+
+ Required properties:
+ - compatible: Should be "brcm,bcm2835-dma".
+ - reg: Should contain DMA registers location and length.
+ - interrupts: Should contain the DMA interrupts associated
+		to the DMA channels in ascending order.
+ - #dma-cells: Must be <1>, the cell in the dmas property of the
+		client device represents the DREQ number.
+ - brcm,dma-channel-mask: Bit mask representing the channels
+		not used by the firmware in ascending order,
+		i.e. first channel corresponds to LSB.
+
+ Example:
+
+ dma: dma@7e007000 {
+	compatible = "brcm,bcm2835-dma";
+	reg = <0x7e007000 0xf00>;
+	interrupts = <1 16>,
+		     <1 17>,
+		     <1 18>,
+		     <1 19>,
+		     <1 20>,
+		     <1 21>,
+		     <1 22>,
+		     <1 23>,
+		     <1 24>,
+		     <1 25>,
+		     <1 26>,
+		     <1 27>,
+		     <1 28>;
+
+	#dma-cells = <1>;
+	brcm,dma-channel-mask = <0x7f35>;
+ };
+
+ DMA clients connected to the BCM2835 DMA controller must use the format
+ described in the dma.txt file, using a two-cell specifier for each channel.
+
+ Example:
+
+ bcm2835_i2s: i2s@7e203000 {
+	compatible = "brcm,bcm2835-i2s";
+	reg = <0x7e203000 0x20>,
+	      <0x7e101098 0x02>;
+
+	dmas = <&dma 2>,
+	       <&dma 3>;
+	dma-names = "tx", "rx";
+ };
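
On the kernel side, a client driver would pick up one of the channels
described by this binding through the generic dmaengine API. A minimal sketch,
where the foo_* helper name and the FIFO address are hypothetical (4-byte bus
width is the only one the bcm2835 driver accepts so far):

	#include <linux/dmaengine.h>

	/* Request the channel listed as "tx" in the client's dma-names
	 * property and configure it for 32-bit writes to a FIFO. */
	static struct dma_chan *foo_get_tx_chan(struct device *dev,
						dma_addr_t fifo_phys)
	{
		struct dma_slave_config cfg = {
			.direction	= DMA_MEM_TO_DEV,
			.dst_addr	= fifo_phys,
			.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		};
		struct dma_chan *chan;

		chan = dma_request_slave_channel(dev, "tx");
		if (!chan)
			return NULL;

		if (dmaengine_slave_config(chan, &cfg)) {
			dma_release_channel(chan);
			return NULL;
		}

		return chan;
	}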
+1
Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
···
	19	IPU Memory
	20	ASRC
	21	ESAI
+	22	SSI Dual FIFO	(needs firmware ver >= 2)

The third cell specifies the transfer priority as below.
+45
Documentation/devicetree/bindings/dma/moxa,moxart-dma.txt
···
+ MOXA ART DMA Controller
+
+ See dma.txt first
+
+ Required properties:
+
+ - compatible :	Must be "moxa,moxart-dma"
+ - reg :	Should contain registers location and length
+ - interrupts :	Should contain an interrupt-specifier for the sole
+		interrupt generated by the device
+ - #dma-cells :	Should be 1, a single cell holding a line request number
+
+ Example:
+
+	dma: dma@90500000 {
+		compatible = "moxa,moxart-dma";
+		reg = <0x90500080 0x40>;
+		interrupts = <24 0>;
+		#dma-cells = <1>;
+	};
+
+
+ Clients:
+
+ DMA clients connected to the MOXA ART DMA controller must use the format
+ described in the dma.txt file, using a two-cell specifier for each channel:
+ a phandle plus one integer cells.
+ The two cells in order are:
+
+ 1. A phandle pointing to the DMA controller.
+ 2. Peripheral identifier for the hardware handshaking interface.
+
+ Example:
+ Use specific request line passing from dma
+ For example, MMC request line is 5
+
+	sdhci: sdhci@98e00000 {
+		compatible = "moxa,moxart-sdhci";
+		reg = <0x98e00000 0x5C>;
+		interrupts = <5 0>;
+		clocks = <&clk_apb>;
+		dmas = <&dma 5>,
+		       <&dma 5>;
+		dma-names = "tx", "rx";
+	};
+14
drivers/dma/Kconfig
···
	select DMA_ENGINE
	select DMA_VIRTUAL_CHANNELS

+ config DMA_BCM2835
+	tristate "BCM2835 DMA engine support"
+	depends on (ARCH_BCM2835 || MACH_BCM2708)
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
config TI_CPPI41
	tristate "AM33xx CPPI41 DMA support"
	depends on ARCH_OMAP
···
	help
	  Support the DMA engine for Hisilicon K3 platform
	  devices.
+
+ config MOXART_DMA
+	tristate "MOXART DMA support"
+	depends on ARCH_MOXART
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+	help
+	  Enable support for the MOXA ART SoC DMA controller.

config DMA_ENGINE
	bool
+2
drivers/dma/Makefile
···
obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
obj-$(CONFIG_DMA_OMAP) += omap-dma.o
+ obj-$(CONFIG_DMA_BCM2835) += bcm2835-dma.o
obj-$(CONFIG_MMP_PDMA) += mmp_pdma.o
obj-$(CONFIG_DMA_JZ4740) += dma-jz4740.o
obj-$(CONFIG_TI_CPPI41) += cppi41.o
obj-$(CONFIG_K3_DMA) += k3dma.o
+ obj-$(CONFIG_MOXART_DMA) += moxart-dma.o
+24 -12
drivers/dma/acpi-dma.c
···
 * @adev:	ACPI device to match with
 * @adma:	struct acpi_dma of the given DMA controller
 *
- * Returns 1 on success, 0 when no information is available, or appropriate
- * errno value on error.
- *
 * In order to match a device from DSDT table to the corresponding CSRT device
 * we use MMIO address and IRQ.
+ *
+ * Return:
+ * 1 on success, 0 when no information is available, or appropriate errno value
+ * on error.
 */
static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp,
		struct acpi_device *adev, struct acpi_dma *adma)
···
 *
 * We are using this table to get the request line range of the specific DMA
 * controller to be used later.
- *
 */
static void acpi_dma_parse_csrt(struct acpi_device *adev, struct acpi_dma *adma)
{
···
 * @data		pointer to controller specific data to be used by
 *			translation function
 *
- * Returns 0 on success or appropriate errno value on error.
- *
 * Allocated memory should be freed with appropriate acpi_dma_controller_free()
 * call.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
 */
int acpi_dma_controller_register(struct device *dev,
		struct dma_chan *(*acpi_dma_xlate)
···
 * @dev:	struct device of DMA controller
 *
 * Memory allocated by acpi_dma_controller_register() is freed here.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
 */
int acpi_dma_controller_free(struct device *dev)
{
···
 * Managed acpi_dma_controller_register(). DMA controller registered by this
 * function are automatically freed on driver detach. See
 * acpi_dma_controller_register() for more information.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
 */
int devm_acpi_dma_controller_register(struct device *dev,
		struct dma_chan *(*acpi_dma_xlate)
···
 * @adma:	struct acpi_dma of DMA controller
 * @dma_spec:	dma specifier to update
 *
- * Returns 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
- *
 * Accordingly to ACPI 5.0 Specification Table 6-170 "Fixed DMA Resource
 * Descriptor":
 *	DMA Request Line bits is a platform-relative number uniquely
···
 *	mapping is done in a controller-specific OS driver.
 * That's why we can safely adjust slave_id when the appropriate controller is
 * found.
+ *
+ * Return:
+ * 0, if no information is avaiable, -1 on mismatch, and 1 otherwise.
 */
static int acpi_dma_update_dma_spec(struct acpi_dma *adma,
		struct acpi_dma_spec *dma_spec)
···
 * @dev:	struct device to get DMA request from
 * @index:	index of FixedDMA descriptor for @dev
 *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
 */
struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev,
		size_t index)
···
 * translate the names "tx" and "rx" here based on the most common case where
 * the first FixedDMA descriptor is TX and second is RX.
 *
- * Returns pointer to appropriate dma channel on success or NULL on error.
+ * Return:
+ * Pointer to appropriate dma channel on success or NULL on error.
 */
struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev,
		const char *name)
···
 * @adma: pointer to ACPI DMA controller data
 *
 * A simple translation function for ACPI based devices. Passes &struct
- * dma_spec to the DMA controller driver provided filter function. Returns
- * pointer to the channel if found or %NULL otherwise.
+ * dma_spec to the DMA controller driver provided filter function.
+ *
+ * Return:
+ * Pointer to the channel if found or %NULL otherwise.
 */
struct dma_chan *acpi_dma_simple_xlate(struct acpi_dma_spec *dma_spec,
		struct acpi_dma *adma)
+3 -1
drivers/dma/amba-pl08x.c
···
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
+ #include <linux/export.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
···

	return false;
}
+ EXPORT_SYMBOL_GPL(pl08x_filter_id);

/*
 * Just check that the device is there and active
···
	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
			pl08x->pd->num_slave_channels, true);
-	if (ret <= 0) {
+	if (ret < 0) {
		dev_warn(&pl08x->adev->dev,
			"%s failed to enumerate slave channels - %d\n",
			__func__, ret);
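
Exporting pl08x_filter_id() makes it usable from client modules together with
dma_request_channel(). A minimal sketch, where the channel name "tx" is a
hypothetical ID taken from the board's pl08x platform data:

	#include <linux/amba/pl08x.h>
	#include <linux/dmaengine.h>

	/* Request a PL08x slave channel by its platform channel name. */
	static struct dma_chan *foo_get_pl08x_chan(void)
	{
		dma_cap_mask_t mask;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		return dma_request_channel(mask, pl08x_filter_id, (void *)"tx");
	}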
+707
drivers/dma/bcm2835-dma.c
··· 1 + /* 2 + * BCM2835 DMA engine support 3 + * 4 + * This driver only supports cyclic DMA transfers 5 + * as needed for the I2S module. 6 + * 7 + * Author: Florian Meier <florian.meier@koalo.de> 8 + * Copyright 2013 9 + * 10 + * Based on 11 + * OMAP DMAengine support by Russell King 12 + * 13 + * BCM2708 DMA Driver 14 + * Copyright (C) 2010 Broadcom 15 + * 16 + * Raspberry Pi PCM I2S ALSA Driver 17 + * Copyright (c) by Phil Poole 2013 18 + * 19 + * MARVELL MMP Peripheral DMA Driver 20 + * Copyright 2012 Marvell International Ltd. 21 + * 22 + * This program is free software; you can redistribute it and/or modify 23 + * it under the terms of the GNU General Public License as published by 24 + * the Free Software Foundation; either version 2 of the License, or 25 + * (at your option) any later version. 26 + * 27 + * This program is distributed in the hope that it will be useful, 28 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 29 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 30 + * GNU General Public License for more details. 31 + */ 32 + #include <linux/dmaengine.h> 33 + #include <linux/dma-mapping.h> 34 + #include <linux/err.h> 35 + #include <linux/init.h> 36 + #include <linux/interrupt.h> 37 + #include <linux/list.h> 38 + #include <linux/module.h> 39 + #include <linux/platform_device.h> 40 + #include <linux/slab.h> 41 + #include <linux/io.h> 42 + #include <linux/spinlock.h> 43 + #include <linux/of.h> 44 + #include <linux/of_dma.h> 45 + 46 + #include "virt-dma.h" 47 + 48 + struct bcm2835_dmadev { 49 + struct dma_device ddev; 50 + spinlock_t lock; 51 + void __iomem *base; 52 + struct device_dma_parameters dma_parms; 53 + }; 54 + 55 + struct bcm2835_dma_cb { 56 + uint32_t info; 57 + uint32_t src; 58 + uint32_t dst; 59 + uint32_t length; 60 + uint32_t stride; 61 + uint32_t next; 62 + uint32_t pad[2]; 63 + }; 64 + 65 + struct bcm2835_chan { 66 + struct virt_dma_chan vc; 67 + struct list_head node; 68 + 69 + struct dma_slave_config cfg; 70 + bool cyclic; 71 + unsigned int dreq; 72 + 73 + int ch; 74 + struct bcm2835_desc *desc; 75 + 76 + void __iomem *chan_base; 77 + int irq_number; 78 + }; 79 + 80 + struct bcm2835_desc { 81 + struct virt_dma_desc vd; 82 + enum dma_transfer_direction dir; 83 + 84 + unsigned int control_block_size; 85 + struct bcm2835_dma_cb *control_block_base; 86 + dma_addr_t control_block_base_phys; 87 + 88 + unsigned int frames; 89 + size_t size; 90 + }; 91 + 92 + #define BCM2835_DMA_CS 0x00 93 + #define BCM2835_DMA_ADDR 0x04 94 + #define BCM2835_DMA_SOURCE_AD 0x0c 95 + #define BCM2835_DMA_DEST_AD 0x10 96 + #define BCM2835_DMA_NEXTCB 0x1C 97 + 98 + /* DMA CS Control and Status bits */ 99 + #define BCM2835_DMA_ACTIVE BIT(0) 100 + #define BCM2835_DMA_INT BIT(2) 101 + #define BCM2835_DMA_ISPAUSED BIT(4) /* Pause requested or not active */ 102 + #define BCM2835_DMA_ISHELD BIT(5) /* Is held by DREQ flow control */ 103 + #define BCM2835_DMA_ERR BIT(8) 104 + #define BCM2835_DMA_ABORT BIT(30) /* Stop current CB, go to next, WO */ 105 + #define BCM2835_DMA_RESET BIT(31) /* WO, self clearing */ 106 + 107 + #define BCM2835_DMA_INT_EN BIT(0) 108 + #define BCM2835_DMA_D_INC BIT(4) 109 + #define BCM2835_DMA_D_DREQ BIT(6) 110 + #define BCM2835_DMA_S_INC BIT(8) 111 + #define BCM2835_DMA_S_DREQ BIT(10) 112 + 113 + #define BCM2835_DMA_PER_MAP(x) ((x) << 16) 114 + 115 + #define BCM2835_DMA_DATA_TYPE_S8 1 116 + #define BCM2835_DMA_DATA_TYPE_S16 2 117 + #define BCM2835_DMA_DATA_TYPE_S32 4 118 + #define BCM2835_DMA_DATA_TYPE_S128 16 119 + 120 + #define 
BCM2835_DMA_BULK_MASK BIT(0) 121 + #define BCM2835_DMA_FIQ_MASK (BIT(2) | BIT(3)) 122 + 123 + /* Valid only for channels 0 - 14, 15 has its own base address */ 124 + #define BCM2835_DMA_CHAN(n) ((n) << 8) /* Base address */ 125 + #define BCM2835_DMA_CHANIO(base, n) ((base) + BCM2835_DMA_CHAN(n)) 126 + 127 + static inline struct bcm2835_dmadev *to_bcm2835_dma_dev(struct dma_device *d) 128 + { 129 + return container_of(d, struct bcm2835_dmadev, ddev); 130 + } 131 + 132 + static inline struct bcm2835_chan *to_bcm2835_dma_chan(struct dma_chan *c) 133 + { 134 + return container_of(c, struct bcm2835_chan, vc.chan); 135 + } 136 + 137 + static inline struct bcm2835_desc *to_bcm2835_dma_desc( 138 + struct dma_async_tx_descriptor *t) 139 + { 140 + return container_of(t, struct bcm2835_desc, vd.tx); 141 + } 142 + 143 + static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) 144 + { 145 + struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); 146 + dma_free_coherent(desc->vd.tx.chan->device->dev, 147 + desc->control_block_size, 148 + desc->control_block_base, 149 + desc->control_block_base_phys); 150 + kfree(desc); 151 + } 152 + 153 + static int bcm2835_dma_abort(void __iomem *chan_base) 154 + { 155 + unsigned long cs; 156 + long int timeout = 10000; 157 + 158 + cs = readl(chan_base + BCM2835_DMA_CS); 159 + if (!(cs & BCM2835_DMA_ACTIVE)) 160 + return 0; 161 + 162 + /* Write 0 to the active bit - Pause the DMA */ 163 + writel(0, chan_base + BCM2835_DMA_CS); 164 + 165 + /* Wait for any current AXI transfer to complete */ 166 + while ((cs & BCM2835_DMA_ISPAUSED) && --timeout) { 167 + cpu_relax(); 168 + cs = readl(chan_base + BCM2835_DMA_CS); 169 + } 170 + 171 + /* We'll un-pause when we set of our next DMA */ 172 + if (!timeout) 173 + return -ETIMEDOUT; 174 + 175 + if (!(cs & BCM2835_DMA_ACTIVE)) 176 + return 0; 177 + 178 + /* Terminate the control block chain */ 179 + writel(0, chan_base + BCM2835_DMA_NEXTCB); 180 + 181 + /* Abort the whole DMA */ 182 + writel(BCM2835_DMA_ABORT | BCM2835_DMA_ACTIVE, 183 + chan_base + BCM2835_DMA_CS); 184 + 185 + return 0; 186 + } 187 + 188 + static void bcm2835_dma_start_desc(struct bcm2835_chan *c) 189 + { 190 + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); 191 + struct bcm2835_desc *d; 192 + 193 + if (!vd) { 194 + c->desc = NULL; 195 + return; 196 + } 197 + 198 + list_del(&vd->node); 199 + 200 + c->desc = d = to_bcm2835_dma_desc(&vd->tx); 201 + 202 + writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); 203 + writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); 204 + } 205 + 206 + static irqreturn_t bcm2835_dma_callback(int irq, void *data) 207 + { 208 + struct bcm2835_chan *c = data; 209 + struct bcm2835_desc *d; 210 + unsigned long flags; 211 + 212 + spin_lock_irqsave(&c->vc.lock, flags); 213 + 214 + /* Acknowledge interrupt */ 215 + writel(BCM2835_DMA_INT, c->chan_base + BCM2835_DMA_CS); 216 + 217 + d = c->desc; 218 + 219 + if (d) { 220 + /* TODO Only works for cyclic DMA */ 221 + vchan_cyclic_callback(&d->vd); 222 + } 223 + 224 + /* Keep the DMA engine running */ 225 + writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); 226 + 227 + spin_unlock_irqrestore(&c->vc.lock, flags); 228 + 229 + return IRQ_HANDLED; 230 + } 231 + 232 + static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) 233 + { 234 + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 235 + 236 + dev_dbg(c->vc.chan.device->dev, 237 + "Allocating DMA channel %d\n", c->ch); 238 + 239 + return request_irq(c->irq_number, 240 + 
bcm2835_dma_callback, 0, "DMA IRQ", c); 241 + } 242 + 243 + static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) 244 + { 245 + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 246 + 247 + vchan_free_chan_resources(&c->vc); 248 + free_irq(c->irq_number, c); 249 + 250 + dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); 251 + } 252 + 253 + static size_t bcm2835_dma_desc_size(struct bcm2835_desc *d) 254 + { 255 + return d->size; 256 + } 257 + 258 + static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr) 259 + { 260 + unsigned int i; 261 + size_t size; 262 + 263 + for (size = i = 0; i < d->frames; i++) { 264 + struct bcm2835_dma_cb *control_block = 265 + &d->control_block_base[i]; 266 + size_t this_size = control_block->length; 267 + dma_addr_t dma; 268 + 269 + if (d->dir == DMA_DEV_TO_MEM) 270 + dma = control_block->dst; 271 + else 272 + dma = control_block->src; 273 + 274 + if (size) 275 + size += this_size; 276 + else if (addr >= dma && addr < dma + this_size) 277 + size += dma + this_size - addr; 278 + } 279 + 280 + return size; 281 + } 282 + 283 + static enum dma_status bcm2835_dma_tx_status(struct dma_chan *chan, 284 + dma_cookie_t cookie, struct dma_tx_state *txstate) 285 + { 286 + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 287 + struct virt_dma_desc *vd; 288 + enum dma_status ret; 289 + unsigned long flags; 290 + 291 + ret = dma_cookie_status(chan, cookie, txstate); 292 + if (ret == DMA_COMPLETE || !txstate) 293 + return ret; 294 + 295 + spin_lock_irqsave(&c->vc.lock, flags); 296 + vd = vchan_find_desc(&c->vc, cookie); 297 + if (vd) { 298 + txstate->residue = 299 + bcm2835_dma_desc_size(to_bcm2835_dma_desc(&vd->tx)); 300 + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { 301 + struct bcm2835_desc *d = c->desc; 302 + dma_addr_t pos; 303 + 304 + if (d->dir == DMA_MEM_TO_DEV) 305 + pos = readl(c->chan_base + BCM2835_DMA_SOURCE_AD); 306 + else if (d->dir == DMA_DEV_TO_MEM) 307 + pos = readl(c->chan_base + BCM2835_DMA_DEST_AD); 308 + else 309 + pos = 0; 310 + 311 + txstate->residue = bcm2835_dma_desc_size_pos(d, pos); 312 + } else { 313 + txstate->residue = 0; 314 + } 315 + 316 + spin_unlock_irqrestore(&c->vc.lock, flags); 317 + 318 + return ret; 319 + } 320 + 321 + static void bcm2835_dma_issue_pending(struct dma_chan *chan) 322 + { 323 + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 324 + unsigned long flags; 325 + 326 + c->cyclic = true; /* Nothing else is implemented */ 327 + 328 + spin_lock_irqsave(&c->vc.lock, flags); 329 + if (vchan_issue_pending(&c->vc) && !c->desc) 330 + bcm2835_dma_start_desc(c); 331 + 332 + spin_unlock_irqrestore(&c->vc.lock, flags); 333 + } 334 + 335 + static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( 336 + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 337 + size_t period_len, enum dma_transfer_direction direction, 338 + unsigned long flags, void *context) 339 + { 340 + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 341 + enum dma_slave_buswidth dev_width; 342 + struct bcm2835_desc *d; 343 + dma_addr_t dev_addr; 344 + unsigned int es, sync_type; 345 + unsigned int frame; 346 + 347 + /* Grab configuration */ 348 + if (!is_slave_direction(direction)) { 349 + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); 350 + return NULL; 351 + } 352 + 353 + if (direction == DMA_DEV_TO_MEM) { 354 + dev_addr = c->cfg.src_addr; 355 + dev_width = c->cfg.src_addr_width; 356 + sync_type = BCM2835_DMA_S_DREQ; 357 + } else { 358 + dev_addr = 
c->cfg.dst_addr; 359 + dev_width = c->cfg.dst_addr_width; 360 + sync_type = BCM2835_DMA_D_DREQ; 361 + } 362 + 363 + /* Bus width translates to the element size (ES) */ 364 + switch (dev_width) { 365 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 366 + es = BCM2835_DMA_DATA_TYPE_S32; 367 + break; 368 + default: 369 + return NULL; 370 + } 371 + 372 + /* Now allocate and setup the descriptor. */ 373 + d = kzalloc(sizeof(*d), GFP_NOWAIT); 374 + if (!d) 375 + return NULL; 376 + 377 + d->dir = direction; 378 + d->frames = buf_len / period_len; 379 + 380 + /* Allocate memory for control blocks */ 381 + d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); 382 + d->control_block_base = dma_zalloc_coherent(chan->device->dev, 383 + d->control_block_size, &d->control_block_base_phys, 384 + GFP_NOWAIT); 385 + 386 + if (!d->control_block_base) { 387 + kfree(d); 388 + return NULL; 389 + } 390 + 391 + /* 392 + * Iterate over all frames, create a control block 393 + * for each frame and link them together. 394 + */ 395 + for (frame = 0; frame < d->frames; frame++) { 396 + struct bcm2835_dma_cb *control_block = 397 + &d->control_block_base[frame]; 398 + 399 + /* Setup adresses */ 400 + if (d->dir == DMA_DEV_TO_MEM) { 401 + control_block->info = BCM2835_DMA_D_INC; 402 + control_block->src = dev_addr; 403 + control_block->dst = buf_addr + frame * period_len; 404 + } else { 405 + control_block->info = BCM2835_DMA_S_INC; 406 + control_block->src = buf_addr + frame * period_len; 407 + control_block->dst = dev_addr; 408 + } 409 + 410 + /* Enable interrupt */ 411 + control_block->info |= BCM2835_DMA_INT_EN; 412 + 413 + /* Setup synchronization */ 414 + if (sync_type != 0) 415 + control_block->info |= sync_type; 416 + 417 + /* Setup DREQ channel */ 418 + if (c->dreq != 0) 419 + control_block->info |= 420 + BCM2835_DMA_PER_MAP(c->dreq); 421 + 422 + /* Length of a frame */ 423 + control_block->length = period_len; 424 + d->size += control_block->length; 425 + 426 + /* 427 + * Next block is the next frame. 428 + * This DMA engine driver currently only supports cyclic DMA. 429 + * Therefore, wrap around at number of frames. 430 + */ 431 + control_block->next = d->control_block_base_phys + 432 + sizeof(struct bcm2835_dma_cb) 433 + * ((frame + 1) % d->frames); 434 + } 435 + 436 + return vchan_tx_prep(&c->vc, &d->vd, flags); 437 + } 438 + 439 + static int bcm2835_dma_slave_config(struct bcm2835_chan *c, 440 + struct dma_slave_config *cfg) 441 + { 442 + if ((cfg->direction == DMA_DEV_TO_MEM && 443 + cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || 444 + (cfg->direction == DMA_MEM_TO_DEV && 445 + cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) || 446 + !is_slave_direction(cfg->direction)) { 447 + return -EINVAL; 448 + } 449 + 450 + c->cfg = *cfg; 451 + 452 + return 0; 453 + } 454 + 455 + static int bcm2835_dma_terminate_all(struct bcm2835_chan *c) 456 + { 457 + struct bcm2835_dmadev *d = to_bcm2835_dma_dev(c->vc.chan.device); 458 + unsigned long flags; 459 + int timeout = 10000; 460 + LIST_HEAD(head); 461 + 462 + spin_lock_irqsave(&c->vc.lock, flags); 463 + 464 + /* Prevent this channel being scheduled */ 465 + spin_lock(&d->lock); 466 + list_del_init(&c->node); 467 + spin_unlock(&d->lock); 468 + 469 + /* 470 + * Stop DMA activity: we assume the callback will not be called 471 + * after bcm_dma_abort() returns (even if it does, it will see 472 + * c->desc is NULL and exit.) 
473 + */ 474 + if (c->desc) { 475 + c->desc = NULL; 476 + bcm2835_dma_abort(c->chan_base); 477 + 478 + /* Wait for stopping */ 479 + while (--timeout) { 480 + if (!(readl(c->chan_base + BCM2835_DMA_CS) & 481 + BCM2835_DMA_ACTIVE)) 482 + break; 483 + 484 + cpu_relax(); 485 + } 486 + 487 + if (!timeout) 488 + dev_err(d->ddev.dev, "DMA transfer could not be terminated\n"); 489 + } 490 + 491 + vchan_get_all_descriptors(&c->vc, &head); 492 + spin_unlock_irqrestore(&c->vc.lock, flags); 493 + vchan_dma_desc_free_list(&c->vc, &head); 494 + 495 + return 0; 496 + } 497 + 498 + static int bcm2835_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 499 + unsigned long arg) 500 + { 501 + struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); 502 + 503 + switch (cmd) { 504 + case DMA_SLAVE_CONFIG: 505 + return bcm2835_dma_slave_config(c, 506 + (struct dma_slave_config *)arg); 507 + 508 + case DMA_TERMINATE_ALL: 509 + return bcm2835_dma_terminate_all(c); 510 + 511 + default: 512 + return -ENXIO; 513 + } 514 + } 515 + 516 + static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq) 517 + { 518 + struct bcm2835_chan *c; 519 + 520 + c = devm_kzalloc(d->ddev.dev, sizeof(*c), GFP_KERNEL); 521 + if (!c) 522 + return -ENOMEM; 523 + 524 + c->vc.desc_free = bcm2835_dma_desc_free; 525 + vchan_init(&c->vc, &d->ddev); 526 + INIT_LIST_HEAD(&c->node); 527 + 528 + d->ddev.chancnt++; 529 + 530 + c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id); 531 + c->ch = chan_id; 532 + c->irq_number = irq; 533 + 534 + return 0; 535 + } 536 + 537 + static void bcm2835_dma_free(struct bcm2835_dmadev *od) 538 + { 539 + struct bcm2835_chan *c, *next; 540 + 541 + list_for_each_entry_safe(c, next, &od->ddev.channels, 542 + vc.chan.device_node) { 543 + list_del(&c->vc.chan.device_node); 544 + tasklet_kill(&c->vc.task); 545 + } 546 + } 547 + 548 + static const struct of_device_id bcm2835_dma_of_match[] = { 549 + { .compatible = "brcm,bcm2835-dma", }, 550 + {}, 551 + }; 552 + MODULE_DEVICE_TABLE(of, bcm2835_dma_of_match); 553 + 554 + static struct dma_chan *bcm2835_dma_xlate(struct of_phandle_args *spec, 555 + struct of_dma *ofdma) 556 + { 557 + struct bcm2835_dmadev *d = ofdma->of_dma_data; 558 + struct dma_chan *chan; 559 + 560 + chan = dma_get_any_slave_channel(&d->ddev); 561 + if (!chan) 562 + return NULL; 563 + 564 + /* Set DREQ from param */ 565 + to_bcm2835_dma_chan(chan)->dreq = spec->args[0]; 566 + 567 + return chan; 568 + } 569 + 570 + static int bcm2835_dma_device_slave_caps(struct dma_chan *dchan, 571 + struct dma_slave_caps *caps) 572 + { 573 + caps->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 574 + caps->dstn_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES); 575 + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 576 + caps->cmd_pause = false; 577 + caps->cmd_terminate = true; 578 + 579 + return 0; 580 + } 581 + 582 + static int bcm2835_dma_probe(struct platform_device *pdev) 583 + { 584 + struct bcm2835_dmadev *od; 585 + struct resource *res; 586 + void __iomem *base; 587 + int rc; 588 + int i; 589 + int irq; 590 + uint32_t chans_available; 591 + 592 + if (!pdev->dev.dma_mask) 593 + pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; 594 + 595 + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 596 + if (rc) 597 + return rc; 598 + 599 + od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL); 600 + if (!od) 601 + return -ENOMEM; 602 + 603 + pdev->dev.dma_parms = &od->dma_parms; 604 + dma_set_max_seg_size(&pdev->dev, 0x3FFFFFFF); 605 + 606 + res = platform_get_resource(pdev, 
IORESOURCE_MEM, 0); 607 + base = devm_ioremap_resource(&pdev->dev, res); 608 + if (IS_ERR(base)) 609 + return PTR_ERR(base); 610 + 611 + od->base = base; 612 + 613 + dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); 614 + dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask); 615 + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); 616 + od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources; 617 + od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources; 618 + od->ddev.device_tx_status = bcm2835_dma_tx_status; 619 + od->ddev.device_issue_pending = bcm2835_dma_issue_pending; 620 + od->ddev.device_slave_caps = bcm2835_dma_device_slave_caps; 621 + od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic; 622 + od->ddev.device_control = bcm2835_dma_control; 623 + od->ddev.dev = &pdev->dev; 624 + INIT_LIST_HEAD(&od->ddev.channels); 625 + spin_lock_init(&od->lock); 626 + 627 + platform_set_drvdata(pdev, od); 628 + 629 + /* Request DMA channel mask from device tree */ 630 + if (of_property_read_u32(pdev->dev.of_node, 631 + "brcm,dma-channel-mask", 632 + &chans_available)) { 633 + dev_err(&pdev->dev, "Failed to get channel mask\n"); 634 + rc = -EINVAL; 635 + goto err_no_dma; 636 + } 637 + 638 + /* 639 + * Do not use the FIQ and BULK channels, 640 + * because they are used by the GPU. 641 + */ 642 + chans_available &= ~(BCM2835_DMA_FIQ_MASK | BCM2835_DMA_BULK_MASK); 643 + 644 + for (i = 0; i < pdev->num_resources; i++) { 645 + irq = platform_get_irq(pdev, i); 646 + if (irq < 0) 647 + break; 648 + 649 + if (chans_available & (1 << i)) { 650 + rc = bcm2835_dma_chan_init(od, i, irq); 651 + if (rc) 652 + goto err_no_dma; 653 + } 654 + } 655 + 656 + dev_dbg(&pdev->dev, "Initialized %i DMA channels\n", i); 657 + 658 + /* Device-tree DMA controller registration */ 659 + rc = of_dma_controller_register(pdev->dev.of_node, 660 + bcm2835_dma_xlate, od); 661 + if (rc) { 662 + dev_err(&pdev->dev, "Failed to register DMA controller\n"); 663 + goto err_no_dma; 664 + } 665 + 666 + rc = dma_async_device_register(&od->ddev); 667 + if (rc) { 668 + dev_err(&pdev->dev, 669 + "Failed to register slave DMA engine device: %d\n", rc); 670 + goto err_no_dma; 671 + } 672 + 673 + dev_dbg(&pdev->dev, "Load BCM2835 DMA engine driver\n"); 674 + 675 + return 0; 676 + 677 + err_no_dma: 678 + bcm2835_dma_free(od); 679 + return rc; 680 + } 681 + 682 + static int bcm2835_dma_remove(struct platform_device *pdev) 683 + { 684 + struct bcm2835_dmadev *od = platform_get_drvdata(pdev); 685 + 686 + dma_async_device_unregister(&od->ddev); 687 + bcm2835_dma_free(od); 688 + 689 + return 0; 690 + } 691 + 692 + static struct platform_driver bcm2835_dma_driver = { 693 + .probe = bcm2835_dma_probe, 694 + .remove = bcm2835_dma_remove, 695 + .driver = { 696 + .name = "bcm2835-dma", 697 + .owner = THIS_MODULE, 698 + .of_match_table = of_match_ptr(bcm2835_dma_of_match), 699 + }, 700 + }; 701 + 702 + module_platform_driver(bcm2835_dma_driver); 703 + 704 + MODULE_ALIAS("platform:bcm2835-dma"); 705 + MODULE_DESCRIPTION("BCM2835 DMA engine driver"); 706 + MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>"); 707 + MODULE_LICENSE("GPL v2");
+3 -1
drivers/dma/cppi41.c
···
		goto err_chans;

	irq = irq_of_parse_and_map(dev->of_node, 0);
-	if (!irq)
+	if (!irq) {
+		ret = -EINVAL;
		goto err_irq;
+	}

	cppi_writel(USBSS_IRQ_PD_COMP, cdd->usbss_mem + USBSS_IRQ_ENABLER);
+2 -2
drivers/dma/dmatest.c
···
		   S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(channel, "Bus ID of the channel to test (default: any)");

- static char test_device[20];
+ static char test_device[32];
module_param_string(device, test_device, sizeof(test_device),
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(device, "Bus ID of the DMA Engine to test (default: any)");
···
struct dmatest_params {
	unsigned int	buf_size;
	char		channel[20];
-	char		device[20];
+	char		device[32];
	unsigned int	threads_per_chan;
	unsigned int	max_channels;
	unsigned int	iterations;
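
The wider buffer matters mainly for DT-based platforms, where a bus ID derived
from the device-tree node name (e.g. a hypothetical "f1042000.dma-controller",
23 characters) did not fit in the old 20-character buffer and was silently
truncated when passed as "modprobe dmatest device=...".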
+18 -17
drivers/dma/dw/core.c
···
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctllo;

-	/* Software emulation of LLP mode relies on interrupts to continue
-	 * multi block transfer. */
+	/*
+	 * Software emulation of LLP mode relies on interrupts to continue
+	 * multi block transfer.
+	 */
	ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, desc->lli.sar);
···
			&dwc->flags);
	if (was_soft_llp) {
		dev_err(chan2dev(&dwc->chan),
-			"BUG: Attempted to start new LLP transfer "
-			"inside ongoing one\n");
+			"BUG: Attempted to start new LLP transfer inside ongoing one\n");
		return;
	}
···
		return;
	}

-	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=0x%llx\n", __func__,
-			(unsigned long long)llp);
+	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
···
			unlikely(status_xfer & dwc->mask)) {
		int i;

-		dev_err(chan2dev(&dwc->chan), "cyclic DMA unexpected %s "
-			"interrupt, stopping DMA transfer\n",
-			status_xfer ? "xfer" : "error");
+		dev_err(chan2dev(&dwc->chan),
+			"cyclic DMA unexpected %s interrupt, stopping DMA transfer\n",
+			status_xfer ? "xfer" : "error");

		spin_lock_irqsave(&dwc->lock, flags);
···
	u32 ctllo;

	dev_vdbg(chan2dev(chan),
-			"%s: d0x%llx s0x%llx l0x%zx f0x%lx\n", __func__,
-			(unsigned long long)dest, (unsigned long long)src,
-			len, flags);
+			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
+			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
···
	/* Let's make a cyclic list */
	last->lli.llp = cdesc->desc[0]->txd.phys;

-	dev_dbg(chan2dev(&dwc->chan), "cyclic prepared buf 0x%llx len %zu "
-			"period %zu periods %d\n", (unsigned long long)buf_addr,
-			buf_len, period_len, periods);
+	dev_dbg(chan2dev(&dwc->chan),
+			"cyclic prepared buf %pad len %zu period %zu periods %d\n",
+			&buf_addr, buf_len, period_len, periods);

	cdesc->periods = periods;
	dwc->cdesc = cdesc;
···
		dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
			dwc_params);

-		/* Decode maximum block size for given channel. The
+		/*
+		 * Decode maximum block size for given channel. The
		 * stored 4 bit value represents blocks from 0x00 for 3
-		 * up to 0x0a for 4095. */
+		 * up to 0x0a for 4095.
+		 */
		dwc->block_size =
			(4 << ((max_blk_size >> 4 * i) & 0xf)) - 1;
		dwc->nollp =
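
The %pad conversions above use the printk extension that takes a dma_addr_t by
reference and prints it at its native width, so the same format string works
whether dma_addr_t is 32 or 64 bits, e.g. dev_dbg(dev, "buf at %pad\n", &buf);
with no casts needed.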
+3 -3
drivers/dma/edma.c
···
	echan->alloced = true;
	echan->slot[0] = echan->ch_num;

-	dev_info(dev, "allocated channel for %u:%u\n",
-		 EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));
+	dev_dbg(dev, "allocated channel for %u:%u\n",
+		EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num));

	return 0;
···
		echan->alloced = false;
	}

-	dev_info(dev, "freeing channel for %u\n", echan->ch_num);
+	dev_dbg(dev, "freeing channel for %u\n", echan->ch_num);
}

/* Send pending descriptor to hardware */
+1 -1
drivers/dma/fsldma.h
···
 * channel is allowed to transfer before the DMA engine pauses
 * the current channel and switches to the next channel
 */
- #define FSL_DMA_MR_BWC		0x08000000
+ #define FSL_DMA_MR_BWC		0x0A000000

/* Special MR definition for MPC8349 */
#define FSL_DMA_MR_EOTIE	0x00000080
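
For reference, the BWC field in bits 24-27 of the mode register appears to
encode the bandwidth-control limit as a power of two: 0x8 selects 2^8 = 256
bytes per turn, while 0xA selects 2^10 = 1024 bytes, which matches the
shortlog entry "change BWC from 256 bytes to 1024 bytes".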
+22 -1
drivers/dma/imx-sdma.c
···
	struct clk			*clk_ipg;
	struct clk			*clk_ahb;
	spinlock_t			channel_0_lock;
+	u32				script_number;
	struct sdma_script_start_addrs	*script_addrs;
	const struct sdma_driver_data	*drvdata;
};
···
	case IMX_DMATYPE_SSI:
		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
+		break;
+	case IMX_DMATYPE_SSI_DUAL:
+		per_2_emi = sdma->script_addrs->ssish_2_mcu_addr;
+		emi_2_per = sdma->script_addrs->mcu_2_ssish_addr;
		break;
	case IMX_DMATYPE_SSI_SP:
	case IMX_DMATYPE_MMC:
···
}

#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
+ #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38

static void sdma_add_scripts(struct sdma_engine *sdma,
		const struct sdma_script_start_addrs *addr)
···
	s32 *saddr_arr = (u32 *)sdma->script_addrs;
	int i;

-	for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++)
+	/* use the default firmware in ROM if missing external firmware */
+	if (!sdma->script_number)
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+
+	for (i = 0; i < sdma->script_number; i++)
		if (addr_arr[i] > 0)
			saddr_arr[i] = addr_arr[i];
}
···
		goto err_firmware;
	if (header->ram_code_start + header->ram_code_size > fw->size)
		goto err_firmware;
+	switch (header->version_major) {
+	case 1:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1;
+		break;
+	case 2:
+		sdma->script_number = SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2;
+		break;
+	default:
+		dev_err(sdma->dev, "unknown firmware version\n");
+		goto err_firmware;
+	}

	addr = (void *)header + header->script_addrs_start;
	ram_code = (void *)header + header->ram_code_start;
+2 -2
drivers/dma/k3dma.c
···
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

-	if (sgl == 0)
+	if (sgl == NULL)
		return NULL;

	for_each_sg(sgl, sg, sglen, i) {
···
	return 0;
}

- SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
+ static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);

static struct platform_driver k3_pdma_driver = {
	.driver = {
+98 -93
drivers/dma/mmp_pdma.c
··· 5 5 * it under the terms of the GNU General Public License version 2 as 6 6 * published by the Free Software Foundation. 7 7 */ 8 + 8 9 #include <linux/err.h> 9 10 #include <linux/module.h> 10 11 #include <linux/init.h> ··· 33 32 #define DTADR 0x0208 34 33 #define DCMD 0x020c 35 34 36 - #define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ 37 - #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ 38 - #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ 39 - #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ 40 - #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ 41 - #define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ 42 - #define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ 43 - #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ 35 + #define DCSR_RUN BIT(31) /* Run Bit (read / write) */ 36 + #define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */ 37 + #define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */ 38 + #define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */ 39 + #define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */ 40 + #define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */ 41 + #define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */ 42 + #define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */ 44 43 45 - #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ 46 - #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ 47 - #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ 48 - #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ 49 - #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ 50 - #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ 51 - #define DCSR_EORINTR (1 << 9) /* The end of Receive */ 44 + #define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */ 45 + #define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */ 46 + #define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ 47 + #define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ 48 + #define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ 49 + #define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ 50 + #define DCSR_EORINTR BIT(9) /* The end of Receive */ 52 51 53 - #define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \ 54 - (((n) & 0x3f) << 2)) 55 - #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ 56 - #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ 52 + #define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2)) 53 + #define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */ 54 + #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ 57 55 58 56 #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ 59 - #define DDADR_STOP (1 << 0) /* Stop (read / write) */ 57 + #define DDADR_STOP BIT(0) /* Stop (read / write) */ 60 58 61 - #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ 62 - #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ 63 - #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ 64 - #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ 65 - #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ 66 - #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ 67 - #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. 
*/ 59 + #define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */ 60 + #define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */ 61 + #define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */ 62 + #define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */ 63 + #define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */ 64 + #define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */ 65 + #define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */ 68 66 #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ 69 67 #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ 70 68 #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ ··· 132 132 spinlock_t phy_lock; /* protect alloc/free phy channels */ 133 133 }; 134 134 135 - #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) 136 - #define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) 137 - #define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) 138 - #define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) 135 + #define tx_to_mmp_pdma_desc(tx) \ 136 + container_of(tx, struct mmp_pdma_desc_sw, async_tx) 137 + #define to_mmp_pdma_desc(lh) \ 138 + container_of(lh, struct mmp_pdma_desc_sw, node) 139 + #define to_mmp_pdma_chan(dchan) \ 140 + container_of(dchan, struct mmp_pdma_chan, chan) 141 + #define to_mmp_pdma_dev(dmadev) \ 142 + container_of(dmadev, struct mmp_pdma_device, device) 139 143 140 144 static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) 141 145 { ··· 166 162 writel(dalgn, phy->base + DALGN); 167 163 168 164 reg = (phy->idx << 2) + DCSR; 169 - writel(readl(phy->base + reg) | DCSR_RUN, 170 - phy->base + reg); 165 + writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg); 171 166 } 172 167 173 168 static void disable_chan(struct mmp_pdma_phy *phy) 174 169 { 175 170 u32 reg; 176 171 177 - if (phy) { 178 - reg = (phy->idx << 2) + DCSR; 179 - writel(readl(phy->base + reg) & ~DCSR_RUN, 180 - phy->base + reg); 181 - } 172 + if (!phy) 173 + return; 174 + 175 + reg = (phy->idx << 2) + DCSR; 176 + writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg); 182 177 } 183 178 184 179 static int clear_chan_irq(struct mmp_pdma_phy *phy) ··· 186 183 u32 dint = readl(phy->base + DINT); 187 184 u32 reg = (phy->idx << 2) + DCSR; 188 185 189 - if (dint & BIT(phy->idx)) { 190 - /* clear irq */ 191 - dcsr = readl(phy->base + reg); 192 - writel(dcsr, phy->base + reg); 193 - if ((dcsr & DCSR_BUSERR) && (phy->vchan)) 194 - dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); 195 - return 0; 196 - } 197 - return -EAGAIN; 186 + if (!(dint & BIT(phy->idx))) 187 + return -EAGAIN; 188 + 189 + /* clear irq */ 190 + dcsr = readl(phy->base + reg); 191 + writel(dcsr, phy->base + reg); 192 + if ((dcsr & DCSR_BUSERR) && (phy->vchan)) 193 + dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); 194 + 195 + return 0; 198 196 } 199 197 200 198 static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) 201 199 { 202 200 struct mmp_pdma_phy *phy = dev_id; 203 201 204 - if (clear_chan_irq(phy) == 0) { 205 - tasklet_schedule(&phy->vchan->tasklet); 206 - return IRQ_HANDLED; 207 - } else 202 + if (clear_chan_irq(phy) != 0) 208 203 return IRQ_NONE; 204 + 205 + tasklet_schedule(&phy->vchan->tasklet); 206 + return IRQ_HANDLED; 209 207 } 210 208 211 209 static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) ··· 228 224 229 225 if (irq_num) 230 226 return IRQ_HANDLED; 231 - else 232 - return IRQ_NONE; 227 + 228 + return IRQ_NONE; 233 229 } 234 230 235 
231 /* lookup free phy channel as descending priority */ ··· 249 245 */ 250 246 251 247 spin_lock_irqsave(&pdev->phy_lock, flags); 252 - for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { 248 + for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) { 253 249 for (i = 0; i < pdev->dma_channels; i++) { 254 - if (prio != ((i & 0xf) >> 2)) 250 + if (prio != (i & 0xf) >> 2) 255 251 continue; 256 252 phy = &pdev->phy[i]; 257 253 if (!phy->vchan) { ··· 393 389 if (chan->desc_pool) 394 390 return 1; 395 391 396 - chan->desc_pool = 397 - dma_pool_create(dev_name(&dchan->dev->device), chan->dev, 398 - sizeof(struct mmp_pdma_desc_sw), 399 - __alignof__(struct mmp_pdma_desc_sw), 0); 392 + chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device), 393 + chan->dev, 394 + sizeof(struct mmp_pdma_desc_sw), 395 + __alignof__(struct mmp_pdma_desc_sw), 396 + 0); 400 397 if (!chan->desc_pool) { 401 398 dev_err(chan->dev, "unable to allocate descriptor pool\n"); 402 399 return -ENOMEM; 403 400 } 401 + 404 402 mmp_pdma_free_phy(chan); 405 403 chan->idle = true; 406 404 chan->dev_addr = 0; ··· 410 404 } 411 405 412 406 static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, 413 - struct list_head *list) 407 + struct list_head *list) 414 408 { 415 409 struct mmp_pdma_desc_sw *desc, *_desc; 416 410 ··· 440 434 441 435 static struct dma_async_tx_descriptor * 442 436 mmp_pdma_prep_memcpy(struct dma_chan *dchan, 443 - dma_addr_t dma_dst, dma_addr_t dma_src, 444 - size_t len, unsigned long flags) 437 + dma_addr_t dma_dst, dma_addr_t dma_src, 438 + size_t len, unsigned long flags) 445 439 { 446 440 struct mmp_pdma_chan *chan; 447 441 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; ··· 521 515 522 516 static struct dma_async_tx_descriptor * 523 517 mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, 524 - unsigned int sg_len, enum dma_transfer_direction dir, 525 - unsigned long flags, void *context) 518 + unsigned int sg_len, enum dma_transfer_direction dir, 519 + unsigned long flags, void *context) 526 520 { 527 521 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 528 522 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; ··· 597 591 return NULL; 598 592 } 599 593 600 - static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( 601 - struct dma_chan *dchan, dma_addr_t buf_addr, size_t len, 602 - size_t period_len, enum dma_transfer_direction direction, 603 - unsigned long flags, void *context) 594 + static struct dma_async_tx_descriptor * 595 + mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, 596 + dma_addr_t buf_addr, size_t len, size_t period_len, 597 + enum dma_transfer_direction direction, 598 + unsigned long flags, void *context) 604 599 { 605 600 struct mmp_pdma_chan *chan; 606 601 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; ··· 643 636 goto fail; 644 637 } 645 638 646 - new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN | 647 - (DCMD_LENGTH & period_len); 639 + new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | 640 + (DCMD_LENGTH & period_len)); 648 641 new->desc.dsadr = dma_src; 649 642 new->desc.dtadr = dma_dst; 650 643 ··· 684 677 } 685 678 686 679 static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, 687 - unsigned long arg) 680 + unsigned long arg) 688 681 { 689 682 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); 690 683 struct dma_slave_config *cfg = (void *)arg; 691 684 unsigned long flags; 692 - int ret = 0; 693 685 u32 maxburst = 0, addr = 0; 694 686 enum 
dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; 695 687 ··· 745 739 return -ENOSYS; 746 740 } 747 741 748 - return ret; 742 + return 0; 749 743 } 750 744 751 745 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, 752 - dma_cookie_t cookie, struct dma_tx_state *txstate) 746 + dma_cookie_t cookie, 747 + struct dma_tx_state *txstate) 753 748 { 754 749 return dma_cookie_status(dchan, cookie, txstate); 755 750 } ··· 852 845 return 0; 853 846 } 854 847 855 - static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, 856 - int idx, int irq) 848 + static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) 857 849 { 858 850 struct mmp_pdma_phy *phy = &pdev->phy[idx]; 859 851 struct mmp_pdma_chan *chan; 860 852 int ret; 861 853 862 - chan = devm_kzalloc(pdev->dev, 863 - sizeof(struct mmp_pdma_chan), GFP_KERNEL); 854 + chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan), 855 + GFP_KERNEL); 864 856 if (chan == NULL) 865 857 return -ENOMEM; 866 858 ··· 867 861 phy->base = pdev->base; 868 862 869 863 if (irq) { 870 - ret = devm_request_irq(pdev->dev, irq, 871 - mmp_pdma_chan_handler, 0, "pdma", phy); 864 + ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0, 865 + "pdma", phy); 872 866 if (ret) { 873 867 dev_err(pdev->dev, "channel request irq fail!\n"); 874 868 return ret; ··· 883 877 INIT_LIST_HEAD(&chan->chain_running); 884 878 885 879 /* register virt channel to dma engine */ 886 - list_add_tail(&chan->chan.device_node, 887 - &pdev->device.channels); 880 + list_add_tail(&chan->chan.device_node, &pdev->device.channels); 888 881 889 882 return 0; 890 883 } ··· 899 894 { 900 895 struct mmp_pdma_device *d = ofdma->of_dma_data; 901 896 struct dma_chan *chan; 902 - struct mmp_pdma_chan *c; 903 897 904 898 chan = dma_get_any_slave_channel(&d->device); 905 899 if (!chan) 906 900 return NULL; 907 901 908 - c = to_mmp_pdma_chan(chan); 909 - c->drcmr = dma_spec->args[0]; 902 + to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; 910 903 911 904 return chan; 912 905 } ··· 921 918 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); 922 919 if (!pdev) 923 920 return -ENOMEM; 921 + 924 922 pdev->dev = &op->dev; 925 923 926 924 spin_lock_init(&pdev->phy_lock); ··· 933 929 934 930 of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); 935 931 if (of_id) 936 - of_property_read_u32(pdev->dev->of_node, 937 - "#dma-channels", &dma_channels); 932 + of_property_read_u32(pdev->dev->of_node, "#dma-channels", 933 + &dma_channels); 938 934 else if (pdata && pdata->dma_channels) 939 935 dma_channels = pdata->dma_channels; 940 936 else ··· 946 942 irq_num++; 947 943 } 948 944 949 - pdev->phy = devm_kzalloc(pdev->dev, 950 - dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); 945 + pdev->phy = devm_kcalloc(pdev->dev, 946 + dma_channels, sizeof(struct mmp_pdma_chan), 947 + GFP_KERNEL); 951 948 if (pdev->phy == NULL) 952 949 return -ENOMEM; 953 950 ··· 957 952 if (irq_num != dma_channels) { 958 953 /* all chan share one irq, demux inside */ 959 954 irq = platform_get_irq(op, 0); 960 - ret = devm_request_irq(pdev->dev, irq, 961 - mmp_pdma_int_handler, 0, "pdma", pdev); 955 + ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0, 956 + "pdma", pdev); 962 957 if (ret) 963 958 return ret; 964 959 } ··· 1034 1029 if (chan->device->dev->driver != &mmp_pdma_driver.driver) 1035 1030 return false; 1036 1031 1037 - c->drcmr = *(unsigned int *) param; 1032 + c->drcmr = *(unsigned int *)param; 1038 1033 1039 1034 return true; 1040 1035 } ··· 1042 1037 1043 1038 
module_platform_driver(mmp_pdma_driver); 1044 1039 1045 - MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); 1040 + MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); 1046 1041 MODULE_AUTHOR("Marvell International Ltd."); 1047 1042 MODULE_LICENSE("GPL v2");
+21 -7
drivers/dma/mmp_tdma.c
···
	int				idx;
	enum mmp_tdma_type		type;
	int				irq;
-	unsigned long			reg_base;
+	void __iomem			*reg_base;

	size_t				buf_len;
	size_t				period_len;
	size_t				pos;
+
+	struct gen_pool			*pool;
};

#define TDMA_CHANNEL_NUM 2
···

static int mmp_tdma_config_chan(struct mmp_tdma_chan *tdmac)
{
-	unsigned int tdcr;
+	unsigned int tdcr = 0;

	mmp_tdma_disable_chan(tdmac);
···
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

-	gpool = sram_get_gpool("asram");
+	gpool = tdmac->pool;
	if (tdmac->desc_arr)
		gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
				size);
···
	struct gen_pool *gpool;
	int size = tdmac->desc_num * sizeof(struct mmp_tdma_desc);

-	gpool = sram_get_gpool("asram");
+	gpool = tdmac->pool;
	if (!gpool)
		return NULL;
···
}

static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
-					int idx, int irq, int type)
+					int idx, int irq,
+					int type, struct gen_pool *pool)
{
	struct mmp_tdma_chan *tdmac;
···
	tdmac->chan.device = &tdev->device;
	tdmac->idx	   = idx;
	tdmac->type	   = type;
-	tdmac->reg_base	   = (unsigned long)tdev->base + idx * 4;
+	tdmac->reg_base	   = tdev->base + idx * 4;
+	tdmac->pool	   = pool;
	tdmac->status = DMA_COMPLETE;
	tdev->tdmac[tdmac->idx] = tdmac;
	tasklet_init(&tdmac->tasklet, dma_do_tasklet, (unsigned long)tdmac);
···
	int i, ret;
	int irq = 0, irq_num = 0;
	int chan_num = TDMA_CHANNEL_NUM;
+	struct gen_pool *pool;

	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
	if (of_id)
···

	INIT_LIST_HEAD(&tdev->device.channels);

+	if (pdev->dev.of_node)
+		pool = of_get_named_gen_pool(pdev->dev.of_node, "asram", 0);
+	else
+		pool = sram_get_gpool("asram");
+	if (!pool) {
+		dev_err(&pdev->dev, "asram pool not available\n");
+		return -ENOMEM;
+	}
+
	if (irq_num != chan_num) {
		irq = platform_get_irq(pdev, 0);
		ret = devm_request_irq(&pdev->dev, irq,
···
	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
-		ret = mmp_tdma_chan_init(tdev, i, irq, type);
+		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
		if (ret)
			return ret;
	}
+699
drivers/dma/moxart-dma.c
··· 1 + /* 2 + * MOXA ART SoCs DMA Engine support. 3 + * 4 + * Copyright (C) 2013 Jonas Jensen 5 + * 6 + * Jonas Jensen <jonas.jensen@gmail.com> 7 + * 8 + * This file is licensed under the terms of the GNU General Public 9 + * License version 2. This program is licensed "as is" without any 10 + * warranty of any kind, whether express or implied. 11 + */ 12 + 13 + #include <linux/dmaengine.h> 14 + #include <linux/dma-mapping.h> 15 + #include <linux/err.h> 16 + #include <linux/init.h> 17 + #include <linux/interrupt.h> 18 + #include <linux/list.h> 19 + #include <linux/module.h> 20 + #include <linux/platform_device.h> 21 + #include <linux/slab.h> 22 + #include <linux/spinlock.h> 23 + #include <linux/of_address.h> 24 + #include <linux/of_irq.h> 25 + #include <linux/of_dma.h> 26 + #include <linux/bitops.h> 27 + 28 + #include <asm/cacheflush.h> 29 + 30 + #include "dmaengine.h" 31 + #include "virt-dma.h" 32 + 33 + #define APB_DMA_MAX_CHANNEL 4 34 + 35 + #define REG_OFF_ADDRESS_SOURCE 0 36 + #define REG_OFF_ADDRESS_DEST 4 37 + #define REG_OFF_CYCLES 8 38 + #define REG_OFF_CTRL 12 39 + #define REG_OFF_CHAN_SIZE 16 40 + 41 + #define APB_DMA_ENABLE BIT(0) 42 + #define APB_DMA_FIN_INT_STS BIT(1) 43 + #define APB_DMA_FIN_INT_EN BIT(2) 44 + #define APB_DMA_BURST_MODE BIT(3) 45 + #define APB_DMA_ERR_INT_STS BIT(4) 46 + #define APB_DMA_ERR_INT_EN BIT(5) 47 + 48 + /* 49 + * Unset: APB 50 + * Set: AHB 51 + */ 52 + #define APB_DMA_SOURCE_SELECT 0x40 53 + #define APB_DMA_DEST_SELECT 0x80 54 + 55 + #define APB_DMA_SOURCE 0x100 56 + #define APB_DMA_DEST 0x1000 57 + 58 + #define APB_DMA_SOURCE_MASK 0x700 59 + #define APB_DMA_DEST_MASK 0x7000 60 + 61 + /* 62 + * 000: No increment 63 + * 001: +1 (Burst=0), +4 (Burst=1) 64 + * 010: +2 (Burst=0), +8 (Burst=1) 65 + * 011: +4 (Burst=0), +16 (Burst=1) 66 + * 101: -1 (Burst=0), -4 (Burst=1) 67 + * 110: -2 (Burst=0), -8 (Burst=1) 68 + * 111: -4 (Burst=0), -16 (Burst=1) 69 + */ 70 + #define APB_DMA_SOURCE_INC_0 0 71 + #define APB_DMA_SOURCE_INC_1_4 0x100 72 + #define APB_DMA_SOURCE_INC_2_8 0x200 73 + #define APB_DMA_SOURCE_INC_4_16 0x300 74 + #define APB_DMA_SOURCE_DEC_1_4 0x500 75 + #define APB_DMA_SOURCE_DEC_2_8 0x600 76 + #define APB_DMA_SOURCE_DEC_4_16 0x700 77 + #define APB_DMA_DEST_INC_0 0 78 + #define APB_DMA_DEST_INC_1_4 0x1000 79 + #define APB_DMA_DEST_INC_2_8 0x2000 80 + #define APB_DMA_DEST_INC_4_16 0x3000 81 + #define APB_DMA_DEST_DEC_1_4 0x5000 82 + #define APB_DMA_DEST_DEC_2_8 0x6000 83 + #define APB_DMA_DEST_DEC_4_16 0x7000 84 + 85 + /* 86 + * Request signal select source/destination address for DMA hardware handshake. 87 + * 88 + * The request line number is a property of the DMA controller itself, 89 + * e.g. MMC must always request channels where dma_slave_config->slave_id is 5. 
90 + * 91 + * 0: No request / Grant signal 92 + * 1-15: Request / Grant signal 93 + */ 94 + #define APB_DMA_SOURCE_REQ_NO 0x1000000 95 + #define APB_DMA_SOURCE_REQ_NO_MASK 0xf000000 96 + #define APB_DMA_DEST_REQ_NO 0x10000 97 + #define APB_DMA_DEST_REQ_NO_MASK 0xf0000 98 + 99 + #define APB_DMA_DATA_WIDTH 0x100000 100 + #define APB_DMA_DATA_WIDTH_MASK 0x300000 101 + /* 102 + * Data width of transfer: 103 + * 104 + * 00: Word 105 + * 01: Half 106 + * 10: Byte 107 + */ 108 + #define APB_DMA_DATA_WIDTH_4 0 109 + #define APB_DMA_DATA_WIDTH_2 0x100000 110 + #define APB_DMA_DATA_WIDTH_1 0x200000 111 + 112 + #define APB_DMA_CYCLES_MASK 0x00ffffff 113 + 114 + #define MOXART_DMA_DATA_TYPE_S8 0x00 115 + #define MOXART_DMA_DATA_TYPE_S16 0x01 116 + #define MOXART_DMA_DATA_TYPE_S32 0x02 117 + 118 + struct moxart_sg { 119 + dma_addr_t addr; 120 + uint32_t len; 121 + }; 122 + 123 + struct moxart_desc { 124 + enum dma_transfer_direction dma_dir; 125 + dma_addr_t dev_addr; 126 + unsigned int sglen; 127 + unsigned int dma_cycles; 128 + struct virt_dma_desc vd; 129 + uint8_t es; 130 + struct moxart_sg sg[0]; 131 + }; 132 + 133 + struct moxart_chan { 134 + struct virt_dma_chan vc; 135 + 136 + void __iomem *base; 137 + struct moxart_desc *desc; 138 + 139 + struct dma_slave_config cfg; 140 + 141 + bool allocated; 142 + bool error; 143 + int ch_num; 144 + unsigned int line_reqno; 145 + unsigned int sgidx; 146 + }; 147 + 148 + struct moxart_dmadev { 149 + struct dma_device dma_slave; 150 + struct moxart_chan slave_chans[APB_DMA_MAX_CHANNEL]; 151 + }; 152 + 153 + struct moxart_filter_data { 154 + struct moxart_dmadev *mdc; 155 + struct of_phandle_args *dma_spec; 156 + }; 157 + 158 + static const unsigned int es_bytes[] = { 159 + [MOXART_DMA_DATA_TYPE_S8] = 1, 160 + [MOXART_DMA_DATA_TYPE_S16] = 2, 161 + [MOXART_DMA_DATA_TYPE_S32] = 4, 162 + }; 163 + 164 + static struct device *chan2dev(struct dma_chan *chan) 165 + { 166 + return &chan->dev->device; 167 + } 168 + 169 + static inline struct moxart_chan *to_moxart_dma_chan(struct dma_chan *c) 170 + { 171 + return container_of(c, struct moxart_chan, vc.chan); 172 + } 173 + 174 + static inline struct moxart_desc *to_moxart_dma_desc( 175 + struct dma_async_tx_descriptor *t) 176 + { 177 + return container_of(t, struct moxart_desc, vd.tx); 178 + } 179 + 180 + static void moxart_dma_desc_free(struct virt_dma_desc *vd) 181 + { 182 + kfree(container_of(vd, struct moxart_desc, vd)); 183 + } 184 + 185 + static int moxart_terminate_all(struct dma_chan *chan) 186 + { 187 + struct moxart_chan *ch = to_moxart_dma_chan(chan); 188 + unsigned long flags; 189 + LIST_HEAD(head); 190 + u32 ctrl; 191 + 192 + dev_dbg(chan2dev(chan), "%s: ch=%p\n", __func__, ch); 193 + 194 + spin_lock_irqsave(&ch->vc.lock, flags); 195 + 196 + if (ch->desc) 197 + ch->desc = NULL; 198 + 199 + ctrl = readl(ch->base + REG_OFF_CTRL); 200 + ctrl &= ~(APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); 201 + writel(ctrl, ch->base + REG_OFF_CTRL); 202 + 203 + vchan_get_all_descriptors(&ch->vc, &head); 204 + spin_unlock_irqrestore(&ch->vc.lock, flags); 205 + vchan_dma_desc_free_list(&ch->vc, &head); 206 + 207 + return 0; 208 + } 209 + 210 + static int moxart_slave_config(struct dma_chan *chan, 211 + struct dma_slave_config *cfg) 212 + { 213 + struct moxart_chan *ch = to_moxart_dma_chan(chan); 214 + u32 ctrl; 215 + 216 + ch->cfg = *cfg; 217 + 218 + ctrl = readl(ch->base + REG_OFF_CTRL); 219 + ctrl |= APB_DMA_BURST_MODE; 220 + ctrl &= ~(APB_DMA_DEST_MASK | APB_DMA_SOURCE_MASK); 221 + ctrl &= 
~(APB_DMA_DEST_REQ_NO_MASK | APB_DMA_SOURCE_REQ_NO_MASK); 222 + 223 + switch (ch->cfg.src_addr_width) { 224 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 225 + ctrl |= APB_DMA_DATA_WIDTH_1; 226 + if (ch->cfg.direction != DMA_MEM_TO_DEV) 227 + ctrl |= APB_DMA_DEST_INC_1_4; 228 + else 229 + ctrl |= APB_DMA_SOURCE_INC_1_4; 230 + break; 231 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 232 + ctrl |= APB_DMA_DATA_WIDTH_2; 233 + if (ch->cfg.direction != DMA_MEM_TO_DEV) 234 + ctrl |= APB_DMA_DEST_INC_2_8; 235 + else 236 + ctrl |= APB_DMA_SOURCE_INC_2_8; 237 + break; 238 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 239 + ctrl &= ~APB_DMA_DATA_WIDTH; 240 + if (ch->cfg.direction != DMA_MEM_TO_DEV) 241 + ctrl |= APB_DMA_DEST_INC_4_16; 242 + else 243 + ctrl |= APB_DMA_SOURCE_INC_4_16; 244 + break; 245 + default: 246 + return -EINVAL; 247 + } 248 + 249 + if (ch->cfg.direction == DMA_MEM_TO_DEV) { 250 + ctrl &= ~APB_DMA_DEST_SELECT; 251 + ctrl |= APB_DMA_SOURCE_SELECT; 252 + ctrl |= (ch->line_reqno << 16 & 253 + APB_DMA_DEST_REQ_NO_MASK); 254 + } else { 255 + ctrl |= APB_DMA_DEST_SELECT; 256 + ctrl &= ~APB_DMA_SOURCE_SELECT; 257 + ctrl |= (ch->line_reqno << 24 & 258 + APB_DMA_SOURCE_REQ_NO_MASK); 259 + } 260 + 261 + writel(ctrl, ch->base + REG_OFF_CTRL); 262 + 263 + return 0; 264 + } 265 + 266 + static int moxart_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, 267 + unsigned long arg) 268 + { 269 + int ret = 0; 270 + 271 + switch (cmd) { 272 + case DMA_PAUSE: 273 + case DMA_RESUME: 274 + return -EINVAL; 275 + case DMA_TERMINATE_ALL: 276 + moxart_terminate_all(chan); 277 + break; 278 + case DMA_SLAVE_CONFIG: 279 + ret = moxart_slave_config(chan, (struct dma_slave_config *)arg); 280 + break; 281 + default: 282 + ret = -ENOSYS; 283 + } 284 + 285 + return ret; 286 + } 287 + 288 + static struct dma_async_tx_descriptor *moxart_prep_slave_sg( 289 + struct dma_chan *chan, struct scatterlist *sgl, 290 + unsigned int sg_len, enum dma_transfer_direction dir, 291 + unsigned long tx_flags, void *context) 292 + { 293 + struct moxart_chan *ch = to_moxart_dma_chan(chan); 294 + struct moxart_desc *d; 295 + enum dma_slave_buswidth dev_width; 296 + dma_addr_t dev_addr; 297 + struct scatterlist *sgent; 298 + unsigned int es; 299 + unsigned int i; 300 + 301 + if (!is_slave_direction(dir)) { 302 + dev_err(chan2dev(chan), "%s: invalid DMA direction\n", 303 + __func__); 304 + return NULL; 305 + } 306 + 307 + if (dir == DMA_DEV_TO_MEM) { 308 + dev_addr = ch->cfg.src_addr; 309 + dev_width = ch->cfg.src_addr_width; 310 + } else { 311 + dev_addr = ch->cfg.dst_addr; 312 + dev_width = ch->cfg.dst_addr_width; 313 + } 314 + 315 + switch (dev_width) { 316 + case DMA_SLAVE_BUSWIDTH_1_BYTE: 317 + es = MOXART_DMA_DATA_TYPE_S8; 318 + break; 319 + case DMA_SLAVE_BUSWIDTH_2_BYTES: 320 + es = MOXART_DMA_DATA_TYPE_S16; 321 + break; 322 + case DMA_SLAVE_BUSWIDTH_4_BYTES: 323 + es = MOXART_DMA_DATA_TYPE_S32; 324 + break; 325 + default: 326 + dev_err(chan2dev(chan), "%s: unsupported data width (%u)\n", 327 + __func__, dev_width); 328 + return NULL; 329 + } 330 + 331 + d = kzalloc(sizeof(*d) + sg_len * sizeof(d->sg[0]), GFP_ATOMIC); 332 + if (!d) 333 + return NULL; 334 + 335 + d->dma_dir = dir; 336 + d->dev_addr = dev_addr; 337 + d->es = es; 338 + 339 + for_each_sg(sgl, sgent, sg_len, i) { 340 + d->sg[i].addr = sg_dma_address(sgent); 341 + d->sg[i].len = sg_dma_len(sgent); 342 + } 343 + 344 + d->sglen = sg_len; 345 + 346 + ch->error = 0; 347 + 348 + return vchan_tx_prep(&ch->vc, &d->vd, tx_flags); 349 + } 350 + 351 + static struct dma_chan *moxart_of_xlate(struct 
of_phandle_args *dma_spec, 352 + struct of_dma *ofdma) 353 + { 354 + struct moxart_dmadev *mdc = ofdma->of_dma_data; 355 + struct dma_chan *chan; 356 + struct moxart_chan *ch; 357 + 358 + chan = dma_get_any_slave_channel(&mdc->dma_slave); 359 + if (!chan) 360 + return NULL; 361 + 362 + ch = to_moxart_dma_chan(chan); 363 + ch->line_reqno = dma_spec->args[0]; 364 + 365 + return chan; 366 + } 367 + 368 + static int moxart_alloc_chan_resources(struct dma_chan *chan) 369 + { 370 + struct moxart_chan *ch = to_moxart_dma_chan(chan); 371 + 372 + dev_dbg(chan2dev(chan), "%s: allocating channel #%u\n", 373 + __func__, ch->ch_num); 374 + ch->allocated = 1; 375 + 376 + return 0; 377 + } 378 + 379 + static void moxart_free_chan_resources(struct dma_chan *chan) 380 + { 381 + struct moxart_chan *ch = to_moxart_dma_chan(chan); 382 + 383 + vchan_free_chan_resources(&ch->vc); 384 + 385 + dev_dbg(chan2dev(chan), "%s: freeing channel #%u\n", 386 + __func__, ch->ch_num); 387 + ch->allocated = 0; 388 + } 389 + 390 + static void moxart_dma_set_params(struct moxart_chan *ch, dma_addr_t src_addr, 391 + dma_addr_t dst_addr) 392 + { 393 + writel(src_addr, ch->base + REG_OFF_ADDRESS_SOURCE); 394 + writel(dst_addr, ch->base + REG_OFF_ADDRESS_DEST); 395 + } 396 + 397 + static void moxart_set_transfer_params(struct moxart_chan *ch, unsigned int len) 398 + { 399 + struct moxart_desc *d = ch->desc; 400 + unsigned int sglen_div = es_bytes[d->es]; 401 + 402 + d->dma_cycles = len >> sglen_div; 403 + 404 + /* 405 + * There are 4 cycles on 64 bytes copied, i.e. one cycle copies 16 406 + * bytes ( when width is APB_DMAB_DATA_WIDTH_4 ). 407 + */ 408 + writel(d->dma_cycles, ch->base + REG_OFF_CYCLES); 409 + 410 + dev_dbg(chan2dev(&ch->vc.chan), "%s: set %u DMA cycles (len=%u)\n", 411 + __func__, d->dma_cycles, len); 412 + } 413 + 414 + static void moxart_start_dma(struct moxart_chan *ch) 415 + { 416 + u32 ctrl; 417 + 418 + ctrl = readl(ch->base + REG_OFF_CTRL); 419 + ctrl |= (APB_DMA_ENABLE | APB_DMA_FIN_INT_EN | APB_DMA_ERR_INT_EN); 420 + writel(ctrl, ch->base + REG_OFF_CTRL); 421 + } 422 + 423 + static void moxart_dma_start_sg(struct moxart_chan *ch, unsigned int idx) 424 + { 425 + struct moxart_desc *d = ch->desc; 426 + struct moxart_sg *sg = ch->desc->sg + idx; 427 + 428 + if (ch->desc->dma_dir == DMA_MEM_TO_DEV) 429 + moxart_dma_set_params(ch, sg->addr, d->dev_addr); 430 + else if (ch->desc->dma_dir == DMA_DEV_TO_MEM) 431 + moxart_dma_set_params(ch, d->dev_addr, sg->addr); 432 + 433 + moxart_set_transfer_params(ch, sg->len); 434 + 435 + moxart_start_dma(ch); 436 + } 437 + 438 + static void moxart_dma_start_desc(struct dma_chan *chan) 439 + { 440 + struct moxart_chan *ch = to_moxart_dma_chan(chan); 441 + struct virt_dma_desc *vd; 442 + 443 + vd = vchan_next_desc(&ch->vc); 444 + 445 + if (!vd) { 446 + ch->desc = NULL; 447 + return; 448 + } 449 + 450 + list_del(&vd->node); 451 + 452 + ch->desc = to_moxart_dma_desc(&vd->tx); 453 + ch->sgidx = 0; 454 + 455 + moxart_dma_start_sg(ch, 0); 456 + } 457 + 458 + static void moxart_issue_pending(struct dma_chan *chan) 459 + { 460 + struct moxart_chan *ch = to_moxart_dma_chan(chan); 461 + unsigned long flags; 462 + 463 + spin_lock_irqsave(&ch->vc.lock, flags); 464 + if (vchan_issue_pending(&ch->vc) && !ch->desc) 465 + moxart_dma_start_desc(chan); 466 + spin_unlock_irqrestore(&ch->vc.lock, flags); 467 + } 468 + 469 + static size_t moxart_dma_desc_size(struct moxart_desc *d, 470 + unsigned int completed_sgs) 471 + { 472 + unsigned int i; 473 + size_t size; 474 + 475 + for (size = i = 
completed_sgs; i < d->sglen; i++) 476 + size += d->sg[i].len; 477 + 478 + return size; 479 + } 480 + 481 + static size_t moxart_dma_desc_size_in_flight(struct moxart_chan *ch) 482 + { 483 + size_t size; 484 + unsigned int completed_cycles, cycles; 485 + 486 + size = moxart_dma_desc_size(ch->desc, ch->sgidx); 487 + cycles = readl(ch->base + REG_OFF_CYCLES); 488 + completed_cycles = (ch->desc->dma_cycles - cycles); 489 + size -= completed_cycles << es_bytes[ch->desc->es]; 490 + 491 + dev_dbg(chan2dev(&ch->vc.chan), "%s: size=%zu\n", __func__, size); 492 + 493 + return size; 494 + } 495 + 496 + static enum dma_status moxart_tx_status(struct dma_chan *chan, 497 + dma_cookie_t cookie, 498 + struct dma_tx_state *txstate) 499 + { 500 + struct moxart_chan *ch = to_moxart_dma_chan(chan); 501 + struct virt_dma_desc *vd; 502 + struct moxart_desc *d; 503 + enum dma_status ret; 504 + unsigned long flags; 505 + 506 + /* 507 + * dma_cookie_status() assigns initial residue value. 508 + */ 509 + ret = dma_cookie_status(chan, cookie, txstate); 510 + 511 + spin_lock_irqsave(&ch->vc.lock, flags); 512 + vd = vchan_find_desc(&ch->vc, cookie); 513 + if (vd) { 514 + d = to_moxart_dma_desc(&vd->tx); 515 + txstate->residue = moxart_dma_desc_size(d, 0); 516 + } else if (ch->desc && ch->desc->vd.tx.cookie == cookie) { 517 + txstate->residue = moxart_dma_desc_size_in_flight(ch); 518 + } 519 + spin_unlock_irqrestore(&ch->vc.lock, flags); 520 + 521 + if (ch->error) 522 + return DMA_ERROR; 523 + 524 + return ret; 525 + } 526 + 527 + static void moxart_dma_init(struct dma_device *dma, struct device *dev) 528 + { 529 + dma->device_prep_slave_sg = moxart_prep_slave_sg; 530 + dma->device_alloc_chan_resources = moxart_alloc_chan_resources; 531 + dma->device_free_chan_resources = moxart_free_chan_resources; 532 + dma->device_issue_pending = moxart_issue_pending; 533 + dma->device_tx_status = moxart_tx_status; 534 + dma->device_control = moxart_control; 535 + dma->dev = dev; 536 + 537 + INIT_LIST_HEAD(&dma->channels); 538 + } 539 + 540 + static irqreturn_t moxart_dma_interrupt(int irq, void *devid) 541 + { 542 + struct moxart_dmadev *mc = devid; 543 + struct moxart_chan *ch = &mc->slave_chans[0]; 544 + unsigned int i; 545 + unsigned long flags; 546 + u32 ctrl; 547 + 548 + dev_dbg(chan2dev(&ch->vc.chan), "%s\n", __func__); 549 + 550 + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) { 551 + if (!ch->allocated) 552 + continue; 553 + 554 + ctrl = readl(ch->base + REG_OFF_CTRL); 555 + 556 + dev_dbg(chan2dev(&ch->vc.chan), "%s: ch=%p ch->base=%p ctrl=%x\n", 557 + __func__, ch, ch->base, ctrl); 558 + 559 + if (ctrl & APB_DMA_FIN_INT_STS) { 560 + ctrl &= ~APB_DMA_FIN_INT_STS; 561 + if (ch->desc) { 562 + spin_lock_irqsave(&ch->vc.lock, flags); 563 + if (++ch->sgidx < ch->desc->sglen) { 564 + moxart_dma_start_sg(ch, ch->sgidx); 565 + } else { 566 + vchan_cookie_complete(&ch->desc->vd); 567 + moxart_dma_start_desc(&ch->vc.chan); 568 + } 569 + spin_unlock_irqrestore(&ch->vc.lock, flags); 570 + } 571 + } 572 + 573 + if (ctrl & APB_DMA_ERR_INT_STS) { 574 + ctrl &= ~APB_DMA_ERR_INT_STS; 575 + ch->error = 1; 576 + } 577 + 578 + writel(ctrl, ch->base + REG_OFF_CTRL); 579 + } 580 + 581 + return IRQ_HANDLED; 582 + } 583 + 584 + static int moxart_probe(struct platform_device *pdev) 585 + { 586 + struct device *dev = &pdev->dev; 587 + struct device_node *node = dev->of_node; 588 + struct resource *res; 589 + static void __iomem *dma_base_addr; 590 + int ret, i; 591 + unsigned int irq; 592 + struct moxart_chan *ch; 593 + struct moxart_dmadev *mdc; 
594 + 595 + mdc = devm_kzalloc(dev, sizeof(*mdc), GFP_KERNEL); 596 + if (!mdc) { 597 + dev_err(dev, "can't allocate DMA container\n"); 598 + return -ENOMEM; 599 + } 600 + 601 + irq = irq_of_parse_and_map(node, 0); 602 + if (irq == NO_IRQ) { 603 + dev_err(dev, "no IRQ resource\n"); 604 + return -EINVAL; 605 + } 606 + 607 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 608 + dma_base_addr = devm_ioremap_resource(dev, res); 609 + if (IS_ERR(dma_base_addr)) 610 + return PTR_ERR(dma_base_addr); 611 + 612 + dma_cap_zero(mdc->dma_slave.cap_mask); 613 + dma_cap_set(DMA_SLAVE, mdc->dma_slave.cap_mask); 614 + dma_cap_set(DMA_PRIVATE, mdc->dma_slave.cap_mask); 615 + 616 + moxart_dma_init(&mdc->dma_slave, dev); 617 + 618 + ch = &mdc->slave_chans[0]; 619 + for (i = 0; i < APB_DMA_MAX_CHANNEL; i++, ch++) { 620 + ch->ch_num = i; 621 + ch->base = dma_base_addr + i * REG_OFF_CHAN_SIZE; 622 + ch->allocated = 0; 623 + 624 + ch->vc.desc_free = moxart_dma_desc_free; 625 + vchan_init(&ch->vc, &mdc->dma_slave); 626 + 627 + dev_dbg(dev, "%s: chs[%d]: ch->ch_num=%u ch->base=%p\n", 628 + __func__, i, ch->ch_num, ch->base); 629 + } 630 + 631 + platform_set_drvdata(pdev, mdc); 632 + 633 + ret = devm_request_irq(dev, irq, moxart_dma_interrupt, 0, 634 + "moxart-dma-engine", mdc); 635 + if (ret) { 636 + dev_err(dev, "devm_request_irq failed\n"); 637 + return ret; 638 + } 639 + 640 + ret = dma_async_device_register(&mdc->dma_slave); 641 + if (ret) { 642 + dev_err(dev, "dma_async_device_register failed\n"); 643 + return ret; 644 + } 645 + 646 + ret = of_dma_controller_register(node, moxart_of_xlate, mdc); 647 + if (ret) { 648 + dev_err(dev, "of_dma_controller_register failed\n"); 649 + dma_async_device_unregister(&mdc->dma_slave); 650 + return ret; 651 + } 652 + 653 + dev_dbg(dev, "%s: IRQ=%u\n", __func__, irq); 654 + 655 + return 0; 656 + } 657 + 658 + static int moxart_remove(struct platform_device *pdev) 659 + { 660 + struct moxart_dmadev *m = platform_get_drvdata(pdev); 661 + 662 + dma_async_device_unregister(&m->dma_slave); 663 + 664 + if (pdev->dev.of_node) 665 + of_dma_controller_free(pdev->dev.of_node); 666 + 667 + return 0; 668 + } 669 + 670 + static const struct of_device_id moxart_dma_match[] = { 671 + { .compatible = "moxa,moxart-dma" }, 672 + { } 673 + }; 674 + 675 + static struct platform_driver moxart_driver = { 676 + .probe = moxart_probe, 677 + .remove = moxart_remove, 678 + .driver = { 679 + .name = "moxart-dma-engine", 680 + .owner = THIS_MODULE, 681 + .of_match_table = moxart_dma_match, 682 + }, 683 + }; 684 + 685 + static int moxart_init(void) 686 + { 687 + return platform_driver_register(&moxart_driver); 688 + } 689 + subsys_initcall(moxart_init); 690 + 691 + static void __exit moxart_exit(void) 692 + { 693 + platform_driver_unregister(&moxart_driver); 694 + } 695 + module_exit(moxart_exit); 696 + 697 + MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>"); 698 + MODULE_DESCRIPTION("MOXART DMA engine driver"); 699 + MODULE_LICENSE("GPL v2");
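
[ Editor's aside: a hedged sketch of the client side of this driver. Because
  channels are handed out with dma_get_any_slave_channel() rather than by a
  chan_id match, the single cell in a client's dmas property (5 below is
  purely illustrative) reaches ch->line_reqno via moxart_of_xlate(), and
  moxart_slave_config() later routes it into the request/grant fields of the
  control word. fifo_phys is a hypothetical peripheral FIFO bus address: ]

	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_chan *chan;

	chan = dma_request_slave_channel(dev, "tx");	/* e.g. dmas = <&dma 5>; */
	if (!chan)
		return -ENODEV;
	dmaengine_slave_config(chan, &cfg);
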
+2 -2
drivers/dma/omap-dma.c
··· 190 190 { 191 191 struct omap_chan *c = to_omap_dma_chan(chan); 192 192 193 - dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); 193 + dev_dbg(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); 194 194 195 195 return omap_request_dma(c->dma_sig, "DMA engine", 196 196 omap_dma_callback, c, &c->dma_ch); ··· 203 203 vchan_free_chan_resources(&c->vc); 204 204 omap_free_dma(c->dma_ch); 205 205 206 - dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); 206 + dev_dbg(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); 207 207 } 208 208 209 209 static size_t omap_dma_sg_size(struct omap_sg *sg)
+34 -31
drivers/dma/pl330.c
··· 543 543 /* DMA-Engine Channel */ 544 544 struct dma_chan chan; 545 545 546 - /* List of to be xfered descriptors */ 546 + /* List of submitted descriptors */ 547 + struct list_head submitted_list; 548 + /* List of issued descriptors */ 547 549 struct list_head work_list; 548 550 /* List of completed descriptors */ 549 551 struct list_head completed_list; ··· 580 578 /* DMA-Engine Device */ 581 579 struct dma_device ddma; 582 580 581 + /* Holds info about sg limitations */ 582 + struct device_dma_parameters dma_parms; 583 + 583 584 /* Pool of descriptors available for the DMAC's channels */ 584 585 struct list_head desc_pool; 585 586 /* To protect desc_pool manipulation */ 586 587 spinlock_t pool_lock; 587 588 588 589 /* Peripheral channels connected to this DMAC */ 590 + unsigned int num_peripherals; 589 591 struct dma_pl330_chan *peripherals; /* keep at end */ 590 592 }; 591 593 ··· 610 604 611 605 /* The channel which currently holds this desc */ 612 606 struct dma_pl330_chan *pchan; 613 - }; 614 - 615 - struct dma_pl330_filter_args { 616 - struct dma_pl330_dmac *pdmac; 617 - unsigned int chan_id; 618 607 }; 619 608 620 609 static inline void _callback(struct pl330_req *r, enum pl330_op_err err) ··· 2299 2298 tasklet_schedule(&pch->task); 2300 2299 } 2301 2300 2302 - static bool pl330_dt_filter(struct dma_chan *chan, void *param) 2303 - { 2304 - struct dma_pl330_filter_args *fargs = param; 2305 - 2306 - if (chan->device != &fargs->pdmac->ddma) 2307 - return false; 2308 - 2309 - return (chan->chan_id == fargs->chan_id); 2310 - } 2311 - 2312 2301 bool pl330_filter(struct dma_chan *chan, void *param) 2313 2302 { 2314 2303 u8 *peri_id; ··· 2316 2325 { 2317 2326 int count = dma_spec->args_count; 2318 2327 struct dma_pl330_dmac *pdmac = ofdma->of_dma_data; 2319 - struct dma_pl330_filter_args fargs; 2320 - dma_cap_mask_t cap; 2321 - 2322 - if (!pdmac) 2323 - return NULL; 2328 + unsigned int chan_id; 2324 2329 2325 2330 if (count != 1) 2326 2331 return NULL; 2327 2332 2328 - fargs.pdmac = pdmac; 2329 - fargs.chan_id = dma_spec->args[0]; 2333 + chan_id = dma_spec->args[0]; 2334 + if (chan_id >= pdmac->num_peripherals) 2335 + return NULL; 2330 2336 2331 - dma_cap_zero(cap); 2332 - dma_cap_set(DMA_SLAVE, cap); 2333 - dma_cap_set(DMA_CYCLIC, cap); 2334 - 2335 - return dma_request_channel(cap, pl330_dt_filter, &fargs); 2337 + return dma_get_slave_channel(&pdmac->peripherals[chan_id].chan); 2336 2338 } 2337 2339 2338 2340 static int pl330_alloc_chan_resources(struct dma_chan *chan) ··· 2369 2385 pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); 2370 2386 2371 2387 /* Mark all desc done */ 2388 + list_for_each_entry(desc, &pch->submitted_list, node) { 2389 + desc->status = FREE; 2390 + dma_cookie_complete(&desc->txd); 2391 + } 2392 + 2372 2393 list_for_each_entry(desc, &pch->work_list , node) { 2373 2394 desc->status = FREE; 2374 2395 dma_cookie_complete(&desc->txd); ··· 2384 2395 dma_cookie_complete(&desc->txd); 2385 2396 } 2386 2397 2398 + list_splice_tail_init(&pch->submitted_list, &pdmac->desc_pool); 2387 2399 list_splice_tail_init(&pch->work_list, &pdmac->desc_pool); 2388 2400 list_splice_tail_init(&pch->completed_list, &pdmac->desc_pool); 2389 2401 spin_unlock_irqrestore(&pch->lock, flags); ··· 2443 2453 2444 2454 static void pl330_issue_pending(struct dma_chan *chan) 2445 2455 { 2446 - pl330_tasklet((unsigned long) to_pchan(chan)); 2456 + struct dma_pl330_chan *pch = to_pchan(chan); 2457 + unsigned long flags; 2458 + 2459 + spin_lock_irqsave(&pch->lock, flags); 2460 + 
list_splice_tail_init(&pch->submitted_list, &pch->work_list); 2461 + spin_unlock_irqrestore(&pch->lock, flags); 2462 + 2463 + pl330_tasklet((unsigned long)pch); 2447 2464 } 2448 2465 2449 2466 /* ··· 2477 2480 2478 2481 dma_cookie_assign(&desc->txd); 2479 2482 2480 - list_move_tail(&desc->node, &pch->work_list); 2483 + list_move_tail(&desc->node, &pch->submitted_list); 2481 2484 } 2482 2485 2483 2486 cookie = dma_cookie_assign(&last->txd); 2484 - list_add_tail(&last->node, &pch->work_list); 2487 + list_add_tail(&last->node, &pch->submitted_list); 2485 2488 spin_unlock_irqrestore(&pch->lock, flags); 2486 2489 2487 2490 return cookie; ··· 2957 2960 else 2958 2961 num_chan = max_t(int, pi->pcfg.num_peri, pi->pcfg.num_chan); 2959 2962 2963 + pdmac->num_peripherals = num_chan; 2964 + 2960 2965 pdmac->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); 2961 2966 if (!pdmac->peripherals) { 2962 2967 ret = -ENOMEM; ··· 2973 2974 else 2974 2975 pch->chan.private = adev->dev.of_node; 2975 2976 2977 + INIT_LIST_HEAD(&pch->submitted_list); 2976 2978 INIT_LIST_HEAD(&pch->work_list); 2977 2979 INIT_LIST_HEAD(&pch->completed_list); 2978 2980 spin_lock_init(&pch->lock); ··· 3021 3021 "unable to register DMA to the generic DT DMA helpers\n"); 3022 3022 } 3023 3023 } 3024 + 3025 + adev->dev.dma_parms = &pdmac->dma_parms; 3026 + 3024 3027 /* 3025 3028 * This is the limit for transfers with a buswidth of 1, larger 3026 3029 * buswidths will have larger limits.
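
[ Editor's aside: the submitted/issued split above restores the dmaengine
  contract that a descriptor must not reach the hardware before
  issue_pending(). In client terms, a minimal sketch (the list names refer
  to the hunks above): ]

	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	cookie = dmaengine_submit(desc);	/* parks on pch->submitted_list only */
	dma_async_issue_pending(chan);		/* splices to pch->work_list and
						 * kicks pl330_tasklet() */
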
+1
drivers/dma/ppc4xx/adma.c
··· 4114 4114 regs = ioremap(res.start, resource_size(&res)); 4115 4115 if (!regs) { 4116 4116 dev_err(&ofdev->dev, "failed to ioremap regs!\n"); 4117 + ret = -ENOMEM; 4117 4118 goto err_regs_alloc; 4118 4119 } 4119 4120
+20
drivers/dma/sirf-dma.c
··· 640 640 } 641 641 EXPORT_SYMBOL(sirfsoc_dma_filter_id); 642 642 643 + #define SIRFSOC_DMA_BUSWIDTHS \ 644 + (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ 645 + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ 646 + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ 647 + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ 648 + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) 649 + 650 + static int sirfsoc_dma_device_slave_caps(struct dma_chan *dchan, 651 + struct dma_slave_caps *caps) 652 + { 653 + caps->src_addr_widths = SIRFSOC_DMA_BUSWIDTHS; 654 + caps->dstn_addr_widths = SIRFSOC_DMA_BUSWIDTHS; 655 + caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); 656 + caps->cmd_pause = true; 657 + caps->cmd_terminate = true; 658 + 659 + return 0; 660 + } 661 + 643 662 static int sirfsoc_dma_probe(struct platform_device *op) 644 663 { 645 664 struct device_node *dn = op->dev.of_node; ··· 731 712 dma->device_tx_status = sirfsoc_dma_tx_status; 732 713 dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved; 733 714 dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic; 715 + dma->device_slave_caps = sirfsoc_dma_device_slave_caps; 734 716 735 717 INIT_LIST_HEAD(&dma->channels); 736 718 dma_cap_set(DMA_SLAVE, dma->cap_mask);
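
[ Editor's aside: the consumer side of device_slave_caps, for reference —
  dma_get_slave_caps() fills the struct by calling back into the new
  sirfsoc_dma_device_slave_caps(). The dstn_addr_widths spelling matches the
  field name as of this series; the pause call is only an illustration of
  acting on a reported capability, assuming chan was requested elsewhere: ]

	struct dma_slave_caps caps;

	if (!dma_get_slave_caps(chan, &caps) &&
	    (caps.dstn_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) &&
	    caps.cmd_pause)
		dmaengine_pause(chan);	/* now known-safe rather than guessed */
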
+55 -7
drivers/dma/tegra20-apb-dma.c
··· 100 100 #define TEGRA_APBDMA_APBSEQ_DATA_SWAP BIT(27) 101 101 #define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1 (1 << 16) 102 102 103 + /* Tegra148 specific registers */ 104 + #define TEGRA_APBDMA_CHAN_WCOUNT 0x20 105 + 106 + #define TEGRA_APBDMA_CHAN_WORD_TRANSFER 0x24 107 + 103 108 /* 104 109 * If any burst is in flight and DMA paused then this is the time to complete 105 110 * on-flight burst and update DMA status register. ··· 114 109 /* Channel base address offset from APBDMA base address */ 115 110 #define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET 0x1000 116 111 117 - /* DMA channel register space size */ 118 - #define TEGRA_APBDMA_CHANNEL_REGISTER_SIZE 0x20 119 - 120 112 struct tegra_dma; 121 113 122 114 /* 123 115 * tegra_dma_chip_data Tegra chip specific DMA data 124 116 * @nr_channels: Number of channels available in the controller. 117 + * @channel_reg_size: Channel register size/stride. 125 118 * @max_dma_count: Maximum DMA transfer count supported by DMA controller. 126 119 * @support_channel_pause: Support channel wise pause of dma. 120 + * @support_separate_wcount_reg: Support separate word count register. 127 121 */ 128 122 struct tegra_dma_chip_data { 129 123 int nr_channels; 124 + int channel_reg_size; 130 125 int max_dma_count; 131 126 bool support_channel_pause; 127 + bool support_separate_wcount_reg; 132 128 }; 133 129 134 130 /* DMA channel registers */ ··· 139 133 unsigned long apb_ptr; 140 134 unsigned long ahb_seq; 141 135 unsigned long apb_seq; 136 + unsigned long wcount; 142 137 }; 143 138 144 139 /* ··· 433 426 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr); 434 427 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq); 435 428 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr); 429 + if (tdc->tdma->chip_data->support_separate_wcount_reg) 430 + tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount); 436 431 437 432 /* Start DMA */ 438 433 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ··· 474 465 /* Safe to program new configuration */ 475 466 tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr); 476 467 tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr); 468 + if (tdc->tdma->chip_data->support_separate_wcount_reg) 469 + tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, 470 + nsg_req->ch_regs.wcount); 477 471 tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, 478 472 nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB); 479 473 nsg_req->configured = true; ··· 730 718 struct tegra_dma_desc *dma_desc; 731 719 unsigned long flags; 732 720 unsigned long status; 721 + unsigned long wcount; 733 722 bool was_busy; 734 723 735 724 spin_lock_irqsave(&tdc->lock, flags); ··· 751 738 tdc->isr_handler(tdc, true); 752 739 status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS); 753 740 } 741 + if (tdc->tdma->chip_data->support_separate_wcount_reg) 742 + wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER); 743 + else 744 + wcount = status; 754 745 755 746 was_busy = tdc->busy; 756 747 tegra_dma_stop(tdc); ··· 763 746 sgreq = list_first_entry(&tdc->pending_sg_req, 764 747 typeof(*sgreq), node); 765 748 sgreq->dma_desc->bytes_transferred += 766 - get_current_xferred_count(tdc, sgreq, status); 749 + get_current_xferred_count(tdc, sgreq, wcount); 767 750 } 768 751 tegra_dma_resume(tdc); 769 752 ··· 925 908 return -EINVAL; 926 909 } 927 910 911 + static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc, 912 + struct tegra_dma_channel_regs *ch_regs, u32 len) 913 + { 914 + u32 len_field = (len - 4) & 0xFFFC; 915 + 916 + if 
(tdc->tdma->chip_data->support_separate_wcount_reg) 917 + ch_regs->wcount = len_field; 918 + else 919 + ch_regs->csr |= len_field; 920 + } 921 + 928 922 static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg( 929 923 struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len, 930 924 enum dma_transfer_direction direction, unsigned long flags, ··· 1019 991 1020 992 sg_req->ch_regs.apb_ptr = apb_ptr; 1021 993 sg_req->ch_regs.ahb_ptr = mem; 1022 - sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); 994 + sg_req->ch_regs.csr = csr; 995 + tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len); 1023 996 sg_req->ch_regs.apb_seq = apb_seq; 1024 997 sg_req->ch_regs.ahb_seq = ahb_seq; 1025 998 sg_req->configured = false; ··· 1149 1120 ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len); 1150 1121 sg_req->ch_regs.apb_ptr = apb_ptr; 1151 1122 sg_req->ch_regs.ahb_ptr = mem; 1152 - sg_req->ch_regs.csr = csr | ((len - 4) & 0xFFFC); 1123 + sg_req->ch_regs.csr = csr; 1124 + tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len); 1153 1125 sg_req->ch_regs.apb_seq = apb_seq; 1154 1126 sg_req->ch_regs.ahb_seq = ahb_seq; 1155 1127 sg_req->configured = false; ··· 1264 1234 /* Tegra20 specific DMA controller information */ 1265 1235 static const struct tegra_dma_chip_data tegra20_dma_chip_data = { 1266 1236 .nr_channels = 16, 1237 + .channel_reg_size = 0x20, 1267 1238 .max_dma_count = 1024UL * 64, 1268 1239 .support_channel_pause = false, 1240 + .support_separate_wcount_reg = false, 1269 1241 }; 1270 1242 1271 1243 /* Tegra30 specific DMA controller information */ 1272 1244 static const struct tegra_dma_chip_data tegra30_dma_chip_data = { 1273 1245 .nr_channels = 32, 1246 + .channel_reg_size = 0x20, 1274 1247 .max_dma_count = 1024UL * 64, 1275 1248 .support_channel_pause = false, 1249 + .support_separate_wcount_reg = false, 1276 1250 }; 1277 1251 1278 1252 /* Tegra114 specific DMA controller information */ 1279 1253 static const struct tegra_dma_chip_data tegra114_dma_chip_data = { 1280 1254 .nr_channels = 32, 1255 + .channel_reg_size = 0x20, 1281 1256 .max_dma_count = 1024UL * 64, 1282 1257 .support_channel_pause = true, 1258 + .support_separate_wcount_reg = false, 1259 + }; 1260 + 1261 + /* Tegra148 specific DMA controller information */ 1262 + static const struct tegra_dma_chip_data tegra148_dma_chip_data = { 1263 + .nr_channels = 32, 1264 + .channel_reg_size = 0x40, 1265 + .max_dma_count = 1024UL * 64, 1266 + .support_channel_pause = true, 1267 + .support_separate_wcount_reg = true, 1283 1268 }; 1284 1269 1285 1270 1286 1271 static const struct of_device_id tegra_dma_of_match[] = { 1287 1272 { 1273 + .compatible = "nvidia,tegra148-apbdma", 1274 + .data = &tegra148_dma_chip_data, 1275 + }, { 1288 1276 .compatible = "nvidia,tegra114-apbdma", 1289 1277 .data = &tegra114_dma_chip_data, 1290 1278 }, { ··· 1396 1348 struct tegra_dma_channel *tdc = &tdma->channels[i]; 1397 1349 1398 1350 tdc->chan_base_offset = TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET + 1399 - i * TEGRA_APBDMA_CHANNEL_REGISTER_SIZE; 1351 + i * cdata->channel_reg_size; 1400 1352 1401 1353 res = platform_get_resource(pdev, IORESOURCE_IRQ, i); 1402 1354 if (!res) {
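
[ Editor's aside: a worked value for tegra_dma_prep_wcount(), assuming len
  is in bytes and a multiple of the word size: ]

	u32 len = 64;				/* a 64-byte transfer */
	u32 len_field = (len - 4) & 0xFFFC;	/* = 60, i.e. (16 words - 1) << 2 */

	/* Tegra148: written to the separate TEGRA_APBDMA_CHAN_WCOUNT register;
	 * earlier chips: OR'd into the CSR word-count bitfield as before. */
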
+3 -1
drivers/dma/virt-dma.h
··· 84 84 static inline void vchan_cookie_complete(struct virt_dma_desc *vd) 85 85 { 86 86 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); 87 + dma_cookie_t cookie; 87 88 89 + cookie = vd->tx.cookie; 88 90 dma_cookie_complete(&vd->tx); 89 91 dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", 90 - vd, vd->tx.cookie); 92 + vd, cookie); 91 93 list_add_tail(&vd->node, &vc->desc_completed); 92 94 93 95 tasklet_schedule(&vc->task);
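
[ Editor's aside: the reason the cookie must be snapshotted first is that
  dma_cookie_complete() zeroes tx.cookie, so the old debug print emitted 0
  instead of the completed cookie. The helper, as it reads in
  drivers/dma/dmaengine.h at this point in the tree: ]

	static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
	{
		BUG_ON(tx->cookie < DMA_MIN_COOKIE);
		tx->chan->completed_cookie = tx->cookie;
		tx->cookie = 0;		/* cleared before the dev_vdbg() runs */
	}
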
+7 -8
include/linux/dmaengine.h
··· 257 257 * @dev: class device for sysfs 258 258 * @device_node: used to add this to the device chan list 259 259 * @local: per-cpu pointer to a struct dma_chan_percpu 260 - * @client-count: how many clients are using this channel 260 + * @client_count: how many clients are using this channel 261 261 * @table_count: number of appearances in the mem-to-mem allocation table 262 262 * @private: private data for certain client-channel associations 263 263 */ ··· 279 279 280 280 /** 281 281 * struct dma_chan_dev - relate sysfs device node to backing channel device 282 - * @chan - driver channel device 283 - * @device - sysfs device 284 - * @dev_id - parent dma_device dev_id 285 - * @idr_ref - reference count to gate release of dma_device dev_id 282 + * @chan: driver channel device 283 + * @device: sysfs device 284 + * @dev_id: parent dma_device dev_id 285 + * @idr_ref: reference count to gate release of dma_device dev_id 286 286 */ 287 287 struct dma_chan_dev { 288 288 struct dma_chan *chan; ··· 306 306 /** 307 307 * struct dma_slave_config - dma slave channel runtime config 308 308 * @direction: whether the data shall go in or out on this slave 309 - * channel, right now. DMA_TO_DEVICE and DMA_FROM_DEVICE are 310 - * legal values, DMA_BIDIRECTIONAL is not acceptable since we 311 - * need to differentiate source and target addresses. 309 + * channel, right now. DMA_MEM_TO_DEV and DMA_DEV_TO_MEM are 310 + * legal values. 312 311 * @src_addr: this is the physical address where DMA slave data 313 312 * should be read (RX), if the source is memory this argument is 314 313 * ignored.
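
[ Editor's aside: putting the corrected kernel-doc to use — a minimal
  dma_slave_config fill with one of the direction values the documentation
  now names; fifo_phys is a hypothetical device FIFO bus address: ]

	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,	/* device -> memory (RX) */
		.src_addr	= fifo_phys,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
		.src_maxburst	= 8,
	};
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
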
+5
include/linux/platform_data/dma-imx-sdma.h
··· 43 43 s32 dptc_dvfs_addr; 44 44 s32 utra_addr; 45 45 s32 ram_code_start_addr; 46 + /* End of v1 array */ 47 + s32 mcu_2_ssish_addr; 48 + s32 ssish_2_mcu_addr; 49 + s32 hdmi_dma_addr; 50 + /* End of v2 array */ 46 51 }; 47 52 48 53 /**
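
[ Editor's aside: the "End of v1/v2 array" markers matter because the SDMA
  driver walks this struct as a flat s32 array, bounded by a per-firmware
  script count, so firmware that predates the v2 additions must cap the walk
  at the v1 boundary. A hedged sketch of how such a bound can be derived —
  upstream uses a fixed constant; this offsetof() form is only illustrative: ]

	#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 \
		(offsetof(struct sdma_script_start_addrs, ram_code_start_addr) \
		 / sizeof(s32) + 1)
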
+1
include/linux/platform_data/dma-imx.h
··· 39 39 IMX_DMATYPE_IPU_MEMORY, /* IPU Memory */ 40 40 IMX_DMATYPE_ASRC, /* ASRC */ 41 41 IMX_DMATYPE_ESAI, /* ESAI */ 42 + IMX_DMATYPE_SSI_DUAL, /* SSI Dual FIFO */ 42 43 }; 43 44 44 45 enum imx_dma_prio {
+3 -5
include/linux/platform_data/dma-mmp_tdma.h
··· 1 1 /* 2 - * linux/arch/arm/mach-mmp/include/mach/sram.h 3 - * 4 2 * SRAM Memory Management 5 3 * 6 4 * Copyright (c) 2011 Marvell Semiconductors Inc. ··· 9 11 * 10 12 */ 11 13 12 - #ifndef __ASM_ARCH_SRAM_H 13 - #define __ASM_ARCH_SRAM_H 14 + #ifndef __DMA_MMP_TDMA_H 15 + #define __DMA_MMP_TDMA_H 14 16 15 17 #include <linux/genalloc.h> 16 18 ··· 30 32 31 33 extern struct gen_pool *sram_get_gpool(char *pool_name); 32 34 33 - #endif /* __ASM_ARCH_SRAM_H */ 35 + #endif /* __DMA_MMP_TDMA_H */
+2 -4
include/linux/platform_data/dma-mv_xor.h
··· 1 1 /* 2 - * arch/arm/plat-orion/include/plat/mv_xor.h 3 - * 4 2 * Marvell XOR platform device data definition file. 5 3 */ 6 4 7 - #ifndef __PLAT_MV_XOR_H 8 - #define __PLAT_MV_XOR_H 5 + #ifndef __DMA_MV_XOR_H 6 + #define __DMA_MV_XOR_H 9 7 10 8 #include <linux/dmaengine.h> 11 9 #include <linux/mbus.h>
+27 -1
sound/soc/fsl/fsl_ssi.c
··· 164 164 bool baudclk_locked;
165 165 bool irq_stats;
166 166 bool offline_config;
167 + bool use_dual_fifo;
167 168 u8 i2s_mode;
168 169 spinlock_t baudclk_lock;
169 170 struct clk *baudclk;
··· 722 721 CCSR_SSI_SxCCR_DC(2));
723 722 }
724 723
725 + if (ssi_private->use_dual_fifo) {
726 + write_ssi_mask(&ssi->srcr, 0, CCSR_SSI_SRCR_RFEN1);
727 + write_ssi_mask(&ssi->stcr, 0, CCSR_SSI_STCR_TFEN1);
728 + write_ssi_mask(&ssi->scr, 0, CCSR_SSI_SCR_TCH_EN);
729 + }
730 +
725 730 return 0;
726 731 }
··· 758 751 ssi_private->baudclk_locked = false;
759 752 spin_unlock_irqrestore(&ssi_private->baudclk_lock, flags);
760 753 }
754 +
755 + /* When using dual fifo mode, it is safer to enforce an even period
756 + * size: if the period size were odd, the DMA engine, which always
757 + * starts from fifo0, would leave fifo1 unserviced at the end of each
758 + * period, while the SSI would still fetch invalid data from fifo1.
759 + */
760 + if (ssi_private->use_dual_fifo)
761 + snd_pcm_hw_constraint_step(substream->runtime, 0,
762 + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
761 763
762 764 return 0;
763 765 }
··· 1386 1370
1387 1371 if (hw_type == FSL_SSI_MX21 || hw_type == FSL_SSI_MX51 ||
1388 1372 hw_type == FSL_SSI_MX35) {
1389 - u32 dma_events[2];
1373 + u32 dma_events[2], dmas[4];
1390 1374 ssi_private->ssi_on_imx = true;
1391 1375
1392 1376 ssi_private->clk = devm_clk_get(&pdev->dev, NULL);
··· 1441 1425 dev_err(&pdev->dev, "could not get dma events but fsl-ssi is configured to use DMA\n");
1442 1426 goto error_clk;
1443 1427 }
1428 + }
1429 + /* Should this be merged with the above? */
1430 + if (!of_property_read_u32_array(pdev->dev.of_node, "dmas", dmas, 4)
1431 + && dmas[2] == IMX_DMATYPE_SSI_DUAL) {
1432 + ssi_private->use_dual_fifo = true;
1433 + /* When using dual fifo mode, we need to keep the watermark
1434 + * an even number due to a DMA script limitation.
1435 + */
1436 + ssi_private->dma_params_tx.maxburst &= ~0x1;
1437 + ssi_private->dma_params_rx.maxburst &= ~0x1;
1444 1438 }
1445 1439
1446 1440 shared = of_device_is_compatible(of_get_parent(np),