Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'next' of git://git.infradead.org/users/vkoul/slave-dma

* 'next' of git://git.infradead.org/users/vkoul/slave-dma: (63 commits)
dmaengine: mid_dma: mask_peripheral_interrupt only when dmac is idle
dmaengine/ep93xx_dma: add module.h include
pch_dma: Reduce wasting memory
pch_dma: Fix suspend issue
dma/timberdale: free_irq() on an error path
dma: shdma: transfer based runtime PM
dmaengine: shdma: protect against the IRQ handler
dmaengine i.MX DMA/SDMA: add missing include of linux/module.h
dmaengine: delete redundant chan_id and chancnt initialization in dma drivers
dmaengine/amba-pl08x: Check txd->llis_va before freeing dma_pool
dmaengine/amba-pl08x: Add support for sg len greater than one for slave transfers
serial: sh-sci: don't filter on DMA device, use only channel ID
ARM: SAMSUNG: Remove Samsung specific enum type for dma direction
ASoC: Samsung: Update DMA interface
spi/s3c64xx: Merge dma control code
spi/s3c64xx: Add support DMA engine API
ARM: SAMSUNG: Remove S3C-PL330-DMA driver
ARM: S5P64X0: Use generic DMA PL330 driver
ARM: S5PC100: Use generic DMA PL330 driver
ARM: S5PV210: Use generic DMA PL330 driver
...

Fix up fairly trivial conflicts in
- arch/arm/mach-exynos4/{Kconfig,clock.c}
- arch/arm/mach-s5p64x0/dma.c

+2328 -2471
+4
arch/arm/include/asm/hardware/pl080.h
··· 21 21 * OneNAND features. 22 22 */ 23 23 24 + #ifndef ASM_PL080_H 25 + #define ASM_PL080_H 26 + 24 27 #define PL080_INT_STATUS (0x00) 25 28 #define PL080_TC_STATUS (0x04) 26 29 #define PL080_TC_CLEAR (0x08) ··· 141 138 u32 control1; 142 139 }; 143 140 141 + #endif /* ASM_PL080_H */
+1 -1
arch/arm/mach-exynos4/Kconfig
··· 11 11 12 12 config CPU_EXYNOS4210 13 13 bool 14 - select S3C_PL330_DMA 14 + select SAMSUNG_DMADEV 15 15 select ARM_CPU_SUSPEND if PM 16 16 help 17 17 Enable EXYNOS4210 CPU support
+12 -2
arch/arm/mach-exynos4/clock.c
··· 111 111 .name = "sclk_usbphy1", 112 112 }; 113 113 114 + static struct clk dummy_apb_pclk = { 115 + .name = "apb_pclk", 116 + .id = -1, 117 + }; 118 + 114 119 static int exynos4_clksrc_mask_top_ctrl(struct clk *clk, int enable) 115 120 { 116 121 return s5p_gatectrl(S5P_CLKSRC_MASK_TOP, clk, enable); ··· 508 503 .enable = exynos4_clk_ip_fsys_ctrl, 509 504 .ctrlbit = (1 << 9), 510 505 }, { 511 - .name = "pdma", 506 + .name = "dma", 512 507 .devname = "s3c-pl330.0", 513 508 .enable = exynos4_clk_ip_fsys_ctrl, 514 509 .ctrlbit = (1 << 0), 515 510 }, { 516 - .name = "pdma", 511 + .name = "dma", 517 512 .devname = "s3c-pl330.1", 518 513 .enable = exynos4_clk_ip_fsys_ctrl, 519 514 .ctrlbit = (1 << 1), ··· 1286 1281 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1287 1282 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1288 1283 1284 + <<<<<<< HEAD 1289 1285 register_syscore_ops(&exynos4_clock_syscore_ops); 1286 + ======= 1287 + s3c24xx_register_clock(&dummy_apb_pclk); 1288 + 1289 + >>>>>>> 4598fc2c94b68740e0269db03c98a1e7ad5af773 1290 1290 s3c_pwmclk_init(); 1291 1291 }
+188 -111
arch/arm/mach-exynos4/dma.c
··· 21 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 22 */ 23 23 24 - #include <linux/platform_device.h> 25 24 #include <linux/dma-mapping.h> 25 + #include <linux/amba/bus.h> 26 + #include <linux/amba/pl330.h> 26 27 28 + #include <asm/irq.h> 27 29 #include <plat/devs.h> 28 30 #include <plat/irqs.h> 29 31 30 32 #include <mach/map.h> 31 33 #include <mach/irqs.h> 32 - 33 - #include <plat/s3c-pl330-pdata.h> 34 + #include <mach/dma.h> 34 35 35 36 static u64 dma_dmamask = DMA_BIT_MASK(32); 36 37 37 - static struct resource exynos4_pdma0_resource[] = { 38 - [0] = { 39 - .start = EXYNOS4_PA_PDMA0, 40 - .end = EXYNOS4_PA_PDMA0 + SZ_4K, 41 - .flags = IORESOURCE_MEM, 42 - }, 43 - [1] = { 44 - .start = IRQ_PDMA0, 45 - .end = IRQ_PDMA0, 46 - .flags = IORESOURCE_IRQ, 38 + struct dma_pl330_peri pdma0_peri[28] = { 39 + { 40 + .peri_id = (u8)DMACH_PCM0_RX, 41 + .rqtype = DEVTOMEM, 42 + }, { 43 + .peri_id = (u8)DMACH_PCM0_TX, 44 + .rqtype = MEMTODEV, 45 + }, { 46 + .peri_id = (u8)DMACH_PCM2_RX, 47 + .rqtype = DEVTOMEM, 48 + }, { 49 + .peri_id = (u8)DMACH_PCM2_TX, 50 + .rqtype = MEMTODEV, 51 + }, { 52 + .peri_id = (u8)DMACH_MSM_REQ0, 53 + }, { 54 + .peri_id = (u8)DMACH_MSM_REQ2, 55 + }, { 56 + .peri_id = (u8)DMACH_SPI0_RX, 57 + .rqtype = DEVTOMEM, 58 + }, { 59 + .peri_id = (u8)DMACH_SPI0_TX, 60 + .rqtype = MEMTODEV, 61 + }, { 62 + .peri_id = (u8)DMACH_SPI2_RX, 63 + .rqtype = DEVTOMEM, 64 + }, { 65 + .peri_id = (u8)DMACH_SPI2_TX, 66 + .rqtype = MEMTODEV, 67 + }, { 68 + .peri_id = (u8)DMACH_I2S0S_TX, 69 + .rqtype = MEMTODEV, 70 + }, { 71 + .peri_id = (u8)DMACH_I2S0_RX, 72 + .rqtype = DEVTOMEM, 73 + }, { 74 + .peri_id = (u8)DMACH_I2S0_TX, 75 + .rqtype = MEMTODEV, 76 + }, { 77 + .peri_id = (u8)DMACH_UART0_RX, 78 + .rqtype = DEVTOMEM, 79 + }, { 80 + .peri_id = (u8)DMACH_UART0_TX, 81 + .rqtype = MEMTODEV, 82 + }, { 83 + .peri_id = (u8)DMACH_UART2_RX, 84 + .rqtype = DEVTOMEM, 85 + }, { 86 + .peri_id = (u8)DMACH_UART2_TX, 87 + .rqtype = MEMTODEV, 88 + }, { 89 + .peri_id = 
(u8)DMACH_UART4_RX, 90 + .rqtype = DEVTOMEM, 91 + }, { 92 + .peri_id = (u8)DMACH_UART4_TX, 93 + .rqtype = MEMTODEV, 94 + }, { 95 + .peri_id = (u8)DMACH_SLIMBUS0_RX, 96 + .rqtype = DEVTOMEM, 97 + }, { 98 + .peri_id = (u8)DMACH_SLIMBUS0_TX, 99 + .rqtype = MEMTODEV, 100 + }, { 101 + .peri_id = (u8)DMACH_SLIMBUS2_RX, 102 + .rqtype = DEVTOMEM, 103 + }, { 104 + .peri_id = (u8)DMACH_SLIMBUS2_TX, 105 + .rqtype = MEMTODEV, 106 + }, { 107 + .peri_id = (u8)DMACH_SLIMBUS4_RX, 108 + .rqtype = DEVTOMEM, 109 + }, { 110 + .peri_id = (u8)DMACH_SLIMBUS4_TX, 111 + .rqtype = MEMTODEV, 112 + }, { 113 + .peri_id = (u8)DMACH_AC97_MICIN, 114 + .rqtype = DEVTOMEM, 115 + }, { 116 + .peri_id = (u8)DMACH_AC97_PCMIN, 117 + .rqtype = DEVTOMEM, 118 + }, { 119 + .peri_id = (u8)DMACH_AC97_PCMOUT, 120 + .rqtype = MEMTODEV, 47 121 }, 48 122 }; 49 123 50 - static struct s3c_pl330_platdata exynos4_pdma0_pdata = { 51 - .peri = { 52 - [0] = DMACH_PCM0_RX, 53 - [1] = DMACH_PCM0_TX, 54 - [2] = DMACH_PCM2_RX, 55 - [3] = DMACH_PCM2_TX, 56 - [4] = DMACH_MSM_REQ0, 57 - [5] = DMACH_MSM_REQ2, 58 - [6] = DMACH_SPI0_RX, 59 - [7] = DMACH_SPI0_TX, 60 - [8] = DMACH_SPI2_RX, 61 - [9] = DMACH_SPI2_TX, 62 - [10] = DMACH_I2S0S_TX, 63 - [11] = DMACH_I2S0_RX, 64 - [12] = DMACH_I2S0_TX, 65 - [13] = DMACH_I2S2_RX, 66 - [14] = DMACH_I2S2_TX, 67 - [15] = DMACH_UART0_RX, 68 - [16] = DMACH_UART0_TX, 69 - [17] = DMACH_UART2_RX, 70 - [18] = DMACH_UART2_TX, 71 - [19] = DMACH_UART4_RX, 72 - [20] = DMACH_UART4_TX, 73 - [21] = DMACH_SLIMBUS0_RX, 74 - [22] = DMACH_SLIMBUS0_TX, 75 - [23] = DMACH_SLIMBUS2_RX, 76 - [24] = DMACH_SLIMBUS2_TX, 77 - [25] = DMACH_SLIMBUS4_RX, 78 - [26] = DMACH_SLIMBUS4_TX, 79 - [27] = DMACH_AC97_MICIN, 80 - [28] = DMACH_AC97_PCMIN, 81 - [29] = DMACH_AC97_PCMOUT, 82 - [30] = DMACH_MAX, 83 - [31] = DMACH_MAX, 84 - }, 124 + struct dma_pl330_platdata exynos4_pdma0_pdata = { 125 + .nr_valid_peri = ARRAY_SIZE(pdma0_peri), 126 + .peri = pdma0_peri, 85 127 }; 86 128 87 - static struct platform_device 
exynos4_device_pdma0 = { 88 - .name = "s3c-pl330", 89 - .id = 0, 90 - .num_resources = ARRAY_SIZE(exynos4_pdma0_resource), 91 - .resource = exynos4_pdma0_resource, 92 - .dev = { 129 + struct amba_device exynos4_device_pdma0 = { 130 + .dev = { 131 + .init_name = "dma-pl330.0", 93 132 .dma_mask = &dma_dmamask, 94 133 .coherent_dma_mask = DMA_BIT_MASK(32), 95 134 .platform_data = &exynos4_pdma0_pdata, 96 135 }, 136 + .res = { 137 + .start = EXYNOS4_PA_PDMA0, 138 + .end = EXYNOS4_PA_PDMA0 + SZ_4K, 139 + .flags = IORESOURCE_MEM, 140 + }, 141 + .irq = {IRQ_PDMA0, NO_IRQ}, 142 + .periphid = 0x00041330, 97 143 }; 98 144 99 - static struct resource exynos4_pdma1_resource[] = { 100 - [0] = { 101 - .start = EXYNOS4_PA_PDMA1, 102 - .end = EXYNOS4_PA_PDMA1 + SZ_4K, 103 - .flags = IORESOURCE_MEM, 104 - }, 105 - [1] = { 106 - .start = IRQ_PDMA1, 107 - .end = IRQ_PDMA1, 108 - .flags = IORESOURCE_IRQ, 145 + struct dma_pl330_peri pdma1_peri[25] = { 146 + { 147 + .peri_id = (u8)DMACH_PCM0_RX, 148 + .rqtype = DEVTOMEM, 149 + }, { 150 + .peri_id = (u8)DMACH_PCM0_TX, 151 + .rqtype = MEMTODEV, 152 + }, { 153 + .peri_id = (u8)DMACH_PCM1_RX, 154 + .rqtype = DEVTOMEM, 155 + }, { 156 + .peri_id = (u8)DMACH_PCM1_TX, 157 + .rqtype = MEMTODEV, 158 + }, { 159 + .peri_id = (u8)DMACH_MSM_REQ1, 160 + }, { 161 + .peri_id = (u8)DMACH_MSM_REQ3, 162 + }, { 163 + .peri_id = (u8)DMACH_SPI1_RX, 164 + .rqtype = DEVTOMEM, 165 + }, { 166 + .peri_id = (u8)DMACH_SPI1_TX, 167 + .rqtype = MEMTODEV, 168 + }, { 169 + .peri_id = (u8)DMACH_I2S0S_TX, 170 + .rqtype = MEMTODEV, 171 + }, { 172 + .peri_id = (u8)DMACH_I2S0_RX, 173 + .rqtype = DEVTOMEM, 174 + }, { 175 + .peri_id = (u8)DMACH_I2S0_TX, 176 + .rqtype = MEMTODEV, 177 + }, { 178 + .peri_id = (u8)DMACH_I2S1_RX, 179 + .rqtype = DEVTOMEM, 180 + }, { 181 + .peri_id = (u8)DMACH_I2S1_TX, 182 + .rqtype = MEMTODEV, 183 + }, { 184 + .peri_id = (u8)DMACH_UART0_RX, 185 + .rqtype = DEVTOMEM, 186 + }, { 187 + .peri_id = (u8)DMACH_UART0_TX, 188 + .rqtype = MEMTODEV, 189 + }, 
{ 190 + .peri_id = (u8)DMACH_UART1_RX, 191 + .rqtype = DEVTOMEM, 192 + }, { 193 + .peri_id = (u8)DMACH_UART1_TX, 194 + .rqtype = MEMTODEV, 195 + }, { 196 + .peri_id = (u8)DMACH_UART3_RX, 197 + .rqtype = DEVTOMEM, 198 + }, { 199 + .peri_id = (u8)DMACH_UART3_TX, 200 + .rqtype = MEMTODEV, 201 + }, { 202 + .peri_id = (u8)DMACH_SLIMBUS1_RX, 203 + .rqtype = DEVTOMEM, 204 + }, { 205 + .peri_id = (u8)DMACH_SLIMBUS1_TX, 206 + .rqtype = MEMTODEV, 207 + }, { 208 + .peri_id = (u8)DMACH_SLIMBUS3_RX, 209 + .rqtype = DEVTOMEM, 210 + }, { 211 + .peri_id = (u8)DMACH_SLIMBUS3_TX, 212 + .rqtype = MEMTODEV, 213 + }, { 214 + .peri_id = (u8)DMACH_SLIMBUS5_RX, 215 + .rqtype = DEVTOMEM, 216 + }, { 217 + .peri_id = (u8)DMACH_SLIMBUS5_TX, 218 + .rqtype = MEMTODEV, 109 219 }, 110 220 }; 111 221 112 - static struct s3c_pl330_platdata exynos4_pdma1_pdata = { 113 - .peri = { 114 - [0] = DMACH_PCM0_RX, 115 - [1] = DMACH_PCM0_TX, 116 - [2] = DMACH_PCM1_RX, 117 - [3] = DMACH_PCM1_TX, 118 - [4] = DMACH_MSM_REQ1, 119 - [5] = DMACH_MSM_REQ3, 120 - [6] = DMACH_SPI1_RX, 121 - [7] = DMACH_SPI1_TX, 122 - [8] = DMACH_I2S0S_TX, 123 - [9] = DMACH_I2S0_RX, 124 - [10] = DMACH_I2S0_TX, 125 - [11] = DMACH_I2S1_RX, 126 - [12] = DMACH_I2S1_TX, 127 - [13] = DMACH_UART0_RX, 128 - [14] = DMACH_UART0_TX, 129 - [15] = DMACH_UART1_RX, 130 - [16] = DMACH_UART1_TX, 131 - [17] = DMACH_UART3_RX, 132 - [18] = DMACH_UART3_TX, 133 - [19] = DMACH_SLIMBUS1_RX, 134 - [20] = DMACH_SLIMBUS1_TX, 135 - [21] = DMACH_SLIMBUS3_RX, 136 - [22] = DMACH_SLIMBUS3_TX, 137 - [23] = DMACH_SLIMBUS5_RX, 138 - [24] = DMACH_SLIMBUS5_TX, 139 - [25] = DMACH_SLIMBUS0AUX_RX, 140 - [26] = DMACH_SLIMBUS0AUX_TX, 141 - [27] = DMACH_SPDIF, 142 - [28] = DMACH_MAX, 143 - [29] = DMACH_MAX, 144 - [30] = DMACH_MAX, 145 - [31] = DMACH_MAX, 146 - }, 222 + struct dma_pl330_platdata exynos4_pdma1_pdata = { 223 + .nr_valid_peri = ARRAY_SIZE(pdma1_peri), 224 + .peri = pdma1_peri, 147 225 }; 148 226 149 - static struct platform_device exynos4_device_pdma1 = { 150 - 
.name = "s3c-pl330", 151 - .id = 1, 152 - .num_resources = ARRAY_SIZE(exynos4_pdma1_resource), 153 - .resource = exynos4_pdma1_resource, 154 - .dev = { 227 + struct amba_device exynos4_device_pdma1 = { 228 + .dev = { 229 + .init_name = "dma-pl330.1", 155 230 .dma_mask = &dma_dmamask, 156 231 .coherent_dma_mask = DMA_BIT_MASK(32), 157 232 .platform_data = &exynos4_pdma1_pdata, 158 233 }, 159 - }; 160 - 161 - static struct platform_device *exynos4_dmacs[] __initdata = { 162 - &exynos4_device_pdma0, 163 - &exynos4_device_pdma1, 234 + .res = { 235 + .start = EXYNOS4_PA_PDMA1, 236 + .end = EXYNOS4_PA_PDMA1 + SZ_4K, 237 + .flags = IORESOURCE_MEM, 238 + }, 239 + .irq = {IRQ_PDMA1, NO_IRQ}, 240 + .periphid = 0x00041330, 164 241 }; 165 242 166 243 static int __init exynos4_dma_init(void) 167 244 { 168 - platform_add_devices(exynos4_dmacs, ARRAY_SIZE(exynos4_dmacs)); 245 + amba_device_register(&exynos4_device_pdma0, &iomem_resource); 169 246 170 247 return 0; 171 248 }
+2 -2
arch/arm/mach-exynos4/include/mach/dma.h
··· 20 20 #ifndef __MACH_DMA_H 21 21 #define __MACH_DMA_H 22 22 23 - /* This platform uses the common S3C DMA API driver for PL330 */ 24 - #include <plat/s3c-dma-pl330.h> 23 + /* This platform uses the common DMA API driver for PL330 */ 24 + #include <plat/dma-pl330.h> 25 25 26 26 #endif /* __MACH_DMA_H */
+13 -7
arch/arm/mach-s3c2410/include/mach/dma.h
··· 13 13 #ifndef __ASM_ARCH_DMA_H 14 14 #define __ASM_ARCH_DMA_H __FILE__ 15 15 16 - #include <plat/dma.h> 17 16 #include <linux/sysdev.h> 18 17 19 18 #define MAX_DMA_TRANSFER_SIZE 0x100000 /* Data Unit is half word */ ··· 49 50 DMACH_UART3_SRC2, 50 51 DMACH_MAX, /* the end entry */ 51 52 }; 53 + 54 + static inline bool samsung_dma_has_circular(void) 55 + { 56 + return false; 57 + } 58 + 59 + static inline bool samsung_dma_is_dmadev(void) 60 + { 61 + return false; 62 + } 63 + 64 + #include <plat/dma.h> 52 65 53 66 #define DMACH_LOW_LEVEL (1<<28) /* use this to specifiy hardware ch no */ 54 67 ··· 174 163 struct s3c2410_dma_client *client; 175 164 176 165 /* channel configuration */ 177 - enum s3c2410_dmasrc source; 166 + enum dma_data_direction source; 178 167 enum dma_ch req_ch; 179 168 unsigned long dev_addr; 180 169 unsigned long load_timeout; ··· 206 195 }; 207 196 208 197 typedef unsigned long dma_device_t; 209 - 210 - static inline bool s3c_dma_has_circular(void) 211 - { 212 - return false; 213 - } 214 198 215 199 #endif /* __ASM_ARCH_DMA_H */
+2 -2
arch/arm/mach-s3c2412/dma.c
··· 130 130 131 131 static void s3c2412_dma_direction(struct s3c2410_dma_chan *chan, 132 132 struct s3c24xx_dma_map *map, 133 - enum s3c2410_dmasrc dir) 133 + enum dma_data_direction dir) 134 134 { 135 135 unsigned long chsel; 136 136 137 - if (dir == S3C2410_DMASRC_HW) 137 + if (dir == DMA_FROM_DEVICE) 138 138 chsel = map->channels_rx[0]; 139 139 else 140 140 chsel = map->channels[0];
+5 -5
arch/arm/mach-s3c64xx/dma.c
··· 147 147 u32 control0, control1; 148 148 149 149 switch (chan->source) { 150 - case S3C2410_DMASRC_HW: 150 + case DMA_FROM_DEVICE: 151 151 src = chan->dev_addr; 152 152 dst = data; 153 153 control0 = PL080_CONTROL_SRC_AHB2; 154 154 control0 |= PL080_CONTROL_DST_INCR; 155 155 break; 156 156 157 - case S3C2410_DMASRC_MEM: 157 + case DMA_TO_DEVICE: 158 158 src = data; 159 159 dst = chan->dev_addr; 160 160 control0 = PL080_CONTROL_DST_AHB2; ··· 416 416 417 417 418 418 int s3c2410_dma_devconfig(enum dma_ch channel, 419 - enum s3c2410_dmasrc source, 419 + enum dma_data_direction source, 420 420 unsigned long devaddr) 421 421 { 422 422 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); ··· 437 437 pr_debug("%s: peripheral %d\n", __func__, peripheral); 438 438 439 439 switch (source) { 440 - case S3C2410_DMASRC_HW: 440 + case DMA_FROM_DEVICE: 441 441 config = 2 << PL080_CONFIG_FLOW_CONTROL_SHIFT; 442 442 config |= peripheral << PL080_CONFIG_SRC_SEL_SHIFT; 443 443 break; 444 - case S3C2410_DMASRC_MEM: 444 + case DMA_TO_DEVICE: 445 445 config = 1 << PL080_CONFIG_FLOW_CONTROL_SHIFT; 446 446 config |= peripheral << PL080_CONFIG_DST_SEL_SHIFT; 447 447 break;
+6 -2
arch/arm/mach-s3c64xx/include/mach/dma.h
··· 58 58 DMACH_MAX /* the end */ 59 59 }; 60 60 61 - static __inline__ bool s3c_dma_has_circular(void) 61 + static inline bool samsung_dma_has_circular(void) 62 62 { 63 63 return true; 64 64 } 65 65 66 + static inline bool samsung_dma_is_dmadev(void) 67 + { 68 + return false; 69 + } 66 70 #define S3C2410_DMAF_CIRCULAR (1 << 0) 67 71 68 72 #include <plat/dma.h> ··· 99 95 unsigned char peripheral; 100 96 101 97 unsigned int flags; 102 - enum s3c2410_dmasrc source; 98 + enum dma_data_direction source; 103 99 104 100 105 101 dma_addr_t dev_addr;
+2 -2
arch/arm/mach-s5p64x0/Kconfig
··· 9 9 10 10 config CPU_S5P6440 11 11 bool 12 - select S3C_PL330_DMA 12 + select SAMSUNG_DMADEV 13 13 select S5P_HRT 14 14 help 15 15 Enable S5P6440 CPU support 16 16 17 17 config CPU_S5P6450 18 18 bool 19 - select S3C_PL330_DMA 19 + select SAMSUNG_DMADEV 20 20 select S5P_HRT 21 21 help 22 22 Enable S5P6450 CPU support
+8 -1
arch/arm/mach-s5p64x0/clock-s5p6440.c
··· 146 146 .enable = s5p64x0_hclk0_ctrl, 147 147 .ctrlbit = (1 << 8), 148 148 }, { 149 - .name = "pdma", 149 + .name = "dma", 150 150 .parent = &clk_hclk_low.clk, 151 151 .enable = s5p64x0_hclk0_ctrl, 152 152 .ctrlbit = (1 << 12), ··· 499 499 &clk_pclk_low, 500 500 }; 501 501 502 + static struct clk dummy_apb_pclk = { 503 + .name = "apb_pclk", 504 + .id = -1, 505 + }; 506 + 502 507 void __init_or_cpufreq s5p6440_setup_clocks(void) 503 508 { 504 509 struct clk *xtal_clk; ··· 585 580 586 581 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 587 582 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 583 + 584 + s3c24xx_register_clock(&dummy_apb_pclk); 588 585 589 586 s3c_pwmclk_init(); 590 587 }
+8 -1
arch/arm/mach-s5p64x0/clock-s5p6450.c
··· 179 179 .enable = s5p64x0_hclk0_ctrl, 180 180 .ctrlbit = (1 << 3), 181 181 }, { 182 - .name = "pdma", 182 + .name = "dma", 183 183 .parent = &clk_hclk_low.clk, 184 184 .enable = s5p64x0_hclk0_ctrl, 185 185 .ctrlbit = (1 << 12), ··· 553 553 &clk_sclk_audio0, 554 554 }; 555 555 556 + static struct clk dummy_apb_pclk = { 557 + .name = "apb_pclk", 558 + .id = -1, 559 + }; 560 + 556 561 void __init_or_cpufreq s5p6450_setup_clocks(void) 557 562 { 558 563 struct clk *xtal_clk; ··· 636 631 637 632 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 638 633 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 634 + 635 + s3c24xx_register_clock(&dummy_apb_pclk); 639 636 640 637 s3c_pwmclk_init(); 641 638 }
+186 -93
arch/arm/mach-s5p64x0/dma.c
··· 21 21 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 22 */ 23 23 24 - #include <linux/platform_device.h> 25 24 #include <linux/dma-mapping.h> 25 + #include <linux/amba/bus.h> 26 + #include <linux/amba/pl330.h> 27 + 28 + #include <asm/irq.h> 26 29 27 30 #include <mach/map.h> 28 31 #include <mach/irqs.h> 29 32 #include <mach/regs-clock.h> 33 + #include <mach/dma.h> 30 34 31 35 #include <plat/cpu.h> 32 36 #include <plat/devs.h> 33 - #include <plat/s3c-pl330-pdata.h> 37 + #include <plat/irqs.h> 34 38 35 39 static u64 dma_dmamask = DMA_BIT_MASK(32); 36 40 37 - static struct resource s5p64x0_pdma_resource[] = { 38 - [0] = { 39 - .start = S5P64X0_PA_PDMA, 40 - .end = S5P64X0_PA_PDMA + SZ_4K, 41 - .flags = IORESOURCE_MEM, 42 - }, 43 - [1] = { 44 - .start = IRQ_DMA0, 45 - .end = IRQ_DMA0, 46 - .flags = IORESOURCE_IRQ, 47 - }, 48 - }; 49 - 50 - static struct s3c_pl330_platdata s5p6440_pdma_pdata = { 51 - .peri = { 52 - [0] = DMACH_UART0_RX, 53 - [1] = DMACH_UART0_TX, 54 - [2] = DMACH_UART1_RX, 55 - [3] = DMACH_UART1_TX, 56 - [4] = DMACH_UART2_RX, 57 - [5] = DMACH_UART2_TX, 58 - [6] = DMACH_UART3_RX, 59 - [7] = DMACH_UART3_TX, 60 - [8] = DMACH_MAX, 61 - [9] = DMACH_MAX, 62 - [10] = DMACH_PCM0_TX, 63 - [11] = DMACH_PCM0_RX, 64 - [12] = DMACH_I2S0_TX, 65 - [13] = DMACH_I2S0_RX, 66 - [14] = DMACH_SPI0_TX, 67 - [15] = DMACH_SPI0_RX, 68 - [16] = DMACH_MAX, 69 - [17] = DMACH_MAX, 70 - [18] = DMACH_MAX, 71 - [19] = DMACH_MAX, 72 - [20] = DMACH_SPI1_TX, 73 - [21] = DMACH_SPI1_RX, 74 - [22] = DMACH_MAX, 75 - [23] = DMACH_MAX, 76 - [24] = DMACH_MAX, 77 - [25] = DMACH_MAX, 78 - [26] = DMACH_MAX, 79 - [27] = DMACH_MAX, 80 - [28] = DMACH_MAX, 81 - [29] = DMACH_PWM, 82 - [30] = DMACH_MAX, 83 - [31] = DMACH_MAX, 84 - }, 85 - }; 86 - 87 - static struct s3c_pl330_platdata s5p6450_pdma_pdata = { 88 - .peri = { 89 - [0] = DMACH_UART0_RX, 90 - [1] = DMACH_UART0_TX, 91 - [2] = DMACH_UART1_RX, 92 - [3] = DMACH_UART1_TX, 93 - [4] = DMACH_UART2_RX, 94 - [5] = DMACH_UART2_TX, 95 - 
[6] = DMACH_UART3_RX, 96 - [7] = DMACH_UART3_TX, 97 - [8] = DMACH_UART4_RX, 98 - [9] = DMACH_UART4_TX, 99 - [10] = DMACH_PCM0_TX, 100 - [11] = DMACH_PCM0_RX, 101 - [12] = DMACH_I2S0_TX, 102 - [13] = DMACH_I2S0_RX, 103 - [14] = DMACH_SPI0_TX, 104 - [15] = DMACH_SPI0_RX, 105 - [16] = DMACH_PCM1_TX, 106 - [17] = DMACH_PCM1_RX, 107 - [18] = DMACH_PCM2_TX, 108 - [19] = DMACH_PCM2_RX, 109 - [20] = DMACH_SPI1_TX, 110 - [21] = DMACH_SPI1_RX, 111 - [22] = DMACH_USI_TX, 112 - [23] = DMACH_USI_RX, 113 - [24] = DMACH_MAX, 114 - [25] = DMACH_I2S1_TX, 115 - [26] = DMACH_I2S1_RX, 116 - [27] = DMACH_I2S2_TX, 117 - [28] = DMACH_I2S2_RX, 118 - [29] = DMACH_PWM, 119 - [30] = DMACH_UART5_RX, 120 - [31] = DMACH_UART5_TX, 41 + struct dma_pl330_peri s5p6440_pdma_peri[22] = { 42 + { 43 + .peri_id = (u8)DMACH_UART0_RX, 44 + .rqtype = DEVTOMEM, 45 + }, { 46 + .peri_id = (u8)DMACH_UART0_TX, 47 + .rqtype = MEMTODEV, 48 + }, { 49 + .peri_id = (u8)DMACH_UART1_RX, 50 + .rqtype = DEVTOMEM, 51 + }, { 52 + .peri_id = (u8)DMACH_UART1_TX, 53 + .rqtype = MEMTODEV, 54 + }, { 55 + .peri_id = (u8)DMACH_UART2_RX, 56 + .rqtype = DEVTOMEM, 57 + }, { 58 + .peri_id = (u8)DMACH_UART2_TX, 59 + .rqtype = MEMTODEV, 60 + }, { 61 + .peri_id = (u8)DMACH_UART3_RX, 62 + .rqtype = DEVTOMEM, 63 + }, { 64 + .peri_id = (u8)DMACH_UART3_TX, 65 + .rqtype = MEMTODEV, 66 + }, { 67 + .peri_id = DMACH_MAX, 68 + }, { 69 + .peri_id = DMACH_MAX, 70 + }, { 71 + .peri_id = (u8)DMACH_PCM0_TX, 72 + .rqtype = MEMTODEV, 73 + }, { 74 + .peri_id = (u8)DMACH_PCM0_RX, 75 + .rqtype = DEVTOMEM, 76 + }, { 77 + .peri_id = (u8)DMACH_I2S0_TX, 78 + .rqtype = MEMTODEV, 79 + }, { 80 + .peri_id = (u8)DMACH_I2S0_RX, 81 + .rqtype = DEVTOMEM, 82 + }, { 83 + .peri_id = (u8)DMACH_SPI0_TX, 84 + .rqtype = MEMTODEV, 85 + }, { 86 + .peri_id = (u8)DMACH_SPI0_RX, 87 + .rqtype = DEVTOMEM, 88 + }, { 89 + .peri_id = (u8)DMACH_MAX, 90 + }, { 91 + .peri_id = (u8)DMACH_MAX, 92 + }, { 93 + .peri_id = (u8)DMACH_MAX, 94 + }, { 95 + .peri_id = (u8)DMACH_MAX, 96 + }, { 97 
+ .peri_id = (u8)DMACH_SPI1_TX, 98 + .rqtype = MEMTODEV, 99 + }, { 100 + .peri_id = (u8)DMACH_SPI1_RX, 101 + .rqtype = DEVTOMEM, 121 102 }, 122 103 }; 123 104 124 - static struct platform_device s5p64x0_device_pdma = { 125 - .name = "s3c-pl330", 126 - .id = -1, 127 - .num_resources = ARRAY_SIZE(s5p64x0_pdma_resource), 128 - .resource = s5p64x0_pdma_resource, 129 - .dev = { 105 + struct dma_pl330_platdata s5p6440_pdma_pdata = { 106 + .nr_valid_peri = ARRAY_SIZE(s5p6440_pdma_peri), 107 + .peri = s5p6440_pdma_peri, 108 + }; 109 + 110 + struct dma_pl330_peri s5p6450_pdma_peri[32] = { 111 + { 112 + .peri_id = (u8)DMACH_UART0_RX, 113 + .rqtype = DEVTOMEM, 114 + }, { 115 + .peri_id = (u8)DMACH_UART0_TX, 116 + .rqtype = MEMTODEV, 117 + }, { 118 + .peri_id = (u8)DMACH_UART1_RX, 119 + .rqtype = DEVTOMEM, 120 + }, { 121 + .peri_id = (u8)DMACH_UART1_TX, 122 + .rqtype = MEMTODEV, 123 + }, { 124 + .peri_id = (u8)DMACH_UART2_RX, 125 + .rqtype = DEVTOMEM, 126 + }, { 127 + .peri_id = (u8)DMACH_UART2_TX, 128 + .rqtype = MEMTODEV, 129 + }, { 130 + .peri_id = (u8)DMACH_UART3_RX, 131 + .rqtype = DEVTOMEM, 132 + }, { 133 + .peri_id = (u8)DMACH_UART3_TX, 134 + .rqtype = MEMTODEV, 135 + }, { 136 + .peri_id = (u8)DMACH_UART4_RX, 137 + .rqtype = DEVTOMEM, 138 + }, { 139 + .peri_id = (u8)DMACH_UART4_TX, 140 + .rqtype = MEMTODEV, 141 + }, { 142 + .peri_id = (u8)DMACH_PCM0_TX, 143 + .rqtype = MEMTODEV, 144 + }, { 145 + .peri_id = (u8)DMACH_PCM0_RX, 146 + .rqtype = DEVTOMEM, 147 + }, { 148 + .peri_id = (u8)DMACH_I2S0_TX, 149 + .rqtype = MEMTODEV, 150 + }, { 151 + .peri_id = (u8)DMACH_I2S0_RX, 152 + .rqtype = DEVTOMEM, 153 + }, { 154 + .peri_id = (u8)DMACH_SPI0_TX, 155 + .rqtype = MEMTODEV, 156 + }, { 157 + .peri_id = (u8)DMACH_SPI0_RX, 158 + .rqtype = DEVTOMEM, 159 + }, { 160 + .peri_id = (u8)DMACH_PCM1_TX, 161 + .rqtype = MEMTODEV, 162 + }, { 163 + .peri_id = (u8)DMACH_PCM1_RX, 164 + .rqtype = DEVTOMEM, 165 + }, { 166 + .peri_id = (u8)DMACH_PCM2_TX, 167 + .rqtype = MEMTODEV, 168 + }, { 169 + 
.peri_id = (u8)DMACH_PCM2_RX, 170 + .rqtype = DEVTOMEM, 171 + }, { 172 + .peri_id = (u8)DMACH_SPI1_TX, 173 + .rqtype = MEMTODEV, 174 + }, { 175 + .peri_id = (u8)DMACH_SPI1_RX, 176 + .rqtype = DEVTOMEM, 177 + }, { 178 + .peri_id = (u8)DMACH_USI_TX, 179 + .rqtype = MEMTODEV, 180 + }, { 181 + .peri_id = (u8)DMACH_USI_RX, 182 + .rqtype = DEVTOMEM, 183 + }, { 184 + .peri_id = (u8)DMACH_MAX, 185 + }, { 186 + .peri_id = (u8)DMACH_I2S1_TX, 187 + .rqtype = MEMTODEV, 188 + }, { 189 + .peri_id = (u8)DMACH_I2S1_RX, 190 + .rqtype = DEVTOMEM, 191 + }, { 192 + .peri_id = (u8)DMACH_I2S2_TX, 193 + .rqtype = MEMTODEV, 194 + }, { 195 + .peri_id = (u8)DMACH_I2S2_RX, 196 + .rqtype = DEVTOMEM, 197 + }, { 198 + .peri_id = (u8)DMACH_PWM, 199 + }, { 200 + .peri_id = (u8)DMACH_UART5_RX, 201 + .rqtype = DEVTOMEM, 202 + }, { 203 + .peri_id = (u8)DMACH_UART5_TX, 204 + .rqtype = MEMTODEV, 205 + }, 206 + }; 207 + 208 + struct dma_pl330_platdata s5p6450_pdma_pdata = { 209 + .nr_valid_peri = ARRAY_SIZE(s5p6450_pdma_peri), 210 + .peri = s5p6450_pdma_peri, 211 + }; 212 + 213 + struct amba_device s5p64x0_device_pdma = { 214 + .dev = { 215 + .init_name = "dma-pl330", 130 216 .dma_mask = &dma_dmamask, 131 217 .coherent_dma_mask = DMA_BIT_MASK(32), 132 218 }, 219 + .res = { 220 + .start = S5P64X0_PA_PDMA, 221 + .end = S5P64X0_PA_PDMA + SZ_4K, 222 + .flags = IORESOURCE_MEM, 223 + }, 224 + .irq = {IRQ_DMA0, NO_IRQ}, 225 + .periphid = 0x00041330, 133 226 }; 134 227 135 228 static int __init s5p64x0_dma_init(void) ··· 232 139 else 233 140 s5p64x0_device_pdma.dev.platform_data = &s5p6440_pdma_pdata; 234 141 235 - platform_device_register(&s5p64x0_device_pdma); 142 + amba_device_register(&s5p64x0_device_pdma, &iomem_resource); 236 143 237 144 return 0; 238 145 }
+2 -2
arch/arm/mach-s5p64x0/include/mach/dma.h
··· 20 20 #ifndef __MACH_DMA_H 21 21 #define __MACH_DMA_H 22 22 23 - /* This platform uses the common S3C DMA API driver for PL330 */ 24 - #include <plat/s3c-dma-pl330.h> 23 + /* This platform uses the common DMA API driver for PL330 */ 24 + #include <plat/dma-pl330.h> 25 25 26 26 #endif /* __MACH_DMA_H */
+1 -1
arch/arm/mach-s5pc100/Kconfig
··· 10 10 config CPU_S5PC100 11 11 bool 12 12 select S5P_EXT_INT 13 - select S3C_PL330_DMA 13 + select SAMSUNG_DMADEV 14 14 help 15 15 Enable S5PC100 CPU support 16 16
+9 -2
arch/arm/mach-s5pc100/clock.c
··· 33 33 .name = "otg_phy", 34 34 }; 35 35 36 + static struct clk dummy_apb_pclk = { 37 + .name = "apb_pclk", 38 + .id = -1, 39 + }; 40 + 36 41 static struct clk *clk_src_mout_href_list[] = { 37 42 [0] = &s5p_clk_27m, 38 43 [1] = &clk_fin_hpll, ··· 459 454 .enable = s5pc100_d1_0_ctrl, 460 455 .ctrlbit = (1 << 2), 461 456 }, { 462 - .name = "pdma", 457 + .name = "dma", 463 458 .devname = "s3c-pl330.1", 464 459 .parent = &clk_div_d1_bus.clk, 465 460 .enable = s5pc100_d1_0_ctrl, 466 461 .ctrlbit = (1 << 1), 467 462 }, { 468 - .name = "pdma", 463 + .name = "dma", 469 464 .devname = "s3c-pl330.0", 470 465 .parent = &clk_div_d1_bus.clk, 471 466 .enable = s5pc100_d1_0_ctrl, ··· 1280 1275 1281 1276 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1282 1277 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1278 + 1279 + s3c24xx_register_clock(&dummy_apb_pclk); 1283 1280 1284 1281 s3c_pwmclk_init(); 1285 1282 }
+211 -112
arch/arm/mach-s5pc100/dma.c
··· 1 - /* 1 + /* linux/arch/arm/mach-s5pc100/dma.c 2 + * 3 + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 + * http://www.samsung.com 5 + * 2 6 * Copyright (C) 2010 Samsung Electronics Co. Ltd. 3 7 * Jaswinder Singh <jassi.brar@samsung.com> 4 8 * ··· 21 17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 18 */ 23 19 24 - #include <linux/platform_device.h> 25 20 #include <linux/dma-mapping.h> 21 + #include <linux/amba/bus.h> 22 + #include <linux/amba/pl330.h> 26 23 24 + #include <asm/irq.h> 27 25 #include <plat/devs.h> 26 + #include <plat/irqs.h> 28 27 29 28 #include <mach/map.h> 30 29 #include <mach/irqs.h> 31 - 32 - #include <plat/s3c-pl330-pdata.h> 30 + #include <mach/dma.h> 33 31 34 32 static u64 dma_dmamask = DMA_BIT_MASK(32); 35 33 36 - static struct resource s5pc100_pdma0_resource[] = { 37 - [0] = { 38 - .start = S5PC100_PA_PDMA0, 39 - .end = S5PC100_PA_PDMA0 + SZ_4K, 40 - .flags = IORESOURCE_MEM, 41 - }, 42 - [1] = { 43 - .start = IRQ_PDMA0, 44 - .end = IRQ_PDMA0, 45 - .flags = IORESOURCE_IRQ, 34 + struct dma_pl330_peri pdma0_peri[30] = { 35 + { 36 + .peri_id = (u8)DMACH_UART0_RX, 37 + .rqtype = DEVTOMEM, 38 + }, { 39 + .peri_id = (u8)DMACH_UART0_TX, 40 + .rqtype = MEMTODEV, 41 + }, { 42 + .peri_id = (u8)DMACH_UART1_RX, 43 + .rqtype = DEVTOMEM, 44 + }, { 45 + .peri_id = (u8)DMACH_UART1_TX, 46 + .rqtype = MEMTODEV, 47 + }, { 48 + .peri_id = (u8)DMACH_UART2_RX, 49 + .rqtype = DEVTOMEM, 50 + }, { 51 + .peri_id = (u8)DMACH_UART2_TX, 52 + .rqtype = MEMTODEV, 53 + }, { 54 + .peri_id = (u8)DMACH_UART3_RX, 55 + .rqtype = DEVTOMEM, 56 + }, { 57 + .peri_id = (u8)DMACH_UART3_TX, 58 + .rqtype = MEMTODEV, 59 + }, { 60 + .peri_id = DMACH_IRDA, 61 + }, { 62 + .peri_id = (u8)DMACH_I2S0_RX, 63 + .rqtype = DEVTOMEM, 64 + }, { 65 + .peri_id = (u8)DMACH_I2S0_TX, 66 + .rqtype = MEMTODEV, 67 + }, { 68 + .peri_id = (u8)DMACH_I2S0S_TX, 69 + .rqtype = MEMTODEV, 70 + }, { 71 + .peri_id = (u8)DMACH_I2S1_RX, 72 + .rqtype = DEVTOMEM, 73 + }, { 74 + .peri_id = 
(u8)DMACH_I2S1_TX, 75 + .rqtype = MEMTODEV, 76 + }, { 77 + .peri_id = (u8)DMACH_I2S2_RX, 78 + .rqtype = DEVTOMEM, 79 + }, { 80 + .peri_id = (u8)DMACH_I2S2_TX, 81 + .rqtype = MEMTODEV, 82 + }, { 83 + .peri_id = (u8)DMACH_SPI0_RX, 84 + .rqtype = DEVTOMEM, 85 + }, { 86 + .peri_id = (u8)DMACH_SPI0_TX, 87 + .rqtype = MEMTODEV, 88 + }, { 89 + .peri_id = (u8)DMACH_SPI1_RX, 90 + .rqtype = DEVTOMEM, 91 + }, { 92 + .peri_id = (u8)DMACH_SPI1_TX, 93 + .rqtype = MEMTODEV, 94 + }, { 95 + .peri_id = (u8)DMACH_SPI2_RX, 96 + .rqtype = DEVTOMEM, 97 + }, { 98 + .peri_id = (u8)DMACH_SPI2_TX, 99 + .rqtype = MEMTODEV, 100 + }, { 101 + .peri_id = (u8)DMACH_AC97_MICIN, 102 + .rqtype = DEVTOMEM, 103 + }, { 104 + .peri_id = (u8)DMACH_AC97_PCMIN, 105 + .rqtype = DEVTOMEM, 106 + }, { 107 + .peri_id = (u8)DMACH_AC97_PCMOUT, 108 + .rqtype = MEMTODEV, 109 + }, { 110 + .peri_id = (u8)DMACH_EXTERNAL, 111 + }, { 112 + .peri_id = (u8)DMACH_PWM, 113 + }, { 114 + .peri_id = (u8)DMACH_SPDIF, 115 + .rqtype = MEMTODEV, 116 + }, { 117 + .peri_id = (u8)DMACH_HSI_RX, 118 + .rqtype = DEVTOMEM, 119 + }, { 120 + .peri_id = (u8)DMACH_HSI_TX, 121 + .rqtype = MEMTODEV, 46 122 }, 47 123 }; 48 124 49 - static struct s3c_pl330_platdata s5pc100_pdma0_pdata = { 50 - .peri = { 51 - [0] = DMACH_UART0_RX, 52 - [1] = DMACH_UART0_TX, 53 - [2] = DMACH_UART1_RX, 54 - [3] = DMACH_UART1_TX, 55 - [4] = DMACH_UART2_RX, 56 - [5] = DMACH_UART2_TX, 57 - [6] = DMACH_UART3_RX, 58 - [7] = DMACH_UART3_TX, 59 - [8] = DMACH_IRDA, 60 - [9] = DMACH_I2S0_RX, 61 - [10] = DMACH_I2S0_TX, 62 - [11] = DMACH_I2S0S_TX, 63 - [12] = DMACH_I2S1_RX, 64 - [13] = DMACH_I2S1_TX, 65 - [14] = DMACH_I2S2_RX, 66 - [15] = DMACH_I2S2_TX, 67 - [16] = DMACH_SPI0_RX, 68 - [17] = DMACH_SPI0_TX, 69 - [18] = DMACH_SPI1_RX, 70 - [19] = DMACH_SPI1_TX, 71 - [20] = DMACH_SPI2_RX, 72 - [21] = DMACH_SPI2_TX, 73 - [22] = DMACH_AC97_MICIN, 74 - [23] = DMACH_AC97_PCMIN, 75 - [24] = DMACH_AC97_PCMOUT, 76 - [25] = DMACH_EXTERNAL, 77 - [26] = DMACH_PWM, 78 - [27] = DMACH_SPDIF, 
79 - [28] = DMACH_HSI_RX, 80 - [29] = DMACH_HSI_TX, 81 - [30] = DMACH_MAX, 82 - [31] = DMACH_MAX, 83 - }, 125 + struct dma_pl330_platdata s5pc100_pdma0_pdata = { 126 + .nr_valid_peri = ARRAY_SIZE(pdma0_peri), 127 + .peri = pdma0_peri, 84 128 }; 85 129 86 - static struct platform_device s5pc100_device_pdma0 = { 87 - .name = "s3c-pl330", 88 - .id = 0, 89 - .num_resources = ARRAY_SIZE(s5pc100_pdma0_resource), 90 - .resource = s5pc100_pdma0_resource, 91 - .dev = { 130 + struct amba_device s5pc100_device_pdma0 = { 131 + .dev = { 132 + .init_name = "dma-pl330.0", 92 133 .dma_mask = &dma_dmamask, 93 134 .coherent_dma_mask = DMA_BIT_MASK(32), 94 135 .platform_data = &s5pc100_pdma0_pdata, 95 136 }, 96 - }; 97 - 98 - static struct resource s5pc100_pdma1_resource[] = { 99 - [0] = { 100 - .start = S5PC100_PA_PDMA1, 101 - .end = S5PC100_PA_PDMA1 + SZ_4K, 137 + .res = { 138 + .start = S5PC100_PA_PDMA0, 139 + .end = S5PC100_PA_PDMA0 + SZ_4K, 102 140 .flags = IORESOURCE_MEM, 103 141 }, 104 - [1] = { 105 - .start = IRQ_PDMA1, 106 - .end = IRQ_PDMA1, 107 - .flags = IORESOURCE_IRQ, 142 + .irq = {IRQ_PDMA0, NO_IRQ}, 143 + .periphid = 0x00041330, 144 + }; 145 + 146 + struct dma_pl330_peri pdma1_peri[30] = { 147 + { 148 + .peri_id = (u8)DMACH_UART0_RX, 149 + .rqtype = DEVTOMEM, 150 + }, { 151 + .peri_id = (u8)DMACH_UART0_TX, 152 + .rqtype = MEMTODEV, 153 + }, { 154 + .peri_id = (u8)DMACH_UART1_RX, 155 + .rqtype = DEVTOMEM, 156 + }, { 157 + .peri_id = (u8)DMACH_UART1_TX, 158 + .rqtype = MEMTODEV, 159 + }, { 160 + .peri_id = (u8)DMACH_UART2_RX, 161 + .rqtype = DEVTOMEM, 162 + }, { 163 + .peri_id = (u8)DMACH_UART2_TX, 164 + .rqtype = MEMTODEV, 165 + }, { 166 + .peri_id = (u8)DMACH_UART3_RX, 167 + .rqtype = DEVTOMEM, 168 + }, { 169 + .peri_id = (u8)DMACH_UART3_TX, 170 + .rqtype = MEMTODEV, 171 + }, { 172 + .peri_id = DMACH_IRDA, 173 + }, { 174 + .peri_id = (u8)DMACH_I2S0_RX, 175 + .rqtype = DEVTOMEM, 176 + }, { 177 + .peri_id = (u8)DMACH_I2S0_TX, 178 + .rqtype = MEMTODEV, 179 + }, { 180 + 
.peri_id = (u8)DMACH_I2S0S_TX, 181 + .rqtype = MEMTODEV, 182 + }, { 183 + .peri_id = (u8)DMACH_I2S1_RX, 184 + .rqtype = DEVTOMEM, 185 + }, { 186 + .peri_id = (u8)DMACH_I2S1_TX, 187 + .rqtype = MEMTODEV, 188 + }, { 189 + .peri_id = (u8)DMACH_I2S2_RX, 190 + .rqtype = DEVTOMEM, 191 + }, { 192 + .peri_id = (u8)DMACH_I2S2_TX, 193 + .rqtype = MEMTODEV, 194 + }, { 195 + .peri_id = (u8)DMACH_SPI0_RX, 196 + .rqtype = DEVTOMEM, 197 + }, { 198 + .peri_id = (u8)DMACH_SPI0_TX, 199 + .rqtype = MEMTODEV, 200 + }, { 201 + .peri_id = (u8)DMACH_SPI1_RX, 202 + .rqtype = DEVTOMEM, 203 + }, { 204 + .peri_id = (u8)DMACH_SPI1_TX, 205 + .rqtype = MEMTODEV, 206 + }, { 207 + .peri_id = (u8)DMACH_SPI2_RX, 208 + .rqtype = DEVTOMEM, 209 + }, { 210 + .peri_id = (u8)DMACH_SPI2_TX, 211 + .rqtype = MEMTODEV, 212 + }, { 213 + .peri_id = (u8)DMACH_PCM0_RX, 214 + .rqtype = DEVTOMEM, 215 + }, { 216 + .peri_id = (u8)DMACH_PCM1_TX, 217 + .rqtype = MEMTODEV, 218 + }, { 219 + .peri_id = (u8)DMACH_PCM1_RX, 220 + .rqtype = DEVTOMEM, 221 + }, { 222 + .peri_id = (u8)DMACH_PCM1_TX, 223 + .rqtype = MEMTODEV, 224 + }, { 225 + .peri_id = (u8)DMACH_MSM_REQ0, 226 + }, { 227 + .peri_id = (u8)DMACH_MSM_REQ1, 228 + }, { 229 + .peri_id = (u8)DMACH_MSM_REQ2, 230 + }, { 231 + .peri_id = (u8)DMACH_MSM_REQ3, 108 232 }, 109 233 }; 110 234 111 - static struct s3c_pl330_platdata s5pc100_pdma1_pdata = { 112 - .peri = { 113 - [0] = DMACH_UART0_RX, 114 - [1] = DMACH_UART0_TX, 115 - [2] = DMACH_UART1_RX, 116 - [3] = DMACH_UART1_TX, 117 - [4] = DMACH_UART2_RX, 118 - [5] = DMACH_UART2_TX, 119 - [6] = DMACH_UART3_RX, 120 - [7] = DMACH_UART3_TX, 121 - [8] = DMACH_IRDA, 122 - [9] = DMACH_I2S0_RX, 123 - [10] = DMACH_I2S0_TX, 124 - [11] = DMACH_I2S0S_TX, 125 - [12] = DMACH_I2S1_RX, 126 - [13] = DMACH_I2S1_TX, 127 - [14] = DMACH_I2S2_RX, 128 - [15] = DMACH_I2S2_TX, 129 - [16] = DMACH_SPI0_RX, 130 - [17] = DMACH_SPI0_TX, 131 - [18] = DMACH_SPI1_RX, 132 - [19] = DMACH_SPI1_TX, 133 - [20] = DMACH_SPI2_RX, 134 - [21] = DMACH_SPI2_TX, 135 - 
[22] = DMACH_PCM0_RX, 136 - [23] = DMACH_PCM0_TX, 137 - [24] = DMACH_PCM1_RX, 138 - [25] = DMACH_PCM1_TX, 139 - [26] = DMACH_MSM_REQ0, 140 - [27] = DMACH_MSM_REQ1, 141 - [28] = DMACH_MSM_REQ2, 142 - [29] = DMACH_MSM_REQ3, 143 - [30] = DMACH_MAX, 144 - [31] = DMACH_MAX, 145 - }, 235 + struct dma_pl330_platdata s5pc100_pdma1_pdata = { 236 + .nr_valid_peri = ARRAY_SIZE(pdma1_peri), 237 + .peri = pdma1_peri, 146 238 }; 147 239 148 - static struct platform_device s5pc100_device_pdma1 = { 149 - .name = "s3c-pl330", 150 - .id = 1, 151 - .num_resources = ARRAY_SIZE(s5pc100_pdma1_resource), 152 - .resource = s5pc100_pdma1_resource, 153 - .dev = { 240 + struct amba_device s5pc100_device_pdma1 = { 241 + .dev = { 242 + .init_name = "dma-pl330.1", 154 243 .dma_mask = &dma_dmamask, 155 244 .coherent_dma_mask = DMA_BIT_MASK(32), 156 245 .platform_data = &s5pc100_pdma1_pdata, 157 246 }, 158 - }; 159 - 160 - static struct platform_device *s5pc100_dmacs[] __initdata = { 161 - &s5pc100_device_pdma0, 162 - &s5pc100_device_pdma1, 247 + .res = { 248 + .start = S5PC100_PA_PDMA1, 249 + .end = S5PC100_PA_PDMA1 + SZ_4K, 250 + .flags = IORESOURCE_MEM, 251 + }, 252 + .irq = {IRQ_PDMA1, NO_IRQ}, 253 + .periphid = 0x00041330, 163 254 }; 164 255 165 256 static int __init s5pc100_dma_init(void) 166 257 { 167 - platform_add_devices(s5pc100_dmacs, ARRAY_SIZE(s5pc100_dmacs)); 258 + amba_device_register(&s5pc100_device_pdma0, &iomem_resource); 168 259 169 260 return 0; 170 261 }
+2 -2
arch/arm/mach-s5pc100/include/mach/dma.h
··· 20 20 #ifndef __MACH_DMA_H 21 21 #define __MACH_DMA_H 22 22 23 - /* This platform uses the common S3C DMA API driver for PL330 */ 24 - #include <plat/s3c-dma-pl330.h> 23 + /* This platform uses the common DMA API driver for PL330 */ 24 + #include <plat/dma-pl330.h> 25 25 26 26 #endif /* __MACH_DMA_H */
+1 -1
arch/arm/mach-s5pv210/Kconfig
··· 11 11 12 12 config CPU_S5PV210 13 13 bool 14 - select S3C_PL330_DMA 14 + select SAMSUNG_DMADEV 15 15 select S5P_EXT_INT 16 16 select S5P_HRT 17 17 help
+8 -2
arch/arm/mach-s5pv210/clock.c
··· 203 203 .name = "pcmcdclk", 204 204 }; 205 205 206 + static struct clk dummy_apb_pclk = { 207 + .name = "apb_pclk", 208 + .id = -1, 209 + }; 210 + 206 211 static struct clk *clkset_vpllsrc_list[] = { 207 212 [0] = &clk_fin_vpll, 208 213 [1] = &clk_sclk_hdmi27m, ··· 294 289 295 290 static struct clk init_clocks_off[] = { 296 291 { 297 - .name = "pdma", 292 + .name = "dma", 298 293 .devname = "s3c-pl330.0", 299 294 .parent = &clk_hclk_psys.clk, 300 295 .enable = s5pv210_clk_ip0_ctrl, 301 296 .ctrlbit = (1 << 3), 302 297 }, { 303 - .name = "pdma", 298 + .name = "dma", 304 299 .devname = "s3c-pl330.1", 305 300 .parent = &clk_hclk_psys.clk, 306 301 .enable = s5pv210_clk_ip0_ctrl, ··· 1164 1159 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1165 1160 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 1166 1161 1162 + s3c24xx_register_clock(&dummy_apb_pclk); 1167 1163 s3c_pwmclk_init(); 1168 1164 }
+204 -112
arch/arm/mach-s5pv210/dma.c
··· 1 - /* 1 + /* linux/arch/arm/mach-s5pv210/dma.c 2 + * 3 + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 + * http://www.samsung.com 5 + * 2 6 * Copyright (C) 2010 Samsung Electronics Co. Ltd. 3 7 * Jaswinder Singh <jassi.brar@samsung.com> 4 8 * ··· 21 17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 22 18 */ 23 19 24 - #include <linux/platform_device.h> 25 20 #include <linux/dma-mapping.h> 21 + #include <linux/amba/bus.h> 22 + #include <linux/amba/pl330.h> 26 23 24 + #include <asm/irq.h> 27 25 #include <plat/devs.h> 28 26 #include <plat/irqs.h> 29 27 30 28 #include <mach/map.h> 31 29 #include <mach/irqs.h> 32 - 33 - #include <plat/s3c-pl330-pdata.h> 30 + #include <mach/dma.h> 34 31 35 32 static u64 dma_dmamask = DMA_BIT_MASK(32); 36 33 37 - static struct resource s5pv210_pdma0_resource[] = { 38 - [0] = { 39 - .start = S5PV210_PA_PDMA0, 40 - .end = S5PV210_PA_PDMA0 + SZ_4K, 41 - .flags = IORESOURCE_MEM, 42 - }, 43 - [1] = { 44 - .start = IRQ_PDMA0, 45 - .end = IRQ_PDMA0, 46 - .flags = IORESOURCE_IRQ, 34 + struct dma_pl330_peri pdma0_peri[28] = { 35 + { 36 + .peri_id = (u8)DMACH_UART0_RX, 37 + .rqtype = DEVTOMEM, 38 + }, { 39 + .peri_id = (u8)DMACH_UART0_TX, 40 + .rqtype = MEMTODEV, 41 + }, { 42 + .peri_id = (u8)DMACH_UART1_RX, 43 + .rqtype = DEVTOMEM, 44 + }, { 45 + .peri_id = (u8)DMACH_UART1_TX, 46 + .rqtype = MEMTODEV, 47 + }, { 48 + .peri_id = (u8)DMACH_UART2_RX, 49 + .rqtype = DEVTOMEM, 50 + }, { 51 + .peri_id = (u8)DMACH_UART2_TX, 52 + .rqtype = MEMTODEV, 53 + }, { 54 + .peri_id = (u8)DMACH_UART3_RX, 55 + .rqtype = DEVTOMEM, 56 + }, { 57 + .peri_id = (u8)DMACH_UART3_TX, 58 + .rqtype = MEMTODEV, 59 + }, { 60 + .peri_id = DMACH_MAX, 61 + }, { 62 + .peri_id = (u8)DMACH_I2S0_RX, 63 + .rqtype = DEVTOMEM, 64 + }, { 65 + .peri_id = (u8)DMACH_I2S0_TX, 66 + .rqtype = MEMTODEV, 67 + }, { 68 + .peri_id = (u8)DMACH_I2S0S_TX, 69 + .rqtype = MEMTODEV, 70 + }, { 71 + .peri_id = (u8)DMACH_I2S1_RX, 72 + .rqtype = DEVTOMEM, 73 + }, { 74 + .peri_id = 
(u8)DMACH_I2S1_TX, 75 + .rqtype = MEMTODEV, 76 + }, { 77 + .peri_id = (u8)DMACH_MAX, 78 + }, { 79 + .peri_id = (u8)DMACH_MAX, 80 + }, { 81 + .peri_id = (u8)DMACH_SPI0_RX, 82 + .rqtype = DEVTOMEM, 83 + }, { 84 + .peri_id = (u8)DMACH_SPI0_TX, 85 + .rqtype = MEMTODEV, 86 + }, { 87 + .peri_id = (u8)DMACH_SPI1_RX, 88 + .rqtype = DEVTOMEM, 89 + }, { 90 + .peri_id = (u8)DMACH_SPI1_TX, 91 + .rqtype = MEMTODEV, 92 + }, { 93 + .peri_id = (u8)DMACH_MAX, 94 + }, { 95 + .peri_id = (u8)DMACH_MAX, 96 + }, { 97 + .peri_id = (u8)DMACH_AC97_MICIN, 98 + .rqtype = DEVTOMEM, 99 + }, { 100 + .peri_id = (u8)DMACH_AC97_PCMIN, 101 + .rqtype = DEVTOMEM, 102 + }, { 103 + .peri_id = (u8)DMACH_AC97_PCMOUT, 104 + .rqtype = MEMTODEV, 105 + }, { 106 + .peri_id = (u8)DMACH_MAX, 107 + }, { 108 + .peri_id = (u8)DMACH_PWM, 109 + }, { 110 + .peri_id = (u8)DMACH_SPDIF, 111 + .rqtype = MEMTODEV, 47 112 }, 48 113 }; 49 114 50 - static struct s3c_pl330_platdata s5pv210_pdma0_pdata = { 51 - .peri = { 52 - [0] = DMACH_UART0_RX, 53 - [1] = DMACH_UART0_TX, 54 - [2] = DMACH_UART1_RX, 55 - [3] = DMACH_UART1_TX, 56 - [4] = DMACH_UART2_RX, 57 - [5] = DMACH_UART2_TX, 58 - [6] = DMACH_UART3_RX, 59 - [7] = DMACH_UART3_TX, 60 - [8] = DMACH_MAX, 61 - [9] = DMACH_I2S0_RX, 62 - [10] = DMACH_I2S0_TX, 63 - [11] = DMACH_I2S0S_TX, 64 - [12] = DMACH_I2S1_RX, 65 - [13] = DMACH_I2S1_TX, 66 - [14] = DMACH_MAX, 67 - [15] = DMACH_MAX, 68 - [16] = DMACH_SPI0_RX, 69 - [17] = DMACH_SPI0_TX, 70 - [18] = DMACH_SPI1_RX, 71 - [19] = DMACH_SPI1_TX, 72 - [20] = DMACH_MAX, 73 - [21] = DMACH_MAX, 74 - [22] = DMACH_AC97_MICIN, 75 - [23] = DMACH_AC97_PCMIN, 76 - [24] = DMACH_AC97_PCMOUT, 77 - [25] = DMACH_MAX, 78 - [26] = DMACH_PWM, 79 - [27] = DMACH_SPDIF, 80 - [28] = DMACH_MAX, 81 - [29] = DMACH_MAX, 82 - [30] = DMACH_MAX, 83 - [31] = DMACH_MAX, 84 - }, 115 + struct dma_pl330_platdata s5pv210_pdma0_pdata = { 116 + .nr_valid_peri = ARRAY_SIZE(pdma0_peri), 117 + .peri = pdma0_peri, 85 118 }; 86 119 87 - static struct platform_device 
s5pv210_device_pdma0 = { 88 - .name = "s3c-pl330", 89 - .id = 0, 90 - .num_resources = ARRAY_SIZE(s5pv210_pdma0_resource), 91 - .resource = s5pv210_pdma0_resource, 92 - .dev = { 120 + struct amba_device s5pv210_device_pdma0 = { 121 + .dev = { 122 + .init_name = "dma-pl330.0", 93 123 .dma_mask = &dma_dmamask, 94 124 .coherent_dma_mask = DMA_BIT_MASK(32), 95 125 .platform_data = &s5pv210_pdma0_pdata, 96 126 }, 97 - }; 98 - 99 - static struct resource s5pv210_pdma1_resource[] = { 100 - [0] = { 101 - .start = S5PV210_PA_PDMA1, 102 - .end = S5PV210_PA_PDMA1 + SZ_4K, 127 + .res = { 128 + .start = S5PV210_PA_PDMA0, 129 + .end = S5PV210_PA_PDMA0 + SZ_4K, 103 130 .flags = IORESOURCE_MEM, 104 131 }, 105 - [1] = { 106 - .start = IRQ_PDMA1, 107 - .end = IRQ_PDMA1, 108 - .flags = IORESOURCE_IRQ, 132 + .irq = {IRQ_PDMA0, NO_IRQ}, 133 + .periphid = 0x00041330, 134 + }; 135 + 136 + struct dma_pl330_peri pdma1_peri[32] = { 137 + { 138 + .peri_id = (u8)DMACH_UART0_RX, 139 + .rqtype = DEVTOMEM, 140 + }, { 141 + .peri_id = (u8)DMACH_UART0_TX, 142 + .rqtype = MEMTODEV, 143 + }, { 144 + .peri_id = (u8)DMACH_UART1_RX, 145 + .rqtype = DEVTOMEM, 146 + }, { 147 + .peri_id = (u8)DMACH_UART1_TX, 148 + .rqtype = MEMTODEV, 149 + }, { 150 + .peri_id = (u8)DMACH_UART2_RX, 151 + .rqtype = DEVTOMEM, 152 + }, { 153 + .peri_id = (u8)DMACH_UART2_TX, 154 + .rqtype = MEMTODEV, 155 + }, { 156 + .peri_id = (u8)DMACH_UART3_RX, 157 + .rqtype = DEVTOMEM, 158 + }, { 159 + .peri_id = (u8)DMACH_UART3_TX, 160 + .rqtype = MEMTODEV, 161 + }, { 162 + .peri_id = DMACH_MAX, 163 + }, { 164 + .peri_id = (u8)DMACH_I2S0_RX, 165 + .rqtype = DEVTOMEM, 166 + }, { 167 + .peri_id = (u8)DMACH_I2S0_TX, 168 + .rqtype = MEMTODEV, 169 + }, { 170 + .peri_id = (u8)DMACH_I2S0S_TX, 171 + .rqtype = MEMTODEV, 172 + }, { 173 + .peri_id = (u8)DMACH_I2S1_RX, 174 + .rqtype = DEVTOMEM, 175 + }, { 176 + .peri_id = (u8)DMACH_I2S1_TX, 177 + .rqtype = MEMTODEV, 178 + }, { 179 + .peri_id = (u8)DMACH_I2S2_RX, 180 + .rqtype = DEVTOMEM, 181 + }, { 
182 + .peri_id = (u8)DMACH_I2S2_TX, 183 + .rqtype = MEMTODEV, 184 + }, { 185 + .peri_id = (u8)DMACH_SPI0_RX, 186 + .rqtype = DEVTOMEM, 187 + }, { 188 + .peri_id = (u8)DMACH_SPI0_TX, 189 + .rqtype = MEMTODEV, 190 + }, { 191 + .peri_id = (u8)DMACH_SPI1_RX, 192 + .rqtype = DEVTOMEM, 193 + }, { 194 + .peri_id = (u8)DMACH_SPI1_TX, 195 + .rqtype = MEMTODEV, 196 + }, { 197 + .peri_id = (u8)DMACH_MAX, 198 + }, { 199 + .peri_id = (u8)DMACH_MAX, 200 + }, { 201 + .peri_id = (u8)DMACH_PCM0_RX, 202 + .rqtype = DEVTOMEM, 203 + }, { 204 + .peri_id = (u8)DMACH_PCM0_TX, 205 + .rqtype = MEMTODEV, 206 + }, { 207 + .peri_id = (u8)DMACH_PCM1_RX, 208 + .rqtype = DEVTOMEM, 209 + }, { 210 + .peri_id = (u8)DMACH_PCM1_TX, 211 + .rqtype = MEMTODEV, 212 + }, { 213 + .peri_id = (u8)DMACH_MSM_REQ0, 214 + }, { 215 + .peri_id = (u8)DMACH_MSM_REQ1, 216 + }, { 217 + .peri_id = (u8)DMACH_MSM_REQ2, 218 + }, { 219 + .peri_id = (u8)DMACH_MSM_REQ3, 220 + }, { 221 + .peri_id = (u8)DMACH_PCM2_RX, 222 + .rqtype = DEVTOMEM, 223 + }, { 224 + .peri_id = (u8)DMACH_PCM2_TX, 225 + .rqtype = MEMTODEV, 109 226 }, 110 227 }; 111 228 112 - static struct s3c_pl330_platdata s5pv210_pdma1_pdata = { 113 - .peri = { 114 - [0] = DMACH_UART0_RX, 115 - [1] = DMACH_UART0_TX, 116 - [2] = DMACH_UART1_RX, 117 - [3] = DMACH_UART1_TX, 118 - [4] = DMACH_UART2_RX, 119 - [5] = DMACH_UART2_TX, 120 - [6] = DMACH_UART3_RX, 121 - [7] = DMACH_UART3_TX, 122 - [8] = DMACH_MAX, 123 - [9] = DMACH_I2S0_RX, 124 - [10] = DMACH_I2S0_TX, 125 - [11] = DMACH_I2S0S_TX, 126 - [12] = DMACH_I2S1_RX, 127 - [13] = DMACH_I2S1_TX, 128 - [14] = DMACH_I2S2_RX, 129 - [15] = DMACH_I2S2_TX, 130 - [16] = DMACH_SPI0_RX, 131 - [17] = DMACH_SPI0_TX, 132 - [18] = DMACH_SPI1_RX, 133 - [19] = DMACH_SPI1_TX, 134 - [20] = DMACH_MAX, 135 - [21] = DMACH_MAX, 136 - [22] = DMACH_PCM0_RX, 137 - [23] = DMACH_PCM0_TX, 138 - [24] = DMACH_PCM1_RX, 139 - [25] = DMACH_PCM1_TX, 140 - [26] = DMACH_MSM_REQ0, 141 - [27] = DMACH_MSM_REQ1, 142 - [28] = DMACH_MSM_REQ2, 143 - [29] = 
DMACH_MSM_REQ3, 144 - [30] = DMACH_PCM2_RX, 145 - [31] = DMACH_PCM2_TX, 146 - }, 229 + struct dma_pl330_platdata s5pv210_pdma1_pdata = { 230 + .nr_valid_peri = ARRAY_SIZE(pdma1_peri), 231 + .peri = pdma1_peri, 147 232 }; 148 233 149 - static struct platform_device s5pv210_device_pdma1 = { 150 - .name = "s3c-pl330", 151 - .id = 1, 152 - .num_resources = ARRAY_SIZE(s5pv210_pdma1_resource), 153 - .resource = s5pv210_pdma1_resource, 154 - .dev = { 234 + struct amba_device s5pv210_device_pdma1 = { 235 + .dev = { 236 + .init_name = "dma-pl330.1", 155 237 .dma_mask = &dma_dmamask, 156 238 .coherent_dma_mask = DMA_BIT_MASK(32), 157 239 .platform_data = &s5pv210_pdma1_pdata, 158 240 }, 159 - }; 160 - 161 - static struct platform_device *s5pv210_dmacs[] __initdata = { 162 - &s5pv210_device_pdma0, 163 - &s5pv210_device_pdma1, 241 + .res = { 242 + .start = S5PV210_PA_PDMA1, 243 + .end = S5PV210_PA_PDMA1 + SZ_4K, 244 + .flags = IORESOURCE_MEM, 245 + }, 246 + .irq = {IRQ_PDMA1, NO_IRQ}, 247 + .periphid = 0x00041330, 164 248 }; 165 249 166 250 static int __init s5pv210_dma_init(void) 167 251 { 168 - platform_add_devices(s5pv210_dmacs, ARRAY_SIZE(s5pv210_dmacs)); 252 + amba_device_register(&s5pv210_device_pdma0, &iomem_resource); 169 253 170 254 return 0; 171 255 }
+2 -2
arch/arm/mach-s5pv210/include/mach/dma.h
··· 20 20 #ifndef __MACH_DMA_H 21 21 #define __MACH_DMA_H 22 22 23 - /* This platform uses the common S3C DMA API driver for PL330 */ 24 - #include <plat/s3c-dma-pl330.h> 23 + /* This platform uses the common DMA API driver for PL330 */ 24 + #include <plat/dma-pl330.h> 25 25 26 26 #endif /* __MACH_DMA_H */
+5 -5
arch/arm/plat-s3c24xx/dma.c
··· 1094 1094 * 1095 1095 * configure the dma source/destination hardware type and address 1096 1096 * 1097 - * source: S3C2410_DMASRC_HW: source is hardware 1098 - * S3C2410_DMASRC_MEM: source is memory 1097 + * source: DMA_FROM_DEVICE: source is hardware 1098 + * DMA_TO_DEVICE: source is memory 1099 1099 * 1100 1100 * devaddr: physical address of the source 1101 1101 */ 1102 1102 1103 1103 int s3c2410_dma_devconfig(enum dma_ch channel, 1104 - enum s3c2410_dmasrc source, 1104 + enum dma_data_direction source, 1105 1105 unsigned long devaddr) 1106 1106 { 1107 1107 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); ··· 1131 1131 hwcfg |= S3C2410_DISRCC_INC; 1132 1132 1133 1133 switch (source) { 1134 - case S3C2410_DMASRC_HW: 1134 + case DMA_FROM_DEVICE: 1135 1135 /* source is hardware */ 1136 1136 pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n", 1137 1137 __func__, devaddr, hwcfg); ··· 1142 1142 chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST); 1143 1143 break; 1144 1144 1145 - case S3C2410_DMASRC_MEM: 1145 + case DMA_TO_DEVICE: 1146 1146 /* source is memory */ 1147 1147 pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n", 1148 1148 __func__, devaddr, hwcfg);
+6 -3
arch/arm/plat-samsung/Kconfig
··· 295 295 help 296 296 Internal configuration for S3C DMA core 297 297 298 - config S3C_PL330_DMA 298 + config SAMSUNG_DMADEV 299 299 bool 300 - select PL330 300 + select DMADEVICES 301 + select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \ 302 + CPU_S5P6450 || CPU_S5P6440) 303 + select ARM_AMBA 301 304 help 302 - S3C DMA API Driver for PL330 DMAC. 305 + Use DMA device engine for PL330 DMAC. 303 306 304 307 comment "Power management" 305 308
+2 -2
arch/arm/plat-samsung/Makefile
··· 62 62 63 63 # DMA support 64 64 65 - obj-$(CONFIG_S3C_DMA) += dma.o 65 + obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o 66 66 67 - obj-$(CONFIG_S3C_PL330_DMA) += s3c-pl330.o 67 + obj-$(CONFIG_SAMSUNG_DMADEV) += dma-ops.o 68 68 69 69 # PM support 70 70
+131
arch/arm/plat-samsung/dma-ops.c
··· 1 + /* linux/arch/arm/plat-samsung/dma-ops.c 2 + * 3 + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 + * http://www.samsung.com 5 + * 6 + * Samsung DMA Operations 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/kernel.h> 14 + #include <linux/errno.h> 15 + #include <linux/amba/pl330.h> 16 + #include <linux/scatterlist.h> 17 + 18 + #include <mach/dma.h> 19 + 20 + static inline bool pl330_filter(struct dma_chan *chan, void *param) 21 + { 22 + struct dma_pl330_peri *peri = chan->private; 23 + return peri->peri_id == (unsigned)param; 24 + } 25 + 26 + static unsigned samsung_dmadev_request(enum dma_ch dma_ch, 27 + struct samsung_dma_info *info) 28 + { 29 + struct dma_chan *chan; 30 + dma_cap_mask_t mask; 31 + struct dma_slave_config slave_config; 32 + 33 + dma_cap_zero(mask); 34 + dma_cap_set(info->cap, mask); 35 + 36 + chan = dma_request_channel(mask, pl330_filter, (void *)dma_ch); 37 + 38 + if (info->direction == DMA_FROM_DEVICE) { 39 + memset(&slave_config, 0, sizeof(struct dma_slave_config)); 40 + slave_config.direction = info->direction; 41 + slave_config.src_addr = info->fifo; 42 + slave_config.src_addr_width = info->width; 43 + slave_config.src_maxburst = 1; 44 + dmaengine_slave_config(chan, &slave_config); 45 + } else if (info->direction == DMA_TO_DEVICE) { 46 + memset(&slave_config, 0, sizeof(struct dma_slave_config)); 47 + slave_config.direction = info->direction; 48 + slave_config.dst_addr = info->fifo; 49 + slave_config.dst_addr_width = info->width; 50 + slave_config.dst_maxburst = 1; 51 + dmaengine_slave_config(chan, &slave_config); 52 + } 53 + 54 + return (unsigned)chan; 55 + } 56 + 57 + static int samsung_dmadev_release(unsigned ch, 58 + struct s3c2410_dma_client *client) 59 + { 60 + dma_release_channel((struct dma_chan *)ch); 61 + 62 + return 0; 63 + } 64 + 
65 + static int samsung_dmadev_prepare(unsigned ch, 66 + struct samsung_dma_prep_info *info) 67 + { 68 + struct scatterlist sg; 69 + struct dma_chan *chan = (struct dma_chan *)ch; 70 + struct dma_async_tx_descriptor *desc; 71 + 72 + switch (info->cap) { 73 + case DMA_SLAVE: 74 + sg_init_table(&sg, 1); 75 + sg_dma_len(&sg) = info->len; 76 + sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)), 77 + info->len, offset_in_page(info->buf)); 78 + sg_dma_address(&sg) = info->buf; 79 + 80 + desc = chan->device->device_prep_slave_sg(chan, 81 + &sg, 1, info->direction, DMA_PREP_INTERRUPT); 82 + break; 83 + case DMA_CYCLIC: 84 + desc = chan->device->device_prep_dma_cyclic(chan, 85 + info->buf, info->len, info->period, info->direction); 86 + break; 87 + default: 88 + dev_err(&chan->dev->device, "unsupported format\n"); 89 + return -EFAULT; 90 + } 91 + 92 + if (!desc) { 93 + dev_err(&chan->dev->device, "cannot prepare cyclic dma\n"); 94 + return -EFAULT; 95 + } 96 + 97 + desc->callback = info->fp; 98 + desc->callback_param = info->fp_param; 99 + 100 + dmaengine_submit((struct dma_async_tx_descriptor *)desc); 101 + 102 + return 0; 103 + } 104 + 105 + static inline int samsung_dmadev_trigger(unsigned ch) 106 + { 107 + dma_async_issue_pending((struct dma_chan *)ch); 108 + 109 + return 0; 110 + } 111 + 112 + static inline int samsung_dmadev_flush(unsigned ch) 113 + { 114 + return dmaengine_terminate_all((struct dma_chan *)ch); 115 + } 116 + 117 + struct samsung_dma_ops dmadev_ops = { 118 + .request = samsung_dmadev_request, 119 + .release = samsung_dmadev_release, 120 + .prepare = samsung_dmadev_prepare, 121 + .trigger = samsung_dmadev_trigger, 122 + .started = NULL, 123 + .flush = samsung_dmadev_flush, 124 + .stop = samsung_dmadev_flush, 125 + }; 126 + 127 + void *samsung_dmadev_get_ops(void) 128 + { 129 + return &dmadev_ops; 130 + } 131 + EXPORT_SYMBOL(samsung_dmadev_get_ops);
+63
arch/arm/plat-samsung/include/plat/dma-ops.h
··· 1 + /* arch/arm/plat-samsung/include/plat/dma-ops.h 2 + * 3 + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 + * http://www.samsung.com 5 + * 6 + * Samsung DMA support 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #ifndef __SAMSUNG_DMA_OPS_H_ 14 + #define __SAMSUNG_DMA_OPS_H_ __FILE__ 15 + 16 + #include <linux/dmaengine.h> 17 + 18 + struct samsung_dma_prep_info { 19 + enum dma_transaction_type cap; 20 + enum dma_data_direction direction; 21 + dma_addr_t buf; 22 + unsigned long period; 23 + unsigned long len; 24 + void (*fp)(void *data); 25 + void *fp_param; 26 + }; 27 + 28 + struct samsung_dma_info { 29 + enum dma_transaction_type cap; 30 + enum dma_data_direction direction; 31 + enum dma_slave_buswidth width; 32 + dma_addr_t fifo; 33 + struct s3c2410_dma_client *client; 34 + }; 35 + 36 + struct samsung_dma_ops { 37 + unsigned (*request)(enum dma_ch ch, struct samsung_dma_info *info); 38 + int (*release)(unsigned ch, struct s3c2410_dma_client *client); 39 + int (*prepare)(unsigned ch, struct samsung_dma_prep_info *info); 40 + int (*trigger)(unsigned ch); 41 + int (*started)(unsigned ch); 42 + int (*flush)(unsigned ch); 43 + int (*stop)(unsigned ch); 44 + }; 45 + 46 + extern void *samsung_dmadev_get_ops(void); 47 + extern void *s3c_dma_get_ops(void); 48 + 49 + static inline void *__samsung_dma_get_ops(void) 50 + { 51 + if (samsung_dma_is_dmadev()) 52 + return samsung_dmadev_get_ops(); 53 + else 54 + return s3c_dma_get_ops(); 55 + } 56 + 57 + /* 58 + * samsung_dma_get_ops 59 + * get the set of samsung dma operations 60 + */ 61 + #define samsung_dma_get_ops() __samsung_dma_get_ops() 62 + 63 + #endif /* __SAMSUNG_DMA_OPS_H_ */
+1 -1
arch/arm/plat-samsung/include/plat/dma-s3c24xx.h
··· 41 41 42 42 void (*direction)(struct s3c2410_dma_chan *chan, 43 43 struct s3c24xx_dma_map *map, 44 - enum s3c2410_dmasrc dir); 44 + enum dma_data_direction dir); 45 45 }; 46 46 47 47 extern int s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel);
+4 -6
arch/arm/plat-samsung/include/plat/dma.h
··· 10 10 * published by the Free Software Foundation. 11 11 */ 12 12 13 + #include <linux/dma-mapping.h> 14 + 13 15 enum s3c2410_dma_buffresult { 14 16 S3C2410_RES_OK, 15 17 S3C2410_RES_ERR, 16 18 S3C2410_RES_ABORT 17 - }; 18 - 19 - enum s3c2410_dmasrc { 20 - S3C2410_DMASRC_HW, /* source is memory */ 21 - S3C2410_DMASRC_MEM /* source is hardware */ 22 19 }; 23 20 24 21 /* enum s3c2410_chan_op ··· 109 112 */ 110 113 111 114 extern int s3c2410_dma_devconfig(enum dma_ch channel, 112 - enum s3c2410_dmasrc source, unsigned long devaddr); 115 + enum dma_data_direction source, unsigned long devaddr); 113 116 114 117 /* s3c2410_dma_getposition 115 118 * ··· 123 126 extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn); 124 127 125 128 129 + #include <plat/dma-ops.h>
+15 -9
arch/arm/plat-samsung/include/plat/s3c-dma-pl330.h arch/arm/plat-samsung/include/plat/dma-pl330.h
··· 8 8 * (at your option) any later version. 9 9 */ 10 10 11 - #ifndef __S3C_DMA_PL330_H_ 12 - #define __S3C_DMA_PL330_H_ 13 - 14 - #define S3C2410_DMAF_AUTOSTART (1 << 0) 15 - #define S3C2410_DMAF_CIRCULAR (1 << 1) 11 + #ifndef __DMA_PL330_H_ 12 + #define __DMA_PL330_H_ __FILE__ 16 13 17 14 /* 18 15 * PL330 can assign any channel to communicate with ··· 17 20 * For the sake of consistency across client drivers, 18 21 * We keep the channel names unchanged and only add 19 22 * missing peripherals are added. 20 - * Order is not important since S3C PL330 API driver 23 + * Order is not important since DMA PL330 API driver 21 24 * use these just as IDs. 22 25 */ 23 26 enum dma_ch { ··· 85 88 DMACH_MAX, 86 89 }; 87 90 88 - static inline bool s3c_dma_has_circular(void) 91 + struct s3c2410_dma_client { 92 + char *name; 93 + }; 94 + 95 + static inline bool samsung_dma_has_circular(void) 89 96 { 90 97 return true; 91 98 } 92 99 93 - #include <plat/dma.h> 100 + static inline bool samsung_dma_is_dmadev(void) 101 + { 102 + return true; 103 + } 94 104 95 - #endif /* __S3C_DMA_PL330_H_ */ 105 + #include <plat/dma-ops.h> 106 + 107 + #endif /* __DMA_PL330_H_ */
-32
arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h
··· 1 - /* linux/arch/arm/plat-samsung/include/plat/s3c-pl330-pdata.h 2 - * 3 - * Copyright (C) 2010 Samsung Electronics Co. Ltd. 4 - * Jaswinder Singh <jassi.brar@samsung.com> 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - */ 11 - 12 - #ifndef __S3C_PL330_PDATA_H 13 - #define __S3C_PL330_PDATA_H 14 - 15 - #include <plat/s3c-dma-pl330.h> 16 - 17 - /* 18 - * Every PL330 DMAC has max 32 peripheral interfaces, 19 - * of which some may be not be really used in your 20 - * DMAC's configuration. 21 - * Populate this array of 32 peri i/fs with relevant 22 - * channel IDs for used peri i/f and DMACH_MAX for 23 - * those unused. 24 - * 25 - * The platforms just need to provide this info 26 - * to the S3C DMA API driver for PL330. 27 - */ 28 - struct s3c_pl330_platdata { 29 - enum dma_ch peri[32]; 30 - }; 31 - 32 - #endif /* __S3C_PL330_PDATA_H */
+130
arch/arm/plat-samsung/s3c-dma-ops.c
··· 1 + /* linux/arch/arm/plat-samsung/s3c-dma-ops.c 2 + * 3 + * Copyright (c) 2011 Samsung Electronics Co., Ltd. 4 + * http://www.samsung.com 5 + * 6 + * Samsung S3C-DMA Operations 7 + * 8 + * This program is free software; you can redistribute it and/or modify 9 + * it under the terms of the GNU General Public License version 2 as 10 + * published by the Free Software Foundation. 11 + */ 12 + 13 + #include <linux/kernel.h> 14 + #include <linux/errno.h> 15 + #include <linux/slab.h> 16 + #include <linux/types.h> 17 + 18 + #include <mach/dma.h> 19 + 20 + struct cb_data { 21 + void (*fp) (void *); 22 + void *fp_param; 23 + unsigned ch; 24 + struct list_head node; 25 + }; 26 + 27 + static LIST_HEAD(dma_list); 28 + 29 + static void s3c_dma_cb(struct s3c2410_dma_chan *channel, void *param, 30 + int size, enum s3c2410_dma_buffresult res) 31 + { 32 + struct cb_data *data = param; 33 + 34 + data->fp(data->fp_param); 35 + } 36 + 37 + static unsigned s3c_dma_request(enum dma_ch dma_ch, 38 + struct samsung_dma_info *info) 39 + { 40 + struct cb_data *data; 41 + 42 + if (s3c2410_dma_request(dma_ch, info->client, NULL) < 0) { 43 + s3c2410_dma_free(dma_ch, info->client); 44 + return 0; 45 + } 46 + 47 + data = kzalloc(sizeof(struct cb_data), GFP_KERNEL); 48 + data->ch = dma_ch; 49 + list_add_tail(&data->node, &dma_list); 50 + 51 + s3c2410_dma_devconfig(dma_ch, info->direction, info->fifo); 52 + 53 + if (info->cap == DMA_CYCLIC) 54 + s3c2410_dma_setflags(dma_ch, S3C2410_DMAF_CIRCULAR); 55 + 56 + s3c2410_dma_config(dma_ch, info->width); 57 + 58 + return (unsigned)dma_ch; 59 + } 60 + 61 + static int s3c_dma_release(unsigned ch, struct s3c2410_dma_client *client) 62 + { 63 + struct cb_data *data; 64 + 65 + list_for_each_entry(data, &dma_list, node) 66 + if (data->ch == ch) 67 + break; 68 + list_del(&data->node); 69 + 70 + s3c2410_dma_free(ch, client); 71 + kfree(data); 72 + 73 + return 0; 74 + } 75 + 76 + static int s3c_dma_prepare(unsigned ch, struct samsung_dma_prep_info *info) 77 + 
{ 78 + struct cb_data *data; 79 + int len = (info->cap == DMA_CYCLIC) ? info->period : info->len; 80 + 81 + list_for_each_entry(data, &dma_list, node) 82 + if (data->ch == ch) 83 + break; 84 + 85 + if (!data->fp) { 86 + s3c2410_dma_set_buffdone_fn(ch, s3c_dma_cb); 87 + data->fp = info->fp; 88 + data->fp_param = info->fp_param; 89 + } 90 + 91 + s3c2410_dma_enqueue(ch, (void *)data, info->buf, len); 92 + 93 + return 0; 94 + } 95 + 96 + static inline int s3c_dma_trigger(unsigned ch) 97 + { 98 + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_START); 99 + } 100 + 101 + static inline int s3c_dma_started(unsigned ch) 102 + { 103 + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STARTED); 104 + } 105 + 106 + static inline int s3c_dma_flush(unsigned ch) 107 + { 108 + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_FLUSH); 109 + } 110 + 111 + static inline int s3c_dma_stop(unsigned ch) 112 + { 113 + return s3c2410_dma_ctrl(ch, S3C2410_DMAOP_STOP); 114 + } 115 + 116 + static struct samsung_dma_ops s3c_dma_ops = { 117 + .request = s3c_dma_request, 118 + .release = s3c_dma_release, 119 + .prepare = s3c_dma_prepare, 120 + .trigger = s3c_dma_trigger, 121 + .started = s3c_dma_started, 122 + .flush = s3c_dma_flush, 123 + .stop = s3c_dma_stop, 124 + }; 125 + 126 + void *s3c_dma_get_ops(void) 127 + { 128 + return &s3c_dma_ops; 129 + } 130 + EXPORT_SYMBOL(s3c_dma_get_ops);
-1244
arch/arm/plat-samsung/s3c-pl330.c
··· 1 - /* linux/arch/arm/plat-samsung/s3c-pl330.c 2 - * 3 - * Copyright (C) 2010 Samsung Electronics Co. Ltd. 4 - * Jaswinder Singh <jassi.brar@samsung.com> 5 - * 6 - * This program is free software; you can redistribute it and/or modify 7 - * it under the terms of the GNU General Public License as published by 8 - * the Free Software Foundation; either version 2 of the License, or 9 - * (at your option) any later version. 10 - */ 11 - 12 - #include <linux/init.h> 13 - #include <linux/module.h> 14 - #include <linux/interrupt.h> 15 - #include <linux/io.h> 16 - #include <linux/slab.h> 17 - #include <linux/platform_device.h> 18 - #include <linux/clk.h> 19 - #include <linux/err.h> 20 - 21 - #include <asm/hardware/pl330.h> 22 - 23 - #include <plat/s3c-pl330-pdata.h> 24 - 25 - /** 26 - * struct s3c_pl330_dmac - Logical representation of a PL330 DMAC. 27 - * @busy_chan: Number of channels currently busy. 28 - * @peri: List of IDs of peripherals this DMAC can work with. 29 - * @node: To attach to the global list of DMACs. 30 - * @pi: PL330 configuration info for the DMAC. 31 - * @kmcache: Pool to quickly allocate xfers for all channels in the dmac. 32 - * @clk: Pointer of DMAC operation clock. 33 - */ 34 - struct s3c_pl330_dmac { 35 - unsigned busy_chan; 36 - enum dma_ch *peri; 37 - struct list_head node; 38 - struct pl330_info *pi; 39 - struct kmem_cache *kmcache; 40 - struct clk *clk; 41 - }; 42 - 43 - /** 44 - * struct s3c_pl330_xfer - A request submitted by S3C DMA clients. 45 - * @token: Xfer ID provided by the client. 46 - * @node: To attach to the list of xfers on a channel. 47 - * @px: Xfer for PL330 core. 48 - * @chan: Owner channel of this xfer. 49 - */ 50 - struct s3c_pl330_xfer { 51 - void *token; 52 - struct list_head node; 53 - struct pl330_xfer px; 54 - struct s3c_pl330_chan *chan; 55 - }; 56 - 57 - /** 58 - * struct s3c_pl330_chan - Logical channel to communicate with 59 - * a Physical peripheral. 
60 - * @pl330_chan_id: Token of a hardware channel thread of PL330 DMAC. 61 - * NULL if the channel is available to be acquired. 62 - * @id: ID of the peripheral that this channel can communicate with. 63 - * @options: Options specified by the client. 64 - * @sdaddr: Address provided via s3c2410_dma_devconfig. 65 - * @node: To attach to the global list of channels. 66 - * @lrq: Pointer to the last submitted pl330_req to PL330 core. 67 - * @xfer_list: To manage list of xfers enqueued. 68 - * @req: Two requests to communicate with the PL330 engine. 69 - * @callback_fn: Callback function to the client. 70 - * @rqcfg: Channel configuration for the xfers. 71 - * @xfer_head: Pointer to the xfer to be next executed. 72 - * @dmac: Pointer to the DMAC that manages this channel, NULL if the 73 - * channel is available to be acquired. 74 - * @client: Client of this channel. NULL if the 75 - * channel is available to be acquired. 76 - */ 77 - struct s3c_pl330_chan { 78 - void *pl330_chan_id; 79 - enum dma_ch id; 80 - unsigned int options; 81 - unsigned long sdaddr; 82 - struct list_head node; 83 - struct pl330_req *lrq; 84 - struct list_head xfer_list; 85 - struct pl330_req req[2]; 86 - s3c2410_dma_cbfn_t callback_fn; 87 - struct pl330_reqcfg rqcfg; 88 - struct s3c_pl330_xfer *xfer_head; 89 - struct s3c_pl330_dmac *dmac; 90 - struct s3c2410_dma_client *client; 91 - }; 92 - 93 - /* All DMACs in the platform */ 94 - static LIST_HEAD(dmac_list); 95 - 96 - /* All channels to peripherals in the platform */ 97 - static LIST_HEAD(chan_list); 98 - 99 - /* 100 - * Since we add resources(DMACs and Channels) to the global pool, 101 - * we need to guard access to the resources using a global lock 102 - */ 103 - static DEFINE_SPINLOCK(res_lock); 104 - 105 - /* Returns the channel with ID 'id' in the chan_list */ 106 - static struct s3c_pl330_chan *id_to_chan(const enum dma_ch id) 107 - { 108 - struct s3c_pl330_chan *ch; 109 - 110 - list_for_each_entry(ch, &chan_list, node) 111 - if (ch->id 
== id) 112 - return ch; 113 - 114 - return NULL; 115 - } 116 - 117 - /* Allocate a new channel with ID 'id' and add to chan_list */ 118 - static void chan_add(const enum dma_ch id) 119 - { 120 - struct s3c_pl330_chan *ch = id_to_chan(id); 121 - 122 - /* Return if the channel already exists */ 123 - if (ch) 124 - return; 125 - 126 - ch = kmalloc(sizeof(*ch), GFP_KERNEL); 127 - /* Return silently to work with other channels */ 128 - if (!ch) 129 - return; 130 - 131 - ch->id = id; 132 - ch->dmac = NULL; 133 - 134 - list_add_tail(&ch->node, &chan_list); 135 - } 136 - 137 - /* If the channel is not yet acquired by any client */ 138 - static bool chan_free(struct s3c_pl330_chan *ch) 139 - { 140 - if (!ch) 141 - return false; 142 - 143 - /* Channel points to some DMAC only when it's acquired */ 144 - return ch->dmac ? false : true; 145 - } 146 - 147 - /* 148 - * Returns 0 is peripheral i/f is invalid or not present on the dmac. 149 - * Index + 1, otherwise. 150 - */ 151 - static unsigned iface_of_dmac(struct s3c_pl330_dmac *dmac, enum dma_ch ch_id) 152 - { 153 - enum dma_ch *id = dmac->peri; 154 - int i; 155 - 156 - /* Discount invalid markers */ 157 - if (ch_id == DMACH_MAX) 158 - return 0; 159 - 160 - for (i = 0; i < PL330_MAX_PERI; i++) 161 - if (id[i] == ch_id) 162 - return i + 1; 163 - 164 - return 0; 165 - } 166 - 167 - /* If all channel threads of the DMAC are busy */ 168 - static inline bool dmac_busy(struct s3c_pl330_dmac *dmac) 169 - { 170 - struct pl330_info *pi = dmac->pi; 171 - 172 - return (dmac->busy_chan < pi->pcfg.num_chan) ? false : true; 173 - } 174 - 175 - /* 176 - * Returns the number of free channels that 177 - * can be handled by this dmac only. 
178 - */ 179 - static unsigned ch_onlyby_dmac(struct s3c_pl330_dmac *dmac) 180 - { 181 - enum dma_ch *id = dmac->peri; 182 - struct s3c_pl330_dmac *d; 183 - struct s3c_pl330_chan *ch; 184 - unsigned found, count = 0; 185 - enum dma_ch p; 186 - int i; 187 - 188 - for (i = 0; i < PL330_MAX_PERI; i++) { 189 - p = id[i]; 190 - ch = id_to_chan(p); 191 - 192 - if (p == DMACH_MAX || !chan_free(ch)) 193 - continue; 194 - 195 - found = 0; 196 - list_for_each_entry(d, &dmac_list, node) { 197 - if (d != dmac && iface_of_dmac(d, ch->id)) { 198 - found = 1; 199 - break; 200 - } 201 - } 202 - if (!found) 203 - count++; 204 - } 205 - 206 - return count; 207 - } 208 - 209 - /* 210 - * Measure of suitability of 'dmac' handling 'ch' 211 - * 212 - * 0 indicates 'dmac' can not handle 'ch' either 213 - * because it is not supported by the hardware or 214 - * because all dmac channels are currently busy. 215 - * 216 - * >0 vlaue indicates 'dmac' has the capability. 217 - * The bigger the value the more suitable the dmac. 218 - */ 219 - #define MAX_SUIT UINT_MAX 220 - #define MIN_SUIT 0 221 - 222 - static unsigned suitablility(struct s3c_pl330_dmac *dmac, 223 - struct s3c_pl330_chan *ch) 224 - { 225 - struct pl330_info *pi = dmac->pi; 226 - enum dma_ch *id = dmac->peri; 227 - struct s3c_pl330_dmac *d; 228 - unsigned s; 229 - int i; 230 - 231 - s = MIN_SUIT; 232 - /* If all the DMAC channel threads are busy */ 233 - if (dmac_busy(dmac)) 234 - return s; 235 - 236 - for (i = 0; i < PL330_MAX_PERI; i++) 237 - if (id[i] == ch->id) 238 - break; 239 - 240 - /* If the 'dmac' can't talk to 'ch' */ 241 - if (i == PL330_MAX_PERI) 242 - return s; 243 - 244 - s = MAX_SUIT; 245 - list_for_each_entry(d, &dmac_list, node) { 246 - /* 247 - * If some other dmac can talk to this 248 - * peri and has some channel free. 
249 - */ 250 - if (d != dmac && iface_of_dmac(d, ch->id) && !dmac_busy(d)) { 251 - s = 0; 252 - break; 253 - } 254 - } 255 - if (s) 256 - return s; 257 - 258 - s = 100; 259 - 260 - /* Good if free chans are more, bad otherwise */ 261 - s += (pi->pcfg.num_chan - dmac->busy_chan) - ch_onlyby_dmac(dmac); 262 - 263 - return s; 264 - } 265 - 266 - /* More than one DMAC may have capability to transfer data with the 267 - * peripheral. This function assigns most suitable DMAC to manage the 268 - * channel and hence communicate with the peripheral. 269 - */ 270 - static struct s3c_pl330_dmac *map_chan_to_dmac(struct s3c_pl330_chan *ch) 271 - { 272 - struct s3c_pl330_dmac *d, *dmac = NULL; 273 - unsigned sn, sl = MIN_SUIT; 274 - 275 - list_for_each_entry(d, &dmac_list, node) { 276 - sn = suitablility(d, ch); 277 - 278 - if (sn == MAX_SUIT) 279 - return d; 280 - 281 - if (sn > sl) 282 - dmac = d; 283 - } 284 - 285 - return dmac; 286 - } 287 - 288 - /* Acquire the channel for peripheral 'id' */ 289 - static struct s3c_pl330_chan *chan_acquire(const enum dma_ch id) 290 - { 291 - struct s3c_pl330_chan *ch = id_to_chan(id); 292 - struct s3c_pl330_dmac *dmac; 293 - 294 - /* If the channel doesn't exist or is already acquired */ 295 - if (!ch || !chan_free(ch)) { 296 - ch = NULL; 297 - goto acq_exit; 298 - } 299 - 300 - dmac = map_chan_to_dmac(ch); 301 - /* If couldn't map */ 302 - if (!dmac) { 303 - ch = NULL; 304 - goto acq_exit; 305 - } 306 - 307 - dmac->busy_chan++; 308 - ch->dmac = dmac; 309 - 310 - acq_exit: 311 - return ch; 312 - } 313 - 314 - /* Delete xfer from the queue */ 315 - static inline void del_from_queue(struct s3c_pl330_xfer *xfer) 316 - { 317 - struct s3c_pl330_xfer *t; 318 - struct s3c_pl330_chan *ch; 319 - int found; 320 - 321 - if (!xfer) 322 - return; 323 - 324 - ch = xfer->chan; 325 - 326 - /* Make sure xfer is in the queue */ 327 - found = 0; 328 - list_for_each_entry(t, &ch->xfer_list, node) 329 - if (t == xfer) { 330 - found = 1; 331 - break; 332 - } 
333 - 334 - if (!found) 335 - return; 336 - 337 - /* If xfer is last entry in the queue */ 338 - if (xfer->node.next == &ch->xfer_list) 339 - t = list_entry(ch->xfer_list.next, 340 - struct s3c_pl330_xfer, node); 341 - else 342 - t = list_entry(xfer->node.next, 343 - struct s3c_pl330_xfer, node); 344 - 345 - /* If there was only one node left */ 346 - if (t == xfer) 347 - ch->xfer_head = NULL; 348 - else if (ch->xfer_head == xfer) 349 - ch->xfer_head = t; 350 - 351 - list_del(&xfer->node); 352 - } 353 - 354 - /* Provides pointer to the next xfer in the queue. 355 - * If CIRCULAR option is set, the list is left intact, 356 - * otherwise the xfer is removed from the list. 357 - * Forced delete 'pluck' can be set to override the CIRCULAR option. 358 - */ 359 - static struct s3c_pl330_xfer *get_from_queue(struct s3c_pl330_chan *ch, 360 - int pluck) 361 - { 362 - struct s3c_pl330_xfer *xfer = ch->xfer_head; 363 - 364 - if (!xfer) 365 - return NULL; 366 - 367 - /* If xfer is last entry in the queue */ 368 - if (xfer->node.next == &ch->xfer_list) 369 - ch->xfer_head = list_entry(ch->xfer_list.next, 370 - struct s3c_pl330_xfer, node); 371 - else 372 - ch->xfer_head = list_entry(xfer->node.next, 373 - struct s3c_pl330_xfer, node); 374 - 375 - if (pluck || !(ch->options & S3C2410_DMAF_CIRCULAR)) 376 - del_from_queue(xfer); 377 - 378 - return xfer; 379 - } 380 - 381 - static inline void add_to_queue(struct s3c_pl330_chan *ch, 382 - struct s3c_pl330_xfer *xfer, int front) 383 - { 384 - struct pl330_xfer *xt; 385 - 386 - /* If queue empty */ 387 - if (ch->xfer_head == NULL) 388 - ch->xfer_head = xfer; 389 - 390 - xt = &ch->xfer_head->px; 391 - /* If the head already submitted (CIRCULAR head) */ 392 - if (ch->options & S3C2410_DMAF_CIRCULAR && 393 - (xt == ch->req[0].x || xt == ch->req[1].x)) 394 - ch->xfer_head = xfer; 395 - 396 - /* If this is a resubmission, it should go at the head */ 397 - if (front) { 398 - ch->xfer_head = xfer; 399 - list_add(&xfer->node, &ch->xfer_list); 
400 - } else { 401 - list_add_tail(&xfer->node, &ch->xfer_list); 402 - } 403 - } 404 - 405 - static inline void _finish_off(struct s3c_pl330_xfer *xfer, 406 - enum s3c2410_dma_buffresult res, int ffree) 407 - { 408 - struct s3c_pl330_chan *ch; 409 - 410 - if (!xfer) 411 - return; 412 - 413 - ch = xfer->chan; 414 - 415 - /* Do callback */ 416 - if (ch->callback_fn) 417 - ch->callback_fn(NULL, xfer->token, xfer->px.bytes, res); 418 - 419 - /* Force Free or if buffer is not needed anymore */ 420 - if (ffree || !(ch->options & S3C2410_DMAF_CIRCULAR)) 421 - kmem_cache_free(ch->dmac->kmcache, xfer); 422 - } 423 - 424 - static inline int s3c_pl330_submit(struct s3c_pl330_chan *ch, 425 - struct pl330_req *r) 426 - { 427 - struct s3c_pl330_xfer *xfer; 428 - int ret = 0; 429 - 430 - /* If already submitted */ 431 - if (r->x) 432 - return 0; 433 - 434 - xfer = get_from_queue(ch, 0); 435 - if (xfer) { 436 - r->x = &xfer->px; 437 - 438 - /* Use max bandwidth for M<->M xfers */ 439 - if (r->rqtype == MEMTOMEM) { 440 - struct pl330_info *pi = xfer->chan->dmac->pi; 441 - int burst = 1 << ch->rqcfg.brst_size; 442 - u32 bytes = r->x->bytes; 443 - int bl; 444 - 445 - bl = pi->pcfg.data_bus_width / 8; 446 - bl *= pi->pcfg.data_buf_dep; 447 - bl /= burst; 448 - 449 - /* src/dst_burst_len can't be more than 16 */ 450 - if (bl > 16) 451 - bl = 16; 452 - 453 - while (bl > 1) { 454 - if (!(bytes % (bl * burst))) 455 - break; 456 - bl--; 457 - } 458 - 459 - ch->rqcfg.brst_len = bl; 460 - } else { 461 - ch->rqcfg.brst_len = 1; 462 - } 463 - 464 - ret = pl330_submit_req(ch->pl330_chan_id, r); 465 - 466 - /* If submission was successful */ 467 - if (!ret) { 468 - ch->lrq = r; /* latest submitted req */ 469 - return 0; 470 - } 471 - 472 - r->x = NULL; 473 - 474 - /* If both of the PL330 ping-pong buffers filled */ 475 - if (ret == -EAGAIN) { 476 - dev_err(ch->dmac->pi->dev, "%s:%d!\n", 477 - __func__, __LINE__); 478 - /* Queue back again */ 479 - add_to_queue(ch, xfer, 1); 480 - ret = 0; 481 - 
} else { 482 - dev_err(ch->dmac->pi->dev, "%s:%d!\n", 483 - __func__, __LINE__); 484 - _finish_off(xfer, S3C2410_RES_ERR, 0); 485 - } 486 - } 487 - 488 - return ret; 489 - } 490 - 491 - static void s3c_pl330_rq(struct s3c_pl330_chan *ch, 492 - struct pl330_req *r, enum pl330_op_err err) 493 - { 494 - unsigned long flags; 495 - struct s3c_pl330_xfer *xfer; 496 - struct pl330_xfer *xl = r->x; 497 - enum s3c2410_dma_buffresult res; 498 - 499 - spin_lock_irqsave(&res_lock, flags); 500 - 501 - r->x = NULL; 502 - 503 - s3c_pl330_submit(ch, r); 504 - 505 - spin_unlock_irqrestore(&res_lock, flags); 506 - 507 - /* Map result to S3C DMA API */ 508 - if (err == PL330_ERR_NONE) 509 - res = S3C2410_RES_OK; 510 - else if (err == PL330_ERR_ABORT) 511 - res = S3C2410_RES_ABORT; 512 - else 513 - res = S3C2410_RES_ERR; 514 - 515 - /* If last request had some xfer */ 516 - if (xl) { 517 - xfer = container_of(xl, struct s3c_pl330_xfer, px); 518 - _finish_off(xfer, res, 0); 519 - } else { 520 - dev_info(ch->dmac->pi->dev, "%s:%d No Xfer?!\n", 521 - __func__, __LINE__); 522 - } 523 - } 524 - 525 - static void s3c_pl330_rq0(void *token, enum pl330_op_err err) 526 - { 527 - struct pl330_req *r = token; 528 - struct s3c_pl330_chan *ch = container_of(r, 529 - struct s3c_pl330_chan, req[0]); 530 - s3c_pl330_rq(ch, r, err); 531 - } 532 - 533 - static void s3c_pl330_rq1(void *token, enum pl330_op_err err) 534 - { 535 - struct pl330_req *r = token; 536 - struct s3c_pl330_chan *ch = container_of(r, 537 - struct s3c_pl330_chan, req[1]); 538 - s3c_pl330_rq(ch, r, err); 539 - } 540 - 541 - /* Release an acquired channel */ 542 - static void chan_release(struct s3c_pl330_chan *ch) 543 - { 544 - struct s3c_pl330_dmac *dmac; 545 - 546 - if (chan_free(ch)) 547 - return; 548 - 549 - dmac = ch->dmac; 550 - ch->dmac = NULL; 551 - dmac->busy_chan--; 552 - } 553 - 554 - int s3c2410_dma_ctrl(enum dma_ch id, enum s3c2410_chan_op op) 555 - { 556 - struct s3c_pl330_xfer *xfer; 557 - enum pl330_chan_op pl330op; 
558 - struct s3c_pl330_chan *ch; 559 - unsigned long flags; 560 - int idx, ret; 561 - 562 - spin_lock_irqsave(&res_lock, flags); 563 - 564 - ch = id_to_chan(id); 565 - 566 - if (!ch || chan_free(ch)) { 567 - ret = -EINVAL; 568 - goto ctrl_exit; 569 - } 570 - 571 - switch (op) { 572 - case S3C2410_DMAOP_START: 573 - /* Make sure both reqs are enqueued */ 574 - idx = (ch->lrq == &ch->req[0]) ? 1 : 0; 575 - s3c_pl330_submit(ch, &ch->req[idx]); 576 - s3c_pl330_submit(ch, &ch->req[1 - idx]); 577 - pl330op = PL330_OP_START; 578 - break; 579 - 580 - case S3C2410_DMAOP_STOP: 581 - pl330op = PL330_OP_ABORT; 582 - break; 583 - 584 - case S3C2410_DMAOP_FLUSH: 585 - pl330op = PL330_OP_FLUSH; 586 - break; 587 - 588 - case S3C2410_DMAOP_PAUSE: 589 - case S3C2410_DMAOP_RESUME: 590 - case S3C2410_DMAOP_TIMEOUT: 591 - case S3C2410_DMAOP_STARTED: 592 - spin_unlock_irqrestore(&res_lock, flags); 593 - return 0; 594 - 595 - default: 596 - spin_unlock_irqrestore(&res_lock, flags); 597 - return -EINVAL; 598 - } 599 - 600 - ret = pl330_chan_ctrl(ch->pl330_chan_id, pl330op); 601 - 602 - if (pl330op == PL330_OP_START) { 603 - spin_unlock_irqrestore(&res_lock, flags); 604 - return ret; 605 - } 606 - 607 - idx = (ch->lrq == &ch->req[0]) ? 1 : 0; 608 - 609 - /* Abort the current xfer */ 610 - if (ch->req[idx].x) { 611 - xfer = container_of(ch->req[idx].x, 612 - struct s3c_pl330_xfer, px); 613 - 614 - /* Drop xfer during FLUSH */ 615 - if (pl330op == PL330_OP_FLUSH) 616 - del_from_queue(xfer); 617 - 618 - ch->req[idx].x = NULL; 619 - 620 - spin_unlock_irqrestore(&res_lock, flags); 621 - _finish_off(xfer, S3C2410_RES_ABORT, 622 - pl330op == PL330_OP_FLUSH ? 
1 : 0); 623 - spin_lock_irqsave(&res_lock, flags); 624 - } 625 - 626 - /* Flush the whole queue */ 627 - if (pl330op == PL330_OP_FLUSH) { 628 - 629 - if (ch->req[1 - idx].x) { 630 - xfer = container_of(ch->req[1 - idx].x, 631 - struct s3c_pl330_xfer, px); 632 - 633 - del_from_queue(xfer); 634 - 635 - ch->req[1 - idx].x = NULL; 636 - 637 - spin_unlock_irqrestore(&res_lock, flags); 638 - _finish_off(xfer, S3C2410_RES_ABORT, 1); 639 - spin_lock_irqsave(&res_lock, flags); 640 - } 641 - 642 - /* Finish off the remaining in the queue */ 643 - xfer = ch->xfer_head; 644 - while (xfer) { 645 - 646 - del_from_queue(xfer); 647 - 648 - spin_unlock_irqrestore(&res_lock, flags); 649 - _finish_off(xfer, S3C2410_RES_ABORT, 1); 650 - spin_lock_irqsave(&res_lock, flags); 651 - 652 - xfer = ch->xfer_head; 653 - } 654 - } 655 - 656 - ctrl_exit: 657 - spin_unlock_irqrestore(&res_lock, flags); 658 - 659 - return ret; 660 - } 661 - EXPORT_SYMBOL(s3c2410_dma_ctrl); 662 - 663 - int s3c2410_dma_enqueue(enum dma_ch id, void *token, 664 - dma_addr_t addr, int size) 665 - { 666 - struct s3c_pl330_chan *ch; 667 - struct s3c_pl330_xfer *xfer; 668 - unsigned long flags; 669 - int idx, ret = 0; 670 - 671 - spin_lock_irqsave(&res_lock, flags); 672 - 673 - ch = id_to_chan(id); 674 - 675 - /* Error if invalid or free channel */ 676 - if (!ch || chan_free(ch)) { 677 - ret = -EINVAL; 678 - goto enq_exit; 679 - } 680 - 681 - /* Error if size is unaligned */ 682 - if (ch->rqcfg.brst_size && size % (1 << ch->rqcfg.brst_size)) { 683 - ret = -EINVAL; 684 - goto enq_exit; 685 - } 686 - 687 - xfer = kmem_cache_alloc(ch->dmac->kmcache, GFP_ATOMIC); 688 - if (!xfer) { 689 - ret = -ENOMEM; 690 - goto enq_exit; 691 - } 692 - 693 - xfer->token = token; 694 - xfer->chan = ch; 695 - xfer->px.bytes = size; 696 - xfer->px.next = NULL; /* Single request */ 697 - 698 - /* For S3C DMA API, direction is always fixed for all xfers */ 699 - if (ch->req[0].rqtype == MEMTODEV) { 700 - xfer->px.src_addr = addr; 701 - 
xfer->px.dst_addr = ch->sdaddr; 702 - } else { 703 - xfer->px.src_addr = ch->sdaddr; 704 - xfer->px.dst_addr = addr; 705 - } 706 - 707 - add_to_queue(ch, xfer, 0); 708 - 709 - /* Try submitting on either request */ 710 - idx = (ch->lrq == &ch->req[0]) ? 1 : 0; 711 - 712 - if (!ch->req[idx].x) 713 - s3c_pl330_submit(ch, &ch->req[idx]); 714 - else 715 - s3c_pl330_submit(ch, &ch->req[1 - idx]); 716 - 717 - spin_unlock_irqrestore(&res_lock, flags); 718 - 719 - if (ch->options & S3C2410_DMAF_AUTOSTART) 720 - s3c2410_dma_ctrl(id, S3C2410_DMAOP_START); 721 - 722 - return 0; 723 - 724 - enq_exit: 725 - spin_unlock_irqrestore(&res_lock, flags); 726 - 727 - return ret; 728 - } 729 - EXPORT_SYMBOL(s3c2410_dma_enqueue); 730 - 731 - int s3c2410_dma_request(enum dma_ch id, 732 - struct s3c2410_dma_client *client, 733 - void *dev) 734 - { 735 - struct s3c_pl330_dmac *dmac; 736 - struct s3c_pl330_chan *ch; 737 - unsigned long flags; 738 - int ret = 0; 739 - 740 - spin_lock_irqsave(&res_lock, flags); 741 - 742 - ch = chan_acquire(id); 743 - if (!ch) { 744 - ret = -EBUSY; 745 - goto req_exit; 746 - } 747 - 748 - dmac = ch->dmac; 749 - 750 - ch->pl330_chan_id = pl330_request_channel(dmac->pi); 751 - if (!ch->pl330_chan_id) { 752 - chan_release(ch); 753 - ret = -EBUSY; 754 - goto req_exit; 755 - } 756 - 757 - ch->client = client; 758 - ch->options = 0; /* Clear any option */ 759 - ch->callback_fn = NULL; /* Clear any callback */ 760 - ch->lrq = NULL; 761 - 762 - ch->rqcfg.brst_size = 2; /* Default word size */ 763 - ch->rqcfg.swap = SWAP_NO; 764 - ch->rqcfg.scctl = SCCTRL0; /* Noncacheable and nonbufferable */ 765 - ch->rqcfg.dcctl = DCCTRL0; /* Noncacheable and nonbufferable */ 766 - ch->rqcfg.privileged = 0; 767 - ch->rqcfg.insnaccess = 0; 768 - 769 - /* Set invalid direction */ 770 - ch->req[0].rqtype = DEVTODEV; 771 - ch->req[1].rqtype = ch->req[0].rqtype; 772 - 773 - ch->req[0].cfg = &ch->rqcfg; 774 - ch->req[1].cfg = ch->req[0].cfg; 775 - 776 - ch->req[0].peri = 
iface_of_dmac(dmac, id) - 1; /* Original index */ 777 - ch->req[1].peri = ch->req[0].peri; 778 - 779 - ch->req[0].token = &ch->req[0]; 780 - ch->req[0].xfer_cb = s3c_pl330_rq0; 781 - ch->req[1].token = &ch->req[1]; 782 - ch->req[1].xfer_cb = s3c_pl330_rq1; 783 - 784 - ch->req[0].x = NULL; 785 - ch->req[1].x = NULL; 786 - 787 - /* Reset xfer list */ 788 - INIT_LIST_HEAD(&ch->xfer_list); 789 - ch->xfer_head = NULL; 790 - 791 - req_exit: 792 - spin_unlock_irqrestore(&res_lock, flags); 793 - 794 - return ret; 795 - } 796 - EXPORT_SYMBOL(s3c2410_dma_request); 797 - 798 - int s3c2410_dma_free(enum dma_ch id, struct s3c2410_dma_client *client) 799 - { 800 - struct s3c_pl330_chan *ch; 801 - struct s3c_pl330_xfer *xfer; 802 - unsigned long flags; 803 - int ret = 0; 804 - unsigned idx; 805 - 806 - spin_lock_irqsave(&res_lock, flags); 807 - 808 - ch = id_to_chan(id); 809 - 810 - if (!ch || chan_free(ch)) 811 - goto free_exit; 812 - 813 - /* Refuse if someone else wanted to free the channel */ 814 - if (ch->client != client) { 815 - ret = -EBUSY; 816 - goto free_exit; 817 - } 818 - 819 - /* Stop any active xfer, Flushe the queue and do callbacks */ 820 - pl330_chan_ctrl(ch->pl330_chan_id, PL330_OP_FLUSH); 821 - 822 - /* Abort the submitted requests */ 823 - idx = (ch->lrq == &ch->req[0]) ? 
1 : 0; 824 - 825 - if (ch->req[idx].x) { 826 - xfer = container_of(ch->req[idx].x, 827 - struct s3c_pl330_xfer, px); 828 - 829 - ch->req[idx].x = NULL; 830 - del_from_queue(xfer); 831 - 832 - spin_unlock_irqrestore(&res_lock, flags); 833 - _finish_off(xfer, S3C2410_RES_ABORT, 1); 834 - spin_lock_irqsave(&res_lock, flags); 835 - } 836 - 837 - if (ch->req[1 - idx].x) { 838 - xfer = container_of(ch->req[1 - idx].x, 839 - struct s3c_pl330_xfer, px); 840 - 841 - ch->req[1 - idx].x = NULL; 842 - del_from_queue(xfer); 843 - 844 - spin_unlock_irqrestore(&res_lock, flags); 845 - _finish_off(xfer, S3C2410_RES_ABORT, 1); 846 - spin_lock_irqsave(&res_lock, flags); 847 - } 848 - 849 - /* Pluck and Abort the queued requests in order */ 850 - do { 851 - xfer = get_from_queue(ch, 1); 852 - 853 - spin_unlock_irqrestore(&res_lock, flags); 854 - _finish_off(xfer, S3C2410_RES_ABORT, 1); 855 - spin_lock_irqsave(&res_lock, flags); 856 - } while (xfer); 857 - 858 - ch->client = NULL; 859 - 860 - pl330_release_channel(ch->pl330_chan_id); 861 - 862 - ch->pl330_chan_id = NULL; 863 - 864 - chan_release(ch); 865 - 866 - free_exit: 867 - spin_unlock_irqrestore(&res_lock, flags); 868 - 869 - return ret; 870 - } 871 - EXPORT_SYMBOL(s3c2410_dma_free); 872 - 873 - int s3c2410_dma_config(enum dma_ch id, int xferunit) 874 - { 875 - struct s3c_pl330_chan *ch; 876 - struct pl330_info *pi; 877 - unsigned long flags; 878 - int i, dbwidth, ret = 0; 879 - 880 - spin_lock_irqsave(&res_lock, flags); 881 - 882 - ch = id_to_chan(id); 883 - 884 - if (!ch || chan_free(ch)) { 885 - ret = -EINVAL; 886 - goto cfg_exit; 887 - } 888 - 889 - pi = ch->dmac->pi; 890 - dbwidth = pi->pcfg.data_bus_width / 8; 891 - 892 - /* Max size of xfer can be pcfg.data_bus_width */ 893 - if (xferunit > dbwidth) { 894 - ret = -EINVAL; 895 - goto cfg_exit; 896 - } 897 - 898 - i = 0; 899 - while (xferunit != (1 << i)) 900 - i++; 901 - 902 - /* If valid value */ 903 - if (xferunit == (1 << i)) 904 - ch->rqcfg.brst_size = i; 905 - else 
906 - ret = -EINVAL; 907 - 908 - cfg_exit: 909 - spin_unlock_irqrestore(&res_lock, flags); 910 - 911 - return ret; 912 - } 913 - EXPORT_SYMBOL(s3c2410_dma_config); 914 - 915 - /* Options that are supported by this driver */ 916 - #define S3C_PL330_FLAGS (S3C2410_DMAF_CIRCULAR | S3C2410_DMAF_AUTOSTART) 917 - 918 - int s3c2410_dma_setflags(enum dma_ch id, unsigned int options) 919 - { 920 - struct s3c_pl330_chan *ch; 921 - unsigned long flags; 922 - int ret = 0; 923 - 924 - spin_lock_irqsave(&res_lock, flags); 925 - 926 - ch = id_to_chan(id); 927 - 928 - if (!ch || chan_free(ch) || options & ~(S3C_PL330_FLAGS)) 929 - ret = -EINVAL; 930 - else 931 - ch->options = options; 932 - 933 - spin_unlock_irqrestore(&res_lock, flags); 934 - 935 - return 0; 936 - } 937 - EXPORT_SYMBOL(s3c2410_dma_setflags); 938 - 939 - int s3c2410_dma_set_buffdone_fn(enum dma_ch id, s3c2410_dma_cbfn_t rtn) 940 - { 941 - struct s3c_pl330_chan *ch; 942 - unsigned long flags; 943 - int ret = 0; 944 - 945 - spin_lock_irqsave(&res_lock, flags); 946 - 947 - ch = id_to_chan(id); 948 - 949 - if (!ch || chan_free(ch)) 950 - ret = -EINVAL; 951 - else 952 - ch->callback_fn = rtn; 953 - 954 - spin_unlock_irqrestore(&res_lock, flags); 955 - 956 - return ret; 957 - } 958 - EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); 959 - 960 - int s3c2410_dma_devconfig(enum dma_ch id, enum s3c2410_dmasrc source, 961 - unsigned long address) 962 - { 963 - struct s3c_pl330_chan *ch; 964 - unsigned long flags; 965 - int ret = 0; 966 - 967 - spin_lock_irqsave(&res_lock, flags); 968 - 969 - ch = id_to_chan(id); 970 - 971 - if (!ch || chan_free(ch)) { 972 - ret = -EINVAL; 973 - goto devcfg_exit; 974 - } 975 - 976 - switch (source) { 977 - case S3C2410_DMASRC_HW: /* P->M */ 978 - ch->req[0].rqtype = DEVTOMEM; 979 - ch->req[1].rqtype = DEVTOMEM; 980 - ch->rqcfg.src_inc = 0; 981 - ch->rqcfg.dst_inc = 1; 982 - break; 983 - case S3C2410_DMASRC_MEM: /* M->P */ 984 - ch->req[0].rqtype = MEMTODEV; 985 - ch->req[1].rqtype = MEMTODEV; 986 - 
ch->rqcfg.src_inc = 1; 987 - ch->rqcfg.dst_inc = 0; 988 - break; 989 - default: 990 - ret = -EINVAL; 991 - goto devcfg_exit; 992 - } 993 - 994 - ch->sdaddr = address; 995 - 996 - devcfg_exit: 997 - spin_unlock_irqrestore(&res_lock, flags); 998 - 999 - return ret; 1000 - } 1001 - EXPORT_SYMBOL(s3c2410_dma_devconfig); 1002 - 1003 - int s3c2410_dma_getposition(enum dma_ch id, dma_addr_t *src, dma_addr_t *dst) 1004 - { 1005 - struct s3c_pl330_chan *ch = id_to_chan(id); 1006 - struct pl330_chanstatus status; 1007 - int ret; 1008 - 1009 - if (!ch || chan_free(ch)) 1010 - return -EINVAL; 1011 - 1012 - ret = pl330_chan_status(ch->pl330_chan_id, &status); 1013 - if (ret < 0) 1014 - return ret; 1015 - 1016 - *src = status.src_addr; 1017 - *dst = status.dst_addr; 1018 - 1019 - return 0; 1020 - } 1021 - EXPORT_SYMBOL(s3c2410_dma_getposition); 1022 - 1023 - static irqreturn_t pl330_irq_handler(int irq, void *data) 1024 - { 1025 - if (pl330_update(data)) 1026 - return IRQ_HANDLED; 1027 - else 1028 - return IRQ_NONE; 1029 - } 1030 - 1031 - static int pl330_probe(struct platform_device *pdev) 1032 - { 1033 - struct s3c_pl330_dmac *s3c_pl330_dmac; 1034 - struct s3c_pl330_platdata *pl330pd; 1035 - struct pl330_info *pl330_info; 1036 - struct resource *res; 1037 - int i, ret, irq; 1038 - 1039 - pl330pd = pdev->dev.platform_data; 1040 - 1041 - /* Can't do without the list of _32_ peripherals */ 1042 - if (!pl330pd || !pl330pd->peri) { 1043 - dev_err(&pdev->dev, "platform data missing!\n"); 1044 - return -ENODEV; 1045 - } 1046 - 1047 - pl330_info = kzalloc(sizeof(*pl330_info), GFP_KERNEL); 1048 - if (!pl330_info) 1049 - return -ENOMEM; 1050 - 1051 - pl330_info->pl330_data = NULL; 1052 - pl330_info->dev = &pdev->dev; 1053 - 1054 - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1055 - if (!res) { 1056 - ret = -ENODEV; 1057 - goto probe_err1; 1058 - } 1059 - 1060 - request_mem_region(res->start, resource_size(res), pdev->name); 1061 - 1062 - pl330_info->base = ioremap(res->start, 
resource_size(res)); 1063 - if (!pl330_info->base) { 1064 - ret = -ENXIO; 1065 - goto probe_err2; 1066 - } 1067 - 1068 - irq = platform_get_irq(pdev, 0); 1069 - if (irq < 0) { 1070 - ret = irq; 1071 - goto probe_err3; 1072 - } 1073 - 1074 - ret = request_irq(irq, pl330_irq_handler, 0, 1075 - dev_name(&pdev->dev), pl330_info); 1076 - if (ret) 1077 - goto probe_err4; 1078 - 1079 - /* Allocate a new DMAC */ 1080 - s3c_pl330_dmac = kmalloc(sizeof(*s3c_pl330_dmac), GFP_KERNEL); 1081 - if (!s3c_pl330_dmac) { 1082 - ret = -ENOMEM; 1083 - goto probe_err5; 1084 - } 1085 - 1086 - /* Get operation clock and enable it */ 1087 - s3c_pl330_dmac->clk = clk_get(&pdev->dev, "pdma"); 1088 - if (IS_ERR(s3c_pl330_dmac->clk)) { 1089 - dev_err(&pdev->dev, "Cannot get operation clock.\n"); 1090 - ret = -EINVAL; 1091 - goto probe_err6; 1092 - } 1093 - clk_enable(s3c_pl330_dmac->clk); 1094 - 1095 - ret = pl330_add(pl330_info); 1096 - if (ret) 1097 - goto probe_err7; 1098 - 1099 - /* Hook the info */ 1100 - s3c_pl330_dmac->pi = pl330_info; 1101 - 1102 - /* No busy channels */ 1103 - s3c_pl330_dmac->busy_chan = 0; 1104 - 1105 - s3c_pl330_dmac->kmcache = kmem_cache_create(dev_name(&pdev->dev), 1106 - sizeof(struct s3c_pl330_xfer), 0, 0, NULL); 1107 - 1108 - if (!s3c_pl330_dmac->kmcache) { 1109 - ret = -ENOMEM; 1110 - goto probe_err8; 1111 - } 1112 - 1113 - /* Get the list of peripherals */ 1114 - s3c_pl330_dmac->peri = pl330pd->peri; 1115 - 1116 - /* Attach to the list of DMACs */ 1117 - list_add_tail(&s3c_pl330_dmac->node, &dmac_list); 1118 - 1119 - /* Create a channel for each peripheral in the DMAC 1120 - * that is, if it doesn't already exist 1121 - */ 1122 - for (i = 0; i < PL330_MAX_PERI; i++) 1123 - if (s3c_pl330_dmac->peri[i] != DMACH_MAX) 1124 - chan_add(s3c_pl330_dmac->peri[i]); 1125 - 1126 - printk(KERN_INFO 1127 - "Loaded driver for PL330 DMAC-%d %s\n", pdev->id, pdev->name); 1128 - printk(KERN_INFO 1129 - "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", 1130 - 
pl330_info->pcfg.data_buf_dep, 1131 - pl330_info->pcfg.data_bus_width / 8, pl330_info->pcfg.num_chan, 1132 - pl330_info->pcfg.num_peri, pl330_info->pcfg.num_events); 1133 - 1134 - return 0; 1135 - 1136 - probe_err8: 1137 - pl330_del(pl330_info); 1138 - probe_err7: 1139 - clk_disable(s3c_pl330_dmac->clk); 1140 - clk_put(s3c_pl330_dmac->clk); 1141 - probe_err6: 1142 - kfree(s3c_pl330_dmac); 1143 - probe_err5: 1144 - free_irq(irq, pl330_info); 1145 - probe_err4: 1146 - probe_err3: 1147 - iounmap(pl330_info->base); 1148 - probe_err2: 1149 - release_mem_region(res->start, resource_size(res)); 1150 - probe_err1: 1151 - kfree(pl330_info); 1152 - 1153 - return ret; 1154 - } 1155 - 1156 - static int pl330_remove(struct platform_device *pdev) 1157 - { 1158 - struct s3c_pl330_dmac *dmac, *d; 1159 - struct s3c_pl330_chan *ch; 1160 - unsigned long flags; 1161 - int del, found; 1162 - 1163 - if (!pdev->dev.platform_data) 1164 - return -EINVAL; 1165 - 1166 - spin_lock_irqsave(&res_lock, flags); 1167 - 1168 - found = 0; 1169 - list_for_each_entry(d, &dmac_list, node) 1170 - if (d->pi->dev == &pdev->dev) { 1171 - found = 1; 1172 - break; 1173 - } 1174 - 1175 - if (!found) { 1176 - spin_unlock_irqrestore(&res_lock, flags); 1177 - return 0; 1178 - } 1179 - 1180 - dmac = d; 1181 - 1182 - /* Remove all Channels that are managed only by this DMAC */ 1183 - list_for_each_entry(ch, &chan_list, node) { 1184 - 1185 - /* Only channels that are handled by this DMAC */ 1186 - if (iface_of_dmac(dmac, ch->id)) 1187 - del = 1; 1188 - else 1189 - continue; 1190 - 1191 - /* Don't remove if some other DMAC has it too */ 1192 - list_for_each_entry(d, &dmac_list, node) 1193 - if (d != dmac && iface_of_dmac(d, ch->id)) { 1194 - del = 0; 1195 - break; 1196 - } 1197 - 1198 - if (del) { 1199 - spin_unlock_irqrestore(&res_lock, flags); 1200 - s3c2410_dma_free(ch->id, ch->client); 1201 - spin_lock_irqsave(&res_lock, flags); 1202 - list_del(&ch->node); 1203 - kfree(ch); 1204 - } 1205 - } 1206 - 1207 - /* 
Disable operation clock */ 1208 - clk_disable(dmac->clk); 1209 - clk_put(dmac->clk); 1210 - 1211 - /* Remove the DMAC */ 1212 - list_del(&dmac->node); 1213 - kfree(dmac); 1214 - 1215 - spin_unlock_irqrestore(&res_lock, flags); 1216 - 1217 - return 0; 1218 - } 1219 - 1220 - static struct platform_driver pl330_driver = { 1221 - .driver = { 1222 - .owner = THIS_MODULE, 1223 - .name = "s3c-pl330", 1224 - }, 1225 - .probe = pl330_probe, 1226 - .remove = pl330_remove, 1227 - }; 1228 - 1229 - static int __init pl330_init(void) 1230 - { 1231 - return platform_driver_register(&pl330_driver); 1232 - } 1233 - module_init(pl330_init); 1234 - 1235 - static void __exit pl330_exit(void) 1236 - { 1237 - platform_driver_unregister(&pl330_driver); 1238 - return; 1239 - } 1240 - module_exit(pl330_exit); 1241 - 1242 - MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); 1243 - MODULE_DESCRIPTION("Driver for PL330 DMA Controller"); 1244 - MODULE_LICENSE("GPL");
+2 -1
drivers/dma/Kconfig
··· 193 193 config PL330_DMA 194 194 tristate "DMA API Driver for PL330" 195 195 select DMA_ENGINE 196 - depends on PL330 196 + depends on ARM_AMBA 197 + select PL330 197 198 help 198 199 Select if your platform has one or more PL330 DMACs. 199 200 You need to provide platform specific settings via
+334 -342
drivers/dma/amba-pl08x.c
··· 66 66 * after the final transfer signalled by LBREQ or LSREQ. The DMAC 67 67 * will then move to the next LLI entry. 68 68 * 69 - * Only the former works sanely with scatter lists, so we only implement 70 - * the DMAC flow control method. However, peripherals which use the LBREQ 71 - * and LSREQ signals (eg, MMCI) are unable to use this mode, which through 72 - * these hardware restrictions prevents them from using scatter DMA. 73 - * 74 69 * Global TODO: 75 70 * - Break out common code from arch/arm/mach-s3c64xx and share 76 71 */ 77 - #include <linux/device.h> 78 - #include <linux/init.h> 79 - #include <linux/module.h> 80 - #include <linux/interrupt.h> 81 - #include <linux/slab.h> 82 - #include <linux/delay.h> 83 - #include <linux/dma-mapping.h> 84 - #include <linux/dmapool.h> 85 - #include <linux/dmaengine.h> 86 72 #include <linux/amba/bus.h> 87 73 #include <linux/amba/pl08x.h> 88 74 #include <linux/debugfs.h> 75 + #include <linux/delay.h> 76 + #include <linux/device.h> 77 + #include <linux/dmaengine.h> 78 + #include <linux/dmapool.h> 79 + #include <linux/dma-mapping.h> 80 + #include <linux/init.h> 81 + #include <linux/interrupt.h> 82 + #include <linux/module.h> 83 + #include <linux/pm_runtime.h> 89 84 #include <linux/seq_file.h> 90 - 85 + #include <linux/slab.h> 91 86 #include <asm/hardware/pl080.h> 92 87 93 88 #define DRIVER_NAME "pl08xdmac" 89 + 90 + static struct amba_driver pl08x_amba_driver; 94 91 95 92 /** 96 93 * struct vendor_data - vendor-specific config parameters for PL08x derivatives ··· 123 126 * @phy_chans: array of data for the physical channels 124 127 * @pool: a pool for the LLI descriptors 125 128 * @pool_ctr: counter of LLIs in the pool 126 - * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI fetches 129 + * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI 130 + * fetches 127 131 * @mem_buses: set to indicate memory transfers on AHB2. 
128 132 * @lock: a spinlock for this struct 129 133 */ ··· 146 148 /* 147 149 * PL08X specific defines 148 150 */ 149 - 150 - /* 151 - * Memory boundaries: the manual for PL08x says that the controller 152 - * cannot read past a 1KiB boundary, so these defines are used to 153 - * create transfer LLIs that do not cross such boundaries. 154 - */ 155 - #define PL08X_BOUNDARY_SHIFT (10) /* 1KB 0x400 */ 156 - #define PL08X_BOUNDARY_SIZE (1 << PL08X_BOUNDARY_SHIFT) 157 151 158 152 /* Size (bytes) of each LLI buffer allocated for one transfer */ 159 153 # define PL08X_LLI_TSFR_SIZE 0x2000 ··· 262 272 writel(val, ch->base + PL080_CH_CONFIG); 263 273 } 264 274 265 - 266 275 /* 267 276 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and 268 277 * clears any pending interrupt status. This should not be used for ··· 352 363 if (!list_empty(&plchan->pend_list)) { 353 364 struct pl08x_txd *txdi; 354 365 list_for_each_entry(txdi, &plchan->pend_list, node) { 355 - bytes += txdi->len; 366 + struct pl08x_sg *dsg; 367 + list_for_each_entry(dsg, &txd->dsg_list, node) 368 + bytes += dsg->len; 356 369 } 357 370 } 358 371 ··· 398 407 return NULL; 399 408 } 400 409 410 + pm_runtime_get_sync(&pl08x->adev->dev); 401 411 return ch; 402 412 } 403 413 ··· 411 419 412 420 /* Stop the channel and clear its interrupts */ 413 421 pl08x_terminate_phy_chan(pl08x, ch); 422 + 423 + pm_runtime_put(&pl08x->adev->dev); 414 424 415 425 /* Mark it as free */ 416 426 ch->serving = NULL; ··· 493 499 }; 494 500 495 501 /* 496 - * Autoselect a master bus to use for the transfer this prefers the 497 - * destination bus if both available if fixed address on one bus the 498 - * other will be chosen 502 + * Autoselect a master bus to use for the transfer. Slave will be the chosen as 503 + * victim in case src & dest are not similarly aligned. i.e. 
If after aligning 504 + * masters address with width requirements of transfer (by sending few byte by 505 + * byte data), slave is still not aligned, then its width will be reduced to 506 + * BYTE. 507 + * - prefers the destination bus if both available 508 + * - prefers bus with fixed address (i.e. peripheral) 499 509 */ 500 510 static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd, 501 511 struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl) 502 512 { 503 513 if (!(cctl & PL080_CONTROL_DST_INCR)) { 504 - *mbus = &bd->srcbus; 505 - *sbus = &bd->dstbus; 506 - } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 507 514 *mbus = &bd->dstbus; 508 515 *sbus = &bd->srcbus; 516 + } else if (!(cctl & PL080_CONTROL_SRC_INCR)) { 517 + *mbus = &bd->srcbus; 518 + *sbus = &bd->dstbus; 509 519 } else { 510 - if (bd->dstbus.buswidth == 4) { 520 + if (bd->dstbus.buswidth >= bd->srcbus.buswidth) { 511 521 *mbus = &bd->dstbus; 512 522 *sbus = &bd->srcbus; 513 - } else if (bd->srcbus.buswidth == 4) { 514 - *mbus = &bd->srcbus; 515 - *sbus = &bd->dstbus; 516 - } else if (bd->dstbus.buswidth == 2) { 517 - *mbus = &bd->dstbus; 518 - *sbus = &bd->srcbus; 519 - } else if (bd->srcbus.buswidth == 2) { 520 - *mbus = &bd->srcbus; 521 - *sbus = &bd->dstbus; 522 523 } else { 523 - /* bd->srcbus.buswidth == 1 */ 524 - *mbus = &bd->dstbus; 525 - *sbus = &bd->srcbus; 524 + *mbus = &bd->srcbus; 525 + *sbus = &bd->dstbus; 526 526 } 527 527 } 528 528 } ··· 535 547 llis_va[num_llis].cctl = cctl; 536 548 llis_va[num_llis].src = bd->srcbus.addr; 537 549 llis_va[num_llis].dst = bd->dstbus.addr; 538 - llis_va[num_llis].lli = llis_bus + (num_llis + 1) * sizeof(struct pl08x_lli); 550 + llis_va[num_llis].lli = llis_bus + (num_llis + 1) * 551 + sizeof(struct pl08x_lli); 539 552 llis_va[num_llis].lli |= bd->lli_bus; 540 553 541 554 if (cctl & PL080_CONTROL_SRC_INCR) ··· 549 560 bd->remainder -= len; 550 561 } 551 562 552 - /* 553 - * Return number of bytes to fill to boundary, or len. 
554 - * This calculation works for any value of addr. 555 - */ 556 - static inline size_t pl08x_pre_boundary(u32 addr, size_t len) 563 + static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd, 564 + u32 *cctl, u32 len, int num_llis, size_t *total_bytes) 557 565 { 558 - size_t boundary_len = PL08X_BOUNDARY_SIZE - 559 - (addr & (PL08X_BOUNDARY_SIZE - 1)); 560 - 561 - return min(boundary_len, len); 566 + *cctl = pl08x_cctl_bits(*cctl, 1, 1, len); 567 + pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl); 568 + (*total_bytes) += len; 562 569 } 563 570 564 571 /* ··· 568 583 struct pl08x_bus_data *mbus, *sbus; 569 584 struct pl08x_lli_build_data bd; 570 585 int num_llis = 0; 571 - u32 cctl; 572 - size_t max_bytes_per_lli; 573 - size_t total_bytes = 0; 586 + u32 cctl, early_bytes = 0; 587 + size_t max_bytes_per_lli, total_bytes; 574 588 struct pl08x_lli *llis_va; 589 + struct pl08x_sg *dsg; 575 590 576 - txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, 577 - &txd->llis_bus); 591 + txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus); 578 592 if (!txd->llis_va) { 579 593 dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__); 580 594 return 0; ··· 581 597 582 598 pl08x->pool_ctr++; 583 599 584 - /* Get the default CCTL */ 585 - cctl = txd->cctl; 586 - 587 600 bd.txd = txd; 588 - bd.srcbus.addr = txd->src_addr; 589 - bd.dstbus.addr = txd->dst_addr; 590 601 bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? 
PL080_LLI_LM_AHB2 : 0; 602 + cctl = txd->cctl; 591 603 592 604 /* Find maximum width of the source bus */ 593 605 bd.srcbus.maxwidth = ··· 595 615 pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >> 596 616 PL080_CONTROL_DWIDTH_SHIFT); 597 617 598 - /* Set up the bus widths to the maximum */ 599 - bd.srcbus.buswidth = bd.srcbus.maxwidth; 600 - bd.dstbus.buswidth = bd.dstbus.maxwidth; 618 + list_for_each_entry(dsg, &txd->dsg_list, node) { 619 + total_bytes = 0; 620 + cctl = txd->cctl; 601 621 602 - /* 603 - * Bytes transferred == tsize * MIN(buswidths), not max(buswidths) 604 - */ 605 - max_bytes_per_lli = min(bd.srcbus.buswidth, bd.dstbus.buswidth) * 606 - PL080_CONTROL_TRANSFER_SIZE_MASK; 622 + bd.srcbus.addr = dsg->src_addr; 623 + bd.dstbus.addr = dsg->dst_addr; 624 + bd.remainder = dsg->len; 625 + bd.srcbus.buswidth = bd.srcbus.maxwidth; 626 + bd.dstbus.buswidth = bd.dstbus.maxwidth; 607 627 608 - /* We need to count this down to zero */ 609 - bd.remainder = txd->len; 628 + pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 610 629 611 - /* 612 - * Choose bus to align to 613 - * - prefers destination bus if both available 614 - * - if fixed address on one bus chooses other 615 - */ 616 - pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl); 617 - 618 - dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu llimax=%zu\n", 619 - bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 620 - bd.srcbus.buswidth, 621 - bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", 622 - bd.dstbus.buswidth, 623 - bd.remainder, max_bytes_per_lli); 624 - dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 625 - mbus == &bd.srcbus ? "src" : "dst", 626 - sbus == &bd.srcbus ? 
"src" : "dst"); 627 - 628 - if (txd->len < mbus->buswidth) { 629 - /* Less than a bus width available - send as single bytes */ 630 - while (bd.remainder) { 631 - dev_vdbg(&pl08x->adev->dev, 632 - "%s single byte LLIs for a transfer of " 633 - "less than a bus width (remain 0x%08x)\n", 634 - __func__, bd.remainder); 635 - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 636 - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 637 - total_bytes++; 638 - } 639 - } else { 640 - /* Make one byte LLIs until master bus is aligned */ 641 - while ((mbus->addr) % (mbus->buswidth)) { 642 - dev_vdbg(&pl08x->adev->dev, 643 - "%s adjustment lli for less than bus width " 644 - "(remain 0x%08x)\n", 645 - __func__, bd.remainder); 646 - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 647 - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 648 - total_bytes++; 649 - } 630 + dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n", 631 + bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "", 632 + bd.srcbus.buswidth, 633 + bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "", 634 + bd.dstbus.buswidth, 635 + bd.remainder); 636 + dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n", 637 + mbus == &bd.srcbus ? "src" : "dst", 638 + sbus == &bd.srcbus ? "src" : "dst"); 650 639 651 640 /* 652 - * Master now aligned 653 - * - if slave is not then we must set its width down 641 + * Zero length is only allowed if all these requirements are 642 + * met: 643 + * - flow controller is peripheral. 644 + * - src.addr is aligned to src.width 645 + * - dst.addr is aligned to dst.width 646 + * 647 + * sg_len == 1 should be true, as there can be two cases here: 648 + * 649 + * - Memory addresses are contiguous and are not scattered. 650 + * Here, Only one sg will be passed by user driver, with 651 + * memory address and zero length. We pass this to controller 652 + * and after the transfer it will receive the last burst 653 + * request from peripheral and so transfer finishes. 
654 + * 655 + * - Memory addresses are scattered and are not contiguous. 656 + * Here, Obviously as DMA controller doesn't know when a lli's 657 + * transfer gets over, it can't load next lli. So in this 658 + * case, there has to be an assumption that only one lli is 659 + * supported. Thus, we can't have scattered addresses. 654 660 */ 655 - if (sbus->addr % sbus->buswidth) { 656 - dev_dbg(&pl08x->adev->dev, 657 - "%s set down bus width to one byte\n", 658 - __func__); 659 - 660 - sbus->buswidth = 1; 661 - } 662 - 663 - /* 664 - * Make largest possible LLIs until less than one bus 665 - * width left 666 - */ 667 - while (bd.remainder > (mbus->buswidth - 1)) { 668 - size_t lli_len, target_len, tsize, odd_bytes; 669 - 670 - /* 671 - * If enough left try to send max possible, 672 - * otherwise try to send the remainder 673 - */ 674 - target_len = min(bd.remainder, max_bytes_per_lli); 675 - 676 - /* 677 - * Set bus lengths for incrementing buses to the 678 - * number of bytes which fill to next memory boundary, 679 - * limiting on the target length calculated above. 
680 - */ 681 - if (cctl & PL080_CONTROL_SRC_INCR) 682 - bd.srcbus.fill_bytes = 683 - pl08x_pre_boundary(bd.srcbus.addr, 684 - target_len); 685 - else 686 - bd.srcbus.fill_bytes = target_len; 687 - 688 - if (cctl & PL080_CONTROL_DST_INCR) 689 - bd.dstbus.fill_bytes = 690 - pl08x_pre_boundary(bd.dstbus.addr, 691 - target_len); 692 - else 693 - bd.dstbus.fill_bytes = target_len; 694 - 695 - /* Find the nearest */ 696 - lli_len = min(bd.srcbus.fill_bytes, 697 - bd.dstbus.fill_bytes); 698 - 699 - BUG_ON(lli_len > bd.remainder); 700 - 701 - if (lli_len <= 0) { 702 - dev_err(&pl08x->adev->dev, 703 - "%s lli_len is %zu, <= 0\n", 704 - __func__, lli_len); 661 + if (!bd.remainder) { 662 + u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >> 663 + PL080_CONFIG_FLOW_CONTROL_SHIFT; 664 + if (!((fc >= PL080_FLOW_SRC2DST_DST) && 665 + (fc <= PL080_FLOW_SRC2DST_SRC))) { 666 + dev_err(&pl08x->adev->dev, "%s sg len can't be zero", 667 + __func__); 705 668 return 0; 706 669 } 707 670 708 - if (lli_len == target_len) { 709 - /* 710 - * Can send what we wanted. 711 - * Maintain alignment 712 - */ 713 - lli_len = (lli_len/mbus->buswidth) * 714 - mbus->buswidth; 715 - odd_bytes = 0; 716 - } else { 717 - /* 718 - * So now we know how many bytes to transfer 719 - * to get to the nearest boundary. The next 720 - * LLI will past the boundary. However, we 721 - * may be working to a boundary on the slave 722 - * bus. We need to ensure the master stays 723 - * aligned, and that we are working in 724 - * multiples of the bus widths. 
725 - 				 */
 726 - 				odd_bytes = lli_len % mbus->buswidth;
 727 - 				lli_len -= odd_bytes;
 728 - 
 671 + 			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
 672 + 				(bd.dstbus.addr % bd.dstbus.buswidth)) {
 673 + 				dev_err(&pl08x->adev->dev,
 674 + 					"%s src & dst address must be aligned to src"
 675 + 					" & dst width if peripheral is flow controller",
 676 + 					__func__);
 677 + 				return 0;
 729 678 			}
 730 679 
 731 - 			if (lli_len) {
 732 - 				/*
 733 - 				 * Check against minimum bus alignment:
 734 - 				 * Calculate actual transfer size in relation
 735 - 				 * to bus width an get a maximum remainder of
 736 - 				 * the smallest bus width - 1
 737 - 				 */
 738 - 				/* FIXME: use round_down()? */
 739 - 				tsize = lli_len / min(mbus->buswidth,
 740 - 				      sbus->buswidth);
 741 - 				lli_len = tsize * min(mbus->buswidth,
 742 - 				      sbus->buswidth);
 743 - 
 744 - 				if (target_len != lli_len) {
 745 - 					dev_vdbg(&pl08x->adev->dev,
 746 - 					"%s can't send what we want. Desired 0x%08zx, lli of 0x%08zx bytes in txd of 0x%08zx\n",
 747 - 					__func__, target_len, lli_len, txd->len);
 748 - 				}
 749 - 
 750 - 				cctl = pl08x_cctl_bits(cctl,
 751 - 						       bd.srcbus.buswidth,
 752 - 						       bd.dstbus.buswidth,
 753 - 						       tsize);
 754 - 
 755 - 				dev_vdbg(&pl08x->adev->dev,
 756 - 					"%s fill lli with single lli chunk of size 0x%08zx (remainder 0x%08zx)\n",
 757 - 					__func__, lli_len, bd.remainder);
 758 - 				pl08x_fill_lli_for_desc(&bd, num_llis++,
 759 - 					lli_len, cctl);
 760 - 				total_bytes += lli_len;
 761 - 			}
 762 - 
 763 - 
 764 - 			if (odd_bytes) {
 765 - 				/*
 766 - 				 * Creep past the boundary, maintaining
 767 - 				 * master alignment
 768 - 				 */
 769 - 				int j;
 770 - 				for (j = 0; (j < mbus->buswidth)
 771 - 						&& (bd.remainder); j++) {
 772 - 					cctl = pl08x_cctl_bits(cctl, 1, 1, 1);
 773 - 					dev_vdbg(&pl08x->adev->dev,
 774 - 						"%s align with boundary, single byte (remain 0x%08zx)\n",
 775 - 						__func__, bd.remainder);
 776 - 					pl08x_fill_lli_for_desc(&bd,
 777 - 						num_llis++, 1, cctl);
 778 - 					total_bytes++;
 779 - 				}
 780 - 			}
 680 + 			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
 681 + 					bd.dstbus.buswidth, 0);
 682 + 			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
 683 + 
break; 781 684 } 782 685 783 686 /* 784 - * Send any odd bytes 687 + * Send byte by byte for following cases 688 + * - Less than a bus width available 689 + * - until master bus is aligned 785 690 */ 786 - while (bd.remainder) { 787 - cctl = pl08x_cctl_bits(cctl, 1, 1, 1); 788 - dev_vdbg(&pl08x->adev->dev, 789 - "%s align with boundary, single odd byte (remain %zu)\n", 790 - __func__, bd.remainder); 791 - pl08x_fill_lli_for_desc(&bd, num_llis++, 1, cctl); 792 - total_bytes++; 691 + if (bd.remainder < mbus->buswidth) 692 + early_bytes = bd.remainder; 693 + else if ((mbus->addr) % (mbus->buswidth)) { 694 + early_bytes = mbus->buswidth - (mbus->addr) % 695 + (mbus->buswidth); 696 + if ((bd.remainder - early_bytes) < mbus->buswidth) 697 + early_bytes = bd.remainder; 793 698 } 794 - } 795 - if (total_bytes != txd->len) { 796 - dev_err(&pl08x->adev->dev, 797 - "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 798 - __func__, total_bytes, txd->len); 799 - return 0; 800 - } 801 699 802 - if (num_llis >= MAX_NUM_TSFR_LLIS) { 803 - dev_err(&pl08x->adev->dev, 804 - "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 805 - __func__, (u32) MAX_NUM_TSFR_LLIS); 806 - return 0; 700 + if (early_bytes) { 701 + dev_vdbg(&pl08x->adev->dev, 702 + "%s byte width LLIs (remain 0x%08x)\n", 703 + __func__, bd.remainder); 704 + prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++, 705 + &total_bytes); 706 + } 707 + 708 + if (bd.remainder) { 709 + /* 710 + * Master now aligned 711 + * - if slave is not then we must set its width down 712 + */ 713 + if (sbus->addr % sbus->buswidth) { 714 + dev_dbg(&pl08x->adev->dev, 715 + "%s set down bus width to one byte\n", 716 + __func__); 717 + 718 + sbus->buswidth = 1; 719 + } 720 + 721 + /* 722 + * Bytes transferred = tsize * src width, not 723 + * MIN(buswidths) 724 + */ 725 + max_bytes_per_lli = bd.srcbus.buswidth * 726 + PL080_CONTROL_TRANSFER_SIZE_MASK; 727 + dev_vdbg(&pl08x->adev->dev, 728 + "%s max 
bytes per lli = %zu\n", 729 + __func__, max_bytes_per_lli); 730 + 731 + /* 732 + * Make largest possible LLIs until less than one bus 733 + * width left 734 + */ 735 + while (bd.remainder > (mbus->buswidth - 1)) { 736 + size_t lli_len, tsize, width; 737 + 738 + /* 739 + * If enough left try to send max possible, 740 + * otherwise try to send the remainder 741 + */ 742 + lli_len = min(bd.remainder, max_bytes_per_lli); 743 + 744 + /* 745 + * Check against maximum bus alignment: 746 + * Calculate actual transfer size in relation to 747 + * bus width an get a maximum remainder of the 748 + * highest bus width - 1 749 + */ 750 + width = max(mbus->buswidth, sbus->buswidth); 751 + lli_len = (lli_len / width) * width; 752 + tsize = lli_len / bd.srcbus.buswidth; 753 + 754 + dev_vdbg(&pl08x->adev->dev, 755 + "%s fill lli with single lli chunk of " 756 + "size 0x%08zx (remainder 0x%08zx)\n", 757 + __func__, lli_len, bd.remainder); 758 + 759 + cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth, 760 + bd.dstbus.buswidth, tsize); 761 + pl08x_fill_lli_for_desc(&bd, num_llis++, 762 + lli_len, cctl); 763 + total_bytes += lli_len; 764 + } 765 + 766 + /* 767 + * Send any odd bytes 768 + */ 769 + if (bd.remainder) { 770 + dev_vdbg(&pl08x->adev->dev, 771 + "%s align with boundary, send odd bytes (remain %zu)\n", 772 + __func__, bd.remainder); 773 + prep_byte_width_lli(&bd, &cctl, bd.remainder, 774 + num_llis++, &total_bytes); 775 + } 776 + } 777 + 778 + if (total_bytes != dsg->len) { 779 + dev_err(&pl08x->adev->dev, 780 + "%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n", 781 + __func__, total_bytes, dsg->len); 782 + return 0; 783 + } 784 + 785 + if (num_llis >= MAX_NUM_TSFR_LLIS) { 786 + dev_err(&pl08x->adev->dev, 787 + "%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n", 788 + __func__, (u32) MAX_NUM_TSFR_LLIS); 789 + return 0; 790 + } 807 791 } 808 792 809 793 llis_va = txd->llis_va; ··· 800 856 static void pl08x_free_txd(struct 
pl08x_driver_data *pl08x, 801 857 struct pl08x_txd *txd) 802 858 { 859 + struct pl08x_sg *dsg, *_dsg; 860 + 803 861 /* Free the LLI */ 804 - dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 862 + if (txd->llis_va) 863 + dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus); 805 864 806 865 pl08x->pool_ctr--; 866 + 867 + list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) { 868 + list_del(&dsg->node); 869 + kfree(dsg); 870 + } 807 871 808 872 kfree(txd); 809 873 } ··· 869 917 * need, but for slaves the physical signals may be muxed! 870 918 * Can the platform allow us to use this channel? 871 919 */ 872 - if (plchan->slave && 873 - ch->signal < 0 && 874 - pl08x->pd->get_signal) { 920 + if (plchan->slave && pl08x->pd->get_signal) { 875 921 ret = pl08x->pd->get_signal(plchan); 876 922 if (ret < 0) { 877 923 dev_dbg(&pl08x->adev->dev, ··· 958 1008 * If slaves are relying on interrupts to signal completion this function 959 1009 * must not be called with interrupts disabled. 960 1010 */ 961 - static enum dma_status 962 - pl08x_dma_tx_status(struct dma_chan *chan, 963 - dma_cookie_t cookie, 964 - struct dma_tx_state *txstate) 1011 + static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan, 1012 + dma_cookie_t cookie, struct dma_tx_state *txstate) 965 1013 { 966 1014 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 967 1015 dma_cookie_t last_used; ··· 1201 1253 1202 1254 num_llis = pl08x_fill_llis_for_desc(pl08x, txd); 1203 1255 if (!num_llis) { 1204 - kfree(txd); 1256 + spin_lock_irqsave(&plchan->lock, flags); 1257 + pl08x_free_txd(pl08x, txd); 1258 + spin_unlock_irqrestore(&plchan->lock, flags); 1205 1259 return -EINVAL; 1206 1260 } 1207 1261 ··· 1251 1301 static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, 1252 1302 unsigned long flags) 1253 1303 { 1254 - struct pl08x_txd *txd = kzalloc(sizeof(struct pl08x_txd), GFP_NOWAIT); 1304 + struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); 1255 1305 1256 1306 if (txd) { 
1257 1307 dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); 1258 1308 txd->tx.flags = flags; 1259 1309 txd->tx.tx_submit = pl08x_tx_submit; 1260 1310 INIT_LIST_HEAD(&txd->node); 1311 + INIT_LIST_HEAD(&txd->dsg_list); 1261 1312 1262 1313 /* Always enable error and terminal interrupts */ 1263 1314 txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK | ··· 1277 1326 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1278 1327 struct pl08x_driver_data *pl08x = plchan->host; 1279 1328 struct pl08x_txd *txd; 1329 + struct pl08x_sg *dsg; 1280 1330 int ret; 1281 1331 1282 1332 txd = pl08x_get_txd(plchan, flags); ··· 1287 1335 return NULL; 1288 1336 } 1289 1337 1338 + dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1339 + if (!dsg) { 1340 + pl08x_free_txd(pl08x, txd); 1341 + dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n", 1342 + __func__); 1343 + return NULL; 1344 + } 1345 + list_add_tail(&dsg->node, &txd->dsg_list); 1346 + 1290 1347 txd->direction = DMA_NONE; 1291 - txd->src_addr = src; 1292 - txd->dst_addr = dest; 1293 - txd->len = len; 1348 + dsg->src_addr = src; 1349 + dsg->dst_addr = dest; 1350 + dsg->len = len; 1294 1351 1295 1352 /* Set platform data for m2m */ 1296 1353 txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; ··· 1328 1367 struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1329 1368 struct pl08x_driver_data *pl08x = plchan->host; 1330 1369 struct pl08x_txd *txd; 1331 - int ret; 1332 - 1333 - /* 1334 - * Current implementation ASSUMES only one sg 1335 - */ 1336 - if (sg_len != 1) { 1337 - dev_err(&pl08x->adev->dev, "%s prepared too long sglist\n", 1338 - __func__); 1339 - BUG(); 1340 - } 1370 + struct pl08x_sg *dsg; 1371 + struct scatterlist *sg; 1372 + dma_addr_t slave_addr; 1373 + int ret, tmp; 1341 1374 1342 1375 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1343 - __func__, sgl->length, plchan->name); 1376 + __func__, sgl->length, plchan->name); 1344 1377 1345 1378 txd = pl08x_get_txd(plchan, 
flags); 1346 1379 if (!txd) { ··· 1353 1398 * channel target address dynamically at runtime. 1354 1399 */ 1355 1400 txd->direction = direction; 1356 - txd->len = sgl->length; 1357 1401 1358 1402 if (direction == DMA_TO_DEVICE) { 1359 - txd->ccfg |= PL080_FLOW_MEM2PER << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1360 1403 txd->cctl = plchan->dst_cctl; 1361 - txd->src_addr = sgl->dma_address; 1362 - txd->dst_addr = plchan->dst_addr; 1404 + slave_addr = plchan->dst_addr; 1363 1405 } else if (direction == DMA_FROM_DEVICE) { 1364 - txd->ccfg |= PL080_FLOW_PER2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1365 1406 txd->cctl = plchan->src_cctl; 1366 - txd->src_addr = plchan->src_addr; 1367 - txd->dst_addr = sgl->dma_address; 1407 + slave_addr = plchan->src_addr; 1368 1408 } else { 1409 + pl08x_free_txd(pl08x, txd); 1369 1410 dev_err(&pl08x->adev->dev, 1370 1411 "%s direction unsupported\n", __func__); 1371 1412 return NULL; 1413 + } 1414 + 1415 + if (plchan->cd->device_fc) 1416 + tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER : 1417 + PL080_FLOW_PER2MEM_PER; 1418 + else 1419 + tmp = (direction == DMA_TO_DEVICE) ? 
PL080_FLOW_MEM2PER : 1420 + PL080_FLOW_PER2MEM; 1421 + 1422 + txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; 1423 + 1424 + for_each_sg(sgl, sg, sg_len, tmp) { 1425 + dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); 1426 + if (!dsg) { 1427 + pl08x_free_txd(pl08x, txd); 1428 + dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", 1429 + __func__); 1430 + return NULL; 1431 + } 1432 + list_add_tail(&dsg->node, &txd->dsg_list); 1433 + 1434 + dsg->len = sg_dma_len(sg); 1435 + if (direction == DMA_TO_DEVICE) { 1436 + dsg->src_addr = sg_phys(sg); 1437 + dsg->dst_addr = slave_addr; 1438 + } else { 1439 + dsg->src_addr = slave_addr; 1440 + dsg->dst_addr = sg_phys(sg); 1441 + } 1372 1442 } 1373 1443 1374 1444 ret = pl08x_prep_channel_resources(plchan, txd); ··· 1469 1489 1470 1490 bool pl08x_filter_id(struct dma_chan *chan, void *chan_id) 1471 1491 { 1472 - struct pl08x_dma_chan *plchan = to_pl08x_chan(chan); 1492 + struct pl08x_dma_chan *plchan; 1473 1493 char *name = chan_id; 1494 + 1495 + /* Reject channels for devices not bound to this driver */ 1496 + if (chan->device->dev->driver != &pl08x_amba_driver.drv) 1497 + return false; 1498 + 1499 + plchan = to_pl08x_chan(chan); 1474 1500 1475 1501 /* Check that the channel is not taken! 
*/ 1476 1502 if (!strcmp(plchan->name, name)) ··· 1493 1507 */ 1494 1508 static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1495 1509 { 1496 - u32 val; 1497 - 1498 - val = readl(pl08x->base + PL080_CONFIG); 1499 - val &= ~(PL080_CONFIG_M2_BE | PL080_CONFIG_M1_BE | PL080_CONFIG_ENABLE); 1500 - /* We implicitly clear bit 1 and that means little-endian mode */ 1501 - val |= PL080_CONFIG_ENABLE; 1502 - writel(val, pl08x->base + PL080_CONFIG); 1510 + writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1503 1511 } 1504 1512 1505 1513 static void pl08x_unmap_buffers(struct pl08x_txd *txd) 1506 1514 { 1507 1515 struct device *dev = txd->tx.chan->device->dev; 1516 + struct pl08x_sg *dsg; 1508 1517 1509 1518 if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { 1510 1519 if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) 1511 - dma_unmap_single(dev, txd->src_addr, txd->len, 1512 - DMA_TO_DEVICE); 1513 - else 1514 - dma_unmap_page(dev, txd->src_addr, txd->len, 1515 - DMA_TO_DEVICE); 1520 + list_for_each_entry(dsg, &txd->dsg_list, node) 1521 + dma_unmap_single(dev, dsg->src_addr, dsg->len, 1522 + DMA_TO_DEVICE); 1523 + else { 1524 + list_for_each_entry(dsg, &txd->dsg_list, node) 1525 + dma_unmap_page(dev, dsg->src_addr, dsg->len, 1526 + DMA_TO_DEVICE); 1527 + } 1516 1528 } 1517 1529 if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { 1518 1530 if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) 1519 - dma_unmap_single(dev, txd->dst_addr, txd->len, 1520 - DMA_FROM_DEVICE); 1531 + list_for_each_entry(dsg, &txd->dsg_list, node) 1532 + dma_unmap_single(dev, dsg->dst_addr, dsg->len, 1533 + DMA_FROM_DEVICE); 1521 1534 else 1522 - dma_unmap_page(dev, txd->dst_addr, txd->len, 1523 - DMA_FROM_DEVICE); 1535 + list_for_each_entry(dsg, &txd->dsg_list, node) 1536 + dma_unmap_page(dev, dsg->dst_addr, dsg->len, 1537 + DMA_FROM_DEVICE); 1524 1538 } 1525 1539 } 1526 1540 ··· 1575 1589 */ 1576 1590 list_for_each_entry(waiting, &pl08x->memcpy.channels, 1577 1591 chan.device_node) { 1578 - 
if (waiting->state == PL08X_CHAN_WAITING && 1579 - waiting->waiting != NULL) { 1592 + if (waiting->state == PL08X_CHAN_WAITING && 1593 + waiting->waiting != NULL) { 1580 1594 int ret; 1581 1595 1582 1596 /* This should REALLY not fail now */ ··· 1616 1630 static irqreturn_t pl08x_irq(int irq, void *dev) 1617 1631 { 1618 1632 struct pl08x_driver_data *pl08x = dev; 1619 - u32 mask = 0; 1620 - u32 val; 1621 - int i; 1633 + u32 mask = 0, err, tc, i; 1622 1634 1623 - val = readl(pl08x->base + PL080_ERR_STATUS); 1624 - if (val) { 1625 - /* An error interrupt (on one or more channels) */ 1626 - dev_err(&pl08x->adev->dev, 1627 - "%s error interrupt, register value 0x%08x\n", 1628 - __func__, val); 1629 - /* 1630 - * Simply clear ALL PL08X error interrupts, 1631 - * regardless of channel and cause 1632 - * FIXME: should be 0x00000003 on PL081 really. 1633 - */ 1634 - writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR); 1635 + /* check & clear - ERR & TC interrupts */ 1636 + err = readl(pl08x->base + PL080_ERR_STATUS); 1637 + if (err) { 1638 + dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n", 1639 + __func__, err); 1640 + writel(err, pl08x->base + PL080_ERR_CLEAR); 1635 1641 } 1636 - val = readl(pl08x->base + PL080_INT_STATUS); 1642 + tc = readl(pl08x->base + PL080_INT_STATUS); 1643 + if (tc) 1644 + writel(tc, pl08x->base + PL080_TC_CLEAR); 1645 + 1646 + if (!err && !tc) 1647 + return IRQ_NONE; 1648 + 1637 1649 for (i = 0; i < pl08x->vd->channels; i++) { 1638 - if ((1 << i) & val) { 1650 + if (((1 << i) & err) || ((1 << i) & tc)) { 1639 1651 /* Locate physical channel */ 1640 1652 struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; 1641 1653 struct pl08x_dma_chan *plchan = phychan->serving; 1642 1654 1655 + if (!plchan) { 1656 + dev_err(&pl08x->adev->dev, 1657 + "%s Error TC interrupt on unused channel: 0x%08x\n", 1658 + __func__, i); 1659 + continue; 1660 + } 1661 + 1643 1662 /* Schedule tasklet on this channel */ 1644 1663 
tasklet_schedule(&plchan->tasklet); 1645 - 1646 1664 mask |= (1 << i); 1647 1665 } 1648 1666 } 1649 - /* Clear only the terminal interrupts on channels we processed */ 1650 - writel(mask, pl08x->base + PL080_TC_CLEAR); 1651 1667 1652 1668 return mask ? IRQ_HANDLED : IRQ_NONE; 1653 1669 } ··· 1673 1685 * Make a local wrapper to hold required data 1674 1686 */ 1675 1687 static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, 1676 - struct dma_device *dmadev, 1677 - unsigned int channels, 1678 - bool slave) 1688 + struct dma_device *dmadev, unsigned int channels, bool slave) 1679 1689 { 1680 1690 struct pl08x_dma_chan *chan; 1681 1691 int i; ··· 1686 1700 * to cope with that situation. 1687 1701 */ 1688 1702 for (i = 0; i < channels; i++) { 1689 - chan = kzalloc(sizeof(struct pl08x_dma_chan), GFP_KERNEL); 1703 + chan = kzalloc(sizeof(*chan), GFP_KERNEL); 1690 1704 if (!chan) { 1691 1705 dev_err(&pl08x->adev->dev, 1692 1706 "%s no memory for channel\n", __func__); ··· 1714 1728 kfree(chan); 1715 1729 continue; 1716 1730 } 1717 - dev_info(&pl08x->adev->dev, 1731 + dev_dbg(&pl08x->adev->dev, 1718 1732 "initialize virtual channel \"%s\"\n", 1719 1733 chan->name); 1720 1734 ··· 1823 1837 static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x) 1824 1838 { 1825 1839 /* Expose a simple debugfs interface to view all clocks */ 1826 - (void) debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO, 1827 - NULL, pl08x, 1828 - &pl08x_debugfs_operations); 1840 + (void) debugfs_create_file(dev_name(&pl08x->adev->dev), 1841 + S_IFREG | S_IRUGO, NULL, pl08x, 1842 + &pl08x_debugfs_operations); 1829 1843 } 1830 1844 1831 1845 #else ··· 1846 1860 return ret; 1847 1861 1848 1862 /* Create the driver state holder */ 1849 - pl08x = kzalloc(sizeof(struct pl08x_driver_data), GFP_KERNEL); 1863 + pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL); 1850 1864 if (!pl08x) { 1851 1865 ret = -ENOMEM; 1852 1866 goto out_no_pl08x; 1853 1867 } 1868 + 1869 + 
pm_runtime_set_active(&adev->dev); 1870 + pm_runtime_enable(&adev->dev); 1854 1871 1855 1872 /* Initialize memcpy engine */ 1856 1873 dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); ··· 1928 1939 } 1929 1940 1930 1941 /* Initialize physical channels */ 1931 - pl08x->phy_chans = kmalloc((vd->channels * sizeof(struct pl08x_phy_chan)), 1942 + pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)), 1932 1943 GFP_KERNEL); 1933 1944 if (!pl08x->phy_chans) { 1934 1945 dev_err(&adev->dev, "%s failed to allocate " ··· 1945 1956 spin_lock_init(&ch->lock); 1946 1957 ch->serving = NULL; 1947 1958 ch->signal = -1; 1948 - dev_info(&adev->dev, 1949 - "physical channel %d is %s\n", i, 1950 - pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 1959 + dev_dbg(&adev->dev, "physical channel %d is %s\n", 1960 + i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 1951 1961 } 1952 1962 1953 1963 /* Register as many memcpy channels as there are physical channels */ ··· 1962 1974 1963 1975 /* Register slave channels */ 1964 1976 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 1965 - pl08x->pd->num_slave_channels, 1966 - true); 1977 + pl08x->pd->num_slave_channels, true); 1967 1978 if (ret <= 0) { 1968 1979 dev_warn(&pl08x->adev->dev, 1969 1980 "%s failed to enumerate slave channels - %d\n", ··· 1992 2005 dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n", 1993 2006 amba_part(adev), amba_rev(adev), 1994 2007 (unsigned long long)adev->res.start, adev->irq[0]); 2008 + 2009 + pm_runtime_put(&adev->dev); 1995 2010 return 0; 1996 2011 1997 2012 out_no_slave_reg: ··· 2012 2023 dma_pool_destroy(pl08x->pool); 2013 2024 out_no_lli_pool: 2014 2025 out_no_platdata: 2026 + pm_runtime_put(&adev->dev); 2027 + pm_runtime_disable(&adev->dev); 2028 + 2015 2029 kfree(pl08x); 2016 2030 out_no_pl08x: 2017 2031 amba_release_regions(adev);
+127 -37
drivers/dma/at_hdmac.c
··· 107 107 { 108 108 struct at_desc *desc, *_desc; 109 109 struct at_desc *ret = NULL; 110 + unsigned long flags; 110 111 unsigned int i = 0; 111 112 LIST_HEAD(tmp_list); 112 113 113 - spin_lock_bh(&atchan->lock); 114 + spin_lock_irqsave(&atchan->lock, flags); 114 115 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { 115 116 i++; 116 117 if (async_tx_test_ack(&desc->txd)) { ··· 122 121 dev_dbg(chan2dev(&atchan->chan_common), 123 122 "desc %p not ACKed\n", desc); 124 123 } 125 - spin_unlock_bh(&atchan->lock); 124 + spin_unlock_irqrestore(&atchan->lock, flags); 126 125 dev_vdbg(chan2dev(&atchan->chan_common), 127 126 "scanned %u descriptors on freelist\n", i); 128 127 ··· 130 129 if (!ret) { 131 130 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC); 132 131 if (ret) { 133 - spin_lock_bh(&atchan->lock); 132 + spin_lock_irqsave(&atchan->lock, flags); 134 133 atchan->descs_allocated++; 135 - spin_unlock_bh(&atchan->lock); 134 + spin_unlock_irqrestore(&atchan->lock, flags); 136 135 } else { 137 136 dev_err(chan2dev(&atchan->chan_common), 138 137 "not enough descriptors available\n"); ··· 151 150 { 152 151 if (desc) { 153 152 struct at_desc *child; 153 + unsigned long flags; 154 154 155 - spin_lock_bh(&atchan->lock); 155 + spin_lock_irqsave(&atchan->lock, flags); 156 156 list_for_each_entry(child, &desc->tx_list, desc_node) 157 157 dev_vdbg(chan2dev(&atchan->chan_common), 158 158 "moving child desc %p to freelist\n", ··· 162 160 dev_vdbg(chan2dev(&atchan->chan_common), 163 161 "moving desc %p to freelist\n", desc); 164 162 list_add(&desc->desc_node, &atchan->free_list); 165 - spin_unlock_bh(&atchan->lock); 163 + spin_unlock_irqrestore(&atchan->lock, flags); 166 164 } 167 165 } 168 166 ··· 301 299 302 300 /* for cyclic transfers, 303 301 * no need to replay callback function while stopping */ 304 - if (!test_bit(ATC_IS_CYCLIC, &atchan->status)) { 302 + if (!atc_chan_is_cyclic(atchan)) { 305 303 dma_async_tx_callback callback = 
txd->callback; 306 304 void *param = txd->callback_param; 307 305 ··· 473 471 static void atc_tasklet(unsigned long data) 474 472 { 475 473 struct at_dma_chan *atchan = (struct at_dma_chan *)data; 474 + unsigned long flags; 476 475 477 - spin_lock(&atchan->lock); 476 + spin_lock_irqsave(&atchan->lock, flags); 478 477 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status)) 479 478 atc_handle_error(atchan); 480 - else if (test_bit(ATC_IS_CYCLIC, &atchan->status)) 479 + else if (atc_chan_is_cyclic(atchan)) 481 480 atc_handle_cyclic(atchan); 482 481 else 483 482 atc_advance_work(atchan); 484 483 485 - spin_unlock(&atchan->lock); 484 + spin_unlock_irqrestore(&atchan->lock, flags); 486 485 } 487 486 488 487 static irqreturn_t at_dma_interrupt(int irq, void *dev_id) ··· 542 539 struct at_desc *desc = txd_to_at_desc(tx); 543 540 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan); 544 541 dma_cookie_t cookie; 542 + unsigned long flags; 545 543 546 - spin_lock_bh(&atchan->lock); 544 + spin_lock_irqsave(&atchan->lock, flags); 547 545 cookie = atc_assign_cookie(atchan, desc); 548 546 549 547 if (list_empty(&atchan->active_list)) { ··· 558 554 list_add_tail(&desc->desc_node, &atchan->queue); 559 555 } 560 556 561 - spin_unlock_bh(&atchan->lock); 557 + spin_unlock_irqrestore(&atchan->lock, flags); 562 558 563 559 return cookie; 564 560 } ··· 931 927 struct at_dma_chan *atchan = to_at_dma_chan(chan); 932 928 struct at_dma *atdma = to_at_dma(chan->device); 933 929 int chan_id = atchan->chan_common.chan_id; 930 + unsigned long flags; 934 931 935 932 LIST_HEAD(list); 936 933 937 934 dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd); 938 935 939 936 if (cmd == DMA_PAUSE) { 940 - spin_lock_bh(&atchan->lock); 937 + spin_lock_irqsave(&atchan->lock, flags); 941 938 942 939 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id)); 943 940 set_bit(ATC_IS_PAUSED, &atchan->status); 944 941 945 - spin_unlock_bh(&atchan->lock); 942 + spin_unlock_irqrestore(&atchan->lock, flags); 946 943 } else if 
(cmd == DMA_RESUME) { 947 - if (!test_bit(ATC_IS_PAUSED, &atchan->status)) 944 + if (!atc_chan_is_paused(atchan)) 948 945 return 0; 949 946 950 - spin_lock_bh(&atchan->lock); 947 + spin_lock_irqsave(&atchan->lock, flags); 951 948 952 949 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id)); 953 950 clear_bit(ATC_IS_PAUSED, &atchan->status); 954 951 955 - spin_unlock_bh(&atchan->lock); 952 + spin_unlock_irqrestore(&atchan->lock, flags); 956 953 } else if (cmd == DMA_TERMINATE_ALL) { 957 954 struct at_desc *desc, *_desc; 958 955 /* ··· 962 957 * channel. We still have to poll the channel enable bit due 963 958 * to AHB/HSB limitations. 964 959 */ 965 - spin_lock_bh(&atchan->lock); 960 + spin_lock_irqsave(&atchan->lock, flags); 966 961 967 962 /* disabling channel: must also remove suspend state */ 968 963 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask); ··· 983 978 /* if channel dedicated to cyclic operations, free it */ 984 979 clear_bit(ATC_IS_CYCLIC, &atchan->status); 985 980 986 - spin_unlock_bh(&atchan->lock); 981 + spin_unlock_irqrestore(&atchan->lock, flags); 987 982 } else { 988 983 return -ENXIO; 989 984 } ··· 1009 1004 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1010 1005 dma_cookie_t last_used; 1011 1006 dma_cookie_t last_complete; 1007 + unsigned long flags; 1012 1008 enum dma_status ret; 1013 1009 1014 - spin_lock_bh(&atchan->lock); 1010 + spin_lock_irqsave(&atchan->lock, flags); 1015 1011 1016 1012 last_complete = atchan->completed_cookie; 1017 1013 last_used = chan->cookie; ··· 1027 1021 ret = dma_async_is_complete(cookie, last_complete, last_used); 1028 1022 } 1029 1023 1030 - spin_unlock_bh(&atchan->lock); 1024 + spin_unlock_irqrestore(&atchan->lock, flags); 1031 1025 1032 1026 if (ret != DMA_SUCCESS) 1033 1027 dma_set_tx_state(txstate, last_complete, last_used, ··· 1035 1029 else 1036 1030 dma_set_tx_state(txstate, last_complete, last_used, 0); 1037 1031 1038 - if (test_bit(ATC_IS_PAUSED, &atchan->status)) 1032 + if 
(atc_chan_is_paused(atchan)) 1039 1033 ret = DMA_PAUSED; 1040 1034 1041 1035 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n", ··· 1052 1046 static void atc_issue_pending(struct dma_chan *chan) 1053 1047 { 1054 1048 struct at_dma_chan *atchan = to_at_dma_chan(chan); 1049 + unsigned long flags; 1055 1050 1056 1051 dev_vdbg(chan2dev(chan), "issue_pending\n"); 1057 1052 1058 1053 /* Not needed for cyclic transfers */ 1059 - if (test_bit(ATC_IS_CYCLIC, &atchan->status)) 1054 + if (atc_chan_is_cyclic(atchan)) 1060 1055 return; 1061 1056 1062 - spin_lock_bh(&atchan->lock); 1057 + spin_lock_irqsave(&atchan->lock, flags); 1063 1058 if (!atc_chan_is_enabled(atchan)) { 1064 1059 atc_advance_work(atchan); 1065 1060 } 1066 - spin_unlock_bh(&atchan->lock); 1061 + spin_unlock_irqrestore(&atchan->lock, flags); 1067 1062 } 1068 1063 1069 1064 /** ··· 1080 1073 struct at_dma *atdma = to_at_dma(chan->device); 1081 1074 struct at_desc *desc; 1082 1075 struct at_dma_slave *atslave; 1076 + unsigned long flags; 1083 1077 int i; 1084 1078 u32 cfg; 1085 1079 LIST_HEAD(tmp_list); ··· 1124 1116 list_add_tail(&desc->desc_node, &tmp_list); 1125 1117 } 1126 1118 1127 - spin_lock_bh(&atchan->lock); 1119 + spin_lock_irqsave(&atchan->lock, flags); 1128 1120 atchan->descs_allocated = i; 1129 1121 list_splice(&tmp_list, &atchan->free_list); 1130 1122 atchan->completed_cookie = chan->cookie = 1; 1131 - spin_unlock_bh(&atchan->lock); 1123 + spin_unlock_irqrestore(&atchan->lock, flags); 1132 1124 1133 1125 /* channel parameters */ 1134 1126 channel_writel(atchan, CFG, cfg); ··· 1268 1260 1269 1261 /* initialize channels related values */ 1270 1262 INIT_LIST_HEAD(&atdma->dma_common.channels); 1271 - for (i = 0; i < pdata->nr_channels; i++, atdma->dma_common.chancnt++) { 1263 + for (i = 0; i < pdata->nr_channels; i++) { 1272 1264 struct at_dma_chan *atchan = &atdma->chan[i]; 1273 1265 1274 1266 atchan->chan_common.device = &atdma->dma_common; 1275 1267 atchan->chan_common.cookie = 
atchan->completed_cookie = 1; 1276 - atchan->chan_common.chan_id = i; 1277 1268 list_add_tail(&atchan->chan_common.device_node, 1278 1269 &atdma->dma_common.channels); 1279 1270 ··· 1300 1293 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask)) 1301 1294 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy; 1302 1295 1303 - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) 1296 + if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) { 1304 1297 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg; 1305 - 1306 - if (dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) 1298 + /* controller can do slave DMA: can trigger cyclic transfers */ 1299 + dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask); 1307 1300 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic; 1308 - 1309 - if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) || 1310 - dma_has_cap(DMA_CYCLIC, atdma->dma_common.cap_mask)) 1311 1301 atdma->dma_common.device_control = atc_control; 1302 + } 1312 1303 1313 1304 dma_writel(atdma, EN, AT_DMA_ENABLE); 1314 1305 1315 1306 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n", 1316 1307 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "", 1317 1308 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? 
"slave " : "", 1318 - atdma->dma_common.chancnt); 1309 + pdata->nr_channels); 1319 1310 1320 1311 dma_async_device_register(&atdma->dma_common); 1321 1312 ··· 1382 1377 clk_disable(atdma->clk); 1383 1378 } 1384 1379 1380 + static int at_dma_prepare(struct device *dev) 1381 + { 1382 + struct platform_device *pdev = to_platform_device(dev); 1383 + struct at_dma *atdma = platform_get_drvdata(pdev); 1384 + struct dma_chan *chan, *_chan; 1385 + 1386 + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1387 + device_node) { 1388 + struct at_dma_chan *atchan = to_at_dma_chan(chan); 1389 + /* wait for transaction completion (except in cyclic case) */ 1390 + if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan)) 1391 + return -EAGAIN; 1392 + } 1393 + return 0; 1394 + } 1395 + 1396 + static void atc_suspend_cyclic(struct at_dma_chan *atchan) 1397 + { 1398 + struct dma_chan *chan = &atchan->chan_common; 1399 + 1400 + /* Channel should be paused by user 1401 + * do it anyway even if it is not done already */ 1402 + if (!atc_chan_is_paused(atchan)) { 1403 + dev_warn(chan2dev(chan), 1404 + "cyclic channel not paused, should be done by channel user\n"); 1405 + atc_control(chan, DMA_PAUSE, 0); 1406 + } 1407 + 1408 + /* now preserve additional data for cyclic operations */ 1409 + /* next descriptor address in the cyclic list */ 1410 + atchan->save_dscr = channel_readl(atchan, DSCR); 1411 + 1412 + vdbg_dump_regs(atchan); 1413 + } 1414 + 1385 1415 static int at_dma_suspend_noirq(struct device *dev) 1386 1416 { 1387 1417 struct platform_device *pdev = to_platform_device(dev); 1388 1418 struct at_dma *atdma = platform_get_drvdata(pdev); 1419 + struct dma_chan *chan, *_chan; 1389 1420 1390 - at_dma_off(platform_get_drvdata(pdev)); 1421 + /* preserve data */ 1422 + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1423 + device_node) { 1424 + struct at_dma_chan *atchan = to_at_dma_chan(chan); 1425 + 1426 + if (atc_chan_is_cyclic(atchan)) 1427 + 
atc_suspend_cyclic(atchan); 1428 + atchan->save_cfg = channel_readl(atchan, CFG); 1429 + } 1430 + atdma->save_imr = dma_readl(atdma, EBCIMR); 1431 + 1432 + /* disable DMA controller */ 1433 + at_dma_off(atdma); 1391 1434 clk_disable(atdma->clk); 1392 1435 return 0; 1436 + } 1437 + 1438 + static void atc_resume_cyclic(struct at_dma_chan *atchan) 1439 + { 1440 + struct at_dma *atdma = to_at_dma(atchan->chan_common.device); 1441 + 1442 + /* restore channel status for cyclic descriptors list: 1443 + * next descriptor in the cyclic list at the time of suspend */ 1444 + channel_writel(atchan, SADDR, 0); 1445 + channel_writel(atchan, DADDR, 0); 1446 + channel_writel(atchan, CTRLA, 0); 1447 + channel_writel(atchan, CTRLB, 0); 1448 + channel_writel(atchan, DSCR, atchan->save_dscr); 1449 + dma_writel(atdma, CHER, atchan->mask); 1450 + 1451 + /* channel pause status should be removed by channel user 1452 + * We cannot take the initiative to do it here */ 1453 + 1454 + vdbg_dump_regs(atchan); 1393 1455 } 1394 1456 1395 1457 static int at_dma_resume_noirq(struct device *dev) 1396 1458 { 1397 1459 struct platform_device *pdev = to_platform_device(dev); 1398 1460 struct at_dma *atdma = platform_get_drvdata(pdev); 1461 + struct dma_chan *chan, *_chan; 1399 1462 1463 + /* bring back DMA controller */ 1400 1464 clk_enable(atdma->clk); 1401 1465 dma_writel(atdma, EN, AT_DMA_ENABLE); 1466 + 1467 + /* clear any pending interrupt */ 1468 + while (dma_readl(atdma, EBCISR)) 1469 + cpu_relax(); 1470 + 1471 + /* restore saved data */ 1472 + dma_writel(atdma, EBCIER, atdma->save_imr); 1473 + list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels, 1474 + device_node) { 1475 + struct at_dma_chan *atchan = to_at_dma_chan(chan); 1476 + 1477 + channel_writel(atchan, CFG, atchan->save_cfg); 1478 + if (atc_chan_is_cyclic(atchan)) 1479 + atc_resume_cyclic(atchan); 1480 + } 1402 1481 return 0; 1403 1482 } 1404 1483 1405 1484 static const struct dev_pm_ops at_dma_dev_pm_ops = { 1485 + 
.prepare = at_dma_prepare, 1406 1486 .suspend_noirq = at_dma_suspend_noirq, 1407 1487 .resume_noirq = at_dma_resume_noirq, 1408 1488 };
+24
drivers/dma/at_hdmac_regs.h
··· 204 204 * @status: transmit status information from irq/prep* functions 205 205 * to tasklet (use atomic operations) 206 206 * @tasklet: bottom half to finish transaction work 207 + * @save_cfg: configuration register that is saved on suspend/resume cycle 208 + * @save_dscr: for cyclic operations, preserve next descriptor address in 209 + * the cyclic list on suspend/resume cycle 207 210 * @lock: serializes enqueue/dequeue operations to descriptors lists 208 211 * @completed_cookie: identifier for the most recently completed operation 209 212 * @active_list: list of descriptors dmaengine is being running on ··· 221 218 u8 mask; 222 219 unsigned long status; 223 220 struct tasklet_struct tasklet; 221 + u32 save_cfg; 222 + u32 save_dscr; 224 223 225 224 spinlock_t lock; 226 225 ··· 253 248 * @chan_common: common dmaengine dma_device object members 254 249 * @ch_regs: memory mapped register base 255 250 * @clk: dma controller clock 251 + * @save_imr: interrupt mask register that is saved on suspend/resume cycle 256 252 * @all_chan_mask: all channels availlable in a mask 257 253 * @dma_desc_pool: base of DMA descriptor region (DMA address) 258 254 * @chan: channels table to store at_dma_chan structures ··· 262 256 struct dma_device dma_common; 263 257 void __iomem *regs; 264 258 struct clk *clk; 259 + u32 save_imr; 265 260 266 261 u8 all_chan_mask; 267 262 ··· 362 355 return !!(dma_readl(atdma, CHSR) & atchan->mask); 363 356 } 364 357 358 + /** 359 + * atc_chan_is_paused - test channel pause/resume status 360 + * @atchan: channel we want to test status 361 + */ 362 + static inline int atc_chan_is_paused(struct at_dma_chan *atchan) 363 + { 364 + return test_bit(ATC_IS_PAUSED, &atchan->status); 365 + } 366 + 367 + /** 368 + * atc_chan_is_cyclic - test if given channel has cyclic property set 369 + * @atchan: channel we want to test status 370 + */ 371 + static inline int atc_chan_is_cyclic(struct at_dma_chan *atchan) 372 + { 373 + return test_bit(ATC_IS_CYCLIC, 
&atchan->status); 374 + } 365 375 366 376 /** 367 377 * set_desc_eol - set end-of-link to descriptor so it will end transfer
+21 -2
drivers/dma/dmatest.c
··· 10 10 #include <linux/delay.h> 11 11 #include <linux/dma-mapping.h> 12 12 #include <linux/dmaengine.h> 13 + #include <linux/freezer.h> 13 14 #include <linux/init.h> 14 15 #include <linux/kthread.h> 15 16 #include <linux/module.h> ··· 252 251 int i; 253 252 254 253 thread_name = current->comm; 254 + set_freezable_with_signal(); 255 255 256 256 ret = -ENOMEM; 257 257 ··· 307 305 dma_addr_t dma_srcs[src_cnt]; 308 306 dma_addr_t dma_dsts[dst_cnt]; 309 307 struct completion cmp; 310 - unsigned long tmo = msecs_to_jiffies(timeout); 308 + unsigned long start, tmo, end = 0 /* compiler... */; 309 + bool reload = true; 311 310 u8 align = 0; 312 311 313 312 total_tests++; ··· 407 404 } 408 405 dma_async_issue_pending(chan); 409 406 410 - tmo = wait_for_completion_timeout(&cmp, tmo); 407 + do { 408 + start = jiffies; 409 + if (reload) 410 + end = start + msecs_to_jiffies(timeout); 411 + else if (end <= start) 412 + end = start + 1; 413 + tmo = wait_for_completion_interruptible_timeout(&cmp, 414 + end - start); 415 + reload = try_to_freeze(); 416 + } while (tmo == -ERESTARTSYS); 417 + 411 418 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); 412 419 413 420 if (tmo == 0) { ··· 490 477 pr_notice("%s: terminating after %u tests, %u failures (status %d)\n", 491 478 thread_name, total_tests, failed_tests, ret); 492 479 480 + /* terminate all transfers on specified channels */ 481 + chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); 493 482 if (iterations > 0) 494 483 while (!kthread_should_stop()) { 495 484 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wait_dmatest_exit); ··· 514 499 list_del(&thread->node); 515 500 kfree(thread); 516 501 } 502 + 503 + /* terminate all transfers on specified channels */ 504 + dtc->chan->device->device_control(dtc->chan, DMA_TERMINATE_ALL, 0); 505 + 517 506 kfree(dtc); 518 507 } 519 508
+2 -3
drivers/dma/dw_dmac.c
··· 1407 1407 dw->all_chan_mask = (1 << pdata->nr_channels) - 1; 1408 1408 1409 1409 INIT_LIST_HEAD(&dw->dma.channels); 1410 - for (i = 0; i < pdata->nr_channels; i++, dw->dma.chancnt++) { 1410 + for (i = 0; i < pdata->nr_channels; i++) { 1411 1411 struct dw_dma_chan *dwc = &dw->chan[i]; 1412 1412 1413 1413 dwc->chan.device = &dw->dma; 1414 1414 dwc->chan.cookie = dwc->completed = 1; 1415 - dwc->chan.chan_id = i; 1416 1415 if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING) 1417 1416 list_add_tail(&dwc->chan.device_node, 1418 1417 &dw->dma.channels); ··· 1467 1468 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1468 1469 1469 1470 printk(KERN_INFO "%s: DesignWare DMA Controller, %d channels\n", 1470 - dev_name(&pdev->dev), dw->dma.chancnt); 1471 + dev_name(&pdev->dev), pdata->nr_channels); 1471 1472 1472 1473 dma_async_device_register(&dw->dma); 1473 1474
+1
drivers/dma/ep93xx_dma.c
··· 22 22 #include <linux/init.h> 23 23 #include <linux/interrupt.h> 24 24 #include <linux/dmaengine.h> 25 + #include <linux/module.h> 25 26 #include <linux/platform_device.h> 26 27 #include <linux/slab.h> 27 28
+1
drivers/dma/imx-dma.c
··· 14 14 * http://www.gnu.org/copyleft/gpl.html 15 15 */ 16 16 #include <linux/init.h> 17 + #include <linux/module.h> 17 18 #include <linux/types.h> 18 19 #include <linux/mm.h> 19 20 #include <linux/interrupt.h>
+39 -9
drivers/dma/imx-sdma.c
··· 18 18 */ 19 19 20 20 #include <linux/init.h> 21 + #include <linux/module.h> 21 22 #include <linux/types.h> 22 23 #include <linux/mm.h> 23 24 #include <linux/interrupt.h> ··· 319 318 dma_addr_t context_phys; 320 319 struct dma_device dma_device; 321 320 struct clk *clk; 321 + struct mutex channel_0_lock; 322 322 struct sdma_script_start_addrs *script_addrs; 323 323 }; 324 324 ··· 417 415 dma_addr_t buf_phys; 418 416 int ret; 419 417 418 + mutex_lock(&sdma->channel_0_lock); 419 + 420 420 buf_virt = dma_alloc_coherent(NULL, 421 421 size, 422 422 &buf_phys, GFP_KERNEL); 423 - if (!buf_virt) 424 - return -ENOMEM; 423 + if (!buf_virt) { 424 + ret = -ENOMEM; 425 + goto err_out; 426 + } 425 427 426 428 bd0->mode.command = C0_SETPM; 427 429 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; ··· 438 432 ret = sdma_run_channel(&sdma->channel[0]); 439 433 440 434 dma_free_coherent(NULL, size, buf_virt, buf_phys); 435 + 436 + err_out: 437 + mutex_unlock(&sdma->channel_0_lock); 441 438 442 439 return ret; 443 440 } ··· 665 656 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", sdmac->event_mask0); 666 657 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", sdmac->event_mask1); 667 658 659 + mutex_lock(&sdma->channel_0_lock); 660 + 668 661 memset(context, 0, sizeof(*context)); 669 662 context->channel_state.pc = load_address; 670 663 ··· 686 675 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; 687 676 688 677 ret = sdma_run_channel(&sdma->channel[0]); 678 + 679 + mutex_unlock(&sdma->channel_0_lock); 689 680 690 681 return ret; 691 682 } ··· 1144 1131 saddr_arr[i] = addr_arr[i]; 1145 1132 } 1146 1133 1147 - static int __init sdma_get_firmware(struct sdma_engine *sdma, 1148 - const char *fw_name) 1134 + static void sdma_load_firmware(const struct firmware *fw, void *context) 1149 1135 { 1150 - const struct firmware *fw; 1136 + struct sdma_engine *sdma = context; 1151 1137 const struct sdma_firmware_header *header; 1152 - int ret; 1153 1138 const struct 
sdma_script_start_addrs *addr; 1154 1139 unsigned short *ram_code; 1155 1140 1156 - ret = request_firmware(&fw, fw_name, sdma->dev); 1157 - if (ret) 1158 - return ret; 1141 + if (!fw) { 1142 + dev_err(sdma->dev, "firmware not found\n"); 1143 + return; 1144 + } 1159 1145 1160 1146 if (fw->size < sizeof(*header)) 1161 1147 goto err_firmware; ··· 1184 1172 1185 1173 err_firmware: 1186 1174 release_firmware(fw); 1175 + } 1176 + 1177 + static int __init sdma_get_firmware(struct sdma_engine *sdma, 1178 + const char *fw_name) 1179 + { 1180 + int ret; 1181 + 1182 + ret = request_firmware_nowait(THIS_MODULE, 1183 + FW_ACTION_HOTPLUG, fw_name, sdma->dev, 1184 + GFP_KERNEL, sdma, sdma_load_firmware); 1187 1185 1188 1186 return ret; 1189 1187 } ··· 1291 1269 struct sdma_platform_data *pdata = pdev->dev.platform_data; 1292 1270 int i; 1293 1271 struct sdma_engine *sdma; 1272 + s32 *saddr_arr; 1294 1273 1295 1274 sdma = kzalloc(sizeof(*sdma), GFP_KERNEL); 1296 1275 if (!sdma) 1297 1276 return -ENOMEM; 1277 + 1278 + mutex_init(&sdma->channel_0_lock); 1298 1279 1299 1280 sdma->dev = &pdev->dev; 1300 1281 ··· 1334 1309 ret = -ENOMEM; 1335 1310 goto err_alloc; 1336 1311 } 1312 + 1313 + /* initially no scripts available */ 1314 + saddr_arr = (s32 *)sdma->script_addrs; 1315 + for (i = 0; i < SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1; i++) 1316 + saddr_arr[i] = -EINVAL; 1337 1317 1338 1318 if (of_id) 1339 1319 pdev->id_entry = of_id->data;
+3 -6
drivers/dma/intel_mid_dma.c
··· 115 115 116 116 /** 117 117 * dmac1_mask_periphral_intr - mask the periphral interrupt 118 - * @midc: dma channel for which masking is required 118 + * @mid: dma device for which masking is required 119 119 * 120 120 * Masks the DMA periphral interrupt 121 121 * this is valid for DMAC1 family controllers only 122 122 * This controller should have periphral mask registers already mapped 123 123 */ 124 - static void dmac1_mask_periphral_intr(struct intel_mid_dma_chan *midc) 124 + static void dmac1_mask_periphral_intr(struct middma_device *mid) 125 125 { 126 126 u32 pimr; 127 - struct middma_device *mid = to_middma_device(midc->chan.device); 128 127 129 128 if (mid->pimr_mask) { 130 129 pimr = readl(mid->mask_reg + LNW_PERIPHRAL_MASK); ··· 183 184 static void disable_dma_interrupt(struct intel_mid_dma_chan *midc) 184 185 { 185 186 /*Check LPE PISR, make sure fwd is disabled*/ 186 - dmac1_mask_periphral_intr(midc); 187 187 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_BLOCK); 188 188 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_TFR); 189 189 iowrite32(MASK_INTR_REG(midc->ch_id), midc->dma_base + MASK_ERR); ··· 1112 1114 1113 1115 midch->chan.device = &dma->common; 1114 1116 midch->chan.cookie = 1; 1115 - midch->chan.chan_id = i; 1116 1117 midch->ch_id = dma->chan_base + i; 1117 1118 pr_debug("MDMA:Init CH %d, ID %d\n", i, midch->ch_id); 1118 1119 ··· 1147 1150 dma_cap_set(DMA_SLAVE, dma->common.cap_mask); 1148 1151 dma_cap_set(DMA_PRIVATE, dma->common.cap_mask); 1149 1152 dma->common.dev = &pdev->dev; 1150 - dma->common.chancnt = dma->max_chan; 1151 1153 1152 1154 dma->common.device_alloc_chan_resources = 1153 1155 intel_mid_dma_alloc_chan_resources; ··· 1346 1350 if (device->ch[i].in_use) 1347 1351 return -EAGAIN; 1348 1352 } 1353 + dmac1_mask_periphral_intr(device); 1349 1354 device->state = SUSPENDED; 1350 1355 pci_save_state(pci); 1351 1356 pci_disable_device(pci);
-1
drivers/dma/mpc512x_dma.c
··· 741 741 mchan = &mdma->channels[i]; 742 742 743 743 mchan->chan.device = dma; 744 - mchan->chan.chan_id = i; 745 744 mchan->chan.cookie = 1; 746 745 mchan->completed_cookie = mchan->chan.cookie; 747 746
+24 -21
drivers/dma/mxs-dma.c
··· 130 130 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 131 131 }; 132 132 133 + static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable) 134 + { 135 + struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 136 + int chan_id = mxs_chan->chan.chan_id; 137 + int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR; 138 + 139 + /* enable apbh channel clock */ 140 + if (dma_is_apbh()) { 141 + if (apbh_is_old()) 142 + writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), 143 + mxs_dma->base + HW_APBHX_CTRL0 + set_clr); 144 + else 145 + writel(1 << chan_id, 146 + mxs_dma->base + HW_APBHX_CTRL0 + set_clr); 147 + } 148 + } 149 + 133 150 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 134 151 { 135 152 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; ··· 165 148 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 166 149 int chan_id = mxs_chan->chan.chan_id; 167 150 151 + /* clkgate needs to be enabled before writing other registers */ 152 + mxs_dma_clkgate(mxs_chan, 1); 153 + 168 154 /* set cmd_addr up */ 169 155 writel(mxs_chan->ccw_phys, 170 156 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); 171 - 172 - /* enable apbh channel clock */ 173 - if (dma_is_apbh()) { 174 - if (apbh_is_old()) 175 - writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), 176 - mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); 177 - else 178 - writel(1 << chan_id, 179 - mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); 180 - } 181 157 182 158 /* write 1 to SEMA to kick off the channel */ 183 159 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); ··· 178 168 179 169 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 180 170 { 181 - struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 182 - int chan_id = mxs_chan->chan.chan_id; 183 - 184 171 /* disable apbh channel clock */ 185 - if (dma_is_apbh()) { 186 - if (apbh_is_old()) 187 - writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL), 188 - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 189 - else 
190 - writel(1 << chan_id, 191 - mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 192 - } 172 + mxs_dma_clkgate(mxs_chan, 0); 193 173 194 174 mxs_chan->status = DMA_SUCCESS; 195 175 } ··· 338 338 if (ret) 339 339 goto err_clk; 340 340 341 + /* clkgate needs to be enabled for reset to finish */ 342 + mxs_dma_clkgate(mxs_chan, 1); 341 343 mxs_dma_reset_chan(mxs_chan); 344 + mxs_dma_clkgate(mxs_chan, 0); 342 345 343 346 dma_async_tx_descriptor_init(&mxs_chan->desc, chan); 344 347 mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
+2 -5
drivers/dma/pch_dma.c
··· 60 60 #define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2 61 61 #define DMA_DESC_FOLLOW_WITH_IRQ 0x3 62 62 63 - #define MAX_CHAN_NR 8 63 + #define MAX_CHAN_NR 12 64 64 65 65 #define DMA_MASK_CTL0_MODE 0x33333333 66 66 #define DMA_MASK_CTL2_MODE 0x00003333 ··· 872 872 int i; 873 873 874 874 nr_channels = id->driver_data; 875 - pd = kzalloc(sizeof(struct pch_dma)+ 876 - sizeof(struct pch_dma_chan) * nr_channels, GFP_KERNEL); 875 + pd = kzalloc(sizeof(*pd), GFP_KERNEL); 877 876 if (!pd) 878 877 return -ENOMEM; 879 878 ··· 925 926 } 926 927 927 928 pd->dma.dev = &pdev->dev; 928 - pd->dma.chancnt = nr_channels; 929 929 930 930 INIT_LIST_HEAD(&pd->dma.channels); 931 931 ··· 933 935 934 936 pd_chan->chan.device = &pd->dma; 935 937 pd_chan->chan.cookie = 1; 936 - pd_chan->chan.chan_id = i; 937 938 938 939 pd_chan->membase = &regs->desc[i]; 939 940
+209 -26
drivers/dma/pl330.c
··· 17 17 #include <linux/interrupt.h> 18 18 #include <linux/amba/bus.h> 19 19 #include <linux/amba/pl330.h> 20 + #include <linux/pm_runtime.h> 21 + #include <linux/scatterlist.h> 20 22 21 23 #define NR_DEFAULT_DESC 16 22 24 ··· 70 68 * NULL if the channel is available to be acquired. 71 69 */ 72 70 void *pl330_chid; 71 + 72 + /* For D-to-M and M-to-D channels */ 73 + int burst_sz; /* the peripheral fifo width */ 74 + int burst_len; /* the number of burst */ 75 + dma_addr_t fifo_addr; 76 + 77 + /* for cyclic capability */ 78 + bool cyclic; 73 79 }; 74 80 75 81 struct dma_pl330_dmac { ··· 93 83 94 84 /* Peripheral channels connected to this DMAC */ 95 85 struct dma_pl330_chan *peripherals; /* keep at end */ 86 + 87 + struct clk *clk; 96 88 }; 97 89 98 90 struct dma_pl330_desc { ··· 164 152 spin_unlock_irqrestore(&pdmac->pool_lock, flags); 165 153 } 166 154 155 + static inline void handle_cyclic_desc_list(struct list_head *list) 156 + { 157 + struct dma_pl330_desc *desc; 158 + struct dma_pl330_chan *pch; 159 + unsigned long flags; 160 + 161 + if (list_empty(list)) 162 + return; 163 + 164 + list_for_each_entry(desc, list, node) { 165 + dma_async_tx_callback callback; 166 + 167 + /* Change status to reload it */ 168 + desc->status = PREP; 169 + pch = desc->pchan; 170 + callback = desc->txd.callback; 171 + if (callback) 172 + callback(desc->txd.callback_param); 173 + } 174 + 175 + spin_lock_irqsave(&pch->lock, flags); 176 + list_splice_tail_init(list, &pch->work_list); 177 + spin_unlock_irqrestore(&pch->lock, flags); 178 + } 179 + 167 180 static inline void fill_queue(struct dma_pl330_chan *pch) 168 181 { 169 182 struct dma_pl330_desc *desc; ··· 242 205 243 206 spin_unlock_irqrestore(&pch->lock, flags); 244 207 245 - free_desc_list(&list); 208 + if (pch->cyclic) 209 + handle_cyclic_desc_list(&list); 210 + else 211 + free_desc_list(&list); 246 212 } 247 213 248 214 static void dma_pl330_rqcb(void *token, enum pl330_op_err err) ··· 276 236 spin_lock_irqsave(&pch->lock, 
flags); 277 237 278 238 pch->completed = chan->cookie = 1; 239 + pch->cyclic = false; 279 240 280 241 pch->pl330_chid = pl330_request_channel(&pdmac->pif); 281 242 if (!pch->pl330_chid) { ··· 294 253 static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) 295 254 { 296 255 struct dma_pl330_chan *pch = to_pchan(chan); 297 - struct dma_pl330_desc *desc; 256 + struct dma_pl330_desc *desc, *_dt; 298 257 unsigned long flags; 258 + struct dma_pl330_dmac *pdmac = pch->dmac; 259 + struct dma_slave_config *slave_config; 260 + LIST_HEAD(list); 299 261 300 - /* Only supports DMA_TERMINATE_ALL */ 301 - if (cmd != DMA_TERMINATE_ALL) 262 + switch (cmd) { 263 + case DMA_TERMINATE_ALL: 264 + spin_lock_irqsave(&pch->lock, flags); 265 + 266 + /* FLUSH the PL330 Channel thread */ 267 + pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); 268 + 269 + /* Mark all desc done */ 270 + list_for_each_entry_safe(desc, _dt, &pch->work_list , node) { 271 + desc->status = DONE; 272 + pch->completed = desc->txd.cookie; 273 + list_move_tail(&desc->node, &list); 274 + } 275 + 276 + list_splice_tail_init(&list, &pdmac->desc_pool); 277 + spin_unlock_irqrestore(&pch->lock, flags); 278 + break; 279 + case DMA_SLAVE_CONFIG: 280 + slave_config = (struct dma_slave_config *)arg; 281 + 282 + if (slave_config->direction == DMA_TO_DEVICE) { 283 + if (slave_config->dst_addr) 284 + pch->fifo_addr = slave_config->dst_addr; 285 + if (slave_config->dst_addr_width) 286 + pch->burst_sz = __ffs(slave_config->dst_addr_width); 287 + if (slave_config->dst_maxburst) 288 + pch->burst_len = slave_config->dst_maxburst; 289 + } else if (slave_config->direction == DMA_FROM_DEVICE) { 290 + if (slave_config->src_addr) 291 + pch->fifo_addr = slave_config->src_addr; 292 + if (slave_config->src_addr_width) 293 + pch->burst_sz = __ffs(slave_config->src_addr_width); 294 + if (slave_config->src_maxburst) 295 + pch->burst_len = slave_config->src_maxburst; 296 + } 297 + break; 298 + default: 299 + 
dev_err(pch->dmac->pif.dev, "Not supported command.\n"); 302 300 return -ENXIO; 303 - 304 - spin_lock_irqsave(&pch->lock, flags); 305 - 306 - /* FLUSH the PL330 Channel thread */ 307 - pl330_chan_ctrl(pch->pl330_chid, PL330_OP_FLUSH); 308 - 309 - /* Mark all desc done */ 310 - list_for_each_entry(desc, &pch->work_list, node) 311 - desc->status = DONE; 312 - 313 - spin_unlock_irqrestore(&pch->lock, flags); 314 - 315 - pl330_tasklet((unsigned long) pch); 301 + } 316 302 317 303 return 0; 318 304 } ··· 355 287 356 288 pl330_release_channel(pch->pl330_chid); 357 289 pch->pl330_chid = NULL; 290 + 291 + if (pch->cyclic) 292 + list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); 358 293 359 294 spin_unlock_irqrestore(&pch->lock, flags); 360 295 } ··· 524 453 525 454 if (peri) { 526 455 desc->req.rqtype = peri->rqtype; 527 - desc->req.peri = peri->peri_id; 456 + desc->req.peri = pch->chan.chan_id; 528 457 } else { 529 458 desc->req.rqtype = MEMTOMEM; 530 459 desc->req.peri = 0; ··· 595 524 return burst_len; 596 525 } 597 526 527 + static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( 528 + struct dma_chan *chan, dma_addr_t dma_addr, size_t len, 529 + size_t period_len, enum dma_data_direction direction) 530 + { 531 + struct dma_pl330_desc *desc; 532 + struct dma_pl330_chan *pch = to_pchan(chan); 533 + dma_addr_t dst; 534 + dma_addr_t src; 535 + 536 + desc = pl330_get_desc(pch); 537 + if (!desc) { 538 + dev_err(pch->dmac->pif.dev, "%s:%d Unable to fetch desc\n", 539 + __func__, __LINE__); 540 + return NULL; 541 + } 542 + 543 + switch (direction) { 544 + case DMA_TO_DEVICE: 545 + desc->rqcfg.src_inc = 1; 546 + desc->rqcfg.dst_inc = 0; 547 + src = dma_addr; 548 + dst = pch->fifo_addr; 549 + break; 550 + case DMA_FROM_DEVICE: 551 + desc->rqcfg.src_inc = 0; 552 + desc->rqcfg.dst_inc = 1; 553 + src = pch->fifo_addr; 554 + dst = dma_addr; 555 + break; 556 + default: 557 + dev_err(pch->dmac->pif.dev, "%s:%d Invalid dma direction\n", 558 + __func__, __LINE__); 559 
+ return NULL; 560 + } 561 + 562 + desc->rqcfg.brst_size = pch->burst_sz; 563 + desc->rqcfg.brst_len = 1; 564 + 565 + pch->cyclic = true; 566 + 567 + fill_px(&desc->px, dst, src, period_len); 568 + 569 + return &desc->txd; 570 + } 571 + 598 572 static struct dma_async_tx_descriptor * 599 573 pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, 600 574 dma_addr_t src, size_t len, unsigned long flags) ··· 695 579 struct dma_pl330_peri *peri = chan->private; 696 580 struct scatterlist *sg; 697 581 unsigned long flags; 698 - int i, burst_size; 582 + int i; 699 583 dma_addr_t addr; 700 584 701 585 if (unlikely(!pch || !sgl || !sg_len || !peri)) ··· 711 595 return NULL; 712 596 } 713 597 714 - addr = peri->fifo_addr; 715 - burst_size = peri->burst_sz; 598 + addr = pch->fifo_addr; 716 599 717 600 first = NULL; 718 601 ··· 759 644 sg_dma_address(sg), addr, sg_dma_len(sg)); 760 645 } 761 646 762 - desc->rqcfg.brst_size = burst_size; 647 + desc->rqcfg.brst_size = pch->burst_sz; 763 648 desc->rqcfg.brst_len = 1; 764 649 } 765 650 ··· 811 696 goto probe_err1; 812 697 } 813 698 699 + pdmac->clk = clk_get(&adev->dev, "dma"); 700 + if (IS_ERR(pdmac->clk)) { 701 + dev_err(&adev->dev, "Cannot get operation clock.\n"); 702 + ret = -EINVAL; 703 + goto probe_err1; 704 + } 705 + 706 + amba_set_drvdata(adev, pdmac); 707 + 708 + #ifdef CONFIG_PM_RUNTIME 709 + /* to use the runtime PM helper functions */ 710 + pm_runtime_enable(&adev->dev); 711 + 712 + /* enable the power domain */ 713 + if (pm_runtime_get_sync(&adev->dev)) { 714 + dev_err(&adev->dev, "failed to get runtime pm\n"); 715 + ret = -ENODEV; 716 + goto probe_err1; 717 + } 718 + #else 719 + /* enable dma clk */ 720 + clk_enable(pdmac->clk); 721 + #endif 722 + 814 723 irq = adev->irq[0]; 815 724 ret = request_irq(irq, pl330_irq_handler, 0, 816 725 dev_name(&adev->dev), pi); ··· 871 732 case MEMTODEV: 872 733 case DEVTOMEM: 873 734 dma_cap_set(DMA_SLAVE, pd->cap_mask); 735 + dma_cap_set(DMA_CYCLIC, pd->cap_mask); 874 736 
break; 875 737 default: 876 738 dev_err(&adev->dev, "DEVTODEV Not Supported\n"); ··· 887 747 spin_lock_init(&pch->lock); 888 748 pch->pl330_chid = NULL; 889 749 pch->chan.device = pd; 890 - pch->chan.chan_id = i; 891 750 pch->dmac = pdmac; 892 751 893 752 /* Add the channel to the DMAC list */ 894 - pd->chancnt++; 895 753 list_add_tail(&pch->chan.device_node, &pd->channels); 896 754 } 897 755 ··· 898 760 pd->device_alloc_chan_resources = pl330_alloc_chan_resources; 899 761 pd->device_free_chan_resources = pl330_free_chan_resources; 900 762 pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; 763 + pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; 901 764 pd->device_tx_status = pl330_tx_status; 902 765 pd->device_prep_slave_sg = pl330_prep_slave_sg; 903 766 pd->device_control = pl330_control; ··· 909 770 dev_err(&adev->dev, "unable to register DMAC\n"); 910 771 goto probe_err4; 911 772 } 912 - 913 - amba_set_drvdata(adev, pdmac); 914 773 915 774 dev_info(&adev->dev, 916 775 "Loaded driver for PL330 DMAC-%d\n", adev->periphid); ··· 970 833 res = &adev->res; 971 834 release_mem_region(res->start, resource_size(res)); 972 835 836 + #ifdef CONFIG_PM_RUNTIME 837 + pm_runtime_put(&adev->dev); 838 + pm_runtime_disable(&adev->dev); 839 + #else 840 + clk_disable(pdmac->clk); 841 + #endif 842 + 973 843 kfree(pdmac); 974 844 975 845 return 0; ··· 990 846 { 0, 0 }, 991 847 }; 992 848 849 + #ifdef CONFIG_PM_RUNTIME 850 + static int pl330_runtime_suspend(struct device *dev) 851 + { 852 + struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); 853 + 854 + if (!pdmac) { 855 + dev_err(dev, "failed to get dmac\n"); 856 + return -ENODEV; 857 + } 858 + 859 + clk_disable(pdmac->clk); 860 + 861 + return 0; 862 + } 863 + 864 + static int pl330_runtime_resume(struct device *dev) 865 + { 866 + struct dma_pl330_dmac *pdmac = dev_get_drvdata(dev); 867 + 868 + if (!pdmac) { 869 + dev_err(dev, "failed to get dmac\n"); 870 + return -ENODEV; 871 + } 872 + 873 + clk_enable(pdmac->clk); 874 + 875 + 
return 0; 876 + } 877 + #else 878 + #define pl330_runtime_suspend NULL 879 + #define pl330_runtime_resume NULL 880 + #endif /* CONFIG_PM_RUNTIME */ 881 + 882 + static const struct dev_pm_ops pl330_pm_ops = { 883 + .runtime_suspend = pl330_runtime_suspend, 884 + .runtime_resume = pl330_runtime_resume, 885 + }; 886 + 993 887 static struct amba_driver pl330_driver = { 994 888 .drv = { 995 889 .owner = THIS_MODULE, 996 890 .name = "dma-pl330", 891 + .pm = &pl330_pm_ops, 997 892 }, 998 893 .id_table = pl330_ids, 999 894 .probe = pl330_probe,
+86 -43
drivers/dma/shdma.c
··· 259 259 return 0; 260 260 } 261 261 262 + static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan); 263 + 262 264 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx) 263 265 { 264 266 struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c; 265 267 struct sh_dmae_chan *sh_chan = to_sh_chan(tx->chan); 268 + struct sh_dmae_slave *param = tx->chan->private; 266 269 dma_async_tx_callback callback = tx->callback; 267 270 dma_cookie_t cookie; 271 + bool power_up; 268 272 269 - spin_lock_bh(&sh_chan->desc_lock); 273 + spin_lock_irq(&sh_chan->desc_lock); 274 + 275 + if (list_empty(&sh_chan->ld_queue)) 276 + power_up = true; 277 + else 278 + power_up = false; 270 279 271 280 cookie = sh_chan->common.cookie; 272 281 cookie++; ··· 311 302 tx->cookie, &last->async_tx, sh_chan->id, 312 303 desc->hw.sar, desc->hw.tcr, desc->hw.dar); 313 304 314 - spin_unlock_bh(&sh_chan->desc_lock); 305 + if (power_up) { 306 + sh_chan->pm_state = DMAE_PM_BUSY; 307 + 308 + pm_runtime_get(sh_chan->dev); 309 + 310 + spin_unlock_irq(&sh_chan->desc_lock); 311 + 312 + pm_runtime_barrier(sh_chan->dev); 313 + 314 + spin_lock_irq(&sh_chan->desc_lock); 315 + 316 + /* Have we been reset, while waiting? 
*/ 317 + if (sh_chan->pm_state != DMAE_PM_ESTABLISHED) { 318 + dev_dbg(sh_chan->dev, "Bring up channel %d\n", 319 + sh_chan->id); 320 + if (param) { 321 + const struct sh_dmae_slave_config *cfg = 322 + param->config; 323 + 324 + dmae_set_dmars(sh_chan, cfg->mid_rid); 325 + dmae_set_chcr(sh_chan, cfg->chcr); 326 + } else { 327 + dmae_init(sh_chan); 328 + } 329 + 330 + if (sh_chan->pm_state == DMAE_PM_PENDING) 331 + sh_chan_xfer_ld_queue(sh_chan); 332 + sh_chan->pm_state = DMAE_PM_ESTABLISHED; 333 + } 334 + } 335 + 336 + spin_unlock_irq(&sh_chan->desc_lock); 315 337 316 338 return cookie; 317 339 } ··· 386 346 struct sh_dmae_slave *param = chan->private; 387 347 int ret; 388 348 389 - pm_runtime_get_sync(sh_chan->dev); 390 - 391 349 /* 392 350 * This relies on the guarantee from dmaengine that alloc_chan_resources 393 351 * never runs concurrently with itself or free_chan_resources. ··· 405 367 } 406 368 407 369 param->config = cfg; 408 - 409 - dmae_set_dmars(sh_chan, cfg->mid_rid); 410 - dmae_set_chcr(sh_chan, cfg->chcr); 411 - } else { 412 - dmae_init(sh_chan); 413 370 } 414 371 415 - spin_lock_bh(&sh_chan->desc_lock); 416 372 while (sh_chan->descs_allocated < NR_DESCS_PER_CHANNEL) { 417 - spin_unlock_bh(&sh_chan->desc_lock); 418 373 desc = kzalloc(sizeof(struct sh_desc), GFP_KERNEL); 419 - if (!desc) { 420 - spin_lock_bh(&sh_chan->desc_lock); 374 + if (!desc) 421 375 break; 422 - } 423 376 dma_async_tx_descriptor_init(&desc->async_tx, 424 377 &sh_chan->common); 425 378 desc->async_tx.tx_submit = sh_dmae_tx_submit; 426 379 desc->mark = DESC_IDLE; 427 380 428 - spin_lock_bh(&sh_chan->desc_lock); 429 381 list_add(&desc->node, &sh_chan->ld_free); 430 382 sh_chan->descs_allocated++; 431 383 } 432 - spin_unlock_bh(&sh_chan->desc_lock); 433 384 434 385 if (!sh_chan->descs_allocated) { 435 386 ret = -ENOMEM; ··· 432 405 clear_bit(param->slave_id, sh_dmae_slave_used); 433 406 etestused: 434 407 efindslave: 435 - pm_runtime_put(sh_chan->dev); 408 + chan->private = NULL; 436 
409 return ret; 437 410 } 438 411 ··· 444 417 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 445 418 struct sh_desc *desc, *_desc; 446 419 LIST_HEAD(list); 447 - int descs = sh_chan->descs_allocated; 448 420 449 421 /* Protect against ISR */ 450 422 spin_lock_irq(&sh_chan->desc_lock); ··· 463 437 chan->private = NULL; 464 438 } 465 439 466 - spin_lock_bh(&sh_chan->desc_lock); 440 + spin_lock_irq(&sh_chan->desc_lock); 467 441 468 442 list_splice_init(&sh_chan->ld_free, &list); 469 443 sh_chan->descs_allocated = 0; 470 444 471 - spin_unlock_bh(&sh_chan->desc_lock); 472 - 473 - if (descs > 0) 474 - pm_runtime_put(sh_chan->dev); 445 + spin_unlock_irq(&sh_chan->desc_lock); 475 446 476 447 list_for_each_entry_safe(desc, _desc, &list, node) 477 448 kfree(desc); ··· 557 534 struct sh_desc *first = NULL, *new = NULL /* compiler... */; 558 535 LIST_HEAD(tx_list); 559 536 int chunks = 0; 537 + unsigned long irq_flags; 560 538 int i; 561 539 562 540 if (!sg_len) ··· 568 544 (SH_DMA_TCR_MAX + 1); 569 545 570 546 /* Have to lock the whole loop to protect against concurrent release */ 571 - spin_lock_bh(&sh_chan->desc_lock); 547 + spin_lock_irqsave(&sh_chan->desc_lock, irq_flags); 572 548 573 549 /* 574 550 * Chaining: ··· 614 590 /* Put them back on the free list, so, they don't get lost */ 615 591 list_splice_tail(&tx_list, &sh_chan->ld_free); 616 592 617 - spin_unlock_bh(&sh_chan->desc_lock); 593 + spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); 618 594 619 595 return &first->async_tx; 620 596 ··· 623 599 new->mark = DESC_IDLE; 624 600 list_splice(&tx_list, &sh_chan->ld_free); 625 601 626 - spin_unlock_bh(&sh_chan->desc_lock); 602 + spin_unlock_irqrestore(&sh_chan->desc_lock, irq_flags); 627 603 628 604 return NULL; 629 605 } ··· 685 661 unsigned long arg) 686 662 { 687 663 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 664 + unsigned long flags; 688 665 689 666 /* Only supports DMA_TERMINATE_ALL */ 690 667 if (cmd != DMA_TERMINATE_ALL) ··· 694 669 if (!chan) 695 
670 return -EINVAL; 696 671 697 - spin_lock_bh(&sh_chan->desc_lock); 672 + spin_lock_irqsave(&sh_chan->desc_lock, flags); 698 673 dmae_halt(sh_chan); 699 674 700 675 if (!list_empty(&sh_chan->ld_queue)) { ··· 703 678 struct sh_desc, node); 704 679 desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << 705 680 sh_chan->xmit_shift; 706 - 707 681 } 708 - spin_unlock_bh(&sh_chan->desc_lock); 682 + spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 709 683 710 684 sh_dmae_chan_ld_cleanup(sh_chan, true); 711 685 ··· 719 695 dma_cookie_t cookie = 0; 720 696 dma_async_tx_callback callback = NULL; 721 697 void *param = NULL; 698 + unsigned long flags; 722 699 723 - spin_lock_bh(&sh_chan->desc_lock); 700 + spin_lock_irqsave(&sh_chan->desc_lock, flags); 724 701 list_for_each_entry_safe(desc, _desc, &sh_chan->ld_queue, node) { 725 702 struct dma_async_tx_descriptor *tx = &desc->async_tx; 726 703 ··· 787 762 async_tx_test_ack(&desc->async_tx)) || all) { 788 763 /* Remove from ld_queue list */ 789 764 desc->mark = DESC_IDLE; 765 + 790 766 list_move(&desc->node, &sh_chan->ld_free); 767 + 768 + if (list_empty(&sh_chan->ld_queue)) { 769 + dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); 770 + pm_runtime_put(sh_chan->dev); 771 + } 791 772 } 792 773 } 793 774 ··· 804 773 */ 805 774 sh_chan->completed_cookie = sh_chan->common.cookie; 806 775 807 - spin_unlock_bh(&sh_chan->desc_lock); 776 + spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 808 777 809 778 if (callback) 810 779 callback(param); ··· 823 792 ; 824 793 } 825 794 795 + /* Called under spin_lock_irq(&sh_chan->desc_lock) */ 826 796 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 827 797 { 828 798 struct sh_desc *desc; 829 799 830 - spin_lock_bh(&sh_chan->desc_lock); 831 800 /* DMA work check */ 832 801 if (dmae_is_busy(sh_chan)) 833 - goto sh_chan_xfer_ld_queue_end; 802 + return; 834 803 835 804 /* Find the first not transferred descriptor */ 836 805 list_for_each_entry(desc, 
&sh_chan->ld_queue, node) ··· 843 812 dmae_start(sh_chan); 844 813 break; 845 814 } 846 - 847 - sh_chan_xfer_ld_queue_end: 848 - spin_unlock_bh(&sh_chan->desc_lock); 849 815 } 850 816 851 817 static void sh_dmae_memcpy_issue_pending(struct dma_chan *chan) 852 818 { 853 819 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 854 - sh_chan_xfer_ld_queue(sh_chan); 820 + 821 + spin_lock_irq(&sh_chan->desc_lock); 822 + if (sh_chan->pm_state == DMAE_PM_ESTABLISHED) 823 + sh_chan_xfer_ld_queue(sh_chan); 824 + else 825 + sh_chan->pm_state = DMAE_PM_PENDING; 826 + spin_unlock_irq(&sh_chan->desc_lock); 855 827 } 856 828 857 829 static enum dma_status sh_dmae_tx_status(struct dma_chan *chan, ··· 865 831 dma_cookie_t last_used; 866 832 dma_cookie_t last_complete; 867 833 enum dma_status status; 834 + unsigned long flags; 868 835 869 836 sh_dmae_chan_ld_cleanup(sh_chan, false); 870 837 ··· 876 841 BUG_ON(last_complete < 0); 877 842 dma_set_tx_state(txstate, last_complete, last_used, 0); 878 843 879 - spin_lock_bh(&sh_chan->desc_lock); 844 + spin_lock_irqsave(&sh_chan->desc_lock, flags); 880 845 881 846 status = dma_async_is_complete(cookie, last_complete, last_used); 882 847 ··· 894 859 } 895 860 } 896 861 897 - spin_unlock_bh(&sh_chan->desc_lock); 862 + spin_unlock_irqrestore(&sh_chan->desc_lock, flags); 898 863 899 864 return status; 900 865 } ··· 947 912 948 913 list_splice_init(&sh_chan->ld_queue, &dl); 949 914 915 + if (!list_empty(&dl)) { 916 + dev_dbg(sh_chan->dev, "Bring down channel %d\n", sh_chan->id); 917 + pm_runtime_put(sh_chan->dev); 918 + } 919 + sh_chan->pm_state = DMAE_PM_ESTABLISHED; 920 + 950 921 spin_unlock(&sh_chan->desc_lock); 951 922 952 923 /* Complete all */ ··· 993 952 u32 sar_buf = sh_dmae_readl(sh_chan, SAR); 994 953 u32 dar_buf = sh_dmae_readl(sh_chan, DAR); 995 954 996 - spin_lock(&sh_chan->desc_lock); 955 + spin_lock_irq(&sh_chan->desc_lock); 997 956 list_for_each_entry(desc, &sh_chan->ld_queue, node) { 998 957 if (desc->mark == DESC_SUBMITTED && 999 
958 ((desc->direction == DMA_FROM_DEVICE && ··· 1006 965 break; 1007 966 } 1008 967 } 1009 - spin_unlock(&sh_chan->desc_lock); 1010 - 1011 968 /* Next desc */ 1012 969 sh_chan_xfer_ld_queue(sh_chan); 970 + spin_unlock_irq(&sh_chan->desc_lock); 971 + 1013 972 sh_dmae_chan_ld_cleanup(sh_chan, false); 1014 973 } 1015 974 ··· 1077 1036 return -ENOMEM; 1078 1037 } 1079 1038 1080 - /* copy struct dma_device */ 1039 + new_sh_chan->pm_state = DMAE_PM_ESTABLISHED; 1040 + 1041 + /* reference struct dma_device */ 1081 1042 new_sh_chan->common.device = &shdev->common; 1082 1043 1083 1044 new_sh_chan->dev = shdev->common.dev;
+7
drivers/dma/shdma.h
··· 23 23 24 24 struct device; 25 25 26 + enum dmae_pm_state { 27 + DMAE_PM_ESTABLISHED, 28 + DMAE_PM_BUSY, 29 + DMAE_PM_PENDING, 30 + }; 31 + 26 32 struct sh_dmae_chan { 27 33 dma_cookie_t completed_cookie; /* The maximum cookie completed */ 28 34 spinlock_t desc_lock; /* Descriptor operation lock */ ··· 44 38 u32 __iomem *base; 45 39 char dev_id[16]; /* unique name per DMAC of channel */ 46 40 int pm_error; 41 + enum dmae_pm_state pm_state; 47 42 }; 48 43 49 44 struct sh_dmae_device {
+2 -3
drivers/dma/timb_dma.c
··· 753 753 754 754 INIT_LIST_HEAD(&td->dma.channels); 755 755 756 - for (i = 0; i < pdata->nr_channels; i++, td->dma.chancnt++) { 756 + for (i = 0; i < pdata->nr_channels; i++) { 757 757 struct timb_dma_chan *td_chan = &td->channels[i]; 758 758 struct timb_dma_platform_data_channel *pchan = 759 759 pdata->channels + i; ··· 762 762 if ((i % 2) == pchan->rx) { 763 763 dev_err(&pdev->dev, "Wrong channel configuration\n"); 764 764 err = -EINVAL; 765 - goto err_tasklet_kill; 765 + goto err_free_irq; 766 766 } 767 767 768 768 td_chan->chan.device = &td->dma; 769 769 td_chan->chan.cookie = 1; 770 - td_chan->chan.chan_id = i; 771 770 spin_lock_init(&td_chan->lock); 772 771 INIT_LIST_HEAD(&td_chan->active_list); 773 772 INIT_LIST_HEAD(&td_chan->queue);
+3 -3
drivers/mmc/host/s3cmci.c
··· 913 913 } 914 914 915 915 static void s3cmci_dma_setup(struct s3cmci_host *host, 916 - enum s3c2410_dmasrc source) 916 + enum dma_data_direction source) 917 917 { 918 - static enum s3c2410_dmasrc last_source = -1; 918 + static enum dma_data_direction last_source = -1; 919 919 static int setup_ok; 920 920 921 921 if (last_source == source) ··· 1087 1087 1088 1088 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); 1089 1089 1090 - s3cmci_dma_setup(host, rw ? S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW); 1090 + s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1091 1091 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 1092 1092 1093 1093 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
+92 -83
drivers/spi/spi-s3c64xx.c
··· 131 131 #define RXBUSY (1<<2) 132 132 #define TXBUSY (1<<3) 133 133 134 + struct s3c64xx_spi_dma_data { 135 + unsigned ch; 136 + enum dma_data_direction direction; 137 + enum dma_ch dmach; 138 + }; 139 + 134 140 /** 135 141 * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver. 136 142 * @clk: Pointer to the spi clock. ··· 170 164 struct work_struct work; 171 165 struct list_head queue; 172 166 spinlock_t lock; 173 - enum dma_ch rx_dmach; 174 - enum dma_ch tx_dmach; 175 167 unsigned long sfr_start; 176 168 struct completion xfer_completion; 177 169 unsigned state; 178 170 unsigned cur_mode, cur_bpw; 179 171 unsigned cur_speed; 172 + struct s3c64xx_spi_dma_data rx_dma; 173 + struct s3c64xx_spi_dma_data tx_dma; 174 + struct samsung_dma_ops *ops; 180 175 }; 181 176 182 177 static struct s3c2410_dma_client s3c64xx_spi_dma_client = { ··· 233 226 writel(val, regs + S3C64XX_SPI_CH_CFG); 234 227 } 235 228 229 + static void s3c64xx_spi_dmacb(void *data) 230 + { 231 + struct s3c64xx_spi_driver_data *sdd; 232 + struct s3c64xx_spi_dma_data *dma = data; 233 + unsigned long flags; 234 + 235 + if (dma->direction == DMA_FROM_DEVICE) 236 + sdd = container_of(data, 237 + struct s3c64xx_spi_driver_data, rx_dma); 238 + else 239 + sdd = container_of(data, 240 + struct s3c64xx_spi_driver_data, tx_dma); 241 + 242 + spin_lock_irqsave(&sdd->lock, flags); 243 + 244 + if (dma->direction == DMA_FROM_DEVICE) { 245 + sdd->state &= ~RXBUSY; 246 + if (!(sdd->state & TXBUSY)) 247 + complete(&sdd->xfer_completion); 248 + } else { 249 + sdd->state &= ~TXBUSY; 250 + if (!(sdd->state & RXBUSY)) 251 + complete(&sdd->xfer_completion); 252 + } 253 + 254 + spin_unlock_irqrestore(&sdd->lock, flags); 255 + } 256 + 257 + static void prepare_dma(struct s3c64xx_spi_dma_data *dma, 258 + unsigned len, dma_addr_t buf) 259 + { 260 + struct s3c64xx_spi_driver_data *sdd; 261 + struct samsung_dma_prep_info info; 262 + 263 + if (dma->direction == DMA_FROM_DEVICE) 264 + sdd = container_of((void 
*)dma, 265 + struct s3c64xx_spi_driver_data, rx_dma); 266 + else 267 + sdd = container_of((void *)dma, 268 + struct s3c64xx_spi_driver_data, tx_dma); 269 + 270 + info.cap = DMA_SLAVE; 271 + info.len = len; 272 + info.fp = s3c64xx_spi_dmacb; 273 + info.fp_param = dma; 274 + info.direction = dma->direction; 275 + info.buf = buf; 276 + 277 + sdd->ops->prepare(dma->ch, &info); 278 + sdd->ops->trigger(dma->ch); 279 + } 280 + 281 + static int acquire_dma(struct s3c64xx_spi_driver_data *sdd) 282 + { 283 + struct samsung_dma_info info; 284 + 285 + sdd->ops = samsung_dma_get_ops(); 286 + 287 + info.cap = DMA_SLAVE; 288 + info.client = &s3c64xx_spi_dma_client; 289 + info.width = sdd->cur_bpw / 8; 290 + 291 + info.direction = sdd->rx_dma.direction; 292 + info.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA; 293 + sdd->rx_dma.ch = sdd->ops->request(sdd->rx_dma.dmach, &info); 294 + info.direction = sdd->tx_dma.direction; 295 + info.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA; 296 + sdd->tx_dma.ch = sdd->ops->request(sdd->tx_dma.dmach, &info); 297 + 298 + return 1; 299 + } 300 + 236 301 static void enable_datapath(struct s3c64xx_spi_driver_data *sdd, 237 302 struct spi_device *spi, 238 303 struct spi_transfer *xfer, int dma_mode) ··· 337 258 chcfg |= S3C64XX_SPI_CH_TXCH_ON; 338 259 if (dma_mode) { 339 260 modecfg |= S3C64XX_SPI_MODE_TXDMA_ON; 340 - s3c2410_dma_config(sdd->tx_dmach, sdd->cur_bpw / 8); 341 - s3c2410_dma_enqueue(sdd->tx_dmach, (void *)sdd, 342 - xfer->tx_dma, xfer->len); 343 - s3c2410_dma_ctrl(sdd->tx_dmach, S3C2410_DMAOP_START); 261 + prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma); 344 262 } else { 345 263 switch (sdd->cur_bpw) { 346 264 case 32: ··· 369 293 writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff) 370 294 | S3C64XX_SPI_PACKET_CNT_EN, 371 295 regs + S3C64XX_SPI_PACKET_CNT); 372 - s3c2410_dma_config(sdd->rx_dmach, sdd->cur_bpw / 8); 373 - s3c2410_dma_enqueue(sdd->rx_dmach, (void *)sdd, 374 - xfer->rx_dma, xfer->len); 375 - s3c2410_dma_ctrl(sdd->rx_dmach, 
S3C2410_DMAOP_START); 296 + prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma); 376 297 } 377 298 } 378 299 ··· 555 482 } 556 483 } 557 484 558 - static void s3c64xx_spi_dma_rxcb(struct s3c2410_dma_chan *chan, void *buf_id, 559 - int size, enum s3c2410_dma_buffresult res) 560 - { 561 - struct s3c64xx_spi_driver_data *sdd = buf_id; 562 - unsigned long flags; 563 - 564 - spin_lock_irqsave(&sdd->lock, flags); 565 - 566 - if (res == S3C2410_RES_OK) 567 - sdd->state &= ~RXBUSY; 568 - else 569 - dev_err(&sdd->pdev->dev, "DmaAbrtRx-%d\n", size); 570 - 571 - /* If the other done */ 572 - if (!(sdd->state & TXBUSY)) 573 - complete(&sdd->xfer_completion); 574 - 575 - spin_unlock_irqrestore(&sdd->lock, flags); 576 - } 577 - 578 - static void s3c64xx_spi_dma_txcb(struct s3c2410_dma_chan *chan, void *buf_id, 579 - int size, enum s3c2410_dma_buffresult res) 580 - { 581 - struct s3c64xx_spi_driver_data *sdd = buf_id; 582 - unsigned long flags; 583 - 584 - spin_lock_irqsave(&sdd->lock, flags); 585 - 586 - if (res == S3C2410_RES_OK) 587 - sdd->state &= ~TXBUSY; 588 - else 589 - dev_err(&sdd->pdev->dev, "DmaAbrtTx-%d \n", size); 590 - 591 - /* If the other done */ 592 - if (!(sdd->state & RXBUSY)) 593 - complete(&sdd->xfer_completion); 594 - 595 - spin_unlock_irqrestore(&sdd->lock, flags); 596 - } 597 - 598 485 #define XFER_DMAADDR_INVALID DMA_BIT_MASK(32) 599 486 600 487 static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd, ··· 729 696 if (use_dma) { 730 697 if (xfer->tx_buf != NULL 731 698 && (sdd->state & TXBUSY)) 732 - s3c2410_dma_ctrl(sdd->tx_dmach, 733 - S3C2410_DMAOP_FLUSH); 699 + sdd->ops->stop(sdd->tx_dma.ch); 734 700 if (xfer->rx_buf != NULL 735 701 && (sdd->state & RXBUSY)) 736 - s3c2410_dma_ctrl(sdd->rx_dmach, 737 - S3C2410_DMAOP_FLUSH); 702 + sdd->ops->stop(sdd->rx_dma.ch); 738 703 } 739 704 740 705 goto out; ··· 768 737 769 738 if (msg->complete) 770 739 msg->complete(msg->context); 771 - } 772 - 773 - static int acquire_dma(struct 
s3c64xx_spi_driver_data *sdd) 774 - { 775 - if (s3c2410_dma_request(sdd->rx_dmach, 776 - &s3c64xx_spi_dma_client, NULL) < 0) { 777 - dev_err(&sdd->pdev->dev, "cannot get RxDMA\n"); 778 - return 0; 779 - } 780 - s3c2410_dma_set_buffdone_fn(sdd->rx_dmach, s3c64xx_spi_dma_rxcb); 781 - s3c2410_dma_devconfig(sdd->rx_dmach, S3C2410_DMASRC_HW, 782 - sdd->sfr_start + S3C64XX_SPI_RX_DATA); 783 - 784 - if (s3c2410_dma_request(sdd->tx_dmach, 785 - &s3c64xx_spi_dma_client, NULL) < 0) { 786 - dev_err(&sdd->pdev->dev, "cannot get TxDMA\n"); 787 - s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); 788 - return 0; 789 - } 790 - s3c2410_dma_set_buffdone_fn(sdd->tx_dmach, s3c64xx_spi_dma_txcb); 791 - s3c2410_dma_devconfig(sdd->tx_dmach, S3C2410_DMASRC_MEM, 792 - sdd->sfr_start + S3C64XX_SPI_TX_DATA); 793 - 794 - return 1; 795 740 } 796 741 797 742 static void s3c64xx_spi_work(struct work_struct *work) ··· 806 799 spin_unlock_irqrestore(&sdd->lock, flags); 807 800 808 801 /* Free DMA channels */ 809 - s3c2410_dma_free(sdd->tx_dmach, &s3c64xx_spi_dma_client); 810 - s3c2410_dma_free(sdd->rx_dmach, &s3c64xx_spi_dma_client); 802 + sdd->ops->release(sdd->rx_dma.ch, &s3c64xx_spi_dma_client); 803 + sdd->ops->release(sdd->tx_dma.ch, &s3c64xx_spi_dma_client); 811 804 } 812 805 813 806 static int s3c64xx_spi_transfer(struct spi_device *spi, ··· 1024 1017 sdd->cntrlr_info = sci; 1025 1018 sdd->pdev = pdev; 1026 1019 sdd->sfr_start = mem_res->start; 1027 - sdd->tx_dmach = dmatx_res->start; 1028 - sdd->rx_dmach = dmarx_res->start; 1020 + sdd->tx_dma.dmach = dmatx_res->start; 1021 + sdd->tx_dma.direction = DMA_TO_DEVICE; 1022 + sdd->rx_dma.dmach = dmarx_res->start; 1023 + sdd->rx_dma.direction = DMA_FROM_DEVICE; 1029 1024 1030 1025 sdd->cur_bpw = 8; 1031 1026 ··· 1115 1106 pdev->id, master->num_chipselect); 1116 1107 dev_dbg(&pdev->dev, "\tIOmem=[0x%x-0x%x]\tDMA=[Rx-%d, Tx-%d]\n", 1117 1108 mem_res->end, mem_res->start, 1118 - sdd->rx_dmach, sdd->tx_dmach); 1109 + sdd->rx_dma.dmach, 
sdd->tx_dma.dmach); 1119 1110 1120 1111 return 0; 1121 1112
+8 -17
drivers/tty/serial/sh-sci.c
··· 1446 1446 dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, 1447 1447 param->slave_id); 1448 1448 1449 - if (param->dma_dev == chan->device->dev) { 1450 - chan->private = param; 1451 - return true; 1452 - } else { 1453 - return false; 1454 - } 1449 + chan->private = param; 1450 + return true; 1455 1451 } 1456 1452 1457 1453 static void rx_timer_fn(unsigned long arg) ··· 1473 1477 dma_cap_mask_t mask; 1474 1478 int nent; 1475 1479 1476 - dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__, 1477 - port->line, s->cfg->dma_dev); 1480 + dev_dbg(port->dev, "%s: port %d\n", __func__, 1481 + port->line); 1478 1482 1479 - if (!s->cfg->dma_dev) 1483 + if (s->cfg->dma_slave_tx <= 0 || s->cfg->dma_slave_rx <= 0) 1480 1484 return; 1481 1485 1482 1486 dma_cap_zero(mask); ··· 1486 1490 1487 1491 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ 1488 1492 param->slave_id = s->cfg->dma_slave_tx; 1489 - param->dma_dev = s->cfg->dma_dev; 1490 1493 1491 1494 s->cookie_tx = -EINVAL; 1492 1495 chan = dma_request_channel(mask, filter, param); ··· 1514 1519 1515 1520 /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ 1516 1521 param->slave_id = s->cfg->dma_slave_rx; 1517 - param->dma_dev = s->cfg->dma_dev; 1518 1522 1519 1523 chan = dma_request_channel(mask, filter, param); 1520 1524 dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); ··· 1557 1563 static void sci_free_dma(struct uart_port *port) 1558 1564 { 1559 1565 struct sci_port *s = to_sci_port(port); 1560 - 1561 - if (!s->cfg->dma_dev) 1562 - return; 1563 1566 1564 1567 if (s->chan_tx) 1565 1568 sci_tx_dma_release(s, false); ··· 1972 1981 port->serial_in = sci_serial_in; 1973 1982 port->serial_out = sci_serial_out; 1974 1983 1975 - if (p->dma_dev) 1976 - dev_dbg(port->dev, "DMA device %p, tx %d, rx %d\n", 1977 - p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); 1984 + if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) 1985 + dev_dbg(port->dev, "DMA tx %d, rx %d\n", 1986 + p->dma_slave_tx, p->dma_slave_rx); 1978 1987 1979 1988 
return 0; 1980 1989 }
+25 -5
include/linux/amba/pl08x.h
··· 47 47 * @muxval: a number usually used to poke into some mux regiser to 48 48 * mux in the signal to this channel 49 49 * @cctl_opt: default options for the channel control register 50 + * @device_fc: Flow Controller Settings for ccfg register. Only valid for slave 51 + * channels. Fill with 'true' if peripheral should be flow controller. Direction 52 + * will be selected at Runtime. 50 53 * @addr: source/target address in physical memory for this DMA channel, 51 54 * can be the address of a FIFO register for burst requests for example. 52 55 * This can be left undefined if the PrimeCell API is used for configuring ··· 68 65 int max_signal; 69 66 u32 muxval; 70 67 u32 cctl; 68 + bool device_fc; 71 69 dma_addr_t addr; 72 70 bool circular_buffer; 73 71 bool single; ··· 81 77 * @addr: current address 82 78 * @maxwidth: the maximum width of a transfer on this bus 83 79 * @buswidth: the width of this bus in bytes: 1, 2 or 4 84 - * @fill_bytes: bytes required to fill to the next bus memory boundary 85 80 */ 86 81 struct pl08x_bus_data { 87 82 dma_addr_t addr; 88 83 u8 maxwidth; 89 84 u8 buswidth; 90 - size_t fill_bytes; 91 85 }; 92 86 93 87 /** ··· 106 104 }; 107 105 108 106 /** 107 + * struct pl08x_sg - structure containing data per sg 108 + * @src_addr: src address of sg 109 + * @dst_addr: dst address of sg 110 + * @len: transfer len in bytes 111 + * @node: node for txd's dsg_list 112 + */ 113 + struct pl08x_sg { 114 + dma_addr_t src_addr; 115 + dma_addr_t dst_addr; 116 + size_t len; 117 + struct list_head node; 118 + }; 119 + 120 + /** 109 121 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor 122 + * @tx: async tx descriptor 123 + * @node: node for txd list for channels 124 + * @dsg_list: list of children sg's 125 + * @direction: direction of transfer 110 126 * @llis_bus: DMA memory address (physical) start for the LLIs 111 127 * @llis_va: virtual memory address start for the LLIs 128 + * @cctl: control reg values for current txd 129 + * @ccfg: 
config reg values for current txd 112 130 */ 113 131 struct pl08x_txd { 114 132 struct dma_async_tx_descriptor tx; 115 133 struct list_head node; 134 + struct list_head dsg_list; 116 135 enum dma_data_direction direction; 117 - dma_addr_t src_addr; 118 - dma_addr_t dst_addr; 119 - size_t len; 120 136 dma_addr_t llis_bus; 121 137 struct pl08x_lli *llis_va; 122 138 /* Default cctl value for LLIs */
+1 -5
include/linux/amba/pl330.h
··· 19 19 * Peri_Req i/f of the DMAC that is 20 20 * peripheral could be reached from. 21 21 */ 22 - u8 peri_id; /* {0, 31} */ 22 + u8 peri_id; /* specific dma id */ 23 23 enum pl330_reqtype rqtype; 24 - 25 - /* For M->D and D->M Channels */ 26 - int burst_sz; /* in power of 2 */ 27 - dma_addr_t fifo_addr; 28 24 }; 29 25 30 26 struct dma_pl330_platdata {
+11 -2
include/linux/dmaengine.h
··· 24 24 #include <linux/device.h> 25 25 #include <linux/uio.h> 26 26 #include <linux/dma-direction.h> 27 - 28 - struct scatterlist; 27 + #include <linux/scatterlist.h> 29 28 30 29 /** 31 30 * typedef dma_cookie_t - an opaque DMA cookie ··· 516 517 { 517 518 return dmaengine_device_control(chan, DMA_SLAVE_CONFIG, 518 519 (unsigned long)config); 520 + } 521 + 522 + static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( 523 + struct dma_chan *chan, void *buf, size_t len, 524 + enum dma_data_direction dir, unsigned long flags) 525 + { 526 + struct scatterlist sg; 527 + sg_init_one(&sg, buf, len); 528 + 529 + return chan->device->device_prep_slave_sg(chan, &sg, 1, dir, flags); 519 530 } 520 531 521 532 static inline int dmaengine_terminate_all(struct dma_chan *chan)
-2
include/linux/serial_sci.h
··· 131 131 132 132 struct plat_sci_port_ops *ops; 133 133 134 - struct device *dma_dev; 135 - 136 134 unsigned int dma_slave_tx; 137 135 unsigned int dma_slave_rx; 138 136 };
+8 -2
sound/soc/samsung/ac97.c
··· 271 271 272 272 writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); 273 273 274 - s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); 274 + if (!dma_data->ops) 275 + dma_data->ops = samsung_dma_get_ops(); 276 + 277 + dma_data->ops->started(dma_data->channel); 275 278 276 279 return 0; 277 280 } ··· 320 317 321 318 writel(ac_glbctrl, s3c_ac97.regs + S3C_AC97_GLBCTRL); 322 319 323 - s3c2410_dma_ctrl(dma_data->channel, S3C2410_DMAOP_STARTED); 320 + if (!dma_data->ops) 321 + dma_data->ops = samsung_dma_get_ops(); 322 + 323 + dma_data->ops->started(dma_data->channel); 324 324 325 325 return 0; 326 326 }
+59 -85
sound/soc/samsung/dma.c
··· 54 54 spinlock_t lock; 55 55 int state; 56 56 unsigned int dma_loaded; 57 - unsigned int dma_limit; 58 57 unsigned int dma_period; 59 58 dma_addr_t dma_start; 60 59 dma_addr_t dma_pos; ··· 61 62 struct s3c_dma_params *params; 62 63 }; 63 64 65 + static void audio_buffdone(void *data); 66 + 64 67 /* dma_enqueue 65 68 * 66 69 * place a dma buffer onto the queue for the dma system 67 70 * to handle. 68 - */ 71 + */ 69 72 static void dma_enqueue(struct snd_pcm_substream *substream) 70 73 { 71 74 struct runtime_data *prtd = substream->runtime->private_data; 72 75 dma_addr_t pos = prtd->dma_pos; 73 76 unsigned int limit; 74 - int ret; 77 + struct samsung_dma_prep_info dma_info; 75 78 76 79 pr_debug("Entered %s\n", __func__); 77 80 78 - if (s3c_dma_has_circular()) 79 - limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period; 80 - else 81 - limit = prtd->dma_limit; 81 + limit = (prtd->dma_end - prtd->dma_start) / prtd->dma_period; 82 82 83 83 pr_debug("%s: loaded %d, limit %d\n", 84 84 __func__, prtd->dma_loaded, limit); 85 85 86 - while (prtd->dma_loaded < limit) { 87 - unsigned long len = prtd->dma_period; 86 + dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE); 87 + dma_info.direction = 88 + (substream->stream == SNDRV_PCM_STREAM_PLAYBACK 89 + ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); 90 + dma_info.fp = audio_buffdone; 91 + dma_info.fp_param = substream; 92 + dma_info.period = prtd->dma_period; 93 + dma_info.len = prtd->dma_period*limit; 88 94 95 + while (prtd->dma_loaded < limit) { 89 96 pr_debug("dma_loaded: %d\n", prtd->dma_loaded); 90 97 91 - if ((pos + len) > prtd->dma_end) { 92 - len = prtd->dma_end - pos; 93 - pr_debug("%s: corrected dma len %ld\n", __func__, len); 98 + if ((pos + dma_info.period) > prtd->dma_end) { 99 + dma_info.period = prtd->dma_end - pos; 100 + pr_debug("%s: corrected dma len %ld\n", 101 + __func__, dma_info.period); 94 102 } 95 103 96 - ret = s3c2410_dma_enqueue(prtd->params->channel, 97 - substream, pos, len); 104 + dma_info.buf = pos; 105 + prtd->params->ops->prepare(prtd->params->ch, &dma_info); 98 106 99 - if (ret == 0) { 100 - prtd->dma_loaded++; 101 - pos += prtd->dma_period; 102 - if (pos >= prtd->dma_end) 103 - pos = prtd->dma_start; 104 - } else 105 - break; 107 + prtd->dma_loaded++; 108 + pos += prtd->dma_period; 109 + if (pos >= prtd->dma_end) 110 + pos = prtd->dma_start; 106 111 } 107 112 108 113 prtd->dma_pos = pos; 109 114 } 110 115 111 - static void audio_buffdone(struct s3c2410_dma_chan *channel, 112 - void *dev_id, int size, 113 - enum s3c2410_dma_buffresult result) 116 + static void audio_buffdone(void *data) 114 117 { 115 - struct snd_pcm_substream *substream = dev_id; 116 - struct runtime_data *prtd; 118 + struct snd_pcm_substream *substream = data; 119 + struct runtime_data *prtd = substream->runtime->private_data; 117 120 118 121 pr_debug("Entered %s\n", __func__); 119 122 120 - if (result == S3C2410_RES_ABORT || result == S3C2410_RES_ERR) 121 - return; 123 + if (prtd->state & ST_RUNNING) { 124 + prtd->dma_pos += prtd->dma_period; 125 + if (prtd->dma_pos >= prtd->dma_end) 126 + prtd->dma_pos = prtd->dma_start; 122 127 123 - prtd = substream->runtime->private_data; 128 + if (substream) 129 + snd_pcm_period_elapsed(substream); 124 130 125 - if (substream) 126 - 
snd_pcm_period_elapsed(substream); 127 - 128 - spin_lock(&prtd->lock); 129 - if (prtd->state & ST_RUNNING && !s3c_dma_has_circular()) { 130 - prtd->dma_loaded--; 131 - dma_enqueue(substream); 131 + spin_lock(&prtd->lock); 132 + if (!samsung_dma_has_circular()) { 133 + prtd->dma_loaded--; 134 + dma_enqueue(substream); 135 + } 136 + spin_unlock(&prtd->lock); 132 137 } 133 - 134 - spin_unlock(&prtd->lock); 135 138 } 136 139 137 140 static int dma_hw_params(struct snd_pcm_substream *substream, ··· 145 144 unsigned long totbytes = params_buffer_bytes(params); 146 145 struct s3c_dma_params *dma = 147 146 snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); 148 - int ret = 0; 149 - 147 + struct samsung_dma_info dma_info; 150 148 151 149 pr_debug("Entered %s\n", __func__); 152 150 ··· 163 163 pr_debug("params %p, client %p, channel %d\n", prtd->params, 164 164 prtd->params->client, prtd->params->channel); 165 165 166 - ret = s3c2410_dma_request(prtd->params->channel, 167 - prtd->params->client, NULL); 166 + prtd->params->ops = samsung_dma_get_ops(); 168 167 169 - if (ret < 0) { 170 - printk(KERN_ERR "failed to get dma channel\n"); 171 - return ret; 172 - } 173 - 174 - /* use the circular buffering if we have it available. */ 175 - if (s3c_dma_has_circular()) 176 - s3c2410_dma_setflags(prtd->params->channel, 177 - S3C2410_DMAF_CIRCULAR); 168 + dma_info.cap = (samsung_dma_has_circular() ? 169 + DMA_CYCLIC : DMA_SLAVE); 170 + dma_info.client = prtd->params->client; 171 + dma_info.direction = 172 + (substream->stream == SNDRV_PCM_STREAM_PLAYBACK 173 + ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); 174 + dma_info.width = prtd->params->dma_size; 175 + dma_info.fifo = prtd->params->dma_addr; 176 + prtd->params->ch = prtd->params->ops->request( 177 + prtd->params->channel, &dma_info); 178 178 } 179 - 180 - s3c2410_dma_set_buffdone_fn(prtd->params->channel, 181 - audio_buffdone); 182 179 183 180 snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); 184 181 ··· 183 186 184 187 spin_lock_irq(&prtd->lock); 185 188 prtd->dma_loaded = 0; 186 - prtd->dma_limit = runtime->hw.periods_min; 187 189 prtd->dma_period = params_period_bytes(params); 188 190 prtd->dma_start = runtime->dma_addr; 189 191 prtd->dma_pos = prtd->dma_start; ··· 202 206 snd_pcm_set_runtime_buffer(substream, NULL); 203 207 204 208 if (prtd->params) { 205 - s3c2410_dma_free(prtd->params->channel, prtd->params->client); 209 + prtd->params->ops->release(prtd->params->ch, 210 + prtd->params->client); 206 211 prtd->params = NULL; 207 212 } 208 213 ··· 222 225 if (!prtd->params) 223 226 return 0; 224 227 225 - /* channel needs configuring for mem=>device, increment memory addr, 226 - * sync to pclk, half-word transfers to the IIS-FIFO. 
*/ 227 - if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { 228 - s3c2410_dma_devconfig(prtd->params->channel, 229 - S3C2410_DMASRC_MEM, 230 - prtd->params->dma_addr); 231 - } else { 232 - s3c2410_dma_devconfig(prtd->params->channel, 233 - S3C2410_DMASRC_HW, 234 - prtd->params->dma_addr); 235 - } 236 - 237 - s3c2410_dma_config(prtd->params->channel, 238 - prtd->params->dma_size); 239 - 240 228 /* flush the DMA channel */ 241 - s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_FLUSH); 229 + prtd->params->ops->flush(prtd->params->ch); 230 + 242 231 prtd->dma_loaded = 0; 243 232 prtd->dma_pos = prtd->dma_start; 244 233 ··· 248 265 case SNDRV_PCM_TRIGGER_RESUME: 249 266 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 250 267 prtd->state |= ST_RUNNING; 251 - s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_START); 268 + prtd->params->ops->trigger(prtd->params->ch); 252 269 break; 253 270 254 271 case SNDRV_PCM_TRIGGER_STOP: 255 272 case SNDRV_PCM_TRIGGER_SUSPEND: 256 273 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 257 274 prtd->state &= ~ST_RUNNING; 258 - s3c2410_dma_ctrl(prtd->params->channel, S3C2410_DMAOP_STOP); 275 + prtd->params->ops->stop(prtd->params->ch); 259 276 break; 260 277 261 278 default: ··· 274 291 struct snd_pcm_runtime *runtime = substream->runtime; 275 292 struct runtime_data *prtd = runtime->private_data; 276 293 unsigned long res; 277 - dma_addr_t src, dst; 278 294 279 295 pr_debug("Entered %s\n", __func__); 280 296 281 - spin_lock(&prtd->lock); 282 - s3c2410_dma_getposition(prtd->params->channel, &src, &dst); 297 + res = prtd->dma_pos - prtd->dma_start; 283 298 284 - if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) 285 - res = dst - prtd->dma_start; 286 - else 287 - res = src - prtd->dma_start; 288 - 289 - spin_unlock(&prtd->lock); 290 - 291 - pr_debug("Pointer %x %x\n", src, dst); 299 + pr_debug("Pointer offset: %lu\n", res); 292 300 293 301 /* we seem to be getting the odd error from the pcm library due 294 302 * to out-of-bounds pointers. 
this is maybe due to the dma engine
+3 -1
sound/soc/samsung/dma.h
··· 6 6 * Free Software Foundation; either version 2 of the License, or (at your 7 7 * option) any later version. 8 8 * 9 - * ALSA PCM interface for the Samsung S3C24xx CPU 9 + * ALSA PCM interface for the Samsung SoC 10 10 */ 11 11 12 12 #ifndef _S3C_AUDIO_H ··· 17 17 int channel; /* Channel ID */ 18 18 dma_addr_t dma_addr; 19 19 int dma_size; /* Size of the DMA transfer */ 20 + unsigned ch; 21 + struct samsung_dma_ops *ops; 20 22 }; 21 23 22 24 #endif