Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'ux500-dma40-for-arm-soc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson into next/drivers

From Linus Walleij:
Second set of DMA40 changes: refactorings and device tree
support for the DMA40. Now with MUSB and some platform
data removal.

* tag 'ux500-dma40-for-arm-soc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson:
dmaengine: ste_dma40: Fetch disabled channels from DT
dmaengine: ste_dma40: Fetch the number of physical channels from DT
ARM: ux500: Stop passing DMA platform data through AUXDATA
dmaengine: ste_dma40: Allow memcpy channels to be configured from DT
dmaengine: ste_dma40_ll: Replace meaningless register set with comment
dmaengine: ste_dma40: Convert data_width from register bit format to value
dmaengine: ste_dma40_ll: Use the BIT macro to replace ugly '(1 << x)'s
ARM: ux500: Remove recently unused stedma40_xfer_dir enums
dmaengine: ste_dma40: Replace ST-E's home-brew DMA direction defs with generic ones
ARM: ux500: Replace ST-E's home-brew DMA direction definition with the generic one
dmaengine: ste_dma40: Use the BIT macro to replace ugly '(1 << x)'s
ARM: ux500: Remove empty function u8500_of_init_devices()
ARM: ux500: Remove ux500-musb platform registration when booting with DT
usb: musb: ux500: add device tree probing support
usb: musb: ux500: attempt to find channels by name before using pdata
usb: musb: ux500: harden checks for platform data
usb: musb: ux500: take the dma_mask from coherent_dma_mask
usb: musb: ux500: move the MUSB HDRC configuration into the driver
usb: musb: ux500: move channel number knowledge into the driver

+374 -250
+4
Documentation/devicetree/bindings/dma/ste-dma40.txt
··· 6 6 - reg-names: Names of the above areas to use during resource look-up 7 7 - interrupt: Should contain the DMAC interrupt number 8 8 - #dma-cells: must be <3> 9 + - memcpy-channels: Channels to be used for memcpy 9 10 10 11 Optional properties: 11 12 - dma-channels: Number of channels supported by hardware - if not present 12 13 the driver will attempt to obtain the information from H/W 14 + - disabled-channels: Channels which can not be used 13 15 14 16 Example: 15 17 ··· 23 21 interrupts = <0 25 0x4>; 24 22 25 23 #dma-cells = <2>; 24 + memcpy-channels = <56 57 58 59 60>; 25 + disabled-channels = <12>; 26 26 dma-channels = <8>; 27 27 }; 28 28
+50
Documentation/devicetree/bindings/usb/ux500-usb.txt
··· 1 + Ux500 MUSB 2 + 3 + Required properties: 4 + - compatible : Should be "stericsson,db8500-musb" 5 + - reg : Offset and length of registers 6 + - interrupts : Interrupt; mode, number and trigger 7 + - dr_mode : Dual-role; either host mode "host", peripheral mode "peripheral" 8 + or both "otg" 9 + 10 + Optional properties: 11 + - dmas : A list of dma channels; 12 + dma-controller, event-line, fixed-channel, flags 13 + - dma-names : An ordered list of channel names affiliated to the above 14 + 15 + Example: 16 + 17 + usb_per5@a03e0000 { 18 + compatible = "stericsson,db8500-musb", "mentor,musb"; 19 + reg = <0xa03e0000 0x10000>; 20 + interrupts = <0 23 0x4>; 21 + interrupt-names = "mc"; 22 + 23 + dr_mode = "otg"; 24 + 25 + dmas = <&dma 38 0 0x2>, /* Logical - DevToMem */ 26 + <&dma 38 0 0x0>, /* Logical - MemToDev */ 27 + <&dma 37 0 0x2>, /* Logical - DevToMem */ 28 + <&dma 37 0 0x0>, /* Logical - MemToDev */ 29 + <&dma 36 0 0x2>, /* Logical - DevToMem */ 30 + <&dma 36 0 0x0>, /* Logical - MemToDev */ 31 + <&dma 19 0 0x2>, /* Logical - DevToMem */ 32 + <&dma 19 0 0x0>, /* Logical - MemToDev */ 33 + <&dma 18 0 0x2>, /* Logical - DevToMem */ 34 + <&dma 18 0 0x0>, /* Logical - MemToDev */ 35 + <&dma 17 0 0x2>, /* Logical - DevToMem */ 36 + <&dma 17 0 0x0>, /* Logical - MemToDev */ 37 + <&dma 16 0 0x2>, /* Logical - DevToMem */ 38 + <&dma 16 0 0x0>, /* Logical - MemToDev */ 39 + <&dma 39 0 0x2>, /* Logical - DevToMem */ 40 + <&dma 39 0 0x0>; /* Logical - MemToDev */ 41 + 42 + dma-names = "iep_1_9", "oep_1_9", 43 + "iep_2_10", "oep_2_10", 44 + "iep_3_11", "oep_3_11", 45 + "iep_4_12", "oep_4_12", 46 + "iep_5_13", "oep_5_13", 47 + "iep_6_14", "oep_6_14", 48 + "iep_7_15", "oep_7_15", 49 + "iep_8", "oep_8"; 50 + };
+6 -6
arch/arm/mach-ux500/board-mop500-audio.c
··· 21 21 22 22 static struct stedma40_chan_cfg msp0_dma_rx = { 23 23 .high_priority = true, 24 - .dir = STEDMA40_PERIPH_TO_MEM, 24 + .dir = DMA_DEV_TO_MEM, 25 25 .dev_type = DB8500_DMA_DEV31_MSP0_SLIM0_CH0, 26 26 }; 27 27 28 28 static struct stedma40_chan_cfg msp0_dma_tx = { 29 29 .high_priority = true, 30 - .dir = STEDMA40_MEM_TO_PERIPH, 30 + .dir = DMA_MEM_TO_DEV, 31 31 .dev_type = DB8500_DMA_DEV31_MSP0_SLIM0_CH0, 32 32 }; 33 33 ··· 39 39 40 40 static struct stedma40_chan_cfg msp1_dma_rx = { 41 41 .high_priority = true, 42 - .dir = STEDMA40_PERIPH_TO_MEM, 42 + .dir = DMA_DEV_TO_MEM, 43 43 .dev_type = DB8500_DMA_DEV30_MSP3, 44 44 }; 45 45 46 46 static struct stedma40_chan_cfg msp1_dma_tx = { 47 47 .high_priority = true, 48 - .dir = STEDMA40_MEM_TO_PERIPH, 48 + .dir = DMA_MEM_TO_DEV, 49 49 .dev_type = DB8500_DMA_DEV30_MSP1, 50 50 }; 51 51 ··· 57 57 58 58 static struct stedma40_chan_cfg msp2_dma_rx = { 59 59 .high_priority = true, 60 - .dir = STEDMA40_PERIPH_TO_MEM, 60 + .dir = DMA_DEV_TO_MEM, 61 61 .dev_type = DB8500_DMA_DEV14_MSP2, 62 62 }; 63 63 64 64 static struct stedma40_chan_cfg msp2_dma_tx = { 65 65 .high_priority = true, 66 - .dir = STEDMA40_MEM_TO_PERIPH, 66 + .dir = DMA_MEM_TO_DEV, 67 67 .dev_type = DB8500_DMA_DEV14_MSP2, 68 68 .use_fixed_channel = true, 69 69 .phy_channel = 1,
+8 -8
arch/arm/mach-ux500/board-mop500-sdi.c
··· 34 34 #ifdef CONFIG_STE_DMA40 35 35 struct stedma40_chan_cfg mop500_sdi0_dma_cfg_rx = { 36 36 .mode = STEDMA40_MODE_LOGICAL, 37 - .dir = STEDMA40_PERIPH_TO_MEM, 37 + .dir = DMA_DEV_TO_MEM, 38 38 .dev_type = DB8500_DMA_DEV29_SD_MM0, 39 39 }; 40 40 41 41 static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = { 42 42 .mode = STEDMA40_MODE_LOGICAL, 43 - .dir = STEDMA40_MEM_TO_PERIPH, 43 + .dir = DMA_MEM_TO_DEV, 44 44 .dev_type = DB8500_DMA_DEV29_SD_MM0, 45 45 }; 46 46 #endif ··· 81 81 #ifdef CONFIG_STE_DMA40 82 82 static struct stedma40_chan_cfg sdi1_dma_cfg_rx = { 83 83 .mode = STEDMA40_MODE_LOGICAL, 84 - .dir = STEDMA40_PERIPH_TO_MEM, 84 + .dir = DMA_DEV_TO_MEM, 85 85 .dev_type = DB8500_DMA_DEV32_SD_MM1, 86 86 }; 87 87 88 88 static struct stedma40_chan_cfg sdi1_dma_cfg_tx = { 89 89 .mode = STEDMA40_MODE_LOGICAL, 90 - .dir = STEDMA40_MEM_TO_PERIPH, 90 + .dir = DMA_MEM_TO_DEV, 91 91 .dev_type = DB8500_DMA_DEV32_SD_MM1, 92 92 }; 93 93 #endif ··· 112 112 #ifdef CONFIG_STE_DMA40 113 113 struct stedma40_chan_cfg mop500_sdi2_dma_cfg_rx = { 114 114 .mode = STEDMA40_MODE_LOGICAL, 115 - .dir = STEDMA40_PERIPH_TO_MEM, 115 + .dir = DMA_DEV_TO_MEM, 116 116 .dev_type = DB8500_DMA_DEV28_SD_MM2, 117 117 }; 118 118 119 119 static struct stedma40_chan_cfg mop500_sdi2_dma_cfg_tx = { 120 120 .mode = STEDMA40_MODE_LOGICAL, 121 - .dir = STEDMA40_MEM_TO_PERIPH, 121 + .dir = DMA_MEM_TO_DEV, 122 122 .dev_type = DB8500_DMA_DEV28_SD_MM2, 123 123 }; 124 124 #endif ··· 144 144 #ifdef CONFIG_STE_DMA40 145 145 struct stedma40_chan_cfg mop500_sdi4_dma_cfg_rx = { 146 146 .mode = STEDMA40_MODE_LOGICAL, 147 - .dir = STEDMA40_PERIPH_TO_MEM, 147 + .dir = DMA_DEV_TO_MEM, 148 148 .dev_type = DB8500_DMA_DEV42_SD_MM4, 149 149 }; 150 150 151 151 static struct stedma40_chan_cfg mop500_sdi4_dma_cfg_tx = { 152 152 .mode = STEDMA40_MODE_LOGICAL, 153 - .dir = STEDMA40_MEM_TO_PERIPH, 153 + .dir = DMA_MEM_TO_DEV, 154 154 .dev_type = DB8500_DMA_DEV42_SD_MM4, 155 155 }; 156 156 #endif
+11 -11
arch/arm/mach-ux500/board-mop500.c
··· 424 424 425 425 static struct cryp_platform_data u8500_cryp1_platform_data = { 426 426 .mem_to_engine = { 427 - .dir = STEDMA40_MEM_TO_PERIPH, 427 + .dir = DMA_MEM_TO_DEV, 428 428 .dev_type = DB8500_DMA_DEV48_CAC1, 429 429 .mode = STEDMA40_MODE_LOGICAL, 430 430 }, 431 431 .engine_to_mem = { 432 - .dir = STEDMA40_PERIPH_TO_MEM, 432 + .dir = DMA_DEV_TO_MEM, 433 433 .dev_type = DB8500_DMA_DEV48_CAC1, 434 434 .mode = STEDMA40_MODE_LOGICAL, 435 435 } 436 436 }; 437 437 438 438 static struct stedma40_chan_cfg u8500_hash_dma_cfg_tx = { 439 - .dir = STEDMA40_MEM_TO_PERIPH, 439 + .dir = DMA_MEM_TO_DEV, 440 440 .dev_type = DB8500_DMA_DEV50_HAC1_TX, 441 441 .mode = STEDMA40_MODE_LOGICAL, 442 442 }; ··· 455 455 #ifdef CONFIG_STE_DMA40 456 456 static struct stedma40_chan_cfg ssp0_dma_cfg_rx = { 457 457 .mode = STEDMA40_MODE_LOGICAL, 458 - .dir = STEDMA40_PERIPH_TO_MEM, 458 + .dir = DMA_DEV_TO_MEM, 459 459 .dev_type = DB8500_DMA_DEV8_SSP0, 460 460 }; 461 461 462 462 static struct stedma40_chan_cfg ssp0_dma_cfg_tx = { 463 463 .mode = STEDMA40_MODE_LOGICAL, 464 - .dir = STEDMA40_MEM_TO_PERIPH, 464 + .dir = DMA_MEM_TO_DEV, 465 465 .dev_type = DB8500_DMA_DEV8_SSP0, 466 466 }; 467 467 #endif ··· 490 490 #ifdef CONFIG_STE_DMA40 491 491 static struct stedma40_chan_cfg uart0_dma_cfg_rx = { 492 492 .mode = STEDMA40_MODE_LOGICAL, 493 - .dir = STEDMA40_PERIPH_TO_MEM, 493 + .dir = DMA_DEV_TO_MEM, 494 494 .dev_type = DB8500_DMA_DEV13_UART0, 495 495 }; 496 496 497 497 static struct stedma40_chan_cfg uart0_dma_cfg_tx = { 498 498 .mode = STEDMA40_MODE_LOGICAL, 499 - .dir = STEDMA40_MEM_TO_PERIPH, 499 + .dir = DMA_MEM_TO_DEV, 500 500 .dev_type = DB8500_DMA_DEV13_UART0, 501 501 }; 502 502 503 503 static struct stedma40_chan_cfg uart1_dma_cfg_rx = { 504 504 .mode = STEDMA40_MODE_LOGICAL, 505 - .dir = STEDMA40_PERIPH_TO_MEM, 505 + .dir = DMA_DEV_TO_MEM, 506 506 .dev_type = DB8500_DMA_DEV12_UART1, 507 507 }; 508 508 509 509 static struct stedma40_chan_cfg uart1_dma_cfg_tx = { 510 510 .mode = 
STEDMA40_MODE_LOGICAL, 511 - .dir = STEDMA40_MEM_TO_PERIPH, 511 + .dir = DMA_MEM_TO_DEV, 512 512 .dev_type = DB8500_DMA_DEV12_UART1, 513 513 }; 514 514 515 515 static struct stedma40_chan_cfg uart2_dma_cfg_rx = { 516 516 .mode = STEDMA40_MODE_LOGICAL, 517 - .dir = STEDMA40_PERIPH_TO_MEM, 517 + .dir = DMA_DEV_TO_MEM, 518 518 .dev_type = DB8500_DMA_DEV11_UART2, 519 519 }; 520 520 521 521 static struct stedma40_chan_cfg uart2_dma_cfg_tx = { 522 522 .mode = STEDMA40_MODE_LOGICAL, 523 - .dir = STEDMA40_MEM_TO_PERIPH, 523 + .dir = DMA_MEM_TO_DEV, 524 524 .dev_type = DB8500_DMA_DEV11_UART2, 525 525 }; 526 526 #endif
+2 -17
arch/arm/mach-ux500/cpu-db8500.c
··· 215 215 } 216 216 217 217 #ifdef CONFIG_MACH_UX500_DT 218 - 219 - /* TODO: Once all pieces are DT:ed, remove completely. */ 220 - static struct device * __init u8500_of_init_devices(void) 221 - { 222 - struct device *parent = db8500_soc_device_init(); 223 - 224 - db8500_add_usb(parent, usb_db8500_dma_cfg, usb_db8500_dma_cfg); 225 - 226 - return parent; 227 - } 228 - 229 218 static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = { 230 219 /* Requires call-back bindings. */ 231 220 OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata), ··· 258 269 OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80125000, 259 270 "ux500-msp-i2s.3", &msp3_platform_data), 260 271 /* Requires clock name bindings and channel address lookup table. */ 261 - OF_DEV_AUXDATA("stericsson,db8500-dma40", 0x801C0000, 262 - "dma40.0", &dma40_plat_data), 272 + OF_DEV_AUXDATA("stericsson,db8500-dma40", 0x801C0000, "dma40.0", NULL), 263 273 {}, 264 274 }; 265 275 ··· 272 284 273 285 static void __init u8500_init_machine(void) 274 286 { 275 - struct device *parent = NULL; 287 + struct device *parent = db8500_soc_device_init(); 276 288 277 289 /* Pinmaps must be in place before devices register */ 278 290 if (of_machine_is_compatible("st-ericsson,mop500")) ··· 284 296 hrefv60_pinmaps_init(); 285 297 else if (of_machine_is_compatible("st-ericsson,ccu9540")) {} 286 298 /* TODO: Add pinmaps for ccu9540 board. */ 287 - 288 - /* TODO: Export SoC, USB, cpu-freq and DMA40 */ 289 - parent = u8500_of_init_devices(); 290 299 291 300 /* automatically probe child nodes of db8500 device */ 292 301 of_platform_populate(NULL, u8500_local_bus_nodes, u8500_auxdata_lookup, parent);
+8 -21
arch/arm/mach-ux500/usb.c
··· 14 14 15 15 #define MUSB_DMA40_RX_CH { \ 16 16 .mode = STEDMA40_MODE_LOGICAL, \ 17 - .dir = STEDMA40_PERIPH_TO_MEM, \ 17 + .dir = DMA_DEV_TO_MEM, \ 18 18 } 19 19 20 20 #define MUSB_DMA40_TX_CH { \ 21 21 .mode = STEDMA40_MODE_LOGICAL, \ 22 - .dir = STEDMA40_MEM_TO_PERIPH, \ 22 + .dir = DMA_MEM_TO_DEV, \ 23 23 } 24 24 25 - static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_CHANNELS] 25 + static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS] 26 26 = { 27 27 MUSB_DMA40_RX_CH, 28 28 MUSB_DMA40_RX_CH, ··· 34 34 MUSB_DMA40_RX_CH 35 35 }; 36 36 37 - static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_TX_CHANNELS] 37 + static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS] 38 38 = { 39 39 MUSB_DMA40_TX_CH, 40 40 MUSB_DMA40_TX_CH, ··· 46 46 MUSB_DMA40_TX_CH, 47 47 }; 48 48 49 - static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_CHANNELS] = { 49 + static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS] = { 50 50 &musb_dma_rx_ch[0], 51 51 &musb_dma_rx_ch[1], 52 52 &musb_dma_rx_ch[2], ··· 57 57 &musb_dma_rx_ch[7] 58 58 }; 59 59 60 - static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_TX_CHANNELS] = { 60 + static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS] = { 61 61 &musb_dma_tx_ch[0], 62 62 &musb_dma_tx_ch[1], 63 63 &musb_dma_tx_ch[2], ··· 71 71 static struct ux500_musb_board_data musb_board_data = { 72 72 .dma_rx_param_array = ux500_dma_rx_param_array, 73 73 .dma_tx_param_array = ux500_dma_tx_param_array, 74 - .num_rx_channels = UX500_MUSB_DMA_NUM_RX_CHANNELS, 75 - .num_tx_channels = UX500_MUSB_DMA_NUM_TX_CHANNELS, 76 74 .dma_filter = stedma40_filter, 77 - }; 78 - 79 - static u64 ux500_musb_dmamask = DMA_BIT_MASK(32); 80 - 81 - static struct musb_hdrc_config musb_hdrc_config = { 82 - .multipoint = true, 83 - .dyn_fifo = true, 84 - .num_eps = 16, 85 - .ram_bits = 16, 86 75 }; 87 76 88 77 static struct musb_hdrc_platform_data 
musb_platform_data = { 89 78 .mode = MUSB_OTG, 90 - .config = &musb_hdrc_config, 91 79 .board_data = &musb_board_data, 92 80 }; 93 81 ··· 96 108 .id = 0, 97 109 .dev = { 98 110 .platform_data = &musb_platform_data, 99 - .dma_mask = &ux500_musb_dmamask, 100 111 .coherent_dma_mask = DMA_BIT_MASK(32), 101 112 }, 102 113 .num_resources = ARRAY_SIZE(usb_resources), ··· 106 119 { 107 120 u32 idx; 108 121 109 - for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_CHANNELS; idx++) 122 + for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; idx++) 110 123 musb_dma_rx_ch[idx].dev_type = dev_type[idx]; 111 124 } 112 125 ··· 114 127 { 115 128 u32 idx; 116 129 117 - for (idx = 0; idx < UX500_MUSB_DMA_NUM_TX_CHANNELS; idx++) 130 + for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; idx++) 118 131 musb_dma_tx_ch[idx].dev_type = dev_type[idx]; 119 132 } 120 133
+115 -84
drivers/dma/ste_dma40.c
··· 54 54 #define MAX_LCLA_ALLOC_ATTEMPTS 256 55 55 56 56 /* Bit markings for allocation map */ 57 - #define D40_ALLOC_FREE (1 << 31) 58 - #define D40_ALLOC_PHY (1 << 30) 57 + #define D40_ALLOC_FREE BIT(31) 58 + #define D40_ALLOC_PHY BIT(30) 59 59 #define D40_ALLOC_LOG_FREE 0 60 + 61 + #define D40_MEMCPY_MAX_CHANS 8 60 62 61 63 /* Reserved event lines for memcpy only. */ 62 64 #define DB8500_DMA_MEMCPY_EV_0 51 ··· 80 78 /* Default configuration for physcial memcpy */ 81 79 struct stedma40_chan_cfg dma40_memcpy_conf_phy = { 82 80 .mode = STEDMA40_MODE_PHYSICAL, 83 - .dir = STEDMA40_MEM_TO_MEM, 81 + .dir = DMA_MEM_TO_MEM, 84 82 85 - .src_info.data_width = STEDMA40_BYTE_WIDTH, 83 + .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 86 84 .src_info.psize = STEDMA40_PSIZE_PHY_1, 87 85 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, 88 86 89 - .dst_info.data_width = STEDMA40_BYTE_WIDTH, 87 + .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 90 88 .dst_info.psize = STEDMA40_PSIZE_PHY_1, 91 89 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, 92 90 }; ··· 94 92 /* Default configuration for logical memcpy */ 95 93 struct stedma40_chan_cfg dma40_memcpy_conf_log = { 96 94 .mode = STEDMA40_MODE_LOGICAL, 97 - .dir = STEDMA40_MEM_TO_MEM, 95 + .dir = DMA_MEM_TO_MEM, 98 96 99 - .src_info.data_width = STEDMA40_BYTE_WIDTH, 97 + .src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 100 98 .src_info.psize = STEDMA40_PSIZE_LOG_1, 101 99 .src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, 102 100 103 - .dst_info.data_width = STEDMA40_BYTE_WIDTH, 101 + .dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE, 104 102 .dst_info.psize = STEDMA40_PSIZE_LOG_1, 105 103 .dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL, 106 104 }; ··· 524 522 * @phy_start: Physical memory start of the DMA registers. 525 523 * @phy_size: Size of the DMA register map. 526 524 * @irq: The IRQ number. 525 + * @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem 526 + * transfers). 
527 527 * @num_phy_chans: The number of physical channels. Read from HW. This 528 528 * is the number of available channels for this driver, not counting "Secure 529 529 * mode" allocated physical channels. ··· 569 565 phys_addr_t phy_start; 570 566 resource_size_t phy_size; 571 567 int irq; 568 + int num_memcpy_chans; 572 569 int num_phy_chans; 573 570 int num_log_chans; 574 571 struct device_dma_parameters dma_parms; ··· 848 843 * that uses linked lists. 849 844 */ 850 845 if (!(chan->phy_chan->use_soft_lli && 851 - chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)) 846 + chan->dma_cfg.dir == DMA_DEV_TO_MEM)) 852 847 curr_lcla = d40_lcla_alloc_one(chan, desc); 853 848 854 849 first_lcla = curr_lcla; ··· 1010 1005 1011 1006 /* 1012 1007 * The dma only supports transmitting packages up to 1013 - * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of 1014 - * dma elements required to send the entire sg list 1008 + * STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes. 1009 + * 1010 + * Calculate the total number of dma elements required to send the entire sg list. 
1015 1011 */ 1016 1012 static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2) 1017 1013 { 1018 1014 int dmalen; 1019 1015 u32 max_w = max(data_width1, data_width2); 1020 1016 u32 min_w = min(data_width1, data_width2); 1021 - u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); 1017 + u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); 1022 1018 1023 1019 if (seg_max > STEDMA40_MAX_SEG_SIZE) 1024 - seg_max -= (1 << max_w); 1020 + seg_max -= max_w; 1025 1021 1026 - if (!IS_ALIGNED(size, 1 << max_w)) 1022 + if (!IS_ALIGNED(size, max_w)) 1027 1023 return -EINVAL; 1028 1024 1029 1025 if (size <= seg_max) ··· 1317 1311 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type); 1318 1312 1319 1313 /* Enable event line connected to device (or memcpy) */ 1320 - if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 1321 - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 1314 + if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || 1315 + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 1322 1316 __d40_config_set_event(d40c, event_type, event, 1323 1317 D40_CHAN_REG_SSLNK); 1324 1318 1325 - if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) 1319 + if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM) 1326 1320 __d40_config_set_event(d40c, event_type, event, 1327 1321 D40_CHAN_REG_SDLNK); 1328 1322 } ··· 1470 1464 >> D40_SREG_ELEM_PHY_ECNT_POS; 1471 1465 } 1472 1466 1473 - return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); 1467 + return num_elt * d40c->dma_cfg.dst_info.data_width; 1474 1468 } 1475 1469 1476 1470 static bool d40_tx_is_linked(struct d40_chan *d40c) ··· 1746 1740 } 1747 1741 1748 1742 /* ACK interrupt */ 1749 - writel(1 << idx, base->virtbase + il[row].clr); 1743 + writel(BIT(idx), base->virtbase + il[row].clr); 1750 1744 1751 1745 spin_lock(&d40c->lock); 1752 1746 ··· 1782 1776 res = -EINVAL; 1783 1777 } 1784 1778 1785 - if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { 1779 + if (conf->dir == DMA_DEV_TO_DEV) { 1786 1780 /* 1787 1781 * DMAC HW supports it. 
Will be added to this driver, 1788 1782 * in case any dma client requires it. ··· 1792 1786 } 1793 1787 1794 1788 if (d40_psize_2_burst_size(is_log, conf->src_info.psize) * 1795 - (1 << conf->src_info.data_width) != 1789 + conf->src_info.data_width != 1796 1790 d40_psize_2_burst_size(is_log, conf->dst_info.psize) * 1797 - (1 << conf->dst_info.data_width)) { 1791 + conf->dst_info.data_width) { 1798 1792 /* 1799 1793 * The DMAC hardware only supports 1800 1794 * src (burst x width) == dst (burst x width) ··· 1836 1830 if (phy->allocated_src == D40_ALLOC_FREE) 1837 1831 phy->allocated_src = D40_ALLOC_LOG_FREE; 1838 1832 1839 - if (!(phy->allocated_src & (1 << log_event_line))) { 1840 - phy->allocated_src |= 1 << log_event_line; 1833 + if (!(phy->allocated_src & BIT(log_event_line))) { 1834 + phy->allocated_src |= BIT(log_event_line); 1841 1835 goto found; 1842 1836 } else 1843 1837 goto not_found; ··· 1848 1842 if (phy->allocated_dst == D40_ALLOC_FREE) 1849 1843 phy->allocated_dst = D40_ALLOC_LOG_FREE; 1850 1844 1851 - if (!(phy->allocated_dst & (1 << log_event_line))) { 1852 - phy->allocated_dst |= 1 << log_event_line; 1845 + if (!(phy->allocated_dst & BIT(log_event_line))) { 1846 + phy->allocated_dst |= BIT(log_event_line); 1853 1847 goto found; 1854 1848 } else 1855 1849 goto not_found; ··· 1879 1873 1880 1874 /* Logical channel */ 1881 1875 if (is_src) { 1882 - phy->allocated_src &= ~(1 << log_event_line); 1876 + phy->allocated_src &= ~BIT(log_event_line); 1883 1877 if (phy->allocated_src == D40_ALLOC_LOG_FREE) 1884 1878 phy->allocated_src = D40_ALLOC_FREE; 1885 1879 } else { 1886 - phy->allocated_dst &= ~(1 << log_event_line); 1880 + phy->allocated_dst &= ~BIT(log_event_line); 1887 1881 if (phy->allocated_dst == D40_ALLOC_LOG_FREE) 1888 1882 phy->allocated_dst = D40_ALLOC_FREE; 1889 1883 } ··· 1913 1907 phys = d40c->base->phy_res; 1914 1908 num_phy_chans = d40c->base->num_phy_chans; 1915 1909 1916 - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 1910 + if 
(d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { 1917 1911 log_num = 2 * dev_type; 1918 1912 is_src = true; 1919 - } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 1920 - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1913 + } else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 1914 + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 1921 1915 /* dst event lines are used for logical memcpy */ 1922 1916 log_num = 2 * dev_type + 1; 1923 1917 is_src = false; ··· 1928 1922 event_line = D40_TYPE_TO_EVENT(dev_type); 1929 1923 1930 1924 if (!is_log) { 1931 - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 1925 + if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 1932 1926 /* Find physical half channel */ 1933 1927 if (d40c->dma_cfg.use_fixed_channel) { 1934 1928 i = d40c->dma_cfg.phy_channel; ··· 2076 2070 return -EINVAL; 2077 2071 } 2078 2072 2079 - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 2080 - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) 2073 + if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 2074 + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) 2081 2075 is_src = false; 2082 - else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 2076 + else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) 2083 2077 is_src = true; 2084 2078 else { 2085 2079 chan_err(d40c, "Unknown direction\n"); ··· 2141 2135 goto _exit; 2142 2136 } 2143 2137 2144 - if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || 2145 - d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { 2138 + if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV || 2139 + d40c->dma_cfg.dir == DMA_MEM_TO_MEM) { 2146 2140 status = readl(chanbase + D40_CHAN_REG_SDLNK); 2147 - } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { 2141 + } else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) { 2148 2142 status = readl(chanbase + D40_CHAN_REG_SSLNK); 2149 2143 } else { 2150 2144 chan_err(d40c, "Unknown direction\n"); ··· 2364 2358 u32 rtreg; 2365 2359 u32 event = D40_TYPE_TO_EVENT(dev_type); 2366 2360 u32 group = D40_TYPE_TO_GROUP(dev_type); 2367 - u32 bit = 1 << event; 2361 + u32 bit = BIT(event); 
2368 2362 u32 prioreg; 2369 2363 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac; 2370 2364 ··· 2395 2389 if (d40c->base->rev < 3) 2396 2390 return; 2397 2391 2398 - if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || 2399 - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 2392 + if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) || 2393 + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 2400 2394 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true); 2401 2395 2402 - if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) || 2403 - (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) 2396 + if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) || 2397 + (d40c->dma_cfg.dir == DMA_DEV_TO_DEV)) 2404 2398 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false); 2405 2399 } 2406 2400 ··· 2431 2425 2432 2426 switch (D40_DT_FLAGS_DIR(flags)) { 2433 2427 case 0: 2434 - cfg.dir = STEDMA40_MEM_TO_PERIPH; 2428 + cfg.dir = DMA_MEM_TO_DEV; 2435 2429 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); 2436 2430 break; 2437 2431 case 1: 2438 - cfg.dir = STEDMA40_PERIPH_TO_MEM; 2432 + cfg.dir = DMA_DEV_TO_MEM; 2439 2433 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags); 2440 2434 break; 2441 2435 } ··· 2481 2475 d40_set_prio_realtime(d40c); 2482 2476 2483 2477 if (chan_is_logical(d40c)) { 2484 - if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) 2478 + if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) 2485 2479 d40c->lcpa = d40c->base->lcpa_base + 2486 2480 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE; 2487 2481 else ··· 2681 2675 static int 2682 2676 dma40_config_to_halfchannel(struct d40_chan *d40c, 2683 2677 struct stedma40_half_channel_info *info, 2684 - enum dma_slave_buswidth width, 2685 2678 u32 maxburst) 2686 2679 { 2687 - enum stedma40_periph_data_width addr_width; 2688 2680 int psize; 2689 - 2690 - switch (width) { 2691 - case DMA_SLAVE_BUSWIDTH_1_BYTE: 2692 - addr_width = STEDMA40_BYTE_WIDTH; 2693 - break; 2694 - case DMA_SLAVE_BUSWIDTH_2_BYTES: 2695 - addr_width = STEDMA40_HALFWORD_WIDTH; 2696 - break; 
2697 - case DMA_SLAVE_BUSWIDTH_4_BYTES: 2698 - addr_width = STEDMA40_WORD_WIDTH; 2699 - break; 2700 - case DMA_SLAVE_BUSWIDTH_8_BYTES: 2701 - addr_width = STEDMA40_DOUBLEWORD_WIDTH; 2702 - break; 2703 - default: 2704 - dev_err(d40c->base->dev, 2705 - "illegal peripheral address width " 2706 - "requested (%d)\n", 2707 - width); 2708 - return -EINVAL; 2709 - } 2710 2681 2711 2682 if (chan_is_logical(d40c)) { 2712 2683 if (maxburst >= 16) ··· 2705 2722 psize = STEDMA40_PSIZE_PHY_1; 2706 2723 } 2707 2724 2708 - info->data_width = addr_width; 2709 2725 info->psize = psize; 2710 2726 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL; 2711 2727 ··· 2730 2748 if (config->direction == DMA_DEV_TO_MEM) { 2731 2749 config_addr = config->src_addr; 2732 2750 2733 - if (cfg->dir != STEDMA40_PERIPH_TO_MEM) 2751 + if (cfg->dir != DMA_DEV_TO_MEM) 2734 2752 dev_dbg(d40c->base->dev, 2735 2753 "channel was not configured for peripheral " 2736 2754 "to memory transfer (%d) overriding\n", 2737 2755 cfg->dir); 2738 - cfg->dir = STEDMA40_PERIPH_TO_MEM; 2756 + cfg->dir = DMA_DEV_TO_MEM; 2739 2757 2740 2758 /* Configure the memory side */ 2741 2759 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ··· 2746 2764 } else if (config->direction == DMA_MEM_TO_DEV) { 2747 2765 config_addr = config->dst_addr; 2748 2766 2749 - if (cfg->dir != STEDMA40_MEM_TO_PERIPH) 2767 + if (cfg->dir != DMA_MEM_TO_DEV) 2750 2768 dev_dbg(d40c->base->dev, 2751 2769 "channel was not configured for memory " 2752 2770 "to peripheral transfer (%d) overriding\n", 2753 2771 cfg->dir); 2754 - cfg->dir = STEDMA40_MEM_TO_PERIPH; 2772 + cfg->dir = DMA_MEM_TO_DEV; 2755 2773 2756 2774 /* Configure the memory side */ 2757 2775 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) ··· 2788 2806 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width; 2789 2807 } 2790 2808 2809 + /* Only valid widths are; 1, 2, 4 and 8. 
*/ 2810 + if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2811 + src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2812 + dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED || 2813 + dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES || 2814 + ((src_addr_width > 1) && (src_addr_width & 1)) || 2815 + ((dst_addr_width > 1) && (dst_addr_width & 1))) 2816 + return -EINVAL; 2817 + 2818 + cfg->src_info.data_width = src_addr_width; 2819 + cfg->dst_info.data_width = dst_addr_width; 2820 + 2791 2821 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info, 2792 - src_addr_width, 2793 2822 src_maxburst); 2794 2823 if (ret) 2795 2824 return ret; 2796 2825 2797 2826 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info, 2798 - dst_addr_width, 2799 2827 dst_maxburst); 2800 2828 if (ret) 2801 2829 return ret; ··· 2945 2953 } 2946 2954 2947 2955 d40_chan_init(base, &base->dma_memcpy, base->log_chans, 2948 - base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels)); 2956 + base->num_log_chans, base->num_memcpy_chans); 2949 2957 2950 2958 dma_cap_zero(base->dma_memcpy.cap_mask); 2951 2959 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); ··· 3146 3154 struct d40_base *base = NULL; 3147 3155 int num_log_chans = 0; 3148 3156 int num_phy_chans; 3157 + int num_memcpy_chans; 3149 3158 int clk_ret = -EINVAL; 3150 3159 int i; 3151 3160 u32 pid; ··· 3217 3224 else 3218 3225 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; 3219 3226 3227 + /* The number of channels used for memcpy */ 3228 + if (plat_data->num_of_memcpy_chans) 3229 + num_memcpy_chans = plat_data->num_of_memcpy_chans; 3230 + else 3231 + num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels); 3232 + 3220 3233 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY; 3221 3234 3222 3235 dev_info(&pdev->dev, ··· 3230 3231 rev, res->start, num_phy_chans, num_log_chans); 3231 3232 3232 3233 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + 3233 - (num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) * 3234 + 
(num_phy_chans + num_log_chans + num_memcpy_chans) * 3234 3235 sizeof(struct d40_chan), GFP_KERNEL); 3235 3236 3236 3237 if (base == NULL) { ··· 3240 3241 3241 3242 base->rev = rev; 3242 3243 base->clk = clk; 3244 + base->num_memcpy_chans = num_memcpy_chans; 3243 3245 base->num_phy_chans = num_phy_chans; 3244 3246 base->num_log_chans = num_log_chans; 3245 3247 base->phy_start = res->start; ··· 3484 3484 struct device_node *np) 3485 3485 { 3486 3486 struct stedma40_platform_data *pdata; 3487 - 3488 - /* 3489 - * FIXME: Fill in this routine as more support is added. 3490 - * First platform enabled (u8500) doens't need any extra 3491 - * properties to run, so this is fairly sparce currently. 3492 - */ 3487 + int num_phy = 0, num_memcpy = 0, num_disabled = 0; 3488 + const const __be32 *list; 3493 3489 3494 3490 pdata = devm_kzalloc(&pdev->dev, 3495 3491 sizeof(struct stedma40_platform_data), 3496 3492 GFP_KERNEL); 3497 3493 if (!pdata) 3498 3494 return -ENOMEM; 3495 + 3496 + /* If absent this value will be obtained from h/w. 
*/ 3497 + of_property_read_u32(np, "dma-channels", &num_phy); 3498 + if (num_phy > 0) 3499 + pdata->num_of_phy_chans = num_phy; 3500 + 3501 + list = of_get_property(np, "memcpy-channels", &num_memcpy); 3502 + num_memcpy /= sizeof(*list); 3503 + 3504 + if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) { 3505 + d40_err(&pdev->dev, 3506 + "Invalid number of memcpy channels specified (%d)\n", 3507 + num_memcpy); 3508 + return -EINVAL; 3509 + } 3510 + pdata->num_of_memcpy_chans = num_memcpy; 3511 + 3512 + of_property_read_u32_array(np, "memcpy-channels", 3513 + dma40_memcpy_channels, 3514 + num_memcpy); 3515 + 3516 + list = of_get_property(np, "disabled-channels", &num_disabled); 3517 + num_disabled /= sizeof(*list); 3518 + 3519 + if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) { 3520 + d40_err(&pdev->dev, 3521 + "Invalid number of disabled channels specified (%d)\n", 3522 + num_disabled); 3523 + return -EINVAL; 3524 + } 3525 + 3526 + of_property_read_u32_array(np, "disabled-channels", 3527 + pdata->disabled_channels, 3528 + num_disabled); 3529 + pdata->disabled_channels[num_disabled] = -1; 3499 3530 3500 3531 pdev->dev.platform_data = pdata; 3501 3532
+67 -50
drivers/dma/ste_dma40_ll.c
··· 10 10 11 11 #include "ste_dma40_ll.h" 12 12 13 + u8 d40_width_to_bits(enum dma_slave_buswidth width) 14 + { 15 + if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) 16 + return STEDMA40_ESIZE_8_BIT; 17 + else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) 18 + return STEDMA40_ESIZE_16_BIT; 19 + else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES) 20 + return STEDMA40_ESIZE_64_BIT; 21 + else 22 + return STEDMA40_ESIZE_32_BIT; 23 + } 24 + 13 25 /* Sets up proper LCSP1 and LCSP3 register for a logical channel */ 14 26 void d40_log_cfg(struct stedma40_chan_cfg *cfg, 15 27 u32 *lcsp1, u32 *lcsp3) ··· 30 18 u32 l1 = 0; /* src */ 31 19 32 20 /* src is mem? -> increase address pos */ 33 - if (cfg->dir == STEDMA40_MEM_TO_PERIPH || 34 - cfg->dir == STEDMA40_MEM_TO_MEM) 35 - l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS; 21 + if (cfg->dir == DMA_MEM_TO_DEV || 22 + cfg->dir == DMA_MEM_TO_MEM) 23 + l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS); 36 24 37 25 /* dst is mem? -> increase address pos */ 38 - if (cfg->dir == STEDMA40_PERIPH_TO_MEM || 39 - cfg->dir == STEDMA40_MEM_TO_MEM) 40 - l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS; 26 + if (cfg->dir == DMA_DEV_TO_MEM || 27 + cfg->dir == DMA_MEM_TO_MEM) 28 + l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS); 41 29 42 30 /* src is hw? -> master port 1 */ 43 - if (cfg->dir == STEDMA40_PERIPH_TO_MEM || 44 - cfg->dir == STEDMA40_PERIPH_TO_PERIPH) 45 - l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS; 31 + if (cfg->dir == DMA_DEV_TO_MEM || 32 + cfg->dir == DMA_DEV_TO_DEV) 33 + l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS); 46 34 47 35 /* dst is hw? 
-> master port 1 */ 48 - if (cfg->dir == STEDMA40_MEM_TO_PERIPH || 49 - cfg->dir == STEDMA40_PERIPH_TO_PERIPH) 50 - l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS; 36 + if (cfg->dir == DMA_MEM_TO_DEV || 37 + cfg->dir == DMA_DEV_TO_DEV) 38 + l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS); 51 39 52 - l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS; 40 + l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS); 53 41 l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS; 54 - l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS; 42 + l3 |= d40_width_to_bits(cfg->dst_info.data_width) 43 + << D40_MEM_LCSP3_DCFG_ESIZE_POS; 55 44 56 - l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS; 45 + l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS); 57 46 l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS; 58 - l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS; 47 + l1 |= d40_width_to_bits(cfg->src_info.data_width) 48 + << D40_MEM_LCSP1_SCFG_ESIZE_POS; 59 49 60 50 *lcsp1 = l1; 61 51 *lcsp3 = l3; ··· 69 55 u32 src = 0; 70 56 u32 dst = 0; 71 57 72 - if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) || 73 - (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { 58 + if ((cfg->dir == DMA_DEV_TO_MEM) || 59 + (cfg->dir == DMA_DEV_TO_DEV)) { 74 60 /* Set master port to 1 */ 75 - src |= 1 << D40_SREG_CFG_MST_POS; 61 + src |= BIT(D40_SREG_CFG_MST_POS); 76 62 src |= D40_TYPE_TO_EVENT(cfg->dev_type); 77 63 78 64 if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) 79 - src |= 1 << D40_SREG_CFG_PHY_TM_POS; 65 + src |= BIT(D40_SREG_CFG_PHY_TM_POS); 80 66 else 81 67 src |= 3 << D40_SREG_CFG_PHY_TM_POS; 82 68 } 83 - if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) || 84 - (cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) { 69 + if ((cfg->dir == DMA_MEM_TO_DEV) || 70 + (cfg->dir == DMA_DEV_TO_DEV)) { 85 71 /* Set master port to 1 */ 86 - dst |= 1 << D40_SREG_CFG_MST_POS; 72 + dst |= BIT(D40_SREG_CFG_MST_POS); 87 73 dst |= D40_TYPE_TO_EVENT(cfg->dev_type); 88 74 89 75 if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL) 90 - dst |= 1 << D40_SREG_CFG_PHY_TM_POS; 76 + dst 
|= BIT(D40_SREG_CFG_PHY_TM_POS); 91 77 else 92 78 dst |= 3 << D40_SREG_CFG_PHY_TM_POS; 93 79 } 94 80 /* Interrupt on end of transfer for destination */ 95 - dst |= 1 << D40_SREG_CFG_TIM_POS; 81 + dst |= BIT(D40_SREG_CFG_TIM_POS); 96 82 97 83 /* Generate interrupt on error */ 98 - src |= 1 << D40_SREG_CFG_EIM_POS; 99 - dst |= 1 << D40_SREG_CFG_EIM_POS; 84 + src |= BIT(D40_SREG_CFG_EIM_POS); 85 + dst |= BIT(D40_SREG_CFG_EIM_POS); 100 86 101 87 /* PSIZE */ 102 88 if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) { 103 - src |= 1 << D40_SREG_CFG_PHY_PEN_POS; 89 + src |= BIT(D40_SREG_CFG_PHY_PEN_POS); 104 90 src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS; 105 91 } 106 92 if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) { 107 - dst |= 1 << D40_SREG_CFG_PHY_PEN_POS; 93 + dst |= BIT(D40_SREG_CFG_PHY_PEN_POS); 108 94 dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS; 109 95 } 110 96 111 97 /* Element size */ 112 - src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS; 113 - dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS; 98 + src |= d40_width_to_bits(cfg->src_info.data_width) 99 + << D40_SREG_CFG_ESIZE_POS; 100 + dst |= d40_width_to_bits(cfg->dst_info.data_width) 101 + << D40_SREG_CFG_ESIZE_POS; 114 102 115 103 /* Set the priority bit to high for the physical channel */ 116 104 if (cfg->high_priority) { 117 - src |= 1 << D40_SREG_CFG_PRI_POS; 118 - dst |= 1 << D40_SREG_CFG_PRI_POS; 105 + src |= BIT(D40_SREG_CFG_PRI_POS); 106 + dst |= BIT(D40_SREG_CFG_PRI_POS); 119 107 } 120 108 121 109 if (cfg->src_info.big_endian) 122 - src |= 1 << D40_SREG_CFG_LBE_POS; 110 + src |= BIT(D40_SREG_CFG_LBE_POS); 123 111 if (cfg->dst_info.big_endian) 124 - dst |= 1 << D40_SREG_CFG_LBE_POS; 112 + dst |= BIT(D40_SREG_CFG_LBE_POS); 125 113 126 114 *src_cfg = src; 127 115 *dst_cfg = dst; ··· 149 133 num_elems = 2 << psize; 150 134 151 135 /* Must be aligned */ 152 - if (!IS_ALIGNED(data, 0x1 << data_width)) 136 + if (!IS_ALIGNED(data, data_width)) 153 137 return -EINVAL; 154 
138 155 139 /* Transfer size can't be smaller than (num_elms * elem_size) */ 156 - if (data_size < num_elems * (0x1 << data_width)) 140 + if (data_size < num_elems * data_width) 157 141 return -EINVAL; 158 142 159 143 /* The number of elements. IE now many chunks */ 160 - lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS; 144 + lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS; 161 145 162 146 /* 163 147 * Distance to next element sized entry. 164 148 * Usually the size of the element unless you want gaps. 165 149 */ 166 150 if (addr_inc) 167 - lli->reg_elt |= (0x1 << data_width) << 168 - D40_SREG_ELEM_PHY_EIDX_POS; 151 + lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS; 169 152 170 153 /* Where the data is */ 171 154 lli->reg_ptr = data; ··· 172 157 173 158 /* If this scatter list entry is the last one, no next link */ 174 159 if (next_lli == 0) 175 - lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS; 160 + lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS); 176 161 else 177 162 lli->reg_lnk = next_lli; 178 163 179 164 /* Set/clear interrupt generation on this link item.*/ 180 165 if (term_int) 181 - lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS; 166 + lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS); 182 167 else 183 - lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS); 168 + lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS); 184 169 185 - /* Post link */ 186 - lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS; 170 + /* 171 + * Post link - D40_SREG_LNK_PHY_PRE_POS = 0 172 + * Relink happens after transfer completion. 
173 + */ 187 174 188 175 return 0; 189 176 } ··· 194 177 { 195 178 u32 max_w = max(data_width1, data_width2); 196 179 u32 min_w = min(data_width1, data_width2); 197 - u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w); 180 + u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w); 198 181 199 182 if (seg_max > STEDMA40_MAX_SEG_SIZE) 200 - seg_max -= (1 << max_w); 183 + seg_max -= max_w; 201 184 202 185 if (size <= seg_max) 203 186 return size; 204 187 205 188 if (size <= 2 * seg_max) 206 - return ALIGN(size / 2, 1 << max_w); 189 + return ALIGN(size / 2, max_w); 207 190 208 191 return seg_max; 209 192 } ··· 369 352 lli->lcsp13 = reg_cfg; 370 353 371 354 /* The number of elements to transfer */ 372 - lli->lcsp02 = ((data_size >> data_width) << 355 + lli->lcsp02 = ((data_size / data_width) << 373 356 D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK; 374 357 375 - BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE); 358 + BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE); 376 359 377 360 /* 16 LSBs address of the current element */ 378 361 lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
+60 -1
drivers/usb/musb/ux500.c
··· 25 25 #include <linux/clk.h> 26 26 #include <linux/err.h> 27 27 #include <linux/io.h> 28 + #include <linux/of.h> 28 29 #include <linux/platform_device.h> 29 30 #include <linux/usb/musb-ux500.h> 30 31 31 32 #include "musb_core.h" 33 + 34 + static struct musb_hdrc_config ux500_musb_hdrc_config = { 35 + .multipoint = true, 36 + .dyn_fifo = true, 37 + .num_eps = 16, 38 + .ram_bits = 16, 39 + }; 32 40 33 41 struct ux500_glue { 34 42 struct device *dev; ··· 195 187 .set_vbus = ux500_musb_set_vbus, 196 188 }; 197 189 190 + static struct musb_hdrc_platform_data * 191 + ux500_of_probe(struct platform_device *pdev, struct device_node *np) 192 + { 193 + struct musb_hdrc_platform_data *pdata; 194 + const char *mode; 195 + int strlen; 196 + 197 + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); 198 + if (!pdata) 199 + return NULL; 200 + 201 + mode = of_get_property(np, "dr_mode", &strlen); 202 + if (!mode) { 203 + dev_err(&pdev->dev, "No 'dr_mode' property found\n"); 204 + return NULL; 205 + } 206 + 207 + if (strlen > 0) { 208 + if (!strcmp(mode, "host")) 209 + pdata->mode = MUSB_HOST; 210 + if (!strcmp(mode, "otg")) 211 + pdata->mode = MUSB_OTG; 212 + if (!strcmp(mode, "peripheral")) 213 + pdata->mode = MUSB_PERIPHERAL; 214 + } 215 + 216 + return pdata; 217 + } 218 + 198 219 static int ux500_probe(struct platform_device *pdev) 199 220 { 200 221 struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; 222 + struct device_node *np = pdev->dev.of_node; 201 223 struct platform_device *musb; 202 224 struct ux500_glue *glue; 203 225 struct clk *clk; 204 226 int ret = -ENOMEM; 227 + 228 + if (!pdata) { 229 + if (np) { 230 + pdata = ux500_of_probe(pdev, np); 231 + if (!pdata) 232 + goto err0; 233 + 234 + pdev->dev.platform_data = pdata; 235 + } else { 236 + dev_err(&pdev->dev, "no pdata or device tree found\n"); 237 + goto err0; 238 + } 239 + } 205 240 206 241 glue = kzalloc(sizeof(*glue), GFP_KERNEL); 207 242 if (!glue) { ··· 272 221 } 273 222 274 223 
musb->dev.parent = &pdev->dev; 275 - musb->dev.dma_mask = pdev->dev.dma_mask; 224 + musb->dev.dma_mask = &pdev->dev.coherent_dma_mask; 276 225 musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; 226 + musb->dev.of_node = pdev->dev.of_node; 277 227 278 228 glue->dev = &pdev->dev; 279 229 glue->musb = musb; 280 230 glue->clk = clk; 281 231 282 232 pdata->platform_ops = &ux500_ops; 233 + pdata->config = &ux500_musb_hdrc_config; 283 234 284 235 platform_set_drvdata(pdev, glue); 285 236 ··· 373 320 #define DEV_PM_OPS NULL 374 321 #endif 375 322 323 + static const struct of_device_id ux500_match[] = { 324 + { .compatible = "stericsson,db8500-musb", }, 325 + {} 326 + }; 327 + 376 328 static struct platform_driver ux500_driver = { 377 329 .probe = ux500_probe, 378 330 .remove = ux500_remove, 379 331 .driver = { 380 332 .name = "musb-ux500", 381 333 .pm = DEV_PM_OPS, 334 + .of_match_table = ux500_match, 382 335 }, 383 336 }; 384 337
+33 -26
drivers/usb/musb/ux500_dma.c
··· 34 34 #include <linux/platform_data/usb-musb-ux500.h> 35 35 #include "musb_core.h" 36 36 37 + static const char *iep_chan_names[] = { "iep_1_9", "iep_2_10", "iep_3_11", "iep_4_12", 38 + "iep_5_13", "iep_6_14", "iep_7_15", "iep_8" }; 39 + static const char *oep_chan_names[] = { "oep_1_9", "oep_2_10", "oep_3_11", "oep_4_12", 40 + "oep_5_13", "oep_6_14", "oep_7_15", "oep_8" }; 41 + 37 42 struct ux500_dma_channel { 38 43 struct dma_channel channel; 39 44 struct ux500_dma_controller *controller; ··· 53 48 54 49 struct ux500_dma_controller { 55 50 struct dma_controller controller; 56 - struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_CHANNELS]; 57 - struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_TX_CHANNELS]; 58 - u32 num_rx_channels; 59 - u32 num_tx_channels; 51 + struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS]; 52 + struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS]; 60 53 void *private_data; 61 54 dma_addr_t phy_base; 62 55 }; ··· 147 144 struct ux500_dma_channel *ux500_channel = NULL; 148 145 struct musb *musb = controller->private_data; 149 146 u8 ch_num = hw_ep->epnum - 1; 150 - u32 max_ch; 151 147 152 - /* Max 8 DMA channels (0 - 7). Each DMA channel can only be allocated 148 + /* 8 DMA channels (0 - 7). Each DMA channel can only be allocated 153 149 * to specified hw_ep. For example DMA channel 0 can only be allocated 154 150 * to hw_ep 1 and 9. 155 151 */ 156 152 if (ch_num > 7) 157 153 ch_num -= 8; 158 154 159 - max_ch = is_tx ? controller->num_tx_channels : 160 - controller->num_rx_channels; 161 - 162 - if (ch_num >= max_ch) 155 + if (ch_num >= UX500_MUSB_DMA_NUM_RX_TX_CHANNELS) 163 156 return NULL; 164 157 165 158 ux500_channel = is_tx ? 
&(controller->tx_channel[ch_num]) : ··· 263 264 struct dma_channel *channel; 264 265 u8 ch_num; 265 266 266 - for (ch_num = 0; ch_num < controller->num_rx_channels; ch_num++) { 267 + for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) { 267 268 channel = &controller->rx_channel[ch_num].channel; 268 269 ux500_channel = channel->private_data; 269 270 ··· 273 274 dma_release_channel(ux500_channel->dma_chan); 274 275 } 275 276 276 - for (ch_num = 0; ch_num < controller->num_tx_channels; ch_num++) { 277 + for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) { 277 278 channel = &controller->tx_channel[ch_num].channel; 278 279 ux500_channel = channel->private_data; 279 280 ··· 294 295 struct musb *musb = controller->private_data; 295 296 struct device *dev = musb->controller; 296 297 struct musb_hdrc_platform_data *plat = dev->platform_data; 297 - struct ux500_musb_board_data *data = plat->board_data; 298 + struct ux500_musb_board_data *data; 298 299 struct dma_channel *dma_channel = NULL; 300 + char **chan_names; 299 301 u32 ch_num; 300 302 u8 dir; 301 303 u8 is_tx = 0; 302 304 303 305 void **param_array; 304 306 struct ux500_dma_channel *channel_array; 305 - u32 ch_count; 306 307 dma_cap_mask_t mask; 307 308 308 - if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) || 309 - (data->num_tx_channels > UX500_MUSB_DMA_NUM_TX_CHANNELS)) 309 + if (!plat) { 310 + dev_err(musb->controller, "No platform data\n"); 310 311 return -EINVAL; 312 + } 311 313 312 - controller->num_rx_channels = data->num_rx_channels; 313 - controller->num_tx_channels = data->num_tx_channels; 314 + data = plat->board_data; 314 315 315 316 dma_cap_zero(mask); 316 317 dma_cap_set(DMA_SLAVE, mask); 317 318 318 319 /* Prepare the loop for RX channels */ 319 320 channel_array = controller->rx_channel; 320 - ch_count = data->num_rx_channels; 321 - param_array = data->dma_rx_param_array; 321 + param_array = data ? 
data->dma_rx_param_array : NULL; 322 + chan_names = (char **)iep_chan_names; 322 323 323 324 for (dir = 0; dir < 2; dir++) { 324 - for (ch_num = 0; ch_num < ch_count; ch_num++) { 325 + for (ch_num = 0; 326 + ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; 327 + ch_num++) { 325 328 ux500_channel = &channel_array[ch_num]; 326 329 ux500_channel->controller = controller; 327 330 ux500_channel->ch_num = ch_num; ··· 334 333 dma_channel->status = MUSB_DMA_STATUS_FREE; 335 334 dma_channel->max_len = SZ_16M; 336 335 337 - ux500_channel->dma_chan = dma_request_channel(mask, 338 - data->dma_filter, 339 - param_array[ch_num]); 336 + ux500_channel->dma_chan = 337 + dma_request_slave_channel(dev, chan_names[ch_num]); 338 + 339 + if (!ux500_channel->dma_chan) 340 + ux500_channel->dma_chan = 341 + dma_request_channel(mask, 342 + data->dma_filter, 343 + param_array[ch_num]); 344 + 340 345 if (!ux500_channel->dma_chan) { 341 346 ERR("Dma pipe allocation error dir=%d ch=%d\n", 342 347 dir, ch_num); ··· 357 350 358 351 /* Prepare the loop for TX channels */ 359 352 channel_array = controller->tx_channel; 360 - ch_count = data->num_tx_channels; 361 - param_array = data->dma_tx_param_array; 353 + param_array = data ? data->dma_tx_param_array : NULL; 354 + chan_names = (char **)oep_chan_names; 362 355 is_tx = 1; 363 356 } 364 357
+4 -17
include/linux/platform_data/dma-ste-dma40.h
··· 70 70 STEDMA40_FLOW_CTRL, 71 71 }; 72 72 73 - enum stedma40_periph_data_width { 74 - STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT, 75 - STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT, 76 - STEDMA40_WORD_WIDTH = STEDMA40_ESIZE_32_BIT, 77 - STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT 78 - }; 79 - 80 - enum stedma40_xfer_dir { 81 - STEDMA40_MEM_TO_MEM = 1, 82 - STEDMA40_MEM_TO_PERIPH, 83 - STEDMA40_PERIPH_TO_MEM, 84 - STEDMA40_PERIPH_TO_PERIPH 85 - }; 86 - 87 - 88 73 /** 89 74 * struct stedma40_half_channel_info - dst/src channel configuration 90 75 * ··· 80 95 */ 81 96 struct stedma40_half_channel_info { 82 97 bool big_endian; 83 - enum stedma40_periph_data_width data_width; 98 + enum dma_slave_buswidth data_width; 84 99 int psize; 85 100 enum stedma40_flow_ctrl flow_ctrl; 86 101 }; ··· 105 120 * 106 121 */ 107 122 struct stedma40_chan_cfg { 108 - enum stedma40_xfer_dir dir; 123 + enum dma_transfer_direction dir; 109 124 bool high_priority; 110 125 bool realtime; 111 126 enum stedma40_mode mode; ··· 132 147 * @num_of_soft_lli_chans: The number of channels that needs to be configured 133 148 * to use SoftLLI. 134 149 * @use_esram_lcla: flag for mapping the lcla into esram region 150 + * @num_of_memcpy_chans: The number of channels reserved for memcpy. 135 151 * @num_of_phy_chans: The number of physical channels implemented in HW. 136 152 * 0 means reading the number of channels from DMA HW but this is only valid 137 153 * for 'multiple of 4' channels, like 8. ··· 142 156 int *soft_lli_chans; 143 157 int num_of_soft_lli_chans; 144 158 bool use_esram_lcla; 159 + int num_of_memcpy_chans; 145 160 int num_of_phy_chans; 146 161 }; 147 162
+1 -4
include/linux/platform_data/usb-musb-ux500.h
··· 9 9 10 10 #include <linux/dmaengine.h> 11 11 12 - #define UX500_MUSB_DMA_NUM_RX_CHANNELS 8 13 - #define UX500_MUSB_DMA_NUM_TX_CHANNELS 8 12 + #define UX500_MUSB_DMA_NUM_RX_TX_CHANNELS 8 14 13 15 14 struct ux500_musb_board_data { 16 15 void **dma_rx_param_array; 17 16 void **dma_tx_param_array; 18 - u32 num_rx_channels; 19 - u32 num_tx_channels; 20 17 bool (*dma_filter)(struct dma_chan *chan, void *filter_param); 21 18 }; 22 19
+5 -5
sound/soc/ux500/ux500_pcm.c
··· 76 76 dma_params = snd_soc_dai_get_dma_data(dai, substream); 77 77 dma_cfg = dma_params->dma_cfg; 78 78 79 - mem_data_width = STEDMA40_HALFWORD_WIDTH; 79 + mem_data_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 80 80 81 81 switch (dma_params->data_size) { 82 82 case 32: 83 - per_data_width = STEDMA40_WORD_WIDTH; 83 + per_data_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 84 84 break; 85 85 case 16: 86 - per_data_width = STEDMA40_HALFWORD_WIDTH; 86 + per_data_width = DMA_SLAVE_BUSWIDTH_2_BYTES; 87 87 break; 88 88 case 8: 89 - per_data_width = STEDMA40_BYTE_WIDTH; 89 + per_data_width = DMA_SLAVE_BUSWIDTH_1_BYTE; 90 90 break; 91 91 default: 92 - per_data_width = STEDMA40_WORD_WIDTH; 92 + per_data_width = DMA_SLAVE_BUSWIDTH_4_BYTES; 93 93 } 94 94 95 95 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {