Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (26 commits)
sh: Convert sh to use read/update_persistent_clock
sh: Move PMB debugfs entry initialization to later stage
sh: Fix up flush_cache_vmap() on SMP.
sh: fix up MMU reset with variable PMB mapping sizes.
sh: establish PMB mappings for NUMA nodes.
sh: check for existing mappings for bolted PMB entries.
sh: fixed virt/phys mapping helpers for PMB.
sh: make pmb iomapping configurable.
sh: reworked dynamic PMB mapping.
sh: Fix up cpumask_of_pcibus() for the NUMA build.
serial: sh-sci: Tidy up build warnings.
sh: Fix up ctrl_read/write stragglers in migor setup.
serial: sh-sci: Add DMA support.
dmaengine: shdma: extend .device_terminate_all() to record partial transfer
sh: merge sh7722 and sh7724 DMA register definitions
sh: activate runtime PM for dmaengine on sh7722 and sh7724
dmaengine: shdma: add runtime PM support.
dmaengine: shdma: separate DMA headers.
dmaengine: shdma: convert to platform device resources
dmaengine: shdma: fix DMA error handling.
...

+2213 -740
+16
arch/sh/boards/mach-migor/setup.c
··· 419 419 I2C_BOARD_INFO("migor_ts", 0x51), 420 420 .irq = 38, /* IRQ6 */ 421 421 }, 422 + { 423 + I2C_BOARD_INFO("wm8978", 0x1a), 424 + }, 422 425 }; 423 426 424 427 static struct i2c_board_info migor_i2c_camera[] = { ··· 621 618 __raw_writew(__raw_readw(PORT_MSELCRB) | 0x2000, PORT_MSELCRB); /* D15->D8 */ 622 619 623 620 platform_resource_setup_memory(&migor_ceu_device, "ceu", 4 << 20); 621 + 622 + /* SIU: Port B */ 623 + gpio_request(GPIO_FN_SIUBOLR, NULL); 624 + gpio_request(GPIO_FN_SIUBOBT, NULL); 625 + gpio_request(GPIO_FN_SIUBISLD, NULL); 626 + gpio_request(GPIO_FN_SIUBOSLD, NULL); 627 + gpio_request(GPIO_FN_SIUMCKB, NULL); 628 + 629 + /* 630 + * The original driver sets SIUB OLR/OBT, ILR/IBT, and SIUA OLR/OBT to 631 + * output. Need only SIUB, set to output for master mode (table 34.2) 632 + */ 633 + __raw_writew(__raw_readw(PORT_MSELCRA) | 1, PORT_MSELCRA); 624 634 625 635 i2c_register_board_info(0, migor_i2c_devices, 626 636 ARRAY_SIZE(migor_i2c_devices));
+1 -1
arch/sh/boot/compressed/cache.c
··· 5 5 6 6 for (i = 0; i < (32 * 1024); i += 32) { 7 7 (void)*p; 8 - p += (32 / sizeof (int)); 8 + p += (32 / sizeof(int)); 9 9 } 10 10 11 11 return 0;
+2 -2
arch/sh/include/asm/cacheflush.h
··· 86 86 struct page *page, unsigned long vaddr, void *dst, const void *src, 87 87 unsigned long len); 88 88 89 - #define flush_cache_vmap(start, end) flush_cache_all() 90 - #define flush_cache_vunmap(start, end) flush_cache_all() 89 + #define flush_cache_vmap(start, end) local_flush_cache_all(NULL) 90 + #define flush_cache_vunmap(start, end) local_flush_cache_all(NULL) 91 91 92 92 #define flush_dcache_mmap_lock(mapping) do { } while (0) 93 93 #define flush_dcache_mmap_unlock(mapping) do { } while (0)
+51
arch/sh/include/asm/dma-register.h
··· 1 + /* 2 + * Common header for the legacy SH DMA driver and the new dmaengine driver 3 + * 4 + * extracted from arch/sh/include/asm/dma-sh.h: 5 + * 6 + * Copyright (C) 2000 Takashi YOSHII 7 + * Copyright (C) 2003 Paul Mundt 8 + * 9 + * This file is subject to the terms and conditions of the GNU General Public 10 + * License. See the file "COPYING" in the main directory of this archive 11 + * for more details. 12 + */ 13 + #ifndef DMA_REGISTER_H 14 + #define DMA_REGISTER_H 15 + 16 + /* DMA register */ 17 + #define SAR 0x00 18 + #define DAR 0x04 19 + #define TCR 0x08 20 + #define CHCR 0x0C 21 + #define DMAOR 0x40 22 + 23 + /* DMAOR definitions */ 24 + #define DMAOR_AE 0x00000004 25 + #define DMAOR_NMIF 0x00000002 26 + #define DMAOR_DME 0x00000001 27 + 28 + /* Definitions for the SuperH DMAC */ 29 + #define REQ_L 0x00000000 30 + #define REQ_E 0x00080000 31 + #define RACK_H 0x00000000 32 + #define RACK_L 0x00040000 33 + #define ACK_R 0x00000000 34 + #define ACK_W 0x00020000 35 + #define ACK_H 0x00000000 36 + #define ACK_L 0x00010000 37 + #define DM_INC 0x00004000 38 + #define DM_DEC 0x00008000 39 + #define DM_FIX 0x0000c000 40 + #define SM_INC 0x00001000 41 + #define SM_DEC 0x00002000 42 + #define SM_FIX 0x00003000 43 + #define RS_IN 0x00000200 44 + #define RS_OUT 0x00000300 45 + #define TS_BLK 0x00000040 46 + #define TM_BUR 0x00000020 47 + #define CHCR_DE 0x00000001 48 + #define CHCR_TE 0x00000002 49 + #define CHCR_IE 0x00000004 50 + 51 + #endif
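The flag values collected in this new common header are what both the legacy DMA driver and the dmaengine driver use to compose a channel control (CHCR) word. A minimal sketch of how a transmit slave configuration is built from them; the helper name is illustrative and the 0x800 resource-select bits are simply copied from the sh7722 slave table added later in this merge:

#include <linux/types.h>
#include <asm/dma-register.h>
#include <cpu/dma-register.h>

/* Illustrative only: CHCR value for a memory-to-peripheral (TX) slave
 * channel -- fixed destination (the peripheral FIFO), incrementing
 * source, 8-bit transfer units. */
static u32 example_tx_chcr(void)
{
        return DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT);
}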
+2 -86
arch/sh/include/asm/dma-sh.h
··· 11 11 #ifndef __DMA_SH_H 12 12 #define __DMA_SH_H 13 13 14 - #include <asm/dma.h> 14 + #include <asm/dma-register.h> 15 + #include <cpu/dma-register.h> 15 16 #include <cpu/dma.h> 16 17 17 18 /* DMAOR contorl: The DMAOR access size is different by CPU.*/ ··· 54 53 #endif 55 54 }; 56 55 57 - /* Definitions for the SuperH DMAC */ 58 - #define REQ_L 0x00000000 59 - #define REQ_E 0x00080000 60 - #define RACK_H 0x00000000 61 - #define RACK_L 0x00040000 62 - #define ACK_R 0x00000000 63 - #define ACK_W 0x00020000 64 - #define ACK_H 0x00000000 65 - #define ACK_L 0x00010000 66 - #define DM_INC 0x00004000 67 - #define DM_DEC 0x00008000 68 - #define DM_FIX 0x0000c000 69 - #define SM_INC 0x00001000 70 - #define SM_DEC 0x00002000 71 - #define SM_FIX 0x00003000 72 - #define RS_IN 0x00000200 73 - #define RS_OUT 0x00000300 74 - #define TS_BLK 0x00000040 75 - #define TM_BUR 0x00000020 76 - #define CHCR_DE 0x00000001 77 - #define CHCR_TE 0x00000002 78 - #define CHCR_IE 0x00000004 79 - 80 - /* DMAOR definitions */ 81 - #define DMAOR_AE 0x00000004 82 - #define DMAOR_NMIF 0x00000002 83 - #define DMAOR_DME 0x00000001 84 - 85 56 /* 86 57 * Define the default configuration for dual address memory-memory transfer. 87 58 * The 0x400 value represents auto-request, external->external. ··· 82 109 SH_DMAC_BASE1 + 0x50, 83 110 SH_DMAC_BASE1 + 0x60, /* channel 11 */ 84 111 #endif 85 - }; 86 - 87 - /* DMA register */ 88 - #define SAR 0x00 89 - #define DAR 0x04 90 - #define TCR 0x08 91 - #define CHCR 0x0C 92 - #define DMAOR 0x40 93 - 94 - /* 95 - * for dma engine 96 - * 97 - * SuperH DMA mode 98 - */ 99 - #define SHDMA_MIX_IRQ (1 << 1) 100 - #define SHDMA_DMAOR1 (1 << 2) 101 - #define SHDMA_DMAE1 (1 << 3) 102 - 103 - enum sh_dmae_slave_chan_id { 104 - SHDMA_SLAVE_SCIF0_TX, 105 - SHDMA_SLAVE_SCIF0_RX, 106 - SHDMA_SLAVE_SCIF1_TX, 107 - SHDMA_SLAVE_SCIF1_RX, 108 - SHDMA_SLAVE_SCIF2_TX, 109 - SHDMA_SLAVE_SCIF2_RX, 110 - SHDMA_SLAVE_SCIF3_TX, 111 - SHDMA_SLAVE_SCIF3_RX, 112 - SHDMA_SLAVE_SCIF4_TX, 113 - SHDMA_SLAVE_SCIF4_RX, 114 - SHDMA_SLAVE_SCIF5_TX, 115 - SHDMA_SLAVE_SCIF5_RX, 116 - SHDMA_SLAVE_SIUA_TX, 117 - SHDMA_SLAVE_SIUA_RX, 118 - SHDMA_SLAVE_SIUB_TX, 119 - SHDMA_SLAVE_SIUB_RX, 120 - SHDMA_SLAVE_NUMBER, /* Must stay last */ 121 - }; 122 - 123 - struct sh_dmae_slave_config { 124 - enum sh_dmae_slave_chan_id slave_id; 125 - dma_addr_t addr; 126 - u32 chcr; 127 - char mid_rid; 128 - }; 129 - 130 - struct sh_dmae_pdata { 131 - unsigned int mode; 132 - struct sh_dmae_slave_config *config; 133 - int config_num; 134 - }; 135 - 136 - struct device; 137 - 138 - struct sh_dmae_slave { 139 - enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ 140 - struct device *dma_dev; /* Set by the platform */ 141 - struct sh_dmae_slave_config *config; /* Set by the driver */ 142 112 }; 143 113 144 114 #endif /* __DMA_SH_H */
+93
arch/sh/include/asm/dmaengine.h
··· 1 + /* 2 + * Header for the new SH dmaengine driver 3 + * 4 + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #ifndef ASM_DMAENGINE_H 11 + #define ASM_DMAENGINE_H 12 + 13 + #include <linux/dmaengine.h> 14 + #include <linux/list.h> 15 + 16 + #include <asm/dma-register.h> 17 + 18 + #define SH_DMAC_MAX_CHANNELS 6 19 + 20 + enum sh_dmae_slave_chan_id { 21 + SHDMA_SLAVE_SCIF0_TX, 22 + SHDMA_SLAVE_SCIF0_RX, 23 + SHDMA_SLAVE_SCIF1_TX, 24 + SHDMA_SLAVE_SCIF1_RX, 25 + SHDMA_SLAVE_SCIF2_TX, 26 + SHDMA_SLAVE_SCIF2_RX, 27 + SHDMA_SLAVE_SCIF3_TX, 28 + SHDMA_SLAVE_SCIF3_RX, 29 + SHDMA_SLAVE_SCIF4_TX, 30 + SHDMA_SLAVE_SCIF4_RX, 31 + SHDMA_SLAVE_SCIF5_TX, 32 + SHDMA_SLAVE_SCIF5_RX, 33 + SHDMA_SLAVE_SIUA_TX, 34 + SHDMA_SLAVE_SIUA_RX, 35 + SHDMA_SLAVE_SIUB_TX, 36 + SHDMA_SLAVE_SIUB_RX, 37 + SHDMA_SLAVE_NUMBER, /* Must stay last */ 38 + }; 39 + 40 + struct sh_dmae_slave_config { 41 + enum sh_dmae_slave_chan_id slave_id; 42 + dma_addr_t addr; 43 + u32 chcr; 44 + char mid_rid; 45 + }; 46 + 47 + struct sh_dmae_channel { 48 + unsigned int offset; 49 + unsigned int dmars; 50 + unsigned int dmars_bit; 51 + }; 52 + 53 + struct sh_dmae_pdata { 54 + struct sh_dmae_slave_config *slave; 55 + int slave_num; 56 + struct sh_dmae_channel *channel; 57 + int channel_num; 58 + unsigned int ts_low_shift; 59 + unsigned int ts_low_mask; 60 + unsigned int ts_high_shift; 61 + unsigned int ts_high_mask; 62 + unsigned int *ts_shift; 63 + int ts_shift_num; 64 + u16 dmaor_init; 65 + }; 66 + 67 + struct device; 68 + 69 + /* Used by slave DMA clients to request DMA to/from a specific peripheral */ 70 + struct sh_dmae_slave { 71 + enum sh_dmae_slave_chan_id slave_id; /* Set by the platform */ 72 + struct device *dma_dev; /* Set by the platform */ 73 + struct sh_dmae_slave_config *config; /* Set by the driver */ 74 + }; 75 + 76 + struct sh_dmae_regs { 77 + u32 sar; /* SAR / source address */ 78 + u32 dar; /* DAR / destination address */ 79 + u32 tcr; /* TCR / transfer count */ 80 + }; 81 + 82 + struct sh_desc { 83 + struct sh_dmae_regs hw; 84 + struct list_head node; 85 + struct dma_async_tx_descriptor async_tx; 86 + enum dma_data_direction direction; 87 + dma_cookie_t cookie; 88 + size_t partial; 89 + int chunks; 90 + int mark; 91 + }; 92 + 93 + #endif
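struct sh_dmae_slave is the cookie a peripheral driver hands to the dmaengine core to get a channel bound to a specific request line. A minimal sketch of the request path, roughly how the sh-sci DMA support added in this series uses it; the filter behaviour and function names here are illustrative, not the driver's exact code:

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <asm/dmaengine.h>

/* Illustrative filter: hand the slave descriptor to the shdma driver
 * through chan->private and accept the channel. */
static bool example_shdma_filter(struct dma_chan *chan, void *arg)
{
        chan->private = arg;
        return true;
}

static struct dma_chan *example_request_scif0_tx(struct device *dma_dev)
{
        static struct sh_dmae_slave param = {
                .slave_id = SHDMA_SLAVE_SCIF0_TX,
        };
        dma_cap_mask_t mask;

        param.dma_dev = dma_dev;        /* e.g. &dma_device.dev from the SoC setup code */

        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        return dma_request_channel(mask, example_shdma_filter, &param);
}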
+10 -13
arch/sh/include/asm/io.h
··· 291 291 * doesn't exist, so everything must go through page tables. 292 292 */ 293 293 #ifdef CONFIG_MMU 294 - void __iomem *__ioremap_caller(unsigned long offset, unsigned long size, 294 + void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size, 295 295 pgprot_t prot, void *caller); 296 296 void __iounmap(void __iomem *addr); 297 297 298 298 static inline void __iomem * 299 - __ioremap(unsigned long offset, unsigned long size, pgprot_t prot) 299 + __ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot) 300 300 { 301 301 return __ioremap_caller(offset, size, prot, __builtin_return_address(0)); 302 302 } 303 303 304 304 static inline void __iomem * 305 - __ioremap_29bit(unsigned long offset, unsigned long size, pgprot_t prot) 305 + __ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot) 306 306 { 307 307 #ifdef CONFIG_29BIT 308 - unsigned long last_addr = offset + size - 1; 308 + phys_addr_t last_addr = offset + size - 1; 309 309 310 310 /* 311 311 * For P1 and P2 space this is trivial, as everything is already ··· 329 329 } 330 330 331 331 static inline void __iomem * 332 - __ioremap_mode(unsigned long offset, unsigned long size, pgprot_t prot) 332 + __ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot) 333 333 { 334 334 void __iomem *ret; 335 335 ··· 349 349 #define __iounmap(addr) do { } while (0) 350 350 #endif /* CONFIG_MMU */ 351 351 352 - static inline void __iomem * 353 - ioremap(unsigned long offset, unsigned long size) 352 + static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size) 354 353 { 355 354 return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE); 356 355 } 357 356 358 357 static inline void __iomem * 359 - ioremap_cache(unsigned long offset, unsigned long size) 358 + ioremap_cache(phys_addr_t offset, unsigned long size) 360 359 { 361 360 return __ioremap_mode(offset, size, PAGE_KERNEL); 362 361 } 363 362 364 363 #ifdef CONFIG_HAVE_IOREMAP_PROT 365 364 static inline void __iomem * 366 - ioremap_prot(resource_size_t offset, unsigned long size, unsigned long flags) 365 + ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags) 367 366 { 368 367 return __ioremap_mode(offset, size, __pgprot(flags)); 369 368 } 370 369 #endif 371 370 372 371 #ifdef CONFIG_IOREMAP_FIXED 373 - extern void __iomem *ioremap_fixed(resource_size_t, unsigned long, 374 - unsigned long, pgprot_t); 372 + extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t); 375 373 extern int iounmap_fixed(void __iomem *); 376 374 extern void ioremap_fixed_init(void); 377 375 #else 378 376 static inline void __iomem * 379 - ioremap_fixed(resource_size_t phys_addr, unsigned long offset, 380 - unsigned long size, pgprot_t prot) 377 + ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) 381 378 { 382 379 BUG(); 383 380 return NULL;
+24 -7
arch/sh/include/asm/mmu.h
··· 55 55 56 56 #ifdef CONFIG_PMB 57 57 /* arch/sh/mm/pmb.c */ 58 - long pmb_remap(unsigned long virt, unsigned long phys, 59 - unsigned long size, pgprot_t prot); 60 - void pmb_unmap(unsigned long addr); 61 - void pmb_init(void); 62 58 bool __in_29bit_mode(void); 59 + 60 + void pmb_init(void); 61 + int pmb_bolt_mapping(unsigned long virt, phys_addr_t phys, 62 + unsigned long size, pgprot_t prot); 63 + void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, 64 + pgprot_t prot, void *caller); 65 + int pmb_unmap(void __iomem *addr); 66 + 63 67 #else 64 - static inline long pmb_remap(unsigned long virt, unsigned long phys, 65 - unsigned long size, pgprot_t prot) 68 + 69 + static inline void __iomem * 70 + pmb_remap_caller(phys_addr_t phys, unsigned long size, 71 + pgprot_t prot, void *caller) 72 + { 73 + return NULL; 74 + } 75 + 76 + static inline int pmb_unmap(void __iomem *addr) 66 77 { 67 78 return -EINVAL; 68 79 } 69 80 70 - #define pmb_unmap(addr) do { } while (0) 71 81 #define pmb_init(addr) do { } while (0) 72 82 73 83 #ifdef CONFIG_29BIT ··· 87 77 #endif 88 78 89 79 #endif /* CONFIG_PMB */ 80 + 81 + static inline void __iomem * 82 + pmb_remap(phys_addr_t phys, unsigned long size, pgprot_t prot) 83 + { 84 + return pmb_remap_caller(phys, size, prot, __builtin_return_address(0)); 85 + } 86 + 90 87 #endif /* __ASSEMBLY__ */ 91 88 92 89 #endif /* __MMU_H */
+1 -1
arch/sh/include/asm/siu.h
··· 11 11 #ifndef ASM_SIU_H 12 12 #define ASM_SIU_H 13 13 14 - #include <asm/dma-sh.h> 14 + #include <asm/dmaengine.h> 15 15 16 16 struct device; 17 17
+1 -1
arch/sh/include/asm/topology.h
··· 35 35 36 36 #define pcibus_to_node(bus) ((void)(bus), -1) 37 37 #define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \ 38 - CPU_MASK_ALL_PTR : \ 38 + cpu_all_mask : \ 39 39 cpumask_of_node(pcibus_to_node(bus))) 40 40 41 41 #endif
+41
arch/sh/include/cpu-sh3/cpu/dma-register.h
··· 1 + /* 2 + * SH3 CPU-specific DMA definitions, used by both DMA drivers 3 + * 4 + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #ifndef CPU_DMA_REGISTER_H 11 + #define CPU_DMA_REGISTER_H 12 + 13 + #define CHCR_TS_LOW_MASK 0x18 14 + #define CHCR_TS_LOW_SHIFT 3 15 + #define CHCR_TS_HIGH_MASK 0 16 + #define CHCR_TS_HIGH_SHIFT 0 17 + 18 + #define DMAOR_INIT DMAOR_DME 19 + 20 + /* 21 + * The SuperH DMAC supports a number of transmit sizes, we list them here, 22 + * with their respective values as they appear in the CHCR registers. 23 + */ 24 + enum { 25 + XMIT_SZ_8BIT, 26 + XMIT_SZ_16BIT, 27 + XMIT_SZ_32BIT, 28 + XMIT_SZ_128BIT, 29 + }; 30 + 31 + /* log2(size / 8) - used to calculate number of transfers */ 32 + #define TS_SHIFT { \ 33 + [XMIT_SZ_8BIT] = 0, \ 34 + [XMIT_SZ_16BIT] = 1, \ 35 + [XMIT_SZ_32BIT] = 2, \ 36 + [XMIT_SZ_128BIT] = 4, \ 37 + } 38 + 39 + #define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT) 40 + 41 + #endif
-27
arch/sh/include/cpu-sh3/cpu/dma.h
··· 20 20 #define TS_32 0x00000010 21 21 #define TS_128 0x00000018 22 22 23 - #define CHCR_TS_LOW_MASK 0x18 24 - #define CHCR_TS_LOW_SHIFT 3 25 - #define CHCR_TS_HIGH_MASK 0 26 - #define CHCR_TS_HIGH_SHIFT 0 27 - 28 - #define DMAOR_INIT DMAOR_DME 29 - 30 - /* 31 - * The SuperH DMAC supports a number of transmit sizes, we list them here, 32 - * with their respective values as they appear in the CHCR registers. 33 - */ 34 - enum { 35 - XMIT_SZ_8BIT, 36 - XMIT_SZ_16BIT, 37 - XMIT_SZ_32BIT, 38 - XMIT_SZ_128BIT, 39 - }; 40 - 41 - #define TS_SHIFT { \ 42 - [XMIT_SZ_8BIT] = 0, \ 43 - [XMIT_SZ_16BIT] = 1, \ 44 - [XMIT_SZ_32BIT] = 2, \ 45 - [XMIT_SZ_128BIT] = 4, \ 46 - } 47 - 48 - #define TS_INDEX2VAL(i) (((i) & 3) << CHCR_TS_LOW_SHIFT) 49 - 50 23 #endif /* __ASM_CPU_SH3_DMA_H */
+112
arch/sh/include/cpu-sh4/cpu/dma-register.h
··· 1 + /* 2 + * SH4 CPU-specific DMA definitions, used by both DMA drivers 3 + * 4 + * Copyright (C) 2010 Guennadi Liakhovetski <g.liakhovetski@gmx.de> 5 + * 6 + * This program is free software; you can redistribute it and/or modify 7 + * it under the terms of the GNU General Public License version 2 as 8 + * published by the Free Software Foundation. 9 + */ 10 + #ifndef CPU_DMA_REGISTER_H 11 + #define CPU_DMA_REGISTER_H 12 + 13 + /* SH7751/7760/7780 DMA IRQ sources */ 14 + 15 + #ifdef CONFIG_CPU_SH4A 16 + 17 + #define DMAOR_INIT DMAOR_DME 18 + 19 + #if defined(CONFIG_CPU_SUBTYPE_SH7343) || \ 20 + defined(CONFIG_CPU_SUBTYPE_SH7730) 21 + #define CHCR_TS_LOW_MASK 0x00000018 22 + #define CHCR_TS_LOW_SHIFT 3 23 + #define CHCR_TS_HIGH_MASK 0 24 + #define CHCR_TS_HIGH_SHIFT 0 25 + #elif defined(CONFIG_CPU_SUBTYPE_SH7722) || \ 26 + defined(CONFIG_CPU_SUBTYPE_SH7724) 27 + #define CHCR_TS_LOW_MASK 0x00000018 28 + #define CHCR_TS_LOW_SHIFT 3 29 + #define CHCR_TS_HIGH_MASK 0x00300000 30 + #define CHCR_TS_HIGH_SHIFT (20 - 2) /* 2 bits for shifted low TS */ 31 + #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 32 + defined(CONFIG_CPU_SUBTYPE_SH7764) 33 + #define CHCR_TS_LOW_MASK 0x00000018 34 + #define CHCR_TS_LOW_SHIFT 3 35 + #define CHCR_TS_HIGH_MASK 0 36 + #define CHCR_TS_HIGH_SHIFT 0 37 + #elif defined(CONFIG_CPU_SUBTYPE_SH7723) 38 + #define CHCR_TS_LOW_MASK 0x00000018 39 + #define CHCR_TS_LOW_SHIFT 3 40 + #define CHCR_TS_HIGH_MASK 0 41 + #define CHCR_TS_HIGH_SHIFT 0 42 + #elif defined(CONFIG_CPU_SUBTYPE_SH7780) 43 + #define CHCR_TS_LOW_MASK 0x00000018 44 + #define CHCR_TS_LOW_SHIFT 3 45 + #define CHCR_TS_HIGH_MASK 0 46 + #define CHCR_TS_HIGH_SHIFT 0 47 + #else /* SH7785 */ 48 + #define CHCR_TS_LOW_MASK 0x00000018 49 + #define CHCR_TS_LOW_SHIFT 3 50 + #define CHCR_TS_HIGH_MASK 0 51 + #define CHCR_TS_HIGH_SHIFT 0 52 + #endif 53 + 54 + /* Transmit sizes and respective CHCR register values */ 55 + enum { 56 + XMIT_SZ_8BIT = 0, 57 + XMIT_SZ_16BIT = 1, 58 + XMIT_SZ_32BIT = 2, 59 + XMIT_SZ_64BIT = 7, 60 + XMIT_SZ_128BIT = 3, 61 + XMIT_SZ_256BIT = 4, 62 + XMIT_SZ_128BIT_BLK = 0xb, 63 + XMIT_SZ_256BIT_BLK = 0xc, 64 + }; 65 + 66 + /* log2(size / 8) - used to calculate number of transfers */ 67 + #define TS_SHIFT { \ 68 + [XMIT_SZ_8BIT] = 0, \ 69 + [XMIT_SZ_16BIT] = 1, \ 70 + [XMIT_SZ_32BIT] = 2, \ 71 + [XMIT_SZ_64BIT] = 3, \ 72 + [XMIT_SZ_128BIT] = 4, \ 73 + [XMIT_SZ_256BIT] = 5, \ 74 + [XMIT_SZ_128BIT_BLK] = 4, \ 75 + [XMIT_SZ_256BIT_BLK] = 5, \ 76 + } 77 + 78 + #define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ 79 + ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) 80 + 81 + #else /* CONFIG_CPU_SH4A */ 82 + 83 + #define DMAOR_INIT (0x8000 | DMAOR_DME) 84 + 85 + #define CHCR_TS_LOW_MASK 0x70 86 + #define CHCR_TS_LOW_SHIFT 4 87 + #define CHCR_TS_HIGH_MASK 0 88 + #define CHCR_TS_HIGH_SHIFT 0 89 + 90 + /* Transmit sizes and respective CHCR register values */ 91 + enum { 92 + XMIT_SZ_8BIT = 1, 93 + XMIT_SZ_16BIT = 2, 94 + XMIT_SZ_32BIT = 3, 95 + XMIT_SZ_64BIT = 0, 96 + XMIT_SZ_256BIT = 4, 97 + }; 98 + 99 + /* log2(size / 8) - used to calculate number of transfers */ 100 + #define TS_SHIFT { \ 101 + [XMIT_SZ_8BIT] = 0, \ 102 + [XMIT_SZ_16BIT] = 1, \ 103 + [XMIT_SZ_32BIT] = 2, \ 104 + [XMIT_SZ_64BIT] = 3, \ 105 + [XMIT_SZ_256BIT] = 5, \ 106 + } 107 + 108 + #define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT) 109 + 110 + #endif /* CONFIG_CPU_SH4A */ 111 + 112 + #endif
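The TS_SHIFT table and TS_INDEX2VAL() work as a pair: the shift is log2 of the bytes moved per transfer, which the driver uses to turn a byte count into a TCR transfer count, while TS_INDEX2VAL() scatters the same index across the low and high CHCR transmit-size fields described by the ts_low_*/ts_high_* platform data. A small sketch of that arithmetic, with illustrative helper names:

#include <linux/types.h>
#include <cpu/dma-register.h>

static unsigned int example_ts_shift[] = TS_SHIFT;

/* Each transfer moves (1 << ts_shift[i]) bytes, so the TCR count for a
 * request of "bytes" bytes at transmit-size index "i" is: */
static u32 example_tcr_count(size_t bytes, int i)
{
        return bytes >> example_ts_shift[i];
}

/* The same index encodes the CHCR TS bits; on SH-4A parts with a split
 * field this fills both the low and the high half. */
static u32 example_chcr_ts(int i)
{
        return TS_INDEX2VAL(i);         /* e.g. i == XMIT_SZ_32BIT */
}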
-62
arch/sh/include/cpu-sh4/cpu/dma-sh4a.h
··· 8 8 #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 9 9 #define SH_DMAC_BASE0 0xFE008020 10 10 #define SH_DMARS_BASE0 0xFE009000 11 - #define CHCR_TS_LOW_MASK 0x00000018 12 - #define CHCR_TS_LOW_SHIFT 3 13 - #define CHCR_TS_HIGH_MASK 0 14 - #define CHCR_TS_HIGH_SHIFT 0 15 11 #elif defined(CONFIG_CPU_SUBTYPE_SH7722) 16 12 #define DMTE0_IRQ 48 17 13 #define DMTE4_IRQ 76 18 14 #define DMAE0_IRQ 78 /* DMA Error IRQ*/ 19 15 #define SH_DMAC_BASE0 0xFE008020 20 16 #define SH_DMARS_BASE0 0xFE009000 21 - #define CHCR_TS_LOW_MASK 0x00000018 22 - #define CHCR_TS_LOW_SHIFT 3 23 - #define CHCR_TS_HIGH_MASK 0x00300000 24 - #define CHCR_TS_HIGH_SHIFT 20 25 17 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) || \ 26 18 defined(CONFIG_CPU_SUBTYPE_SH7764) 27 19 #define DMTE0_IRQ 34 ··· 21 29 #define DMAE0_IRQ 38 22 30 #define SH_DMAC_BASE0 0xFF608020 23 31 #define SH_DMARS_BASE0 0xFF609000 24 - #define CHCR_TS_LOW_MASK 0x00000018 25 - #define CHCR_TS_LOW_SHIFT 3 26 - #define CHCR_TS_HIGH_MASK 0 27 - #define CHCR_TS_HIGH_SHIFT 0 28 32 #elif defined(CONFIG_CPU_SUBTYPE_SH7723) 29 33 #define DMTE0_IRQ 48 /* DMAC0A*/ 30 34 #define DMTE4_IRQ 76 /* DMAC0B */ ··· 34 46 #define SH_DMAC_BASE0 0xFE008020 35 47 #define SH_DMAC_BASE1 0xFDC08020 36 48 #define SH_DMARS_BASE0 0xFDC09000 37 - #define CHCR_TS_LOW_MASK 0x00000018 38 - #define CHCR_TS_LOW_SHIFT 3 39 - #define CHCR_TS_HIGH_MASK 0 40 - #define CHCR_TS_HIGH_SHIFT 0 41 49 #elif defined(CONFIG_CPU_SUBTYPE_SH7724) 42 50 #define DMTE0_IRQ 48 /* DMAC0A*/ 43 51 #define DMTE4_IRQ 76 /* DMAC0B */ ··· 48 64 #define SH_DMAC_BASE1 0xFDC08020 49 65 #define SH_DMARS_BASE0 0xFE009000 50 66 #define SH_DMARS_BASE1 0xFDC09000 51 - #define CHCR_TS_LOW_MASK 0x00000018 52 - #define CHCR_TS_LOW_SHIFT 3 53 - #define CHCR_TS_HIGH_MASK 0x00600000 54 - #define CHCR_TS_HIGH_SHIFT 21 55 67 #elif defined(CONFIG_CPU_SUBTYPE_SH7780) 56 68 #define DMTE0_IRQ 34 57 69 #define DMTE4_IRQ 44 ··· 60 80 #define SH_DMAC_BASE0 0xFC808020 61 81 #define SH_DMAC_BASE1 0xFC818020 62 82 #define SH_DMARS_BASE0 0xFC809000 63 - #define CHCR_TS_LOW_MASK 0x00000018 64 - #define CHCR_TS_LOW_SHIFT 3 65 - #define CHCR_TS_HIGH_MASK 0 66 - #define CHCR_TS_HIGH_SHIFT 0 67 83 #else /* SH7785 */ 68 84 #define DMTE0_IRQ 33 69 85 #define DMTE4_IRQ 37 ··· 73 97 #define SH_DMAC_BASE0 0xFC808020 74 98 #define SH_DMAC_BASE1 0xFCC08020 75 99 #define SH_DMARS_BASE0 0xFC809000 76 - #define CHCR_TS_LOW_MASK 0x00000018 77 - #define CHCR_TS_LOW_SHIFT 3 78 - #define CHCR_TS_HIGH_MASK 0 79 - #define CHCR_TS_HIGH_SHIFT 0 80 100 #endif 81 101 82 102 #define REQ_HE 0x000000C0 83 103 #define REQ_H 0x00000080 84 104 #define REQ_LE 0x00000040 85 105 #define TM_BURST 0x00000020 86 - 87 - /* 88 - * The SuperH DMAC supports a number of transmit sizes, we list them here, 89 - * with their respective values as they appear in the CHCR registers. 90 - * 91 - * Defaults to a 64-bit transfer size. 92 - */ 93 - enum { 94 - XMIT_SZ_8BIT = 0, 95 - XMIT_SZ_16BIT = 1, 96 - XMIT_SZ_32BIT = 2, 97 - XMIT_SZ_64BIT = 7, 98 - XMIT_SZ_128BIT = 3, 99 - XMIT_SZ_256BIT = 4, 100 - XMIT_SZ_128BIT_BLK = 0xb, 101 - XMIT_SZ_256BIT_BLK = 0xc, 102 - }; 103 - 104 - /* 105 - * The DMA count is defined as the number of bytes to transfer. 
106 - */ 107 - #define TS_SHIFT { \ 108 - [XMIT_SZ_8BIT] = 0, \ 109 - [XMIT_SZ_16BIT] = 1, \ 110 - [XMIT_SZ_32BIT] = 2, \ 111 - [XMIT_SZ_64BIT] = 3, \ 112 - [XMIT_SZ_128BIT] = 4, \ 113 - [XMIT_SZ_256BIT] = 5, \ 114 - [XMIT_SZ_128BIT_BLK] = 4, \ 115 - [XMIT_SZ_256BIT_BLK] = 5, \ 116 - } 117 - 118 - #define TS_INDEX2VAL(i) ((((i) & 3) << CHCR_TS_LOW_SHIFT) | \ 119 - ((((i) >> 2) & 3) << CHCR_TS_HIGH_SHIFT)) 120 106 121 107 #endif /* __ASM_SH_CPU_SH4_DMA_SH7780_H */
+1 -35
arch/sh/include/cpu-sh4/cpu/dma.h
··· 5 5 6 6 #ifdef CONFIG_CPU_SH4A 7 7 8 - #define DMAOR_INIT (DMAOR_DME) 9 - 10 8 #include <cpu/dma-sh4a.h> 9 + 11 10 #else /* CONFIG_CPU_SH4A */ 12 11 /* 13 12 * SH7750/SH7751/SH7760 ··· 16 17 #define DMTE6_IRQ 46 17 18 #define DMAE0_IRQ 38 18 19 19 - #define DMAOR_INIT (0x8000|DMAOR_DME) 20 20 #define SH_DMAC_BASE0 0xffa00000 21 21 #define SH_DMAC_BASE1 0xffa00070 22 22 /* Definitions for the SuperH DMAC */ ··· 25 27 #define TS_32 0x00000030 26 28 #define TS_64 0x00000000 27 29 28 - #define CHCR_TS_LOW_MASK 0x70 29 - #define CHCR_TS_LOW_SHIFT 4 30 - #define CHCR_TS_HIGH_MASK 0 31 - #define CHCR_TS_HIGH_SHIFT 0 32 - 33 30 #define DMAOR_COD 0x00000008 34 - 35 - /* 36 - * The SuperH DMAC supports a number of transmit sizes, we list them here, 37 - * with their respective values as they appear in the CHCR registers. 38 - * 39 - * Defaults to a 64-bit transfer size. 40 - */ 41 - enum { 42 - XMIT_SZ_8BIT = 1, 43 - XMIT_SZ_16BIT = 2, 44 - XMIT_SZ_32BIT = 3, 45 - XMIT_SZ_64BIT = 0, 46 - XMIT_SZ_256BIT = 4, 47 - }; 48 - 49 - /* 50 - * The DMA count is defined as the number of bytes to transfer. 51 - */ 52 - #define TS_SHIFT { \ 53 - [XMIT_SZ_8BIT] = 0, \ 54 - [XMIT_SZ_16BIT] = 1, \ 55 - [XMIT_SZ_32BIT] = 2, \ 56 - [XMIT_SZ_64BIT] = 3, \ 57 - [XMIT_SZ_256BIT] = 5, \ 58 - } 59 - 60 - #define TS_INDEX2VAL(i) (((i) & 7) << CHCR_TS_LOW_SHIFT) 61 31 62 32 #endif 63 33
+1
arch/sh/include/mach-migor/mach/migor.h
··· 1 1 #ifndef __ASM_SH_MIGOR_H 2 2 #define __ASM_SH_MIGOR_H 3 3 4 + #define PORT_MSELCRA 0xa4050180 4 5 #define PORT_MSELCRB 0xa4050182 5 6 #define BSC_CS4BCR 0xfec10010 6 7 #define BSC_CS6ABCR 0xfec1001c
+180 -10
arch/sh/kernel/cpu/sh4a/setup-sh7722.c
··· 7 7 * License. See the file "COPYING" in the main directory of this archive 8 8 * for more details. 9 9 */ 10 - #include <linux/platform_device.h> 11 10 #include <linux/init.h> 11 + #include <linux/mm.h> 12 + #include <linux/platform_device.h> 12 13 #include <linux/serial.h> 13 14 #include <linux/serial_sci.h> 14 - #include <linux/mm.h> 15 + #include <linux/sh_timer.h> 15 16 #include <linux/uio_driver.h> 16 17 #include <linux/usb/m66592.h> 17 - #include <linux/sh_timer.h> 18 + 18 19 #include <asm/clock.h> 20 + #include <asm/dmaengine.h> 19 21 #include <asm/mmzone.h> 20 - #include <asm/dma-sh.h> 22 + #include <asm/siu.h> 23 + 24 + #include <cpu/dma-register.h> 21 25 #include <cpu/sh7722.h> 26 + 27 + static struct sh_dmae_slave_config sh7722_dmae_slaves[] = { 28 + { 29 + .slave_id = SHDMA_SLAVE_SCIF0_TX, 30 + .addr = 0xffe0000c, 31 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), 32 + .mid_rid = 0x21, 33 + }, { 34 + .slave_id = SHDMA_SLAVE_SCIF0_RX, 35 + .addr = 0xffe00014, 36 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), 37 + .mid_rid = 0x22, 38 + }, { 39 + .slave_id = SHDMA_SLAVE_SCIF1_TX, 40 + .addr = 0xffe1000c, 41 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), 42 + .mid_rid = 0x25, 43 + }, { 44 + .slave_id = SHDMA_SLAVE_SCIF1_RX, 45 + .addr = 0xffe10014, 46 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), 47 + .mid_rid = 0x26, 48 + }, { 49 + .slave_id = SHDMA_SLAVE_SCIF2_TX, 50 + .addr = 0xffe2000c, 51 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), 52 + .mid_rid = 0x29, 53 + }, { 54 + .slave_id = SHDMA_SLAVE_SCIF2_RX, 55 + .addr = 0xffe20014, 56 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_8BIT), 57 + .mid_rid = 0x2a, 58 + }, { 59 + .slave_id = SHDMA_SLAVE_SIUA_TX, 60 + .addr = 0xa454c098, 61 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 62 + .mid_rid = 0xb1, 63 + }, { 64 + .slave_id = SHDMA_SLAVE_SIUA_RX, 65 + .addr = 0xa454c090, 66 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 67 + .mid_rid = 0xb2, 68 + }, { 69 + .slave_id = SHDMA_SLAVE_SIUB_TX, 70 + .addr = 0xa454c09c, 71 + .chcr = DM_FIX | SM_INC | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 72 + .mid_rid = 0xb5, 73 + }, { 74 + .slave_id = SHDMA_SLAVE_SIUB_RX, 75 + .addr = 0xa454c094, 76 + .chcr = DM_INC | SM_FIX | 0x800 | TS_INDEX2VAL(XMIT_SZ_32BIT), 77 + .mid_rid = 0xb6, 78 + }, 79 + }; 80 + 81 + static struct sh_dmae_channel sh7722_dmae_channels[] = { 82 + { 83 + .offset = 0, 84 + .dmars = 0, 85 + .dmars_bit = 0, 86 + }, { 87 + .offset = 0x10, 88 + .dmars = 0, 89 + .dmars_bit = 8, 90 + }, { 91 + .offset = 0x20, 92 + .dmars = 4, 93 + .dmars_bit = 0, 94 + }, { 95 + .offset = 0x30, 96 + .dmars = 4, 97 + .dmars_bit = 8, 98 + }, { 99 + .offset = 0x50, 100 + .dmars = 8, 101 + .dmars_bit = 0, 102 + }, { 103 + .offset = 0x60, 104 + .dmars = 8, 105 + .dmars_bit = 8, 106 + } 107 + }; 108 + 109 + static unsigned int ts_shift[] = TS_SHIFT; 110 + 111 + static struct sh_dmae_pdata dma_platform_data = { 112 + .slave = sh7722_dmae_slaves, 113 + .slave_num = ARRAY_SIZE(sh7722_dmae_slaves), 114 + .channel = sh7722_dmae_channels, 115 + .channel_num = ARRAY_SIZE(sh7722_dmae_channels), 116 + .ts_low_shift = CHCR_TS_LOW_SHIFT, 117 + .ts_low_mask = CHCR_TS_LOW_MASK, 118 + .ts_high_shift = CHCR_TS_HIGH_SHIFT, 119 + .ts_high_mask = CHCR_TS_HIGH_MASK, 120 + .ts_shift = ts_shift, 121 + .ts_shift_num = ARRAY_SIZE(ts_shift), 122 + .dmaor_init = DMAOR_INIT, 123 + }; 124 + 125 + static struct resource sh7722_dmae_resources[] = { 126 
+ [0] = { 127 + /* Channel registers and DMAOR */ 128 + .start = 0xfe008020, 129 + .end = 0xfe00808f, 130 + .flags = IORESOURCE_MEM, 131 + }, 132 + [1] = { 133 + /* DMARSx */ 134 + .start = 0xfe009000, 135 + .end = 0xfe00900b, 136 + .flags = IORESOURCE_MEM, 137 + }, 138 + { 139 + /* DMA error IRQ */ 140 + .start = 78, 141 + .end = 78, 142 + .flags = IORESOURCE_IRQ, 143 + }, 144 + { 145 + /* IRQ for channels 0-3 */ 146 + .start = 48, 147 + .end = 51, 148 + .flags = IORESOURCE_IRQ, 149 + }, 150 + { 151 + /* IRQ for channels 4-5 */ 152 + .start = 76, 153 + .end = 77, 154 + .flags = IORESOURCE_IRQ, 155 + }, 156 + }; 157 + 158 + struct platform_device dma_device = { 159 + .name = "sh-dma-engine", 160 + .id = -1, 161 + .resource = sh7722_dmae_resources, 162 + .num_resources = ARRAY_SIZE(sh7722_dmae_resources), 163 + .dev = { 164 + .platform_data = &dma_platform_data, 165 + }, 166 + .archdata = { 167 + .hwblk_id = HWBLK_DMAC, 168 + }, 169 + }; 22 170 23 171 /* Serial */ 24 172 static struct plat_sci_port scif0_platform_data = { ··· 536 388 }, 537 389 }; 538 390 539 - static struct sh_dmae_pdata dma_platform_data = { 540 - .mode = 0, 391 + static struct siu_platform siu_platform_data = { 392 + .dma_dev = &dma_device.dev, 393 + .dma_slave_tx_a = SHDMA_SLAVE_SIUA_TX, 394 + .dma_slave_rx_a = SHDMA_SLAVE_SIUA_RX, 395 + .dma_slave_tx_b = SHDMA_SLAVE_SIUB_TX, 396 + .dma_slave_rx_b = SHDMA_SLAVE_SIUB_RX, 541 397 }; 542 398 543 - static struct platform_device dma_device = { 544 - .name = "sh-dma-engine", 399 + static struct resource siu_resources[] = { 400 + [0] = { 401 + .start = 0xa4540000, 402 + .end = 0xa454c10f, 403 + .flags = IORESOURCE_MEM, 404 + }, 405 + [1] = { 406 + .start = 108, 407 + .flags = IORESOURCE_IRQ, 408 + }, 409 + }; 410 + 411 + static struct platform_device siu_device = { 412 + .name = "sh_siu", 545 413 .id = -1, 546 - .dev = { 547 - .platform_data = &dma_platform_data, 414 + .dev = { 415 + .platform_data = &siu_platform_data, 416 + }, 417 + .resource = siu_resources, 418 + .num_resources = ARRAY_SIZE(siu_resources), 419 + .archdata = { 420 + .hwblk_id = HWBLK_SIU, 548 421 }, 549 422 }; 550 423 ··· 583 414 &vpu_device, 584 415 &veu_device, 585 416 &jpu_device, 417 + &siu_device, 586 418 &dma_device, 587 419 }; 588 420
+177 -9
arch/sh/kernel/cpu/sh4a/setup-sh7724.c
··· 21 21 #include <linux/sh_timer.h> 22 22 #include <linux/io.h> 23 23 #include <linux/notifier.h> 24 + 24 25 #include <asm/suspend.h> 25 26 #include <asm/clock.h> 26 - #include <asm/dma-sh.h> 27 + #include <asm/dmaengine.h> 27 28 #include <asm/mmzone.h> 29 + 30 + #include <cpu/dma-register.h> 28 31 #include <cpu/sh7724.h> 29 32 30 33 /* DMA */ 31 - static struct sh_dmae_pdata dma_platform_data = { 32 - .mode = SHDMA_DMAOR1, 34 + static struct sh_dmae_channel sh7724_dmae0_channels[] = { 35 + { 36 + .offset = 0, 37 + .dmars = 0, 38 + .dmars_bit = 0, 39 + }, { 40 + .offset = 0x10, 41 + .dmars = 0, 42 + .dmars_bit = 8, 43 + }, { 44 + .offset = 0x20, 45 + .dmars = 4, 46 + .dmars_bit = 0, 47 + }, { 48 + .offset = 0x30, 49 + .dmars = 4, 50 + .dmars_bit = 8, 51 + }, { 52 + .offset = 0x50, 53 + .dmars = 8, 54 + .dmars_bit = 0, 55 + }, { 56 + .offset = 0x60, 57 + .dmars = 8, 58 + .dmars_bit = 8, 59 + } 33 60 }; 34 61 35 - static struct platform_device dma_device = { 36 - .name = "sh-dma-engine", 37 - .id = -1, 38 - .dev = { 39 - .platform_data = &dma_platform_data, 62 + static struct sh_dmae_channel sh7724_dmae1_channels[] = { 63 + { 64 + .offset = 0, 65 + .dmars = 0, 66 + .dmars_bit = 0, 67 + }, { 68 + .offset = 0x10, 69 + .dmars = 0, 70 + .dmars_bit = 8, 71 + }, { 72 + .offset = 0x20, 73 + .dmars = 4, 74 + .dmars_bit = 0, 75 + }, { 76 + .offset = 0x30, 77 + .dmars = 4, 78 + .dmars_bit = 8, 79 + }, { 80 + .offset = 0x50, 81 + .dmars = 8, 82 + .dmars_bit = 0, 83 + }, { 84 + .offset = 0x60, 85 + .dmars = 8, 86 + .dmars_bit = 8, 87 + } 88 + }; 89 + 90 + static unsigned int ts_shift[] = TS_SHIFT; 91 + 92 + static struct sh_dmae_pdata dma0_platform_data = { 93 + .channel = sh7724_dmae0_channels, 94 + .channel_num = ARRAY_SIZE(sh7724_dmae0_channels), 95 + .ts_low_shift = CHCR_TS_LOW_SHIFT, 96 + .ts_low_mask = CHCR_TS_LOW_MASK, 97 + .ts_high_shift = CHCR_TS_HIGH_SHIFT, 98 + .ts_high_mask = CHCR_TS_HIGH_MASK, 99 + .ts_shift = ts_shift, 100 + .ts_shift_num = ARRAY_SIZE(ts_shift), 101 + .dmaor_init = DMAOR_INIT, 102 + }; 103 + 104 + static struct sh_dmae_pdata dma1_platform_data = { 105 + .channel = sh7724_dmae1_channels, 106 + .channel_num = ARRAY_SIZE(sh7724_dmae1_channels), 107 + .ts_low_shift = CHCR_TS_LOW_SHIFT, 108 + .ts_low_mask = CHCR_TS_LOW_MASK, 109 + .ts_high_shift = CHCR_TS_HIGH_SHIFT, 110 + .ts_high_mask = CHCR_TS_HIGH_MASK, 111 + .ts_shift = ts_shift, 112 + .ts_shift_num = ARRAY_SIZE(ts_shift), 113 + .dmaor_init = DMAOR_INIT, 114 + }; 115 + 116 + /* Resource order important! */ 117 + static struct resource sh7724_dmae0_resources[] = { 118 + { 119 + /* Channel registers and DMAOR */ 120 + .start = 0xfe008020, 121 + .end = 0xfe00808f, 122 + .flags = IORESOURCE_MEM, 123 + }, 124 + { 125 + /* DMARSx */ 126 + .start = 0xfe009000, 127 + .end = 0xfe00900b, 128 + .flags = IORESOURCE_MEM, 129 + }, 130 + { 131 + /* DMA error IRQ */ 132 + .start = 78, 133 + .end = 78, 134 + .flags = IORESOURCE_IRQ, 135 + }, 136 + { 137 + /* IRQ for channels 0-3 */ 138 + .start = 48, 139 + .end = 51, 140 + .flags = IORESOURCE_IRQ, 141 + }, 142 + { 143 + /* IRQ for channels 4-5 */ 144 + .start = 76, 145 + .end = 77, 146 + .flags = IORESOURCE_IRQ, 147 + }, 148 + }; 149 + 150 + /* Resource order important! 
*/ 151 + static struct resource sh7724_dmae1_resources[] = { 152 + { 153 + /* Channel registers and DMAOR */ 154 + .start = 0xfdc08020, 155 + .end = 0xfdc0808f, 156 + .flags = IORESOURCE_MEM, 157 + }, 158 + { 159 + /* DMARSx */ 160 + .start = 0xfdc09000, 161 + .end = 0xfdc0900b, 162 + .flags = IORESOURCE_MEM, 163 + }, 164 + { 165 + /* DMA error IRQ */ 166 + .start = 74, 167 + .end = 74, 168 + .flags = IORESOURCE_IRQ, 169 + }, 170 + { 171 + /* IRQ for channels 0-3 */ 172 + .start = 40, 173 + .end = 43, 174 + .flags = IORESOURCE_IRQ, 175 + }, 176 + { 177 + /* IRQ for channels 4-5 */ 178 + .start = 72, 179 + .end = 73, 180 + .flags = IORESOURCE_IRQ, 181 + }, 182 + }; 183 + 184 + static struct platform_device dma0_device = { 185 + .name = "sh-dma-engine", 186 + .id = 0, 187 + .resource = sh7724_dmae0_resources, 188 + .num_resources = ARRAY_SIZE(sh7724_dmae0_resources), 189 + .dev = { 190 + .platform_data = &dma0_platform_data, 191 + }, 192 + .archdata = { 193 + .hwblk_id = HWBLK_DMAC0, 194 + }, 195 + }; 196 + 197 + static struct platform_device dma1_device = { 198 + .name = "sh-dma-engine", 199 + .id = 1, 200 + .resource = sh7724_dmae1_resources, 201 + .num_resources = ARRAY_SIZE(sh7724_dmae1_resources), 202 + .dev = { 203 + .platform_data = &dma1_platform_data, 204 + }, 205 + .archdata = { 206 + .hwblk_id = HWBLK_DMAC1, 40 207 }, 41 208 }; 42 209 ··· 830 663 &tmu3_device, 831 664 &tmu4_device, 832 665 &tmu5_device, 833 - &dma_device, 666 + &dma0_device, 667 + &dma1_device, 834 668 &rtc_device, 835 669 &iic0_device, 836 670 &iic1_device,
+127 -7
arch/sh/kernel/cpu/sh4a/setup-sh7780.c
··· 13 13 #include <linux/io.h> 14 14 #include <linux/serial_sci.h> 15 15 #include <linux/sh_timer.h> 16 - #include <asm/dma-sh.h> 16 + 17 + #include <asm/dmaengine.h> 18 + 19 + #include <cpu/dma-register.h> 17 20 18 21 static struct plat_sci_port scif0_platform_data = { 19 22 .mapbase = 0xffe00000, ··· 250 247 .resource = rtc_resources, 251 248 }; 252 249 253 - static struct sh_dmae_pdata dma_platform_data = { 254 - .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), 250 + /* DMA */ 251 + static struct sh_dmae_channel sh7780_dmae0_channels[] = { 252 + { 253 + .offset = 0, 254 + .dmars = 0, 255 + .dmars_bit = 0, 256 + }, { 257 + .offset = 0x10, 258 + .dmars = 0, 259 + .dmars_bit = 8, 260 + }, { 261 + .offset = 0x20, 262 + .dmars = 4, 263 + .dmars_bit = 0, 264 + }, { 265 + .offset = 0x30, 266 + .dmars = 4, 267 + .dmars_bit = 8, 268 + }, { 269 + .offset = 0x50, 270 + .dmars = 8, 271 + .dmars_bit = 0, 272 + }, { 273 + .offset = 0x60, 274 + .dmars = 8, 275 + .dmars_bit = 8, 276 + } 255 277 }; 256 278 257 - static struct platform_device dma_device = { 279 + static struct sh_dmae_channel sh7780_dmae1_channels[] = { 280 + { 281 + .offset = 0, 282 + }, { 283 + .offset = 0x10, 284 + }, { 285 + .offset = 0x20, 286 + }, { 287 + .offset = 0x30, 288 + }, { 289 + .offset = 0x50, 290 + }, { 291 + .offset = 0x60, 292 + } 293 + }; 294 + 295 + static unsigned int ts_shift[] = TS_SHIFT; 296 + 297 + static struct sh_dmae_pdata dma0_platform_data = { 298 + .channel = sh7780_dmae0_channels, 299 + .channel_num = ARRAY_SIZE(sh7780_dmae0_channels), 300 + .ts_low_shift = CHCR_TS_LOW_SHIFT, 301 + .ts_low_mask = CHCR_TS_LOW_MASK, 302 + .ts_high_shift = CHCR_TS_HIGH_SHIFT, 303 + .ts_high_mask = CHCR_TS_HIGH_MASK, 304 + .ts_shift = ts_shift, 305 + .ts_shift_num = ARRAY_SIZE(ts_shift), 306 + .dmaor_init = DMAOR_INIT, 307 + }; 308 + 309 + static struct sh_dmae_pdata dma1_platform_data = { 310 + .channel = sh7780_dmae1_channels, 311 + .channel_num = ARRAY_SIZE(sh7780_dmae1_channels), 312 + .ts_low_shift = CHCR_TS_LOW_SHIFT, 313 + .ts_low_mask = CHCR_TS_LOW_MASK, 314 + .ts_high_shift = CHCR_TS_HIGH_SHIFT, 315 + .ts_high_mask = CHCR_TS_HIGH_MASK, 316 + .ts_shift = ts_shift, 317 + .ts_shift_num = ARRAY_SIZE(ts_shift), 318 + .dmaor_init = DMAOR_INIT, 319 + }; 320 + 321 + static struct resource sh7780_dmae0_resources[] = { 322 + [0] = { 323 + /* Channel registers and DMAOR */ 324 + .start = 0xfc808020, 325 + .end = 0xfc80808f, 326 + .flags = IORESOURCE_MEM, 327 + }, 328 + [1] = { 329 + /* DMARSx */ 330 + .start = 0xfc809000, 331 + .end = 0xfc80900b, 332 + .flags = IORESOURCE_MEM, 333 + }, 334 + { 335 + /* Real DMA error IRQ is 38, and channel IRQs are 34-37, 44-45 */ 336 + .start = 34, 337 + .end = 34, 338 + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 339 + }, 340 + }; 341 + 342 + static struct resource sh7780_dmae1_resources[] = { 343 + [0] = { 344 + /* Channel registers and DMAOR */ 345 + .start = 0xfc818020, 346 + .end = 0xfc81808f, 347 + .flags = IORESOURCE_MEM, 348 + }, 349 + /* DMAC1 has no DMARS */ 350 + { 351 + /* Real DMA error IRQ is 38, and channel IRQs are 46-47, 92-95 */ 352 + .start = 46, 353 + .end = 46, 354 + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 355 + }, 356 + }; 357 + 358 + static struct platform_device dma0_device = { 258 359 .name = "sh-dma-engine", 259 - .id = -1, 360 + .id = 0, 361 + .resource = sh7780_dmae0_resources, 362 + .num_resources = ARRAY_SIZE(sh7780_dmae0_resources), 260 363 .dev = { 261 - .platform_data = &dma_platform_data, 364 + .platform_data = &dma0_platform_data, 365 + }, 366 + 
}; 367 + 368 + static struct platform_device dma1_device = { 369 + .name = "sh-dma-engine", 370 + .id = 1, 371 + .resource = sh7780_dmae1_resources, 372 + .num_resources = ARRAY_SIZE(sh7780_dmae1_resources), 373 + .dev = { 374 + .platform_data = &dma1_platform_data, 262 375 }, 263 376 }; 264 377 ··· 388 269 &tmu4_device, 389 270 &tmu5_device, 390 271 &rtc_device, 391 - &dma_device, 272 + &dma0_device, 273 + &dma1_device, 392 274 }; 393 275 394 276 static int __init sh7780_devices_setup(void)
+127 -7
arch/sh/kernel/cpu/sh4a/setup-sh7785.c
··· 14 14 #include <linux/io.h> 15 15 #include <linux/mm.h> 16 16 #include <linux/sh_timer.h> 17 - #include <asm/dma-sh.h> 17 + 18 + #include <asm/dmaengine.h> 18 19 #include <asm/mmzone.h> 20 + 21 + #include <cpu/dma-register.h> 19 22 20 23 static struct plat_sci_port scif0_platform_data = { 21 24 .mapbase = 0xffea0000, ··· 298 295 .num_resources = ARRAY_SIZE(tmu5_resources), 299 296 }; 300 297 301 - static struct sh_dmae_pdata dma_platform_data = { 302 - .mode = (SHDMA_MIX_IRQ | SHDMA_DMAOR1), 298 + /* DMA */ 299 + static struct sh_dmae_channel sh7785_dmae0_channels[] = { 300 + { 301 + .offset = 0, 302 + .dmars = 0, 303 + .dmars_bit = 0, 304 + }, { 305 + .offset = 0x10, 306 + .dmars = 0, 307 + .dmars_bit = 8, 308 + }, { 309 + .offset = 0x20, 310 + .dmars = 4, 311 + .dmars_bit = 0, 312 + }, { 313 + .offset = 0x30, 314 + .dmars = 4, 315 + .dmars_bit = 8, 316 + }, { 317 + .offset = 0x50, 318 + .dmars = 8, 319 + .dmars_bit = 0, 320 + }, { 321 + .offset = 0x60, 322 + .dmars = 8, 323 + .dmars_bit = 8, 324 + } 303 325 }; 304 326 305 - static struct platform_device dma_device = { 327 + static struct sh_dmae_channel sh7785_dmae1_channels[] = { 328 + { 329 + .offset = 0, 330 + }, { 331 + .offset = 0x10, 332 + }, { 333 + .offset = 0x20, 334 + }, { 335 + .offset = 0x30, 336 + }, { 337 + .offset = 0x50, 338 + }, { 339 + .offset = 0x60, 340 + } 341 + }; 342 + 343 + static unsigned int ts_shift[] = TS_SHIFT; 344 + 345 + static struct sh_dmae_pdata dma0_platform_data = { 346 + .channel = sh7785_dmae0_channels, 347 + .channel_num = ARRAY_SIZE(sh7785_dmae0_channels), 348 + .ts_low_shift = CHCR_TS_LOW_SHIFT, 349 + .ts_low_mask = CHCR_TS_LOW_MASK, 350 + .ts_high_shift = CHCR_TS_HIGH_SHIFT, 351 + .ts_high_mask = CHCR_TS_HIGH_MASK, 352 + .ts_shift = ts_shift, 353 + .ts_shift_num = ARRAY_SIZE(ts_shift), 354 + .dmaor_init = DMAOR_INIT, 355 + }; 356 + 357 + static struct sh_dmae_pdata dma1_platform_data = { 358 + .channel = sh7785_dmae1_channels, 359 + .channel_num = ARRAY_SIZE(sh7785_dmae1_channels), 360 + .ts_low_shift = CHCR_TS_LOW_SHIFT, 361 + .ts_low_mask = CHCR_TS_LOW_MASK, 362 + .ts_high_shift = CHCR_TS_HIGH_SHIFT, 363 + .ts_high_mask = CHCR_TS_HIGH_MASK, 364 + .ts_shift = ts_shift, 365 + .ts_shift_num = ARRAY_SIZE(ts_shift), 366 + .dmaor_init = DMAOR_INIT, 367 + }; 368 + 369 + static struct resource sh7785_dmae0_resources[] = { 370 + [0] = { 371 + /* Channel registers and DMAOR */ 372 + .start = 0xfc808020, 373 + .end = 0xfc80808f, 374 + .flags = IORESOURCE_MEM, 375 + }, 376 + [1] = { 377 + /* DMARSx */ 378 + .start = 0xfc809000, 379 + .end = 0xfc80900b, 380 + .flags = IORESOURCE_MEM, 381 + }, 382 + { 383 + /* Real DMA error IRQ is 39, and channel IRQs are 33-38 */ 384 + .start = 33, 385 + .end = 33, 386 + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 387 + }, 388 + }; 389 + 390 + static struct resource sh7785_dmae1_resources[] = { 391 + [0] = { 392 + /* Channel registers and DMAOR */ 393 + .start = 0xfcc08020, 394 + .end = 0xfcc0808f, 395 + .flags = IORESOURCE_MEM, 396 + }, 397 + /* DMAC1 has no DMARS */ 398 + { 399 + /* Real DMA error IRQ is 58, and channel IRQs are 52-57 */ 400 + .start = 52, 401 + .end = 52, 402 + .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_SHAREABLE, 403 + }, 404 + }; 405 + 406 + static struct platform_device dma0_device = { 306 407 .name = "sh-dma-engine", 307 - .id = -1, 408 + .id = 0, 409 + .resource = sh7785_dmae0_resources, 410 + .num_resources = ARRAY_SIZE(sh7785_dmae0_resources), 308 411 .dev = { 309 - .platform_data = &dma_platform_data, 412 + .platform_data = 
&dma0_platform_data, 413 + }, 414 + }; 415 + 416 + static struct platform_device dma1_device = { 417 + .name = "sh-dma-engine", 418 + .id = 1, 419 + .resource = sh7785_dmae1_resources, 420 + .num_resources = ARRAY_SIZE(sh7785_dmae1_resources), 421 + .dev = { 422 + .platform_data = &dma1_platform_data, 310 423 }, 311 424 }; 312 425 ··· 439 320 &tmu3_device, 440 321 &tmu4_device, 441 322 &tmu5_device, 442 - &dma_device, 323 + &dma0_device, 324 + &dma1_device, 443 325 }; 444 326 445 327 static int __init sh7785_devices_setup(void)
+6 -24
arch/sh/kernel/hw_breakpoint.c
··· 143 143 return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE); 144 144 } 145 145 146 - /* 147 - * Store a breakpoint's encoded address, length, and type. 148 - */ 149 - static int arch_store_info(struct perf_event *bp) 150 - { 151 - struct arch_hw_breakpoint *info = counter_arch_bp(bp); 152 - 153 - /* 154 - * User-space requests will always have the address field populated 155 - * For kernel-addresses, either the address or symbol name can be 156 - * specified. 157 - */ 158 - if (info->name) 159 - info->address = (unsigned long)kallsyms_lookup_name(info->name); 160 - if (info->address) 161 - return 0; 162 - 163 - return -EINVAL; 164 - } 165 - 166 146 int arch_bp_generic_fields(int sh_len, int sh_type, 167 147 int *gen_len, int *gen_type) 168 148 { ··· 256 276 return ret; 257 277 } 258 278 259 - ret = arch_store_info(bp); 260 - 261 - if (ret < 0) 262 - return ret; 279 + /* 280 + * For kernel-addresses, either the address or symbol name can be 281 + * specified. 282 + */ 283 + if (info->name) 284 + info->address = (unsigned long)kallsyms_lookup_name(info->name); 263 285 264 286 /* 265 287 * Check that the low-order bits of the address are appropriate
+1 -2
arch/sh/kernel/setup.c
··· 443 443 444 444 nodes_clear(node_online_map); 445 445 446 - /* Setup bootmem with available RAM */ 446 + pmb_init(); 447 447 lmb_init(); 448 448 setup_memory(); 449 449 sparse_init(); ··· 452 452 conswitchp = &dummy_con; 453 453 #endif 454 454 paging_init(); 455 - pmb_init(); 456 455 457 456 ioremap_fixed_init(); 458 457
+1 -5
arch/sh/kernel/time.c
··· 39 39 void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time; 40 40 int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time; 41 41 42 - #ifdef CONFIG_GENERIC_CMOS_UPDATE 43 42 void read_persistent_clock(struct timespec *ts) 44 43 { 45 44 rtc_sh_get_time(ts); 46 45 } 47 46 47 + #ifdef CONFIG_GENERIC_CMOS_UPDATE 48 48 int update_persistent_clock(struct timespec now) 49 49 { 50 50 return rtc_sh_set_time(now.tv_sec); ··· 112 112 113 113 hwblk_init(); 114 114 clk_init(); 115 - 116 - rtc_sh_get_time(&xtime); 117 - set_normalized_timespec(&wall_to_monotonic, 118 - -xtime.tv_sec, -xtime.tv_nsec); 119 115 120 116 late_time_init = sh_late_time_init; 121 117 }
+1 -2
arch/sh/lib/libgcc.h
··· 17 17 #error I feel sick. 18 18 #endif 19 19 20 - typedef union 21 - { 20 + typedef union { 22 21 struct DWstruct s; 23 22 long long ll; 24 23 } DWunion;
+22 -48
arch/sh/mm/ioremap.c
··· 34 34 * caller shouldn't need to know that small detail. 35 35 */ 36 36 void __iomem * __init_refok 37 - __ioremap_caller(unsigned long phys_addr, unsigned long size, 37 + __ioremap_caller(phys_addr_t phys_addr, unsigned long size, 38 38 pgprot_t pgprot, void *caller) 39 39 { 40 40 struct vm_struct *area; 41 41 unsigned long offset, last_addr, addr, orig_addr; 42 + void __iomem *mapped; 42 43 43 44 /* Don't allow wraparound or zero size */ 44 45 last_addr = phys_addr + size - 1; ··· 47 46 return NULL; 48 47 49 48 /* 49 + * If we can't yet use the regular approach, go the fixmap route. 50 + */ 51 + if (!mem_init_done) 52 + return ioremap_fixed(phys_addr, size, pgprot); 53 + 54 + /* 55 + * First try to remap through the PMB. 56 + * PMB entries are all pre-faulted. 57 + */ 58 + mapped = pmb_remap_caller(phys_addr, size, pgprot, caller); 59 + if (mapped && !IS_ERR(mapped)) 60 + return mapped; 61 + 62 + /* 50 63 * Mappings have to be page-aligned 51 64 */ 52 65 offset = phys_addr & ~PAGE_MASK; 53 66 phys_addr &= PAGE_MASK; 54 67 size = PAGE_ALIGN(last_addr+1) - phys_addr; 55 - 56 - /* 57 - * If we can't yet use the regular approach, go the fixmap route. 58 - */ 59 - if (!mem_init_done) 60 - return ioremap_fixed(phys_addr, offset, size, pgprot); 61 68 62 69 /* 63 70 * Ok, go for it.. ··· 76 67 area->phys_addr = phys_addr; 77 68 orig_addr = addr = (unsigned long)area->addr; 78 69 79 - #ifdef CONFIG_PMB 80 - /* 81 - * First try to remap through the PMB once a valid VMA has been 82 - * established. Smaller allocations (or the rest of the size 83 - * remaining after a PMB mapping due to the size not being 84 - * perfectly aligned on a PMB size boundary) are then mapped 85 - * through the UTLB using conventional page tables. 86 - * 87 - * PMB entries are all pre-faulted. 88 - */ 89 - if (unlikely(phys_addr >= P1SEG)) { 90 - unsigned long mapped; 91 - 92 - mapped = pmb_remap(addr, phys_addr, size, pgprot); 93 - if (likely(mapped)) { 94 - addr += mapped; 95 - phys_addr += mapped; 96 - size -= mapped; 97 - } 70 + if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { 71 + vunmap((void *)orig_addr); 72 + return NULL; 98 73 } 99 - #endif 100 - 101 - if (likely(size)) 102 - if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) { 103 - vunmap((void *)orig_addr); 104 - return NULL; 105 - } 106 74 107 75 return (void __iomem *)(offset + (char *)orig_addr); 108 76 } ··· 119 133 if (iounmap_fixed(addr) == 0) 120 134 return; 121 135 122 - #ifdef CONFIG_PMB 123 136 /* 124 - * Purge any PMB entries that may have been established for this 125 - * mapping, then proceed with conventional VMA teardown. 126 - * 127 - * XXX: Note that due to the way that remove_vm_area() does 128 - * matching of the resultant VMA, we aren't able to fast-forward 129 - * the address past the PMB space until the end of the VMA where 130 - * the page tables reside. As such, unmap_vm_area() will be 131 - * forced to linearly scan over the area until it finds the page 132 - * tables where PTEs that need to be unmapped actually reside, 133 - * which is far from optimal. Perhaps we need to use a separate 134 - * VMA for the PMB mappings? 135 - * -- PFM. 137 + * If the PMB handled it, there's nothing else to do. 136 138 */ 137 - pmb_unmap(vaddr); 138 - #endif 139 + if (pmb_unmap(addr) == 0) 140 + return; 139 141 140 142 p = remove_vm_area((void *)(vaddr & PAGE_MASK)); 141 143 if (!p) {
+9 -2
arch/sh/mm/ioremap_fixed.c
··· 45 45 } 46 46 47 47 void __init __iomem * 48 - ioremap_fixed(resource_size_t phys_addr, unsigned long offset, 49 - unsigned long size, pgprot_t prot) 48 + ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot) 50 49 { 51 50 enum fixed_addresses idx0, idx; 52 51 struct ioremap_map *map; 53 52 unsigned int nrpages; 53 + unsigned long offset; 54 54 int i, slot; 55 + 56 + /* 57 + * Mappings have to be page-aligned 58 + */ 59 + offset = phys_addr & ~PAGE_MASK; 60 + phys_addr &= PAGE_MASK; 61 + size = PAGE_ALIGN(phys_addr + size) - phys_addr; 55 62 56 63 slot = -1; 57 64 for (i = 0; i < FIX_N_IOREMAPS; i++) {
+3
arch/sh/mm/numa.c
··· 74 74 start_pfn = start >> PAGE_SHIFT; 75 75 end_pfn = end >> PAGE_SHIFT; 76 76 77 + pmb_bolt_mapping((unsigned long)__va(start), start, end - start, 78 + PAGE_KERNEL); 79 + 77 80 lmb_add(start, end - start); 78 81 79 82 __add_active_range(nid, start_pfn, end_pfn);
+296 -138
arch/sh/mm/pmb.c
··· 23 23 #include <linux/err.h> 24 24 #include <linux/io.h> 25 25 #include <linux/spinlock.h> 26 - #include <linux/rwlock.h> 26 + #include <linux/vmalloc.h> 27 + #include <asm/cacheflush.h> 27 28 #include <asm/sizes.h> 28 29 #include <asm/system.h> 29 30 #include <asm/uaccess.h> ··· 53 52 struct pmb_entry *link; 54 53 }; 55 54 55 + static struct { 56 + unsigned long size; 57 + int flag; 58 + } pmb_sizes[] = { 59 + { .size = SZ_512M, .flag = PMB_SZ_512M, }, 60 + { .size = SZ_128M, .flag = PMB_SZ_128M, }, 61 + { .size = SZ_64M, .flag = PMB_SZ_64M, }, 62 + { .size = SZ_16M, .flag = PMB_SZ_16M, }, 63 + }; 64 + 56 65 static void pmb_unmap_entry(struct pmb_entry *, int depth); 57 66 58 67 static DEFINE_RWLOCK(pmb_rwlock); 59 68 static struct pmb_entry pmb_entry_list[NR_PMB_ENTRIES]; 60 69 static DECLARE_BITMAP(pmb_map, NR_PMB_ENTRIES); 70 + 71 + static unsigned int pmb_iomapping_enabled; 61 72 62 73 static __always_inline unsigned long mk_pmb_entry(unsigned int entry) 63 74 { ··· 84 71 static __always_inline unsigned long mk_pmb_data(unsigned int entry) 85 72 { 86 73 return mk_pmb_entry(entry) | PMB_DATA; 74 + } 75 + 76 + static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) 77 + { 78 + return ppn >= __pa(memory_start) && ppn < __pa(memory_end); 79 + } 80 + 81 + /* 82 + * Ensure that the PMB entries match our cache configuration. 83 + * 84 + * When we are in 32-bit address extended mode, CCR.CB becomes 85 + * invalid, so care must be taken to manually adjust cacheable 86 + * translations. 87 + */ 88 + static __always_inline unsigned long pmb_cache_flags(void) 89 + { 90 + unsigned long flags = 0; 91 + 92 + #if defined(CONFIG_CACHE_OFF) 93 + flags |= PMB_WT | PMB_UB; 94 + #elif defined(CONFIG_CACHE_WRITETHROUGH) 95 + flags |= PMB_C | PMB_WT | PMB_UB; 96 + #elif defined(CONFIG_CACHE_WRITEBACK) 97 + flags |= PMB_C; 98 + #endif 99 + 100 + return flags; 101 + } 102 + 103 + /* 104 + * Convert typical pgprot value to the PMB equivalent 105 + */ 106 + static inline unsigned long pgprot_to_pmb_flags(pgprot_t prot) 107 + { 108 + unsigned long pmb_flags = 0; 109 + u64 flags = pgprot_val(prot); 110 + 111 + if (flags & _PAGE_CACHABLE) 112 + pmb_flags |= PMB_C; 113 + if (flags & _PAGE_WT) 114 + pmb_flags |= PMB_WT | PMB_UB; 115 + 116 + return pmb_flags; 117 + } 118 + 119 + static inline bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) 120 + { 121 + return (b->vpn == (a->vpn + a->size)) && 122 + (b->ppn == (a->ppn + a->size)) && 123 + (b->flags == a->flags); 124 + } 125 + 126 + static bool pmb_mapping_exists(unsigned long vaddr, phys_addr_t phys, 127 + unsigned long size) 128 + { 129 + int i; 130 + 131 + read_lock(&pmb_rwlock); 132 + 133 + for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { 134 + struct pmb_entry *pmbe, *iter; 135 + unsigned long span; 136 + 137 + if (!test_bit(i, pmb_map)) 138 + continue; 139 + 140 + pmbe = &pmb_entry_list[i]; 141 + 142 + /* 143 + * See if VPN and PPN are bounded by an existing mapping. 144 + */ 145 + if ((vaddr < pmbe->vpn) || (vaddr >= (pmbe->vpn + pmbe->size))) 146 + continue; 147 + if ((phys < pmbe->ppn) || (phys >= (pmbe->ppn + pmbe->size))) 148 + continue; 149 + 150 + /* 151 + * Now see if we're in range of a simple mapping. 152 + */ 153 + if (size <= pmbe->size) { 154 + read_unlock(&pmb_rwlock); 155 + return true; 156 + } 157 + 158 + span = pmbe->size; 159 + 160 + /* 161 + * Finally for sizes that involve compound mappings, walk 162 + * the chain. 
163 + */ 164 + for (iter = pmbe->link; iter; iter = iter->link) 165 + span += iter->size; 166 + 167 + /* 168 + * Nothing else to do if the range requirements are met. 169 + */ 170 + if (size <= span) { 171 + read_unlock(&pmb_rwlock); 172 + return true; 173 + } 174 + } 175 + 176 + read_unlock(&pmb_rwlock); 177 + return false; 178 + } 179 + 180 + static bool pmb_size_valid(unsigned long size) 181 + { 182 + int i; 183 + 184 + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) 185 + if (pmb_sizes[i].size == size) 186 + return true; 187 + 188 + return false; 189 + } 190 + 191 + static inline bool pmb_addr_valid(unsigned long addr, unsigned long size) 192 + { 193 + return (addr >= P1SEG && (addr + size - 1) < P3SEG); 194 + } 195 + 196 + static inline bool pmb_prot_valid(pgprot_t prot) 197 + { 198 + return (pgprot_val(prot) & _PAGE_USER) == 0; 199 + } 200 + 201 + static int pmb_size_to_flags(unsigned long size) 202 + { 203 + int i; 204 + 205 + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) 206 + if (pmb_sizes[i].size == size) 207 + return pmb_sizes[i].flag; 208 + 209 + return 0; 87 210 } 88 211 89 212 static int pmb_alloc_entry(void) ··· 289 140 } 290 141 291 142 /* 292 - * Ensure that the PMB entries match our cache configuration. 293 - * 294 - * When we are in 32-bit address extended mode, CCR.CB becomes 295 - * invalid, so care must be taken to manually adjust cacheable 296 - * translations. 297 - */ 298 - static __always_inline unsigned long pmb_cache_flags(void) 299 - { 300 - unsigned long flags = 0; 301 - 302 - #if defined(CONFIG_CACHE_WRITETHROUGH) 303 - flags |= PMB_C | PMB_WT | PMB_UB; 304 - #elif defined(CONFIG_CACHE_WRITEBACK) 305 - flags |= PMB_C; 306 - #endif 307 - 308 - return flags; 309 - } 310 - 311 - /* 312 143 * Must be run uncached. 313 144 */ 314 145 static void __set_pmb_entry(struct pmb_entry *pmbe) 315 146 { 316 - writel_uncached(pmbe->vpn | PMB_V, mk_pmb_addr(pmbe->entry)); 317 - writel_uncached(pmbe->ppn | pmbe->flags | PMB_V, 318 - mk_pmb_data(pmbe->entry)); 147 + unsigned long addr, data; 148 + 149 + addr = mk_pmb_addr(pmbe->entry); 150 + data = mk_pmb_data(pmbe->entry); 151 + 152 + jump_to_uncached(); 153 + 154 + /* Set V-bit */ 155 + __raw_writel(pmbe->vpn | PMB_V, addr); 156 + __raw_writel(pmbe->ppn | pmbe->flags | PMB_V, data); 157 + 158 + back_to_cached(); 319 159 } 320 160 321 161 static void __clear_pmb_entry(struct pmb_entry *pmbe) ··· 332 194 spin_unlock_irqrestore(&pmbe->lock, flags); 333 195 } 334 196 335 - static struct { 336 - unsigned long size; 337 - int flag; 338 - } pmb_sizes[] = { 339 - { .size = SZ_512M, .flag = PMB_SZ_512M, }, 340 - { .size = SZ_128M, .flag = PMB_SZ_128M, }, 341 - { .size = SZ_64M, .flag = PMB_SZ_64M, }, 342 - { .size = SZ_16M, .flag = PMB_SZ_16M, }, 343 - }; 344 - 345 - long pmb_remap(unsigned long vaddr, unsigned long phys, 346 - unsigned long size, pgprot_t prot) 197 + int pmb_bolt_mapping(unsigned long vaddr, phys_addr_t phys, 198 + unsigned long size, pgprot_t prot) 347 199 { 348 200 struct pmb_entry *pmbp, *pmbe; 349 - unsigned long wanted; 350 - int pmb_flags, i; 351 - long err; 352 - u64 flags; 201 + unsigned long orig_addr, orig_size; 202 + unsigned long flags, pmb_flags; 203 + int i, mapped; 353 204 354 - flags = pgprot_val(prot); 205 + if (!pmb_addr_valid(vaddr, size)) 206 + return -EFAULT; 207 + if (pmb_mapping_exists(vaddr, phys, size)) 208 + return 0; 355 209 356 - pmb_flags = PMB_WT | PMB_UB; 210 + orig_addr = vaddr; 211 + orig_size = size; 357 212 358 - /* Convert typical pgprot value to the PMB equivalent */ 359 - if (flags & 
_PAGE_CACHABLE) { 360 - pmb_flags |= PMB_C; 213 + flush_tlb_kernel_range(vaddr, vaddr + size); 361 214 362 - if ((flags & _PAGE_WT) == 0) 363 - pmb_flags &= ~(PMB_WT | PMB_UB); 364 - } 365 - 215 + pmb_flags = pgprot_to_pmb_flags(prot); 366 216 pmbp = NULL; 367 - wanted = size; 368 217 369 - again: 370 - for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) { 371 - unsigned long flags; 218 + do { 219 + for (i = mapped = 0; i < ARRAY_SIZE(pmb_sizes); i++) { 220 + if (size < pmb_sizes[i].size) 221 + continue; 372 222 373 - if (size < pmb_sizes[i].size) 374 - continue; 223 + pmbe = pmb_alloc(vaddr, phys, pmb_flags | 224 + pmb_sizes[i].flag, PMB_NO_ENTRY); 225 + if (IS_ERR(pmbe)) { 226 + pmb_unmap_entry(pmbp, mapped); 227 + return PTR_ERR(pmbe); 228 + } 375 229 376 - pmbe = pmb_alloc(vaddr, phys, pmb_flags | pmb_sizes[i].flag, 377 - PMB_NO_ENTRY); 378 - if (IS_ERR(pmbe)) { 379 - err = PTR_ERR(pmbe); 380 - goto out; 230 + spin_lock_irqsave(&pmbe->lock, flags); 231 + 232 + pmbe->size = pmb_sizes[i].size; 233 + 234 + __set_pmb_entry(pmbe); 235 + 236 + phys += pmbe->size; 237 + vaddr += pmbe->size; 238 + size -= pmbe->size; 239 + 240 + /* 241 + * Link adjacent entries that span multiple PMB 242 + * entries for easier tear-down. 243 + */ 244 + if (likely(pmbp)) { 245 + spin_lock(&pmbp->lock); 246 + pmbp->link = pmbe; 247 + spin_unlock(&pmbp->lock); 248 + } 249 + 250 + pmbp = pmbe; 251 + 252 + /* 253 + * Instead of trying smaller sizes on every 254 + * iteration (even if we succeed in allocating 255 + * space), try using pmb_sizes[i].size again. 256 + */ 257 + i--; 258 + mapped++; 259 + 260 + spin_unlock_irqrestore(&pmbe->lock, flags); 381 261 } 262 + } while (size >= SZ_16M); 382 263 383 - spin_lock_irqsave(&pmbe->lock, flags); 264 + flush_cache_vmap(orig_addr, orig_addr + orig_size); 384 265 385 - __set_pmb_entry(pmbe); 386 - 387 - phys += pmb_sizes[i].size; 388 - vaddr += pmb_sizes[i].size; 389 - size -= pmb_sizes[i].size; 390 - 391 - pmbe->size = pmb_sizes[i].size; 392 - 393 - /* 394 - * Link adjacent entries that span multiple PMB entries 395 - * for easier tear-down. 396 - */ 397 - if (likely(pmbp)) { 398 - spin_lock(&pmbp->lock); 399 - pmbp->link = pmbe; 400 - spin_unlock(&pmbp->lock); 401 - } 402 - 403 - pmbp = pmbe; 404 - 405 - /* 406 - * Instead of trying smaller sizes on every iteration 407 - * (even if we succeed in allocating space), try using 408 - * pmb_sizes[i].size again. 409 - */ 410 - i--; 411 - 412 - spin_unlock_irqrestore(&pmbe->lock, flags); 413 - } 414 - 415 - if (size >= SZ_16M) 416 - goto again; 417 - 418 - return wanted - size; 419 - 420 - out: 421 - pmb_unmap_entry(pmbp, NR_PMB_ENTRIES); 422 - 423 - return err; 266 + return 0; 424 267 } 425 268 426 - void pmb_unmap(unsigned long addr) 269 + void __iomem *pmb_remap_caller(phys_addr_t phys, unsigned long size, 270 + pgprot_t prot, void *caller) 271 + { 272 + unsigned long vaddr; 273 + phys_addr_t offset, last_addr; 274 + phys_addr_t align_mask; 275 + unsigned long aligned; 276 + struct vm_struct *area; 277 + int i, ret; 278 + 279 + if (!pmb_iomapping_enabled) 280 + return NULL; 281 + 282 + /* 283 + * Small mappings need to go through the TLB. 
284 + */ 285 + if (size < SZ_16M) 286 + return ERR_PTR(-EINVAL); 287 + if (!pmb_prot_valid(prot)) 288 + return ERR_PTR(-EINVAL); 289 + 290 + for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) 291 + if (size >= pmb_sizes[i].size) 292 + break; 293 + 294 + last_addr = phys + size; 295 + align_mask = ~(pmb_sizes[i].size - 1); 296 + offset = phys & ~align_mask; 297 + phys &= align_mask; 298 + aligned = ALIGN(last_addr, pmb_sizes[i].size) - phys; 299 + 300 + /* 301 + * XXX: This should really start from uncached_end, but this 302 + * causes the MMU to reset, so for now we restrict it to the 303 + * 0xb000...0xc000 range. 304 + */ 305 + area = __get_vm_area_caller(aligned, VM_IOREMAP, 0xb0000000, 306 + P3SEG, caller); 307 + if (!area) 308 + return NULL; 309 + 310 + area->phys_addr = phys; 311 + vaddr = (unsigned long)area->addr; 312 + 313 + ret = pmb_bolt_mapping(vaddr, phys, size, prot); 314 + if (unlikely(ret != 0)) 315 + return ERR_PTR(ret); 316 + 317 + return (void __iomem *)(offset + (char *)vaddr); 318 + } 319 + 320 + int pmb_unmap(void __iomem *addr) 427 321 { 428 322 struct pmb_entry *pmbe = NULL; 429 - int i; 323 + unsigned long vaddr = (unsigned long __force)addr; 324 + int i, found = 0; 430 325 431 326 read_lock(&pmb_rwlock); 432 327 433 328 for (i = 0; i < ARRAY_SIZE(pmb_entry_list); i++) { 434 329 if (test_bit(i, pmb_map)) { 435 330 pmbe = &pmb_entry_list[i]; 436 - if (pmbe->vpn == addr) 331 + if (pmbe->vpn == vaddr) { 332 + found = 1; 437 333 break; 334 + } 438 335 } 439 336 } 440 337 441 338 read_unlock(&pmb_rwlock); 442 339 443 - pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); 444 - } 340 + if (found) { 341 + pmb_unmap_entry(pmbe, NR_PMB_ENTRIES); 342 + return 0; 343 + } 445 344 446 - static bool pmb_can_merge(struct pmb_entry *a, struct pmb_entry *b) 447 - { 448 - return (b->vpn == (a->vpn + a->size)) && 449 - (b->ppn == (a->ppn + a->size)) && 450 - (b->flags == a->flags); 451 - } 452 - 453 - static bool pmb_size_valid(unsigned long size) 454 - { 455 - int i; 456 - 457 - for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) 458 - if (pmb_sizes[i].size == size) 459 - return true; 460 - 461 - return false; 462 - } 463 - 464 - static int pmb_size_to_flags(unsigned long size) 465 - { 466 - int i; 467 - 468 - for (i = 0; i < ARRAY_SIZE(pmb_sizes); i++) 469 - if (pmb_sizes[i].size == size) 470 - return pmb_sizes[i].flag; 471 - 472 - return 0; 345 + return -EINVAL; 473 346 } 474 347 475 348 static void __pmb_unmap_entry(struct pmb_entry *pmbe, int depth) ··· 500 351 */ 501 352 __clear_pmb_entry(pmbe); 502 353 354 + flush_cache_vunmap(pmbe->vpn, pmbe->vpn + pmbe->size); 355 + 503 356 pmbe = pmblink->link; 504 357 505 358 pmb_free(pmblink); ··· 518 367 write_lock_irqsave(&pmb_rwlock, flags); 519 368 __pmb_unmap_entry(pmbe, depth); 520 369 write_unlock_irqrestore(&pmb_rwlock, flags); 521 - } 522 - 523 - static __always_inline unsigned int pmb_ppn_in_range(unsigned long ppn) 524 - { 525 - return ppn >= __pa(memory_start) && ppn < __pa(memory_end); 526 370 } 527 371 528 372 static void __init pmb_notify(void) ··· 771 625 } 772 626 #endif 773 627 628 + static int __init early_pmb(char *p) 629 + { 630 + if (!p) 631 + return 0; 632 + 633 + if (strstr(p, "iomap")) 634 + pmb_iomapping_enabled = 1; 635 + 636 + return 0; 637 + } 638 + early_param("pmb", early_pmb); 639 + 774 640 void __init pmb_init(void) 775 641 { 776 642 /* Synchronize software state */ ··· 871 713 872 714 return 0; 873 715 } 874 - postcore_initcall(pmb_debugfs_init); 716 + subsys_initcall(pmb_debugfs_init); 875 717 876 718 #ifdef CONFIG_PM 877 719 static 
int pmb_sysdev_suspend(struct sys_device *dev, pm_message_t state)
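A note on the bolted-mapping path above: pmb_bolt_mapping() decomposes a request greedily, always taking the largest entry from pmb_sizes[] that still fits and retrying that same size (the i-- in the loop) until less than 16 MiB remains, while pmb_remap_caller() first rounds the physical start down and the length up to the largest PMB size that does not exceed the request. Below is a minimal user-space sketch of the size decomposition only, assuming the same 512M/128M/64M/16M table; the helper name and the 192 MiB sample request are made up for illustration.

#include <stdio.h>

static const unsigned long pmb_sizes[] = {
	512UL << 20, 128UL << 20, 64UL << 20, 16UL << 20,
};

/* Mirror of the pmb_bolt_mapping() size-selection loop, minus the hardware */
static void show_decomposition(unsigned long size)
{
	int i;

	do {
		for (i = 0; i < 4; i++) {
			if (size < pmb_sizes[i])
				continue;

			printf("PMB entry: %3lu MiB\n", pmb_sizes[i] >> 20);
			size -= pmb_sizes[i];
			i--;	/* retry the same size before falling back */
		}
	} while (size >= (16UL << 20));
}

int main(void)
{
	show_decomposition(192UL << 20);	/* -> one 128M + one 64M entry */
	return 0;
}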
+317 -185
drivers/dma/shdma.c
··· 24 24 #include <linux/delay.h> 25 25 #include <linux/dma-mapping.h> 26 26 #include <linux/platform_device.h> 27 - #include <cpu/dma.h> 28 - #include <asm/dma-sh.h> 27 + #include <linux/pm_runtime.h> 28 + 29 + #include <asm/dmaengine.h> 30 + 29 31 #include "shdma.h" 30 32 31 33 /* DMA descriptor control */ ··· 40 38 }; 41 39 42 40 #define NR_DESCS_PER_CHANNEL 32 43 - /* 44 - * Define the default configuration for dual address memory-memory transfer. 45 - * The 0x400 value represents auto-request, external->external. 46 - * 47 - * And this driver set 4byte burst mode. 48 - * If you want to change mode, you need to change RS_DEFAULT of value. 49 - * (ex 1byte burst mode -> (RS_DUAL & ~TS_32) 50 - */ 51 - #define RS_DEFAULT (RS_DUAL) 41 + /* Default MEMCPY transfer size = 2^2 = 4 bytes */ 42 + #define LOG2_DEFAULT_XFER_SIZE 2 52 43 53 44 /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */ 54 45 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)]; 55 46 56 47 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all); 57 48 58 - #define SH_DMAC_CHAN_BASE(id) (dma_base_addr[id]) 59 49 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg) 60 50 { 61 - ctrl_outl(data, SH_DMAC_CHAN_BASE(sh_dc->id) + reg); 51 + __raw_writel(data, sh_dc->base + reg / sizeof(u32)); 62 52 } 63 53 64 54 static u32 sh_dmae_readl(struct sh_dmae_chan *sh_dc, u32 reg) 65 55 { 66 - return ctrl_inl(SH_DMAC_CHAN_BASE(sh_dc->id) + reg); 56 + return __raw_readl(sh_dc->base + reg / sizeof(u32)); 57 + } 58 + 59 + static u16 dmaor_read(struct sh_dmae_device *shdev) 60 + { 61 + return __raw_readw(shdev->chan_reg + DMAOR / sizeof(u32)); 62 + } 63 + 64 + static void dmaor_write(struct sh_dmae_device *shdev, u16 data) 65 + { 66 + __raw_writew(data, shdev->chan_reg + DMAOR / sizeof(u32)); 67 67 } 68 68 69 69 /* ··· 73 69 * 74 70 * SH7780 has two DMAOR register 75 71 */ 76 - static void sh_dmae_ctl_stop(int id) 72 + static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev) 77 73 { 78 - unsigned short dmaor = dmaor_read_reg(id); 74 + unsigned short dmaor = dmaor_read(shdev); 79 75 80 - dmaor &= ~(DMAOR_NMIF | DMAOR_AE); 81 - dmaor_write_reg(id, dmaor); 76 + dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME)); 82 77 } 83 78 84 - static int sh_dmae_rst(int id) 79 + static int sh_dmae_rst(struct sh_dmae_device *shdev) 85 80 { 86 81 unsigned short dmaor; 87 82 88 - sh_dmae_ctl_stop(id); 89 - dmaor = dmaor_read_reg(id) | DMAOR_INIT; 83 + sh_dmae_ctl_stop(shdev); 84 + dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init; 90 85 91 - dmaor_write_reg(id, dmaor); 92 - if (dmaor_read_reg(id) & (DMAOR_AE | DMAOR_NMIF)) { 93 - pr_warning(KERN_ERR "dma-sh: Can't initialize DMAOR.\n"); 86 + dmaor_write(shdev, dmaor); 87 + if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) { 88 + pr_warning("dma-sh: Can't initialize DMAOR.\n"); 94 89 return -EINVAL; 95 90 } 96 91 return 0; ··· 105 102 return false; /* waiting */ 106 103 } 107 104 108 - static unsigned int ts_shift[] = TS_SHIFT; 109 - static inline unsigned int calc_xmit_shift(u32 chcr) 105 + static unsigned int calc_xmit_shift(struct sh_dmae_chan *sh_chan, u32 chcr) 110 106 { 111 - int cnt = ((chcr & CHCR_TS_LOW_MASK) >> CHCR_TS_LOW_SHIFT) | 112 - ((chcr & CHCR_TS_HIGH_MASK) >> CHCR_TS_HIGH_SHIFT); 107 + struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 108 + struct sh_dmae_device, common); 109 + struct sh_dmae_pdata *pdata = shdev->pdata; 110 + int cnt = ((chcr & pdata->ts_low_mask) >> 
pdata->ts_low_shift) | 111 + ((chcr & pdata->ts_high_mask) >> pdata->ts_high_shift); 113 112 114 - return ts_shift[cnt]; 113 + if (cnt >= pdata->ts_shift_num) 114 + cnt = 0; 115 + 116 + return pdata->ts_shift[cnt]; 117 + } 118 + 119 + static u32 log2size_to_chcr(struct sh_dmae_chan *sh_chan, int l2size) 120 + { 121 + struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 122 + struct sh_dmae_device, common); 123 + struct sh_dmae_pdata *pdata = shdev->pdata; 124 + int i; 125 + 126 + for (i = 0; i < pdata->ts_shift_num; i++) 127 + if (pdata->ts_shift[i] == l2size) 128 + break; 129 + 130 + if (i == pdata->ts_shift_num) 131 + i = 0; 132 + 133 + return ((i << pdata->ts_low_shift) & pdata->ts_low_mask) | 134 + ((i << pdata->ts_high_shift) & pdata->ts_high_mask); 115 135 } 116 136 117 137 static void dmae_set_reg(struct sh_dmae_chan *sh_chan, struct sh_dmae_regs *hw) ··· 162 136 163 137 static void dmae_init(struct sh_dmae_chan *sh_chan) 164 138 { 165 - u32 chcr = RS_DEFAULT; /* default is DUAL mode */ 166 - sh_chan->xmit_shift = calc_xmit_shift(chcr); 139 + /* 140 + * Default configuration for dual address memory-memory transfer. 141 + * 0x400 represents auto-request. 142 + */ 143 + u32 chcr = DM_INC | SM_INC | 0x400 | log2size_to_chcr(sh_chan, 144 + LOG2_DEFAULT_XFER_SIZE); 145 + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, chcr); 167 146 sh_dmae_writel(sh_chan, chcr, CHCR); 168 147 } 169 148 ··· 178 147 if (dmae_is_busy(sh_chan)) 179 148 return -EBUSY; 180 149 181 - sh_chan->xmit_shift = calc_xmit_shift(val); 150 + sh_chan->xmit_shift = calc_xmit_shift(sh_chan, val); 182 151 sh_dmae_writel(sh_chan, val, CHCR); 183 152 184 153 return 0; 185 154 } 186 155 187 - #define DMARS_SHIFT 8 188 - #define DMARS_CHAN_MSK 0x01 189 156 static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val) 190 157 { 191 - u32 addr; 192 - int shift = 0; 158 + struct sh_dmae_device *shdev = container_of(sh_chan->common.device, 159 + struct sh_dmae_device, common); 160 + struct sh_dmae_pdata *pdata = shdev->pdata; 161 + struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id]; 162 + u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16); 163 + int shift = chan_pdata->dmars_bit; 193 164 194 165 if (dmae_is_busy(sh_chan)) 195 166 return -EBUSY; 196 167 197 - if (sh_chan->id & DMARS_CHAN_MSK) 198 - shift = DMARS_SHIFT; 199 - 200 - if (sh_chan->id < 6) 201 - /* DMA0RS0 - DMA0RS2 */ 202 - addr = SH_DMARS_BASE0 + (sh_chan->id / 2) * 4; 203 - #ifdef SH_DMARS_BASE1 204 - else if (sh_chan->id < 12) 205 - /* DMA1RS0 - DMA1RS2 */ 206 - addr = SH_DMARS_BASE1 + ((sh_chan->id - 6) / 2) * 4; 207 - #endif 208 - else 209 - return -EINVAL; 210 - 211 - ctrl_outw((val << shift) | (ctrl_inw(addr) & (0xFF00 >> shift)), addr); 168 + __raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift), 169 + addr); 212 170 213 171 return 0; 214 172 } ··· 271 251 struct dma_device *dma_dev = sh_chan->common.device; 272 252 struct sh_dmae_device *shdev = container_of(dma_dev, 273 253 struct sh_dmae_device, common); 274 - struct sh_dmae_pdata *pdata = &shdev->pdata; 254 + struct sh_dmae_pdata *pdata = shdev->pdata; 275 255 int i; 276 256 277 257 if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER) 278 258 return NULL; 279 259 280 - for (i = 0; i < pdata->config_num; i++) 281 - if (pdata->config[i].slave_id == slave_id) 282 - return pdata->config + i; 260 + for (i = 0; i < pdata->slave_num; i++) 261 + if (pdata->slave[i].slave_id == slave_id) 262 + return pdata->slave + i; 283 263 284 264 return NULL; 285 265 } ··· 
289 269 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 290 270 struct sh_desc *desc; 291 271 struct sh_dmae_slave *param = chan->private; 272 + 273 + pm_runtime_get_sync(sh_chan->dev); 292 274 293 275 /* 294 276 * This relies on the guarantee from dmaengine that alloc_chan_resources ··· 310 288 311 289 dmae_set_dmars(sh_chan, cfg->mid_rid); 312 290 dmae_set_chcr(sh_chan, cfg->chcr); 313 - } else { 314 - if ((sh_dmae_readl(sh_chan, CHCR) & 0x700) != 0x400) 315 - dmae_set_chcr(sh_chan, RS_DEFAULT); 291 + } else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) { 292 + dmae_init(sh_chan); 316 293 } 317 294 318 295 spin_lock_bh(&sh_chan->desc_lock); ··· 333 312 } 334 313 spin_unlock_bh(&sh_chan->desc_lock); 335 314 315 + if (!sh_chan->descs_allocated) 316 + pm_runtime_put(sh_chan->dev); 317 + 336 318 return sh_chan->descs_allocated; 337 319 } 338 320 ··· 347 323 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 348 324 struct sh_desc *desc, *_desc; 349 325 LIST_HEAD(list); 326 + int descs = sh_chan->descs_allocated; 350 327 351 328 dmae_halt(sh_chan); 352 329 ··· 367 342 sh_chan->descs_allocated = 0; 368 343 369 344 spin_unlock_bh(&sh_chan->desc_lock); 345 + 346 + if (descs > 0) 347 + pm_runtime_put(sh_chan->dev); 370 348 371 349 list_for_each_entry_safe(desc, _desc, &list, node) 372 350 kfree(desc); ··· 587 559 if (!chan) 588 560 return; 589 561 562 + dmae_halt(sh_chan); 563 + 564 + spin_lock_bh(&sh_chan->desc_lock); 565 + if (!list_empty(&sh_chan->ld_queue)) { 566 + /* Record partial transfer */ 567 + struct sh_desc *desc = list_entry(sh_chan->ld_queue.next, 568 + struct sh_desc, node); 569 + desc->partial = (desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) << 570 + sh_chan->xmit_shift; 571 + 572 + } 573 + spin_unlock_bh(&sh_chan->desc_lock); 574 + 590 575 sh_dmae_chan_ld_cleanup(sh_chan, true); 591 576 } 592 577 ··· 702 661 703 662 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 704 663 { 705 - struct sh_desc *sd; 664 + struct sh_desc *desc; 706 665 707 666 spin_lock_bh(&sh_chan->desc_lock); 708 667 /* DMA work check */ ··· 712 671 } 713 672 714 673 /* Find the first not transferred desciptor */ 715 - list_for_each_entry(sd, &sh_chan->ld_queue, node) 716 - if (sd->mark == DESC_SUBMITTED) { 674 + list_for_each_entry(desc, &sh_chan->ld_queue, node) 675 + if (desc->mark == DESC_SUBMITTED) { 676 + dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n", 677 + desc->async_tx.cookie, sh_chan->id, 678 + desc->hw.tcr, desc->hw.sar, desc->hw.dar); 717 679 /* Get the ld start address from ld_queue */ 718 - dmae_set_reg(sh_chan, &sd->hw); 680 + dmae_set_reg(sh_chan, &desc->hw); 719 681 dmae_start(sh_chan); 720 682 break; 721 683 } ··· 740 696 struct sh_dmae_chan *sh_chan = to_sh_chan(chan); 741 697 dma_cookie_t last_used; 742 698 dma_cookie_t last_complete; 699 + enum dma_status status; 743 700 744 701 sh_dmae_chan_ld_cleanup(sh_chan, false); 745 702 ··· 754 709 if (used) 755 710 *used = last_used; 756 711 757 - return dma_async_is_complete(cookie, last_complete, last_used); 712 + spin_lock_bh(&sh_chan->desc_lock); 713 + 714 + status = dma_async_is_complete(cookie, last_complete, last_used); 715 + 716 + /* 717 + * If we don't find cookie on the queue, it has been aborted and we have 718 + * to report error 719 + */ 720 + if (status != DMA_SUCCESS) { 721 + struct sh_desc *desc; 722 + status = DMA_ERROR; 723 + list_for_each_entry(desc, &sh_chan->ld_queue, node) 724 + if (desc->cookie == cookie) { 725 + status = DMA_IN_PROGRESS; 726 + break; 727 + } 728 + } 729 + 730 + 
spin_unlock_bh(&sh_chan->desc_lock); 731 + 732 + return status; 758 733 } 759 734 760 735 static irqreturn_t sh_dmae_interrupt(int irq, void *data) ··· 797 732 #if defined(CONFIG_CPU_SH4) 798 733 static irqreturn_t sh_dmae_err(int irq, void *data) 799 734 { 800 - int err = 0; 801 735 struct sh_dmae_device *shdev = (struct sh_dmae_device *)data; 736 + int i; 802 737 803 - /* IRQ Multi */ 804 - if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 805 - int __maybe_unused cnt = 0; 806 - switch (irq) { 807 - #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) 808 - case DMTE6_IRQ: 809 - cnt++; 810 - #endif 811 - case DMTE0_IRQ: 812 - if (dmaor_read_reg(cnt) & (DMAOR_NMIF | DMAOR_AE)) { 813 - disable_irq(irq); 814 - return IRQ_HANDLED; 738 + /* halt the dma controller */ 739 + sh_dmae_ctl_stop(shdev); 740 + 741 + /* We cannot detect, which channel caused the error, have to reset all */ 742 + for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) { 743 + struct sh_dmae_chan *sh_chan = shdev->chan[i]; 744 + if (sh_chan) { 745 + struct sh_desc *desc; 746 + /* Stop the channel */ 747 + dmae_halt(sh_chan); 748 + /* Complete all */ 749 + list_for_each_entry(desc, &sh_chan->ld_queue, node) { 750 + struct dma_async_tx_descriptor *tx = &desc->async_tx; 751 + desc->mark = DESC_IDLE; 752 + if (tx->callback) 753 + tx->callback(tx->callback_param); 815 754 } 816 - default: 817 - return IRQ_NONE; 755 + list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free); 818 756 } 819 - } else { 820 - /* reset dma controller */ 821 - err = sh_dmae_rst(0); 822 - if (err) 823 - return err; 824 - #ifdef SH_DMAC_BASE1 825 - if (shdev->pdata.mode & SHDMA_DMAOR1) { 826 - err = sh_dmae_rst(1); 827 - if (err) 828 - return err; 829 - } 830 - #endif 831 - disable_irq(irq); 832 - return IRQ_HANDLED; 833 757 } 758 + sh_dmae_rst(shdev); 759 + 760 + return IRQ_HANDLED; 834 761 } 835 762 #endif 836 763 ··· 853 796 sh_dmae_chan_ld_cleanup(sh_chan, false); 854 797 } 855 798 856 - static unsigned int get_dmae_irq(unsigned int id) 857 - { 858 - unsigned int irq = 0; 859 - if (id < ARRAY_SIZE(dmte_irq_map)) 860 - irq = dmte_irq_map[id]; 861 - return irq; 862 - } 863 - 864 - static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id) 799 + static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id, 800 + int irq, unsigned long flags) 865 801 { 866 802 int err; 867 - unsigned int irq = get_dmae_irq(id); 868 - unsigned long irqflags = IRQF_DISABLED; 803 + struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id]; 804 + struct platform_device *pdev = to_platform_device(shdev->common.dev); 869 805 struct sh_dmae_chan *new_sh_chan; 870 806 871 807 /* alloc channel */ ··· 869 819 return -ENOMEM; 870 820 } 871 821 822 + /* copy struct dma_device */ 823 + new_sh_chan->common.device = &shdev->common; 824 + 872 825 new_sh_chan->dev = shdev->common.dev; 873 826 new_sh_chan->id = id; 827 + new_sh_chan->irq = irq; 828 + new_sh_chan->base = shdev->chan_reg + chan_pdata->offset / sizeof(u32); 874 829 875 830 /* Init DMA tasklet */ 876 831 tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet, ··· 890 835 INIT_LIST_HEAD(&new_sh_chan->ld_queue); 891 836 INIT_LIST_HEAD(&new_sh_chan->ld_free); 892 837 893 - /* copy struct dma_device */ 894 - new_sh_chan->common.device = &shdev->common; 895 - 896 838 /* Add the channel to DMA device channel list */ 897 839 list_add_tail(&new_sh_chan->common.device_node, 898 840 &shdev->common.channels); 899 841 shdev->common.chancnt++; 900 842 901 - if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 902 - irqflags = IRQF_SHARED; 
903 - #if defined(DMTE6_IRQ) 904 - if (irq >= DMTE6_IRQ) 905 - irq = DMTE6_IRQ; 906 - else 907 - #endif 908 - irq = DMTE0_IRQ; 909 - } 910 - 911 - snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), 912 - "sh-dmae%d", new_sh_chan->id); 843 + if (pdev->id >= 0) 844 + snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), 845 + "sh-dmae%d.%d", pdev->id, new_sh_chan->id); 846 + else 847 + snprintf(new_sh_chan->dev_id, sizeof(new_sh_chan->dev_id), 848 + "sh-dma%d", new_sh_chan->id); 913 849 914 850 /* set up channel irq */ 915 - err = request_irq(irq, &sh_dmae_interrupt, irqflags, 851 + err = request_irq(irq, &sh_dmae_interrupt, flags, 916 852 new_sh_chan->dev_id, new_sh_chan); 917 853 if (err) { 918 854 dev_err(shdev->common.dev, "DMA channel %d request_irq error " ··· 927 881 928 882 for (i = shdev->common.chancnt - 1 ; i >= 0 ; i--) { 929 883 if (shdev->chan[i]) { 930 - struct sh_dmae_chan *shchan = shdev->chan[i]; 931 - if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) 932 - free_irq(dmte_irq_map[i], shchan); 884 + struct sh_dmae_chan *sh_chan = shdev->chan[i]; 933 885 934 - list_del(&shchan->common.device_node); 935 - kfree(shchan); 886 + free_irq(sh_chan->irq, sh_chan); 887 + 888 + list_del(&sh_chan->common.device_node); 889 + kfree(sh_chan); 936 890 shdev->chan[i] = NULL; 937 891 } 938 892 } ··· 941 895 942 896 static int __init sh_dmae_probe(struct platform_device *pdev) 943 897 { 944 - int err = 0, cnt, ecnt; 945 - unsigned long irqflags = IRQF_DISABLED; 946 - #if defined(CONFIG_CPU_SH4) 947 - int eirq[] = { DMAE0_IRQ, 948 - #if defined(DMAE1_IRQ) 949 - DMAE1_IRQ 950 - #endif 951 - }; 952 - #endif 898 + struct sh_dmae_pdata *pdata = pdev->dev.platform_data; 899 + unsigned long irqflags = IRQF_DISABLED, 900 + chan_flag[SH_DMAC_MAX_CHANNELS] = {}; 901 + int errirq, chan_irq[SH_DMAC_MAX_CHANNELS]; 902 + int err, i, irq_cnt = 0, irqres = 0; 953 903 struct sh_dmae_device *shdev; 904 + struct resource *chan, *dmars, *errirq_res, *chanirq_res; 954 905 955 906 /* get platform data */ 956 - if (!pdev->dev.platform_data) 907 + if (!pdata || !pdata->channel_num) 957 908 return -ENODEV; 958 909 910 + chan = platform_get_resource(pdev, IORESOURCE_MEM, 0); 911 + /* DMARS area is optional, if absent, this controller cannot do slave DMA */ 912 + dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1); 913 + /* 914 + * IRQ resources: 915 + * 1. there always must be at least one IRQ IO-resource. On SH4 it is 916 + * the error IRQ, in which case it is the only IRQ in this resource: 917 + * start == end. If it is the only IRQ resource, all channels also 918 + * use the same IRQ. 919 + * 2. DMA channel IRQ resources can be specified one per resource or in 920 + * ranges (start != end) 921 + * 3. iff all events (channels and, optionally, error) on this 922 + * controller use the same IRQ, only one IRQ resource can be 923 + * specified, otherwise there must be one IRQ per channel, even if 924 + * some of them are equal 925 + * 4. 
if all IRQs on this controller are equal or if some specific IRQs 926 + * specify IORESOURCE_IRQ_SHAREABLE in their resources, they will be 927 + * requested with the IRQF_SHARED flag 928 + */ 929 + errirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 930 + if (!chan || !errirq_res) 931 + return -ENODEV; 932 + 933 + if (!request_mem_region(chan->start, resource_size(chan), pdev->name)) { 934 + dev_err(&pdev->dev, "DMAC register region already claimed\n"); 935 + return -EBUSY; 936 + } 937 + 938 + if (dmars && !request_mem_region(dmars->start, resource_size(dmars), pdev->name)) { 939 + dev_err(&pdev->dev, "DMAC DMARS region already claimed\n"); 940 + err = -EBUSY; 941 + goto ermrdmars; 942 + } 943 + 944 + err = -ENOMEM; 959 945 shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL); 960 946 if (!shdev) { 961 - dev_err(&pdev->dev, "No enough memory\n"); 962 - return -ENOMEM; 947 + dev_err(&pdev->dev, "Not enough memory\n"); 948 + goto ealloc; 949 + } 950 + 951 + shdev->chan_reg = ioremap(chan->start, resource_size(chan)); 952 + if (!shdev->chan_reg) 953 + goto emapchan; 954 + if (dmars) { 955 + shdev->dmars = ioremap(dmars->start, resource_size(dmars)); 956 + if (!shdev->dmars) 957 + goto emapdmars; 963 958 } 964 959 965 960 /* platform data */ 966 - memcpy(&shdev->pdata, pdev->dev.platform_data, 967 - sizeof(struct sh_dmae_pdata)); 961 + shdev->pdata = pdata; 962 + 963 + pm_runtime_enable(&pdev->dev); 964 + pm_runtime_get_sync(&pdev->dev); 968 965 969 966 /* reset dma controller */ 970 - err = sh_dmae_rst(0); 967 + err = sh_dmae_rst(shdev); 971 968 if (err) 972 969 goto rst_err; 973 - 974 - /* SH7780/85/23 has DMAOR1 */ 975 - if (shdev->pdata.mode & SHDMA_DMAOR1) { 976 - err = sh_dmae_rst(1); 977 - if (err) 978 - goto rst_err; 979 - } 980 970 981 971 INIT_LIST_HEAD(&shdev->common.channels); 982 972 983 973 dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask); 984 - dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); 974 + if (dmars) 975 + dma_cap_set(DMA_SLAVE, shdev->common.cap_mask); 985 976 986 977 shdev->common.device_alloc_chan_resources 987 978 = sh_dmae_alloc_chan_resources; ··· 1033 950 1034 951 shdev->common.dev = &pdev->dev; 1035 952 /* Default transfer size of 32 bytes requires 32-byte alignment */ 1036 - shdev->common.copy_align = 5; 953 + shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE; 1037 954 1038 955 #if defined(CONFIG_CPU_SH4) 1039 - /* Non Mix IRQ mode SH7722/SH7730 etc... 
*/ 1040 - if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 956 + chanirq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 1); 957 + 958 + if (!chanirq_res) 959 + chanirq_res = errirq_res; 960 + else 961 + irqres++; 962 + 963 + if (chanirq_res == errirq_res || 964 + (errirq_res->flags & IORESOURCE_BITS) == IORESOURCE_IRQ_SHAREABLE) 1041 965 irqflags = IRQF_SHARED; 1042 - eirq[0] = DMTE0_IRQ; 1043 - #if defined(DMTE6_IRQ) && defined(DMAE1_IRQ) 1044 - eirq[1] = DMTE6_IRQ; 1045 - #endif 966 + 967 + errirq = errirq_res->start; 968 + 969 + err = request_irq(errirq, sh_dmae_err, irqflags, 970 + "DMAC Address Error", shdev); 971 + if (err) { 972 + dev_err(&pdev->dev, 973 + "DMA failed requesting irq #%d, error %d\n", 974 + errirq, err); 975 + goto eirq_err; 1046 976 } 1047 977 1048 - for (ecnt = 0 ; ecnt < ARRAY_SIZE(eirq); ecnt++) { 1049 - err = request_irq(eirq[ecnt], sh_dmae_err, irqflags, 1050 - "DMAC Address Error", shdev); 1051 - if (err) { 1052 - dev_err(&pdev->dev, "DMA device request_irq" 1053 - "error (irq %d) with return %d\n", 1054 - eirq[ecnt], err); 1055 - goto eirq_err; 1056 - } 1057 - } 978 + #else 979 + chanirq_res = errirq_res; 1058 980 #endif /* CONFIG_CPU_SH4 */ 1059 981 982 + if (chanirq_res->start == chanirq_res->end && 983 + !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) { 984 + /* Special case - all multiplexed */ 985 + for (; irq_cnt < pdata->channel_num; irq_cnt++) { 986 + chan_irq[irq_cnt] = chanirq_res->start; 987 + chan_flag[irq_cnt] = IRQF_SHARED; 988 + } 989 + } else { 990 + do { 991 + for (i = chanirq_res->start; i <= chanirq_res->end; i++) { 992 + if ((errirq_res->flags & IORESOURCE_BITS) == 993 + IORESOURCE_IRQ_SHAREABLE) 994 + chan_flag[irq_cnt] = IRQF_SHARED; 995 + else 996 + chan_flag[irq_cnt] = IRQF_DISABLED; 997 + dev_dbg(&pdev->dev, 998 + "Found IRQ %d for channel %d\n", 999 + i, irq_cnt); 1000 + chan_irq[irq_cnt++] = i; 1001 + } 1002 + chanirq_res = platform_get_resource(pdev, 1003 + IORESOURCE_IRQ, ++irqres); 1004 + } while (irq_cnt < pdata->channel_num && chanirq_res); 1005 + } 1006 + 1007 + if (irq_cnt < pdata->channel_num) 1008 + goto eirqres; 1009 + 1060 1010 /* Create DMA Channel */ 1061 - for (cnt = 0 ; cnt < MAX_DMA_CHANNELS ; cnt++) { 1062 - err = sh_dmae_chan_probe(shdev, cnt); 1011 + for (i = 0; i < pdata->channel_num; i++) { 1012 + err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]); 1063 1013 if (err) 1064 1014 goto chan_probe_err; 1065 1015 } 1016 + 1017 + pm_runtime_put(&pdev->dev); 1066 1018 1067 1019 platform_set_drvdata(pdev, shdev); 1068 1020 dma_async_device_register(&shdev->common); ··· 1106 988 1107 989 chan_probe_err: 1108 990 sh_dmae_chan_remove(shdev); 1109 - 991 + eirqres: 992 + #if defined(CONFIG_CPU_SH4) 993 + free_irq(errirq, shdev); 1110 994 eirq_err: 1111 - for (ecnt-- ; ecnt >= 0; ecnt--) 1112 - free_irq(eirq[ecnt], shdev); 1113 - 995 + #endif 1114 996 rst_err: 997 + pm_runtime_put(&pdev->dev); 998 + if (dmars) 999 + iounmap(shdev->dmars); 1000 + emapdmars: 1001 + iounmap(shdev->chan_reg); 1002 + emapchan: 1115 1003 kfree(shdev); 1004 + ealloc: 1005 + if (dmars) 1006 + release_mem_region(dmars->start, resource_size(dmars)); 1007 + ermrdmars: 1008 + release_mem_region(chan->start, resource_size(chan)); 1116 1009 1117 1010 return err; 1118 1011 } ··· 1131 1002 static int __exit sh_dmae_remove(struct platform_device *pdev) 1132 1003 { 1133 1004 struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 1005 + struct resource *res; 1006 + int errirq = platform_get_irq(pdev, 0); 1134 1007 1135 1008 
dma_async_device_unregister(&shdev->common); 1136 1009 1137 - if (shdev->pdata.mode & SHDMA_MIX_IRQ) { 1138 - free_irq(DMTE0_IRQ, shdev); 1139 - #if defined(DMTE6_IRQ) 1140 - free_irq(DMTE6_IRQ, shdev); 1141 - #endif 1142 - } 1010 + if (errirq > 0) 1011 + free_irq(errirq, shdev); 1143 1012 1144 1013 /* channel data remove */ 1145 1014 sh_dmae_chan_remove(shdev); 1146 1015 1147 - if (!(shdev->pdata.mode & SHDMA_MIX_IRQ)) { 1148 - free_irq(DMAE0_IRQ, shdev); 1149 - #if defined(DMAE1_IRQ) 1150 - free_irq(DMAE1_IRQ, shdev); 1151 - #endif 1152 - } 1016 + pm_runtime_disable(&pdev->dev); 1017 + 1018 + if (shdev->dmars) 1019 + iounmap(shdev->dmars); 1020 + iounmap(shdev->chan_reg); 1021 + 1153 1022 kfree(shdev); 1023 + 1024 + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1025 + if (res) 1026 + release_mem_region(res->start, resource_size(res)); 1027 + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1028 + if (res) 1029 + release_mem_region(res->start, resource_size(res)); 1154 1030 1155 1031 return 0; 1156 1032 } ··· 1163 1029 static void sh_dmae_shutdown(struct platform_device *pdev) 1164 1030 { 1165 1031 struct sh_dmae_device *shdev = platform_get_drvdata(pdev); 1166 - sh_dmae_ctl_stop(0); 1167 - if (shdev->pdata.mode & SHDMA_DMAOR1) 1168 - sh_dmae_ctl_stop(1); 1032 + sh_dmae_ctl_stop(shdev); 1169 1033 } 1170 1034 1171 1035 static struct platform_driver sh_dmae_driver = {
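The probe's resource comment above (points 1 through 4) is easiest to read next to a concrete declaration. The following is a hedged sketch of how a board or CPU file might describe one controller to this driver; every address and IRQ number is a placeholder, the device name is assumed to match the shdma platform driver, and sh_dmae0_platform_data refers to the struct sh_dmae_pdata sketched after the shdma.h hunk below.

#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource sh_dmae0_resources[] = {
	{
		/* Channel register block: IORESOURCE_MEM 0, required */
		.start	= 0xfe008020,
		.end	= 0xfe00808f,
		.flags	= IORESOURCE_MEM,
	}, {
		/* DMARS block: IORESOURCE_MEM 1, optional (slave DMA only) */
		.start	= 0xfe009000,
		.end	= 0xfe00900b,
		.flags	= IORESOURCE_MEM,
	}, {
		/* Error IRQ: IORESOURCE_IRQ 0, start == end */
		.start	= 34,
		.end	= 34,
		.flags	= IORESOURCE_IRQ,
	}, {
		/*
		 * Channel IRQs, here as one range; mark the resource
		 * IORESOURCE_IRQ_SHAREABLE if several channels share a line.
		 */
		.start	= 48,
		.end	= 53,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device sh_dmae0_device = {
	.name		= "sh-dma-engine",	/* assumed driver name */
	.id		= 0,
	.resource	= sh_dmae0_resources,
	.num_resources	= ARRAY_SIZE(sh_dmae0_resources),
	.dev		= {
		/* struct sh_dmae_pdata, see the sketch further below */
		.platform_data	= &sh_dmae0_platform_data,
	},
};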
+8 -18
drivers/dma/shdma.h
··· 17 17 #include <linux/interrupt.h> 18 18 #include <linux/list.h> 19 19 20 + #include <asm/dmaengine.h> 21 + 20 22 #define SH_DMA_TCR_MAX 0x00FFFFFF /* 16MB */ 21 - 22 - struct sh_dmae_regs { 23 - u32 sar; /* SAR / source address */ 24 - u32 dar; /* DAR / destination address */ 25 - u32 tcr; /* TCR / transfer count */ 26 - }; 27 - 28 - struct sh_desc { 29 - struct sh_dmae_regs hw; 30 - struct list_head node; 31 - struct dma_async_tx_descriptor async_tx; 32 - enum dma_data_direction direction; 33 - dma_cookie_t cookie; 34 - int chunks; 35 - int mark; 36 - }; 37 23 38 24 struct device; 39 25 ··· 33 47 struct tasklet_struct tasklet; /* Tasklet */ 34 48 int descs_allocated; /* desc count */ 35 49 int xmit_shift; /* log_2(bytes_per_xfer) */ 50 + int irq; 36 51 int id; /* Raw id of this channel */ 52 + u32 __iomem *base; 37 53 char dev_id[16]; /* unique name per DMAC of channel */ 38 54 }; 39 55 40 56 struct sh_dmae_device { 41 57 struct dma_device common; 42 - struct sh_dmae_chan *chan[MAX_DMA_CHANNELS]; 43 - struct sh_dmae_pdata pdata; 58 + struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS]; 59 + struct sh_dmae_pdata *pdata; 60 + u32 __iomem *chan_reg; 61 + u16 __iomem *dmars; 44 62 }; 45 63 46 64 #define to_sh_chan(chan) container_of(chan, struct sh_dmae_chan, common)
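The driver now takes everything SoC-specific from struct sh_dmae_pdata in the new <asm/dmaengine.h>. The sketch below is a partial, hedged initializer showing only the fields read by the hunks above (dmaor_init, the CHCR transfer-size description used by calc_xmit_shift()/log2size_to_chcr(), and the per-channel register/DMARS layout); the numeric values are placeholders rather than real SoC data, and the exact member types are assumed from how the probe uses them.

#include <linux/kernel.h>
#include <asm/dmaengine.h>

static struct sh_dmae_channel sh_dmae0_channels[] = {
	{ .offset = 0x00, .dmars = 0x00, .dmars_bit = 0 },
	{ .offset = 0x10, .dmars = 0x00, .dmars_bit = 8 },
	/* ... one entry per physical channel ... */
};

static unsigned int dmae_ts_shift[] = { 2, 3, 4, 5 };	/* log2(bytes per xfer) */

static struct sh_dmae_pdata sh_dmae0_platform_data = {
	.channel	= sh_dmae0_channels,
	.channel_num	= ARRAY_SIZE(sh_dmae0_channels),
	.ts_low_shift	= 3,	/* position of the low CHCR.TS bits */
	.ts_low_mask	= 0x18,
	.ts_high_shift	= 0,	/* no high TS bits assumed here */
	.ts_high_mask	= 0,
	.ts_shift	= dmae_ts_shift,
	.ts_shift_num	= ARRAY_SIZE(dmae_ts_shift),
	.dmaor_init	= DMAOR_DME,	/* OR-ed back in by sh_dmae_rst() */
	/* .slave/.slave_num list the DMARS clients (mid/rid, CHCR) */
};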
+4
drivers/serial/Kconfig
··· 1009 1009 depends on SERIAL_SH_SCI=y 1010 1010 select SERIAL_CORE_CONSOLE 1011 1011 1012 + config SERIAL_SH_SCI_DMA 1013 + bool "DMA support" 1014 + depends on SERIAL_SH_SCI && SH_DMAE && EXPERIMENTAL 1015 + 1012 1016 config SERIAL_PNX8XXX 1013 1017 bool "Enable PNX8XXX SoCs' UART Support" 1014 1018 depends on MIPS && (SOC_PNX8550 || SOC_PNX833X)
+570 -46
drivers/serial/sh-sci.c
··· 48 48 #include <linux/ctype.h> 49 49 #include <linux/err.h> 50 50 #include <linux/list.h> 51 + #include <linux/dmaengine.h> 52 + #include <linux/scatterlist.h> 53 + #include <linux/timer.h> 51 54 52 55 #ifdef CONFIG_SUPERH 53 56 #include <asm/sh_bios.h> ··· 87 84 struct clk *dclk; 88 85 89 86 struct list_head node; 87 + struct dma_chan *chan_tx; 88 + struct dma_chan *chan_rx; 89 + #ifdef CONFIG_SERIAL_SH_SCI_DMA 90 + struct device *dma_dev; 91 + enum sh_dmae_slave_chan_id slave_tx; 92 + enum sh_dmae_slave_chan_id slave_rx; 93 + struct dma_async_tx_descriptor *desc_tx; 94 + struct dma_async_tx_descriptor *desc_rx[2]; 95 + dma_cookie_t cookie_tx; 96 + dma_cookie_t cookie_rx[2]; 97 + dma_cookie_t active_rx; 98 + struct scatterlist sg_tx; 99 + unsigned int sg_len_tx; 100 + struct scatterlist sg_rx[2]; 101 + size_t buf_len_rx; 102 + struct sh_dmae_slave param_tx; 103 + struct sh_dmae_slave param_rx; 104 + struct work_struct work_tx; 105 + struct work_struct work_rx; 106 + struct timer_list rx_timer; 107 + #endif 90 108 }; 91 109 92 110 struct sh_sci_priv { ··· 293 269 defined(CONFIG_CPU_SUBTYPE_SH7780) || \ 294 270 defined(CONFIG_CPU_SUBTYPE_SH7785) || \ 295 271 defined(CONFIG_CPU_SUBTYPE_SH7786) 296 - static inline int scif_txroom(struct uart_port *port) 272 + static int scif_txfill(struct uart_port *port) 297 273 { 298 - return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); 274 + return sci_in(port, SCTFDR) & 0xff; 299 275 } 300 276 301 - static inline int scif_rxroom(struct uart_port *port) 277 + static int scif_txroom(struct uart_port *port) 278 + { 279 + return SCIF_TXROOM_MAX - scif_txfill(port); 280 + } 281 + 282 + static int scif_rxfill(struct uart_port *port) 302 283 { 303 284 return sci_in(port, SCRFDR) & 0xff; 304 285 } 305 286 #elif defined(CONFIG_CPU_SUBTYPE_SH7763) 306 - static inline int scif_txroom(struct uart_port *port) 287 + static int scif_txfill(struct uart_port *port) 307 288 { 308 - if ((port->mapbase == 0xffe00000) || 309 - (port->mapbase == 0xffe08000)) { 289 + if (port->mapbase == 0xffe00000 || 290 + port->mapbase == 0xffe08000) 310 291 /* SCIF0/1*/ 311 - return SCIF_TXROOM_MAX - (sci_in(port, SCTFDR) & 0xff); 312 - } else { 292 + return sci_in(port, SCTFDR) & 0xff; 293 + else 313 294 /* SCIF2 */ 314 - return SCIF2_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); 315 - } 295 + return sci_in(port, SCFDR) >> 8; 316 296 } 317 297 318 - static inline int scif_rxroom(struct uart_port *port) 298 + static int scif_txroom(struct uart_port *port) 299 + { 300 + if (port->mapbase == 0xffe00000 || 301 + port->mapbase == 0xffe08000) 302 + /* SCIF0/1*/ 303 + return SCIF_TXROOM_MAX - scif_txfill(port); 304 + else 305 + /* SCIF2 */ 306 + return SCIF2_TXROOM_MAX - scif_txfill(port); 307 + } 308 + 309 + static int scif_rxfill(struct uart_port *port) 319 310 { 320 311 if ((port->mapbase == 0xffe00000) || 321 312 (port->mapbase == 0xffe08000)) { ··· 342 303 } 343 304 } 344 305 #else 345 - static inline int scif_txroom(struct uart_port *port) 306 + static int scif_txfill(struct uart_port *port) 346 307 { 347 - return SCIF_TXROOM_MAX - (sci_in(port, SCFDR) >> 8); 308 + return sci_in(port, SCFDR) >> 8; 348 309 } 349 310 350 - static inline int scif_rxroom(struct uart_port *port) 311 + static int scif_txroom(struct uart_port *port) 312 + { 313 + return SCIF_TXROOM_MAX - scif_txfill(port); 314 + } 315 + 316 + static int scif_rxfill(struct uart_port *port) 351 317 { 352 318 return sci_in(port, SCFDR) & SCIF_RFDC_MASK; 353 319 } 354 320 #endif 355 321 356 - static inline int sci_txroom(struct 
uart_port *port) 322 + static int sci_txfill(struct uart_port *port) 357 323 { 358 - return (sci_in(port, SCxSR) & SCI_TDRE) != 0; 324 + return !(sci_in(port, SCxSR) & SCI_TDRE); 359 325 } 360 326 361 - static inline int sci_rxroom(struct uart_port *port) 327 + static int sci_txroom(struct uart_port *port) 328 + { 329 + return !sci_txfill(port); 330 + } 331 + 332 + static int sci_rxfill(struct uart_port *port) 362 333 { 363 334 return (sci_in(port, SCxSR) & SCxSR_RDxF(port)) != 0; 364 335 } ··· 455 406 456 407 while (1) { 457 408 if (port->type == PORT_SCI) 458 - count = sci_rxroom(port); 409 + count = sci_rxfill(port); 459 410 else 460 - count = scif_rxroom(port); 411 + count = scif_rxfill(port); 461 412 462 413 /* Don't copy more bytes than there is room for in the buffer */ 463 414 count = tty_buffer_request_room(tty, count); ··· 502 453 } 503 454 504 455 /* Store data and status */ 505 - if (status&SCxSR_FER(port)) { 456 + if (status & SCxSR_FER(port)) { 506 457 flag = TTY_FRAME; 507 458 dev_notice(port->dev, "frame error\n"); 508 - } else if (status&SCxSR_PER(port)) { 459 + } else if (status & SCxSR_PER(port)) { 509 460 flag = TTY_PARITY; 510 461 dev_notice(port->dev, "parity error\n"); 511 462 } else ··· 667 618 return copied; 668 619 } 669 620 670 - static irqreturn_t sci_rx_interrupt(int irq, void *port) 621 + static irqreturn_t sci_rx_interrupt(int irq, void *ptr) 671 622 { 623 + #ifdef CONFIG_SERIAL_SH_SCI_DMA 624 + struct uart_port *port = ptr; 625 + struct sci_port *s = to_sci_port(port); 626 + 627 + if (s->chan_rx) { 628 + unsigned long tout; 629 + u16 scr = sci_in(port, SCSCR); 630 + u16 ssr = sci_in(port, SCxSR); 631 + 632 + /* Disable future Rx interrupts */ 633 + sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE); 634 + /* Clear current interrupt */ 635 + sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port))); 636 + /* Calculate delay for 1.5 DMA buffers */ 637 + tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 / 638 + port->fifosize / 2; 639 + dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n", 640 + tout * 1000 / HZ); 641 + if (tout < 2) 642 + tout = 2; 643 + mod_timer(&s->rx_timer, jiffies + tout); 644 + 645 + return IRQ_HANDLED; 646 + } 647 + #endif 648 + 672 649 /* I think sci_receive_chars has to be called irrespective 673 650 * of whether the I_IXOFF is set, otherwise, how is the interrupt 674 651 * to be disabled? 
675 652 */ 676 - sci_receive_chars(port); 653 + sci_receive_chars(ptr); 677 654 678 655 return IRQ_HANDLED; 679 656 } ··· 755 680 { 756 681 unsigned short ssr_status, scr_status, err_enabled; 757 682 struct uart_port *port = ptr; 683 + struct sci_port *s = to_sci_port(port); 758 684 irqreturn_t ret = IRQ_NONE; 759 685 760 686 ssr_status = sci_in(port, SCxSR); ··· 763 687 err_enabled = scr_status & (SCI_CTRL_FLAGS_REIE | SCI_CTRL_FLAGS_RIE); 764 688 765 689 /* Tx Interrupt */ 766 - if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE)) 690 + if ((ssr_status & SCxSR_TDxE(port)) && (scr_status & SCI_CTRL_FLAGS_TIE) && 691 + !s->chan_tx) 767 692 ret = sci_tx_interrupt(irq, ptr); 768 - /* Rx Interrupt */ 769 - if ((ssr_status & SCxSR_RDxF(port)) && (scr_status & SCI_CTRL_FLAGS_RIE)) 693 + /* 694 + * Rx Interrupt: if we're using DMA, the DMA controller clears RDF / 695 + * DR flags 696 + */ 697 + if (((ssr_status & SCxSR_RDxF(port)) || s->chan_rx) && 698 + (scr_status & SCI_CTRL_FLAGS_RIE)) 770 699 ret = sci_rx_interrupt(irq, ptr); 771 700 /* Error Interrupt */ 772 701 if ((ssr_status & SCxSR_ERRORS(port)) && err_enabled) ··· 779 698 /* Break Interrupt */ 780 699 if ((ssr_status & SCxSR_BRK(port)) && err_enabled) 781 700 ret = sci_br_interrupt(irq, ptr); 701 + 702 + WARN_ONCE(ret == IRQ_NONE, 703 + "%s: %d IRQ %d, status %x, control %x\n", __func__, 704 + irq, port->line, ssr_status, scr_status); 782 705 783 706 return ret; 784 707 } ··· 885 800 static unsigned int sci_tx_empty(struct uart_port *port) 886 801 { 887 802 unsigned short status = sci_in(port, SCxSR); 888 - return status & SCxSR_TEND(port) ? TIOCSER_TEMT : 0; 803 + unsigned short in_tx_fifo = scif_txfill(port); 804 + 805 + return (status & SCxSR_TEND(port)) && !in_tx_fifo ? 
TIOCSER_TEMT : 0; 889 806 } 890 807 891 808 static void sci_set_mctrl(struct uart_port *port, unsigned int mctrl) ··· 899 812 900 813 static unsigned int sci_get_mctrl(struct uart_port *port) 901 814 { 902 - /* This routine is used for geting signals of: DTR, DCD, DSR, RI, 815 + /* This routine is used for getting signals of: DTR, DCD, DSR, RI, 903 816 and CTS/RTS */ 904 817 905 818 return TIOCM_DTR | TIOCM_RTS | TIOCM_DSR; 906 819 } 907 820 821 + #ifdef CONFIG_SERIAL_SH_SCI_DMA 822 + static void sci_dma_tx_complete(void *arg) 823 + { 824 + struct sci_port *s = arg; 825 + struct uart_port *port = &s->port; 826 + struct circ_buf *xmit = &port->state->xmit; 827 + unsigned long flags; 828 + 829 + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 830 + 831 + spin_lock_irqsave(&port->lock, flags); 832 + 833 + xmit->tail += s->sg_tx.length; 834 + xmit->tail &= UART_XMIT_SIZE - 1; 835 + 836 + port->icount.tx += s->sg_tx.length; 837 + 838 + async_tx_ack(s->desc_tx); 839 + s->cookie_tx = -EINVAL; 840 + s->desc_tx = NULL; 841 + 842 + spin_unlock_irqrestore(&port->lock, flags); 843 + 844 + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 845 + uart_write_wakeup(port); 846 + 847 + if (uart_circ_chars_pending(xmit)) 848 + schedule_work(&s->work_tx); 849 + } 850 + 851 + /* Locking: called with port lock held */ 852 + static int sci_dma_rx_push(struct sci_port *s, struct tty_struct *tty, 853 + size_t count) 854 + { 855 + struct uart_port *port = &s->port; 856 + int i, active, room; 857 + 858 + room = tty_buffer_request_room(tty, count); 859 + 860 + if (s->active_rx == s->cookie_rx[0]) { 861 + active = 0; 862 + } else if (s->active_rx == s->cookie_rx[1]) { 863 + active = 1; 864 + } else { 865 + dev_err(port->dev, "cookie %d not found!\n", s->active_rx); 866 + return 0; 867 + } 868 + 869 + if (room < count) 870 + dev_warn(port->dev, "Rx overrun: dropping %u bytes\n", 871 + count - room); 872 + if (!room) 873 + return room; 874 + 875 + for (i = 0; i < room; i++) 876 + tty_insert_flip_char(tty, ((u8 *)sg_virt(&s->sg_rx[active]))[i], 877 + TTY_NORMAL); 878 + 879 + port->icount.rx += room; 880 + 881 + return room; 882 + } 883 + 884 + static void sci_dma_rx_complete(void *arg) 885 + { 886 + struct sci_port *s = arg; 887 + struct uart_port *port = &s->port; 888 + struct tty_struct *tty = port->state->port.tty; 889 + unsigned long flags; 890 + int count; 891 + 892 + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 893 + 894 + spin_lock_irqsave(&port->lock, flags); 895 + 896 + count = sci_dma_rx_push(s, tty, s->buf_len_rx); 897 + 898 + mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5)); 899 + 900 + spin_unlock_irqrestore(&port->lock, flags); 901 + 902 + if (count) 903 + tty_flip_buffer_push(tty); 904 + 905 + schedule_work(&s->work_rx); 906 + } 907 + 908 + static void sci_start_rx(struct uart_port *port); 909 + static void sci_start_tx(struct uart_port *port); 910 + 911 + static void sci_rx_dma_release(struct sci_port *s, bool enable_pio) 912 + { 913 + struct dma_chan *chan = s->chan_rx; 914 + struct uart_port *port = &s->port; 915 + 916 + s->chan_rx = NULL; 917 + s->cookie_rx[0] = s->cookie_rx[1] = -EINVAL; 918 + dma_release_channel(chan); 919 + dma_free_coherent(port->dev, s->buf_len_rx * 2, 920 + sg_virt(&s->sg_rx[0]), sg_dma_address(&s->sg_rx[0])); 921 + if (enable_pio) 922 + sci_start_rx(port); 923 + } 924 + 925 + static void sci_tx_dma_release(struct sci_port *s, bool enable_pio) 926 + { 927 + struct dma_chan *chan = s->chan_tx; 928 + struct uart_port *port = &s->port; 929 + 930 + s->chan_tx = 
NULL; 931 + s->cookie_tx = -EINVAL; 932 + dma_release_channel(chan); 933 + if (enable_pio) 934 + sci_start_tx(port); 935 + } 936 + 937 + static void sci_submit_rx(struct sci_port *s) 938 + { 939 + struct dma_chan *chan = s->chan_rx; 940 + int i; 941 + 942 + for (i = 0; i < 2; i++) { 943 + struct scatterlist *sg = &s->sg_rx[i]; 944 + struct dma_async_tx_descriptor *desc; 945 + 946 + desc = chan->device->device_prep_slave_sg(chan, 947 + sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT); 948 + 949 + if (desc) { 950 + s->desc_rx[i] = desc; 951 + desc->callback = sci_dma_rx_complete; 952 + desc->callback_param = s; 953 + s->cookie_rx[i] = desc->tx_submit(desc); 954 + } 955 + 956 + if (!desc || s->cookie_rx[i] < 0) { 957 + if (i) { 958 + async_tx_ack(s->desc_rx[0]); 959 + s->cookie_rx[0] = -EINVAL; 960 + } 961 + if (desc) { 962 + async_tx_ack(desc); 963 + s->cookie_rx[i] = -EINVAL; 964 + } 965 + dev_warn(s->port.dev, 966 + "failed to re-start DMA, using PIO\n"); 967 + sci_rx_dma_release(s, true); 968 + return; 969 + } 970 + } 971 + 972 + s->active_rx = s->cookie_rx[0]; 973 + 974 + dma_async_issue_pending(chan); 975 + } 976 + 977 + static void work_fn_rx(struct work_struct *work) 978 + { 979 + struct sci_port *s = container_of(work, struct sci_port, work_rx); 980 + struct uart_port *port = &s->port; 981 + struct dma_async_tx_descriptor *desc; 982 + int new; 983 + 984 + if (s->active_rx == s->cookie_rx[0]) { 985 + new = 0; 986 + } else if (s->active_rx == s->cookie_rx[1]) { 987 + new = 1; 988 + } else { 989 + dev_err(port->dev, "cookie %d not found!\n", s->active_rx); 990 + return; 991 + } 992 + desc = s->desc_rx[new]; 993 + 994 + if (dma_async_is_tx_complete(s->chan_rx, s->active_rx, NULL, NULL) != 995 + DMA_SUCCESS) { 996 + /* Handle incomplete DMA receive */ 997 + struct tty_struct *tty = port->state->port.tty; 998 + struct dma_chan *chan = s->chan_rx; 999 + struct sh_desc *sh_desc = container_of(desc, struct sh_desc, 1000 + async_tx); 1001 + unsigned long flags; 1002 + int count; 1003 + 1004 + chan->device->device_terminate_all(chan); 1005 + dev_dbg(port->dev, "Read %u bytes with cookie %d\n", 1006 + sh_desc->partial, sh_desc->cookie); 1007 + 1008 + spin_lock_irqsave(&port->lock, flags); 1009 + count = sci_dma_rx_push(s, tty, sh_desc->partial); 1010 + spin_unlock_irqrestore(&port->lock, flags); 1011 + 1012 + if (count) 1013 + tty_flip_buffer_push(tty); 1014 + 1015 + sci_submit_rx(s); 1016 + 1017 + return; 1018 + } 1019 + 1020 + s->cookie_rx[new] = desc->tx_submit(desc); 1021 + if (s->cookie_rx[new] < 0) { 1022 + dev_warn(port->dev, "Failed submitting Rx DMA descriptor\n"); 1023 + sci_rx_dma_release(s, true); 1024 + return; 1025 + } 1026 + 1027 + dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__, 1028 + s->cookie_rx[new], new); 1029 + 1030 + s->active_rx = s->cookie_rx[!new]; 1031 + } 1032 + 1033 + static void work_fn_tx(struct work_struct *work) 1034 + { 1035 + struct sci_port *s = container_of(work, struct sci_port, work_tx); 1036 + struct dma_async_tx_descriptor *desc; 1037 + struct dma_chan *chan = s->chan_tx; 1038 + struct uart_port *port = &s->port; 1039 + struct circ_buf *xmit = &port->state->xmit; 1040 + struct scatterlist *sg = &s->sg_tx; 1041 + 1042 + /* 1043 + * DMA is idle now. 1044 + * Port xmit buffer is already mapped, and it is one page... Just adjust 1045 + * offsets and lengths. Since it is a circular buffer, we have to 1046 + * transmit till the end, and then the rest. Take the port lock to get a 1047 + * consistent xmit buffer state. 
1048 + */ 1049 + spin_lock_irq(&port->lock); 1050 + sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); 1051 + sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) + 1052 + sg->offset; 1053 + sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE), 1054 + CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE)); 1055 + sg->dma_length = sg->length; 1056 + spin_unlock_irq(&port->lock); 1057 + 1058 + BUG_ON(!sg->length); 1059 + 1060 + desc = chan->device->device_prep_slave_sg(chan, 1061 + sg, s->sg_len_tx, DMA_TO_DEVICE, 1062 + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); 1063 + if (!desc) { 1064 + /* switch to PIO */ 1065 + sci_tx_dma_release(s, true); 1066 + return; 1067 + } 1068 + 1069 + dma_sync_sg_for_device(port->dev, sg, 1, DMA_TO_DEVICE); 1070 + 1071 + spin_lock_irq(&port->lock); 1072 + s->desc_tx = desc; 1073 + desc->callback = sci_dma_tx_complete; 1074 + desc->callback_param = s; 1075 + spin_unlock_irq(&port->lock); 1076 + s->cookie_tx = desc->tx_submit(desc); 1077 + if (s->cookie_tx < 0) { 1078 + dev_warn(port->dev, "Failed submitting Tx DMA descriptor\n"); 1079 + /* switch to PIO */ 1080 + sci_tx_dma_release(s, true); 1081 + return; 1082 + } 1083 + 1084 + dev_dbg(port->dev, "%s: %p: %d...%d, cookie %d\n", __func__, 1085 + xmit->buf, xmit->tail, xmit->head, s->cookie_tx); 1086 + 1087 + dma_async_issue_pending(chan); 1088 + } 1089 + #endif 1090 + 908 1091 static void sci_start_tx(struct uart_port *port) 909 1092 { 910 1093 unsigned short ctrl; 1094 + 1095 + #ifdef CONFIG_SERIAL_SH_SCI_DMA 1096 + struct sci_port *s = to_sci_port(port); 1097 + 1098 + if (s->chan_tx) { 1099 + if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0) 1100 + schedule_work(&s->work_tx); 1101 + 1102 + return; 1103 + } 1104 + #endif 911 1105 912 1106 /* Set TIE (Transmit Interrupt Enable) bit in SCSCR */ 913 1107 ctrl = sci_in(port, SCSCR); ··· 1206 838 sci_out(port, SCSCR, ctrl); 1207 839 } 1208 840 1209 - static void sci_start_rx(struct uart_port *port, unsigned int tty_start) 841 + static void sci_start_rx(struct uart_port *port) 1210 842 { 1211 - unsigned short ctrl; 843 + unsigned short ctrl = SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; 1212 844 1213 845 /* Set RIE (Receive Interrupt Enable) bit in SCSCR */ 1214 - ctrl = sci_in(port, SCSCR); 1215 - ctrl |= SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE; 846 + ctrl |= sci_in(port, SCSCR); 1216 847 sci_out(port, SCSCR, ctrl); 1217 848 } 1218 849 ··· 1235 868 /* Nothing here yet .. 
*/ 1236 869 } 1237 870 871 + #ifdef CONFIG_SERIAL_SH_SCI_DMA 872 + static bool filter(struct dma_chan *chan, void *slave) 873 + { 874 + struct sh_dmae_slave *param = slave; 875 + 876 + dev_dbg(chan->device->dev, "%s: slave ID %d\n", __func__, 877 + param->slave_id); 878 + 879 + if (param->dma_dev == chan->device->dev) { 880 + chan->private = param; 881 + return true; 882 + } else { 883 + return false; 884 + } 885 + } 886 + 887 + static void rx_timer_fn(unsigned long arg) 888 + { 889 + struct sci_port *s = (struct sci_port *)arg; 890 + struct uart_port *port = &s->port; 891 + 892 + u16 scr = sci_in(port, SCSCR); 893 + sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE); 894 + dev_dbg(port->dev, "DMA Rx timed out\n"); 895 + schedule_work(&s->work_rx); 896 + } 897 + 898 + static void sci_request_dma(struct uart_port *port) 899 + { 900 + struct sci_port *s = to_sci_port(port); 901 + struct sh_dmae_slave *param; 902 + struct dma_chan *chan; 903 + dma_cap_mask_t mask; 904 + int nent; 905 + 906 + dev_dbg(port->dev, "%s: port %d DMA %p\n", __func__, 907 + port->line, s->dma_dev); 908 + 909 + if (!s->dma_dev) 910 + return; 911 + 912 + dma_cap_zero(mask); 913 + dma_cap_set(DMA_SLAVE, mask); 914 + 915 + param = &s->param_tx; 916 + 917 + /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_TX */ 918 + param->slave_id = s->slave_tx; 919 + param->dma_dev = s->dma_dev; 920 + 921 + s->cookie_tx = -EINVAL; 922 + chan = dma_request_channel(mask, filter, param); 923 + dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan); 924 + if (chan) { 925 + s->chan_tx = chan; 926 + sg_init_table(&s->sg_tx, 1); 927 + /* UART circular tx buffer is an aligned page. */ 928 + BUG_ON((int)port->state->xmit.buf & ~PAGE_MASK); 929 + sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf), 930 + UART_XMIT_SIZE, (int)port->state->xmit.buf & ~PAGE_MASK); 931 + nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE); 932 + if (!nent) 933 + sci_tx_dma_release(s, false); 934 + else 935 + dev_dbg(port->dev, "%s: mapped %d@%p to %x\n", __func__, 936 + sg_dma_len(&s->sg_tx), 937 + port->state->xmit.buf, sg_dma_address(&s->sg_tx)); 938 + 939 + s->sg_len_tx = nent; 940 + 941 + INIT_WORK(&s->work_tx, work_fn_tx); 942 + } 943 + 944 + param = &s->param_rx; 945 + 946 + /* Slave ID, e.g., SHDMA_SLAVE_SCIF0_RX */ 947 + param->slave_id = s->slave_rx; 948 + param->dma_dev = s->dma_dev; 949 + 950 + chan = dma_request_channel(mask, filter, param); 951 + dev_dbg(port->dev, "%s: RX: got channel %p\n", __func__, chan); 952 + if (chan) { 953 + dma_addr_t dma[2]; 954 + void *buf[2]; 955 + int i; 956 + 957 + s->chan_rx = chan; 958 + 959 + s->buf_len_rx = 2 * max(16, (int)port->fifosize); 960 + buf[0] = dma_alloc_coherent(port->dev, s->buf_len_rx * 2, 961 + &dma[0], GFP_KERNEL); 962 + 963 + if (!buf[0]) { 964 + dev_warn(port->dev, 965 + "failed to allocate dma buffer, using PIO\n"); 966 + sci_rx_dma_release(s, true); 967 + return; 968 + } 969 + 970 + buf[1] = buf[0] + s->buf_len_rx; 971 + dma[1] = dma[0] + s->buf_len_rx; 972 + 973 + for (i = 0; i < 2; i++) { 974 + struct scatterlist *sg = &s->sg_rx[i]; 975 + 976 + sg_init_table(sg, 1); 977 + sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx, 978 + (int)buf[i] & ~PAGE_MASK); 979 + sg->dma_address = dma[i]; 980 + sg->dma_length = sg->length; 981 + } 982 + 983 + INIT_WORK(&s->work_rx, work_fn_rx); 984 + setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s); 985 + 986 + sci_submit_rx(s); 987 + } 988 + } 989 + 990 + static void sci_free_dma(struct uart_port *port) 991 + { 992 + struct sci_port *s = 
to_sci_port(port); 993 + 994 + if (!s->dma_dev) 995 + return; 996 + 997 + if (s->chan_tx) 998 + sci_tx_dma_release(s, false); 999 + if (s->chan_rx) 1000 + sci_rx_dma_release(s, false); 1001 + } 1002 + #endif 1003 + 1238 1004 static int sci_startup(struct uart_port *port) 1239 1005 { 1240 1006 struct sci_port *s = to_sci_port(port); 1007 + 1008 + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 1241 1009 1242 1010 if (s->enable) 1243 1011 s->enable(port); 1244 1012 1245 1013 sci_request_irq(s); 1014 + #ifdef CONFIG_SERIAL_SH_SCI_DMA 1015 + sci_request_dma(port); 1016 + #endif 1246 1017 sci_start_tx(port); 1247 - sci_start_rx(port, 1); 1018 + sci_start_rx(port); 1248 1019 1249 1020 return 0; 1250 1021 } ··· 1391 886 { 1392 887 struct sci_port *s = to_sci_port(port); 1393 888 889 + dev_dbg(port->dev, "%s(%d)\n", __func__, port->line); 890 + 1394 891 sci_stop_rx(port); 1395 892 sci_stop_tx(port); 893 + #ifdef CONFIG_SERIAL_SH_SCI_DMA 894 + sci_free_dma(port); 895 + #endif 1396 896 sci_free_irq(s); 1397 897 1398 898 if (s->disable) ··· 1447 937 1448 938 sci_out(port, SCSMR, smr_val); 1449 939 940 + dev_dbg(port->dev, "%s: SMR %x, t %x, SCSCR %x\n", __func__, smr_val, t, 941 + SCSCR_INIT(port)); 942 + 1450 943 if (t > 0) { 1451 944 if (t >= 256) { 1452 945 sci_out(port, SCSMR, (sci_in(port, SCSMR) & ~3) | 1); ··· 1467 954 sci_out(port, SCSCR, SCSCR_INIT(port)); 1468 955 1469 956 if ((termios->c_cflag & CREAD) != 0) 1470 - sci_start_rx(port, 0); 957 + sci_start_rx(port); 1471 958 } 1472 959 1473 960 static const char *sci_type(struct uart_port *port) ··· 1562 1049 unsigned int index, 1563 1050 struct plat_sci_port *p) 1564 1051 { 1565 - sci_port->port.ops = &sci_uart_ops; 1566 - sci_port->port.iotype = UPIO_MEM; 1567 - sci_port->port.line = index; 1052 + struct uart_port *port = &sci_port->port; 1053 + 1054 + port->ops = &sci_uart_ops; 1055 + port->iotype = UPIO_MEM; 1056 + port->line = index; 1568 1057 1569 1058 switch (p->type) { 1570 1059 case PORT_SCIFA: 1571 - sci_port->port.fifosize = 64; 1060 + port->fifosize = 64; 1572 1061 break; 1573 1062 case PORT_SCIF: 1574 - sci_port->port.fifosize = 16; 1063 + port->fifosize = 16; 1575 1064 break; 1576 1065 default: 1577 - sci_port->port.fifosize = 1; 1066 + port->fifosize = 1; 1578 1067 break; 1579 1068 } 1580 1069 ··· 1585 1070 sci_port->dclk = clk_get(&dev->dev, "peripheral_clk"); 1586 1071 sci_port->enable = sci_clk_enable; 1587 1072 sci_port->disable = sci_clk_disable; 1588 - sci_port->port.dev = &dev->dev; 1073 + port->dev = &dev->dev; 1589 1074 } 1590 1075 1591 1076 sci_port->break_timer.data = (unsigned long)sci_port; 1592 1077 sci_port->break_timer.function = sci_break_timer; 1593 1078 init_timer(&sci_port->break_timer); 1594 1079 1595 - sci_port->port.mapbase = p->mapbase; 1596 - sci_port->port.membase = p->membase; 1080 + port->mapbase = p->mapbase; 1081 + port->membase = p->membase; 1597 1082 1598 - sci_port->port.irq = p->irqs[SCIx_TXI_IRQ]; 1599 - sci_port->port.flags = p->flags; 1600 - sci_port->type = sci_port->port.type = p->type; 1083 + port->irq = p->irqs[SCIx_TXI_IRQ]; 1084 + port->flags = p->flags; 1085 + sci_port->type = port->type = p->type; 1086 + 1087 + #ifdef CONFIG_SERIAL_SH_SCI_DMA 1088 + sci_port->dma_dev = p->dma_dev; 1089 + sci_port->slave_tx = p->dma_slave_tx; 1090 + sci_port->slave_rx = p->dma_slave_rx; 1091 + 1092 + dev_dbg(port->dev, "%s: DMA device %p, tx %d, rx %d\n", __func__, 1093 + p->dma_dev, p->dma_slave_tx, p->dma_slave_rx); 1094 + #endif 1601 1095 1602 1096 memcpy(&sci_port->irqs, &p->irqs, 
sizeof(p->irqs)); 1603 1097 }
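One detail worth spelling out from the Rx interrupt handler above: with DMA active it disables RIE and arms rx_timer for roughly the time it takes to fill 1.5 DMA buffers, so a quiet line still gets its partially filled buffer pushed to the tty. A small stand-alone sketch of that arithmetic with hypothetical numbers (HZ = 100, 16-byte FIFO, port->timeout of 5 jiffies, i.e. one FIFO worth of characters plus the HZ/50 slop added by the serial core):

#include <stdio.h>

int main(void)
{
	unsigned long hz = 100;		/* jiffies per second (hypothetical) */
	unsigned long timeout = 5;	/* port->timeout: one FIFO + hz/50 slop */
	unsigned long fifosize = 16;
	unsigned long buf_len_rx = 2 * fifosize; /* as in sci_request_dma() */
	unsigned long tout;

	/*
	 * Same expression as sci_rx_interrupt(): scale the one-FIFO time
	 * up to 1.5 DMA buffers.
	 */
	tout = (timeout - hz / 50) * buf_len_rx * 3 / fifosize / 2;
	if (tout < 2)
		tout = 2;

	printf("arm rx_timer for %lu jiffies (%lu ms)\n",
	       tout, tout * 1000 / hz);	/* -> 9 jiffies, 90 ms */
	return 0;
}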
+6
include/linux/serial_sci.h
··· 2 2 #define __LINUX_SERIAL_SCI_H 3 3 4 4 #include <linux/serial_core.h> 5 + #include <asm/dmaengine.h> 5 6 6 7 /* 7 8 * Generic header for SuperH SCI(F) (used by sh/sh64/h8300 and related parts) ··· 17 16 SCIx_NR_IRQS, 18 17 }; 19 18 19 + struct device; 20 + 20 21 /* 21 22 * Platform device specific platform_data struct 22 23 */ ··· 29 26 unsigned int type; /* SCI / SCIF / IRDA */ 30 27 upf_t flags; /* UPF_* flags */ 31 28 char *clk; /* clock string */ 29 + struct device *dma_dev; 30 + enum sh_dmae_slave_chan_id dma_slave_tx; 31 + enum sh_dmae_slave_chan_id dma_slave_rx; 32 32 }; 33 33 34 34 #endif /* __LINUX_SERIAL_SCI_H */
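With these three new fields, opting a port into DMA is purely a board-wiring decision: point dma_dev at the DMAC's struct device and name the two DMARS slave IDs, and sci_request_dma() does the rest, falling back to PIO if the channels cannot be obtained. A hedged board-file sketch follows; the mapbase and IRQ numbers are placeholders and sh_dmae0_device is the hypothetical DMAC platform device from the earlier sketch.

#include <linux/serial_sci.h>

static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xffe00000,
	.flags		= UPF_BOOT_AUTOCONF,
	.type		= PORT_SCIF,
	.irqs		= { 80, 80, 80, 80 },	/* one per SCIx_*_IRQ slot */
	.dma_dev	= &sh_dmae0_device.dev,
	.dma_slave_tx	= SHDMA_SLAVE_SCIF0_TX,
	.dma_slave_rx	= SHDMA_SLAVE_SCIF0_RX,
};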
+1 -1
sound/soc/sh/siu.h
··· 72 72 #include <linux/interrupt.h> 73 73 #include <linux/io.h> 74 74 75 - #include <asm/dma-sh.h> 75 + #include <asm/dmaengine.h> 76 76 77 77 #include <sound/core.h> 78 78 #include <sound/pcm.h>
+1 -1
sound/soc/sh/siu_pcm.c
··· 32 32 #include <sound/pcm_params.h> 33 33 #include <sound/soc-dai.h> 34 34 35 - #include <asm/dma-sh.h> 35 + #include <asm/dmaengine.h> 36 36 #include <asm/siu.h> 37 37 38 38 #include "siu.h"