Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (37 commits)
MIPS: O32: Provide definition of registers ta0 .. ta3.
MIPS: perf: Add Octeon support for hardware perf.
MIPS: perf: Add support for 64-bit perf counters.
MIPS: perf: Reorganize contents of perf support files.
MIPS: perf: Cleanup formatting in arch/mips/kernel/perf_event.c
MIPS: Add accessor macros for 64-bit performance counter registers.
MIPS: Add probes for more Octeon II CPUs.
MIPS: Add more CPU identifiers for Octeon II CPUs.
MIPS: XLR, XLS: Add comment for smp setup
MIPS: JZ4740: GPIO: Check correct IRQ in demux handler
MIPS: JZ4740: GPIO: Simplify IRQ demuxer
MIPS: JZ4740: Use generic irq chip
MIPS: Alchemy: remove all CONFIG_SOC_AU1??? defines
MIPS: Alchemy: kill au1xxx.h header
MIPS: Alchemy: clean DMA code of CONFIG_SOC_AU1??? defines
MIPS, IDE: Alchemy, au1xxx-ide: Remove pb1200/db1200 header dep
MIPS: Alchemy: Redo PCI as platform driver
MIPS: Alchemy: more base address cleanup
MIPS: Alchemy: rewrite USB platform setup.
MIPS: Alchemy: abstract USB block control register access
...

Fix up trivial conflicts in:
arch/mips/alchemy/devboards/db1x00/platform.c
drivers/ide/Kconfig
drivers/mmc/host/au1xmmc.c
drivers/video/Kconfig
sound/mips/Kconfig

+3557 -3123
+4 -1
arch/mips/Kconfig
··· 47 47 select GENERIC_GPIO 48 48 select ARCH_WANT_OPTIONAL_GPIOLIB 49 49 select SYS_SUPPORTS_ZBOOT 50 + select USB_ARCH_HAS_OHCI 51 + select USB_ARCH_HAS_EHCI 50 52 51 53 config AR7 52 54 bool "Texas Instruments AR7" ··· 208 206 select SYS_HAS_EARLY_PRINTK 209 207 select HAVE_PWM 210 208 select HAVE_CLK 209 + select GENERIC_IRQ_CHIP 211 210 212 211 config LANTIQ 213 212 bool "Lantiq based platforms" ··· 2095 2092 2096 2093 config HW_PERF_EVENTS 2097 2094 bool "Enable hardware performance counter support for perf events" 2098 - depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && CPU_MIPS32 2095 + depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON) 2099 2096 default y 2100 2097 help 2101 2098 Enable hardware performance counter support for perf events. If
+3 -1
arch/mips/Makefile
··· 226 226 ifdef CONFIG_MIPS 227 227 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -xc /dev/null | \ 228 228 egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \ 229 - sed -e 's/^\#define /-D/' -e "s/ /='/" -e "s/$$/'/") 229 + sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/") 230 230 ifdef CONFIG_64BIT 231 231 CHECKFLAGS += -m64 232 232 endif ··· 295 295 296 296 install: 297 297 $(Q)install -D -m 755 vmlinux $(INSTALL_PATH)/vmlinux-$(KERNELRELEASE) 298 + ifdef CONFIG_SYS_SUPPORTS_ZBOOT 298 299 $(Q)install -D -m 755 vmlinuz $(INSTALL_PATH)/vmlinuz-$(KERNELRELEASE) 300 + endif 299 301 $(Q)install -D -m 644 .config $(INSTALL_PATH)/config-$(KERNELRELEASE) 300 302 $(Q)install -D -m 644 System.map $(INSTALL_PATH)/System.map-$(KERNELRELEASE) 301 303
+15 -35
arch/mips/alchemy/Kconfig
··· 18 18 bool "4G Systems MTX-1 board" 19 19 select DMA_NONCOHERENT 20 20 select HW_HAS_PCI 21 - select SOC_AU1500 21 + select ALCHEMY_GPIOINT_AU1000 22 22 select SYS_SUPPORTS_LITTLE_ENDIAN 23 23 select SYS_HAS_EARLY_PRINTK 24 24 25 25 config MIPS_BOSPORUS 26 26 bool "Alchemy Bosporus board" 27 - select SOC_AU1500 27 + select ALCHEMY_GPIOINT_AU1000 28 28 select DMA_NONCOHERENT 29 29 select SYS_SUPPORTS_LITTLE_ENDIAN 30 30 select SYS_HAS_EARLY_PRINTK 31 31 32 32 config MIPS_DB1000 33 33 bool "Alchemy DB1000 board" 34 - select SOC_AU1000 34 + select ALCHEMY_GPIOINT_AU1000 35 35 select DMA_NONCOHERENT 36 36 select HW_HAS_PCI 37 37 select SYS_SUPPORTS_LITTLE_ENDIAN ··· 39 39 40 40 config MIPS_DB1100 41 41 bool "Alchemy DB1100 board" 42 - select SOC_AU1100 42 + select ALCHEMY_GPIOINT_AU1000 43 43 select DMA_NONCOHERENT 44 44 select SYS_SUPPORTS_LITTLE_ENDIAN 45 45 select SYS_HAS_EARLY_PRINTK 46 46 47 47 config MIPS_DB1200 48 48 bool "Alchemy DB1200 board" 49 - select SOC_AU1200 49 + select ALCHEMY_GPIOINT_AU1000 50 50 select DMA_COHERENT 51 51 select MIPS_DISABLE_OBSOLETE_IDE 52 52 select SYS_SUPPORTS_LITTLE_ENDIAN ··· 54 54 55 55 config MIPS_DB1500 56 56 bool "Alchemy DB1500 board" 57 - select SOC_AU1500 57 + select ALCHEMY_GPIOINT_AU1000 58 58 select DMA_NONCOHERENT 59 59 select HW_HAS_PCI 60 60 select MIPS_DISABLE_OBSOLETE_IDE ··· 64 64 65 65 config MIPS_DB1550 66 66 bool "Alchemy DB1550 board" 67 - select SOC_AU1550 67 + select ALCHEMY_GPIOINT_AU1000 68 68 select HW_HAS_PCI 69 69 select DMA_NONCOHERENT 70 70 select MIPS_DISABLE_OBSOLETE_IDE ··· 74 74 config MIPS_MIRAGE 75 75 bool "Alchemy Mirage board" 76 76 select DMA_NONCOHERENT 77 - select SOC_AU1500 77 + select ALCHEMY_GPIOINT_AU1000 78 78 select SYS_SUPPORTS_LITTLE_ENDIAN 79 79 select SYS_HAS_EARLY_PRINTK 80 80 81 81 config MIPS_PB1000 82 82 bool "Alchemy PB1000 board" 83 - select SOC_AU1000 83 + select ALCHEMY_GPIOINT_AU1000 84 84 select DMA_NONCOHERENT 85 85 select HW_HAS_PCI 86 86 select SWAP_IO_SPACE ··· 
89 89 90 90 config MIPS_PB1100 91 91 bool "Alchemy PB1100 board" 92 - select SOC_AU1100 92 + select ALCHEMY_GPIOINT_AU1000 93 93 select DMA_NONCOHERENT 94 94 select HW_HAS_PCI 95 95 select SWAP_IO_SPACE ··· 98 98 99 99 config MIPS_PB1200 100 100 bool "Alchemy PB1200 board" 101 - select SOC_AU1200 101 + select ALCHEMY_GPIOINT_AU1000 102 102 select DMA_NONCOHERENT 103 103 select MIPS_DISABLE_OBSOLETE_IDE 104 104 select SYS_SUPPORTS_LITTLE_ENDIAN ··· 106 106 107 107 config MIPS_PB1500 108 108 bool "Alchemy PB1500 board" 109 - select SOC_AU1500 109 + select ALCHEMY_GPIOINT_AU1000 110 110 select DMA_NONCOHERENT 111 111 select HW_HAS_PCI 112 112 select SYS_SUPPORTS_LITTLE_ENDIAN ··· 114 114 115 115 config MIPS_PB1550 116 116 bool "Alchemy PB1550 board" 117 - select SOC_AU1550 117 + select ALCHEMY_GPIOINT_AU1000 118 118 select DMA_NONCOHERENT 119 119 select HW_HAS_PCI 120 120 select MIPS_DISABLE_OBSOLETE_IDE ··· 124 124 config MIPS_XXS1500 125 125 bool "MyCable XXS1500 board" 126 126 select DMA_NONCOHERENT 127 - select SOC_AU1500 127 + select ALCHEMY_GPIOINT_AU1000 128 128 select SYS_SUPPORTS_LITTLE_ENDIAN 129 129 select SYS_HAS_EARLY_PRINTK 130 130 131 131 config MIPS_GPR 132 132 bool "Trapeze ITS GPR board" 133 - select SOC_AU1550 133 + select ALCHEMY_GPIOINT_AU1000 134 134 select HW_HAS_PCI 135 135 select DMA_NONCOHERENT 136 136 select MIPS_DISABLE_OBSOLETE_IDE ··· 138 138 select SYS_HAS_EARLY_PRINTK 139 139 140 140 endchoice 141 - 142 - config SOC_AU1000 143 - bool 144 - select ALCHEMY_GPIOINT_AU1000 145 - 146 - config SOC_AU1100 147 - bool 148 - select ALCHEMY_GPIOINT_AU1000 149 - 150 - config SOC_AU1500 151 - bool 152 - select ALCHEMY_GPIOINT_AU1000 153 - 154 - config SOC_AU1550 155 - bool 156 - select ALCHEMY_GPIOINT_AU1000 157 - 158 - config SOC_AU1200 159 - bool 160 - select ALCHEMY_GPIOINT_AU1000
+1 -5
arch/mips/alchemy/common/Makefile
··· 12 12 13 13 # optional gpiolib support 14 14 ifeq ($(CONFIG_ALCHEMY_GPIO_INDIRECT),) 15 - ifeq ($(CONFIG_GPIOLIB),y) 16 - obj-$(CONFIG_ALCHEMY_GPIOINT_AU1000) += gpiolib-au1000.o 17 - endif 15 + obj-$(CONFIG_GPIOLIB) += gpiolib.o 18 16 endif 19 - 20 - obj-$(CONFIG_PCI) += pci.o
+97 -118
arch/mips/alchemy/common/dbdma.c
··· 40 40 #include <asm/mach-au1x00/au1000.h> 41 41 #include <asm/mach-au1x00/au1xxx_dbdma.h> 42 42 43 - #if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) 44 - 45 43 /* 46 44 * The Descriptor Based DMA supports up to 16 channels. 47 45 * ··· 60 62 (dbdma_global_t *)KSEG1ADDR(AU1550_DBDMA_CONF_PHYS_ADDR); 61 63 static int dbdma_initialized; 62 64 63 - static dbdev_tab_t dbdev_tab[] = { 64 - #ifdef CONFIG_SOC_AU1550 65 + static dbdev_tab_t *dbdev_tab; 66 + 67 + static dbdev_tab_t au1550_dbdev_tab[] __initdata = { 65 68 /* UARTS */ 66 - { DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 }, 67 - { DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 }, 68 - { DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 }, 69 - { DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 }, 69 + { AU1550_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 }, 70 + { AU1550_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 }, 71 + { AU1550_DSCR_CMD0_UART3_TX, DEV_FLAGS_OUT, 0, 8, 0x11400004, 0, 0 }, 72 + { AU1550_DSCR_CMD0_UART3_RX, DEV_FLAGS_IN, 0, 8, 0x11400000, 0, 0 }, 70 73 71 74 /* EXT DMA */ 72 - { DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 }, 73 - { DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 }, 74 - { DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 }, 75 - { DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 }, 75 + { AU1550_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 }, 76 + { AU1550_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 }, 77 + { AU1550_DSCR_CMD0_DMA_REQ2, 0, 0, 0, 0x00000000, 0, 0 }, 78 + { AU1550_DSCR_CMD0_DMA_REQ3, 0, 0, 0, 0x00000000, 0, 0 }, 76 79 77 80 /* USB DEV */ 78 - { DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 }, 79 - { DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 }, 80 - { DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 }, 81 - { DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 }, 82 - { DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 }, 83 - { 
DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 }, 81 + { AU1550_DSCR_CMD0_USBDEV_RX0, DEV_FLAGS_IN, 4, 8, 0x10200000, 0, 0 }, 82 + { AU1550_DSCR_CMD0_USBDEV_TX0, DEV_FLAGS_OUT, 4, 8, 0x10200004, 0, 0 }, 83 + { AU1550_DSCR_CMD0_USBDEV_TX1, DEV_FLAGS_OUT, 4, 8, 0x10200008, 0, 0 }, 84 + { AU1550_DSCR_CMD0_USBDEV_TX2, DEV_FLAGS_OUT, 4, 8, 0x1020000c, 0, 0 }, 85 + { AU1550_DSCR_CMD0_USBDEV_RX3, DEV_FLAGS_IN, 4, 8, 0x10200010, 0, 0 }, 86 + { AU1550_DSCR_CMD0_USBDEV_RX4, DEV_FLAGS_IN, 4, 8, 0x10200014, 0, 0 }, 84 87 85 - /* PSC 0 */ 86 - { DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 }, 87 - { DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 }, 88 + /* PSCs */ 89 + { AU1550_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 0, 0x11a0001c, 0, 0 }, 90 + { AU1550_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 0, 0x11a0001c, 0, 0 }, 91 + { AU1550_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 }, 92 + { AU1550_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 }, 93 + { AU1550_DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 }, 94 + { AU1550_DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 }, 95 + { AU1550_DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 }, 96 + { AU1550_DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 }, 88 97 89 - /* PSC 1 */ 90 - { DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 0, 0x11b0001c, 0, 0 }, 91 - { DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 0, 0x11b0001c, 0, 0 }, 92 - 93 - /* PSC 2 */ 94 - { DSCR_CMD0_PSC2_TX, DEV_FLAGS_OUT, 0, 0, 0x10a0001c, 0, 0 }, 95 - { DSCR_CMD0_PSC2_RX, DEV_FLAGS_IN, 0, 0, 0x10a0001c, 0, 0 }, 96 - 97 - /* PSC 3 */ 98 - { DSCR_CMD0_PSC3_TX, DEV_FLAGS_OUT, 0, 0, 0x10b0001c, 0, 0 }, 99 - { DSCR_CMD0_PSC3_RX, DEV_FLAGS_IN, 0, 0, 0x10b0001c, 0, 0 }, 100 - 101 - { DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 }, /* PCI */ 102 - { DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */ 98 + { AU1550_DSCR_CMD0_PCI_WRITE, 0, 0, 0, 0x00000000, 0, 0 }, /* PCI */ 99 + { 
AU1550_DSCR_CMD0_NAND_FLASH, 0, 0, 0, 0x00000000, 0, 0 }, /* NAND */ 103 100 104 101 /* MAC 0 */ 105 - { DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, 106 - { DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 }, 102 + { AU1550_DSCR_CMD0_MAC0_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, 103 + { AU1550_DSCR_CMD0_MAC0_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 }, 107 104 108 105 /* MAC 1 */ 109 - { DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, 110 - { DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 }, 111 - 112 - #endif /* CONFIG_SOC_AU1550 */ 113 - 114 - #ifdef CONFIG_SOC_AU1200 115 - { DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 }, 116 - { DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 }, 117 - { DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 }, 118 - { DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 }, 119 - 120 - { DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 0 }, 121 - { DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 }, 122 - 123 - { DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 124 - { DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 125 - { DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 126 - { DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 127 - 128 - { DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 }, 129 - { DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 }, 130 - { DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 }, 131 - { DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 }, 132 - 133 - { DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 }, 134 - { DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 }, 135 - 136 - { DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 }, 137 - { DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 }, 138 - { DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 139 - 140 - { DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 
0x11b0001c, 0, 0 }, 141 - { DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 }, 142 - { DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 143 - 144 - { DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 }, 145 - { DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 }, 146 - { DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 }, 147 - { DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 148 - 149 - { DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, 150 - 151 - #endif /* CONFIG_SOC_AU1200 */ 106 + { AU1550_DSCR_CMD0_MAC1_RX, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, 107 + { AU1550_DSCR_CMD0_MAC1_TX, DEV_FLAGS_OUT, 0, 0, 0x00000000, 0, 0 }, 152 108 153 109 { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 154 - { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 155 - 156 - /* Provide 16 user definable device types */ 157 - { ~0, 0, 0, 0, 0, 0, 0 }, 158 - { ~0, 0, 0, 0, 0, 0, 0 }, 159 - { ~0, 0, 0, 0, 0, 0, 0 }, 160 - { ~0, 0, 0, 0, 0, 0, 0 }, 161 - { ~0, 0, 0, 0, 0, 0, 0 }, 162 - { ~0, 0, 0, 0, 0, 0, 0 }, 163 - { ~0, 0, 0, 0, 0, 0, 0 }, 164 - { ~0, 0, 0, 0, 0, 0, 0 }, 165 - { ~0, 0, 0, 0, 0, 0, 0 }, 166 - { ~0, 0, 0, 0, 0, 0, 0 }, 167 - { ~0, 0, 0, 0, 0, 0, 0 }, 168 - { ~0, 0, 0, 0, 0, 0, 0 }, 169 - { ~0, 0, 0, 0, 0, 0, 0 }, 170 - { ~0, 0, 0, 0, 0, 0, 0 }, 171 - { ~0, 0, 0, 0, 0, 0, 0 }, 172 - { ~0, 0, 0, 0, 0, 0, 0 }, 110 + { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 173 111 }; 174 112 175 - #define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab) 113 + static dbdev_tab_t au1200_dbdev_tab[] __initdata = { 114 + { AU1200_DSCR_CMD0_UART0_TX, DEV_FLAGS_OUT, 0, 8, 0x11100004, 0, 0 }, 115 + { AU1200_DSCR_CMD0_UART0_RX, DEV_FLAGS_IN, 0, 8, 0x11100000, 0, 0 }, 116 + { AU1200_DSCR_CMD0_UART1_TX, DEV_FLAGS_OUT, 0, 8, 0x11200004, 0, 0 }, 117 + { AU1200_DSCR_CMD0_UART1_RX, DEV_FLAGS_IN, 0, 8, 0x11200000, 0, 0 }, 176 118 119 + { AU1200_DSCR_CMD0_DMA_REQ0, 0, 0, 0, 0x00000000, 0, 
0 }, 120 + { AU1200_DSCR_CMD0_DMA_REQ1, 0, 0, 0, 0x00000000, 0, 0 }, 121 + 122 + { AU1200_DSCR_CMD0_MAE_BE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 123 + { AU1200_DSCR_CMD0_MAE_FE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 124 + { AU1200_DSCR_CMD0_MAE_BOTH, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 125 + { AU1200_DSCR_CMD0_LCD, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 126 + 127 + { AU1200_DSCR_CMD0_SDMS_TX0, DEV_FLAGS_OUT, 4, 8, 0x10600000, 0, 0 }, 128 + { AU1200_DSCR_CMD0_SDMS_RX0, DEV_FLAGS_IN, 4, 8, 0x10600004, 0, 0 }, 129 + { AU1200_DSCR_CMD0_SDMS_TX1, DEV_FLAGS_OUT, 4, 8, 0x10680000, 0, 0 }, 130 + { AU1200_DSCR_CMD0_SDMS_RX1, DEV_FLAGS_IN, 4, 8, 0x10680004, 0, 0 }, 131 + 132 + { AU1200_DSCR_CMD0_AES_RX, DEV_FLAGS_IN , 4, 32, 0x10300008, 0, 0 }, 133 + { AU1200_DSCR_CMD0_AES_TX, DEV_FLAGS_OUT, 4, 32, 0x10300004, 0, 0 }, 134 + 135 + { AU1200_DSCR_CMD0_PSC0_TX, DEV_FLAGS_OUT, 0, 16, 0x11a0001c, 0, 0 }, 136 + { AU1200_DSCR_CMD0_PSC0_RX, DEV_FLAGS_IN, 0, 16, 0x11a0001c, 0, 0 }, 137 + { AU1200_DSCR_CMD0_PSC0_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 138 + { AU1200_DSCR_CMD0_PSC1_TX, DEV_FLAGS_OUT, 0, 16, 0x11b0001c, 0, 0 }, 139 + { AU1200_DSCR_CMD0_PSC1_RX, DEV_FLAGS_IN, 0, 16, 0x11b0001c, 0, 0 }, 140 + { AU1200_DSCR_CMD0_PSC1_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 141 + 142 + { AU1200_DSCR_CMD0_CIM_RXA, DEV_FLAGS_IN, 0, 32, 0x14004020, 0, 0 }, 143 + { AU1200_DSCR_CMD0_CIM_RXB, DEV_FLAGS_IN, 0, 32, 0x14004040, 0, 0 }, 144 + { AU1200_DSCR_CMD0_CIM_RXC, DEV_FLAGS_IN, 0, 32, 0x14004060, 0, 0 }, 145 + { AU1200_DSCR_CMD0_CIM_SYNC, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 146 + 147 + { AU1200_DSCR_CMD0_NAND_FLASH, DEV_FLAGS_IN, 0, 0, 0x00000000, 0, 0 }, 148 + 149 + { DSCR_CMD0_THROTTLE, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 150 + { DSCR_CMD0_ALWAYS, DEV_FLAGS_ANYUSE, 0, 0, 0x00000000, 0, 0 }, 151 + }; 152 + 153 + /* 32 predefined plus 32 custom */ 154 + #define DBDEV_TAB_SIZE 64 177 155 178 156 static chan_tab_t 
*chan_tab_ptr[NUM_DBDMA_CHANS]; 179 157 ··· 1002 1028 .resume = alchemy_dbdma_resume, 1003 1029 }; 1004 1030 1005 - static int __init au1xxx_dbdma_init(void) 1031 + static int __init dbdma_setup(unsigned int irq, dbdev_tab_t *idtable) 1006 1032 { 1007 - int irq_nr, ret; 1033 + int ret; 1034 + 1035 + dbdev_tab = kzalloc(sizeof(dbdev_tab_t) * DBDEV_TAB_SIZE, GFP_KERNEL); 1036 + if (!dbdev_tab) 1037 + return -ENOMEM; 1038 + 1039 + memcpy(dbdev_tab, idtable, 32 * sizeof(dbdev_tab_t)); 1040 + for (ret = 32; ret < DBDEV_TAB_SIZE; ret++) 1041 + dbdev_tab[ret].dev_id = ~0; 1008 1042 1009 1043 dbdma_gptr->ddma_config = 0; 1010 1044 dbdma_gptr->ddma_throttle = 0; 1011 1045 dbdma_gptr->ddma_inten = 0xffff; 1012 1046 au_sync(); 1013 1047 1014 - switch (alchemy_get_cputype()) { 1015 - case ALCHEMY_CPU_AU1550: 1016 - irq_nr = AU1550_DDMA_INT; 1017 - break; 1018 - case ALCHEMY_CPU_AU1200: 1019 - irq_nr = AU1200_DDMA_INT; 1020 - break; 1021 - default: 1022 - return -ENODEV; 1023 - } 1024 - 1025 - ret = request_irq(irq_nr, dbdma_interrupt, IRQF_DISABLED, 1026 - "Au1xxx dbdma", (void *)dbdma_gptr); 1048 + ret = request_irq(irq, dbdma_interrupt, IRQF_DISABLED, "dbdma", 1049 + (void *)dbdma_gptr); 1027 1050 if (ret) 1028 1051 printk(KERN_ERR "Cannot grab DBDMA interrupt!\n"); 1029 1052 else { 1030 1053 dbdma_initialized = 1; 1031 - printk(KERN_INFO "Alchemy DBDMA initialized\n"); 1032 1054 register_syscore_ops(&alchemy_dbdma_syscore_ops); 1033 1055 } 1034 1056 1035 1057 return ret; 1036 1058 } 1037 - subsys_initcall(au1xxx_dbdma_init); 1038 1059 1039 - #endif /* defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) */ 1060 + static int __init alchemy_dbdma_init(void) 1061 + { 1062 + switch (alchemy_get_cputype()) { 1063 + case ALCHEMY_CPU_AU1550: 1064 + return dbdma_setup(AU1550_DDMA_INT, au1550_dbdev_tab); 1065 + case ALCHEMY_CPU_AU1200: 1066 + return dbdma_setup(AU1200_DDMA_INT, au1200_dbdev_tab); 1067 + } 1068 + return 0; 1069 + } 1070 + subsys_initcall(alchemy_dbdma_init);
+31 -35
arch/mips/alchemy/common/dma.c
··· 40 40 #include <asm/mach-au1x00/au1000.h> 41 41 #include <asm/mach-au1x00/au1000_dma.h> 42 42 43 - #if defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1500) || \ 44 - defined(CONFIG_SOC_AU1100) 45 43 /* 46 44 * A note on resource allocation: 47 45 * ··· 86 88 { AU1000_AC97_PHYS_ADDR + 0x08, DMA_DW16 | DMA_DR }, /* AC97 RX c */ 87 89 { AU1000_UART3_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* UART3_TX */ 88 90 { AU1000_UART3_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* UART3_RX */ 89 - { AU1000_USBD_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */ 90 - { AU1000_USBD_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */ 91 - { AU1000_USBD_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */ 92 - { AU1000_USBD_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */ 93 - { AU1000_USBD_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */ 94 - { AU1000_USBD_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */ 91 + { AU1000_USB_UDC_PHYS_ADDR + 0x00, DMA_DW8 | DMA_NC | DMA_DR }, /* EP0RD */ 92 + { AU1000_USB_UDC_PHYS_ADDR + 0x04, DMA_DW8 | DMA_NC }, /* EP0WR */ 93 + { AU1000_USB_UDC_PHYS_ADDR + 0x08, DMA_DW8 | DMA_NC }, /* EP2WR */ 94 + { AU1000_USB_UDC_PHYS_ADDR + 0x0c, DMA_DW8 | DMA_NC }, /* EP3WR */ 95 + { AU1000_USB_UDC_PHYS_ADDR + 0x10, DMA_DW8 | DMA_NC | DMA_DR }, /* EP4RD */ 96 + { AU1000_USB_UDC_PHYS_ADDR + 0x14, DMA_DW8 | DMA_NC | DMA_DR }, /* EP5RD */ 95 97 /* on Au1500, these 2 are DMA_REQ2/3 (GPIO208/209) instead! 
*/ 96 98 { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC}, /* I2S TX */ 97 99 { AU1000_I2S_PHYS_ADDR + 0x00, DMA_DW32 | DMA_NC | DMA_DR}, /* I2S RX */ ··· 168 170 const struct dma_dev *dev; 169 171 int i, ret; 170 172 171 - #if defined(CONFIG_SOC_AU1100) 172 - if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2)) 173 - return -EINVAL; 174 - #else 175 - if (dev_id < 0 || dev_id >= DMA_NUM_DEV) 176 - return -EINVAL; 177 - #endif 173 + if (alchemy_get_cputype() == ALCHEMY_CPU_AU1100) { 174 + if (dev_id < 0 || dev_id >= (DMA_NUM_DEV + DMA_NUM_DEV_BANK2)) 175 + return -EINVAL; 176 + } else { 177 + if (dev_id < 0 || dev_id >= DMA_NUM_DEV) 178 + return -EINVAL; 179 + } 178 180 179 181 for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) 180 182 if (au1000_dma_table[i].dev_id < 0) ··· 237 239 238 240 static int __init au1000_dma_init(void) 239 241 { 240 - int base, i; 242 + int base, i; 241 243 242 - switch (alchemy_get_cputype()) { 243 - case ALCHEMY_CPU_AU1000: 244 - base = AU1000_DMA_INT_BASE; 245 - break; 246 - case ALCHEMY_CPU_AU1500: 247 - base = AU1500_DMA_INT_BASE; 248 - break; 249 - case ALCHEMY_CPU_AU1100: 250 - base = AU1100_DMA_INT_BASE; 251 - break; 252 - default: 253 - goto out; 254 - } 244 + switch (alchemy_get_cputype()) { 245 + case ALCHEMY_CPU_AU1000: 246 + base = AU1000_DMA_INT_BASE; 247 + break; 248 + case ALCHEMY_CPU_AU1500: 249 + base = AU1500_DMA_INT_BASE; 250 + break; 251 + case ALCHEMY_CPU_AU1100: 252 + base = AU1100_DMA_INT_BASE; 253 + break; 254 + default: 255 + goto out; 256 + } 255 257 256 - for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) 257 - au1000_dma_table[i].irq = base + i; 258 + for (i = 0; i < NUM_AU1000_DMA_CHANNELS; i++) 259 + au1000_dma_table[i].irq = base + i; 258 260 259 - printk(KERN_INFO "Alchemy DMA initialized\n"); 261 + printk(KERN_INFO "Alchemy DMA initialized\n"); 260 262 261 263 out: 262 - return 0; 264 + return 0; 263 265 } 264 266 arch_initcall(au1000_dma_init); 265 - 266 - #endif /* AU1000 AU1500 AU1100 */
+20 -13
arch/mips/alchemy/common/gpiolib-au1000.c arch/mips/alchemy/common/gpiolib.c
··· 1 1 /* 2 2 * Copyright (C) 2007-2009, OpenWrt.org, Florian Fainelli <florian@openwrt.org> 3 - * GPIOLIB support for Au1000, Au1500, Au1100, Au1550 and Au12x0. 3 + * GPIOLIB support for Alchemy chips. 4 4 * 5 5 * This program is free software; you can redistribute it and/or modify it 6 6 * under the terms of the GNU General Public License as published by the ··· 23 23 * 675 Mass Ave, Cambridge, MA 02139, USA. 24 24 * 25 25 * Notes : 26 - * au1000 SoC have only one GPIO block : GPIO1 27 - * Au1100, Au15x0, Au12x0 have a second one : GPIO2 26 + * This file must ONLY be built when CONFIG_GPIOLIB=y and 27 + * CONFIG_ALCHEMY_GPIO_INDIRECT=n, otherwise compilation will fail! 28 + * au1000 SoC have only one GPIO block : GPIO1 29 + * Au1100, Au15x0, Au12x0 have a second one : GPIO2 28 30 */ 29 31 32 + #include <linux/init.h> 30 33 #include <linux/kernel.h> 31 34 #include <linux/module.h> 32 35 #include <linux/types.h> 33 - #include <linux/platform_device.h> 34 36 #include <linux/gpio.h> 35 - 36 - #include <asm/mach-au1x00/au1000.h> 37 - #include <asm/mach-au1x00/gpio.h> 37 + #include <asm/mach-au1x00/gpio-au1000.h> 38 38 39 39 static int gpio2_get(struct gpio_chip *chip, unsigned offset) 40 40 { ··· 115 115 }, 116 116 }; 117 117 118 - static int __init alchemy_gpiolib_init(void) 118 + static int __init alchemy_gpiochip_init(void) 119 119 { 120 - gpiochip_add(&alchemy_gpio_chip[0]); 121 - if (alchemy_get_cputype() != ALCHEMY_CPU_AU1000) 122 - gpiochip_add(&alchemy_gpio_chip[1]); 120 + int ret = 0; 123 121 124 - return 0; 122 + switch (alchemy_get_cputype()) { 123 + case ALCHEMY_CPU_AU1000: 124 + ret = gpiochip_add(&alchemy_gpio_chip[0]); 125 + break; 126 + case ALCHEMY_CPU_AU1500...ALCHEMY_CPU_AU1200: 127 + ret = gpiochip_add(&alchemy_gpio_chip[0]); 128 + ret |= gpiochip_add(&alchemy_gpio_chip[1]); 129 + break; 130 + } 131 + return ret; 125 132 } 126 - arch_initcall(alchemy_gpiolib_init); 133 + arch_initcall(alchemy_gpiochip_init);
-104
arch/mips/alchemy/common/pci.c
··· 1 - /* 2 - * BRIEF MODULE DESCRIPTION 3 - * Alchemy/AMD Au1x00 PCI support. 4 - * 5 - * Copyright 2001-2003, 2007-2008 MontaVista Software Inc. 6 - * Author: MontaVista Software, Inc. <source@mvista.com> 7 - * 8 - * Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org) 9 - * 10 - * Support for all devices (greater than 16) added by David Gathright. 11 - * 12 - * This program is free software; you can redistribute it and/or modify it 13 - * under the terms of the GNU General Public License as published by the 14 - * Free Software Foundation; either version 2 of the License, or (at your 15 - * option) any later version. 16 - * 17 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 18 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 19 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 20 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 23 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 24 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 - * 28 - * You should have received a copy of the GNU General Public License along 29 - * with this program; if not, write to the Free Software Foundation, Inc., 30 - * 675 Mass Ave, Cambridge, MA 02139, USA. 
31 - */ 32 - 33 - #include <linux/pci.h> 34 - #include <linux/kernel.h> 35 - #include <linux/init.h> 36 - 37 - #include <asm/mach-au1x00/au1000.h> 38 - 39 - /* TBD */ 40 - static struct resource pci_io_resource = { 41 - .start = PCI_IO_START, 42 - .end = PCI_IO_END, 43 - .name = "PCI IO space", 44 - .flags = IORESOURCE_IO 45 - }; 46 - 47 - static struct resource pci_mem_resource = { 48 - .start = PCI_MEM_START, 49 - .end = PCI_MEM_END, 50 - .name = "PCI memory space", 51 - .flags = IORESOURCE_MEM 52 - }; 53 - 54 - extern struct pci_ops au1x_pci_ops; 55 - 56 - static struct pci_controller au1x_controller = { 57 - .pci_ops = &au1x_pci_ops, 58 - .io_resource = &pci_io_resource, 59 - .mem_resource = &pci_mem_resource, 60 - }; 61 - 62 - #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) 63 - static unsigned long virt_io_addr; 64 - #endif 65 - 66 - static int __init au1x_pci_setup(void) 67 - { 68 - extern void au1x_pci_cfg_init(void); 69 - 70 - #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) 71 - virt_io_addr = (unsigned long)ioremap(Au1500_PCI_IO_START, 72 - Au1500_PCI_IO_END - Au1500_PCI_IO_START + 1); 73 - 74 - if (!virt_io_addr) { 75 - printk(KERN_ERR "Unable to ioremap pci space\n"); 76 - return 1; 77 - } 78 - au1x_controller.io_map_base = virt_io_addr; 79 - 80 - #ifdef CONFIG_DMA_NONCOHERENT 81 - { 82 - /* 83 - * Set the NC bit in controller for Au1500 pre-AC silicon 84 - */ 85 - u32 prid = read_c0_prid(); 86 - 87 - if ((prid & 0xFF000000) == 0x01000000 && prid < 0x01030202) { 88 - au_writel((1 << 16) | au_readl(Au1500_PCI_CFG), 89 - Au1500_PCI_CFG); 90 - printk(KERN_INFO "Non-coherent PCI accesses enabled\n"); 91 - } 92 - } 93 - #endif 94 - 95 - set_io_port_base(virt_io_addr); 96 - #endif 97 - 98 - au1x_pci_cfg_init(); 99 - 100 - register_pci_controller(&au1x_controller); 101 - return 0; 102 - } 103 - 104 - arch_initcall(au1x_pci_setup);
+86 -278
arch/mips/alchemy/common/platform.c
··· 18 18 #include <linux/serial_8250.h> 19 19 #include <linux/slab.h> 20 20 21 - #include <asm/mach-au1x00/au1xxx.h> 21 + #include <asm/mach-au1x00/au1000.h> 22 22 #include <asm/mach-au1x00/au1xxx_dbdma.h> 23 23 #include <asm/mach-au1x00/au1100_mmc.h> 24 24 #include <asm/mach-au1x00/au1xxx_eth.h> ··· 111 111 printk(KERN_INFO "Alchemy: failed to register UARTs\n"); 112 112 } 113 113 114 - /* OHCI (USB full speed host controller) */ 115 - static struct resource au1xxx_usb_ohci_resources[] = { 116 - [0] = { 117 - .start = USB_OHCI_BASE, 118 - .end = USB_OHCI_BASE + USB_OHCI_LEN - 1, 119 - .flags = IORESOURCE_MEM, 120 - }, 121 - [1] = { 122 - .start = FOR_PLATFORM_C_USB_HOST_INT, 123 - .end = FOR_PLATFORM_C_USB_HOST_INT, 124 - .flags = IORESOURCE_IRQ, 125 - }, 114 + 115 + /* The dmamask must be set for OHCI/EHCI to work */ 116 + static u64 alchemy_ohci_dmamask = DMA_BIT_MASK(32); 117 + static u64 __maybe_unused alchemy_ehci_dmamask = DMA_BIT_MASK(32); 118 + 119 + static unsigned long alchemy_ohci_data[][2] __initdata = { 120 + [ALCHEMY_CPU_AU1000] = { AU1000_USB_OHCI_PHYS_ADDR, AU1000_USB_HOST_INT }, 121 + [ALCHEMY_CPU_AU1500] = { AU1000_USB_OHCI_PHYS_ADDR, AU1500_USB_HOST_INT }, 122 + [ALCHEMY_CPU_AU1100] = { AU1000_USB_OHCI_PHYS_ADDR, AU1100_USB_HOST_INT }, 123 + [ALCHEMY_CPU_AU1550] = { AU1550_USB_OHCI_PHYS_ADDR, AU1550_USB_HOST_INT }, 124 + [ALCHEMY_CPU_AU1200] = { AU1200_USB_OHCI_PHYS_ADDR, AU1200_USB_INT }, 126 125 }; 127 126 128 - /* The dmamask must be set for OHCI to work */ 129 - static u64 ohci_dmamask = DMA_BIT_MASK(32); 130 - 131 - static struct platform_device au1xxx_usb_ohci_device = { 132 - .name = "au1xxx-ohci", 133 - .id = 0, 134 - .dev = { 135 - .dma_mask = &ohci_dmamask, 136 - .coherent_dma_mask = DMA_BIT_MASK(32), 137 - }, 138 - .num_resources = ARRAY_SIZE(au1xxx_usb_ohci_resources), 139 - .resource = au1xxx_usb_ohci_resources, 127 + static unsigned long alchemy_ehci_data[][2] __initdata = { 128 + [ALCHEMY_CPU_AU1200] = { 
AU1200_USB_EHCI_PHYS_ADDR, AU1200_USB_INT }, 140 129 }; 141 130 142 - /*** AU1100 LCD controller ***/ 143 - 144 - #ifdef CONFIG_FB_AU1100 145 - static struct resource au1100_lcd_resources[] = { 146 - [0] = { 147 - .start = LCD_PHYS_ADDR, 148 - .end = LCD_PHYS_ADDR + 0x800 - 1, 149 - .flags = IORESOURCE_MEM, 150 - }, 151 - [1] = { 152 - .start = AU1100_LCD_INT, 153 - .end = AU1100_LCD_INT, 154 - .flags = IORESOURCE_IRQ, 131 + static int __init _new_usbres(struct resource **r, struct platform_device **d) 132 + { 133 + *r = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL); 134 + if (!*r) 135 + return -ENOMEM; 136 + *d = kzalloc(sizeof(struct platform_device), GFP_KERNEL); 137 + if (!*d) { 138 + kfree(*r); 139 + return -ENOMEM; 155 140 } 156 - }; 157 141 158 - static u64 au1100_lcd_dmamask = DMA_BIT_MASK(32); 142 + (*d)->dev.coherent_dma_mask = DMA_BIT_MASK(32); 143 + (*d)->num_resources = 2; 144 + (*d)->resource = *r; 159 145 160 - static struct platform_device au1100_lcd_device = { 161 - .name = "au1100-lcd", 162 - .id = 0, 163 - .dev = { 164 - .dma_mask = &au1100_lcd_dmamask, 165 - .coherent_dma_mask = DMA_BIT_MASK(32), 166 - }, 167 - .num_resources = ARRAY_SIZE(au1100_lcd_resources), 168 - .resource = au1100_lcd_resources, 169 - }; 170 - #endif 146 + return 0; 147 + } 171 148 172 - #ifdef CONFIG_SOC_AU1200 173 - /* EHCI (USB high speed host controller) */ 174 - static struct resource au1xxx_usb_ehci_resources[] = { 175 - [0] = { 176 - .start = USB_EHCI_BASE, 177 - .end = USB_EHCI_BASE + USB_EHCI_LEN - 1, 178 - .flags = IORESOURCE_MEM, 179 - }, 180 - [1] = { 181 - .start = AU1200_USB_INT, 182 - .end = AU1200_USB_INT, 183 - .flags = IORESOURCE_IRQ, 184 - }, 185 - }; 149 + static void __init alchemy_setup_usb(int ctype) 150 + { 151 + struct resource *res; 152 + struct platform_device *pdev; 186 153 187 - static u64 ehci_dmamask = DMA_BIT_MASK(32); 154 + /* setup OHCI0. 
Every variant has one */ 155 + if (_new_usbres(&res, &pdev)) 156 + return; 188 157 189 - static struct platform_device au1xxx_usb_ehci_device = { 190 - .name = "au1xxx-ehci", 191 - .id = 0, 192 - .dev = { 193 - .dma_mask = &ehci_dmamask, 194 - .coherent_dma_mask = DMA_BIT_MASK(32), 195 - }, 196 - .num_resources = ARRAY_SIZE(au1xxx_usb_ehci_resources), 197 - .resource = au1xxx_usb_ehci_resources, 198 - }; 158 + res[0].start = alchemy_ohci_data[ctype][0]; 159 + res[0].end = res[0].start + 0x100 - 1; 160 + res[0].flags = IORESOURCE_MEM; 161 + res[1].start = alchemy_ohci_data[ctype][1]; 162 + res[1].end = res[1].start; 163 + res[1].flags = IORESOURCE_IRQ; 164 + pdev->name = "au1xxx-ohci"; 165 + pdev->id = 0; 166 + pdev->dev.dma_mask = &alchemy_ohci_dmamask; 199 167 200 - /* Au1200 UDC (USB gadget controller) */ 201 - static struct resource au1xxx_usb_gdt_resources[] = { 202 - [0] = { 203 - .start = USB_UDC_BASE, 204 - .end = USB_UDC_BASE + USB_UDC_LEN - 1, 205 - .flags = IORESOURCE_MEM, 206 - }, 207 - [1] = { 208 - .start = AU1200_USB_INT, 209 - .end = AU1200_USB_INT, 210 - .flags = IORESOURCE_IRQ, 211 - }, 212 - }; 168 + if (platform_device_register(pdev)) 169 + printk(KERN_INFO "Alchemy USB: cannot add OHCI0\n"); 213 170 214 - static u64 udc_dmamask = DMA_BIT_MASK(32); 215 171 216 - static struct platform_device au1xxx_usb_gdt_device = { 217 - .name = "au1xxx-udc", 218 - .id = 0, 219 - .dev = { 220 - .dma_mask = &udc_dmamask, 221 - .coherent_dma_mask = DMA_BIT_MASK(32), 222 - }, 223 - .num_resources = ARRAY_SIZE(au1xxx_usb_gdt_resources), 224 - .resource = au1xxx_usb_gdt_resources, 225 - }; 172 + /* setup EHCI0: Au1200 */ 173 + if (ctype == ALCHEMY_CPU_AU1200) { 174 + if (_new_usbres(&res, &pdev)) 175 + return; 226 176 227 - /* Au1200 UOC (USB OTG controller) */ 228 - static struct resource au1xxx_usb_otg_resources[] = { 229 - [0] = { 230 - .start = USB_UOC_BASE, 231 - .end = USB_UOC_BASE + USB_UOC_LEN - 1, 232 - .flags = IORESOURCE_MEM, 233 - }, 234 - [1] = { 235 - 
.start = AU1200_USB_INT, 236 - .end = AU1200_USB_INT, 237 - .flags = IORESOURCE_IRQ, 238 - }, 239 - }; 177 + res[0].start = alchemy_ehci_data[ctype][0]; 178 + res[0].end = res[0].start + 0x100 - 1; 179 + res[0].flags = IORESOURCE_MEM; 180 + res[1].start = alchemy_ehci_data[ctype][1]; 181 + res[1].end = res[1].start; 182 + res[1].flags = IORESOURCE_IRQ; 183 + pdev->name = "au1xxx-ehci"; 184 + pdev->id = 0; 185 + pdev->dev.dma_mask = &alchemy_ehci_dmamask; 240 186 241 - static u64 uoc_dmamask = DMA_BIT_MASK(32); 242 - 243 - static struct platform_device au1xxx_usb_otg_device = { 244 - .name = "au1xxx-uoc", 245 - .id = 0, 246 - .dev = { 247 - .dma_mask = &uoc_dmamask, 248 - .coherent_dma_mask = DMA_BIT_MASK(32), 249 - }, 250 - .num_resources = ARRAY_SIZE(au1xxx_usb_otg_resources), 251 - .resource = au1xxx_usb_otg_resources, 252 - }; 253 - 254 - static struct resource au1200_lcd_resources[] = { 255 - [0] = { 256 - .start = LCD_PHYS_ADDR, 257 - .end = LCD_PHYS_ADDR + 0x800 - 1, 258 - .flags = IORESOURCE_MEM, 259 - }, 260 - [1] = { 261 - .start = AU1200_LCD_INT, 262 - .end = AU1200_LCD_INT, 263 - .flags = IORESOURCE_IRQ, 187 + if (platform_device_register(pdev)) 188 + printk(KERN_INFO "Alchemy USB: cannot add EHCI0\n"); 264 189 } 265 - }; 266 - 267 - static u64 au1200_lcd_dmamask = DMA_BIT_MASK(32); 268 - 269 - static struct platform_device au1200_lcd_device = { 270 - .name = "au1200-lcd", 271 - .id = 0, 272 - .dev = { 273 - .dma_mask = &au1200_lcd_dmamask, 274 - .coherent_dma_mask = DMA_BIT_MASK(32), 275 - }, 276 - .num_resources = ARRAY_SIZE(au1200_lcd_resources), 277 - .resource = au1200_lcd_resources, 278 - }; 279 - 280 - static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32); 281 - 282 - extern struct au1xmmc_platform_data au1xmmc_platdata[2]; 283 - 284 - static struct resource au1200_mmc0_resources[] = { 285 - [0] = { 286 - .start = AU1100_SD0_PHYS_ADDR, 287 - .end = AU1100_SD0_PHYS_ADDR + 0xfff, 288 - .flags = IORESOURCE_MEM, 289 - }, 290 - [1] = { 291 - .start = 
AU1200_SD_INT, 292 - .end = AU1200_SD_INT, 293 - .flags = IORESOURCE_IRQ, 294 - }, 295 - [2] = { 296 - .start = DSCR_CMD0_SDMS_TX0, 297 - .end = DSCR_CMD0_SDMS_TX0, 298 - .flags = IORESOURCE_DMA, 299 - }, 300 - [3] = { 301 - .start = DSCR_CMD0_SDMS_RX0, 302 - .end = DSCR_CMD0_SDMS_RX0, 303 - .flags = IORESOURCE_DMA, 304 - } 305 - }; 306 - 307 - static struct platform_device au1200_mmc0_device = { 308 - .name = "au1xxx-mmc", 309 - .id = 0, 310 - .dev = { 311 - .dma_mask = &au1xxx_mmc_dmamask, 312 - .coherent_dma_mask = DMA_BIT_MASK(32), 313 - .platform_data = &au1xmmc_platdata[0], 314 - }, 315 - .num_resources = ARRAY_SIZE(au1200_mmc0_resources), 316 - .resource = au1200_mmc0_resources, 317 - }; 318 - 319 - #ifndef CONFIG_MIPS_DB1200 320 - static struct resource au1200_mmc1_resources[] = { 321 - [0] = { 322 - .start = AU1100_SD1_PHYS_ADDR, 323 - .end = AU1100_SD1_PHYS_ADDR + 0xfff, 324 - .flags = IORESOURCE_MEM, 325 - }, 326 - [1] = { 327 - .start = AU1200_SD_INT, 328 - .end = AU1200_SD_INT, 329 - .flags = IORESOURCE_IRQ, 330 - }, 331 - [2] = { 332 - .start = DSCR_CMD0_SDMS_TX1, 333 - .end = DSCR_CMD0_SDMS_TX1, 334 - .flags = IORESOURCE_DMA, 335 - }, 336 - [3] = { 337 - .start = DSCR_CMD0_SDMS_RX1, 338 - .end = DSCR_CMD0_SDMS_RX1, 339 - .flags = IORESOURCE_DMA, 340 - } 341 - }; 342 - 343 - static struct platform_device au1200_mmc1_device = { 344 - .name = "au1xxx-mmc", 345 - .id = 1, 346 - .dev = { 347 - .dma_mask = &au1xxx_mmc_dmamask, 348 - .coherent_dma_mask = DMA_BIT_MASK(32), 349 - .platform_data = &au1xmmc_platdata[1], 350 - }, 351 - .num_resources = ARRAY_SIZE(au1200_mmc1_resources), 352 - .resource = au1200_mmc1_resources, 353 - }; 354 - #endif /* #ifndef CONFIG_MIPS_DB1200 */ 355 - #endif /* #ifdef CONFIG_SOC_AU1200 */ 356 - 357 - /* All Alchemy demoboards with I2C have this #define in their headers */ 358 - #ifdef SMBUS_PSC_BASE 359 - static struct resource pbdb_smbus_resources[] = { 360 - { 361 - .start = CPHYSADDR(SMBUS_PSC_BASE), 362 - .end = 
CPHYSADDR(SMBUS_PSC_BASE + 0xfffff), 363 - .flags = IORESOURCE_MEM, 364 - }, 365 - }; 366 - 367 - static struct platform_device pbdb_smbus_device = { 368 - .name = "au1xpsc_smbus", 369 - .id = 0, /* bus number */ 370 - .num_resources = ARRAY_SIZE(pbdb_smbus_resources), 371 - .resource = pbdb_smbus_resources, 372 - }; 373 - #endif 190 + } 374 191 375 192 /* Macro to help defining the Ethernet MAC resources */ 376 - #define MAC_RES_COUNT 3 /* MAC regs base, MAC enable reg, MAC INT */ 377 - #define MAC_RES(_base, _enable, _irq) \ 193 + #define MAC_RES_COUNT 4 /* MAC regs, MAC en, MAC INT, MACDMA regs */ 194 + #define MAC_RES(_base, _enable, _irq, _macdma) \ 378 195 { \ 379 196 .start = _base, \ 380 197 .end = _base + 0xffff, \ ··· 206 389 .start = _irq, \ 207 390 .end = _irq, \ 208 391 .flags = IORESOURCE_IRQ \ 392 + }, \ 393 + { \ 394 + .start = _macdma, \ 395 + .end = _macdma + 0x1ff, \ 396 + .flags = IORESOURCE_MEM, \ 209 397 } 210 398 211 399 static struct resource au1xxx_eth0_resources[][MAC_RES_COUNT] __initdata = { 212 400 [ALCHEMY_CPU_AU1000] = { 213 401 MAC_RES(AU1000_MAC0_PHYS_ADDR, 214 402 AU1000_MACEN_PHYS_ADDR, 215 - AU1000_MAC0_DMA_INT) 403 + AU1000_MAC0_DMA_INT, 404 + AU1000_MACDMA0_PHYS_ADDR) 216 405 }, 217 406 [ALCHEMY_CPU_AU1500] = { 218 407 MAC_RES(AU1500_MAC0_PHYS_ADDR, 219 408 AU1500_MACEN_PHYS_ADDR, 220 - AU1500_MAC0_DMA_INT) 409 + AU1500_MAC0_DMA_INT, 410 + AU1000_MACDMA0_PHYS_ADDR) 221 411 }, 222 412 [ALCHEMY_CPU_AU1100] = { 223 413 MAC_RES(AU1000_MAC0_PHYS_ADDR, 224 414 AU1000_MACEN_PHYS_ADDR, 225 - AU1100_MAC0_DMA_INT) 415 + AU1100_MAC0_DMA_INT, 416 + AU1000_MACDMA0_PHYS_ADDR) 226 417 }, 227 418 [ALCHEMY_CPU_AU1550] = { 228 419 MAC_RES(AU1000_MAC0_PHYS_ADDR, 229 420 AU1000_MACEN_PHYS_ADDR, 230 - AU1550_MAC0_DMA_INT) 421 + AU1550_MAC0_DMA_INT, 422 + AU1000_MACDMA0_PHYS_ADDR) 231 423 }, 232 424 }; 233 425 ··· 255 429 [ALCHEMY_CPU_AU1000] = { 256 430 MAC_RES(AU1000_MAC1_PHYS_ADDR, 257 431 AU1000_MACEN_PHYS_ADDR + 4, 258 - AU1000_MAC1_DMA_INT) 
432 + AU1000_MAC1_DMA_INT, 433 + AU1000_MACDMA1_PHYS_ADDR) 259 434 }, 260 435 [ALCHEMY_CPU_AU1500] = { 261 436 MAC_RES(AU1500_MAC1_PHYS_ADDR, 262 437 AU1500_MACEN_PHYS_ADDR + 4, 263 - AU1500_MAC1_DMA_INT) 438 + AU1500_MAC1_DMA_INT, 439 + AU1000_MACDMA1_PHYS_ADDR) 264 440 }, 265 441 [ALCHEMY_CPU_AU1550] = { 266 442 MAC_RES(AU1000_MAC1_PHYS_ADDR, 267 443 AU1000_MACEN_PHYS_ADDR + 4, 268 - AU1550_MAC1_DMA_INT) 444 + AU1550_MAC1_DMA_INT, 445 + AU1000_MACDMA1_PHYS_ADDR) 269 446 }, 270 447 }; 271 448 ··· 350 521 } 351 522 } 352 523 353 - static struct platform_device *au1xxx_platform_devices[] __initdata = { 354 - &au1xxx_usb_ohci_device, 355 - #ifdef CONFIG_FB_AU1100 356 - &au1100_lcd_device, 357 - #endif 358 - #ifdef CONFIG_SOC_AU1200 359 - &au1xxx_usb_ehci_device, 360 - &au1xxx_usb_gdt_device, 361 - &au1xxx_usb_otg_device, 362 - &au1200_lcd_device, 363 - &au1200_mmc0_device, 364 - #ifndef CONFIG_MIPS_DB1200 365 - &au1200_mmc1_device, 366 - #endif 367 - #endif 368 - #ifdef SMBUS_PSC_BASE 369 - &pbdb_smbus_device, 370 - #endif 371 - }; 372 - 373 524 static int __init au1xxx_platform_init(void) 374 525 { 375 - int err, ctype = alchemy_get_cputype(); 526 + int ctype = alchemy_get_cputype(); 376 527 377 528 alchemy_setup_uarts(ctype); 378 529 alchemy_setup_macs(ctype); 530 + alchemy_setup_usb(ctype); 379 531 380 - err = platform_add_devices(au1xxx_platform_devices, 381 - ARRAY_SIZE(au1xxx_platform_devices)); 382 - return err; 532 + return 0; 383 533 } 384 534 385 535 arch_initcall(au1xxx_platform_init);
-46
arch/mips/alchemy/common/power.c
··· 37 37 #include <asm/uaccess.h> 38 38 #include <asm/mach-au1x00/au1000.h> 39 39 40 - #ifdef CONFIG_PM 41 - 42 40 /* 43 41 * We need to save/restore a bunch of core registers that are 44 42 * either volatile or reset to some state across a processor sleep. ··· 47 49 * We only have to save/restore registers that aren't otherwise 48 50 * done as part of a driver pm_* function. 49 51 */ 50 - static unsigned int sleep_usb[2]; 51 52 static unsigned int sleep_sys_clocks[5]; 52 53 static unsigned int sleep_sys_pinfunc; 53 54 static unsigned int sleep_static_memctlr[4][3]; ··· 54 57 55 58 static void save_core_regs(void) 56 59 { 57 - #ifndef CONFIG_SOC_AU1200 58 - /* Shutdown USB host/device. */ 59 - sleep_usb[0] = au_readl(USB_HOST_CONFIG); 60 - 61 - /* There appears to be some undocumented reset register.... */ 62 - au_writel(0, 0xb0100004); 63 - au_sync(); 64 - au_writel(0, USB_HOST_CONFIG); 65 - au_sync(); 66 - 67 - sleep_usb[1] = au_readl(USBD_ENABLE); 68 - au_writel(0, USBD_ENABLE); 69 - au_sync(); 70 - 71 - #else /* AU1200 */ 72 - 73 - /* enable access to OTG mmio so we can save OTG CAP/MUX. 74 - * FIXME: write an OTG driver and move this stuff there! 75 - */ 76 - au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4); 77 - au_sync(); 78 - sleep_usb[0] = au_readl(0xb4020020); /* OTG_CAP */ 79 - sleep_usb[1] = au_readl(0xb4020024); /* OTG_MUX */ 80 - #endif 81 - 82 60 /* Clocks and PLLs. */ 83 61 sleep_sys_clocks[0] = au_readl(SYS_FREQCTRL0); 84 62 sleep_sys_clocks[1] = au_readl(SYS_FREQCTRL1); ··· 97 125 au_writel(sleep_sys_pinfunc, SYS_PINFUNC); 98 126 au_sync(); 99 127 100 - #ifndef CONFIG_SOC_AU1200 101 - au_writel(sleep_usb[0], USB_HOST_CONFIG); 102 - au_writel(sleep_usb[1], USBD_ENABLE); 103 - au_sync(); 104 - #else 105 - /* enable access to OTG memory */ 106 - au_writel(au_readl(USB_MSR_BASE + 4) | (1 << 6), USB_MSR_BASE + 4); 107 - au_sync(); 108 - 109 - /* restore OTG caps and port mux. 
*/ 110 - au_writel(sleep_usb[0], 0xb4020020 + 0); /* OTG_CAP */ 111 - au_sync(); 112 - au_writel(sleep_usb[1], 0xb4020020 + 4); /* OTG_MUX */ 113 - au_sync(); 114 - #endif 115 - 116 128 /* Restore the static memory controller configuration. */ 117 129 au_writel(sleep_static_memctlr[0][0], MEM_STCFG0); 118 130 au_writel(sleep_static_memctlr[0][1], MEM_STTIME0); ··· 130 174 131 175 restore_core_regs(); 132 176 } 133 - 134 - #endif /* CONFIG_PM */
+3 -3
arch/mips/alchemy/common/setup.c
··· 73 73 /* This routine should be valid for all Au1x based boards */ 74 74 phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) 75 75 { 76 - u32 start = (u32)Au1500_PCI_MEM_START; 77 - u32 end = (u32)Au1500_PCI_MEM_END; 76 + unsigned long start = ALCHEMY_PCI_MEMWIN_START; 77 + unsigned long end = ALCHEMY_PCI_MEMWIN_END; 78 78 79 79 /* Don't fixup 36-bit addresses */ 80 80 if ((phys_addr >> 32) != 0) ··· 82 82 83 83 /* Check for PCI memory window */ 84 84 if (phys_addr >= start && (phys_addr + size - 1) <= end) 85 - return (phys_t)((phys_addr - start) + Au1500_PCI_MEM_START); 85 + return (phys_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr); 86 86 87 87 /* default nop */ 88 88 return phys_addr;
+108 -43
arch/mips/alchemy/devboards/db1200/platform.c
··· 213 213 .start = DB1200_IDE_INT, 214 214 .end = DB1200_IDE_INT, 215 215 .flags = IORESOURCE_IRQ, 216 - } 216 + }, 217 + [2] = { 218 + .start = AU1200_DSCR_CMD0_DMA_REQ1, 219 + .end = AU1200_DSCR_CMD0_DMA_REQ1, 220 + .flags = IORESOURCE_DMA, 221 + }, 217 222 }; 218 223 219 224 static u64 ide_dmamask = DMA_BIT_MASK(32); ··· 333 328 .brightness_set = db1200_mmcled_set, 334 329 }; 335 330 336 - /* needed by arch/mips/alchemy/common/platform.c */ 337 - struct au1xmmc_platform_data au1xmmc_platdata[] = { 331 + static struct au1xmmc_platform_data db1200mmc_platdata = { 332 + .cd_setup = db1200_mmc_cd_setup, 333 + .set_power = db1200_mmc_set_power, 334 + .card_inserted = db1200_mmc_card_inserted, 335 + .card_readonly = db1200_mmc_card_readonly, 336 + .led = &db1200_mmc_led, 337 + }; 338 + 339 + static struct resource au1200_mmc0_resources[] = { 338 340 [0] = { 339 - .cd_setup = db1200_mmc_cd_setup, 340 - .set_power = db1200_mmc_set_power, 341 - .card_inserted = db1200_mmc_card_inserted, 342 - .card_readonly = db1200_mmc_card_readonly, 343 - .led = &db1200_mmc_led, 341 + .start = AU1100_SD0_PHYS_ADDR, 342 + .end = AU1100_SD0_PHYS_ADDR + 0xfff, 343 + .flags = IORESOURCE_MEM, 344 344 }, 345 + [1] = { 346 + .start = AU1200_SD_INT, 347 + .end = AU1200_SD_INT, 348 + .flags = IORESOURCE_IRQ, 349 + }, 350 + [2] = { 351 + .start = AU1200_DSCR_CMD0_SDMS_TX0, 352 + .end = AU1200_DSCR_CMD0_SDMS_TX0, 353 + .flags = IORESOURCE_DMA, 354 + }, 355 + [3] = { 356 + .start = AU1200_DSCR_CMD0_SDMS_RX0, 357 + .end = AU1200_DSCR_CMD0_SDMS_RX0, 358 + .flags = IORESOURCE_DMA, 359 + } 360 + }; 361 + 362 + static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32); 363 + 364 + static struct platform_device db1200_mmc0_dev = { 365 + .name = "au1xxx-mmc", 366 + .id = 0, 367 + .dev = { 368 + .dma_mask = &au1xxx_mmc_dmamask, 369 + .coherent_dma_mask = DMA_BIT_MASK(32), 370 + .platform_data = &db1200mmc_platdata, 371 + }, 372 + .num_resources = ARRAY_SIZE(au1200_mmc0_resources), 373 + .resource = 
au1200_mmc0_resources, 374 + }; 375 + 376 + /**********************************************************************/ 377 + 378 + static struct resource au1200_lcd_res[] = { 379 + [0] = { 380 + .start = AU1200_LCD_PHYS_ADDR, 381 + .end = AU1200_LCD_PHYS_ADDR + 0x800 - 1, 382 + .flags = IORESOURCE_MEM, 383 + }, 384 + [1] = { 385 + .start = AU1200_LCD_INT, 386 + .end = AU1200_LCD_INT, 387 + .flags = IORESOURCE_IRQ, 388 + } 389 + }; 390 + 391 + static u64 au1200_lcd_dmamask = DMA_BIT_MASK(32); 392 + 393 + static struct platform_device au1200_lcd_dev = { 394 + .name = "au1200-lcd", 395 + .id = 0, 396 + .dev = { 397 + .dma_mask = &au1200_lcd_dmamask, 398 + .coherent_dma_mask = DMA_BIT_MASK(32), 399 + }, 400 + .num_resources = ARRAY_SIZE(au1200_lcd_res), 401 + .resource = au1200_lcd_res, 345 402 }; 346 403 347 404 /**********************************************************************/ 348 405 349 406 static struct resource au1200_psc0_res[] = { 350 407 [0] = { 351 - .start = PSC0_PHYS_ADDR, 352 - .end = PSC0_PHYS_ADDR + 0x000fffff, 408 + .start = AU1550_PSC0_PHYS_ADDR, 409 + .end = AU1550_PSC0_PHYS_ADDR + 0xfff, 353 410 .flags = IORESOURCE_MEM, 354 411 }, 355 412 [1] = { ··· 420 353 .flags = IORESOURCE_IRQ, 421 354 }, 422 355 [2] = { 423 - .start = DSCR_CMD0_PSC0_TX, 424 - .end = DSCR_CMD0_PSC0_TX, 356 + .start = AU1200_DSCR_CMD0_PSC0_TX, 357 + .end = AU1200_DSCR_CMD0_PSC0_TX, 425 358 .flags = IORESOURCE_DMA, 426 359 }, 427 360 [3] = { 428 - .start = DSCR_CMD0_PSC0_RX, 429 - .end = DSCR_CMD0_PSC0_RX, 361 + .start = AU1200_DSCR_CMD0_PSC0_RX, 362 + .end = AU1200_DSCR_CMD0_PSC0_RX, 430 363 .flags = IORESOURCE_DMA, 431 364 }, 432 365 }; ··· 468 401 469 402 static struct resource au1200_psc1_res[] = { 470 403 [0] = { 471 - .start = PSC1_PHYS_ADDR, 472 - .end = PSC1_PHYS_ADDR + 0x000fffff, 404 + .start = AU1550_PSC1_PHYS_ADDR, 405 + .end = AU1550_PSC1_PHYS_ADDR + 0xfff, 473 406 .flags = IORESOURCE_MEM, 474 407 }, 475 408 [1] = { ··· 478 411 .flags = IORESOURCE_IRQ, 479 412 }, 
480 413 [2] = { 481 - .start = DSCR_CMD0_PSC1_TX, 482 - .end = DSCR_CMD0_PSC1_TX, 414 + .start = AU1200_DSCR_CMD0_PSC1_TX, 415 + .end = AU1200_DSCR_CMD0_PSC1_TX, 483 416 .flags = IORESOURCE_DMA, 484 417 }, 485 418 [3] = { 486 - .start = DSCR_CMD0_PSC1_RX, 487 - .end = DSCR_CMD0_PSC1_RX, 419 + .start = AU1200_DSCR_CMD0_PSC1_RX, 420 + .end = AU1200_DSCR_CMD0_PSC1_RX, 488 421 .flags = IORESOURCE_DMA, 489 422 }, 490 423 }; ··· 516 449 static struct platform_device *db1200_devs[] __initdata = { 517 450 NULL, /* PSC0, selected by S6.8 */ 518 451 &db1200_ide_dev, 452 + &db1200_mmc0_dev, 453 + &au1200_lcd_dev, 519 454 &db1200_eth_dev, 520 455 &db1200_rtc_dev, 521 456 &db1200_nand_dev, ··· 595 526 596 527 /* Audio PSC clock is supplied externally. (FIXME: platdata!!) */ 597 528 __raw_writel(PSC_SEL_CLK_SERCLK, 598 - (void __iomem *)KSEG1ADDR(PSC1_PHYS_ADDR) + PSC_SEL_OFFSET); 529 + (void __iomem *)KSEG1ADDR(AU1550_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET); 599 530 wmb(); 600 531 601 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR, 602 - PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 603 - PCMCIA_MEM_PHYS_ADDR, 604 - PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 605 - PCMCIA_IO_PHYS_ADDR, 606 - PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 607 - DB1200_PC0_INT, 608 - DB1200_PC0_INSERT_INT, 609 - /*DB1200_PC0_STSCHG_INT*/0, 610 - DB1200_PC0_EJECT_INT, 611 - 0); 532 + db1x_register_pcmcia_socket( 533 + AU1000_PCMCIA_ATTR_PHYS_ADDR, 534 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 535 + AU1000_PCMCIA_MEM_PHYS_ADDR, 536 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 537 + AU1000_PCMCIA_IO_PHYS_ADDR, 538 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 539 + DB1200_PC0_INT, DB1200_PC0_INSERT_INT, 540 + /*DB1200_PC0_STSCHG_INT*/0, DB1200_PC0_EJECT_INT, 0); 612 541 613 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR + 0x004000000, 614 - PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1, 615 - PCMCIA_MEM_PHYS_ADDR + 0x004000000, 616 - PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1, 617 - PCMCIA_IO_PHYS_ADDR + 
0x004000000, 618 - PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1, 619 - DB1200_PC1_INT, 620 - DB1200_PC1_INSERT_INT, 621 - /*DB1200_PC1_STSCHG_INT*/0, 622 - DB1200_PC1_EJECT_INT, 623 - 1); 542 + db1x_register_pcmcia_socket( 543 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004000000, 544 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1, 545 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004000000, 546 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1, 547 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000, 548 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1, 549 + DB1200_PC1_INT, DB1200_PC1_INSERT_INT, 550 + /*DB1200_PC1_STSCHG_INT*/0, DB1200_PC1_EJECT_INT, 1); 624 551 625 552 swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT; 626 553 db1x_register_norflash(64 << 20, 2, swapped);
+1 -27
arch/mips/alchemy/devboards/db1x00/board_setup.c
··· 40 40 41 41 #include <prom.h> 42 42 43 - #ifdef CONFIG_MIPS_DB1500 44 - char irq_tab_alchemy[][5] __initdata = { 45 - [12] = { -1, AU1500_PCI_INTA, 0xff, 0xff, 0xff }, /* IDSEL 12 - HPT371 */ 46 - [13] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, AU1500_PCI_INTC, AU1500_PCI_INTD }, /* IDSEL 13 - PCI slot */ 47 - }; 48 - 49 - #endif 50 - 51 - 52 - #ifdef CONFIG_MIPS_DB1550 53 - char irq_tab_alchemy[][5] __initdata = { 54 - [11] = { -1, AU1550_PCI_INTC, 0xff, 0xff, 0xff }, /* IDSEL 11 - on-board HPT371 */ 55 - [12] = { -1, AU1550_PCI_INTB, AU1550_PCI_INTC, AU1550_PCI_INTD, AU1550_PCI_INTA }, /* IDSEL 12 - PCI slot 2 (left) */ 56 - [13] = { -1, AU1550_PCI_INTA, AU1550_PCI_INTB, AU1550_PCI_INTC, AU1550_PCI_INTD }, /* IDSEL 13 - PCI slot 1 (right) */ 57 - }; 58 - #endif 59 - 60 - 61 43 #ifdef CONFIG_MIPS_BOSPORUS 62 44 char irq_tab_alchemy[][5] __initdata = { 63 45 [11] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 11 - miniPCI */ ··· 73 91 74 92 75 93 #ifdef CONFIG_MIPS_MIRAGE 76 - char irq_tab_alchemy[][5] __initdata = { 77 - [11] = { -1, AU1500_PCI_INTD, 0xff, 0xff, 0xff }, /* IDSEL 11 - SMI VGX */ 78 - [12] = { -1, 0xff, 0xff, AU1500_PCI_INTC, 0xff }, /* IDSEL 12 - PNX1300 */ 79 - [13] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 13 - miniPCI */ 80 - }; 81 - 82 94 static void mirage_power_off(void) 83 95 { 84 96 alchemy_gpio_direction_output(210, 1); ··· 134 158 /* initialize board register space */ 135 159 bcsr_init(bcsr1, bcsr2); 136 160 137 - /* Not valid for Au1550 */ 138 - #if defined(CONFIG_IRDA) && \ 139 - (defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1100)) 161 + #if defined(CONFIG_IRDA) && defined(CONFIG_AU1000_FIR) 140 162 { 141 163 u32 pin_func; 142 164
+173 -23
arch/mips/alchemy/devboards/db1x00/platform.c
··· 20 20 21 21 #include <linux/init.h> 22 22 #include <linux/interrupt.h> 23 + #include <linux/dma-mapping.h> 23 24 #include <linux/platform_device.h> 24 25 25 26 #include <asm/mach-au1x00/au1000.h> 26 27 #include <asm/mach-au1x00/au1000_dma.h> 27 - #include <asm/mach-au1x00/au1xxx.h> 28 28 #include <asm/mach-db1x00/bcsr.h> 29 29 #include "../platform.h" 30 + 31 + struct pci_dev; 30 32 31 33 /* DB1xxx PCMCIA interrupt sources: 32 34 * CD0/1 GPIO0/3 ··· 90 88 #endif 91 89 #endif 92 90 91 + #ifdef CONFIG_PCI 92 + #ifdef CONFIG_MIPS_DB1500 93 + static int db1xxx_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) 94 + { 95 + if ((slot < 12) || (slot > 13) || pin == 0) 96 + return -1; 97 + if (slot == 12) 98 + return (pin == 1) ? AU1500_PCI_INTA : 0xff; 99 + if (slot == 13) { 100 + switch (pin) { 101 + case 1: return AU1500_PCI_INTA; 102 + case 2: return AU1500_PCI_INTB; 103 + case 3: return AU1500_PCI_INTC; 104 + case 4: return AU1500_PCI_INTD; 105 + } 106 + } 107 + return -1; 108 + } 109 + #endif 110 + 111 + #ifdef CONFIG_MIPS_DB1550 112 + static int db1xxx_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) 113 + { 114 + if ((slot < 11) || (slot > 13) || pin == 0) 115 + return -1; 116 + if (slot == 11) 117 + return (pin == 1) ? AU1550_PCI_INTC : 0xff; 118 + if (slot == 12) { 119 + switch (pin) { 120 + case 1: return AU1550_PCI_INTB; 121 + case 2: return AU1550_PCI_INTC; 122 + case 3: return AU1550_PCI_INTD; 123 + case 4: return AU1550_PCI_INTA; 124 + } 125 + } 126 + if (slot == 13) { 127 + switch (pin) { 128 + case 1: return AU1550_PCI_INTA; 129 + case 2: return AU1550_PCI_INTB; 130 + case 3: return AU1550_PCI_INTC; 131 + case 4: return AU1550_PCI_INTD; 132 + } 133 + } 134 + return -1; 135 + } 136 + #endif 137 + 138 + #ifdef CONFIG_MIPS_BOSPORUS 139 + static int db1xxx_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) 140 + { 141 + if ((slot < 11) || (slot > 13) || pin == 0) 142 + return -1; 143 + if (slot == 12) 144 + return (pin == 1) ? 
AU1500_PCI_INTA : 0xff; 145 + if (slot == 11) { 146 + switch (pin) { 147 + case 1: return AU1500_PCI_INTA; 148 + case 2: return AU1500_PCI_INTB; 149 + default: return 0xff; 150 + } 151 + } 152 + if (slot == 13) { 153 + switch (pin) { 154 + case 1: return AU1500_PCI_INTA; 155 + case 2: return AU1500_PCI_INTB; 156 + case 3: return AU1500_PCI_INTC; 157 + case 4: return AU1500_PCI_INTD; 158 + } 159 + } 160 + return -1; 161 + } 162 + #endif 163 + 164 + #ifdef CONFIG_MIPS_MIRAGE 165 + static int db1xxx_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) 166 + { 167 + if ((slot < 11) || (slot > 13) || pin == 0) 168 + return -1; 169 + if (slot == 11) 170 + return (pin == 1) ? AU1500_PCI_INTD : 0xff; 171 + if (slot == 12) 172 + return (pin == 3) ? AU1500_PCI_INTC : 0xff; 173 + if (slot == 13) { 174 + switch (pin) { 175 + case 1: return AU1500_PCI_INTA; 176 + case 2: return AU1500_PCI_INTB; 177 + default: return 0xff; 178 + } 179 + } 180 + return -1; 181 + } 182 + #endif 183 + 184 + static struct resource alchemy_pci_host_res[] = { 185 + [0] = { 186 + .start = AU1500_PCI_PHYS_ADDR, 187 + .end = AU1500_PCI_PHYS_ADDR + 0xfff, 188 + .flags = IORESOURCE_MEM, 189 + }, 190 + }; 191 + 192 + static struct alchemy_pci_platdata db1xxx_pci_pd = { 193 + .board_map_irq = db1xxx_map_pci_irq, 194 + }; 195 + 196 + static struct platform_device db1xxx_pci_host_dev = { 197 + .dev.platform_data = &db1xxx_pci_pd, 198 + .name = "alchemy-pci", 199 + .id = 0, 200 + .num_resources = ARRAY_SIZE(alchemy_pci_host_res), 201 + .resource = alchemy_pci_host_res, 202 + }; 203 + 204 + static int __init db15x0_pci_init(void) 205 + { 206 + return platform_device_register(&db1xxx_pci_host_dev); 207 + } 208 + /* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */ 209 + arch_initcall(db15x0_pci_init); 210 + #endif 211 + 212 + #ifdef CONFIG_MIPS_DB1100 213 + static struct resource au1100_lcd_resources[] = { 214 + [0] = { 215 + .start = AU1100_LCD_PHYS_ADDR, 216 + .end = AU1100_LCD_PHYS_ADDR + 
0x800 - 1, 217 + .flags = IORESOURCE_MEM, 218 + }, 219 + [1] = { 220 + .start = AU1100_LCD_INT, 221 + .end = AU1100_LCD_INT, 222 + .flags = IORESOURCE_IRQ, 223 + } 224 + }; 225 + 226 + static u64 au1100_lcd_dmamask = DMA_BIT_MASK(32); 227 + 228 + static struct platform_device au1100_lcd_device = { 229 + .name = "au1100-lcd", 230 + .id = 0, 231 + .dev = { 232 + .dma_mask = &au1100_lcd_dmamask, 233 + .coherent_dma_mask = DMA_BIT_MASK(32), 234 + }, 235 + .num_resources = ARRAY_SIZE(au1100_lcd_resources), 236 + .resource = au1100_lcd_resources, 237 + }; 238 + #endif 239 + 93 240 static struct resource alchemy_ac97c_res[] = { 94 241 [0] = { 95 242 .start = AU1000_AC97_PHYS_ADDR, ··· 281 130 static int __init db1xxx_dev_init(void) 282 131 { 283 132 #ifdef DB1XXX_HAS_PCMCIA 284 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR, 285 - PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 286 - PCMCIA_MEM_PHYS_ADDR, 287 - PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 288 - PCMCIA_IO_PHYS_ADDR, 289 - PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 290 - DB1XXX_PCMCIA_CARD0, 291 - DB1XXX_PCMCIA_CD0, 292 - /*DB1XXX_PCMCIA_STSCHG0*/0, 293 - 0, 294 - 0); 133 + db1x_register_pcmcia_socket( 134 + AU1000_PCMCIA_ATTR_PHYS_ADDR, 135 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 136 + AU1000_PCMCIA_MEM_PHYS_ADDR, 137 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 138 + AU1000_PCMCIA_IO_PHYS_ADDR, 139 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 140 + DB1XXX_PCMCIA_CARD0, DB1XXX_PCMCIA_CD0, 141 + /*DB1XXX_PCMCIA_STSCHG0*/0, 0, 0); 295 142 296 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR + 0x004000000, 297 - PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1, 298 - PCMCIA_MEM_PHYS_ADDR + 0x004000000, 299 - PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1, 300 - PCMCIA_IO_PHYS_ADDR + 0x004000000, 301 - PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1, 302 - DB1XXX_PCMCIA_CARD1, 303 - DB1XXX_PCMCIA_CD1, 304 - /*DB1XXX_PCMCIA_STSCHG1*/0, 305 - 0, 306 - 1); 143 + db1x_register_pcmcia_socket( 144 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 
0x004000000, 145 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x004400000 - 1, 146 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004000000, 147 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1, 148 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000, 149 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1, 150 + DB1XXX_PCMCIA_CARD1, DB1XXX_PCMCIA_CD1, 151 + /*DB1XXX_PCMCIA_STSCHG1*/0, 0, 1); 152 + #endif 153 + #ifdef CONFIG_MIPS_DB1100 154 + platform_device_register(&au1100_lcd_device); 307 155 #endif 308 156 db1x_register_norflash(BOARD_FLASH_SIZE, BOARD_FLASH_WIDTH, F_SWAPPED); 309 157
+38 -11
arch/mips/alchemy/devboards/pb1100/platform.c
··· 19 19 */ 20 20 21 21 #include <linux/init.h> 22 + #include <linux/dma-mapping.h> 23 + #include <linux/platform_device.h> 22 24 23 25 #include <asm/mach-au1x00/au1000.h> 24 26 #include <asm/mach-db1x00/bcsr.h> 25 27 26 28 #include "../platform.h" 27 29 30 + static struct resource au1100_lcd_resources[] = { 31 + [0] = { 32 + .start = AU1100_LCD_PHYS_ADDR, 33 + .end = AU1100_LCD_PHYS_ADDR + 0x800 - 1, 34 + .flags = IORESOURCE_MEM, 35 + }, 36 + [1] = { 37 + .start = AU1100_LCD_INT, 38 + .end = AU1100_LCD_INT, 39 + .flags = IORESOURCE_IRQ, 40 + } 41 + }; 42 + 43 + static u64 au1100_lcd_dmamask = DMA_BIT_MASK(32); 44 + 45 + static struct platform_device au1100_lcd_device = { 46 + .name = "au1100-lcd", 47 + .id = 0, 48 + .dev = { 49 + .dma_mask = &au1100_lcd_dmamask, 50 + .coherent_dma_mask = DMA_BIT_MASK(32), 51 + }, 52 + .num_resources = ARRAY_SIZE(au1100_lcd_resources), 53 + .resource = au1100_lcd_resources, 54 + }; 55 + 28 56 static int __init pb1100_dev_init(void) 29 57 { 30 58 int swapped; 31 59 32 60 /* PCMCIA. 
single socket, identical to Pb1500 */ 33 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR, 34 - PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 35 - PCMCIA_MEM_PHYS_ADDR, 36 - PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 37 - PCMCIA_IO_PHYS_ADDR, 38 - PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 39 - AU1100_GPIO11_INT, /* card */ 40 - AU1100_GPIO9_INT, /* insert */ 41 - /*AU1100_GPIO10_INT*/0, /* stschg */ 42 - 0, /* eject */ 43 - 0); /* id */ 61 + db1x_register_pcmcia_socket( 62 + AU1000_PCMCIA_ATTR_PHYS_ADDR, 63 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 64 + AU1000_PCMCIA_MEM_PHYS_ADDR, 65 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 66 + AU1000_PCMCIA_IO_PHYS_ADDR, 67 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 68 + AU1100_GPIO11_INT, AU1100_GPIO9_INT, /* card / insert */ 69 + /*AU1100_GPIO10_INT*/0, 0, 0); /* stschg / eject / id */ 44 70 45 71 swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT; 46 72 db1x_register_norflash(64 * 1024 * 1024, 4, swapped); 73 + platform_device_register(&au1100_lcd_device); 47 74 48 75 return 0; 49 76 }
+162 -26
arch/mips/alchemy/devboards/pb1200/platform.c
··· 24 24 #include <linux/platform_device.h> 25 25 #include <linux/smc91x.h> 26 26 27 - #include <asm/mach-au1x00/au1xxx.h> 27 + #include <asm/mach-au1x00/au1000.h> 28 28 #include <asm/mach-au1x00/au1100_mmc.h> 29 + #include <asm/mach-au1x00/au1xxx_dbdma.h> 29 30 #include <asm/mach-db1x00/bcsr.h> 31 + #include <asm/mach-pb1x00/pb1200.h> 30 32 31 33 #include "../platform.h" 32 34 ··· 90 88 return (bcsr_read(BCSR_SIGSTAT) & BCSR_INT_SD1INSERT) ? 1 : 0; 91 89 } 92 90 93 - const struct au1xmmc_platform_data au1xmmc_platdata[2] = { 91 + static struct au1xmmc_platform_data pb1200mmc_platdata[2] = { 94 92 [0] = { 95 93 .set_power = pb1200mmc0_set_power, 96 94 .card_inserted = pb1200mmc0_card_inserted, ··· 107 105 }, 108 106 }; 109 107 108 + static u64 au1xxx_mmc_dmamask = DMA_BIT_MASK(32); 109 + 110 + static struct resource au1200_mmc0_res[] = { 111 + [0] = { 112 + .start = AU1100_SD0_PHYS_ADDR, 113 + .end = AU1100_SD0_PHYS_ADDR + 0xfff, 114 + .flags = IORESOURCE_MEM, 115 + }, 116 + [1] = { 117 + .start = AU1200_SD_INT, 118 + .end = AU1200_SD_INT, 119 + .flags = IORESOURCE_IRQ, 120 + }, 121 + [2] = { 122 + .start = AU1200_DSCR_CMD0_SDMS_TX0, 123 + .end = AU1200_DSCR_CMD0_SDMS_TX0, 124 + .flags = IORESOURCE_DMA, 125 + }, 126 + [3] = { 127 + .start = AU1200_DSCR_CMD0_SDMS_RX0, 128 + .end = AU1200_DSCR_CMD0_SDMS_RX0, 129 + .flags = IORESOURCE_DMA, 130 + } 131 + }; 132 + 133 + static struct platform_device pb1200_mmc0_dev = { 134 + .name = "au1xxx-mmc", 135 + .id = 0, 136 + .dev = { 137 + .dma_mask = &au1xxx_mmc_dmamask, 138 + .coherent_dma_mask = DMA_BIT_MASK(32), 139 + .platform_data = &pb1200mmc_platdata[0], 140 + }, 141 + .num_resources = ARRAY_SIZE(au1200_mmc0_res), 142 + .resource = au1200_mmc0_res, 143 + }; 144 + 145 + static struct resource au1200_mmc1_res[] = { 146 + [0] = { 147 + .start = AU1100_SD1_PHYS_ADDR, 148 + .end = AU1100_SD1_PHYS_ADDR + 0xfff, 149 + .flags = IORESOURCE_MEM, 150 + }, 151 + [1] = { 152 + .start = AU1200_SD_INT, 153 + .end = AU1200_SD_INT, 154 
+ .flags = IORESOURCE_IRQ, 155 + }, 156 + [2] = { 157 + .start = AU1200_DSCR_CMD0_SDMS_TX1, 158 + .end = AU1200_DSCR_CMD0_SDMS_TX1, 159 + .flags = IORESOURCE_DMA, 160 + }, 161 + [3] = { 162 + .start = AU1200_DSCR_CMD0_SDMS_RX1, 163 + .end = AU1200_DSCR_CMD0_SDMS_RX1, 164 + .flags = IORESOURCE_DMA, 165 + } 166 + }; 167 + 168 + static struct platform_device pb1200_mmc1_dev = { 169 + .name = "au1xxx-mmc", 170 + .id = 1, 171 + .dev = { 172 + .dma_mask = &au1xxx_mmc_dmamask, 173 + .coherent_dma_mask = DMA_BIT_MASK(32), 174 + .platform_data = &pb1200mmc_platdata[1], 175 + }, 176 + .num_resources = ARRAY_SIZE(au1200_mmc1_res), 177 + .resource = au1200_mmc1_res, 178 + }; 179 + 180 + 110 181 static struct resource ide_resources[] = { 111 182 [0] = { 112 183 .start = IDE_PHYS_ADDR, ··· 190 115 .start = IDE_INT, 191 116 .end = IDE_INT, 192 117 .flags = IORESOURCE_IRQ 193 - } 118 + }, 119 + [2] = { 120 + .start = AU1200_DSCR_CMD0_DMA_REQ1, 121 + .end = AU1200_DSCR_CMD0_DMA_REQ1, 122 + .flags = IORESOURCE_DMA, 123 + }, 194 124 }; 195 125 196 126 static u64 ide_dmamask = DMA_BIT_MASK(32); ··· 241 161 .resource = smc91c111_resources 242 162 }; 243 163 164 + static struct resource au1200_psc0_res[] = { 165 + [0] = { 166 + .start = AU1550_PSC0_PHYS_ADDR, 167 + .end = AU1550_PSC0_PHYS_ADDR + 0xfff, 168 + .flags = IORESOURCE_MEM, 169 + }, 170 + [1] = { 171 + .start = AU1200_PSC0_INT, 172 + .end = AU1200_PSC0_INT, 173 + .flags = IORESOURCE_IRQ, 174 + }, 175 + [2] = { 176 + .start = AU1200_DSCR_CMD0_PSC0_TX, 177 + .end = AU1200_DSCR_CMD0_PSC0_TX, 178 + .flags = IORESOURCE_DMA, 179 + }, 180 + [3] = { 181 + .start = AU1200_DSCR_CMD0_PSC0_RX, 182 + .end = AU1200_DSCR_CMD0_PSC0_RX, 183 + .flags = IORESOURCE_DMA, 184 + }, 185 + }; 186 + 187 + static struct platform_device pb1200_i2c_dev = { 188 + .name = "au1xpsc_smbus", 189 + .id = 0, /* bus number */ 190 + .num_resources = ARRAY_SIZE(au1200_psc0_res), 191 + .resource = au1200_psc0_res, 192 + }; 193 + 194 + static struct resource 
au1200_lcd_res[] = { 195 + [0] = { 196 + .start = AU1200_LCD_PHYS_ADDR, 197 + .end = AU1200_LCD_PHYS_ADDR + 0x800 - 1, 198 + .flags = IORESOURCE_MEM, 199 + }, 200 + [1] = { 201 + .start = AU1200_LCD_INT, 202 + .end = AU1200_LCD_INT, 203 + .flags = IORESOURCE_IRQ, 204 + } 205 + }; 206 + 207 + static u64 au1200_lcd_dmamask = DMA_BIT_MASK(32); 208 + 209 + static struct platform_device au1200_lcd_dev = { 210 + .name = "au1200-lcd", 211 + .id = 0, 212 + .dev = { 213 + .dma_mask = &au1200_lcd_dmamask, 214 + .coherent_dma_mask = DMA_BIT_MASK(32), 215 + }, 216 + .num_resources = ARRAY_SIZE(au1200_lcd_res), 217 + .resource = au1200_lcd_res, 218 + }; 219 + 244 220 static struct platform_device *board_platform_devices[] __initdata = { 245 221 &ide_device, 246 - &smc91c111_device 222 + &smc91c111_device, 223 + &pb1200_i2c_dev, 224 + &pb1200_mmc0_dev, 225 + &pb1200_mmc1_dev, 226 + &au1200_lcd_dev, 247 227 }; 248 228 249 229 static int __init board_register_devices(void) 250 230 { 251 231 int swapped; 252 232 253 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR, 254 - PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 255 - PCMCIA_MEM_PHYS_ADDR, 256 - PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 257 - PCMCIA_IO_PHYS_ADDR, 258 - PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 259 - PB1200_PC0_INT, 260 - PB1200_PC0_INSERT_INT, 261 - /*PB1200_PC0_STSCHG_INT*/0, 262 - PB1200_PC0_EJECT_INT, 263 - 0); 233 + db1x_register_pcmcia_socket( 234 + AU1000_PCMCIA_ATTR_PHYS_ADDR, 235 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 236 + AU1000_PCMCIA_MEM_PHYS_ADDR, 237 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 238 + AU1000_PCMCIA_IO_PHYS_ADDR, 239 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 240 + PB1200_PC0_INT, PB1200_PC0_INSERT_INT, 241 + /*PB1200_PC0_STSCHG_INT*/0, PB1200_PC0_EJECT_INT, 0); 264 242 265 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR + 0x008000000, 266 - PCMCIA_ATTR_PHYS_ADDR + 0x008400000 - 1, 267 - PCMCIA_MEM_PHYS_ADDR + 0x008000000, 268 - PCMCIA_MEM_PHYS_ADDR + 0x008400000 - 
1, 269 - PCMCIA_IO_PHYS_ADDR + 0x008000000, 270 - PCMCIA_IO_PHYS_ADDR + 0x008010000 - 1, 271 - PB1200_PC1_INT, 272 - PB1200_PC1_INSERT_INT, 273 - /*PB1200_PC1_STSCHG_INT*/0, 274 - PB1200_PC1_EJECT_INT, 275 - 1); 243 + db1x_register_pcmcia_socket( 244 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x008000000, 245 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x008400000 - 1, 246 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x008000000, 247 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x008400000 - 1, 248 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x008000000, 249 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x008010000 - 1, 250 + PB1200_PC1_INT, PB1200_PC1_INSERT_INT, 251 + /*PB1200_PC1_STSCHG_INT*/0, PB1200_PC1_EJECT_INT, 1); 276 252 277 253 swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT; 278 254 db1x_register_norflash(128 * 1024 * 1024, 2, swapped);
+12 -21
arch/mips/alchemy/devboards/pb1500/board_setup.c
··· 33 33 34 34 #include <prom.h> 35 35 36 - 37 - char irq_tab_alchemy[][5] __initdata = { 38 - [12] = { -1, AU1500_PCI_INTA, 0xff, 0xff, 0xff }, /* IDSEL 12 - HPT370 */ 39 - [13] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, AU1500_PCI_INTC, AU1500_PCI_INTD }, /* IDSEL 13 - PCI slot */ 40 - }; 41 - 42 - 43 36 const char *get_system_type(void) 44 37 { 45 38 return "Alchemy Pb1500"; ··· 94 101 #endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ 95 102 96 103 #ifdef CONFIG_PCI 97 - /* Setup PCI bus controller */ 98 - au_writel(0, Au1500_PCI_CMEM); 99 - au_writel(0x00003fff, Au1500_CFG_BASE); 100 - #if defined(__MIPSEB__) 101 - au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG); 102 - #else 103 - au_writel(0xf, Au1500_PCI_CFG); 104 - #endif 105 - au_writel(0xf0000000, Au1500_PCI_MWMASK_DEV); 106 - au_writel(0, Au1500_PCI_MWBASE_REV_CCL); 107 - au_writel(0x02a00356, Au1500_PCI_STATCMD); 108 - au_writel(0x00003c04, Au1500_PCI_HDRTYPE); 109 - au_writel(0x00000008, Au1500_PCI_MBAR); 110 - au_sync(); 104 + { 105 + void __iomem *base = 106 + (void __iomem *)KSEG1ADDR(AU1500_PCI_PHYS_ADDR); 107 + /* Setup PCI bus controller */ 108 + __raw_writel(0x00003fff, base + PCI_REG_CMEM); 109 + __raw_writel(0xf0000000, base + PCI_REG_MWMASK_DEV); 110 + __raw_writel(0, base + PCI_REG_MWBASE_REV_CCL); 111 + __raw_writel(0x02a00356, base + PCI_REG_STATCMD); 112 + __raw_writel(0x00003c04, base + PCI_REG_PARAM); 113 + __raw_writel(0x00000008, base + PCI_REG_MBAR); 114 + wmb(); 115 + } 111 116 #endif 112 117 113 118 /* Enable sys bus clock divider when IDLE state or no bus activity. */
+58 -13
arch/mips/alchemy/devboards/pb1500/platform.c
··· 18 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 19 19 */ 20 20 21 + #include <linux/dma-mapping.h> 21 22 #include <linux/init.h> 23 + #include <linux/platform_device.h> 22 24 #include <asm/mach-au1x00/au1000.h> 23 25 #include <asm/mach-db1x00/bcsr.h> 24 26 25 27 #include "../platform.h" 26 28 29 + static int pb1500_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) 30 + { 31 + if ((slot < 12) || (slot > 13) || pin == 0) 32 + return -1; 33 + if (slot == 12) 34 + return (pin == 1) ? AU1500_PCI_INTA : 0xff; 35 + if (slot == 13) { 36 + switch (pin) { 37 + case 1: return AU1500_PCI_INTA; 38 + case 2: return AU1500_PCI_INTB; 39 + case 3: return AU1500_PCI_INTC; 40 + case 4: return AU1500_PCI_INTD; 41 + } 42 + } 43 + return -1; 44 + } 45 + 46 + static struct resource alchemy_pci_host_res[] = { 47 + [0] = { 48 + .start = AU1500_PCI_PHYS_ADDR, 49 + .end = AU1500_PCI_PHYS_ADDR + 0xfff, 50 + .flags = IORESOURCE_MEM, 51 + }, 52 + }; 53 + 54 + static struct alchemy_pci_platdata pb1500_pci_pd = { 55 + .board_map_irq = pb1500_map_pci_irq, 56 + .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H | 57 + PCI_CONFIG_CH | 58 + #if defined(__MIPSEB__) 59 + PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM, 60 + #else 61 + 0, 62 + #endif 63 + }; 64 + 65 + static struct platform_device pb1500_pci_host = { 66 + .dev.platform_data = &pb1500_pci_pd, 67 + .name = "alchemy-pci", 68 + .id = 0, 69 + .num_resources = ARRAY_SIZE(alchemy_pci_host_res), 70 + .resource = alchemy_pci_host_res, 71 + }; 72 + 27 73 static int __init pb1500_dev_init(void) 28 74 { 29 75 int swapped; 30 76 31 - /* PCMCIA. 
single socket, identical to Pb1500 */ 32 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR, 33 - PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 34 - PCMCIA_MEM_PHYS_ADDR, 35 - PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 36 - PCMCIA_IO_PHYS_ADDR, 37 - PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 38 - AU1500_GPIO11_INT, /* card */ 39 - AU1500_GPIO9_INT, /* insert */ 40 - /*AU1500_GPIO10_INT*/0, /* stschg */ 41 - 0, /* eject */ 42 - 0); /* id */ 77 + /* PCMCIA. single socket, identical to Pb1100 */ 78 + db1x_register_pcmcia_socket( 79 + AU1000_PCMCIA_ATTR_PHYS_ADDR, 80 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 81 + AU1000_PCMCIA_MEM_PHYS_ADDR, 82 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 83 + AU1000_PCMCIA_IO_PHYS_ADDR, 84 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 85 + AU1500_GPIO11_INT, AU1500_GPIO9_INT, /* card / insert */ 86 + /*AU1500_GPIO10_INT*/0, 0, 0); /* stschg / eject / id */ 43 87 44 88 swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT; 45 89 db1x_register_norflash(64 * 1024 * 1024, 4, swapped); 90 + platform_device_register(&pb1500_pci_host); 46 91 47 92 return 0; 48 93 } 49 - device_initcall(pb1500_dev_init); 94 + arch_initcall(pb1500_dev_init);
-6
arch/mips/alchemy/devboards/pb1550/board_setup.c
··· 37 37 38 38 #include <prom.h> 39 39 40 - 41 - char irq_tab_alchemy[][5] __initdata = { 42 - [12] = { -1, AU1550_PCI_INTB, AU1550_PCI_INTC, AU1550_PCI_INTD, AU1550_PCI_INTA }, /* IDSEL 12 - PCI slot 2 (left) */ 43 - [13] = { -1, AU1550_PCI_INTA, AU1550_PCI_INTB, AU1550_PCI_INTC, AU1550_PCI_INTD }, /* IDSEL 13 - PCI slot 1 (right) */ 44 - }; 45 - 46 40 const char *get_system_type(void) 47 41 { 48 42 return "Alchemy Pb1550";
+95 -24
arch/mips/alchemy/devboards/pb1550/platform.c
··· 18 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 19 19 */ 20 20 21 + #include <linux/dma-mapping.h> 21 22 #include <linux/init.h> 22 - 23 + #include <linux/platform_device.h> 23 24 #include <asm/mach-au1x00/au1000.h> 25 + #include <asm/mach-au1x00/au1xxx_dbdma.h> 24 26 #include <asm/mach-pb1x00/pb1550.h> 25 27 #include <asm/mach-db1x00/bcsr.h> 26 28 27 29 #include "../platform.h" 30 + 31 + static int pb1550_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) 32 + { 33 + if ((slot < 12) || (slot > 13) || pin == 0) 34 + return -1; 35 + if (slot == 12) { 36 + switch (pin) { 37 + case 1: return AU1500_PCI_INTB; 38 + case 2: return AU1500_PCI_INTC; 39 + case 3: return AU1500_PCI_INTD; 40 + case 4: return AU1500_PCI_INTA; 41 + } 42 + } 43 + if (slot == 13) { 44 + switch (pin) { 45 + case 1: return AU1500_PCI_INTA; 46 + case 2: return AU1500_PCI_INTB; 47 + case 3: return AU1500_PCI_INTC; 48 + case 4: return AU1500_PCI_INTD; 49 + } 50 + } 51 + return -1; 52 + } 53 + 54 + static struct resource alchemy_pci_host_res[] = { 55 + [0] = { 56 + .start = AU1500_PCI_PHYS_ADDR, 57 + .end = AU1500_PCI_PHYS_ADDR + 0xfff, 58 + .flags = IORESOURCE_MEM, 59 + }, 60 + }; 61 + 62 + static struct alchemy_pci_platdata pb1550_pci_pd = { 63 + .board_map_irq = pb1550_map_pci_irq, 64 + }; 65 + 66 + static struct platform_device pb1550_pci_host = { 67 + .dev.platform_data = &pb1550_pci_pd, 68 + .name = "alchemy-pci", 69 + .id = 0, 70 + .num_resources = ARRAY_SIZE(alchemy_pci_host_res), 71 + .resource = alchemy_pci_host_res, 72 + }; 73 + 74 + static struct resource au1550_psc2_res[] = { 75 + [0] = { 76 + .start = AU1550_PSC2_PHYS_ADDR, 77 + .end = AU1550_PSC2_PHYS_ADDR + 0xfff, 78 + .flags = IORESOURCE_MEM, 79 + }, 80 + [1] = { 81 + .start = AU1550_PSC2_INT, 82 + .end = AU1550_PSC2_INT, 83 + .flags = IORESOURCE_IRQ, 84 + }, 85 + [2] = { 86 + .start = AU1550_DSCR_CMD0_PSC2_TX, 87 + .end = AU1550_DSCR_CMD0_PSC2_TX, 88 + .flags = IORESOURCE_DMA, 89 + }, 90 + [3] = 
{ 91 + .start = AU1550_DSCR_CMD0_PSC2_RX, 92 + .end = AU1550_DSCR_CMD0_PSC2_RX, 93 + .flags = IORESOURCE_DMA, 94 + }, 95 + }; 96 + 97 + static struct platform_device pb1550_i2c_dev = { 98 + .name = "au1xpsc_smbus", 99 + .id = 0, /* bus number */ 100 + .num_resources = ARRAY_SIZE(au1550_psc2_res), 101 + .resource = au1550_psc2_res, 102 + }; 28 103 29 104 static int __init pb1550_dev_init(void) 30 105 { ··· 112 37 * drivers are used to shared irqs and b) statuschange isn't really use- 113 38 * ful anyway. 114 39 */ 115 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR, 116 - PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 117 - PCMCIA_MEM_PHYS_ADDR, 118 - PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 119 - PCMCIA_IO_PHYS_ADDR, 120 - PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 121 - AU1550_GPIO201_205_INT, 122 - AU1550_GPIO0_INT, 123 - 0, 124 - 0, 125 - 0); 40 + db1x_register_pcmcia_socket( 41 + AU1000_PCMCIA_ATTR_PHYS_ADDR, 42 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 43 + AU1000_PCMCIA_MEM_PHYS_ADDR, 44 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 45 + AU1000_PCMCIA_IO_PHYS_ADDR, 46 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, 47 + AU1550_GPIO201_205_INT, AU1550_GPIO0_INT, 0, 0, 0); 126 48 127 - db1x_register_pcmcia_socket(PCMCIA_ATTR_PHYS_ADDR + 0x008000000, 128 - PCMCIA_ATTR_PHYS_ADDR + 0x008400000 - 1, 129 - PCMCIA_MEM_PHYS_ADDR + 0x008000000, 130 - PCMCIA_MEM_PHYS_ADDR + 0x008400000 - 1, 131 - PCMCIA_IO_PHYS_ADDR + 0x008000000, 132 - PCMCIA_IO_PHYS_ADDR + 0x008010000 - 1, 133 - AU1550_GPIO201_205_INT, 134 - AU1550_GPIO1_INT, 135 - 0, 136 - 0, 137 - 1); 49 + db1x_register_pcmcia_socket( 50 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x008000000, 51 + AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x008400000 - 1, 52 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x008000000, 53 + AU1000_PCMCIA_MEM_PHYS_ADDR + 0x008400000 - 1, 54 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x008000000, 55 + AU1000_PCMCIA_IO_PHYS_ADDR + 0x008010000 - 1, 56 + AU1550_GPIO201_205_INT, AU1550_GPIO1_INT, 0, 0, 1); 138 57 139 58 swapped = 
bcsr_read(BCSR_STATUS) & BCSR_STATUS_PB1550_SWAPBOOT; 140 59 db1x_register_norflash(128 * 1024 * 1024, 4, swapped); 60 + platform_device_register(&pb1550_pci_host); 61 + platform_device_register(&pb1550_i2c_dev); 141 62 142 63 return 0; 143 64 } 144 - device_initcall(pb1550_dev_init); 65 + arch_initcall(pb1550_dev_init);
-12
arch/mips/alchemy/gpr/board_setup.c
··· 36 36 37 37 #include <prom.h> 38 38 39 - char irq_tab_alchemy[][5] __initdata = { 40 - [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, 41 - }; 42 - 43 39 static void gpr_reset(char *c) 44 40 { 45 41 /* switch System-LED to orange (red# and green# on) */ ··· 72 76 73 77 /* Take away Reset of UMTS-card */ 74 78 alchemy_gpio_direction_output(215, 1); 75 - 76 - #ifdef CONFIG_PCI 77 - #if defined(__MIPSEB__) 78 - au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG); 79 - #else 80 - au_writel(0xf, Au1500_PCI_CFG); 81 - #endif 82 - #endif 83 79 }
+47
arch/mips/alchemy/gpr/platform.c
··· 167 167 } 168 168 }; 169 169 170 + 171 + 172 + static struct resource alchemy_pci_host_res[] = { 173 + [0] = { 174 + .start = AU1500_PCI_PHYS_ADDR, 175 + .end = AU1500_PCI_PHYS_ADDR + 0xfff, 176 + .flags = IORESOURCE_MEM, 177 + }, 178 + }; 179 + 180 + static int gpr_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) 181 + { 182 + if ((slot == 0) && (pin == 1)) 183 + return AU1550_PCI_INTA; 184 + else if ((slot == 0) && (pin == 2)) 185 + return AU1550_PCI_INTB; 186 + 187 + return -1; 188 + } 189 + 190 + static struct alchemy_pci_platdata gpr_pci_pd = { 191 + .board_map_irq = gpr_map_pci_irq, 192 + .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H | 193 + PCI_CONFIG_CH | 194 + #if defined(__MIPSEB__) 195 + PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM, 196 + #else 197 + 0, 198 + #endif 199 + }; 200 + 201 + static struct platform_device gpr_pci_host_dev = { 202 + .dev.platform_data = &gpr_pci_pd, 203 + .name = "alchemy-pci", 204 + .id = 0, 205 + .num_resources = ARRAY_SIZE(alchemy_pci_host_res), 206 + .resource = alchemy_pci_host_res, 207 + }; 208 + 170 209 static struct platform_device *gpr_devices[] __initdata = { 171 210 &gpr_wdt_device, 172 211 &gpr_mtd_device, 173 212 &gpr_i2c_device, 174 213 &gpr_led_devices, 175 214 }; 215 + 216 + static int __init gpr_pci_init(void) 217 + { 218 + return platform_device_register(&gpr_pci_host_dev); 219 + } 220 + /* must be arch_initcall; MIPS PCI scans busses in a subsys_initcall */ 221 + arch_initcall(gpr_pci_init); 222 + 176 223 177 224 static int __init gpr_dev_init(void) 178 225 {
-40
arch/mips/alchemy/mtx-1/board_setup.c
··· 38 38 39 39 #include <prom.h> 40 40 41 - char irq_tab_alchemy[][5] __initdata = { 42 - [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 00 - AdapterA-Slot0 (top) */ 43 - [1] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 01 - AdapterA-Slot1 (bottom) */ 44 - [2] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 02 - AdapterB-Slot0 (top) */ 45 - [3] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 03 - AdapterB-Slot1 (bottom) */ 46 - [4] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 04 - AdapterC-Slot0 (top) */ 47 - [5] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 05 - AdapterC-Slot1 (bottom) */ 48 - [6] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 06 - AdapterD-Slot0 (top) */ 49 - [7] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 07 - AdapterD-Slot1 (bottom) */ 50 - }; 51 - 52 - extern int (*board_pci_idsel)(unsigned int devsel, int assert); 53 - int mtx1_pci_idsel(unsigned int devsel, int assert); 54 - 55 41 static void mtx1_reset(char *c) 56 42 { 57 43 /* Jump to the reset vector */ ··· 60 74 alchemy_gpio_direction_output(204, 0); 61 75 #endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ 62 76 63 - #ifdef CONFIG_PCI 64 - #if defined(__MIPSEB__) 65 - au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG); 66 - #else 67 - au_writel(0xf, Au1500_PCI_CFG); 68 - #endif 69 - board_pci_idsel = mtx1_pci_idsel; 70 - #endif 71 - 72 77 /* Initialize sys_pinfunc */ 73 78 au_writel(SYS_PF_NI2, SYS_PINFUNC); 74 79 ··· 79 102 _machine_restart = mtx1_reset; 80 103 81 104 printk(KERN_INFO "4G Systems MTX-1 Board\n"); 82 - } 83 - 84 - int 85 - mtx1_pci_idsel(unsigned int devsel, int assert) 86 - { 87 - /* This function is only necessary to support a proprietary Cardbus 88 - * adapter on the mtx-1 "singleboard" variant. 
It triggers a custom 89 - * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals. 90 - */ 91 - if (assert && devsel != 0) 92 - /* Suppress signal to Cardbus */ 93 - alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */ 94 - else 95 - alchemy_gpio_set_value(1, 1); /* set EXT_IO3 ON */ 96 - 97 - udelay(1); 98 - return 1; 99 105 } 100 106 101 107 static int __init mtx1_init_irq(void)
+62
arch/mips/alchemy/mtx-1/platform.c
··· 135 135 .resource = &mtx1_mtd_resource, 136 136 }; 137 137 138 + static struct resource alchemy_pci_host_res[] = { 139 + [0] = { 140 + .start = AU1500_PCI_PHYS_ADDR, 141 + .end = AU1500_PCI_PHYS_ADDR + 0xfff, 142 + .flags = IORESOURCE_MEM, 143 + }, 144 + }; 145 + 146 + static int mtx1_pci_idsel(unsigned int devsel, int assert) 147 + { 148 + /* This function is only necessary to support a proprietary Cardbus 149 + * adapter on the mtx-1 "singleboard" variant. It triggers a custom 150 + * logic chip connected to EXT_IO3 (GPIO1) to suppress IDSEL signals. 151 + */ 152 + if (assert && devsel != 0) 153 + /* Suppress signal to Cardbus */ 154 + alchemy_gpio_set_value(1, 0); /* set EXT_IO3 OFF */ 155 + else 156 + alchemy_gpio_set_value(1, 1); /* set EXT_IO3 ON */ 157 + 158 + udelay(1); 159 + return 1; 160 + } 161 + 162 + static const char mtx1_irqtab[][5] = { 163 + [0] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 00 - AdapterA-Slot0 (top) */ 164 + [1] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 01 - AdapterA-Slot1 (bottom) */ 165 + [2] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 02 - AdapterB-Slot0 (top) */ 166 + [3] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 03 - AdapterB-Slot1 (bottom) */ 167 + [4] = { -1, AU1500_PCI_INTA, AU1500_PCI_INTB, 0xff, 0xff }, /* IDSEL 04 - AdapterC-Slot0 (top) */ 168 + [5] = { -1, AU1500_PCI_INTB, AU1500_PCI_INTA, 0xff, 0xff }, /* IDSEL 05 - AdapterC-Slot1 (bottom) */ 169 + [6] = { -1, AU1500_PCI_INTC, AU1500_PCI_INTD, 0xff, 0xff }, /* IDSEL 06 - AdapterD-Slot0 (top) */ 170 + [7] = { -1, AU1500_PCI_INTD, AU1500_PCI_INTC, 0xff, 0xff }, /* IDSEL 07 - AdapterD-Slot1 (bottom) */ 171 + }; 172 + 173 + static int mtx1_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) 174 + { 175 + return mtx1_irqtab[slot][pin]; 176 + } 177 + 178 + static struct alchemy_pci_platdata mtx1_pci_pd = { 179 + .board_map_irq = mtx1_map_pci_irq, 180 + .board_pci_idsel = 
mtx1_pci_idsel, 181 + .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H | 182 + PCI_CONFIG_CH | 183 + #if defined(__MIPSEB__) 184 + PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM, 185 + #else 186 + 0, 187 + #endif 188 + }; 189 + 190 + static struct platform_device mtx1_pci_host = { 191 + .dev.platform_data = &mtx1_pci_pd, 192 + .name = "alchemy-pci", 193 + .id = 0, 194 + .num_resources = ARRAY_SIZE(alchemy_pci_host_res), 195 + .resource = alchemy_pci_host_res, 196 + }; 197 + 198 + 138 199 static struct __initdata platform_device * mtx1_devs[] = { 200 + &mtx1_pci_host, 139 201 &mtx1_gpio_leds, 140 202 &mtx1_wdt, 141 203 &mtx1_button,
-8
arch/mips/alchemy/xxs1500/board_setup.c
··· 70 70 /* Enable DTR (MCR bit 0) = USB power up */ 71 71 __raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18)); 72 72 wmb(); 73 - 74 - #ifdef CONFIG_PCI 75 - #if defined(__MIPSEB__) 76 - au_writel(0xf | (2 << 6) | (1 << 4), Au1500_PCI_CFG); 77 - #else 78 - au_writel(0xf, Au1500_PCI_CFG); 79 - #endif 80 - #endif 81 73 } 82 74 83 75 static int __init xxs1500_init_irq(void)
+6 -6
arch/mips/alchemy/xxs1500/platform.c
··· 27 27 { 28 28 .name = "pcmcia-io", 29 29 .flags = IORESOURCE_MEM, 30 - .start = PCMCIA_IO_PHYS_ADDR, 31 - .end = PCMCIA_IO_PHYS_ADDR + 0x000400000 - 1, 30 + .start = AU1000_PCMCIA_IO_PHYS_ADDR, 31 + .end = AU1000_PCMCIA_IO_PHYS_ADDR + 0x000400000 - 1, 32 32 }, 33 33 { 34 34 .name = "pcmcia-attr", 35 35 .flags = IORESOURCE_MEM, 36 - .start = PCMCIA_ATTR_PHYS_ADDR, 37 - .end = PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 36 + .start = AU1000_PCMCIA_ATTR_PHYS_ADDR, 37 + .end = AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, 38 38 }, 39 39 { 40 40 .name = "pcmcia-mem", 41 41 .flags = IORESOURCE_MEM, 42 - .start = PCMCIA_MEM_PHYS_ADDR, 43 - .end = PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 42 + .start = AU1000_PCMCIA_MEM_PHYS_ADDR, 43 + .end = AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, 44 44 }, 45 45 }; 46 46
+24
arch/mips/include/asm/cacheflush.h
··· 114 114 extern void *kmap_coherent(struct page *page, unsigned long addr); 115 115 extern void kunmap_coherent(void); 116 116 117 + #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE 118 + static inline void flush_kernel_dcache_page(struct page *page) 119 + { 120 + BUG_ON(cpu_has_dc_aliases && PageHighMem(page)); 121 + } 122 + 123 + /* 124 + * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a 125 + * cache writeback and invalidate operation. 126 + */ 127 + extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); 128 + 129 + static inline void flush_kernel_vmap_range(void *vaddr, int size) 130 + { 131 + if (cpu_has_dc_aliases) 132 + __flush_kernel_vmap_range((unsigned long) vaddr, size); 133 + } 134 + 135 + static inline void invalidate_kernel_vmap_range(void *vaddr, int size) 136 + { 137 + if (cpu_has_dc_aliases) 138 + __flush_kernel_vmap_range((unsigned long) vaddr, size); 139 + } 140 + 117 141 #endif /* _ASM_CACHEFLUSH_H */
+3
arch/mips/include/asm/cpu.h
··· 135 135 #define PRID_IMP_CAVIUM_CN50XX 0x0600 136 136 #define PRID_IMP_CAVIUM_CN52XX 0x0700 137 137 #define PRID_IMP_CAVIUM_CN63XX 0x9000 138 + #define PRID_IMP_CAVIUM_CN68XX 0x9100 139 + #define PRID_IMP_CAVIUM_CN66XX 0x9200 140 + #define PRID_IMP_CAVIUM_CN61XX 0x9300 138 141 139 142 /* 140 143 * These are the PRID's for when 23:16 == PRID_COMP_INGENIC
+2 -10
arch/mips/include/asm/io.h
··· 329 329 "dsrl32 %L0, %L0, 0" "\n\t" \ 330 330 "dsll32 %M0, %M0, 0" "\n\t" \ 331 331 "or %L0, %L0, %M0" "\n\t" \ 332 - ".set push" "\n\t" \ 333 - ".set noreorder" "\n\t" \ 334 - ".set nomacro" "\n\t" \ 335 332 "sd %L0, %2" "\n\t" \ 336 - ".set pop" "\n\t" \ 337 333 ".set mips0" "\n" \ 338 334 : "=r" (__tmp) \ 339 - : "0" (__val), "R" (*__mem)); \ 335 + : "0" (__val), "m" (*__mem)); \ 340 336 if (irq) \ 341 337 local_irq_restore(__flags); \ 342 338 } else \ ··· 355 359 local_irq_save(__flags); \ 356 360 __asm__ __volatile__( \ 357 361 ".set mips3" "\t\t# __readq" "\n\t" \ 358 - ".set push" "\n\t" \ 359 - ".set noreorder" "\n\t" \ 360 - ".set nomacro" "\n\t" \ 361 362 "ld %L0, %1" "\n\t" \ 362 - ".set pop" "\n\t" \ 363 363 "dsra32 %M0, %L0, 0" "\n\t" \ 364 364 "sll %L0, %L0, 0" "\n\t" \ 365 365 ".set mips0" "\n" \ 366 366 : "=r" (__val) \ 367 - : "R" (*__mem)); \ 367 + : "m" (*__mem)); \ 368 368 if (irq) \ 369 369 local_irq_restore(__flags); \ 370 370 } else { \
+210 -349
arch/mips/include/asm/mach-au1x00/au1000.h
··· 245 245 void alchemy_sleep_au1550(void); 246 246 void au_sleep(void); 247 247 248 + /* USB: drivers/usb/host/alchemy-common.c */ 249 + enum alchemy_usb_block { 250 + ALCHEMY_USB_OHCI0, 251 + ALCHEMY_USB_UDC0, 252 + ALCHEMY_USB_EHCI0, 253 + ALCHEMY_USB_OTG0, 254 + }; 255 + int alchemy_usb_control(int block, int enable); 256 + 257 + /* PCI controller platform data */ 258 + struct alchemy_pci_platdata { 259 + int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin); 260 + int (*board_pci_idsel)(unsigned int devsel, int assert); 261 + /* bits to set/clear in PCI_CONFIG register */ 262 + unsigned long pci_cfg_set; 263 + unsigned long pci_cfg_clr; 264 + }; 248 265 249 266 /* SOC Interrupt numbers */ 250 267 ··· 592 575 #endif /* !defined (_LANGUAGE_ASSEMBLY) */ 593 576 594 577 /* 595 - * SDRAM register offsets 578 + * Physical base addresses for integrated peripherals 579 + * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 596 580 */ 597 - #if defined(CONFIG_SOC_AU1000) || defined(CONFIG_SOC_AU1500) || \ 598 - defined(CONFIG_SOC_AU1100) 599 - #define MEM_SDMODE0 0x0000 600 - #define MEM_SDMODE1 0x0004 601 - #define MEM_SDMODE2 0x0008 602 - #define MEM_SDADDR0 0x000C 603 - #define MEM_SDADDR1 0x0010 604 - #define MEM_SDADDR2 0x0014 605 - #define MEM_SDREFCFG 0x0018 606 - #define MEM_SDPRECMD 0x001C 607 - #define MEM_SDAUTOREF 0x0020 608 - #define MEM_SDWRMD0 0x0024 609 - #define MEM_SDWRMD1 0x0028 610 - #define MEM_SDWRMD2 0x002C 611 - #define MEM_SDSLEEP 0x0030 612 - #define MEM_SDSMCKE 0x0034 613 581 614 - /* 615 - * MEM_SDMODE register content definitions 616 - */ 582 + #define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */ 583 + #define AU1000_USB_OHCI_PHYS_ADDR 0x10100000 /* 012 */ 584 + #define AU1000_USB_UDC_PHYS_ADDR 0x10200000 /* 0123 */ 585 + #define AU1000_IRDA_PHYS_ADDR 0x10300000 /* 02 */ 586 + #define AU1200_AES_PHYS_ADDR 0x10300000 /* 4 */ 587 + #define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */ 588 + #define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 
*/ 589 + #define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */ 590 + #define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */ 591 + #define AU1100_SD0_PHYS_ADDR 0x10600000 /* 24 */ 592 + #define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */ 593 + #define AU1550_PSC2_PHYS_ADDR 0x10A00000 /* 3 */ 594 + #define AU1550_PSC3_PHYS_ADDR 0x10B00000 /* 3 */ 595 + #define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */ 596 + #define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */ 597 + #define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */ 598 + #define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */ 599 + #define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */ 600 + #define AU1200_SWCNT_PHYS_ADDR 0x1110010C /* 4 */ 601 + #define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */ 602 + #define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */ 603 + #define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */ 604 + #define AU1000_SSI0_PHYS_ADDR 0x11600000 /* 02 */ 605 + #define AU1000_SSI1_PHYS_ADDR 0x11680000 /* 02 */ 606 + #define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */ 607 + #define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */ 608 + #define AU1000_SYS_PHYS_ADDR 0x11900000 /* 01234 */ 609 + #define AU1550_PSC0_PHYS_ADDR 0x11A00000 /* 34 */ 610 + #define AU1550_PSC1_PHYS_ADDR 0x11B00000 /* 34 */ 611 + #define AU1000_MEM_PHYS_ADDR 0x14000000 /* 01234 */ 612 + #define AU1000_STATIC_MEM_PHYS_ADDR 0x14001000 /* 01234 */ 613 + #define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */ 614 + #define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 34 */ 615 + #define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 34 */ 616 + #define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */ 617 + #define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */ 618 + #define AU1200_CIM_PHYS_ADDR 0x14004000 /* 4 */ 619 + #define AU1500_PCI_PHYS_ADDR 0x14005000 /* 13 */ 620 + #define AU1550_PE_PHYS_ADDR 0x14008000 /* 3 */ 621 + #define AU1200_MAEBE_PHYS_ADDR 0x14010000 /* 4 */ 622 + #define AU1200_MAEFE_PHYS_ADDR 0x14012000 /* 4 */ 623 + #define AU1550_USB_OHCI_PHYS_ADDR 0x14020000 /* 3 
*/ 624 + #define AU1200_USB_CTL_PHYS_ADDR 0x14020000 /* 4 */ 625 + #define AU1200_USB_OTG_PHYS_ADDR 0x14020020 /* 4 */ 626 + #define AU1200_USB_OHCI_PHYS_ADDR 0x14020100 /* 4 */ 627 + #define AU1200_USB_EHCI_PHYS_ADDR 0x14020200 /* 4 */ 628 + #define AU1200_USB_UDC_PHYS_ADDR 0x14022000 /* 4 */ 629 + #define AU1100_LCD_PHYS_ADDR 0x15000000 /* 2 */ 630 + #define AU1200_LCD_PHYS_ADDR 0x15000000 /* 4 */ 631 + #define AU1500_PCI_MEM_PHYS_ADDR 0x400000000ULL /* 13 */ 632 + #define AU1500_PCI_IO_PHYS_ADDR 0x500000000ULL /* 13 */ 633 + #define AU1500_PCI_CONFIG0_PHYS_ADDR 0x600000000ULL /* 13 */ 634 + #define AU1500_PCI_CONFIG1_PHYS_ADDR 0x680000000ULL /* 13 */ 635 + #define AU1000_PCMCIA_IO_PHYS_ADDR 0xF00000000ULL /* 01234 */ 636 + #define AU1000_PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL /* 01234 */ 637 + #define AU1000_PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL /* 01234 */ 638 + 639 + 640 + /* Au1000 SDRAM memory controller register offsets */ 641 + #define AU1000_MEM_SDMODE0 0x0000 642 + #define AU1000_MEM_SDMODE1 0x0004 643 + #define AU1000_MEM_SDMODE2 0x0008 644 + #define AU1000_MEM_SDADDR0 0x000C 645 + #define AU1000_MEM_SDADDR1 0x0010 646 + #define AU1000_MEM_SDADDR2 0x0014 647 + #define AU1000_MEM_SDREFCFG 0x0018 648 + #define AU1000_MEM_SDPRECMD 0x001C 649 + #define AU1000_MEM_SDAUTOREF 0x0020 650 + #define AU1000_MEM_SDWRMD0 0x0024 651 + #define AU1000_MEM_SDWRMD1 0x0028 652 + #define AU1000_MEM_SDWRMD2 0x002C 653 + #define AU1000_MEM_SDSLEEP 0x0030 654 + #define AU1000_MEM_SDSMCKE 0x0034 655 + 656 + /* MEM_SDMODE register content definitions */ 617 657 #define MEM_SDMODE_F (1 << 22) 618 658 #define MEM_SDMODE_SR (1 << 21) 619 659 #define MEM_SDMODE_BS (1 << 20) 620 660 #define MEM_SDMODE_RS (3 << 18) 621 661 #define MEM_SDMODE_CS (7 << 15) 622 - #define MEM_SDMODE_TRAS (15 << 11) 623 - #define MEM_SDMODE_TMRD (3 << 9) 662 + #define MEM_SDMODE_TRAS (15 << 11) 663 + #define MEM_SDMODE_TMRD (3 << 9) 624 664 #define MEM_SDMODE_TWR (3 << 7) 625 665 #define MEM_SDMODE_TRP (3 << 
5) 626 - #define MEM_SDMODE_TRCD (3 << 3) 666 + #define MEM_SDMODE_TRCD (3 << 3) 627 667 #define MEM_SDMODE_TCL (7 << 0) 628 668 629 669 #define MEM_SDMODE_BS_2Bank (0 << 20) ··· 702 628 #define MEM_SDMODE_TRCD_N(N) ((N) << 3) 703 629 #define MEM_SDMODE_TCL_N(N) ((N) << 0) 704 630 705 - /* 706 - * MEM_SDADDR register contents definitions 707 - */ 631 + /* MEM_SDADDR register contents definitions */ 708 632 #define MEM_SDADDR_E (1 << 20) 709 - #define MEM_SDADDR_CSBA (0x03FF << 10) 633 + #define MEM_SDADDR_CSBA (0x03FF << 10) 710 634 #define MEM_SDADDR_CSMASK (0x03FF << 0) 711 635 #define MEM_SDADDR_CSBA_N(N) ((N) & (0x03FF << 22) >> 12) 712 636 #define MEM_SDADDR_CSMASK_N(N) ((N)&(0x03FF << 22) >> 22) 713 637 714 - /* 715 - * MEM_SDREFCFG register content definitions 716 - */ 638 + /* MEM_SDREFCFG register content definitions */ 717 639 #define MEM_SDREFCFG_TRC (15 << 28) 718 640 #define MEM_SDREFCFG_TRPM (3 << 26) 719 641 #define MEM_SDREFCFG_E (1 << 25) 720 - #define MEM_SDREFCFG_RE (0x1ffffff << 0) 642 + #define MEM_SDREFCFG_RE (0x1ffffff << 0) 721 643 #define MEM_SDREFCFG_TRC_N(N) ((N) << MEM_SDREFCFG_TRC) 722 644 #define MEM_SDREFCFG_TRPM_N(N) ((N) << MEM_SDREFCFG_TRPM) 723 645 #define MEM_SDREFCFG_REF_N(N) (N) 724 - #endif 725 646 726 - /***********************************************************************/ 727 - 728 - /* 729 - * Au1550 SDRAM Register Offsets 730 - */ 731 - 732 - /***********************************************************************/ 733 - 734 - #if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) 735 - #define MEM_SDMODE0 0x0800 736 - #define MEM_SDMODE1 0x0808 737 - #define MEM_SDMODE2 0x0810 738 - #define MEM_SDADDR0 0x0820 739 - #define MEM_SDADDR1 0x0828 740 - #define MEM_SDADDR2 0x0830 741 - #define MEM_SDCONFIGA 0x0840 742 - #define MEM_SDCONFIGB 0x0848 743 - #define MEM_SDSTAT 0x0850 744 - #define MEM_SDERRADDR 0x0858 745 - #define MEM_SDSTRIDE0 0x0860 746 - #define MEM_SDSTRIDE1 0x0868 747 - #define MEM_SDSTRIDE2 0x0870 
748 - #define MEM_SDWRMD0 0x0880 749 - #define MEM_SDWRMD1 0x0888 750 - #define MEM_SDWRMD2 0x0890 751 - #define MEM_SDPRECMD 0x08C0 752 - #define MEM_SDAUTOREF 0x08C8 753 - #define MEM_SDSREF 0x08D0 754 - #define MEM_SDSLEEP MEM_SDSREF 755 - 756 - #endif 757 - 758 - /* 759 - * Physical base addresses for integrated peripherals 760 - * 0..au1000 1..au1500 2..au1100 3..au1550 4..au1200 761 - */ 762 - 763 - #define AU1000_AC97_PHYS_ADDR 0x10000000 /* 012 */ 764 - #define AU1000_USBD_PHYS_ADDR 0x10200000 /* 0123 */ 765 - #define AU1000_IC0_PHYS_ADDR 0x10400000 /* 01234 */ 766 - #define AU1000_MAC0_PHYS_ADDR 0x10500000 /* 023 */ 767 - #define AU1000_MAC1_PHYS_ADDR 0x10510000 /* 023 */ 768 - #define AU1000_MACEN_PHYS_ADDR 0x10520000 /* 023 */ 769 - #define AU1100_SD0_PHYS_ADDR 0x10600000 /* 24 */ 770 - #define AU1100_SD1_PHYS_ADDR 0x10680000 /* 24 */ 771 - #define AU1000_I2S_PHYS_ADDR 0x11000000 /* 02 */ 772 - #define AU1500_MAC0_PHYS_ADDR 0x11500000 /* 1 */ 773 - #define AU1500_MAC1_PHYS_ADDR 0x11510000 /* 1 */ 774 - #define AU1500_MACEN_PHYS_ADDR 0x11520000 /* 1 */ 775 - #define AU1000_UART0_PHYS_ADDR 0x11100000 /* 01234 */ 776 - #define AU1000_UART1_PHYS_ADDR 0x11200000 /* 0234 */ 777 - #define AU1000_UART2_PHYS_ADDR 0x11300000 /* 0 */ 778 - #define AU1000_UART3_PHYS_ADDR 0x11400000 /* 0123 */ 779 - #define AU1500_GPIO2_PHYS_ADDR 0x11700000 /* 1234 */ 780 - #define AU1000_IC1_PHYS_ADDR 0x11800000 /* 01234 */ 781 - #define AU1000_SYS_PHYS_ADDR 0x11900000 /* 01234 */ 782 - #define AU1000_DMA_PHYS_ADDR 0x14002000 /* 012 */ 783 - #define AU1550_DBDMA_PHYS_ADDR 0x14002000 /* 34 */ 784 - #define AU1550_DBDMA_CONF_PHYS_ADDR 0x14003000 /* 34 */ 785 - #define AU1000_MACDMA0_PHYS_ADDR 0x14004000 /* 0123 */ 786 - #define AU1000_MACDMA1_PHYS_ADDR 0x14004200 /* 0123 */ 787 - 788 - 789 - #ifdef CONFIG_SOC_AU1000 790 - #define MEM_PHYS_ADDR 0x14000000 791 - #define STATIC_MEM_PHYS_ADDR 0x14001000 792 - #define USBH_PHYS_ADDR 0x10100000 793 - #define IRDA_PHYS_ADDR 0x10300000 794 - 
#define SSI0_PHYS_ADDR 0x11600000 795 - #define SSI1_PHYS_ADDR 0x11680000 796 - #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL 797 - #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL 798 - #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL 799 - #endif 800 - 801 - /********************************************************************/ 802 - 803 - #ifdef CONFIG_SOC_AU1500 804 - #define MEM_PHYS_ADDR 0x14000000 805 - #define STATIC_MEM_PHYS_ADDR 0x14001000 806 - #define USBH_PHYS_ADDR 0x10100000 807 - #define PCI_PHYS_ADDR 0x14005000 808 - #define PCI_MEM_PHYS_ADDR 0x400000000ULL 809 - #define PCI_IO_PHYS_ADDR 0x500000000ULL 810 - #define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL 811 - #define PCI_CONFIG1_PHYS_ADDR 0x680000000ULL 812 - #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL 813 - #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL 814 - #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL 815 - #endif 816 - 817 - /********************************************************************/ 818 - 819 - #ifdef CONFIG_SOC_AU1100 820 - #define MEM_PHYS_ADDR 0x14000000 821 - #define STATIC_MEM_PHYS_ADDR 0x14001000 822 - #define USBH_PHYS_ADDR 0x10100000 823 - #define IRDA_PHYS_ADDR 0x10300000 824 - #define SSI0_PHYS_ADDR 0x11600000 825 - #define SSI1_PHYS_ADDR 0x11680000 826 - #define LCD_PHYS_ADDR 0x15000000 827 - #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL 828 - #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL 829 - #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL 830 - #endif 831 - 832 - /***********************************************************************/ 833 - 834 - #ifdef CONFIG_SOC_AU1550 835 - #define MEM_PHYS_ADDR 0x14000000 836 - #define STATIC_MEM_PHYS_ADDR 0x14001000 837 - #define USBH_PHYS_ADDR 0x14020000 838 - #define PCI_PHYS_ADDR 0x14005000 839 - #define PE_PHYS_ADDR 0x14008000 840 - #define PSC0_PHYS_ADDR 0x11A00000 841 - #define PSC1_PHYS_ADDR 0x11B00000 842 - #define PSC2_PHYS_ADDR 0x10A00000 843 - #define PSC3_PHYS_ADDR 0x10B00000 844 - #define PCI_MEM_PHYS_ADDR 0x400000000ULL 845 - #define 
PCI_IO_PHYS_ADDR 0x500000000ULL 846 - #define PCI_CONFIG0_PHYS_ADDR 0x600000000ULL 847 - #define PCI_CONFIG1_PHYS_ADDR 0x680000000ULL 848 - #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL 849 - #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL 850 - #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL 851 - #endif 852 - 853 - /***********************************************************************/ 854 - 855 - #ifdef CONFIG_SOC_AU1200 856 - #define MEM_PHYS_ADDR 0x14000000 857 - #define STATIC_MEM_PHYS_ADDR 0x14001000 858 - #define AES_PHYS_ADDR 0x10300000 859 - #define CIM_PHYS_ADDR 0x14004000 860 - #define USBM_PHYS_ADDR 0x14020000 861 - #define USBH_PHYS_ADDR 0x14020100 862 - #define PSC0_PHYS_ADDR 0x11A00000 863 - #define PSC1_PHYS_ADDR 0x11B00000 864 - #define LCD_PHYS_ADDR 0x15000000 865 - #define SWCNT_PHYS_ADDR 0x1110010C 866 - #define MAEFE_PHYS_ADDR 0x14012000 867 - #define MAEBE_PHYS_ADDR 0x14010000 868 - #define PCMCIA_IO_PHYS_ADDR 0xF00000000ULL 869 - #define PCMCIA_ATTR_PHYS_ADDR 0xF40000000ULL 870 - #define PCMCIA_MEM_PHYS_ADDR 0xF80000000ULL 871 - #endif 647 + /* Au1550 SDRAM Register Offsets */ 648 + #define AU1550_MEM_SDMODE0 0x0800 649 + #define AU1550_MEM_SDMODE1 0x0808 650 + #define AU1550_MEM_SDMODE2 0x0810 651 + #define AU1550_MEM_SDADDR0 0x0820 652 + #define AU1550_MEM_SDADDR1 0x0828 653 + #define AU1550_MEM_SDADDR2 0x0830 654 + #define AU1550_MEM_SDCONFIGA 0x0840 655 + #define AU1550_MEM_SDCONFIGB 0x0848 656 + #define AU1550_MEM_SDSTAT 0x0850 657 + #define AU1550_MEM_SDERRADDR 0x0858 658 + #define AU1550_MEM_SDSTRIDE0 0x0860 659 + #define AU1550_MEM_SDSTRIDE1 0x0868 660 + #define AU1550_MEM_SDSTRIDE2 0x0870 661 + #define AU1550_MEM_SDWRMD0 0x0880 662 + #define AU1550_MEM_SDWRMD1 0x0888 663 + #define AU1550_MEM_SDWRMD2 0x0890 664 + #define AU1550_MEM_SDPRECMD 0x08C0 665 + #define AU1550_MEM_SDAUTOREF 0x08C8 666 + #define AU1550_MEM_SDSREF 0x08D0 667 + #define AU1550_MEM_SDSLEEP MEM_SDSREF 872 668 873 669 /* Static Bus Controller */ 874 670 #define MEM_STCFG0 
0xB4001000 ··· 757 813 #define MEM_STTIME3 0xB4001034 758 814 #define MEM_STADDR3 0xB4001038 759 815 760 - #if defined(CONFIG_SOC_AU1550) || defined(CONFIG_SOC_AU1200) 761 816 #define MEM_STNDCTL 0xB4001100 762 817 #define MEM_STSTAT 0xB4001104 763 818 764 819 #define MEM_STNAND_CMD 0x0 765 820 #define MEM_STNAND_ADDR 0x4 766 821 #define MEM_STNAND_DATA 0x20 767 - #endif 768 822 769 - 770 - 771 - 772 - /* Au1000 */ 773 - #ifdef CONFIG_SOC_AU1000 774 - 775 - #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ 776 - #define USB_HOST_CONFIG 0xB017FFFC 777 - #define FOR_PLATFORM_C_USB_HOST_INT AU1000_USB_HOST_INT 778 - #endif /* CONFIG_SOC_AU1000 */ 779 - 780 - /* Au1500 */ 781 - #ifdef CONFIG_SOC_AU1500 782 - 783 - #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ 784 - #define USB_HOST_CONFIG 0xB017fffc 785 - #define FOR_PLATFORM_C_USB_HOST_INT AU1500_USB_HOST_INT 786 - #endif /* CONFIG_SOC_AU1500 */ 787 - 788 - /* Au1100 */ 789 - #ifdef CONFIG_SOC_AU1100 790 - 791 - #define USB_OHCI_BASE 0x10100000 /* phys addr for ioremap */ 792 - #define USB_HOST_CONFIG 0xB017FFFC 793 - #define FOR_PLATFORM_C_USB_HOST_INT AU1100_USB_HOST_INT 794 - #endif /* CONFIG_SOC_AU1100 */ 795 - 796 - #ifdef CONFIG_SOC_AU1550 797 - 798 - #define USB_OHCI_BASE 0x14020000 /* phys addr for ioremap */ 799 - #define USB_OHCI_LEN 0x00060000 800 - #define USB_HOST_CONFIG 0xB4027ffc 801 - #define FOR_PLATFORM_C_USB_HOST_INT AU1550_USB_HOST_INT 802 - #endif /* CONFIG_SOC_AU1550 */ 803 - 804 - 805 - #ifdef CONFIG_SOC_AU1200 806 - 807 - #define USB_UOC_BASE 0x14020020 808 - #define USB_UOC_LEN 0x20 809 - #define USB_OHCI_BASE 0x14020100 810 - #define USB_OHCI_LEN 0x100 811 - #define USB_EHCI_BASE 0x14020200 812 - #define USB_EHCI_LEN 0x100 813 - #define USB_UDC_BASE 0x14022000 814 - #define USB_UDC_LEN 0x2000 815 - #define USB_MSR_BASE 0xB4020000 816 - #define USB_MSR_MCFG 4 817 - #define USBMSRMCFG_OMEMEN 0 818 - #define USBMSRMCFG_OBMEN 1 819 - #define USBMSRMCFG_EMEMEN 2 820 - 
#define USBMSRMCFG_EBMEN 3 821 - #define USBMSRMCFG_DMEMEN 4 822 - #define USBMSRMCFG_DBMEN 5 823 - #define USBMSRMCFG_GMEMEN 6 824 - #define USBMSRMCFG_OHCCLKEN 16 825 - #define USBMSRMCFG_EHCCLKEN 17 826 - #define USBMSRMCFG_UDCCLKEN 18 827 - #define USBMSRMCFG_PHYPLLEN 19 828 - #define USBMSRMCFG_RDCOMB 30 829 - #define USBMSRMCFG_PFEN 31 830 - 831 - #define FOR_PLATFORM_C_USB_HOST_INT AU1200_USB_INT 832 - 833 - #endif /* CONFIG_SOC_AU1200 */ 834 823 835 824 /* Programmable Counters 0 and 1 */ 836 825 #define SYS_BASE 0xB1900000 ··· 835 958 # define I2S_CONTROL_D (1 << 1) 836 959 # define I2S_CONTROL_CE (1 << 0) 837 960 838 - /* USB Host Controller */ 839 - #ifndef USB_OHCI_LEN 840 - #define USB_OHCI_LEN 0x00100000 841 - #endif 842 - 843 - #ifndef CONFIG_SOC_AU1200 844 - 845 - /* USB Device Controller */ 846 - #define USBD_EP0RD 0xB0200000 847 - #define USBD_EP0WR 0xB0200004 848 - #define USBD_EP2WR 0xB0200008 849 - #define USBD_EP3WR 0xB020000C 850 - #define USBD_EP4RD 0xB0200010 851 - #define USBD_EP5RD 0xB0200014 852 - #define USBD_INTEN 0xB0200018 853 - #define USBD_INTSTAT 0xB020001C 854 - # define USBDEV_INT_SOF (1 << 12) 855 - # define USBDEV_INT_HF_BIT 6 856 - # define USBDEV_INT_HF_MASK (0x3f << USBDEV_INT_HF_BIT) 857 - # define USBDEV_INT_CMPLT_BIT 0 858 - # define USBDEV_INT_CMPLT_MASK (0x3f << USBDEV_INT_CMPLT_BIT) 859 - #define USBD_CONFIG 0xB0200020 860 - #define USBD_EP0CS 0xB0200024 861 - #define USBD_EP2CS 0xB0200028 862 - #define USBD_EP3CS 0xB020002C 863 - #define USBD_EP4CS 0xB0200030 864 - #define USBD_EP5CS 0xB0200034 865 - # define USBDEV_CS_SU (1 << 14) 866 - # define USBDEV_CS_NAK (1 << 13) 867 - # define USBDEV_CS_ACK (1 << 12) 868 - # define USBDEV_CS_BUSY (1 << 11) 869 - # define USBDEV_CS_TSIZE_BIT 1 870 - # define USBDEV_CS_TSIZE_MASK (0x3ff << USBDEV_CS_TSIZE_BIT) 871 - # define USBDEV_CS_STALL (1 << 0) 872 - #define USBD_EP0RDSTAT 0xB0200040 873 - #define USBD_EP0WRSTAT 0xB0200044 874 - #define USBD_EP2WRSTAT 0xB0200048 875 - 
#define USBD_EP3WRSTAT 0xB020004C 876 - #define USBD_EP4RDSTAT 0xB0200050 877 - #define USBD_EP5RDSTAT 0xB0200054 878 - # define USBDEV_FSTAT_FLUSH (1 << 6) 879 - # define USBDEV_FSTAT_UF (1 << 5) 880 - # define USBDEV_FSTAT_OF (1 << 4) 881 - # define USBDEV_FSTAT_FCNT_BIT 0 882 - # define USBDEV_FSTAT_FCNT_MASK (0x0f << USBDEV_FSTAT_FCNT_BIT) 883 - #define USBD_ENABLE 0xB0200058 884 - # define USBDEV_ENABLE (1 << 1) 885 - # define USBDEV_CE (1 << 0) 886 - 887 - #endif /* !CONFIG_SOC_AU1200 */ 888 961 889 962 /* Ethernet Controllers */ 890 963 ··· 1149 1322 # define SYS_PF_MUST_BE_SET ((1 << 5) | (1 << 2)) 1150 1323 1151 1324 /* Au1200 only */ 1152 - #ifdef CONFIG_SOC_AU1200 1153 1325 #define SYS_PINFUNC_DMA (1 << 31) 1154 1326 #define SYS_PINFUNC_S0A (1 << 30) 1155 1327 #define SYS_PINFUNC_S1A (1 << 29) ··· 1176 1350 #define SYS_PINFUNC_P0B (1 << 4) 1177 1351 #define SYS_PINFUNC_U0T (1 << 3) 1178 1352 #define SYS_PINFUNC_S1B (1 << 2) 1179 - #endif 1180 1353 1181 1354 /* Power Management */ 1182 1355 #define SYS_SCRATCH0 0xB1900018 ··· 1231 1406 # define SYS_CS_MI2_MASK (0x7 << SYS_CS_MI2_BIT) 1232 1407 # define SYS_CS_DI2 (1 << 16) 1233 1408 # define SYS_CS_CI2 (1 << 15) 1234 - #ifdef CONFIG_SOC_AU1100 1409 + 1235 1410 # define SYS_CS_ML_BIT 7 1236 1411 # define SYS_CS_ML_MASK (0x7 << SYS_CS_ML_BIT) 1237 1412 # define SYS_CS_DL (1 << 6) 1238 1413 # define SYS_CS_CL (1 << 5) 1239 - #else 1414 + 1240 1415 # define SYS_CS_MUH_BIT 12 1241 1416 # define SYS_CS_MUH_MASK (0x7 << SYS_CS_MUH_BIT) 1242 1417 # define SYS_CS_DUH (1 << 11) ··· 1245 1420 # define SYS_CS_MUD_MASK (0x7 << SYS_CS_MUD_BIT) 1246 1421 # define SYS_CS_DUD (1 << 6) 1247 1422 # define SYS_CS_CUD (1 << 5) 1248 - #endif 1423 + 1249 1424 # define SYS_CS_MIR_BIT 2 1250 1425 # define SYS_CS_MIR_MASK (0x7 << SYS_CS_MIR_BIT) 1251 1426 # define SYS_CS_DIR (1 << 1) ··· 1292 1467 # define AC97C_RS (1 << 1) 1293 1468 # define AC97C_CE (1 << 0) 1294 1469 1295 - #if defined(CONFIG_SOC_AU1500) || 
defined(CONFIG_SOC_AU1550) 1296 - /* Au1500 PCI Controller */ 1297 - #define Au1500_CFG_BASE 0xB4005000 /* virtual, KSEG1 addr */ 1298 - #define Au1500_PCI_CMEM (Au1500_CFG_BASE + 0) 1299 - #define Au1500_PCI_CFG (Au1500_CFG_BASE + 4) 1300 - # define PCI_ERROR ((1 << 22) | (1 << 23) | (1 << 24) | \ 1301 - (1 << 25) | (1 << 26) | (1 << 27)) 1302 - #define Au1500_PCI_B2BMASK_CCH (Au1500_CFG_BASE + 8) 1303 - #define Au1500_PCI_B2B0_VID (Au1500_CFG_BASE + 0xC) 1304 - #define Au1500_PCI_B2B1_ID (Au1500_CFG_BASE + 0x10) 1305 - #define Au1500_PCI_MWMASK_DEV (Au1500_CFG_BASE + 0x14) 1306 - #define Au1500_PCI_MWBASE_REV_CCL (Au1500_CFG_BASE + 0x18) 1307 - #define Au1500_PCI_ERR_ADDR (Au1500_CFG_BASE + 0x1C) 1308 - #define Au1500_PCI_SPEC_INTACK (Au1500_CFG_BASE + 0x20) 1309 - #define Au1500_PCI_ID (Au1500_CFG_BASE + 0x100) 1310 - #define Au1500_PCI_STATCMD (Au1500_CFG_BASE + 0x104) 1311 - #define Au1500_PCI_CLASSREV (Au1500_CFG_BASE + 0x108) 1312 - #define Au1500_PCI_HDRTYPE (Au1500_CFG_BASE + 0x10C) 1313 - #define Au1500_PCI_MBAR (Au1500_CFG_BASE + 0x110) 1314 1470 1315 - #define Au1500_PCI_HDR 0xB4005100 /* virtual, KSEG1 addr */ 1316 - 1317 - /* 1318 - * All of our structures, like PCI resource, have 32-bit members. 1319 - * Drivers are expected to do an ioremap on the PCI MEM resource, but it's 1320 - * hard to store 0x4 0000 0000 in a 32-bit type. We require a small patch 1321 - * to __ioremap to check for addresses between (u32)Au1500_PCI_MEM_START and 1322 - * (u32)Au1500_PCI_MEM_END and change those to the full 36-bit PCI MEM 1323 - * addresses. For PCI I/O, it's simpler because we get to do the ioremap 1324 - * ourselves and then adjust the device's resources. 1471 + /* The PCI chip selects are outside the 32bit space, and since we can't 1472 + * just program the 36bit addresses into BARs, we have to take a chunk 1473 + * out of the 32bit space and reserve it for PCI. 
When these addresses 1474 + * are ioremap()ed, they'll be fixed up to the real 36bit address before 1475 + * being passed to the real ioremap function. 1325 1476 */ 1326 - #define Au1500_EXT_CFG 0x600000000ULL 1327 - #define Au1500_EXT_CFG_TYPE1 0x680000000ULL 1328 - #define Au1500_PCI_IO_START 0x500000000ULL 1329 - #define Au1500_PCI_IO_END 0x5000FFFFFULL 1330 - #define Au1500_PCI_MEM_START 0x440000000ULL 1331 - #define Au1500_PCI_MEM_END 0x44FFFFFFFULL 1477 + #define ALCHEMY_PCI_MEMWIN_START (AU1500_PCI_MEM_PHYS_ADDR >> 4) 1478 + #define ALCHEMY_PCI_MEMWIN_END (ALCHEMY_PCI_MEMWIN_START + 0x0FFFFFFF) 1332 1479 1333 - #define PCI_IO_START 0x00001000 1334 - #define PCI_IO_END 0x000FFFFF 1335 - #define PCI_MEM_START 0x40000000 1336 - #define PCI_MEM_END 0x4FFFFFFF 1480 + /* for PCI IO it's simpler because we get to do the ioremap ourselves and then 1481 + * adjust the device's resources. 1482 + */ 1483 + #define ALCHEMY_PCI_IOWIN_START 0x00001000 1484 + #define ALCHEMY_PCI_IOWIN_END 0x0000FFFF 1337 1485 1338 - #define PCI_FIRST_DEVFN (0 << 3) 1339 - #define PCI_LAST_DEVFN (19 << 3) 1486 + #ifdef CONFIG_PCI 1340 1487 1341 1488 #define IOPORT_RESOURCE_START 0x00001000 /* skip legacy probing */ 1342 1489 #define IOPORT_RESOURCE_END 0xffffffff 1343 1490 #define IOMEM_RESOURCE_START 0x10000000 1344 1491 #define IOMEM_RESOURCE_END 0xfffffffffULL 1345 1492 1346 - #else /* Au1000 and Au1100 and Au1200 */ 1493 + #else 1347 1494 1348 1495 /* Don't allow any legacy ports probing */ 1349 1496 #define IOPORT_RESOURCE_START 0x10000000 ··· 1323 1526 #define IOMEM_RESOURCE_START 0x10000000 1324 1527 #define IOMEM_RESOURCE_END 0xfffffffffULL 1325 1528 1326 - #define PCI_IO_START 0 1327 - #define PCI_IO_END 0 1328 - #define PCI_MEM_START 0 1329 - #define PCI_MEM_END 0 1330 - #define PCI_FIRST_DEVFN 0 1331 - #define PCI_LAST_DEVFN 0 1332 - 1333 1529 #endif 1530 + 1531 + /* PCI controller block register offsets */ 1532 + #define PCI_REG_CMEM 0x0000 1533 + #define PCI_REG_CONFIG 0x0004 
1534 + #define PCI_REG_B2BMASK_CCH 0x0008 1535 + #define PCI_REG_B2BBASE0_VID 0x000C 1536 + #define PCI_REG_B2BBASE1_SID 0x0010 1537 + #define PCI_REG_MWMASK_DEV 0x0014 1538 + #define PCI_REG_MWBASE_REV_CCL 0x0018 1539 + #define PCI_REG_ERR_ADDR 0x001C 1540 + #define PCI_REG_SPEC_INTACK 0x0020 1541 + #define PCI_REG_ID 0x0100 1542 + #define PCI_REG_STATCMD 0x0104 1543 + #define PCI_REG_CLASSREV 0x0108 1544 + #define PCI_REG_PARAM 0x010C 1545 + #define PCI_REG_MBAR 0x0110 1546 + #define PCI_REG_TIMEOUT 0x0140 1547 + 1548 + /* PCI controller block register bits */ 1549 + #define PCI_CMEM_E (1 << 28) /* enable cacheable memory */ 1550 + #define PCI_CMEM_CMBASE(x) (((x) & 0x3fff) << 14) 1551 + #define PCI_CMEM_CMMASK(x) ((x) & 0x3fff) 1552 + #define PCI_CONFIG_ERD (1 << 27) /* pci error during R/W */ 1553 + #define PCI_CONFIG_ET (1 << 26) /* error in target mode */ 1554 + #define PCI_CONFIG_EF (1 << 25) /* fatal error */ 1555 + #define PCI_CONFIG_EP (1 << 24) /* parity error */ 1556 + #define PCI_CONFIG_EM (1 << 23) /* multiple errors */ 1557 + #define PCI_CONFIG_BM (1 << 22) /* bad master error */ 1558 + #define PCI_CONFIG_PD (1 << 20) /* PCI Disable */ 1559 + #define PCI_CONFIG_BME (1 << 19) /* Byte Mask Enable for reads */ 1560 + #define PCI_CONFIG_NC (1 << 16) /* mark mem access non-coherent */ 1561 + #define PCI_CONFIG_IA (1 << 15) /* INTA# enabled (target mode) */ 1562 + #define PCI_CONFIG_IP (1 << 13) /* int on PCI_PERR# */ 1563 + #define PCI_CONFIG_IS (1 << 12) /* int on PCI_SERR# */ 1564 + #define PCI_CONFIG_IMM (1 << 11) /* int on master abort */ 1565 + #define PCI_CONFIG_ITM (1 << 10) /* int on target abort (as master) */ 1566 + #define PCI_CONFIG_ITT (1 << 9) /* int on target abort (as target) */ 1567 + #define PCI_CONFIG_IPB (1 << 8) /* int on PERR# in bus master acc */ 1568 + #define PCI_CONFIG_SIC_NO (0 << 6) /* no byte mask changes */ 1569 + #define PCI_CONFIG_SIC_BA_ADR (1 << 6) /* on byte/hw acc, invert adr bits */ 1570 + #define 
PCI_CONFIG_SIC_HWA_DAT (2 << 6) /* on halfword acc, swap data */ 1571 + #define PCI_CONFIG_SIC_ALL (3 << 6) /* swap data bytes on all accesses */ 1572 + #define PCI_CONFIG_ST (1 << 5) /* swap data by target transactions */ 1573 + #define PCI_CONFIG_SM (1 << 4) /* swap data from PCI ctl */ 1574 + #define PCI_CONFIG_AEN (1 << 3) /* enable internal arbiter */ 1575 + #define PCI_CONFIG_R2H (1 << 2) /* REQ2# to hi-prio arbiter */ 1576 + #define PCI_CONFIG_R1H (1 << 1) /* REQ1# to hi-prio arbiter */ 1577 + #define PCI_CONFIG_CH (1 << 0) /* PCI ctl to hi-prio arbiter */ 1578 + #define PCI_B2BMASK_B2BMASK(x) (((x) & 0xffff) << 16) 1579 + #define PCI_B2BMASK_CCH(x) ((x) & 0xffff) /* 16 upper bits of class code */ 1580 + #define PCI_B2BBASE0_VID_B0(x) (((x) & 0xffff) << 16) 1581 + #define PCI_B2BBASE0_VID_SV(x) ((x) & 0xffff) 1582 + #define PCI_B2BBASE1_SID_B1(x) (((x) & 0xffff) << 16) 1583 + #define PCI_B2BBASE1_SID_SI(x) ((x) & 0xffff) 1584 + #define PCI_MWMASKDEV_MWMASK(x) (((x) & 0xffff) << 16) 1585 + #define PCI_MWMASKDEV_DEVID(x) ((x) & 0xffff) 1586 + #define PCI_MWBASEREVCCL_BASE(x) (((x) & 0xffff) << 16) 1587 + #define PCI_MWBASEREVCCL_REV(x) (((x) & 0xff) << 8) 1588 + #define PCI_MWBASEREVCCL_CCL(x) ((x) & 0xff) 1589 + #define PCI_ID_DID(x) (((x) & 0xffff) << 16) 1590 + #define PCI_ID_VID(x) ((x) & 0xffff) 1591 + #define PCI_STATCMD_STATUS(x) (((x) & 0xffff) << 16) 1592 + #define PCI_STATCMD_CMD(x) ((x) & 0xffff) 1593 + #define PCI_CLASSREV_CLASS(x) (((x) & 0x00ffffff) << 8) 1594 + #define PCI_CLASSREV_REV(x) ((x) & 0xff) 1595 + #define PCI_PARAM_BIST(x) (((x) & 0xff) << 24) 1596 + #define PCI_PARAM_HT(x) (((x) & 0xff) << 16) 1597 + #define PCI_PARAM_LT(x) (((x) & 0xff) << 8) 1598 + #define PCI_PARAM_CLS(x) ((x) & 0xff) 1599 + #define PCI_TIMEOUT_RETRIES(x) (((x) & 0xff) << 8) /* max retries */ 1600 + #define PCI_TIMEOUT_TO(x) ((x) & 0xff) /* target ready timeout */ 1334 1601 1335 1602 #endif
-43
arch/mips/include/asm/mach-au1x00/au1xxx.h
··· 1 - /* 2 - * This program is free software; you can redistribute it and/or modify it 3 - * under the terms of the GNU General Public License as published by the 4 - * Free Software Foundation; either version 2 of the License, or (at your 5 - * option) any later version. 6 - * 7 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 8 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 9 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 10 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 11 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 12 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 13 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 14 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 15 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 16 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 17 - * 18 - * You should have received a copy of the GNU General Public License along 19 - * with this program; if not, write to the Free Software Foundation, Inc., 20 - * 675 Mass Ave, Cambridge, MA 02139, USA. 21 - */ 22 - 23 - #ifndef _AU1XXX_H_ 24 - #define _AU1XXX_H_ 25 - 26 - #include <asm/mach-au1x00/au1000.h> 27 - 28 - #if defined(CONFIG_MIPS_DB1000) || defined(CONFIG_MIPS_DB1100) || \ 29 - defined(CONFIG_MIPS_DB1500) || defined(CONFIG_MIPS_DB1550) 30 - #include <asm/mach-db1x00/db1x00.h> 31 - 32 - #elif defined(CONFIG_MIPS_PB1550) 33 - #include <asm/mach-pb1x00/pb1550.h> 34 - 35 - #elif defined(CONFIG_MIPS_PB1200) 36 - #include <asm/mach-pb1x00/pb1200.h> 37 - 38 - #elif defined(CONFIG_MIPS_DB1200) 39 - #include <asm/mach-db1x00/db1200.h> 40 - 41 - #endif 42 - 43 - #endif /* _AU1XXX_H_ */
+55 -59
arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
··· 126 126 #define SW_STATUS_INUSE (1 << 0) 127 127 128 128 /* Command 0 device IDs. */ 129 - #ifdef CONFIG_SOC_AU1550 130 - #define DSCR_CMD0_UART0_TX 0 131 - #define DSCR_CMD0_UART0_RX 1 132 - #define DSCR_CMD0_UART3_TX 2 133 - #define DSCR_CMD0_UART3_RX 3 134 - #define DSCR_CMD0_DMA_REQ0 4 135 - #define DSCR_CMD0_DMA_REQ1 5 136 - #define DSCR_CMD0_DMA_REQ2 6 137 - #define DSCR_CMD0_DMA_REQ3 7 138 - #define DSCR_CMD0_USBDEV_RX0 8 139 - #define DSCR_CMD0_USBDEV_TX0 9 140 - #define DSCR_CMD0_USBDEV_TX1 10 141 - #define DSCR_CMD0_USBDEV_TX2 11 142 - #define DSCR_CMD0_USBDEV_RX3 12 143 - #define DSCR_CMD0_USBDEV_RX4 13 144 - #define DSCR_CMD0_PSC0_TX 14 145 - #define DSCR_CMD0_PSC0_RX 15 146 - #define DSCR_CMD0_PSC1_TX 16 147 - #define DSCR_CMD0_PSC1_RX 17 148 - #define DSCR_CMD0_PSC2_TX 18 149 - #define DSCR_CMD0_PSC2_RX 19 150 - #define DSCR_CMD0_PSC3_TX 20 151 - #define DSCR_CMD0_PSC3_RX 21 152 - #define DSCR_CMD0_PCI_WRITE 22 153 - #define DSCR_CMD0_NAND_FLASH 23 154 - #define DSCR_CMD0_MAC0_RX 24 155 - #define DSCR_CMD0_MAC0_TX 25 156 - #define DSCR_CMD0_MAC1_RX 26 157 - #define DSCR_CMD0_MAC1_TX 27 158 - #endif /* CONFIG_SOC_AU1550 */ 129 + #define AU1550_DSCR_CMD0_UART0_TX 0 130 + #define AU1550_DSCR_CMD0_UART0_RX 1 131 + #define AU1550_DSCR_CMD0_UART3_TX 2 132 + #define AU1550_DSCR_CMD0_UART3_RX 3 133 + #define AU1550_DSCR_CMD0_DMA_REQ0 4 134 + #define AU1550_DSCR_CMD0_DMA_REQ1 5 135 + #define AU1550_DSCR_CMD0_DMA_REQ2 6 136 + #define AU1550_DSCR_CMD0_DMA_REQ3 7 137 + #define AU1550_DSCR_CMD0_USBDEV_RX0 8 138 + #define AU1550_DSCR_CMD0_USBDEV_TX0 9 139 + #define AU1550_DSCR_CMD0_USBDEV_TX1 10 140 + #define AU1550_DSCR_CMD0_USBDEV_TX2 11 141 + #define AU1550_DSCR_CMD0_USBDEV_RX3 12 142 + #define AU1550_DSCR_CMD0_USBDEV_RX4 13 143 + #define AU1550_DSCR_CMD0_PSC0_TX 14 144 + #define AU1550_DSCR_CMD0_PSC0_RX 15 145 + #define AU1550_DSCR_CMD0_PSC1_TX 16 146 + #define AU1550_DSCR_CMD0_PSC1_RX 17 147 + #define AU1550_DSCR_CMD0_PSC2_TX 18 148 + #define 
AU1550_DSCR_CMD0_PSC2_RX 19 149 + #define AU1550_DSCR_CMD0_PSC3_TX 20 150 + #define AU1550_DSCR_CMD0_PSC3_RX 21 151 + #define AU1550_DSCR_CMD0_PCI_WRITE 22 152 + #define AU1550_DSCR_CMD0_NAND_FLASH 23 153 + #define AU1550_DSCR_CMD0_MAC0_RX 24 154 + #define AU1550_DSCR_CMD0_MAC0_TX 25 155 + #define AU1550_DSCR_CMD0_MAC1_RX 26 156 + #define AU1550_DSCR_CMD0_MAC1_TX 27 159 157 160 - #ifdef CONFIG_SOC_AU1200 161 - #define DSCR_CMD0_UART0_TX 0 162 - #define DSCR_CMD0_UART0_RX 1 163 - #define DSCR_CMD0_UART1_TX 2 164 - #define DSCR_CMD0_UART1_RX 3 165 - #define DSCR_CMD0_DMA_REQ0 4 166 - #define DSCR_CMD0_DMA_REQ1 5 167 - #define DSCR_CMD0_MAE_BE 6 168 - #define DSCR_CMD0_MAE_FE 7 169 - #define DSCR_CMD0_SDMS_TX0 8 170 - #define DSCR_CMD0_SDMS_RX0 9 171 - #define DSCR_CMD0_SDMS_TX1 10 172 - #define DSCR_CMD0_SDMS_RX1 11 173 - #define DSCR_CMD0_AES_TX 13 174 - #define DSCR_CMD0_AES_RX 12 175 - #define DSCR_CMD0_PSC0_TX 14 176 - #define DSCR_CMD0_PSC0_RX 15 177 - #define DSCR_CMD0_PSC1_TX 16 178 - #define DSCR_CMD0_PSC1_RX 17 179 - #define DSCR_CMD0_CIM_RXA 18 180 - #define DSCR_CMD0_CIM_RXB 19 181 - #define DSCR_CMD0_CIM_RXC 20 182 - #define DSCR_CMD0_MAE_BOTH 21 183 - #define DSCR_CMD0_LCD 22 184 - #define DSCR_CMD0_NAND_FLASH 23 185 - #define DSCR_CMD0_PSC0_SYNC 24 186 - #define DSCR_CMD0_PSC1_SYNC 25 187 - #define DSCR_CMD0_CIM_SYNC 26 188 - #endif /* CONFIG_SOC_AU1200 */ 158 + #define AU1200_DSCR_CMD0_UART0_TX 0 159 + #define AU1200_DSCR_CMD0_UART0_RX 1 160 + #define AU1200_DSCR_CMD0_UART1_TX 2 161 + #define AU1200_DSCR_CMD0_UART1_RX 3 162 + #define AU1200_DSCR_CMD0_DMA_REQ0 4 163 + #define AU1200_DSCR_CMD0_DMA_REQ1 5 164 + #define AU1200_DSCR_CMD0_MAE_BE 6 165 + #define AU1200_DSCR_CMD0_MAE_FE 7 166 + #define AU1200_DSCR_CMD0_SDMS_TX0 8 167 + #define AU1200_DSCR_CMD0_SDMS_RX0 9 168 + #define AU1200_DSCR_CMD0_SDMS_TX1 10 169 + #define AU1200_DSCR_CMD0_SDMS_RX1 11 170 + #define AU1200_DSCR_CMD0_AES_TX 13 171 + #define AU1200_DSCR_CMD0_AES_RX 12 172 + #define 
AU1200_DSCR_CMD0_PSC0_TX 14 173 + #define AU1200_DSCR_CMD0_PSC0_RX 15 174 + #define AU1200_DSCR_CMD0_PSC1_TX 16 175 + #define AU1200_DSCR_CMD0_PSC1_RX 17 176 + #define AU1200_DSCR_CMD0_CIM_RXA 18 177 + #define AU1200_DSCR_CMD0_CIM_RXB 19 178 + #define AU1200_DSCR_CMD0_CIM_RXC 20 179 + #define AU1200_DSCR_CMD0_MAE_BOTH 21 180 + #define AU1200_DSCR_CMD0_LCD 22 181 + #define AU1200_DSCR_CMD0_NAND_FLASH 23 182 + #define AU1200_DSCR_CMD0_PSC0_SYNC 24 183 + #define AU1200_DSCR_CMD0_PSC1_SYNC 25 184 + #define AU1200_DSCR_CMD0_CIM_SYNC 26 189 185 190 186 #define DSCR_CMD0_THROTTLE 30 191 187 #define DSCR_CMD0_ALWAYS 31
+1
arch/mips/include/asm/mach-au1x00/au1xxx_ide.h
··· 58 58 #endif 59 59 int irq; 60 60 u32 regbase; 61 + int ddma_id; 61 62 } _auide_hwif; 62 63 63 64 /******************************************************************************/
-26
arch/mips/include/asm/mach-au1x00/au1xxx_psc.h
··· 33 33 #ifndef _AU1000_PSC_H_ 34 34 #define _AU1000_PSC_H_ 35 35 36 - /* The PSC base addresses. */ 37 - #ifdef CONFIG_SOC_AU1550 38 - #define PSC0_BASE_ADDR 0xb1a00000 39 - #define PSC1_BASE_ADDR 0xb1b00000 40 - #define PSC2_BASE_ADDR 0xb0a00000 41 - #define PSC3_BASE_ADDR 0xb0b00000 42 - #endif 43 - 44 - #ifdef CONFIG_SOC_AU1200 45 - #define PSC0_BASE_ADDR 0xb1a00000 46 - #define PSC1_BASE_ADDR 0xb1b00000 47 - #endif 48 - 49 36 /* 50 37 * The PSC select and control registers are common to all protocols. 51 38 */ ··· 66 79 #define PSC_AC97RST_OFFSET 0x00000024 67 80 #define PSC_AC97GPO_OFFSET 0x00000028 68 81 #define PSC_AC97GPI_OFFSET 0x0000002c 69 - 70 - #define AC97_PSC_SEL (AC97_PSC_BASE + PSC_SEL_OFFSET) 71 - #define AC97_PSC_CTRL (AC97_PSC_BASE + PSC_CTRL_OFFSET) 72 - #define PSC_AC97CFG (AC97_PSC_BASE + PSC_AC97CFG_OFFSET) 73 - #define PSC_AC97MSK (AC97_PSC_BASE + PSC_AC97MSK_OFFSET) 74 - #define PSC_AC97PCR (AC97_PSC_BASE + PSC_AC97PCR_OFFSET) 75 - #define PSC_AC97STAT (AC97_PSC_BASE + PSC_AC97STAT_OFFSET) 76 - #define PSC_AC97EVNT (AC97_PSC_BASE + PSC_AC97EVNT_OFFSET) 77 - #define PSC_AC97TXRX (AC97_PSC_BASE + PSC_AC97TXRX_OFFSET) 78 - #define PSC_AC97CDC (AC97_PSC_BASE + PSC_AC97CDC_OFFSET) 79 - #define PSC_AC97RST (AC97_PSC_BASE + PSC_AC97RST_OFFSET) 80 - #define PSC_AC97GPO (AC97_PSC_BASE + PSC_AC97GPO_OFFSET) 81 - #define PSC_AC97GPI (AC97_PSC_BASE + PSC_AC97GPI_OFFSET) 82 82 83 83 /* AC97 Config Register. */ 84 84 #define PSC_AC97CFG_RT_MASK (3 << 30)
+2 -29
arch/mips/include/asm/mach-au1x00/gpio-au1000.h
··· 347 347 348 348 /**********************************************************************/ 349 349 350 - /* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before 351 - * SYS_PININPUTEN is written to at least once. On Au1550/Au1200 this 352 - * register enables use of GPIOs as wake source. 353 - */ 354 - static inline void alchemy_gpio1_input_enable(void) 355 - { 356 - void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); 357 - __raw_writel(0, base + SYS_PININPUTEN); /* the write op is key */ 358 - wmb(); 359 - } 360 - 361 350 /* GPIO2 shared interrupts and control */ 362 351 363 352 static inline void __alchemy_gpio2_mod_int(int gpio2, int en) ··· 550 561 551 562 #ifndef CONFIG_GPIOLIB 552 563 564 + #ifdef CONFIG_ALCHEMY_GPIOINT_AU1000 553 565 554 566 #ifndef CONFIG_ALCHEMY_GPIO_INDIRECT /* case (4) */ 555 567 ··· 655 665 656 666 #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ 657 667 658 - 659 - #else /* CONFIG GPIOLIB */ 660 - 661 - 662 - /* using gpiolib to provide up to 2 gpio_chips for on-chip gpios */ 663 - #ifndef CONFIG_ALCHEMY_GPIO_INDIRECT /* case (2) */ 664 - 665 - /* get everything through gpiolib */ 666 - #define gpio_to_irq __gpio_to_irq 667 - #define gpio_get_value __gpio_get_value 668 - #define gpio_set_value __gpio_set_value 669 - #define gpio_cansleep __gpio_cansleep 670 - #define irq_to_gpio alchemy_irq_to_gpio 671 - 672 - #include <asm-generic/gpio.h> 673 - 674 - #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ 675 - 668 + #endif /* CONFIG_ALCHEMY_GPIOINT_AU1000 */ 676 669 677 670 #endif /* !CONFIG_GPIOLIB */ 678 671
+76 -3
arch/mips/include/asm/mach-au1x00/gpio.h
··· 1 + /* 2 + * Alchemy GPIO support. 3 + * 4 + * With CONFIG_GPIOLIB=y different types of on-chip GPIO can be supported within 5 + * the same kernel image. 6 + * With CONFIG_GPIOLIB=n, your board must select ALCHEMY_GPIOINT_AU1XXX for the 7 + * appropriate CPU type (AU1000 currently). 8 + */ 9 + 1 10 #ifndef _ALCHEMY_GPIO_H_ 2 11 #define _ALCHEMY_GPIO_H_ 3 12 4 - #if defined(CONFIG_ALCHEMY_GPIOINT_AU1000) 5 - 13 + #include <asm/mach-au1x00/au1000.h> 6 14 #include <asm/mach-au1x00/gpio-au1000.h> 7 15 8 - #endif 16 + /* On Au1000, Au1500 and Au1100 GPIOs won't work as inputs before 17 + * SYS_PININPUTEN is written to at least once. On Au1550/Au1200/Au1300 this 18 + * register enables use of GPIOs as wake source. 19 + */ 20 + static inline void alchemy_gpio1_input_enable(void) 21 + { 22 + void __iomem *base = (void __iomem *)KSEG1ADDR(AU1000_SYS_PHYS_ADDR); 23 + __raw_writel(0, base + 0x110); /* the write op is key */ 24 + wmb(); 25 + } 26 + 27 + 28 + /* Linux gpio framework integration. 29 + * 30 + * 4 use cases of Alchemy GPIOS: 31 + *(1) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=y: 32 + * Board must register gpiochips. 33 + *(2) GPIOLIB=y, ALCHEMY_GPIO_INDIRECT=n: 34 + * A gpiochip for the 75 GPIOs is registered. 35 + * 36 + *(3) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=y: 37 + * the boards' gpio.h must provide the linux gpio wrapper functions, 38 + * 39 + *(4) GPIOLIB=n, ALCHEMY_GPIO_INDIRECT=n: 40 + * inlinable gpio functions are provided which enable access to the 41 + * Au1300 gpios only by using the numbers straight out of the data- 42 + * sheets. 43 + 44 + * Cases 1 and 3 are intended for boards which want to provide their own 45 + * GPIO namespace and -operations (i.e. for example you have 8 GPIOs 46 + * which are in part provided by spare Au1300 GPIO pins and in part by 47 + * an external FPGA but you still want them to be accssible in linux 48 + * as gpio0-7. The board can of course use the alchemy_gpioX_* functions 49 + * as required). 
50 + */ 51 + 52 + #ifdef CONFIG_GPIOLIB 53 + 54 + /* wraps the cpu-dependent irq_to_gpio functions */ 55 + /* FIXME: gpiolib needs an irq_to_gpio hook */ 56 + static inline int __au_irq_to_gpio(unsigned int irq) 57 + { 58 + switch (alchemy_get_cputype()) { 59 + case ALCHEMY_CPU_AU1000...ALCHEMY_CPU_AU1200: 60 + return alchemy_irq_to_gpio(irq); 61 + } 62 + return -EINVAL; 63 + } 64 + 65 + 66 + /* using gpiolib to provide up to 2 gpio_chips for on-chip gpios */ 67 + #ifndef CONFIG_ALCHEMY_GPIO_INDIRECT /* case (2) */ 68 + 69 + /* get everything through gpiolib */ 70 + #define gpio_to_irq __gpio_to_irq 71 + #define gpio_get_value __gpio_get_value 72 + #define gpio_set_value __gpio_set_value 73 + #define gpio_cansleep __gpio_cansleep 74 + #define irq_to_gpio __au_irq_to_gpio 75 + 76 + #include <asm-generic/gpio.h> 77 + 78 + #endif /* !CONFIG_ALCHEMY_GPIO_INDIRECT */ 79 + 80 + 81 + #endif /* CONFIG_GPIOLIB */ 9 82 10 83 #endif /* _ALCHEMY_GPIO_H_ */
-2
arch/mips/include/asm/mach-db1x00/db1200.h
··· 46 46 47 47 #define IDE_PHYS_ADDR 0x18800000 48 48 #define IDE_REG_SHIFT 5 49 - #define IDE_DDMA_REQ DSCR_CMD0_DMA_REQ1 50 - #define IDE_RQSIZE 128 51 49 52 50 #define DB1200_IDE_PHYS_ADDR IDE_PHYS_ADDR 53 51 #define DB1200_IDE_PHYS_LEN (16 << IDE_REG_SHIFT)
+8 -8
arch/mips/include/asm/mach-db1x00/db1x00.h
··· 31 31 32 32 #ifdef CONFIG_MIPS_DB1550 33 33 34 - #define DBDMA_AC97_TX_CHAN DSCR_CMD0_PSC1_TX 35 - #define DBDMA_AC97_RX_CHAN DSCR_CMD0_PSC1_RX 36 - #define DBDMA_I2S_TX_CHAN DSCR_CMD0_PSC3_TX 37 - #define DBDMA_I2S_RX_CHAN DSCR_CMD0_PSC3_RX 34 + #define DBDMA_AC97_TX_CHAN AU1550_DSCR_CMD0_PSC1_TX 35 + #define DBDMA_AC97_RX_CHAN AU1550_DSCR_CMD0_PSC1_RX 36 + #define DBDMA_I2S_TX_CHAN AU1550_DSCR_CMD0_PSC3_TX 37 + #define DBDMA_I2S_RX_CHAN AU1550_DSCR_CMD0_PSC3_RX 38 38 39 - #define SPI_PSC_BASE PSC0_BASE_ADDR 40 - #define AC97_PSC_BASE PSC1_BASE_ADDR 41 - #define SMBUS_PSC_BASE PSC2_BASE_ADDR 42 - #define I2S_PSC_BASE PSC3_BASE_ADDR 39 + #define SPI_PSC_BASE AU1550_PSC0_PHYS_ADDR 40 + #define AC97_PSC_BASE AU1550_PSC1_PHYS_ADDR 41 + #define SMBUS_PSC_BASE AU1550_PSC2_PHYS_ADDR 42 + #define I2S_PSC_BASE AU1550_PSC3_PHYS_ADDR 43 43 44 44 #define NAND_PHYS_ADDR 0x20000000 45 45
+8 -10
arch/mips/include/asm/mach-pb1x00/pb1200.h
··· 28 28 #include <asm/mach-au1x00/au1000.h> 29 29 #include <asm/mach-au1x00/au1xxx_psc.h> 30 30 31 - #define DBDMA_AC97_TX_CHAN DSCR_CMD0_PSC1_TX 32 - #define DBDMA_AC97_RX_CHAN DSCR_CMD0_PSC1_RX 33 - #define DBDMA_I2S_TX_CHAN DSCR_CMD0_PSC1_TX 34 - #define DBDMA_I2S_RX_CHAN DSCR_CMD0_PSC1_RX 31 + #define DBDMA_AC97_TX_CHAN AU1200_DSCR_CMD0_PSC1_TX 32 + #define DBDMA_AC97_RX_CHAN AU1200_DSCR_CMD0_PSC1_RX 33 + #define DBDMA_I2S_TX_CHAN AU1200_DSCR_CMD0_PSC1_TX 34 + #define DBDMA_I2S_RX_CHAN AU1200_DSCR_CMD0_PSC1_RX 35 35 36 36 /* 37 37 * SPI and SMB are muxed on the Pb1200 board. 38 38 * Refer to board documentation. 39 39 */ 40 - #define SPI_PSC_BASE PSC0_BASE_ADDR 41 - #define SMBUS_PSC_BASE PSC0_BASE_ADDR 40 + #define SPI_PSC_BASE AU1550_PSC0_PHYS_ADDR 41 + #define SMBUS_PSC_BASE AU1550_PSC0_PHYS_ADDR 42 42 /* 43 43 * AC97 and I2S are muxed on the Pb1200 board. 44 44 * Refer to board documentation. 45 45 */ 46 - #define AC97_PSC_BASE PSC1_BASE_ADDR 47 - #define I2S_PSC_BASE PSC1_BASE_ADDR 46 + #define AC97_PSC_BASE AU1550_PSC1_PHYS_ADDR 47 + #define I2S_PSC_BASE AU1550_PSC1_PHYS_ADDR 48 48 49 49 50 50 #define BCSR_SYSTEM_VDDI 0x001F ··· 76 76 #define IDE_REG_SHIFT 5 77 77 #define IDE_PHYS_LEN (16 << IDE_REG_SHIFT) 78 78 #define IDE_INT PB1200_IDE_INT 79 - #define IDE_DDMA_REQ DSCR_CMD0_DMA_REQ1 80 - #define IDE_RQSIZE 128 81 79 82 80 #define NAND_PHYS_ADDR 0x1C000000 83 81
+8 -8
arch/mips/include/asm/mach-pb1x00/pb1550.h
··· 30 30 #include <linux/types.h> 31 31 #include <asm/mach-au1x00/au1xxx_psc.h> 32 32 33 - #define DBDMA_AC97_TX_CHAN DSCR_CMD0_PSC1_TX 34 - #define DBDMA_AC97_RX_CHAN DSCR_CMD0_PSC1_RX 35 - #define DBDMA_I2S_TX_CHAN DSCR_CMD0_PSC3_TX 36 - #define DBDMA_I2S_RX_CHAN DSCR_CMD0_PSC3_RX 33 + #define DBDMA_AC97_TX_CHAN AU1550_DSCR_CMD0_PSC1_TX 34 + #define DBDMA_AC97_RX_CHAN AU1550_DSCR_CMD0_PSC1_RX 35 + #define DBDMA_I2S_TX_CHAN AU1550_DSCR_CMD0_PSC3_TX 36 + #define DBDMA_I2S_RX_CHAN AU1550_DSCR_CMD0_PSC3_RX 37 37 38 - #define SPI_PSC_BASE PSC0_BASE_ADDR 39 - #define AC97_PSC_BASE PSC1_BASE_ADDR 40 - #define SMBUS_PSC_BASE PSC2_BASE_ADDR 41 - #define I2S_PSC_BASE PSC3_BASE_ADDR 38 + #define SPI_PSC_BASE AU1550_PSC0_PHYS_ADDR 39 + #define AC97_PSC_BASE AU1550_PSC1_PHYS_ADDR 40 + #define SMBUS_PSC_BASE AU1550_PSC2_PHYS_ADDR 41 + #define I2S_PSC_BASE AU1550_PSC3_PHYS_ADDR 42 42 43 43 /* 44 44 * Timing values as described in databook, * ns value stripped of
+3 -3
arch/mips/include/asm/mipsprom.h
··· 1 - #ifndef __ASM_MIPS_PROM_H 2 - #define __ASM_MIPS_PROM_H 1 + #ifndef __ASM_MIPSPROM_H 2 + #define __ASM_MIPSPROM_H 3 3 4 4 #define PROM_RESET 0 5 5 #define PROM_EXEC 1 ··· 73 73 74 74 extern char *prom_getenv(char *); 75 75 76 - #endif /* __ASM_MIPS_PROM_H */ 76 + #endif /* __ASM_MIPSPROM_H */
+8
arch/mips/include/asm/mipsregs.h
··· 1006 1006 #define write_c0_perfctrl0(val) __write_32bit_c0_register($25, 0, val) 1007 1007 #define read_c0_perfcntr0() __read_32bit_c0_register($25, 1) 1008 1008 #define write_c0_perfcntr0(val) __write_32bit_c0_register($25, 1, val) 1009 + #define read_c0_perfcntr0_64() __read_64bit_c0_register($25, 1) 1010 + #define write_c0_perfcntr0_64(val) __write_64bit_c0_register($25, 1, val) 1009 1011 #define read_c0_perfctrl1() __read_32bit_c0_register($25, 2) 1010 1012 #define write_c0_perfctrl1(val) __write_32bit_c0_register($25, 2, val) 1011 1013 #define read_c0_perfcntr1() __read_32bit_c0_register($25, 3) 1012 1014 #define write_c0_perfcntr1(val) __write_32bit_c0_register($25, 3, val) 1015 + #define read_c0_perfcntr1_64() __read_64bit_c0_register($25, 3) 1016 + #define write_c0_perfcntr1_64(val) __write_64bit_c0_register($25, 3, val) 1013 1017 #define read_c0_perfctrl2() __read_32bit_c0_register($25, 4) 1014 1018 #define write_c0_perfctrl2(val) __write_32bit_c0_register($25, 4, val) 1015 1019 #define read_c0_perfcntr2() __read_32bit_c0_register($25, 5) 1016 1020 #define write_c0_perfcntr2(val) __write_32bit_c0_register($25, 5, val) 1021 + #define read_c0_perfcntr2_64() __read_64bit_c0_register($25, 5) 1022 + #define write_c0_perfcntr2_64(val) __write_64bit_c0_register($25, 5, val) 1017 1023 #define read_c0_perfctrl3() __read_32bit_c0_register($25, 6) 1018 1024 #define write_c0_perfctrl3(val) __write_32bit_c0_register($25, 6, val) 1019 1025 #define read_c0_perfcntr3() __read_32bit_c0_register($25, 7) 1020 1026 #define write_c0_perfcntr3(val) __write_32bit_c0_register($25, 7, val) 1027 + #define read_c0_perfcntr3_64() __read_64bit_c0_register($25, 7) 1028 + #define write_c0_perfcntr3_64(val) __write_64bit_c0_register($25, 7, val) 1021 1029 1022 1030 /* RM9000 PerfCount performance counter register */ 1023 1031 #define read_c0_perfcount() __read_64bit_c0_register($25, 0)
+3 -3
arch/mips/include/asm/prom.h
··· 8 8 * published by the Free Software Foundation. 9 9 * 10 10 */ 11 - #ifndef __ASM_MIPS_PROM_H 12 - #define __ASM_MIPS_PROM_H 11 + #ifndef __ASM_PROM_H 12 + #define __ASM_PROM_H 13 13 14 14 #ifdef CONFIG_OF 15 15 #include <asm/bootinfo.h> ··· 25 25 static inline void device_tree_init(void) { } 26 26 #endif /* CONFIG_OF */ 27 27 28 - #endif /* _ASM_MIPS_PROM_H */ 28 + #endif /* __ASM_PROM_H */
+6
arch/mips/include/asm/regdef.h
··· 6 6 * Copyright (C) 1985 MIPS Computer Systems, Inc. 7 7 * Copyright (C) 1994, 95, 99, 2003 by Ralf Baechle 8 8 * Copyright (C) 1990 - 1992, 1999 Silicon Graphics, Inc. 9 + * Copyright (C) 2011 Wind River Systems, 10 + * written by Ralf Baechle <ralf@linux-mips.org> 9 11 */ 10 12 #ifndef _ASM_REGDEF_H 11 13 #define _ASM_REGDEF_H ··· 32 30 #define t2 $10 33 31 #define t3 $11 34 32 #define t4 $12 33 + #define ta0 $12 35 34 #define t5 $13 35 + #define ta1 $13 36 36 #define t6 $14 37 + #define ta2 $14 37 38 #define t7 $15 39 + #define ta3 $15 38 40 #define s0 $16 /* callee saved */ 39 41 #define s1 $17 40 42 #define s2 $18
+39 -103
arch/mips/jz4740/gpio.c
··· 17 17 #include <linux/module.h> 18 18 #include <linux/init.h> 19 19 20 - #include <linux/spinlock.h> 21 - #include <linux/syscore_ops.h> 22 20 #include <linux/io.h> 23 21 #include <linux/gpio.h> 24 22 #include <linux/delay.h> ··· 27 29 #include <linux/seq_file.h> 28 30 29 31 #include <asm/mach-jz4740/base.h> 32 + 33 + #include "irq.h" 30 34 31 35 #define JZ4740_GPIO_BASE_A (32*0) 32 36 #define JZ4740_GPIO_BASE_B (32*1) ··· 77 77 struct jz_gpio_chip { 78 78 unsigned int irq; 79 79 unsigned int irq_base; 80 - uint32_t wakeup; 81 - uint32_t suspend_mask; 82 80 uint32_t edge_trigger_both; 83 81 84 82 void __iomem *base; 85 - 86 - spinlock_t lock; 87 83 88 84 struct gpio_chip gpio_chip; 89 85 }; ··· 98 102 99 103 static inline struct jz_gpio_chip *irq_to_jz_gpio_chip(struct irq_data *data) 100 104 { 101 - return irq_data_get_irq_chip_data(data); 105 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); 106 + return gc->private; 102 107 } 103 108 104 109 static inline void jz_gpio_write_bit(unsigned int gpio, unsigned int reg) ··· 301 304 { 302 305 uint32_t flag; 303 306 unsigned int gpio_irq; 304 - unsigned int gpio_bank; 305 307 struct jz_gpio_chip *chip = irq_desc_get_handler_data(desc); 306 308 307 - gpio_bank = JZ4740_IRQ_GPIO0 - irq; 308 - 309 309 flag = readl(chip->base + JZ_REG_GPIO_FLAG); 310 - 311 310 if (!flag) 312 311 return; 313 312 314 - gpio_irq = __fls(flag); 313 + gpio_irq = chip->irq_base + __fls(flag); 315 314 316 - jz_gpio_check_trigger_both(chip, irq); 317 - 318 - gpio_irq += (gpio_bank << 5) + JZ4740_IRQ_GPIO(0); 315 + jz_gpio_check_trigger_both(chip, gpio_irq); 319 316 320 317 generic_handle_irq(gpio_irq); 321 318 }; ··· 320 329 writel(IRQ_TO_BIT(data->irq), chip->base + reg); 321 330 } 322 331 323 - static void jz_gpio_irq_mask(struct irq_data *data) 324 - { 325 - jz_gpio_set_irq_bit(data, JZ_REG_GPIO_MASK_SET); 326 - }; 327 - 328 332 static void jz_gpio_irq_unmask(struct irq_data *data) 329 333 { 330 334 struct jz_gpio_chip *chip 
= irq_to_jz_gpio_chip(data); 331 335 332 336 jz_gpio_check_trigger_both(chip, data->irq); 333 - 334 - jz_gpio_set_irq_bit(data, JZ_REG_GPIO_MASK_CLEAR); 337 + irq_gc_unmask_enable_reg(data); 335 338 }; 336 339 337 340 /* TODO: Check if function is gpio */ ··· 338 353 339 354 static void jz_gpio_irq_shutdown(struct irq_data *data) 340 355 { 341 - jz_gpio_irq_mask(data); 356 + irq_gc_mask_disable_reg(data); 342 357 343 358 /* Set direction to input */ 344 359 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_DIRECTION_CLEAR); 345 360 jz_gpio_set_irq_bit(data, JZ_REG_GPIO_SELECT_CLEAR); 346 361 } 347 - 348 - static void jz_gpio_irq_ack(struct irq_data *data) 349 - { 350 - jz_gpio_set_irq_bit(data, JZ_REG_GPIO_FLAG_CLEAR); 351 - }; 352 362 353 363 static int jz_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) 354 364 { ··· 388 408 static int jz_gpio_irq_set_wake(struct irq_data *data, unsigned int on) 389 409 { 390 410 struct jz_gpio_chip *chip = irq_to_jz_gpio_chip(data); 391 - spin_lock(&chip->lock); 392 - if (on) 393 - chip->wakeup |= IRQ_TO_BIT(data->irq); 394 - else 395 - chip->wakeup &= ~IRQ_TO_BIT(data->irq); 396 - spin_unlock(&chip->lock); 397 411 412 + irq_gc_set_wake(data, on); 398 413 irq_set_irq_wake(chip->irq, on); 414 + 399 415 return 0; 400 416 } 401 - 402 - static struct irq_chip jz_gpio_irq_chip = { 403 - .name = "GPIO", 404 - .irq_mask = jz_gpio_irq_mask, 405 - .irq_unmask = jz_gpio_irq_unmask, 406 - .irq_ack = jz_gpio_irq_ack, 407 - .irq_startup = jz_gpio_irq_startup, 408 - .irq_shutdown = jz_gpio_irq_shutdown, 409 - .irq_set_type = jz_gpio_irq_set_type, 410 - .irq_set_wake = jz_gpio_irq_set_wake, 411 - .flags = IRQCHIP_SET_TYPE_MASKED, 412 - }; 413 - 414 - /* 415 - * This lock class tells lockdep that GPIO irqs are in a different 416 - * category than their parents, so it won't report false recursion. 
417 - */ 418 - static struct lock_class_key gpio_lock_class; 419 417 420 418 #define JZ4740_GPIO_CHIP(_bank) { \ 421 419 .irq_base = JZ4740_IRQ_GPIO_BASE_ ## _bank, \ ··· 416 458 JZ4740_GPIO_CHIP(D), 417 459 }; 418 460 419 - static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip) 420 - { 421 - chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK); 422 - writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET); 423 - writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR); 424 - } 425 - 426 - static int jz4740_gpio_suspend(void) 427 - { 428 - int i; 429 - 430 - for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++) 431 - jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]); 432 - 433 - return 0; 434 - } 435 - 436 - static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip) 437 - { 438 - uint32_t mask = chip->suspend_mask; 439 - 440 - writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR); 441 - writel(mask, chip->base + JZ_REG_GPIO_MASK_SET); 442 - } 443 - 444 - static void jz4740_gpio_resume(void) 445 - { 446 - int i; 447 - 448 - for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--) 449 - jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]); 450 - } 451 - 452 - static struct syscore_ops jz4740_gpio_syscore_ops = { 453 - .suspend = jz4740_gpio_suspend, 454 - .resume = jz4740_gpio_resume, 455 - }; 456 - 457 461 static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id) 458 462 { 459 - int irq; 460 - 461 - spin_lock_init(&chip->lock); 463 + struct irq_chip_generic *gc; 464 + struct irq_chip_type *ct; 462 465 463 466 chip->base = ioremap(JZ4740_GPIO_BASE_ADDR + (id * 0x100), 0x100); 464 - 465 - gpiochip_add(&chip->gpio_chip); 466 467 467 468 chip->irq = JZ4740_IRQ_INTC_GPIO(id); 468 469 irq_set_handler_data(chip->irq, chip); 469 470 irq_set_chained_handler(chip->irq, jz_gpio_irq_demux_handler); 470 471 471 - for (irq = chip->irq_base; irq < chip->irq_base + chip->gpio_chip.ngpio; ++irq) { 472 - irq_set_lockdep_class(irq, &gpio_lock_class); 473 
- irq_set_chip_data(irq, chip); 474 - irq_set_chip_and_handler(irq, &jz_gpio_irq_chip, 475 - handle_level_irq); 476 - } 472 + gc = irq_alloc_generic_chip(chip->gpio_chip.label, 1, chip->irq_base, 473 + chip->base, handle_level_irq); 474 + 475 + gc->wake_enabled = IRQ_MSK(chip->gpio_chip.ngpio); 476 + gc->private = chip; 477 + 478 + ct = gc->chip_types; 479 + ct->regs.enable = JZ_REG_GPIO_MASK_CLEAR; 480 + ct->regs.disable = JZ_REG_GPIO_MASK_SET; 481 + ct->regs.ack = JZ_REG_GPIO_FLAG_CLEAR; 482 + 483 + ct->chip.name = "GPIO"; 484 + ct->chip.irq_mask = irq_gc_mask_disable_reg; 485 + ct->chip.irq_unmask = jz_gpio_irq_unmask; 486 + ct->chip.irq_ack = irq_gc_ack_set_bit; 487 + ct->chip.irq_suspend = jz4740_irq_suspend; 488 + ct->chip.irq_resume = jz4740_irq_resume; 489 + ct->chip.irq_startup = jz_gpio_irq_startup; 490 + ct->chip.irq_shutdown = jz_gpio_irq_shutdown; 491 + ct->chip.irq_set_type = jz_gpio_irq_set_type; 492 + ct->chip.irq_set_wake = jz_gpio_irq_set_wake; 493 + ct->chip.flags = IRQCHIP_SET_TYPE_MASKED; 494 + 495 + irq_setup_generic_chip(gc, IRQ_MSK(chip->gpio_chip.ngpio), 496 + IRQ_GC_INIT_NESTED_LOCK, 0, IRQ_NOPROBE | IRQ_LEVEL); 497 + 498 + gpiochip_add(&chip->gpio_chip); 477 499 } 478 500 479 501 static int __init jz4740_gpio_init(void) ··· 462 524 463 525 for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i) 464 526 jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i); 465 - 466 - register_syscore_ops(&jz4740_gpio_syscore_ops); 467 527 468 528 printk(KERN_INFO "JZ4740 GPIO initialized\n"); 469 529
+39 -55
arch/mips/jz4740/irq.c
··· 32 32 #include <asm/mach-jz4740/base.h> 33 33 34 34 static void __iomem *jz_intc_base; 35 - static uint32_t jz_intc_wakeup; 36 - static uint32_t jz_intc_saved; 37 35 38 36 #define JZ_REG_INTC_STATUS 0x00 39 37 #define JZ_REG_INTC_MASK 0x04 40 38 #define JZ_REG_INTC_SET_MASK 0x08 41 39 #define JZ_REG_INTC_CLEAR_MASK 0x0c 42 40 #define JZ_REG_INTC_PENDING 0x10 43 - 44 - #define IRQ_BIT(x) BIT((x) - JZ4740_IRQ_BASE) 45 - 46 - static inline unsigned long intc_irq_bit(struct irq_data *data) 47 - { 48 - return (unsigned long)irq_data_get_irq_chip_data(data); 49 - } 50 - 51 - static void intc_irq_unmask(struct irq_data *data) 52 - { 53 - writel(intc_irq_bit(data), jz_intc_base + JZ_REG_INTC_CLEAR_MASK); 54 - } 55 - 56 - static void intc_irq_mask(struct irq_data *data) 57 - { 58 - writel(intc_irq_bit(data), jz_intc_base + JZ_REG_INTC_SET_MASK); 59 - } 60 - 61 - static int intc_irq_set_wake(struct irq_data *data, unsigned int on) 62 - { 63 - if (on) 64 - jz_intc_wakeup |= intc_irq_bit(data); 65 - else 66 - jz_intc_wakeup &= ~intc_irq_bit(data); 67 - 68 - return 0; 69 - } 70 - 71 - static struct irq_chip intc_irq_type = { 72 - .name = "INTC", 73 - .irq_mask = intc_irq_mask, 74 - .irq_mask_ack = intc_irq_mask, 75 - .irq_unmask = intc_irq_unmask, 76 - .irq_set_wake = intc_irq_set_wake, 77 - }; 78 41 79 42 static irqreturn_t jz4740_cascade(int irq, void *data) 80 43 { ··· 51 88 return IRQ_HANDLED; 52 89 } 53 90 91 + static void jz4740_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask) 92 + { 93 + struct irq_chip_regs *regs = &gc->chip_types->regs; 94 + 95 + writel(mask, gc->reg_base + regs->enable); 96 + writel(~mask, gc->reg_base + regs->disable); 97 + } 98 + 99 + void jz4740_irq_suspend(struct irq_data *data) 100 + { 101 + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); 102 + jz4740_irq_set_mask(gc, gc->wake_active); 103 + } 104 + 105 + void jz4740_irq_resume(struct irq_data *data) 106 + { 107 + struct irq_chip_generic *gc = 
irq_data_get_irq_chip_data(data); 108 + jz4740_irq_set_mask(gc, gc->mask_cache); 109 + } 110 + 54 111 static struct irqaction jz4740_cascade_action = { 55 112 .handler = jz4740_cascade, 56 113 .name = "JZ4740 cascade interrupt", ··· 78 95 79 96 void __init arch_init_irq(void) 80 97 { 81 - int i; 98 + struct irq_chip_generic *gc; 99 + struct irq_chip_type *ct; 100 + 82 101 mips_cpu_irq_init(); 83 102 84 103 jz_intc_base = ioremap(JZ4740_INTC_BASE_ADDR, 0x14); ··· 88 103 /* Mask all irqs */ 89 104 writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK); 90 105 91 - for (i = JZ4740_IRQ_BASE; i < JZ4740_IRQ_BASE + 32; i++) { 92 - irq_set_chip_data(i, (void *)IRQ_BIT(i)); 93 - irq_set_chip_and_handler(i, &intc_irq_type, handle_level_irq); 94 - } 106 + gc = irq_alloc_generic_chip("INTC", 1, JZ4740_IRQ_BASE, jz_intc_base, 107 + handle_level_irq); 108 + 109 + gc->wake_enabled = IRQ_MSK(32); 110 + 111 + ct = gc->chip_types; 112 + ct->regs.enable = JZ_REG_INTC_CLEAR_MASK; 113 + ct->regs.disable = JZ_REG_INTC_SET_MASK; 114 + ct->chip.irq_unmask = irq_gc_unmask_enable_reg; 115 + ct->chip.irq_mask = irq_gc_mask_disable_reg; 116 + ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; 117 + ct->chip.irq_set_wake = irq_gc_set_wake; 118 + ct->chip.irq_suspend = jz4740_irq_suspend; 119 + ct->chip.irq_resume = jz4740_irq_resume; 120 + 121 + irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); 95 122 96 123 setup_irq(2, &jz4740_cascade_action); 97 124 } ··· 117 120 do_IRQ(3); 118 121 else 119 122 spurious_interrupt(); 120 - } 121 - 122 - void jz4740_intc_suspend(void) 123 - { 124 - jz_intc_saved = readl(jz_intc_base + JZ_REG_INTC_MASK); 125 - writel(~jz_intc_wakeup, jz_intc_base + JZ_REG_INTC_SET_MASK); 126 - writel(jz_intc_wakeup, jz_intc_base + JZ_REG_INTC_CLEAR_MASK); 127 - } 128 - 129 - void jz4740_intc_resume(void) 130 - { 131 - writel(~jz_intc_saved, jz_intc_base + JZ_REG_INTC_CLEAR_MASK); 132 - writel(jz_intc_saved, jz_intc_base + JZ_REG_INTC_SET_MASK); 133 123 } 
134 124 135 125 #ifdef CONFIG_DEBUG_FS
+4 -2
arch/mips/jz4740/irq.h
··· 15 15 #ifndef __MIPS_JZ4740_IRQ_H__ 16 16 #define __MIPS_JZ4740_IRQ_H__ 17 17 18 - extern void jz4740_intc_suspend(void); 19 - extern void jz4740_intc_resume(void); 18 + #include <linux/irq.h> 19 + 20 + extern void jz4740_irq_suspend(struct irq_data *data); 21 + extern void jz4740_irq_resume(struct irq_data *data); 20 22 21 23 #endif
-3
arch/mips/jz4740/pm.c
··· 21 21 #include <asm/mach-jz4740/clock.h> 22 22 23 23 #include "clock.h" 24 - #include "irq.h" 25 24 26 25 static int jz4740_pm_enter(suspend_state_t state) 27 26 { 28 - jz4740_intc_suspend(); 29 27 jz4740_clock_suspend(); 30 28 31 29 jz4740_clock_set_wait_mode(JZ4740_WAIT_MODE_SLEEP); ··· 35 37 jz4740_clock_set_wait_mode(JZ4740_WAIT_MODE_IDLE); 36 38 37 39 jz4740_clock_resume(); 38 - jz4740_intc_resume(); 39 40 40 41 return 0; 41 42 }
+4 -1
arch/mips/kernel/Makefile
··· 11 11 ifdef CONFIG_FUNCTION_TRACER 12 12 CFLAGS_REMOVE_ftrace.o = -pg 13 13 CFLAGS_REMOVE_early_printk.o = -pg 14 + CFLAGS_REMOVE_perf_event.o = -pg 15 + CFLAGS_REMOVE_perf_event_mipsxx.o = -pg 14 16 endif 15 17 16 18 obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o ··· 108 106 109 107 obj-$(CONFIG_MIPS_CPUFREQ) += cpufreq/ 110 108 111 - obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o 109 + obj-$(CONFIG_PERF_EVENTS) += perf_event.o 110 + obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o 112 111 113 112 obj-$(CONFIG_JUMP_LABEL) += jump_label.o 114 113
+3
arch/mips/kernel/cpu-probe.c
··· 978 978 platform: 979 979 set_elf_platform(cpu, "octeon"); 980 980 break; 981 + case PRID_IMP_CAVIUM_CN61XX: 981 982 case PRID_IMP_CAVIUM_CN63XX: 983 + case PRID_IMP_CAVIUM_CN66XX: 984 + case PRID_IMP_CAVIUM_CN68XX: 982 985 c->cputype = CPU_CAVIUM_OCTEON2; 983 986 __cpu_name[cpu] = "Cavium Octeon II"; 984 987 set_elf_platform(cpu, "octeon2");
+1 -518
arch/mips/kernel/perf_event.c
··· 14 14 * published by the Free Software Foundation. 15 15 */ 16 16 17 - #include <linux/cpumask.h> 18 - #include <linux/interrupt.h> 19 - #include <linux/smp.h> 20 - #include <linux/kernel.h> 21 17 #include <linux/perf_event.h> 22 - #include <linux/uaccess.h> 23 18 24 - #include <asm/irq.h> 25 - #include <asm/irq_regs.h> 26 19 #include <asm/stacktrace.h> 27 - #include <asm/time.h> /* For perf_irq */ 28 - 29 - /* These are for 32bit counters. For 64bit ones, define them accordingly. */ 30 - #define MAX_PERIOD ((1ULL << 32) - 1) 31 - #define VALID_COUNT 0x7fffffff 32 - #define TOTAL_BITS 32 33 - #define HIGHEST_BIT 31 34 - 35 - #define MIPS_MAX_HWEVENTS 4 36 - 37 - struct cpu_hw_events { 38 - /* Array of events on this cpu. */ 39 - struct perf_event *events[MIPS_MAX_HWEVENTS]; 40 - 41 - /* 42 - * Set the bit (indexed by the counter number) when the counter 43 - * is used for an event. 44 - */ 45 - unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; 46 - 47 - /* 48 - * The borrowed MSB for the performance counter. A MIPS performance 49 - * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit 50 - * counters) as a factor of determining whether a counter overflow 51 - * should be signaled. So here we use a separate MSB for each 52 - * counter to make things easy. 53 - */ 54 - unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; 55 - 56 - /* 57 - * Software copy of the control register for each performance counter. 58 - * MIPS CPUs vary in performance counters. They use this differently, 59 - * and even may not use it. 60 - */ 61 - unsigned int saved_ctrl[MIPS_MAX_HWEVENTS]; 62 - }; 63 - DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { 64 - .saved_ctrl = {0}, 65 - }; 66 - 67 - /* The description of MIPS performance events. */ 68 - struct mips_perf_event { 69 - unsigned int event_id; 70 - /* 71 - * MIPS performance counters are indexed starting from 0. 72 - * CNTR_EVEN indicates the indexes of the counters to be used are 73 - * even numbers. 
74 - */ 75 - unsigned int cntr_mask; 76 - #define CNTR_EVEN 0x55555555 77 - #define CNTR_ODD 0xaaaaaaaa 78 - #ifdef CONFIG_MIPS_MT_SMP 79 - enum { 80 - T = 0, 81 - V = 1, 82 - P = 2, 83 - } range; 84 - #else 85 - #define T 86 - #define V 87 - #define P 88 - #endif 89 - }; 90 - 91 - static struct mips_perf_event raw_event; 92 - static DEFINE_MUTEX(raw_event_mutex); 93 - 94 - #define UNSUPPORTED_PERF_EVENT_ID 0xffffffff 95 - #define C(x) PERF_COUNT_HW_CACHE_##x 96 - 97 - struct mips_pmu { 98 - const char *name; 99 - int irq; 100 - irqreturn_t (*handle_irq)(int irq, void *dev); 101 - int (*handle_shared_irq)(void); 102 - void (*start)(void); 103 - void (*stop)(void); 104 - int (*alloc_counter)(struct cpu_hw_events *cpuc, 105 - struct hw_perf_event *hwc); 106 - u64 (*read_counter)(unsigned int idx); 107 - void (*write_counter)(unsigned int idx, u64 val); 108 - void (*enable_event)(struct hw_perf_event *evt, int idx); 109 - void (*disable_event)(int idx); 110 - const struct mips_perf_event *(*map_raw_event)(u64 config); 111 - const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; 112 - const struct mips_perf_event (*cache_event_map) 113 - [PERF_COUNT_HW_CACHE_MAX] 114 - [PERF_COUNT_HW_CACHE_OP_MAX] 115 - [PERF_COUNT_HW_CACHE_RESULT_MAX]; 116 - unsigned int num_counters; 117 - }; 118 - 119 - static const struct mips_pmu *mipspmu; 120 - 121 - static int 122 - mipspmu_event_set_period(struct perf_event *event, 123 - struct hw_perf_event *hwc, 124 - int idx) 125 - { 126 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 127 - s64 left = local64_read(&hwc->period_left); 128 - s64 period = hwc->sample_period; 129 - int ret = 0; 130 - u64 uleft; 131 - unsigned long flags; 132 - 133 - if (unlikely(left <= -period)) { 134 - left = period; 135 - local64_set(&hwc->period_left, left); 136 - hwc->last_period = period; 137 - ret = 1; 138 - } 139 - 140 - if (unlikely(left <= 0)) { 141 - left += period; 142 - local64_set(&hwc->period_left, left); 143 - 
hwc->last_period = period; 144 - ret = 1; 145 - } 146 - 147 - if (left > (s64)MAX_PERIOD) 148 - left = MAX_PERIOD; 149 - 150 - local64_set(&hwc->prev_count, (u64)-left); 151 - 152 - local_irq_save(flags); 153 - uleft = (u64)(-left) & MAX_PERIOD; 154 - uleft > VALID_COUNT ? 155 - set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs); 156 - mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT); 157 - local_irq_restore(flags); 158 - 159 - perf_event_update_userpage(event); 160 - 161 - return ret; 162 - } 163 - 164 - static void mipspmu_event_update(struct perf_event *event, 165 - struct hw_perf_event *hwc, 166 - int idx) 167 - { 168 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 169 - unsigned long flags; 170 - int shift = 64 - TOTAL_BITS; 171 - s64 prev_raw_count, new_raw_count; 172 - u64 delta; 173 - 174 - again: 175 - prev_raw_count = local64_read(&hwc->prev_count); 176 - local_irq_save(flags); 177 - /* Make the counter value be a "real" one. */ 178 - new_raw_count = mipspmu->read_counter(idx); 179 - if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) { 180 - new_raw_count &= VALID_COUNT; 181 - clear_bit(idx, cpuc->msbs); 182 - } else 183 - new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT); 184 - local_irq_restore(flags); 185 - 186 - if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, 187 - new_raw_count) != prev_raw_count) 188 - goto again; 189 - 190 - delta = (new_raw_count << shift) - (prev_raw_count << shift); 191 - delta >>= shift; 192 - 193 - local64_add(delta, &event->count); 194 - local64_sub(delta, &hwc->period_left); 195 - } 196 - 197 - static void mipspmu_start(struct perf_event *event, int flags) 198 - { 199 - struct hw_perf_event *hwc = &event->hw; 200 - 201 - if (!mipspmu) 202 - return; 203 - 204 - if (flags & PERF_EF_RELOAD) 205 - WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); 206 - 207 - hwc->state = 0; 208 - 209 - /* Set the period for the event. 
*/ 210 - mipspmu_event_set_period(event, hwc, hwc->idx); 211 - 212 - /* Enable the event. */ 213 - mipspmu->enable_event(hwc, hwc->idx); 214 - } 215 - 216 - static void mipspmu_stop(struct perf_event *event, int flags) 217 - { 218 - struct hw_perf_event *hwc = &event->hw; 219 - 220 - if (!mipspmu) 221 - return; 222 - 223 - if (!(hwc->state & PERF_HES_STOPPED)) { 224 - /* We are working on a local event. */ 225 - mipspmu->disable_event(hwc->idx); 226 - barrier(); 227 - mipspmu_event_update(event, hwc, hwc->idx); 228 - hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; 229 - } 230 - } 231 - 232 - static int mipspmu_add(struct perf_event *event, int flags) 233 - { 234 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 235 - struct hw_perf_event *hwc = &event->hw; 236 - int idx; 237 - int err = 0; 238 - 239 - perf_pmu_disable(event->pmu); 240 - 241 - /* To look for a free counter for this event. */ 242 - idx = mipspmu->alloc_counter(cpuc, hwc); 243 - if (idx < 0) { 244 - err = idx; 245 - goto out; 246 - } 247 - 248 - /* 249 - * If there is an event in the counter we are going to use then 250 - * make sure it is disabled. 251 - */ 252 - event->hw.idx = idx; 253 - mipspmu->disable_event(idx); 254 - cpuc->events[idx] = event; 255 - 256 - hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; 257 - if (flags & PERF_EF_START) 258 - mipspmu_start(event, PERF_EF_RELOAD); 259 - 260 - /* Propagate our changes to the userspace mapping. 
*/ 261 - perf_event_update_userpage(event); 262 - 263 - out: 264 - perf_pmu_enable(event->pmu); 265 - return err; 266 - } 267 - 268 - static void mipspmu_del(struct perf_event *event, int flags) 269 - { 270 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 271 - struct hw_perf_event *hwc = &event->hw; 272 - int idx = hwc->idx; 273 - 274 - WARN_ON(idx < 0 || idx >= mipspmu->num_counters); 275 - 276 - mipspmu_stop(event, PERF_EF_UPDATE); 277 - cpuc->events[idx] = NULL; 278 - clear_bit(idx, cpuc->used_mask); 279 - 280 - perf_event_update_userpage(event); 281 - } 282 - 283 - static void mipspmu_read(struct perf_event *event) 284 - { 285 - struct hw_perf_event *hwc = &event->hw; 286 - 287 - /* Don't read disabled counters! */ 288 - if (hwc->idx < 0) 289 - return; 290 - 291 - mipspmu_event_update(event, hwc, hwc->idx); 292 - } 293 - 294 - static void mipspmu_enable(struct pmu *pmu) 295 - { 296 - if (mipspmu) 297 - mipspmu->start(); 298 - } 299 - 300 - static void mipspmu_disable(struct pmu *pmu) 301 - { 302 - if (mipspmu) 303 - mipspmu->stop(); 304 - } 305 - 306 - static atomic_t active_events = ATOMIC_INIT(0); 307 - static DEFINE_MUTEX(pmu_reserve_mutex); 308 - static int (*save_perf_irq)(void); 309 - 310 - static int mipspmu_get_irq(void) 311 - { 312 - int err; 313 - 314 - if (mipspmu->irq >= 0) { 315 - /* Request my own irq handler. */ 316 - err = request_irq(mipspmu->irq, mipspmu->handle_irq, 317 - IRQF_DISABLED | IRQF_NOBALANCING, 318 - "mips_perf_pmu", NULL); 319 - if (err) { 320 - pr_warning("Unable to request IRQ%d for MIPS " 321 - "performance counters!\n", mipspmu->irq); 322 - } 323 - } else if (cp0_perfcount_irq < 0) { 324 - /* 325 - * We are sharing the irq number with the timer interrupt. 
326 - */ 327 - save_perf_irq = perf_irq; 328 - perf_irq = mipspmu->handle_shared_irq; 329 - err = 0; 330 - } else { 331 - pr_warning("The platform hasn't properly defined its " 332 - "interrupt controller.\n"); 333 - err = -ENOENT; 334 - } 335 - 336 - return err; 337 - } 338 - 339 - static void mipspmu_free_irq(void) 340 - { 341 - if (mipspmu->irq >= 0) 342 - free_irq(mipspmu->irq, NULL); 343 - else if (cp0_perfcount_irq < 0) 344 - perf_irq = save_perf_irq; 345 - } 346 - 347 - /* 348 - * mipsxx/rm9000/loongson2 have different performance counters, they have 349 - * specific low-level init routines. 350 - */ 351 - static void reset_counters(void *arg); 352 - static int __hw_perf_event_init(struct perf_event *event); 353 - 354 - static void hw_perf_event_destroy(struct perf_event *event) 355 - { 356 - if (atomic_dec_and_mutex_lock(&active_events, 357 - &pmu_reserve_mutex)) { 358 - /* 359 - * We must not call the destroy function with interrupts 360 - * disabled. 361 - */ 362 - on_each_cpu(reset_counters, 363 - (void *)(long)mipspmu->num_counters, 1); 364 - mipspmu_free_irq(); 365 - mutex_unlock(&pmu_reserve_mutex); 366 - } 367 - } 368 - 369 - static int mipspmu_event_init(struct perf_event *event) 370 - { 371 - int err = 0; 372 - 373 - switch (event->attr.type) { 374 - case PERF_TYPE_RAW: 375 - case PERF_TYPE_HARDWARE: 376 - case PERF_TYPE_HW_CACHE: 377 - break; 378 - 379 - default: 380 - return -ENOENT; 381 - } 382 - 383 - if (!mipspmu || event->cpu >= nr_cpumask_bits || 384 - (event->cpu >= 0 && !cpu_online(event->cpu))) 385 - return -ENODEV; 386 - 387 - if (!atomic_inc_not_zero(&active_events)) { 388 - if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { 389 - atomic_dec(&active_events); 390 - return -ENOSPC; 391 - } 392 - 393 - mutex_lock(&pmu_reserve_mutex); 394 - if (atomic_read(&active_events) == 0) 395 - err = mipspmu_get_irq(); 396 - 397 - if (!err) 398 - atomic_inc(&active_events); 399 - mutex_unlock(&pmu_reserve_mutex); 400 - } 401 - 402 - if (err) 403 - 
return err; 404 - 405 - err = __hw_perf_event_init(event); 406 - if (err) 407 - hw_perf_event_destroy(event); 408 - 409 - return err; 410 - } 411 - 412 - static struct pmu pmu = { 413 - .pmu_enable = mipspmu_enable, 414 - .pmu_disable = mipspmu_disable, 415 - .event_init = mipspmu_event_init, 416 - .add = mipspmu_add, 417 - .del = mipspmu_del, 418 - .start = mipspmu_start, 419 - .stop = mipspmu_stop, 420 - .read = mipspmu_read, 421 - }; 422 - 423 - static inline unsigned int 424 - mipspmu_perf_event_encode(const struct mips_perf_event *pev) 425 - { 426 - /* 427 - * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for 428 - * event_id. 429 - */ 430 - #ifdef CONFIG_MIPS_MT_SMP 431 - return ((unsigned int)pev->range << 24) | 432 - (pev->cntr_mask & 0xffff00) | 433 - (pev->event_id & 0xff); 434 - #else 435 - return (pev->cntr_mask & 0xffff00) | 436 - (pev->event_id & 0xff); 437 - #endif 438 - } 439 - 440 - static const struct mips_perf_event * 441 - mipspmu_map_general_event(int idx) 442 - { 443 - const struct mips_perf_event *pev; 444 - 445 - pev = ((*mipspmu->general_event_map)[idx].event_id == 446 - UNSUPPORTED_PERF_EVENT_ID ? 
ERR_PTR(-EOPNOTSUPP) : 447 - &(*mipspmu->general_event_map)[idx]); 448 - 449 - return pev; 450 - } 451 - 452 - static const struct mips_perf_event * 453 - mipspmu_map_cache_event(u64 config) 454 - { 455 - unsigned int cache_type, cache_op, cache_result; 456 - const struct mips_perf_event *pev; 457 - 458 - cache_type = (config >> 0) & 0xff; 459 - if (cache_type >= PERF_COUNT_HW_CACHE_MAX) 460 - return ERR_PTR(-EINVAL); 461 - 462 - cache_op = (config >> 8) & 0xff; 463 - if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) 464 - return ERR_PTR(-EINVAL); 465 - 466 - cache_result = (config >> 16) & 0xff; 467 - if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 468 - return ERR_PTR(-EINVAL); 469 - 470 - pev = &((*mipspmu->cache_event_map) 471 - [cache_type] 472 - [cache_op] 473 - [cache_result]); 474 - 475 - if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID) 476 - return ERR_PTR(-EOPNOTSUPP); 477 - 478 - return pev; 479 - 480 - } 481 - 482 - static int validate_event(struct cpu_hw_events *cpuc, 483 - struct perf_event *event) 484 - { 485 - struct hw_perf_event fake_hwc = event->hw; 486 - 487 - /* Allow mixed event group. So return 1 to pass validation. 
*/ 488 - if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) 489 - return 1; 490 - 491 - return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; 492 - } 493 - 494 - static int validate_group(struct perf_event *event) 495 - { 496 - struct perf_event *sibling, *leader = event->group_leader; 497 - struct cpu_hw_events fake_cpuc; 498 - 499 - memset(&fake_cpuc, 0, sizeof(fake_cpuc)); 500 - 501 - if (!validate_event(&fake_cpuc, leader)) 502 - return -ENOSPC; 503 - 504 - list_for_each_entry(sibling, &leader->sibling_list, group_entry) { 505 - if (!validate_event(&fake_cpuc, sibling)) 506 - return -ENOSPC; 507 - } 508 - 509 - if (!validate_event(&fake_cpuc, event)) 510 - return -ENOSPC; 511 - 512 - return 0; 513 - } 514 - 515 - /* This is needed by specific irq handlers in perf_event_*.c */ 516 - static void 517 - handle_associated_event(struct cpu_hw_events *cpuc, 518 - int idx, struct perf_sample_data *data, struct pt_regs *regs) 519 - { 520 - struct perf_event *event = cpuc->events[idx]; 521 - struct hw_perf_event *hwc = &event->hw; 522 - 523 - mipspmu_event_update(event, hwc, idx); 524 - data->period = event->hw.last_period; 525 - if (!mipspmu_event_set_period(event, hwc, idx)) 526 - return; 527 - 528 - if (perf_event_overflow(event, data, regs)) 529 - mipspmu->disable_event(idx); 530 - } 531 - 532 - #include "perf_event_mipsxx.c" 533 20 534 21 /* Callchain handling code. */ 535 22 536 23 /* 537 24 * Leave userspace callchain empty for now. When we find a way to trace 538 - * the user stack callchains, we add here. 25 + * the user stack callchains, we will add it here. 539 26 */ 540 - void perf_callchain_user(struct perf_callchain_entry *entry, 541 - struct pt_regs *regs) 542 - { 543 - } 544 27 545 28 static void save_raw_perf_callchain(struct perf_callchain_entry *entry, 546 29 unsigned long reg29)
+924 -351
arch/mips/kernel/perf_event_mipsxx.c
··· 1 - #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \ 2 - defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1) 1 + /* 2 + * Linux performance counter support for MIPS. 3 + * 4 + * Copyright (C) 2010 MIPS Technologies, Inc. 5 + * Copyright (C) 2011 Cavium Networks, Inc. 6 + * Author: Deng-Cheng Zhu 7 + * 8 + * This code is based on the implementation for ARM, which is in turn 9 + * based on the sparc64 perf event code and the x86 code. Performance 10 + * counter access is based on the MIPS Oprofile code. And the callchain 11 + * support references the code of MIPS stacktrace.c. 12 + * 13 + * This program is free software; you can redistribute it and/or modify 14 + * it under the terms of the GNU General Public License version 2 as 15 + * published by the Free Software Foundation. 16 + */ 17 + 18 + #include <linux/cpumask.h> 19 + #include <linux/interrupt.h> 20 + #include <linux/smp.h> 21 + #include <linux/kernel.h> 22 + #include <linux/perf_event.h> 23 + #include <linux/uaccess.h> 24 + 25 + #include <asm/irq.h> 26 + #include <asm/irq_regs.h> 27 + #include <asm/stacktrace.h> 28 + #include <asm/time.h> /* For perf_irq */ 29 + 30 + #define MIPS_MAX_HWEVENTS 4 31 + 32 + struct cpu_hw_events { 33 + /* Array of events on this cpu. */ 34 + struct perf_event *events[MIPS_MAX_HWEVENTS]; 35 + 36 + /* 37 + * Set the bit (indexed by the counter number) when the counter 38 + * is used for an event. 39 + */ 40 + unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; 41 + 42 + /* 43 + * Software copy of the control register for each performance counter. 44 + * MIPS CPUs vary in performance counters. They use this differently, 45 + * and even may not use it. 46 + */ 47 + unsigned int saved_ctrl[MIPS_MAX_HWEVENTS]; 48 + }; 49 + DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { 50 + .saved_ctrl = {0}, 51 + }; 52 + 53 + /* The description of MIPS performance events. 
*/ 54 + struct mips_perf_event { 55 + unsigned int event_id; 56 + /* 57 + * MIPS performance counters are indexed starting from 0. 58 + * CNTR_EVEN indicates the indexes of the counters to be used are 59 + * even numbers. 60 + */ 61 + unsigned int cntr_mask; 62 + #define CNTR_EVEN 0x55555555 63 + #define CNTR_ODD 0xaaaaaaaa 64 + #define CNTR_ALL 0xffffffff 65 + #ifdef CONFIG_MIPS_MT_SMP 66 + enum { 67 + T = 0, 68 + V = 1, 69 + P = 2, 70 + } range; 71 + #else 72 + #define T 73 + #define V 74 + #define P 75 + #endif 76 + }; 77 + 78 + static struct mips_perf_event raw_event; 79 + static DEFINE_MUTEX(raw_event_mutex); 80 + 81 + #define UNSUPPORTED_PERF_EVENT_ID 0xffffffff 82 + #define C(x) PERF_COUNT_HW_CACHE_##x 83 + 84 + struct mips_pmu { 85 + u64 max_period; 86 + u64 valid_count; 87 + u64 overflow; 88 + const char *name; 89 + int irq; 90 + u64 (*read_counter)(unsigned int idx); 91 + void (*write_counter)(unsigned int idx, u64 val); 92 + const struct mips_perf_event *(*map_raw_event)(u64 config); 93 + const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; 94 + const struct mips_perf_event (*cache_event_map) 95 + [PERF_COUNT_HW_CACHE_MAX] 96 + [PERF_COUNT_HW_CACHE_OP_MAX] 97 + [PERF_COUNT_HW_CACHE_RESULT_MAX]; 98 + unsigned int num_counters; 99 + }; 100 + 101 + static struct mips_pmu mipspmu; 3 102 4 103 #define M_CONFIG1_PC (1 << 4) 5 104 6 - #define M_PERFCTL_EXL (1UL << 0) 7 - #define M_PERFCTL_KERNEL (1UL << 1) 8 - #define M_PERFCTL_SUPERVISOR (1UL << 2) 9 - #define M_PERFCTL_USER (1UL << 3) 10 - #define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4) 105 + #define M_PERFCTL_EXL (1 << 0) 106 + #define M_PERFCTL_KERNEL (1 << 1) 107 + #define M_PERFCTL_SUPERVISOR (1 << 2) 108 + #define M_PERFCTL_USER (1 << 3) 109 + #define M_PERFCTL_INTERRUPT_ENABLE (1 << 4) 11 110 #define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5) 12 111 #define M_PERFCTL_VPEID(vpe) ((vpe) << 16) 13 112 #define M_PERFCTL_MT_EN(filter) ((filter) << 20) ··· 114 15 #define M_TC_EN_VPE 
M_PERFCTL_MT_EN(1) 115 16 #define M_TC_EN_TC M_PERFCTL_MT_EN(2) 116 17 #define M_PERFCTL_TCID(tcid) ((tcid) << 22) 117 - #define M_PERFCTL_WIDE (1UL << 30) 118 - #define M_PERFCTL_MORE (1UL << 31) 18 + #define M_PERFCTL_WIDE (1 << 30) 19 + #define M_PERFCTL_MORE (1 << 31) 119 20 120 21 #define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \ 121 22 M_PERFCTL_KERNEL | \ ··· 130 31 #endif 131 32 #define M_PERFCTL_EVENT_MASK 0xfe0 132 33 133 - #define M_COUNTER_OVERFLOW (1UL << 31) 134 34 135 35 #ifdef CONFIG_MIPS_MT_SMP 136 36 static int cpu_has_mipsmt_pertccounters; 37 + 38 + static DEFINE_RWLOCK(pmuint_rwlock); 137 39 138 40 /* 139 41 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because ··· 149 49 #endif 150 50 151 51 /* Copied from op_model_mipsxx.c */ 152 - static inline unsigned int vpe_shift(void) 52 + static unsigned int vpe_shift(void) 153 53 { 154 54 if (num_possible_cpus() > 1) 155 55 return 1; 156 56 157 57 return 0; 158 58 } 159 - #else /* !CONFIG_MIPS_MT_SMP */ 160 - #define vpe_id() 0 161 59 162 - static inline unsigned int vpe_shift(void) 163 - { 164 - return 0; 165 - } 166 - #endif /* CONFIG_MIPS_MT_SMP */ 167 - 168 - static inline unsigned int 169 - counters_total_to_per_cpu(unsigned int counters) 60 + static unsigned int counters_total_to_per_cpu(unsigned int counters) 170 61 { 171 62 return counters >> vpe_shift(); 172 63 } 173 64 174 - static inline unsigned int 175 - counters_per_cpu_to_total(unsigned int counters) 65 + static unsigned int counters_per_cpu_to_total(unsigned int counters) 176 66 { 177 67 return counters << vpe_shift(); 178 68 } 179 69 180 - #define __define_perf_accessors(r, n, np) \ 181 - \ 182 - static inline unsigned int r_c0_ ## r ## n(void) \ 183 - { \ 184 - unsigned int cpu = vpe_id(); \ 185 - \ 186 - switch (cpu) { \ 187 - case 0: \ 188 - return read_c0_ ## r ## n(); \ 189 - case 1: \ 190 - return read_c0_ ## r ## np(); \ 191 - default: \ 192 - BUG(); \ 193 - } \ 194 - return 0; \ 195 - } \ 196 - \ 197 - 
static inline void w_c0_ ## r ## n(unsigned int value) \ 198 - { \ 199 - unsigned int cpu = vpe_id(); \ 200 - \ 201 - switch (cpu) { \ 202 - case 0: \ 203 - write_c0_ ## r ## n(value); \ 204 - return; \ 205 - case 1: \ 206 - write_c0_ ## r ## np(value); \ 207 - return; \ 208 - default: \ 209 - BUG(); \ 210 - } \ 211 - return; \ 212 - } \ 70 + #else /* !CONFIG_MIPS_MT_SMP */ 71 + #define vpe_id() 0 213 72 214 - __define_perf_accessors(perfcntr, 0, 2) 215 - __define_perf_accessors(perfcntr, 1, 3) 216 - __define_perf_accessors(perfcntr, 2, 0) 217 - __define_perf_accessors(perfcntr, 3, 1) 73 + #endif /* CONFIG_MIPS_MT_SMP */ 218 74 219 - __define_perf_accessors(perfctrl, 0, 2) 220 - __define_perf_accessors(perfctrl, 1, 3) 221 - __define_perf_accessors(perfctrl, 2, 0) 222 - __define_perf_accessors(perfctrl, 3, 1) 75 + static void resume_local_counters(void); 76 + static void pause_local_counters(void); 77 + static irqreturn_t mipsxx_pmu_handle_irq(int, void *); 78 + static int mipsxx_pmu_handle_shared_irq(void); 223 79 224 - static inline int __n_counters(void) 80 + static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx) 81 + { 82 + if (vpe_id() == 1) 83 + idx = (idx + 2) & 3; 84 + return idx; 85 + } 86 + 87 + static u64 mipsxx_pmu_read_counter(unsigned int idx) 88 + { 89 + idx = mipsxx_pmu_swizzle_perf_idx(idx); 90 + 91 + switch (idx) { 92 + case 0: 93 + /* 94 + * The counters are unsigned, we must cast to truncate 95 + * off the high bits. 
96 + */ 97 + return (u32)read_c0_perfcntr0(); 98 + case 1: 99 + return (u32)read_c0_perfcntr1(); 100 + case 2: 101 + return (u32)read_c0_perfcntr2(); 102 + case 3: 103 + return (u32)read_c0_perfcntr3(); 104 + default: 105 + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); 106 + return 0; 107 + } 108 + } 109 + 110 + static u64 mipsxx_pmu_read_counter_64(unsigned int idx) 111 + { 112 + idx = mipsxx_pmu_swizzle_perf_idx(idx); 113 + 114 + switch (idx) { 115 + case 0: 116 + return read_c0_perfcntr0_64(); 117 + case 1: 118 + return read_c0_perfcntr1_64(); 119 + case 2: 120 + return read_c0_perfcntr2_64(); 121 + case 3: 122 + return read_c0_perfcntr3_64(); 123 + default: 124 + WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); 125 + return 0; 126 + } 127 + } 128 + 129 + static void mipsxx_pmu_write_counter(unsigned int idx, u64 val) 130 + { 131 + idx = mipsxx_pmu_swizzle_perf_idx(idx); 132 + 133 + switch (idx) { 134 + case 0: 135 + write_c0_perfcntr0(val); 136 + return; 137 + case 1: 138 + write_c0_perfcntr1(val); 139 + return; 140 + case 2: 141 + write_c0_perfcntr2(val); 142 + return; 143 + case 3: 144 + write_c0_perfcntr3(val); 145 + return; 146 + } 147 + } 148 + 149 + static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val) 150 + { 151 + idx = mipsxx_pmu_swizzle_perf_idx(idx); 152 + 153 + switch (idx) { 154 + case 0: 155 + write_c0_perfcntr0_64(val); 156 + return; 157 + case 1: 158 + write_c0_perfcntr1_64(val); 159 + return; 160 + case 2: 161 + write_c0_perfcntr2_64(val); 162 + return; 163 + case 3: 164 + write_c0_perfcntr3_64(val); 165 + return; 166 + } 167 + } 168 + 169 + static unsigned int mipsxx_pmu_read_control(unsigned int idx) 170 + { 171 + idx = mipsxx_pmu_swizzle_perf_idx(idx); 172 + 173 + switch (idx) { 174 + case 0: 175 + return read_c0_perfctrl0(); 176 + case 1: 177 + return read_c0_perfctrl1(); 178 + case 2: 179 + return read_c0_perfctrl2(); 180 + case 3: 181 + return read_c0_perfctrl3(); 182 + default: 183 + 
WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); 184 + return 0; 185 + } 186 + } 187 + 188 + static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val) 189 + { 190 + idx = mipsxx_pmu_swizzle_perf_idx(idx); 191 + 192 + switch (idx) { 193 + case 0: 194 + write_c0_perfctrl0(val); 195 + return; 196 + case 1: 197 + write_c0_perfctrl1(val); 198 + return; 199 + case 2: 200 + write_c0_perfctrl2(val); 201 + return; 202 + case 3: 203 + write_c0_perfctrl3(val); 204 + return; 205 + } 206 + } 207 + 208 + static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, 209 + struct hw_perf_event *hwc) 210 + { 211 + int i; 212 + 213 + /* 214 + * We only need to care the counter mask. The range has been 215 + * checked definitely. 216 + */ 217 + unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff; 218 + 219 + for (i = mipspmu.num_counters - 1; i >= 0; i--) { 220 + /* 221 + * Note that some MIPS perf events can be counted by both 222 + * even and odd counters, wheresas many other are only by 223 + * even _or_ odd counters. This introduces an issue that 224 + * when the former kind of event takes the counter the 225 + * latter kind of event wants to use, then the "counter 226 + * allocation" for the latter event will fail. In fact if 227 + * they can be dynamically swapped, they both feel happy. 228 + * But here we leave this issue alone for now. 229 + */ 230 + if (test_bit(i, &cntr_mask) && 231 + !test_and_set_bit(i, cpuc->used_mask)) 232 + return i; 233 + } 234 + 235 + return -EAGAIN; 236 + } 237 + 238 + static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) 239 + { 240 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 241 + 242 + WARN_ON(idx < 0 || idx >= mipspmu.num_counters); 243 + 244 + cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | 245 + (evt->config_base & M_PERFCTL_CONFIG_MASK) | 246 + /* Make sure interrupt enabled. 
*/ 247 + M_PERFCTL_INTERRUPT_ENABLE; 248 + /* 249 + * We do not actually let the counter run. Leave it until start(). 250 + */ 251 + } 252 + 253 + static void mipsxx_pmu_disable_event(int idx) 254 + { 255 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 256 + unsigned long flags; 257 + 258 + WARN_ON(idx < 0 || idx >= mipspmu.num_counters); 259 + 260 + local_irq_save(flags); 261 + cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) & 262 + ~M_PERFCTL_COUNT_EVENT_WHENEVER; 263 + mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]); 264 + local_irq_restore(flags); 265 + } 266 + 267 + static int mipspmu_event_set_period(struct perf_event *event, 268 + struct hw_perf_event *hwc, 269 + int idx) 270 + { 271 + u64 left = local64_read(&hwc->period_left); 272 + u64 period = hwc->sample_period; 273 + int ret = 0; 274 + 275 + if (unlikely((left + period) & (1ULL << 63))) { 276 + /* left underflowed by more than period. */ 277 + left = period; 278 + local64_set(&hwc->period_left, left); 279 + hwc->last_period = period; 280 + ret = 1; 281 + } else if (unlikely((left + period) <= period)) { 282 + /* left underflowed by less than period. 
*/ 283 + left += period; 284 + local64_set(&hwc->period_left, left); 285 + hwc->last_period = period; 286 + ret = 1; 287 + } 288 + 289 + if (left > mipspmu.max_period) { 290 + left = mipspmu.max_period; 291 + local64_set(&hwc->period_left, left); 292 + } 293 + 294 + local64_set(&hwc->prev_count, mipspmu.overflow - left); 295 + 296 + mipspmu.write_counter(idx, mipspmu.overflow - left); 297 + 298 + perf_event_update_userpage(event); 299 + 300 + return ret; 301 + } 302 + 303 + static void mipspmu_event_update(struct perf_event *event, 304 + struct hw_perf_event *hwc, 305 + int idx) 306 + { 307 + u64 prev_raw_count, new_raw_count; 308 + u64 delta; 309 + 310 + again: 311 + prev_raw_count = local64_read(&hwc->prev_count); 312 + new_raw_count = mipspmu.read_counter(idx); 313 + 314 + if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, 315 + new_raw_count) != prev_raw_count) 316 + goto again; 317 + 318 + delta = new_raw_count - prev_raw_count; 319 + 320 + local64_add(delta, &event->count); 321 + local64_sub(delta, &hwc->period_left); 322 + } 323 + 324 + static void mipspmu_start(struct perf_event *event, int flags) 325 + { 326 + struct hw_perf_event *hwc = &event->hw; 327 + 328 + if (flags & PERF_EF_RELOAD) 329 + WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); 330 + 331 + hwc->state = 0; 332 + 333 + /* Set the period for the event. */ 334 + mipspmu_event_set_period(event, hwc, hwc->idx); 335 + 336 + /* Enable the event. */ 337 + mipsxx_pmu_enable_event(hwc, hwc->idx); 338 + } 339 + 340 + static void mipspmu_stop(struct perf_event *event, int flags) 341 + { 342 + struct hw_perf_event *hwc = &event->hw; 343 + 344 + if (!(hwc->state & PERF_HES_STOPPED)) { 345 + /* We are working on a local event. 
*/ 346 + mipsxx_pmu_disable_event(hwc->idx); 347 + barrier(); 348 + mipspmu_event_update(event, hwc, hwc->idx); 349 + hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; 350 + } 351 + } 352 + 353 + static int mipspmu_add(struct perf_event *event, int flags) 354 + { 355 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 356 + struct hw_perf_event *hwc = &event->hw; 357 + int idx; 358 + int err = 0; 359 + 360 + perf_pmu_disable(event->pmu); 361 + 362 + /* To look for a free counter for this event. */ 363 + idx = mipsxx_pmu_alloc_counter(cpuc, hwc); 364 + if (idx < 0) { 365 + err = idx; 366 + goto out; 367 + } 368 + 369 + /* 370 + * If there is an event in the counter we are going to use then 371 + * make sure it is disabled. 372 + */ 373 + event->hw.idx = idx; 374 + mipsxx_pmu_disable_event(idx); 375 + cpuc->events[idx] = event; 376 + 377 + hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; 378 + if (flags & PERF_EF_START) 379 + mipspmu_start(event, PERF_EF_RELOAD); 380 + 381 + /* Propagate our changes to the userspace mapping. */ 382 + perf_event_update_userpage(event); 383 + 384 + out: 385 + perf_pmu_enable(event->pmu); 386 + return err; 387 + } 388 + 389 + static void mipspmu_del(struct perf_event *event, int flags) 390 + { 391 + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 392 + struct hw_perf_event *hwc = &event->hw; 393 + int idx = hwc->idx; 394 + 395 + WARN_ON(idx < 0 || idx >= mipspmu.num_counters); 396 + 397 + mipspmu_stop(event, PERF_EF_UPDATE); 398 + cpuc->events[idx] = NULL; 399 + clear_bit(idx, cpuc->used_mask); 400 + 401 + perf_event_update_userpage(event); 402 + } 403 + 404 + static void mipspmu_read(struct perf_event *event) 405 + { 406 + struct hw_perf_event *hwc = &event->hw; 407 + 408 + /* Don't read disabled counters! 
*/ 409 + if (hwc->idx < 0) 410 + return; 411 + 412 + mipspmu_event_update(event, hwc, hwc->idx); 413 + } 414 + 415 + static void mipspmu_enable(struct pmu *pmu) 416 + { 417 + #ifdef CONFIG_MIPS_MT_SMP 418 + write_unlock(&pmuint_rwlock); 419 + #endif 420 + resume_local_counters(); 421 + } 422 + 423 + /* 424 + * MIPS performance counters can be per-TC. The control registers can 425 + * not be directly accessed accross CPUs. Hence if we want to do global 426 + * control, we need cross CPU calls. on_each_cpu() can help us, but we 427 + * can not make sure this function is called with interrupts enabled. So 428 + * here we pause local counters and then grab a rwlock and leave the 429 + * counters on other CPUs alone. If any counter interrupt raises while 430 + * we own the write lock, simply pause local counters on that CPU and 431 + * spin in the handler. Also we know we won't be switched to another 432 + * CPU after pausing local counters and before grabbing the lock. 433 + */ 434 + static void mipspmu_disable(struct pmu *pmu) 435 + { 436 + pause_local_counters(); 437 + #ifdef CONFIG_MIPS_MT_SMP 438 + write_lock(&pmuint_rwlock); 439 + #endif 440 + } 441 + 442 + static atomic_t active_events = ATOMIC_INIT(0); 443 + static DEFINE_MUTEX(pmu_reserve_mutex); 444 + static int (*save_perf_irq)(void); 445 + 446 + static int mipspmu_get_irq(void) 447 + { 448 + int err; 449 + 450 + if (mipspmu.irq >= 0) { 451 + /* Request my own irq handler. */ 452 + err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq, 453 + IRQF_PERCPU | IRQF_NOBALANCING, 454 + "mips_perf_pmu", NULL); 455 + if (err) { 456 + pr_warning("Unable to request IRQ%d for MIPS " 457 + "performance counters!\n", mipspmu.irq); 458 + } 459 + } else if (cp0_perfcount_irq < 0) { 460 + /* 461 + * We are sharing the irq number with the timer interrupt. 
462 + */ 463 + save_perf_irq = perf_irq; 464 + perf_irq = mipsxx_pmu_handle_shared_irq; 465 + err = 0; 466 + } else { 467 + pr_warning("The platform hasn't properly defined its " 468 + "interrupt controller.\n"); 469 + err = -ENOENT; 470 + } 471 + 472 + return err; 473 + } 474 + 475 + static void mipspmu_free_irq(void) 476 + { 477 + if (mipspmu.irq >= 0) 478 + free_irq(mipspmu.irq, NULL); 479 + else if (cp0_perfcount_irq < 0) 480 + perf_irq = save_perf_irq; 481 + } 482 + 483 + /* 484 + * mipsxx/rm9000/loongson2 have different performance counters, they have 485 + * specific low-level init routines. 486 + */ 487 + static void reset_counters(void *arg); 488 + static int __hw_perf_event_init(struct perf_event *event); 489 + 490 + static void hw_perf_event_destroy(struct perf_event *event) 491 + { 492 + if (atomic_dec_and_mutex_lock(&active_events, 493 + &pmu_reserve_mutex)) { 494 + /* 495 + * We must not call the destroy function with interrupts 496 + * disabled. 497 + */ 498 + on_each_cpu(reset_counters, 499 + (void *)(long)mipspmu.num_counters, 1); 500 + mipspmu_free_irq(); 501 + mutex_unlock(&pmu_reserve_mutex); 502 + } 503 + } 504 + 505 + static int mipspmu_event_init(struct perf_event *event) 506 + { 507 + int err = 0; 508 + 509 + switch (event->attr.type) { 510 + case PERF_TYPE_RAW: 511 + case PERF_TYPE_HARDWARE: 512 + case PERF_TYPE_HW_CACHE: 513 + break; 514 + 515 + default: 516 + return -ENOENT; 517 + } 518 + 519 + if (event->cpu >= nr_cpumask_bits || 520 + (event->cpu >= 0 && !cpu_online(event->cpu))) 521 + return -ENODEV; 522 + 523 + if (!atomic_inc_not_zero(&active_events)) { 524 + if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { 525 + atomic_dec(&active_events); 526 + return -ENOSPC; 527 + } 528 + 529 + mutex_lock(&pmu_reserve_mutex); 530 + if (atomic_read(&active_events) == 0) 531 + err = mipspmu_get_irq(); 532 + 533 + if (!err) 534 + atomic_inc(&active_events); 535 + mutex_unlock(&pmu_reserve_mutex); 536 + } 537 + 538 + if (err) 539 + return err; 
540 + 541 + err = __hw_perf_event_init(event); 542 + if (err) 543 + hw_perf_event_destroy(event); 544 + 545 + return err; 546 + } 547 + 548 + static struct pmu pmu = { 549 + .pmu_enable = mipspmu_enable, 550 + .pmu_disable = mipspmu_disable, 551 + .event_init = mipspmu_event_init, 552 + .add = mipspmu_add, 553 + .del = mipspmu_del, 554 + .start = mipspmu_start, 555 + .stop = mipspmu_stop, 556 + .read = mipspmu_read, 557 + }; 558 + 559 + static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) 560 + { 561 + /* 562 + * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for 563 + * event_id. 564 + */ 565 + #ifdef CONFIG_MIPS_MT_SMP 566 + return ((unsigned int)pev->range << 24) | 567 + (pev->cntr_mask & 0xffff00) | 568 + (pev->event_id & 0xff); 569 + #else 570 + return (pev->cntr_mask & 0xffff00) | 571 + (pev->event_id & 0xff); 572 + #endif 573 + } 574 + 575 + static const struct mips_perf_event *mipspmu_map_general_event(int idx) 576 + { 577 + const struct mips_perf_event *pev; 578 + 579 + pev = ((*mipspmu.general_event_map)[idx].event_id == 580 + UNSUPPORTED_PERF_EVENT_ID ? 
ERR_PTR(-EOPNOTSUPP) : 581 + &(*mipspmu.general_event_map)[idx]); 582 + 583 + return pev; 584 + } 585 + 586 + static const struct mips_perf_event *mipspmu_map_cache_event(u64 config) 587 + { 588 + unsigned int cache_type, cache_op, cache_result; 589 + const struct mips_perf_event *pev; 590 + 591 + cache_type = (config >> 0) & 0xff; 592 + if (cache_type >= PERF_COUNT_HW_CACHE_MAX) 593 + return ERR_PTR(-EINVAL); 594 + 595 + cache_op = (config >> 8) & 0xff; 596 + if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) 597 + return ERR_PTR(-EINVAL); 598 + 599 + cache_result = (config >> 16) & 0xff; 600 + if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) 601 + return ERR_PTR(-EINVAL); 602 + 603 + pev = &((*mipspmu.cache_event_map) 604 + [cache_type] 605 + [cache_op] 606 + [cache_result]); 607 + 608 + if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID) 609 + return ERR_PTR(-EOPNOTSUPP); 610 + 611 + return pev; 612 + 613 + } 614 + 615 + static int validate_event(struct cpu_hw_events *cpuc, 616 + struct perf_event *event) 617 + { 618 + struct hw_perf_event fake_hwc = event->hw; 619 + 620 + /* Allow mixed event group. So return 1 to pass validation. 
*/ 621 + if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) 622 + return 1; 623 + 624 + return mipsxx_pmu_alloc_counter(cpuc, &fake_hwc) >= 0; 625 + } 626 + 627 + static int validate_group(struct perf_event *event) 628 + { 629 + struct perf_event *sibling, *leader = event->group_leader; 630 + struct cpu_hw_events fake_cpuc; 631 + 632 + memset(&fake_cpuc, 0, sizeof(fake_cpuc)); 633 + 634 + if (!validate_event(&fake_cpuc, leader)) 635 + return -ENOSPC; 636 + 637 + list_for_each_entry(sibling, &leader->sibling_list, group_entry) { 638 + if (!validate_event(&fake_cpuc, sibling)) 639 + return -ENOSPC; 640 + } 641 + 642 + if (!validate_event(&fake_cpuc, event)) 643 + return -ENOSPC; 644 + 645 + return 0; 646 + } 647 + 648 + /* This is needed by specific irq handlers in perf_event_*.c */ 649 + static void handle_associated_event(struct cpu_hw_events *cpuc, 650 + int idx, struct perf_sample_data *data, 651 + struct pt_regs *regs) 652 + { 653 + struct perf_event *event = cpuc->events[idx]; 654 + struct hw_perf_event *hwc = &event->hw; 655 + 656 + mipspmu_event_update(event, hwc, idx); 657 + data->period = event->hw.last_period; 658 + if (!mipspmu_event_set_period(event, hwc, idx)) 659 + return; 660 + 661 + if (perf_event_overflow(event, data, regs)) 662 + mipsxx_pmu_disable_event(idx); 663 + } 664 + 665 + 666 + static int __n_counters(void) 225 667 { 226 668 if (!(read_c0_config1() & M_CONFIG1_PC)) 227 669 return 0; ··· 777 135 return 4; 778 136 } 779 137 780 - static inline int n_counters(void) 138 + static int n_counters(void) 781 139 { 782 140 int counters; 783 141 ··· 803 161 int counters = (int)(long)arg; 804 162 switch (counters) { 805 163 case 4: 806 - w_c0_perfctrl3(0); 807 - w_c0_perfcntr3(0); 164 + mipsxx_pmu_write_control(3, 0); 165 + mipspmu.write_counter(3, 0); 808 166 case 3: 809 - w_c0_perfctrl2(0); 810 - w_c0_perfcntr2(0); 167 + mipsxx_pmu_write_control(2, 0); 168 + mipspmu.write_counter(2, 0); 811 169 case 2: 812 - w_c0_perfctrl1(0); 813 - 
w_c0_perfcntr1(0); 170 + mipsxx_pmu_write_control(1, 0); 171 + mipspmu.write_counter(1, 0); 814 172 case 1: 815 - w_c0_perfctrl0(0); 816 - w_c0_perfcntr0(0); 173 + mipsxx_pmu_write_control(0, 0); 174 + mipspmu.write_counter(0, 0); 817 175 } 818 176 } 819 - 820 - static inline u64 821 - mipsxx_pmu_read_counter(unsigned int idx) 822 - { 823 - switch (idx) { 824 - case 0: 825 - return r_c0_perfcntr0(); 826 - case 1: 827 - return r_c0_perfcntr1(); 828 - case 2: 829 - return r_c0_perfcntr2(); 830 - case 3: 831 - return r_c0_perfcntr3(); 832 - default: 833 - WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); 834 - return 0; 835 - } 836 - } 837 - 838 - static inline void 839 - mipsxx_pmu_write_counter(unsigned int idx, u64 val) 840 - { 841 - switch (idx) { 842 - case 0: 843 - w_c0_perfcntr0(val); 844 - return; 845 - case 1: 846 - w_c0_perfcntr1(val); 847 - return; 848 - case 2: 849 - w_c0_perfcntr2(val); 850 - return; 851 - case 3: 852 - w_c0_perfcntr3(val); 853 - return; 854 - } 855 - } 856 - 857 - static inline unsigned int 858 - mipsxx_pmu_read_control(unsigned int idx) 859 - { 860 - switch (idx) { 861 - case 0: 862 - return r_c0_perfctrl0(); 863 - case 1: 864 - return r_c0_perfctrl1(); 865 - case 2: 866 - return r_c0_perfctrl2(); 867 - case 3: 868 - return r_c0_perfctrl3(); 869 - default: 870 - WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); 871 - return 0; 872 - } 873 - } 874 - 875 - static inline void 876 - mipsxx_pmu_write_control(unsigned int idx, unsigned int val) 877 - { 878 - switch (idx) { 879 - case 0: 880 - w_c0_perfctrl0(val); 881 - return; 882 - case 1: 883 - w_c0_perfctrl1(val); 884 - return; 885 - case 2: 886 - w_c0_perfctrl2(val); 887 - return; 888 - case 3: 889 - w_c0_perfctrl3(val); 890 - return; 891 - } 892 - } 893 - 894 - #ifdef CONFIG_MIPS_MT_SMP 895 - static DEFINE_RWLOCK(pmuint_rwlock); 896 - #endif 897 177 898 178 /* 24K/34K/1004K cores can share the same event map. 
*/ 899 179 static const struct mips_perf_event mipsxxcore_event_map ··· 839 275 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T }, 840 276 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, 841 277 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID }, 278 + }; 279 + 280 + static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = { 281 + [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, 282 + [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL }, 283 + [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL }, 284 + [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL }, 285 + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL }, 286 + [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL }, 287 + [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL }, 842 288 }; 843 289 844 290 /* 24K/34K/1004K cores can share the same cache event map. */ ··· 1084 510 }, 1085 511 }; 1086 512 513 + 514 + static const struct mips_perf_event octeon_cache_map 515 + [PERF_COUNT_HW_CACHE_MAX] 516 + [PERF_COUNT_HW_CACHE_OP_MAX] 517 + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { 518 + [C(L1D)] = { 519 + [C(OP_READ)] = { 520 + [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL }, 521 + [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, 522 + }, 523 + [C(OP_WRITE)] = { 524 + [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL }, 525 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 526 + }, 527 + [C(OP_PREFETCH)] = { 528 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 529 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 530 + }, 531 + }, 532 + [C(L1I)] = { 533 + [C(OP_READ)] = { 534 + [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL }, 535 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 536 + }, 537 + [C(OP_WRITE)] = { 538 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 539 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 540 + }, 541 + [C(OP_PREFETCH)] = { 542 + [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL }, 543 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 544 + }, 545 + }, 546 + [C(LL)] = { 547 + 
[C(OP_READ)] = { 548 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 549 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 550 + }, 551 + [C(OP_WRITE)] = { 552 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 553 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 554 + }, 555 + [C(OP_PREFETCH)] = { 556 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 557 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 558 + }, 559 + }, 560 + [C(DTLB)] = { 561 + /* 562 + * Only general DTLB misses are counted use the same event for 563 + * read and write. 564 + */ 565 + [C(OP_READ)] = { 566 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 567 + [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, 568 + }, 569 + [C(OP_WRITE)] = { 570 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 571 + [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, 572 + }, 573 + [C(OP_PREFETCH)] = { 574 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 575 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 576 + }, 577 + }, 578 + [C(ITLB)] = { 579 + [C(OP_READ)] = { 580 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 581 + [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, 582 + }, 583 + [C(OP_WRITE)] = { 584 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 585 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 586 + }, 587 + [C(OP_PREFETCH)] = { 588 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 589 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 590 + }, 591 + }, 592 + [C(BPU)] = { 593 + /* Using the same code for *HW_BRANCH* */ 594 + [C(OP_READ)] = { 595 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 596 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 597 + }, 598 + [C(OP_WRITE)] = { 599 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 600 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 601 + }, 602 + [C(OP_PREFETCH)] = { 603 + [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 604 + [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 605 + }, 606 + }, 607 + }; 608 
+ 1087 609 #ifdef CONFIG_MIPS_MT_SMP 1088 - static void 1089 - check_and_calc_range(struct perf_event *event, 1090 - const struct mips_perf_event *pev) 610 + static void check_and_calc_range(struct perf_event *event, 611 + const struct mips_perf_event *pev) 1091 612 { 1092 613 struct hw_perf_event *hwc = &event->hw; 1093 614 ··· 1205 536 hwc->config_base |= M_TC_EN_ALL; 1206 537 } 1207 538 #else 1208 - static void 1209 - check_and_calc_range(struct perf_event *event, 1210 - const struct mips_perf_event *pev) 539 + static void check_and_calc_range(struct perf_event *event, 540 + const struct mips_perf_event *pev) 1211 541 { 1212 542 } 1213 543 #endif ··· 1228 560 } else if (PERF_TYPE_RAW == event->attr.type) { 1229 561 /* We are working on the global raw event. */ 1230 562 mutex_lock(&raw_event_mutex); 1231 - pev = mipspmu->map_raw_event(event->attr.config); 563 + pev = mipspmu.map_raw_event(event->attr.config); 1232 564 } else { 1233 565 /* The event type is not (yet) supported. */ 1234 566 return -EOPNOTSUPP; ··· 1273 605 hwc->config = 0; 1274 606 1275 607 if (!hwc->sample_period) { 1276 - hwc->sample_period = MAX_PERIOD; 608 + hwc->sample_period = mipspmu.max_period; 1277 609 hwc->last_period = hwc->sample_period; 1278 610 local64_set(&hwc->period_left, hwc->sample_period); 1279 611 } ··· 1286 618 } 1287 619 1288 620 event->destroy = hw_perf_event_destroy; 1289 - 1290 621 return err; 1291 622 } 1292 623 1293 624 static void pause_local_counters(void) 1294 625 { 1295 626 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1296 - int counters = mipspmu->num_counters; 627 + int ctr = mipspmu.num_counters; 1297 628 unsigned long flags; 1298 629 1299 630 local_irq_save(flags); 1300 - switch (counters) { 1301 - case 4: 1302 - cpuc->saved_ctrl[3] = r_c0_perfctrl3(); 1303 - w_c0_perfctrl3(cpuc->saved_ctrl[3] & 1304 - ~M_PERFCTL_COUNT_EVENT_WHENEVER); 1305 - case 3: 1306 - cpuc->saved_ctrl[2] = r_c0_perfctrl2(); 1307 - w_c0_perfctrl2(cpuc->saved_ctrl[2] & 1308 - 
~M_PERFCTL_COUNT_EVENT_WHENEVER); 1309 - case 2: 1310 - cpuc->saved_ctrl[1] = r_c0_perfctrl1(); 1311 - w_c0_perfctrl1(cpuc->saved_ctrl[1] & 1312 - ~M_PERFCTL_COUNT_EVENT_WHENEVER); 1313 - case 1: 1314 - cpuc->saved_ctrl[0] = r_c0_perfctrl0(); 1315 - w_c0_perfctrl0(cpuc->saved_ctrl[0] & 1316 - ~M_PERFCTL_COUNT_EVENT_WHENEVER); 1317 - } 631 + do { 632 + ctr--; 633 + cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr); 634 + mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] & 635 + ~M_PERFCTL_COUNT_EVENT_WHENEVER); 636 + } while (ctr > 0); 1318 637 local_irq_restore(flags); 1319 638 } 1320 639 1321 640 static void resume_local_counters(void) 1322 641 { 1323 642 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1324 - int counters = mipspmu->num_counters; 1325 - unsigned long flags; 643 + int ctr = mipspmu.num_counters; 1326 644 1327 - local_irq_save(flags); 1328 - switch (counters) { 1329 - case 4: 1330 - w_c0_perfctrl3(cpuc->saved_ctrl[3]); 1331 - case 3: 1332 - w_c0_perfctrl2(cpuc->saved_ctrl[2]); 1333 - case 2: 1334 - w_c0_perfctrl1(cpuc->saved_ctrl[1]); 1335 - case 1: 1336 - w_c0_perfctrl0(cpuc->saved_ctrl[0]); 1337 - } 1338 - local_irq_restore(flags); 645 + do { 646 + ctr--; 647 + mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]); 648 + } while (ctr > 0); 1339 649 } 1340 650 1341 651 static int mipsxx_pmu_handle_shared_irq(void) 1342 652 { 1343 653 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1344 654 struct perf_sample_data data; 1345 - unsigned int counters = mipspmu->num_counters; 1346 - unsigned int counter; 655 + unsigned int counters = mipspmu.num_counters; 656 + u64 counter; 1347 657 int handled = IRQ_NONE; 1348 658 struct pt_regs *regs; 1349 659 1350 660 if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26))) 1351 661 return handled; 1352 - 1353 662 /* 1354 663 * First we pause the local counters, so that when we are locked 1355 664 * here, the counters are all paused. 
When it gets locked due to ··· 1347 702 #define HANDLE_COUNTER(n) \ 1348 703 case n + 1: \ 1349 704 if (test_bit(n, cpuc->used_mask)) { \ 1350 - counter = r_c0_perfcntr ## n(); \ 1351 - if (counter & M_COUNTER_OVERFLOW) { \ 1352 - w_c0_perfcntr ## n(counter & \ 1353 - VALID_COUNT); \ 1354 - if (test_and_change_bit(n, cpuc->msbs)) \ 1355 - handle_associated_event(cpuc, \ 1356 - n, &data, regs); \ 705 + counter = mipspmu.read_counter(n); \ 706 + if (counter & mipspmu.overflow) { \ 707 + handle_associated_event(cpuc, n, &data, regs); \ 1357 708 handled = IRQ_HANDLED; \ 1358 709 } \ 1359 710 } ··· 1374 733 return handled; 1375 734 } 1376 735 1377 - static irqreturn_t 1378 - mipsxx_pmu_handle_irq(int irq, void *dev) 736 + static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) 1379 737 { 1380 738 return mipsxx_pmu_handle_shared_irq(); 1381 - } 1382 - 1383 - static void mipsxx_pmu_start(void) 1384 - { 1385 - #ifdef CONFIG_MIPS_MT_SMP 1386 - write_unlock(&pmuint_rwlock); 1387 - #endif 1388 - resume_local_counters(); 1389 - } 1390 - 1391 - /* 1392 - * MIPS performance counters can be per-TC. The control registers can 1393 - * not be directly accessed across CPUs. Hence if we want to do global 1394 - * control, we need cross CPU calls. on_each_cpu() can help us, but we 1395 - * can not make sure this function is called with interrupts enabled. So 1396 - * here we pause local counters and then grab a rwlock and leave the 1397 - * counters on other CPUs alone. If any counter interrupt raises while 1398 - * we own the write lock, simply pause local counters on that CPU and 1399 - * spin in the handler. Also we know we won't be switched to another 1400 - * CPU after pausing local counters and before grabbing the lock. 
1401 - */ 1402 - static void mipsxx_pmu_stop(void) 1403 - { 1404 - pause_local_counters(); 1405 - #ifdef CONFIG_MIPS_MT_SMP 1406 - write_lock(&pmuint_rwlock); 1407 - #endif 1408 - } 1409 - 1410 - static int 1411 - mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, 1412 - struct hw_perf_event *hwc) 1413 - { 1414 - int i; 1415 - 1416 - /* 1417 - * We only need to care the counter mask. The range has been 1418 - * checked definitely. 1419 - */ 1420 - unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff; 1421 - 1422 - for (i = mipspmu->num_counters - 1; i >= 0; i--) { 1423 - /* 1424 - * Note that some MIPS perf events can be counted by both 1425 - * even and odd counters, wheresas many other are only by 1426 - * even _or_ odd counters. This introduces an issue that 1427 - * when the former kind of event takes the counter the 1428 - * latter kind of event wants to use, then the "counter 1429 - * allocation" for the latter event will fail. In fact if 1430 - * they can be dynamically swapped, they both feel happy. 1431 - * But here we leave this issue alone for now. 1432 - */ 1433 - if (test_bit(i, &cntr_mask) && 1434 - !test_and_set_bit(i, cpuc->used_mask)) 1435 - return i; 1436 - } 1437 - 1438 - return -EAGAIN; 1439 - } 1440 - 1441 - static void 1442 - mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) 1443 - { 1444 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1445 - unsigned long flags; 1446 - 1447 - WARN_ON(idx < 0 || idx >= mipspmu->num_counters); 1448 - 1449 - local_irq_save(flags); 1450 - cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | 1451 - (evt->config_base & M_PERFCTL_CONFIG_MASK) | 1452 - /* Make sure interrupt enabled. */ 1453 - M_PERFCTL_INTERRUPT_ENABLE; 1454 - /* 1455 - * We do not actually let the counter run. Leave it until start(). 
1456 - */ 1457 - local_irq_restore(flags); 1458 - } 1459 - 1460 - static void 1461 - mipsxx_pmu_disable_event(int idx) 1462 - { 1463 - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); 1464 - unsigned long flags; 1465 - 1466 - WARN_ON(idx < 0 || idx >= mipspmu->num_counters); 1467 - 1468 - local_irq_save(flags); 1469 - cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) & 1470 - ~M_PERFCTL_COUNT_EVENT_WHENEVER; 1471 - mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]); 1472 - local_irq_restore(flags); 1473 739 } 1474 740 1475 741 /* 24K */ ··· 1440 892 * then 128 needs to be added to 15 as the input for the event config, 1441 893 * i.e., 143 (0x8F) to be used. 1442 894 */ 1443 - static const struct mips_perf_event * 1444 - mipsxx_pmu_map_raw_event(u64 config) 895 + static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) 1445 896 { 1446 897 unsigned int raw_id = config & 0xff; 1447 898 unsigned int base_id = raw_id & 0x7f; ··· 1517 970 return &raw_event; 1518 971 } 1519 972 1520 - static struct mips_pmu mipsxxcore_pmu = { 1521 - .handle_irq = mipsxx_pmu_handle_irq, 1522 - .handle_shared_irq = mipsxx_pmu_handle_shared_irq, 1523 - .start = mipsxx_pmu_start, 1524 - .stop = mipsxx_pmu_stop, 1525 - .alloc_counter = mipsxx_pmu_alloc_counter, 1526 - .read_counter = mipsxx_pmu_read_counter, 1527 - .write_counter = mipsxx_pmu_write_counter, 1528 - .enable_event = mipsxx_pmu_enable_event, 1529 - .disable_event = mipsxx_pmu_disable_event, 1530 - .map_raw_event = mipsxx_pmu_map_raw_event, 1531 - .general_event_map = &mipsxxcore_event_map, 1532 - .cache_event_map = &mipsxxcore_cache_map, 1533 - }; 973 + static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config) 974 + { 975 + unsigned int raw_id = config & 0xff; 976 + unsigned int base_id = raw_id & 0x7f; 1534 977 1535 - static struct mips_pmu mipsxx74Kcore_pmu = { 1536 - .handle_irq = mipsxx_pmu_handle_irq, 1537 - .handle_shared_irq = mipsxx_pmu_handle_shared_irq, 1538 - .start = 
mipsxx_pmu_start, 1539 - .stop = mipsxx_pmu_stop, 1540 - .alloc_counter = mipsxx_pmu_alloc_counter, 1541 - .read_counter = mipsxx_pmu_read_counter, 1542 - .write_counter = mipsxx_pmu_write_counter, 1543 - .enable_event = mipsxx_pmu_enable_event, 1544 - .disable_event = mipsxx_pmu_disable_event, 1545 - .map_raw_event = mipsxx_pmu_map_raw_event, 1546 - .general_event_map = &mipsxx74Kcore_event_map, 1547 - .cache_event_map = &mipsxx74Kcore_cache_map, 1548 - }; 978 + 979 + raw_event.cntr_mask = CNTR_ALL; 980 + raw_event.event_id = base_id; 981 + 982 + if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { 983 + if (base_id > 0x42) 984 + return ERR_PTR(-EOPNOTSUPP); 985 + } else { 986 + if (base_id > 0x3a) 987 + return ERR_PTR(-EOPNOTSUPP); 988 + } 989 + 990 + switch (base_id) { 991 + case 0x00: 992 + case 0x0f: 993 + case 0x1e: 994 + case 0x1f: 995 + case 0x2f: 996 + case 0x34: 997 + case 0x3b ... 0x3f: 998 + return ERR_PTR(-EOPNOTSUPP); 999 + default: 1000 + break; 1001 + } 1002 + 1003 + return &raw_event; 1004 + } 1549 1005 1550 1006 static int __init 1551 1007 init_hw_perf_events(void) 1552 1008 { 1553 1009 int counters, irq; 1010 + int counter_bits; 1554 1011 1555 1012 pr_info("Performance counters: "); 1556 1013 ··· 1586 1035 } 1587 1036 #endif 1588 1037 1589 - on_each_cpu(reset_counters, (void *)(long)counters, 1); 1038 + mipspmu.map_raw_event = mipsxx_pmu_map_raw_event; 1590 1039 1591 1040 switch (current_cpu_type()) { 1592 1041 case CPU_24K: 1593 - mipsxxcore_pmu.name = "mips/24K"; 1594 - mipsxxcore_pmu.num_counters = counters; 1595 - mipsxxcore_pmu.irq = irq; 1596 - mipspmu = &mipsxxcore_pmu; 1042 + mipspmu.name = "mips/24K"; 1043 + mipspmu.general_event_map = &mipsxxcore_event_map; 1044 + mipspmu.cache_event_map = &mipsxxcore_cache_map; 1597 1045 break; 1598 1046 case CPU_34K: 1599 - mipsxxcore_pmu.name = "mips/34K"; 1600 - mipsxxcore_pmu.num_counters = counters; 1601 - mipsxxcore_pmu.irq = irq; 1602 - mipspmu = &mipsxxcore_pmu; 1047 + mipspmu.name = "mips/34K"; 
1048 + mipspmu.general_event_map = &mipsxxcore_event_map; 1049 + mipspmu.cache_event_map = &mipsxxcore_cache_map; 1603 1050 break; 1604 1051 case CPU_74K: 1605 - mipsxx74Kcore_pmu.name = "mips/74K"; 1606 - mipsxx74Kcore_pmu.num_counters = counters; 1607 - mipsxx74Kcore_pmu.irq = irq; 1608 - mipspmu = &mipsxx74Kcore_pmu; 1052 + mipspmu.name = "mips/74K"; 1053 + mipspmu.general_event_map = &mipsxx74Kcore_event_map; 1054 + mipspmu.cache_event_map = &mipsxx74Kcore_cache_map; 1609 1055 break; 1610 1056 case CPU_1004K: 1611 - mipsxxcore_pmu.name = "mips/1004K"; 1612 - mipsxxcore_pmu.num_counters = counters; 1613 - mipsxxcore_pmu.irq = irq; 1614 - mipspmu = &mipsxxcore_pmu; 1057 + mipspmu.name = "mips/1004K"; 1058 + mipspmu.general_event_map = &mipsxxcore_event_map; 1059 + mipspmu.cache_event_map = &mipsxxcore_cache_map; 1060 + break; 1061 + case CPU_CAVIUM_OCTEON: 1062 + case CPU_CAVIUM_OCTEON_PLUS: 1063 + case CPU_CAVIUM_OCTEON2: 1064 + mipspmu.name = "octeon"; 1065 + mipspmu.general_event_map = &octeon_event_map; 1066 + mipspmu.cache_event_map = &octeon_cache_map; 1067 + mipspmu.map_raw_event = octeon_pmu_map_raw_event; 1615 1068 break; 1616 1069 default: 1617 1070 pr_cont("Either hardware does not support performance " ··· 1623 1068 return -ENODEV; 1624 1069 } 1625 1070 1626 - if (mipspmu) 1627 - pr_cont("%s PMU enabled, %d counters available to each " 1628 - "CPU, irq %d%s\n", mipspmu->name, counters, irq, 1629 - irq < 0 ? 
" (share with timer interrupt)" : ""); 1071 + mipspmu.num_counters = counters; 1072 + mipspmu.irq = irq; 1073 + 1074 + if (read_c0_perfctrl0() & M_PERFCTL_WIDE) { 1075 + mipspmu.max_period = (1ULL << 63) - 1; 1076 + mipspmu.valid_count = (1ULL << 63) - 1; 1077 + mipspmu.overflow = 1ULL << 63; 1078 + mipspmu.read_counter = mipsxx_pmu_read_counter_64; 1079 + mipspmu.write_counter = mipsxx_pmu_write_counter_64; 1080 + counter_bits = 64; 1081 + } else { 1082 + mipspmu.max_period = (1ULL << 31) - 1; 1083 + mipspmu.valid_count = (1ULL << 31) - 1; 1084 + mipspmu.overflow = 1ULL << 31; 1085 + mipspmu.read_counter = mipsxx_pmu_read_counter; 1086 + mipspmu.write_counter = mipsxx_pmu_write_counter; 1087 + counter_bits = 32; 1088 + } 1089 + 1090 + on_each_cpu(reset_counters, (void *)(long)counters, 1); 1091 + 1092 + pr_cont("%s PMU enabled, %d %d-bit counters available to each " 1093 + "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq, 1094 + irq < 0 ? " (share with timer interrupt)" : ""); 1630 1095 1631 1096 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); 1632 1097 1633 1098 return 0; 1634 1099 } 1635 1100 early_initcall(init_hw_perf_events); 1636 - 1637 - #endif /* defined(CONFIG_CPU_MIPS32)... */
+1 -1
arch/mips/kernel/scall32-o32.S
··· 496 496 sys sys_lookup_dcookie 4 497 497 sys sys_epoll_create 1 498 498 sys sys_epoll_ctl 4 499 - sys sys_epoll_wait 3 /* 4250 */ 499 + sys sys_epoll_wait 4 /* 4250 */ 500 500 sys sys_remap_file_pages 5 501 501 sys sys_set_tid_address 1 502 502 sys sys_restart_syscall 0
+6
arch/mips/mm/c-octeon.c
··· 169 169 octeon_flush_icache_all_cores(vma); 170 170 } 171 171 172 + static void octeon_flush_kernel_vmap_range(unsigned long vaddr, int size) 173 + { 174 + BUG(); 175 + } 172 176 173 177 /** 174 178 * Probe Octeon's caches ··· 276 272 flush_data_cache_page = octeon_flush_data_cache_page; 277 273 flush_icache_range = octeon_flush_icache_range; 278 274 local_flush_icache_range = local_octeon_flush_icache_range; 275 + 276 + __flush_kernel_vmap_range = octeon_flush_kernel_vmap_range; 279 277 280 278 build_clear_page(); 281 279 build_copy_page();
+7
arch/mips/mm/c-r3k.c
··· 299 299 write_c0_status(flags); 300 300 } 301 301 302 + static void r3k_flush_kernel_vmap_range(unsigned long vaddr, int size) 303 + { 304 + BUG(); 305 + } 306 + 302 307 static void r3k_dma_cache_wback_inv(unsigned long start, unsigned long size) 303 308 { 304 309 /* Catch bad driver code */ ··· 327 322 flush_cache_page = r3k_flush_cache_page; 328 323 flush_icache_range = r3k_flush_icache_range; 329 324 local_flush_icache_range = r3k_flush_icache_range; 325 + 326 + __flush_kernel_vmap_range = r3k_flush_kernel_vmap_range; 330 327 331 328 flush_cache_sigtramp = r3k_flush_cache_sigtramp; 332 329 local_flush_data_cache_page = local_r3k_flush_data_cache_page;
+35
arch/mips/mm/c-r4k.c
··· 722 722 r4k_blast_icache(); 723 723 } 724 724 725 + struct flush_kernel_vmap_range_args { 726 + unsigned long vaddr; 727 + int size; 728 + }; 729 + 730 + static inline void local_r4k_flush_kernel_vmap_range(void *args) 731 + { 732 + struct flush_kernel_vmap_range_args *vmra = args; 733 + unsigned long vaddr = vmra->vaddr; 734 + int size = vmra->size; 735 + 736 + /* 737 + * Aliases only affect the primary caches so don't bother with 738 + * S-caches or T-caches. 739 + */ 740 + if (cpu_has_safe_index_cacheops && size >= dcache_size) 741 + r4k_blast_dcache(); 742 + else { 743 + R4600_HIT_CACHEOP_WAR_IMPL; 744 + blast_dcache_range(vaddr, vaddr + size); 745 + } 746 + } 747 + 748 + static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) 749 + { 750 + struct flush_kernel_vmap_range_args args; 751 + 752 + args.vaddr = (unsigned long) vaddr; 753 + args.size = size; 754 + 755 + r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args); 756 + } 757 + 725 758 static inline void rm7k_erratum31(void) 726 759 { 727 760 const unsigned long ic_lsize = 32; ··· 1435 1402 flush_cache_mm = r4k_flush_cache_mm; 1436 1403 flush_cache_page = r4k_flush_cache_page; 1437 1404 flush_cache_range = r4k_flush_cache_range; 1405 + 1406 + __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; 1438 1407 1439 1408 flush_cache_sigtramp = r4k_flush_cache_sigtramp; 1440 1409 flush_icache_all = r4k_flush_icache_all;
+7
arch/mips/mm/c-tx39.c
··· 253 253 } 254 254 } 255 255 256 + static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size) 257 + { 258 + BUG(); 259 + } 260 + 256 261 static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size) 257 262 { 258 263 unsigned long end; ··· 398 393 flush_cache_page = tx39_flush_cache_page; 399 394 flush_icache_range = tx39_flush_icache_range; 400 395 local_flush_icache_range = tx39_flush_icache_range; 396 + 397 + __flush_kernel_vmap_range = tx39_flush_kernel_vmap_range; 401 398 402 399 flush_cache_sigtramp = tx39_flush_cache_sigtramp; 403 400 local_flush_data_cache_page = local_tx39_flush_data_cache_page;
+5
arch/mips/mm/cache.c
··· 35 35 void (*__flush_cache_vmap)(void); 36 36 void (*__flush_cache_vunmap)(void); 37 37 38 + void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size); 39 + void (*__invalidate_kernel_vmap_range)(unsigned long vaddr, int size); 40 + 41 + EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range); 42 + 38 43 /* MIPS specific cache operations */ 39 44 void (*flush_cache_sigtramp)(unsigned long addr); 40 45 void (*local_flush_data_cache_page)(void * addr);
+2 -2
arch/mips/mm/tlb-r3k.c
··· 223 223 local_irq_restore(flags); 224 224 } 225 225 226 - void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, 227 - unsigned long entryhi, unsigned long pagemask) 226 + void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, 227 + unsigned long entryhi, unsigned long pagemask) 228 228 { 229 229 unsigned long flags; 230 230 unsigned long old_ctx;
+2 -2
arch/mips/mm/tlb-r4k.c
··· 337 337 EXIT_CRITICAL(flags); 338 338 } 339 339 340 - void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, 341 - unsigned long entryhi, unsigned long pagemask) 340 + void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, 341 + unsigned long entryhi, unsigned long pagemask) 342 342 { 343 343 unsigned long flags; 344 344 unsigned long wired;
+5
arch/mips/netlogic/Platform
··· 5 5 cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic 6 6 7 7 # 8 + # use mips64 if xlr is not available 9 + # 10 + cflags-$(CONFIG_NLM_XLR) += $(call cc-option,-march=xlr,-march=mips64) 11 + 12 + # 8 13 # NETLOGIC XLR/XLS SoC, Simulator and boards 9 14 # 10 15 core-$(CONFIG_NLM_XLR) += arch/mips/netlogic/xlr/
+2 -2
arch/mips/netlogic/xlr/setup.c
··· 53 53 unsigned long nlm_common_ebase = 0x0; 54 54 struct psb_info nlm_prom_info; 55 55 56 - static void nlm_early_serial_setup(void) 56 + static void __init nlm_early_serial_setup(void) 57 57 { 58 58 struct uart_port s; 59 59 nlm_reg_t *uart_base; ··· 101 101 /* Nothing yet */ 102 102 } 103 103 104 - static void build_arcs_cmdline(int *argv) 104 + static void __init build_arcs_cmdline(int *argv) 105 105 { 106 106 int i, remain, len; 107 107 char *arg;
+5 -1
arch/mips/netlogic/xlr/smp.c
··· 158 158 159 159 num_cpus = 1; 160 160 for (i = 0; i < NR_CPUS; i++) { 161 + /* 162 + * BSP is not set in nlm_cpu_ready array, it is only for 163 + * ASPs (goto see smpboot.S) 164 + */ 161 165 if (nlm_cpu_ready[i]) { 162 166 cpu_set(i, phys_cpu_present_map); 163 167 __cpu_number_map[i] = num_cpus; ··· 195 191 196 192 unsigned long secondary_entry_point; 197 193 198 - int nlm_wakeup_secondary_cpus(u32 wakeup_mask) 194 + int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask) 199 195 { 200 196 unsigned int tid, pid, ipi, i, boot_cpu; 201 197 void *reset_vec;
+11 -5
arch/mips/netlogic/xlr/smpboot.S
··· 32 32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 33 */ 34 34 35 + #include <linux/init.h> 36 + 35 37 #include <asm/asm.h> 36 38 #include <asm/asm-offsets.h> 37 39 #include <asm/regdef.h> 38 40 #include <asm/mipsregs.h> 39 41 40 - 41 - /* Don't jump to linux function from Bootloader stack. Change it 42 - * here. Kernel might allocate bootloader memory before all the CPUs are 43 - * brought up (eg: Inode cache region) and we better don't overwrite this 44 - * memory 42 + /* 43 + * Early code for secondary CPUs. This will get them out of the bootloader 44 + * code and into linux. Needed because the bootloader area will be taken 45 + * and initialized by linux. 45 46 */ 47 + __CPUINIT 46 48 NESTED(prom_pre_boot_secondary_cpus, 16, sp) 47 49 .set mips64 48 50 mfc0 t0, $15, 1 # read ebase ··· 75 73 jr t0 76 74 nop 77 75 END(prom_pre_boot_secondary_cpus) 76 + __FINIT 78 77 78 + /* 79 + * NMI code, used for CPU wakeup, copied to reset entry 80 + */ 79 81 NESTED(nlm_boot_smp_nmi, 0, sp) 80 82 .set push 81 83 .set noat
+1 -2
arch/mips/pci/Makefile
··· 18 18 obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o 19 19 obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \ 20 20 ops-bcm63xx.o 21 + obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o 21 22 22 23 # 23 24 # These are still pretty much in the old state, watch, go blind. 24 25 # 25 26 obj-$(CONFIG_LASAT) += pci-lasat.o 26 27 obj-$(CONFIG_MIPS_COBALT) += fixup-cobalt.o 27 - obj-$(CONFIG_SOC_AU1500) += fixup-au1000.o ops-au1000.o 28 - obj-$(CONFIG_SOC_AU1550) += fixup-au1000.o ops-au1000.o 29 28 obj-$(CONFIG_SOC_PNX8550) += fixup-pnx8550.o ops-pnx8550.o 30 29 obj-$(CONFIG_LEMOTE_FULOONG2E) += fixup-fuloong2e.o ops-loongson2.o 31 30 obj-$(CONFIG_LEMOTE_MACH2F) += fixup-lemote2f.o ops-loongson2.o
-43
arch/mips/pci/fixup-au1000.c
··· 1 - /* 2 - * BRIEF MODULE DESCRIPTION 3 - * Board specific PCI fixups. 4 - * 5 - * Copyright 2001-2003, 2008 MontaVista Software Inc. 6 - * Author: MontaVista Software, Inc. <source@mvista.com> 7 - * 8 - * This program is free software; you can redistribute it and/or modify it 9 - * under the terms of the GNU General Public License as published by the 10 - * Free Software Foundation; either version 2 of the License, or (at your 11 - * option) any later version. 12 - * 13 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 14 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 15 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 16 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 17 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 18 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 19 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 20 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 21 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 22 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 23 - * 24 - * You should have received a copy of the GNU General Public License along 25 - * with this program; if not, write to the Free Software Foundation, Inc., 26 - * 675 Mass Ave, Cambridge, MA 02139, USA. 27 - */ 28 - 29 - #include <linux/pci.h> 30 - #include <linux/init.h> 31 - 32 - extern char irq_tab_alchemy[][5]; 33 - 34 - int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 35 - { 36 - return irq_tab_alchemy[slot][pin]; 37 - } 38 - 39 - /* Do platform specific device initialization at pci_enable_device() time */ 40 - int pcibios_plat_dev_init(struct pci_dev *dev) 41 - { 42 - return 0; 43 - }
-308
arch/mips/pci/ops-au1000.c
··· 1 - /* 2 - * BRIEF MODULE DESCRIPTION 3 - * Alchemy/AMD Au1xx0 PCI support. 4 - * 5 - * Copyright 2001-2003, 2007-2008 MontaVista Software Inc. 6 - * Author: MontaVista Software, Inc. <source@mvista.com> 7 - * 8 - * Support for all devices (greater than 16) added by David Gathright. 9 - * 10 - * This program is free software; you can redistribute it and/or modify it 11 - * under the terms of the GNU General Public License as published by the 12 - * Free Software Foundation; either version 2 of the License, or (at your 13 - * option) any later version. 14 - * 15 - * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED 16 - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 17 - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN 18 - * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF 21 - * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 22 - * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 - * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 - * 26 - * You should have received a copy of the GNU General Public License along 27 - * with this program; if not, write to the Free Software Foundation, Inc., 28 - * 675 Mass Ave, Cambridge, MA 02139, USA. 29 - */ 30 - 31 - #include <linux/types.h> 32 - #include <linux/pci.h> 33 - #include <linux/kernel.h> 34 - #include <linux/init.h> 35 - #include <linux/vmalloc.h> 36 - 37 - #include <asm/mach-au1x00/au1000.h> 38 - 39 - #undef DEBUG 40 - #ifdef DEBUG 41 - #define DBG(x...) printk(KERN_DEBUG x) 42 - #else 43 - #define DBG(x...) 
44 - #endif 45 - 46 - #define PCI_ACCESS_READ 0 47 - #define PCI_ACCESS_WRITE 1 48 - 49 - int (*board_pci_idsel)(unsigned int devsel, int assert); 50 - 51 - void mod_wired_entry(int entry, unsigned long entrylo0, 52 - unsigned long entrylo1, unsigned long entryhi, 53 - unsigned long pagemask) 54 - { 55 - unsigned long old_pagemask; 56 - unsigned long old_ctx; 57 - 58 - /* Save old context and create impossible VPN2 value */ 59 - old_ctx = read_c0_entryhi() & 0xff; 60 - old_pagemask = read_c0_pagemask(); 61 - write_c0_index(entry); 62 - write_c0_pagemask(pagemask); 63 - write_c0_entryhi(entryhi); 64 - write_c0_entrylo0(entrylo0); 65 - write_c0_entrylo1(entrylo1); 66 - tlb_write_indexed(); 67 - write_c0_entryhi(old_ctx); 68 - write_c0_pagemask(old_pagemask); 69 - } 70 - 71 - static struct vm_struct *pci_cfg_vm; 72 - static int pci_cfg_wired_entry; 73 - static unsigned long last_entryLo0, last_entryLo1; 74 - 75 - /* 76 - * We can't ioremap the entire pci config space because it's too large. 77 - * Nor can we call ioremap dynamically because some device drivers use 78 - * the PCI config routines from within interrupt handlers and that 79 - * becomes a problem in get_vm_area(). We use one wired TLB to handle 80 - * all config accesses for all busses. 
81 - */ 82 - void __init au1x_pci_cfg_init(void) 83 - { 84 - /* Reserve a wired entry for PCI config accesses */ 85 - pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP); 86 - if (!pci_cfg_vm) 87 - panic(KERN_ERR "PCI unable to get vm area\n"); 88 - pci_cfg_wired_entry = read_c0_wired(); 89 - add_wired_entry(0, 0, (unsigned long)pci_cfg_vm->addr, PM_4K); 90 - last_entryLo0 = last_entryLo1 = 0xffffffff; 91 - } 92 - 93 - static int config_access(unsigned char access_type, struct pci_bus *bus, 94 - unsigned int dev_fn, unsigned char where, u32 *data) 95 - { 96 - #if defined(CONFIG_SOC_AU1500) || defined(CONFIG_SOC_AU1550) 97 - unsigned int device = PCI_SLOT(dev_fn); 98 - unsigned int function = PCI_FUNC(dev_fn); 99 - unsigned long offset, status; 100 - unsigned long cfg_base; 101 - unsigned long flags; 102 - int error = PCIBIOS_SUCCESSFUL; 103 - unsigned long entryLo0, entryLo1; 104 - 105 - if (device > 19) { 106 - *data = 0xffffffff; 107 - return -1; 108 - } 109 - 110 - local_irq_save(flags); 111 - au_writel(((0x2000 << 16) | (au_readl(Au1500_PCI_STATCMD) & 0xffff)), 112 - Au1500_PCI_STATCMD); 113 - au_sync_udelay(1); 114 - 115 - /* 116 - * Allow board vendors to implement their own off-chip IDSEL. 117 - * If it doesn't succeed, may as well bail out at this point. 
118 - */ 119 - if (board_pci_idsel && board_pci_idsel(device, 1) == 0) { 120 - *data = 0xffffffff; 121 - local_irq_restore(flags); 122 - return -1; 123 - } 124 - 125 - /* Setup the config window */ 126 - if (bus->number == 0) 127 - cfg_base = (1 << device) << 11; 128 - else 129 - cfg_base = 0x80000000 | (bus->number << 16) | (device << 11); 130 - 131 - /* Setup the lower bits of the 36-bit address */ 132 - offset = (function << 8) | (where & ~0x3); 133 - /* Pick up any address that falls below the page mask */ 134 - offset |= cfg_base & ~PAGE_MASK; 135 - 136 - /* Page boundary */ 137 - cfg_base = cfg_base & PAGE_MASK; 138 - 139 - /* 140 - * To improve performance, if the current device is the same as 141 - * the last device accessed, we don't touch the TLB. 142 - */ 143 - entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7; 144 - entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7; 145 - if ((entryLo0 != last_entryLo0) || (entryLo1 != last_entryLo1)) { 146 - mod_wired_entry(pci_cfg_wired_entry, entryLo0, entryLo1, 147 - (unsigned long)pci_cfg_vm->addr, PM_4K); 148 - last_entryLo0 = entryLo0; 149 - last_entryLo1 = entryLo1; 150 - } 151 - 152 - if (access_type == PCI_ACCESS_WRITE) 153 - au_writel(*data, (int)(pci_cfg_vm->addr + offset)); 154 - else 155 - *data = au_readl((int)(pci_cfg_vm->addr + offset)); 156 - 157 - au_sync_udelay(2); 158 - 159 - DBG("cfg_access %d bus->number %u dev %u at %x *data %x conf %lx\n", 160 - access_type, bus->number, device, where, *data, offset); 161 - 162 - /* Check master abort */ 163 - status = au_readl(Au1500_PCI_STATCMD); 164 - 165 - if (status & (1 << 29)) { 166 - *data = 0xffffffff; 167 - error = -1; 168 - DBG("Au1x Master Abort\n"); 169 - } else if ((status >> 28) & 0xf) { 170 - DBG("PCI ERR detected: device %u, status %lx\n", 171 - device, (status >> 28) & 0xf); 172 - 173 - /* Clear errors */ 174 - au_writel(status & 0xf000ffff, Au1500_PCI_STATCMD); 175 - 176 - *data = 0xffffffff; 177 - error = -1; 178 - } 
179 - 180 - /* Take away the IDSEL. */ 181 - if (board_pci_idsel) 182 - (void)board_pci_idsel(device, 0); 183 - 184 - local_irq_restore(flags); 185 - return error; 186 - #endif 187 - } 188 - 189 - static int read_config_byte(struct pci_bus *bus, unsigned int devfn, 190 - int where, u8 *val) 191 - { 192 - u32 data; 193 - int ret; 194 - 195 - ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); 196 - if (where & 1) 197 - data >>= 8; 198 - if (where & 2) 199 - data >>= 16; 200 - *val = data & 0xff; 201 - return ret; 202 - } 203 - 204 - static int read_config_word(struct pci_bus *bus, unsigned int devfn, 205 - int where, u16 *val) 206 - { 207 - u32 data; 208 - int ret; 209 - 210 - ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); 211 - if (where & 2) 212 - data >>= 16; 213 - *val = data & 0xffff; 214 - return ret; 215 - } 216 - 217 - static int read_config_dword(struct pci_bus *bus, unsigned int devfn, 218 - int where, u32 *val) 219 - { 220 - int ret; 221 - 222 - ret = config_access(PCI_ACCESS_READ, bus, devfn, where, val); 223 - return ret; 224 - } 225 - 226 - static int write_config_byte(struct pci_bus *bus, unsigned int devfn, 227 - int where, u8 val) 228 - { 229 - u32 data = 0; 230 - 231 - if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) 232 - return -1; 233 - 234 - data = (data & ~(0xff << ((where & 3) << 3))) | 235 - (val << ((where & 3) << 3)); 236 - 237 - if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) 238 - return -1; 239 - 240 - return PCIBIOS_SUCCESSFUL; 241 - } 242 - 243 - static int write_config_word(struct pci_bus *bus, unsigned int devfn, 244 - int where, u16 val) 245 - { 246 - u32 data = 0; 247 - 248 - if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) 249 - return -1; 250 - 251 - data = (data & ~(0xffff << ((where & 3) << 3))) | 252 - (val << ((where & 3) << 3)); 253 - 254 - if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) 255 - return -1; 256 - 257 - return 
PCIBIOS_SUCCESSFUL; 258 - } 259 - 260 - static int write_config_dword(struct pci_bus *bus, unsigned int devfn, 261 - int where, u32 val) 262 - { 263 - if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val)) 264 - return -1; 265 - 266 - return PCIBIOS_SUCCESSFUL; 267 - } 268 - 269 - static int config_read(struct pci_bus *bus, unsigned int devfn, 270 - int where, int size, u32 *val) 271 - { 272 - switch (size) { 273 - case 1: { 274 - u8 _val; 275 - int rc = read_config_byte(bus, devfn, where, &_val); 276 - 277 - *val = _val; 278 - return rc; 279 - } 280 - case 2: { 281 - u16 _val; 282 - int rc = read_config_word(bus, devfn, where, &_val); 283 - 284 - *val = _val; 285 - return rc; 286 - } 287 - default: 288 - return read_config_dword(bus, devfn, where, val); 289 - } 290 - } 291 - 292 - static int config_write(struct pci_bus *bus, unsigned int devfn, 293 - int where, int size, u32 val) 294 - { 295 - switch (size) { 296 - case 1: 297 - return write_config_byte(bus, devfn, where, (u8) val); 298 - case 2: 299 - return write_config_word(bus, devfn, where, (u16) val); 300 - default: 301 - return write_config_dword(bus, devfn, where, val); 302 - } 303 - } 304 - 305 - struct pci_ops au1x_pci_ops = { 306 - config_read, 307 - config_write 308 - };
+516
arch/mips/pci/pci-alchemy.c
··· 1 + /* 2 + * Alchemy PCI host mode support. 3 + * 4 + * Copyright 2001-2003, 2007-2008 MontaVista Software Inc. 5 + * Author: MontaVista Software, Inc. <source@mvista.com> 6 + * 7 + * Support for all devices (greater than 16) added by David Gathright. 8 + */ 9 + 10 + #include <linux/types.h> 11 + #include <linux/pci.h> 12 + #include <linux/platform_device.h> 13 + #include <linux/kernel.h> 14 + #include <linux/init.h> 15 + #include <linux/vmalloc.h> 16 + 17 + #include <asm/mach-au1x00/au1000.h> 18 + 19 + #ifdef CONFIG_DEBUG_PCI 20 + #define DBG(x...) printk(KERN_DEBUG x) 21 + #else 22 + #define DBG(x...) do {} while (0) 23 + #endif 24 + 25 + #define PCI_ACCESS_READ 0 26 + #define PCI_ACCESS_WRITE 1 27 + 28 + struct alchemy_pci_context { 29 + struct pci_controller alchemy_pci_ctrl; /* leave as first member! */ 30 + void __iomem *regs; /* ctrl base */ 31 + /* tools for wired entry for config space access */ 32 + unsigned long last_elo0; 33 + unsigned long last_elo1; 34 + int wired_entry; 35 + struct vm_struct *pci_cfg_vm; 36 + 37 + unsigned long pm[12]; 38 + 39 + int (*board_map_irq)(const struct pci_dev *d, u8 slot, u8 pin); 40 + int (*board_pci_idsel)(unsigned int devsel, int assert); 41 + }; 42 + 43 + /* IO/MEM resources for PCI. 
Keep the memres in sync with __fixup_bigphys_addr 44 + * in arch/mips/alchemy/common/setup.c 45 + */ 46 + static struct resource alchemy_pci_def_memres = { 47 + .start = ALCHEMY_PCI_MEMWIN_START, 48 + .end = ALCHEMY_PCI_MEMWIN_END, 49 + .name = "PCI memory space", 50 + .flags = IORESOURCE_MEM 51 + }; 52 + 53 + static struct resource alchemy_pci_def_iores = { 54 + .start = ALCHEMY_PCI_IOWIN_START, 55 + .end = ALCHEMY_PCI_IOWIN_END, 56 + .name = "PCI IO space", 57 + .flags = IORESOURCE_IO 58 + }; 59 + 60 + static void mod_wired_entry(int entry, unsigned long entrylo0, 61 + unsigned long entrylo1, unsigned long entryhi, 62 + unsigned long pagemask) 63 + { 64 + unsigned long old_pagemask; 65 + unsigned long old_ctx; 66 + 67 + /* Save old context and create impossible VPN2 value */ 68 + old_ctx = read_c0_entryhi() & 0xff; 69 + old_pagemask = read_c0_pagemask(); 70 + write_c0_index(entry); 71 + write_c0_pagemask(pagemask); 72 + write_c0_entryhi(entryhi); 73 + write_c0_entrylo0(entrylo0); 74 + write_c0_entrylo1(entrylo1); 75 + tlb_write_indexed(); 76 + write_c0_entryhi(old_ctx); 77 + write_c0_pagemask(old_pagemask); 78 + } 79 + 80 + static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx) 81 + { 82 + ctx->wired_entry = read_c0_wired(); 83 + add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K); 84 + ctx->last_elo0 = ctx->last_elo1 = ~0; 85 + } 86 + 87 + static int config_access(unsigned char access_type, struct pci_bus *bus, 88 + unsigned int dev_fn, unsigned char where, u32 *data) 89 + { 90 + struct alchemy_pci_context *ctx = bus->sysdata; 91 + unsigned int device = PCI_SLOT(dev_fn); 92 + unsigned int function = PCI_FUNC(dev_fn); 93 + unsigned long offset, status, cfg_base, flags, entryLo0, entryLo1, r; 94 + int error = PCIBIOS_SUCCESSFUL; 95 + 96 + if (device > 19) { 97 + *data = 0xffffffff; 98 + return -1; 99 + } 100 + 101 + /* YAMON on all db1xxx boards wipes the TLB and writes zero to C0_wired 102 + * on resume, clearing our wired entry. 
Unfortunately the ->resume() 103 + * callback is called way way way too late (and ->suspend() too early) 104 + * to have them destroy and recreate it. Instead just test if c0_wired 105 + * is now lower than the index we retrieved before suspending and then 106 + * recreate the entry if necessary. Of course this is totally bonkers 107 + * and breaks as soon as someone else adds another wired entry somewhere 108 + * else. Anyone have any ideas how to handle this better? 109 + */ 110 + if (unlikely(read_c0_wired() < ctx->wired_entry)) 111 + alchemy_pci_wired_entry(ctx); 112 + 113 + local_irq_save(flags); 114 + r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff; 115 + r |= PCI_STATCMD_STATUS(0x2000); 116 + __raw_writel(r, ctx->regs + PCI_REG_STATCMD); 117 + wmb(); 118 + 119 + /* Allow board vendors to implement their own off-chip IDSEL. 120 + * If it doesn't succeed, may as well bail out at this point. 121 + */ 122 + if (ctx->board_pci_idsel(device, 1) == 0) { 123 + *data = 0xffffffff; 124 + local_irq_restore(flags); 125 + return -1; 126 + } 127 + 128 + /* Setup the config window */ 129 + if (bus->number == 0) 130 + cfg_base = (1 << device) << 11; 131 + else 132 + cfg_base = 0x80000000 | (bus->number << 16) | (device << 11); 133 + 134 + /* Setup the lower bits of the 36-bit address */ 135 + offset = (function << 8) | (where & ~0x3); 136 + /* Pick up any address that falls below the page mask */ 137 + offset |= cfg_base & ~PAGE_MASK; 138 + 139 + /* Page boundary */ 140 + cfg_base = cfg_base & PAGE_MASK; 141 + 142 + /* To improve performance, if the current device is the same as 143 + * the last device accessed, we don't touch the TLB. 
144 + */ 145 + entryLo0 = (6 << 26) | (cfg_base >> 6) | (2 << 3) | 7; 146 + entryLo1 = (6 << 26) | (cfg_base >> 6) | (0x1000 >> 6) | (2 << 3) | 7; 147 + if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) { 148 + mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1, 149 + (unsigned long)ctx->pci_cfg_vm->addr, PM_4K); 150 + ctx->last_elo0 = entryLo0; 151 + ctx->last_elo1 = entryLo1; 152 + } 153 + 154 + if (access_type == PCI_ACCESS_WRITE) 155 + __raw_writel(*data, ctx->pci_cfg_vm->addr + offset); 156 + else 157 + *data = __raw_readl(ctx->pci_cfg_vm->addr + offset); 158 + wmb(); 159 + 160 + DBG("alchemy-pci: cfg access %d bus %u dev %u at %x dat %x conf %lx\n", 161 + access_type, bus->number, device, where, *data, offset); 162 + 163 + /* check for errors, master abort */ 164 + status = __raw_readl(ctx->regs + PCI_REG_STATCMD); 165 + if (status & (1 << 29)) { 166 + *data = 0xffffffff; 167 + error = -1; 168 + DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d", 169 + access_type, bus->number, device); 170 + } else if ((status >> 28) & 0xf) { 171 + DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n", 172 + device, (status >> 28) & 0xf); 173 + 174 + /* clear errors */ 175 + __raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD); 176 + 177 + *data = 0xffffffff; 178 + error = -1; 179 + } 180 + 181 + /* Take away the IDSEL. 
*/ 182 + (void)ctx->board_pci_idsel(device, 0); 183 + 184 + local_irq_restore(flags); 185 + return error; 186 + } 187 + 188 + static int read_config_byte(struct pci_bus *bus, unsigned int devfn, 189 + int where, u8 *val) 190 + { 191 + u32 data; 192 + int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); 193 + 194 + if (where & 1) 195 + data >>= 8; 196 + if (where & 2) 197 + data >>= 16; 198 + *val = data & 0xff; 199 + return ret; 200 + } 201 + 202 + static int read_config_word(struct pci_bus *bus, unsigned int devfn, 203 + int where, u16 *val) 204 + { 205 + u32 data; 206 + int ret = config_access(PCI_ACCESS_READ, bus, devfn, where, &data); 207 + 208 + if (where & 2) 209 + data >>= 16; 210 + *val = data & 0xffff; 211 + return ret; 212 + } 213 + 214 + static int read_config_dword(struct pci_bus *bus, unsigned int devfn, 215 + int where, u32 *val) 216 + { 217 + return config_access(PCI_ACCESS_READ, bus, devfn, where, val); 218 + } 219 + 220 + static int write_config_byte(struct pci_bus *bus, unsigned int devfn, 221 + int where, u8 val) 222 + { 223 + u32 data = 0; 224 + 225 + if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) 226 + return -1; 227 + 228 + data = (data & ~(0xff << ((where & 3) << 3))) | 229 + (val << ((where & 3) << 3)); 230 + 231 + if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) 232 + return -1; 233 + 234 + return PCIBIOS_SUCCESSFUL; 235 + } 236 + 237 + static int write_config_word(struct pci_bus *bus, unsigned int devfn, 238 + int where, u16 val) 239 + { 240 + u32 data = 0; 241 + 242 + if (config_access(PCI_ACCESS_READ, bus, devfn, where, &data)) 243 + return -1; 244 + 245 + data = (data & ~(0xffff << ((where & 3) << 3))) | 246 + (val << ((where & 3) << 3)); 247 + 248 + if (config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data)) 249 + return -1; 250 + 251 + return PCIBIOS_SUCCESSFUL; 252 + } 253 + 254 + static int write_config_dword(struct pci_bus *bus, unsigned int devfn, 255 + int where, u32 val) 256 + { 257 
+ return config_access(PCI_ACCESS_WRITE, bus, devfn, where, &val); 258 + } 259 + 260 + static int alchemy_pci_read(struct pci_bus *bus, unsigned int devfn, 261 + int where, int size, u32 *val) 262 + { 263 + switch (size) { 264 + case 1: { 265 + u8 _val; 266 + int rc = read_config_byte(bus, devfn, where, &_val); 267 + 268 + *val = _val; 269 + return rc; 270 + } 271 + case 2: { 272 + u16 _val; 273 + int rc = read_config_word(bus, devfn, where, &_val); 274 + 275 + *val = _val; 276 + return rc; 277 + } 278 + default: 279 + return read_config_dword(bus, devfn, where, val); 280 + } 281 + } 282 + 283 + static int alchemy_pci_write(struct pci_bus *bus, unsigned int devfn, 284 + int where, int size, u32 val) 285 + { 286 + switch (size) { 287 + case 1: 288 + return write_config_byte(bus, devfn, where, (u8) val); 289 + case 2: 290 + return write_config_word(bus, devfn, where, (u16) val); 291 + default: 292 + return write_config_dword(bus, devfn, where, val); 293 + } 294 + } 295 + 296 + static struct pci_ops alchemy_pci_ops = { 297 + .read = alchemy_pci_read, 298 + .write = alchemy_pci_write, 299 + }; 300 + 301 + static int alchemy_pci_def_idsel(unsigned int devsel, int assert) 302 + { 303 + return 1; /* success */ 304 + } 305 + 306 + static int __devinit alchemy_pci_probe(struct platform_device *pdev) 307 + { 308 + struct alchemy_pci_platdata *pd = pdev->dev.platform_data; 309 + struct alchemy_pci_context *ctx; 310 + void __iomem *virt_io; 311 + unsigned long val; 312 + struct resource *r; 313 + int ret; 314 + 315 + /* need at least PCI IRQ mapping table */ 316 + if (!pd) { 317 + dev_err(&pdev->dev, "need platform data for PCI setup\n"); 318 + ret = -ENODEV; 319 + goto out; 320 + } 321 + 322 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 323 + if (!ctx) { 324 + dev_err(&pdev->dev, "no memory for pcictl context\n"); 325 + ret = -ENOMEM; 326 + goto out; 327 + } 328 + 329 + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 330 + if (!r) { 331 + dev_err(&pdev->dev, "no pcictl ctrl 
regs resource\n"); 332 + ret = -ENODEV; 333 + goto out1; 334 + } 335 + 336 + if (!request_mem_region(r->start, resource_size(r), pdev->name)) { 337 + dev_err(&pdev->dev, "cannot claim pci regs\n"); 338 + ret = -ENODEV; 339 + goto out1; 340 + } 341 + 342 + ctx->regs = ioremap_nocache(r->start, resource_size(r)); 343 + if (!ctx->regs) { 344 + dev_err(&pdev->dev, "cannot map pci regs\n"); 345 + ret = -ENODEV; 346 + goto out2; 347 + } 348 + 349 + /* map parts of the PCI IO area */ 350 + /* REVISIT: if this changes with a newer variant (doubt it) make this 351 + * a platform resource. 352 + */ 353 + virt_io = ioremap(AU1500_PCI_IO_PHYS_ADDR, 0x00100000); 354 + if (!virt_io) { 355 + dev_err(&pdev->dev, "cannot remap pci io space\n"); 356 + ret = -ENODEV; 357 + goto out3; 358 + } 359 + ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io; 360 + 361 + #ifdef CONFIG_DMA_NONCOHERENT 362 + /* Au1500 revisions older than AD have borked coherent PCI */ 363 + if ((alchemy_get_cputype() == ALCHEMY_CPU_AU1500) && 364 + (read_c0_prid() < 0x01030202)) { 365 + val = __raw_readl(ctx->regs + PCI_REG_CONFIG); 366 + val |= PCI_CONFIG_NC; 367 + __raw_writel(val, ctx->regs + PCI_REG_CONFIG); 368 + wmb(); 369 + dev_info(&pdev->dev, "non-coherent PCI on Au1500 AA/AB/AC\n"); 370 + } 371 + #endif 372 + 373 + if (pd->board_map_irq) 374 + ctx->board_map_irq = pd->board_map_irq; 375 + 376 + if (pd->board_pci_idsel) 377 + ctx->board_pci_idsel = pd->board_pci_idsel; 378 + else 379 + ctx->board_pci_idsel = alchemy_pci_def_idsel; 380 + 381 + /* fill in relevant pci_controller members */ 382 + ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops; 383 + ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres; 384 + ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores; 385 + 386 + /* we can't ioremap the entire pci config space because it's too large, 387 + * nor can we dynamically ioremap it because some drivers use the 388 + * PCI config routines from within atomic contex and that becomes 
a 389 + * problem in get_vm_area(). Instead we use one wired TLB entry to 390 + * handle all config accesses for all busses. 391 + */ 392 + ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP); 393 + if (!ctx->pci_cfg_vm) { 394 + dev_err(&pdev->dev, "unable to get vm area\n"); 395 + ret = -ENOMEM; 396 + goto out4; 397 + } 398 + ctx->wired_entry = 8192; /* impossibly high value */ 399 + 400 + set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base); 401 + 402 + /* board may want to modify bits in the config register, do it now */ 403 + val = __raw_readl(ctx->regs + PCI_REG_CONFIG); 404 + val &= ~pd->pci_cfg_clr; 405 + val |= pd->pci_cfg_set; 406 + val &= ~PCI_CONFIG_PD; /* clear disable bit */ 407 + __raw_writel(val, ctx->regs + PCI_REG_CONFIG); 408 + wmb(); 409 + 410 + platform_set_drvdata(pdev, ctx); 411 + register_pci_controller(&ctx->alchemy_pci_ctrl); 412 + 413 + return 0; 414 + 415 + out4: 416 + iounmap(virt_io); 417 + out3: 418 + iounmap(ctx->regs); 419 + out2: 420 + release_mem_region(r->start, resource_size(r)); 421 + out1: 422 + kfree(ctx); 423 + out: 424 + return ret; 425 + } 426 + 427 + 428 + #ifdef CONFIG_PM 429 + /* save PCI controller register contents. 
*/ 430 + static int alchemy_pci_suspend(struct device *dev) 431 + { 432 + struct alchemy_pci_context *ctx = dev_get_drvdata(dev); 433 + 434 + ctx->pm[0] = __raw_readl(ctx->regs + PCI_REG_CMEM); 435 + ctx->pm[1] = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff; 436 + ctx->pm[2] = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH); 437 + ctx->pm[3] = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID); 438 + ctx->pm[4] = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID); 439 + ctx->pm[5] = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV); 440 + ctx->pm[6] = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL); 441 + ctx->pm[7] = __raw_readl(ctx->regs + PCI_REG_ID); 442 + ctx->pm[8] = __raw_readl(ctx->regs + PCI_REG_CLASSREV); 443 + ctx->pm[9] = __raw_readl(ctx->regs + PCI_REG_PARAM); 444 + ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR); 445 + ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT); 446 + 447 + return 0; 448 + } 449 + 450 + static int alchemy_pci_resume(struct device *dev) 451 + { 452 + struct alchemy_pci_context *ctx = dev_get_drvdata(dev); 453 + 454 + __raw_writel(ctx->pm[0], ctx->regs + PCI_REG_CMEM); 455 + __raw_writel(ctx->pm[2], ctx->regs + PCI_REG_B2BMASK_CCH); 456 + __raw_writel(ctx->pm[3], ctx->regs + PCI_REG_B2BBASE0_VID); 457 + __raw_writel(ctx->pm[4], ctx->regs + PCI_REG_B2BBASE1_SID); 458 + __raw_writel(ctx->pm[5], ctx->regs + PCI_REG_MWMASK_DEV); 459 + __raw_writel(ctx->pm[6], ctx->regs + PCI_REG_MWBASE_REV_CCL); 460 + __raw_writel(ctx->pm[7], ctx->regs + PCI_REG_ID); 461 + __raw_writel(ctx->pm[8], ctx->regs + PCI_REG_CLASSREV); 462 + __raw_writel(ctx->pm[9], ctx->regs + PCI_REG_PARAM); 463 + __raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR); 464 + __raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT); 465 + wmb(); 466 + __raw_writel(ctx->pm[1], ctx->regs + PCI_REG_CONFIG); 467 + wmb(); 468 + 469 + return 0; 470 + } 471 + 472 + static const struct dev_pm_ops alchemy_pci_pmops = { 473 + .suspend = alchemy_pci_suspend, 474 + .resume = 
alchemy_pci_resume, 475 + }; 476 + 477 + #define ALCHEMY_PCICTL_PM (&alchemy_pci_pmops) 478 + 479 + #else 480 + #define ALCHEMY_PCICTL_PM NULL 481 + #endif 482 + 483 + static struct platform_driver alchemy_pcictl_driver = { 484 + .probe = alchemy_pci_probe, 485 + .driver = { 486 + .name = "alchemy-pci", 487 + .owner = THIS_MODULE, 488 + .pm = ALCHEMY_PCICTL_PM, 489 + }, 490 + }; 491 + 492 + static int __init alchemy_pci_init(void) 493 + { 494 + /* Au1500/Au1550 have PCI */ 495 + switch (alchemy_get_cputype()) { 496 + case ALCHEMY_CPU_AU1500: 497 + case ALCHEMY_CPU_AU1550: 498 + return platform_driver_register(&alchemy_pcictl_driver); 499 + } 500 + return 0; 501 + } 502 + arch_initcall(alchemy_pci_init); 503 + 504 + 505 + int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 506 + { 507 + struct alchemy_pci_context *ctx = dev->sysdata; 508 + if (ctx && ctx->board_map_irq) 509 + return ctx->board_map_irq(dev, slot, pin); 510 + return -1; 511 + } 512 + 513 + int pcibios_plat_dev_init(struct pci_dev *dev) 514 + { 515 + return 0; 516 + }
+1
arch/mips/pmc-sierra/msp71xx/msp_setup.c
··· 14 14 #include <asm/cacheflush.h> 15 15 #include <asm/r4kcache.h> 16 16 #include <asm/reboot.h> 17 + #include <asm/smp-ops.h> 17 18 #include <asm/time.h> 18 19 19 20 #include <msp_prom.h>
+2 -10
arch/mips/pmc-sierra/yosemite/py-console.c
··· 65 65 66 66 __asm__ __volatile__ ( 67 67 " .set mips3 \n" 68 - " .set push \n" 69 - " .set noreorder \n" 70 - " .set nomacro \n" 71 68 " ld %0, %1 \n" 72 - " .set pop \n" 73 69 " lbu %0, (%0) \n" 74 70 " .set mips0 \n" 75 71 : "=r" (res) 76 - : "R" (vaddr)); 72 + : "m" (vaddr)); 77 73 78 74 write_c0_status(sr); 79 75 ssnop_4(); ··· 89 93 90 94 __asm__ __volatile__ ( 91 95 " .set mips3 \n" 92 - " .set push \n" 93 - " .set noreorder \n" 94 - " .set nomacro \n" 95 96 " ld %0, %1 \n" 96 - " .set pop \n" 97 97 " sb %2, (%0) \n" 98 98 " .set mips0 \n" 99 99 : "=&r" (tmp) 100 - : "R" (vaddr), "r" (c)); 100 + : "m" (vaddr), "r" (c)); 101 101 102 102 write_c0_status(sr); 103 103 ssnop_4();
+1 -1
arch/mips/pnx8550/common/prom.c
··· 30 30 }t_env_var; 31 31 32 32 33 - char * prom_getcmdline(void) 33 + char * __init prom_getcmdline(void) 34 34 { 35 35 return &(arcs_cmdline[0]); 36 36 }
+2 -2
arch/mips/sgi-ip27/ip27-irq.c
··· 337 337 .irq_unmask = enable_bridge_irq, 338 338 }; 339 339 340 - void __devinit register_bridge_irq(unsigned int irq) 340 + void register_bridge_irq(unsigned int irq) 341 341 { 342 342 irq_set_chip_and_handler(irq, &bridge_irq_type, handle_level_irq); 343 343 } 344 344 345 - int __devinit request_bridge_irq(struct bridge_controller *bc) 345 + int request_bridge_irq(struct bridge_controller *bc) 346 346 { 347 347 int irq = allocate_irqno(); 348 348 int swlevel, cpu;
+1 -1
drivers/i2c/busses/Kconfig
··· 300 300 301 301 config I2C_AU1550 302 302 tristate "Au1550/Au1200 SMBus interface" 303 - depends on SOC_AU1550 || SOC_AU1200 303 + depends on MIPS_ALCHEMY 304 304 help 305 305 If you say yes to this option, support will be included for the 306 306 Au1550 and Au1200 SMBus interface.
+1 -1
drivers/i2c/busses/i2c-au1550.c
··· 36 36 #include <linux/i2c.h> 37 37 #include <linux/slab.h> 38 38 39 - #include <asm/mach-au1x00/au1xxx.h> 39 + #include <asm/mach-au1x00/au1000.h> 40 40 #include <asm/mach-au1x00/au1xxx_psc.h> 41 41 42 42 #define PSC_SEL 0x00
+3 -3
drivers/ide/Kconfig
··· 677 677 678 678 config BLK_DEV_IDE_AU1XXX 679 679 bool "IDE for AMD Alchemy Au1200" 680 - depends on SOC_AU1200 680 + depends on MIPS_ALCHEMY 681 681 select IDE_XFER_MODE 682 682 choice 683 683 prompt "IDE Mode for AMD Alchemy Au1200" 684 684 default BLK_DEV_IDE_AU1XXX_PIO_DBDMA 685 - depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX 685 + depends on BLK_DEV_IDE_AU1XXX 686 686 687 687 config BLK_DEV_IDE_AU1XXX_PIO_DBDMA 688 688 bool "PIO+DbDMA IDE for AMD Alchemy Au1200" 689 689 690 690 config BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 691 691 bool "MDMA2+DbDMA IDE for AMD Alchemy Au1200" 692 - depends on SOC_AU1200 && BLK_DEV_IDE_AU1XXX 692 + depends on BLK_DEV_IDE_AU1XXX 693 693 endchoice 694 694 695 695 config BLK_DEV_IDE_TX4938
+27 -19
drivers/ide/au1xxx-ide.c
··· 36 36 #include <linux/ide.h> 37 37 #include <linux/scatterlist.h> 38 38 39 - #include <asm/mach-au1x00/au1xxx.h> 39 + #include <asm/mach-au1x00/au1000.h> 40 40 #include <asm/mach-au1x00/au1xxx_dbdma.h> 41 41 #include <asm/mach-au1x00/au1xxx_ide.h> 42 42 43 43 #define DRV_NAME "au1200-ide" 44 44 #define DRV_AUTHOR "Enrico Walther <enrico.walther@amd.com> / Pete Popov <ppopov@embeddedalley.com>" 45 + 46 + #ifndef IDE_REG_SHIFT 47 + #define IDE_REG_SHIFT 5 48 + #endif 45 49 46 50 /* enable the burstmode in the dbdma */ 47 51 #define IDE_AU1XXX_BURSTMODE 1 ··· 321 317 } 322 318 #endif /* end CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA */ 323 319 324 - static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 devwidth, u32 flags) 320 + static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, 321 + u32 devwidth, u32 flags, u32 regbase) 325 322 { 326 323 dev->dev_id = dev_id; 327 - dev->dev_physaddr = (u32)IDE_PHYS_ADDR; 324 + dev->dev_physaddr = CPHYSADDR(regbase); 328 325 dev->dev_intlevel = 0; 329 326 dev->dev_intpolarity = 0; 330 327 dev->dev_tsize = tsize; ··· 349 344 dbdev_tab_t source_dev_tab, target_dev_tab; 350 345 u32 dev_id, tsize, devwidth, flags; 351 346 352 - dev_id = IDE_DDMA_REQ; 347 + dev_id = hwif->ddma_id; 353 348 354 349 tsize = 8; /* 1 */ 355 350 devwidth = 32; /* 16 */ ··· 361 356 #endif 362 357 363 358 /* setup dev_tab for tx channel */ 364 - auide_init_dbdma_dev( &source_dev_tab, 365 - dev_id, 366 - tsize, devwidth, DEV_FLAGS_OUT | flags); 359 + auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth, 360 + DEV_FLAGS_OUT | flags, auide->regbase); 367 361 auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); 368 362 369 - auide_init_dbdma_dev( &source_dev_tab, 370 - dev_id, 371 - tsize, devwidth, DEV_FLAGS_IN | flags); 363 + auide_init_dbdma_dev(&source_dev_tab, dev_id, tsize, devwidth, 364 + DEV_FLAGS_IN | flags, auide->regbase); 372 365 auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); 
373 366 374 367 /* We also need to add a target device for the DMA */ 375 - auide_init_dbdma_dev( &target_dev_tab, 376 - (u32)DSCR_CMD0_ALWAYS, 377 - tsize, devwidth, DEV_FLAGS_ANYUSE); 368 + auide_init_dbdma_dev(&target_dev_tab, (u32)DSCR_CMD0_ALWAYS, tsize, 369 + devwidth, DEV_FLAGS_ANYUSE, auide->regbase); 378 370 auide->target_dev_id = au1xxx_ddma_add_device(&target_dev_tab); 379 371 380 372 /* Get a channel for TX */ ··· 413 411 #endif 414 412 415 413 /* setup dev_tab for tx channel */ 416 - auide_init_dbdma_dev( &source_dev_tab, 417 - (u32)DSCR_CMD0_ALWAYS, 418 - 8, 32, DEV_FLAGS_OUT | flags); 414 + auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32, 415 + DEV_FLAGS_OUT | flags, auide->regbase); 419 416 auide->tx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); 420 417 421 - auide_init_dbdma_dev( &source_dev_tab, 422 - (u32)DSCR_CMD0_ALWAYS, 423 - 8, 32, DEV_FLAGS_IN | flags); 418 + auide_init_dbdma_dev(&source_dev_tab, (u32)DSCR_CMD0_ALWAYS, 8, 32, 419 + DEV_FLAGS_IN | flags, auide->regbase); 424 420 auide->rx_dev_id = au1xxx_ddma_add_device( &source_dev_tab ); 425 421 426 422 /* Get a channel for TX */ ··· 539 539 ret = -ENOMEM; 540 540 goto out; 541 541 } 542 + 543 + res = platform_get_resource(dev, IORESOURCE_DMA, 0); 544 + if (!res) { 545 + pr_debug("%s: no DDMA ID resource\n", DRV_NAME); 546 + ret = -ENODEV; 547 + goto out; 548 + } 549 + ahwif->ddma_id = res->start; 542 550 543 551 memset(&hw, 0, sizeof(hw)); 544 552 auide_setup_ports(&hw, ahwif);
+1 -1
drivers/mmc/host/Kconfig
··· 263 263 264 264 config MMC_AU1X 265 265 tristate "Alchemy AU1XX0 MMC Card Interface support" 266 - depends on SOC_AU1200 266 + depends on MIPS_ALCHEMY 267 267 help 268 268 This selects the AMD Alchemy(R) Multimedia card interface. 269 269 If you have a Alchemy platform with a MMC slot, say Y or M here.
+49 -43
drivers/mmc/host/au1xmmc.c
··· 64 64 #define AU1XMMC_DESCRIPTOR_COUNT 1 65 65 66 66 /* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */ 67 - #ifdef CONFIG_SOC_AU1100 68 - #define AU1XMMC_DESCRIPTOR_SIZE 0x0000ffff 69 - #else /* Au1200 */ 70 - #define AU1XMMC_DESCRIPTOR_SIZE 0x003fffff 71 - #endif 67 + #define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff 68 + #define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff 72 69 73 70 #define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \ 74 71 MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ ··· 124 127 #define HOST_F_XMIT 0x0001 125 128 #define HOST_F_RECV 0x0002 126 129 #define HOST_F_DMA 0x0010 130 + #define HOST_F_DBDMA 0x0020 127 131 #define HOST_F_ACTIVE 0x0100 128 132 #define HOST_F_STOP 0x1000 129 133 ··· 148 150 149 151 #define DMA_CHANNEL(h) \ 150 152 (((h)->flags & HOST_F_XMIT) ? (h)->tx_chan : (h)->rx_chan) 153 + 154 + static inline int has_dbdma(void) 155 + { 156 + switch (alchemy_get_cputype()) { 157 + case ALCHEMY_CPU_AU1200: 158 + return 1; 159 + default: 160 + return 0; 161 + } 162 + } 151 163 152 164 static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask) 153 165 { ··· 361 353 data->bytes_xfered = 0; 362 354 363 355 if (!data->error) { 364 - if (host->flags & HOST_F_DMA) { 365 - #ifdef CONFIG_SOC_AU1200 /* DBDMA */ 356 + if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { 366 357 u32 chan = DMA_CHANNEL(host); 367 358 368 359 chan_tab_t *c = *((chan_tab_t **)chan); 369 360 au1x_dma_chan_t *cp = c->chan_ptr; 370 361 data->bytes_xfered = cp->ddma_bytecnt; 371 - #endif 372 362 } else 373 363 data->bytes_xfered = 374 364 (data->blocks * data->blksz) - host->pio.len; ··· 576 570 577 571 host->status = HOST_S_DATA; 578 572 579 - if (host->flags & HOST_F_DMA) { 580 - #ifdef CONFIG_SOC_AU1200 /* DBDMA */ 573 + if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) { 581 574 u32 channel = DMA_CHANNEL(host); 582 575 583 - /* Start the DMA as soon as the buffer gets something in it */ 576 + /* Start the DBDMA as soon as the buffer gets 
something in it */ 584 577 585 578 if (host->flags & HOST_F_RECV) { 586 579 u32 mask = SD_STATUS_DB | SD_STATUS_NE; ··· 589 584 } 590 585 591 586 au1xxx_dbdma_start(channel); 592 - #endif 593 587 } 594 588 } 595 589 ··· 637 633 638 634 au_writel(data->blksz - 1, HOST_BLKSIZE(host)); 639 635 640 - if (host->flags & HOST_F_DMA) { 641 - #ifdef CONFIG_SOC_AU1200 /* DBDMA */ 636 + if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) { 642 637 int i; 643 638 u32 channel = DMA_CHANNEL(host); 644 639 ··· 666 663 667 664 datalen -= len; 668 665 } 669 - #endif 670 666 } else { 671 667 host->pio.index = 0; 672 668 host->pio.offset = 0; ··· 840 838 return IRQ_HANDLED; 841 839 } 842 840 843 - #ifdef CONFIG_SOC_AU1200 844 841 /* 8bit memory DMA device */ 845 842 static dbdev_tab_t au1xmmc_mem_dbdev = { 846 843 .dev_id = DSCR_CMD0_ALWAYS, ··· 906 905 au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT); 907 906 908 907 /* DBDMA is good to go */ 909 - host->flags |= HOST_F_DMA; 908 + host->flags |= HOST_F_DMA | HOST_F_DBDMA; 910 909 911 910 return 0; 912 911 } ··· 919 918 au1xxx_dbdma_chan_free(host->rx_chan); 920 919 } 921 920 } 922 - #endif 923 921 924 922 static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en) 925 923 { ··· 997 997 mmc->f_min = 450000; 998 998 mmc->f_max = 24000000; 999 999 1000 - mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; 1001 - mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; 1000 + switch (alchemy_get_cputype()) { 1001 + case ALCHEMY_CPU_AU1100: 1002 + mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE; 1003 + mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; 1004 + break; 1005 + case ALCHEMY_CPU_AU1200: 1006 + mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; 1007 + mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; 1008 + break; 1009 + } 1002 1010 1003 1011 mmc->max_blk_size = 2048; 1004 1012 mmc->max_blk_count = 512; ··· 1036 1028 tasklet_init(&host->finish_task, au1xmmc_tasklet_finish, 1037 1029 (unsigned long)host); 1038 1030 1039 - #ifdef CONFIG_SOC_AU1200 1040 - 
ret = au1xmmc_dbdma_init(host); 1041 - if (ret) 1042 - pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n"); 1043 - #endif 1031 + if (has_dbdma()) { 1032 + ret = au1xmmc_dbdma_init(host); 1033 + if (ret) 1034 + pr_info(DRIVER_NAME ": DBDMA init failed; using PIO\n"); 1035 + } 1044 1036 1045 1037 #ifdef CONFIG_LEDS_CLASS 1046 1038 if (host->platdata && host->platdata->led) { ··· 1081 1073 au_writel(0, HOST_CONFIG2(host)); 1082 1074 au_sync(); 1083 1075 1084 - #ifdef CONFIG_SOC_AU1200 1085 - au1xmmc_dbdma_shutdown(host); 1086 - #endif 1076 + if (host->flags & HOST_F_DBDMA) 1077 + au1xmmc_dbdma_shutdown(host); 1087 1078 1088 1079 tasklet_kill(&host->data_task); 1089 1080 tasklet_kill(&host->finish_task); ··· 1127 1120 tasklet_kill(&host->data_task); 1128 1121 tasklet_kill(&host->finish_task); 1129 1122 1130 - #ifdef CONFIG_SOC_AU1200 1131 - au1xmmc_dbdma_shutdown(host); 1132 - #endif 1123 + if (host->flags & HOST_F_DBDMA) 1124 + au1xmmc_dbdma_shutdown(host); 1125 + 1133 1126 au1xmmc_set_power(host, 0); 1134 1127 1135 1128 free_irq(host->irq, host); ··· 1188 1181 1189 1182 static int __init au1xmmc_init(void) 1190 1183 { 1191 - #ifdef CONFIG_SOC_AU1200 1192 - /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride 1193 - * of 8 bits. And since devices are shared, we need to create 1194 - * our own to avoid freaking out other devices. 1195 - */ 1196 - memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); 1197 - if (!memid) 1198 - pr_err("au1xmmc: cannot add memory dbdma dev\n"); 1199 - #endif 1184 + if (has_dbdma()) { 1185 + /* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride 1186 + * of 8 bits. And since devices are shared, we need to create 1187 + * our own to avoid freaking out other devices. 
1188 + */ 1189 + memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); 1190 + if (!memid) 1191 + pr_err("au1xmmc: cannot add memory dbdma\n"); 1192 + } 1200 1193 return platform_driver_register(&au1xmmc_driver); 1201 1194 } 1202 1195 1203 1196 static void __exit au1xmmc_exit(void) 1204 1197 { 1205 - #ifdef CONFIG_SOC_AU1200 1206 - if (memid) 1198 + if (has_dbdma() && memid) 1207 1199 au1xxx_ddma_del_device(memid); 1208 - #endif 1200 + 1209 1201 platform_driver_unregister(&au1xmmc_driver); 1210 1202 } 1211 1203
+2 -2
drivers/mtd/maps/lantiq-flash.c
··· 182 182 parts = ltq_mtd_data->parts; 183 183 } 184 184 185 - err = add_mtd_partitions(ltq_mtd->mtd, parts, nr_parts); 185 + err = mtd_device_register(ltq_mtd->mtd, parts, nr_parts); 186 186 if (err) { 187 187 dev_err(&pdev->dev, "failed to add partitions\n"); 188 188 goto err_destroy; ··· 208 208 209 209 if (ltq_mtd) { 210 210 if (ltq_mtd->mtd) { 211 - del_mtd_partitions(ltq_mtd->mtd); 211 + mtd_device_unregister(ltq_mtd->mtd); 212 212 map_destroy(ltq_mtd->mtd); 213 213 } 214 214 if (ltq_mtd->map->virt)
+1 -1
drivers/mtd/nand/Kconfig
··· 138 138 139 139 config MTD_NAND_AU1550 140 140 tristate "Au1550/1200 NAND support" 141 - depends on SOC_AU1200 || SOC_AU1550 141 + depends on MIPS_ALCHEMY 142 142 help 143 143 This enables the driver for the NAND flash controller on the 144 144 AMD/Alchemy 1550 SOC.
+5 -1
drivers/mtd/nand/au1550nd.c
··· 19 19 #include <linux/mtd/partitions.h> 20 20 #include <asm/io.h> 21 21 22 - #include <asm/mach-au1x00/au1xxx.h> 22 + #ifdef CONFIG_MIPS_PB1550 23 + #include <asm/mach-pb1x00/pb1550.h> 24 + #elif defined(CONFIG_MIPS_DB1550) 25 + #include <asm/mach-db1x00/db1x00.h> 26 + #endif 23 27 #include <asm/mach-db1x00/bcsr.h> 24 28 25 29 /*
+36 -12
drivers/net/ethernet/amd/au1000_eth.c
··· 541 541 * these are not descriptors sitting in memory. 542 542 */ 543 543 static void 544 - au1000_setup_hw_rings(struct au1000_private *aup, u32 rx_base, u32 tx_base) 544 + au1000_setup_hw_rings(struct au1000_private *aup, void __iomem *tx_base) 545 545 { 546 546 int i; 547 547 548 548 for (i = 0; i < NUM_RX_DMA; i++) { 549 - aup->rx_dma_ring[i] = 550 - (struct rx_dma *) 551 - (rx_base + sizeof(struct rx_dma)*i); 549 + aup->rx_dma_ring[i] = (struct rx_dma *) 550 + (tx_base + 0x100 + sizeof(struct rx_dma) * i); 552 551 } 553 552 for (i = 0; i < NUM_TX_DMA; i++) { 554 - aup->tx_dma_ring[i] = 555 - (struct tx_dma *) 556 - (tx_base + sizeof(struct tx_dma)*i); 553 + aup->tx_dma_ring[i] = (struct tx_dma *) 554 + (tx_base + sizeof(struct tx_dma) * i); 557 555 } 558 556 } 559 557 ··· 1024 1026 struct net_device *dev = NULL; 1025 1027 struct db_dest *pDB, *pDBfree; 1026 1028 int irq, i, err = 0; 1027 - struct resource *base, *macen; 1029 + struct resource *base, *macen, *macdma; 1028 1030 1029 1031 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1030 1032 if (!base) { ··· 1047 1049 goto out; 1048 1050 } 1049 1051 1052 + macdma = platform_get_resource(pdev, IORESOURCE_MEM, 2); 1053 + if (!macdma) { 1054 + dev_err(&pdev->dev, "failed to retrieve MACDMA registers\n"); 1055 + err = -ENODEV; 1056 + goto out; 1057 + } 1058 + 1050 1059 if (!request_mem_region(base->start, resource_size(base), 1051 1060 pdev->name)) { 1052 1061 dev_err(&pdev->dev, "failed to request memory region for base registers\n"); ··· 1066 1061 dev_err(&pdev->dev, "failed to request memory region for MAC enable register\n"); 1067 1062 err = -ENXIO; 1068 1063 goto err_request; 1064 + } 1065 + 1066 + if (!request_mem_region(macdma->start, resource_size(macdma), 1067 + pdev->name)) { 1068 + dev_err(&pdev->dev, "failed to request MACDMA memory region\n"); 1069 + err = -ENXIO; 1070 + goto err_macdma; 1069 1071 } 1070 1072 1071 1073 dev = alloc_etherdev(sizeof(struct au1000_private)); ··· 1121 1109 } 
1122 1110 aup->mac_id = pdev->id; 1123 1111 1124 - if (pdev->id == 0) 1125 - au1000_setup_hw_rings(aup, MAC0_RX_DMA_ADDR, MAC0_TX_DMA_ADDR); 1126 - else if (pdev->id == 1) 1127 - au1000_setup_hw_rings(aup, MAC1_RX_DMA_ADDR, MAC1_TX_DMA_ADDR); 1112 + aup->macdma = ioremap_nocache(macdma->start, resource_size(macdma)); 1113 + if (!aup->macdma) { 1114 + dev_err(&pdev->dev, "failed to ioremap MACDMA registers\n"); 1115 + err = -ENXIO; 1116 + goto err_remap3; 1117 + } 1118 + 1119 + au1000_setup_hw_rings(aup, aup->macdma); 1128 1120 1129 1121 /* set a random MAC now in case platform_data doesn't provide one */ 1130 1122 random_ether_addr(dev->dev_addr); ··· 1268 1252 err_mdiobus_reg: 1269 1253 mdiobus_free(aup->mii_bus); 1270 1254 err_mdiobus_alloc: 1255 + iounmap(aup->macdma); 1256 + err_remap3: 1271 1257 iounmap(aup->enable); 1272 1258 err_remap2: 1273 1259 iounmap(aup->mac); ··· 1279 1261 err_vaddr: 1280 1262 free_netdev(dev); 1281 1263 err_alloc: 1264 + release_mem_region(macdma->start, resource_size(macdma)); 1265 + err_macdma: 1282 1266 release_mem_region(macen->start, resource_size(macen)); 1283 1267 err_request: 1284 1268 release_mem_region(base->start, resource_size(base)); ··· 1313 1293 (NUM_TX_BUFFS + NUM_RX_BUFFS), 1314 1294 (void *)aup->vaddr, aup->dma_addr); 1315 1295 1296 + iounmap(aup->macdma); 1316 1297 iounmap(aup->mac); 1317 1298 iounmap(aup->enable); 1299 + 1300 + base = platform_get_resource(pdev, IORESOURCE_MEM, 2); 1301 + release_mem_region(base->start, resource_size(base)); 1318 1302 1319 1303 base = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1320 1304 release_mem_region(base->start, resource_size(base));
+1 -1
drivers/net/ethernet/amd/au1000_eth.h
··· 124 124 */ 125 125 struct mac_reg *mac; /* mac registers */ 126 126 u32 *enable; /* address of MAC Enable Register */ 127 - 127 + void __iomem *macdma; /* base of MAC DMA port */ 128 128 u32 vaddr; /* virtual address of rx/tx buffers */ 129 129 dma_addr_t dma_addr; /* dma address of rx/tx buffers */ 130 130
+1 -1
drivers/net/irda/Kconfig
··· 314 314 315 315 config AU1000_FIR 316 316 tristate "Alchemy Au1000 SIR/FIR" 317 - depends on SOC_AU1000 && IRDA 317 + depends on IRDA && MIPS_ALCHEMY 318 318 319 319 config SMC_IRCC_FIR 320 320 tristate "SMSC IrCC (EXPERIMENTAL)"
+1 -1
drivers/spi/Kconfig
··· 88 88 89 89 config SPI_AU1550 90 90 tristate "Au1550/Au12x0 SPI Controller" 91 - depends on (SOC_AU1550 || SOC_AU1200) && EXPERIMENTAL 91 + depends on MIPS_ALCHEMY && EXPERIMENTAL 92 92 select SPI_BITBANG 93 93 help 94 94 If you say yes to this option, support will be included for the
-1
drivers/usb/Kconfig
··· 62 62 boolean 63 63 default y if FSL_SOC 64 64 default y if PPC_MPC512x 65 - default y if SOC_AU1200 66 65 default y if ARCH_IXP4XX 67 66 default y if ARCH_W90X900 68 67 default y if ARCH_AT91SAM9G45
+1
drivers/usb/host/Makefile
··· 36 36 obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o 37 37 obj-$(CONFIG_USB_FSL_MPH_DR_OF) += fsl-mph-dr-of.o 38 38 obj-$(CONFIG_USB_OCTEON2_COMMON) += octeon2-common.o 39 + obj-$(CONFIG_MIPS_ALCHEMY) += alchemy-common.o
+337
drivers/usb/host/alchemy-common.c
··· 1 + /* 2 + * USB block power/access management abstraction. 3 + * 4 + * Au1000+: The OHCI block control register is at the far end of the OHCI memory 5 + * area. Au1550 has OHCI on different base address. No need to handle 6 + * UDC here. 7 + * Au1200: one register to control access and clocks to O/EHCI, UDC and OTG 8 + * as well as the PHY for EHCI and UDC. 9 + * 10 + */ 11 + 12 + #include <linux/init.h> 13 + #include <linux/io.h> 14 + #include <linux/module.h> 15 + #include <linux/spinlock.h> 16 + #include <linux/syscore_ops.h> 17 + #include <asm/mach-au1x00/au1000.h> 18 + 19 + /* control register offsets */ 20 + #define AU1000_OHCICFG 0x7fffc 21 + #define AU1550_OHCICFG 0x07ffc 22 + #define AU1200_USBCFG 0x04 23 + 24 + /* Au1000 USB block config bits */ 25 + #define USBHEN_RD (1 << 4) /* OHCI reset-done indicator */ 26 + #define USBHEN_CE (1 << 3) /* OHCI block clock enable */ 27 + #define USBHEN_E (1 << 2) /* OHCI block enable */ 28 + #define USBHEN_C (1 << 1) /* OHCI block coherency bit */ 29 + #define USBHEN_BE (1 << 0) /* OHCI Big-Endian */ 30 + 31 + /* Au1200 USB config bits */ 32 + #define USBCFG_PFEN (1 << 31) /* prefetch enable (undoc) */ 33 + #define USBCFG_RDCOMB (1 << 30) /* read combining (undoc) */ 34 + #define USBCFG_UNKNOWN (5 << 20) /* unknown, leave this way */ 35 + #define USBCFG_SSD (1 << 23) /* serial short detect en */ 36 + #define USBCFG_PPE (1 << 19) /* HS PHY PLL */ 37 + #define USBCFG_UCE (1 << 18) /* UDC clock enable */ 38 + #define USBCFG_ECE (1 << 17) /* EHCI clock enable */ 39 + #define USBCFG_OCE (1 << 16) /* OHCI clock enable */ 40 + #define USBCFG_FLA(x) (((x) & 0x3f) << 8) 41 + #define USBCFG_UCAM (1 << 7) /* coherent access (undoc) */ 42 + #define USBCFG_GME (1 << 6) /* OTG mem access */ 43 + #define USBCFG_DBE (1 << 5) /* UDC busmaster enable */ 44 + #define USBCFG_DME (1 << 4) /* UDC mem enable */ 45 + #define USBCFG_EBE (1 << 3) /* EHCI busmaster enable */ 46 + #define USBCFG_EME (1 << 2) /* EHCI mem enable */ 47 + 
#define USBCFG_OBE (1 << 1) /* OHCI busmaster enable */ 48 + #define USBCFG_OME (1 << 0) /* OHCI mem enable */ 49 + #define USBCFG_INIT_AU1200 (USBCFG_PFEN | USBCFG_RDCOMB | USBCFG_UNKNOWN |\ 50 + USBCFG_SSD | USBCFG_FLA(0x20) | USBCFG_UCAM | \ 51 + USBCFG_GME | USBCFG_DBE | USBCFG_DME | \ 52 + USBCFG_EBE | USBCFG_EME | USBCFG_OBE | \ 53 + USBCFG_OME) 54 + 55 + 56 + static DEFINE_SPINLOCK(alchemy_usb_lock); 57 + 58 + 59 + static inline void __au1200_ohci_control(void __iomem *base, int enable) 60 + { 61 + unsigned long r = __raw_readl(base + AU1200_USBCFG); 62 + if (enable) { 63 + __raw_writel(r | USBCFG_OCE, base + AU1200_USBCFG); 64 + wmb(); 65 + udelay(2000); 66 + } else { 67 + __raw_writel(r & ~USBCFG_OCE, base + AU1200_USBCFG); 68 + wmb(); 69 + udelay(1000); 70 + } 71 + } 72 + 73 + static inline void __au1200_ehci_control(void __iomem *base, int enable) 74 + { 75 + unsigned long r = __raw_readl(base + AU1200_USBCFG); 76 + if (enable) { 77 + __raw_writel(r | USBCFG_ECE | USBCFG_PPE, base + AU1200_USBCFG); 78 + wmb(); 79 + udelay(1000); 80 + } else { 81 + if (!(r & USBCFG_UCE)) /* UDC also off? */ 82 + r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */ 83 + __raw_writel(r & ~USBCFG_ECE, base + AU1200_USBCFG); 84 + wmb(); 85 + udelay(1000); 86 + } 87 + } 88 + 89 + static inline void __au1200_udc_control(void __iomem *base, int enable) 90 + { 91 + unsigned long r = __raw_readl(base + AU1200_USBCFG); 92 + if (enable) { 93 + __raw_writel(r | USBCFG_UCE | USBCFG_PPE, base + AU1200_USBCFG); 94 + wmb(); 95 + } else { 96 + if (!(r & USBCFG_ECE)) /* EHCI also off? 
*/ 97 + r &= ~USBCFG_PPE; /* yes: disable HS PHY PLL */ 98 + __raw_writel(r & ~USBCFG_UCE, base + AU1200_USBCFG); 99 + wmb(); 100 + } 101 + } 102 + 103 + static inline int au1200_coherency_bug(void) 104 + { 105 + #if defined(CONFIG_DMA_COHERENT) 106 + /* Au1200 AB USB does not support coherent memory */ 107 + if (!(read_c0_prid() & 0xff)) { 108 + printk(KERN_INFO "Au1200 USB: this is chip revision AB !!\n"); 109 + printk(KERN_INFO "Au1200 USB: update your board or re-configure" 110 + " the kernel\n"); 111 + return -ENODEV; 112 + } 113 + #endif 114 + return 0; 115 + } 116 + 117 + static inline int au1200_usb_control(int block, int enable) 118 + { 119 + void __iomem *base = 120 + (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR); 121 + int ret = 0; 122 + 123 + switch (block) { 124 + case ALCHEMY_USB_OHCI0: 125 + ret = au1200_coherency_bug(); 126 + if (ret && enable) 127 + goto out; 128 + __au1200_ohci_control(base, enable); 129 + break; 130 + case ALCHEMY_USB_UDC0: 131 + __au1200_udc_control(base, enable); 132 + break; 133 + case ALCHEMY_USB_EHCI0: 134 + ret = au1200_coherency_bug(); 135 + if (ret && enable) 136 + goto out; 137 + __au1200_ehci_control(base, enable); 138 + break; 139 + default: 140 + ret = -ENODEV; 141 + } 142 + out: 143 + return ret; 144 + } 145 + 146 + 147 + /* initialize USB block(s) to a known working state */ 148 + static inline void au1200_usb_init(void) 149 + { 150 + void __iomem *base = 151 + (void __iomem *)KSEG1ADDR(AU1200_USB_CTL_PHYS_ADDR); 152 + __raw_writel(USBCFG_INIT_AU1200, base + AU1200_USBCFG); 153 + wmb(); 154 + udelay(1000); 155 + } 156 + 157 + static inline void au1000_usb_init(unsigned long rb, int reg) 158 + { 159 + void __iomem *base = (void __iomem *)KSEG1ADDR(rb + reg); 160 + unsigned long r = __raw_readl(base); 161 + 162 + #if defined(__BIG_ENDIAN) 163 + r |= USBHEN_BE; 164 + #endif 165 + r |= USBHEN_C; 166 + 167 + __raw_writel(r, base); 168 + wmb(); 169 + udelay(1000); 170 + } 171 + 172 + 173 + static inline void 
__au1xx0_ohci_control(int enable, unsigned long rb, int creg) 174 + { 175 + void __iomem *base = (void __iomem *)KSEG1ADDR(rb); 176 + unsigned long r = __raw_readl(base + creg); 177 + 178 + if (enable) { 179 + __raw_writel(r | USBHEN_CE, base + creg); 180 + wmb(); 181 + udelay(1000); 182 + __raw_writel(r | USBHEN_CE | USBHEN_E, base + creg); 183 + wmb(); 184 + udelay(1000); 185 + 186 + /* wait for reset complete (read reg twice: au1500 erratum) */ 187 + while (__raw_readl(base + creg), 188 + !(__raw_readl(base + creg) & USBHEN_RD)) 189 + udelay(1000); 190 + } else { 191 + __raw_writel(r & ~(USBHEN_CE | USBHEN_E), base + creg); 192 + wmb(); 193 + } 194 + } 195 + 196 + static inline int au1000_usb_control(int block, int enable, unsigned long rb, 197 + int creg) 198 + { 199 + int ret = 0; 200 + 201 + switch (block) { 202 + case ALCHEMY_USB_OHCI0: 203 + __au1xx0_ohci_control(enable, rb, creg); 204 + break; 205 + default: 206 + ret = -ENODEV; 207 + } 208 + return ret; 209 + } 210 + 211 + /* 212 + * alchemy_usb_control - control Alchemy on-chip USB blocks 213 + * @block: USB block to target 214 + * @enable: set 1 to enable a block, 0 to disable 215 + */ 216 + int alchemy_usb_control(int block, int enable) 217 + { 218 + unsigned long flags; 219 + int ret; 220 + 221 + spin_lock_irqsave(&alchemy_usb_lock, flags); 222 + switch (alchemy_get_cputype()) { 223 + case ALCHEMY_CPU_AU1000: 224 + case ALCHEMY_CPU_AU1500: 225 + case ALCHEMY_CPU_AU1100: 226 + ret = au1000_usb_control(block, enable, 227 + AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); 228 + break; 229 + case ALCHEMY_CPU_AU1550: 230 + ret = au1000_usb_control(block, enable, 231 + AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); 232 + break; 233 + case ALCHEMY_CPU_AU1200: 234 + ret = au1200_usb_control(block, enable); 235 + break; 236 + default: 237 + ret = -ENODEV; 238 + } 239 + spin_unlock_irqrestore(&alchemy_usb_lock, flags); 240 + return ret; 241 + } 242 + EXPORT_SYMBOL_GPL(alchemy_usb_control); 243 + 244 + 245 + static 
unsigned long alchemy_usb_pmdata[2]; 246 + 247 + static void au1000_usb_pm(unsigned long br, int creg, int susp) 248 + { 249 + void __iomem *base = (void __iomem *)KSEG1ADDR(br); 250 + 251 + if (susp) { 252 + alchemy_usb_pmdata[0] = __raw_readl(base + creg); 253 + /* There appears to be some undocumented reset register.... */ 254 + __raw_writel(0, base + 0x04); 255 + wmb(); 256 + __raw_writel(0, base + creg); 257 + wmb(); 258 + } else { 259 + __raw_writel(alchemy_usb_pmdata[0], base + creg); 260 + wmb(); 261 + } 262 + } 263 + 264 + static void au1200_usb_pm(int susp) 265 + { 266 + void __iomem *base = 267 + (void __iomem *)KSEG1ADDR(AU1200_USB_OTG_PHYS_ADDR); 268 + if (susp) { 269 + /* save OTG_CAP/MUX registers which indicate port routing */ 270 + /* FIXME: write an OTG driver to do that */ 271 + alchemy_usb_pmdata[0] = __raw_readl(base + 0x00); 272 + alchemy_usb_pmdata[1] = __raw_readl(base + 0x04); 273 + } else { 274 + /* restore access to all MMIO areas */ 275 + au1200_usb_init(); 276 + 277 + /* restore OTG_CAP/MUX registers */ 278 + __raw_writel(alchemy_usb_pmdata[0], base + 0x00); 279 + __raw_writel(alchemy_usb_pmdata[1], base + 0x04); 280 + wmb(); 281 + } 282 + } 283 + 284 + static void alchemy_usb_pm(int susp) 285 + { 286 + switch (alchemy_get_cputype()) { 287 + case ALCHEMY_CPU_AU1000: 288 + case ALCHEMY_CPU_AU1500: 289 + case ALCHEMY_CPU_AU1100: 290 + au1000_usb_pm(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG, susp); 291 + break; 292 + case ALCHEMY_CPU_AU1550: 293 + au1000_usb_pm(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG, susp); 294 + break; 295 + case ALCHEMY_CPU_AU1200: 296 + au1200_usb_pm(susp); 297 + break; 298 + } 299 + } 300 + 301 + static int alchemy_usb_suspend(void) 302 + { 303 + alchemy_usb_pm(1); 304 + return 0; 305 + } 306 + 307 + static void alchemy_usb_resume(void) 308 + { 309 + alchemy_usb_pm(0); 310 + } 311 + 312 + static struct syscore_ops alchemy_usb_pm_ops = { 313 + .suspend = alchemy_usb_suspend, 314 + .resume = alchemy_usb_resume, 315 + 
}; 316 + 317 + static int __init alchemy_usb_init(void) 318 + { 319 + switch (alchemy_get_cputype()) { 320 + case ALCHEMY_CPU_AU1000: 321 + case ALCHEMY_CPU_AU1500: 322 + case ALCHEMY_CPU_AU1100: 323 + au1000_usb_init(AU1000_USB_OHCI_PHYS_ADDR, AU1000_OHCICFG); 324 + break; 325 + case ALCHEMY_CPU_AU1550: 326 + au1000_usb_init(AU1550_USB_OHCI_PHYS_ADDR, AU1550_OHCICFG); 327 + break; 328 + case ALCHEMY_CPU_AU1200: 329 + au1200_usb_init(); 330 + break; 331 + } 332 + 333 + register_syscore_ops(&alchemy_usb_pm_ops); 334 + 335 + return 0; 336 + } 337 + arch_initcall(alchemy_usb_init);
+10 -67
drivers/usb/host/ehci-au1xxx.c
··· 14 14 #include <linux/platform_device.h> 15 15 #include <asm/mach-au1x00/au1000.h> 16 16 17 - #define USB_HOST_CONFIG (USB_MSR_BASE + USB_MSR_MCFG) 18 - #define USB_MCFG_PFEN (1<<31) 19 - #define USB_MCFG_RDCOMB (1<<30) 20 - #define USB_MCFG_SSDEN (1<<23) 21 - #define USB_MCFG_PHYPLLEN (1<<19) 22 - #define USB_MCFG_UCECLKEN (1<<18) 23 - #define USB_MCFG_EHCCLKEN (1<<17) 24 - #ifdef CONFIG_DMA_COHERENT 25 - #define USB_MCFG_UCAM (1<<7) 26 - #else 27 - #define USB_MCFG_UCAM (0) 28 - #endif 29 - #define USB_MCFG_EBMEN (1<<3) 30 - #define USB_MCFG_EMEMEN (1<<2) 31 - 32 - #define USBH_ENABLE_CE (USB_MCFG_PHYPLLEN | USB_MCFG_EHCCLKEN) 33 - #define USBH_ENABLE_INIT (USB_MCFG_PFEN | USB_MCFG_RDCOMB | \ 34 - USBH_ENABLE_CE | USB_MCFG_SSDEN | \ 35 - USB_MCFG_UCAM | USB_MCFG_EBMEN | \ 36 - USB_MCFG_EMEMEN) 37 - 38 - #define USBH_DISABLE (USB_MCFG_EBMEN | USB_MCFG_EMEMEN) 39 17 40 18 extern int usb_disabled(void); 41 - 42 - static void au1xxx_start_ehc(void) 43 - { 44 - /* enable clock to EHCI block and HS PHY PLL*/ 45 - au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_CE, USB_HOST_CONFIG); 46 - au_sync(); 47 - udelay(1000); 48 - 49 - /* enable EHCI mmio */ 50 - au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG); 51 - au_sync(); 52 - udelay(1000); 53 - } 54 - 55 - static void au1xxx_stop_ehc(void) 56 - { 57 - unsigned long c; 58 - 59 - /* Disable mem */ 60 - au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_DISABLE, USB_HOST_CONFIG); 61 - au_sync(); 62 - udelay(1000); 63 - 64 - /* Disable EHC clock. If the HS PHY is unused disable it too. */ 65 - c = au_readl(USB_HOST_CONFIG) & ~USB_MCFG_EHCCLKEN; 66 - if (!(c & USB_MCFG_UCECLKEN)) /* UDC disabled? 
*/ 67 - c &= ~USB_MCFG_PHYPLLEN; /* yes: disable HS PHY PLL */ 68 - au_writel(c, USB_HOST_CONFIG); 69 - au_sync(); 70 - } 71 19 72 20 static int au1xxx_ehci_setup(struct usb_hcd *hcd) 73 21 { ··· 84 136 if (usb_disabled()) 85 137 return -ENODEV; 86 138 87 - #if defined(CONFIG_SOC_AU1200) && defined(CONFIG_DMA_COHERENT) 88 - /* Au1200 AB USB does not support coherent memory */ 89 - if (!(read_c0_prid() & 0xff)) { 90 - printk(KERN_INFO "%s: this is chip revision AB!\n", pdev->name); 91 - printk(KERN_INFO "%s: update your board or re-configure" 92 - " the kernel\n", pdev->name); 93 - return -ENODEV; 94 - } 95 - #endif 96 - 97 139 if (pdev->resource[1].flags != IORESOURCE_IRQ) { 98 140 pr_debug("resource[1] is not IORESOURCE_IRQ"); 99 141 return -ENOMEM; ··· 109 171 goto err2; 110 172 } 111 173 112 - au1xxx_start_ehc(); 174 + if (alchemy_usb_control(ALCHEMY_USB_EHCI0, 1)) { 175 + printk(KERN_INFO "%s: controller init failed!\n", pdev->name); 176 + ret = -ENODEV; 177 + goto err3; 178 + } 113 179 114 180 ehci = hcd_to_ehci(hcd); 115 181 ehci->caps = hcd->regs; ··· 129 187 return ret; 130 188 } 131 189 132 - au1xxx_stop_ehc(); 190 + alchemy_usb_control(ALCHEMY_USB_EHCI0, 0); 191 + err3: 133 192 iounmap(hcd->regs); 134 193 err2: 135 194 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); ··· 144 201 struct usb_hcd *hcd = platform_get_drvdata(pdev); 145 202 146 203 usb_remove_hcd(hcd); 204 + alchemy_usb_control(ALCHEMY_USB_EHCI0, 0); 147 205 iounmap(hcd->regs); 148 206 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 149 207 usb_put_hcd(hcd); 150 - au1xxx_stop_ehc(); 151 208 platform_set_drvdata(pdev, NULL); 152 209 153 210 return 0; ··· 179 236 // could save FLADJ in case of Vaux power loss 180 237 // ... 
we'd only use it to handle clock skew 181 238 182 - au1xxx_stop_ehc(); 239 + alchemy_usb_control(ALCHEMY_USB_EHCI0, 0); 183 240 184 241 return rc; 185 242 } ··· 189 246 struct usb_hcd *hcd = dev_get_drvdata(dev); 190 247 struct ehci_hcd *ehci = hcd_to_ehci(hcd); 191 248 192 - au1xxx_start_ehc(); 249 + alchemy_usb_control(ALCHEMY_USB_EHCI0, 1); 193 250 194 251 // maybe restore FLADJ 195 252
+1 -1
drivers/usb/host/ehci-hcd.c
··· 1224 1224 #define PLATFORM_DRIVER ehci_hcd_sh_driver 1225 1225 #endif 1226 1226 1227 - #ifdef CONFIG_SOC_AU1200 1227 + #ifdef CONFIG_MIPS_ALCHEMY 1228 1228 #include "ehci-au1xxx.c" 1229 1229 #define PLATFORM_DRIVER ehci_hcd_au1xxx_driver 1230 1230 #endif
+11 -99
drivers/usb/host/ohci-au1xxx.c
··· 23 23 24 24 #include <asm/mach-au1x00/au1000.h> 25 25 26 - #ifndef CONFIG_SOC_AU1200 27 - 28 - #define USBH_ENABLE_BE (1<<0) 29 - #define USBH_ENABLE_C (1<<1) 30 - #define USBH_ENABLE_E (1<<2) 31 - #define USBH_ENABLE_CE (1<<3) 32 - #define USBH_ENABLE_RD (1<<4) 33 - 34 - #ifdef __LITTLE_ENDIAN 35 - #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C) 36 - #elif defined(__BIG_ENDIAN) 37 - #define USBH_ENABLE_INIT (USBH_ENABLE_CE | USBH_ENABLE_E | USBH_ENABLE_C | \ 38 - USBH_ENABLE_BE) 39 - #else 40 - #error not byte order defined 41 - #endif 42 - 43 - #else /* Au1200 */ 44 - 45 - #define USB_HOST_CONFIG (USB_MSR_BASE + USB_MSR_MCFG) 46 - #define USB_MCFG_PFEN (1<<31) 47 - #define USB_MCFG_RDCOMB (1<<30) 48 - #define USB_MCFG_SSDEN (1<<23) 49 - #define USB_MCFG_OHCCLKEN (1<<16) 50 - #ifdef CONFIG_DMA_COHERENT 51 - #define USB_MCFG_UCAM (1<<7) 52 - #else 53 - #define USB_MCFG_UCAM (0) 54 - #endif 55 - #define USB_MCFG_OBMEN (1<<1) 56 - #define USB_MCFG_OMEMEN (1<<0) 57 - 58 - #define USBH_ENABLE_CE USB_MCFG_OHCCLKEN 59 - 60 - #define USBH_ENABLE_INIT (USB_MCFG_PFEN | USB_MCFG_RDCOMB | \ 61 - USBH_ENABLE_CE | USB_MCFG_SSDEN | \ 62 - USB_MCFG_UCAM | \ 63 - USB_MCFG_OBMEN | USB_MCFG_OMEMEN) 64 - 65 - #define USBH_DISABLE (USB_MCFG_OBMEN | USB_MCFG_OMEMEN) 66 - 67 - #endif /* Au1200 */ 68 26 69 27 extern int usb_disabled(void); 70 - 71 - static void au1xxx_start_ohc(void) 72 - { 73 - /* enable host controller */ 74 - #ifndef CONFIG_SOC_AU1200 75 - au_writel(USBH_ENABLE_CE, USB_HOST_CONFIG); 76 - au_sync(); 77 - udelay(1000); 78 - 79 - au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG); 80 - au_sync(); 81 - udelay(1000); 82 - 83 - /* wait for reset complete (read register twice; see au1500 errata) */ 84 - while (au_readl(USB_HOST_CONFIG), 85 - !(au_readl(USB_HOST_CONFIG) & USBH_ENABLE_RD)) 86 - udelay(1000); 87 - 88 - #else /* Au1200 */ 89 - au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_CE, USB_HOST_CONFIG); 90 - 
au_sync(); 91 - udelay(1000); 92 - 93 - au_writel(au_readl(USB_HOST_CONFIG) | USBH_ENABLE_INIT, USB_HOST_CONFIG); 94 - au_sync(); 95 - udelay(2000); 96 - #endif /* Au1200 */ 97 - } 98 - 99 - static void au1xxx_stop_ohc(void) 100 - { 101 - #ifdef CONFIG_SOC_AU1200 102 - /* Disable mem */ 103 - au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_DISABLE, USB_HOST_CONFIG); 104 - au_sync(); 105 - udelay(1000); 106 - #endif 107 - /* Disable clock */ 108 - au_writel(au_readl(USB_HOST_CONFIG) & ~USBH_ENABLE_CE, USB_HOST_CONFIG); 109 - au_sync(); 110 - } 111 28 112 29 static int __devinit ohci_au1xxx_start(struct usb_hcd *hcd) 113 30 { ··· 95 178 if (usb_disabled()) 96 179 return -ENODEV; 97 180 98 - #if defined(CONFIG_SOC_AU1200) && defined(CONFIG_DMA_COHERENT) 99 - /* Au1200 AB USB does not support coherent memory */ 100 - if (!(read_c0_prid() & 0xff)) { 101 - printk(KERN_INFO "%s: this is chip revision AB !!\n", 102 - pdev->name); 103 - printk(KERN_INFO "%s: update your board or re-configure " 104 - "the kernel\n", pdev->name); 105 - return -ENODEV; 106 - } 107 - #endif 108 - 109 181 if (pdev->resource[1].flags != IORESOURCE_IRQ) { 110 182 pr_debug("resource[1] is not IORESOURCE_IRQ\n"); 111 183 return -ENOMEM; ··· 120 214 goto err2; 121 215 } 122 216 123 - au1xxx_start_ohc(); 217 + if (alchemy_usb_control(ALCHEMY_USB_OHCI0, 1)) { 218 + printk(KERN_INFO "%s: controller init failed!\n", pdev->name); 219 + ret = -ENODEV; 220 + goto err3; 221 + } 222 + 124 223 ohci_hcd_init(hcd_to_ohci(hcd)); 125 224 126 225 ret = usb_add_hcd(hcd, pdev->resource[1].start, ··· 135 224 return ret; 136 225 } 137 226 138 - au1xxx_stop_ohc(); 227 + alchemy_usb_control(ALCHEMY_USB_OHCI0, 0); 228 + err3: 139 229 iounmap(hcd->regs); 140 230 err2: 141 231 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); ··· 150 238 struct usb_hcd *hcd = platform_get_drvdata(pdev); 151 239 152 240 usb_remove_hcd(hcd); 153 - au1xxx_stop_ohc(); 241 + alchemy_usb_control(ALCHEMY_USB_OHCI0, 0); 154 242 iounmap(hcd->regs); 
155 243 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 156 244 usb_put_hcd(hcd); ··· 187 275 188 276 clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 189 277 190 - au1xxx_stop_ohc(); 278 + alchemy_usb_control(ALCHEMY_USB_OHCI0, 0); 191 279 bail: 192 280 spin_unlock_irqrestore(&ohci->lock, flags); 193 281 ··· 198 286 { 199 287 struct usb_hcd *hcd = dev_get_drvdata(dev); 200 288 201 - au1xxx_start_ohc(); 289 + alchemy_usb_control(ALCHEMY_USB_OHCI0, 1); 202 290 203 291 set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags); 204 292 ohci_finish_controller_resume(hcd);
+2 -2
drivers/video/Kconfig
··· 1753 1753 1754 1754 config FB_AU1100 1755 1755 bool "Au1100 LCD Driver" 1756 - depends on (FB = y) && MIPS && SOC_AU1100 1756 + depends on (FB = y) && MIPS_ALCHEMY 1757 1757 select FB_CFB_FILLRECT 1758 1758 select FB_CFB_COPYAREA 1759 1759 select FB_CFB_IMAGEBLIT ··· 1764 1764 1765 1765 config FB_AU1200 1766 1766 bool "Au1200 LCD Driver" 1767 - depends on (FB = y) && MIPS && SOC_AU1200 1767 + depends on (FB = y) && MIPS_ALCHEMY 1768 1768 select FB_SYS_FILLRECT 1769 1769 select FB_SYS_COPYAREA 1770 1770 select FB_SYS_IMAGEBLIT
+1 -1
sound/mips/Kconfig
··· 24 24 25 25 config SND_AU1X00 26 26 tristate "Au1x00 AC97 Port Driver (DEPRECATED)" 27 - depends on SOC_AU1000 || SOC_AU1100 || SOC_AU1500 27 + depends on MIPS_ALCHEMY 28 28 select SND_PCM 29 29 select SND_AC97_CODEC 30 30 help
+1 -1
sound/soc/au1x/Kconfig
··· 3 3 ## 4 4 config SND_SOC_AU1XPSC 5 5 tristate "SoC Audio for Au1200/Au1250/Au1550" 6 - depends on SOC_AU1200 || SOC_AU1550 6 + depends on MIPS_ALCHEMY 7 7 help 8 8 This option enables support for the Programmable Serial 9 9 Controllers in AC97 and I2S mode, and the Descriptor-Based DMA