Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from Ralf Baechle:
"This is the main pull request for MIPS:

- a number of fixes that didn't make the 3.19 release.

- a number of cleanups.

- preliminary support for Cavium's Octeon 3 SOCs which feature up to
48 MIPS64 R3 cores with FPU and hardware virtualization.

- support for MIPS R6 processors.

Revision 6 of the MIPS architecture is a major revision of the MIPS
architecture which does away with many of the original sins of the
architecture such as branch delay slots. This and other changes in
R6 require major changes throughout the entire MIPS core
architecture code and make up the lion's share of this pull
request.

- finally some preparatory work for eXtended Physical Address
support, which allows support of up to 40 bits of physical address
space on 32 bit processors"

[ Ahh, MIPS can't leave the PAE brain damage alone. It's like
every CPU architect has to make that mistake, but pee in the snow
by changing the TLA. But whether it's called PAE, LPAE or XPA,
it's horrid crud - Linus ]

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (114 commits)
MIPS: sead3: Corrected get_c0_perfcount_int
MIPS: mm: Remove dead macro definitions
MIPS: OCTEON: irq: add CIB and other fixes
MIPS: OCTEON: Don't do acknowledge operations for level triggered irqs.
MIPS: OCTEON: More OCTEONIII support
MIPS: OCTEON: Remove setting of processor specific CVMCTL icache bits.
MIPS: OCTEON: Core-15169 Workaround and general CVMSEG cleanup.
MIPS: OCTEON: Update octeon-model.h code for new SoCs.
MIPS: OCTEON: Implement DCache errata workaround for all CN6XXX
MIPS: OCTEON: Add little-endian support to asm/octeon/octeon.h
MIPS: OCTEON: Implement the core-16057 workaround
MIPS: OCTEON: Delete unused COP2 saving code
MIPS: OCTEON: Use correct instruction to read 64-bit COP0 register
MIPS: OCTEON: Save and restore CP2 SHA3 state
MIPS: OCTEON: Fix FP context save.
MIPS: OCTEON: Save/Restore wider multiply registers in OCTEON III CPUs
MIPS: boot: Provide more uImage options
MIPS: Remove unneeded #ifdef __KERNEL__ from asm/processor.h
MIPS: ip22-gio: Remove legacy suspend/resume support
mips: pci: Add ifdef around pci_proc_domain
...

+6453 -1135
+43
Documentation/devicetree/bindings/mips/cavium/cib.txt
···
··· 1 + * Cavium Interrupt Bus widget 2 + 3 + Properties: 4 + - compatible: "cavium,octeon-7130-cib" 5 + 6 + Compatibility with cn70XX SoCs. 7 + 8 + - interrupt-controller: This is an interrupt controller. 9 + 10 + - reg: Two elements consisting of the addresses of the RAW and EN 11 + registers of the CIB block 12 + 13 + - cavium,max-bits: The index (zero based) of the highest numbered bit 14 + in the CIB block. 15 + 16 + - interrupt-parent: Always the CIU on the SoC. 17 + 18 + - interrupts: The CIU line to which the CIB block is connected. 19 + 20 + - #interrupt-cells: Must be <2>. The first cell is the bit within the 21 + CIB. The second cell specifies the triggering semantics of the 22 + line. 23 + 24 + Example: 25 + 26 + interrupt-controller@107000000e000 { 27 + compatible = "cavium,octeon-7130-cib"; 28 + reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */ 29 + <0x10700 0x0000e100 0x0 0x8>; /* EN */ 30 + cavium,max-bits = <23>; 31 + 32 + interrupt-controller; 33 + interrupt-parent = <&ciu>; 34 + interrupts = <1 24>; 35 + /* Interrupts are specified by two parts: 36 + * 1) Bit number in the CIB* registers 37 + * 2) Triggering (1 - edge rising 38 + * 2 - edge falling 39 + * 4 - level active high 40 + * 8 - level active low) 41 + */ 42 + #interrupt-cells = <2>; 43 + };
+69 -4
arch/mips/Kconfig
··· 54 select CPU_PM if CPU_IDLE 55 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 56 select ARCH_BINFMT_ELF_STATE 57 58 menu "Machine selection" 59 ··· 377 select SYS_HAS_CPU_MIPS32_R1 378 select SYS_HAS_CPU_MIPS32_R2 379 select SYS_HAS_CPU_MIPS32_R3_5 380 select SYS_HAS_CPU_MIPS64_R1 381 select SYS_HAS_CPU_MIPS64_R2 382 select SYS_HAS_CPU_NEVADA 383 select SYS_HAS_CPU_RM7000 384 select SYS_SUPPORTS_32BIT_KERNEL ··· 1036 config NO_IOPORT_MAP 1037 def_bool n 1038 1039 config GENERIC_ISA_DMA 1040 bool 1041 select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n ··· 1151 config SOC_PNX8335 1152 bool 1153 select SOC_PNX833X 1154 1155 config SWAP_IO_SPACE 1156 bool ··· 1313 specific type of processor in your system, choose those that one 1314 otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system. 1315 1316 config CPU_MIPS64_R1 1317 bool "MIPS64 Release 1" 1318 depends on SYS_HAS_CPU_MIPS64_R1 ··· 1363 MIPS processor are based on a MIPS64 processor. If you know the 1364 specific type of processor in your system, choose those that one 1365 otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system. 
1366 1367 config CPU_R3000 1368 bool "R3000" ··· 1579 config CPU_MIPS32_3_5_FEATURES 1580 bool "MIPS32 Release 3.5 Features" 1581 depends on SYS_HAS_CPU_MIPS32_R3_5 1582 - depends on CPU_MIPS32_R2 1583 help 1584 Choose this option to build a kernel for release 2 or later of the 1585 MIPS32 architecture including features from the 3.5 release such as ··· 1699 config SYS_HAS_CPU_MIPS32_R3_5 1700 bool 1701 1702 config SYS_HAS_CPU_MIPS64_R1 1703 bool 1704 1705 config SYS_HAS_CPU_MIPS64_R2 1706 bool 1707 1708 config SYS_HAS_CPU_R3000 ··· 1810 # 1811 config CPU_MIPS32 1812 bool 1813 - default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 1814 1815 config CPU_MIPS64 1816 bool 1817 - default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 1818 1819 # 1820 # These two indicate the revision of the architecture, either Release 1 or Release 2 ··· 1826 config CPU_MIPSR2 1827 bool 1828 default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON 1829 1830 config EVA 1831 bool ··· 2065 default y 2066 depends on MIPS_MT_SMP 2067 2068 config MIPS_VPE_LOADER 2069 bool "VPE loader support." 2070 depends on SYS_SUPPORTS_MULTITHREADING && MODULES ··· 2213 here. 2214 2215 config CPU_MICROMIPS 2216 - depends on 32BIT && SYS_SUPPORTS_MICROMIPS 2217 bool "microMIPS" 2218 help 2219 When this option is enabled the kernel will be built using the
··· 54 select CPU_PM if CPU_IDLE 55 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 56 select ARCH_BINFMT_ELF_STATE 57 + select SYSCTL_EXCEPTION_TRACE 58 59 menu "Machine selection" 60 ··· 376 select SYS_HAS_CPU_MIPS32_R1 377 select SYS_HAS_CPU_MIPS32_R2 378 select SYS_HAS_CPU_MIPS32_R3_5 379 + select SYS_HAS_CPU_MIPS32_R6 380 select SYS_HAS_CPU_MIPS64_R1 381 select SYS_HAS_CPU_MIPS64_R2 382 + select SYS_HAS_CPU_MIPS64_R6 383 select SYS_HAS_CPU_NEVADA 384 select SYS_HAS_CPU_RM7000 385 select SYS_SUPPORTS_32BIT_KERNEL ··· 1033 config NO_IOPORT_MAP 1034 def_bool n 1035 1036 + config GENERIC_CSUM 1037 + bool 1038 + 1039 config GENERIC_ISA_DMA 1040 bool 1041 select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n ··· 1145 config SOC_PNX8335 1146 bool 1147 select SOC_PNX833X 1148 + 1149 + config MIPS_SPRAM 1150 + bool 1151 1152 config SWAP_IO_SPACE 1153 bool ··· 1304 specific type of processor in your system, choose those that one 1305 otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system. 1306 1307 + config CPU_MIPS32_R6 1308 + bool "MIPS32 Release 6 (EXPERIMENTAL)" 1309 + depends on SYS_HAS_CPU_MIPS32_R6 1310 + select CPU_HAS_PREFETCH 1311 + select CPU_SUPPORTS_32BIT_KERNEL 1312 + select CPU_SUPPORTS_HIGHMEM 1313 + select CPU_SUPPORTS_MSA 1314 + select GENERIC_CSUM 1315 + select HAVE_KVM 1316 + select MIPS_O32_FP64_SUPPORT 1317 + help 1318 + Choose this option to build a kernel for release 6 or later of the 1319 + MIPS32 architecture. New MIPS processors, starting with the Warrior 1320 + family, are based on a MIPS32r6 processor. If you own an older 1321 + processor, you probably need to select MIPS32r1 or MIPS32r2 instead. 1322 + 1323 config CPU_MIPS64_R1 1324 bool "MIPS64 Release 1" 1325 depends on SYS_HAS_CPU_MIPS64_R1 ··· 1338 MIPS processor are based on a MIPS64 processor. If you know the 1339 specific type of processor in your system, choose those that one 1340 otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system. 
1341 + 1342 + config CPU_MIPS64_R6 1343 + bool "MIPS64 Release 6 (EXPERIMENTAL)" 1344 + depends on SYS_HAS_CPU_MIPS64_R6 1345 + select CPU_HAS_PREFETCH 1346 + select CPU_SUPPORTS_32BIT_KERNEL 1347 + select CPU_SUPPORTS_64BIT_KERNEL 1348 + select CPU_SUPPORTS_HIGHMEM 1349 + select CPU_SUPPORTS_MSA 1350 + select GENERIC_CSUM 1351 + help 1352 + Choose this option to build a kernel for release 6 or later of the 1353 + MIPS64 architecture. New MIPS processors, starting with the Warrior 1354 + family, are based on a MIPS64r6 processor. If you own an older 1355 + processor, you probably need to select MIPS64r1 or MIPS64r2 instead. 1356 1357 config CPU_R3000 1358 bool "R3000" ··· 1539 config CPU_MIPS32_3_5_FEATURES 1540 bool "MIPS32 Release 3.5 Features" 1541 depends on SYS_HAS_CPU_MIPS32_R3_5 1542 + depends on CPU_MIPS32_R2 || CPU_MIPS32_R6 1543 help 1544 Choose this option to build a kernel for release 2 or later of the 1545 MIPS32 architecture including features from the 3.5 release such as ··· 1659 config SYS_HAS_CPU_MIPS32_R3_5 1660 bool 1661 1662 + config SYS_HAS_CPU_MIPS32_R6 1663 + bool 1664 + 1665 config SYS_HAS_CPU_MIPS64_R1 1666 bool 1667 1668 config SYS_HAS_CPU_MIPS64_R2 1669 + bool 1670 + 1671 + config SYS_HAS_CPU_MIPS64_R6 1672 bool 1673 1674 config SYS_HAS_CPU_R3000 ··· 1764 # 1765 config CPU_MIPS32 1766 bool 1767 + default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6 1768 1769 config CPU_MIPS64 1770 bool 1771 + default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6 1772 1773 # 1774 # These two indicate the revision of the architecture, either Release 1 or Release 2 ··· 1780 config CPU_MIPSR2 1781 bool 1782 default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON 1783 + select MIPS_SPRAM 1784 + 1785 + config CPU_MIPSR6 1786 + bool 1787 + default y if CPU_MIPS32_R6 || CPU_MIPS64_R6 1788 + select MIPS_SPRAM 1789 1790 config EVA 1791 bool ··· 2013 default y 2014 depends on MIPS_MT_SMP 2015 2016 + config MIPSR2_TO_R6_EMULATOR 2017 + bool 
"MIPS R2-to-R6 emulator" 2018 + depends on CPU_MIPSR6 && !SMP 2019 + default y 2020 + help 2021 + Choose this option if you want to run non-R6 MIPS userland code. 2022 + Even if you say 'Y' here, the emulator will still be disabled by 2023 + default. You can enable it using the 'mipsr2emul' kernel option. 2024 + The only reason this is a build-time option is to save ~14K from the 2025 + final kernel image. 2026 + comment "MIPS R2-to-R6 emulator is only available for UP kernels" 2027 + depends on SMP && CPU_MIPSR6 2028 + 2029 config MIPS_VPE_LOADER 2030 bool "VPE loader support." 2031 depends on SYS_SUPPORTS_MULTITHREADING && MODULES ··· 2148 here. 2149 2150 config CPU_MICROMIPS 2151 + depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6 2152 bool "microMIPS" 2153 help 2154 When this option is enabled the kernel will be built using the
-13
arch/mips/Kconfig.debug
··· 122 help 123 Add several files to the debugfs to test spinlock speed. 124 125 - config FP32XX_HYBRID_FPRS 126 - bool "Run FP32 & FPXX code with hybrid FPRs" 127 - depends on MIPS_O32_FP64_SUPPORT 128 - help 129 - The hybrid FPR scheme is normally used only when a program needs to 130 - execute a mix of FP32 & FP64A code, since the trapping & emulation 131 - that it entails is expensive. When enabled, this option will lead 132 - to the kernel running programs which use the FP32 & FPXX FP ABIs 133 - using the hybrid FPR scheme, which can be useful for debugging 134 - purposes. 135 - 136 - If unsure, say N. 137 - 138 endmenu
··· 122 help 123 Add several files to the debugfs to test spinlock speed. 124 125 endmenu
+37 -18
arch/mips/Makefile
··· 122 cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) 123 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) 124 125 - # For smartmips configurations, there are hundreds of warnings due to ISA overrides 126 - # in assembly and header files. smartmips is only supported for MIPS32r1 onwards 127 - # and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or 128 - # similar directives in the kernel will spam the build logs with the following warnings: 129 - # Warning: the `smartmips' extension requires MIPS32 revision 1 or greater 130 - # or 131 - # Warning: the 64-bit MIPS architecture does not support the `smartmips' extension 132 - # Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has 133 - # been fixed properly. 134 - cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn 135 - cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips) 136 - 137 cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ 138 -fno-omit-frame-pointer 139 - 140 - ifeq ($(CONFIG_CPU_HAS_MSA),y) 141 - toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa) 142 - cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA 143 - endif 144 - 145 # 146 # CPU-dependent compiler/assembler options for optimization. 
147 # ··· 138 -Wa,-mips32 -Wa,--trap 139 cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ 140 -Wa,-mips32r2 -Wa,--trap 141 cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ 142 -Wa,-mips64 -Wa,--trap 143 cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ 144 -Wa,-mips64r2 -Wa,--trap 145 cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap 146 cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \ 147 -Wa,--trap ··· 166 endif 167 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1 168 cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap 169 170 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,) 171 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) ··· 186 KBUILD_AFLAGS_MODULE += -msb1-pass1-workarounds 187 KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds 188 endif 189 endif 190 191 # ··· 298 boot-y += vmlinux.srec 299 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0) 300 boot-y += uImage 301 boot-y += uImage.gz 302 endif 303 304 # compressed boot image targets (arch/mips/boot/compressed/) ··· 401 echo ' vmlinuz.bin - Raw binary zboot image' 402 echo ' vmlinuz.srec - SREC zboot image' 403 echo ' uImage - U-Boot image' 404 echo ' uImage.gz - U-Boot image (gzip)' 405 echo ' dtbs - Device-tree blobs for enabled boards' 406 echo 407 echo ' These will be default as appropriate for a configured platform.'
··· 122 cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) 123 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) 124 125 cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ 126 -fno-omit-frame-pointer 127 # 128 # CPU-dependent compiler/assembler options for optimization. 129 # ··· 156 -Wa,-mips32 -Wa,--trap 157 cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \ 158 -Wa,-mips32r2 -Wa,--trap 159 + cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap 160 cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ 161 -Wa,-mips64 -Wa,--trap 162 cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ 163 -Wa,-mips64r2 -Wa,--trap 164 + cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap 165 cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap 166 cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \ 167 -Wa,--trap ··· 182 endif 183 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1 184 cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap 185 + # 186 + # binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a 187 + # as MIPS64 R1; older versions as just R1. This leaves the possibility open 188 + # that GCC might generate R2 code for -march=loongson3a which then is rejected 189 + # by GAS. The cc-option can't probe for this behaviour so -march=loongson3a 190 + # can't easily be used safely within the kbuild framework. 
191 + # 192 + cflags-$(CONFIG_CPU_LOONGSON3) += \ 193 + $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \ 194 + -Wa,-mips64r2 -Wa,--trap 195 196 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,) 197 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,) ··· 192 KBUILD_AFLAGS_MODULE += -msb1-pass1-workarounds 193 KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds 194 endif 195 + endif 196 + 197 + # For smartmips configurations, there are hundreds of warnings due to ISA overrides 198 + # in assembly and header files. smartmips is only supported for MIPS32r1 onwards 199 + # and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or 200 + # similar directives in the kernel will spam the build logs with the following warnings: 201 + # Warning: the `smartmips' extension requires MIPS32 revision 1 or greater 202 + # or 203 + # Warning: the 64-bit MIPS architecture does not support the `smartmips' extension 204 + # Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has 205 + # been fixed properly. 
206 + mips-cflags := "$(cflags-y)" 207 + cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn 208 + cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips) 209 + ifeq ($(CONFIG_CPU_HAS_MSA),y) 210 + toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa) 211 + cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA 212 endif 213 214 # ··· 287 boot-y += vmlinux.srec 288 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0) 289 boot-y += uImage 290 + boot-y += uImage.bin 291 + boot-y += uImage.bz2 292 boot-y += uImage.gz 293 + boot-y += uImage.lzma 294 + boot-y += uImage.lzo 295 endif 296 297 # compressed boot image targets (arch/mips/boot/compressed/) ··· 386 echo ' vmlinuz.bin - Raw binary zboot image' 387 echo ' vmlinuz.srec - SREC zboot image' 388 echo ' uImage - U-Boot image' 389 + echo ' uImage.bin - U-Boot image (uncompressed)' 390 + echo ' uImage.bz2 - U-Boot image (bz2)' 391 echo ' uImage.gz - U-Boot image (gzip)' 392 + echo ' uImage.lzma - U-Boot image (lzma)' 393 + echo ' uImage.lzo - U-Boot image (lzo)' 394 echo ' dtbs - Device-tree blobs for enabled boards' 395 echo 396 echo ' These will be default as appropriate for a configured platform.'
+22 -5
arch/mips/alchemy/common/clock.c
··· 127 t = 396000000; 128 else { 129 t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f; 130 t *= parent_rate; 131 } 132 133 return t; 134 } 135 136 static struct clk_ops alchemy_clkops_cpu = { ··· 323 324 /* lrclk: external synchronous static bus clock ***********************/ 325 326 - static struct clk __init *alchemy_clk_setup_lrclk(const char *pn) 327 { 328 - /* MEM_STCFG0[15:13] = divisor. 329 * L/RCLK = periph_clk / (divisor + 1) 330 * On Au1000, Au1500, Au1100 it's called LCLK, 331 * on later models it's called RCLK, but it's the same thing. 332 */ 333 struct clk *c; 334 - unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13; 335 336 - v = (v & 7) + 1; 337 c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK, 338 pn, 0, 1, v); 339 if (!IS_ERR(c)) ··· 1083 ERRCK(c) 1084 1085 /* L/RCLK: external static bus clock for synchronous mode */ 1086 - c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK); 1087 ERRCK(c) 1088 1089 /* Frequency dividers 0-5 */
··· 127 t = 396000000; 128 else { 129 t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f; 130 + if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300) 131 + t &= 0x3f; 132 t *= parent_rate; 133 } 134 135 return t; 136 + } 137 + 138 + void __init alchemy_set_lpj(void) 139 + { 140 + preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE); 141 + preset_lpj /= 2 * HZ; 142 } 143 144 static struct clk_ops alchemy_clkops_cpu = { ··· 315 316 /* lrclk: external synchronous static bus clock ***********************/ 317 318 + static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t) 319 { 320 + /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5, 321 + * otherwise lrclk=pclk/4. 322 + * All other variants: MEM_STCFG0[15:13] = divisor. 323 * L/RCLK = periph_clk / (divisor + 1) 324 * On Au1000, Au1500, Au1100 it's called LCLK, 325 * on later models it's called RCLK, but it's the same thing. 326 */ 327 struct clk *c; 328 + unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0); 329 330 + switch (t) { 331 + case ALCHEMY_CPU_AU1000: 332 + case ALCHEMY_CPU_AU1500: 333 + v = 4 + ((v >> 11) & 1); 334 + break; 335 + default: /* all other models */ 336 + v = ((v >> 13) & 7) + 1; 337 + } 338 c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK, 339 pn, 0, 1, v); 340 if (!IS_ERR(c)) ··· 1066 ERRCK(c) 1067 1068 /* L/RCLK: external static bus clock for synchronous mode */ 1069 + c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype); 1070 ERRCK(c) 1071 1072 /* Frequency dividers 0-5 */
+3 -1
arch/mips/alchemy/common/setup.c
··· 34 #include <au1000.h> 35 36 extern void __init board_setup(void); 37 - extern void set_cpuspec(void); 38 39 void __init plat_mem_setup(void) 40 { 41 if (au1xxx_cpu_needs_config_od()) 42 /* Various early Au1xx0 errata corrected by this */ 43 set_c0_config(1 << 19); /* Set Config[OD] */
··· 34 #include <au1000.h> 35 36 extern void __init board_setup(void); 37 + extern void __init alchemy_set_lpj(void); 38 39 void __init plat_mem_setup(void) 40 { 41 + alchemy_set_lpj(); 42 + 43 if (au1xxx_cpu_needs_config_od()) 44 /* Various early Au1xx0 errata corrected by this */ 45 set_c0_config(1 << 19); /* Set Config[OD] */
+1 -1
arch/mips/bcm3384/irq.c
··· 180 181 static struct of_device_id of_irq_ids[] __initdata = { 182 { .compatible = "mti,cpu-interrupt-controller", 183 - .data = mips_cpu_intc_init }, 184 { .compatible = "brcm,bcm3384-intc", 185 .data = intc_of_init }, 186 {},
··· 180 181 static struct of_device_id of_irq_ids[] __initdata = { 182 { .compatible = "mti,cpu-interrupt-controller", 183 + .data = mips_cpu_irq_of_init }, 184 { .compatible = "brcm,bcm3384-intc", 185 .data = intc_of_init }, 186 {},
+47 -2
arch/mips/boot/Makefile
··· 23 24 hostprogs-y := elf2ecoff 25 26 targets := vmlinux.ecoff 27 quiet_cmd_ecoff = ECOFF $@ 28 cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag) ··· 50 UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS) 51 UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS) 52 53 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 54 $(call if_changed,gzip) 55 56 targets += uImage.gz 57 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE 58 $(call if_changed,uimage,gzip) 59 60 - targets += uImage 61 - $(obj)/uImage: $(obj)/uImage.gz FORCE 62 @ln -sf $(notdir $<) $@ 63 @echo ' Image $@ is ready'
··· 23 24 hostprogs-y := elf2ecoff 25 26 + suffix-y := bin 27 + suffix-$(CONFIG_KERNEL_BZIP2) := bz2 28 + suffix-$(CONFIG_KERNEL_GZIP) := gz 29 + suffix-$(CONFIG_KERNEL_LZMA) := lzma 30 + suffix-$(CONFIG_KERNEL_LZO) := lzo 31 + 32 targets := vmlinux.ecoff 33 quiet_cmd_ecoff = ECOFF $@ 34 cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag) ··· 44 UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS) 45 UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS) 46 47 + # 48 + # Compressed vmlinux images 49 + # 50 + 51 + extra-y += vmlinux.bin.bz2 52 + extra-y += vmlinux.bin.gz 53 + extra-y += vmlinux.bin.lzma 54 + extra-y += vmlinux.bin.lzo 55 + 56 + $(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE 57 + $(call if_changed,bzip2) 58 + 59 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 60 $(call if_changed,gzip) 61 62 + $(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE 63 + $(call if_changed,lzma) 64 + 65 + $(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE 66 + $(call if_changed,lzo) 67 + 68 + # 69 + # Compressed u-boot images 70 + # 71 + 72 + targets += uImage 73 + targets += uImage.bin 74 + targets += uImage.bz2 75 targets += uImage.gz 76 + targets += uImage.lzma 77 + targets += uImage.lzo 78 + 79 + $(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE 80 + $(call if_changed,uimage,none) 81 + 82 + $(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE 83 + $(call if_changed,uimage,bzip2) 84 + 85 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE 86 $(call if_changed,uimage,gzip) 87 88 + $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE 89 + $(call if_changed,uimage,lzma) 90 + 91 + $(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE 92 + $(call if_changed,uimage,lzo) 93 + 94 + $(obj)/uImage: $(obj)/uImage.$(suffix-y) 95 @ln -sf $(notdir $<) $@ 96 @echo ' Image $@ is ready'
-4
arch/mips/boot/elf2ecoff.c
··· 268 Elf32_Ehdr ex; 269 Elf32_Phdr *ph; 270 Elf32_Shdr *sh; 271 - char *shstrtab; 272 int i, pad; 273 struct sect text, data, bss; 274 struct filehdr efh; ··· 335 "sh"); 336 if (must_convert_endian) 337 convert_elf_shdrs(sh, ex.e_shnum); 338 - /* Read in the section string table. */ 339 - shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset, 340 - sh[ex.e_shstrndx].sh_size, "shstrtab"); 341 342 /* Figure out if we can cram the program header into an ECOFF 343 header... Basically, we can't handle anything but loadable
··· 268 Elf32_Ehdr ex; 269 Elf32_Phdr *ph; 270 Elf32_Shdr *sh; 271 int i, pad; 272 struct sect text, data, bss; 273 struct filehdr efh; ··· 336 "sh"); 337 if (must_convert_endian) 338 convert_elf_shdrs(sh, ex.e_shnum); 339 340 /* Figure out if we can cram the program header into an ECOFF 341 header... Basically, we can't handle anything but loadable
+10 -1
arch/mips/cavium-octeon/csrc-octeon.c
··· 18 #include <asm/octeon/octeon.h> 19 #include <asm/octeon/cvmx-ipd-defs.h> 20 #include <asm/octeon/cvmx-mio-defs.h> 21 - 22 23 static u64 f; 24 static u64 rdiv; ··· 39 40 if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { 41 union cvmx_mio_rst_boot rst_boot; 42 rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); 43 rdiv = rst_boot.s.c_mul; /* CPU clock */ 44 sdiv = rst_boot.s.pnr_mul; /* I/O clock */ 45 f = (0x8000000000000000ull / sdiv) * 2; 46 } 47 } 48 49 /*
··· 18 #include <asm/octeon/octeon.h> 19 #include <asm/octeon/cvmx-ipd-defs.h> 20 #include <asm/octeon/cvmx-mio-defs.h> 21 + #include <asm/octeon/cvmx-rst-defs.h> 22 23 static u64 f; 24 static u64 rdiv; ··· 39 40 if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { 41 union cvmx_mio_rst_boot rst_boot; 42 + 43 rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); 44 rdiv = rst_boot.s.c_mul; /* CPU clock */ 45 sdiv = rst_boot.s.pnr_mul; /* I/O clock */ 46 f = (0x8000000000000000ull / sdiv) * 2; 47 + } else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) { 48 + union cvmx_rst_boot rst_boot; 49 + 50 + rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); 51 + rdiv = rst_boot.s.c_mul; /* CPU clock */ 52 + sdiv = rst_boot.s.pnr_mul; /* I/O clock */ 53 + f = (0x8000000000000000ull / sdiv) * 2; 54 } 55 + 56 } 57 58 /*
+2 -2
arch/mips/cavium-octeon/dma-octeon.c
··· 276 continue; 277 278 /* These addresses map low for PCI. */ 279 - if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX)) 280 continue; 281 282 addr_size += e->size; ··· 308 #endif 309 #ifdef CONFIG_USB_OCTEON_OHCI 310 /* OCTEON II ohci is only 32-bit. */ 311 - if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul) 312 swiotlbsize = 64 * (1<<20); 313 #endif 314 swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
··· 276 continue; 277 278 /* These addresses map low for PCI. */ 279 + if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2()) 280 continue; 281 282 addr_size += e->size; ··· 308 #endif 309 #ifdef CONFIG_USB_OCTEON_OHCI 310 /* OCTEON II ohci is only 32-bit. */ 311 + if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul) 312 swiotlbsize = 64 * (1<<20); 313 #endif 314 swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+1 -1
arch/mips/cavium-octeon/executive/cvmx-helper-board.c
··· 767 break; 768 } 769 /* Most boards except NIC10e use a 12MHz crystal */ 770 - if (OCTEON_IS_MODEL(OCTEON_FAM_2)) 771 return USB_CLOCK_TYPE_CRYSTAL_12; 772 return USB_CLOCK_TYPE_REF_48; 773 }
··· 767 break; 768 } 769 /* Most boards except NIC10e use a 12MHz crystal */ 770 + if (OCTEON_IS_OCTEON2()) 771 return USB_CLOCK_TYPE_CRYSTAL_12; 772 return USB_CLOCK_TYPE_REF_48; 773 }
+823 -271
arch/mips/cavium-octeon/octeon-irq.c
··· 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 - * Copyright (C) 2004-2012 Cavium, Inc. 7 */ 8 9 #include <linux/interrupt.h> 10 #include <linux/irqdomain.h> 11 #include <linux/bitops.h> 12 #include <linux/percpu.h> 13 #include <linux/slab.h> 14 #include <linux/irq.h> ··· 24 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); 25 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); 26 27 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; 28 29 - union octeon_ciu_chip_data { 30 - void *p; 31 - unsigned long l; 32 - struct { 33 - unsigned long line:6; 34 - unsigned long bit:6; 35 - unsigned long gpio_line:6; 36 - } s; 37 }; 38 39 struct octeon_core_chip_data { ··· 56 57 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; 58 59 - static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, 60 - struct irq_chip *chip, 61 - irq_flow_handler_t handler) 62 { 63 - union octeon_ciu_chip_data cd; 64 65 irq_set_chip_and_handler(irq, chip, handler); 66 67 - cd.l = 0; 68 - cd.s.line = line; 69 - cd.s.bit = bit; 70 - cd.s.gpio_line = gpio_line; 71 72 - irq_set_chip_data(irq, cd.p); 73 octeon_irq_ciu_to_irq[line][bit] = irq; 74 } 75 76 - static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, 77 - int irq, int line, int bit) 78 { 79 - irq_domain_associate(domain, irq, line << 6 | bit); 80 } 81 82 static int octeon_coreid_for_cpu(int cpu) ··· 226 #ifdef CONFIG_SMP 227 int cpu; 228 int weight = cpumask_weight(data->affinity); 229 230 if (weight > 1) { 231 - cpu = smp_processor_id(); 232 for (;;) { 233 cpu = cpumask_next(cpu, data->affinity); 234 if (cpu >= nr_cpu_ids) { ··· 244 } else { 245 cpu = smp_processor_id(); 246 } 247 return cpu; 248 #else 249 return smp_processor_id(); ··· 257 int coreid = octeon_coreid_for_cpu(cpu); 258 unsigned long *pen; 259 unsigned long flags; 260 - union octeon_ciu_chip_data cd; 261 raw_spinlock_t *lock 
= &per_cpu(octeon_irq_ciu_spinlock, cpu); 262 263 - cd.p = irq_data_get_irq_chip_data(data); 264 265 raw_spin_lock_irqsave(lock, flags); 266 - if (cd.s.line == 0) { 267 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 268 - __set_bit(cd.s.bit, pen); 269 /* 270 * Must be visible to octeon_irq_ip{2,3}_ciu() before 271 * enabling the irq. ··· 274 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 275 } else { 276 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 277 - __set_bit(cd.s.bit, pen); 278 /* 279 * Must be visible to octeon_irq_ip{2,3}_ciu() before 280 * enabling the irq. ··· 289 { 290 unsigned long *pen; 291 unsigned long flags; 292 - union octeon_ciu_chip_data cd; 293 raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); 294 295 - cd.p = irq_data_get_irq_chip_data(data); 296 297 raw_spin_lock_irqsave(lock, flags); 298 - if (cd.s.line == 0) { 299 pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); 300 - __set_bit(cd.s.bit, pen); 301 /* 302 * Must be visible to octeon_irq_ip{2,3}_ciu() before 303 * enabling the irq. ··· 306 cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); 307 } else { 308 pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); 309 - __set_bit(cd.s.bit, pen); 310 /* 311 * Must be visible to octeon_irq_ip{2,3}_ciu() before 312 * enabling the irq. ··· 321 { 322 unsigned long *pen; 323 unsigned long flags; 324 - union octeon_ciu_chip_data cd; 325 raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); 326 327 - cd.p = irq_data_get_irq_chip_data(data); 328 329 raw_spin_lock_irqsave(lock, flags); 330 - if (cd.s.line == 0) { 331 pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); 332 - __clear_bit(cd.s.bit, pen); 333 /* 334 * Must be visible to octeon_irq_ip{2,3}_ciu() before 335 * enabling the irq. 
··· 338 cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); 339 } else { 340 pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); 341 - __clear_bit(cd.s.bit, pen); 342 /* 343 * Must be visible to octeon_irq_ip{2,3}_ciu() before 344 * enabling the irq. ··· 354 unsigned long flags; 355 unsigned long *pen; 356 int cpu; 357 - union octeon_ciu_chip_data cd; 358 raw_spinlock_t *lock; 359 360 - cd.p = irq_data_get_irq_chip_data(data); 361 362 for_each_online_cpu(cpu) { 363 int coreid = octeon_coreid_for_cpu(cpu); 364 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 365 - if (cd.s.line == 0) 366 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 367 else 368 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 369 370 raw_spin_lock_irqsave(lock, flags); 371 - __clear_bit(cd.s.bit, pen); 372 /* 373 * Must be visible to octeon_irq_ip{2,3}_ciu() before 374 * enabling the irq. 375 */ 376 wmb(); 377 - if (cd.s.line == 0) 378 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 379 else 380 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 387 unsigned long flags; 388 unsigned long *pen; 389 int cpu; 390 - union octeon_ciu_chip_data cd; 391 raw_spinlock_t *lock; 392 393 - cd.p = irq_data_get_irq_chip_data(data); 394 395 for_each_online_cpu(cpu) { 396 int coreid = octeon_coreid_for_cpu(cpu); 397 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 398 - if (cd.s.line == 0) 399 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 400 else 401 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 402 403 raw_spin_lock_irqsave(lock, flags); 404 - __set_bit(cd.s.bit, pen); 405 /* 406 * Must be visible to octeon_irq_ip{2,3}_ciu() before 407 * enabling the irq. 
408 */ 409 wmb(); 410 - if (cd.s.line == 0) 411 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 412 else 413 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 423 { 424 u64 mask; 425 int cpu = next_cpu_for_irq(data); 426 - union octeon_ciu_chip_data cd; 427 428 - cd.p = irq_data_get_irq_chip_data(data); 429 - mask = 1ull << (cd.s.bit); 430 431 /* 432 * Called under the desc lock, so these should never get out 433 * of sync. 434 */ 435 - if (cd.s.line == 0) { 436 int index = octeon_coreid_for_cpu(cpu) * 2; 437 - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 438 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 439 } else { 440 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 441 - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 442 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 443 } 444 } 445 ··· 511 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) 512 { 513 u64 mask; 514 - union octeon_ciu_chip_data cd; 515 516 - cd.p = irq_data_get_irq_chip_data(data); 517 - mask = 1ull << (cd.s.bit); 518 519 - if (cd.s.line == 0) { 520 int index = cvmx_get_core_num() * 2; 521 - set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); 522 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 523 } else { 524 int index = cvmx_get_core_num() * 2 + 1; 525 - set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); 526 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 527 } 528 } ··· 530 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) 531 { 532 u64 mask; 533 - union octeon_ciu_chip_data cd; 534 535 - cd.p = irq_data_get_irq_chip_data(data); 536 - mask = 1ull << (cd.s.bit); 537 538 - if (cd.s.line == 0) { 539 int index = cvmx_get_core_num() * 2; 540 - clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); 541 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 542 } else { 543 int index = cvmx_get_core_num() * 2 + 1; 544 - clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); 
545 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 546 } 547 } ··· 552 static void octeon_irq_ciu_ack(struct irq_data *data) 553 { 554 u64 mask; 555 - union octeon_ciu_chip_data cd; 556 557 - cd.p = irq_data_get_irq_chip_data(data); 558 - mask = 1ull << (cd.s.bit); 559 560 - if (cd.s.line == 0) { 561 int index = cvmx_get_core_num() * 2; 562 cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); 563 } else { ··· 573 { 574 int cpu; 575 u64 mask; 576 - union octeon_ciu_chip_data cd; 577 578 - cd.p = irq_data_get_irq_chip_data(data); 579 - mask = 1ull << (cd.s.bit); 580 581 - if (cd.s.line == 0) { 582 for_each_online_cpu(cpu) { 583 int index = octeon_coreid_for_cpu(cpu) * 2; 584 - clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 585 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 586 } 587 } else { 588 for_each_online_cpu(cpu) { 589 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 590 - clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 591 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 592 } 593 } ··· 603 { 604 int cpu; 605 u64 mask; 606 - union octeon_ciu_chip_data cd; 607 608 - cd.p = irq_data_get_irq_chip_data(data); 609 - mask = 1ull << (cd.s.bit); 610 611 - if (cd.s.line == 0) { 612 for_each_online_cpu(cpu) { 613 int index = octeon_coreid_for_cpu(cpu) * 2; 614 - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 615 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 616 } 617 } else { 618 for_each_online_cpu(cpu) { 619 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 620 - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 621 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 622 } 623 } ··· 628 static void octeon_irq_gpio_setup(struct irq_data *data) 629 { 630 union cvmx_gpio_bit_cfgx cfg; 631 - union octeon_ciu_chip_data cd; 632 u32 t = irqd_get_trigger_type(data); 633 634 - cd.p = irq_data_get_irq_chip_data(data); 635 636 cfg.u64 = 0; 637 cfg.s.int_en = 1; ··· 642 cfg.s.fil_cnt = 7; 643 cfg.s.fil_sel = 3; 
644 645 - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); 646 } 647 648 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) ··· 667 668 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) 669 { 670 - union octeon_ciu_chip_data cd; 671 672 - cd.p = irq_data_get_irq_chip_data(data); 673 - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); 674 675 octeon_irq_ciu_disable_all_v2(data); 676 } 677 678 static void octeon_irq_ciu_disable_gpio(struct irq_data *data) 679 { 680 - union octeon_ciu_chip_data cd; 681 682 - cd.p = irq_data_get_irq_chip_data(data); 683 - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); 684 685 octeon_irq_ciu_disable_all(data); 686 } 687 688 static void octeon_irq_ciu_gpio_ack(struct irq_data *data) 689 { 690 - union octeon_ciu_chip_data cd; 691 u64 mask; 692 693 - cd.p = irq_data_get_irq_chip_data(data); 694 - mask = 1ull << (cd.s.gpio_line); 695 696 cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); 697 } 698 699 - static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) 700 { 701 if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) 702 handle_edge_irq(irq, desc); ··· 735 int cpu; 736 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 737 unsigned long flags; 738 - union octeon_ciu_chip_data cd; 739 unsigned long *pen; 740 raw_spinlock_t *lock; 741 742 - cd.p = irq_data_get_irq_chip_data(data); 743 744 /* 745 * For non-v2 CIU, we will allow only single CPU affinity. 
··· 759 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 760 raw_spin_lock_irqsave(lock, flags); 761 762 - if (cd.s.line == 0) 763 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 764 else 765 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 766 767 if (cpumask_test_cpu(cpu, dest) && enable_one) { 768 enable_one = 0; 769 - __set_bit(cd.s.bit, pen); 770 } else { 771 - __clear_bit(cd.s.bit, pen); 772 } 773 /* 774 * Must be visible to octeon_irq_ip{2,3}_ciu() before ··· 776 */ 777 wmb(); 778 779 - if (cd.s.line == 0) 780 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 781 else 782 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 797 int cpu; 798 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 799 u64 mask; 800 - union octeon_ciu_chip_data cd; 801 802 if (!enable_one) 803 return 0; 804 805 - cd.p = irq_data_get_irq_chip_data(data); 806 - mask = 1ull << cd.s.bit; 807 808 - if (cd.s.line == 0) { 809 for_each_online_cpu(cpu) { 810 unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 811 int index = octeon_coreid_for_cpu(cpu) * 2; 812 if (cpumask_test_cpu(cpu, dest) && enable_one) { 813 enable_one = false; 814 - set_bit(cd.s.bit, pen); 815 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 816 } else { 817 - clear_bit(cd.s.bit, pen); 818 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 819 } 820 } ··· 824 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 825 if (cpumask_test_cpu(cpu, dest) && enable_one) { 826 enable_one = false; 827 - set_bit(cd.s.bit, pen); 828 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 829 } else { 830 - clear_bit(cd.s.bit, pen); 831 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 832 } 833 } 834 } 835 return 0; ··· 871 .name = "CIU", 872 .irq_enable = octeon_irq_ciu_enable_v2, 873 .irq_disable = octeon_irq_ciu_disable_all_v2, 874 .irq_ack = octeon_irq_ciu_ack, 875 .irq_mask = octeon_irq_ciu_disable_local_v2, 876 .irq_unmask = octeon_irq_ciu_enable_v2, ··· 892 #endif 893 }; 894 895 static 
struct irq_chip octeon_irq_chip_ciu = { 896 .name = "CIU", 897 .irq_enable = octeon_irq_ciu_enable, 898 .irq_disable = octeon_irq_ciu_disable_all, ··· 1141 unsigned int *out_type) 1142 { 1143 unsigned int ciu, bit; 1144 1145 ciu = intspec[0]; 1146 bit = intspec[1]; 1147 1148 - if (ciu > 1 || bit > 63) 1149 return -EINVAL; 1150 1151 *out_hwirq = (ciu << 6) | bit; ··· 1156 } 1157 1158 static struct irq_chip *octeon_irq_ciu_chip; 1159 static struct irq_chip *octeon_irq_gpio_chip; 1160 1161 static bool octeon_irq_virq_in_range(unsigned int virq) ··· 1172 static int octeon_irq_ciu_map(struct irq_domain *d, 1173 unsigned int virq, irq_hw_number_t hw) 1174 { 1175 unsigned int line = hw >> 6; 1176 unsigned int bit = hw & 63; 1177 1178 if (!octeon_irq_virq_in_range(virq)) 1179 return -EINVAL; ··· 1184 if (line == 0 && bit >= 16 && bit <32) 1185 return 0; 1186 1187 - if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) 1188 return -EINVAL; 1189 1190 - if (octeon_irq_ciu_is_edge(line, bit)) 1191 - octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1192 - octeon_irq_ciu_chip, 1193 - handle_edge_irq); 1194 - else 1195 - octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1196 - octeon_irq_ciu_chip, 1197 - handle_level_irq); 1198 - 1199 - return 0; 1200 } 1201 1202 - static int octeon_irq_gpio_map_common(struct irq_domain *d, 1203 - unsigned int virq, irq_hw_number_t hw, 1204 - int line_limit, struct irq_chip *chip) 1205 { 1206 struct octeon_irq_gpio_domain_data *gpiod = d->host_data; 1207 unsigned int line, bit; 1208 1209 if (!octeon_irq_virq_in_range(virq)) 1210 return -EINVAL; 1211 1212 line = (hw + gpiod->base_hwirq) >> 6; 1213 bit = (hw + gpiod->base_hwirq) & 63; 1214 - if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) 1215 return -EINVAL; 1216 1217 - octeon_irq_set_ciu_mapping(virq, line, bit, hw, 1218 - chip, octeon_irq_handle_gpio); 1219 - return 0; 1220 - } 1221 - 1222 - static int octeon_irq_gpio_map(struct irq_domain *d, 1223 - unsigned int virq, 
irq_hw_number_t hw) 1224 - { 1225 - return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); 1226 } 1227 1228 static struct irq_domain_ops octeon_irq_domain_ciu_ops = { 1229 .map = octeon_irq_ciu_map, 1230 .xlate = octeon_irq_ciu_xlat, 1231 }; 1232 1233 static struct irq_domain_ops octeon_irq_domain_gpio_ops = { 1234 .map = octeon_irq_gpio_map, 1235 .xlate = octeon_irq_gpio_xlat, 1236 }; 1237 ··· 1268 if (likely(ciu_sum)) { 1269 int bit = fls64(ciu_sum) - 1; 1270 int irq = octeon_irq_ciu_to_irq[1][bit]; 1271 if (likely(irq)) 1272 do_IRQ(irq); 1273 else ··· 1378 1379 /* Enable the CIU lines */ 1380 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1381 - clear_c0_status(STATUSF_IP4); 1382 } 1383 1384 static void octeon_irq_setup_secondary_ciu2(void) ··· 1397 clear_c0_status(STATUSF_IP4); 1398 } 1399 1400 - static void __init octeon_irq_init_ciu(void) 1401 { 1402 - unsigned int i; 1403 struct irq_chip *chip; 1404 struct irq_chip *chip_mbox; 1405 struct irq_chip *chip_wd; 1406 - struct device_node *gpio_node; 1407 - struct device_node *ciu_node; 1408 struct irq_domain *ciu_domain = NULL; 1409 1410 octeon_irq_init_ciu_percpu(); 1411 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; 1412 1413 octeon_irq_ip2 = octeon_irq_ip2_ciu; 1414 octeon_irq_ip3 = octeon_irq_ip3_ciu; 1415 if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || 1416 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || 1417 OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || 1418 - OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 1419 chip = &octeon_irq_chip_ciu_v2; 1420 chip_mbox = &octeon_irq_chip_ciu_mbox_v2; 1421 chip_wd = &octeon_irq_chip_ciu_wd_v2; 1422 octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; 1423 } else { 1424 chip = &octeon_irq_chip_ciu; 1425 chip_mbox = &octeon_irq_chip_ciu_mbox; 1426 chip_wd = &octeon_irq_chip_ciu_wd; 1427 octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; 1428 } 1429 octeon_irq_ciu_chip = chip; 1430 - octeon_irq_ip4 = octeon_irq_ip4_mask; 1431 1432 /* Mips internal */ 1433 
octeon_irq_init_core(); 1434 1435 - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); 1436 - if (gpio_node) { 1437 - struct octeon_irq_gpio_domain_data *gpiod; 1438 - 1439 - gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); 1440 - if (gpiod) { 1441 - /* gpio domain host_data is the base hwirq number. */ 1442 - gpiod->base_hwirq = 16; 1443 - irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); 1444 - of_node_put(gpio_node); 1445 - } else 1446 - pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); 1447 - } else 1448 - pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); 1449 - 1450 - ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); 1451 - if (ciu_node) { 1452 - ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); 1453 - irq_set_default_host(ciu_domain); 1454 - of_node_put(ciu_node); 1455 - } else 1456 - panic("Cannot find device node for cavium,octeon-3860-ciu."); 1457 1458 /* CIU_0 */ 1459 - for (i = 0; i < 16; i++) 1460 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); 1461 1462 - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); 1463 - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); 1464 1465 - for (i = 0; i < 4; i++) 1466 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); 1467 - for (i = 0; i < 4; i++) 1468 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); 1469 1470 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); 1471 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); 1472 - for (i = 0; i < 4; i++) 1473 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); 1474 1475 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); 1476 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); 1477 1478 /* CIU_1 */ 1479 - for 
(i = 0; i < 16; i++) 1480 - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); 1481 1482 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); 1483 1484 /* Enable the CIU lines */ 1485 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1486 - clear_c0_status(STATUSF_IP4); 1487 } 1488 1489 /* 1490 * Watchdog interrupts are special. They are associated with a single 1491 * core, so we hardwire the affinity to that core. ··· 1594 u64 mask; 1595 u64 en_addr; 1596 int coreid = data->irq - OCTEON_IRQ_WDOG0; 1597 - union octeon_ciu_chip_data cd; 1598 1599 - cd.p = irq_data_get_irq_chip_data(data); 1600 - mask = 1ull << (cd.s.bit); 1601 1602 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1603 cvmx_write_csr(en_addr, mask); 1604 1605 } ··· 1611 u64 en_addr; 1612 int cpu = next_cpu_for_irq(data); 1613 int coreid = octeon_coreid_for_cpu(cpu); 1614 - union octeon_ciu_chip_data cd; 1615 1616 - cd.p = irq_data_get_irq_chip_data(data); 1617 - mask = 1ull << (cd.s.bit); 1618 1619 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1620 cvmx_write_csr(en_addr, mask); 1621 } 1622 ··· 1626 u64 mask; 1627 u64 en_addr; 1628 int coreid = cvmx_get_core_num(); 1629 - union octeon_ciu_chip_data cd; 1630 1631 - cd.p = irq_data_get_irq_chip_data(data); 1632 - mask = 1ull << (cd.s.bit); 1633 1634 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1635 cvmx_write_csr(en_addr, mask); 1636 1637 } ··· 1642 u64 mask; 1643 u64 en_addr; 1644 int coreid = cvmx_get_core_num(); 1645 - union octeon_ciu_chip_data cd; 1646 1647 - cd.p = irq_data_get_irq_chip_data(data); 1648 - mask = 1ull << (cd.s.bit); 1649 1650 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); 1651 cvmx_write_csr(en_addr, mask); 1652 1653 } ··· 1658 u64 mask; 1659 u64 en_addr; 1660 int coreid = cvmx_get_core_num(); 1661 - union octeon_ciu_chip_data cd; 1662 1663 - cd.p = 
irq_data_get_irq_chip_data(data); 1664 - mask = 1ull << (cd.s.bit); 1665 1666 - en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); 1667 cvmx_write_csr(en_addr, mask); 1668 1669 } ··· 1672 { 1673 int cpu; 1674 u64 mask; 1675 - union octeon_ciu_chip_data cd; 1676 1677 - cd.p = irq_data_get_irq_chip_data(data); 1678 - mask = 1ull << (cd.s.bit); 1679 1680 for_each_online_cpu(cpu) { 1681 - u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1682 cvmx_write_csr(en_addr, mask); 1683 } 1684 } ··· 1692 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); 1693 1694 for_each_online_cpu(cpu) { 1695 - u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); 1696 cvmx_write_csr(en_addr, mask); 1697 } 1698 } ··· 1706 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); 1707 1708 for_each_online_cpu(cpu) { 1709 - u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); 1710 cvmx_write_csr(en_addr, mask); 1711 } 1712 } ··· 1741 int cpu; 1742 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 1743 u64 mask; 1744 - union octeon_ciu_chip_data cd; 1745 1746 if (!enable_one) 1747 return 0; 1748 1749 - cd.p = irq_data_get_irq_chip_data(data); 1750 - mask = 1ull << cd.s.bit; 1751 1752 for_each_online_cpu(cpu) { 1753 u64 en_addr; 1754 if (cpumask_test_cpu(cpu, dest) && enable_one) { 1755 enable_one = false; 1756 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1757 } else { 1758 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1759 } 1760 cvmx_write_csr(en_addr, mask); 1761 } ··· 1776 1777 static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) 1778 { 1779 - union octeon_ciu_chip_data cd; 1780 - cd.p = irq_data_get_irq_chip_data(data); 1781 1782 - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); 1783 1784 octeon_irq_ciu2_disable_all(data); 1785 } 1786 1787 static struct 
irq_chip octeon_irq_chip_ciu2 = { 1788 .name = "CIU2-E", 1789 .irq_enable = octeon_irq_ciu2_enable, 1790 .irq_disable = octeon_irq_ciu2_disable_all, ··· 1910 1911 if (octeon_irq_ciu2_is_edge(line, bit)) 1912 octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1913 - &octeon_irq_chip_ciu2, 1914 handle_edge_irq); 1915 else 1916 octeon_irq_set_ciu_mapping(virq, line, bit, 0, ··· 1919 1920 return 0; 1921 } 1922 - static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, 1923 - unsigned int virq, irq_hw_number_t hw) 1924 - { 1925 - return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); 1926 - } 1927 1928 static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { 1929 .map = octeon_irq_ciu2_map, 1930 .xlate = octeon_irq_ciu2_xlat, 1931 - }; 1932 - 1933 - static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { 1934 - .map = octeon_irq_ciu2_gpio_map, 1935 - .xlate = octeon_irq_gpio_xlat, 1936 }; 1937 1938 static void octeon_irq_ciu2(void) ··· 1993 return; 1994 } 1995 1996 - static void __init octeon_irq_init_ciu2(void) 1997 { 1998 - unsigned int i; 1999 - struct device_node *gpio_node; 2000 - struct device_node *ciu_node; 2001 struct irq_domain *ciu_domain = NULL; 2002 2003 octeon_irq_init_ciu2_percpu(); 2004 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; 2005 2006 octeon_irq_ip2 = octeon_irq_ciu2; 2007 octeon_irq_ip3 = octeon_irq_ciu2_mbox; 2008 octeon_irq_ip4 = octeon_irq_ip4_mask; ··· 2010 /* Mips internal */ 2011 octeon_irq_init_core(); 2012 2013 - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); 2014 - if (gpio_node) { 2015 - struct octeon_irq_gpio_domain_data *gpiod; 2016 - 2017 - gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); 2018 - if (gpiod) { 2019 - /* gpio domain host_data is the base hwirq number. 
*/ 2020 - gpiod->base_hwirq = 7 << 6; 2021 - irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); 2022 - of_node_put(gpio_node); 2023 - } else 2024 - pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); 2025 - } else 2026 - pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); 2027 - 2028 - ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); 2029 - if (ciu_node) { 2030 - ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); 2031 - irq_set_default_host(ciu_domain); 2032 - of_node_put(ciu_node); 2033 - } else 2034 - panic("Cannot find device node for cavium,octeon-6880-ciu2."); 2035 2036 /* CUI2 */ 2037 - for (i = 0; i < 64; i++) 2038 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); 2039 2040 - for (i = 0; i < 32; i++) 2041 - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, 2042 - &octeon_irq_chip_ciu2_wd, handle_level_irq); 2043 2044 - for (i = 0; i < 4; i++) 2045 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); 2046 2047 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); 2048 2049 - for (i = 0; i < 4; i++) 2050 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); 2051 2052 - for (i = 0; i < 4; i++) 2053 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); 2054 2055 irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); 2056 irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); ··· 2062 /* Enable the CIU lines */ 2063 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 2064 clear_c0_status(STATUSF_IP4); 2065 } 2066 2067 void __init arch_init_irq(void) 2068 { ··· 2305 cpumask_clear(irq_default_affinity); 2306 cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 2307 #endif 2308 - if (OCTEON_IS_MODEL(OCTEON_CN68XX)) 2309 - octeon_irq_init_ciu2(); 2310 - else 2311 - 
octeon_irq_init_ciu(); 2312 } 2313 2314 asmlinkage void plat_irq_dispatch(void) ··· 2319 cop0_cause &= cop0_status; 2320 cop0_cause &= ST0_IM; 2321 2322 - if (unlikely(cop0_cause & STATUSF_IP2)) 2323 octeon_irq_ip2(); 2324 - else if (unlikely(cop0_cause & STATUSF_IP3)) 2325 octeon_irq_ip3(); 2326 - else if (unlikely(cop0_cause & STATUSF_IP4)) 2327 octeon_irq_ip4(); 2328 - else if (likely(cop0_cause)) 2329 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); 2330 else 2331 break;
··· 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 + * Copyright (C) 2004-2014 Cavium, Inc. 7 */ 8 9 + #include <linux/of_address.h> 10 #include <linux/interrupt.h> 11 #include <linux/irqdomain.h> 12 #include <linux/bitops.h> 13 + #include <linux/of_irq.h> 14 #include <linux/percpu.h> 15 #include <linux/slab.h> 16 #include <linux/irq.h> ··· 22 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); 23 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); 24 25 + struct octeon_irq_ciu_domain_data { 26 + int num_sum; /* number of sum registers (2 or 3). */ 27 + }; 28 + 29 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; 30 31 + struct octeon_ciu_chip_data { 32 + union { 33 + struct { /* only used for ciu3 */ 34 + u64 ciu3_addr; 35 + unsigned int intsn; 36 + }; 37 + struct { /* only used for ciu/ciu2 */ 38 + u8 line; 39 + u8 bit; 40 + u8 gpio_line; 41 + }; 42 + }; 43 + int current_cpu; /* Next CPU expected to take this irq */ 44 }; 45 46 struct octeon_core_chip_data { ··· 45 46 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; 47 48 + static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, 49 + struct irq_chip *chip, 50 + irq_flow_handler_t handler) 51 { 52 + struct octeon_ciu_chip_data *cd; 53 + 54 + cd = kzalloc(sizeof(*cd), GFP_KERNEL); 55 + if (!cd) 56 + return -ENOMEM; 57 58 irq_set_chip_and_handler(irq, chip, handler); 59 60 + cd->line = line; 61 + cd->bit = bit; 62 + cd->gpio_line = gpio_line; 63 64 + irq_set_chip_data(irq, cd); 65 octeon_irq_ciu_to_irq[line][bit] = irq; 66 + return 0; 67 } 68 69 + static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) 70 { 71 + struct irq_data *data = irq_get_irq_data(irq); 72 + struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); 73 + 74 + irq_set_chip_data(irq, NULL); 75 + kfree(cd); 76 + } 77 + 78 + static int octeon_irq_force_ciu_mapping(struct irq_domain 
*domain, 79 + int irq, int line, int bit) 80 + { 81 + return irq_domain_associate(domain, irq, line << 6 | bit); 82 } 83 84 static int octeon_coreid_for_cpu(int cpu) ··· 202 #ifdef CONFIG_SMP 203 int cpu; 204 int weight = cpumask_weight(data->affinity); 205 + struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); 206 207 if (weight > 1) { 208 + cpu = cd->current_cpu; 209 for (;;) { 210 cpu = cpumask_next(cpu, data->affinity); 211 if (cpu >= nr_cpu_ids) { ··· 219 } else { 220 cpu = smp_processor_id(); 221 } 222 + cd->current_cpu = cpu; 223 return cpu; 224 #else 225 return smp_processor_id(); ··· 231 int coreid = octeon_coreid_for_cpu(cpu); 232 unsigned long *pen; 233 unsigned long flags; 234 + struct octeon_ciu_chip_data *cd; 235 raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 236 237 + cd = irq_data_get_irq_chip_data(data); 238 239 raw_spin_lock_irqsave(lock, flags); 240 + if (cd->line == 0) { 241 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 242 + __set_bit(cd->bit, pen); 243 /* 244 * Must be visible to octeon_irq_ip{2,3}_ciu() before 245 * enabling the irq. ··· 248 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 249 } else { 250 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 251 + __set_bit(cd->bit, pen); 252 /* 253 * Must be visible to octeon_irq_ip{2,3}_ciu() before 254 * enabling the irq. ··· 263 { 264 unsigned long *pen; 265 unsigned long flags; 266 + struct octeon_ciu_chip_data *cd; 267 raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); 268 269 + cd = irq_data_get_irq_chip_data(data); 270 271 raw_spin_lock_irqsave(lock, flags); 272 + if (cd->line == 0) { 273 pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); 274 + __set_bit(cd->bit, pen); 275 /* 276 * Must be visible to octeon_irq_ip{2,3}_ciu() before 277 * enabling the irq. 
··· 280 cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); 281 } else { 282 pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); 283 + __set_bit(cd->bit, pen); 284 /* 285 * Must be visible to octeon_irq_ip{2,3}_ciu() before 286 * enabling the irq. ··· 295 { 296 unsigned long *pen; 297 unsigned long flags; 298 + struct octeon_ciu_chip_data *cd; 299 raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); 300 301 + cd = irq_data_get_irq_chip_data(data); 302 303 raw_spin_lock_irqsave(lock, flags); 304 + if (cd->line == 0) { 305 pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); 306 + __clear_bit(cd->bit, pen); 307 /* 308 * Must be visible to octeon_irq_ip{2,3}_ciu() before 309 * enabling the irq. ··· 312 cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); 313 } else { 314 pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); 315 + __clear_bit(cd->bit, pen); 316 /* 317 * Must be visible to octeon_irq_ip{2,3}_ciu() before 318 * enabling the irq. ··· 328 unsigned long flags; 329 unsigned long *pen; 330 int cpu; 331 + struct octeon_ciu_chip_data *cd; 332 raw_spinlock_t *lock; 333 334 + cd = irq_data_get_irq_chip_data(data); 335 336 for_each_online_cpu(cpu) { 337 int coreid = octeon_coreid_for_cpu(cpu); 338 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 339 + if (cd->line == 0) 340 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 341 else 342 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 343 344 raw_spin_lock_irqsave(lock, flags); 345 + __clear_bit(cd->bit, pen); 346 /* 347 * Must be visible to octeon_irq_ip{2,3}_ciu() before 348 * enabling the irq. 
349 */ 350 wmb(); 351 + if (cd->line == 0) 352 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 353 else 354 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 361 unsigned long flags; 362 unsigned long *pen; 363 int cpu; 364 + struct octeon_ciu_chip_data *cd; 365 raw_spinlock_t *lock; 366 367 + cd = irq_data_get_irq_chip_data(data); 368 369 for_each_online_cpu(cpu) { 370 int coreid = octeon_coreid_for_cpu(cpu); 371 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 372 + if (cd->line == 0) 373 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 374 else 375 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 376 377 raw_spin_lock_irqsave(lock, flags); 378 + __set_bit(cd->bit, pen); 379 /* 380 * Must be visible to octeon_irq_ip{2,3}_ciu() before 381 * enabling the irq. 382 */ 383 wmb(); 384 + if (cd->line == 0) 385 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 386 else 387 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 397 { 398 u64 mask; 399 int cpu = next_cpu_for_irq(data); 400 + struct octeon_ciu_chip_data *cd; 401 402 + cd = irq_data_get_irq_chip_data(data); 403 + mask = 1ull << (cd->bit); 404 405 /* 406 * Called under the desc lock, so these should never get out 407 * of sync. 408 */ 409 + if (cd->line == 0) { 410 int index = octeon_coreid_for_cpu(cpu) * 2; 411 + set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 412 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 413 } else { 414 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 415 + set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 416 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 417 + } 418 + } 419 + 420 + /* 421 + * Enable the irq in the sum2 registers. 
422 + */ 423 + static void octeon_irq_ciu_enable_sum2(struct irq_data *data) 424 + { 425 + u64 mask; 426 + int cpu = next_cpu_for_irq(data); 427 + int index = octeon_coreid_for_cpu(cpu); 428 + struct octeon_ciu_chip_data *cd; 429 + 430 + cd = irq_data_get_irq_chip_data(data); 431 + mask = 1ull << (cd->bit); 432 + 433 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); 434 + } 435 + 436 + /* 437 + * Disable the irq in the sum2 registers. 438 + */ 439 + static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data) 440 + { 441 + u64 mask; 442 + int cpu = next_cpu_for_irq(data); 443 + int index = octeon_coreid_for_cpu(cpu); 444 + struct octeon_ciu_chip_data *cd; 445 + 446 + cd = irq_data_get_irq_chip_data(data); 447 + mask = 1ull << (cd->bit); 448 + 449 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); 450 + } 451 + 452 + static void octeon_irq_ciu_ack_sum2(struct irq_data *data) 453 + { 454 + u64 mask; 455 + int cpu = next_cpu_for_irq(data); 456 + int index = octeon_coreid_for_cpu(cpu); 457 + struct octeon_ciu_chip_data *cd; 458 + 459 + cd = irq_data_get_irq_chip_data(data); 460 + mask = 1ull << (cd->bit); 461 + 462 + cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask); 463 + } 464 + 465 + static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data) 466 + { 467 + int cpu; 468 + struct octeon_ciu_chip_data *cd; 469 + u64 mask; 470 + 471 + cd = irq_data_get_irq_chip_data(data); 472 + mask = 1ull << (cd->bit); 473 + 474 + for_each_online_cpu(cpu) { 475 + int coreid = octeon_coreid_for_cpu(cpu); 476 + 477 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask); 478 } 479 } 480 ··· 424 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) 425 { 426 u64 mask; 427 + struct octeon_ciu_chip_data *cd; 428 429 + cd = irq_data_get_irq_chip_data(data); 430 + mask = 1ull << (cd->bit); 431 432 + if (cd->line == 0) { 433 int index = cvmx_get_core_num() * 2; 434 + set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); 435 
cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 436 } else { 437 int index = cvmx_get_core_num() * 2 + 1; 438 + set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); 439 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 440 } 441 } ··· 443 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) 444 { 445 u64 mask; 446 + struct octeon_ciu_chip_data *cd; 447 448 + cd = irq_data_get_irq_chip_data(data); 449 + mask = 1ull << (cd->bit); 450 451 + if (cd->line == 0) { 452 int index = cvmx_get_core_num() * 2; 453 + clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); 454 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 455 } else { 456 int index = cvmx_get_core_num() * 2 + 1; 457 + clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); 458 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 459 } 460 } ··· 465 static void octeon_irq_ciu_ack(struct irq_data *data) 466 { 467 u64 mask; 468 + struct octeon_ciu_chip_data *cd; 469 470 + cd = irq_data_get_irq_chip_data(data); 471 + mask = 1ull << (cd->bit); 472 473 + if (cd->line == 0) { 474 int index = cvmx_get_core_num() * 2; 475 cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); 476 } else { ··· 486 { 487 int cpu; 488 u64 mask; 489 + struct octeon_ciu_chip_data *cd; 490 491 + cd = irq_data_get_irq_chip_data(data); 492 + mask = 1ull << (cd->bit); 493 494 + if (cd->line == 0) { 495 for_each_online_cpu(cpu) { 496 int index = octeon_coreid_for_cpu(cpu) * 2; 497 + clear_bit(cd->bit, 498 + &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 499 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 500 } 501 } else { 502 for_each_online_cpu(cpu) { 503 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 504 + clear_bit(cd->bit, 505 + &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 506 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 507 } 508 } ··· 514 { 515 int cpu; 516 u64 mask; 517 + struct octeon_ciu_chip_data *cd; 518 519 + cd = irq_data_get_irq_chip_data(data); 520 + mask = 1ull << (cd->bit); 521 522 + if 
(cd->line == 0) { 523 for_each_online_cpu(cpu) { 524 int index = octeon_coreid_for_cpu(cpu) * 2; 525 + set_bit(cd->bit, 526 + &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 527 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 528 } 529 } else { 530 for_each_online_cpu(cpu) { 531 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 532 + set_bit(cd->bit, 533 + &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 534 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 535 } 536 } ··· 537 static void octeon_irq_gpio_setup(struct irq_data *data) 538 { 539 union cvmx_gpio_bit_cfgx cfg; 540 + struct octeon_ciu_chip_data *cd; 541 u32 t = irqd_get_trigger_type(data); 542 543 + cd = irq_data_get_irq_chip_data(data); 544 545 cfg.u64 = 0; 546 cfg.s.int_en = 1; ··· 551 cfg.s.fil_cnt = 7; 552 cfg.s.fil_sel = 3; 553 554 + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64); 555 } 556 557 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) ··· 576 577 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) 578 { 579 + struct octeon_ciu_chip_data *cd; 580 581 + cd = irq_data_get_irq_chip_data(data); 582 + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); 583 584 octeon_irq_ciu_disable_all_v2(data); 585 } 586 587 static void octeon_irq_ciu_disable_gpio(struct irq_data *data) 588 { 589 + struct octeon_ciu_chip_data *cd; 590 591 + cd = irq_data_get_irq_chip_data(data); 592 + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); 593 594 octeon_irq_ciu_disable_all(data); 595 } 596 597 static void octeon_irq_ciu_gpio_ack(struct irq_data *data) 598 { 599 + struct octeon_ciu_chip_data *cd; 600 u64 mask; 601 602 + cd = irq_data_get_irq_chip_data(data); 603 + mask = 1ull << (cd->gpio_line); 604 605 cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); 606 } 607 608 + static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc) 609 { 610 if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) 611 handle_edge_irq(irq, desc); ··· 644 int cpu; 645 bool enable_one = 
!irqd_irq_disabled(data) && !irqd_irq_masked(data); 646 unsigned long flags; 647 + struct octeon_ciu_chip_data *cd; 648 unsigned long *pen; 649 raw_spinlock_t *lock; 650 651 + cd = irq_data_get_irq_chip_data(data); 652 653 /* 654 * For non-v2 CIU, we will allow only single CPU affinity. ··· 668 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 669 raw_spin_lock_irqsave(lock, flags); 670 671 + if (cd->line == 0) 672 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 673 else 674 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 675 676 if (cpumask_test_cpu(cpu, dest) && enable_one) { 677 enable_one = 0; 678 + __set_bit(cd->bit, pen); 679 } else { 680 + __clear_bit(cd->bit, pen); 681 } 682 /* 683 * Must be visible to octeon_irq_ip{2,3}_ciu() before ··· 685 */ 686 wmb(); 687 688 + if (cd->line == 0) 689 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 690 else 691 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 706 int cpu; 707 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 708 u64 mask; 709 + struct octeon_ciu_chip_data *cd; 710 711 if (!enable_one) 712 return 0; 713 714 + cd = irq_data_get_irq_chip_data(data); 715 + mask = 1ull << cd->bit; 716 717 + if (cd->line == 0) { 718 for_each_online_cpu(cpu) { 719 unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 720 int index = octeon_coreid_for_cpu(cpu) * 2; 721 if (cpumask_test_cpu(cpu, dest) && enable_one) { 722 enable_one = false; 723 + set_bit(cd->bit, pen); 724 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 725 } else { 726 + clear_bit(cd->bit, pen); 727 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 728 } 729 } ··· 733 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 734 if (cpumask_test_cpu(cpu, dest) && enable_one) { 735 enable_one = false; 736 + set_bit(cd->bit, pen); 737 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 738 } else { 739 + clear_bit(cd->bit, pen); 740 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 741 } 742 + } 743 + } 744 + return 0; 745 
+ } 746 + 747 + static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data, 748 + const struct cpumask *dest, 749 + bool force) 750 + { 751 + int cpu; 752 + bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 753 + u64 mask; 754 + struct octeon_ciu_chip_data *cd; 755 + 756 + if (!enable_one) 757 + return 0; 758 + 759 + cd = irq_data_get_irq_chip_data(data); 760 + mask = 1ull << cd->bit; 761 + 762 + for_each_online_cpu(cpu) { 763 + int index = octeon_coreid_for_cpu(cpu); 764 + 765 + if (cpumask_test_cpu(cpu, dest) && enable_one) { 766 + enable_one = false; 767 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); 768 + } else { 769 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); 770 } 771 } 772 return 0; ··· 752 .name = "CIU", 753 .irq_enable = octeon_irq_ciu_enable_v2, 754 .irq_disable = octeon_irq_ciu_disable_all_v2, 755 + .irq_mask = octeon_irq_ciu_disable_local_v2, 756 + .irq_unmask = octeon_irq_ciu_enable_v2, 757 + #ifdef CONFIG_SMP 758 + .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, 759 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 760 + #endif 761 + }; 762 + 763 + static struct irq_chip octeon_irq_chip_ciu_v2_edge = { 764 + .name = "CIU", 765 + .irq_enable = octeon_irq_ciu_enable_v2, 766 + .irq_disable = octeon_irq_ciu_disable_all_v2, 767 .irq_ack = octeon_irq_ciu_ack, 768 .irq_mask = octeon_irq_ciu_disable_local_v2, 769 .irq_unmask = octeon_irq_ciu_enable_v2, ··· 761 #endif 762 }; 763 764 + /* 765 + * Newer octeon chips have support for lockless CIU operation. 
766 + */ 767 + static struct irq_chip octeon_irq_chip_ciu_sum2 = { 768 + .name = "CIU", 769 + .irq_enable = octeon_irq_ciu_enable_sum2, 770 + .irq_disable = octeon_irq_ciu_disable_all_sum2, 771 + .irq_mask = octeon_irq_ciu_disable_local_sum2, 772 + .irq_unmask = octeon_irq_ciu_enable_sum2, 773 + #ifdef CONFIG_SMP 774 + .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, 775 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 776 + #endif 777 + }; 778 + 779 + static struct irq_chip octeon_irq_chip_ciu_sum2_edge = { 780 + .name = "CIU", 781 + .irq_enable = octeon_irq_ciu_enable_sum2, 782 + .irq_disable = octeon_irq_ciu_disable_all_sum2, 783 + .irq_ack = octeon_irq_ciu_ack_sum2, 784 + .irq_mask = octeon_irq_ciu_disable_local_sum2, 785 + .irq_unmask = octeon_irq_ciu_enable_sum2, 786 + #ifdef CONFIG_SMP 787 + .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, 788 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 789 + #endif 790 + }; 791 + 792 static struct irq_chip octeon_irq_chip_ciu = { 793 + .name = "CIU", 794 + .irq_enable = octeon_irq_ciu_enable, 795 + .irq_disable = octeon_irq_ciu_disable_all, 796 + .irq_mask = octeon_irq_ciu_disable_local, 797 + .irq_unmask = octeon_irq_ciu_enable, 798 + #ifdef CONFIG_SMP 799 + .irq_set_affinity = octeon_irq_ciu_set_affinity, 800 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 801 + #endif 802 + }; 803 + 804 + static struct irq_chip octeon_irq_chip_ciu_edge = { 805 .name = "CIU", 806 .irq_enable = octeon_irq_ciu_enable, 807 .irq_disable = octeon_irq_ciu_disable_all, ··· 970 unsigned int *out_type) 971 { 972 unsigned int ciu, bit; 973 + struct octeon_irq_ciu_domain_data *dd = d->host_data; 974 975 ciu = intspec[0]; 976 bit = intspec[1]; 977 978 + if (ciu >= dd->num_sum || bit > 63) 979 return -EINVAL; 980 981 *out_hwirq = (ciu << 6) | bit; ··· 984 } 985 986 static struct irq_chip *octeon_irq_ciu_chip; 987 + static struct irq_chip *octeon_irq_ciu_chip_edge; 988 static struct irq_chip *octeon_irq_gpio_chip; 989 990 static bool 
octeon_irq_virq_in_range(unsigned int virq) ··· 999 static int octeon_irq_ciu_map(struct irq_domain *d, 1000 unsigned int virq, irq_hw_number_t hw) 1001 { 1002 + int rv; 1003 unsigned int line = hw >> 6; 1004 unsigned int bit = hw & 63; 1005 + struct octeon_irq_ciu_domain_data *dd = d->host_data; 1006 1007 if (!octeon_irq_virq_in_range(virq)) 1008 return -EINVAL; ··· 1009 if (line == 0 && bit >= 16 && bit <32) 1010 return 0; 1011 1012 + if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0) 1013 return -EINVAL; 1014 1015 + if (line == 2) { 1016 + if (octeon_irq_ciu_is_edge(line, bit)) 1017 + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1018 + &octeon_irq_chip_ciu_sum2_edge, 1019 + handle_edge_irq); 1020 + else 1021 + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1022 + &octeon_irq_chip_ciu_sum2, 1023 + handle_level_irq); 1024 + } else { 1025 + if (octeon_irq_ciu_is_edge(line, bit)) 1026 + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1027 + octeon_irq_ciu_chip_edge, 1028 + handle_edge_irq); 1029 + else 1030 + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1031 + octeon_irq_ciu_chip, 1032 + handle_level_irq); 1033 + } 1034 + return rv; 1035 } 1036 1037 + static int octeon_irq_gpio_map(struct irq_domain *d, 1038 + unsigned int virq, irq_hw_number_t hw) 1039 { 1040 struct octeon_irq_gpio_domain_data *gpiod = d->host_data; 1041 unsigned int line, bit; 1042 + int r; 1043 1044 if (!octeon_irq_virq_in_range(virq)) 1045 return -EINVAL; 1046 1047 line = (hw + gpiod->base_hwirq) >> 6; 1048 bit = (hw + gpiod->base_hwirq) & 63; 1049 + if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || 1050 + octeon_irq_ciu_to_irq[line][bit] != 0) 1051 return -EINVAL; 1052 1053 + r = octeon_irq_set_ciu_mapping(virq, line, bit, hw, 1054 + octeon_irq_gpio_chip, octeon_irq_handle_trigger); 1055 + return r; 1056 } 1057 1058 static struct irq_domain_ops octeon_irq_domain_ciu_ops = { 1059 .map = octeon_irq_ciu_map, 1060 + .unmap = octeon_irq_free_cd, 1061 .xlate = 
octeon_irq_ciu_xlat, 1062 }; 1063 1064 static struct irq_domain_ops octeon_irq_domain_gpio_ops = { 1065 .map = octeon_irq_gpio_map, 1066 + .unmap = octeon_irq_free_cd, 1067 .xlate = octeon_irq_gpio_xlat, 1068 }; 1069 ··· 1086 if (likely(ciu_sum)) { 1087 int bit = fls64(ciu_sum) - 1; 1088 int irq = octeon_irq_ciu_to_irq[1][bit]; 1089 + if (likely(irq)) 1090 + do_IRQ(irq); 1091 + else 1092 + spurious_interrupt(); 1093 + } else { 1094 + spurious_interrupt(); 1095 + } 1096 + } 1097 + 1098 + static void octeon_irq_ip4_ciu(void) 1099 + { 1100 + int coreid = cvmx_get_core_num(); 1101 + u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid)); 1102 + u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid)); 1103 + 1104 + ciu_sum &= ciu_en; 1105 + if (likely(ciu_sum)) { 1106 + int bit = fls64(ciu_sum) - 1; 1107 + int irq = octeon_irq_ciu_to_irq[2][bit]; 1108 + 1109 if (likely(irq)) 1110 do_IRQ(irq); 1111 else ··· 1176 1177 /* Enable the CIU lines */ 1178 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1179 + if (octeon_irq_use_ip4) 1180 + set_c0_status(STATUSF_IP4); 1181 + else 1182 + clear_c0_status(STATUSF_IP4); 1183 } 1184 1185 static void octeon_irq_setup_secondary_ciu2(void) ··· 1192 clear_c0_status(STATUSF_IP4); 1193 } 1194 1195 + static int __init octeon_irq_init_ciu( 1196 + struct device_node *ciu_node, struct device_node *parent) 1197 { 1198 + unsigned int i, r; 1199 struct irq_chip *chip; 1200 + struct irq_chip *chip_edge; 1201 struct irq_chip *chip_mbox; 1202 struct irq_chip *chip_wd; 1203 struct irq_domain *ciu_domain = NULL; 1204 + struct octeon_irq_ciu_domain_data *dd; 1205 + 1206 + dd = kzalloc(sizeof(*dd), GFP_KERNEL); 1207 + if (!dd) 1208 + return -ENOMEM; 1209 1210 octeon_irq_init_ciu_percpu(); 1211 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; 1212 1213 octeon_irq_ip2 = octeon_irq_ip2_ciu; 1214 octeon_irq_ip3 = octeon_irq_ip3_ciu; 1215 + if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) 1216 + && !OCTEON_IS_MODEL(OCTEON_CN63XX)) { 1217 + 
octeon_irq_ip4 = octeon_irq_ip4_ciu; 1218 + dd->num_sum = 3; 1219 + octeon_irq_use_ip4 = true; 1220 + } else { 1221 + octeon_irq_ip4 = octeon_irq_ip4_mask; 1222 + dd->num_sum = 2; 1223 + octeon_irq_use_ip4 = false; 1224 + } 1225 if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || 1226 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || 1227 OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || 1228 + OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { 1229 chip = &octeon_irq_chip_ciu_v2; 1230 + chip_edge = &octeon_irq_chip_ciu_v2_edge; 1231 chip_mbox = &octeon_irq_chip_ciu_mbox_v2; 1232 chip_wd = &octeon_irq_chip_ciu_wd_v2; 1233 octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; 1234 } else { 1235 chip = &octeon_irq_chip_ciu; 1236 + chip_edge = &octeon_irq_chip_ciu_edge; 1237 chip_mbox = &octeon_irq_chip_ciu_mbox; 1238 chip_wd = &octeon_irq_chip_ciu_wd; 1239 octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; 1240 } 1241 octeon_irq_ciu_chip = chip; 1242 + octeon_irq_ciu_chip_edge = chip_edge; 1243 1244 /* Mips internal */ 1245 octeon_irq_init_core(); 1246 1247 + ciu_domain = irq_domain_add_tree( 1248 + ciu_node, &octeon_irq_domain_ciu_ops, dd); 1249 + irq_set_default_host(ciu_domain); 1250 1251 /* CIU_0 */ 1252 + for (i = 0; i < 16; i++) { 1253 + r = octeon_irq_force_ciu_mapping( 1254 + ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); 1255 + if (r) 1256 + goto err; 1257 + } 1258 1259 + r = octeon_irq_set_ciu_mapping( 1260 + OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); 1261 + if (r) 1262 + goto err; 1263 + r = octeon_irq_set_ciu_mapping( 1264 + OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); 1265 + if (r) 1266 + goto err; 1267 1268 + for (i = 0; i < 4; i++) { 1269 + r = octeon_irq_force_ciu_mapping( 1270 + ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); 1271 + if (r) 1272 + goto err; 1273 + } 1274 + for (i = 0; i < 4; i++) { 1275 + r = octeon_irq_force_ciu_mapping( 1276 + ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); 1277 + if (r) 1278 + goto err; 1279 + } 1280 1281 + r = 
octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); 1282 + if (r) 1283 + goto err; 1284 1285 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); 1286 + if (r) 1287 + goto err; 1288 + 1289 + for (i = 0; i < 4; i++) { 1290 + r = octeon_irq_force_ciu_mapping( 1291 + ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); 1292 + if (r) 1293 + goto err; 1294 + } 1295 + 1296 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); 1297 + if (r) 1298 + goto err; 1299 + 1300 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); 1301 + if (r) 1302 + goto err; 1303 1304 /* CIU_1 */ 1305 + for (i = 0; i < 16; i++) { 1306 + r = octeon_irq_set_ciu_mapping( 1307 + i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, 1308 + handle_level_irq); 1309 + if (r) 1310 + goto err; 1311 + } 1312 1313 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); 1314 + if (r) 1315 + goto err; 1316 1317 /* Enable the CIU lines */ 1318 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1319 + if (octeon_irq_use_ip4) 1320 + set_c0_status(STATUSF_IP4); 1321 + else 1322 + clear_c0_status(STATUSF_IP4); 1323 + 1324 + return 0; 1325 + err: 1326 + return r; 1327 } 1328 1329 + static int __init octeon_irq_init_gpio( 1330 + struct device_node *gpio_node, struct device_node *parent) 1331 + { 1332 + struct octeon_irq_gpio_domain_data *gpiod; 1333 + u32 interrupt_cells; 1334 + unsigned int base_hwirq; 1335 + int r; 1336 + 1337 + r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells); 1338 + if (r) 1339 + return r; 1340 + 1341 + if (interrupt_cells == 1) { 1342 + u32 v; 1343 + 1344 + r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v); 1345 + if (r) { 1346 + pr_warn("No \"interrupts\" property.\n"); 1347 + return r; 1348 + } 1349 + base_hwirq = v; 1350 + } else if (interrupt_cells == 2) { 1351 + u32 v0, v1; 1352 + 1353 + r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0); 1354 + if (r) { 1355 + pr_warn("No 
\"interrupts\" property.\n"); 1356 + return r; 1357 + } 1358 + r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1); 1359 + if (r) { 1360 + pr_warn("No \"interrupts\" property.\n"); 1361 + return r; 1362 + } 1363 + base_hwirq = (v0 << 6) | v1; 1364 + } else { 1365 + pr_warn("Bad \"#interrupt-cells\" property: %u\n", 1366 + interrupt_cells); 1367 + return -EINVAL; 1368 + } 1369 + 1370 + gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); 1371 + if (gpiod) { 1372 + /* gpio domain host_data is the base hwirq number. */ 1373 + gpiod->base_hwirq = base_hwirq; 1374 + irq_domain_add_linear( 1375 + gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); 1376 + } else { 1377 + pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); 1378 + return -ENOMEM; 1379 + } 1380 + 1381 + return 0; 1382 + } 1383 /* 1384 * Watchdog interrupts are special. They are associated with a single 1385 * core, so we hardwire the affinity to that core. ··· 1290 u64 mask; 1291 u64 en_addr; 1292 int coreid = data->irq - OCTEON_IRQ_WDOG0; 1293 + struct octeon_ciu_chip_data *cd; 1294 1295 + cd = irq_data_get_irq_chip_data(data); 1296 + mask = 1ull << (cd->bit); 1297 1298 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + 1299 + (0x1000ull * cd->line); 1300 cvmx_write_csr(en_addr, mask); 1301 1302 } ··· 1306 u64 en_addr; 1307 int cpu = next_cpu_for_irq(data); 1308 int coreid = octeon_coreid_for_cpu(cpu); 1309 + struct octeon_ciu_chip_data *cd; 1310 1311 + cd = irq_data_get_irq_chip_data(data); 1312 + mask = 1ull << (cd->bit); 1313 1314 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + 1315 + (0x1000ull * cd->line); 1316 cvmx_write_csr(en_addr, mask); 1317 } 1318 ··· 1320 u64 mask; 1321 u64 en_addr; 1322 int coreid = cvmx_get_core_num(); 1323 + struct octeon_ciu_chip_data *cd; 1324 1325 + cd = irq_data_get_irq_chip_data(data); 1326 + mask = 1ull << (cd->bit); 1327 1328 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + 1329 + (0x1000ull * cd->line); 1330 cvmx_write_csr(en_addr, mask); 1331 1332 } 
··· 1335 u64 mask; 1336 u64 en_addr; 1337 int coreid = cvmx_get_core_num(); 1338 + struct octeon_ciu_chip_data *cd; 1339 1340 + cd = irq_data_get_irq_chip_data(data); 1341 + mask = 1ull << (cd->bit); 1342 1343 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + 1344 + (0x1000ull * cd->line); 1345 cvmx_write_csr(en_addr, mask); 1346 1347 } ··· 1350 u64 mask; 1351 u64 en_addr; 1352 int coreid = cvmx_get_core_num(); 1353 + struct octeon_ciu_chip_data *cd; 1354 1355 + cd = irq_data_get_irq_chip_data(data); 1356 + mask = 1ull << (cd->bit); 1357 1358 + en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line); 1359 cvmx_write_csr(en_addr, mask); 1360 1361 } ··· 1364 { 1365 int cpu; 1366 u64 mask; 1367 + struct octeon_ciu_chip_data *cd; 1368 1369 + cd = irq_data_get_irq_chip_data(data); 1370 + mask = 1ull << (cd->bit); 1371 1372 for_each_online_cpu(cpu) { 1373 + u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( 1374 + octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line); 1375 cvmx_write_csr(en_addr, mask); 1376 } 1377 } ··· 1383 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); 1384 1385 for_each_online_cpu(cpu) { 1386 + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S( 1387 + octeon_coreid_for_cpu(cpu)); 1388 cvmx_write_csr(en_addr, mask); 1389 } 1390 } ··· 1396 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); 1397 1398 for_each_online_cpu(cpu) { 1399 + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C( 1400 + octeon_coreid_for_cpu(cpu)); 1401 cvmx_write_csr(en_addr, mask); 1402 } 1403 } ··· 1430 int cpu; 1431 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 1432 u64 mask; 1433 + struct octeon_ciu_chip_data *cd; 1434 1435 if (!enable_one) 1436 return 0; 1437 1438 + cd = irq_data_get_irq_chip_data(data); 1439 + mask = 1ull << cd->bit; 1440 1441 for_each_online_cpu(cpu) { 1442 u64 en_addr; 1443 if (cpumask_test_cpu(cpu, dest) && enable_one) { 1444 enable_one = false; 1445 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S( 1446 + octeon_coreid_for_cpu(cpu)) + 1447 + 
(0x1000ull * cd->line); 1448 } else { 1449 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( 1450 + octeon_coreid_for_cpu(cpu)) + 1451 + (0x1000ull * cd->line); 1452 } 1453 cvmx_write_csr(en_addr, mask); 1454 } ··· 1461 1462 static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) 1463 { 1464 + struct octeon_ciu_chip_data *cd; 1465 1466 + cd = irq_data_get_irq_chip_data(data); 1467 + 1468 + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); 1469 1470 octeon_irq_ciu2_disable_all(data); 1471 } 1472 1473 static struct irq_chip octeon_irq_chip_ciu2 = { 1474 + .name = "CIU2-E", 1475 + .irq_enable = octeon_irq_ciu2_enable, 1476 + .irq_disable = octeon_irq_ciu2_disable_all, 1477 + .irq_mask = octeon_irq_ciu2_disable_local, 1478 + .irq_unmask = octeon_irq_ciu2_enable, 1479 + #ifdef CONFIG_SMP 1480 + .irq_set_affinity = octeon_irq_ciu2_set_affinity, 1481 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 1482 + #endif 1483 + }; 1484 + 1485 + static struct irq_chip octeon_irq_chip_ciu2_edge = { 1486 .name = "CIU2-E", 1487 .irq_enable = octeon_irq_ciu2_enable, 1488 .irq_disable = octeon_irq_ciu2_disable_all, ··· 1582 1583 if (octeon_irq_ciu2_is_edge(line, bit)) 1584 octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1585 + &octeon_irq_chip_ciu2_edge, 1586 handle_edge_irq); 1587 else 1588 octeon_irq_set_ciu_mapping(virq, line, bit, 0, ··· 1591 1592 return 0; 1593 } 1594 1595 static struct irq_domain_ops octeon_irq_domain_ciu2_ops = { 1596 .map = octeon_irq_ciu2_map, 1597 + .unmap = octeon_irq_free_cd, 1598 .xlate = octeon_irq_ciu2_xlat, 1599 }; 1600 1601 static void octeon_irq_ciu2(void) ··· 1674 return; 1675 } 1676 1677 + static int __init octeon_irq_init_ciu2( 1678 + struct device_node *ciu_node, struct device_node *parent) 1679 { 1680 + unsigned int i, r; 1681 struct irq_domain *ciu_domain = NULL; 1682 1683 octeon_irq_init_ciu2_percpu(); 1684 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; 1685 1686 + octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio; 1687 
octeon_irq_ip2 = octeon_irq_ciu2; 1688 octeon_irq_ip3 = octeon_irq_ciu2_mbox; 1689 octeon_irq_ip4 = octeon_irq_ip4_mask; ··· 1691 /* Mips internal */ 1692 octeon_irq_init_core(); 1693 1694 + ciu_domain = irq_domain_add_tree( 1695 + ciu_node, &octeon_irq_domain_ciu2_ops, NULL); 1696 + irq_set_default_host(ciu_domain); 1697 1698 /* CUI2 */ 1699 + for (i = 0; i < 64; i++) { 1700 + r = octeon_irq_force_ciu_mapping( 1701 + ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); 1702 + if (r) 1703 + goto err; 1704 + } 1705 1706 + for (i = 0; i < 32; i++) { 1707 + r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, 1708 + &octeon_irq_chip_ciu2_wd, handle_level_irq); 1709 + if (r) 1710 + goto err; 1711 + } 1712 1713 + for (i = 0; i < 4; i++) { 1714 + r = octeon_irq_force_ciu_mapping( 1715 + ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); 1716 + if (r) 1717 + goto err; 1718 + } 1719 1720 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); 1721 + if (r) 1722 + goto err; 1723 1724 + for (i = 0; i < 4; i++) { 1725 + r = octeon_irq_force_ciu_mapping( 1726 + ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); 1727 + if (r) 1728 + goto err; 1729 + } 1730 1731 + for (i = 0; i < 4; i++) { 1732 + r = octeon_irq_force_ciu_mapping( 1733 + ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); 1734 + if (r) 1735 + goto err; 1736 + } 1737 1738 irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); 1739 irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); ··· 1741 /* Enable the CIU lines */ 1742 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1743 clear_c0_status(STATUSF_IP4); 1744 + return 0; 1745 + err: 1746 + return r; 1747 } 1748 + 1749 + struct octeon_irq_cib_host_data { 1750 + raw_spinlock_t lock; 1751 + u64 raw_reg; 1752 + u64 en_reg; 1753 + int max_bits; 1754 + }; 1755 + 1756 + struct octeon_irq_cib_chip_data { 1757 + struct octeon_irq_cib_host_data *host_data; 1758 + int bit; 1759 + }; 1760 + 1761 + static void 
octeon_irq_cib_enable(struct irq_data *data) 1762 + { 1763 + unsigned long flags; 1764 + u64 en; 1765 + struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); 1766 + struct octeon_irq_cib_host_data *host_data = cd->host_data; 1767 + 1768 + raw_spin_lock_irqsave(&host_data->lock, flags); 1769 + en = cvmx_read_csr(host_data->en_reg); 1770 + en |= 1ull << cd->bit; 1771 + cvmx_write_csr(host_data->en_reg, en); 1772 + raw_spin_unlock_irqrestore(&host_data->lock, flags); 1773 + } 1774 + 1775 + static void octeon_irq_cib_disable(struct irq_data *data) 1776 + { 1777 + unsigned long flags; 1778 + u64 en; 1779 + struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); 1780 + struct octeon_irq_cib_host_data *host_data = cd->host_data; 1781 + 1782 + raw_spin_lock_irqsave(&host_data->lock, flags); 1783 + en = cvmx_read_csr(host_data->en_reg); 1784 + en &= ~(1ull << cd->bit); 1785 + cvmx_write_csr(host_data->en_reg, en); 1786 + raw_spin_unlock_irqrestore(&host_data->lock, flags); 1787 + } 1788 + 1789 + static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t) 1790 + { 1791 + irqd_set_trigger_type(data, t); 1792 + return IRQ_SET_MASK_OK; 1793 + } 1794 + 1795 + static struct irq_chip octeon_irq_chip_cib = { 1796 + .name = "CIB", 1797 + .irq_enable = octeon_irq_cib_enable, 1798 + .irq_disable = octeon_irq_cib_disable, 1799 + .irq_mask = octeon_irq_cib_disable, 1800 + .irq_unmask = octeon_irq_cib_enable, 1801 + .irq_set_type = octeon_irq_cib_set_type, 1802 + }; 1803 + 1804 + static int octeon_irq_cib_xlat(struct irq_domain *d, 1805 + struct device_node *node, 1806 + const u32 *intspec, 1807 + unsigned int intsize, 1808 + unsigned long *out_hwirq, 1809 + unsigned int *out_type) 1810 + { 1811 + unsigned int type = 0; 1812 + 1813 + if (intsize == 2) 1814 + type = intspec[1]; 1815 + 1816 + switch (type) { 1817 + case 0: /* unofficial value, but we might as well let it work. */ 1818 + case 4: /* official value for level triggering. 
*/ 1819 + *out_type = IRQ_TYPE_LEVEL_HIGH; 1820 + break; 1821 + case 1: /* official value for edge triggering. */ 1822 + *out_type = IRQ_TYPE_EDGE_RISING; 1823 + break; 1824 + default: /* Nothing else is acceptable. */ 1825 + return -EINVAL; 1826 + } 1827 + 1828 + *out_hwirq = intspec[0]; 1829 + 1830 + return 0; 1831 + } 1832 + 1833 + static int octeon_irq_cib_map(struct irq_domain *d, 1834 + unsigned int virq, irq_hw_number_t hw) 1835 + { 1836 + struct octeon_irq_cib_host_data *host_data = d->host_data; 1837 + struct octeon_irq_cib_chip_data *cd; 1838 + 1839 + if (hw >= host_data->max_bits) { 1840 + pr_err("ERROR: %s mapping %u is to big!\n", 1841 + d->of_node->name, (unsigned)hw); 1842 + return -EINVAL; 1843 + } 1844 + 1845 + cd = kzalloc(sizeof(*cd), GFP_KERNEL); 1846 + cd->host_data = host_data; 1847 + cd->bit = hw; 1848 + 1849 + irq_set_chip_and_handler(virq, &octeon_irq_chip_cib, 1850 + handle_simple_irq); 1851 + irq_set_chip_data(virq, cd); 1852 + return 0; 1853 + } 1854 + 1855 + static struct irq_domain_ops octeon_irq_domain_cib_ops = { 1856 + .map = octeon_irq_cib_map, 1857 + .unmap = octeon_irq_free_cd, 1858 + .xlate = octeon_irq_cib_xlat, 1859 + }; 1860 + 1861 + /* Chain to real handler. 
*/ 1862 + static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data) 1863 + { 1864 + u64 en; 1865 + u64 raw; 1866 + u64 bits; 1867 + int i; 1868 + int irq; 1869 + struct irq_domain *cib_domain = data; 1870 + struct octeon_irq_cib_host_data *host_data = cib_domain->host_data; 1871 + 1872 + en = cvmx_read_csr(host_data->en_reg); 1873 + raw = cvmx_read_csr(host_data->raw_reg); 1874 + 1875 + bits = en & raw; 1876 + 1877 + for (i = 0; i < host_data->max_bits; i++) { 1878 + if ((bits & 1ull << i) == 0) 1879 + continue; 1880 + irq = irq_find_mapping(cib_domain, i); 1881 + if (!irq) { 1882 + unsigned long flags; 1883 + 1884 + pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n", 1885 + i, host_data->raw_reg); 1886 + raw_spin_lock_irqsave(&host_data->lock, flags); 1887 + en = cvmx_read_csr(host_data->en_reg); 1888 + en &= ~(1ull << i); 1889 + cvmx_write_csr(host_data->en_reg, en); 1890 + cvmx_write_csr(host_data->raw_reg, 1ull << i); 1891 + raw_spin_unlock_irqrestore(&host_data->lock, flags); 1892 + } else { 1893 + struct irq_desc *desc = irq_to_desc(irq); 1894 + struct irq_data *irq_data = irq_desc_get_irq_data(desc); 1895 + /* If edge, acknowledge the bit we will be sending. 
*/ 1896 + if (irqd_get_trigger_type(irq_data) & 1897 + IRQ_TYPE_EDGE_BOTH) 1898 + cvmx_write_csr(host_data->raw_reg, 1ull << i); 1899 + generic_handle_irq_desc(irq, desc); 1900 + } 1901 + } 1902 + 1903 + return IRQ_HANDLED; 1904 + } 1905 + 1906 + static int __init octeon_irq_init_cib(struct device_node *ciu_node, 1907 + struct device_node *parent) 1908 + { 1909 + const __be32 *addr; 1910 + u32 val; 1911 + struct octeon_irq_cib_host_data *host_data; 1912 + int parent_irq; 1913 + int r; 1914 + struct irq_domain *cib_domain; 1915 + 1916 + parent_irq = irq_of_parse_and_map(ciu_node, 0); 1917 + if (!parent_irq) { 1918 + pr_err("ERROR: Couldn't acquire parent_irq for %s\n.", 1919 + ciu_node->name); 1920 + return -EINVAL; 1921 + } 1922 + 1923 + host_data = kzalloc(sizeof(*host_data), GFP_KERNEL); 1924 + raw_spin_lock_init(&host_data->lock); 1925 + 1926 + addr = of_get_address(ciu_node, 0, NULL, NULL); 1927 + if (!addr) { 1928 + pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name); 1929 + return -EINVAL; 1930 + } 1931 + host_data->raw_reg = (u64)phys_to_virt( 1932 + of_translate_address(ciu_node, addr)); 1933 + 1934 + addr = of_get_address(ciu_node, 1, NULL, NULL); 1935 + if (!addr) { 1936 + pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name); 1937 + return -EINVAL; 1938 + } 1939 + host_data->en_reg = (u64)phys_to_virt( 1940 + of_translate_address(ciu_node, addr)); 1941 + 1942 + r = of_property_read_u32(ciu_node, "cavium,max-bits", &val); 1943 + if (r) { 1944 + pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.", 1945 + ciu_node->name); 1946 + return r; 1947 + } 1948 + host_data->max_bits = val; 1949 + 1950 + cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits, 1951 + &octeon_irq_domain_cib_ops, 1952 + host_data); 1953 + if (!cib_domain) { 1954 + pr_err("ERROR: Couldn't irq_domain_add_linear()\n."); 1955 + return -ENOMEM; 1956 + } 1957 + 1958 + cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */ 1959 + 
cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */ 1960 + 1961 + r = request_irq(parent_irq, octeon_irq_cib_handler, 1962 + IRQF_NO_THREAD, "cib", cib_domain); 1963 + if (r) { 1964 + pr_err("request_irq cib failed %d\n", r); 1965 + return r; 1966 + } 1967 + pr_info("CIB interrupt controller probed: %llx %d\n", 1968 + host_data->raw_reg, host_data->max_bits); 1969 + return 0; 1970 + } 1971 + 1972 + static struct of_device_id ciu_types[] __initdata = { 1973 + {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu}, 1974 + {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio}, 1975 + {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2}, 1976 + {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib}, 1977 + {} 1978 + }; 1979 1980 void __init arch_init_irq(void) 1981 { ··· 1750 cpumask_clear(irq_default_affinity); 1751 cpumask_set_cpu(smp_processor_id(), irq_default_affinity); 1752 #endif 1753 + of_irq_init(ciu_types); 1754 } 1755 1756 asmlinkage void plat_irq_dispatch(void) ··· 1767 cop0_cause &= cop0_status; 1768 cop0_cause &= ST0_IM; 1769 1770 + if (cop0_cause & STATUSF_IP2) 1771 octeon_irq_ip2(); 1772 + else if (cop0_cause & STATUSF_IP3) 1773 octeon_irq_ip3(); 1774 + else if (cop0_cause & STATUSF_IP4) 1775 octeon_irq_ip4(); 1776 + else if (cop0_cause) 1777 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); 1778 else 1779 break;
+49 -7
arch/mips/cavium-octeon/setup.c
··· 41 #include <asm/octeon/octeon.h> 42 #include <asm/octeon/pci-octeon.h> 43 #include <asm/octeon/cvmx-mio-defs.h> 44 45 extern struct plat_smp_ops octeon_smp_ops; 46 ··· 580 /* R/W If set, CVMSEG is available for loads/stores in user 581 * mode. */ 582 cvmmemctl.s.cvmsegenau = 0; 583 - /* R/W Size of local memory in cache blocks, 54 (6912 bytes) 584 - * is max legal value. */ 585 - cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; 586 587 write_c0_cvmmemctl(cvmmemctl.u64); 588 589 if (smp_processor_id() == 0) 590 pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", 591 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, ··· 614 const char *arg; 615 char *p; 616 int i; 617 int argc; 618 #ifdef CONFIG_CAVIUM_RESERVE32 619 int64_t addr = -1; ··· 654 sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; 655 sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; 656 657 - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 658 /* I/O clock runs at a different rate than the CPU. */ 659 union cvmx_mio_rst_boot rst_boot; 660 rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); 661 octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; 662 } else { 663 octeon_io_clock_rate = sysinfo->cpu_clock_hz; 664 } 665 666 /* ··· 1045 1046 void prom_free_prom_memory(void) 1047 { 1048 - if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) { 1049 /* Check for presence of Core-14449 fix. */ 1050 u32 insn; 1051 u32 *foo; ··· 1067 panic("No PREF instruction at Core-14449 probe point."); 1068 1069 if (((insn >> 16) & 0x1f) != 28) 1070 - panic("Core-14449 WAR not in place (%04x).\n" 1071 - "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); 1072 } 1073 } 1074
··· 41 #include <asm/octeon/octeon.h> 42 #include <asm/octeon/pci-octeon.h> 43 #include <asm/octeon/cvmx-mio-defs.h> 44 + #include <asm/octeon/cvmx-rst-defs.h> 45 46 extern struct plat_smp_ops octeon_smp_ops; 47 ··· 579 /* R/W If set, CVMSEG is available for loads/stores in user 580 * mode. */ 581 cvmmemctl.s.cvmsegenau = 0; 582 583 write_c0_cvmmemctl(cvmmemctl.u64); 584 585 + /* Setup of CVMSEG is done in kernel-entry-init.h */ 586 if (smp_processor_id() == 0) 587 pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", 588 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, ··· 615 const char *arg; 616 char *p; 617 int i; 618 + u64 t; 619 int argc; 620 #ifdef CONFIG_CAVIUM_RESERVE32 621 int64_t addr = -1; ··· 654 sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; 655 sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; 656 657 + if (OCTEON_IS_OCTEON2()) { 658 /* I/O clock runs at a different rate than the CPU. */ 659 union cvmx_mio_rst_boot rst_boot; 660 rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); 661 octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; 662 + } else if (OCTEON_IS_OCTEON3()) { 663 + /* I/O clock runs at a different rate than the CPU. */ 664 + union cvmx_rst_boot rst_boot; 665 + rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); 666 + octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; 667 } else { 668 octeon_io_clock_rate = sysinfo->cpu_clock_hz; 669 + } 670 + 671 + t = read_c0_cvmctl(); 672 + if ((t & (1ull << 27)) == 0) { 673 + /* 674 + * Setup the multiplier save/restore code if 675 + * CvmCtl[NOMUL] clear. 
676 + */ 677 + void *save; 678 + void *save_end; 679 + void *restore; 680 + void *restore_end; 681 + int save_len; 682 + int restore_len; 683 + int save_max = (char *)octeon_mult_save_end - 684 + (char *)octeon_mult_save; 685 + int restore_max = (char *)octeon_mult_restore_end - 686 + (char *)octeon_mult_restore; 687 + if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) { 688 + save = octeon_mult_save3; 689 + save_end = octeon_mult_save3_end; 690 + restore = octeon_mult_restore3; 691 + restore_end = octeon_mult_restore3_end; 692 + } else { 693 + save = octeon_mult_save2; 694 + save_end = octeon_mult_save2_end; 695 + restore = octeon_mult_restore2; 696 + restore_end = octeon_mult_restore2_end; 697 + } 698 + save_len = (char *)save_end - (char *)save; 699 + restore_len = (char *)restore_end - (char *)restore; 700 + if (!WARN_ON(save_len > save_max || 701 + restore_len > restore_max)) { 702 + memcpy(octeon_mult_save, save, save_len); 703 + memcpy(octeon_mult_restore, restore, restore_len); 704 + } 705 } 706 707 /* ··· 1004 1005 void prom_free_prom_memory(void) 1006 { 1007 + if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) { 1008 /* Check for presence of Core-14449 fix. */ 1009 u32 insn; 1010 u32 *foo; ··· 1026 panic("No PREF instruction at Core-14449 probe point."); 1027 1028 if (((insn >> 16) & 0x1f) != 28) 1029 + panic("OCTEON II DCache prefetch workaround not in place (%04x).\n" 1030 + "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", 1031 + insn); 1032 } 1033 } 1034
+193
arch/mips/configs/malta_qemu_32r6_defconfig
···
··· 1 + CONFIG_MIPS_MALTA=y 2 + CONFIG_CPU_LITTLE_ENDIAN=y 3 + CONFIG_CPU_MIPS32_R6=y 4 + CONFIG_PAGE_SIZE_16KB=y 5 + CONFIG_HZ_100=y 6 + CONFIG_SYSVIPC=y 7 + CONFIG_POSIX_MQUEUE=y 8 + CONFIG_AUDIT=y 9 + CONFIG_NO_HZ=y 10 + CONFIG_IKCONFIG=y 11 + CONFIG_IKCONFIG_PROC=y 12 + CONFIG_LOG_BUF_SHIFT=15 13 + CONFIG_SYSCTL_SYSCALL=y 14 + CONFIG_EMBEDDED=y 15 + CONFIG_SLAB=y 16 + CONFIG_MODULES=y 17 + CONFIG_MODULE_UNLOAD=y 18 + CONFIG_MODVERSIONS=y 19 + CONFIG_MODULE_SRCVERSION_ALL=y 20 + # CONFIG_BLK_DEV_BSG is not set 21 + CONFIG_PCI=y 22 + # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 23 + CONFIG_NET=y 24 + CONFIG_PACKET=y 25 + CONFIG_UNIX=y 26 + CONFIG_XFRM_USER=m 27 + CONFIG_NET_KEY=y 28 + CONFIG_INET=y 29 + CONFIG_IP_MULTICAST=y 30 + CONFIG_IP_ADVANCED_ROUTER=y 31 + CONFIG_IP_MULTIPLE_TABLES=y 32 + CONFIG_IP_ROUTE_MULTIPATH=y 33 + CONFIG_IP_ROUTE_VERBOSE=y 34 + CONFIG_IP_PNP=y 35 + CONFIG_IP_PNP_DHCP=y 36 + CONFIG_IP_PNP_BOOTP=y 37 + CONFIG_NET_IPIP=m 38 + CONFIG_IP_MROUTE=y 39 + CONFIG_IP_PIMSM_V1=y 40 + CONFIG_IP_PIMSM_V2=y 41 + CONFIG_SYN_COOKIES=y 42 + CONFIG_INET_AH=m 43 + CONFIG_INET_ESP=m 44 + CONFIG_INET_IPCOMP=m 45 + # CONFIG_INET_LRO is not set 46 + CONFIG_INET6_AH=m 47 + CONFIG_INET6_ESP=m 48 + CONFIG_INET6_IPCOMP=m 49 + CONFIG_IPV6_TUNNEL=m 50 + CONFIG_BRIDGE=m 51 + CONFIG_VLAN_8021Q=m 52 + CONFIG_ATALK=m 53 + CONFIG_DEV_APPLETALK=m 54 + CONFIG_IPDDP=m 55 + CONFIG_IPDDP_ENCAP=y 56 + CONFIG_NET_SCHED=y 57 + CONFIG_NET_SCH_CBQ=m 58 + CONFIG_NET_SCH_HTB=m 59 + CONFIG_NET_SCH_HFSC=m 60 + CONFIG_NET_SCH_PRIO=m 61 + CONFIG_NET_SCH_RED=m 62 + CONFIG_NET_SCH_SFQ=m 63 + CONFIG_NET_SCH_TEQL=m 64 + CONFIG_NET_SCH_TBF=m 65 + CONFIG_NET_SCH_GRED=m 66 + CONFIG_NET_SCH_DSMARK=m 67 + CONFIG_NET_SCH_NETEM=m 68 + CONFIG_NET_SCH_INGRESS=m 69 + CONFIG_NET_CLS_BASIC=m 70 + CONFIG_NET_CLS_TCINDEX=m 71 + CONFIG_NET_CLS_ROUTE4=m 72 + CONFIG_NET_CLS_FW=m 73 + CONFIG_NET_CLS_U32=m 74 + CONFIG_NET_CLS_RSVP=m 75 + CONFIG_NET_CLS_RSVP6=m 76 + CONFIG_NET_CLS_ACT=y 77 + 
CONFIG_NET_ACT_POLICE=y 78 + CONFIG_NET_CLS_IND=y 79 + # CONFIG_WIRELESS is not set 80 + CONFIG_DEVTMPFS=y 81 + CONFIG_BLK_DEV_LOOP=y 82 + CONFIG_BLK_DEV_CRYPTOLOOP=m 83 + CONFIG_IDE=y 84 + # CONFIG_IDE_PROC_FS is not set 85 + # CONFIG_IDEPCI_PCIBUS_ORDER is not set 86 + CONFIG_BLK_DEV_GENERIC=y 87 + CONFIG_BLK_DEV_PIIX=y 88 + CONFIG_SCSI=y 89 + CONFIG_BLK_DEV_SD=y 90 + CONFIG_CHR_DEV_SG=y 91 + # CONFIG_SCSI_LOWLEVEL is not set 92 + CONFIG_NETDEVICES=y 93 + # CONFIG_NET_VENDOR_3COM is not set 94 + # CONFIG_NET_VENDOR_ADAPTEC is not set 95 + # CONFIG_NET_VENDOR_ALTEON is not set 96 + CONFIG_PCNET32=y 97 + # CONFIG_NET_VENDOR_ATHEROS is not set 98 + # CONFIG_NET_VENDOR_BROADCOM is not set 99 + # CONFIG_NET_VENDOR_BROCADE is not set 100 + # CONFIG_NET_VENDOR_CHELSIO is not set 101 + # CONFIG_NET_VENDOR_CISCO is not set 102 + # CONFIG_NET_VENDOR_DEC is not set 103 + # CONFIG_NET_VENDOR_DLINK is not set 104 + # CONFIG_NET_VENDOR_EMULEX is not set 105 + # CONFIG_NET_VENDOR_EXAR is not set 106 + # CONFIG_NET_VENDOR_HP is not set 107 + # CONFIG_NET_VENDOR_INTEL is not set 108 + # CONFIG_NET_VENDOR_MARVELL is not set 109 + # CONFIG_NET_VENDOR_MELLANOX is not set 110 + # CONFIG_NET_VENDOR_MICREL is not set 111 + # CONFIG_NET_VENDOR_MYRI is not set 112 + # CONFIG_NET_VENDOR_NATSEMI is not set 113 + # CONFIG_NET_VENDOR_NVIDIA is not set 114 + # CONFIG_NET_VENDOR_OKI is not set 115 + # CONFIG_NET_PACKET_ENGINE is not set 116 + # CONFIG_NET_VENDOR_QLOGIC is not set 117 + # CONFIG_NET_VENDOR_REALTEK is not set 118 + # CONFIG_NET_VENDOR_RDC is not set 119 + # CONFIG_NET_VENDOR_SEEQ is not set 120 + # CONFIG_NET_VENDOR_SILAN is not set 121 + # CONFIG_NET_VENDOR_SIS is not set 122 + # CONFIG_NET_VENDOR_SMSC is not set 123 + # CONFIG_NET_VENDOR_STMICRO is not set 124 + # CONFIG_NET_VENDOR_SUN is not set 125 + # CONFIG_NET_VENDOR_TEHUTI is not set 126 + # CONFIG_NET_VENDOR_TI is not set 127 + # CONFIG_NET_VENDOR_TOSHIBA is not set 128 + # CONFIG_NET_VENDOR_VIA is not set 129 + # 
CONFIG_NET_VENDOR_WIZNET is not set 130 + # CONFIG_WLAN is not set 131 + # CONFIG_VT is not set 132 + CONFIG_LEGACY_PTY_COUNT=4 133 + CONFIG_SERIAL_8250=y 134 + CONFIG_SERIAL_8250_CONSOLE=y 135 + CONFIG_HW_RANDOM=y 136 + # CONFIG_HWMON is not set 137 + CONFIG_FB=y 138 + CONFIG_FIRMWARE_EDID=y 139 + CONFIG_FB_MATROX=y 140 + CONFIG_FB_MATROX_G=y 141 + CONFIG_USB=y 142 + CONFIG_USB_EHCI_HCD=y 143 + # CONFIG_USB_EHCI_TT_NEWSCHED is not set 144 + CONFIG_USB_UHCI_HCD=y 145 + CONFIG_USB_STORAGE=y 146 + CONFIG_NEW_LEDS=y 147 + CONFIG_LEDS_CLASS=y 148 + CONFIG_LEDS_TRIGGERS=y 149 + CONFIG_LEDS_TRIGGER_TIMER=y 150 + CONFIG_LEDS_TRIGGER_IDE_DISK=y 151 + CONFIG_LEDS_TRIGGER_HEARTBEAT=y 152 + CONFIG_LEDS_TRIGGER_BACKLIGHT=y 153 + CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 154 + CONFIG_RTC_CLASS=y 155 + CONFIG_RTC_DRV_CMOS=y 156 + CONFIG_EXT2_FS=y 157 + CONFIG_EXT3_FS=y 158 + # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 159 + CONFIG_XFS_FS=y 160 + CONFIG_XFS_QUOTA=y 161 + CONFIG_XFS_POSIX_ACL=y 162 + CONFIG_QUOTA=y 163 + CONFIG_QFMT_V2=y 164 + CONFIG_MSDOS_FS=m 165 + CONFIG_VFAT_FS=m 166 + CONFIG_PROC_KCORE=y 167 + CONFIG_TMPFS=y 168 + CONFIG_NFS_FS=y 169 + CONFIG_ROOT_NFS=y 170 + CONFIG_CIFS=m 171 + CONFIG_CIFS_WEAK_PW_HASH=y 172 + CONFIG_CIFS_XATTR=y 173 + CONFIG_CIFS_POSIX=y 174 + CONFIG_NLS_CODEPAGE_437=m 175 + CONFIG_NLS_ISO8859_1=m 176 + # CONFIG_FTRACE is not set 177 + CONFIG_CRYPTO_NULL=m 178 + CONFIG_CRYPTO_PCBC=m 179 + CONFIG_CRYPTO_HMAC=y 180 + CONFIG_CRYPTO_MICHAEL_MIC=m 181 + CONFIG_CRYPTO_SHA512=m 182 + CONFIG_CRYPTO_TGR192=m 183 + CONFIG_CRYPTO_WP512=m 184 + CONFIG_CRYPTO_ANUBIS=m 185 + CONFIG_CRYPTO_BLOWFISH=m 186 + CONFIG_CRYPTO_CAST5=m 187 + CONFIG_CRYPTO_CAST6=m 188 + CONFIG_CRYPTO_KHAZAD=m 189 + CONFIG_CRYPTO_SERPENT=m 190 + CONFIG_CRYPTO_TEA=m 191 + CONFIG_CRYPTO_TWOFISH=m 192 + # CONFIG_CRYPTO_ANSI_CPRNG is not set 193 + # CONFIG_CRYPTO_HW is not set
+16 -10
arch/mips/fw/arc/misc.c
··· 9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) 10 * Copyright (C) 1999 Silicon Graphics, Inc. 11 */ 12 #include <linux/init.h> 13 #include <linux/kernel.h> 14 #include <linux/irqflags.h> ··· 20 #include <asm/sgialib.h> 21 #include <asm/bootinfo.h> 22 23 - VOID 24 ArcHalt(VOID) 25 { 26 bc_disable(); 27 local_irq_disable(); 28 ARC_CALL0(halt); 29 - never: goto never; 30 } 31 32 - VOID 33 ArcPowerDown(VOID) 34 { 35 bc_disable(); 36 local_irq_disable(); 37 ARC_CALL0(pdown); 38 - never: goto never; 39 } 40 41 /* XXX is this a soft reset basically? XXX */ 42 - VOID 43 ArcRestart(VOID) 44 { 45 bc_disable(); 46 local_irq_disable(); 47 ARC_CALL0(restart); 48 - never: goto never; 49 } 50 51 - VOID 52 ArcReboot(VOID) 53 { 54 bc_disable(); 55 local_irq_disable(); 56 ARC_CALL0(reboot); 57 - never: goto never; 58 } 59 60 - VOID 61 ArcEnterInteractiveMode(VOID) 62 { 63 bc_disable(); 64 local_irq_disable(); 65 ARC_CALL0(imode); 66 - never: goto never; 67 } 68 69 LONG
··· 9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) 10 * Copyright (C) 1999 Silicon Graphics, Inc. 11 */ 12 + #include <linux/compiler.h> 13 #include <linux/init.h> 14 #include <linux/kernel.h> 15 #include <linux/irqflags.h> ··· 19 #include <asm/sgialib.h> 20 #include <asm/bootinfo.h> 21 22 + VOID __noreturn 23 ArcHalt(VOID) 24 { 25 bc_disable(); 26 local_irq_disable(); 27 ARC_CALL0(halt); 28 + 29 + unreachable(); 30 } 31 32 + VOID __noreturn 33 ArcPowerDown(VOID) 34 { 35 bc_disable(); 36 local_irq_disable(); 37 ARC_CALL0(pdown); 38 + 39 + unreachable(); 40 } 41 42 /* XXX is this a soft reset basically? XXX */ 43 + VOID __noreturn 44 ArcRestart(VOID) 45 { 46 bc_disable(); 47 local_irq_disable(); 48 ARC_CALL0(restart); 49 + 50 + unreachable(); 51 } 52 53 + VOID __noreturn 54 ArcReboot(VOID) 55 { 56 bc_disable(); 57 local_irq_disable(); 58 ARC_CALL0(reboot); 59 + 60 + unreachable(); 61 } 62 63 + VOID __noreturn 64 ArcEnterInteractiveMode(VOID) 65 { 66 bc_disable(); 67 local_irq_disable(); 68 ARC_CALL0(imode); 69 + 70 + unreachable(); 71 } 72 73 LONG
+1
arch/mips/include/asm/Kbuild
··· 1 # MIPS headers 2 generic-y += cputime.h 3 generic-y += current.h 4 generic-y += dma-contiguous.h
··· 1 # MIPS headers 2 + generic-(CONFIG_GENERIC_CSUM) += checksum.h 3 generic-y += cputime.h 4 generic-y += current.h 5 generic-y += dma-contiguous.h
+10 -8
arch/mips/include/asm/asmmacro.h
··· 19 #include <asm/asmmacro-64.h> 20 #endif 21 22 - #ifdef CONFIG_CPU_MIPSR2 23 .macro local_irq_enable reg=t0 24 ei 25 irq_enable_hazard ··· 104 .endm 105 106 .macro fpu_save_double thread status tmp 107 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 108 sll \tmp, \status, 5 109 bgez \tmp, 10f 110 fpu_save_16odd \thread ··· 161 .endm 162 163 .macro fpu_restore_double thread status tmp 164 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 165 sll \tmp, \status, 5 166 bgez \tmp, 10f # 16 register mode? 167 ··· 172 fpu_restore_16even \thread \tmp 173 .endm 174 175 - #ifdef CONFIG_CPU_MIPSR2 176 .macro _EXT rd, rs, p, s 177 ext \rd, \rs, \p, \s 178 .endm 179 - #else /* !CONFIG_CPU_MIPSR2 */ 180 .macro _EXT rd, rs, p, s 181 srl \rd, \rs, \p 182 andi \rd, \rd, (1 << \s) - 1 183 .endm 184 - #endif /* !CONFIG_CPU_MIPSR2 */ 185 186 /* 187 * Temporary until all gas have MT ASE support ··· 306 .set push 307 .set noat 308 SET_HARDFLOAT 309 - add $1, \base, \off 310 .word LDD_MSA_INSN | (\wd << 6) 311 .set pop 312 .endm ··· 315 .set push 316 .set noat 317 SET_HARDFLOAT 318 - add $1, \base, \off 319 .word STD_MSA_INSN | (\wd << 6) 320 .set pop 321 .endm
··· 19 #include <asm/asmmacro-64.h> 20 #endif 21 22 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 23 .macro local_irq_enable reg=t0 24 ei 25 irq_enable_hazard ··· 104 .endm 105 106 .macro fpu_save_double thread status tmp 107 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 108 + defined(CONFIG_CPU_MIPS32_R6) 109 sll \tmp, \status, 5 110 bgez \tmp, 10f 111 fpu_save_16odd \thread ··· 160 .endm 161 162 .macro fpu_restore_double thread status tmp 163 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 164 + defined(CONFIG_CPU_MIPS32_R6) 165 sll \tmp, \status, 5 166 bgez \tmp, 10f # 16 register mode? 167 ··· 170 fpu_restore_16even \thread \tmp 171 .endm 172 173 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 174 .macro _EXT rd, rs, p, s 175 ext \rd, \rs, \p, \s 176 .endm 177 + #else /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */ 178 .macro _EXT rd, rs, p, s 179 srl \rd, \rs, \p 180 andi \rd, \rd, (1 << \s) - 1 181 .endm 182 + #endif /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */ 183 184 /* 185 * Temporary until all gas have MT ASE support ··· 304 .set push 305 .set noat 306 SET_HARDFLOAT 307 + addu $1, \base, \off 308 .word LDD_MSA_INSN | (\wd << 6) 309 .set pop 310 .endm ··· 313 .set push 314 .set noat 315 SET_HARDFLOAT 316 + addu $1, \base, \off 317 .word STD_MSA_INSN | (\wd << 6) 318 .set pop 319 .endm
+21 -21
arch/mips/include/asm/atomic.h
··· 54 " sc %0, %1 \n" \ 55 " beqzl %0, 1b \n" \ 56 " .set mips0 \n" \ 57 - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 58 : "Ir" (i)); \ 59 } else if (kernel_uses_llsc) { \ 60 int temp; \ 61 \ 62 do { \ 63 __asm__ __volatile__( \ 64 - " .set arch=r4000 \n" \ 65 " ll %0, %1 # atomic_" #op "\n" \ 66 " " #asm_op " %0, %2 \n" \ 67 " sc %0, %1 \n" \ 68 " .set mips0 \n" \ 69 - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 70 : "Ir" (i)); \ 71 } while (unlikely(!temp)); \ 72 } else { \ ··· 97 " " #asm_op " %0, %1, %3 \n" \ 98 " .set mips0 \n" \ 99 : "=&r" (result), "=&r" (temp), \ 100 - "+" GCC_OFF12_ASM() (v->counter) \ 101 : "Ir" (i)); \ 102 } else if (kernel_uses_llsc) { \ 103 int temp; \ 104 \ 105 do { \ 106 __asm__ __volatile__( \ 107 - " .set arch=r4000 \n" \ 108 " ll %1, %2 # atomic_" #op "_return \n" \ 109 " " #asm_op " %0, %1, %3 \n" \ 110 " sc %0, %2 \n" \ 111 " .set mips0 \n" \ 112 : "=&r" (result), "=&r" (temp), \ 113 - "+" GCC_OFF12_ASM() (v->counter) \ 114 : "Ir" (i)); \ 115 } while (unlikely(!result)); \ 116 \ ··· 171 "1: \n" 172 " .set mips0 \n" 173 : "=&r" (result), "=&r" (temp), 174 - "+" GCC_OFF12_ASM() (v->counter) 175 - : "Ir" (i), GCC_OFF12_ASM() (v->counter) 176 : "memory"); 177 } else if (kernel_uses_llsc) { 178 int temp; 179 180 __asm__ __volatile__( 181 - " .set arch=r4000 \n" 182 "1: ll %1, %2 # atomic_sub_if_positive\n" 183 " subu %0, %1, %3 \n" 184 " bltz %0, 1f \n" ··· 190 "1: \n" 191 " .set mips0 \n" 192 : "=&r" (result), "=&r" (temp), 193 - "+" GCC_OFF12_ASM() (v->counter) 194 : "Ir" (i)); 195 } else { 196 unsigned long flags; ··· 333 " scd %0, %1 \n" \ 334 " beqzl %0, 1b \n" \ 335 " .set mips0 \n" \ 336 - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 337 : "Ir" (i)); \ 338 } else if (kernel_uses_llsc) { \ 339 long temp; \ 340 \ 341 do { \ 342 __asm__ __volatile__( \ 343 - " .set arch=r4000 \n" \ 344 " lld %0, %1 # atomic64_" #op "\n" \ 345 " " #asm_op " %0, %2 \n" \ 346 " scd %0, %1 \n" \ 347 " .set mips0 \n" \ 348 - : 
"=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 349 : "Ir" (i)); \ 350 } while (unlikely(!temp)); \ 351 } else { \ ··· 376 " " #asm_op " %0, %1, %3 \n" \ 377 " .set mips0 \n" \ 378 : "=&r" (result), "=&r" (temp), \ 379 - "+" GCC_OFF12_ASM() (v->counter) \ 380 : "Ir" (i)); \ 381 } else if (kernel_uses_llsc) { \ 382 long temp; \ 383 \ 384 do { \ 385 __asm__ __volatile__( \ 386 - " .set arch=r4000 \n" \ 387 " lld %1, %2 # atomic64_" #op "_return\n" \ 388 " " #asm_op " %0, %1, %3 \n" \ 389 " scd %0, %2 \n" \ 390 " .set mips0 \n" \ 391 : "=&r" (result), "=&r" (temp), \ 392 - "=" GCC_OFF12_ASM() (v->counter) \ 393 - : "Ir" (i), GCC_OFF12_ASM() (v->counter) \ 394 : "memory"); \ 395 } while (unlikely(!result)); \ 396 \ ··· 452 "1: \n" 453 " .set mips0 \n" 454 : "=&r" (result), "=&r" (temp), 455 - "=" GCC_OFF12_ASM() (v->counter) 456 - : "Ir" (i), GCC_OFF12_ASM() (v->counter) 457 : "memory"); 458 } else if (kernel_uses_llsc) { 459 long temp; 460 461 __asm__ __volatile__( 462 - " .set arch=r4000 \n" 463 "1: lld %1, %2 # atomic64_sub_if_positive\n" 464 " dsubu %0, %1, %3 \n" 465 " bltz %0, 1f \n" ··· 471 "1: \n" 472 " .set mips0 \n" 473 : "=&r" (result), "=&r" (temp), 474 - "+" GCC_OFF12_ASM() (v->counter) 475 : "Ir" (i)); 476 } else { 477 unsigned long flags;
··· 54 " sc %0, %1 \n" \ 55 " beqzl %0, 1b \n" \ 56 " .set mips0 \n" \ 57 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 58 : "Ir" (i)); \ 59 } else if (kernel_uses_llsc) { \ 60 int temp; \ 61 \ 62 do { \ 63 __asm__ __volatile__( \ 64 + " .set "MIPS_ISA_LEVEL" \n" \ 65 " ll %0, %1 # atomic_" #op "\n" \ 66 " " #asm_op " %0, %2 \n" \ 67 " sc %0, %1 \n" \ 68 " .set mips0 \n" \ 69 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 70 : "Ir" (i)); \ 71 } while (unlikely(!temp)); \ 72 } else { \ ··· 97 " " #asm_op " %0, %1, %3 \n" \ 98 " .set mips0 \n" \ 99 : "=&r" (result), "=&r" (temp), \ 100 + "+" GCC_OFF_SMALL_ASM() (v->counter) \ 101 : "Ir" (i)); \ 102 } else if (kernel_uses_llsc) { \ 103 int temp; \ 104 \ 105 do { \ 106 __asm__ __volatile__( \ 107 + " .set "MIPS_ISA_LEVEL" \n" \ 108 " ll %1, %2 # atomic_" #op "_return \n" \ 109 " " #asm_op " %0, %1, %3 \n" \ 110 " sc %0, %2 \n" \ 111 " .set mips0 \n" \ 112 : "=&r" (result), "=&r" (temp), \ 113 + "+" GCC_OFF_SMALL_ASM() (v->counter) \ 114 : "Ir" (i)); \ 115 } while (unlikely(!result)); \ 116 \ ··· 171 "1: \n" 172 " .set mips0 \n" 173 : "=&r" (result), "=&r" (temp), 174 + "+" GCC_OFF_SMALL_ASM() (v->counter) 175 + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) 176 : "memory"); 177 } else if (kernel_uses_llsc) { 178 int temp; 179 180 __asm__ __volatile__( 181 + " .set "MIPS_ISA_LEVEL" \n" 182 "1: ll %1, %2 # atomic_sub_if_positive\n" 183 " subu %0, %1, %3 \n" 184 " bltz %0, 1f \n" ··· 190 "1: \n" 191 " .set mips0 \n" 192 : "=&r" (result), "=&r" (temp), 193 + "+" GCC_OFF_SMALL_ASM() (v->counter) 194 : "Ir" (i)); 195 } else { 196 unsigned long flags; ··· 333 " scd %0, %1 \n" \ 334 " beqzl %0, 1b \n" \ 335 " .set mips0 \n" \ 336 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 337 : "Ir" (i)); \ 338 } else if (kernel_uses_llsc) { \ 339 long temp; \ 340 \ 341 do { \ 342 __asm__ __volatile__( \ 343 + " .set "MIPS_ISA_LEVEL" \n" \ 344 " lld %0, %1 # atomic64_" #op "\n" \ 345 " " #asm_op " %0, %2 \n" \ 
346 " scd %0, %1 \n" \ 347 " .set mips0 \n" \ 348 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 349 : "Ir" (i)); \ 350 } while (unlikely(!temp)); \ 351 } else { \ ··· 376 " " #asm_op " %0, %1, %3 \n" \ 377 " .set mips0 \n" \ 378 : "=&r" (result), "=&r" (temp), \ 379 + "+" GCC_OFF_SMALL_ASM() (v->counter) \ 380 : "Ir" (i)); \ 381 } else if (kernel_uses_llsc) { \ 382 long temp; \ 383 \ 384 do { \ 385 __asm__ __volatile__( \ 386 + " .set "MIPS_ISA_LEVEL" \n" \ 387 " lld %1, %2 # atomic64_" #op "_return\n" \ 388 " " #asm_op " %0, %1, %3 \n" \ 389 " scd %0, %2 \n" \ 390 " .set mips0 \n" \ 391 : "=&r" (result), "=&r" (temp), \ 392 + "=" GCC_OFF_SMALL_ASM() (v->counter) \ 393 + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ 394 : "memory"); \ 395 } while (unlikely(!result)); \ 396 \ ··· 452 "1: \n" 453 " .set mips0 \n" 454 : "=&r" (result), "=&r" (temp), 455 + "=" GCC_OFF_SMALL_ASM() (v->counter) 456 + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) 457 : "memory"); 458 } else if (kernel_uses_llsc) { 459 long temp; 460 461 __asm__ __volatile__( 462 + " .set "MIPS_ISA_LEVEL" \n" 463 "1: lld %1, %2 # atomic64_sub_if_positive\n" 464 " dsubu %0, %1, %3 \n" 465 " bltz %0, 1f \n" ··· 471 "1: \n" 472 " .set mips0 \n" 473 : "=&r" (result), "=&r" (temp), 474 + "+" GCC_OFF_SMALL_ASM() (v->counter) 475 : "Ir" (i)); 476 } else { 477 unsigned long flags;
+32 -32
arch/mips/include/asm/bitops.h
··· 79 " " __SC "%0, %1 \n" 80 " beqzl %0, 1b \n" 81 " .set mips0 \n" 82 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*m) 83 - : "ir" (1UL << bit), GCC_OFF12_ASM() (*m)); 84 - #ifdef CONFIG_CPU_MIPSR2 85 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 86 do { 87 __asm__ __volatile__( 88 " " __LL "%0, %1 # set_bit \n" 89 " " __INS "%0, %3, %2, 1 \n" 90 " " __SC "%0, %1 \n" 91 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 92 : "ir" (bit), "r" (~0)); 93 } while (unlikely(!temp)); 94 - #endif /* CONFIG_CPU_MIPSR2 */ 95 } else if (kernel_uses_llsc) { 96 do { 97 __asm__ __volatile__( 98 - " .set arch=r4000 \n" 99 " " __LL "%0, %1 # set_bit \n" 100 " or %0, %2 \n" 101 " " __SC "%0, %1 \n" 102 " .set mips0 \n" 103 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 104 : "ir" (1UL << bit)); 105 } while (unlikely(!temp)); 106 } else ··· 131 " " __SC "%0, %1 \n" 132 " beqzl %0, 1b \n" 133 " .set mips0 \n" 134 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 135 : "ir" (~(1UL << bit))); 136 - #ifdef CONFIG_CPU_MIPSR2 137 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 138 do { 139 __asm__ __volatile__( 140 " " __LL "%0, %1 # clear_bit \n" 141 " " __INS "%0, $0, %2, 1 \n" 142 " " __SC "%0, %1 \n" 143 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 144 : "ir" (bit)); 145 } while (unlikely(!temp)); 146 - #endif /* CONFIG_CPU_MIPSR2 */ 147 } else if (kernel_uses_llsc) { 148 do { 149 __asm__ __volatile__( 150 - " .set arch=r4000 \n" 151 " " __LL "%0, %1 # clear_bit \n" 152 " and %0, %2 \n" 153 " " __SC "%0, %1 \n" 154 " .set mips0 \n" 155 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 156 : "ir" (~(1UL << bit))); 157 } while (unlikely(!temp)); 158 } else ··· 197 " " __SC "%0, %1 \n" 198 " beqzl %0, 1b \n" 199 " .set mips0 \n" 200 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 201 : "ir" (1UL << bit)); 202 } else if (kernel_uses_llsc) { 203 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); ··· 205 206 do { 207 __asm__ __volatile__( 208 - " .set arch=r4000 \n" 209 " " __LL "%0, 
%1 # change_bit \n" 210 " xor %0, %2 \n" 211 " " __SC "%0, %1 \n" 212 " .set mips0 \n" 213 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 214 : "ir" (1UL << bit)); 215 } while (unlikely(!temp)); 216 } else ··· 245 " beqzl %2, 1b \n" 246 " and %2, %0, %3 \n" 247 " .set mips0 \n" 248 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 249 : "r" (1UL << bit) 250 : "memory"); 251 } else if (kernel_uses_llsc) { ··· 254 255 do { 256 __asm__ __volatile__( 257 - " .set arch=r4000 \n" 258 " " __LL "%0, %1 # test_and_set_bit \n" 259 " or %2, %0, %3 \n" 260 " " __SC "%2, %1 \n" 261 " .set mips0 \n" 262 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 263 : "r" (1UL << bit) 264 : "memory"); 265 } while (unlikely(!res)); ··· 308 309 do { 310 __asm__ __volatile__( 311 - " .set arch=r4000 \n" 312 " " __LL "%0, %1 # test_and_set_bit \n" 313 " or %2, %0, %3 \n" 314 " " __SC "%2, %1 \n" 315 " .set mips0 \n" 316 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 317 : "r" (1UL << bit) 318 : "memory"); 319 } while (unlikely(!res)); ··· 355 " beqzl %2, 1b \n" 356 " and %2, %0, %3 \n" 357 " .set mips0 \n" 358 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 359 : "r" (1UL << bit) 360 : "memory"); 361 - #ifdef CONFIG_CPU_MIPSR2 362 } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { 363 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 364 unsigned long temp; ··· 369 " " __EXT "%2, %0, %3, 1 \n" 370 " " __INS "%0, $0, %3, 1 \n" 371 " " __SC "%0, %1 \n" 372 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 373 : "ir" (bit) 374 : "memory"); 375 } while (unlikely(!temp)); ··· 380 381 do { 382 __asm__ __volatile__( 383 - " .set arch=r4000 \n" 384 " " __LL "%0, %1 # test_and_clear_bit \n" 385 " or %2, %0, %3 \n" 386 " xor %2, %3 \n" 387 " " __SC "%2, %1 \n" 388 " .set mips0 \n" 389 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 390 : "r" (1UL << bit) 391 : "memory"); 392 } while (unlikely(!res)); ··· 428 " beqzl %2, 1b \n" 429 " and %2, 
%0, %3 \n" 430 " .set mips0 \n" 431 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 432 : "r" (1UL << bit) 433 : "memory"); 434 } else if (kernel_uses_llsc) { ··· 437 438 do { 439 __asm__ __volatile__( 440 - " .set arch=r4000 \n" 441 " " __LL "%0, %1 # test_and_change_bit \n" 442 " xor %2, %0, %3 \n" 443 " " __SC "\t%2, %1 \n" 444 " .set mips0 \n" 445 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 446 : "r" (1UL << bit) 447 : "memory"); 448 } while (unlikely(!res)); ··· 485 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { 486 __asm__( 487 " .set push \n" 488 - " .set mips32 \n" 489 " clz %0, %1 \n" 490 " .set pop \n" 491 : "=r" (num) ··· 498 __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { 499 __asm__( 500 " .set push \n" 501 - " .set mips64 \n" 502 " dclz %0, %1 \n" 503 " .set pop \n" 504 : "=r" (num) ··· 562 if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { 563 __asm__( 564 " .set push \n" 565 - " .set mips32 \n" 566 " clz %0, %1 \n" 567 " .set pop \n" 568 : "=r" (x)
··· 79 " " __SC "%0, %1 \n" 80 " beqzl %0, 1b \n" 81 " .set mips0 \n" 82 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m) 83 + : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); 84 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 85 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 86 do { 87 __asm__ __volatile__( 88 " " __LL "%0, %1 # set_bit \n" 89 " " __INS "%0, %3, %2, 1 \n" 90 " " __SC "%0, %1 \n" 91 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 92 : "ir" (bit), "r" (~0)); 93 } while (unlikely(!temp)); 94 + #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 95 } else if (kernel_uses_llsc) { 96 do { 97 __asm__ __volatile__( 98 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 99 " " __LL "%0, %1 # set_bit \n" 100 " or %0, %2 \n" 101 " " __SC "%0, %1 \n" 102 " .set mips0 \n" 103 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 104 : "ir" (1UL << bit)); 105 } while (unlikely(!temp)); 106 } else ··· 131 " " __SC "%0, %1 \n" 132 " beqzl %0, 1b \n" 133 " .set mips0 \n" 134 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 135 : "ir" (~(1UL << bit))); 136 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 137 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 138 do { 139 __asm__ __volatile__( 140 " " __LL "%0, %1 # clear_bit \n" 141 " " __INS "%0, $0, %2, 1 \n" 142 " " __SC "%0, %1 \n" 143 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 144 : "ir" (bit)); 145 } while (unlikely(!temp)); 146 + #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 147 } else if (kernel_uses_llsc) { 148 do { 149 __asm__ __volatile__( 150 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 151 " " __LL "%0, %1 # clear_bit \n" 152 " and %0, %2 \n" 153 " " __SC "%0, %1 \n" 154 " .set mips0 \n" 155 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 156 : "ir" (~(1UL << bit))); 157 } while (unlikely(!temp)); 158 } else ··· 197 " " __SC "%0, %1 \n" 198 " beqzl %0, 1b \n" 199 " .set mips0 \n" 200 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 201 : "ir" (1UL << bit)); 202 } else if 
(kernel_uses_llsc) { 203 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); ··· 205 206 do { 207 __asm__ __volatile__( 208 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 209 " " __LL "%0, %1 # change_bit \n" 210 " xor %0, %2 \n" 211 " " __SC "%0, %1 \n" 212 " .set mips0 \n" 213 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 214 : "ir" (1UL << bit)); 215 } while (unlikely(!temp)); 216 } else ··· 245 " beqzl %2, 1b \n" 246 " and %2, %0, %3 \n" 247 " .set mips0 \n" 248 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 249 : "r" (1UL << bit) 250 : "memory"); 251 } else if (kernel_uses_llsc) { ··· 254 255 do { 256 __asm__ __volatile__( 257 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 258 " " __LL "%0, %1 # test_and_set_bit \n" 259 " or %2, %0, %3 \n" 260 " " __SC "%2, %1 \n" 261 " .set mips0 \n" 262 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 263 : "r" (1UL << bit) 264 : "memory"); 265 } while (unlikely(!res)); ··· 308 309 do { 310 __asm__ __volatile__( 311 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 312 " " __LL "%0, %1 # test_and_set_bit \n" 313 " or %2, %0, %3 \n" 314 " " __SC "%2, %1 \n" 315 " .set mips0 \n" 316 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 317 : "r" (1UL << bit) 318 : "memory"); 319 } while (unlikely(!res)); ··· 355 " beqzl %2, 1b \n" 356 " and %2, %0, %3 \n" 357 " .set mips0 \n" 358 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 359 : "r" (1UL << bit) 360 : "memory"); 361 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 362 } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { 363 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 364 unsigned long temp; ··· 369 " " __EXT "%2, %0, %3, 1 \n" 370 " " __INS "%0, $0, %3, 1 \n" 371 " " __SC "%0, %1 \n" 372 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 373 : "ir" (bit) 374 : "memory"); 375 } while (unlikely(!temp)); ··· 380 381 do { 382 __asm__ __volatile__( 383 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 384 " " __LL "%0, %1 # 
test_and_clear_bit \n" 385 " or %2, %0, %3 \n" 386 " xor %2, %3 \n" 387 " " __SC "%2, %1 \n" 388 " .set mips0 \n" 389 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 390 : "r" (1UL << bit) 391 : "memory"); 392 } while (unlikely(!res)); ··· 428 " beqzl %2, 1b \n" 429 " and %2, %0, %3 \n" 430 " .set mips0 \n" 431 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 432 : "r" (1UL << bit) 433 : "memory"); 434 } else if (kernel_uses_llsc) { ··· 437 438 do { 439 __asm__ __volatile__( 440 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 441 " " __LL "%0, %1 # test_and_change_bit \n" 442 " xor %2, %0, %3 \n" 443 " " __SC "\t%2, %1 \n" 444 " .set mips0 \n" 445 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 446 : "r" (1UL << bit) 447 : "memory"); 448 } while (unlikely(!res)); ··· 485 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { 486 __asm__( 487 " .set push \n" 488 + " .set "MIPS_ISA_LEVEL" \n" 489 " clz %0, %1 \n" 490 " .set pop \n" 491 : "=r" (num) ··· 498 __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { 499 __asm__( 500 " .set push \n" 501 + " .set "MIPS_ISA_LEVEL" \n" 502 " dclz %0, %1 \n" 503 " .set pop \n" 504 : "=r" (num) ··· 562 if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { 563 __asm__( 564 " .set push \n" 565 + " .set "MIPS_ISA_LEVEL" \n" 566 " clz %0, %1 \n" 567 " .set pop \n" 568 : "=r" (x)
+19 -26
arch/mips/include/asm/checksum.h
··· 12 #ifndef _ASM_CHECKSUM_H 13 #define _ASM_CHECKSUM_H 14 15 #include <linux/in6.h> 16 17 #include <asm/uaccess.h> ··· 103 */ 104 __wsum csum_partial_copy_nocheck(const void *src, void *dst, 105 int len, __wsum sum); 106 107 /* 108 * Fold a partial checksum without adding pseudo headers 109 */ 110 - static inline __sum16 csum_fold(__wsum sum) 111 { 112 - __asm__( 113 - " .set push # csum_fold\n" 114 - " .set noat \n" 115 - " sll $1, %0, 16 \n" 116 - " addu %0, $1 \n" 117 - " sltu $1, %0, $1 \n" 118 - " srl %0, %0, 16 \n" 119 - " addu %0, $1 \n" 120 - " xori %0, 0xffff \n" 121 - " .set pop" 122 - : "=r" (sum) 123 - : "0" (sum)); 124 125 - return (__force __sum16)sum; 126 } 127 128 /* 129 * This is a version of ip_compute_csum() optimized for IP headers, ··· 158 159 return csum_fold(csum); 160 } 161 162 static inline __wsum csum_tcpudp_nofold(__be32 saddr, 163 __be32 daddr, unsigned short len, unsigned short proto, ··· 201 202 return sum; 203 } 204 - 205 - /* 206 - * computes the checksum of the TCP/UDP pseudo-header 207 - * returns a 16-bit checksum, already complemented 208 - */ 209 - static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, 210 - unsigned short len, 211 - unsigned short proto, 212 - __wsum sum) 213 - { 214 - return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum)); 215 - } 216 217 /* 218 * this routine is used for miscellaneous IP-like checksums, mainly ··· 276 277 return csum_fold(sum); 278 } 279 280 #endif /* _ASM_CHECKSUM_H */
··· 12 #ifndef _ASM_CHECKSUM_H 13 #define _ASM_CHECKSUM_H 14 15 + #ifdef CONFIG_GENERIC_CSUM 16 + #include <asm-generic/checksum.h> 17 + #else 18 + 19 #include <linux/in6.h> 20 21 #include <asm/uaccess.h> ··· 99 */ 100 __wsum csum_partial_copy_nocheck(const void *src, void *dst, 101 int len, __wsum sum); 102 + #define csum_partial_copy_nocheck csum_partial_copy_nocheck 103 104 /* 105 * Fold a partial checksum without adding pseudo headers 106 */ 107 + static inline __sum16 csum_fold(__wsum csum) 108 { 109 + u32 sum = (__force u32)csum;; 110 111 + sum += (sum << 16); 112 + csum = (sum < csum); 113 + sum >>= 16; 114 + sum += csum; 115 + 116 + return (__force __sum16)~sum; 117 } 118 + #define csum_fold csum_fold 119 120 /* 121 * This is a version of ip_compute_csum() optimized for IP headers, ··· 158 159 return csum_fold(csum); 160 } 161 + #define ip_fast_csum ip_fast_csum 162 163 static inline __wsum csum_tcpudp_nofold(__be32 saddr, 164 __be32 daddr, unsigned short len, unsigned short proto, ··· 200 201 return sum; 202 } 203 + #define csum_tcpudp_nofold csum_tcpudp_nofold 204 205 /* 206 * this routine is used for miscellaneous IP-like checksums, mainly ··· 286 287 return csum_fold(sum); 288 } 289 + 290 + #include <asm-generic/checksum.h> 291 + #endif /* CONFIG_GENERIC_CSUM */ 292 293 #endif /* _ASM_CHECKSUM_H */
+17 -17
arch/mips/include/asm/cmpxchg.h
··· 31 " sc %2, %1 \n" 32 " beqzl %2, 1b \n" 33 " .set mips0 \n" 34 - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) 35 - : GCC_OFF12_ASM() (*m), "Jr" (val) 36 : "memory"); 37 } else if (kernel_uses_llsc) { 38 unsigned long dummy; 39 40 do { 41 __asm__ __volatile__( 42 - " .set arch=r4000 \n" 43 " ll %0, %3 # xchg_u32 \n" 44 " .set mips0 \n" 45 " move %2, %z4 \n" 46 - " .set arch=r4000 \n" 47 " sc %2, %1 \n" 48 " .set mips0 \n" 49 - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), 50 "=&r" (dummy) 51 - : GCC_OFF12_ASM() (*m), "Jr" (val) 52 : "memory"); 53 } while (unlikely(!dummy)); 54 } else { ··· 82 " scd %2, %1 \n" 83 " beqzl %2, 1b \n" 84 " .set mips0 \n" 85 - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) 86 - : GCC_OFF12_ASM() (*m), "Jr" (val) 87 : "memory"); 88 } else if (kernel_uses_llsc) { 89 unsigned long dummy; 90 91 do { 92 __asm__ __volatile__( 93 - " .set arch=r4000 \n" 94 " lld %0, %3 # xchg_u64 \n" 95 " move %2, %z4 \n" 96 " scd %2, %1 \n" 97 " .set mips0 \n" 98 - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), 99 "=&r" (dummy) 100 - : GCC_OFF12_ASM() (*m), "Jr" (val) 101 : "memory"); 102 } while (unlikely(!dummy)); 103 } else { ··· 158 " beqzl $1, 1b \n" \ 159 "2: \n" \ 160 " .set pop \n" \ 161 - : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ 162 - : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ 163 : "memory"); \ 164 } else if (kernel_uses_llsc) { \ 165 __asm__ __volatile__( \ 166 " .set push \n" \ 167 " .set noat \n" \ 168 - " .set arch=r4000 \n" \ 169 "1: " ld " %0, %2 # __cmpxchg_asm \n" \ 170 " bne %0, %z3, 2f \n" \ 171 " .set mips0 \n" \ 172 " move $1, %z4 \n" \ 173 - " .set arch=r4000 \n" \ 174 " " st " $1, %1 \n" \ 175 " beqz $1, 1b \n" \ 176 " .set pop \n" \ 177 "2: \n" \ 178 - : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ 179 - : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ 180 : "memory"); \ 181 } else { \ 182 unsigned long __flags; \
··· 31 " sc %2, %1 \n" 32 " beqzl %2, 1b \n" 33 " .set mips0 \n" 34 + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) 35 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) 36 : "memory"); 37 } else if (kernel_uses_llsc) { 38 unsigned long dummy; 39 40 do { 41 __asm__ __volatile__( 42 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 43 " ll %0, %3 # xchg_u32 \n" 44 " .set mips0 \n" 45 " move %2, %z4 \n" 46 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 47 " sc %2, %1 \n" 48 " .set mips0 \n" 49 + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), 50 "=&r" (dummy) 51 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) 52 : "memory"); 53 } while (unlikely(!dummy)); 54 } else { ··· 82 " scd %2, %1 \n" 83 " beqzl %2, 1b \n" 84 " .set mips0 \n" 85 + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) 86 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) 87 : "memory"); 88 } else if (kernel_uses_llsc) { 89 unsigned long dummy; 90 91 do { 92 __asm__ __volatile__( 93 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 94 " lld %0, %3 # xchg_u64 \n" 95 " move %2, %z4 \n" 96 " scd %2, %1 \n" 97 " .set mips0 \n" 98 + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), 99 "=&r" (dummy) 100 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) 101 : "memory"); 102 } while (unlikely(!dummy)); 103 } else { ··· 158 " beqzl $1, 1b \n" \ 159 "2: \n" \ 160 " .set pop \n" \ 161 + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ 162 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ 163 : "memory"); \ 164 } else if (kernel_uses_llsc) { \ 165 __asm__ __volatile__( \ 166 " .set push \n" \ 167 " .set noat \n" \ 168 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 169 "1: " ld " %0, %2 # __cmpxchg_asm \n" \ 170 " bne %0, %z3, 2f \n" \ 171 " .set mips0 \n" \ 172 " move $1, %z4 \n" \ 173 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 174 " " st " $1, %1 \n" \ 175 " beqz $1, 1b \n" \ 176 " .set pop \n" \ 177 "2: \n" \ 178 + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ 179 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ 180 : "memory"); \ 181 } else { \ 182 unsigned long 
__flags; \
+21 -3
arch/mips/include/asm/compiler.h
··· 16 #define GCC_REG_ACCUM "accum" 17 #endif 18 19 #ifndef CONFIG_CPU_MICROMIPS 20 - #define GCC_OFF12_ASM() "R" 21 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) 22 - #define GCC_OFF12_ASM() "ZC" 23 #else 24 #error "microMIPS compilation unsupported with GCC older than 4.9" 25 - #endif 26 27 #endif /* _ASM_COMPILER_H */
··· 16 #define GCC_REG_ACCUM "accum" 17 #endif 18 19 + #ifdef CONFIG_CPU_MIPSR6 20 + /* All MIPS R6 toolchains support the ZC constrain */ 21 + #define GCC_OFF_SMALL_ASM() "ZC" 22 + #else 23 #ifndef CONFIG_CPU_MICROMIPS 24 + #define GCC_OFF_SMALL_ASM() "R" 25 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9) 26 + #define GCC_OFF_SMALL_ASM() "ZC" 27 #else 28 #error "microMIPS compilation unsupported with GCC older than 4.9" 29 + #endif /* CONFIG_CPU_MICROMIPS */ 30 + #endif /* CONFIG_CPU_MIPSR6 */ 31 + 32 + #ifdef CONFIG_CPU_MIPSR6 33 + #define MIPS_ISA_LEVEL "mips64r6" 34 + #define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL 35 + #define MIPS_ISA_LEVEL_RAW mips64r6 36 + #define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW 37 + #else 38 + /* MIPS64 is a superset of MIPS32 */ 39 + #define MIPS_ISA_LEVEL "mips64r2" 40 + #define MIPS_ISA_ARCH_LEVEL "arch=r4000" 41 + #define MIPS_ISA_LEVEL_RAW mips64r2 42 + #define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW 43 + #endif /* CONFIG_CPU_MIPSR6 */ 44 45 #endif /* _ASM_COMPILER_H */
+23 -5
arch/mips/include/asm/cpu-features.h
··· 38 #ifndef cpu_has_maar 39 #define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) 40 #endif 41 42 /* 43 * For the moment we don't consider R6000 and R8000 so we can assume that ··· 174 #endif 175 #endif 176 177 #ifndef cpu_has_mips_2 178 # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) 179 #endif ··· 195 #ifndef cpu_has_mips32r2 196 # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) 197 #endif 198 #ifndef cpu_has_mips64r1 199 # define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) 200 #endif 201 #ifndef cpu_has_mips64r2 202 # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) 203 #endif 204 205 /* ··· 220 #define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) 221 #define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) 222 223 - #define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2) 224 225 - #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) 226 - #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) 227 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) 228 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) 229 #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ 230 - cpu_has_mips64r1 | cpu_has_mips64r2) 231 232 #ifndef cpu_has_mips_r2_exec_hazard 233 - #define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2 234 #endif 235 236 /*
··· 38 #ifndef cpu_has_maar 39 #define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) 40 #endif 41 + #ifndef cpu_has_rw_llb 42 + #define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB) 43 + #endif 44 45 /* 46 * For the moment we don't consider R6000 and R8000 so we can assume that ··· 171 #endif 172 #endif 173 174 + #ifndef cpu_has_mips_1 175 + # define cpu_has_mips_1 (!cpu_has_mips_r6) 176 + #endif 177 #ifndef cpu_has_mips_2 178 # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) 179 #endif ··· 189 #ifndef cpu_has_mips32r2 190 # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) 191 #endif 192 + #ifndef cpu_has_mips32r6 193 + # define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6) 194 + #endif 195 #ifndef cpu_has_mips64r1 196 # define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) 197 #endif 198 #ifndef cpu_has_mips64r2 199 # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) 200 + #endif 201 + #ifndef cpu_has_mips64r6 202 + # define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6) 203 #endif 204 205 /* ··· 208 #define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) 209 #define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) 210 211 + #define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \ 212 + cpu_has_mips_r6) 213 214 + #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6) 215 + #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6) 216 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) 217 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) 218 + #define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6) 219 #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ 220 + cpu_has_mips32r6 | cpu_has_mips64r1 | \ 221 + cpu_has_mips64r2 | cpu_has_mips64r6) 222 + 223 + /* MIPSR2 and MIPSR6 have a lot of similarities */ 224 + #define cpu_has_mips_r2_r6 
(cpu_has_mips_r2 | cpu_has_mips_r6) 225 226 #ifndef cpu_has_mips_r2_exec_hazard 227 + #define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6) 228 #endif 229 230 /*
+5
arch/mips/include/asm/cpu-info.h
··· 84 * (shifted by _CACHE_SHIFT) 85 */ 86 unsigned int writecombine; 87 } __attribute__((aligned(SMP_CACHE_BYTES))); 88 89 extern struct cpuinfo_mips cpu_data[];
··· 84 * (shifted by _CACHE_SHIFT) 85 */ 86 unsigned int writecombine; 87 + /* 88 + * Simple counter to prevent enabling HTW in nested 89 + * htw_start/htw_stop calls 90 + */ 91 + unsigned int htw_seq; 92 } __attribute__((aligned(SMP_CACHE_BYTES))); 93 94 extern struct cpuinfo_mips cpu_data[];
+7
arch/mips/include/asm/cpu-type.h
··· 54 case CPU_M5150: 55 #endif 56 57 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 58 case CPU_5KC: 59 case CPU_5KE:
··· 54 case CPU_M5150: 55 #endif 56 57 + #if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \ 58 + defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \ 59 + defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \ 60 + defined(CONFIG_SYS_HAS_CPU_MIPS64_R6) 61 + case CPU_QEMU_GENERIC: 62 + #endif 63 + 64 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 65 case CPU_5KC: 66 case CPU_5KE:
+9 -2
arch/mips/include/asm/cpu.h
··· 93 * These are the PRID's for when 23:16 == PRID_COMP_MIPS 94 */ 95 96 #define PRID_IMP_4KC 0x8000 97 #define PRID_IMP_5KC 0x8100 98 #define PRID_IMP_20KC 0x8200 ··· 313 CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, 314 CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, 315 316 CPU_LAST 317 }; 318 ··· 332 #define MIPS_CPU_ISA_M32R2 0x00000020 333 #define MIPS_CPU_ISA_M64R1 0x00000040 334 #define MIPS_CPU_ISA_M64R2 0x00000080 335 336 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ 337 - MIPS_CPU_ISA_M32R2) 338 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ 339 - MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) 340 341 /* 342 * CPU Option encodings ··· 376 #define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ 377 #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ 378 #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ 379 380 /* 381 * CPU ASE encodings
··· 93 * These are the PRID's for when 23:16 == PRID_COMP_MIPS 94 */ 95 96 + #define PRID_IMP_QEMU_GENERIC 0x0000 97 #define PRID_IMP_4KC 0x8000 98 #define PRID_IMP_5KC 0x8100 99 #define PRID_IMP_20KC 0x8200 ··· 312 CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, 313 CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, 314 315 + CPU_QEMU_GENERIC, 316 + 317 CPU_LAST 318 }; 319 ··· 329 #define MIPS_CPU_ISA_M32R2 0x00000020 330 #define MIPS_CPU_ISA_M64R1 0x00000040 331 #define MIPS_CPU_ISA_M64R2 0x00000080 332 + #define MIPS_CPU_ISA_M32R6 0x00000100 333 + #define MIPS_CPU_ISA_M64R6 0x00000200 334 335 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ 336 + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6) 337 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ 338 + MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \ 339 + MIPS_CPU_ISA_M64R6) 340 341 /* 342 * CPU Option encodings ··· 370 #define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ 371 #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ 372 #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ 373 + #define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */ 374 375 /* 376 * CPU ASE encodings
+2 -2
arch/mips/include/asm/edac.h
··· 26 " sc %0, %1 \n" 27 " beqz %0, 1b \n" 28 " .set mips0 \n" 29 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr) 30 - : GCC_OFF12_ASM() (*virt_addr)); 31 32 virt_addr++; 33 }
··· 26 " sc %0, %1 \n" 27 " beqz %0, 1b \n" 28 " .set mips0 \n" 29 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr) 30 + : GCC_OFF_SMALL_ASM() (*virt_addr)); 31 32 virt_addr++; 33 }
+6 -4
arch/mips/include/asm/elf.h
··· 417 struct arch_elf_state { 418 int fp_abi; 419 int interp_fp_abi; 420 - int overall_abi; 421 }; 422 423 #define INIT_ARCH_ELF_STATE { \ 424 - .fp_abi = -1, \ 425 - .interp_fp_abi = -1, \ 426 - .overall_abi = -1, \ 427 } 428 429 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
··· 417 struct arch_elf_state { 418 int fp_abi; 419 int interp_fp_abi; 420 + int overall_fp_mode; 421 }; 422 423 + #define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */ 424 + 425 #define INIT_ARCH_ELF_STATE { \ 426 + .fp_abi = MIPS_ABI_FP_UNKNOWN, \ 427 + .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \ 428 + .overall_fp_mode = -1, \ 429 } 430 431 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+2 -1
arch/mips/include/asm/fpu.h
··· 68 goto fr_common; 69 70 case FPU_64BIT: 71 - #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) 72 /* we only have a 32-bit FPU */ 73 return SIGFPE; 74 #endif
··· 68 goto fr_common; 69 70 case FPU_64BIT: 71 + #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \ 72 + || defined(CONFIG_64BIT)) 73 /* we only have a 32-bit FPU */ 74 return SIGFPE; 75 #endif
+12 -12
arch/mips/include/asm/futex.h
··· 45 " "__UA_ADDR "\t2b, 4b \n" \ 46 " .previous \n" \ 47 : "=r" (ret), "=&r" (oldval), \ 48 - "=" GCC_OFF12_ASM() (*uaddr) \ 49 - : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 50 "i" (-EFAULT) \ 51 : "memory"); \ 52 } else if (cpu_has_llsc) { \ 53 __asm__ __volatile__( \ 54 " .set push \n" \ 55 " .set noat \n" \ 56 - " .set arch=r4000 \n" \ 57 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ 58 " .set mips0 \n" \ 59 " " insn " \n" \ 60 - " .set arch=r4000 \n" \ 61 "2: "user_sc("$1", "%2")" \n" \ 62 " beqz $1, 1b \n" \ 63 __WEAK_LLSC_MB \ ··· 74 " "__UA_ADDR "\t2b, 4b \n" \ 75 " .previous \n" \ 76 : "=r" (ret), "=&r" (oldval), \ 77 - "=" GCC_OFF12_ASM() (*uaddr) \ 78 - : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 79 "i" (-EFAULT) \ 80 : "memory"); \ 81 } else \ ··· 174 " "__UA_ADDR "\t1b, 4b \n" 175 " "__UA_ADDR "\t2b, 4b \n" 176 " .previous \n" 177 - : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 178 - : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 179 "i" (-EFAULT) 180 : "memory"); 181 } else if (cpu_has_llsc) { ··· 183 "# futex_atomic_cmpxchg_inatomic \n" 184 " .set push \n" 185 " .set noat \n" 186 - " .set arch=r4000 \n" 187 "1: "user_ll("%1", "%3")" \n" 188 " bne %1, %z4, 3f \n" 189 " .set mips0 \n" 190 " move $1, %z5 \n" 191 - " .set arch=r4000 \n" 192 "2: "user_sc("$1", "%2")" \n" 193 " beqz $1, 1b \n" 194 __WEAK_LLSC_MB ··· 203 " "__UA_ADDR "\t1b, 4b \n" 204 " "__UA_ADDR "\t2b, 4b \n" 205 " .previous \n" 206 - : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 207 - : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 208 "i" (-EFAULT) 209 : "memory"); 210 } else
··· 45 " "__UA_ADDR "\t2b, 4b \n" \ 46 " .previous \n" \ 47 : "=r" (ret), "=&r" (oldval), \ 48 + "=" GCC_OFF_SMALL_ASM() (*uaddr) \ 49 + : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ 50 "i" (-EFAULT) \ 51 : "memory"); \ 52 } else if (cpu_has_llsc) { \ 53 __asm__ __volatile__( \ 54 " .set push \n" \ 55 " .set noat \n" \ 56 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 57 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ 58 " .set mips0 \n" \ 59 " " insn " \n" \ 60 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 61 "2: "user_sc("$1", "%2")" \n" \ 62 " beqz $1, 1b \n" \ 63 __WEAK_LLSC_MB \ ··· 74 " "__UA_ADDR "\t2b, 4b \n" \ 75 " .previous \n" \ 76 : "=r" (ret), "=&r" (oldval), \ 77 + "=" GCC_OFF_SMALL_ASM() (*uaddr) \ 78 + : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ 79 "i" (-EFAULT) \ 80 : "memory"); \ 81 } else \ ··· 174 " "__UA_ADDR "\t1b, 4b \n" 175 " "__UA_ADDR "\t2b, 4b \n" 176 " .previous \n" 177 + : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) 178 + : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 179 "i" (-EFAULT) 180 : "memory"); 181 } else if (cpu_has_llsc) { ··· 183 "# futex_atomic_cmpxchg_inatomic \n" 184 " .set push \n" 185 " .set noat \n" 186 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 187 "1: "user_ll("%1", "%3")" \n" 188 " bne %1, %z4, 3f \n" 189 " .set mips0 \n" 190 " move $1, %z5 \n" 191 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 192 "2: "user_sc("$1", "%2")" \n" 193 " beqz $1, 1b \n" 194 __WEAK_LLSC_MB ··· 203 " "__UA_ADDR "\t1b, 4b \n" 204 " "__UA_ADDR "\t2b, 4b \n" 205 " .previous \n" 206 + : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) 207 + : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 208 "i" (-EFAULT) 209 : "memory"); 210 } else
-2
arch/mips/include/asm/gio_device.h
··· 25 26 int (*probe)(struct gio_device *, const struct gio_device_id *); 27 void (*remove)(struct gio_device *); 28 - int (*suspend)(struct gio_device *, pm_message_t); 29 - int (*resume)(struct gio_device *); 30 void (*shutdown)(struct gio_device *); 31 32 struct device_driver driver;
··· 25 26 int (*probe)(struct gio_device *, const struct gio_device_id *); 27 void (*remove)(struct gio_device *); 28 void (*shutdown)(struct gio_device *); 29 30 struct device_driver driver;
+5 -4
arch/mips/include/asm/hazards.h
··· 11 #define _ASM_HAZARDS_H 12 13 #include <linux/stringify.h> 14 15 #define ___ssnop \ 16 sll $0, $0, 1 ··· 22 /* 23 * TLB hazards 24 */ 25 - #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON) 26 27 /* 28 * MIPSR2 defines ehb for hazard avoidance ··· 59 unsigned long tmp; \ 60 \ 61 __asm__ __volatile__( \ 62 - " .set mips64r2 \n" \ 63 " dla %0, 1f \n" \ 64 " jr.hb %0 \n" \ 65 " .set mips0 \n" \ ··· 133 134 #define instruction_hazard() \ 135 do { \ 136 - if (cpu_has_mips_r2) \ 137 __instruction_hazard(); \ 138 } while (0) 139 ··· 241 242 #define __disable_fpu_hazard 243 244 - #elif defined(CONFIG_CPU_MIPSR2) 245 246 #define __enable_fpu_hazard \ 247 ___ehb
··· 11 #define _ASM_HAZARDS_H 12 13 #include <linux/stringify.h> 14 + #include <asm/compiler.h> 15 16 #define ___ssnop \ 17 sll $0, $0, 1 ··· 21 /* 22 * TLB hazards 23 */ 24 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) && !defined(CONFIG_CPU_CAVIUM_OCTEON) 25 26 /* 27 * MIPSR2 defines ehb for hazard avoidance ··· 58 unsigned long tmp; \ 59 \ 60 __asm__ __volatile__( \ 61 + " .set "MIPS_ISA_LEVEL" \n" \ 62 " dla %0, 1f \n" \ 63 " jr.hb %0 \n" \ 64 " .set mips0 \n" \ ··· 132 133 #define instruction_hazard() \ 134 do { \ 135 + if (cpu_has_mips_r2_r6) \ 136 __instruction_hazard(); \ 137 } while (0) 138 ··· 240 241 #define __disable_fpu_hazard 242 243 + #elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 244 245 #define __enable_fpu_hazard \ 246 ___ehb
+4 -3
arch/mips/include/asm/irqflags.h
··· 15 16 #include <linux/compiler.h> 17 #include <linux/stringify.h> 18 #include <asm/hazards.h> 19 20 - #ifdef CONFIG_CPU_MIPSR2 21 22 static inline void arch_local_irq_disable(void) 23 { ··· 119 unsigned long arch_local_irq_save(void); 120 void arch_local_irq_restore(unsigned long flags); 121 void __arch_local_irq_restore(unsigned long flags); 122 - #endif /* CONFIG_CPU_MIPSR2 */ 123 124 static inline void arch_local_irq_enable(void) 125 { ··· 127 " .set push \n" 128 " .set reorder \n" 129 " .set noat \n" 130 - #if defined(CONFIG_CPU_MIPSR2) 131 " ei \n" 132 #else 133 " mfc0 $1,$12 \n"
··· 15 16 #include <linux/compiler.h> 17 #include <linux/stringify.h> 18 + #include <asm/compiler.h> 19 #include <asm/hazards.h> 20 21 + #if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6) 22 23 static inline void arch_local_irq_disable(void) 24 { ··· 118 unsigned long arch_local_irq_save(void); 119 void arch_local_irq_restore(unsigned long flags); 120 void __arch_local_irq_restore(unsigned long flags); 121 + #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 122 123 static inline void arch_local_irq_enable(void) 124 { ··· 126 " .set push \n" 127 " .set reorder \n" 128 " .set noat \n" 129 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 130 " ei \n" 131 #else 132 " mfc0 $1,$12 \n"
+3 -2
arch/mips/include/asm/local.h
··· 5 #include <linux/bitops.h> 6 #include <linux/atomic.h> 7 #include <asm/cmpxchg.h> 8 #include <asm/war.h> 9 10 typedef struct ··· 48 unsigned long temp; 49 50 __asm__ __volatile__( 51 - " .set arch=r4000 \n" 52 "1:" __LL "%1, %2 # local_add_return \n" 53 " addu %0, %1, %3 \n" 54 __SC "%0, %2 \n" ··· 93 unsigned long temp; 94 95 __asm__ __volatile__( 96 - " .set arch=r4000 \n" 97 "1:" __LL "%1, %2 # local_sub_return \n" 98 " subu %0, %1, %3 \n" 99 __SC "%0, %2 \n"
··· 5 #include <linux/bitops.h> 6 #include <linux/atomic.h> 7 #include <asm/cmpxchg.h> 8 + #include <asm/compiler.h> 9 #include <asm/war.h> 10 11 typedef struct ··· 47 unsigned long temp; 48 49 __asm__ __volatile__( 50 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 51 "1:" __LL "%1, %2 # local_add_return \n" 52 " addu %0, %1, %3 \n" 53 __SC "%0, %2 \n" ··· 92 unsigned long temp; 93 94 __asm__ __volatile__( 95 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 96 "1:" __LL "%1, %2 # local_sub_return \n" 97 " subu %0, %1, %3 \n" 98 __SC "%0, %2 \n"
+41 -23
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
··· 8 #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H 9 #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H 10 11 - 12 - #define CP0_CYCLE_COUNTER $9, 6 13 #define CP0_CVMCTL_REG $9, 7 14 #define CP0_CVMMEMCTL_REG $11,7 15 #define CP0_PRID_REG $15, 0 16 #define CP0_PRID_OCTEON_PASS1 0x000d0000 17 #define CP0_PRID_OCTEON_CN30XX 0x000d0200 18 ··· 37 # Needed for octeon specific memcpy 38 or v0, v0, 0x5001 39 xor v0, v0, 0x1001 40 - # Read the processor ID register 41 - mfc0 v1, CP0_PRID_REG 42 - # Disable instruction prefetching (Octeon Pass1 errata) 43 - or v0, v0, 0x2000 44 - # Skip reenable of prefetching for Octeon Pass1 45 - beq v1, CP0_PRID_OCTEON_PASS1, skip 46 - nop 47 - # Reenable instruction prefetching, not on Pass1 48 - xor v0, v0, 0x2000 49 - # Strip off pass number off of processor id 50 - srl v1, 8 51 - sll v1, 8 52 - # CN30XX needs some extra stuff turned off for better performance 53 - bne v1, CP0_PRID_OCTEON_CN30XX, skip 54 - nop 55 - # CN30XX Use random Icache replacement 56 - or v0, v0, 0x400 57 - # CN30XX Disable instruction prefetching 58 - or v0, v0, 0x2000 59 - skip: 60 # First clear off CvmCtl[IPPCI] bit and move the performance 61 # counters interrupt to IRQ 6 62 - li v1, ~(7 << 7) 63 and v0, v0, v1 64 ori v0, v0, (6 << 7) 65 # Write the cavium control register 66 dmtc0 v0, CP0_CVMCTL_REG 67 sync 68 # Flush dcache after config change 69 cache 9, 0($0) 70 # Get my core id 71 rdhwr v0, $0 72 # Jump the master to kernel_entry
··· 8 #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H 9 #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H 10 11 #define CP0_CVMCTL_REG $9, 7 12 #define CP0_CVMMEMCTL_REG $11,7 13 #define CP0_PRID_REG $15, 0 14 + #define CP0_DCACHE_ERR_REG $27, 1 15 #define CP0_PRID_OCTEON_PASS1 0x000d0000 16 #define CP0_PRID_OCTEON_CN30XX 0x000d0200 17 ··· 38 # Needed for octeon specific memcpy 39 or v0, v0, 0x5001 40 xor v0, v0, 0x1001 41 # First clear off CvmCtl[IPPCI] bit and move the performance 42 # counters interrupt to IRQ 6 43 + dli v1, ~(7 << 7) 44 and v0, v0, v1 45 ori v0, v0, (6 << 7) 46 + 47 + mfc0 v1, CP0_PRID_REG 48 + and t1, v1, 0xfff8 49 + xor t1, t1, 0x9000 # 63-P1 50 + beqz t1, 4f 51 + and t1, v1, 0xfff8 52 + xor t1, t1, 0x9008 # 63-P2 53 + beqz t1, 4f 54 + and t1, v1, 0xfff8 55 + xor t1, t1, 0x9100 # 68-P1 56 + beqz t1, 4f 57 + and t1, v1, 0xff00 58 + xor t1, t1, 0x9200 # 66-PX 59 + bnez t1, 5f # Skip WAR for others. 60 + and t1, v1, 0x00ff 61 + slti t1, t1, 2 # 66-P1.2 and later good. 62 + beqz t1, 5f 63 + 64 + 4: # core-16057 work around 65 + or v0, v0, 0x2000 # Set IPREF bit. 66 + 67 + 5: # No core-16057 work around 68 # Write the cavium control register 69 dmtc0 v0, CP0_CVMCTL_REG 70 sync 71 # Flush dcache after config change 72 cache 9, 0($0) 73 + # Zero all of CVMSEG to make sure parity is correct 74 + dli v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 75 + dsll v0, 7 76 + beqz v0, 2f 77 + 1: dsubu v0, 8 78 + sd $0, -32768(v0) 79 + bnez v0, 1b 80 + 2: 81 + mfc0 v0, CP0_PRID_REG 82 + bbit0 v0, 15, 1f 83 + # OCTEON II or better have bit 15 set. Clear the error bits. 84 + and t1, v0, 0xff00 85 + dli v0, 0x9500 86 + bge t1, v0, 1f # OCTEON III has no DCACHE_ERR_REG COP0 87 + dli v0, 0x27 88 + dmtc0 v0, CP0_DCACHE_ERR_REG 89 + 1: 90 # Get my core id 91 rdhwr v0, $0 92 # Jump the master to kernel_entry
+3
arch/mips/include/asm/mach-cavium-octeon/war.h
··· 22 #define R10000_LLSC_WAR 0 23 #define MIPS34K_MISSED_ITLB_WAR 0 24 25 #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
··· 22 #define R10000_LLSC_WAR 0 23 #define MIPS34K_MISSED_ITLB_WAR 0 24 25 + #define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR \ 26 + OCTEON_IS_MODEL(OCTEON_CN6XXX) 27 + 28 #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
+12 -12
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
··· 85 " "__beqz"%0, 1b \n" 86 " nop \n" 87 " .set pop \n" 88 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 89 - : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); 90 } 91 92 /* ··· 106 " "__beqz"%0, 1b \n" 107 " nop \n" 108 " .set pop \n" 109 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 110 - : "ir" (mask), GCC_OFF12_ASM() (*addr)); 111 } 112 113 /* ··· 127 " "__beqz"%0, 1b \n" 128 " nop \n" 129 " .set pop \n" 130 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 131 - : "ir" (~mask), GCC_OFF12_ASM() (*addr)); 132 } 133 134 /* ··· 148 " "__beqz"%0, 1b \n" 149 " nop \n" 150 " .set pop \n" 151 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 152 - : "ir" (mask), GCC_OFF12_ASM() (*addr)); 153 } 154 155 /* ··· 220 " .set arch=r4000 \n" \ 221 "1: ll %0, %1 #custom_read_reg32 \n" \ 222 " .set pop \n" \ 223 - : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 224 - : GCC_OFF12_ASM() (*address)) 225 226 #define custom_write_reg32(address, tmp) \ 227 __asm__ __volatile__( \ ··· 231 " "__beqz"%0, 1b \n" \ 232 " nop \n" \ 233 " .set pop \n" \ 234 - : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 235 - : "0" (tmp), GCC_OFF12_ASM() (*address)) 236 237 #endif /* __ASM_REGOPS_H__ */
··· 85 " "__beqz"%0, 1b \n" 86 " nop \n" 87 " .set pop \n" 88 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) 89 + : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr)); 90 } 91 92 /* ··· 106 " "__beqz"%0, 1b \n" 107 " nop \n" 108 " .set pop \n" 109 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) 110 + : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); 111 } 112 113 /* ··· 127 " "__beqz"%0, 1b \n" 128 " nop \n" 129 " .set pop \n" 130 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) 131 + : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr)); 132 } 133 134 /* ··· 148 " "__beqz"%0, 1b \n" 149 " nop \n" 150 " .set pop \n" 151 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) 152 + : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); 153 } 154 155 /* ··· 220 " .set arch=r4000 \n" \ 221 "1: ll %0, %1 #custom_read_reg32 \n" \ 222 " .set pop \n" \ 223 + : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ 224 + : GCC_OFF_SMALL_ASM() (*address)) 225 226 #define custom_write_reg32(address, tmp) \ 227 __asm__ __volatile__( \ ··· 231 " "__beqz"%0, 1b \n" \ 232 " nop \n" \ 233 " .set pop \n" \ 234 + : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ 235 + : "0" (tmp), GCC_OFF_SMALL_ASM() (*address)) 236 237 #endif /* __ASM_REGOPS_H__ */
+96
arch/mips/include/asm/mips-r2-to-r6-emul.h
···
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2014 Imagination Technologies Ltd. 7 + * Author: Markos Chandras <markos.chandras@imgtec.com> 8 + */ 9 + 10 + #ifndef __ASM_MIPS_R2_TO_R6_EMUL_H 11 + #define __ASM_MIPS_R2_TO_R6_EMUL_H 12 + 13 + struct mips_r2_emulator_stats { 14 + u64 movs; 15 + u64 hilo; 16 + u64 muls; 17 + u64 divs; 18 + u64 dsps; 19 + u64 bops; 20 + u64 traps; 21 + u64 fpus; 22 + u64 loads; 23 + u64 stores; 24 + u64 llsc; 25 + u64 dsemul; 26 + }; 27 + 28 + struct mips_r2br_emulator_stats { 29 + u64 jrs; 30 + u64 bltzl; 31 + u64 bgezl; 32 + u64 bltzll; 33 + u64 bgezll; 34 + u64 bltzall; 35 + u64 bgezall; 36 + u64 bltzal; 37 + u64 bgezal; 38 + u64 beql; 39 + u64 bnel; 40 + u64 blezl; 41 + u64 bgtzl; 42 + }; 43 + 44 + #ifdef CONFIG_DEBUG_FS 45 + 46 + #define MIPS_R2_STATS(M) \ 47 + do { \ 48 + u32 nir; \ 49 + int err; \ 50 + \ 51 + preempt_disable(); \ 52 + __this_cpu_inc(mipsr2emustats.M); \ 53 + err = __get_user(nir, (u32 __user *)regs->cp0_epc); \ 54 + if (!err) { \ 55 + if (nir == BREAK_MATH) \ 56 + __this_cpu_inc(mipsr2bdemustats.M); \ 57 + } \ 58 + preempt_enable(); \ 59 + } while (0) 60 + 61 + #define MIPS_R2BR_STATS(M) \ 62 + do { \ 63 + preempt_disable(); \ 64 + __this_cpu_inc(mipsr2bremustats.M); \ 65 + preempt_enable(); \ 66 + } while (0) 67 + 68 + #else 69 + 70 + #define MIPS_R2_STATS(M) do { } while (0) 71 + #define MIPS_R2BR_STATS(M) do { } while (0) 72 + 73 + #endif /* CONFIG_DEBUG_FS */ 74 + 75 + struct r2_decoder_table { 76 + u32 mask; 77 + u32 code; 78 + int (*func)(struct pt_regs *regs, u32 inst); 79 + }; 80 + 81 + 82 + extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 83 + const char *str); 84 + 85 + #ifndef CONFIG_MIPSR2_TO_R6_EMULATOR 86 + static int mipsr2_emulation; 87 + static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) 
{ return 0; }; 88 + #else 89 + /* MIPS R2 Emulator ON/OFF */ 90 + extern int mipsr2_emulation; 91 + extern int mipsr2_decoder(struct pt_regs *regs, u32 inst); 92 + #endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */ 93 + 94 + #define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation) 95 + 96 + #endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
+4
arch/mips/include/asm/mipsregs.h
··· 653 #define MIPS_CONF5_NF (_ULCAST_(1) << 0) 654 #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) 655 #define MIPS_CONF5_MRP (_ULCAST_(1) << 3) 656 #define MIPS_CONF5_MVH (_ULCAST_(1) << 5) 657 #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) 658 #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) ··· 1128 #define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) 1129 #define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) 1130 1131 #define read_c0_maar() __read_ulong_c0_register($17, 1) 1132 #define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) 1133 #define read_c0_maari() __read_32bit_c0_register($17, 2) ··· 1912 __BUILD_SET_C0(intcontrol) 1913 __BUILD_SET_C0(intctl) 1914 __BUILD_SET_C0(srsmap) 1915 __BUILD_SET_C0(brcm_config_0) 1916 __BUILD_SET_C0(brcm_bus_pll) 1917 __BUILD_SET_C0(brcm_reset)
··· 653 #define MIPS_CONF5_NF (_ULCAST_(1) << 0) 654 #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) 655 #define MIPS_CONF5_MRP (_ULCAST_(1) << 3) 656 + #define MIPS_CONF5_LLB (_ULCAST_(1) << 4) 657 #define MIPS_CONF5_MVH (_ULCAST_(1) << 5) 658 #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) 659 #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) ··· 1127 #define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) 1128 #define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) 1129 1130 + #define read_c0_lladdr() __read_ulong_c0_register($17, 0) 1131 + #define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val) 1132 #define read_c0_maar() __read_ulong_c0_register($17, 1) 1133 #define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) 1134 #define read_c0_maari() __read_32bit_c0_register($17, 2) ··· 1909 __BUILD_SET_C0(intcontrol) 1910 __BUILD_SET_C0(intctl) 1911 __BUILD_SET_C0(srsmap) 1912 + __BUILD_SET_C0(pagegrain) 1913 __BUILD_SET_C0(brcm_config_0) 1914 __BUILD_SET_C0(brcm_bus_pll) 1915 __BUILD_SET_C0(brcm_reset)
+3
arch/mips/include/asm/mmu.h
··· 1 #ifndef __ASM_MMU_H 2 #define __ASM_MMU_H 3 4 typedef struct { 5 unsigned long asid[NR_CPUS]; 6 void *vdso; 7 } mm_context_t; 8 9 #endif /* __ASM_MMU_H */
··· 1 #ifndef __ASM_MMU_H 2 #define __ASM_MMU_H 3 4 + #include <linux/atomic.h> 5 + 6 typedef struct { 7 unsigned long asid[NR_CPUS]; 8 void *vdso; 9 + atomic_t fp_mode_switching; 10 } mm_context_t; 11 12 #endif /* __ASM_MMU_H */
+8 -1
arch/mips/include/asm/mmu_context.h
··· 25 if (cpu_has_htw) { \ 26 write_c0_pwbase(pgd); \ 27 back_to_back_c0_hazard(); \ 28 - htw_reset(); \ 29 } \ 30 } while (0) 31 ··· 131 for_each_possible_cpu(i) 132 cpu_context(i, mm) = 0; 133 134 return 0; 135 } 136 ··· 143 unsigned long flags; 144 local_irq_save(flags); 145 146 /* Check if our ASID is of an older version and thus invalid */ 147 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 148 get_new_mmu_context(next, cpu); ··· 156 */ 157 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 158 cpumask_set_cpu(cpu, mm_cpumask(next)); 159 160 local_irq_restore(flags); 161 } ··· 183 184 local_irq_save(flags); 185 186 /* Unconditionally get a new ASID. */ 187 get_new_mmu_context(next, cpu); 188 ··· 193 /* mark mmu ownership change */ 194 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 195 cpumask_set_cpu(cpu, mm_cpumask(next)); 196 197 local_irq_restore(flags); 198 } ··· 208 unsigned long flags; 209 210 local_irq_save(flags); 211 212 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { 213 get_new_mmu_context(mm, cpu); ··· 217 /* will get a new context next time */ 218 cpu_context(cpu, mm) = 0; 219 } 220 local_irq_restore(flags); 221 } 222
··· 25 if (cpu_has_htw) { \ 26 write_c0_pwbase(pgd); \ 27 back_to_back_c0_hazard(); \ 28 } \ 29 } while (0) 30 ··· 132 for_each_possible_cpu(i) 133 cpu_context(i, mm) = 0; 134 135 + atomic_set(&mm->context.fp_mode_switching, 0); 136 + 137 return 0; 138 } 139 ··· 142 unsigned long flags; 143 local_irq_save(flags); 144 145 + htw_stop(); 146 /* Check if our ASID is of an older version and thus invalid */ 147 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 148 get_new_mmu_context(next, cpu); ··· 154 */ 155 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 156 cpumask_set_cpu(cpu, mm_cpumask(next)); 157 + htw_start(); 158 159 local_irq_restore(flags); 160 } ··· 180 181 local_irq_save(flags); 182 183 + htw_stop(); 184 /* Unconditionally get a new ASID. */ 185 get_new_mmu_context(next, cpu); 186 ··· 189 /* mark mmu ownership change */ 190 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 191 cpumask_set_cpu(cpu, mm_cpumask(next)); 192 + htw_start(); 193 194 local_irq_restore(flags); 195 } ··· 203 unsigned long flags; 204 205 local_irq_save(flags); 206 + htw_stop(); 207 208 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { 209 get_new_mmu_context(mm, cpu); ··· 211 /* will get a new context next time */ 212 cpu_context(cpu, mm) = 0; 213 } 214 + htw_start(); 215 local_irq_restore(flags); 216 } 217
+4
arch/mips/include/asm/module.h
··· 88 #define MODULE_PROC_FAMILY "MIPS32_R1 " 89 #elif defined CONFIG_CPU_MIPS32_R2 90 #define MODULE_PROC_FAMILY "MIPS32_R2 " 91 #elif defined CONFIG_CPU_MIPS64_R1 92 #define MODULE_PROC_FAMILY "MIPS64_R1 " 93 #elif defined CONFIG_CPU_MIPS64_R2 94 #define MODULE_PROC_FAMILY "MIPS64_R2 " 95 #elif defined CONFIG_CPU_R3000 96 #define MODULE_PROC_FAMILY "R3000 " 97 #elif defined CONFIG_CPU_TX39XX
··· 88 #define MODULE_PROC_FAMILY "MIPS32_R1 " 89 #elif defined CONFIG_CPU_MIPS32_R2 90 #define MODULE_PROC_FAMILY "MIPS32_R2 " 91 + #elif defined CONFIG_CPU_MIPS32_R6 92 + #define MODULE_PROC_FAMILY "MIPS32_R6 " 93 #elif defined CONFIG_CPU_MIPS64_R1 94 #define MODULE_PROC_FAMILY "MIPS64_R1 " 95 #elif defined CONFIG_CPU_MIPS64_R2 96 #define MODULE_PROC_FAMILY "MIPS64_R2 " 97 + #elif defined CONFIG_CPU_MIPS64_R6 98 + #define MODULE_PROC_FAMILY "MIPS64_R6 " 99 #elif defined CONFIG_CPU_R3000 100 #define MODULE_PROC_FAMILY "R3000 " 101 #elif defined CONFIG_CPU_TX39XX
+1 -1
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
··· 275 " lbu %[ticket], %[now_serving]\n" 276 "4:\n" 277 ".set pop\n" : 278 - [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 279 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), 280 [my_ticket] "=r"(my_ticket) 281 );
··· 275 " lbu %[ticket], %[now_serving]\n" 276 "4:\n" 277 ".set pop\n" : 278 + [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 279 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), 280 [my_ticket] "=r"(my_ticket) 281 );
+306
arch/mips/include/asm/octeon/cvmx-rst-defs.h
···
··· 1 + /***********************license start*************** 2 + * Author: Cavium Inc. 3 + * 4 + * Contact: support@cavium.com 5 + * This file is part of the OCTEON SDK 6 + * 7 + * Copyright (c) 2003-2014 Cavium Inc. 8 + * 9 + * This file is free software; you can redistribute it and/or modify 10 + * it under the terms of the GNU General Public License, Version 2, as 11 + * published by the Free Software Foundation. 12 + * 13 + * This file is distributed in the hope that it will be useful, but 14 + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty 15 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or 16 + * NONINFRINGEMENT. See the GNU General Public License for more 17 + * details. 18 + * 19 + * You should have received a copy of the GNU General Public License 20 + * along with this file; if not, write to the Free Software 21 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 22 + * or visit http://www.gnu.org/licenses/. 23 + * 24 + * This file may also be available under a different license from Cavium. 25 + * Contact Cavium Inc. 
for more information 26 + ***********************license end**************************************/ 27 + 28 + #ifndef __CVMX_RST_DEFS_H__ 29 + #define __CVMX_RST_DEFS_H__ 30 + 31 + #define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull)) 32 + #define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull)) 33 + #define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull)) 34 + #define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8) 35 + #define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull)) 36 + #define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull)) 37 + #define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull)) 38 + #define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull)) 39 + #define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull)) 40 + #define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull)) 41 + #define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8) 42 + #define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull)) 43 + 44 + union cvmx_rst_boot { 45 + uint64_t u64; 46 + struct cvmx_rst_boot_s { 47 + #ifdef __BIG_ENDIAN_BITFIELD 48 + uint64_t chipkill:1; 49 + uint64_t jtcsrdis:1; 50 + uint64_t ejtagdis:1; 51 + uint64_t romen:1; 52 + uint64_t ckill_ppdis:1; 53 + uint64_t jt_tstmode:1; 54 + uint64_t vrm_err:1; 55 + uint64_t reserved_37_56:20; 56 + uint64_t c_mul:7; 57 + uint64_t pnr_mul:6; 58 + uint64_t reserved_21_23:3; 59 + uint64_t lboot_oci:3; 60 + uint64_t lboot_ext:6; 61 + uint64_t lboot:10; 62 + uint64_t rboot:1; 63 + uint64_t rboot_pin:1; 64 + #else 65 + uint64_t rboot_pin:1; 66 + uint64_t rboot:1; 67 + uint64_t lboot:10; 68 + uint64_t lboot_ext:6; 69 + uint64_t lboot_oci:3; 70 + uint64_t reserved_21_23:3; 71 + uint64_t pnr_mul:6; 72 + uint64_t c_mul:7; 73 + uint64_t reserved_37_56:20; 74 + uint64_t vrm_err:1; 75 + uint64_t jt_tstmode:1; 76 + uint64_t ckill_ppdis:1; 77 + uint64_t romen:1; 78 + uint64_t ejtagdis:1; 79 + 
uint64_t jtcsrdis:1; 80 + uint64_t chipkill:1; 81 + #endif 82 + } s; 83 + struct cvmx_rst_boot_s cn70xx; 84 + struct cvmx_rst_boot_s cn70xxp1; 85 + struct cvmx_rst_boot_s cn78xx; 86 + }; 87 + 88 + union cvmx_rst_cfg { 89 + uint64_t u64; 90 + struct cvmx_rst_cfg_s { 91 + #ifdef __BIG_ENDIAN_BITFIELD 92 + uint64_t bist_delay:58; 93 + uint64_t reserved_3_5:3; 94 + uint64_t cntl_clr_bist:1; 95 + uint64_t warm_clr_bist:1; 96 + uint64_t soft_clr_bist:1; 97 + #else 98 + uint64_t soft_clr_bist:1; 99 + uint64_t warm_clr_bist:1; 100 + uint64_t cntl_clr_bist:1; 101 + uint64_t reserved_3_5:3; 102 + uint64_t bist_delay:58; 103 + #endif 104 + } s; 105 + struct cvmx_rst_cfg_s cn70xx; 106 + struct cvmx_rst_cfg_s cn70xxp1; 107 + struct cvmx_rst_cfg_s cn78xx; 108 + }; 109 + 110 + union cvmx_rst_ckill { 111 + uint64_t u64; 112 + struct cvmx_rst_ckill_s { 113 + #ifdef __BIG_ENDIAN_BITFIELD 114 + uint64_t reserved_47_63:17; 115 + uint64_t timer:47; 116 + #else 117 + uint64_t timer:47; 118 + uint64_t reserved_47_63:17; 119 + #endif 120 + } s; 121 + struct cvmx_rst_ckill_s cn70xx; 122 + struct cvmx_rst_ckill_s cn70xxp1; 123 + struct cvmx_rst_ckill_s cn78xx; 124 + }; 125 + 126 + union cvmx_rst_ctlx { 127 + uint64_t u64; 128 + struct cvmx_rst_ctlx_s { 129 + #ifdef __BIG_ENDIAN_BITFIELD 130 + uint64_t reserved_10_63:54; 131 + uint64_t prst_link:1; 132 + uint64_t rst_done:1; 133 + uint64_t rst_link:1; 134 + uint64_t host_mode:1; 135 + uint64_t reserved_4_5:2; 136 + uint64_t rst_drv:1; 137 + uint64_t rst_rcv:1; 138 + uint64_t rst_chip:1; 139 + uint64_t rst_val:1; 140 + #else 141 + uint64_t rst_val:1; 142 + uint64_t rst_chip:1; 143 + uint64_t rst_rcv:1; 144 + uint64_t rst_drv:1; 145 + uint64_t reserved_4_5:2; 146 + uint64_t host_mode:1; 147 + uint64_t rst_link:1; 148 + uint64_t rst_done:1; 149 + uint64_t prst_link:1; 150 + uint64_t reserved_10_63:54; 151 + #endif 152 + } s; 153 + struct cvmx_rst_ctlx_s cn70xx; 154 + struct cvmx_rst_ctlx_s cn70xxp1; 155 + struct cvmx_rst_ctlx_s cn78xx; 156 + }; 
157 + 158 + union cvmx_rst_delay { 159 + uint64_t u64; 160 + struct cvmx_rst_delay_s { 161 + #ifdef __BIG_ENDIAN_BITFIELD 162 + uint64_t reserved_32_63:32; 163 + uint64_t warm_rst_dly:16; 164 + uint64_t soft_rst_dly:16; 165 + #else 166 + uint64_t soft_rst_dly:16; 167 + uint64_t warm_rst_dly:16; 168 + uint64_t reserved_32_63:32; 169 + #endif 170 + } s; 171 + struct cvmx_rst_delay_s cn70xx; 172 + struct cvmx_rst_delay_s cn70xxp1; 173 + struct cvmx_rst_delay_s cn78xx; 174 + }; 175 + 176 + union cvmx_rst_eco { 177 + uint64_t u64; 178 + struct cvmx_rst_eco_s { 179 + #ifdef __BIG_ENDIAN_BITFIELD 180 + uint64_t reserved_32_63:32; 181 + uint64_t eco_rw:32; 182 + #else 183 + uint64_t eco_rw:32; 184 + uint64_t reserved_32_63:32; 185 + #endif 186 + } s; 187 + struct cvmx_rst_eco_s cn78xx; 188 + }; 189 + 190 + union cvmx_rst_int { 191 + uint64_t u64; 192 + struct cvmx_rst_int_s { 193 + #ifdef __BIG_ENDIAN_BITFIELD 194 + uint64_t reserved_12_63:52; 195 + uint64_t perst:4; 196 + uint64_t reserved_4_7:4; 197 + uint64_t rst_link:4; 198 + #else 199 + uint64_t rst_link:4; 200 + uint64_t reserved_4_7:4; 201 + uint64_t perst:4; 202 + uint64_t reserved_12_63:52; 203 + #endif 204 + } s; 205 + struct cvmx_rst_int_cn70xx { 206 + #ifdef __BIG_ENDIAN_BITFIELD 207 + uint64_t reserved_11_63:53; 208 + uint64_t perst:3; 209 + uint64_t reserved_3_7:5; 210 + uint64_t rst_link:3; 211 + #else 212 + uint64_t rst_link:3; 213 + uint64_t reserved_3_7:5; 214 + uint64_t perst:3; 215 + uint64_t reserved_11_63:53; 216 + #endif 217 + } cn70xx; 218 + struct cvmx_rst_int_cn70xx cn70xxp1; 219 + struct cvmx_rst_int_s cn78xx; 220 + }; 221 + 222 + union cvmx_rst_ocx { 223 + uint64_t u64; 224 + struct cvmx_rst_ocx_s { 225 + #ifdef __BIG_ENDIAN_BITFIELD 226 + uint64_t reserved_3_63:61; 227 + uint64_t rst_link:3; 228 + #else 229 + uint64_t rst_link:3; 230 + uint64_t reserved_3_63:61; 231 + #endif 232 + } s; 233 + struct cvmx_rst_ocx_s cn78xx; 234 + }; 235 + 236 + union cvmx_rst_power_dbg { 237 + uint64_t u64; 238 + 
struct cvmx_rst_power_dbg_s { 239 + #ifdef __BIG_ENDIAN_BITFIELD 240 + uint64_t reserved_3_63:61; 241 + uint64_t str:3; 242 + #else 243 + uint64_t str:3; 244 + uint64_t reserved_3_63:61; 245 + #endif 246 + } s; 247 + struct cvmx_rst_power_dbg_s cn78xx; 248 + }; 249 + 250 + union cvmx_rst_pp_power { 251 + uint64_t u64; 252 + struct cvmx_rst_pp_power_s { 253 + #ifdef __BIG_ENDIAN_BITFIELD 254 + uint64_t reserved_48_63:16; 255 + uint64_t gate:48; 256 + #else 257 + uint64_t gate:48; 258 + uint64_t reserved_48_63:16; 259 + #endif 260 + } s; 261 + struct cvmx_rst_pp_power_cn70xx { 262 + #ifdef __BIG_ENDIAN_BITFIELD 263 + uint64_t reserved_4_63:60; 264 + uint64_t gate:4; 265 + #else 266 + uint64_t gate:4; 267 + uint64_t reserved_4_63:60; 268 + #endif 269 + } cn70xx; 270 + struct cvmx_rst_pp_power_cn70xx cn70xxp1; 271 + struct cvmx_rst_pp_power_s cn78xx; 272 + }; 273 + 274 + union cvmx_rst_soft_prstx { 275 + uint64_t u64; 276 + struct cvmx_rst_soft_prstx_s { 277 + #ifdef __BIG_ENDIAN_BITFIELD 278 + uint64_t reserved_1_63:63; 279 + uint64_t soft_prst:1; 280 + #else 281 + uint64_t soft_prst:1; 282 + uint64_t reserved_1_63:63; 283 + #endif 284 + } s; 285 + struct cvmx_rst_soft_prstx_s cn70xx; 286 + struct cvmx_rst_soft_prstx_s cn70xxp1; 287 + struct cvmx_rst_soft_prstx_s cn78xx; 288 + }; 289 + 290 + union cvmx_rst_soft_rst { 291 + uint64_t u64; 292 + struct cvmx_rst_soft_rst_s { 293 + #ifdef __BIG_ENDIAN_BITFIELD 294 + uint64_t reserved_1_63:63; 295 + uint64_t soft_rst:1; 296 + #else 297 + uint64_t soft_rst:1; 298 + uint64_t reserved_1_63:63; 299 + #endif 300 + } s; 301 + struct cvmx_rst_soft_rst_s cn70xx; 302 + struct cvmx_rst_soft_rst_s cn70xxp1; 303 + struct cvmx_rst_soft_rst_s cn78xx; 304 + }; 305 + 306 + #endif
+85 -22
arch/mips/include/asm/octeon/octeon-model.h
··· 45 */ 46 47 #define OCTEON_FAMILY_MASK 0x00ffff00 48 49 /* Flag bits in top byte */ 50 /* Ignores revision in model checks */ ··· 64 #define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 65 /* Match all cnf7XXX Octeon models. */ 66 #define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000 67 68 /* 69 * CNF7XXX models with new revision encoding 70 */ 71 #define OCTEON_CNF71XX_PASS1_0 0x000d9400 72 73 #define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION) 74 #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) ··· 121 #define OCTEON_CN68XX_PASS1_1 0x000d9101 122 #define OCTEON_CN68XX_PASS1_2 0x000d9102 123 #define OCTEON_CN68XX_PASS2_0 0x000d9108 124 125 #define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION) 126 #define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) ··· 148 #define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 149 #define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) 150 151 #define OCTEON_CN61XX_PASS1_0 0x000d9300 152 153 #define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION) 154 #define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 155 156 /* 157 * CN5XXX models with new revision encoding ··· 171 #define OCTEON_CN58XX_PASS2_2 0x000d030a 172 #define OCTEON_CN58XX_PASS2_3 0x000d030b 173 174 - #define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION) 175 #define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 176 #define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) 177 #define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X ··· 268 #define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION) 269 #define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS) 270 #define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS) 271 - 272 - /* These are used to cover entire families of 
OCTEON processors */ 273 - #define OCTEON_FAM_1 (OCTEON_CN3XXX) 274 - #define OCTEON_FAM_PLUS (OCTEON_CN5XXX) 275 - #define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS) 276 - #define OCTEON_FAM_2 (OCTEON_CN6XXX) 277 278 /* The revision byte (low byte) has two different encodings. 279 * CN3XXX: ··· 281 * <4>: alternate package 282 * <3:0>: revision 283 * 284 - * CN5XXX: 285 * 286 * bits 287 * <7>: reserved (0) ··· 300 /* CN5XXX and later use different layout of bits in the revision ID field */ 301 #define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK 302 #define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f 303 - #define OCTEON_58XX_MODEL_MASK 0x00ffffc0 304 #define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK) 305 - #define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8) 306 #define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0 307 308 - /* forward declarations */ 309 static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure)); 310 static inline uint64_t cvmx_read_csr(uint64_t csr_addr); 311 312 #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z))) 313 314 /* NOTE: This for internal use only! 
*/ 315 #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \ 316 ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \ ··· 339 ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \ 340 && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \ 341 ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \ 342 - && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \ 343 ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \ 344 - && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \ 345 ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \ 346 - && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) || \ 347 ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \ 348 && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \ 349 ))) ··· 360 { 361 uint32_t cpuid = cvmx_get_proc_id(); 362 363 - /* 364 - * Check for special case of mismarked 3005 samples. We only 365 - * need to check if the sub model isn't being ignored 366 - */ 367 - if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) { 368 - if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34))) 369 - cpuid |= 0x10; 370 - } 371 return __OCTEON_IS_MODEL_COMPILE__(model, cpuid); 372 } 373 ··· 378 #define OCTEON_IS_COMMON_BINARY() 1 379 #undef OCTEON_MODEL 380 381 const char *__init octeon_model_get_string(uint32_t chip_id); 382 383 /* 384 * Return the octeon family, i.e., ProcessorID of the PrID register. 385 */ 386 static inline uint32_t cvmx_get_octeon_family(void) 387 {
··· 45 */ 46 47 #define OCTEON_FAMILY_MASK 0x00ffff00 48 + #define OCTEON_PRID_MASK 0x00ffffff 49 50 /* Flag bits in top byte */ 51 /* Ignores revision in model checks */ ··· 63 #define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000 64 /* Match all cnf7XXX Octeon models. */ 65 #define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000 66 + /* Match all cn7XXX Octeon models. */ 67 + #define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000 68 + #define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \ 69 + OM_MATCH_6XXX_FAMILY_MODELS | \ 70 + OM_MATCH_F7XXX_FAMILY_MODELS | \ 71 + OM_MATCH_7XXX_FAMILY_MODELS) 72 + /* 73 + * CN7XXX models with new revision encoding 74 + */ 75 + 76 + #define OCTEON_CN73XX_PASS1_0 0x000d9700 77 + #define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION) 78 + #define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \ 79 + OM_IGNORE_MINOR_REVISION) 80 + 81 + #define OCTEON_CN70XX_PASS1_0 0x000d9600 82 + #define OCTEON_CN70XX_PASS1_1 0x000d9601 83 + #define OCTEON_CN70XX_PASS1_2 0x000d9602 84 + 85 + #define OCTEON_CN70XX_PASS2_0 0x000d9608 86 + 87 + #define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION) 88 + #define OCTEON_CN70XX_PASS1_X (OCTEON_CN70XX_PASS1_0 | \ 89 + OM_IGNORE_MINOR_REVISION) 90 + #define OCTEON_CN70XX_PASS2_X (OCTEON_CN70XX_PASS2_0 | \ 91 + OM_IGNORE_MINOR_REVISION) 92 + 93 + #define OCTEON_CN71XX OCTEON_CN70XX 94 + 95 + #define OCTEON_CN78XX_PASS1_0 0x000d9500 96 + #define OCTEON_CN78XX_PASS1_1 0x000d9501 97 + #define OCTEON_CN78XX_PASS2_0 0x000d9508 98 + 99 + #define OCTEON_CN78XX (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION) 100 + #define OCTEON_CN78XX_PASS1_X (OCTEON_CN78XX_PASS1_0 | \ 101 + OM_IGNORE_MINOR_REVISION) 102 + #define OCTEON_CN78XX_PASS2_X (OCTEON_CN78XX_PASS2_0 | \ 103 + OM_IGNORE_MINOR_REVISION) 104 + 105 + #define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL) 106 107 /* 108 * CNF7XXX models with new revision encoding 109 */ 110 #define OCTEON_CNF71XX_PASS1_0 0x000d9400 111 + #define OCTEON_CNF71XX_PASS1_1 
0x000d9401 112 113 #define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION) 114 #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) ··· 79 #define OCTEON_CN68XX_PASS1_1 0x000d9101 80 #define OCTEON_CN68XX_PASS1_2 0x000d9102 81 #define OCTEON_CN68XX_PASS2_0 0x000d9108 82 + #define OCTEON_CN68XX_PASS2_1 0x000d9109 83 + #define OCTEON_CN68XX_PASS2_2 0x000d910a 84 85 #define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION) 86 #define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) ··· 104 #define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 105 #define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) 106 107 + /* CN62XX is same as CN63XX with 1 MB cache */ 108 + #define OCTEON_CN62XX OCTEON_CN63XX 109 + 110 #define OCTEON_CN61XX_PASS1_0 0x000d9300 111 + #define OCTEON_CN61XX_PASS1_1 0x000d9301 112 113 #define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION) 114 #define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 115 + 116 + /* CN60XX is same as CN61XX with 512 KB cache */ 117 + #define OCTEON_CN60XX OCTEON_CN61XX 118 119 /* 120 * CN5XXX models with new revision encoding ··· 120 #define OCTEON_CN58XX_PASS2_2 0x000d030a 121 #define OCTEON_CN58XX_PASS2_3 0x000d030b 122 123 + #define OCTEON_CN58XX (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION) 124 #define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION) 125 #define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION) 126 #define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X ··· 217 #define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION) 218 #define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS) 219 #define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS) 220 + #define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | \ 221 + 
OM_MATCH_F7XXX_FAMILY_MODELS) 222 + #define OCTEON_CN7XXX (OCTEON_CN78XX_PASS1_0 | \ 223 + OM_MATCH_7XXX_FAMILY_MODELS) 224 225 /* The revision byte (low byte) has two different encodings. 226 * CN3XXX: ··· 232 * <4>: alternate package 233 * <3:0>: revision 234 * 235 + * CN5XXX and older models: 236 * 237 * bits 238 * <7>: reserved (0) ··· 251 /* CN5XXX and later use different layout of bits in the revision ID field */ 252 #define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK 253 #define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f 254 + #define OCTEON_58XX_MODEL_MASK 0x00ffff40 255 #define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK) 256 + #define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38) 257 #define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0 258 259 static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure)); 260 static inline uint64_t cvmx_read_csr(uint64_t csr_addr); 261 262 #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z))) 263 264 + /* 265 + * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) 266 + * returns true if chip_model is identical or belong to the OCTEON 267 + * model group specified in arg_model. 268 + */ 269 /* NOTE: This for internal use only! 
*/ 270 #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \ 271 ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \ ··· 286 ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \ 287 && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \ 288 ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \ 289 + && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \ 290 ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \ 291 + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \ 292 + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \ 293 ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \ 294 + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \ 295 + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \ 296 + ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \ 297 + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \ 298 + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \ 299 + ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \ 300 + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \ 301 ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \ 302 && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \ 303 ))) ··· 300 { 301 uint32_t cpuid = cvmx_get_proc_id(); 302 303 return __OCTEON_IS_MODEL_COMPILE__(model, cpuid); 304 } 305 ··· 326 #define OCTEON_IS_COMMON_BINARY() 1 327 #undef OCTEON_MODEL 328 329 + #define OCTEON_IS_OCTEON1() OCTEON_IS_MODEL(OCTEON_CN3XXX) 330 + #define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX) 331 + #define OCTEON_IS_OCTEON2() \ 332 + (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX)) 333 + 334 + #define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX) 335 + 336 + 
#define OCTEON_IS_OCTEON1PLUS() (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS()) 337 + 338 const char *__init octeon_model_get_string(uint32_t chip_id); 339 340 /* 341 * Return the octeon family, i.e., ProcessorID of the PrID register. 342 + * 343 + * @return the octeon family on success, ((unint32_t)-1) on error. 344 */ 345 static inline uint32_t cvmx_get_octeon_family(void) 346 {
+118 -30
arch/mips/include/asm/octeon/octeon.h
··· 9 #define __ASM_OCTEON_OCTEON_H 10 11 #include <asm/octeon/cvmx.h> 12 13 extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size, 14 uint64_t alignment, ··· 54 #define OCTOEN_SERIAL_LEN 20 55 56 struct octeon_boot_descriptor { 57 /* Start of block referenced by assembly code - do not change! */ 58 uint32_t desc_version; 59 uint32_t desc_size; ··· 106 uint8_t mac_addr_base[6]; 107 uint8_t mac_addr_count; 108 uint64_t cvmx_desc_vaddr; 109 }; 110 111 union octeon_cvmemctl { 112 uint64_t u64; 113 struct { 114 /* RO 1 = BIST fail, 0 = BIST pass */ 115 - uint64_t tlbbist:1; 116 /* RO 1 = BIST fail, 0 = BIST pass */ 117 - uint64_t l1cbist:1; 118 /* RO 1 = BIST fail, 0 = BIST pass */ 119 - uint64_t l1dbist:1; 120 /* RO 1 = BIST fail, 0 = BIST pass */ 121 - uint64_t dcmbist:1; 122 /* RO 1 = BIST fail, 0 = BIST pass */ 123 - uint64_t ptgbist:1; 124 /* RO 1 = BIST fail, 0 = BIST pass */ 125 - uint64_t wbfbist:1; 126 /* Reserved */ 127 - uint64_t reserved:22; 128 /* R/W If set, marked write-buffer entries time out 129 * the same as as other entries; if clear, marked 130 * write-buffer entries use the maximum timeout. */ 131 - uint64_t dismarkwblongto:1; 132 /* R/W If set, a merged store does not clear the 133 * write-buffer entry timeout state. */ 134 - uint64_t dismrgclrwbto:1; 135 /* R/W Two bits that are the MSBs of the resultant 136 * CVMSEG LM word location for an IOBDMA. The other 8 137 * bits come from the SCRADDR field of the IOBDMA. */ 138 - uint64_t iobdmascrmsb:2; 139 /* R/W If set, SYNCWS and SYNCS only order marked 140 * stores; if clear, SYNCWS and SYNCS only order 141 * unmarked stores. SYNCWSMARKED has no effect when 142 * DISSYNCWS is set. */ 143 - uint64_t syncwsmarked:1; 144 /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as 145 * SYNC. */ 146 - uint64_t dissyncws:1; 147 /* R/W If set, no stall happens on write buffer 148 * full. 
*/ 149 - uint64_t diswbfst:1; 150 /* R/W If set (and SX set), supervisor-level 151 * loads/stores can use XKPHYS addresses with 152 * VA<48>==0 */ 153 - uint64_t xkmemenas:1; 154 /* R/W If set (and UX set), user-level loads/stores 155 * can use XKPHYS addresses with VA<48>==0 */ 156 - uint64_t xkmemenau:1; 157 /* R/W If set (and SX set), supervisor-level 158 * loads/stores can use XKPHYS addresses with 159 * VA<48>==1 */ 160 - uint64_t xkioenas:1; 161 /* R/W If set (and UX set), user-level loads/stores 162 * can use XKPHYS addresses with VA<48>==1 */ 163 - uint64_t xkioenau:1; 164 /* R/W If set, all stores act as SYNCW (NOMERGE must 165 * be set when this is set) RW, reset to 0. */ 166 - uint64_t allsyncw:1; 167 /* R/W If set, no stores merge, and all stores reach 168 * the coherent bus in order. */ 169 - uint64_t nomerge:1; 170 /* R/W Selects the bit in the counter used for DID 171 * time-outs 0 = 231, 1 = 230, 2 = 229, 3 = 172 * 214. Actual time-out is between 1x and 2x this 173 * interval. For example, with DIDTTO=3, expiration 174 * interval is between 16K and 32K. */ 175 - uint64_t didtto:2; 176 /* R/W If set, the (mem) CSR clock never turns off. */ 177 - uint64_t csrckalwys:1; 178 /* R/W If set, mclk never turns off. */ 179 - uint64_t mclkalwys:1; 180 /* R/W Selects the bit in the counter used for write 181 * buffer flush time-outs (WBFLT+11) is the bit 182 * position in an internal counter used to determine ··· 256 * 2x this interval. For example, with WBFLT = 0, a 257 * write buffer expires between 2K and 4K cycles after 258 * the write buffer entry is allocated. */ 259 - uint64_t wbfltime:3; 260 /* R/W If set, do not put Istream in the L2 cache. */ 261 - uint64_t istrnol2:1; 262 /* R/W The write buffer threshold. */ 263 - uint64_t wbthresh:4; 264 /* Reserved */ 265 - uint64_t reserved2:2; 266 /* R/W If set, CVMSEG is available for loads/stores in 267 * kernel/debug mode. 
*/ 268 - uint64_t cvmsegenak:1; 269 /* R/W If set, CVMSEG is available for loads/stores in 270 * supervisor mode. */ 271 - uint64_t cvmsegenas:1; 272 /* R/W If set, CVMSEG is available for loads/stores in 273 * user mode. */ 274 - uint64_t cvmsegenau:1; 275 /* R/W Size of local memory in cache blocks, 54 (6912 276 * bytes) is max legal value. */ 277 - uint64_t lmemsz:6; 278 } s; 279 }; 280 ··· 299 cvmx_read64_uint32(address ^ 4); 300 } 301 302 303 /** 304 * Read a 32bit value from the Octeon NPI register space
··· 9 #define __ASM_OCTEON_OCTEON_H 10 11 #include <asm/octeon/cvmx.h> 12 + #include <asm/bitfield.h> 13 14 extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size, 15 uint64_t alignment, ··· 53 #define OCTOEN_SERIAL_LEN 20 54 55 struct octeon_boot_descriptor { 56 + #ifdef __BIG_ENDIAN_BITFIELD 57 /* Start of block referenced by assembly code - do not change! */ 58 uint32_t desc_version; 59 uint32_t desc_size; ··· 104 uint8_t mac_addr_base[6]; 105 uint8_t mac_addr_count; 106 uint64_t cvmx_desc_vaddr; 107 + #else 108 + uint32_t desc_size; 109 + uint32_t desc_version; 110 + uint64_t stack_top; 111 + uint64_t heap_base; 112 + uint64_t heap_end; 113 + /* Only used by bootloader */ 114 + uint64_t entry_point; 115 + uint64_t desc_vaddr; 116 + /* End of This block referenced by assembly code - do not change! */ 117 + uint32_t stack_size; 118 + uint32_t exception_base_addr; 119 + uint32_t argc; 120 + uint32_t heap_size; 121 + /* 122 + * Argc count for application. 123 + * Warning low bit scrambled in little-endian. 124 + */ 125 + uint32_t argv[OCTEON_ARGV_MAX_ARGS]; 126 + 127 + #define BOOT_FLAG_INIT_CORE (1 << 0) 128 + #define OCTEON_BL_FLAG_DEBUG (1 << 1) 129 + #define OCTEON_BL_FLAG_NO_MAGIC (1 << 2) 130 + /* If set, use uart1 for console */ 131 + #define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3) 132 + /* If set, use PCI console */ 133 + #define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4) 134 + /* Call exit on break on serial port */ 135 + #define OCTEON_BL_FLAG_BREAK (1 << 5) 136 + 137 + uint32_t core_mask; 138 + uint32_t flags; 139 + /* physical address of free memory descriptor block. */ 140 + uint32_t phy_mem_desc_addr; 141 + /* DRAM size in megabyes. */ 142 + uint32_t dram_size; 143 + /* CPU clock speed, in hz. */ 144 + uint32_t eclock_hz; 145 + /* used to pass flags from app to debugger. */ 146 + uint32_t debugger_flags_base_addr; 147 + /* SPI4 clock in hz. */ 148 + uint32_t spi_clock_hz; 149 + /* DRAM clock speed, in hz. 
*/ 150 + uint32_t dclock_hz; 151 + uint8_t chip_rev_minor; 152 + uint8_t chip_rev_major; 153 + uint16_t chip_type; 154 + uint8_t board_rev_minor; 155 + uint8_t board_rev_major; 156 + uint16_t board_type; 157 + 158 + uint64_t unused1[4]; /* Not even filled in by bootloader. */ 159 + 160 + uint64_t cvmx_desc_vaddr; 161 + #endif 162 }; 163 164 union octeon_cvmemctl { 165 uint64_t u64; 166 struct { 167 /* RO 1 = BIST fail, 0 = BIST pass */ 168 + __BITFIELD_FIELD(uint64_t tlbbist:1, 169 /* RO 1 = BIST fail, 0 = BIST pass */ 170 + __BITFIELD_FIELD(uint64_t l1cbist:1, 171 /* RO 1 = BIST fail, 0 = BIST pass */ 172 + __BITFIELD_FIELD(uint64_t l1dbist:1, 173 /* RO 1 = BIST fail, 0 = BIST pass */ 174 + __BITFIELD_FIELD(uint64_t dcmbist:1, 175 /* RO 1 = BIST fail, 0 = BIST pass */ 176 + __BITFIELD_FIELD(uint64_t ptgbist:1, 177 /* RO 1 = BIST fail, 0 = BIST pass */ 178 + __BITFIELD_FIELD(uint64_t wbfbist:1, 179 /* Reserved */ 180 + __BITFIELD_FIELD(uint64_t reserved:17, 181 + /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU. 182 + * This field selects between the TLB replacement policies: 183 + * bitmask LRU or NLU. Bitmask LRU maintains a mask of 184 + * recently used TLB entries and avoids them as new entries 185 + * are allocated. NLU simply guarantees that the next 186 + * allocation is not the last used TLB entry. */ 187 + __BITFIELD_FIELD(uint64_t tlbnlu:1, 188 + /* OCTEON II - Selects the bit in the counter used for 189 + * releasing a PAUSE. This counter trips every 2(8+PAUSETIME) 190 + * cycles. If not already released, the cnMIPS II core will 191 + * always release a given PAUSE instruction within 192 + * 2(8+PAUSETIME). If the counter trip happens to line up, 193 + * the cnMIPS II core may release the PAUSE instantly. 
*/ 194 + __BITFIELD_FIELD(uint64_t pausetime:3, 195 + /* OCTEON II - This field is an extension of 196 + * CvmMemCtl[DIDTTO] */ 197 + __BITFIELD_FIELD(uint64_t didtto2:1, 198 /* R/W If set, marked write-buffer entries time out 199 * the same as as other entries; if clear, marked 200 * write-buffer entries use the maximum timeout. */ 201 + __BITFIELD_FIELD(uint64_t dismarkwblongto:1, 202 /* R/W If set, a merged store does not clear the 203 * write-buffer entry timeout state. */ 204 + __BITFIELD_FIELD(uint64_t dismrgclrwbto:1, 205 /* R/W Two bits that are the MSBs of the resultant 206 * CVMSEG LM word location for an IOBDMA. The other 8 207 * bits come from the SCRADDR field of the IOBDMA. */ 208 + __BITFIELD_FIELD(uint64_t iobdmascrmsb:2, 209 /* R/W If set, SYNCWS and SYNCS only order marked 210 * stores; if clear, SYNCWS and SYNCS only order 211 * unmarked stores. SYNCWSMARKED has no effect when 212 * DISSYNCWS is set. */ 213 + __BITFIELD_FIELD(uint64_t syncwsmarked:1, 214 /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as 215 * SYNC. */ 216 + __BITFIELD_FIELD(uint64_t dissyncws:1, 217 /* R/W If set, no stall happens on write buffer 218 * full. */ 219 + __BITFIELD_FIELD(uint64_t diswbfst:1, 220 /* R/W If set (and SX set), supervisor-level 221 * loads/stores can use XKPHYS addresses with 222 * VA<48>==0 */ 223 + __BITFIELD_FIELD(uint64_t xkmemenas:1, 224 /* R/W If set (and UX set), user-level loads/stores 225 * can use XKPHYS addresses with VA<48>==0 */ 226 + __BITFIELD_FIELD(uint64_t xkmemenau:1, 227 /* R/W If set (and SX set), supervisor-level 228 * loads/stores can use XKPHYS addresses with 229 * VA<48>==1 */ 230 + __BITFIELD_FIELD(uint64_t xkioenas:1, 231 /* R/W If set (and UX set), user-level loads/stores 232 * can use XKPHYS addresses with VA<48>==1 */ 233 + __BITFIELD_FIELD(uint64_t xkioenau:1, 234 /* R/W If set, all stores act as SYNCW (NOMERGE must 235 * be set when this is set) RW, reset to 0. 
*/ 236 + __BITFIELD_FIELD(uint64_t allsyncw:1, 237 /* R/W If set, no stores merge, and all stores reach 238 * the coherent bus in order. */ 239 + __BITFIELD_FIELD(uint64_t nomerge:1, 240 /* R/W Selects the bit in the counter used for DID 241 * time-outs 0 = 231, 1 = 230, 2 = 229, 3 = 242 * 214. Actual time-out is between 1x and 2x this 243 * interval. For example, with DIDTTO=3, expiration 244 * interval is between 16K and 32K. */ 245 + __BITFIELD_FIELD(uint64_t didtto:2, 246 /* R/W If set, the (mem) CSR clock never turns off. */ 247 + __BITFIELD_FIELD(uint64_t csrckalwys:1, 248 /* R/W If set, mclk never turns off. */ 249 + __BITFIELD_FIELD(uint64_t mclkalwys:1, 250 /* R/W Selects the bit in the counter used for write 251 * buffer flush time-outs (WBFLT+11) is the bit 252 * position in an internal counter used to determine ··· 182 * 2x this interval. For example, with WBFLT = 0, a 183 * write buffer expires between 2K and 4K cycles after 184 * the write buffer entry is allocated. */ 185 + __BITFIELD_FIELD(uint64_t wbfltime:3, 186 /* R/W If set, do not put Istream in the L2 cache. */ 187 + __BITFIELD_FIELD(uint64_t istrnol2:1, 188 /* R/W The write buffer threshold. */ 189 + __BITFIELD_FIELD(uint64_t wbthresh:4, 190 /* Reserved */ 191 + __BITFIELD_FIELD(uint64_t reserved2:2, 192 /* R/W If set, CVMSEG is available for loads/stores in 193 * kernel/debug mode. */ 194 + __BITFIELD_FIELD(uint64_t cvmsegenak:1, 195 /* R/W If set, CVMSEG is available for loads/stores in 196 * supervisor mode. */ 197 + __BITFIELD_FIELD(uint64_t cvmsegenas:1, 198 /* R/W If set, CVMSEG is available for loads/stores in 199 * user mode. */ 200 + __BITFIELD_FIELD(uint64_t cvmsegenau:1, 201 /* R/W Size of local memory in cache blocks, 54 (6912 202 * bytes) is max legal value. 
*/ 203 + __BITFIELD_FIELD(uint64_t lmemsz:6, 204 + ;))))))))))))))))))))))))))))))))) 205 } s; 206 }; 207 ··· 224 cvmx_read64_uint32(address ^ 4); 225 } 226 227 + /* Octeon multiplier save/restore routines from octeon_switch.S */ 228 + void octeon_mult_save(void); 229 + void octeon_mult_restore(void); 230 + void octeon_mult_save_end(void); 231 + void octeon_mult_restore_end(void); 232 + void octeon_mult_save3(void); 233 + void octeon_mult_save3_end(void); 234 + void octeon_mult_save2(void); 235 + void octeon_mult_save2_end(void); 236 + void octeon_mult_restore3(void); 237 + void octeon_mult_restore3_end(void); 238 + void octeon_mult_restore2(void); 239 + void octeon_mult_restore2_end(void); 240 241 /** 242 * Read a 32bit value from the Octeon NPI register space
+2
arch/mips/include/asm/pci.h
··· 121 } 122 #endif 123 124 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index 125 126 static inline int pci_proc_domain(struct pci_bus *bus) ··· 129 struct pci_controller *hose = bus->sysdata; 130 return hose->need_domain_info; 131 } 132 133 #endif /* __KERNEL__ */ 134
··· 121 } 122 #endif 123 124 + #ifdef CONFIG_PCI_DOMAINS 125 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index 126 127 static inline int pci_proc_domain(struct pci_bus *bus) ··· 128 struct pci_controller *hose = bus->sysdata; 129 return hose->need_domain_info; 130 } 131 + #endif /* CONFIG_PCI_DOMAINS */ 132 133 #endif /* __KERNEL__ */ 134
+32 -51
arch/mips/include/asm/pgtable-bits.h
··· 35 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 36 37 /* 38 - * The following bits are directly used by the TLB hardware 39 */ 40 #define _PAGE_GLOBAL_SHIFT 0 41 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) ··· 60 #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) 61 #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 62 63 - #define _PAGE_SILENT_READ _PAGE_VALID 64 - #define _PAGE_SILENT_WRITE _PAGE_DIRTY 65 - 66 #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) 67 68 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 69 70 /* 71 - * The following are implemented by software 72 */ 73 - #define _PAGE_PRESENT_SHIFT 0 74 - #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 75 - #define _PAGE_READ_SHIFT 1 76 - #define _PAGE_READ (1 << _PAGE_READ_SHIFT) 77 - #define _PAGE_WRITE_SHIFT 2 78 - #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) 79 - #define _PAGE_ACCESSED_SHIFT 3 80 - #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) 81 - #define _PAGE_MODIFIED_SHIFT 4 82 - #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 83 84 /* 85 - * And these are the hardware TLB bits 86 */ 87 - #define _PAGE_GLOBAL_SHIFT 8 88 - #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 89 - #define _PAGE_VALID_SHIFT 9 90 - #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 91 - #define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */ 92 - #define _PAGE_DIRTY_SHIFT 10 93 #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) 94 - #define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT) 95 - #define _CACHE_UNCACHED_SHIFT 11 96 #define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) 97 - #define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT) 98 99 - #else /* 'Normal' r4K case */ 100 /* 101 * When using the RI/XI bit support, we have 13 bits of flags below 102 * the physical address. The RI/XI bits are placed such that a SRL 5 ··· 104 105 /* 106 * The following bits are implemented in software 107 - * 108 - * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi. 
109 */ 110 - #define _PAGE_PRESENT_SHIFT (0) 111 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 112 #define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) 113 #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) ··· 120 /* huge tlb page */ 121 #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) 122 #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) 123 - #else 124 - #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) 125 - #define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ 126 - #endif 127 - 128 - #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 129 - /* huge tlb page */ 130 #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) 131 #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) 132 #else 133 #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) 134 #define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ 135 #endif ··· 139 140 #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) 141 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 142 - 143 #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) 144 #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 145 - /* synonym */ 146 - #define _PAGE_SILENT_READ (_PAGE_VALID) 147 - 148 - /* The MIPS dirty bit */ 149 #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) 150 #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) 151 - #define _PAGE_SILENT_WRITE (_PAGE_DIRTY) 152 - 153 #define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) 154 #define _CACHE_MASK (7 << _CACHE_SHIFT) 155 ··· 150 151 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */ 152 153 - #ifndef _PFN_SHIFT 154 - #define _PFN_SHIFT PAGE_SHIFT 155 - #endif 156 #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) 157 158 #ifndef _PAGE_NO_READ ··· 161 #endif 162 #ifndef _PAGE_NO_EXEC 163 #define _PAGE_NO_EXEC ({BUG(); 0; }) 164 - #endif 165 - #ifndef _PAGE_GLOBAL_SHIFT 166 - #define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL) 167 #endif 168 169 ··· 246 #endif 247 248 #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 
0 : _PAGE_READ)) 249 - #define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED) 250 251 - #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK) 252 253 #endif /* _ASM_PGTABLE_BITS_H */
··· 35 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) 36 37 /* 38 + * The following bits are implemented by the TLB hardware 39 */ 40 #define _PAGE_GLOBAL_SHIFT 0 41 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) ··· 60 #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) 61 #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 62 63 #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3) 64 65 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) 66 67 /* 68 + * The following bits are implemented in software 69 */ 70 + #define _PAGE_PRESENT_SHIFT (0) 71 + #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 72 + #define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1) 73 + #define _PAGE_READ (1 << _PAGE_READ_SHIFT) 74 + #define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1) 75 + #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT) 76 + #define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1) 77 + #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT) 78 + #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1) 79 + #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT) 80 81 /* 82 + * The following bits are implemented by the TLB hardware 83 */ 84 + #define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4) 85 + #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 86 + #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) 87 + #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 88 + #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) 89 #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) 90 + #define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1) 91 #define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT) 92 + #define _CACHE_MASK _CACHE_UNCACHED 93 94 + #define _PFN_SHIFT PAGE_SHIFT 95 + 96 + #else 97 /* 98 * When using the RI/XI bit support, we have 13 bits of flags below 99 * the physical address. 
The RI/XI bits are placed such that a SRL 5 ··· 107 108 /* 109 * The following bits are implemented in software 110 */ 111 + #define _PAGE_PRESENT_SHIFT 0 112 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT) 113 #define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1) 114 #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; }) ··· 125 /* huge tlb page */ 126 #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1) 127 #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT) 128 #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1) 129 #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT) 130 #else 131 + #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT) 132 + #define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */ 133 #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT) 134 #define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */ 135 #endif ··· 149 150 #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1) 151 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT) 152 #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1) 153 #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT) 154 #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1) 155 #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT) 156 #define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1) 157 #define _CACHE_MASK (7 << _CACHE_SHIFT) 158 ··· 167 168 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */ 169 170 + #define _PAGE_SILENT_READ _PAGE_VALID 171 + #define _PAGE_SILENT_WRITE _PAGE_DIRTY 172 + 173 #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1)) 174 175 #ifndef _PAGE_NO_READ ··· 178 #endif 179 #ifndef _PAGE_NO_EXEC 180 #define _PAGE_NO_EXEC ({BUG(); 0; }) 181 #endif 182 183 ··· 266 #endif 267 268 #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ)) 269 + #define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED) 270 271 + #define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \ 272 + _PFN_MASK | _CACHE_MASK) 273 274 #endif /* _ASM_PGTABLE_BITS_H */
+27 -19
arch/mips/include/asm/pgtable.h
··· 99 100 #define htw_stop() \ 101 do { \ 102 - if (cpu_has_htw) \ 103 - write_c0_pwctl(read_c0_pwctl() & \ 104 - ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ 105 } while(0) 106 107 #define htw_start() \ 108 do { \ 109 - if (cpu_has_htw) \ 110 - write_c0_pwctl(read_c0_pwctl() | \ 111 - (1 << MIPS_PWCTL_PWEN_SHIFT)); \ 112 - } while(0) 113 - 114 - 115 - #define htw_reset() \ 116 - do { \ 117 if (cpu_has_htw) { \ 118 - htw_stop(); \ 119 - back_to_back_c0_hazard(); \ 120 - htw_start(); \ 121 - back_to_back_c0_hazard(); \ 122 } \ 123 } while(0) 124 125 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, 126 pte_t pteval); ··· 159 { 160 pte_t null = __pte(0); 161 162 /* Preserve global status for the pair */ 163 if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) 164 null.pte_low = null.pte_high = _PAGE_GLOBAL; 165 166 set_pte_at(mm, addr, ptep, null); 167 - htw_reset(); 168 } 169 #else 170 ··· 195 196 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 197 { 198 #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) 199 /* Preserve global status for the pair */ 200 if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) ··· 203 else 204 #endif 205 set_pte_at(mm, addr, ptep, __pte(0)); 206 - htw_reset(); 207 } 208 #endif 209 ··· 342 return pte; 343 } 344 345 - #ifdef _PAGE_HUGE 346 static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } 347 348 static inline pte_t pte_mkhuge(pte_t pte) ··· 350 pte_val(pte) |= _PAGE_HUGE; 351 return pte; 352 } 353 - #endif /* _PAGE_HUGE */ 354 #endif 355 static inline int pte_special(pte_t pte) { return 0; } 356 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
··· 99 100 #define htw_stop() \ 101 do { \ 102 + unsigned long flags; \ 103 + \ 104 + if (cpu_has_htw) { \ 105 + local_irq_save(flags); \ 106 + if(!raw_current_cpu_data.htw_seq++) { \ 107 + write_c0_pwctl(read_c0_pwctl() & \ 108 + ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ 109 + back_to_back_c0_hazard(); \ 110 + } \ 111 + local_irq_restore(flags); \ 112 + } \ 113 } while(0) 114 115 #define htw_start() \ 116 do { \ 117 + unsigned long flags; \ 118 + \ 119 if (cpu_has_htw) { \ 120 + local_irq_save(flags); \ 121 + if (!--raw_current_cpu_data.htw_seq) { \ 122 + write_c0_pwctl(read_c0_pwctl() | \ 123 + (1 << MIPS_PWCTL_PWEN_SHIFT)); \ 124 + back_to_back_c0_hazard(); \ 125 + } \ 126 + local_irq_restore(flags); \ 127 } \ 128 } while(0) 129 + 130 131 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, 132 pte_t pteval); ··· 153 { 154 pte_t null = __pte(0); 155 156 + htw_stop(); 157 /* Preserve global status for the pair */ 158 if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) 159 null.pte_low = null.pte_high = _PAGE_GLOBAL; 160 161 set_pte_at(mm, addr, ptep, null); 162 + htw_start(); 163 } 164 #else 165 ··· 188 189 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 190 { 191 + htw_stop(); 192 #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) 193 /* Preserve global status for the pair */ 194 if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) ··· 195 else 196 #endif 197 set_pte_at(mm, addr, ptep, __pte(0)); 198 + htw_start(); 199 } 200 #endif 201 ··· 334 return pte; 335 } 336 337 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 338 static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } 339 340 static inline pte_t pte_mkhuge(pte_t pte) ··· 342 pte_val(pte) |= _PAGE_HUGE; 343 return pte; 344 } 345 + #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 346 #endif 347 static inline int pte_special(pte_t pte) { return 0; } 348 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
+13 -6
arch/mips/include/asm/processor.h
··· 54 #define TASK_SIZE 0x7fff8000UL 55 #endif 56 57 - #ifdef __KERNEL__ 58 #define STACK_TOP_MAX TASK_SIZE 59 - #endif 60 61 #define TASK_IS_32BIT_ADDR 1 62 ··· 71 #define TASK_SIZE32 0x7fff8000UL 72 #define TASK_SIZE64 0x10000000000UL 73 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) 74 - 75 - #ifdef __KERNEL__ 76 #define STACK_TOP_MAX TASK_SIZE64 77 - #endif 78 - 79 80 #define TASK_SIZE_OF(tsk) \ 81 (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) ··· 205 unsigned long cop2_gfm_poly; 206 /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ 207 unsigned long cop2_gfm_result[2]; 208 }; 209 #define COP2_INIT \ 210 .cp2 = {0,}, ··· 394 #define prefetchw(x) __builtin_prefetch((x), 1, 1) 395 396 #endif 397 398 #endif /* _ASM_PROCESSOR_H */
··· 54 #define TASK_SIZE 0x7fff8000UL 55 #endif 56 57 #define STACK_TOP_MAX TASK_SIZE 58 59 #define TASK_IS_32BIT_ADDR 1 60 ··· 73 #define TASK_SIZE32 0x7fff8000UL 74 #define TASK_SIZE64 0x10000000000UL 75 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) 76 #define STACK_TOP_MAX TASK_SIZE64 77 78 #define TASK_SIZE_OF(tsk) \ 79 (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) ··· 211 unsigned long cop2_gfm_poly; 212 /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ 213 unsigned long cop2_gfm_result[2]; 214 + /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */ 215 + unsigned long cop2_sha3[2]; 216 }; 217 #define COP2_INIT \ 218 .cp2 = {0,}, ··· 398 #define prefetchw(x) __builtin_prefetch((x), 1, 1) 399 400 #endif 401 + 402 + /* 403 + * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options 404 + * to the prctl syscall. 405 + */ 406 + extern int mips_get_process_fp_mode(struct task_struct *task); 407 + extern int mips_set_process_fp_mode(struct task_struct *task, 408 + unsigned int value); 409 + 410 + #define GET_FP_MODE(task) mips_get_process_fp_mode(task) 411 + #define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value) 412 413 #endif /* _ASM_PROCESSOR_H */
-7
arch/mips/include/asm/prom.h
··· 24 extern void __dt_setup_arch(void *bph); 25 extern int __dt_register_buses(const char *bus0, const char *bus1); 26 27 - #define dt_setup_arch(sym) \ 28 - ({ \ 29 - extern char __dtb_##sym##_begin[]; \ 30 - \ 31 - __dt_setup_arch(__dtb_##sym##_begin); \ 32 - }) 33 - 34 #else /* CONFIG_OF */ 35 static inline void device_tree_init(void) { } 36 #endif /* CONFIG_OF */
··· 24 extern void __dt_setup_arch(void *bph); 25 extern int __dt_register_buses(const char *bus0, const char *bus1); 26 27 #else /* CONFIG_OF */ 28 static inline void device_tree_init(void) { } 29 #endif /* CONFIG_OF */
+2 -2
arch/mips/include/asm/ptrace.h
··· 40 unsigned long cp0_cause; 41 unsigned long cp0_epc; 42 #ifdef CONFIG_CPU_CAVIUM_OCTEON 43 - unsigned long long mpl[3]; /* MTM{0,1,2} */ 44 - unsigned long long mtp[3]; /* MTP{0,1,2} */ 45 #endif 46 } __aligned(8); 47
··· 40 unsigned long cp0_cause; 41 unsigned long cp0_epc; 42 #ifdef CONFIG_CPU_CAVIUM_OCTEON 43 + unsigned long long mpl[6]; /* MTM{0-5} */ 44 + unsigned long long mtp[6]; /* MTP{0-5} */ 45 #endif 46 } __aligned(8); 47
+148 -2
arch/mips/include/asm/r4kcache.h
··· 14 15 #include <asm/asm.h> 16 #include <asm/cacheops.h> 17 #include <asm/cpu-features.h> 18 #include <asm/cpu-type.h> 19 #include <asm/mipsmtregs.h> ··· 40 __asm__ __volatile__( \ 41 " .set push \n" \ 42 " .set noreorder \n" \ 43 - " .set arch=r4000 \n" \ 44 " cache %0, %1 \n" \ 45 " .set pop \n" \ 46 : \ ··· 148 __asm__ __volatile__( \ 149 " .set push \n" \ 150 " .set noreorder \n" \ 151 - " .set arch=r4000 \n" \ 152 "1: cache %0, (%1) \n" \ 153 "2: .set pop \n" \ 154 " .section __ex_table,\"a\" \n" \ ··· 219 cache_op(Page_Invalidate_T, addr); 220 } 221 222 #define cache16_unroll32(base,op) \ 223 __asm__ __volatile__( \ 224 " .set push \n" \ ··· 323 : \ 324 : "r" (base), \ 325 "i" (op)); 326 327 /* 328 * Perform the cache operation specified by op using a user mode virtual
··· 14 15 #include <asm/asm.h> 16 #include <asm/cacheops.h> 17 + #include <asm/compiler.h> 18 #include <asm/cpu-features.h> 19 #include <asm/cpu-type.h> 20 #include <asm/mipsmtregs.h> ··· 39 __asm__ __volatile__( \ 40 " .set push \n" \ 41 " .set noreorder \n" \ 42 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 43 " cache %0, %1 \n" \ 44 " .set pop \n" \ 45 : \ ··· 147 __asm__ __volatile__( \ 148 " .set push \n" \ 149 " .set noreorder \n" \ 150 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 151 "1: cache %0, (%1) \n" \ 152 "2: .set pop \n" \ 153 " .section __ex_table,\"a\" \n" \ ··· 218 cache_op(Page_Invalidate_T, addr); 219 } 220 221 + #ifndef CONFIG_CPU_MIPSR6 222 #define cache16_unroll32(base,op) \ 223 __asm__ __volatile__( \ 224 " .set push \n" \ ··· 321 : \ 322 : "r" (base), \ 323 "i" (op)); 324 + 325 + #else 326 + /* 327 + * MIPS R6 changed the cache opcode and moved to a 8-bit offset field. 328 + * This means we now need to increment the base register before we flush 329 + * more cache lines 330 + */ 331 + #define cache16_unroll32(base,op) \ 332 + __asm__ __volatile__( \ 333 + " .set push\n" \ 334 + " .set noreorder\n" \ 335 + " .set mips64r6\n" \ 336 + " .set noat\n" \ 337 + " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \ 338 + " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \ 339 + " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \ 340 + " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \ 341 + " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \ 342 + " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \ 343 + " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \ 344 + " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \ 345 + " addiu $1, $0, 0x100 \n" \ 346 + " cache %1, 0x000($1); cache %1, 0x010($1)\n" \ 347 + " cache %1, 0x020($1); cache %1, 0x030($1)\n" \ 348 + " cache %1, 0x040($1); cache %1, 0x050($1)\n" \ 349 + " cache %1, 0x060($1); cache %1, 0x070($1)\n" \ 350 + " cache %1, 0x080($1); cache %1, 0x090($1)\n" \ 351 + " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \ 352 + " cache %1, 0x0c0($1); cache 
%1, 0x0d0($1)\n" \ 353 + " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \ 354 + " .set pop\n" \ 355 + : \ 356 + : "r" (base), \ 357 + "i" (op)); 358 + 359 + #define cache32_unroll32(base,op) \ 360 + __asm__ __volatile__( \ 361 + " .set push\n" \ 362 + " .set noreorder\n" \ 363 + " .set mips64r6\n" \ 364 + " .set noat\n" \ 365 + " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \ 366 + " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \ 367 + " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \ 368 + " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \ 369 + " addiu $1, %0, 0x100\n" \ 370 + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 371 + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 372 + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 373 + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 374 + " addiu $1, $1, 0x100\n" \ 375 + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 376 + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 377 + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 378 + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 379 + " addiu $1, $1, 0x100\n" \ 380 + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 381 + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 382 + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 383 + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 384 + " .set pop\n" \ 385 + : \ 386 + : "r" (base), \ 387 + "i" (op)); 388 + 389 + #define cache64_unroll32(base,op) \ 390 + __asm__ __volatile__( \ 391 + " .set push\n" \ 392 + " .set noreorder\n" \ 393 + " .set mips64r6\n" \ 394 + " .set noat\n" \ 395 + " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \ 396 + " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \ 397 + " addiu $1, %0, 0x100\n" \ 398 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 399 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 400 + " addiu $1, %0, 0x100\n" \ 401 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 402 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 403 + " addiu $1, %0, 0x100\n" \ 404 + " cache %1, 0x000($1); cache %1, 
0x040($1)\n" \ 405 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 406 + " addiu $1, %0, 0x100\n" \ 407 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 408 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 409 + " addiu $1, %0, 0x100\n" \ 410 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 411 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 412 + " addiu $1, %0, 0x100\n" \ 413 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 414 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 415 + " addiu $1, %0, 0x100\n" \ 416 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 417 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 418 + " .set pop\n" \ 419 + : \ 420 + : "r" (base), \ 421 + "i" (op)); 422 + 423 + #define cache128_unroll32(base,op) \ 424 + __asm__ __volatile__( \ 425 + " .set push\n" \ 426 + " .set noreorder\n" \ 427 + " .set mips64r6\n" \ 428 + " .set noat\n" \ 429 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 430 + " addiu $1, %0, 0x100\n" \ 431 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 432 + " addiu $1, %0, 0x100\n" \ 433 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 434 + " addiu $1, %0, 0x100\n" \ 435 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 436 + " addiu $1, %0, 0x100\n" \ 437 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 438 + " addiu $1, %0, 0x100\n" \ 439 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 440 + " addiu $1, %0, 0x100\n" \ 441 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 442 + " addiu $1, %0, 0x100\n" \ 443 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 444 + " addiu $1, %0, 0x100\n" \ 445 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 446 + " addiu $1, %0, 0x100\n" \ 447 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 448 + " addiu $1, %0, 0x100\n" \ 449 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 450 + " addiu $1, %0, 0x100\n" \ 451 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 452 + " addiu $1, %0, 0x100\n" \ 453 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 454 + " addiu $1, 
%0, 0x100\n" \ 455 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 456 + " addiu $1, %0, 0x100\n" \ 457 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 458 + " addiu $1, %0, 0x100\n" \ 459 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 460 + " addiu $1, %0, 0x100\n" \ 461 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 462 + " addiu $1, %0, 0x100\n" \ 463 + " .set pop\n" \ 464 + : \ 465 + : "r" (base), \ 466 + "i" (op)); 467 + #endif /* CONFIG_CPU_MIPSR6 */ 468 469 /* 470 * Perform the cache operation specified by op using a user mode virtual
+6 -2
arch/mips/include/asm/sgialib.h
··· 11 #ifndef _ASM_SGIALIB_H 12 #define _ASM_SGIALIB_H 13 14 #include <asm/sgiarcs.h> 15 16 extern struct linux_romvec *romvec; ··· 71 extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); 72 73 /* Misc. routines. */ 74 - extern VOID ArcReboot(VOID) __attribute__((noreturn)); 75 - extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn)); 76 extern VOID ArcFlushAllCaches(VOID); 77 extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID); 78
··· 11 #ifndef _ASM_SGIALIB_H 12 #define _ASM_SGIALIB_H 13 14 + #include <linux/compiler.h> 15 #include <asm/sgiarcs.h> 16 17 extern struct linux_romvec *romvec; ··· 70 extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); 71 72 /* Misc. routines. */ 73 + extern VOID ArcHalt(VOID) __noreturn; 74 + extern VOID ArcPowerDown(VOID) __noreturn; 75 + extern VOID ArcRestart(VOID) __noreturn; 76 + extern VOID ArcReboot(VOID) __noreturn; 77 + extern VOID ArcEnterInteractiveMode(VOID) __noreturn; 78 extern VOID ArcFlushAllCaches(VOID); 79 extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID); 80
-29
arch/mips/include/asm/siginfo.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle 7 - * Copyright (C) 2000, 2001 Silicon Graphics, Inc. 8 - */ 9 - #ifndef _ASM_SIGINFO_H 10 - #define _ASM_SIGINFO_H 11 - 12 - #include <uapi/asm/siginfo.h> 13 - 14 - 15 - /* 16 - * Duplicated here because of <asm-generic/siginfo.h> braindamage ... 17 - */ 18 - #include <linux/string.h> 19 - 20 - static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) 21 - { 22 - if (from->si_code < 0) 23 - memcpy(to, from, sizeof(*to)); 24 - else 25 - /* _sigchld is currently the largest know union member */ 26 - memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); 27 - } 28 - 29 - #endif /* _ASM_SIGINFO_H */
···
+26 -29
arch/mips/include/asm/spinlock.h
··· 89 " subu %[ticket], %[ticket], 1 \n" 90 " .previous \n" 91 " .set pop \n" 92 - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 93 [serving_now_ptr] "+m" (lock->h.serving_now), 94 [ticket] "=&r" (tmp), 95 [my_ticket] "=&r" (my_ticket) ··· 122 " subu %[ticket], %[ticket], 1 \n" 123 " .previous \n" 124 " .set pop \n" 125 - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 126 [serving_now_ptr] "+m" (lock->h.serving_now), 127 [ticket] "=&r" (tmp), 128 [my_ticket] "=&r" (my_ticket) ··· 164 " li %[ticket], 0 \n" 165 " .previous \n" 166 " .set pop \n" 167 - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 168 [ticket] "=&r" (tmp), 169 [my_ticket] "=&r" (tmp2), 170 [now_serving] "=&r" (tmp3) ··· 188 " li %[ticket], 0 \n" 189 " .previous \n" 190 " .set pop \n" 191 - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 192 [ticket] "=&r" (tmp), 193 [my_ticket] "=&r" (tmp2), 194 [now_serving] "=&r" (tmp3) ··· 235 " beqzl %1, 1b \n" 236 " nop \n" 237 " .set reorder \n" 238 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 239 - : GCC_OFF12_ASM() (rw->lock) 240 : "memory"); 241 } else { 242 do { ··· 245 " bltz %1, 1b \n" 246 " addu %1, 1 \n" 247 "2: sc %1, %0 \n" 248 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 249 - : GCC_OFF12_ASM() (rw->lock) 250 : "memory"); 251 } while (unlikely(!tmp)); 252 } ··· 254 smp_llsc_mb(); 255 } 256 257 - /* Note the use of sub, not subu which will make the kernel die with an 258 - overflow exception if we ever try to unlock an rwlock that is already 259 - unlocked or is being held by a writer. 
*/ 260 static inline void arch_read_unlock(arch_rwlock_t *rw) 261 { 262 unsigned int tmp; ··· 263 if (R10000_LLSC_WAR) { 264 __asm__ __volatile__( 265 "1: ll %1, %2 # arch_read_unlock \n" 266 - " sub %1, 1 \n" 267 " sc %1, %0 \n" 268 " beqzl %1, 1b \n" 269 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 270 - : GCC_OFF12_ASM() (rw->lock) 271 : "memory"); 272 } else { 273 do { 274 __asm__ __volatile__( 275 "1: ll %1, %2 # arch_read_unlock \n" 276 - " sub %1, 1 \n" 277 " sc %1, %0 \n" 278 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 279 - : GCC_OFF12_ASM() (rw->lock) 280 : "memory"); 281 } while (unlikely(!tmp)); 282 } ··· 296 " beqzl %1, 1b \n" 297 " nop \n" 298 " .set reorder \n" 299 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 300 - : GCC_OFF12_ASM() (rw->lock) 301 : "memory"); 302 } else { 303 do { ··· 306 " bnez %1, 1b \n" 307 " lui %1, 0x8000 \n" 308 "2: sc %1, %0 \n" 309 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 310 - : GCC_OFF12_ASM() (rw->lock) 311 : "memory"); 312 } while (unlikely(!tmp)); 313 } ··· 346 __WEAK_LLSC_MB 347 " li %2, 1 \n" 348 "2: \n" 349 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 350 - : GCC_OFF12_ASM() (rw->lock) 351 : "memory"); 352 } else { 353 __asm__ __volatile__( ··· 363 __WEAK_LLSC_MB 364 " li %2, 1 \n" 365 "2: \n" 366 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 367 - : GCC_OFF12_ASM() (rw->lock) 368 : "memory"); 369 } 370 ··· 390 " li %2, 1 \n" 391 " .set reorder \n" 392 "2: \n" 393 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 394 - : GCC_OFF12_ASM() (rw->lock) 395 : "memory"); 396 } else { 397 do { ··· 403 " sc %1, %0 \n" 404 " li %2, 1 \n" 405 "2: \n" 406 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), 407 "=&r" (ret) 408 - : GCC_OFF12_ASM() (rw->lock) 409 : "memory"); 410 } while (unlikely(!tmp)); 411
··· 89 " subu %[ticket], %[ticket], 1 \n" 90 " .previous \n" 91 " .set pop \n" 92 + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), 93 [serving_now_ptr] "+m" (lock->h.serving_now), 94 [ticket] "=&r" (tmp), 95 [my_ticket] "=&r" (my_ticket) ··· 122 " subu %[ticket], %[ticket], 1 \n" 123 " .previous \n" 124 " .set pop \n" 125 + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), 126 [serving_now_ptr] "+m" (lock->h.serving_now), 127 [ticket] "=&r" (tmp), 128 [my_ticket] "=&r" (my_ticket) ··· 164 " li %[ticket], 0 \n" 165 " .previous \n" 166 " .set pop \n" 167 + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), 168 [ticket] "=&r" (tmp), 169 [my_ticket] "=&r" (tmp2), 170 [now_serving] "=&r" (tmp3) ··· 188 " li %[ticket], 0 \n" 189 " .previous \n" 190 " .set pop \n" 191 + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), 192 [ticket] "=&r" (tmp), 193 [my_ticket] "=&r" (tmp2), 194 [now_serving] "=&r" (tmp3) ··· 235 " beqzl %1, 1b \n" 236 " nop \n" 237 " .set reorder \n" 238 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 239 + : GCC_OFF_SMALL_ASM() (rw->lock) 240 : "memory"); 241 } else { 242 do { ··· 245 " bltz %1, 1b \n" 246 " addu %1, 1 \n" 247 "2: sc %1, %0 \n" 248 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 249 + : GCC_OFF_SMALL_ASM() (rw->lock) 250 : "memory"); 251 } while (unlikely(!tmp)); 252 } ··· 254 smp_llsc_mb(); 255 } 256 257 static inline void arch_read_unlock(arch_rwlock_t *rw) 258 { 259 unsigned int tmp; ··· 266 if (R10000_LLSC_WAR) { 267 __asm__ __volatile__( 268 "1: ll %1, %2 # arch_read_unlock \n" 269 + " addiu %1, 1 \n" 270 " sc %1, %0 \n" 271 " beqzl %1, 1b \n" 272 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 273 + : GCC_OFF_SMALL_ASM() (rw->lock) 274 : "memory"); 275 } else { 276 do { 277 __asm__ __volatile__( 278 "1: ll %1, %2 # arch_read_unlock \n" 279 + " addiu %1, -1 \n" 280 " sc %1, %0 \n" 281 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 282 + : GCC_OFF_SMALL_ASM() (rw->lock) 283 : "memory"); 284 } while 
(unlikely(!tmp)); 285 } ··· 299 " beqzl %1, 1b \n" 300 " nop \n" 301 " .set reorder \n" 302 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 303 + : GCC_OFF_SMALL_ASM() (rw->lock) 304 : "memory"); 305 } else { 306 do { ··· 309 " bnez %1, 1b \n" 310 " lui %1, 0x8000 \n" 311 "2: sc %1, %0 \n" 312 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 313 + : GCC_OFF_SMALL_ASM() (rw->lock) 314 : "memory"); 315 } while (unlikely(!tmp)); 316 } ··· 349 __WEAK_LLSC_MB 350 " li %2, 1 \n" 351 "2: \n" 352 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 353 + : GCC_OFF_SMALL_ASM() (rw->lock) 354 : "memory"); 355 } else { 356 __asm__ __volatile__( ··· 366 __WEAK_LLSC_MB 367 " li %2, 1 \n" 368 "2: \n" 369 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 370 + : GCC_OFF_SMALL_ASM() (rw->lock) 371 : "memory"); 372 } 373 ··· 393 " li %2, 1 \n" 394 " .set reorder \n" 395 "2: \n" 396 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 397 + : GCC_OFF_SMALL_ASM() (rw->lock) 398 : "memory"); 399 } else { 400 do { ··· 406 " sc %1, %0 \n" 407 " li %2, 1 \n" 408 "2: \n" 409 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), 410 "=&r" (ret) 411 + : GCC_OFF_SMALL_ASM() (rw->lock) 412 : "memory"); 413 } while (unlikely(!tmp)); 414
+2 -2
arch/mips/include/asm/spram.h
··· 1 #ifndef _MIPS_SPRAM_H 2 #define _MIPS_SPRAM_H 3 4 - #ifdef CONFIG_CPU_MIPSR2 5 extern __init void spram_config(void); 6 #else 7 static inline void spram_config(void) { }; 8 - #endif /* CONFIG_CPU_MIPSR2 */ 9 10 #endif /* _MIPS_SPRAM_H */
··· 1 #ifndef _MIPS_SPRAM_H 2 #define _MIPS_SPRAM_H 3 4 + #if defined(CONFIG_MIPS_SPRAM) 5 extern __init void spram_config(void); 6 #else 7 static inline void spram_config(void) { }; 8 + #endif /* CONFIG_MIPS_SPRAM */ 9 10 #endif /* _MIPS_SPRAM_H */
+4 -4
arch/mips/include/asm/stackframe.h
··· 40 LONG_S v1, PT_HI(sp) 41 mflhxu v1 42 LONG_S v1, PT_ACX(sp) 43 - #else 44 mfhi v1 45 #endif 46 #ifdef CONFIG_32BIT ··· 50 LONG_S $10, PT_R10(sp) 51 LONG_S $11, PT_R11(sp) 52 LONG_S $12, PT_R12(sp) 53 - #ifndef CONFIG_CPU_HAS_SMARTMIPS 54 LONG_S v1, PT_HI(sp) 55 mflo v1 56 #endif ··· 58 LONG_S $14, PT_R14(sp) 59 LONG_S $15, PT_R15(sp) 60 LONG_S $24, PT_R24(sp) 61 - #ifndef CONFIG_CPU_HAS_SMARTMIPS 62 LONG_S v1, PT_LO(sp) 63 #endif 64 #ifdef CONFIG_CPU_CAVIUM_OCTEON ··· 226 mtlhx $24 227 LONG_L $24, PT_LO(sp) 228 mtlhx $24 229 - #else 230 LONG_L $24, PT_LO(sp) 231 mtlo $24 232 LONG_L $24, PT_HI(sp)
··· 40 LONG_S v1, PT_HI(sp) 41 mflhxu v1 42 LONG_S v1, PT_ACX(sp) 43 + #elif !defined(CONFIG_CPU_MIPSR6) 44 mfhi v1 45 #endif 46 #ifdef CONFIG_32BIT ··· 50 LONG_S $10, PT_R10(sp) 51 LONG_S $11, PT_R11(sp) 52 LONG_S $12, PT_R12(sp) 53 + #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) 54 LONG_S v1, PT_HI(sp) 55 mflo v1 56 #endif ··· 58 LONG_S $14, PT_R14(sp) 59 LONG_S $15, PT_R15(sp) 60 LONG_S $24, PT_R24(sp) 61 + #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) 62 LONG_S v1, PT_LO(sp) 63 #endif 64 #ifdef CONFIG_CPU_CAVIUM_OCTEON ··· 226 mtlhx $24 227 LONG_L $24, PT_LO(sp) 228 mtlhx $24 229 + #elif !defined(CONFIG_CPU_MIPSR6) 230 LONG_L $24, PT_LO(sp) 231 mtlo $24 232 LONG_L $24, PT_HI(sp)
+6 -3
arch/mips/include/asm/switch_to.h
··· 75 #endif 76 77 #define __clear_software_ll_bit() \ 78 - do { \ 79 - if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ 80 - ll_bit = 0; \ 81 } while (0) 82 83 #define switch_to(prev, next, last) \
··· 75 #endif 76 77 #define __clear_software_ll_bit() \ 78 + do { if (cpu_has_rw_llb) { \ 79 + write_c0_lladdr(0); \ 80 + } else { \ 81 + if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\ 82 + ll_bit = 0; \ 83 + } \ 84 } while (0) 85 86 #define switch_to(prev, next, last) \
+1 -1
arch/mips/include/asm/thread_info.h
··· 28 unsigned long tp_value; /* thread pointer */ 29 __u32 cpu; /* current CPU */ 30 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 - 32 mm_segment_t addr_limit; /* 33 * thread address space limit: 34 * 0x7fffffff for user-thead
··· 28 unsigned long tp_value; /* thread pointer */ 29 __u32 cpu; /* current CPU */ 30 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 + int r2_emul_return; /* 1 => Returning from R2 emulator */ 32 mm_segment_t addr_limit; /* 33 * thread address space limit: 34 * 0x7fffffff for user-thead
+14 -10
arch/mips/include/uapi/asm/inst.h
··· 21 enum major_op { 22 spec_op, bcond_op, j_op, jal_op, 23 beq_op, bne_op, blez_op, bgtz_op, 24 - addi_op, addiu_op, slti_op, sltiu_op, 25 andi_op, ori_op, xori_op, lui_op, 26 cop0_op, cop1_op, cop2_op, cop1x_op, 27 beql_op, bnel_op, blezl_op, bgtzl_op, 28 - daddi_op, daddiu_op, ldl_op, ldr_op, 29 spec2_op, jalx_op, mdmx_op, spec3_op, 30 lb_op, lh_op, lwl_op, lw_op, 31 lbu_op, lhu_op, lwr_op, lwu_op, 32 sb_op, sh_op, swl_op, sw_op, 33 sdl_op, sdr_op, swr_op, cache_op, 34 - ll_op, lwc1_op, lwc2_op, pref_op, 35 - lld_op, ldc1_op, ldc2_op, ld_op, 36 - sc_op, swc1_op, swc2_op, major_3b_op, 37 - scd_op, sdc1_op, sdc2_op, sd_op 38 }; 39 40 /* ··· 83 swe_op = 0x1f, bshfl_op = 0x20, 84 swle_op = 0x21, swre_op = 0x22, 85 prefe_op = 0x23, dbshfl_op = 0x24, 86 - lbue_op = 0x28, lhue_op = 0x29, 87 - lbe_op = 0x2c, lhe_op = 0x2d, 88 - lle_op = 0x2e, lwe_op = 0x2f, 89 rdhwr_op = 0x3b 90 }; 91 ··· 115 mfhc_op = 0x03, mtc_op = 0x04, 116 dmtc_op = 0x05, ctc_op = 0x06, 117 mthc0_op = 0x06, mthc_op = 0x07, 118 - bc_op = 0x08, cop_op = 0x10, 119 copm_op = 0x18 120 }; 121
··· 21 enum major_op { 22 spec_op, bcond_op, j_op, jal_op, 23 beq_op, bne_op, blez_op, bgtz_op, 24 + addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op, 25 andi_op, ori_op, xori_op, lui_op, 26 cop0_op, cop1_op, cop2_op, cop1x_op, 27 beql_op, bnel_op, blezl_op, bgtzl_op, 28 + daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op, 29 spec2_op, jalx_op, mdmx_op, spec3_op, 30 lb_op, lh_op, lwl_op, lw_op, 31 lbu_op, lhu_op, lwr_op, lwu_op, 32 sb_op, sh_op, swl_op, sw_op, 33 sdl_op, sdr_op, swr_op, cache_op, 34 + ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op, 35 + lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op, 36 + sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op, 37 + scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op 38 }; 39 40 /* ··· 83 swe_op = 0x1f, bshfl_op = 0x20, 84 swle_op = 0x21, swre_op = 0x22, 85 prefe_op = 0x23, dbshfl_op = 0x24, 86 + cache6_op = 0x25, sc6_op = 0x26, 87 + scd6_op = 0x27, lbue_op = 0x28, 88 + lhue_op = 0x29, lbe_op = 0x2c, 89 + lhe_op = 0x2d, lle_op = 0x2e, 90 + lwe_op = 0x2f, pref6_op = 0x35, 91 + ll6_op = 0x36, lld6_op = 0x37, 92 rdhwr_op = 0x3b 93 }; 94 ··· 112 mfhc_op = 0x03, mtc_op = 0x04, 113 dmtc_op = 0x05, ctc_op = 0x06, 114 mthc0_op = 0x06, mthc_op = 0x07, 115 + bc_op = 0x08, bc1eqz_op = 0x09, 116 + bc1nez_op = 0x0d, cop_op = 0x10, 117 copm_op = 0x18 118 }; 119
+3 -8
arch/mips/include/uapi/asm/siginfo.h
··· 16 #define HAVE_ARCH_SIGINFO_T 17 18 /* 19 - * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked 20 - * by design ... 21 - */ 22 - #define HAVE_ARCH_COPY_SIGINFO 23 - struct siginfo; 24 - 25 - /* 26 * Careful to keep union _sifields from shifting ... 27 */ 28 #if _MIPS_SZLONG == 32 ··· 28 29 #define __ARCH_SIGSYS 30 31 - #include <asm-generic/siginfo.h> 32 33 typedef struct siginfo { 34 int si_signo; 35 int si_code; ··· 118 #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ 119 #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ 120 121 122 #endif /* _UAPI_ASM_SIGINFO_H */
··· 16 #define HAVE_ARCH_SIGINFO_T 17 18 /* 19 * Careful to keep union _sifields from shifting ... 20 */ 21 #if _MIPS_SZLONG == 32 ··· 35 36 #define __ARCH_SIGSYS 37 38 + #include <uapi/asm-generic/siginfo.h> 39 40 + /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */ 41 typedef struct siginfo { 42 int si_signo; 43 int si_code; ··· 124 #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ 125 #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ 126 127 + #include <asm-generic/siginfo.h> 128 129 #endif /* _UAPI_ASM_SIGINFO_H */
+2 -1
arch/mips/kernel/Makefile
··· 52 obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 53 obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o 54 obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o 55 - obj-$(CONFIG_CPU_MIPSR2) += spram.o 56 57 obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o 58 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o ··· 90 obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o 91 obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o 92 obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o 93 94 CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) 95
··· 52 obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 53 obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o 54 obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o 55 + obj-$(CONFIG_MIPS_SPRAM) += spram.o 56 57 obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o 58 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o ··· 90 obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o 91 obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o 92 obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o 93 + obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o 94 95 CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) 96
+2
arch/mips/kernel/asm-offsets.c
··· 97 OFFSET(TI_TP_VALUE, thread_info, tp_value); 98 OFFSET(TI_CPU, thread_info, cpu); 99 OFFSET(TI_PRE_COUNT, thread_info, preempt_count); 100 OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); 101 OFFSET(TI_REGS, thread_info, regs); 102 DEFINE(_THREAD_SIZE, THREAD_SIZE); ··· 382 OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); 383 OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); 384 OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); 385 OFFSET(THREAD_CP2, task_struct, thread.cp2); 386 OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); 387 BLANK();
··· 97 OFFSET(TI_TP_VALUE, thread_info, tp_value); 98 OFFSET(TI_CPU, thread_info, cpu); 99 OFFSET(TI_PRE_COUNT, thread_info, preempt_count); 100 + OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return); 101 OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); 102 OFFSET(TI_REGS, thread_info, regs); 103 DEFINE(_THREAD_SIZE, THREAD_SIZE); ··· 381 OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); 382 OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); 383 OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); 384 + OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3); 385 OFFSET(THREAD_CP2, task_struct, thread.cp2); 386 OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); 387 BLANK();
+250 -38
arch/mips/kernel/branch.c
··· 16 #include <asm/fpu.h> 17 #include <asm/fpu_emulator.h> 18 #include <asm/inst.h> 19 #include <asm/ptrace.h> 20 #include <asm/uaccess.h> 21 ··· 400 * @returns: -EFAULT on error and forces SIGBUS, and on success 401 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after 402 * evaluating the branch. 403 */ 404 int __compute_return_epc_for_insn(struct pt_regs *regs, 405 union mips_instruction insn) 406 { 407 - unsigned int bit, fcr31, dspcontrol; 408 long epc = regs->cp0_epc; 409 int ret = 0; 410 ··· 428 regs->regs[insn.r_format.rd] = epc + 8; 429 /* Fall through */ 430 case jr_op: 431 regs->cp0_epc = regs->regs[insn.r_format.rs]; 432 break; 433 } ··· 442 */ 443 case bcond_op: 444 switch (insn.i_format.rt) { 445 - case bltz_op: 446 case bltzl_op: 447 if ((long)regs->regs[insn.i_format.rs] < 0) { 448 epc = epc + 4 + (insn.i_format.simmediate << 2); 449 if (insn.i_format.rt == bltzl_op) ··· 455 regs->cp0_epc = epc; 456 break; 457 458 - case bgez_op: 459 case bgezl_op: 460 if ((long)regs->regs[insn.i_format.rs] >= 0) { 461 epc = epc + 4 + (insn.i_format.simmediate << 2); 462 if (insn.i_format.rt == bgezl_op) ··· 470 471 case bltzal_op: 472 case bltzall_op: 473 regs->regs[31] = epc + 8; 474 if ((long)regs->regs[insn.i_format.rs] < 0) { 475 epc = epc + 4 + (insn.i_format.simmediate << 2); 476 if (insn.i_format.rt == bltzall_op) ··· 504 505 case bgezal_op: 506 case bgezall_op: 507 regs->regs[31] = epc + 8; 508 if ((long)regs->regs[insn.i_format.rs] >= 0) { 509 epc = epc + 4 + (insn.i_format.simmediate << 2); 510 if (insn.i_format.rt == bgezall_op) ··· 538 539 case bposge32_op: 540 if (!cpu_has_dsp) 541 - goto sigill; 542 543 dspcontrol = rddsp(0x01); 544 ··· 569 /* 570 * These are conditional and in i_format. 
571 */ 572 - case beq_op: 573 case beql_op: 574 if (regs->regs[insn.i_format.rs] == 575 regs->regs[insn.i_format.rt]) { 576 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 583 regs->cp0_epc = epc; 584 break; 585 586 - case bne_op: 587 case bnel_op: 588 if (regs->regs[insn.i_format.rs] != 589 regs->regs[insn.i_format.rt]) { 590 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 597 regs->cp0_epc = epc; 598 break; 599 600 - case blez_op: /* not really i_format */ 601 - case blezl_op: 602 /* rt field assumed to be zero */ 603 if ((long)regs->regs[insn.i_format.rs] <= 0) { 604 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 632 regs->cp0_epc = epc; 633 break; 634 635 - case bgtz_op: 636 case bgtzl_op: 637 /* rt field assumed to be zero */ 638 if ((long)regs->regs[insn.i_format.rs] > 0) { 639 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 672 * And now the FPA/cp1 branch instructions. 673 */ 674 case cop1_op: 675 - preempt_disable(); 676 - if (is_fpu_owner()) 677 - fcr31 = read_32bit_cp1_register(CP1_STATUS); 678 - else 679 - fcr31 = current->thread.fpu.fcr31; 680 - preempt_enable(); 681 - 682 - bit = (insn.i_format.rt >> 2); 683 - bit += (bit != 0); 684 - bit += 23; 685 - switch (insn.i_format.rt & 3) { 686 - case 0: /* bc1f */ 687 - case 2: /* bc1fl */ 688 - if (~fcr31 & (1 << bit)) { 689 - epc = epc + 4 + (insn.i_format.simmediate << 2); 690 - if (insn.i_format.rt == 2) 691 - ret = BRANCH_LIKELY_TAKEN; 692 - } else 693 epc += 8; 694 regs->cp0_epc = epc; 695 break; 696 697 - case 1: /* bc1t */ 698 - case 3: /* bc1tl */ 699 - if (fcr31 & (1 << bit)) { 700 - epc = epc + 4 + (insn.i_format.simmediate << 2); 701 - if (insn.i_format.rt == 3) 702 - ret = BRANCH_LIKELY_TAKEN; 703 - } else 704 - epc += 8; 705 - regs->cp0_epc = epc; 706 break; 707 } 708 - break; 709 #ifdef CONFIG_CPU_CAVIUM_OCTEON 710 case lwc2_op: /* This is bbit0 on Octeon */ 711 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) ··· 781 epc += 8; 782 regs->cp0_epc = epc; 783 
break; 784 #endif 785 } 786 787 return ret; 788 789 - sigill: 790 printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); 791 force_sig(SIGBUS, current); 792 return -EFAULT; 793 } 794 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
··· 16 #include <asm/fpu.h> 17 #include <asm/fpu_emulator.h> 18 #include <asm/inst.h> 19 + #include <asm/mips-r2-to-r6-emul.h> 20 #include <asm/ptrace.h> 21 #include <asm/uaccess.h> 22 ··· 399 * @returns: -EFAULT on error and forces SIGBUS, and on success 400 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after 401 * evaluating the branch. 402 + * 403 + * MIPS R6 Compact branches and forbidden slots: 404 + * Compact branches do not throw exceptions because they do 405 + * not have delay slots. The forbidden slot instruction ($PC+4) 406 + * is only executed if the branch was not taken. Otherwise the 407 + * forbidden slot is skipped entirely. This means that the 408 + * only possible reason to be here because of a MIPS R6 compact 409 + * branch instruction is that the forbidden slot has thrown one. 410 + * In that case the branch was not taken, so the EPC can be safely 411 + * set to EPC + 8. 412 */ 413 int __compute_return_epc_for_insn(struct pt_regs *regs, 414 union mips_instruction insn) 415 { 416 + unsigned int bit, fcr31, dspcontrol, reg; 417 long epc = regs->cp0_epc; 418 int ret = 0; 419 ··· 417 regs->regs[insn.r_format.rd] = epc + 8; 418 /* Fall through */ 419 case jr_op: 420 + if (NO_R6EMU && insn.r_format.func == jr_op) 421 + goto sigill_r6; 422 regs->cp0_epc = regs->regs[insn.r_format.rs]; 423 break; 424 } ··· 429 */ 430 case bcond_op: 431 switch (insn.i_format.rt) { 432 case bltzl_op: 433 + if (NO_R6EMU) 434 + goto sigill_r6; 435 + case bltz_op: 436 if ((long)regs->regs[insn.i_format.rs] < 0) { 437 epc = epc + 4 + (insn.i_format.simmediate << 2); 438 if (insn.i_format.rt == bltzl_op) ··· 440 regs->cp0_epc = epc; 441 break; 442 443 case bgezl_op: 444 + if (NO_R6EMU) 445 + goto sigill_r6; 446 + case bgez_op: 447 if ((long)regs->regs[insn.i_format.rs] >= 0) { 448 epc = epc + 4 + (insn.i_format.simmediate << 2); 449 if (insn.i_format.rt == bgezl_op) ··· 453 454 case bltzal_op: 455 case bltzall_op: 456 + if (NO_R6EMU && (insn.i_format.rs || 457 + 
insn.i_format.rt == bltzall_op)) { 458 + ret = -SIGILL; 459 + break; 460 + } 461 regs->regs[31] = epc + 8; 462 + /* 463 + * OK we are here either because we hit a NAL 464 + * instruction or because we are emulating an 465 + * old bltzal{,l} one. Lets figure out what the 466 + * case really is. 467 + */ 468 + if (!insn.i_format.rs) { 469 + /* 470 + * NAL or BLTZAL with rs == 0 471 + * Doesn't matter if we are R6 or not. The 472 + * result is the same 473 + */ 474 + regs->cp0_epc += 4 + 475 + (insn.i_format.simmediate << 2); 476 + break; 477 + } 478 + /* Now do the real thing for non-R6 BLTZAL{,L} */ 479 if ((long)regs->regs[insn.i_format.rs] < 0) { 480 epc = epc + 4 + (insn.i_format.simmediate << 2); 481 if (insn.i_format.rt == bltzall_op) ··· 465 466 case bgezal_op: 467 case bgezall_op: 468 + if (NO_R6EMU && (insn.i_format.rs || 469 + insn.i_format.rt == bgezall_op)) { 470 + ret = -SIGILL; 471 + break; 472 + } 473 regs->regs[31] = epc + 8; 474 + /* 475 + * OK we are here either because we hit a BAL 476 + * instruction or because we are emulating an 477 + * old bgezal{,l} one. Lets figure out what the 478 + * case really is. 479 + */ 480 + if (!insn.i_format.rs) { 481 + /* 482 + * BAL or BGEZAL with rs == 0 483 + * Doesn't matter if we are R6 or not. The 484 + * result is the same 485 + */ 486 + regs->cp0_epc += 4 + 487 + (insn.i_format.simmediate << 2); 488 + break; 489 + } 490 + /* Now do the real thing for non-R6 BGEZAL{,L} */ 491 if ((long)regs->regs[insn.i_format.rs] >= 0) { 492 epc = epc + 4 + (insn.i_format.simmediate << 2); 493 if (insn.i_format.rt == bgezall_op) ··· 477 478 case bposge32_op: 479 if (!cpu_has_dsp) 480 + goto sigill_dsp; 481 482 dspcontrol = rddsp(0x01); 483 ··· 508 /* 509 * These are conditional and in i_format. 
510 */ 511 case beql_op: 512 + if (NO_R6EMU) 513 + goto sigill_r6; 514 + case beq_op: 515 if (regs->regs[insn.i_format.rs] == 516 regs->regs[insn.i_format.rt]) { 517 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 520 regs->cp0_epc = epc; 521 break; 522 523 case bnel_op: 524 + if (NO_R6EMU) 525 + goto sigill_r6; 526 + case bne_op: 527 if (regs->regs[insn.i_format.rs] != 528 regs->regs[insn.i_format.rt]) { 529 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 532 regs->cp0_epc = epc; 533 break; 534 535 + case blezl_op: /* not really i_format */ 536 + if (NO_R6EMU) 537 + goto sigill_r6; 538 + case blez_op: 539 + /* 540 + * Compact branches for R6 for the 541 + * blez and blezl opcodes. 542 + * BLEZ | rs = 0 | rt != 0 == BLEZALC 543 + * BLEZ | rs = rt != 0 == BGEZALC 544 + * BLEZ | rs != 0 | rt != 0 == BGEUC 545 + * BLEZL | rs = 0 | rt != 0 == BLEZC 546 + * BLEZL | rs = rt != 0 == BGEZC 547 + * BLEZL | rs != 0 | rt != 0 == BGEC 548 + * 549 + * For real BLEZ{,L}, rt is always 0. 550 + */ 551 + 552 + if (cpu_has_mips_r6 && insn.i_format.rt) { 553 + if ((insn.i_format.opcode == blez_op) && 554 + ((!insn.i_format.rs && insn.i_format.rt) || 555 + (insn.i_format.rs == insn.i_format.rt))) 556 + regs->regs[31] = epc + 4; 557 + regs->cp0_epc += 8; 558 + break; 559 + } 560 /* rt field assumed to be zero */ 561 if ((long)regs->regs[insn.i_format.rs] <= 0) { 562 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 544 regs->cp0_epc = epc; 545 break; 546 547 case bgtzl_op: 548 + if (NO_R6EMU) 549 + goto sigill_r6; 550 + case bgtz_op: 551 + /* 552 + * Compact branches for R6 for the 553 + * bgtz and bgtzl opcodes. 554 + * BGTZ | rs = 0 | rt != 0 == BGTZALC 555 + * BGTZ | rs = rt != 0 == BLTZALC 556 + * BGTZ | rs != 0 | rt != 0 == BLTUC 557 + * BGTZL | rs = 0 | rt != 0 == BGTZC 558 + * BGTZL | rs = rt != 0 == BLTZC 559 + * BGTZL | rs != 0 | rt != 0 == BLTC 560 + * 561 + * *ZALC varint for BGTZ &&& rt != 0 562 + * For real GTZ{,L}, rt is always 0. 
563 + */ 564 + if (cpu_has_mips_r6 && insn.i_format.rt) { 565 + if ((insn.i_format.opcode == blez_op) && 566 + ((!insn.i_format.rs && insn.i_format.rt) || 567 + (insn.i_format.rs == insn.i_format.rt))) 568 + regs->regs[31] = epc + 4; 569 + regs->cp0_epc += 8; 570 + break; 571 + } 572 + 573 /* rt field assumed to be zero */ 574 if ((long)regs->regs[insn.i_format.rs] > 0) { 575 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 560 * And now the FPA/cp1 branch instructions. 561 */ 562 case cop1_op: 563 + if (cpu_has_mips_r6 && 564 + ((insn.i_format.rs == bc1eqz_op) || 565 + (insn.i_format.rs == bc1nez_op))) { 566 + if (!used_math()) { /* First time FPU user */ 567 + ret = init_fpu(); 568 + if (ret && NO_R6EMU) { 569 + ret = -ret; 570 + break; 571 + } 572 + ret = 0; 573 + set_used_math(); 574 + } 575 + lose_fpu(1); /* Save FPU state for the emulator. */ 576 + reg = insn.i_format.rt; 577 + bit = 0; 578 + switch (insn.i_format.rs) { 579 + case bc1eqz_op: 580 + /* Test bit 0 */ 581 + if (get_fpr32(&current->thread.fpu.fpr[reg], 0) 582 + & 0x1) 583 + bit = 1; 584 + break; 585 + case bc1nez_op: 586 + /* Test bit 0 */ 587 + if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0) 588 + & 0x1)) 589 + bit = 1; 590 + break; 591 + } 592 + own_fpu(1); 593 + if (bit) 594 + epc = epc + 4 + 595 + (insn.i_format.simmediate << 2); 596 + else 597 epc += 8; 598 regs->cp0_epc = epc; 599 + 600 break; 601 + } else { 602 603 + preempt_disable(); 604 + if (is_fpu_owner()) 605 + fcr31 = read_32bit_cp1_register(CP1_STATUS); 606 + else 607 + fcr31 = current->thread.fpu.fcr31; 608 + preempt_enable(); 609 + 610 + bit = (insn.i_format.rt >> 2); 611 + bit += (bit != 0); 612 + bit += 23; 613 + switch (insn.i_format.rt & 3) { 614 + case 0: /* bc1f */ 615 + case 2: /* bc1fl */ 616 + if (~fcr31 & (1 << bit)) { 617 + epc = epc + 4 + 618 + (insn.i_format.simmediate << 2); 619 + if (insn.i_format.rt == 2) 620 + ret = BRANCH_LIKELY_TAKEN; 621 + } else 622 + epc += 8; 623 + regs->cp0_epc = epc; 624 + break; 625 
+ 626 + case 1: /* bc1t */ 627 + case 3: /* bc1tl */ 628 + if (fcr31 & (1 << bit)) { 629 + epc = epc + 4 + 630 + (insn.i_format.simmediate << 2); 631 + if (insn.i_format.rt == 3) 632 + ret = BRANCH_LIKELY_TAKEN; 633 + } else 634 + epc += 8; 635 + regs->cp0_epc = epc; 636 + break; 637 + } 638 break; 639 } 640 #ifdef CONFIG_CPU_CAVIUM_OCTEON 641 case lwc2_op: /* This is bbit0 on Octeon */ 642 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) ··· 626 epc += 8; 627 regs->cp0_epc = epc; 628 break; 629 + #else 630 + case bc6_op: 631 + /* Only valid for MIPS R6 */ 632 + if (!cpu_has_mips_r6) { 633 + ret = -SIGILL; 634 + break; 635 + } 636 + regs->cp0_epc += 8; 637 + break; 638 + case balc6_op: 639 + if (!cpu_has_mips_r6) { 640 + ret = -SIGILL; 641 + break; 642 + } 643 + /* Compact branch: BALC */ 644 + regs->regs[31] = epc + 4; 645 + epc += 4 + (insn.i_format.simmediate << 2); 646 + regs->cp0_epc = epc; 647 + break; 648 + case beqzcjic_op: 649 + if (!cpu_has_mips_r6) { 650 + ret = -SIGILL; 651 + break; 652 + } 653 + /* Compact branch: BEQZC || JIC */ 654 + regs->cp0_epc += 8; 655 + break; 656 + case bnezcjialc_op: 657 + if (!cpu_has_mips_r6) { 658 + ret = -SIGILL; 659 + break; 660 + } 661 + /* Compact branch: BNEZC || JIALC */ 662 + if (insn.i_format.rs) 663 + regs->regs[31] = epc + 4; 664 + regs->cp0_epc += 8; 665 + break; 666 #endif 667 + case cbcond0_op: 668 + case cbcond1_op: 669 + /* Only valid for MIPS R6 */ 670 + if (!cpu_has_mips_r6) { 671 + ret = -SIGILL; 672 + break; 673 + } 674 + /* 675 + * Compact branches: 676 + * bovc, beqc, beqzalc, bnvc, bnec, bnezlac 677 + */ 678 + if (insn.i_format.rt && !insn.i_format.rs) 679 + regs->regs[31] = epc + 4; 680 + regs->cp0_epc += 8; 681 + break; 682 } 683 684 return ret; 685 686 + sigill_dsp: 687 printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); 688 force_sig(SIGBUS, current); 689 + return -EFAULT; 690 + sigill_r6: 691 + pr_info("%s: R2 branch but r2-to-r6 emulator is not preset - 
sending SIGILL.\n", 692 + current->comm); 693 + force_sig(SIGILL, current); 694 return -EFAULT; 695 } 696 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
+2 -6
arch/mips/kernel/cevt-r4k.c
··· 11 #include <linux/percpu.h> 12 #include <linux/smp.h> 13 #include <linux/irq.h> 14 - #include <linux/irqchip/mips-gic.h> 15 16 #include <asm/time.h> 17 #include <asm/cevt-r4k.h> ··· 39 40 irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 41 { 42 - const int r2 = cpu_has_mips_r2; 43 struct clock_event_device *cd; 44 int cpu = smp_processor_id(); 45 ··· 84 */ 85 static int c0_compare_int_pending(void) 86 { 87 - #ifdef CONFIG_MIPS_GIC 88 - if (gic_present) 89 - return gic_get_timer_pending(); 90 - #endif 91 return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); 92 } 93
··· 11 #include <linux/percpu.h> 12 #include <linux/smp.h> 13 #include <linux/irq.h> 14 15 #include <asm/time.h> 16 #include <asm/cevt-r4k.h> ··· 40 41 irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 42 { 43 + const int r2 = cpu_has_mips_r2_r6; 44 struct clock_event_device *cd; 45 int cpu = smp_processor_id(); 46 ··· 85 */ 86 static int c0_compare_int_pending(void) 87 { 88 + /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */ 89 return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); 90 } 91
+8 -8
arch/mips/kernel/cps-vec.S
··· 99 xori t2, t1, 0x7 100 beqz t2, 1f 101 li t3, 32 102 - addi t1, t1, 1 103 sllv t1, t3, t1 104 1: /* At this point t1 == I-cache sets per way */ 105 _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ 106 - addi t2, t2, 1 107 mul t1, t1, t0 108 mul t1, t1, t2 109 ··· 126 xori t2, t1, 0x7 127 beqz t2, 1f 128 li t3, 32 129 - addi t1, t1, 1 130 sllv t1, t3, t1 131 1: /* At this point t1 == D-cache sets per way */ 132 _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ 133 - addi t2, t2, 1 134 mul t1, t1, t0 135 mul t1, t1, t2 136 ··· 250 mfc0 t0, CP0_MVPCONF0 251 srl t0, t0, MVPCONF0_PVPE_SHIFT 252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) 253 - addi t7, t0, 1 254 255 /* If there's only 1, we're done */ 256 beqz t0, 2f ··· 280 mttc0 t0, CP0_TCHALT 281 282 /* Next VPE */ 283 - addi t5, t5, 1 284 slt t0, t5, t7 285 bnez t0, 1b 286 nop ··· 317 mfc0 t1, CP0_MVPCONF0 318 srl t1, t1, MVPCONF0_PVPE_SHIFT 319 andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT 320 - addi t1, t1, 1 321 322 /* Calculate a mask for the VPE ID from EBase.CPUNum */ 323 clz t1, t1 ··· 424 425 /* Next VPE */ 426 2: srl t6, t6, 1 427 - addi t5, t5, 1 428 bnez t6, 1b 429 nop 430
··· 99 xori t2, t1, 0x7 100 beqz t2, 1f 101 li t3, 32 102 + addiu t1, t1, 1 103 sllv t1, t3, t1 104 1: /* At this point t1 == I-cache sets per way */ 105 _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ 106 + addiu t2, t2, 1 107 mul t1, t1, t0 108 mul t1, t1, t2 109 ··· 126 xori t2, t1, 0x7 127 beqz t2, 1f 128 li t3, 32 129 + addiu t1, t1, 1 130 sllv t1, t3, t1 131 1: /* At this point t1 == D-cache sets per way */ 132 _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ 133 + addiu t2, t2, 1 134 mul t1, t1, t0 135 mul t1, t1, t2 136 ··· 250 mfc0 t0, CP0_MVPCONF0 251 srl t0, t0, MVPCONF0_PVPE_SHIFT 252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) 253 + addiu t7, t0, 1 254 255 /* If there's only 1, we're done */ 256 beqz t0, 2f ··· 280 mttc0 t0, CP0_TCHALT 281 282 /* Next VPE */ 283 + addiu t5, t5, 1 284 slt t0, t5, t7 285 bnez t0, 1b 286 nop ··· 317 mfc0 t1, CP0_MVPCONF0 318 srl t1, t1, MVPCONF0_PVPE_SHIFT 319 andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT 320 + addiu t1, t1, 1 321 322 /* Calculate a mask for the VPE ID from EBase.CPUNum */ 323 clz t1, t1 ··· 424 425 /* Next VPE */ 426 2: srl t6, t6, 1 427 + addiu t5, t5, 1 428 bnez t6, 1b 429 nop 430
+7 -4
arch/mips/kernel/cpu-bugs64.c
··· 244 panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); 245 } 246 247 - int daddiu_bug = -1; 248 249 static inline void check_daddiu(void) 250 { ··· 314 315 void __init check_bugs64_early(void) 316 { 317 - check_mult_sh(); 318 - check_daddiu(); 319 } 320 321 void __init check_bugs64(void) 322 { 323 - check_daddi(); 324 }
··· 244 panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); 245 } 246 247 + int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1; 248 249 static inline void check_daddiu(void) 250 { ··· 314 315 void __init check_bugs64_early(void) 316 { 317 + if (!config_enabled(CONFIG_CPU_MIPSR6)) { 318 + check_mult_sh(); 319 + check_daddiu(); 320 + } 321 } 322 323 void __init check_bugs64(void) 324 { 325 + if (!config_enabled(CONFIG_CPU_MIPSR6)) 326 + check_daddi(); 327 }
+27 -6
arch/mips/kernel/cpu-probe.c
··· 237 c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; 238 break; 239 240 case MIPS_CPU_ISA_M32R2: 241 c->isa_level |= MIPS_CPU_ISA_M32R2; 242 case MIPS_CPU_ISA_M32R1: ··· 333 case 1: 334 set_isa(c, MIPS_CPU_ISA_M32R2); 335 break; 336 default: 337 goto unknown; 338 } ··· 347 break; 348 case 1: 349 set_isa(c, MIPS_CPU_ISA_M64R2); 350 break; 351 default: 352 goto unknown; ··· 437 if (config3 & MIPS_CONF3_MSA) 438 c->ases |= MIPS_ASE_MSA; 439 /* Only tested on 32-bit cores */ 440 - if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) 441 c->options |= MIPS_CPU_HTW; 442 443 return config3 & MIPS_CONF_M; 444 } ··· 514 c->options |= MIPS_CPU_EVA; 515 if (config5 & MIPS_CONF5_MRP) 516 c->options |= MIPS_CPU_MAAR; 517 518 return config5 & MIPS_CONF_M; 519 } ··· 550 551 if (cpu_has_rixi) { 552 /* Enable the RIXI exceptions */ 553 - write_c0_pagegrain(read_c0_pagegrain() | PG_IEC); 554 back_to_back_c0_hazard(); 555 /* Verify the IEC bit is set */ 556 if (read_c0_pagegrain() & PG_IEC) ··· 558 } 559 560 #ifndef CONFIG_MIPS_CPS 561 - if (cpu_has_mips_r2) { 562 c->core = get_ebase_cpunum(); 563 if (cpu_has_mipsmt) 564 c->core >>= fls(core_nvpes()) - 1; ··· 913 { 914 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 915 switch (c->processor_id & PRID_IMP_MASK) { 916 case PRID_IMP_4KC: 917 c->cputype = CPU_4KC; 918 c->writecombine = _CACHE_UNCACHED; ··· 1367 if (c->options & MIPS_CPU_FPU) { 1368 c->fpu_id = cpu_get_fpu_id(); 1369 1370 - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1371 - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { 1372 if (c->fpu_id & MIPS_FPIR_3D) 1373 c->ases |= MIPS_ASE_MIPS3D; 1374 if (c->fpu_id & MIPS_FPIR_FREP) ··· 1375 } 1376 } 1377 1378 - if (cpu_has_mips_r2) { 1379 c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; 1380 /* R2 has Performance Counter Interrupt indicator */ 1381 c->options |= MIPS_CPU_PCI;
··· 237 c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; 238 break; 239 240 + /* R6 incompatible with everything else */ 241 + case MIPS_CPU_ISA_M64R6: 242 + c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; 243 + case MIPS_CPU_ISA_M32R6: 244 + c->isa_level |= MIPS_CPU_ISA_M32R6; 245 + /* Break here so we don't add incompatible ISAs */ 246 + break; 247 case MIPS_CPU_ISA_M32R2: 248 c->isa_level |= MIPS_CPU_ISA_M32R2; 249 case MIPS_CPU_ISA_M32R1: ··· 326 case 1: 327 set_isa(c, MIPS_CPU_ISA_M32R2); 328 break; 329 + case 2: 330 + set_isa(c, MIPS_CPU_ISA_M32R6); 331 + break; 332 default: 333 goto unknown; 334 } ··· 337 break; 338 case 1: 339 set_isa(c, MIPS_CPU_ISA_M64R2); 340 + break; 341 + case 2: 342 + set_isa(c, MIPS_CPU_ISA_M64R6); 343 break; 344 default: 345 goto unknown; ··· 424 if (config3 & MIPS_CONF3_MSA) 425 c->ases |= MIPS_ASE_MSA; 426 /* Only tested on 32-bit cores */ 427 + if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) { 428 + c->htw_seq = 0; 429 c->options |= MIPS_CPU_HTW; 430 + } 431 432 return config3 & MIPS_CONF_M; 433 } ··· 499 c->options |= MIPS_CPU_EVA; 500 if (config5 & MIPS_CONF5_MRP) 501 c->options |= MIPS_CPU_MAAR; 502 + if (config5 & MIPS_CONF5_LLB) 503 + c->options |= MIPS_CPU_RW_LLB; 504 505 return config5 & MIPS_CONF_M; 506 } ··· 533 534 if (cpu_has_rixi) { 535 /* Enable the RIXI exceptions */ 536 + set_c0_pagegrain(PG_IEC); 537 back_to_back_c0_hazard(); 538 /* Verify the IEC bit is set */ 539 if (read_c0_pagegrain() & PG_IEC) ··· 541 } 542 543 #ifndef CONFIG_MIPS_CPS 544 + if (cpu_has_mips_r2_r6) { 545 c->core = get_ebase_cpunum(); 546 if (cpu_has_mipsmt) 547 c->core >>= fls(core_nvpes()) - 1; ··· 896 { 897 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 898 switch (c->processor_id & PRID_IMP_MASK) { 899 + case PRID_IMP_QEMU_GENERIC: 900 + c->writecombine = _CACHE_UNCACHED; 901 + c->cputype = CPU_QEMU_GENERIC; 902 + __cpu_name[cpu] = "MIPS GENERIC QEMU"; 903 + break; 904 case PRID_IMP_4KC: 905 c->cputype = CPU_4KC; 
906 c->writecombine = _CACHE_UNCACHED; ··· 1345 if (c->options & MIPS_CPU_FPU) { 1346 c->fpu_id = cpu_get_fpu_id(); 1347 1348 + if (c->isa_level & cpu_has_mips_r) { 1349 if (c->fpu_id & MIPS_FPIR_3D) 1350 c->ases |= MIPS_ASE_MIPS3D; 1351 if (c->fpu_id & MIPS_FPIR_FREP) ··· 1354 } 1355 } 1356 1357 + if (cpu_has_mips_r2_r6) { 1358 c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; 1359 /* R2 has Performance Counter Interrupt indicator */ 1360 c->options |= MIPS_CPU_PCI;
+189 -116
arch/mips/kernel/elf.c
··· 11 #include <linux/elf.h> 12 #include <linux/sched.h> 13 14 enum { 15 - FP_ERROR = -1, 16 - FP_DOUBLE_64A = -2, 17 }; 18 19 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, 20 bool is_interp, struct arch_elf_state *state) 21 { 22 - struct elf32_hdr *ehdr = _ehdr; 23 - struct elf32_phdr *phdr = _phdr; 24 struct mips_elf_abiflags_v0 abiflags; 25 int ret; 26 27 - if (config_enabled(CONFIG_64BIT) && 28 - (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) 29 - return 0; 30 - if (phdr->p_type != PT_MIPS_ABIFLAGS) 31 - return 0; 32 - if (phdr->p_filesz < sizeof(abiflags)) 33 - return -EINVAL; 34 35 - ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags, 36 - sizeof(abiflags)); 37 if (ret < 0) 38 return ret; 39 if (ret != sizeof(abiflags)) ··· 131 return 0; 132 } 133 134 - static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi) 135 { 136 /* If the ABI requirement is provided, simply return that */ 137 - if (in_abi != -1) 138 return in_abi; 139 140 - /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ 141 - if (ehdr->e_flags & EF_MIPS_FP64) 142 - return MIPS_ABI_FP_64; 143 - 144 - /* Default to MIPS_ABI_FP_DOUBLE */ 145 - return MIPS_ABI_FP_DOUBLE; 146 } 147 148 int arch_check_elf(void *_ehdr, bool has_interpreter, 149 struct arch_elf_state *state) 150 { 151 struct elf32_hdr *ehdr = _ehdr; 152 - unsigned fp_abi, interp_fp_abi, abi0, abi1; 153 154 - /* Ignore non-O32 binaries */ 155 - if (config_enabled(CONFIG_64BIT) && 156 - (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) 157 return 0; 158 159 - fp_abi = get_fp_abi(ehdr, state->fp_abi); 160 161 if (has_interpreter) { 162 - interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); 163 164 abi0 = min(fp_abi, interp_fp_abi); 165 abi1 = max(fp_abi, interp_fp_abi); ··· 162 abi0 = abi1 = fp_abi; 163 } 164 165 - state->overall_abi = FP_ERROR; 166 167 - if (abi0 == abi1) { 168 - state->overall_abi = abi0; 169 - } else if (abi0 == MIPS_ABI_FP_ANY) { 170 - state->overall_abi = abi1; 171 - } else if (abi0 
== MIPS_ABI_FP_DOUBLE) { 172 - switch (abi1) { 173 - case MIPS_ABI_FP_XX: 174 - state->overall_abi = MIPS_ABI_FP_DOUBLE; 175 - break; 176 - 177 - case MIPS_ABI_FP_64A: 178 - state->overall_abi = FP_DOUBLE_64A; 179 - break; 180 - } 181 - } else if (abi0 == MIPS_ABI_FP_SINGLE || 182 - abi0 == MIPS_ABI_FP_SOFT) { 183 - /* Cannot link with other ABIs */ 184 - } else if (abi0 == MIPS_ABI_FP_OLD_64) { 185 - switch (abi1) { 186 - case MIPS_ABI_FP_XX: 187 - case MIPS_ABI_FP_64: 188 - case MIPS_ABI_FP_64A: 189 - state->overall_abi = MIPS_ABI_FP_64; 190 - break; 191 - } 192 - } else if (abi0 == MIPS_ABI_FP_XX || 193 - abi0 == MIPS_ABI_FP_64 || 194 - abi0 == MIPS_ABI_FP_64A) { 195 - state->overall_abi = MIPS_ABI_FP_64; 196 - } 197 - 198 - switch (state->overall_abi) { 199 - case MIPS_ABI_FP_64: 200 - case MIPS_ABI_FP_64A: 201 - case FP_DOUBLE_64A: 202 - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 203 - return -ELIBBAD; 204 - break; 205 - 206 - case FP_ERROR: 207 return -ELIBBAD; 208 - } 209 210 return 0; 211 } 212 213 void mips_set_personality_fp(struct arch_elf_state *state) 214 { 215 - if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { 216 - /* 217 - * Use hybrid FPRs for all code which can correctly execute 218 - * with that mode. 
219 - */ 220 - switch (state->overall_abi) { 221 - case MIPS_ABI_FP_DOUBLE: 222 - case MIPS_ABI_FP_SINGLE: 223 - case MIPS_ABI_FP_SOFT: 224 - case MIPS_ABI_FP_XX: 225 - case MIPS_ABI_FP_ANY: 226 - /* FR=1, FRE=1 */ 227 - clear_thread_flag(TIF_32BIT_FPREGS); 228 - set_thread_flag(TIF_HYBRID_FPREGS); 229 - return; 230 - } 231 - } 232 233 - switch (state->overall_abi) { 234 - case MIPS_ABI_FP_DOUBLE: 235 - case MIPS_ABI_FP_SINGLE: 236 - case MIPS_ABI_FP_SOFT: 237 - /* FR=0 */ 238 - set_thread_flag(TIF_32BIT_FPREGS); 239 - clear_thread_flag(TIF_HYBRID_FPREGS); 240 break; 241 - 242 - case FP_DOUBLE_64A: 243 - /* FR=1, FRE=1 */ 244 - clear_thread_flag(TIF_32BIT_FPREGS); 245 - set_thread_flag(TIF_HYBRID_FPREGS); 246 break; 247 - 248 - case MIPS_ABI_FP_64: 249 - case MIPS_ABI_FP_64A: 250 - /* FR=1, FRE=0 */ 251 - clear_thread_flag(TIF_32BIT_FPREGS); 252 - clear_thread_flag(TIF_HYBRID_FPREGS); 253 break; 254 - 255 - case MIPS_ABI_FP_XX: 256 - case MIPS_ABI_FP_ANY: 257 - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 258 - set_thread_flag(TIF_32BIT_FPREGS); 259 - else 260 - clear_thread_flag(TIF_32BIT_FPREGS); 261 - 262 - clear_thread_flag(TIF_HYBRID_FPREGS); 263 - break; 264 - 265 default: 266 - case FP_ERROR: 267 BUG(); 268 } 269 }
··· 11 #include <linux/elf.h> 12 #include <linux/sched.h> 13 14 + /* FPU modes */ 15 enum { 16 + FP_FRE, 17 + FP_FR0, 18 + FP_FR1, 19 }; 20 + 21 + /** 22 + * struct mode_req - ABI FPU mode requirements 23 + * @single: The program being loaded needs an FPU but it will only issue 24 + * single precision instructions meaning that it can execute in 25 + * either FR0 or FR1. 26 + * @soft: The soft(-float) requirement means that the program being 27 + * loaded needs has no FPU dependency at all (i.e. it has no 28 + * FPU instructions). 29 + * @fr1: The program being loaded depends on FPU being in FR=1 mode. 30 + * @frdefault: The program being loaded depends on the default FPU mode. 31 + * That is FR0 for O32 and FR1 for N32/N64. 32 + * @fre: The program being loaded depends on FPU with FRE=1. This mode is 33 + * a bridge which uses FR=1 whilst still being able to maintain 34 + * full compatibility with pre-existing code using the O32 FP32 35 + * ABI. 36 + * 37 + * More information about the FP ABIs can be found here: 38 + * 39 + * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up 40 + * 41 + */ 42 + 43 + struct mode_req { 44 + bool single; 45 + bool soft; 46 + bool fr1; 47 + bool frdefault; 48 + bool fre; 49 + }; 50 + 51 + static const struct mode_req fpu_reqs[] = { 52 + [MIPS_ABI_FP_ANY] = { true, true, true, true, true }, 53 + [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true }, 54 + [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false }, 55 + [MIPS_ABI_FP_SOFT] = { false, true, false, false, false }, 56 + [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false }, 57 + [MIPS_ABI_FP_XX] = { false, false, true, true, true }, 58 + [MIPS_ABI_FP_64] = { false, false, true, false, false }, 59 + [MIPS_ABI_FP_64A] = { false, false, true, false, true } 60 + }; 61 + 62 + /* 63 + * Mode requirements when .MIPS.abiflags is not present in the ELF. 64 + * Not present means that everything is acceptable except FR1. 
65 + */ 66 + static struct mode_req none_req = { true, true, false, true, true }; 67 68 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, 69 bool is_interp, struct arch_elf_state *state) 70 { 71 + struct elf32_hdr *ehdr32 = _ehdr; 72 + struct elf32_phdr *phdr32 = _phdr; 73 + struct elf64_phdr *phdr64 = _phdr; 74 struct mips_elf_abiflags_v0 abiflags; 75 int ret; 76 77 + /* Lets see if this is an O32 ELF */ 78 + if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) { 79 + /* FR = 1 for N32 */ 80 + if (ehdr32->e_flags & EF_MIPS_ABI2) 81 + state->overall_fp_mode = FP_FR1; 82 + else 83 + /* Set a good default FPU mode for O32 */ 84 + state->overall_fp_mode = cpu_has_mips_r6 ? 85 + FP_FRE : FP_FR0; 86 87 + if (ehdr32->e_flags & EF_MIPS_FP64) { 88 + /* 89 + * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it 90 + * later if needed 91 + */ 92 + if (is_interp) 93 + state->interp_fp_abi = MIPS_ABI_FP_OLD_64; 94 + else 95 + state->fp_abi = MIPS_ABI_FP_OLD_64; 96 + } 97 + if (phdr32->p_type != PT_MIPS_ABIFLAGS) 98 + return 0; 99 + 100 + if (phdr32->p_filesz < sizeof(abiflags)) 101 + return -EINVAL; 102 + 103 + ret = kernel_read(elf, phdr32->p_offset, 104 + (char *)&abiflags, 105 + sizeof(abiflags)); 106 + } else { 107 + /* FR=1 is really the only option for 64-bit */ 108 + state->overall_fp_mode = FP_FR1; 109 + 110 + if (phdr64->p_type != PT_MIPS_ABIFLAGS) 111 + return 0; 112 + if (phdr64->p_filesz < sizeof(abiflags)) 113 + return -EINVAL; 114 + 115 + ret = kernel_read(elf, phdr64->p_offset, 116 + (char *)&abiflags, 117 + sizeof(abiflags)); 118 + } 119 + 120 if (ret < 0) 121 return ret; 122 if (ret != sizeof(abiflags)) ··· 48 return 0; 49 } 50 51 + static inline unsigned get_fp_abi(int in_abi) 52 { 53 /* If the ABI requirement is provided, simply return that */ 54 + if (in_abi != MIPS_ABI_FP_UNKNOWN) 55 return in_abi; 56 57 + /* Unknown ABI */ 58 + return MIPS_ABI_FP_UNKNOWN; 59 } 60 61 int arch_check_elf(void *_ehdr, bool has_interpreter, 62 struct 
arch_elf_state *state) 63 { 64 struct elf32_hdr *ehdr = _ehdr; 65 + struct mode_req prog_req, interp_req; 66 + int fp_abi, interp_fp_abi, abi0, abi1, max_abi; 67 68 + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 69 return 0; 70 71 + fp_abi = get_fp_abi(state->fp_abi); 72 73 if (has_interpreter) { 74 + interp_fp_abi = get_fp_abi(state->interp_fp_abi); 75 76 abi0 = min(fp_abi, interp_fp_abi); 77 abi1 = max(fp_abi, interp_fp_abi); ··· 84 abi0 = abi1 = fp_abi; 85 } 86 87 + /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */ 88 + max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) && 89 + (!(ehdr->e_flags & EF_MIPS_ABI2))) ? 90 + MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT; 91 92 + if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || 93 + (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) 94 return -ELIBBAD; 95 + 96 + /* It's time to determine the FPU mode requirements */ 97 + prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0]; 98 + interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1]; 99 + 100 + /* 101 + * Check whether the program's and interp's ABIs have a matching FPU 102 + * mode requirement. 103 + */ 104 + prog_req.single = interp_req.single && prog_req.single; 105 + prog_req.soft = interp_req.soft && prog_req.soft; 106 + prog_req.fr1 = interp_req.fr1 && prog_req.fr1; 107 + prog_req.frdefault = interp_req.frdefault && prog_req.frdefault; 108 + prog_req.fre = interp_req.fre && prog_req.fre; 109 + 110 + /* 111 + * Determine the desired FPU mode 112 + * 113 + * Decision making: 114 + * 115 + * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This 116 + * means that we have a combination of program and interpreter 117 + * that inherently require the hybrid FP mode. 118 + * - If FR1 and FRDEFAULT is true, that means we hit the any-abi or 119 + * fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU 120 + * instructions so we don't care about the mode. We will simply use 121 + * the one preferred by the hardware. 
In fpxx case, that ABI can 122 + * handle both FR=1 and FR=0, so, again, we simply choose the one 123 + * preferred by the hardware. Next, if we only use single-precision 124 + * FPU instructions, and the default ABI FPU mode is not good 125 + * (ie single + any ABI combination), we set again the FPU mode to the 126 + * one is preferred by the hardware. Next, if we know that the code 127 + * will only use single-precision instructions, shown by single being 128 + * true but frdefault being false, then we again set the FPU mode to 129 + * the one that is preferred by the hardware. 130 + * - We want FP_FR1 if that's the only matching mode and the default one 131 + * is not good. 132 + * - Return with -ELIBADD if we can't find a matching FPU mode. 133 + */ 134 + if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1) 135 + state->overall_fp_mode = FP_FRE; 136 + else if ((prog_req.fr1 && prog_req.frdefault) || 137 + (prog_req.single && !prog_req.frdefault)) 138 + /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */ 139 + state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) && 140 + cpu_has_mips_r2_r6) ? 141 + FP_FR1 : FP_FR0; 142 + else if (prog_req.fr1) 143 + state->overall_fp_mode = FP_FR1; 144 + else if (!prog_req.fre && !prog_req.frdefault && 145 + !prog_req.fr1 && !prog_req.single && !prog_req.soft) 146 + return -ELIBBAD; 147 148 return 0; 149 } 150 151 + static inline void set_thread_fp_mode(int hybrid, int regs32) 152 + { 153 + if (hybrid) 154 + set_thread_flag(TIF_HYBRID_FPREGS); 155 + else 156 + clear_thread_flag(TIF_HYBRID_FPREGS); 157 + if (regs32) 158 + set_thread_flag(TIF_32BIT_FPREGS); 159 + else 160 + clear_thread_flag(TIF_32BIT_FPREGS); 161 + } 162 + 163 void mips_set_personality_fp(struct arch_elf_state *state) 164 { 165 + /* 166 + * This function is only ever called for O32 ELFs so we should 167 + * not be worried about N32/N64 binaries. 
168 + */ 169 170 + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 171 + return; 172 + 173 + switch (state->overall_fp_mode) { 174 + case FP_FRE: 175 + set_thread_fp_mode(1, 0); 176 break; 177 + case FP_FR0: 178 + set_thread_fp_mode(0, 1); 179 break; 180 + case FP_FR1: 181 + set_thread_fp_mode(0, 0); 182 break; 183 default: 184 BUG(); 185 } 186 }
+21 -2
arch/mips/kernel/entry.S
··· 46 local_irq_disable # make sure we dont miss an 47 # interrupt setting need_resched 48 # between sampling and return 49 LONG_L a2, TI_FLAGS($28) # current->work 50 andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) 51 bnez t0, work_pending ··· 119 RESTORE_SP_AND_RET 120 .set at 121 122 work_pending: 123 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS 124 beqz t0, work_notifysig ··· 176 jal syscall_trace_leave 177 b resume_userspace 178 179 - #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) 180 181 /* 182 * MIPS32R2 Instruction Hazard Barrier - must be called ··· 190 nop 191 END(mips_ihb) 192 193 - #endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
··· 46 local_irq_disable # make sure we dont miss an 47 # interrupt setting need_resched 48 # between sampling and return 49 + #ifdef CONFIG_MIPSR2_TO_R6_EMULATOR 50 + lw k0, TI_R2_EMUL_RET($28) 51 + bnez k0, restore_all_from_r2_emul 52 + #endif 53 + 54 LONG_L a2, TI_FLAGS($28) # current->work 55 andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) 56 bnez t0, work_pending ··· 114 RESTORE_SP_AND_RET 115 .set at 116 117 + #ifdef CONFIG_MIPSR2_TO_R6_EMULATOR 118 + restore_all_from_r2_emul: # restore full frame 119 + .set noat 120 + sw zero, TI_R2_EMUL_RET($28) # reset it 121 + RESTORE_TEMP 122 + RESTORE_AT 123 + RESTORE_STATIC 124 + RESTORE_SOME 125 + LONG_L sp, PT_R29(sp) 126 + eretnc 127 + .set at 128 + #endif 129 + 130 work_pending: 131 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS 132 beqz t0, work_notifysig ··· 158 jal syscall_trace_leave 159 b resume_userspace 160 161 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \ 162 + defined(CONFIG_MIPS_MT) 163 164 /* 165 * MIPS32R2 Instruction Hazard Barrier - must be called ··· 171 nop 172 END(mips_ihb) 173 174 + #endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
+1 -1
arch/mips/kernel/genex.S
··· 125 nop 126 nop 127 #endif 128 - .set arch=r4000 129 wait 130 /* end of rollback region (the region size must be power of two) */ 131 1:
··· 125 nop 126 nop 127 #endif 128 + .set MIPS_ISA_ARCH_LEVEL_RAW 129 wait 130 /* end of rollback region (the region size must be power of two) */ 131 1:
+1
arch/mips/kernel/idle.c
··· 186 case CPU_PROAPTIV: 187 case CPU_P5600: 188 case CPU_M5150: 189 cpu_wait = r4k_wait; 190 if (read_c0_config7() & MIPS_CONF7_WII) 191 cpu_wait = r4k_wait_irqoff;
··· 186 case CPU_PROAPTIV: 187 case CPU_P5600: 188 case CPU_M5150: 189 + case CPU_QEMU_GENERIC: 190 cpu_wait = r4k_wait; 191 if (read_c0_config7() & MIPS_CONF7_WII) 192 cpu_wait = r4k_wait_irqoff;
+2378
arch/mips/kernel/mips-r2-to-r6-emul.c
···
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2014 Imagination Technologies Ltd. 7 + * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com> 8 + * Author: Markos Chandras <markos.chandras@imgtec.com> 9 + * 10 + * MIPS R2 user space instruction emulator for MIPS R6 11 + * 12 + */ 13 + #include <linux/bug.h> 14 + #include <linux/compiler.h> 15 + #include <linux/debugfs.h> 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/ptrace.h> 20 + #include <linux/seq_file.h> 21 + 22 + #include <asm/asm.h> 23 + #include <asm/branch.h> 24 + #include <asm/break.h> 25 + #include <asm/fpu.h> 26 + #include <asm/fpu_emulator.h> 27 + #include <asm/inst.h> 28 + #include <asm/mips-r2-to-r6-emul.h> 29 + #include <asm/local.h> 30 + #include <asm/ptrace.h> 31 + #include <asm/uaccess.h> 32 + 33 + #ifdef CONFIG_64BIT 34 + #define ADDIU "daddiu " 35 + #define INS "dins " 36 + #define EXT "dext " 37 + #else 38 + #define ADDIU "addiu " 39 + #define INS "ins " 40 + #define EXT "ext " 41 + #endif /* CONFIG_64BIT */ 42 + 43 + #define SB "sb " 44 + #define LB "lb " 45 + #define LL "ll " 46 + #define SC "sc " 47 + 48 + DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); 49 + DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); 50 + DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); 51 + 52 + extern const unsigned int fpucondbit[8]; 53 + 54 + #define MIPS_R2_EMUL_TOTAL_PASS 10 55 + 56 + int mipsr2_emulation = 0; 57 + 58 + static int __init mipsr2emu_enable(char *s) 59 + { 60 + mipsr2_emulation = 1; 61 + 62 + pr_info("MIPS R2-to-R6 Emulator Enabled!"); 63 + 64 + return 1; 65 + } 66 + __setup("mipsr2emu", mipsr2emu_enable); 67 + 68 + /** 69 + * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot 70 + * for performance 
instead of the traditional way of using a stack trampoline 71 + * which is rather slow. 72 + * @regs: Process register set 73 + * @ir: Instruction 74 + */ 75 + static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) 76 + { 77 + switch (MIPSInst_OPCODE(ir)) { 78 + case addiu_op: 79 + if (MIPSInst_RT(ir)) 80 + regs->regs[MIPSInst_RT(ir)] = 81 + (s32)regs->regs[MIPSInst_RS(ir)] + 82 + (s32)MIPSInst_SIMM(ir); 83 + return 0; 84 + case daddiu_op: 85 + if (config_enabled(CONFIG_32BIT)) 86 + break; 87 + 88 + if (MIPSInst_RT(ir)) 89 + regs->regs[MIPSInst_RT(ir)] = 90 + (s64)regs->regs[MIPSInst_RS(ir)] + 91 + (s64)MIPSInst_SIMM(ir); 92 + return 0; 93 + case lwc1_op: 94 + case swc1_op: 95 + case cop1_op: 96 + case cop1x_op: 97 + /* FPU instructions in delay slot */ 98 + return -SIGFPE; 99 + case spec_op: 100 + switch (MIPSInst_FUNC(ir)) { 101 + case or_op: 102 + if (MIPSInst_RD(ir)) 103 + regs->regs[MIPSInst_RD(ir)] = 104 + regs->regs[MIPSInst_RS(ir)] | 105 + regs->regs[MIPSInst_RT(ir)]; 106 + return 0; 107 + case sll_op: 108 + if (MIPSInst_RS(ir)) 109 + break; 110 + 111 + if (MIPSInst_RD(ir)) 112 + regs->regs[MIPSInst_RD(ir)] = 113 + (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) << 114 + MIPSInst_FD(ir)); 115 + return 0; 116 + case srl_op: 117 + if (MIPSInst_RS(ir)) 118 + break; 119 + 120 + if (MIPSInst_RD(ir)) 121 + regs->regs[MIPSInst_RD(ir)] = 122 + (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >> 123 + MIPSInst_FD(ir)); 124 + return 0; 125 + case addu_op: 126 + if (MIPSInst_FD(ir)) 127 + break; 128 + 129 + if (MIPSInst_RD(ir)) 130 + regs->regs[MIPSInst_RD(ir)] = 131 + (s32)((u32)regs->regs[MIPSInst_RS(ir)] + 132 + (u32)regs->regs[MIPSInst_RT(ir)]); 133 + return 0; 134 + case subu_op: 135 + if (MIPSInst_FD(ir)) 136 + break; 137 + 138 + if (MIPSInst_RD(ir)) 139 + regs->regs[MIPSInst_RD(ir)] = 140 + (s32)((u32)regs->regs[MIPSInst_RS(ir)] - 141 + (u32)regs->regs[MIPSInst_RT(ir)]); 142 + return 0; 143 + case dsll_op: 144 + if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) 145 + 
break; 146 + 147 + if (MIPSInst_RD(ir)) 148 + regs->regs[MIPSInst_RD(ir)] = 149 + (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) << 150 + MIPSInst_FD(ir)); 151 + return 0; 152 + case dsrl_op: 153 + if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) 154 + break; 155 + 156 + if (MIPSInst_RD(ir)) 157 + regs->regs[MIPSInst_RD(ir)] = 158 + (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >> 159 + MIPSInst_FD(ir)); 160 + return 0; 161 + case daddu_op: 162 + if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) 163 + break; 164 + 165 + if (MIPSInst_RD(ir)) 166 + regs->regs[MIPSInst_RD(ir)] = 167 + (u64)regs->regs[MIPSInst_RS(ir)] + 168 + (u64)regs->regs[MIPSInst_RT(ir)]; 169 + return 0; 170 + case dsubu_op: 171 + if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) 172 + break; 173 + 174 + if (MIPSInst_RD(ir)) 175 + regs->regs[MIPSInst_RD(ir)] = 176 + (s64)((u64)regs->regs[MIPSInst_RS(ir)] - 177 + (u64)regs->regs[MIPSInst_RT(ir)]); 178 + return 0; 179 + } 180 + break; 181 + default: 182 + pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n", 183 + ir, MIPSInst_OPCODE(ir)); 184 + } 185 + 186 + return SIGILL; 187 + } 188 + 189 + /** 190 + * movt_func - Emulate a MOVT instruction 191 + * @regs: Process register set 192 + * @ir: Instruction 193 + * 194 + * Returns 0 since it always succeeds. 195 + */ 196 + static int movf_func(struct pt_regs *regs, u32 ir) 197 + { 198 + u32 csr; 199 + u32 cond; 200 + 201 + csr = current->thread.fpu.fcr31; 202 + cond = fpucondbit[MIPSInst_RT(ir) >> 2]; 203 + if (((csr & cond) == 0) && MIPSInst_RD(ir)) 204 + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; 205 + MIPS_R2_STATS(movs); 206 + return 0; 207 + } 208 + 209 + /** 210 + * movt_func - Emulate a MOVT instruction 211 + * @regs: Process register set 212 + * @ir: Instruction 213 + * 214 + * Returns 0 since it always succeeds. 
215 + */ 216 + static int movt_func(struct pt_regs *regs, u32 ir) 217 + { 218 + u32 csr; 219 + u32 cond; 220 + 221 + csr = current->thread.fpu.fcr31; 222 + cond = fpucondbit[MIPSInst_RT(ir) >> 2]; 223 + 224 + if (((csr & cond) != 0) && MIPSInst_RD(ir)) 225 + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; 226 + 227 + MIPS_R2_STATS(movs); 228 + 229 + return 0; 230 + } 231 + 232 + /** 233 + * jr_func - Emulate a JR instruction. 234 + * @pt_regs: Process register set 235 + * @ir: Instruction 236 + * 237 + * Returns SIGILL if JR was in delay slot, SIGEMT if we 238 + * can't compute the EPC, SIGSEGV if we can't access the 239 + * userland instruction or 0 on success. 240 + */ 241 + static int jr_func(struct pt_regs *regs, u32 ir) 242 + { 243 + int err; 244 + unsigned long cepc, epc, nepc; 245 + u32 nir; 246 + 247 + if (delay_slot(regs)) 248 + return SIGILL; 249 + 250 + /* EPC after the RI/JR instruction */ 251 + nepc = regs->cp0_epc; 252 + /* Roll back to the reserved R2 JR instruction */ 253 + regs->cp0_epc -= 4; 254 + epc = regs->cp0_epc; 255 + err = __compute_return_epc(regs); 256 + 257 + if (err < 0) 258 + return SIGEMT; 259 + 260 + 261 + /* Computed EPC */ 262 + cepc = regs->cp0_epc; 263 + 264 + /* Get DS instruction */ 265 + err = __get_user(nir, (u32 __user *)nepc); 266 + if (err) 267 + return SIGSEGV; 268 + 269 + MIPS_R2BR_STATS(jrs); 270 + 271 + /* If nir == 0(NOP), then nothing else to do */ 272 + if (nir) { 273 + /* 274 + * Negative err means FPU instruction in BD-slot, 275 + * Zero err means 'BD-slot emulation done' 276 + * For anything else we go back to trampoline emulation. 
277 + */ 278 + err = mipsr6_emul(regs, nir); 279 + if (err > 0) { 280 + regs->cp0_epc = nepc; 281 + err = mips_dsemul(regs, nir, cepc); 282 + if (err == SIGILL) 283 + err = SIGEMT; 284 + MIPS_R2_STATS(dsemul); 285 + } 286 + } 287 + 288 + return err; 289 + } 290 + 291 + /** 292 + * movz_func - Emulate a MOVZ instruction 293 + * @regs: Process register set 294 + * @ir: Instruction 295 + * 296 + * Returns 0 since it always succeeds. 297 + */ 298 + static int movz_func(struct pt_regs *regs, u32 ir) 299 + { 300 + if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir)) 301 + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; 302 + MIPS_R2_STATS(movs); 303 + 304 + return 0; 305 + } 306 + 307 + /** 308 + * movn_func - Emulate a MOVZ instruction 309 + * @regs: Process register set 310 + * @ir: Instruction 311 + * 312 + * Returns 0 since it always succeeds. 313 + */ 314 + static int movn_func(struct pt_regs *regs, u32 ir) 315 + { 316 + if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir)) 317 + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; 318 + MIPS_R2_STATS(movs); 319 + 320 + return 0; 321 + } 322 + 323 + /** 324 + * mfhi_func - Emulate a MFHI instruction 325 + * @regs: Process register set 326 + * @ir: Instruction 327 + * 328 + * Returns 0 since it always succeeds. 329 + */ 330 + static int mfhi_func(struct pt_regs *regs, u32 ir) 331 + { 332 + if (MIPSInst_RD(ir)) 333 + regs->regs[MIPSInst_RD(ir)] = regs->hi; 334 + 335 + MIPS_R2_STATS(hilo); 336 + 337 + return 0; 338 + } 339 + 340 + /** 341 + * mthi_func - Emulate a MTHI instruction 342 + * @regs: Process register set 343 + * @ir: Instruction 344 + * 345 + * Returns 0 since it always succeeds. 
346 + */ 347 + static int mthi_func(struct pt_regs *regs, u32 ir) 348 + { 349 + regs->hi = regs->regs[MIPSInst_RS(ir)]; 350 + 351 + MIPS_R2_STATS(hilo); 352 + 353 + return 0; 354 + } 355 + 356 + /** 357 + * mflo_func - Emulate a MFLO instruction 358 + * @regs: Process register set 359 + * @ir: Instruction 360 + * 361 + * Returns 0 since it always succeeds. 362 + */ 363 + static int mflo_func(struct pt_regs *regs, u32 ir) 364 + { 365 + if (MIPSInst_RD(ir)) 366 + regs->regs[MIPSInst_RD(ir)] = regs->lo; 367 + 368 + MIPS_R2_STATS(hilo); 369 + 370 + return 0; 371 + } 372 + 373 + /** 374 + * mtlo_func - Emulate a MTLO instruction 375 + * @regs: Process register set 376 + * @ir: Instruction 377 + * 378 + * Returns 0 since it always succeeds. 379 + */ 380 + static int mtlo_func(struct pt_regs *regs, u32 ir) 381 + { 382 + regs->lo = regs->regs[MIPSInst_RS(ir)]; 383 + 384 + MIPS_R2_STATS(hilo); 385 + 386 + return 0; 387 + } 388 + 389 + /** 390 + * mult_func - Emulate a MULT instruction 391 + * @regs: Process register set 392 + * @ir: Instruction 393 + * 394 + * Returns 0 since it always succeeds. 395 + */ 396 + static int mult_func(struct pt_regs *regs, u32 ir) 397 + { 398 + s64 res; 399 + s32 rt, rs; 400 + 401 + rt = regs->regs[MIPSInst_RT(ir)]; 402 + rs = regs->regs[MIPSInst_RS(ir)]; 403 + res = (s64)rt * (s64)rs; 404 + 405 + rs = res; 406 + regs->lo = (s64)rs; 407 + rt = res >> 32; 408 + res = (s64)rt; 409 + regs->hi = res; 410 + 411 + MIPS_R2_STATS(muls); 412 + 413 + return 0; 414 + } 415 + 416 + /** 417 + * multu_func - Emulate a MULTU instruction 418 + * @regs: Process register set 419 + * @ir: Instruction 420 + * 421 + * Returns 0 since it always succeeds. 
422 + */ 423 + static int multu_func(struct pt_regs *regs, u32 ir) 424 + { 425 + u64 res; 426 + u32 rt, rs; 427 + 428 + rt = regs->regs[MIPSInst_RT(ir)]; 429 + rs = regs->regs[MIPSInst_RS(ir)]; 430 + res = (u64)rt * (u64)rs; 431 + rt = res; 432 + regs->lo = (s64)rt; 433 + regs->hi = (s64)(res >> 32); 434 + 435 + MIPS_R2_STATS(muls); 436 + 437 + return 0; 438 + } 439 + 440 + /** 441 + * div_func - Emulate a DIV instruction 442 + * @regs: Process register set 443 + * @ir: Instruction 444 + * 445 + * Returns 0 since it always succeeds. 446 + */ 447 + static int div_func(struct pt_regs *regs, u32 ir) 448 + { 449 + s32 rt, rs; 450 + 451 + rt = regs->regs[MIPSInst_RT(ir)]; 452 + rs = regs->regs[MIPSInst_RS(ir)]; 453 + 454 + regs->lo = (s64)(rs / rt); 455 + regs->hi = (s64)(rs % rt); 456 + 457 + MIPS_R2_STATS(divs); 458 + 459 + return 0; 460 + } 461 + 462 + /** 463 + * divu_func - Emulate a DIVU instruction 464 + * @regs: Process register set 465 + * @ir: Instruction 466 + * 467 + * Returns 0 since it always succeeds. 468 + */ 469 + static int divu_func(struct pt_regs *regs, u32 ir) 470 + { 471 + u32 rt, rs; 472 + 473 + rt = regs->regs[MIPSInst_RT(ir)]; 474 + rs = regs->regs[MIPSInst_RS(ir)]; 475 + 476 + regs->lo = (s64)(rs / rt); 477 + regs->hi = (s64)(rs % rt); 478 + 479 + MIPS_R2_STATS(divs); 480 + 481 + return 0; 482 + } 483 + 484 + /** 485 + * dmult_func - Emulate a DMULT instruction 486 + * @regs: Process register set 487 + * @ir: Instruction 488 + * 489 + * Returns 0 on success or SIGILL for 32-bit kernels. 
 */
static int dmult_func(struct pt_regs *regs, u32 ir)
{
	s64 res;
	s64 rt, rs;

	if (config_enabled(CONFIG_32BIT))
		return SIGILL;

	rt = regs->regs[MIPSInst_RT(ir)];
	rs = regs->regs[MIPSInst_RS(ir)];
	res = rt * rs;

	regs->lo = res;
	/*
	 * Use the R6 DMUH instruction to obtain the high 64 bits of the
	 * 128-bit signed product; this code runs on R6 hardware.
	 */
	__asm__ __volatile__(
		"dmuh %0, %1, %2\t\n"
		: "=r"(res)
		: "r"(rt), "r"(rs));

	regs->hi = res;

	MIPS_R2_STATS(muls);

	return 0;
}

/**
 * dmultu_func - Emulate a DMULTU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int dmultu_func(struct pt_regs *regs, u32 ir)
{
	u64 res;
	u64 rt, rs;

	if (config_enabled(CONFIG_32BIT))
		return SIGILL;

	rt = regs->regs[MIPSInst_RT(ir)];
	rs = regs->regs[MIPSInst_RS(ir)];
	res = rt * rs;

	regs->lo = res;
	/* R6 DMUHU: high 64 bits of the 128-bit unsigned product */
	__asm__ __volatile__(
		"dmuhu %0, %1, %2\t\n"
		: "=r"(res)
		: "r"(rt), "r"(rs));

	regs->hi = res;

	MIPS_R2_STATS(muls);

	return 0;
}

/**
 * ddiv_func - Emulate a DDIV instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int ddiv_func(struct pt_regs *regs, u32 ir)
{
	s64 rt, rs;

	if (config_enabled(CONFIG_32BIT))
		return SIGILL;

	rt = regs->regs[MIPSInst_RT(ir)];
	rs = regs->regs[MIPSInst_RS(ir)];

	/*
	 * NOTE(review): rt == 0 is C-level division by zero (UB); the
	 * architected DDIV result for a zero divisor is UNPREDICTABLE --
	 * confirm whether a guard is wanted.
	 */
	regs->lo = rs / rt;
	regs->hi = rs % rt;

	MIPS_R2_STATS(divs);

	return 0;
}

/**
 * ddivu_func - Emulate a DDIVU instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 on success or SIGILL for 32-bit kernels.
 */
static int ddivu_func(struct pt_regs *regs, u32 ir)
{
	u64 rt, rs;

	if (config_enabled(CONFIG_32BIT))
		return SIGILL;

	rt = regs->regs[MIPSInst_RT(ir)];
	rs = regs->regs[MIPSInst_RS(ir)];

	/* NOTE(review): rt == 0 is UB in C; see ddiv_func. */
	regs->lo = rs / rt;
	regs->hi = rs % rt;

	MIPS_R2_STATS(divs);

	return 0;
}

/*
 * R6 removed instructions for the SPECIAL opcode.
 * Each entry is { encoding mask, expected bits, emulation handler };
 * the first matching entry wins in mipsr2_find_op_func().
 */
static struct r2_decoder_table spec_op_table[] = {
	{ 0xfc1ff83f, 0x00000008, jr_func },
	{ 0xfc00ffff, 0x00000018, mult_func },
	{ 0xfc00ffff, 0x00000019, multu_func },
	{ 0xfc00ffff, 0x0000001c, dmult_func },
	{ 0xfc00ffff, 0x0000001d, dmultu_func },
	{ 0xffff07ff, 0x00000010, mfhi_func },
	{ 0xfc1fffff, 0x00000011, mthi_func },
	{ 0xffff07ff, 0x00000012, mflo_func },
	{ 0xfc1fffff, 0x00000013, mtlo_func },
	{ 0xfc0307ff, 0x00000001, movf_func },
	{ 0xfc0307ff, 0x00010001, movt_func },
	{ 0xfc0007ff, 0x0000000a, movz_func },
	{ 0xfc0007ff, 0x0000000b, movn_func },
	{ 0xfc00ffff, 0x0000001a, div_func },
	{ 0xfc00ffff, 0x0000001b, divu_func },
	{ 0xfc00ffff, 0x0000001e, ddiv_func },
	{ 0xfc00ffff, 0x0000001f, ddivu_func },
	{}
};

/**
 * madd_func - Emulate a MADD instruction
 * @regs: Process register set
 * @ir: Instruction
 *
 * Returns 0 since it always succeeds.
626 + */ 627 + static int madd_func(struct pt_regs *regs, u32 ir) 628 + { 629 + s64 res; 630 + s32 rt, rs; 631 + 632 + rt = regs->regs[MIPSInst_RT(ir)]; 633 + rs = regs->regs[MIPSInst_RS(ir)]; 634 + res = (s64)rt * (s64)rs; 635 + rt = regs->hi; 636 + rs = regs->lo; 637 + res += ((((s64)rt) << 32) | (u32)rs); 638 + 639 + rt = res; 640 + regs->lo = (s64)rt; 641 + rs = res >> 32; 642 + regs->hi = (s64)rs; 643 + 644 + MIPS_R2_STATS(dsps); 645 + 646 + return 0; 647 + } 648 + 649 + /** 650 + * maddu_func - Emulate a MADDU instruction 651 + * @regs: Process register set 652 + * @ir: Instruction 653 + * 654 + * Returns 0 since it always succeeds. 655 + */ 656 + static int maddu_func(struct pt_regs *regs, u32 ir) 657 + { 658 + u64 res; 659 + u32 rt, rs; 660 + 661 + rt = regs->regs[MIPSInst_RT(ir)]; 662 + rs = regs->regs[MIPSInst_RS(ir)]; 663 + res = (u64)rt * (u64)rs; 664 + rt = regs->hi; 665 + rs = regs->lo; 666 + res += ((((s64)rt) << 32) | (u32)rs); 667 + 668 + rt = res; 669 + regs->lo = (s64)rt; 670 + rs = res >> 32; 671 + regs->hi = (s64)rs; 672 + 673 + MIPS_R2_STATS(dsps); 674 + 675 + return 0; 676 + } 677 + 678 + /** 679 + * msub_func - Emulate a MSUB instruction 680 + * @regs: Process register set 681 + * @ir: Instruction 682 + * 683 + * Returns 0 since it always succeeds. 684 + */ 685 + static int msub_func(struct pt_regs *regs, u32 ir) 686 + { 687 + s64 res; 688 + s32 rt, rs; 689 + 690 + rt = regs->regs[MIPSInst_RT(ir)]; 691 + rs = regs->regs[MIPSInst_RS(ir)]; 692 + res = (s64)rt * (s64)rs; 693 + rt = regs->hi; 694 + rs = regs->lo; 695 + res = ((((s64)rt) << 32) | (u32)rs) - res; 696 + 697 + rt = res; 698 + regs->lo = (s64)rt; 699 + rs = res >> 32; 700 + regs->hi = (s64)rs; 701 + 702 + MIPS_R2_STATS(dsps); 703 + 704 + return 0; 705 + } 706 + 707 + /** 708 + * msubu_func - Emulate a MSUBU instruction 709 + * @regs: Process register set 710 + * @ir: Instruction 711 + * 712 + * Returns 0 since it always succeeds. 
713 + */ 714 + static int msubu_func(struct pt_regs *regs, u32 ir) 715 + { 716 + u64 res; 717 + u32 rt, rs; 718 + 719 + rt = regs->regs[MIPSInst_RT(ir)]; 720 + rs = regs->regs[MIPSInst_RS(ir)]; 721 + res = (u64)rt * (u64)rs; 722 + rt = regs->hi; 723 + rs = regs->lo; 724 + res = ((((s64)rt) << 32) | (u32)rs) - res; 725 + 726 + rt = res; 727 + regs->lo = (s64)rt; 728 + rs = res >> 32; 729 + regs->hi = (s64)rs; 730 + 731 + MIPS_R2_STATS(dsps); 732 + 733 + return 0; 734 + } 735 + 736 + /** 737 + * mul_func - Emulate a MUL instruction 738 + * @regs: Process register set 739 + * @ir: Instruction 740 + * 741 + * Returns 0 since it always succeeds. 742 + */ 743 + static int mul_func(struct pt_regs *regs, u32 ir) 744 + { 745 + s64 res; 746 + s32 rt, rs; 747 + 748 + if (!MIPSInst_RD(ir)) 749 + return 0; 750 + rt = regs->regs[MIPSInst_RT(ir)]; 751 + rs = regs->regs[MIPSInst_RS(ir)]; 752 + res = (s64)rt * (s64)rs; 753 + 754 + rs = res; 755 + regs->regs[MIPSInst_RD(ir)] = (s64)rs; 756 + 757 + MIPS_R2_STATS(muls); 758 + 759 + return 0; 760 + } 761 + 762 + /** 763 + * clz_func - Emulate a CLZ instruction 764 + * @regs: Process register set 765 + * @ir: Instruction 766 + * 767 + * Returns 0 since it always succeeds. 768 + */ 769 + static int clz_func(struct pt_regs *regs, u32 ir) 770 + { 771 + u32 res; 772 + u32 rs; 773 + 774 + if (!MIPSInst_RD(ir)) 775 + return 0; 776 + 777 + rs = regs->regs[MIPSInst_RS(ir)]; 778 + __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs)); 779 + regs->regs[MIPSInst_RD(ir)] = res; 780 + 781 + MIPS_R2_STATS(bops); 782 + 783 + return 0; 784 + } 785 + 786 + /** 787 + * clo_func - Emulate a CLO instruction 788 + * @regs: Process register set 789 + * @ir: Instruction 790 + * 791 + * Returns 0 since it always succeeds. 
792 + */ 793 + 794 + static int clo_func(struct pt_regs *regs, u32 ir) 795 + { 796 + u32 res; 797 + u32 rs; 798 + 799 + if (!MIPSInst_RD(ir)) 800 + return 0; 801 + 802 + rs = regs->regs[MIPSInst_RS(ir)]; 803 + __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs)); 804 + regs->regs[MIPSInst_RD(ir)] = res; 805 + 806 + MIPS_R2_STATS(bops); 807 + 808 + return 0; 809 + } 810 + 811 + /** 812 + * dclz_func - Emulate a DCLZ instruction 813 + * @regs: Process register set 814 + * @ir: Instruction 815 + * 816 + * Returns 0 since it always succeeds. 817 + */ 818 + static int dclz_func(struct pt_regs *regs, u32 ir) 819 + { 820 + u64 res; 821 + u64 rs; 822 + 823 + if (config_enabled(CONFIG_32BIT)) 824 + return SIGILL; 825 + 826 + if (!MIPSInst_RD(ir)) 827 + return 0; 828 + 829 + rs = regs->regs[MIPSInst_RS(ir)]; 830 + __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs)); 831 + regs->regs[MIPSInst_RD(ir)] = res; 832 + 833 + MIPS_R2_STATS(bops); 834 + 835 + return 0; 836 + } 837 + 838 + /** 839 + * dclo_func - Emulate a DCLO instruction 840 + * @regs: Process register set 841 + * @ir: Instruction 842 + * 843 + * Returns 0 since it always succeeds. 
844 + */ 845 + static int dclo_func(struct pt_regs *regs, u32 ir) 846 + { 847 + u64 res; 848 + u64 rs; 849 + 850 + if (config_enabled(CONFIG_32BIT)) 851 + return SIGILL; 852 + 853 + if (!MIPSInst_RD(ir)) 854 + return 0; 855 + 856 + rs = regs->regs[MIPSInst_RS(ir)]; 857 + __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs)); 858 + regs->regs[MIPSInst_RD(ir)] = res; 859 + 860 + MIPS_R2_STATS(bops); 861 + 862 + return 0; 863 + } 864 + 865 + /* R6 removed instructions for the SPECIAL2 opcode */ 866 + static struct r2_decoder_table spec2_op_table[] = { 867 + { 0xfc00ffff, 0x70000000, madd_func }, 868 + { 0xfc00ffff, 0x70000001, maddu_func }, 869 + { 0xfc0007ff, 0x70000002, mul_func }, 870 + { 0xfc00ffff, 0x70000004, msub_func }, 871 + { 0xfc00ffff, 0x70000005, msubu_func }, 872 + { 0xfc0007ff, 0x70000020, clz_func }, 873 + { 0xfc0007ff, 0x70000021, clo_func }, 874 + { 0xfc0007ff, 0x70000024, dclz_func }, 875 + { 0xfc0007ff, 0x70000025, dclo_func }, 876 + { } 877 + }; 878 + 879 + static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, 880 + struct r2_decoder_table *table) 881 + { 882 + struct r2_decoder_table *p; 883 + int err; 884 + 885 + for (p = table; p->func; p++) { 886 + if ((inst & p->mask) == p->code) { 887 + err = (p->func)(regs, inst); 888 + return err; 889 + } 890 + } 891 + return SIGILL; 892 + } 893 + 894 + /** 895 + * mipsr2_decoder: Decode and emulate a MIPS R2 instruction 896 + * @regs: Process register set 897 + * @inst: Instruction to decode and emulate 898 + */ 899 + int mipsr2_decoder(struct pt_regs *regs, u32 inst) 900 + { 901 + int err = 0; 902 + unsigned long vaddr; 903 + u32 nir; 904 + unsigned long cpc, epc, nepc, r31, res, rs, rt; 905 + 906 + void __user *fault_addr = NULL; 907 + int pass = 0; 908 + 909 + repeat: 910 + r31 = regs->regs[31]; 911 + epc = regs->cp0_epc; 912 + err = compute_return_epc(regs); 913 + if (err < 0) { 914 + BUG(); 915 + return SIGEMT; 916 + } 917 + pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx 
(pass=%d))\n", 918 + inst, epc, pass); 919 + 920 + switch (MIPSInst_OPCODE(inst)) { 921 + case spec_op: 922 + err = mipsr2_find_op_func(regs, inst, spec_op_table); 923 + if (err < 0) { 924 + /* FPU instruction under JR */ 925 + regs->cp0_cause |= CAUSEF_BD; 926 + goto fpu_emul; 927 + } 928 + break; 929 + case spec2_op: 930 + err = mipsr2_find_op_func(regs, inst, spec2_op_table); 931 + break; 932 + case bcond_op: 933 + rt = MIPSInst_RT(inst); 934 + rs = MIPSInst_RS(inst); 935 + switch (rt) { 936 + case tgei_op: 937 + if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) 938 + do_trap_or_bp(regs, 0, "TGEI"); 939 + 940 + MIPS_R2_STATS(traps); 941 + 942 + break; 943 + case tgeiu_op: 944 + if (regs->regs[rs] >= MIPSInst_UIMM(inst)) 945 + do_trap_or_bp(regs, 0, "TGEIU"); 946 + 947 + MIPS_R2_STATS(traps); 948 + 949 + break; 950 + case tlti_op: 951 + if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) 952 + do_trap_or_bp(regs, 0, "TLTI"); 953 + 954 + MIPS_R2_STATS(traps); 955 + 956 + break; 957 + case tltiu_op: 958 + if (regs->regs[rs] < MIPSInst_UIMM(inst)) 959 + do_trap_or_bp(regs, 0, "TLTIU"); 960 + 961 + MIPS_R2_STATS(traps); 962 + 963 + break; 964 + case teqi_op: 965 + if (regs->regs[rs] == MIPSInst_SIMM(inst)) 966 + do_trap_or_bp(regs, 0, "TEQI"); 967 + 968 + MIPS_R2_STATS(traps); 969 + 970 + break; 971 + case tnei_op: 972 + if (regs->regs[rs] != MIPSInst_SIMM(inst)) 973 + do_trap_or_bp(regs, 0, "TNEI"); 974 + 975 + MIPS_R2_STATS(traps); 976 + 977 + break; 978 + case bltzl_op: 979 + case bgezl_op: 980 + case bltzall_op: 981 + case bgezall_op: 982 + if (delay_slot(regs)) { 983 + err = SIGILL; 984 + break; 985 + } 986 + regs->regs[31] = r31; 987 + regs->cp0_epc = epc; 988 + err = __compute_return_epc(regs); 989 + if (err < 0) 990 + return SIGEMT; 991 + if (err != BRANCH_LIKELY_TAKEN) 992 + break; 993 + cpc = regs->cp0_epc; 994 + nepc = epc + 4; 995 + err = __get_user(nir, (u32 __user *)nepc); 996 + if (err) { 997 + err = SIGSEGV; 998 + break; 999 + } 1000 + /* 1001 + * This 
will probably be optimized away when 1002 + * CONFIG_DEBUG_FS is not enabled 1003 + */ 1004 + switch (rt) { 1005 + case bltzl_op: 1006 + MIPS_R2BR_STATS(bltzl); 1007 + break; 1008 + case bgezl_op: 1009 + MIPS_R2BR_STATS(bgezl); 1010 + break; 1011 + case bltzall_op: 1012 + MIPS_R2BR_STATS(bltzall); 1013 + break; 1014 + case bgezall_op: 1015 + MIPS_R2BR_STATS(bgezall); 1016 + break; 1017 + } 1018 + 1019 + switch (MIPSInst_OPCODE(nir)) { 1020 + case cop1_op: 1021 + case cop1x_op: 1022 + case lwc1_op: 1023 + case swc1_op: 1024 + regs->cp0_cause |= CAUSEF_BD; 1025 + goto fpu_emul; 1026 + } 1027 + if (nir) { 1028 + err = mipsr6_emul(regs, nir); 1029 + if (err > 0) { 1030 + err = mips_dsemul(regs, nir, cpc); 1031 + if (err == SIGILL) 1032 + err = SIGEMT; 1033 + MIPS_R2_STATS(dsemul); 1034 + } 1035 + } 1036 + break; 1037 + case bltzal_op: 1038 + case bgezal_op: 1039 + if (delay_slot(regs)) { 1040 + err = SIGILL; 1041 + break; 1042 + } 1043 + regs->regs[31] = r31; 1044 + regs->cp0_epc = epc; 1045 + err = __compute_return_epc(regs); 1046 + if (err < 0) 1047 + return SIGEMT; 1048 + cpc = regs->cp0_epc; 1049 + nepc = epc + 4; 1050 + err = __get_user(nir, (u32 __user *)nepc); 1051 + if (err) { 1052 + err = SIGSEGV; 1053 + break; 1054 + } 1055 + /* 1056 + * This will probably be optimized away when 1057 + * CONFIG_DEBUG_FS is not enabled 1058 + */ 1059 + switch (rt) { 1060 + case bltzal_op: 1061 + MIPS_R2BR_STATS(bltzal); 1062 + break; 1063 + case bgezal_op: 1064 + MIPS_R2BR_STATS(bgezal); 1065 + break; 1066 + } 1067 + 1068 + switch (MIPSInst_OPCODE(nir)) { 1069 + case cop1_op: 1070 + case cop1x_op: 1071 + case lwc1_op: 1072 + case swc1_op: 1073 + regs->cp0_cause |= CAUSEF_BD; 1074 + goto fpu_emul; 1075 + } 1076 + if (nir) { 1077 + err = mipsr6_emul(regs, nir); 1078 + if (err > 0) { 1079 + err = mips_dsemul(regs, nir, cpc); 1080 + if (err == SIGILL) 1081 + err = SIGEMT; 1082 + MIPS_R2_STATS(dsemul); 1083 + } 1084 + } 1085 + break; 1086 + default: 1087 + regs->regs[31] = r31; 
1088 + regs->cp0_epc = epc; 1089 + err = SIGILL; 1090 + break; 1091 + } 1092 + break; 1093 + 1094 + case beql_op: 1095 + case bnel_op: 1096 + case blezl_op: 1097 + case bgtzl_op: 1098 + if (delay_slot(regs)) { 1099 + err = SIGILL; 1100 + break; 1101 + } 1102 + regs->regs[31] = r31; 1103 + regs->cp0_epc = epc; 1104 + err = __compute_return_epc(regs); 1105 + if (err < 0) 1106 + return SIGEMT; 1107 + if (err != BRANCH_LIKELY_TAKEN) 1108 + break; 1109 + cpc = regs->cp0_epc; 1110 + nepc = epc + 4; 1111 + err = __get_user(nir, (u32 __user *)nepc); 1112 + if (err) { 1113 + err = SIGSEGV; 1114 + break; 1115 + } 1116 + /* 1117 + * This will probably be optimized away when 1118 + * CONFIG_DEBUG_FS is not enabled 1119 + */ 1120 + switch (MIPSInst_OPCODE(inst)) { 1121 + case beql_op: 1122 + MIPS_R2BR_STATS(beql); 1123 + break; 1124 + case bnel_op: 1125 + MIPS_R2BR_STATS(bnel); 1126 + break; 1127 + case blezl_op: 1128 + MIPS_R2BR_STATS(blezl); 1129 + break; 1130 + case bgtzl_op: 1131 + MIPS_R2BR_STATS(bgtzl); 1132 + break; 1133 + } 1134 + 1135 + switch (MIPSInst_OPCODE(nir)) { 1136 + case cop1_op: 1137 + case cop1x_op: 1138 + case lwc1_op: 1139 + case swc1_op: 1140 + regs->cp0_cause |= CAUSEF_BD; 1141 + goto fpu_emul; 1142 + } 1143 + if (nir) { 1144 + err = mipsr6_emul(regs, nir); 1145 + if (err > 0) { 1146 + err = mips_dsemul(regs, nir, cpc); 1147 + if (err == SIGILL) 1148 + err = SIGEMT; 1149 + MIPS_R2_STATS(dsemul); 1150 + } 1151 + } 1152 + break; 1153 + case lwc1_op: 1154 + case swc1_op: 1155 + case cop1_op: 1156 + case cop1x_op: 1157 + fpu_emul: 1158 + regs->regs[31] = r31; 1159 + regs->cp0_epc = epc; 1160 + if (!used_math()) { /* First time FPU user. */ 1161 + err = init_fpu(); 1162 + set_used_math(); 1163 + } 1164 + lose_fpu(1); /* Save FPU state for the emulator. 
*/ 1165 + 1166 + err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0, 1167 + &fault_addr); 1168 + 1169 + /* 1170 + * this is a tricky issue - lose_fpu() uses LL/SC atomics 1171 + * if FPU is owned and effectively cancels user level LL/SC. 1172 + * So, it could be logical to don't restore FPU ownership here. 1173 + * But the sequence of multiple FPU instructions is much much 1174 + * more often than LL-FPU-SC and I prefer loop here until 1175 + * next scheduler cycle cancels FPU ownership 1176 + */ 1177 + own_fpu(1); /* Restore FPU state. */ 1178 + 1179 + if (err) 1180 + current->thread.cp0_baduaddr = (unsigned long)fault_addr; 1181 + 1182 + MIPS_R2_STATS(fpus); 1183 + 1184 + break; 1185 + 1186 + case lwl_op: 1187 + rt = regs->regs[MIPSInst_RT(inst)]; 1188 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1189 + if (!access_ok(VERIFY_READ, vaddr, 4)) { 1190 + current->thread.cp0_baduaddr = vaddr; 1191 + err = SIGSEGV; 1192 + break; 1193 + } 1194 + __asm__ __volatile__( 1195 + " .set push\n" 1196 + " .set reorder\n" 1197 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1198 + "1:" LB "%1, 0(%2)\n" 1199 + INS "%0, %1, 24, 8\n" 1200 + " andi %1, %2, 0x3\n" 1201 + " beq $0, %1, 9f\n" 1202 + ADDIU "%2, %2, -1\n" 1203 + "2:" LB "%1, 0(%2)\n" 1204 + INS "%0, %1, 16, 8\n" 1205 + " andi %1, %2, 0x3\n" 1206 + " beq $0, %1, 9f\n" 1207 + ADDIU "%2, %2, -1\n" 1208 + "3:" LB "%1, 0(%2)\n" 1209 + INS "%0, %1, 8, 8\n" 1210 + " andi %1, %2, 0x3\n" 1211 + " beq $0, %1, 9f\n" 1212 + ADDIU "%2, %2, -1\n" 1213 + "4:" LB "%1, 0(%2)\n" 1214 + INS "%0, %1, 0, 8\n" 1215 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1216 + "1:" LB "%1, 0(%2)\n" 1217 + INS "%0, %1, 24, 8\n" 1218 + ADDIU "%2, %2, 1\n" 1219 + " andi %1, %2, 0x3\n" 1220 + " beq $0, %1, 9f\n" 1221 + "2:" LB "%1, 0(%2)\n" 1222 + INS "%0, %1, 16, 8\n" 1223 + ADDIU "%2, %2, 1\n" 1224 + " andi %1, %2, 0x3\n" 1225 + " beq $0, %1, 9f\n" 1226 + "3:" LB "%1, 0(%2)\n" 1227 + INS "%0, %1, 8, 8\n" 1228 + ADDIU "%2, %2, 1\n" 1229 + " andi 
%1, %2, 0x3\n" 1230 + " beq $0, %1, 9f\n" 1231 + "4:" LB "%1, 0(%2)\n" 1232 + INS "%0, %1, 0, 8\n" 1233 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1234 + "9: sll %0, %0, 0\n" 1235 + "10:\n" 1236 + " .insn\n" 1237 + " .section .fixup,\"ax\"\n" 1238 + "8: li %3,%4\n" 1239 + " j 10b\n" 1240 + " .previous\n" 1241 + " .section __ex_table,\"a\"\n" 1242 + " .word 1b,8b\n" 1243 + " .word 2b,8b\n" 1244 + " .word 3b,8b\n" 1245 + " .word 4b,8b\n" 1246 + " .previous\n" 1247 + " .set pop\n" 1248 + : "+&r"(rt), "=&r"(rs), 1249 + "+&r"(vaddr), "+&r"(err) 1250 + : "i"(SIGSEGV)); 1251 + 1252 + if (MIPSInst_RT(inst) && !err) 1253 + regs->regs[MIPSInst_RT(inst)] = rt; 1254 + 1255 + MIPS_R2_STATS(loads); 1256 + 1257 + break; 1258 + 1259 + case lwr_op: 1260 + rt = regs->regs[MIPSInst_RT(inst)]; 1261 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1262 + if (!access_ok(VERIFY_READ, vaddr, 4)) { 1263 + current->thread.cp0_baduaddr = vaddr; 1264 + err = SIGSEGV; 1265 + break; 1266 + } 1267 + __asm__ __volatile__( 1268 + " .set push\n" 1269 + " .set reorder\n" 1270 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1271 + "1:" LB "%1, 0(%2)\n" 1272 + INS "%0, %1, 0, 8\n" 1273 + ADDIU "%2, %2, 1\n" 1274 + " andi %1, %2, 0x3\n" 1275 + " beq $0, %1, 9f\n" 1276 + "2:" LB "%1, 0(%2)\n" 1277 + INS "%0, %1, 8, 8\n" 1278 + ADDIU "%2, %2, 1\n" 1279 + " andi %1, %2, 0x3\n" 1280 + " beq $0, %1, 9f\n" 1281 + "3:" LB "%1, 0(%2)\n" 1282 + INS "%0, %1, 16, 8\n" 1283 + ADDIU "%2, %2, 1\n" 1284 + " andi %1, %2, 0x3\n" 1285 + " beq $0, %1, 9f\n" 1286 + "4:" LB "%1, 0(%2)\n" 1287 + INS "%0, %1, 24, 8\n" 1288 + " sll %0, %0, 0\n" 1289 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1290 + "1:" LB "%1, 0(%2)\n" 1291 + INS "%0, %1, 0, 8\n" 1292 + " andi %1, %2, 0x3\n" 1293 + " beq $0, %1, 9f\n" 1294 + ADDIU "%2, %2, -1\n" 1295 + "2:" LB "%1, 0(%2)\n" 1296 + INS "%0, %1, 8, 8\n" 1297 + " andi %1, %2, 0x3\n" 1298 + " beq $0, %1, 9f\n" 1299 + ADDIU "%2, %2, -1\n" 1300 + "3:" LB "%1, 0(%2)\n" 1301 + INS "%0, %1, 16, 8\n" 1302 + 
" andi %1, %2, 0x3\n" 1303 + " beq $0, %1, 9f\n" 1304 + ADDIU "%2, %2, -1\n" 1305 + "4:" LB "%1, 0(%2)\n" 1306 + INS "%0, %1, 24, 8\n" 1307 + " sll %0, %0, 0\n" 1308 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1309 + "9:\n" 1310 + "10:\n" 1311 + " .insn\n" 1312 + " .section .fixup,\"ax\"\n" 1313 + "8: li %3,%4\n" 1314 + " j 10b\n" 1315 + " .previous\n" 1316 + " .section __ex_table,\"a\"\n" 1317 + " .word 1b,8b\n" 1318 + " .word 2b,8b\n" 1319 + " .word 3b,8b\n" 1320 + " .word 4b,8b\n" 1321 + " .previous\n" 1322 + " .set pop\n" 1323 + : "+&r"(rt), "=&r"(rs), 1324 + "+&r"(vaddr), "+&r"(err) 1325 + : "i"(SIGSEGV)); 1326 + if (MIPSInst_RT(inst) && !err) 1327 + regs->regs[MIPSInst_RT(inst)] = rt; 1328 + 1329 + MIPS_R2_STATS(loads); 1330 + 1331 + break; 1332 + 1333 + case swl_op: 1334 + rt = regs->regs[MIPSInst_RT(inst)]; 1335 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1336 + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 1337 + current->thread.cp0_baduaddr = vaddr; 1338 + err = SIGSEGV; 1339 + break; 1340 + } 1341 + __asm__ __volatile__( 1342 + " .set push\n" 1343 + " .set reorder\n" 1344 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1345 + EXT "%1, %0, 24, 8\n" 1346 + "1:" SB "%1, 0(%2)\n" 1347 + " andi %1, %2, 0x3\n" 1348 + " beq $0, %1, 9f\n" 1349 + ADDIU "%2, %2, -1\n" 1350 + EXT "%1, %0, 16, 8\n" 1351 + "2:" SB "%1, 0(%2)\n" 1352 + " andi %1, %2, 0x3\n" 1353 + " beq $0, %1, 9f\n" 1354 + ADDIU "%2, %2, -1\n" 1355 + EXT "%1, %0, 8, 8\n" 1356 + "3:" SB "%1, 0(%2)\n" 1357 + " andi %1, %2, 0x3\n" 1358 + " beq $0, %1, 9f\n" 1359 + ADDIU "%2, %2, -1\n" 1360 + EXT "%1, %0, 0, 8\n" 1361 + "4:" SB "%1, 0(%2)\n" 1362 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1363 + EXT "%1, %0, 24, 8\n" 1364 + "1:" SB "%1, 0(%2)\n" 1365 + ADDIU "%2, %2, 1\n" 1366 + " andi %1, %2, 0x3\n" 1367 + " beq $0, %1, 9f\n" 1368 + EXT "%1, %0, 16, 8\n" 1369 + "2:" SB "%1, 0(%2)\n" 1370 + ADDIU "%2, %2, 1\n" 1371 + " andi %1, %2, 0x3\n" 1372 + " beq $0, %1, 9f\n" 1373 + EXT "%1, %0, 8, 8\n" 1374 + "3:" SB 
"%1, 0(%2)\n" 1375 + ADDIU "%2, %2, 1\n" 1376 + " andi %1, %2, 0x3\n" 1377 + " beq $0, %1, 9f\n" 1378 + EXT "%1, %0, 0, 8\n" 1379 + "4:" SB "%1, 0(%2)\n" 1380 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1381 + "9:\n" 1382 + " .insn\n" 1383 + " .section .fixup,\"ax\"\n" 1384 + "8: li %3,%4\n" 1385 + " j 9b\n" 1386 + " .previous\n" 1387 + " .section __ex_table,\"a\"\n" 1388 + " .word 1b,8b\n" 1389 + " .word 2b,8b\n" 1390 + " .word 3b,8b\n" 1391 + " .word 4b,8b\n" 1392 + " .previous\n" 1393 + " .set pop\n" 1394 + : "+&r"(rt), "=&r"(rs), 1395 + "+&r"(vaddr), "+&r"(err) 1396 + : "i"(SIGSEGV) 1397 + : "memory"); 1398 + 1399 + MIPS_R2_STATS(stores); 1400 + 1401 + break; 1402 + 1403 + case swr_op: 1404 + rt = regs->regs[MIPSInst_RT(inst)]; 1405 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1406 + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 1407 + current->thread.cp0_baduaddr = vaddr; 1408 + err = SIGSEGV; 1409 + break; 1410 + } 1411 + __asm__ __volatile__( 1412 + " .set push\n" 1413 + " .set reorder\n" 1414 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1415 + EXT "%1, %0, 0, 8\n" 1416 + "1:" SB "%1, 0(%2)\n" 1417 + ADDIU "%2, %2, 1\n" 1418 + " andi %1, %2, 0x3\n" 1419 + " beq $0, %1, 9f\n" 1420 + EXT "%1, %0, 8, 8\n" 1421 + "2:" SB "%1, 0(%2)\n" 1422 + ADDIU "%2, %2, 1\n" 1423 + " andi %1, %2, 0x3\n" 1424 + " beq $0, %1, 9f\n" 1425 + EXT "%1, %0, 16, 8\n" 1426 + "3:" SB "%1, 0(%2)\n" 1427 + ADDIU "%2, %2, 1\n" 1428 + " andi %1, %2, 0x3\n" 1429 + " beq $0, %1, 9f\n" 1430 + EXT "%1, %0, 24, 8\n" 1431 + "4:" SB "%1, 0(%2)\n" 1432 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1433 + EXT "%1, %0, 0, 8\n" 1434 + "1:" SB "%1, 0(%2)\n" 1435 + " andi %1, %2, 0x3\n" 1436 + " beq $0, %1, 9f\n" 1437 + ADDIU "%2, %2, -1\n" 1438 + EXT "%1, %0, 8, 8\n" 1439 + "2:" SB "%1, 0(%2)\n" 1440 + " andi %1, %2, 0x3\n" 1441 + " beq $0, %1, 9f\n" 1442 + ADDIU "%2, %2, -1\n" 1443 + EXT "%1, %0, 16, 8\n" 1444 + "3:" SB "%1, 0(%2)\n" 1445 + " andi %1, %2, 0x3\n" 1446 + " beq $0, %1, 9f\n" 1447 + ADDIU "%2, 
%2, -1\n" 1448 + EXT "%1, %0, 24, 8\n" 1449 + "4:" SB "%1, 0(%2)\n" 1450 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1451 + "9:\n" 1452 + " .insn\n" 1453 + " .section .fixup,\"ax\"\n" 1454 + "8: li %3,%4\n" 1455 + " j 9b\n" 1456 + " .previous\n" 1457 + " .section __ex_table,\"a\"\n" 1458 + " .word 1b,8b\n" 1459 + " .word 2b,8b\n" 1460 + " .word 3b,8b\n" 1461 + " .word 4b,8b\n" 1462 + " .previous\n" 1463 + " .set pop\n" 1464 + : "+&r"(rt), "=&r"(rs), 1465 + "+&r"(vaddr), "+&r"(err) 1466 + : "i"(SIGSEGV) 1467 + : "memory"); 1468 + 1469 + MIPS_R2_STATS(stores); 1470 + 1471 + break; 1472 + 1473 + case ldl_op: 1474 + if (config_enabled(CONFIG_32BIT)) { 1475 + err = SIGILL; 1476 + break; 1477 + } 1478 + 1479 + rt = regs->regs[MIPSInst_RT(inst)]; 1480 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1481 + if (!access_ok(VERIFY_READ, vaddr, 8)) { 1482 + current->thread.cp0_baduaddr = vaddr; 1483 + err = SIGSEGV; 1484 + break; 1485 + } 1486 + __asm__ __volatile__( 1487 + " .set push\n" 1488 + " .set reorder\n" 1489 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1490 + "1: lb %1, 0(%2)\n" 1491 + " dinsu %0, %1, 56, 8\n" 1492 + " andi %1, %2, 0x7\n" 1493 + " beq $0, %1, 9f\n" 1494 + " daddiu %2, %2, -1\n" 1495 + "2: lb %1, 0(%2)\n" 1496 + " dinsu %0, %1, 48, 8\n" 1497 + " andi %1, %2, 0x7\n" 1498 + " beq $0, %1, 9f\n" 1499 + " daddiu %2, %2, -1\n" 1500 + "3: lb %1, 0(%2)\n" 1501 + " dinsu %0, %1, 40, 8\n" 1502 + " andi %1, %2, 0x7\n" 1503 + " beq $0, %1, 9f\n" 1504 + " daddiu %2, %2, -1\n" 1505 + "4: lb %1, 0(%2)\n" 1506 + " dinsu %0, %1, 32, 8\n" 1507 + " andi %1, %2, 0x7\n" 1508 + " beq $0, %1, 9f\n" 1509 + " daddiu %2, %2, -1\n" 1510 + "5: lb %1, 0(%2)\n" 1511 + " dins %0, %1, 24, 8\n" 1512 + " andi %1, %2, 0x7\n" 1513 + " beq $0, %1, 9f\n" 1514 + " daddiu %2, %2, -1\n" 1515 + "6: lb %1, 0(%2)\n" 1516 + " dins %0, %1, 16, 8\n" 1517 + " andi %1, %2, 0x7\n" 1518 + " beq $0, %1, 9f\n" 1519 + " daddiu %2, %2, -1\n" 1520 + "7: lb %1, 0(%2)\n" 1521 + " dins %0, %1, 8, 8\n" 1522 + 
" andi %1, %2, 0x7\n" 1523 + " beq $0, %1, 9f\n" 1524 + " daddiu %2, %2, -1\n" 1525 + "0: lb %1, 0(%2)\n" 1526 + " dins %0, %1, 0, 8\n" 1527 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1528 + "1: lb %1, 0(%2)\n" 1529 + " dinsu %0, %1, 56, 8\n" 1530 + " daddiu %2, %2, 1\n" 1531 + " andi %1, %2, 0x7\n" 1532 + " beq $0, %1, 9f\n" 1533 + "2: lb %1, 0(%2)\n" 1534 + " dinsu %0, %1, 48, 8\n" 1535 + " daddiu %2, %2, 1\n" 1536 + " andi %1, %2, 0x7\n" 1537 + " beq $0, %1, 9f\n" 1538 + "3: lb %1, 0(%2)\n" 1539 + " dinsu %0, %1, 40, 8\n" 1540 + " daddiu %2, %2, 1\n" 1541 + " andi %1, %2, 0x7\n" 1542 + " beq $0, %1, 9f\n" 1543 + "4: lb %1, 0(%2)\n" 1544 + " dinsu %0, %1, 32, 8\n" 1545 + " daddiu %2, %2, 1\n" 1546 + " andi %1, %2, 0x7\n" 1547 + " beq $0, %1, 9f\n" 1548 + "5: lb %1, 0(%2)\n" 1549 + " dins %0, %1, 24, 8\n" 1550 + " daddiu %2, %2, 1\n" 1551 + " andi %1, %2, 0x7\n" 1552 + " beq $0, %1, 9f\n" 1553 + "6: lb %1, 0(%2)\n" 1554 + " dins %0, %1, 16, 8\n" 1555 + " daddiu %2, %2, 1\n" 1556 + " andi %1, %2, 0x7\n" 1557 + " beq $0, %1, 9f\n" 1558 + "7: lb %1, 0(%2)\n" 1559 + " dins %0, %1, 8, 8\n" 1560 + " daddiu %2, %2, 1\n" 1561 + " andi %1, %2, 0x7\n" 1562 + " beq $0, %1, 9f\n" 1563 + "0: lb %1, 0(%2)\n" 1564 + " dins %0, %1, 0, 8\n" 1565 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1566 + "9:\n" 1567 + " .insn\n" 1568 + " .section .fixup,\"ax\"\n" 1569 + "8: li %3,%4\n" 1570 + " j 9b\n" 1571 + " .previous\n" 1572 + " .section __ex_table,\"a\"\n" 1573 + " .word 1b,8b\n" 1574 + " .word 2b,8b\n" 1575 + " .word 3b,8b\n" 1576 + " .word 4b,8b\n" 1577 + " .word 5b,8b\n" 1578 + " .word 6b,8b\n" 1579 + " .word 7b,8b\n" 1580 + " .word 0b,8b\n" 1581 + " .previous\n" 1582 + " .set pop\n" 1583 + : "+&r"(rt), "=&r"(rs), 1584 + "+&r"(vaddr), "+&r"(err) 1585 + : "i"(SIGSEGV)); 1586 + if (MIPSInst_RT(inst) && !err) 1587 + regs->regs[MIPSInst_RT(inst)] = rt; 1588 + 1589 + MIPS_R2_STATS(loads); 1590 + break; 1591 + 1592 + case ldr_op: 1593 + if (config_enabled(CONFIG_32BIT)) { 1594 + err = SIGILL; 
1595 + break; 1596 + } 1597 + 1598 + rt = regs->regs[MIPSInst_RT(inst)]; 1599 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1600 + if (!access_ok(VERIFY_READ, vaddr, 8)) { 1601 + current->thread.cp0_baduaddr = vaddr; 1602 + err = SIGSEGV; 1603 + break; 1604 + } 1605 + __asm__ __volatile__( 1606 + " .set push\n" 1607 + " .set reorder\n" 1608 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1609 + "1: lb %1, 0(%2)\n" 1610 + " dins %0, %1, 0, 8\n" 1611 + " daddiu %2, %2, 1\n" 1612 + " andi %1, %2, 0x7\n" 1613 + " beq $0, %1, 9f\n" 1614 + "2: lb %1, 0(%2)\n" 1615 + " dins %0, %1, 8, 8\n" 1616 + " daddiu %2, %2, 1\n" 1617 + " andi %1, %2, 0x7\n" 1618 + " beq $0, %1, 9f\n" 1619 + "3: lb %1, 0(%2)\n" 1620 + " dins %0, %1, 16, 8\n" 1621 + " daddiu %2, %2, 1\n" 1622 + " andi %1, %2, 0x7\n" 1623 + " beq $0, %1, 9f\n" 1624 + "4: lb %1, 0(%2)\n" 1625 + " dins %0, %1, 24, 8\n" 1626 + " daddiu %2, %2, 1\n" 1627 + " andi %1, %2, 0x7\n" 1628 + " beq $0, %1, 9f\n" 1629 + "5: lb %1, 0(%2)\n" 1630 + " dinsu %0, %1, 32, 8\n" 1631 + " daddiu %2, %2, 1\n" 1632 + " andi %1, %2, 0x7\n" 1633 + " beq $0, %1, 9f\n" 1634 + "6: lb %1, 0(%2)\n" 1635 + " dinsu %0, %1, 40, 8\n" 1636 + " daddiu %2, %2, 1\n" 1637 + " andi %1, %2, 0x7\n" 1638 + " beq $0, %1, 9f\n" 1639 + "7: lb %1, 0(%2)\n" 1640 + " dinsu %0, %1, 48, 8\n" 1641 + " daddiu %2, %2, 1\n" 1642 + " andi %1, %2, 0x7\n" 1643 + " beq $0, %1, 9f\n" 1644 + "0: lb %1, 0(%2)\n" 1645 + " dinsu %0, %1, 56, 8\n" 1646 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1647 + "1: lb %1, 0(%2)\n" 1648 + " dins %0, %1, 0, 8\n" 1649 + " andi %1, %2, 0x7\n" 1650 + " beq $0, %1, 9f\n" 1651 + " daddiu %2, %2, -1\n" 1652 + "2: lb %1, 0(%2)\n" 1653 + " dins %0, %1, 8, 8\n" 1654 + " andi %1, %2, 0x7\n" 1655 + " beq $0, %1, 9f\n" 1656 + " daddiu %2, %2, -1\n" 1657 + "3: lb %1, 0(%2)\n" 1658 + " dins %0, %1, 16, 8\n" 1659 + " andi %1, %2, 0x7\n" 1660 + " beq $0, %1, 9f\n" 1661 + " daddiu %2, %2, -1\n" 1662 + "4: lb %1, 0(%2)\n" 1663 + " dins %0, %1, 24, 8\n" 1664 + " 
andi %1, %2, 0x7\n" 1665 + " beq $0, %1, 9f\n" 1666 + " daddiu %2, %2, -1\n" 1667 + "5: lb %1, 0(%2)\n" 1668 + " dinsu %0, %1, 32, 8\n" 1669 + " andi %1, %2, 0x7\n" 1670 + " beq $0, %1, 9f\n" 1671 + " daddiu %2, %2, -1\n" 1672 + "6: lb %1, 0(%2)\n" 1673 + " dinsu %0, %1, 40, 8\n" 1674 + " andi %1, %2, 0x7\n" 1675 + " beq $0, %1, 9f\n" 1676 + " daddiu %2, %2, -1\n" 1677 + "7: lb %1, 0(%2)\n" 1678 + " dinsu %0, %1, 48, 8\n" 1679 + " andi %1, %2, 0x7\n" 1680 + " beq $0, %1, 9f\n" 1681 + " daddiu %2, %2, -1\n" 1682 + "0: lb %1, 0(%2)\n" 1683 + " dinsu %0, %1, 56, 8\n" 1684 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1685 + "9:\n" 1686 + " .insn\n" 1687 + " .section .fixup,\"ax\"\n" 1688 + "8: li %3,%4\n" 1689 + " j 9b\n" 1690 + " .previous\n" 1691 + " .section __ex_table,\"a\"\n" 1692 + " .word 1b,8b\n" 1693 + " .word 2b,8b\n" 1694 + " .word 3b,8b\n" 1695 + " .word 4b,8b\n" 1696 + " .word 5b,8b\n" 1697 + " .word 6b,8b\n" 1698 + " .word 7b,8b\n" 1699 + " .word 0b,8b\n" 1700 + " .previous\n" 1701 + " .set pop\n" 1702 + : "+&r"(rt), "=&r"(rs), 1703 + "+&r"(vaddr), "+&r"(err) 1704 + : "i"(SIGSEGV)); 1705 + if (MIPSInst_RT(inst) && !err) 1706 + regs->regs[MIPSInst_RT(inst)] = rt; 1707 + 1708 + MIPS_R2_STATS(loads); 1709 + break; 1710 + 1711 + case sdl_op: 1712 + if (config_enabled(CONFIG_32BIT)) { 1713 + err = SIGILL; 1714 + break; 1715 + } 1716 + 1717 + rt = regs->regs[MIPSInst_RT(inst)]; 1718 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1719 + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 1720 + current->thread.cp0_baduaddr = vaddr; 1721 + err = SIGSEGV; 1722 + break; 1723 + } 1724 + __asm__ __volatile__( 1725 + " .set push\n" 1726 + " .set reorder\n" 1727 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1728 + " dextu %1, %0, 56, 8\n" 1729 + "1: sb %1, 0(%2)\n" 1730 + " andi %1, %2, 0x7\n" 1731 + " beq $0, %1, 9f\n" 1732 + " daddiu %2, %2, -1\n" 1733 + " dextu %1, %0, 48, 8\n" 1734 + "2: sb %1, 0(%2)\n" 1735 + " andi %1, %2, 0x7\n" 1736 + " beq $0, %1, 9f\n" 1737 + " daddiu 
%2, %2, -1\n" 1738 + " dextu %1, %0, 40, 8\n" 1739 + "3: sb %1, 0(%2)\n" 1740 + " andi %1, %2, 0x7\n" 1741 + " beq $0, %1, 9f\n" 1742 + " daddiu %2, %2, -1\n" 1743 + " dextu %1, %0, 32, 8\n" 1744 + "4: sb %1, 0(%2)\n" 1745 + " andi %1, %2, 0x7\n" 1746 + " beq $0, %1, 9f\n" 1747 + " daddiu %2, %2, -1\n" 1748 + " dext %1, %0, 24, 8\n" 1749 + "5: sb %1, 0(%2)\n" 1750 + " andi %1, %2, 0x7\n" 1751 + " beq $0, %1, 9f\n" 1752 + " daddiu %2, %2, -1\n" 1753 + " dext %1, %0, 16, 8\n" 1754 + "6: sb %1, 0(%2)\n" 1755 + " andi %1, %2, 0x7\n" 1756 + " beq $0, %1, 9f\n" 1757 + " daddiu %2, %2, -1\n" 1758 + " dext %1, %0, 8, 8\n" 1759 + "7: sb %1, 0(%2)\n" 1760 + " andi %1, %2, 0x7\n" 1761 + " beq $0, %1, 9f\n" 1762 + " daddiu %2, %2, -1\n" 1763 + " dext %1, %0, 0, 8\n" 1764 + "0: sb %1, 0(%2)\n" 1765 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1766 + " dextu %1, %0, 56, 8\n" 1767 + "1: sb %1, 0(%2)\n" 1768 + " daddiu %2, %2, 1\n" 1769 + " andi %1, %2, 0x7\n" 1770 + " beq $0, %1, 9f\n" 1771 + " dextu %1, %0, 48, 8\n" 1772 + "2: sb %1, 0(%2)\n" 1773 + " daddiu %2, %2, 1\n" 1774 + " andi %1, %2, 0x7\n" 1775 + " beq $0, %1, 9f\n" 1776 + " dextu %1, %0, 40, 8\n" 1777 + "3: sb %1, 0(%2)\n" 1778 + " daddiu %2, %2, 1\n" 1779 + " andi %1, %2, 0x7\n" 1780 + " beq $0, %1, 9f\n" 1781 + " dextu %1, %0, 32, 8\n" 1782 + "4: sb %1, 0(%2)\n" 1783 + " daddiu %2, %2, 1\n" 1784 + " andi %1, %2, 0x7\n" 1785 + " beq $0, %1, 9f\n" 1786 + " dext %1, %0, 24, 8\n" 1787 + "5: sb %1, 0(%2)\n" 1788 + " daddiu %2, %2, 1\n" 1789 + " andi %1, %2, 0x7\n" 1790 + " beq $0, %1, 9f\n" 1791 + " dext %1, %0, 16, 8\n" 1792 + "6: sb %1, 0(%2)\n" 1793 + " daddiu %2, %2, 1\n" 1794 + " andi %1, %2, 0x7\n" 1795 + " beq $0, %1, 9f\n" 1796 + " dext %1, %0, 8, 8\n" 1797 + "7: sb %1, 0(%2)\n" 1798 + " daddiu %2, %2, 1\n" 1799 + " andi %1, %2, 0x7\n" 1800 + " beq $0, %1, 9f\n" 1801 + " dext %1, %0, 0, 8\n" 1802 + "0: sb %1, 0(%2)\n" 1803 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1804 + "9:\n" 1805 + " .insn\n" 1806 + " .section 
.fixup,\"ax\"\n" 1807 + "8: li %3,%4\n" 1808 + " j 9b\n" 1809 + " .previous\n" 1810 + " .section __ex_table,\"a\"\n" 1811 + " .word 1b,8b\n" 1812 + " .word 2b,8b\n" 1813 + " .word 3b,8b\n" 1814 + " .word 4b,8b\n" 1815 + " .word 5b,8b\n" 1816 + " .word 6b,8b\n" 1817 + " .word 7b,8b\n" 1818 + " .word 0b,8b\n" 1819 + " .previous\n" 1820 + " .set pop\n" 1821 + : "+&r"(rt), "=&r"(rs), 1822 + "+&r"(vaddr), "+&r"(err) 1823 + : "i"(SIGSEGV) 1824 + : "memory"); 1825 + 1826 + MIPS_R2_STATS(stores); 1827 + break; 1828 + 1829 + case sdr_op: 1830 + if (config_enabled(CONFIG_32BIT)) { 1831 + err = SIGILL; 1832 + break; 1833 + } 1834 + 1835 + rt = regs->regs[MIPSInst_RT(inst)]; 1836 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1837 + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 1838 + current->thread.cp0_baduaddr = vaddr; 1839 + err = SIGSEGV; 1840 + break; 1841 + } 1842 + __asm__ __volatile__( 1843 + " .set push\n" 1844 + " .set reorder\n" 1845 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1846 + " dext %1, %0, 0, 8\n" 1847 + "1: sb %1, 0(%2)\n" 1848 + " daddiu %2, %2, 1\n" 1849 + " andi %1, %2, 0x7\n" 1850 + " beq $0, %1, 9f\n" 1851 + " dext %1, %0, 8, 8\n" 1852 + "2: sb %1, 0(%2)\n" 1853 + " daddiu %2, %2, 1\n" 1854 + " andi %1, %2, 0x7\n" 1855 + " beq $0, %1, 9f\n" 1856 + " dext %1, %0, 16, 8\n" 1857 + "3: sb %1, 0(%2)\n" 1858 + " daddiu %2, %2, 1\n" 1859 + " andi %1, %2, 0x7\n" 1860 + " beq $0, %1, 9f\n" 1861 + " dext %1, %0, 24, 8\n" 1862 + "4: sb %1, 0(%2)\n" 1863 + " daddiu %2, %2, 1\n" 1864 + " andi %1, %2, 0x7\n" 1865 + " beq $0, %1, 9f\n" 1866 + " dextu %1, %0, 32, 8\n" 1867 + "5: sb %1, 0(%2)\n" 1868 + " daddiu %2, %2, 1\n" 1869 + " andi %1, %2, 0x7\n" 1870 + " beq $0, %1, 9f\n" 1871 + " dextu %1, %0, 40, 8\n" 1872 + "6: sb %1, 0(%2)\n" 1873 + " daddiu %2, %2, 1\n" 1874 + " andi %1, %2, 0x7\n" 1875 + " beq $0, %1, 9f\n" 1876 + " dextu %1, %0, 48, 8\n" 1877 + "7: sb %1, 0(%2)\n" 1878 + " daddiu %2, %2, 1\n" 1879 + " andi %1, %2, 0x7\n" 1880 + " beq $0, %1, 9f\n" 
1881 + " dextu %1, %0, 56, 8\n" 1882 + "0: sb %1, 0(%2)\n" 1883 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1884 + " dext %1, %0, 0, 8\n" 1885 + "1: sb %1, 0(%2)\n" 1886 + " andi %1, %2, 0x7\n" 1887 + " beq $0, %1, 9f\n" 1888 + " daddiu %2, %2, -1\n" 1889 + " dext %1, %0, 8, 8\n" 1890 + "2: sb %1, 0(%2)\n" 1891 + " andi %1, %2, 0x7\n" 1892 + " beq $0, %1, 9f\n" 1893 + " daddiu %2, %2, -1\n" 1894 + " dext %1, %0, 16, 8\n" 1895 + "3: sb %1, 0(%2)\n" 1896 + " andi %1, %2, 0x7\n" 1897 + " beq $0, %1, 9f\n" 1898 + " daddiu %2, %2, -1\n" 1899 + " dext %1, %0, 24, 8\n" 1900 + "4: sb %1, 0(%2)\n" 1901 + " andi %1, %2, 0x7\n" 1902 + " beq $0, %1, 9f\n" 1903 + " daddiu %2, %2, -1\n" 1904 + " dextu %1, %0, 32, 8\n" 1905 + "5: sb %1, 0(%2)\n" 1906 + " andi %1, %2, 0x7\n" 1907 + " beq $0, %1, 9f\n" 1908 + " daddiu %2, %2, -1\n" 1909 + " dextu %1, %0, 40, 8\n" 1910 + "6: sb %1, 0(%2)\n" 1911 + " andi %1, %2, 0x7\n" 1912 + " beq $0, %1, 9f\n" 1913 + " daddiu %2, %2, -1\n" 1914 + " dextu %1, %0, 48, 8\n" 1915 + "7: sb %1, 0(%2)\n" 1916 + " andi %1, %2, 0x7\n" 1917 + " beq $0, %1, 9f\n" 1918 + " daddiu %2, %2, -1\n" 1919 + " dextu %1, %0, 56, 8\n" 1920 + "0: sb %1, 0(%2)\n" 1921 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1922 + "9:\n" 1923 + " .insn\n" 1924 + " .section .fixup,\"ax\"\n" 1925 + "8: li %3,%4\n" 1926 + " j 9b\n" 1927 + " .previous\n" 1928 + " .section __ex_table,\"a\"\n" 1929 + " .word 1b,8b\n" 1930 + " .word 2b,8b\n" 1931 + " .word 3b,8b\n" 1932 + " .word 4b,8b\n" 1933 + " .word 5b,8b\n" 1934 + " .word 6b,8b\n" 1935 + " .word 7b,8b\n" 1936 + " .word 0b,8b\n" 1937 + " .previous\n" 1938 + " .set pop\n" 1939 + : "+&r"(rt), "=&r"(rs), 1940 + "+&r"(vaddr), "+&r"(err) 1941 + : "i"(SIGSEGV) 1942 + : "memory"); 1943 + 1944 + MIPS_R2_STATS(stores); 1945 + 1946 + break; 1947 + case ll_op: 1948 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1949 + if (vaddr & 0x3) { 1950 + current->thread.cp0_baduaddr = vaddr; 1951 + err = SIGBUS; 1952 + break; 1953 + } 1954 + if 
(!access_ok(VERIFY_READ, vaddr, 4)) { 1955 + current->thread.cp0_baduaddr = vaddr; 1956 + err = SIGBUS; 1957 + break; 1958 + } 1959 + 1960 + if (!cpu_has_rw_llb) { 1961 + /* 1962 + * An LL/SC block can't be safely emulated without 1963 + * a Config5/LLB availability. So it's probably time to 1964 + * kill our process before things get any worse. This is 1965 + * because Config5/LLB allows us to use ERETNC so that 1966 + * the LLAddr/LLB bit is not cleared when we return from 1967 + * an exception. MIPS R2 LL/SC instructions trap with an 1968 + * RI exception so once we emulate them here, we return 1969 + * back to userland with ERETNC. That preserves the 1970 + * LLAddr/LLB so the subsequent SC instruction will 1971 + * succeed preserving the atomic semantics of the LL/SC 1972 + * block. Without that, there is no safe way to emulate 1973 + * an LL/SC block in MIPSR2 userland. 1974 + */ 1975 + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 1976 + err = SIGKILL; 1977 + break; 1978 + } 1979 + 1980 + __asm__ __volatile__( 1981 + "1:\n" 1982 + "ll %0, 0(%2)\n" 1983 + "2:\n" 1984 + ".insn\n" 1985 + ".section .fixup,\"ax\"\n" 1986 + "3:\n" 1987 + "li %1, %3\n" 1988 + "j 2b\n" 1989 + ".previous\n" 1990 + ".section __ex_table,\"a\"\n" 1991 + ".word 1b, 3b\n" 1992 + ".previous\n" 1993 + : "=&r"(res), "+&r"(err) 1994 + : "r"(vaddr), "i"(SIGSEGV) 1995 + : "memory"); 1996 + 1997 + if (MIPSInst_RT(inst) && !err) 1998 + regs->regs[MIPSInst_RT(inst)] = res; 1999 + MIPS_R2_STATS(llsc); 2000 + 2001 + break; 2002 + 2003 + case sc_op: 2004 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2005 + if (vaddr & 0x3) { 2006 + current->thread.cp0_baduaddr = vaddr; 2007 + err = SIGBUS; 2008 + break; 2009 + } 2010 + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 2011 + current->thread.cp0_baduaddr = vaddr; 2012 + err = SIGBUS; 2013 + break; 2014 + } 2015 + 2016 + if (!cpu_has_rw_llb) { 2017 + /* 2018 + * An LL/SC block can't be safely emulated without 2019 + * a 
Config5/LLB availability. So it's probably time to 2020 + * kill our process before things get any worse. This is 2021 + * because Config5/LLB allows us to use ERETNC so that 2022 + * the LLAddr/LLB bit is not cleared when we return from 2023 + * an exception. MIPS R2 LL/SC instructions trap with an 2024 + * RI exception so once we emulate them here, we return 2025 + * back to userland with ERETNC. That preserves the 2026 + * LLAddr/LLB so the subsequent SC instruction will 2027 + * succeed preserving the atomic semantics of the LL/SC 2028 + * block. Without that, there is no safe way to emulate 2029 + * an LL/SC block in MIPSR2 userland. 2030 + */ 2031 + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 2032 + err = SIGKILL; 2033 + break; 2034 + } 2035 + 2036 + res = regs->regs[MIPSInst_RT(inst)]; 2037 + 2038 + __asm__ __volatile__( 2039 + "1:\n" 2040 + "sc %0, 0(%2)\n" 2041 + "2:\n" 2042 + ".insn\n" 2043 + ".section .fixup,\"ax\"\n" 2044 + "3:\n" 2045 + "li %1, %3\n" 2046 + "j 2b\n" 2047 + ".previous\n" 2048 + ".section __ex_table,\"a\"\n" 2049 + ".word 1b, 3b\n" 2050 + ".previous\n" 2051 + : "+&r"(res), "+&r"(err) 2052 + : "r"(vaddr), "i"(SIGSEGV)); 2053 + 2054 + if (MIPSInst_RT(inst) && !err) 2055 + regs->regs[MIPSInst_RT(inst)] = res; 2056 + 2057 + MIPS_R2_STATS(llsc); 2058 + 2059 + break; 2060 + 2061 + case lld_op: 2062 + if (config_enabled(CONFIG_32BIT)) { 2063 + err = SIGILL; 2064 + break; 2065 + } 2066 + 2067 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2068 + if (vaddr & 0x7) { 2069 + current->thread.cp0_baduaddr = vaddr; 2070 + err = SIGBUS; 2071 + break; 2072 + } 2073 + if (!access_ok(VERIFY_READ, vaddr, 8)) { 2074 + current->thread.cp0_baduaddr = vaddr; 2075 + err = SIGBUS; 2076 + break; 2077 + } 2078 + 2079 + if (!cpu_has_rw_llb) { 2080 + /* 2081 + * An LL/SC block can't be safely emulated without 2082 + * a Config5/LLB availability. So it's probably time to 2083 + * kill our process before things get any worse. 
This is 2084 + * because Config5/LLB allows us to use ERETNC so that 2085 + * the LLAddr/LLB bit is not cleared when we return from 2086 + * an exception. MIPS R2 LL/SC instructions trap with an 2087 + * RI exception so once we emulate them here, we return 2088 + * back to userland with ERETNC. That preserves the 2089 + * LLAddr/LLB so the subsequent SC instruction will 2090 + * succeed preserving the atomic semantics of the LL/SC 2091 + * block. Without that, there is no safe way to emulate 2092 + * an LL/SC block in MIPSR2 userland. 2093 + */ 2094 + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 2095 + err = SIGKILL; 2096 + break; 2097 + } 2098 + 2099 + __asm__ __volatile__( 2100 + "1:\n" 2101 + "lld %0, 0(%2)\n" 2102 + "2:\n" 2103 + ".insn\n" 2104 + ".section .fixup,\"ax\"\n" 2105 + "3:\n" 2106 + "li %1, %3\n" 2107 + "j 2b\n" 2108 + ".previous\n" 2109 + ".section __ex_table,\"a\"\n" 2110 + ".word 1b, 3b\n" 2111 + ".previous\n" 2112 + : "=&r"(res), "+&r"(err) 2113 + : "r"(vaddr), "i"(SIGSEGV) 2114 + : "memory"); 2115 + if (MIPSInst_RT(inst) && !err) 2116 + regs->regs[MIPSInst_RT(inst)] = res; 2117 + 2118 + MIPS_R2_STATS(llsc); 2119 + 2120 + break; 2121 + 2122 + case scd_op: 2123 + if (config_enabled(CONFIG_32BIT)) { 2124 + err = SIGILL; 2125 + break; 2126 + } 2127 + 2128 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2129 + if (vaddr & 0x7) { 2130 + current->thread.cp0_baduaddr = vaddr; 2131 + err = SIGBUS; 2132 + break; 2133 + } 2134 + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 2135 + current->thread.cp0_baduaddr = vaddr; 2136 + err = SIGBUS; 2137 + break; 2138 + } 2139 + 2140 + if (!cpu_has_rw_llb) { 2141 + /* 2142 + * An LL/SC block can't be safely emulated without 2143 + * a Config5/LLB availability. So it's probably time to 2144 + * kill our process before things get any worse. This is 2145 + * because Config5/LLB allows us to use ERETNC so that 2146 + * the LLAddr/LLB bit is not cleared when we return from 2147 + * an exception. 
MIPS R2 LL/SC instructions trap with an 2148 + * RI exception so once we emulate them here, we return 2149 + * back to userland with ERETNC. That preserves the 2150 + * LLAddr/LLB so the subsequent SC instruction will 2151 + * succeed preserving the atomic semantics of the LL/SC 2152 + * block. Without that, there is no safe way to emulate 2153 + * an LL/SC block in MIPSR2 userland. 2154 + */ 2155 + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 2156 + err = SIGKILL; 2157 + break; 2158 + } 2159 + 2160 + res = regs->regs[MIPSInst_RT(inst)]; 2161 + 2162 + __asm__ __volatile__( 2163 + "1:\n" 2164 + "scd %0, 0(%2)\n" 2165 + "2:\n" 2166 + ".insn\n" 2167 + ".section .fixup,\"ax\"\n" 2168 + "3:\n" 2169 + "li %1, %3\n" 2170 + "j 2b\n" 2171 + ".previous\n" 2172 + ".section __ex_table,\"a\"\n" 2173 + ".word 1b, 3b\n" 2174 + ".previous\n" 2175 + : "+&r"(res), "+&r"(err) 2176 + : "r"(vaddr), "i"(SIGSEGV)); 2177 + 2178 + if (MIPSInst_RT(inst) && !err) 2179 + regs->regs[MIPSInst_RT(inst)] = res; 2180 + 2181 + MIPS_R2_STATS(llsc); 2182 + 2183 + break; 2184 + case pref_op: 2185 + /* skip it */ 2186 + break; 2187 + default: 2188 + err = SIGILL; 2189 + } 2190 + 2191 + /* 2192 + * Lets not return to userland just yet. 
It's constly and 2193 + * it's likely we have more R2 instructions to emulate 2194 + */ 2195 + if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) { 2196 + regs->cp0_cause &= ~CAUSEF_BD; 2197 + err = get_user(inst, (u32 __user *)regs->cp0_epc); 2198 + if (!err) 2199 + goto repeat; 2200 + 2201 + if (err < 0) 2202 + err = SIGSEGV; 2203 + } 2204 + 2205 + if (err && (err != SIGEMT)) { 2206 + regs->regs[31] = r31; 2207 + regs->cp0_epc = epc; 2208 + } 2209 + 2210 + /* Likely a MIPS R6 compatible instruction */ 2211 + if (pass && (err == SIGILL)) 2212 + err = 0; 2213 + 2214 + return err; 2215 + } 2216 + 2217 + #ifdef CONFIG_DEBUG_FS 2218 + 2219 + static int mipsr2_stats_show(struct seq_file *s, void *unused) 2220 + { 2221 + 2222 + seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n"); 2223 + seq_printf(s, "movs\t\t%ld\t%ld\n", 2224 + (unsigned long)__this_cpu_read(mipsr2emustats.movs), 2225 + (unsigned long)__this_cpu_read(mipsr2bdemustats.movs)); 2226 + seq_printf(s, "hilo\t\t%ld\t%ld\n", 2227 + (unsigned long)__this_cpu_read(mipsr2emustats.hilo), 2228 + (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo)); 2229 + seq_printf(s, "muls\t\t%ld\t%ld\n", 2230 + (unsigned long)__this_cpu_read(mipsr2emustats.muls), 2231 + (unsigned long)__this_cpu_read(mipsr2bdemustats.muls)); 2232 + seq_printf(s, "divs\t\t%ld\t%ld\n", 2233 + (unsigned long)__this_cpu_read(mipsr2emustats.divs), 2234 + (unsigned long)__this_cpu_read(mipsr2bdemustats.divs)); 2235 + seq_printf(s, "dsps\t\t%ld\t%ld\n", 2236 + (unsigned long)__this_cpu_read(mipsr2emustats.dsps), 2237 + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps)); 2238 + seq_printf(s, "bops\t\t%ld\t%ld\n", 2239 + (unsigned long)__this_cpu_read(mipsr2emustats.bops), 2240 + (unsigned long)__this_cpu_read(mipsr2bdemustats.bops)); 2241 + seq_printf(s, "traps\t\t%ld\t%ld\n", 2242 + (unsigned long)__this_cpu_read(mipsr2emustats.traps), 2243 + (unsigned long)__this_cpu_read(mipsr2bdemustats.traps)); 2244 + seq_printf(s, 
"fpus\t\t%ld\t%ld\n", 2245 + (unsigned long)__this_cpu_read(mipsr2emustats.fpus), 2246 + (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus)); 2247 + seq_printf(s, "loads\t\t%ld\t%ld\n", 2248 + (unsigned long)__this_cpu_read(mipsr2emustats.loads), 2249 + (unsigned long)__this_cpu_read(mipsr2bdemustats.loads)); 2250 + seq_printf(s, "stores\t\t%ld\t%ld\n", 2251 + (unsigned long)__this_cpu_read(mipsr2emustats.stores), 2252 + (unsigned long)__this_cpu_read(mipsr2bdemustats.stores)); 2253 + seq_printf(s, "llsc\t\t%ld\t%ld\n", 2254 + (unsigned long)__this_cpu_read(mipsr2emustats.llsc), 2255 + (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc)); 2256 + seq_printf(s, "dsemul\t\t%ld\t%ld\n", 2257 + (unsigned long)__this_cpu_read(mipsr2emustats.dsemul), 2258 + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul)); 2259 + seq_printf(s, "jr\t\t%ld\n", 2260 + (unsigned long)__this_cpu_read(mipsr2bremustats.jrs)); 2261 + seq_printf(s, "bltzl\t\t%ld\n", 2262 + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl)); 2263 + seq_printf(s, "bgezl\t\t%ld\n", 2264 + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl)); 2265 + seq_printf(s, "bltzll\t\t%ld\n", 2266 + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll)); 2267 + seq_printf(s, "bgezll\t\t%ld\n", 2268 + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll)); 2269 + seq_printf(s, "bltzal\t\t%ld\n", 2270 + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal)); 2271 + seq_printf(s, "bgezal\t\t%ld\n", 2272 + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal)); 2273 + seq_printf(s, "beql\t\t%ld\n", 2274 + (unsigned long)__this_cpu_read(mipsr2bremustats.beql)); 2275 + seq_printf(s, "bnel\t\t%ld\n", 2276 + (unsigned long)__this_cpu_read(mipsr2bremustats.bnel)); 2277 + seq_printf(s, "blezl\t\t%ld\n", 2278 + (unsigned long)__this_cpu_read(mipsr2bremustats.blezl)); 2279 + seq_printf(s, "bgtzl\t\t%ld\n", 2280 + (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl)); 2281 + 2282 + return 0; 2283 + } 2284 + 
2285 + static int mipsr2_stats_clear_show(struct seq_file *s, void *unused) 2286 + { 2287 + mipsr2_stats_show(s, unused); 2288 + 2289 + __this_cpu_write((mipsr2emustats).movs, 0); 2290 + __this_cpu_write((mipsr2bdemustats).movs, 0); 2291 + __this_cpu_write((mipsr2emustats).hilo, 0); 2292 + __this_cpu_write((mipsr2bdemustats).hilo, 0); 2293 + __this_cpu_write((mipsr2emustats).muls, 0); 2294 + __this_cpu_write((mipsr2bdemustats).muls, 0); 2295 + __this_cpu_write((mipsr2emustats).divs, 0); 2296 + __this_cpu_write((mipsr2bdemustats).divs, 0); 2297 + __this_cpu_write((mipsr2emustats).dsps, 0); 2298 + __this_cpu_write((mipsr2bdemustats).dsps, 0); 2299 + __this_cpu_write((mipsr2emustats).bops, 0); 2300 + __this_cpu_write((mipsr2bdemustats).bops, 0); 2301 + __this_cpu_write((mipsr2emustats).traps, 0); 2302 + __this_cpu_write((mipsr2bdemustats).traps, 0); 2303 + __this_cpu_write((mipsr2emustats).fpus, 0); 2304 + __this_cpu_write((mipsr2bdemustats).fpus, 0); 2305 + __this_cpu_write((mipsr2emustats).loads, 0); 2306 + __this_cpu_write((mipsr2bdemustats).loads, 0); 2307 + __this_cpu_write((mipsr2emustats).stores, 0); 2308 + __this_cpu_write((mipsr2bdemustats).stores, 0); 2309 + __this_cpu_write((mipsr2emustats).llsc, 0); 2310 + __this_cpu_write((mipsr2bdemustats).llsc, 0); 2311 + __this_cpu_write((mipsr2emustats).dsemul, 0); 2312 + __this_cpu_write((mipsr2bdemustats).dsemul, 0); 2313 + __this_cpu_write((mipsr2bremustats).jrs, 0); 2314 + __this_cpu_write((mipsr2bremustats).bltzl, 0); 2315 + __this_cpu_write((mipsr2bremustats).bgezl, 0); 2316 + __this_cpu_write((mipsr2bremustats).bltzll, 0); 2317 + __this_cpu_write((mipsr2bremustats).bgezll, 0); 2318 + __this_cpu_write((mipsr2bremustats).bltzal, 0); 2319 + __this_cpu_write((mipsr2bremustats).bgezal, 0); 2320 + __this_cpu_write((mipsr2bremustats).beql, 0); 2321 + __this_cpu_write((mipsr2bremustats).bnel, 0); 2322 + __this_cpu_write((mipsr2bremustats).blezl, 0); 2323 + __this_cpu_write((mipsr2bremustats).bgtzl, 0); 2324 + 2325 + 
return 0; 2326 + } 2327 + 2328 + static int mipsr2_stats_open(struct inode *inode, struct file *file) 2329 + { 2330 + return single_open(file, mipsr2_stats_show, inode->i_private); 2331 + } 2332 + 2333 + static int mipsr2_stats_clear_open(struct inode *inode, struct file *file) 2334 + { 2335 + return single_open(file, mipsr2_stats_clear_show, inode->i_private); 2336 + } 2337 + 2338 + static const struct file_operations mipsr2_emul_fops = { 2339 + .open = mipsr2_stats_open, 2340 + .read = seq_read, 2341 + .llseek = seq_lseek, 2342 + .release = single_release, 2343 + }; 2344 + 2345 + static const struct file_operations mipsr2_clear_fops = { 2346 + .open = mipsr2_stats_clear_open, 2347 + .read = seq_read, 2348 + .llseek = seq_lseek, 2349 + .release = single_release, 2350 + }; 2351 + 2352 + 2353 + static int __init mipsr2_init_debugfs(void) 2354 + { 2355 + extern struct dentry *mips_debugfs_dir; 2356 + struct dentry *mipsr2_emul; 2357 + 2358 + if (!mips_debugfs_dir) 2359 + return -ENODEV; 2360 + 2361 + mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO, 2362 + mips_debugfs_dir, NULL, 2363 + &mipsr2_emul_fops); 2364 + if (!mipsr2_emul) 2365 + return -ENOMEM; 2366 + 2367 + mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO, 2368 + mips_debugfs_dir, NULL, 2369 + &mipsr2_clear_fops); 2370 + if (!mipsr2_emul) 2371 + return -ENOMEM; 2372 + 2373 + return 0; 2374 + } 2375 + 2376 + device_initcall(mipsr2_init_debugfs); 2377 + 2378 + #endif /* CONFIG_DEBUG_FS */
+12
arch/mips/kernel/mips_ksyms.c
··· 14 #include <linux/mm.h> 15 #include <asm/uaccess.h> 16 #include <asm/ftrace.h> 17 18 extern void *__bzero(void *__s, size_t __count); 19 extern long __strncpy_from_kernel_nocheck_asm(char *__to, ··· 32 extern long __strnlen_kernel_asm(const char *s); 33 extern long __strnlen_user_nocheck_asm(const char *s); 34 extern long __strnlen_user_asm(const char *s); 35 36 /* 37 * String functions ··· 77 EXPORT_SYMBOL(__strnlen_user_nocheck_asm); 78 EXPORT_SYMBOL(__strnlen_user_asm); 79 80 EXPORT_SYMBOL(csum_partial); 81 EXPORT_SYMBOL(csum_partial_copy_nocheck); 82 EXPORT_SYMBOL(__csum_partial_copy_kernel); 83 EXPORT_SYMBOL(__csum_partial_copy_to_user); 84 EXPORT_SYMBOL(__csum_partial_copy_from_user); 85 86 EXPORT_SYMBOL(invalid_pte_table); 87 #ifdef CONFIG_FUNCTION_TRACER
··· 14 #include <linux/mm.h> 15 #include <asm/uaccess.h> 16 #include <asm/ftrace.h> 17 + #include <asm/fpu.h> 18 + #include <asm/msa.h> 19 20 extern void *__bzero(void *__s, size_t __count); 21 extern long __strncpy_from_kernel_nocheck_asm(char *__to, ··· 30 extern long __strnlen_kernel_asm(const char *s); 31 extern long __strnlen_user_nocheck_asm(const char *s); 32 extern long __strnlen_user_asm(const char *s); 33 + 34 + /* 35 + * Core architecture code 36 + */ 37 + EXPORT_SYMBOL_GPL(_save_fp); 38 + #ifdef CONFIG_CPU_HAS_MSA 39 + EXPORT_SYMBOL_GPL(_save_msa); 40 + #endif 41 42 /* 43 * String functions ··· 67 EXPORT_SYMBOL(__strnlen_user_nocheck_asm); 68 EXPORT_SYMBOL(__strnlen_user_asm); 69 70 + #ifndef CONFIG_CPU_MIPSR6 71 EXPORT_SYMBOL(csum_partial); 72 EXPORT_SYMBOL(csum_partial_copy_nocheck); 73 EXPORT_SYMBOL(__csum_partial_copy_kernel); 74 EXPORT_SYMBOL(__csum_partial_copy_to_user); 75 EXPORT_SYMBOL(__csum_partial_copy_from_user); 76 + #endif 77 78 EXPORT_SYMBOL(invalid_pte_table); 79 #ifdef CONFIG_FUNCTION_TRACER
+139 -81
arch/mips/kernel/octeon_switch.S
··· 31 /* 32 * check if we need to save FPU registers 33 */ 34 - PTR_L t3, TASK_THREAD_INFO(a0) 35 - LONG_L t0, TI_FLAGS(t3) 36 - li t1, _TIF_USEDFPU 37 - and t2, t0, t1 38 - beqz t2, 1f 39 - nor t1, zero, t1 40 - 41 - and t0, t0, t1 42 - LONG_S t0, TI_FLAGS(t3) 43 44 /* 45 * clear saved user stack CU1 bit ··· 52 .set pop 53 1: 54 55 - /* check if we need to save COP2 registers */ 56 - PTR_L t2, TASK_THREAD_INFO(a0) 57 - LONG_L t0, ST_OFF(t2) 58 - bbit0 t0, 30, 1f 59 - 60 - /* Disable COP2 in the stored process state */ 61 - li t1, ST0_CU2 62 - xor t0, t1 63 - LONG_S t0, ST_OFF(t2) 64 - 65 - /* Enable COP2 so we can save it */ 66 - mfc0 t0, CP0_STATUS 67 - or t0, t1 68 - mtc0 t0, CP0_STATUS 69 - 70 - /* Save COP2 */ 71 - daddu a0, THREAD_CP2 72 - jal octeon_cop2_save 73 - dsubu a0, THREAD_CP2 74 - 75 - /* Disable COP2 now that we are done */ 76 - mfc0 t0, CP0_STATUS 77 - li t1, ST0_CU2 78 - xor t0, t1 79 - mtc0 t0, CP0_STATUS 80 - 81 - 1: 82 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 83 /* Check if we need to store CVMSEG state */ 84 - mfc0 t0, $11,7 /* CvmMemCtl */ 85 bbit0 t0, 6, 3f /* Is user access enabled? */ 86 87 /* Store the CVMSEG state */ ··· 78 .set reorder 79 80 /* Disable access to CVMSEG */ 81 - mfc0 t0, $11,7 /* CvmMemCtl */ 82 xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ 83 - mtc0 t0, $11,7 /* CvmMemCtl */ 84 #endif 85 3: 86 ··· 116 * void octeon_cop2_save(struct octeon_cop2_state *a0) 117 */ 118 .align 7 119 LEAF(octeon_cop2_save) 120 121 dmfc0 t9, $9,7 /* CvmCtl register. 
*/ ··· 128 dmfc2 t2, 0x0200 129 sd t0, OCTEON_CP2_CRC_IV(a0) 130 sd t1, OCTEON_CP2_CRC_LENGTH(a0) 131 - sd t2, OCTEON_CP2_CRC_POLY(a0) 132 /* Skip next instructions if CvmCtl[NODFA_CP2] set */ 133 bbit1 t9, 28, 1f 134 135 /* Save the LLM state */ 136 dmfc2 t0, 0x0402 137 dmfc2 t1, 0x040A 138 sd t0, OCTEON_CP2_LLM_DAT(a0) 139 - sd t1, OCTEON_CP2_LLM_DAT+8(a0) 140 141 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ 142 143 /* Save the COP2 crypto state */ 144 /* this part is mostly common to both pass 1 and later revisions */ ··· 169 sd t2, OCTEON_CP2_AES_KEY+16(a0) 170 dmfc2 t2, 0x0101 171 sd t3, OCTEON_CP2_AES_KEY+24(a0) 172 - mfc0 t3, $15,0 /* Get the processor ID register */ 173 sd t0, OCTEON_CP2_AES_KEYLEN(a0) 174 - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 175 sd t1, OCTEON_CP2_AES_RESULT(a0) 176 - sd t2, OCTEON_CP2_AES_RESULT+8(a0) 177 /* Skip to the Pass1 version of the remainder of the COP2 state */ 178 - beq t3, t0, 2f 179 180 /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ 181 dmfc2 t1, 0x0240 182 dmfc2 t2, 0x0241 183 dmfc2 t3, 0x0242 184 dmfc2 t0, 0x0243 185 sd t1, OCTEON_CP2_HSH_DATW(a0) 186 dmfc2 t1, 0x0244 ··· 235 sd t1, OCTEON_CP2_GFM_MULT+8(a0) 236 sd t2, OCTEON_CP2_GFM_POLY(a0) 237 sd t3, OCTEON_CP2_GFM_RESULT(a0) 238 - sd t0, OCTEON_CP2_GFM_RESULT+8(a0) 239 jr ra 240 241 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ 242 dmfc2 t3, 0x0040 ··· 270 271 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ 272 jr ra 273 END(octeon_cop2_save) 274 275 /* 276 * void octeon_cop2_restore(struct octeon_cop2_state *a0) ··· 337 ld t2, OCTEON_CP2_AES_RESULT+8(a0) 338 mfc0 t3, $15,0 /* Get the processor ID register */ 339 dmtc2 t0, 0x0110 340 - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 341 dmtc2 t1, 0x0100 342 - bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ 343 dmtc2 t2, 0x0101 344 345 /* this code is specific for pass 1 */ ··· 367 368 3: /* this is post-pass1 code */ 369 ld t2, OCTEON_CP2_HSH_DATW(a0) 370 
ld t0, OCTEON_CP2_HSH_DATW+8(a0) 371 ld t1, OCTEON_CP2_HSH_DATW+16(a0) 372 dmtc2 t2, 0x0240 ··· 421 dmtc2 t2, 0x0259 422 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) 423 dmtc2 t0, 0x025E 424 dmtc2 t1, 0x025A 425 - dmtc2 t2, 0x025B 426 - 427 done_restore: 428 jr ra 429 nop ··· 440 * void octeon_mult_save() 441 * sp is assumed to point to a struct pt_regs 442 * 443 - * NOTE: This is called in SAVE_SOME in stackframe.h. It can only 444 - * safely modify k0 and k1. 445 */ 446 - .align 7 447 .set push 448 .set noreorder 449 LEAF(octeon_mult_save) 450 - dmfc0 k0, $9,7 /* CvmCtl register. */ 451 - bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */ 452 nop 453 454 - /* Save the multiplier state */ 455 v3mulu k0, $0, $0 456 v3mulu k1, $0, $0 457 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ ··· 471 sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ 472 jr ra 473 sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ 474 475 - 1: /* Resume here if CvmCtl[NOMUL] */ 476 jr ra 477 - END(octeon_mult_save) 478 .set pop 479 480 /* 481 * void octeon_mult_restore() 482 * sp is assumed to point to a struct pt_regs 483 * 484 - * NOTE: This is called in RESTORE_SOME in stackframe.h. 485 */ 486 - .align 7 487 .set push 488 .set noreorder 489 LEAF(octeon_mult_restore) 490 - dmfc0 k1, $9,7 /* CvmCtl register. */ 491 - ld v0, PT_MPL(sp) /* MPL0 */ 492 - ld v1, PT_MPL+8(sp) /* MPL1 */ 493 - ld k0, PT_MPL+16(sp) /* MPL2 */ 494 - bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ 495 - /* Normally falls through, so no time wasted here */ 496 - nop 497 - 498 - /* Restore the multiplier state */ 499 - ld k1, PT_MTP+16(sp) /* P2 */ 500 - MTM0 v0 /* MPL0 */ 501 - ld v0, PT_MTP+8(sp) /* P1 */ 502 - MTM1 v1 /* MPL1 */ 503 - ld v1, PT_MTP(sp) /* P0 */ 504 - MTM2 k0 /* MPL2 */ 505 - MTP2 k1 /* P2 */ 506 - MTP1 v0 /* P1 */ 507 - jr ra 508 - MTP0 v1 /* P0 */ 509 - 510 - 1: /* Resume here if CvmCtl[NOMUL] */ 511 jr ra 512 nop 513 END(octeon_mult_restore) 514 .set pop
··· 31 /* 32 * check if we need to save FPU registers 33 */ 34 + .set push 35 + .set noreorder 36 + beqz a3, 1f 37 + PTR_L t3, TASK_THREAD_INFO(a0) 38 + .set pop 39 40 /* 41 * clear saved user stack CU1 bit ··· 56 .set pop 57 1: 58 59 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 60 /* Check if we need to store CVMSEG state */ 61 + dmfc0 t0, $11,7 /* CvmMemCtl */ 62 bbit0 t0, 6, 3f /* Is user access enabled? */ 63 64 /* Store the CVMSEG state */ ··· 109 .set reorder 110 111 /* Disable access to CVMSEG */ 112 + dmfc0 t0, $11,7 /* CvmMemCtl */ 113 xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ 114 + dmtc0 t0, $11,7 /* CvmMemCtl */ 115 #endif 116 3: 117 ··· 147 * void octeon_cop2_save(struct octeon_cop2_state *a0) 148 */ 149 .align 7 150 + .set push 151 + .set noreorder 152 LEAF(octeon_cop2_save) 153 154 dmfc0 t9, $9,7 /* CvmCtl register. */ ··· 157 dmfc2 t2, 0x0200 158 sd t0, OCTEON_CP2_CRC_IV(a0) 159 sd t1, OCTEON_CP2_CRC_LENGTH(a0) 160 /* Skip next instructions if CvmCtl[NODFA_CP2] set */ 161 bbit1 t9, 28, 1f 162 + sd t2, OCTEON_CP2_CRC_POLY(a0) 163 164 /* Save the LLM state */ 165 dmfc2 t0, 0x0402 166 dmfc2 t1, 0x040A 167 sd t0, OCTEON_CP2_LLM_DAT(a0) 168 169 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ 170 + sd t1, OCTEON_CP2_LLM_DAT+8(a0) 171 172 /* Save the COP2 crypto state */ 173 /* this part is mostly common to both pass 1 and later revisions */ ··· 198 sd t2, OCTEON_CP2_AES_KEY+16(a0) 199 dmfc2 t2, 0x0101 200 sd t3, OCTEON_CP2_AES_KEY+24(a0) 201 + mfc0 v0, $15,0 /* Get the processor ID register */ 202 sd t0, OCTEON_CP2_AES_KEYLEN(a0) 203 + li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 204 sd t1, OCTEON_CP2_AES_RESULT(a0) 205 /* Skip to the Pass1 version of the remainder of the COP2 state */ 206 + beq v0, v1, 2f 207 + sd t2, OCTEON_CP2_AES_RESULT+8(a0) 208 209 /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ 210 dmfc2 t1, 0x0240 211 dmfc2 t2, 0x0241 212 + ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/ 213 dmfc2 t3, 0x0242 214 + 
subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ 215 dmfc2 t0, 0x0243 216 sd t1, OCTEON_CP2_HSH_DATW(a0) 217 dmfc2 t1, 0x0244 ··· 262 sd t1, OCTEON_CP2_GFM_MULT+8(a0) 263 sd t2, OCTEON_CP2_GFM_POLY(a0) 264 sd t3, OCTEON_CP2_GFM_RESULT(a0) 265 + bltz v1, 4f 266 + sd t0, OCTEON_CP2_GFM_RESULT+8(a0) 267 + /* OCTEON III things*/ 268 + dmfc2 t0, 0x024F 269 + dmfc2 t1, 0x0050 270 + sd t0, OCTEON_CP2_SHA3(a0) 271 + sd t1, OCTEON_CP2_SHA3+8(a0) 272 + 4: 273 jr ra 274 + nop 275 276 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ 277 dmfc2 t3, 0x0040 ··· 289 290 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ 291 jr ra 292 + nop 293 END(octeon_cop2_save) 294 + .set pop 295 296 /* 297 * void octeon_cop2_restore(struct octeon_cop2_state *a0) ··· 354 ld t2, OCTEON_CP2_AES_RESULT+8(a0) 355 mfc0 t3, $15,0 /* Get the processor ID register */ 356 dmtc2 t0, 0x0110 357 + li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 358 dmtc2 t1, 0x0100 359 + bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ 360 dmtc2 t2, 0x0101 361 362 /* this code is specific for pass 1 */ ··· 384 385 3: /* this is post-pass1 code */ 386 ld t2, OCTEON_CP2_HSH_DATW(a0) 387 + ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/ 388 ld t0, OCTEON_CP2_HSH_DATW+8(a0) 389 ld t1, OCTEON_CP2_HSH_DATW+16(a0) 390 dmtc2 t2, 0x0240 ··· 437 dmtc2 t2, 0x0259 438 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) 439 dmtc2 t0, 0x025E 440 + subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ 441 dmtc2 t1, 0x025A 442 + bltz v0, done_restore 443 + dmtc2 t2, 0x025B 444 + /* OCTEON III things*/ 445 + ld t0, OCTEON_CP2_SHA3(a0) 446 + ld t1, OCTEON_CP2_SHA3+8(a0) 447 + dmtc2 t0, 0x0051 448 + dmtc2 t1, 0x0050 449 done_restore: 450 jr ra 451 nop ··· 450 * void octeon_mult_save() 451 * sp is assumed to point to a struct pt_regs 452 * 453 + * NOTE: This is called in SAVE_TEMP in stackframe.h. It can 454 + * safely modify v1,k0, k1,$10-$15, and $24. It will 455 + * be overwritten with a processor specific version of the code. 
456 */ 457 + .p2align 7 458 .set push 459 .set noreorder 460 LEAF(octeon_mult_save) 461 + jr ra 462 nop 463 + .space 30 * 4, 0 464 + octeon_mult_save_end: 465 + EXPORT(octeon_mult_save_end) 466 + END(octeon_mult_save) 467 468 + LEAF(octeon_mult_save2) 469 + /* Save the multiplier state OCTEON II and earlier*/ 470 v3mulu k0, $0, $0 471 v3mulu k1, $0, $0 472 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ ··· 476 sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ 477 jr ra 478 sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ 479 + octeon_mult_save2_end: 480 + EXPORT(octeon_mult_save2_end) 481 + END(octeon_mult_save2) 482 483 + LEAF(octeon_mult_save3) 484 + /* Save the multiplier state OCTEON III */ 485 + v3mulu $10, $0, $0 /* read P0 */ 486 + v3mulu $11, $0, $0 /* read P1 */ 487 + v3mulu $12, $0, $0 /* read P2 */ 488 + sd $10, PT_MTP+(0*8)(sp) /* store P0 */ 489 + v3mulu $10, $0, $0 /* read P3 */ 490 + sd $11, PT_MTP+(1*8)(sp) /* store P1 */ 491 + v3mulu $11, $0, $0 /* read P4 */ 492 + sd $12, PT_MTP+(2*8)(sp) /* store P2 */ 493 + ori $13, $0, 1 494 + v3mulu $12, $0, $0 /* read P5 */ 495 + sd $10, PT_MTP+(3*8)(sp) /* store P3 */ 496 + v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ 497 + sd $11, PT_MTP+(4*8)(sp) /* store P4 */ 498 + v3mulu $10, $0, $0 /* read MPL1 */ 499 + sd $12, PT_MTP+(5*8)(sp) /* store P5 */ 500 + v3mulu $11, $0, $0 /* read MPL2 */ 501 + sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ 502 + v3mulu $12, $0, $0 /* read MPL3 */ 503 + sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ 504 + v3mulu $10, $0, $0 /* read MPL4 */ 505 + sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ 506 + v3mulu $11, $0, $0 /* read MPL5 */ 507 + sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ 508 + sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ 509 jr ra 510 + sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ 511 + octeon_mult_save3_end: 512 + EXPORT(octeon_mult_save3_end) 513 + END(octeon_mult_save3) 514 .set pop 515 516 /* 517 * void octeon_mult_restore() 518 * sp is assumed to point to a struct pt_regs 519 * 520 + * 
NOTE: This is called in RESTORE_TEMP in stackframe.h. 521 */ 522 + .p2align 7 523 .set push 524 .set noreorder 525 LEAF(octeon_mult_restore) 526 jr ra 527 nop 528 + .space 30 * 4, 0 529 + octeon_mult_restore_end: 530 + EXPORT(octeon_mult_restore_end) 531 END(octeon_mult_restore) 532 + 533 + LEAF(octeon_mult_restore2) 534 + ld v0, PT_MPL(sp) /* MPL0 */ 535 + ld v1, PT_MPL+8(sp) /* MPL1 */ 536 + ld k0, PT_MPL+16(sp) /* MPL2 */ 537 + /* Restore the multiplier state */ 538 + ld k1, PT_MTP+16(sp) /* P2 */ 539 + mtm0 v0 /* MPL0 */ 540 + ld v0, PT_MTP+8(sp) /* P1 */ 541 + mtm1 v1 /* MPL1 */ 542 + ld v1, PT_MTP(sp) /* P0 */ 543 + mtm2 k0 /* MPL2 */ 544 + mtp2 k1 /* P2 */ 545 + mtp1 v0 /* P1 */ 546 + jr ra 547 + mtp0 v1 /* P0 */ 548 + octeon_mult_restore2_end: 549 + EXPORT(octeon_mult_restore2_end) 550 + END(octeon_mult_restore2) 551 + 552 + LEAF(octeon_mult_restore3) 553 + ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ 554 + ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ 555 + ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ 556 + ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ 557 + .word 0x718d0008 558 + /* mtm0 $12, $13 restore MPL0 and MPL3 */ 559 + ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ 560 + .word 0x714b000c 561 + /* mtm1 $10, $11 restore MPL1 and MPL4 */ 562 + ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ 563 + ld $10, PT_MTP+(0*8)(sp) /* read P0 */ 564 + ld $11, PT_MTP+(3*8)(sp) /* read P3 */ 565 + .word 0x718d000d 566 + /* mtm2 $12, $13 restore MPL2 and MPL5 */ 567 + ld $12, PT_MTP+(1*8)(sp) /* read P1 */ 568 + .word 0x714b0009 569 + /* mtp0 $10, $11 restore P0 and P3 */ 570 + ld $13, PT_MTP+(4*8)(sp) /* read P4 */ 571 + ld $10, PT_MTP+(2*8)(sp) /* read P2 */ 572 + ld $11, PT_MTP+(5*8)(sp) /* read P5 */ 573 + .word 0x718d000a 574 + /* mtp1 $12, $13 restore P1 and P4 */ 575 + jr ra 576 + .word 0x714b000b 577 + /* mtp2 $10, $11 restore P2 and P5 */ 578 + 579 + octeon_mult_restore3_end: 580 + EXPORT(octeon_mult_restore3_end) 581 + END(octeon_mult_restore3) 582 .set pop
+7 -1
arch/mips/kernel/proc.c
··· 82 seq_printf(m, "]\n"); 83 } 84 85 - seq_printf(m, "isa\t\t\t: mips1"); 86 if (cpu_has_mips_2) 87 seq_printf(m, "%s", " mips2"); 88 if (cpu_has_mips_3) ··· 97 seq_printf(m, "%s", " mips32r1"); 98 if (cpu_has_mips32r2) 99 seq_printf(m, "%s", " mips32r2"); 100 if (cpu_has_mips64r1) 101 seq_printf(m, "%s", " mips64r1"); 102 if (cpu_has_mips64r2) 103 seq_printf(m, "%s", " mips64r2"); 104 seq_printf(m, "\n"); 105 106 seq_printf(m, "ASEs implemented\t:");
··· 82 seq_printf(m, "]\n"); 83 } 84 85 + seq_printf(m, "isa\t\t\t:"); 86 + if (cpu_has_mips_r1) 87 + seq_printf(m, " mips1"); 88 if (cpu_has_mips_2) 89 seq_printf(m, "%s", " mips2"); 90 if (cpu_has_mips_3) ··· 95 seq_printf(m, "%s", " mips32r1"); 96 if (cpu_has_mips32r2) 97 seq_printf(m, "%s", " mips32r2"); 98 + if (cpu_has_mips32r6) 99 + seq_printf(m, "%s", " mips32r6"); 100 if (cpu_has_mips64r1) 101 seq_printf(m, "%s", " mips64r1"); 102 if (cpu_has_mips64r2) 103 seq_printf(m, "%s", " mips64r2"); 104 + if (cpu_has_mips64r6) 105 + seq_printf(m, "%s", " mips64r6"); 106 seq_printf(m, "\n"); 107 108 seq_printf(m, "ASEs implemented\t:");
+96
arch/mips/kernel/process.c
··· 25 #include <linux/completion.h> 26 #include <linux/kallsyms.h> 27 #include <linux/random.h> 28 29 #include <asm/asm.h> 30 #include <asm/bootinfo.h> ··· 562 void arch_trigger_all_cpu_backtrace(bool include_self) 563 { 564 smp_call_function(arch_dump_stack, NULL, 1); 565 }
··· 25 #include <linux/completion.h> 26 #include <linux/kallsyms.h> 27 #include <linux/random.h> 28 + #include <linux/prctl.h> 29 30 #include <asm/asm.h> 31 #include <asm/bootinfo.h> ··· 561 void arch_trigger_all_cpu_backtrace(bool include_self) 562 { 563 smp_call_function(arch_dump_stack, NULL, 1); 564 + } 565 + 566 + int mips_get_process_fp_mode(struct task_struct *task) 567 + { 568 + int value = 0; 569 + 570 + if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) 571 + value |= PR_FP_MODE_FR; 572 + if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) 573 + value |= PR_FP_MODE_FRE; 574 + 575 + return value; 576 + } 577 + 578 + int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) 579 + { 580 + const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; 581 + unsigned long switch_count; 582 + struct task_struct *t; 583 + 584 + /* Check the value is valid */ 585 + if (value & ~known_bits) 586 + return -EOPNOTSUPP; 587 + 588 + /* Avoid inadvertently triggering emulation */ 589 + if ((value & PR_FP_MODE_FR) && cpu_has_fpu && 590 + !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) 591 + return -EOPNOTSUPP; 592 + if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) 593 + return -EOPNOTSUPP; 594 + 595 + /* FR = 0 not supported in MIPS R6 */ 596 + if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) 597 + return -EOPNOTSUPP; 598 + 599 + /* Save FP & vector context, then disable FPU & MSA */ 600 + if (task->signal == current->signal) 601 + lose_fpu(1); 602 + 603 + /* Prevent any threads from obtaining live FP context */ 604 + atomic_set(&task->mm->context.fp_mode_switching, 1); 605 + smp_mb__after_atomic(); 606 + 607 + /* 608 + * If there are multiple online CPUs then wait until all threads whose 609 + * FP mode is about to change have been context switched. This approach 610 + * allows us to only worry about whether an FP mode switch is in 611 + * progress when FP is first used in a tasks time slice. 
Pretty much all 612 + * of the mode switch overhead can thus be confined to cases where mode 613 + * switches are actually occuring. That is, to here. However for the 614 + * thread performing the mode switch it may take a while... 615 + */ 616 + if (num_online_cpus() > 1) { 617 + spin_lock_irq(&task->sighand->siglock); 618 + 619 + for_each_thread(task, t) { 620 + if (t == current) 621 + continue; 622 + 623 + switch_count = t->nvcsw + t->nivcsw; 624 + 625 + do { 626 + spin_unlock_irq(&task->sighand->siglock); 627 + cond_resched(); 628 + spin_lock_irq(&task->sighand->siglock); 629 + } while ((t->nvcsw + t->nivcsw) == switch_count); 630 + } 631 + 632 + spin_unlock_irq(&task->sighand->siglock); 633 + } 634 + 635 + /* 636 + * There are now no threads of the process with live FP context, so it 637 + * is safe to proceed with the FP mode switch. 638 + */ 639 + for_each_thread(task, t) { 640 + /* Update desired FP register width */ 641 + if (value & PR_FP_MODE_FR) { 642 + clear_tsk_thread_flag(t, TIF_32BIT_FPREGS); 643 + } else { 644 + set_tsk_thread_flag(t, TIF_32BIT_FPREGS); 645 + clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE); 646 + } 647 + 648 + /* Update desired FP single layout */ 649 + if (value & PR_FP_MODE_FRE) 650 + set_tsk_thread_flag(t, TIF_HYBRID_FPREGS); 651 + else 652 + clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS); 653 + } 654 + 655 + /* Allow threads to use FP again */ 656 + atomic_set(&task->mm->context.fp_mode_switching, 0); 657 + 658 + return 0; 659 }
+9 -3
arch/mips/kernel/r4k_fpu.S
··· 34 .endm 35 36 .set noreorder 37 - .set arch=r4000 38 39 LEAF(_save_fp_context) 40 .set push ··· 42 cfc1 t1, fcr31 43 .set pop 44 45 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 46 .set push 47 SET_HARDFLOAT 48 #ifdef CONFIG_CPU_MIPS32_R2 ··· 106 SET_HARDFLOAT 107 cfc1 t1, fcr31 108 109 mfc0 t0, CP0_STATUS 110 sll t0, t0, 5 111 bgez t0, 1f # skip storing odd if FR=0 112 nop 113 114 /* Store the 16 odd double precision registers */ 115 EX sdc1 $f1, SC32_FPREGS+8(a0) ··· 166 LEAF(_restore_fp_context) 167 EX lw t1, SC_FPC_CSR(a0) 168 169 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 170 .set push 171 SET_HARDFLOAT 172 #ifdef CONFIG_CPU_MIPS32_R2 ··· 227 SET_HARDFLOAT 228 EX lw t1, SC32_FPC_CSR(a0) 229 230 mfc0 t0, CP0_STATUS 231 sll t0, t0, 5 232 bgez t0, 1f # skip loading odd if FR=0 233 nop 234 235 EX ldc1 $f1, SC32_FPREGS+8(a0) 236 EX ldc1 $f3, SC32_FPREGS+24(a0)
··· 34 .endm 35 36 .set noreorder 37 + .set MIPS_ISA_ARCH_LEVEL_RAW 38 39 LEAF(_save_fp_context) 40 .set push ··· 42 cfc1 t1, fcr31 43 .set pop 44 45 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 46 + defined(CONFIG_CPU_MIPS32_R6) 47 .set push 48 SET_HARDFLOAT 49 #ifdef CONFIG_CPU_MIPS32_R2 ··· 105 SET_HARDFLOAT 106 cfc1 t1, fcr31 107 108 + #ifndef CONFIG_CPU_MIPS64_R6 109 mfc0 t0, CP0_STATUS 110 sll t0, t0, 5 111 bgez t0, 1f # skip storing odd if FR=0 112 nop 113 + #endif 114 115 /* Store the 16 odd double precision registers */ 116 EX sdc1 $f1, SC32_FPREGS+8(a0) ··· 163 LEAF(_restore_fp_context) 164 EX lw t1, SC_FPC_CSR(a0) 165 166 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 167 + defined(CONFIG_CPU_MIPS32_R6) 168 .set push 169 SET_HARDFLOAT 170 #ifdef CONFIG_CPU_MIPS32_R2 ··· 223 SET_HARDFLOAT 224 EX lw t1, SC32_FPC_CSR(a0) 225 226 + #ifndef CONFIG_CPU_MIPS64_R6 227 mfc0 t0, CP0_STATUS 228 sll t0, t0, 5 229 bgez t0, 1f # skip loading odd if FR=0 230 nop 231 + #endif 232 233 EX ldc1 $f1, SC32_FPREGS+8(a0) 234 EX ldc1 $f3, SC32_FPREGS+24(a0)
+8 -6
arch/mips/kernel/r4k_switch.S
··· 115 * Save a thread's fp context. 116 */ 117 LEAF(_save_fp) 118 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 119 mfc0 t0, CP0_STATUS 120 #endif 121 fpu_save_double a0 t0 t1 # clobbers t1 ··· 127 * Restore a thread's fp context. 128 */ 129 LEAF(_restore_fp) 130 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 131 mfc0 t0, CP0_STATUS 132 #endif 133 fpu_restore_double a0 t0 t1 # clobbers t1 ··· 242 mtc1 t1, $f30 243 mtc1 t1, $f31 244 245 - #ifdef CONFIG_CPU_MIPS32_R2 246 .set push 247 - .set mips32r2 248 .set fp=64 249 sll t0, t0, 5 # is Status.FR set? 250 bgez t0, 1f # no: skip setting upper 32b ··· 282 mthc1 t1, $f30 283 mthc1 t1, $f31 284 1: .set pop 285 - #endif /* CONFIG_CPU_MIPS32_R2 */ 286 #else 287 - .set arch=r4000 288 dmtc1 t1, $f0 289 dmtc1 t1, $f2 290 dmtc1 t1, $f4
··· 115 * Save a thread's fp context. 116 */ 117 LEAF(_save_fp) 118 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 119 + defined(CONFIG_CPU_MIPS32_R6) 120 mfc0 t0, CP0_STATUS 121 #endif 122 fpu_save_double a0 t0 t1 # clobbers t1 ··· 126 * Restore a thread's fp context. 127 */ 128 LEAF(_restore_fp) 129 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 130 + defined(CONFIG_CPU_MIPS32_R6) 131 mfc0 t0, CP0_STATUS 132 #endif 133 fpu_restore_double a0 t0 t1 # clobbers t1 ··· 240 mtc1 t1, $f30 241 mtc1 t1, $f31 242 243 + #if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) 244 .set push 245 + .set MIPS_ISA_LEVEL_RAW 246 .set fp=64 247 sll t0, t0, 5 # is Status.FR set? 248 bgez t0, 1f # no: skip setting upper 32b ··· 280 mthc1 t1, $f30 281 mthc1 t1, $f31 282 1: .set pop 283 + #endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ 284 #else 285 + .set MIPS_ISA_ARCH_LEVEL_RAW 286 dmtc1 t1, $f0 287 dmtc1 t1, $f2 288 dmtc1 t1, $f4
+1
arch/mips/kernel/spram.c
··· 208 case CPU_INTERAPTIV: 209 case CPU_PROAPTIV: 210 case CPU_P5600: 211 config0 = read_c0_config(); 212 /* FIXME: addresses are Malta specific */ 213 if (config0 & (1<<24)) {
··· 208 case CPU_INTERAPTIV: 209 case CPU_PROAPTIV: 210 case CPU_P5600: 211 + case CPU_QEMU_GENERIC: 212 config0 = read_c0_config(); 213 /* FIXME: addresses are Malta specific */ 214 if (config0 & (1<<24)) {
+1 -1
arch/mips/kernel/syscall.c
··· 136 : "memory"); 137 } else if (cpu_has_llsc) { 138 __asm__ __volatile__ ( 139 - " .set arch=r4000 \n" 140 " li %[err], 0 \n" 141 "1: ll %[old], (%[addr]) \n" 142 " move %[tmp], %[new] \n"
··· 136 : "memory"); 137 } else if (cpu_has_llsc) { 138 __asm__ __volatile__ ( 139 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 140 " li %[err], 0 \n" 141 "1: ll %[old], (%[addr]) \n" 142 " move %[tmp], %[new] \n"
+54 -6
arch/mips/kernel/traps.c
··· 46 #include <asm/fpu.h> 47 #include <asm/fpu_emulator.h> 48 #include <asm/idle.h> 49 #include <asm/mipsregs.h> 50 #include <asm/mipsmtregs.h> 51 #include <asm/module.h> ··· 838 exception_exit(prev_state); 839 } 840 841 - static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 842 const char *str) 843 { 844 siginfo_t info; ··· 1028 unsigned int opcode = 0; 1029 int status = -1; 1030 1031 prev_state = exception_enter(); 1032 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), 1033 SIGILL) == NOTIFY_STOP) 1034 goto out; ··· 1162 return NOTIFY_OK; 1163 } 1164 1165 static int enable_restore_fp_context(int msa) 1166 { 1167 int err, was_fpu_owner, prior_msa; 1168 1169 if (!used_math()) { 1170 /* First time FP context user. */ ··· 1588 case CPU_INTERAPTIV: 1589 case CPU_PROAPTIV: 1590 case CPU_P5600: 1591 { 1592 #define ERRCTL_PE 0x80000000 1593 #define ERRCTL_L2P 0x00800000 ··· 1678 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1679 reg_val & (1<<30) ? "secondary" : "primary", 1680 reg_val & (1<<31) ? "data" : "insn"); 1681 - if (cpu_has_mips_r2 && 1682 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1683 pr_err("Error bits: %s%s%s%s%s%s%s%s\n", 1684 reg_val & (1<<29) ? "ED " : "", ··· 1718 unsigned int reg_val; 1719 1720 /* For the moment, report the problem and hang. 
*/ 1721 - if (cpu_has_mips_r2 && 1722 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1723 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1724 read_c0_ecc()); ··· 2007 { 2008 unsigned int hwrena = cpu_hwrena_impl_bits; 2009 2010 - if (cpu_has_mips_r2) 2011 hwrena |= 0x0000000f; 2012 2013 if (!noulri && cpu_has_userlocal) ··· 2051 * o read IntCtl.IPTI to determine the timer interrupt 2052 * o read IntCtl.IPPCI to determine the performance counter interrupt 2053 */ 2054 - if (cpu_has_mips_r2) { 2055 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; 2056 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; 2057 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; ··· 2142 #else 2143 ebase = CKSEG0; 2144 #endif 2145 - if (cpu_has_mips_r2) 2146 ebase += (read_c0_ebase() & 0x3ffff000); 2147 } 2148
··· 46 #include <asm/fpu.h> 47 #include <asm/fpu_emulator.h> 48 #include <asm/idle.h> 49 + #include <asm/mips-r2-to-r6-emul.h> 50 #include <asm/mipsregs.h> 51 #include <asm/mipsmtregs.h> 52 #include <asm/module.h> ··· 837 exception_exit(prev_state); 838 } 839 840 + void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 841 const char *str) 842 { 843 siginfo_t info; ··· 1027 unsigned int opcode = 0; 1028 int status = -1; 1029 1030 + /* 1031 + * Avoid any kernel code. Just emulate the R2 instruction 1032 + * as quickly as possible. 1033 + */ 1034 + if (mipsr2_emulation && cpu_has_mips_r6 && 1035 + likely(user_mode(regs))) { 1036 + if (likely(get_user(opcode, epc) >= 0)) { 1037 + status = mipsr2_decoder(regs, opcode); 1038 + switch (status) { 1039 + case 0: 1040 + case SIGEMT: 1041 + task_thread_info(current)->r2_emul_return = 1; 1042 + return; 1043 + case SIGILL: 1044 + goto no_r2_instr; 1045 + default: 1046 + process_fpemu_return(status, 1047 + &current->thread.cp0_baduaddr); 1048 + task_thread_info(current)->r2_emul_return = 1; 1049 + return; 1050 + } 1051 + } 1052 + } 1053 + 1054 + no_r2_instr: 1055 + 1056 prev_state = exception_enter(); 1057 + 1058 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), 1059 SIGILL) == NOTIFY_STOP) 1060 goto out; ··· 1134 return NOTIFY_OK; 1135 } 1136 1137 + static int wait_on_fp_mode_switch(atomic_t *p) 1138 + { 1139 + /* 1140 + * The FP mode for this task is currently being switched. That may 1141 + * involve modifications to the format of this tasks FP context which 1142 + * make it unsafe to proceed with execution for the moment. Instead, 1143 + * schedule some other task. 1144 + */ 1145 + schedule(); 1146 + return 0; 1147 + } 1148 + 1149 static int enable_restore_fp_context(int msa) 1150 { 1151 int err, was_fpu_owner, prior_msa; 1152 + 1153 + /* 1154 + * If an FP mode switch is currently underway, wait for it to 1155 + * complete before proceeding. 
1156 + */ 1157 + wait_on_atomic_t(&current->mm->context.fp_mode_switching, 1158 + wait_on_fp_mode_switch, TASK_KILLABLE); 1159 1160 if (!used_math()) { 1161 /* First time FP context user. */ ··· 1541 case CPU_INTERAPTIV: 1542 case CPU_PROAPTIV: 1543 case CPU_P5600: 1544 + case CPU_QEMU_GENERIC: 1545 { 1546 #define ERRCTL_PE 0x80000000 1547 #define ERRCTL_L2P 0x00800000 ··· 1630 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1631 reg_val & (1<<30) ? "secondary" : "primary", 1632 reg_val & (1<<31) ? "data" : "insn"); 1633 + if ((cpu_has_mips_r2_r6) && 1634 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1635 pr_err("Error bits: %s%s%s%s%s%s%s%s\n", 1636 reg_val & (1<<29) ? "ED " : "", ··· 1670 unsigned int reg_val; 1671 1672 /* For the moment, report the problem and hang. */ 1673 + if ((cpu_has_mips_r2_r6) && 1674 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1675 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1676 read_c0_ecc()); ··· 1959 { 1960 unsigned int hwrena = cpu_hwrena_impl_bits; 1961 1962 + if (cpu_has_mips_r2_r6) 1963 hwrena |= 0x0000000f; 1964 1965 if (!noulri && cpu_has_userlocal) ··· 2003 * o read IntCtl.IPTI to determine the timer interrupt 2004 * o read IntCtl.IPPCI to determine the performance counter interrupt 2005 */ 2006 + if (cpu_has_mips_r2_r6) { 2007 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; 2008 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; 2009 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; ··· 2094 #else 2095 ebase = CKSEG0; 2096 #endif 2097 + if (cpu_has_mips_r2_r6) 2098 ebase += (read_c0_ebase() & 0x3ffff000); 2099 } 2100
+386 -4
arch/mips/kernel/unaligned.c
··· 129 : "=&r" (value), "=r" (res) \ 130 : "r" (addr), "i" (-EFAULT)); 131 132 #define LoadW(addr, value, res) \ 133 __asm__ __volatile__ ( \ 134 "1:\t"user_lwl("%0", "(%2)")"\n" \ ··· 147 ".previous" \ 148 : "=&r" (value), "=r" (res) \ 149 : "r" (addr), "i" (-EFAULT)); 150 151 #define LoadHWU(addr, value, res) \ 152 __asm__ __volatile__ ( \ ··· 203 : "=&r" (value), "=r" (res) \ 204 : "r" (addr), "i" (-EFAULT)); 205 206 #define LoadWU(addr, value, res) \ 207 __asm__ __volatile__ ( \ 208 "1:\t"user_lwl("%0", "(%2)")"\n" \ ··· 241 ".previous" \ 242 : "=&r" (value), "=r" (res) \ 243 : "r" (addr), "i" (-EFAULT)); 244 245 #define StoreHW(addr, value, res) \ 246 __asm__ __volatile__ ( \ ··· 344 : "=r" (res) \ 345 : "r" (value), "r" (addr), "i" (-EFAULT)); 346 347 #define StoreW(addr, value, res) \ 348 __asm__ __volatile__ ( \ 349 "1:\t"user_swl("%1", "(%2)")"\n" \ ··· 380 ".previous" \ 381 : "=r" (res) \ 382 : "r" (value), "r" (addr), "i" (-EFAULT)); 383 - #endif 384 385 - #ifdef __LITTLE_ENDIAN 386 #define LoadHW(addr, value, res) \ 387 __asm__ __volatile__ (".set\tnoat\n" \ 388 "1:\t"user_lb("%0", "1(%2)")"\n" \ ··· 476 : "=&r" (value), "=r" (res) \ 477 : "r" (addr), "i" (-EFAULT)); 478 479 #define LoadW(addr, value, res) \ 480 __asm__ __volatile__ ( \ 481 "1:\t"user_lwl("%0", "3(%2)")"\n" \ ··· 494 ".previous" \ 495 : "=&r" (value), "=r" (res) \ 496 : "r" (addr), "i" (-EFAULT)); 497 498 #define LoadHWU(addr, value, res) \ 499 __asm__ __volatile__ ( \ ··· 551 : "=&r" (value), "=r" (res) \ 552 : "r" (addr), "i" (-EFAULT)); 553 554 #define LoadWU(addr, value, res) \ 555 __asm__ __volatile__ ( \ 556 "1:\t"user_lwl("%0", "3(%2)")"\n" \ ··· 589 ".previous" \ 590 : "=&r" (value), "=r" (res) \ 591 : "r" (addr), "i" (-EFAULT)); 592 593 #define StoreHW(addr, value, res) \ 594 __asm__ __volatile__ ( \ ··· 690 ".previous" \ 691 : "=r" (res) \ 692 : "r" (value), "r" (addr), "i" (-EFAULT)); 693 - 694 #define StoreW(addr, value, res) \ 695 __asm__ __volatile__ ( \ 696 
"1:\t"user_swl("%1", "3(%2)")"\n" \ ··· 726 ".previous" \ 727 : "=r" (res) \ 728 : "r" (value), "r" (addr), "i" (-EFAULT)); 729 #endif 730 731 static void emulate_load_store_insn(struct pt_regs *regs, ··· 1082 break; 1083 return; 1084 1085 /* 1086 * COP2 is available to implementor for application specific use. 1087 * It's up to applications to register a notifier chain and do 1088 * whatever they have to do, including possible sending of signals. 1089 */ 1090 case lwc2_op: 1091 cu2_notifier_call_chain(CU2_LWC2_OP, regs); ··· 1105 case sdc2_op: 1106 cu2_notifier_call_chain(CU2_SDC2_OP, regs); 1107 break; 1108 - 1109 default: 1110 /* 1111 * Pheeee... We encountered an yet unknown instruction or
··· 129 : "=&r" (value), "=r" (res) \ 130 : "r" (addr), "i" (-EFAULT)); 131 132 + #ifndef CONFIG_CPU_MIPSR6 133 #define LoadW(addr, value, res) \ 134 __asm__ __volatile__ ( \ 135 "1:\t"user_lwl("%0", "(%2)")"\n" \ ··· 146 ".previous" \ 147 : "=&r" (value), "=r" (res) \ 148 : "r" (addr), "i" (-EFAULT)); 149 + #else 150 + /* MIPSR6 has no lwl instruction */ 151 + #define LoadW(addr, value, res) \ 152 + __asm__ __volatile__ ( \ 153 + ".set\tpush\n" \ 154 + ".set\tnoat\n\t" \ 155 + "1:"user_lb("%0", "0(%2)")"\n\t" \ 156 + "2:"user_lbu("$1", "1(%2)")"\n\t" \ 157 + "sll\t%0, 0x8\n\t" \ 158 + "or\t%0, $1\n\t" \ 159 + "3:"user_lbu("$1", "2(%2)")"\n\t" \ 160 + "sll\t%0, 0x8\n\t" \ 161 + "or\t%0, $1\n\t" \ 162 + "4:"user_lbu("$1", "3(%2)")"\n\t" \ 163 + "sll\t%0, 0x8\n\t" \ 164 + "or\t%0, $1\n\t" \ 165 + "li\t%1, 0\n" \ 166 + ".set\tpop\n" \ 167 + "10:\n\t" \ 168 + ".insn\n\t" \ 169 + ".section\t.fixup,\"ax\"\n\t" \ 170 + "11:\tli\t%1, %3\n\t" \ 171 + "j\t10b\n\t" \ 172 + ".previous\n\t" \ 173 + ".section\t__ex_table,\"a\"\n\t" \ 174 + STR(PTR)"\t1b, 11b\n\t" \ 175 + STR(PTR)"\t2b, 11b\n\t" \ 176 + STR(PTR)"\t3b, 11b\n\t" \ 177 + STR(PTR)"\t4b, 11b\n\t" \ 178 + ".previous" \ 179 + : "=&r" (value), "=r" (res) \ 180 + : "r" (addr), "i" (-EFAULT)); 181 + #endif /* CONFIG_CPU_MIPSR6 */ 182 183 #define LoadHWU(addr, value, res) \ 184 __asm__ __volatile__ ( \ ··· 169 : "=&r" (value), "=r" (res) \ 170 : "r" (addr), "i" (-EFAULT)); 171 172 + #ifndef CONFIG_CPU_MIPSR6 173 #define LoadWU(addr, value, res) \ 174 __asm__ __volatile__ ( \ 175 "1:\t"user_lwl("%0", "(%2)")"\n" \ ··· 206 ".previous" \ 207 : "=&r" (value), "=r" (res) \ 208 : "r" (addr), "i" (-EFAULT)); 209 + #else 210 + /* MIPSR6 has not lwl and ldl instructions */ 211 + #define LoadWU(addr, value, res) \ 212 + __asm__ __volatile__ ( \ 213 + ".set\tpush\n\t" \ 214 + ".set\tnoat\n\t" \ 215 + "1:"user_lbu("%0", "0(%2)")"\n\t" \ 216 + "2:"user_lbu("$1", "1(%2)")"\n\t" \ 217 + "sll\t%0, 0x8\n\t" \ 218 + "or\t%0, $1\n\t" \ 219 + 
"3:"user_lbu("$1", "2(%2)")"\n\t" \ 220 + "sll\t%0, 0x8\n\t" \ 221 + "or\t%0, $1\n\t" \ 222 + "4:"user_lbu("$1", "3(%2)")"\n\t" \ 223 + "sll\t%0, 0x8\n\t" \ 224 + "or\t%0, $1\n\t" \ 225 + "li\t%1, 0\n" \ 226 + ".set\tpop\n" \ 227 + "10:\n\t" \ 228 + ".insn\n\t" \ 229 + ".section\t.fixup,\"ax\"\n\t" \ 230 + "11:\tli\t%1, %3\n\t" \ 231 + "j\t10b\n\t" \ 232 + ".previous\n\t" \ 233 + ".section\t__ex_table,\"a\"\n\t" \ 234 + STR(PTR)"\t1b, 11b\n\t" \ 235 + STR(PTR)"\t2b, 11b\n\t" \ 236 + STR(PTR)"\t3b, 11b\n\t" \ 237 + STR(PTR)"\t4b, 11b\n\t" \ 238 + ".previous" \ 239 + : "=&r" (value), "=r" (res) \ 240 + : "r" (addr), "i" (-EFAULT)); 241 + 242 + #define LoadDW(addr, value, res) \ 243 + __asm__ __volatile__ ( \ 244 + ".set\tpush\n\t" \ 245 + ".set\tnoat\n\t" \ 246 + "1:lb\t%0, 0(%2)\n\t" \ 247 + "2:lbu\t $1, 1(%2)\n\t" \ 248 + "dsll\t%0, 0x8\n\t" \ 249 + "or\t%0, $1\n\t" \ 250 + "3:lbu\t$1, 2(%2)\n\t" \ 251 + "dsll\t%0, 0x8\n\t" \ 252 + "or\t%0, $1\n\t" \ 253 + "4:lbu\t$1, 3(%2)\n\t" \ 254 + "dsll\t%0, 0x8\n\t" \ 255 + "or\t%0, $1\n\t" \ 256 + "5:lbu\t$1, 4(%2)\n\t" \ 257 + "dsll\t%0, 0x8\n\t" \ 258 + "or\t%0, $1\n\t" \ 259 + "6:lbu\t$1, 5(%2)\n\t" \ 260 + "dsll\t%0, 0x8\n\t" \ 261 + "or\t%0, $1\n\t" \ 262 + "7:lbu\t$1, 6(%2)\n\t" \ 263 + "dsll\t%0, 0x8\n\t" \ 264 + "or\t%0, $1\n\t" \ 265 + "8:lbu\t$1, 7(%2)\n\t" \ 266 + "dsll\t%0, 0x8\n\t" \ 267 + "or\t%0, $1\n\t" \ 268 + "li\t%1, 0\n" \ 269 + ".set\tpop\n\t" \ 270 + "10:\n\t" \ 271 + ".insn\n\t" \ 272 + ".section\t.fixup,\"ax\"\n\t" \ 273 + "11:\tli\t%1, %3\n\t" \ 274 + "j\t10b\n\t" \ 275 + ".previous\n\t" \ 276 + ".section\t__ex_table,\"a\"\n\t" \ 277 + STR(PTR)"\t1b, 11b\n\t" \ 278 + STR(PTR)"\t2b, 11b\n\t" \ 279 + STR(PTR)"\t3b, 11b\n\t" \ 280 + STR(PTR)"\t4b, 11b\n\t" \ 281 + STR(PTR)"\t5b, 11b\n\t" \ 282 + STR(PTR)"\t6b, 11b\n\t" \ 283 + STR(PTR)"\t7b, 11b\n\t" \ 284 + STR(PTR)"\t8b, 11b\n\t" \ 285 + ".previous" \ 286 + : "=&r" (value), "=r" (res) \ 287 + : "r" (addr), "i" (-EFAULT)); 288 + #endif /* 
CONFIG_CPU_MIPSR6 */ 289 + 290 291 #define StoreHW(addr, value, res) \ 292 __asm__ __volatile__ ( \ ··· 228 : "=r" (res) \ 229 : "r" (value), "r" (addr), "i" (-EFAULT)); 230 231 + #ifndef CONFIG_CPU_MIPSR6 232 #define StoreW(addr, value, res) \ 233 __asm__ __volatile__ ( \ 234 "1:\t"user_swl("%1", "(%2)")"\n" \ ··· 263 ".previous" \ 264 : "=r" (res) \ 265 : "r" (value), "r" (addr), "i" (-EFAULT)); 266 + #else 267 + /* MIPSR6 has no swl and sdl instructions */ 268 + #define StoreW(addr, value, res) \ 269 + __asm__ __volatile__ ( \ 270 + ".set\tpush\n\t" \ 271 + ".set\tnoat\n\t" \ 272 + "1:"user_sb("%1", "3(%2)")"\n\t" \ 273 + "srl\t$1, %1, 0x8\n\t" \ 274 + "2:"user_sb("$1", "2(%2)")"\n\t" \ 275 + "srl\t$1, $1, 0x8\n\t" \ 276 + "3:"user_sb("$1", "1(%2)")"\n\t" \ 277 + "srl\t$1, $1, 0x8\n\t" \ 278 + "4:"user_sb("$1", "0(%2)")"\n\t" \ 279 + ".set\tpop\n\t" \ 280 + "li\t%0, 0\n" \ 281 + "10:\n\t" \ 282 + ".insn\n\t" \ 283 + ".section\t.fixup,\"ax\"\n\t" \ 284 + "11:\tli\t%0, %3\n\t" \ 285 + "j\t10b\n\t" \ 286 + ".previous\n\t" \ 287 + ".section\t__ex_table,\"a\"\n\t" \ 288 + STR(PTR)"\t1b, 11b\n\t" \ 289 + STR(PTR)"\t2b, 11b\n\t" \ 290 + STR(PTR)"\t3b, 11b\n\t" \ 291 + STR(PTR)"\t4b, 11b\n\t" \ 292 + ".previous" \ 293 + : "=&r" (res) \ 294 + : "r" (value), "r" (addr), "i" (-EFAULT) \ 295 + : "memory"); 296 297 + #define StoreDW(addr, value, res) \ 298 + __asm__ __volatile__ ( \ 299 + ".set\tpush\n\t" \ 300 + ".set\tnoat\n\t" \ 301 + "1:sb\t%1, 7(%2)\n\t" \ 302 + "dsrl\t$1, %1, 0x8\n\t" \ 303 + "2:sb\t$1, 6(%2)\n\t" \ 304 + "dsrl\t$1, $1, 0x8\n\t" \ 305 + "3:sb\t$1, 5(%2)\n\t" \ 306 + "dsrl\t$1, $1, 0x8\n\t" \ 307 + "4:sb\t$1, 4(%2)\n\t" \ 308 + "dsrl\t$1, $1, 0x8\n\t" \ 309 + "5:sb\t$1, 3(%2)\n\t" \ 310 + "dsrl\t$1, $1, 0x8\n\t" \ 311 + "6:sb\t$1, 2(%2)\n\t" \ 312 + "dsrl\t$1, $1, 0x8\n\t" \ 313 + "7:sb\t$1, 1(%2)\n\t" \ 314 + "dsrl\t$1, $1, 0x8\n\t" \ 315 + "8:sb\t$1, 0(%2)\n\t" \ 316 + "dsrl\t$1, $1, 0x8\n\t" \ 317 + ".set\tpop\n\t" \ 318 + "li\t%0, 0\n" \ 319 + 
"10:\n\t" \ 320 + ".insn\n\t" \ 321 + ".section\t.fixup,\"ax\"\n\t" \ 322 + "11:\tli\t%0, %3\n\t" \ 323 + "j\t10b\n\t" \ 324 + ".previous\n\t" \ 325 + ".section\t__ex_table,\"a\"\n\t" \ 326 + STR(PTR)"\t1b, 11b\n\t" \ 327 + STR(PTR)"\t2b, 11b\n\t" \ 328 + STR(PTR)"\t3b, 11b\n\t" \ 329 + STR(PTR)"\t4b, 11b\n\t" \ 330 + STR(PTR)"\t5b, 11b\n\t" \ 331 + STR(PTR)"\t6b, 11b\n\t" \ 332 + STR(PTR)"\t7b, 11b\n\t" \ 333 + STR(PTR)"\t8b, 11b\n\t" \ 334 + ".previous" \ 335 + : "=&r" (res) \ 336 + : "r" (value), "r" (addr), "i" (-EFAULT) \ 337 + : "memory"); 338 + #endif /* CONFIG_CPU_MIPSR6 */ 339 + 340 + #else /* __BIG_ENDIAN */ 341 + 342 #define LoadHW(addr, value, res) \ 343 __asm__ __volatile__ (".set\tnoat\n" \ 344 "1:\t"user_lb("%0", "1(%2)")"\n" \ ··· 286 : "=&r" (value), "=r" (res) \ 287 : "r" (addr), "i" (-EFAULT)); 288 289 + #ifndef CONFIG_CPU_MIPSR6 290 #define LoadW(addr, value, res) \ 291 __asm__ __volatile__ ( \ 292 "1:\t"user_lwl("%0", "3(%2)")"\n" \ ··· 303 ".previous" \ 304 : "=&r" (value), "=r" (res) \ 305 : "r" (addr), "i" (-EFAULT)); 306 + #else 307 + /* MIPSR6 has no lwl instruction */ 308 + #define LoadW(addr, value, res) \ 309 + __asm__ __volatile__ ( \ 310 + ".set\tpush\n" \ 311 + ".set\tnoat\n\t" \ 312 + "1:"user_lb("%0", "3(%2)")"\n\t" \ 313 + "2:"user_lbu("$1", "2(%2)")"\n\t" \ 314 + "sll\t%0, 0x8\n\t" \ 315 + "or\t%0, $1\n\t" \ 316 + "3:"user_lbu("$1", "1(%2)")"\n\t" \ 317 + "sll\t%0, 0x8\n\t" \ 318 + "or\t%0, $1\n\t" \ 319 + "4:"user_lbu("$1", "0(%2)")"\n\t" \ 320 + "sll\t%0, 0x8\n\t" \ 321 + "or\t%0, $1\n\t" \ 322 + "li\t%1, 0\n" \ 323 + ".set\tpop\n" \ 324 + "10:\n\t" \ 325 + ".insn\n\t" \ 326 + ".section\t.fixup,\"ax\"\n\t" \ 327 + "11:\tli\t%1, %3\n\t" \ 328 + "j\t10b\n\t" \ 329 + ".previous\n\t" \ 330 + ".section\t__ex_table,\"a\"\n\t" \ 331 + STR(PTR)"\t1b, 11b\n\t" \ 332 + STR(PTR)"\t2b, 11b\n\t" \ 333 + STR(PTR)"\t3b, 11b\n\t" \ 334 + STR(PTR)"\t4b, 11b\n\t" \ 335 + ".previous" \ 336 + : "=&r" (value), "=r" (res) \ 337 + : "r" (addr), "i" 
(-EFAULT)); 338 + #endif /* CONFIG_CPU_MIPSR6 */ 339 + 340 341 #define LoadHWU(addr, value, res) \ 342 __asm__ __volatile__ ( \ ··· 326 : "=&r" (value), "=r" (res) \ 327 : "r" (addr), "i" (-EFAULT)); 328 329 + #ifndef CONFIG_CPU_MIPSR6 330 #define LoadWU(addr, value, res) \ 331 __asm__ __volatile__ ( \ 332 "1:\t"user_lwl("%0", "3(%2)")"\n" \ ··· 363 ".previous" \ 364 : "=&r" (value), "=r" (res) \ 365 : "r" (addr), "i" (-EFAULT)); 366 + #else 367 + /* MIPSR6 has not lwl and ldl instructions */ 368 + #define LoadWU(addr, value, res) \ 369 + __asm__ __volatile__ ( \ 370 + ".set\tpush\n\t" \ 371 + ".set\tnoat\n\t" \ 372 + "1:"user_lbu("%0", "3(%2)")"\n\t" \ 373 + "2:"user_lbu("$1", "2(%2)")"\n\t" \ 374 + "sll\t%0, 0x8\n\t" \ 375 + "or\t%0, $1\n\t" \ 376 + "3:"user_lbu("$1", "1(%2)")"\n\t" \ 377 + "sll\t%0, 0x8\n\t" \ 378 + "or\t%0, $1\n\t" \ 379 + "4:"user_lbu("$1", "0(%2)")"\n\t" \ 380 + "sll\t%0, 0x8\n\t" \ 381 + "or\t%0, $1\n\t" \ 382 + "li\t%1, 0\n" \ 383 + ".set\tpop\n" \ 384 + "10:\n\t" \ 385 + ".insn\n\t" \ 386 + ".section\t.fixup,\"ax\"\n\t" \ 387 + "11:\tli\t%1, %3\n\t" \ 388 + "j\t10b\n\t" \ 389 + ".previous\n\t" \ 390 + ".section\t__ex_table,\"a\"\n\t" \ 391 + STR(PTR)"\t1b, 11b\n\t" \ 392 + STR(PTR)"\t2b, 11b\n\t" \ 393 + STR(PTR)"\t3b, 11b\n\t" \ 394 + STR(PTR)"\t4b, 11b\n\t" \ 395 + ".previous" \ 396 + : "=&r" (value), "=r" (res) \ 397 + : "r" (addr), "i" (-EFAULT)); 398 + 399 + #define LoadDW(addr, value, res) \ 400 + __asm__ __volatile__ ( \ 401 + ".set\tpush\n\t" \ 402 + ".set\tnoat\n\t" \ 403 + "1:lb\t%0, 7(%2)\n\t" \ 404 + "2:lbu\t$1, 6(%2)\n\t" \ 405 + "dsll\t%0, 0x8\n\t" \ 406 + "or\t%0, $1\n\t" \ 407 + "3:lbu\t$1, 5(%2)\n\t" \ 408 + "dsll\t%0, 0x8\n\t" \ 409 + "or\t%0, $1\n\t" \ 410 + "4:lbu\t$1, 4(%2)\n\t" \ 411 + "dsll\t%0, 0x8\n\t" \ 412 + "or\t%0, $1\n\t" \ 413 + "5:lbu\t$1, 3(%2)\n\t" \ 414 + "dsll\t%0, 0x8\n\t" \ 415 + "or\t%0, $1\n\t" \ 416 + "6:lbu\t$1, 2(%2)\n\t" \ 417 + "dsll\t%0, 0x8\n\t" \ 418 + "or\t%0, $1\n\t" \ 419 + "7:lbu\t$1, 
1(%2)\n\t" \ 420 + "dsll\t%0, 0x8\n\t" \ 421 + "or\t%0, $1\n\t" \ 422 + "8:lbu\t$1, 0(%2)\n\t" \ 423 + "dsll\t%0, 0x8\n\t" \ 424 + "or\t%0, $1\n\t" \ 425 + "li\t%1, 0\n" \ 426 + ".set\tpop\n\t" \ 427 + "10:\n\t" \ 428 + ".insn\n\t" \ 429 + ".section\t.fixup,\"ax\"\n\t" \ 430 + "11:\tli\t%1, %3\n\t" \ 431 + "j\t10b\n\t" \ 432 + ".previous\n\t" \ 433 + ".section\t__ex_table,\"a\"\n\t" \ 434 + STR(PTR)"\t1b, 11b\n\t" \ 435 + STR(PTR)"\t2b, 11b\n\t" \ 436 + STR(PTR)"\t3b, 11b\n\t" \ 437 + STR(PTR)"\t4b, 11b\n\t" \ 438 + STR(PTR)"\t5b, 11b\n\t" \ 439 + STR(PTR)"\t6b, 11b\n\t" \ 440 + STR(PTR)"\t7b, 11b\n\t" \ 441 + STR(PTR)"\t8b, 11b\n\t" \ 442 + ".previous" \ 443 + : "=&r" (value), "=r" (res) \ 444 + : "r" (addr), "i" (-EFAULT)); 445 + #endif /* CONFIG_CPU_MIPSR6 */ 446 447 #define StoreHW(addr, value, res) \ 448 __asm__ __volatile__ ( \ ··· 384 ".previous" \ 385 : "=r" (res) \ 386 : "r" (value), "r" (addr), "i" (-EFAULT)); 387 + #ifndef CONFIG_CPU_MIPSR6 388 #define StoreW(addr, value, res) \ 389 __asm__ __volatile__ ( \ 390 "1:\t"user_swl("%1", "3(%2)")"\n" \ ··· 420 ".previous" \ 421 : "=r" (res) \ 422 : "r" (value), "r" (addr), "i" (-EFAULT)); 423 + #else 424 + /* MIPSR6 has no swl and sdl instructions */ 425 + #define StoreW(addr, value, res) \ 426 + __asm__ __volatile__ ( \ 427 + ".set\tpush\n\t" \ 428 + ".set\tnoat\n\t" \ 429 + "1:"user_sb("%1", "0(%2)")"\n\t" \ 430 + "srl\t$1, %1, 0x8\n\t" \ 431 + "2:"user_sb("$1", "1(%2)")"\n\t" \ 432 + "srl\t$1, $1, 0x8\n\t" \ 433 + "3:"user_sb("$1", "2(%2)")"\n\t" \ 434 + "srl\t$1, $1, 0x8\n\t" \ 435 + "4:"user_sb("$1", "3(%2)")"\n\t" \ 436 + ".set\tpop\n\t" \ 437 + "li\t%0, 0\n" \ 438 + "10:\n\t" \ 439 + ".insn\n\t" \ 440 + ".section\t.fixup,\"ax\"\n\t" \ 441 + "11:\tli\t%0, %3\n\t" \ 442 + "j\t10b\n\t" \ 443 + ".previous\n\t" \ 444 + ".section\t__ex_table,\"a\"\n\t" \ 445 + STR(PTR)"\t1b, 11b\n\t" \ 446 + STR(PTR)"\t2b, 11b\n\t" \ 447 + STR(PTR)"\t3b, 11b\n\t" \ 448 + STR(PTR)"\t4b, 11b\n\t" \ 449 + ".previous" \ 450 + : 
"=&r" (res) \ 451 + : "r" (value), "r" (addr), "i" (-EFAULT) \ 452 + : "memory"); 453 + 454 + #define StoreDW(addr, value, res) \ 455 + __asm__ __volatile__ ( \ 456 + ".set\tpush\n\t" \ 457 + ".set\tnoat\n\t" \ 458 + "1:sb\t%1, 0(%2)\n\t" \ 459 + "dsrl\t$1, %1, 0x8\n\t" \ 460 + "2:sb\t$1, 1(%2)\n\t" \ 461 + "dsrl\t$1, $1, 0x8\n\t" \ 462 + "3:sb\t$1, 2(%2)\n\t" \ 463 + "dsrl\t$1, $1, 0x8\n\t" \ 464 + "4:sb\t$1, 3(%2)\n\t" \ 465 + "dsrl\t$1, $1, 0x8\n\t" \ 466 + "5:sb\t$1, 4(%2)\n\t" \ 467 + "dsrl\t$1, $1, 0x8\n\t" \ 468 + "6:sb\t$1, 5(%2)\n\t" \ 469 + "dsrl\t$1, $1, 0x8\n\t" \ 470 + "7:sb\t$1, 6(%2)\n\t" \ 471 + "dsrl\t$1, $1, 0x8\n\t" \ 472 + "8:sb\t$1, 7(%2)\n\t" \ 473 + "dsrl\t$1, $1, 0x8\n\t" \ 474 + ".set\tpop\n\t" \ 475 + "li\t%0, 0\n" \ 476 + "10:\n\t" \ 477 + ".insn\n\t" \ 478 + ".section\t.fixup,\"ax\"\n\t" \ 479 + "11:\tli\t%0, %3\n\t" \ 480 + "j\t10b\n\t" \ 481 + ".previous\n\t" \ 482 + ".section\t__ex_table,\"a\"\n\t" \ 483 + STR(PTR)"\t1b, 11b\n\t" \ 484 + STR(PTR)"\t2b, 11b\n\t" \ 485 + STR(PTR)"\t3b, 11b\n\t" \ 486 + STR(PTR)"\t4b, 11b\n\t" \ 487 + STR(PTR)"\t5b, 11b\n\t" \ 488 + STR(PTR)"\t6b, 11b\n\t" \ 489 + STR(PTR)"\t7b, 11b\n\t" \ 490 + STR(PTR)"\t8b, 11b\n\t" \ 491 + ".previous" \ 492 + : "=&r" (res) \ 493 + : "r" (value), "r" (addr), "i" (-EFAULT) \ 494 + : "memory"); 495 + #endif /* CONFIG_CPU_MIPSR6 */ 496 #endif 497 498 static void emulate_load_store_insn(struct pt_regs *regs, ··· 703 break; 704 return; 705 706 + #ifndef CONFIG_CPU_MIPSR6 707 /* 708 * COP2 is available to implementor for application specific use. 709 * It's up to applications to register a notifier chain and do 710 * whatever they have to do, including possible sending of signals. 711 + * 712 + * This instruction has been reallocated in Release 6 713 */ 714 case lwc2_op: 715 cu2_notifier_call_chain(CU2_LWC2_OP, regs); ··· 723 case sdc2_op: 724 cu2_notifier_call_chain(CU2_SDC2_OP, regs); 725 break; 726 + #endif 727 default: 728 /* 729 * Pheeee... 
We encountered an yet unknown instruction or
+1
arch/mips/lib/Makefile
··· 8 9 obj-y += iomap.o 10 obj-$(CONFIG_PCI) += iomap-pci.o 11 12 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o 13 obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
··· 8 9 obj-y += iomap.o 10 obj-$(CONFIG_PCI) += iomap-pci.o 11 + lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y)) 12 13 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o 14 obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
+23
arch/mips/lib/memcpy.S
··· 293 and t0, src, ADDRMASK 294 PREFS( 0, 2*32(src) ) 295 PREFD( 1, 2*32(dst) ) 296 bnez t1, .Ldst_unaligned\@ 297 nop 298 bnez t0, .Lsrc_unaligned_dst_aligned\@ 299 /* 300 * use delay slot for fall-through 301 * src and dst are aligned; need to compute rem ··· 381 bne rem, len, 1b 382 .set noreorder 383 384 /* 385 * src and dst are aligned, need to copy rem bytes (rem < NBYTES) 386 * A loop would do only a byte at a time with possible branch ··· 483 bne len, rem, 1b 484 .set noreorder 485 486 .Lcopy_bytes_checklen\@: 487 beqz len, .Ldone\@ 488 nop ··· 511 .Ldone\@: 512 jr ra 513 nop 514 .if __memcpy == 1 515 END(memcpy) 516 .set __memcpy, 0
··· 293 and t0, src, ADDRMASK 294 PREFS( 0, 2*32(src) ) 295 PREFD( 1, 2*32(dst) ) 296 + #ifndef CONFIG_CPU_MIPSR6 297 bnez t1, .Ldst_unaligned\@ 298 nop 299 bnez t0, .Lsrc_unaligned_dst_aligned\@ 300 + #else 301 + or t0, t0, t1 302 + bnez t0, .Lcopy_unaligned_bytes\@ 303 + #endif 304 /* 305 * use delay slot for fall-through 306 * src and dst are aligned; need to compute rem ··· 376 bne rem, len, 1b 377 .set noreorder 378 379 + #ifndef CONFIG_CPU_MIPSR6 380 /* 381 * src and dst are aligned, need to copy rem bytes (rem < NBYTES) 382 * A loop would do only a byte at a time with possible branch ··· 477 bne len, rem, 1b 478 .set noreorder 479 480 + #endif /* !CONFIG_CPU_MIPSR6 */ 481 .Lcopy_bytes_checklen\@: 482 beqz len, .Ldone\@ 483 nop ··· 504 .Ldone\@: 505 jr ra 506 nop 507 + 508 + #ifdef CONFIG_CPU_MIPSR6 509 + .Lcopy_unaligned_bytes\@: 510 + 1: 511 + COPY_BYTE(0) 512 + COPY_BYTE(1) 513 + COPY_BYTE(2) 514 + COPY_BYTE(3) 515 + COPY_BYTE(4) 516 + COPY_BYTE(5) 517 + COPY_BYTE(6) 518 + COPY_BYTE(7) 519 + ADD src, src, 8 520 + b 1b 521 + ADD dst, dst, 8 522 + #endif /* CONFIG_CPU_MIPSR6 */ 523 .if __memcpy == 1 524 END(memcpy) 525 .set __memcpy, 0
+47
arch/mips/lib/memset.S
··· 111 .set at 112 #endif 113 114 R10KCBARRIER(0(ra)) 115 #ifdef __MIPSEB__ 116 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ ··· 121 PTR_SUBU a0, t0 /* long align ptr */ 122 PTR_ADDU a2, t0 /* correct size */ 123 124 1: ori t1, a2, 0x3f /* # of full blocks */ 125 xori t1, 0x3f 126 beqz t1, .Lmemset_partial\@ /* no block to fill */ ··· 184 andi a2, STORMASK /* At most one long to go */ 185 186 beqz a2, 1f 187 PTR_ADDU a0, a2 /* What's left */ 188 R10KCBARRIER(0(ra)) 189 #ifdef __MIPSEB__ 190 EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) 191 #else 192 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) 193 #endif 194 1: jr ra 195 move a2, zero ··· 227 .set __memset, 0 228 .hidden __memset 229 .endif 230 231 .Lfirst_fixup\@: 232 jr ra
··· 111 .set at 112 #endif 113 114 + #ifndef CONFIG_CPU_MIPSR6 115 R10KCBARRIER(0(ra)) 116 #ifdef __MIPSEB__ 117 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ ··· 120 PTR_SUBU a0, t0 /* long align ptr */ 121 PTR_ADDU a2, t0 /* correct size */ 122 123 + #else /* CONFIG_CPU_MIPSR6 */ 124 + #define STORE_BYTE(N) \ 125 + EX(sb, a1, N(a0), .Lbyte_fixup\@); \ 126 + beqz t0, 0f; \ 127 + PTR_ADDU t0, 1; 128 + 129 + PTR_ADDU a2, t0 /* correct size */ 130 + PTR_ADDU t0, 1 131 + STORE_BYTE(0) 132 + STORE_BYTE(1) 133 + #if LONGSIZE == 4 134 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) 135 + #else 136 + STORE_BYTE(2) 137 + STORE_BYTE(3) 138 + STORE_BYTE(4) 139 + STORE_BYTE(5) 140 + EX(sb, a1, 6(a0), .Lbyte_fixup\@) 141 + #endif 142 + 0: 143 + ori a0, STORMASK 144 + xori a0, STORMASK 145 + PTR_ADDIU a0, STORSIZE 146 + #endif /* CONFIG_CPU_MIPSR6 */ 147 1: ori t1, a2, 0x3f /* # of full blocks */ 148 xori t1, 0x3f 149 beqz t1, .Lmemset_partial\@ /* no block to fill */ ··· 159 andi a2, STORMASK /* At most one long to go */ 160 161 beqz a2, 1f 162 + #ifndef CONFIG_CPU_MIPSR6 163 PTR_ADDU a0, a2 /* What's left */ 164 R10KCBARRIER(0(ra)) 165 #ifdef __MIPSEB__ 166 EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) 167 #else 168 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) 169 + #endif 170 + #else 171 + PTR_SUBU t0, $0, a2 172 + PTR_ADDIU t0, 1 173 + STORE_BYTE(0) 174 + STORE_BYTE(1) 175 + #if LONGSIZE == 4 176 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) 177 + #else 178 + STORE_BYTE(2) 179 + STORE_BYTE(3) 180 + STORE_BYTE(4) 181 + STORE_BYTE(5) 182 + EX(sb, a1, 6(a0), .Lbyte_fixup\@) 183 + #endif 184 + 0: 185 #endif 186 1: jr ra 187 move a2, zero ··· 185 .set __memset, 0 186 .hidden __memset 187 .endif 188 + 189 + .Lbyte_fixup\@: 190 + PTR_SUBU a2, $0, t0 191 + jr ra 192 + PTR_ADDIU a2, 1 193 194 .Lfirst_fixup\@: 195 jr ra
+1 -1
arch/mips/lib/mips-atomic.c
··· 15 #include <linux/export.h> 16 #include <linux/stringify.h> 17 18 - #ifndef CONFIG_CPU_MIPSR2 19 20 /* 21 * For cli() we have to insert nops to make sure that the new value
··· 15 #include <linux/export.h> 16 #include <linux/stringify.h> 17 18 + #if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6) 19 20 /* 21 * For cli() we have to insert nops to make sure that the new value
+158 -11
arch/mips/math-emu/cp1emu.c
··· 48 #include <asm/processor.h> 49 #include <asm/fpu_emulator.h> 50 #include <asm/fpu.h> 51 52 #include "ieee754.h" 53 ··· 69 #define modeindex(v) ((v) & FPU_CSR_RM) 70 71 /* convert condition code register number to csr bit */ 72 - static const unsigned int fpucondbit[8] = { 73 FPU_CSR_COND0, 74 FPU_CSR_COND1, 75 FPU_CSR_COND2, ··· 449 dec_insn.next_pc_inc; 450 /* Fall through */ 451 case jr_op: 452 *contpc = regs->regs[insn.r_format.rs]; 453 return 1; 454 } ··· 460 switch (insn.i_format.rt) { 461 case bltzal_op: 462 case bltzall_op: 463 regs->regs[31] = regs->cp0_epc + 464 dec_insn.pc_inc + 465 dec_insn.next_pc_inc; 466 /* Fall through */ 467 - case bltz_op: 468 case bltzl_op: 469 if ((long)regs->regs[insn.i_format.rs] < 0) 470 *contpc = regs->cp0_epc + 471 dec_insn.pc_inc + ··· 483 return 1; 484 case bgezal_op: 485 case bgezall_op: 486 regs->regs[31] = regs->cp0_epc + 487 dec_insn.pc_inc + 488 dec_insn.next_pc_inc; 489 /* Fall through */ 490 - case bgez_op: 491 case bgezl_op: 492 if ((long)regs->regs[insn.i_format.rs] >= 0) 493 *contpc = regs->cp0_epc + 494 dec_insn.pc_inc + ··· 521 /* Set microMIPS mode bit: XOR for jalx. 
*/ 522 *contpc ^= bit; 523 return 1; 524 - case beq_op: 525 case beql_op: 526 if (regs->regs[insn.i_format.rs] == 527 regs->regs[insn.i_format.rt]) 528 *contpc = regs->cp0_epc + ··· 535 dec_insn.pc_inc + 536 dec_insn.next_pc_inc; 537 return 1; 538 - case bne_op: 539 case bnel_op: 540 if (regs->regs[insn.i_format.rs] != 541 regs->regs[insn.i_format.rt]) 542 *contpc = regs->cp0_epc + ··· 549 dec_insn.pc_inc + 550 dec_insn.next_pc_inc; 551 return 1; 552 - case blez_op: 553 case blezl_op: 554 if ((long)regs->regs[insn.i_format.rs] <= 0) 555 *contpc = regs->cp0_epc + 556 dec_insn.pc_inc + ··· 586 dec_insn.pc_inc + 587 dec_insn.next_pc_inc; 588 return 1; 589 - case bgtz_op: 590 case bgtzl_op: 591 if ((long)regs->regs[insn.i_format.rs] > 0) 592 *contpc = regs->cp0_epc + 593 dec_insn.pc_inc + ··· 623 *contpc = regs->cp0_epc + 624 dec_insn.pc_inc + 625 dec_insn.next_pc_inc; 626 return 1; 627 #ifdef CONFIG_CPU_CAVIUM_OCTEON 628 case lwc2_op: /* This is bbit0 on Octeon */ ··· 659 else 660 *contpc = regs->cp0_epc + 8; 661 return 1; 662 #endif 663 case cop0_op: 664 case cop1_op: 665 case cop2_op: 666 case cop1x_op: 667 if (insn.i_format.rs == bc_op) { ··· 1561 * achieve full IEEE-754 accuracy - however this emulator does. 1562 */ 1563 case frsqrt_op: 1564 - if (!cpu_has_mips_4_5_r2) 1565 return SIGILL; 1566 1567 handler.u = fpemu_sp_rsqrt; 1568 goto scopuop; 1569 1570 case frecip_op: 1571 - if (!cpu_has_mips_4_5_r2) 1572 return SIGILL; 1573 1574 handler.u = fpemu_sp_recip; ··· 1763 * achieve full IEEE-754 accuracy - however this emulator does. 1764 */ 1765 case frsqrt_op: 1766 - if (!cpu_has_mips_4_5_r2) 1767 return SIGILL; 1768 1769 handler.u = fpemu_dp_rsqrt; 1770 goto dcopuop; 1771 case frecip_op: 1772 - if (!cpu_has_mips_4_5_r2) 1773 return SIGILL; 1774 1775 handler.u = fpemu_dp_recip;
··· 48 #include <asm/processor.h> 49 #include <asm/fpu_emulator.h> 50 #include <asm/fpu.h> 51 + #include <asm/mips-r2-to-r6-emul.h> 52 53 #include "ieee754.h" 54 ··· 68 #define modeindex(v) ((v) & FPU_CSR_RM) 69 70 /* convert condition code register number to csr bit */ 71 + const unsigned int fpucondbit[8] = { 72 FPU_CSR_COND0, 73 FPU_CSR_COND1, 74 FPU_CSR_COND2, ··· 448 dec_insn.next_pc_inc; 449 /* Fall through */ 450 case jr_op: 451 + /* For R6, JR already emulated in jalr_op */ 452 + if (NO_R6EMU && insn.r_format.opcode == jr_op) 453 + break; 454 *contpc = regs->regs[insn.r_format.rs]; 455 return 1; 456 } ··· 456 switch (insn.i_format.rt) { 457 case bltzal_op: 458 case bltzall_op: 459 + if (NO_R6EMU && (insn.i_format.rs || 460 + insn.i_format.rt == bltzall_op)) 461 + break; 462 + 463 regs->regs[31] = regs->cp0_epc + 464 dec_insn.pc_inc + 465 dec_insn.next_pc_inc; 466 /* Fall through */ 467 case bltzl_op: 468 + if (NO_R6EMU) 469 + break; 470 + case bltz_op: 471 if ((long)regs->regs[insn.i_format.rs] < 0) 472 *contpc = regs->cp0_epc + 473 dec_insn.pc_inc + ··· 473 return 1; 474 case bgezal_op: 475 case bgezall_op: 476 + if (NO_R6EMU && (insn.i_format.rs || 477 + insn.i_format.rt == bgezall_op)) 478 + break; 479 + 480 regs->regs[31] = regs->cp0_epc + 481 dec_insn.pc_inc + 482 dec_insn.next_pc_inc; 483 /* Fall through */ 484 case bgezl_op: 485 + if (NO_R6EMU) 486 + break; 487 + case bgez_op: 488 if ((long)regs->regs[insn.i_format.rs] >= 0) 489 *contpc = regs->cp0_epc + 490 dec_insn.pc_inc + ··· 505 /* Set microMIPS mode bit: XOR for jalx. 
*/ 506 *contpc ^= bit; 507 return 1; 508 case beql_op: 509 + if (NO_R6EMU) 510 + break; 511 + case beq_op: 512 if (regs->regs[insn.i_format.rs] == 513 regs->regs[insn.i_format.rt]) 514 *contpc = regs->cp0_epc + ··· 517 dec_insn.pc_inc + 518 dec_insn.next_pc_inc; 519 return 1; 520 case bnel_op: 521 + if (NO_R6EMU) 522 + break; 523 + case bne_op: 524 if (regs->regs[insn.i_format.rs] != 525 regs->regs[insn.i_format.rt]) 526 *contpc = regs->cp0_epc + ··· 529 dec_insn.pc_inc + 530 dec_insn.next_pc_inc; 531 return 1; 532 case blezl_op: 533 + if (NO_R6EMU) 534 + break; 535 + case blez_op: 536 + 537 + /* 538 + * Compact branches for R6 for the 539 + * blez and blezl opcodes. 540 + * BLEZ | rs = 0 | rt != 0 == BLEZALC 541 + * BLEZ | rs = rt != 0 == BGEZALC 542 + * BLEZ | rs != 0 | rt != 0 == BGEUC 543 + * BLEZL | rs = 0 | rt != 0 == BLEZC 544 + * BLEZL | rs = rt != 0 == BGEZC 545 + * BLEZL | rs != 0 | rt != 0 == BGEC 546 + * 547 + * For real BLEZ{,L}, rt is always 0. 548 + */ 549 + if (cpu_has_mips_r6 && insn.i_format.rt) { 550 + if ((insn.i_format.opcode == blez_op) && 551 + ((!insn.i_format.rs && insn.i_format.rt) || 552 + (insn.i_format.rs == insn.i_format.rt))) 553 + regs->regs[31] = regs->cp0_epc + 554 + dec_insn.pc_inc; 555 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 556 + dec_insn.next_pc_inc; 557 + 558 + return 1; 559 + } 560 if ((long)regs->regs[insn.i_format.rs] <= 0) 561 *contpc = regs->cp0_epc + 562 dec_insn.pc_inc + ··· 540 dec_insn.pc_inc + 541 dec_insn.next_pc_inc; 542 return 1; 543 case bgtzl_op: 544 + if (NO_R6EMU) 545 + break; 546 + case bgtz_op: 547 + /* 548 + * Compact branches for R6 for the 549 + * bgtz and bgtzl opcodes. 
550 + * BGTZ | rs = 0 | rt != 0 == BGTZALC 551 + * BGTZ | rs = rt != 0 == BLTZALC 552 + * BGTZ | rs != 0 | rt != 0 == BLTUC 553 + * BGTZL | rs = 0 | rt != 0 == BGTZC 554 + * BGTZL | rs = rt != 0 == BLTZC 555 + * BGTZL | rs != 0 | rt != 0 == BLTC 556 + * 557 + * *ZALC varint for BGTZ &&& rt != 0 558 + * For real GTZ{,L}, rt is always 0. 559 + */ 560 + if (cpu_has_mips_r6 && insn.i_format.rt) { 561 + if ((insn.i_format.opcode == blez_op) && 562 + ((!insn.i_format.rs && insn.i_format.rt) || 563 + (insn.i_format.rs == insn.i_format.rt))) 564 + regs->regs[31] = regs->cp0_epc + 565 + dec_insn.pc_inc; 566 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 567 + dec_insn.next_pc_inc; 568 + 569 + return 1; 570 + } 571 + 572 if ((long)regs->regs[insn.i_format.rs] > 0) 573 *contpc = regs->cp0_epc + 574 dec_insn.pc_inc + ··· 550 *contpc = regs->cp0_epc + 551 dec_insn.pc_inc + 552 dec_insn.next_pc_inc; 553 + return 1; 554 + case cbcond0_op: 555 + case cbcond1_op: 556 + if (!cpu_has_mips_r6) 557 + break; 558 + if (insn.i_format.rt && !insn.i_format.rs) 559 + regs->regs[31] = regs->cp0_epc + 4; 560 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 561 + dec_insn.next_pc_inc; 562 + 563 return 1; 564 #ifdef CONFIG_CPU_CAVIUM_OCTEON 565 case lwc2_op: /* This is bbit0 on Octeon */ ··· 576 else 577 *contpc = regs->cp0_epc + 8; 578 return 1; 579 + #else 580 + case bc6_op: 581 + /* 582 + * Only valid for MIPS R6 but we can still end up 583 + * here from a broken userland so just tell emulator 584 + * this is not a branch and let it break later on. 
585 + */ 586 + if (!cpu_has_mips_r6) 587 + break; 588 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 589 + dec_insn.next_pc_inc; 590 + 591 + return 1; 592 + case balc6_op: 593 + if (!cpu_has_mips_r6) 594 + break; 595 + regs->regs[31] = regs->cp0_epc + 4; 596 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 597 + dec_insn.next_pc_inc; 598 + 599 + return 1; 600 + case beqzcjic_op: 601 + if (!cpu_has_mips_r6) 602 + break; 603 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 604 + dec_insn.next_pc_inc; 605 + 606 + return 1; 607 + case bnezcjialc_op: 608 + if (!cpu_has_mips_r6) 609 + break; 610 + if (!insn.i_format.rs) 611 + regs->regs[31] = regs->cp0_epc + 4; 612 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 613 + dec_insn.next_pc_inc; 614 + 615 + return 1; 616 #endif 617 case cop0_op: 618 case cop1_op: 619 + /* Need to check for R6 bc1nez and bc1eqz branches */ 620 + if (cpu_has_mips_r6 && 621 + ((insn.i_format.rs == bc1eqz_op) || 622 + (insn.i_format.rs == bc1nez_op))) { 623 + bit = 0; 624 + switch (insn.i_format.rs) { 625 + case bc1eqz_op: 626 + if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1) 627 + bit = 1; 628 + break; 629 + case bc1nez_op: 630 + if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)) 631 + bit = 1; 632 + break; 633 + } 634 + if (bit) 635 + *contpc = regs->cp0_epc + 636 + dec_insn.pc_inc + 637 + (insn.i_format.simmediate << 2); 638 + else 639 + *contpc = regs->cp0_epc + 640 + dec_insn.pc_inc + 641 + dec_insn.next_pc_inc; 642 + 643 + return 1; 644 + } 645 + /* R2/R6 compatible cop1 instruction. Fall through */ 646 case cop2_op: 647 case cop1x_op: 648 if (insn.i_format.rs == bc_op) { ··· 1414 * achieve full IEEE-754 accuracy - however this emulator does. 
1415 */ 1416 case frsqrt_op: 1417 + if (!cpu_has_mips_4_5_r2_r6) 1418 return SIGILL; 1419 1420 handler.u = fpemu_sp_rsqrt; 1421 goto scopuop; 1422 1423 case frecip_op: 1424 + if (!cpu_has_mips_4_5_r2_r6) 1425 return SIGILL; 1426 1427 handler.u = fpemu_sp_recip; ··· 1616 * achieve full IEEE-754 accuracy - however this emulator does. 1617 */ 1618 case frsqrt_op: 1619 + if (!cpu_has_mips_4_5_r2_r6) 1620 return SIGILL; 1621 1622 handler.u = fpemu_dp_rsqrt; 1623 goto dcopuop; 1624 case frecip_op: 1625 + if (!cpu_has_mips_4_5_r2_r6) 1626 return SIGILL; 1627 1628 handler.u = fpemu_dp_recip;
+4 -2
arch/mips/mm/c-r4k.c
··· 794 __asm__ __volatile__ ( 795 ".set push\n\t" 796 ".set noat\n\t" 797 - ".set mips3\n\t" 798 #ifdef CONFIG_32BIT 799 "la $at,1f\n\t" 800 #endif ··· 1255 case CPU_P5600: 1256 case CPU_PROAPTIV: 1257 case CPU_M5150: 1258 if (!(read_c0_config7() & MIPS_CONF7_IAR) && 1259 (c->icache.waysize > PAGE_SIZE)) 1260 c->icache.flags |= MIPS_CACHE_ALIASES; ··· 1473 1474 default: 1475 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1476 - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { 1477 #ifdef CONFIG_MIPS_CPU_SCACHE 1478 if (mips_sc_init ()) { 1479 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
··· 794 __asm__ __volatile__ ( 795 ".set push\n\t" 796 ".set noat\n\t" 797 + ".set "MIPS_ISA_LEVEL"\n\t" 798 #ifdef CONFIG_32BIT 799 "la $at,1f\n\t" 800 #endif ··· 1255 case CPU_P5600: 1256 case CPU_PROAPTIV: 1257 case CPU_M5150: 1258 + case CPU_QEMU_GENERIC: 1259 if (!(read_c0_config7() & MIPS_CONF7_IAR) && 1260 (c->icache.waysize > PAGE_SIZE)) 1261 c->icache.flags |= MIPS_CACHE_ALIASES; ··· 1472 1473 default: 1474 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1475 + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | 1476 + MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) { 1477 #ifdef CONFIG_MIPS_CPU_SCACHE 1478 if (mips_sc_init ()) { 1479 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
+20 -9
arch/mips/mm/fault.c
··· 14 #include <linux/string.h> 15 #include <linux/types.h> 16 #include <linux/ptrace.h> 17 #include <linux/mman.h> 18 #include <linux/mm.h> 19 #include <linux/smp.h> ··· 28 #include <asm/ptrace.h> 29 #include <asm/highmem.h> /* For VMALLOC_END */ 30 #include <linux/kdebug.h> 31 32 /* 33 * This routine handles page faults. It determines the address, ··· 46 siginfo_t info; 47 int fault; 48 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 49 50 #if 0 51 printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), ··· 208 if (user_mode(regs)) { 209 tsk->thread.cp0_badvaddr = address; 210 tsk->thread.error_code = write; 211 - #if 0 212 - printk("do_page_fault() #2: sending SIGSEGV to %s for " 213 - "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n", 214 - tsk->comm, 215 - write ? "write access to" : "read access from", 216 - field, address, 217 - field, (unsigned long) regs->cp0_epc, 218 - field, (unsigned long) regs->regs[31]); 219 - #endif 220 info.si_signo = SIGSEGV; 221 info.si_errno = 0; 222 /* info.si_code has been set above */
··· 14 #include <linux/string.h> 15 #include <linux/types.h> 16 #include <linux/ptrace.h> 17 + #include <linux/ratelimit.h> 18 #include <linux/mman.h> 19 #include <linux/mm.h> 20 #include <linux/smp.h> ··· 27 #include <asm/ptrace.h> 28 #include <asm/highmem.h> /* For VMALLOC_END */ 29 #include <linux/kdebug.h> 30 + 31 + int show_unhandled_signals = 1; 32 33 /* 34 * This routine handles page faults. It determines the address, ··· 43 siginfo_t info; 44 int fault; 45 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 46 + 47 + static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); 48 49 #if 0 50 printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), ··· 203 if (user_mode(regs)) { 204 tsk->thread.cp0_badvaddr = address; 205 tsk->thread.error_code = write; 206 + if (show_unhandled_signals && 207 + unhandled_signal(tsk, SIGSEGV) && 208 + __ratelimit(&ratelimit_state)) { 209 + pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", 210 + tsk->comm, 211 + write ? "write access to" : "read access from", 212 + field, address); 213 + pr_info("epc = %0*lx in", field, 214 + (unsigned long) regs->cp0_epc); 215 + print_vma_addr(" ", regs->cp0_epc); 216 + pr_info("ra = %0*lx in", field, 217 + (unsigned long) regs->regs[31]); 218 + print_vma_addr(" ", regs->regs[31]); 219 + pr_info("\n"); 220 + } 221 info.si_signo = SIGSEGV; 222 info.si_errno = 0; 223 /* info.si_code has been set above */
+26 -4
arch/mips/mm/page.c
··· 72 #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) 73 #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) 74 75 static int pref_bias_clear_store; 76 static int pref_bias_copy_load; 77 static int pref_bias_copy_store; ··· 192 pref_bias_copy_load = 256; 193 pref_bias_copy_store = 128; 194 pref_src_mode = Pref_LoadStreamed; 195 - pref_dst_mode = Pref_PrepareForStore; 196 break; 197 } 198 } else { ··· 236 return; 237 238 if (pref_bias_clear_store) { 239 - uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, 240 A0); 241 } else if (cache_line_size == (half_clear_loop_size << 1)) { 242 if (cpu_has_cache_cdex_s) { ··· 379 return; 380 381 if (pref_bias_copy_load) 382 - uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); 383 } 384 385 static inline void build_copy_store_pref(u32 **buf, int off) ··· 388 return; 389 390 if (pref_bias_copy_store) { 391 - uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, 392 A0); 393 } else if (cache_line_size == (half_copy_loop_size << 1)) { 394 if (cpu_has_cache_cdex_s) {
··· 72 #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) 73 #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) 74 75 + /* 76 + * R6 has a limited offset of the pref instruction. 77 + * Skip it if the offset is more than 9 bits. 78 + */ 79 + #define _uasm_i_pref(a, b, c, d) \ 80 + do { \ 81 + if (cpu_has_mips_r6) { \ 82 + if (c <= 0xff && c >= -0x100) \ 83 + uasm_i_pref(a, b, c, d);\ 84 + } else { \ 85 + uasm_i_pref(a, b, c, d); \ 86 + } \ 87 + } while(0) 88 + 89 static int pref_bias_clear_store; 90 static int pref_bias_copy_load; 91 static int pref_bias_copy_store; ··· 178 pref_bias_copy_load = 256; 179 pref_bias_copy_store = 128; 180 pref_src_mode = Pref_LoadStreamed; 181 + if (cpu_has_mips_r6) 182 + /* 183 + * Bit 30 (Pref_PrepareForStore) has been 184 + * removed from MIPS R6. Use bit 5 185 + * (Pref_StoreStreamed). 186 + */ 187 + pref_dst_mode = Pref_StoreStreamed; 188 + else 189 + pref_dst_mode = Pref_PrepareForStore; 190 break; 191 } 192 } else { ··· 214 return; 215 216 if (pref_bias_clear_store) { 217 + _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, 218 A0); 219 } else if (cache_line_size == (half_clear_loop_size << 1)) { 220 if (cpu_has_cache_cdex_s) { ··· 357 return; 358 359 if (pref_bias_copy_load) 360 + _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); 361 } 362 363 static inline void build_copy_store_pref(u32 **buf, int off) ··· 366 return; 367 368 if (pref_bias_copy_store) { 369 + _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, 370 A0); 371 } else if (cache_line_size == (half_copy_loop_size << 1)) { 372 if (cpu_has_cache_cdex_s) {
+3 -1
arch/mips/mm/sc-mips.c
··· 81 case CPU_PROAPTIV: 82 case CPU_P5600: 83 case CPU_BMIPS5000: 84 if (config2 & (1 << 12)) 85 return 0; 86 } ··· 105 106 /* Ignore anything but MIPSxx processors */ 107 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 108 - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) 109 return 0; 110 111 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
··· 81 case CPU_PROAPTIV: 82 case CPU_P5600: 83 case CPU_BMIPS5000: 84 + case CPU_QEMU_GENERIC: 85 if (config2 & (1 << 12)) 86 return 0; 87 } ··· 104 105 /* Ignore anything but MIPSxx processors */ 106 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 107 + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | 108 + MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6))) 109 return 0; 110 111 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
+3 -5
arch/mips/mm/tlb-r4k.c
··· 485 * Enable the no read, no exec bits, and enable large virtual 486 * address. 487 */ 488 - u32 pg = PG_RIE | PG_XIE; 489 #ifdef CONFIG_64BIT 490 - pg |= PG_ELPA; 491 #endif 492 - if (cpu_has_rixiex) 493 - pg |= PG_IEC; 494 - write_c0_pagegrain(pg); 495 } 496 497 temp_tlb_entry = current_cpu_data.tlbsize - 1;
··· 485 * Enable the no read, no exec bits, and enable large virtual 486 * address. 487 */ 488 #ifdef CONFIG_64BIT 489 + set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA); 490 + #else 491 + set_c0_pagegrain(PG_RIE | PG_XIE); 492 #endif 493 } 494 495 temp_tlb_entry = current_cpu_data.tlbsize - 1;
+4 -3
arch/mips/mm/tlbex.c
··· 501 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 502 } 503 504 - if (cpu_has_mips_r2) { 505 /* 506 * The architecture spec says an ehb is required here, 507 * but a number of cores do not have the hazard and ··· 514 case CPU_PROAPTIV: 515 case CPU_P5600: 516 case CPU_M5150: 517 break; 518 519 default: ··· 1953 1954 switch (current_cpu_type()) { 1955 default: 1956 - if (cpu_has_mips_r2) { 1957 uasm_i_ehb(&p); 1958 1959 case CPU_CAVIUM_OCTEON: ··· 2020 2021 switch (current_cpu_type()) { 2022 default: 2023 - if (cpu_has_mips_r2) { 2024 uasm_i_ehb(&p); 2025 2026 case CPU_CAVIUM_OCTEON:
··· 501 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 502 } 503 504 + if (cpu_has_mips_r2_exec_hazard) { 505 /* 506 * The architecture spec says an ehb is required here, 507 * but a number of cores do not have the hazard and ··· 514 case CPU_PROAPTIV: 515 case CPU_P5600: 516 case CPU_M5150: 517 + case CPU_QEMU_GENERIC: 518 break; 519 520 default: ··· 1952 1953 switch (current_cpu_type()) { 1954 default: 1955 + if (cpu_has_mips_r2_exec_hazard) { 1956 uasm_i_ehb(&p); 1957 1958 case CPU_CAVIUM_OCTEON: ··· 2019 2020 switch (current_cpu_type()) { 2021 default: 2022 + if (cpu_has_mips_r2_exec_hazard) { 2023 uasm_i_ehb(&p); 2024 2025 case CPU_CAVIUM_OCTEON:
-8
arch/mips/mm/uasm-micromips.c
··· 38 | (e) << RE_SH \ 39 | (f) << FUNC_SH) 40 41 - /* Define these when we are not the ISA the kernel is being compiled with. */ 42 - #ifndef CONFIG_CPU_MICROMIPS 43 - #define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) 44 - #define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) 45 - #define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) 46 - #define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) 47 - #endif 48 - 49 #include "uasm.c" 50 51 static struct insn insn_table_MM[] = {
··· 38 | (e) << RE_SH \ 39 | (f) << FUNC_SH) 40 41 #include "uasm.c" 42 43 static struct insn insn_table_MM[] = {
+31 -7
arch/mips/mm/uasm-mips.c
··· 38 | (e) << RE_SH \ 39 | (f) << FUNC_SH) 40 41 - /* Define these when we are not the ISA the kernel is being compiled with. */ 42 - #ifdef CONFIG_CPU_MICROMIPS 43 - #define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) 44 - #define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) 45 - #define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) 46 - #define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) 47 - #endif 48 49 #include "uasm.c" 50 ··· 62 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, 63 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, 64 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 65 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 66 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 67 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 68 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, ··· 89 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 90 { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, 91 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 92 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 93 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 94 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 95 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 96 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 97 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 98 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 99 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 100 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 101 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, ··· 117 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, 118 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 119 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, 120 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 121 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 
122 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, 123 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 124 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 125 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 126 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 127 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, ··· 220 op |= build_set(va_arg(ap, u32)); 221 if (ip->fields & SCIMM) 222 op |= build_scimm(va_arg(ap, u32)); 223 va_end(ap); 224 225 **buf = op;
··· 38 | (e) << RE_SH \ 39 | (f) << FUNC_SH) 40 41 + /* This macro sets the non-variable bits of an R6 instruction. */ 42 + #define M6(a, b, c, d, e) \ 43 + ((a) << OP_SH \ 44 + | (b) << RS_SH \ 45 + | (c) << RT_SH \ 46 + | (d) << SIMM9_SH \ 47 + | (e) << FUNC_SH) 48 49 #include "uasm.c" 50 ··· 62 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, 63 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, 64 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 65 + #ifndef CONFIG_CPU_MIPSR6 66 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 67 + #else 68 + { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, 69 + #endif 70 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 71 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 72 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, ··· 85 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 86 { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, 87 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 88 + #ifndef CONFIG_CPU_MIPSR6 89 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 90 + #else 91 + { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS }, 92 + #endif 93 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 94 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 95 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 96 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 97 + #ifndef CONFIG_CPU_MIPSR6 98 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 99 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 100 + #else 101 + { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 }, 102 + { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 }, 103 + #endif 104 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 105 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 106 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, ··· 104 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, 105 { 
insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 106 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, 107 + #ifndef CONFIG_CPU_MIPSR6 108 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 109 + #else 110 + { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 }, 111 + #endif 112 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 113 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, 114 + #ifndef CONFIG_CPU_MIPSR6 115 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 116 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 117 + #else 118 + { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 }, 119 + { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 }, 120 + #endif 121 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 122 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 123 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, ··· 198 op |= build_set(va_arg(ap, u32)); 199 if (ip->fields & SCIMM) 200 op |= build_scimm(va_arg(ap, u32)); 201 + if (ip->fields & SIMM9) 202 + op |= build_scimm9(va_arg(ap, u32)); 203 va_end(ap); 204 205 **buf = op;
+13 -2
arch/mips/mm/uasm.c
··· 24 JIMM = 0x080, 25 FUNC = 0x100, 26 SET = 0x200, 27 - SCIMM = 0x400 28 }; 29 30 #define OP_MASK 0x3f ··· 42 #define FUNC_SH 0 43 #define SET_MASK 0x7 44 #define SET_SH 0 45 46 enum opcode { 47 insn_invalid, ··· 117 KERN_WARNING "Micro-assembler field overflow\n"); 118 119 return (arg & SCIMM_MASK) << SCIMM_SH; 120 } 121 122 static inline u32 build_func(u32 arg) ··· 341 void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, 342 unsigned int c) 343 { 344 - if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) 345 /* 346 * As per erratum Core-14449, replace prefetches 0-4, 347 * 6-24 with 'pref 28'.
··· 24 JIMM = 0x080, 25 FUNC = 0x100, 26 SET = 0x200, 27 + SCIMM = 0x400, 28 + SIMM9 = 0x800, 29 }; 30 31 #define OP_MASK 0x3f ··· 41 #define FUNC_SH 0 42 #define SET_MASK 0x7 43 #define SET_SH 0 44 + #define SIMM9_SH 7 45 + #define SIMM9_MASK 0x1ff 46 47 enum opcode { 48 insn_invalid, ··· 114 KERN_WARNING "Micro-assembler field overflow\n"); 115 116 return (arg & SCIMM_MASK) << SCIMM_SH; 117 + } 118 + 119 + static inline u32 build_scimm9(s32 arg) 120 + { 121 + WARN((arg > 0xff || arg < -0x100), 122 + KERN_WARNING "Micro-assembler field overflow\n"); 123 + 124 + return (arg & SIMM9_MASK) << SIMM9_SH; 125 } 126 127 static inline u32 build_func(u32 arg) ··· 330 void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, 331 unsigned int c) 332 { 333 + if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5) 334 /* 335 * As per erratum Core-14449, replace prefetches 0-4, 336 * 6-24 with 'pref 28'.
+1 -1
arch/mips/mti-sead3/sead3-time.c
··· 72 int get_c0_perfcount_int(void) 73 { 74 if (gic_present) 75 - return gic_get_c0_compare_int(); 76 if (cp0_perfcount_irq >= 0) 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 78 return -1;
··· 72 int get_c0_perfcount_int(void) 73 { 74 if (gic_present) 75 + return gic_get_c0_perfcount_int(); 76 if (cp0_perfcount_irq >= 0) 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 78 return -1;
+2 -2
arch/mips/pci/pci-bcm1480.c
··· 173 } 174 175 struct pci_ops bcm1480_pci_ops = { 176 - .read = bcm1480_pcibios_read, 177 - .write = bcm1480_pcibios_write, 178 }; 179 180 static struct resource bcm1480_mem_resource = {
··· 173 } 174 175 struct pci_ops bcm1480_pci_ops = { 176 + .read = bcm1480_pcibios_read, 177 + .write = bcm1480_pcibios_write, 178 }; 179 180 static struct resource bcm1480_mem_resource = {
+2 -2
arch/mips/pci/pci-octeon.c
··· 327 328 329 static struct pci_ops octeon_pci_ops = { 330 - .read = octeon_read_config, 331 - .write = octeon_write_config, 332 }; 333 334 static struct resource octeon_pci_mem_resource = {
··· 327 328 329 static struct pci_ops octeon_pci_ops = { 330 + .read = octeon_read_config, 331 + .write = octeon_write_config, 332 }; 333 334 static struct resource octeon_pci_mem_resource = {
+6 -6
arch/mips/pci/pcie-octeon.c
··· 1792 } 1793 1794 static struct pci_ops octeon_pcie0_ops = { 1795 - .read = octeon_pcie0_read_config, 1796 - .write = octeon_pcie0_write_config, 1797 }; 1798 1799 static struct resource octeon_pcie0_mem_resource = { ··· 1813 }; 1814 1815 static struct pci_ops octeon_pcie1_ops = { 1816 - .read = octeon_pcie1_read_config, 1817 - .write = octeon_pcie1_write_config, 1818 }; 1819 1820 static struct resource octeon_pcie1_mem_resource = { ··· 1834 }; 1835 1836 static struct pci_ops octeon_dummy_ops = { 1837 - .read = octeon_dummy_read_config, 1838 - .write = octeon_dummy_write_config, 1839 }; 1840 1841 static struct resource octeon_dummy_mem_resource = {
··· 1792 } 1793 1794 static struct pci_ops octeon_pcie0_ops = { 1795 + .read = octeon_pcie0_read_config, 1796 + .write = octeon_pcie0_write_config, 1797 }; 1798 1799 static struct resource octeon_pcie0_mem_resource = { ··· 1813 }; 1814 1815 static struct pci_ops octeon_pcie1_ops = { 1816 + .read = octeon_pcie1_read_config, 1817 + .write = octeon_pcie1_write_config, 1818 }; 1819 1820 static struct resource octeon_pcie1_mem_resource = { ··· 1834 }; 1835 1836 static struct pci_ops octeon_dummy_ops = { 1837 + .read = octeon_dummy_read_config, 1838 + .write = octeon_dummy_write_config, 1839 }; 1840 1841 static struct resource octeon_dummy_mem_resource = {
-24
arch/mips/sgi-ip22/ip22-gio.c
··· 152 return 0; 153 } 154 155 - static int gio_device_suspend(struct device *dev, pm_message_t state) 156 - { 157 - struct gio_device *gio_dev = to_gio_device(dev); 158 - struct gio_driver *drv = to_gio_driver(dev->driver); 159 - int error = 0; 160 - 161 - if (dev->driver && drv->suspend) 162 - error = drv->suspend(gio_dev, state); 163 - return error; 164 - } 165 - 166 - static int gio_device_resume(struct device *dev) 167 - { 168 - struct gio_device *gio_dev = to_gio_device(dev); 169 - struct gio_driver *drv = to_gio_driver(dev->driver); 170 - int error = 0; 171 - 172 - if (dev->driver && drv->resume) 173 - error = drv->resume(gio_dev); 174 - return error; 175 - } 176 - 177 static void gio_device_shutdown(struct device *dev) 178 { 179 struct gio_device *gio_dev = to_gio_device(dev); ··· 378 .match = gio_bus_match, 379 .probe = gio_device_probe, 380 .remove = gio_device_remove, 381 - .suspend = gio_device_suspend, 382 - .resume = gio_device_resume, 383 .shutdown = gio_device_shutdown, 384 .uevent = gio_device_uevent, 385 };
··· 152 return 0; 153 } 154 155 static void gio_device_shutdown(struct device *dev) 156 { 157 struct gio_device *gio_dev = to_gio_device(dev); ··· 400 .match = gio_bus_match, 401 .probe = gio_device_probe, 402 .remove = gio_device_remove, 403 .shutdown = gio_device_shutdown, 404 .uevent = gio_device_uevent, 405 };
+4 -3
arch/mips/sgi-ip27/ip27-reset.c
··· 8 * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle 9 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 10 */ 11 #include <linux/kernel.h> 12 #include <linux/sched.h> 13 #include <linux/timer.h> ··· 26 #include <asm/sn/gda.h> 27 #include <asm/sn/sn0/hub.h> 28 29 - void machine_restart(char *command) __attribute__((noreturn)); 30 - void machine_halt(void) __attribute__((noreturn)); 31 - void machine_power_off(void) __attribute__((noreturn)); 32 33 #define noreturn while(1); /* Silence gcc. */ 34
··· 8 * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle 9 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 10 */ 11 + #include <linux/compiler.h> 12 #include <linux/kernel.h> 13 #include <linux/sched.h> 14 #include <linux/timer.h> ··· 25 #include <asm/sn/gda.h> 26 #include <asm/sn/sn0/hub.h> 27 28 + void machine_restart(char *command) __noreturn; 29 + void machine_halt(void) __noreturn; 30 + void machine_power_off(void) __noreturn; 31 32 #define noreturn while(1); /* Silence gcc. */ 33
+4 -3
arch/mips/sgi-ip32/ip32-reset.c
··· 8 * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org> 9 */ 10 11 #include <linux/init.h> 12 #include <linux/kernel.h> 13 #include <linux/sched.h> ··· 36 static struct timer_list power_timer, blink_timer, debounce_timer; 37 static int has_panicked, shuting_down; 38 39 - static void ip32_machine_restart(char *command) __attribute__((noreturn)); 40 - static void ip32_machine_halt(void) __attribute__((noreturn)); 41 - static void ip32_machine_power_off(void) __attribute__((noreturn)); 42 43 static void ip32_machine_restart(char *cmd) 44 {
··· 8 * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org> 9 */ 10 11 + #include <linux/compiler.h> 12 #include <linux/init.h> 13 #include <linux/kernel.h> 14 #include <linux/sched.h> ··· 35 static struct timer_list power_timer, blink_timer, debounce_timer; 36 static int has_panicked, shuting_down; 37 38 + static void ip32_machine_restart(char *command) __noreturn; 39 + static void ip32_machine_halt(void) __noreturn; 40 + static void ip32_machine_power_off(void) __noreturn; 41 42 static void ip32_machine_restart(char *cmd) 43 {
-8
drivers/irqchip/irq-mips-gic.c
··· 192 } 193 } 194 195 - unsigned int gic_get_timer_pending(void) 196 - { 197 - unsigned int vpe_pending; 198 - 199 - vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND)); 200 - return vpe_pending & GIC_VPE_PEND_TIMER_MSK; 201 - } 202 - 203 static void gic_bind_eic_interrupt(int irq, int set) 204 { 205 /* Convert irq vector # to hw int # */
··· 192 } 193 } 194 195 static void gic_bind_eic_interrupt(int irq, int set) 196 { 197 /* Convert irq vector # to hw int # */
-1
include/linux/irqchip/mips-gic.h
··· 243 extern void gic_send_ipi(unsigned int intr); 244 extern unsigned int plat_ipi_call_int_xlate(unsigned int); 245 extern unsigned int plat_ipi_resched_int_xlate(unsigned int); 246 - extern unsigned int gic_get_timer_pending(void); 247 extern int gic_get_c0_compare_int(void); 248 extern int gic_get_c0_perfcount_int(void); 249 #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
··· 243 extern void gic_send_ipi(unsigned int intr); 244 extern unsigned int plat_ipi_call_int_xlate(unsigned int); 245 extern unsigned int plat_ipi_resched_int_xlate(unsigned int); 246 extern int gic_get_c0_compare_int(void); 247 extern int gic_get_c0_perfcount_int(void); 248 #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
+5
include/uapi/linux/prctl.h
··· 185 #define PR_MPX_ENABLE_MANAGEMENT 43 186 #define PR_MPX_DISABLE_MANAGEMENT 44 187 188 #endif /* _LINUX_PRCTL_H */
··· 185 #define PR_MPX_ENABLE_MANAGEMENT 43 186 #define PR_MPX_DISABLE_MANAGEMENT 44 187 188 + #define PR_SET_FP_MODE 45 189 + #define PR_GET_FP_MODE 46 190 + # define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */ 191 + # define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */ 192 + 193 #endif /* _LINUX_PRCTL_H */
+12
kernel/sys.c
··· 97 #ifndef MPX_DISABLE_MANAGEMENT 98 # define MPX_DISABLE_MANAGEMENT(a) (-EINVAL) 99 #endif 100 101 /* 102 * this is where the system-wide overflow UID and GID are defined, for ··· 2224 if (arg2 || arg3 || arg4 || arg5) 2225 return -EINVAL; 2226 error = MPX_DISABLE_MANAGEMENT(me); 2227 break; 2228 default: 2229 error = -EINVAL;
··· 97 #ifndef MPX_DISABLE_MANAGEMENT 98 # define MPX_DISABLE_MANAGEMENT(a) (-EINVAL) 99 #endif 100 + #ifndef GET_FP_MODE 101 + # define GET_FP_MODE(a) (-EINVAL) 102 + #endif 103 + #ifndef SET_FP_MODE 104 + # define SET_FP_MODE(a,b) (-EINVAL) 105 + #endif 106 107 /* 108 * this is where the system-wide overflow UID and GID are defined, for ··· 2218 if (arg2 || arg3 || arg4 || arg5) 2219 return -EINVAL; 2220 error = MPX_DISABLE_MANAGEMENT(me); 2221 + break; 2222 + case PR_SET_FP_MODE: 2223 + error = SET_FP_MODE(me, arg2); 2224 + break; 2225 + case PR_GET_FP_MODE: 2226 + error = GET_FP_MODE(me); 2227 break; 2228 default: 2229 error = -EINVAL;