Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS updates from Ralf Baechle:
"This is the main pull request for MIPS:

- a number of fixes that didn't make the 3.19 release.

- a number of cleanups.

- preliminary support for Cavium's Octeon 3 SoCs which feature up to
48 MIPS64 R3 cores with FPU and hardware virtualization.

- support for MIPS R6 processors.

Revision 6 of the MIPS architecture is a major revision which does
away with many of the original sins of the architecture, such as
branch delay slots. This and other changes in R6 require major
changes throughout the entire MIPS core architecture code and make
up the lion's share of this pull request.

- finally some preparatory work for eXtended Physical Address
support, which allows support of up to 40 bits of physical address
space on 32-bit processors"

[ Ahh, MIPS can't leave the PAE brain damage alone. It's like
every CPU architect has to make that mistake, but pee in the snow
by changing the TLA. But whether it's called PAE, LPAE or XPA,
it's horrid crud - Linus ]
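[ Editor's note: for readers who have not tracked MIPS, the "branch
  delay slot" mentioned above means the instruction immediately after a
  branch executes unconditionally, whether or not the branch is taken,
  so compilers and hand-written assembly schedule useful work into it.
  A minimal illustrative sketch, not code from this series (bnezc is
  one of the R6 compact branches):

	# Pre-R6: the delay slot after a branch always executes,
	# so work is scheduled into it.
	bnez	$t0, out
	 addiu	$a0, $a0, 4	# delay slot: runs even when the branch is taken

	# R6: compact branches have no delay slot.
	bnezc	$t0, out
	addiu	$a0, $a0, 4	# runs only when the branch is not taken

  Auditing every such delay-slot assumption in the core architecture
  code is why R6 support touches so many files. ]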

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (114 commits)
MIPS: sead3: Corrected get_c0_perfcount_int
MIPS: mm: Remove dead macro definitions
MIPS: OCTEON: irq: add CIB and other fixes
MIPS: OCTEON: Don't do acknowledge operations for level triggered irqs.
MIPS: OCTEON: More OCTEONIII support
MIPS: OCTEON: Remove setting of processor specific CVMCTL icache bits.
MIPS: OCTEON: Core-15169 Workaround and general CVMSEG cleanup.
MIPS: OCTEON: Update octeon-model.h code for new SoCs.
MIPS: OCTEON: Implement DCache errata workaround for all CN6XXX
MIPS: OCTEON: Add little-endian support to asm/octeon/octeon.h
MIPS: OCTEON: Implement the core-16057 workaround
MIPS: OCTEON: Delete unused COP2 saving code
MIPS: OCTEON: Use correct instruction to read 64-bit COP0 register
MIPS: OCTEON: Save and restore CP2 SHA3 state
MIPS: OCTEON: Fix FP context save.
MIPS: OCTEON: Save/Restore wider multiply registers in OCTEON III CPUs
MIPS: boot: Provide more uImage options
MIPS: Remove unneeded #ifdef __KERNEL__ from asm/processor.h
MIPS: ip22-gio: Remove legacy suspend/resume support
mips: pci: Add ifdef around pci_proc_domain
...

Changed files
+6453 -1135
+43
Documentation/devicetree/bindings/mips/cavium/cib.txt
···
+ * Cavium Interrupt Bus widget
+
+ Properties:
+ - compatible: "cavium,octeon-7130-cib"
+
+   Compatibility with cn70XX SoCs.
+
+ - interrupt-controller: This is an interrupt controller.
+
+ - reg: Two elements consisting of the addresses of the RAW and EN
+   registers of the CIB block
+
+ - cavium,max-bits: The index (zero based) of the highest numbered bit
+   in the CIB block.
+
+ - interrupt-parent: Always the CIU on the SoC.
+
+ - interrupts: The CIU line to which the CIB block is connected.
+
+ - #interrupt-cells: Must be <2>. The first cell is the bit within the
+   CIB. The second cell specifies the triggering semantics of the
+   line.
+
+ Example:
+
+	interrupt-controller@107000000e000 {
+		compatible = "cavium,octeon-7130-cib";
+		reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */
+		      <0x10700 0x0000e100 0x0 0x8>; /* EN */
+		cavium,max-bits = <23>;
+
+		interrupt-controller;
+		interrupt-parent = <&ciu>;
+		interrupts = <1 24>;
+		/* Interrupts are specified by two parts:
+		 * 1) Bit number in the CIB* registers
+		 * 2) Triggering (1 - edge rising
+		 *		  2 - edge falling
+		 *		  4 - level active high
+		 *		  8 - level active low)
+		 */
+		#interrupt-cells = <2>;
+	};
+69 -4
arch/mips/Kconfig
···
 	select CPU_PM if CPU_IDLE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_BINFMT_ELF_STATE
+	select SYSCTL_EXCEPTION_TRACE

 menu "Machine selection"

···
 	select SYS_HAS_CPU_MIPS32_R1
 	select SYS_HAS_CPU_MIPS32_R2
 	select SYS_HAS_CPU_MIPS32_R3_5
+	select SYS_HAS_CPU_MIPS32_R6
 	select SYS_HAS_CPU_MIPS64_R1
 	select SYS_HAS_CPU_MIPS64_R2
+	select SYS_HAS_CPU_MIPS64_R6
 	select SYS_HAS_CPU_NEVADA
 	select SYS_HAS_CPU_RM7000
 	select SYS_SUPPORTS_32BIT_KERNEL
···
 config NO_IOPORT_MAP
 	def_bool n

+config GENERIC_CSUM
+	bool
+
 config GENERIC_ISA_DMA
 	bool
 	select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
···
 config SOC_PNX8335
 	bool
 	select SOC_PNX833X
+
+config MIPS_SPRAM
+	bool

 config SWAP_IO_SPACE
 	bool
···
 	  specific type of processor in your system, choose those that one
 	  otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.

+config CPU_MIPS32_R6
+	bool "MIPS32 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS32_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	select HAVE_KVM
+	select MIPS_O32_FP64_SUPPORT
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS32 architecture. New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS32r6 processor. If you own an older
+	  processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
 config CPU_MIPS64_R1
 	bool "MIPS64 Release 1"
 	depends on SYS_HAS_CPU_MIPS64_R1
···
 	  MIPS processor are based on a MIPS64 processor. If you know the
 	  specific type of processor in your system, choose those that one
 	  otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
+
+config CPU_MIPS64_R6
+	bool "MIPS64 Release 6 (EXPERIMENTAL)"
+	depends on SYS_HAS_CPU_MIPS64_R6
+	select CPU_HAS_PREFETCH
+	select CPU_SUPPORTS_32BIT_KERNEL
+	select CPU_SUPPORTS_64BIT_KERNEL
+	select CPU_SUPPORTS_HIGHMEM
+	select CPU_SUPPORTS_MSA
+	select GENERIC_CSUM
+	help
+	  Choose this option to build a kernel for release 6 or later of the
+	  MIPS64 architecture. New MIPS processors, starting with the Warrior
+	  family, are based on a MIPS64r6 processor. If you own an older
+	  processor, you probably need to select MIPS64r1 or MIPS64r2 instead.

 config CPU_R3000
 	bool "R3000"
···
 config CPU_MIPS32_3_5_FEATURES
 	bool "MIPS32 Release 3.5 Features"
 	depends on SYS_HAS_CPU_MIPS32_R3_5
-	depends on CPU_MIPS32_R2
+	depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
 	help
 	  Choose this option to build a kernel for release 2 or later of the
 	  MIPS32 architecture including features from the 3.5 release such as
···
 config SYS_HAS_CPU_MIPS32_R3_5
 	bool

+config SYS_HAS_CPU_MIPS32_R6
+	bool
+
 config SYS_HAS_CPU_MIPS64_R1
 	bool

 config SYS_HAS_CPU_MIPS64_R2
+	bool
+
+config SYS_HAS_CPU_MIPS64_R6
 	bool

 config SYS_HAS_CPU_R3000
···
 #
 config CPU_MIPS32
 	bool
-	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+	default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6

 config CPU_MIPS64
 	bool
-	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+	default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6

 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
···
 config CPU_MIPSR2
 	bool
 	default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+	select MIPS_SPRAM
+
+config CPU_MIPSR6
+	bool
+	default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+	select MIPS_SPRAM

 config EVA
 	bool
···
 	default y
 	depends on MIPS_MT_SMP

+config MIPSR2_TO_R6_EMULATOR
+	bool "MIPS R2-to-R6 emulator"
+	depends on CPU_MIPSR6 && !SMP
+	default y
+	help
+	  Choose this option if you want to run non-R6 MIPS userland code.
+	  Even if you say 'Y' here, the emulator will still be disabled by
+	  default. You can enable it using the 'mipsr2emul' kernel option.
+	  The only reason this is a build-time option is to save ~14K from the
+	  final kernel image.
+comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+	depends on SMP && CPU_MIPSR6
+
 config MIPS_VPE_LOADER
 	bool "VPE loader support."
 	depends on SYS_SUPPORTS_MULTITHREADING && MODULES
···
 	  here.

 config CPU_MICROMIPS
-	depends on 32BIT && SYS_SUPPORTS_MICROMIPS
+	depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
 	bool "microMIPS"
 	help
 	  When this option is enabled the kernel will be built using the
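(Usage note on the new option above: even with CONFIG_MIPSR2_TO_R6_EMULATOR
built in, the emulator stays off by default; per the help text, it is turned
on by booting with the 'mipsr2emul' kernel command-line option.)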
-13
arch/mips/Kconfig.debug
···
 	help
 	  Add several files to the debugfs to test spinlock speed.

-config FP32XX_HYBRID_FPRS
-	bool "Run FP32 & FPXX code with hybrid FPRs"
-	depends on MIPS_O32_FP64_SUPPORT
-	help
-	  The hybrid FPR scheme is normally used only when a program needs to
-	  execute a mix of FP32 & FP64A code, since the trapping & emulation
-	  that it entails is expensive. When enabled, this option will lead
-	  to the kernel running programs which use the FP32 & FPXX FP ABIs
-	  using the hybrid FPR scheme, which can be useful for debugging
-	  purposes.
-
-	  If unsure, say N.
-
 endmenu
+37 -18
arch/mips/Makefile
···
 cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))

-# For smartmips configurations, there are hundreds of warnings due to ISA overrides
-# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
-# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
-# similar directives in the kernel will spam the build logs with the following warnings:
-# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
-# or
-# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
-# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
-# been fixed properly.
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
-
 cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \
 		   -fno-omit-frame-pointer
-
-ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
-cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
-endif
-
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
···
 		-Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
 		-Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 		-Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
 		-Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432) += $(call cc-option,-march=r5400,-march=r5000) \
 		-Wa,--trap
···
 endif
 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
 cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap
+#
+# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+# as MIPS64 R2; older versions as just R1. This leaves the possibility open
+# that GCC might generate R2 code for -march=loongson3a which then is rejected
+# by GAS. The cc-option can't probe for this behaviour so -march=loongson3a
+# can't easily be used safely within the kbuild framework.
+#
+cflags-$(CONFIG_CPU_LOONGSON3) += \
+	$(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+	-Wa,-mips64r2 -Wa,--trap

 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
···
 KBUILD_AFLAGS_MODULE += -msb1-pass1-workarounds
 KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
 endif
+endif
+
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+mips-cflags := "$(cflags-y)"
+cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
+cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,$(mips-cflags),-mmicromips)
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
+cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
 endif

 #
···
 boot-y += vmlinux.srec
 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
 boot-y += uImage
+boot-y += uImage.bin
+boot-y += uImage.bz2
 boot-y += uImage.gz
+boot-y += uImage.lzma
+boot-y += uImage.lzo
 endif

 # compressed boot image targets (arch/mips/boot/compressed/)
···
 	echo '  vmlinuz.bin          - Raw binary zboot image'
 	echo '  vmlinuz.srec         - SREC zboot image'
 	echo '  uImage               - U-Boot image'
+	echo '  uImage.bin           - U-Boot image (uncompressed)'
+	echo '  uImage.bz2           - U-Boot image (bz2)'
 	echo '  uImage.gz            - U-Boot image (gzip)'
+	echo '  uImage.lzma          - U-Boot image (lzma)'
+	echo '  uImage.lzo           - U-Boot image (lzo)'
 	echo '  dtbs                 - Device-tree blobs for enabled boards'
 	echo
 	echo '  These will be default as appropriate for a configured platform.'
+22 -5
arch/mips/alchemy/common/clock.c
···
 		t = 396000000;
 	else {
 		t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
+		if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
+			t &= 0x3f;
 		t *= parent_rate;
 	}

 	return t;
+}
+
+void __init alchemy_set_lpj(void)
+{
+	preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
+	preset_lpj /= 2 * HZ;
 }

 static struct clk_ops alchemy_clkops_cpu = {
···

 /* lrclk: external synchronous static bus clock ***********************/

-static struct clk __init *alchemy_clk_setup_lrclk(const char *pn)
+static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
 {
-	/* MEM_STCFG0[15:13] = divisor.
+	/* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
+	 * otherwise lrclk=pclk/4.
+	 * All other variants: MEM_STCFG0[15:13] = divisor.
 	 * L/RCLK = periph_clk / (divisor + 1)
 	 * On Au1000, Au1500, Au1100 it's called LCLK,
 	 * on later models it's called RCLK, but it's the same thing.
 	 */
 	struct clk *c;
-	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13;
+	unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);

-	v = (v & 7) + 1;
+	switch (t) {
+	case ALCHEMY_CPU_AU1000:
+	case ALCHEMY_CPU_AU1500:
+		v = 4 + ((v >> 11) & 1);
+		break;
+	default:	/* all other models */
+		v = ((v >> 13) & 7) + 1;
+	}
 	c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
 				      pn, 0, 1, v);
 	if (!IS_ERR(c))
···
 	ERRCK(c)

 	/* L/RCLK: external static bus clock for synchronous mode */
-	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK);
+	c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
 	ERRCK(c)

 	/* Frequency dividers 0-5 */
+3 -1
arch/mips/alchemy/common/setup.c
···
 #include <au1000.h>

 extern void __init board_setup(void);
-extern void set_cpuspec(void);
+extern void __init alchemy_set_lpj(void);

 void __init plat_mem_setup(void)
 {
+	alchemy_set_lpj();
+
 	if (au1xxx_cpu_needs_config_od())
 		/* Various early Au1xx0 errata corrected by this */
 		set_c0_config(1 << 19); /* Set Config[OD] */
+1 -1
arch/mips/bcm3384/irq.c
···

 static struct of_device_id of_irq_ids[] __initdata = {
 	{ .compatible = "mti,cpu-interrupt-controller",
-	  .data = mips_cpu_intc_init },
+	  .data = mips_cpu_irq_of_init },
 	{ .compatible = "brcm,bcm3384-intc",
 	  .data = intc_of_init },
 	{},
+47 -2
arch/mips/boot/Makefile
···

 hostprogs-y := elf2ecoff

+suffix-y := bin
+suffix-$(CONFIG_KERNEL_BZIP2) := bz2
+suffix-$(CONFIG_KERNEL_GZIP) := gz
+suffix-$(CONFIG_KERNEL_LZMA) := lzma
+suffix-$(CONFIG_KERNEL_LZO) := lzo
+
 targets := vmlinux.ecoff
 quiet_cmd_ecoff = ECOFF	  $@
 cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
···
 UIMAGE_LOADADDR = $(VMLINUX_LOAD_ADDRESS)
 UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)

+#
+# Compressed vmlinux images
+#
+
+extra-y += vmlinux.bin.bz2
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
+extra-y += vmlinux.bin.lzo
+
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,bzip2)
+
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
 	$(call if_changed,gzip)

+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzma)
+
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,lzo)
+
+#
+# Compressed u-boot images
+#
+
+targets += uImage
+targets += uImage.bin
+targets += uImage.bz2
 targets += uImage.gz
+targets += uImage.lzma
+targets += uImage.lzo
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,uimage,none)
+
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
+	$(call if_changed,uimage,bzip2)
+
 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 	$(call if_changed,uimage,gzip)

-targets += uImage
-$(obj)/uImage: $(obj)/uImage.gz FORCE
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+	$(call if_changed,uimage,lzma)
+
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
+	$(call if_changed,uimage,lzo)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
 	@ln -sf $(notdir $<) $@
 	@echo '  Image $@ is ready'
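(Usage note on the targets above: each compression method now has an explicit
target, e.g. "make uImage.lzma" first builds vmlinux.bin.lzma and then wraps
it for U-Boot; the plain "make uImage" target just symlinks to
uImage.$(suffix-y), i.e. the variant matching the configured CONFIG_KERNEL_*
compression choice.)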
-4
arch/mips/boot/elf2ecoff.c
···
 	Elf32_Ehdr ex;
 	Elf32_Phdr *ph;
 	Elf32_Shdr *sh;
-	char *shstrtab;
 	int i, pad;
 	struct sect text, data, bss;
 	struct filehdr efh;
···
 			  "sh");
 	if (must_convert_endian)
 		convert_elf_shdrs(sh, ex.e_shnum);
-	/* Read in the section string table. */
-	shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
-			    sh[ex.e_shstrndx].sh_size, "shstrtab");

 	/* Figure out if we can cram the program header into an ECOFF
 	   header... Basically, we can't handle anything but loadable
+10 -1
arch/mips/cavium-octeon/csrc-octeon.c
···
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
-
+#include <asm/octeon/cvmx-rst-defs.h>

 static u64 f;
 static u64 rdiv;
···

 	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
 		union cvmx_mio_rst_boot rst_boot;
+
 		rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
 		rdiv = rst_boot.s.c_mul;	/* CPU clock */
 		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
 		f = (0x8000000000000000ull / sdiv) * 2;
+	} else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+		union cvmx_rst_boot rst_boot;
+
+		rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+		rdiv = rst_boot.s.c_mul;	/* CPU clock */
+		sdiv = rst_boot.s.pnr_mul;	/* I/O clock */
+		f = (0x8000000000000000ull / sdiv) * 2;
 	}
+
 }

 /*
+2 -2
arch/mips/cavium-octeon/dma-octeon.c
···
 			continue;

 		/* These addresses map low for PCI. */
-		if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX))
+		if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
 			continue;

 		addr_size += e->size;
···
 #endif
 #ifdef CONFIG_USB_OCTEON_OHCI
 	/* OCTEON II ohci is only 32-bit. */
-	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
+	if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
 		swiotlbsize = 64 * (1<<20);
 #endif
 	swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+1 -1
arch/mips/cavium-octeon/executive/cvmx-helper-board.c
···
 		break;
 	}
 	/* Most boards except NIC10e use a 12MHz crystal */
-	if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+	if (OCTEON_IS_OCTEON2())
 		return USB_CLOCK_TYPE_CRYSTAL_12;
 	return USB_CLOCK_TYPE_REF_48;
 }
+823 -271
arch/mips/cavium-octeon/octeon-irq.c
··· 3 3 * License. See the file "COPYING" in the main directory of this archive 4 4 * for more details. 5 5 * 6 - * Copyright (C) 2004-2012 Cavium, Inc. 6 + * Copyright (C) 2004-2014 Cavium, Inc. 7 7 */ 8 8 9 + #include <linux/of_address.h> 9 10 #include <linux/interrupt.h> 10 11 #include <linux/irqdomain.h> 11 12 #include <linux/bitops.h> 13 + #include <linux/of_irq.h> 12 14 #include <linux/percpu.h> 13 15 #include <linux/slab.h> 14 16 #include <linux/irq.h> ··· 24 22 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror); 25 23 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock); 26 24 25 + struct octeon_irq_ciu_domain_data { 26 + int num_sum; /* number of sum registers (2 or 3). */ 27 + }; 28 + 27 29 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64]; 28 30 29 - union octeon_ciu_chip_data { 30 - void *p; 31 - unsigned long l; 32 - struct { 33 - unsigned long line:6; 34 - unsigned long bit:6; 35 - unsigned long gpio_line:6; 36 - } s; 31 + struct octeon_ciu_chip_data { 32 + union { 33 + struct { /* only used for ciu3 */ 34 + u64 ciu3_addr; 35 + unsigned int intsn; 36 + }; 37 + struct { /* only used for ciu/ciu2 */ 38 + u8 line; 39 + u8 bit; 40 + u8 gpio_line; 41 + }; 42 + }; 43 + int current_cpu; /* Next CPU expected to take this irq */ 37 44 }; 38 45 39 46 struct octeon_core_chip_data { ··· 56 45 57 46 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES]; 58 47 59 - static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, 60 - struct irq_chip *chip, 61 - irq_flow_handler_t handler) 48 + static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line, 49 + struct irq_chip *chip, 50 + irq_flow_handler_t handler) 62 51 { 63 - union octeon_ciu_chip_data cd; 52 + struct octeon_ciu_chip_data *cd; 53 + 54 + cd = kzalloc(sizeof(*cd), GFP_KERNEL); 55 + if (!cd) 56 + return -ENOMEM; 64 57 65 58 irq_set_chip_and_handler(irq, chip, handler); 66 59 67 - cd.l = 0; 68 - cd.s.line = line; 69 - cd.s.bit = bit; 70 - cd.s.gpio_line = gpio_line; 60 + cd->line = line; 61 + cd->bit = bit; 62 + cd->gpio_line = gpio_line; 71 63 72 - irq_set_chip_data(irq, cd.p); 64 + irq_set_chip_data(irq, cd); 73 65 octeon_irq_ciu_to_irq[line][bit] = irq; 66 + return 0; 74 67 } 75 68 76 - static void octeon_irq_force_ciu_mapping(struct irq_domain *domain, 77 - int irq, int line, int bit) 69 + static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq) 78 70 { 79 - irq_domain_associate(domain, irq, line << 6 | bit); 71 + struct irq_data *data = irq_get_irq_data(irq); 72 + struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); 73 + 74 + irq_set_chip_data(irq, NULL); 75 + kfree(cd); 76 + } 77 + 78 + static int octeon_irq_force_ciu_mapping(struct irq_domain *domain, 79 + int irq, int line, int bit) 80 + { 81 + return irq_domain_associate(domain, irq, line << 6 | bit); 80 82 } 81 83 82 84 static int octeon_coreid_for_cpu(int cpu) ··· 226 202 #ifdef CONFIG_SMP 227 203 int cpu; 228 204 int weight = cpumask_weight(data->affinity); 205 + struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data); 229 206 230 207 if (weight > 1) { 231 - cpu = smp_processor_id(); 208 + cpu = cd->current_cpu; 232 209 for (;;) { 233 210 cpu = cpumask_next(cpu, data->affinity); 234 211 if (cpu >= nr_cpu_ids) { ··· 244 219 } else { 245 220 cpu = smp_processor_id(); 246 221 } 222 + cd->current_cpu = cpu; 247 223 return cpu; 248 224 #else 249 225 return smp_processor_id(); ··· 257 231 int coreid = octeon_coreid_for_cpu(cpu); 258 232 
unsigned long *pen; 259 233 unsigned long flags; 260 - union octeon_ciu_chip_data cd; 234 + struct octeon_ciu_chip_data *cd; 261 235 raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 262 236 263 - cd.p = irq_data_get_irq_chip_data(data); 237 + cd = irq_data_get_irq_chip_data(data); 264 238 265 239 raw_spin_lock_irqsave(lock, flags); 266 - if (cd.s.line == 0) { 240 + if (cd->line == 0) { 267 241 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 268 - __set_bit(cd.s.bit, pen); 242 + __set_bit(cd->bit, pen); 269 243 /* 270 244 * Must be visible to octeon_irq_ip{2,3}_ciu() before 271 245 * enabling the irq. ··· 274 248 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 275 249 } else { 276 250 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 277 - __set_bit(cd.s.bit, pen); 251 + __set_bit(cd->bit, pen); 278 252 /* 279 253 * Must be visible to octeon_irq_ip{2,3}_ciu() before 280 254 * enabling the irq. ··· 289 263 { 290 264 unsigned long *pen; 291 265 unsigned long flags; 292 - union octeon_ciu_chip_data cd; 266 + struct octeon_ciu_chip_data *cd; 293 267 raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); 294 268 295 - cd.p = irq_data_get_irq_chip_data(data); 269 + cd = irq_data_get_irq_chip_data(data); 296 270 297 271 raw_spin_lock_irqsave(lock, flags); 298 - if (cd.s.line == 0) { 272 + if (cd->line == 0) { 299 273 pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); 300 - __set_bit(cd.s.bit, pen); 274 + __set_bit(cd->bit, pen); 301 275 /* 302 276 * Must be visible to octeon_irq_ip{2,3}_ciu() before 303 277 * enabling the irq. ··· 306 280 cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); 307 281 } else { 308 282 pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); 309 - __set_bit(cd.s.bit, pen); 283 + __set_bit(cd->bit, pen); 310 284 /* 311 285 * Must be visible to octeon_irq_ip{2,3}_ciu() before 312 286 * enabling the irq. ··· 321 295 { 322 296 unsigned long *pen; 323 297 unsigned long flags; 324 - union octeon_ciu_chip_data cd; 298 + struct octeon_ciu_chip_data *cd; 325 299 raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock); 326 300 327 - cd.p = irq_data_get_irq_chip_data(data); 301 + cd = irq_data_get_irq_chip_data(data); 328 302 329 303 raw_spin_lock_irqsave(lock, flags); 330 - if (cd.s.line == 0) { 304 + if (cd->line == 0) { 331 305 pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror); 332 - __clear_bit(cd.s.bit, pen); 306 + __clear_bit(cd->bit, pen); 333 307 /* 334 308 * Must be visible to octeon_irq_ip{2,3}_ciu() before 335 309 * enabling the irq. ··· 338 312 cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen); 339 313 } else { 340 314 pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror); 341 - __clear_bit(cd.s.bit, pen); 315 + __clear_bit(cd->bit, pen); 342 316 /* 343 317 * Must be visible to octeon_irq_ip{2,3}_ciu() before 344 318 * enabling the irq. 
··· 354 328 unsigned long flags; 355 329 unsigned long *pen; 356 330 int cpu; 357 - union octeon_ciu_chip_data cd; 331 + struct octeon_ciu_chip_data *cd; 358 332 raw_spinlock_t *lock; 359 333 360 - cd.p = irq_data_get_irq_chip_data(data); 334 + cd = irq_data_get_irq_chip_data(data); 361 335 362 336 for_each_online_cpu(cpu) { 363 337 int coreid = octeon_coreid_for_cpu(cpu); 364 338 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 365 - if (cd.s.line == 0) 339 + if (cd->line == 0) 366 340 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 367 341 else 368 342 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 369 343 370 344 raw_spin_lock_irqsave(lock, flags); 371 - __clear_bit(cd.s.bit, pen); 345 + __clear_bit(cd->bit, pen); 372 346 /* 373 347 * Must be visible to octeon_irq_ip{2,3}_ciu() before 374 348 * enabling the irq. 375 349 */ 376 350 wmb(); 377 - if (cd.s.line == 0) 351 + if (cd->line == 0) 378 352 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 379 353 else 380 354 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 387 361 unsigned long flags; 388 362 unsigned long *pen; 389 363 int cpu; 390 - union octeon_ciu_chip_data cd; 364 + struct octeon_ciu_chip_data *cd; 391 365 raw_spinlock_t *lock; 392 366 393 - cd.p = irq_data_get_irq_chip_data(data); 367 + cd = irq_data_get_irq_chip_data(data); 394 368 395 369 for_each_online_cpu(cpu) { 396 370 int coreid = octeon_coreid_for_cpu(cpu); 397 371 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 398 - if (cd.s.line == 0) 372 + if (cd->line == 0) 399 373 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 400 374 else 401 375 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 402 376 403 377 raw_spin_lock_irqsave(lock, flags); 404 - __set_bit(cd.s.bit, pen); 378 + __set_bit(cd->bit, pen); 405 379 /* 406 380 * Must be visible to octeon_irq_ip{2,3}_ciu() before 407 381 * enabling the irq. 408 382 */ 409 383 wmb(); 410 - if (cd.s.line == 0) 384 + if (cd->line == 0) 411 385 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 412 386 else 413 387 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 423 397 { 424 398 u64 mask; 425 399 int cpu = next_cpu_for_irq(data); 426 - union octeon_ciu_chip_data cd; 400 + struct octeon_ciu_chip_data *cd; 427 401 428 - cd.p = irq_data_get_irq_chip_data(data); 429 - mask = 1ull << (cd.s.bit); 402 + cd = irq_data_get_irq_chip_data(data); 403 + mask = 1ull << (cd->bit); 430 404 431 405 /* 432 406 * Called under the desc lock, so these should never get out 433 407 * of sync. 434 408 */ 435 - if (cd.s.line == 0) { 409 + if (cd->line == 0) { 436 410 int index = octeon_coreid_for_cpu(cpu) * 2; 437 - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 411 + set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 438 412 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 439 413 } else { 440 414 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 441 - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 415 + set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 442 416 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 417 + } 418 + } 419 + 420 + /* 421 + * Enable the irq in the sum2 registers. 
422 + */ 423 + static void octeon_irq_ciu_enable_sum2(struct irq_data *data) 424 + { 425 + u64 mask; 426 + int cpu = next_cpu_for_irq(data); 427 + int index = octeon_coreid_for_cpu(cpu); 428 + struct octeon_ciu_chip_data *cd; 429 + 430 + cd = irq_data_get_irq_chip_data(data); 431 + mask = 1ull << (cd->bit); 432 + 433 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); 434 + } 435 + 436 + /* 437 + * Disable the irq in the sum2 registers. 438 + */ 439 + static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data) 440 + { 441 + u64 mask; 442 + int cpu = next_cpu_for_irq(data); 443 + int index = octeon_coreid_for_cpu(cpu); 444 + struct octeon_ciu_chip_data *cd; 445 + 446 + cd = irq_data_get_irq_chip_data(data); 447 + mask = 1ull << (cd->bit); 448 + 449 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); 450 + } 451 + 452 + static void octeon_irq_ciu_ack_sum2(struct irq_data *data) 453 + { 454 + u64 mask; 455 + int cpu = next_cpu_for_irq(data); 456 + int index = octeon_coreid_for_cpu(cpu); 457 + struct octeon_ciu_chip_data *cd; 458 + 459 + cd = irq_data_get_irq_chip_data(data); 460 + mask = 1ull << (cd->bit); 461 + 462 + cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask); 463 + } 464 + 465 + static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data) 466 + { 467 + int cpu; 468 + struct octeon_ciu_chip_data *cd; 469 + u64 mask; 470 + 471 + cd = irq_data_get_irq_chip_data(data); 472 + mask = 1ull << (cd->bit); 473 + 474 + for_each_online_cpu(cpu) { 475 + int coreid = octeon_coreid_for_cpu(cpu); 476 + 477 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask); 443 478 } 444 479 } 445 480 ··· 511 424 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data) 512 425 { 513 426 u64 mask; 514 - union octeon_ciu_chip_data cd; 427 + struct octeon_ciu_chip_data *cd; 515 428 516 - cd.p = irq_data_get_irq_chip_data(data); 517 - mask = 1ull << (cd.s.bit); 429 + cd = irq_data_get_irq_chip_data(data); 430 + mask = 1ull << (cd->bit); 518 431 519 - if (cd.s.line == 0) { 432 + if (cd->line == 0) { 520 433 int index = cvmx_get_core_num() * 2; 521 - set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); 434 + set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); 522 435 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 523 436 } else { 524 437 int index = cvmx_get_core_num() * 2 + 1; 525 - set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); 438 + set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); 526 439 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 527 440 } 528 441 } ··· 530 443 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data) 531 444 { 532 445 u64 mask; 533 - union octeon_ciu_chip_data cd; 446 + struct octeon_ciu_chip_data *cd; 534 447 535 - cd.p = irq_data_get_irq_chip_data(data); 536 - mask = 1ull << (cd.s.bit); 448 + cd = irq_data_get_irq_chip_data(data); 449 + mask = 1ull << (cd->bit); 537 450 538 - if (cd.s.line == 0) { 451 + if (cd->line == 0) { 539 452 int index = cvmx_get_core_num() * 2; 540 - clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); 453 + clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror)); 541 454 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 542 455 } else { 543 456 int index = cvmx_get_core_num() * 2 + 1; 544 - clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); 457 + clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror)); 545 458 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 546 459 } 547 460 } ··· 552 465 static void octeon_irq_ciu_ack(struct 
irq_data *data) 553 466 { 554 467 u64 mask; 555 - union octeon_ciu_chip_data cd; 468 + struct octeon_ciu_chip_data *cd; 556 469 557 - cd.p = irq_data_get_irq_chip_data(data); 558 - mask = 1ull << (cd.s.bit); 470 + cd = irq_data_get_irq_chip_data(data); 471 + mask = 1ull << (cd->bit); 559 472 560 - if (cd.s.line == 0) { 473 + if (cd->line == 0) { 561 474 int index = cvmx_get_core_num() * 2; 562 475 cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask); 563 476 } else { ··· 573 486 { 574 487 int cpu; 575 488 u64 mask; 576 - union octeon_ciu_chip_data cd; 489 + struct octeon_ciu_chip_data *cd; 577 490 578 - cd.p = irq_data_get_irq_chip_data(data); 579 - mask = 1ull << (cd.s.bit); 491 + cd = irq_data_get_irq_chip_data(data); 492 + mask = 1ull << (cd->bit); 580 493 581 - if (cd.s.line == 0) { 494 + if (cd->line == 0) { 582 495 for_each_online_cpu(cpu) { 583 496 int index = octeon_coreid_for_cpu(cpu) * 2; 584 - clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 497 + clear_bit(cd->bit, 498 + &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 585 499 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 586 500 } 587 501 } else { 588 502 for_each_online_cpu(cpu) { 589 503 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 590 - clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 504 + clear_bit(cd->bit, 505 + &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 591 506 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 592 507 } 593 508 } ··· 603 514 { 604 515 int cpu; 605 516 u64 mask; 606 - union octeon_ciu_chip_data cd; 517 + struct octeon_ciu_chip_data *cd; 607 518 608 - cd.p = irq_data_get_irq_chip_data(data); 609 - mask = 1ull << (cd.s.bit); 519 + cd = irq_data_get_irq_chip_data(data); 520 + mask = 1ull << (cd->bit); 610 521 611 - if (cd.s.line == 0) { 522 + if (cd->line == 0) { 612 523 for_each_online_cpu(cpu) { 613 524 int index = octeon_coreid_for_cpu(cpu) * 2; 614 - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 525 + set_bit(cd->bit, 526 + &per_cpu(octeon_irq_ciu0_en_mirror, cpu)); 615 527 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 616 528 } 617 529 } else { 618 530 for_each_online_cpu(cpu) { 619 531 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 620 - set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 532 + set_bit(cd->bit, 533 + &per_cpu(octeon_irq_ciu1_en_mirror, cpu)); 621 534 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 622 535 } 623 536 } ··· 628 537 static void octeon_irq_gpio_setup(struct irq_data *data) 629 538 { 630 539 union cvmx_gpio_bit_cfgx cfg; 631 - union octeon_ciu_chip_data cd; 540 + struct octeon_ciu_chip_data *cd; 632 541 u32 t = irqd_get_trigger_type(data); 633 542 634 - cd.p = irq_data_get_irq_chip_data(data); 543 + cd = irq_data_get_irq_chip_data(data); 635 544 636 545 cfg.u64 = 0; 637 546 cfg.s.int_en = 1; ··· 642 551 cfg.s.fil_cnt = 7; 643 552 cfg.s.fil_sel = 3; 644 553 645 - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64); 554 + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64); 646 555 } 647 556 648 557 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data) ··· 667 576 668 577 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data) 669 578 { 670 - union octeon_ciu_chip_data cd; 579 + struct octeon_ciu_chip_data *cd; 671 580 672 - cd.p = irq_data_get_irq_chip_data(data); 673 - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); 581 + cd = irq_data_get_irq_chip_data(data); 582 + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); 674 583 675 584 
octeon_irq_ciu_disable_all_v2(data); 676 585 } 677 586 678 587 static void octeon_irq_ciu_disable_gpio(struct irq_data *data) 679 588 { 680 - union octeon_ciu_chip_data cd; 589 + struct octeon_ciu_chip_data *cd; 681 590 682 - cd.p = irq_data_get_irq_chip_data(data); 683 - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); 591 + cd = irq_data_get_irq_chip_data(data); 592 + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); 684 593 685 594 octeon_irq_ciu_disable_all(data); 686 595 } 687 596 688 597 static void octeon_irq_ciu_gpio_ack(struct irq_data *data) 689 598 { 690 - union octeon_ciu_chip_data cd; 599 + struct octeon_ciu_chip_data *cd; 691 600 u64 mask; 692 601 693 - cd.p = irq_data_get_irq_chip_data(data); 694 - mask = 1ull << (cd.s.gpio_line); 602 + cd = irq_data_get_irq_chip_data(data); 603 + mask = 1ull << (cd->gpio_line); 695 604 696 605 cvmx_write_csr(CVMX_GPIO_INT_CLR, mask); 697 606 } 698 607 699 - static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc) 608 + static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc) 700 609 { 701 610 if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH) 702 611 handle_edge_irq(irq, desc); ··· 735 644 int cpu; 736 645 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 737 646 unsigned long flags; 738 - union octeon_ciu_chip_data cd; 647 + struct octeon_ciu_chip_data *cd; 739 648 unsigned long *pen; 740 649 raw_spinlock_t *lock; 741 650 742 - cd.p = irq_data_get_irq_chip_data(data); 651 + cd = irq_data_get_irq_chip_data(data); 743 652 744 653 /* 745 654 * For non-v2 CIU, we will allow only single CPU affinity. ··· 759 668 lock = &per_cpu(octeon_irq_ciu_spinlock, cpu); 760 669 raw_spin_lock_irqsave(lock, flags); 761 670 762 - if (cd.s.line == 0) 671 + if (cd->line == 0) 763 672 pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 764 673 else 765 674 pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu); 766 675 767 676 if (cpumask_test_cpu(cpu, dest) && enable_one) { 768 677 enable_one = 0; 769 - __set_bit(cd.s.bit, pen); 678 + __set_bit(cd->bit, pen); 770 679 } else { 771 - __clear_bit(cd.s.bit, pen); 680 + __clear_bit(cd->bit, pen); 772 681 } 773 682 /* 774 683 * Must be visible to octeon_irq_ip{2,3}_ciu() before ··· 776 685 */ 777 686 wmb(); 778 687 779 - if (cd.s.line == 0) 688 + if (cd->line == 0) 780 689 cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen); 781 690 else 782 691 cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen); ··· 797 706 int cpu; 798 707 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 799 708 u64 mask; 800 - union octeon_ciu_chip_data cd; 709 + struct octeon_ciu_chip_data *cd; 801 710 802 711 if (!enable_one) 803 712 return 0; 804 713 805 - cd.p = irq_data_get_irq_chip_data(data); 806 - mask = 1ull << cd.s.bit; 714 + cd = irq_data_get_irq_chip_data(data); 715 + mask = 1ull << cd->bit; 807 716 808 - if (cd.s.line == 0) { 717 + if (cd->line == 0) { 809 718 for_each_online_cpu(cpu) { 810 719 unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu); 811 720 int index = octeon_coreid_for_cpu(cpu) * 2; 812 721 if (cpumask_test_cpu(cpu, dest) && enable_one) { 813 722 enable_one = false; 814 - set_bit(cd.s.bit, pen); 723 + set_bit(cd->bit, pen); 815 724 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask); 816 725 } else { 817 - clear_bit(cd.s.bit, pen); 726 + clear_bit(cd->bit, pen); 818 727 cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask); 819 728 } 820 729 } ··· 824 733 int index = octeon_coreid_for_cpu(cpu) * 2 + 1; 825 734 if 
(cpumask_test_cpu(cpu, dest) && enable_one) { 826 735 enable_one = false; 827 - set_bit(cd.s.bit, pen); 736 + set_bit(cd->bit, pen); 828 737 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask); 829 738 } else { 830 - clear_bit(cd.s.bit, pen); 739 + clear_bit(cd->bit, pen); 831 740 cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask); 832 741 } 742 + } 743 + } 744 + return 0; 745 + } 746 + 747 + static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data, 748 + const struct cpumask *dest, 749 + bool force) 750 + { 751 + int cpu; 752 + bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 753 + u64 mask; 754 + struct octeon_ciu_chip_data *cd; 755 + 756 + if (!enable_one) 757 + return 0; 758 + 759 + cd = irq_data_get_irq_chip_data(data); 760 + mask = 1ull << cd->bit; 761 + 762 + for_each_online_cpu(cpu) { 763 + int index = octeon_coreid_for_cpu(cpu); 764 + 765 + if (cpumask_test_cpu(cpu, dest) && enable_one) { 766 + enable_one = false; 767 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask); 768 + } else { 769 + cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask); 833 770 } 834 771 } 835 772 return 0; ··· 871 752 .name = "CIU", 872 753 .irq_enable = octeon_irq_ciu_enable_v2, 873 754 .irq_disable = octeon_irq_ciu_disable_all_v2, 755 + .irq_mask = octeon_irq_ciu_disable_local_v2, 756 + .irq_unmask = octeon_irq_ciu_enable_v2, 757 + #ifdef CONFIG_SMP 758 + .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, 759 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 760 + #endif 761 + }; 762 + 763 + static struct irq_chip octeon_irq_chip_ciu_v2_edge = { 764 + .name = "CIU", 765 + .irq_enable = octeon_irq_ciu_enable_v2, 766 + .irq_disable = octeon_irq_ciu_disable_all_v2, 874 767 .irq_ack = octeon_irq_ciu_ack, 875 768 .irq_mask = octeon_irq_ciu_disable_local_v2, 876 769 .irq_unmask = octeon_irq_ciu_enable_v2, ··· 892 761 #endif 893 762 }; 894 763 764 + /* 765 + * Newer octeon chips have support for lockless CIU operation. 
766 + */ 767 + static struct irq_chip octeon_irq_chip_ciu_sum2 = { 768 + .name = "CIU", 769 + .irq_enable = octeon_irq_ciu_enable_sum2, 770 + .irq_disable = octeon_irq_ciu_disable_all_sum2, 771 + .irq_mask = octeon_irq_ciu_disable_local_sum2, 772 + .irq_unmask = octeon_irq_ciu_enable_sum2, 773 + #ifdef CONFIG_SMP 774 + .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, 775 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 776 + #endif 777 + }; 778 + 779 + static struct irq_chip octeon_irq_chip_ciu_sum2_edge = { 780 + .name = "CIU", 781 + .irq_enable = octeon_irq_ciu_enable_sum2, 782 + .irq_disable = octeon_irq_ciu_disable_all_sum2, 783 + .irq_ack = octeon_irq_ciu_ack_sum2, 784 + .irq_mask = octeon_irq_ciu_disable_local_sum2, 785 + .irq_unmask = octeon_irq_ciu_enable_sum2, 786 + #ifdef CONFIG_SMP 787 + .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2, 788 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 789 + #endif 790 + }; 791 + 895 792 static struct irq_chip octeon_irq_chip_ciu = { 793 + .name = "CIU", 794 + .irq_enable = octeon_irq_ciu_enable, 795 + .irq_disable = octeon_irq_ciu_disable_all, 796 + .irq_mask = octeon_irq_ciu_disable_local, 797 + .irq_unmask = octeon_irq_ciu_enable, 798 + #ifdef CONFIG_SMP 799 + .irq_set_affinity = octeon_irq_ciu_set_affinity, 800 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 801 + #endif 802 + }; 803 + 804 + static struct irq_chip octeon_irq_chip_ciu_edge = { 896 805 .name = "CIU", 897 806 .irq_enable = octeon_irq_ciu_enable, 898 807 .irq_disable = octeon_irq_ciu_disable_all, ··· 1141 970 unsigned int *out_type) 1142 971 { 1143 972 unsigned int ciu, bit; 973 + struct octeon_irq_ciu_domain_data *dd = d->host_data; 1144 974 1145 975 ciu = intspec[0]; 1146 976 bit = intspec[1]; 1147 977 1148 - if (ciu > 1 || bit > 63) 978 + if (ciu >= dd->num_sum || bit > 63) 1149 979 return -EINVAL; 1150 980 1151 981 *out_hwirq = (ciu << 6) | bit; ··· 1156 984 } 1157 985 1158 986 static struct irq_chip *octeon_irq_ciu_chip; 987 + static struct irq_chip *octeon_irq_ciu_chip_edge; 1159 988 static struct irq_chip *octeon_irq_gpio_chip; 1160 989 1161 990 static bool octeon_irq_virq_in_range(unsigned int virq) ··· 1172 999 static int octeon_irq_ciu_map(struct irq_domain *d, 1173 1000 unsigned int virq, irq_hw_number_t hw) 1174 1001 { 1002 + int rv; 1175 1003 unsigned int line = hw >> 6; 1176 1004 unsigned int bit = hw & 63; 1005 + struct octeon_irq_ciu_domain_data *dd = d->host_data; 1177 1006 1178 1007 if (!octeon_irq_virq_in_range(virq)) 1179 1008 return -EINVAL; ··· 1184 1009 if (line == 0 && bit >= 16 && bit <32) 1185 1010 return 0; 1186 1011 1187 - if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0) 1012 + if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0) 1188 1013 return -EINVAL; 1189 1014 1190 - if (octeon_irq_ciu_is_edge(line, bit)) 1191 - octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1192 - octeon_irq_ciu_chip, 1193 - handle_edge_irq); 1194 - else 1195 - octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1196 - octeon_irq_ciu_chip, 1197 - handle_level_irq); 1198 - 1199 - return 0; 1015 + if (line == 2) { 1016 + if (octeon_irq_ciu_is_edge(line, bit)) 1017 + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1018 + &octeon_irq_chip_ciu_sum2_edge, 1019 + handle_edge_irq); 1020 + else 1021 + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1022 + &octeon_irq_chip_ciu_sum2, 1023 + handle_level_irq); 1024 + } else { 1025 + if (octeon_irq_ciu_is_edge(line, bit)) 1026 + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1027 + 
octeon_irq_ciu_chip_edge, 1028 + handle_edge_irq); 1029 + else 1030 + rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1031 + octeon_irq_ciu_chip, 1032 + handle_level_irq); 1033 + } 1034 + return rv; 1200 1035 } 1201 1036 1202 - static int octeon_irq_gpio_map_common(struct irq_domain *d, 1203 - unsigned int virq, irq_hw_number_t hw, 1204 - int line_limit, struct irq_chip *chip) 1037 + static int octeon_irq_gpio_map(struct irq_domain *d, 1038 + unsigned int virq, irq_hw_number_t hw) 1205 1039 { 1206 1040 struct octeon_irq_gpio_domain_data *gpiod = d->host_data; 1207 1041 unsigned int line, bit; 1042 + int r; 1208 1043 1209 1044 if (!octeon_irq_virq_in_range(virq)) 1210 1045 return -EINVAL; 1211 1046 1212 1047 line = (hw + gpiod->base_hwirq) >> 6; 1213 1048 bit = (hw + gpiod->base_hwirq) & 63; 1214 - if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0) 1049 + if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) || 1050 + octeon_irq_ciu_to_irq[line][bit] != 0) 1215 1051 return -EINVAL; 1216 1052 1217 - octeon_irq_set_ciu_mapping(virq, line, bit, hw, 1218 - chip, octeon_irq_handle_gpio); 1219 - return 0; 1220 - } 1221 - 1222 - static int octeon_irq_gpio_map(struct irq_domain *d, 1223 - unsigned int virq, irq_hw_number_t hw) 1224 - { 1225 - return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip); 1053 + r = octeon_irq_set_ciu_mapping(virq, line, bit, hw, 1054 + octeon_irq_gpio_chip, octeon_irq_handle_trigger); 1055 + return r; 1226 1056 } 1227 1057 1228 1058 static struct irq_domain_ops octeon_irq_domain_ciu_ops = { 1229 1059 .map = octeon_irq_ciu_map, 1060 + .unmap = octeon_irq_free_cd, 1230 1061 .xlate = octeon_irq_ciu_xlat, 1231 1062 }; 1232 1063 1233 1064 static struct irq_domain_ops octeon_irq_domain_gpio_ops = { 1234 1065 .map = octeon_irq_gpio_map, 1066 + .unmap = octeon_irq_free_cd, 1235 1067 .xlate = octeon_irq_gpio_xlat, 1236 1068 }; 1237 1069 ··· 1268 1086 if (likely(ciu_sum)) { 1269 1087 int bit = fls64(ciu_sum) - 1; 1270 1088 int irq = octeon_irq_ciu_to_irq[1][bit]; 1089 + if (likely(irq)) 1090 + do_IRQ(irq); 1091 + else 1092 + spurious_interrupt(); 1093 + } else { 1094 + spurious_interrupt(); 1095 + } 1096 + } 1097 + 1098 + static void octeon_irq_ip4_ciu(void) 1099 + { 1100 + int coreid = cvmx_get_core_num(); 1101 + u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid)); 1102 + u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid)); 1103 + 1104 + ciu_sum &= ciu_en; 1105 + if (likely(ciu_sum)) { 1106 + int bit = fls64(ciu_sum) - 1; 1107 + int irq = octeon_irq_ciu_to_irq[2][bit]; 1108 + 1271 1109 if (likely(irq)) 1272 1110 do_IRQ(irq); 1273 1111 else ··· 1378 1176 1379 1177 /* Enable the CIU lines */ 1380 1178 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1381 - clear_c0_status(STATUSF_IP4); 1179 + if (octeon_irq_use_ip4) 1180 + set_c0_status(STATUSF_IP4); 1181 + else 1182 + clear_c0_status(STATUSF_IP4); 1382 1183 } 1383 1184 1384 1185 static void octeon_irq_setup_secondary_ciu2(void) ··· 1397 1192 clear_c0_status(STATUSF_IP4); 1398 1193 } 1399 1194 1400 - static void __init octeon_irq_init_ciu(void) 1195 + static int __init octeon_irq_init_ciu( 1196 + struct device_node *ciu_node, struct device_node *parent) 1401 1197 { 1402 - unsigned int i; 1198 + unsigned int i, r; 1403 1199 struct irq_chip *chip; 1200 + struct irq_chip *chip_edge; 1404 1201 struct irq_chip *chip_mbox; 1405 1202 struct irq_chip *chip_wd; 1406 - struct device_node *gpio_node; 1407 - struct device_node *ciu_node; 1408 1203 struct irq_domain *ciu_domain = NULL; 1204 + struct 
octeon_irq_ciu_domain_data *dd; 1205 + 1206 + dd = kzalloc(sizeof(*dd), GFP_KERNEL); 1207 + if (!dd) 1208 + return -ENOMEM; 1409 1209 1410 1210 octeon_irq_init_ciu_percpu(); 1411 1211 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu; 1412 1212 1413 1213 octeon_irq_ip2 = octeon_irq_ip2_ciu; 1414 1214 octeon_irq_ip3 = octeon_irq_ip3_ciu; 1215 + if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) 1216 + && !OCTEON_IS_MODEL(OCTEON_CN63XX)) { 1217 + octeon_irq_ip4 = octeon_irq_ip4_ciu; 1218 + dd->num_sum = 3; 1219 + octeon_irq_use_ip4 = true; 1220 + } else { 1221 + octeon_irq_ip4 = octeon_irq_ip4_mask; 1222 + dd->num_sum = 2; 1223 + octeon_irq_use_ip4 = false; 1224 + } 1415 1225 if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) || 1416 1226 OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) || 1417 1227 OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) || 1418 - OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 1228 + OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) { 1419 1229 chip = &octeon_irq_chip_ciu_v2; 1230 + chip_edge = &octeon_irq_chip_ciu_v2_edge; 1420 1231 chip_mbox = &octeon_irq_chip_ciu_mbox_v2; 1421 1232 chip_wd = &octeon_irq_chip_ciu_wd_v2; 1422 1233 octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2; 1423 1234 } else { 1424 1235 chip = &octeon_irq_chip_ciu; 1236 + chip_edge = &octeon_irq_chip_ciu_edge; 1425 1237 chip_mbox = &octeon_irq_chip_ciu_mbox; 1426 1238 chip_wd = &octeon_irq_chip_ciu_wd; 1427 1239 octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio; 1428 1240 } 1429 1241 octeon_irq_ciu_chip = chip; 1430 - octeon_irq_ip4 = octeon_irq_ip4_mask; 1242 + octeon_irq_ciu_chip_edge = chip_edge; 1431 1243 1432 1244 /* Mips internal */ 1433 1245 octeon_irq_init_core(); 1434 1246 1435 - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); 1436 - if (gpio_node) { 1437 - struct octeon_irq_gpio_domain_data *gpiod; 1438 - 1439 - gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); 1440 - if (gpiod) { 1441 - /* gpio domain host_data is the base hwirq number. 
*/ 1442 - gpiod->base_hwirq = 16; 1443 - irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); 1444 - of_node_put(gpio_node); 1445 - } else 1446 - pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); 1447 - } else 1448 - pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); 1449 - 1450 - ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu"); 1451 - if (ciu_node) { 1452 - ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL); 1453 - irq_set_default_host(ciu_domain); 1454 - of_node_put(ciu_node); 1455 - } else 1456 - panic("Cannot find device node for cavium,octeon-3860-ciu."); 1247 + ciu_domain = irq_domain_add_tree( 1248 + ciu_node, &octeon_irq_domain_ciu_ops, dd); 1249 + irq_set_default_host(ciu_domain); 1457 1250 1458 1251 /* CIU_0 */ 1459 - for (i = 0; i < 16; i++) 1460 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); 1252 + for (i = 0; i < 16; i++) { 1253 + r = octeon_irq_force_ciu_mapping( 1254 + ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0); 1255 + if (r) 1256 + goto err; 1257 + } 1461 1258 1462 - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); 1463 - octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); 1259 + r = octeon_irq_set_ciu_mapping( 1260 + OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq); 1261 + if (r) 1262 + goto err; 1263 + r = octeon_irq_set_ciu_mapping( 1264 + OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq); 1265 + if (r) 1266 + goto err; 1464 1267 1465 - for (i = 0; i < 4; i++) 1466 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); 1467 - for (i = 0; i < 4; i++) 1468 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); 1268 + for (i = 0; i < 4; i++) { 1269 + r = octeon_irq_force_ciu_mapping( 1270 + ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36); 1271 + if (r) 1272 + goto err; 1273 + } 1274 + for (i = 0; i < 4; i++) { 1275 + r = octeon_irq_force_ciu_mapping( 1276 + ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40); 1277 + if (r) 1278 + goto err; 1279 + } 1469 1280 1470 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); 1471 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); 1472 - for (i = 0; i < 4; i++) 1473 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); 1281 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45); 1282 + if (r) 1283 + goto err; 1474 1284 1475 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); 1476 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); 1285 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46); 1286 + if (r) 1287 + goto err; 1288 + 1289 + for (i = 0; i < 4; i++) { 1290 + r = octeon_irq_force_ciu_mapping( 1291 + ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52); 1292 + if (r) 1293 + goto err; 1294 + } 1295 + 1296 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56); 1297 + if (r) 1298 + goto err; 1299 + 1300 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59); 1301 + if (r) 1302 + goto err; 1477 1303 1478 1304 /* CIU_1 */ 1479 - for (i = 0; i < 16; i++) 1480 - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq); 1305 + for (i = 0; i < 16; i++) { 1306 + r = octeon_irq_set_ciu_mapping( 1307 + i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, 1308 + handle_level_irq); 1309 + if (r) 1310 + goto err; 1311 + 
} 1481 1312 1482 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); 1313 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17); 1314 + if (r) 1315 + goto err; 1483 1316 1484 1317 /* Enable the CIU lines */ 1485 1318 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 1486 - clear_c0_status(STATUSF_IP4); 1319 + if (octeon_irq_use_ip4) 1320 + set_c0_status(STATUSF_IP4); 1321 + else 1322 + clear_c0_status(STATUSF_IP4); 1323 + 1324 + return 0; 1325 + err: 1326 + return r; 1487 1327 } 1488 1328 1329 + static int __init octeon_irq_init_gpio( 1330 + struct device_node *gpio_node, struct device_node *parent) 1331 + { 1332 + struct octeon_irq_gpio_domain_data *gpiod; 1333 + u32 interrupt_cells; 1334 + unsigned int base_hwirq; 1335 + int r; 1336 + 1337 + r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells); 1338 + if (r) 1339 + return r; 1340 + 1341 + if (interrupt_cells == 1) { 1342 + u32 v; 1343 + 1344 + r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v); 1345 + if (r) { 1346 + pr_warn("No \"interrupts\" property.\n"); 1347 + return r; 1348 + } 1349 + base_hwirq = v; 1350 + } else if (interrupt_cells == 2) { 1351 + u32 v0, v1; 1352 + 1353 + r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0); 1354 + if (r) { 1355 + pr_warn("No \"interrupts\" property.\n"); 1356 + return r; 1357 + } 1358 + r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1); 1359 + if (r) { 1360 + pr_warn("No \"interrupts\" property.\n"); 1361 + return r; 1362 + } 1363 + base_hwirq = (v0 << 6) | v1; 1364 + } else { 1365 + pr_warn("Bad \"#interrupt-cells\" property: %u\n", 1366 + interrupt_cells); 1367 + return -EINVAL; 1368 + } 1369 + 1370 + gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); 1371 + if (gpiod) { 1372 + /* gpio domain host_data is the base hwirq number. */ 1373 + gpiod->base_hwirq = base_hwirq; 1374 + irq_domain_add_linear( 1375 + gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod); 1376 + } else { 1377 + pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); 1378 + return -ENOMEM; 1379 + } 1380 + 1381 + return 0; 1382 + } 1489 1383 /* 1490 1384 * Watchdog interrupts are special. They are associated with a single 1491 1385 * core, so we hardwire the affinity to that core. 
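For reference, octeon_irq_init_gpio() above derives the GPIO domain's base
hwirq from the "interrupts" property: with a two-cell parent specifier the
CIU line and bit are folded into a single number, which is why the old
hard-coded CIU2 path used 7 << 6. A standalone sketch of the packing (the
example values are illustrative only):

	#include <assert.h>

	/* (line << 6) | bit, as in the two-cell branch above;
	 * each CIU line carries 64 bits. */
	static unsigned int pack_base_hwirq(unsigned int line, unsigned int bit)
	{
		return (line << 6) | bit;
	}

	int main(void)
	{
		assert(pack_base_hwirq(0, 16) == 16);	 /* CIU, line 0 bit 16 */
		assert(pack_base_hwirq(7, 0) == 7 << 6); /* old CIU2 GPIO base */
		return 0;
	}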
··· 1594 1290 u64 mask; 1595 1291 u64 en_addr; 1596 1292 int coreid = data->irq - OCTEON_IRQ_WDOG0; 1597 - union octeon_ciu_chip_data cd; 1293 + struct octeon_ciu_chip_data *cd; 1598 1294 1599 - cd.p = irq_data_get_irq_chip_data(data); 1600 - mask = 1ull << (cd.s.bit); 1295 + cd = irq_data_get_irq_chip_data(data); 1296 + mask = 1ull << (cd->bit); 1601 1297 1602 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1298 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + 1299 + (0x1000ull * cd->line); 1603 1300 cvmx_write_csr(en_addr, mask); 1604 1301 1605 1302 } ··· 1611 1306 u64 en_addr; 1612 1307 int cpu = next_cpu_for_irq(data); 1613 1308 int coreid = octeon_coreid_for_cpu(cpu); 1614 - union octeon_ciu_chip_data cd; 1309 + struct octeon_ciu_chip_data *cd; 1615 1310 1616 - cd.p = irq_data_get_irq_chip_data(data); 1617 - mask = 1ull << (cd.s.bit); 1311 + cd = irq_data_get_irq_chip_data(data); 1312 + mask = 1ull << (cd->bit); 1618 1313 1619 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1314 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + 1315 + (0x1000ull * cd->line); 1620 1316 cvmx_write_csr(en_addr, mask); 1621 1317 } 1622 1318 ··· 1626 1320 u64 mask; 1627 1321 u64 en_addr; 1628 1322 int coreid = cvmx_get_core_num(); 1629 - union octeon_ciu_chip_data cd; 1323 + struct octeon_ciu_chip_data *cd; 1630 1324 1631 - cd.p = irq_data_get_irq_chip_data(data); 1632 - mask = 1ull << (cd.s.bit); 1325 + cd = irq_data_get_irq_chip_data(data); 1326 + mask = 1ull << (cd->bit); 1633 1327 1634 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line); 1328 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + 1329 + (0x1000ull * cd->line); 1635 1330 cvmx_write_csr(en_addr, mask); 1636 1331 1637 1332 } ··· 1642 1335 u64 mask; 1643 1336 u64 en_addr; 1644 1337 int coreid = cvmx_get_core_num(); 1645 - union octeon_ciu_chip_data cd; 1338 + struct octeon_ciu_chip_data *cd; 1646 1339 1647 - cd.p = irq_data_get_irq_chip_data(data); 1648 - mask = 1ull << (cd.s.bit); 1340 + cd = irq_data_get_irq_chip_data(data); 1341 + mask = 1ull << (cd->bit); 1649 1342 1650 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line); 1343 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + 1344 + (0x1000ull * cd->line); 1651 1345 cvmx_write_csr(en_addr, mask); 1652 1346 1653 1347 } ··· 1658 1350 u64 mask; 1659 1351 u64 en_addr; 1660 1352 int coreid = cvmx_get_core_num(); 1661 - union octeon_ciu_chip_data cd; 1353 + struct octeon_ciu_chip_data *cd; 1662 1354 1663 - cd.p = irq_data_get_irq_chip_data(data); 1664 - mask = 1ull << (cd.s.bit); 1355 + cd = irq_data_get_irq_chip_data(data); 1356 + mask = 1ull << (cd->bit); 1665 1357 1666 - en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line); 1358 + en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line); 1667 1359 cvmx_write_csr(en_addr, mask); 1668 1360 1669 1361 } ··· 1672 1364 { 1673 1365 int cpu; 1674 1366 u64 mask; 1675 - union octeon_ciu_chip_data cd; 1367 + struct octeon_ciu_chip_data *cd; 1676 1368 1677 - cd.p = irq_data_get_irq_chip_data(data); 1678 - mask = 1ull << (cd.s.bit); 1369 + cd = irq_data_get_irq_chip_data(data); 1370 + mask = 1ull << (cd->bit); 1679 1371 1680 1372 for_each_online_cpu(cpu) { 1681 - u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1373 + u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( 1374 + octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line); 1682 1375 cvmx_write_csr(en_addr, mask); 
1683 1376 } 1684 1377 } ··· 1692 1383 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); 1693 1384 1694 1385 for_each_online_cpu(cpu) { 1695 - u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu)); 1386 + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S( 1387 + octeon_coreid_for_cpu(cpu)); 1696 1388 cvmx_write_csr(en_addr, mask); 1697 1389 } 1698 1390 } ··· 1706 1396 mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0); 1707 1397 1708 1398 for_each_online_cpu(cpu) { 1709 - u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu)); 1399 + u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C( 1400 + octeon_coreid_for_cpu(cpu)); 1710 1401 cvmx_write_csr(en_addr, mask); 1711 1402 } 1712 1403 } ··· 1741 1430 int cpu; 1742 1431 bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data); 1743 1432 u64 mask; 1744 - union octeon_ciu_chip_data cd; 1433 + struct octeon_ciu_chip_data *cd; 1745 1434 1746 1435 if (!enable_one) 1747 1436 return 0; 1748 1437 1749 - cd.p = irq_data_get_irq_chip_data(data); 1750 - mask = 1ull << cd.s.bit; 1438 + cd = irq_data_get_irq_chip_data(data); 1439 + mask = 1ull << cd->bit; 1751 1440 1752 1441 for_each_online_cpu(cpu) { 1753 1442 u64 en_addr; 1754 1443 if (cpumask_test_cpu(cpu, dest) && enable_one) { 1755 1444 enable_one = false; 1756 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1445 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S( 1446 + octeon_coreid_for_cpu(cpu)) + 1447 + (0x1000ull * cd->line); 1757 1448 } else { 1758 - en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line); 1449 + en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C( 1450 + octeon_coreid_for_cpu(cpu)) + 1451 + (0x1000ull * cd->line); 1759 1452 } 1760 1453 cvmx_write_csr(en_addr, mask); 1761 1454 } ··· 1776 1461 1777 1462 static void octeon_irq_ciu2_disable_gpio(struct irq_data *data) 1778 1463 { 1779 - union octeon_ciu_chip_data cd; 1780 - cd.p = irq_data_get_irq_chip_data(data); 1464 + struct octeon_ciu_chip_data *cd; 1781 1465 1782 - cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0); 1466 + cd = irq_data_get_irq_chip_data(data); 1467 + 1468 + cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0); 1783 1469 1784 1470 octeon_irq_ciu2_disable_all(data); 1785 1471 } 1786 1472 1787 1473 static struct irq_chip octeon_irq_chip_ciu2 = { 1474 + .name = "CIU2-E", 1475 + .irq_enable = octeon_irq_ciu2_enable, 1476 + .irq_disable = octeon_irq_ciu2_disable_all, 1477 + .irq_mask = octeon_irq_ciu2_disable_local, 1478 + .irq_unmask = octeon_irq_ciu2_enable, 1479 + #ifdef CONFIG_SMP 1480 + .irq_set_affinity = octeon_irq_ciu2_set_affinity, 1481 + .irq_cpu_offline = octeon_irq_cpu_offline_ciu, 1482 + #endif 1483 + }; 1484 + 1485 + static struct irq_chip octeon_irq_chip_ciu2_edge = { 1788 1486 .name = "CIU2-E", 1789 1487 .irq_enable = octeon_irq_ciu2_enable, 1790 1488 .irq_disable = octeon_irq_ciu2_disable_all, ··· 1910 1582 1911 1583 if (octeon_irq_ciu2_is_edge(line, bit)) 1912 1584 octeon_irq_set_ciu_mapping(virq, line, bit, 0, 1913 - &octeon_irq_chip_ciu2, 1585 + &octeon_irq_chip_ciu2_edge, 1914 1586 handle_edge_irq); 1915 1587 else 1916 1588 octeon_irq_set_ciu_mapping(virq, line, bit, 0, ··· 1919 1591 1920 1592 return 0; 1921 1593 } 1922 - static int octeon_irq_ciu2_gpio_map(struct irq_domain *d, 1923 - unsigned int virq, irq_hw_number_t hw) 1924 - { 1925 - return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio); 1926 - } 1927 1594 1928 1595 static struct irq_domain_ops octeon_irq_domain_ciu2_ops = 
{ 1929 1596 .map = octeon_irq_ciu2_map, 1597 + .unmap = octeon_irq_free_cd, 1930 1598 .xlate = octeon_irq_ciu2_xlat, 1931 - }; 1932 - 1933 - static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = { 1934 - .map = octeon_irq_ciu2_gpio_map, 1935 - .xlate = octeon_irq_gpio_xlat, 1936 1599 }; 1937 1600 1938 1601 static void octeon_irq_ciu2(void) ··· 1993 1674 return; 1994 1675 } 1995 1676 1996 - static void __init octeon_irq_init_ciu2(void) 1677 + static int __init octeon_irq_init_ciu2( 1678 + struct device_node *ciu_node, struct device_node *parent) 1997 1679 { 1998 - unsigned int i; 1999 - struct device_node *gpio_node; 2000 - struct device_node *ciu_node; 1680 + unsigned int i, r; 2001 1681 struct irq_domain *ciu_domain = NULL; 2002 1682 2003 1683 octeon_irq_init_ciu2_percpu(); 2004 1684 octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2; 2005 1685 1686 + octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio; 2006 1687 octeon_irq_ip2 = octeon_irq_ciu2; 2007 1688 octeon_irq_ip3 = octeon_irq_ciu2_mbox; 2008 1689 octeon_irq_ip4 = octeon_irq_ip4_mask; ··· 2010 1691 /* Mips internal */ 2011 1692 octeon_irq_init_core(); 2012 1693 2013 - gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio"); 2014 - if (gpio_node) { 2015 - struct octeon_irq_gpio_domain_data *gpiod; 2016 - 2017 - gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL); 2018 - if (gpiod) { 2019 - /* gpio domain host_data is the base hwirq number. */ 2020 - gpiod->base_hwirq = 7 << 6; 2021 - irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod); 2022 - of_node_put(gpio_node); 2023 - } else 2024 - pr_warn("Cannot allocate memory for GPIO irq_domain.\n"); 2025 - } else 2026 - pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n"); 2027 - 2028 - ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2"); 2029 - if (ciu_node) { 2030 - ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL); 2031 - irq_set_default_host(ciu_domain); 2032 - of_node_put(ciu_node); 2033 - } else 2034 - panic("Cannot find device node for cavium,octeon-6880-ciu2."); 1694 + ciu_domain = irq_domain_add_tree( 1695 + ciu_node, &octeon_irq_domain_ciu2_ops, NULL); 1696 + irq_set_default_host(ciu_domain); 2035 1697 2036 1698 /* CUI2 */ 2037 - for (i = 0; i < 64; i++) 2038 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); 1699 + for (i = 0; i < 64; i++) { 1700 + r = octeon_irq_force_ciu_mapping( 1701 + ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i); 1702 + if (r) 1703 + goto err; 1704 + } 2039 1705 2040 - for (i = 0; i < 32; i++) 2041 - octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, 2042 - &octeon_irq_chip_ciu2_wd, handle_level_irq); 1706 + for (i = 0; i < 32; i++) { 1707 + r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0, 1708 + &octeon_irq_chip_ciu2_wd, handle_level_irq); 1709 + if (r) 1710 + goto err; 1711 + } 2043 1712 2044 - for (i = 0; i < 4; i++) 2045 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); 1713 + for (i = 0; i < 4; i++) { 1714 + r = octeon_irq_force_ciu_mapping( 1715 + ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8); 1716 + if (r) 1717 + goto err; 1718 + } 2046 1719 2047 - octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); 1720 + r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44); 1721 + if (r) 1722 + goto err; 2048 1723 2049 - for (i = 0; i < 4; i++) 2050 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); 1724 + for (i = 0; i < 4; i++) { 
1725 + r = octeon_irq_force_ciu_mapping( 1726 + ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i); 1727 + if (r) 1728 + goto err; 1729 + } 2051 1730 2052 - for (i = 0; i < 4; i++) 2053 - octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); 1731 + for (i = 0; i < 4; i++) { 1732 + r = octeon_irq_force_ciu_mapping( 1733 + ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8); 1734 + if (r) 1735 + goto err; 1736 + } 2054 1737 2055 1738 irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); 2056 1739 irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq); ··· 2062 1741 /* Enable the CIU lines */ 2063 1742 set_c0_status(STATUSF_IP3 | STATUSF_IP2); 2064 1743 clear_c0_status(STATUSF_IP4); 1744 + return 0; 1745 + err: 1746 + return r; 2065 1747 } 1748 + 1749 + struct octeon_irq_cib_host_data { 1750 + raw_spinlock_t lock; 1751 + u64 raw_reg; 1752 + u64 en_reg; 1753 + int max_bits; 1754 + }; 1755 + 1756 + struct octeon_irq_cib_chip_data { 1757 + struct octeon_irq_cib_host_data *host_data; 1758 + int bit; 1759 + }; 1760 + 1761 + static void octeon_irq_cib_enable(struct irq_data *data) 1762 + { 1763 + unsigned long flags; 1764 + u64 en; 1765 + struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); 1766 + struct octeon_irq_cib_host_data *host_data = cd->host_data; 1767 + 1768 + raw_spin_lock_irqsave(&host_data->lock, flags); 1769 + en = cvmx_read_csr(host_data->en_reg); 1770 + en |= 1ull << cd->bit; 1771 + cvmx_write_csr(host_data->en_reg, en); 1772 + raw_spin_unlock_irqrestore(&host_data->lock, flags); 1773 + } 1774 + 1775 + static void octeon_irq_cib_disable(struct irq_data *data) 1776 + { 1777 + unsigned long flags; 1778 + u64 en; 1779 + struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data); 1780 + struct octeon_irq_cib_host_data *host_data = cd->host_data; 1781 + 1782 + raw_spin_lock_irqsave(&host_data->lock, flags); 1783 + en = cvmx_read_csr(host_data->en_reg); 1784 + en &= ~(1ull << cd->bit); 1785 + cvmx_write_csr(host_data->en_reg, en); 1786 + raw_spin_unlock_irqrestore(&host_data->lock, flags); 1787 + } 1788 + 1789 + static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t) 1790 + { 1791 + irqd_set_trigger_type(data, t); 1792 + return IRQ_SET_MASK_OK; 1793 + } 1794 + 1795 + static struct irq_chip octeon_irq_chip_cib = { 1796 + .name = "CIB", 1797 + .irq_enable = octeon_irq_cib_enable, 1798 + .irq_disable = octeon_irq_cib_disable, 1799 + .irq_mask = octeon_irq_cib_disable, 1800 + .irq_unmask = octeon_irq_cib_enable, 1801 + .irq_set_type = octeon_irq_cib_set_type, 1802 + }; 1803 + 1804 + static int octeon_irq_cib_xlat(struct irq_domain *d, 1805 + struct device_node *node, 1806 + const u32 *intspec, 1807 + unsigned int intsize, 1808 + unsigned long *out_hwirq, 1809 + unsigned int *out_type) 1810 + { 1811 + unsigned int type = 0; 1812 + 1813 + if (intsize == 2) 1814 + type = intspec[1]; 1815 + 1816 + switch (type) { 1817 + case 0: /* unofficial value, but we might as well let it work. */ 1818 + case 4: /* official value for level triggering. */ 1819 + *out_type = IRQ_TYPE_LEVEL_HIGH; 1820 + break; 1821 + case 1: /* official value for edge triggering. */ 1822 + *out_type = IRQ_TYPE_EDGE_RISING; 1823 + break; 1824 + default: /* Nothing else is acceptable. 
*/
1825 + return -EINVAL;
1826 + }
1827 + 
1828 + *out_hwirq = intspec[0];
1829 + 
1830 + return 0;
1831 + }
1832 + 
1833 + static int octeon_irq_cib_map(struct irq_domain *d,
1834 + unsigned int virq, irq_hw_number_t hw)
1835 + {
1836 + struct octeon_irq_cib_host_data *host_data = d->host_data;
1837 + struct octeon_irq_cib_chip_data *cd;
1838 + 
1839 + if (hw >= host_data->max_bits) {
1840 + pr_err("ERROR: %s mapping %u is too big!\n",
1841 + d->of_node->name, (unsigned)hw);
1842 + return -EINVAL;
1843 + }
1844 + 
1845 + cd = kzalloc(sizeof(*cd), GFP_KERNEL);
1846 + cd->host_data = host_data;
1847 + cd->bit = hw;
1848 + 
1849 + irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
1850 + handle_simple_irq);
1851 + irq_set_chip_data(virq, cd);
1852 + return 0;
1853 + }
1854 + 
1855 + static struct irq_domain_ops octeon_irq_domain_cib_ops = {
1856 + .map = octeon_irq_cib_map,
1857 + .unmap = octeon_irq_free_cd,
1858 + .xlate = octeon_irq_cib_xlat,
1859 + };
1860 + 
1861 + /* Chain to real handler. */
1862 + static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
1863 + {
1864 + u64 en;
1865 + u64 raw;
1866 + u64 bits;
1867 + int i;
1868 + int irq;
1869 + struct irq_domain *cib_domain = data;
1870 + struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
1871 + 
1872 + en = cvmx_read_csr(host_data->en_reg);
1873 + raw = cvmx_read_csr(host_data->raw_reg);
1874 + 
1875 + bits = en & raw;
1876 + 
1877 + for (i = 0; i < host_data->max_bits; i++) {
1878 + if ((bits & 1ull << i) == 0)
1879 + continue;
1880 + irq = irq_find_mapping(cib_domain, i);
1881 + if (!irq) {
1882 + unsigned long flags;
1883 + 
1884 + pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
1885 + i, host_data->raw_reg);
1886 + raw_spin_lock_irqsave(&host_data->lock, flags);
1887 + en = cvmx_read_csr(host_data->en_reg);
1888 + en &= ~(1ull << i);
1889 + cvmx_write_csr(host_data->en_reg, en);
1890 + cvmx_write_csr(host_data->raw_reg, 1ull << i);
1891 + raw_spin_unlock_irqrestore(&host_data->lock, flags);
1892 + } else {
1893 + struct irq_desc *desc = irq_to_desc(irq);
1894 + struct irq_data *irq_data = irq_desc_get_irq_data(desc);
1895 + /* If edge, acknowledge the bit we will be sending.
*/
1896 + if (irqd_get_trigger_type(irq_data) &
1897 + IRQ_TYPE_EDGE_BOTH)
1898 + cvmx_write_csr(host_data->raw_reg, 1ull << i);
1899 + generic_handle_irq_desc(irq, desc);
1900 + }
1901 + }
1902 + 
1903 + return IRQ_HANDLED;
1904 + }
1905 + 
1906 + static int __init octeon_irq_init_cib(struct device_node *ciu_node,
1907 + struct device_node *parent)
1908 + {
1909 + const __be32 *addr;
1910 + u32 val;
1911 + struct octeon_irq_cib_host_data *host_data;
1912 + int parent_irq;
1913 + int r;
1914 + struct irq_domain *cib_domain;
1915 + 
1916 + parent_irq = irq_of_parse_and_map(ciu_node, 0);
1917 + if (!parent_irq) {
1918 + pr_err("ERROR: Couldn't acquire parent_irq for %s.\n",
1919 + ciu_node->name);
1920 + return -EINVAL;
1921 + }
1922 + 
1923 + host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
1924 + raw_spin_lock_init(&host_data->lock);
1925 + 
1926 + addr = of_get_address(ciu_node, 0, NULL, NULL);
1927 + if (!addr) {
1928 + pr_err("ERROR: Couldn't acquire reg(0) %s.\n", ciu_node->name);
1929 + return -EINVAL;
1930 + }
1931 + host_data->raw_reg = (u64)phys_to_virt(
1932 + of_translate_address(ciu_node, addr));
1933 + 
1934 + addr = of_get_address(ciu_node, 1, NULL, NULL);
1935 + if (!addr) {
1936 + pr_err("ERROR: Couldn't acquire reg(1) %s.\n", ciu_node->name);
1937 + return -EINVAL;
1938 + }
1939 + host_data->en_reg = (u64)phys_to_virt(
1940 + of_translate_address(ciu_node, addr));
1941 + 
1942 + r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
1943 + if (r) {
1944 + pr_err("ERROR: Couldn't read cavium,max-bits from %s.\n",
1945 + ciu_node->name);
1946 + return r;
1947 + }
1948 + host_data->max_bits = val;
1949 + 
1950 + cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
1951 + &octeon_irq_domain_cib_ops,
1952 + host_data);
1953 + if (!cib_domain) {
1954 + pr_err("ERROR: Couldn't irq_domain_add_linear().\n");
1955 + return -ENOMEM;
1956 + }
1957 + 
1958 + cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
1959 + cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
1960 + 
1961 + r = request_irq(parent_irq, octeon_irq_cib_handler,
1962 + IRQF_NO_THREAD, "cib", cib_domain);
1963 + if (r) {
1964 + pr_err("request_irq cib failed %d\n", r);
1965 + return r;
1966 + }
1967 + pr_info("CIB interrupt controller probed: %llx %d\n",
1968 + host_data->raw_reg, host_data->max_bits);
1969 + return 0;
1970 + }
1971 + 
1972 + static struct of_device_id ciu_types[] __initdata = {
1973 + {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
1974 + {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
1975 + {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
1976 + {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
1977 + {}
1978 + };
2066 1979 
2067 1980 void __init arch_init_irq(void)
2068 1981 {
··· 2305 1750 cpumask_clear(irq_default_affinity);
2306 1751 cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
2307 1752 #endif
2308 - if (OCTEON_IS_MODEL(OCTEON_CN68XX))
2309 - octeon_irq_init_ciu2();
2310 - else
2311 - octeon_irq_init_ciu();
1753 + of_irq_init(ciu_types);
2312 1754 }
2313 1755 
2314 1756 asmlinkage void plat_irq_dispatch(void)
··· 2319 1767 cop0_cause &= cop0_status;
2320 1768 cop0_cause &= ST0_IM;
2321 1769 
2322 - if (unlikely(cop0_cause & STATUSF_IP2))
1770 + if (cop0_cause & STATUSF_IP2)
2323 1771 octeon_irq_ip2();
2324 - else if (unlikely(cop0_cause & STATUSF_IP3))
1772 + else if (cop0_cause & STATUSF_IP3)
2325 1773 octeon_irq_ip3();
2326 - else if (unlikely(cop0_cause
& STATUSF_IP4)) 1774 + else if (cop0_cause & STATUSF_IP4) 2327 1775 octeon_irq_ip4(); 2328 - else if (likely(cop0_cause)) 1776 + else if (cop0_cause) 2329 1777 do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE); 2330 1778 else 2331 1779 break;
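For reference, the do_IRQ() fallback at the end of plat_irq_dispatch() maps
the highest-priority pending line to a CPU IRQ: Cause bit IPn sits at bit
(8 + n), so fls() - 9 recovers n for the lines not already handled by the
IP2/IP3/IP4 fast paths. A standalone sketch of the decode (MIPS_CPU_IRQ_BASE
is platform-defined; 0 here is illustrative):

	#include <stdio.h>

	#define MIPS_CPU_IRQ_BASE 0	/* illustrative only */

	static int fls32(unsigned int x)	/* like the kernel's fls() */
	{
		return x ? 32 - __builtin_clz(x) : 0;
	}

	int main(void)
	{
		unsigned int cause = 1u << 15 | 1u << 10; /* IP7, IP2 pending */

		/* highest bit wins: fls = 16, 16 - 9 = 7 -> IP7 */
		printf("irq = %d\n", fls32(cause) - 9 + MIPS_CPU_IRQ_BASE);
		return 0;
	}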
+49 -7
arch/mips/cavium-octeon/setup.c
··· 41 41 #include <asm/octeon/octeon.h> 42 42 #include <asm/octeon/pci-octeon.h> 43 43 #include <asm/octeon/cvmx-mio-defs.h> 44 + #include <asm/octeon/cvmx-rst-defs.h> 44 45 45 46 extern struct plat_smp_ops octeon_smp_ops; 46 47 ··· 580 579 /* R/W If set, CVMSEG is available for loads/stores in user 581 580 * mode. */ 582 581 cvmmemctl.s.cvmsegenau = 0; 583 - /* R/W Size of local memory in cache blocks, 54 (6912 bytes) 584 - * is max legal value. */ 585 - cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE; 586 582 587 583 write_c0_cvmmemctl(cvmmemctl.u64); 588 584 585 + /* Setup of CVMSEG is done in kernel-entry-init.h */ 589 586 if (smp_processor_id() == 0) 590 587 pr_notice("CVMSEG size: %d cache lines (%d bytes)\n", 591 588 CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE, ··· 614 615 const char *arg; 615 616 char *p; 616 617 int i; 618 + u64 t; 617 619 int argc; 618 620 #ifdef CONFIG_CAVIUM_RESERVE32 619 621 int64_t addr = -1; ··· 654 654 sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz; 655 655 sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags; 656 656 657 - if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 657 + if (OCTEON_IS_OCTEON2()) { 658 658 /* I/O clock runs at a different rate than the CPU. */ 659 659 union cvmx_mio_rst_boot rst_boot; 660 660 rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT); 661 661 octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; 662 + } else if (OCTEON_IS_OCTEON3()) { 663 + /* I/O clock runs at a different rate than the CPU. */ 664 + union cvmx_rst_boot rst_boot; 665 + rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT); 666 + octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul; 662 667 } else { 663 668 octeon_io_clock_rate = sysinfo->cpu_clock_hz; 669 + } 670 + 671 + t = read_c0_cvmctl(); 672 + if ((t & (1ull << 27)) == 0) { 673 + /* 674 + * Setup the multiplier save/restore code if 675 + * CvmCtl[NOMUL] clear. 676 + */ 677 + void *save; 678 + void *save_end; 679 + void *restore; 680 + void *restore_end; 681 + int save_len; 682 + int restore_len; 683 + int save_max = (char *)octeon_mult_save_end - 684 + (char *)octeon_mult_save; 685 + int restore_max = (char *)octeon_mult_restore_end - 686 + (char *)octeon_mult_restore; 687 + if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) { 688 + save = octeon_mult_save3; 689 + save_end = octeon_mult_save3_end; 690 + restore = octeon_mult_restore3; 691 + restore_end = octeon_mult_restore3_end; 692 + } else { 693 + save = octeon_mult_save2; 694 + save_end = octeon_mult_save2_end; 695 + restore = octeon_mult_restore2; 696 + restore_end = octeon_mult_restore2_end; 697 + } 698 + save_len = (char *)save_end - (char *)save; 699 + restore_len = (char *)restore_end - (char *)restore; 700 + if (!WARN_ON(save_len > save_max || 701 + restore_len > restore_max)) { 702 + memcpy(octeon_mult_save, save, save_len); 703 + memcpy(octeon_mult_restore, restore, restore_len); 704 + } 664 705 } 665 706 666 707 /* ··· 1045 1004 1046 1005 void prom_free_prom_memory(void) 1047 1006 { 1048 - if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) { 1007 + if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) { 1049 1008 /* Check for presence of Core-14449 fix. 
*/ 1050 1009 u32 insn; 1051 1010 u32 *foo; ··· 1067 1026 panic("No PREF instruction at Core-14449 probe point."); 1068 1027 1069 1028 if (((insn >> 16) & 0x1f) != 28) 1070 - panic("Core-14449 WAR not in place (%04x).\n" 1071 - "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn); 1029 + panic("OCTEON II DCache prefetch workaround not in place (%04x).\n" 1030 + "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", 1031 + insn); 1072 1032 } 1073 1033 } 1074 1034
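For reference, the OCTEON II and OCTEON III branches above compute the I/O
clock the same way - a 50 MHz reference times the PNR multiplier - and only
the CSR holding pnr_mul differs (MIO_RST_BOOT vs RST_BOOT). A worked sketch
(the multiplier value is illustrative, not read from hardware):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t pnr_mul = 12;	/* stand-in for rst_boot.s.pnr_mul */

		/* 50000000 * pnr_mul, as in the setup code above -> 600 MHz */
		printf("io clock = %llu Hz\n",
		       (unsigned long long)(50000000ull * pnr_mul));
		return 0;
	}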
+193
arch/mips/configs/malta_qemu_32r6_defconfig
··· 1 + CONFIG_MIPS_MALTA=y 2 + CONFIG_CPU_LITTLE_ENDIAN=y 3 + CONFIG_CPU_MIPS32_R6=y 4 + CONFIG_PAGE_SIZE_16KB=y 5 + CONFIG_HZ_100=y 6 + CONFIG_SYSVIPC=y 7 + CONFIG_POSIX_MQUEUE=y 8 + CONFIG_AUDIT=y 9 + CONFIG_NO_HZ=y 10 + CONFIG_IKCONFIG=y 11 + CONFIG_IKCONFIG_PROC=y 12 + CONFIG_LOG_BUF_SHIFT=15 13 + CONFIG_SYSCTL_SYSCALL=y 14 + CONFIG_EMBEDDED=y 15 + CONFIG_SLAB=y 16 + CONFIG_MODULES=y 17 + CONFIG_MODULE_UNLOAD=y 18 + CONFIG_MODVERSIONS=y 19 + CONFIG_MODULE_SRCVERSION_ALL=y 20 + # CONFIG_BLK_DEV_BSG is not set 21 + CONFIG_PCI=y 22 + # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 23 + CONFIG_NET=y 24 + CONFIG_PACKET=y 25 + CONFIG_UNIX=y 26 + CONFIG_XFRM_USER=m 27 + CONFIG_NET_KEY=y 28 + CONFIG_INET=y 29 + CONFIG_IP_MULTICAST=y 30 + CONFIG_IP_ADVANCED_ROUTER=y 31 + CONFIG_IP_MULTIPLE_TABLES=y 32 + CONFIG_IP_ROUTE_MULTIPATH=y 33 + CONFIG_IP_ROUTE_VERBOSE=y 34 + CONFIG_IP_PNP=y 35 + CONFIG_IP_PNP_DHCP=y 36 + CONFIG_IP_PNP_BOOTP=y 37 + CONFIG_NET_IPIP=m 38 + CONFIG_IP_MROUTE=y 39 + CONFIG_IP_PIMSM_V1=y 40 + CONFIG_IP_PIMSM_V2=y 41 + CONFIG_SYN_COOKIES=y 42 + CONFIG_INET_AH=m 43 + CONFIG_INET_ESP=m 44 + CONFIG_INET_IPCOMP=m 45 + # CONFIG_INET_LRO is not set 46 + CONFIG_INET6_AH=m 47 + CONFIG_INET6_ESP=m 48 + CONFIG_INET6_IPCOMP=m 49 + CONFIG_IPV6_TUNNEL=m 50 + CONFIG_BRIDGE=m 51 + CONFIG_VLAN_8021Q=m 52 + CONFIG_ATALK=m 53 + CONFIG_DEV_APPLETALK=m 54 + CONFIG_IPDDP=m 55 + CONFIG_IPDDP_ENCAP=y 56 + CONFIG_NET_SCHED=y 57 + CONFIG_NET_SCH_CBQ=m 58 + CONFIG_NET_SCH_HTB=m 59 + CONFIG_NET_SCH_HFSC=m 60 + CONFIG_NET_SCH_PRIO=m 61 + CONFIG_NET_SCH_RED=m 62 + CONFIG_NET_SCH_SFQ=m 63 + CONFIG_NET_SCH_TEQL=m 64 + CONFIG_NET_SCH_TBF=m 65 + CONFIG_NET_SCH_GRED=m 66 + CONFIG_NET_SCH_DSMARK=m 67 + CONFIG_NET_SCH_NETEM=m 68 + CONFIG_NET_SCH_INGRESS=m 69 + CONFIG_NET_CLS_BASIC=m 70 + CONFIG_NET_CLS_TCINDEX=m 71 + CONFIG_NET_CLS_ROUTE4=m 72 + CONFIG_NET_CLS_FW=m 73 + CONFIG_NET_CLS_U32=m 74 + CONFIG_NET_CLS_RSVP=m 75 + CONFIG_NET_CLS_RSVP6=m 76 + CONFIG_NET_CLS_ACT=y 77 + CONFIG_NET_ACT_POLICE=y 78 + CONFIG_NET_CLS_IND=y 79 + # CONFIG_WIRELESS is not set 80 + CONFIG_DEVTMPFS=y 81 + CONFIG_BLK_DEV_LOOP=y 82 + CONFIG_BLK_DEV_CRYPTOLOOP=m 83 + CONFIG_IDE=y 84 + # CONFIG_IDE_PROC_FS is not set 85 + # CONFIG_IDEPCI_PCIBUS_ORDER is not set 86 + CONFIG_BLK_DEV_GENERIC=y 87 + CONFIG_BLK_DEV_PIIX=y 88 + CONFIG_SCSI=y 89 + CONFIG_BLK_DEV_SD=y 90 + CONFIG_CHR_DEV_SG=y 91 + # CONFIG_SCSI_LOWLEVEL is not set 92 + CONFIG_NETDEVICES=y 93 + # CONFIG_NET_VENDOR_3COM is not set 94 + # CONFIG_NET_VENDOR_ADAPTEC is not set 95 + # CONFIG_NET_VENDOR_ALTEON is not set 96 + CONFIG_PCNET32=y 97 + # CONFIG_NET_VENDOR_ATHEROS is not set 98 + # CONFIG_NET_VENDOR_BROADCOM is not set 99 + # CONFIG_NET_VENDOR_BROCADE is not set 100 + # CONFIG_NET_VENDOR_CHELSIO is not set 101 + # CONFIG_NET_VENDOR_CISCO is not set 102 + # CONFIG_NET_VENDOR_DEC is not set 103 + # CONFIG_NET_VENDOR_DLINK is not set 104 + # CONFIG_NET_VENDOR_EMULEX is not set 105 + # CONFIG_NET_VENDOR_EXAR is not set 106 + # CONFIG_NET_VENDOR_HP is not set 107 + # CONFIG_NET_VENDOR_INTEL is not set 108 + # CONFIG_NET_VENDOR_MARVELL is not set 109 + # CONFIG_NET_VENDOR_MELLANOX is not set 110 + # CONFIG_NET_VENDOR_MICREL is not set 111 + # CONFIG_NET_VENDOR_MYRI is not set 112 + # CONFIG_NET_VENDOR_NATSEMI is not set 113 + # CONFIG_NET_VENDOR_NVIDIA is not set 114 + # CONFIG_NET_VENDOR_OKI is not set 115 + # CONFIG_NET_PACKET_ENGINE is not set 116 + # CONFIG_NET_VENDOR_QLOGIC is not set 117 + # CONFIG_NET_VENDOR_REALTEK is not set 118 + # CONFIG_NET_VENDOR_RDC is not set 119 
+ # CONFIG_NET_VENDOR_SEEQ is not set 120 + # CONFIG_NET_VENDOR_SILAN is not set 121 + # CONFIG_NET_VENDOR_SIS is not set 122 + # CONFIG_NET_VENDOR_SMSC is not set 123 + # CONFIG_NET_VENDOR_STMICRO is not set 124 + # CONFIG_NET_VENDOR_SUN is not set 125 + # CONFIG_NET_VENDOR_TEHUTI is not set 126 + # CONFIG_NET_VENDOR_TI is not set 127 + # CONFIG_NET_VENDOR_TOSHIBA is not set 128 + # CONFIG_NET_VENDOR_VIA is not set 129 + # CONFIG_NET_VENDOR_WIZNET is not set 130 + # CONFIG_WLAN is not set 131 + # CONFIG_VT is not set 132 + CONFIG_LEGACY_PTY_COUNT=4 133 + CONFIG_SERIAL_8250=y 134 + CONFIG_SERIAL_8250_CONSOLE=y 135 + CONFIG_HW_RANDOM=y 136 + # CONFIG_HWMON is not set 137 + CONFIG_FB=y 138 + CONFIG_FIRMWARE_EDID=y 139 + CONFIG_FB_MATROX=y 140 + CONFIG_FB_MATROX_G=y 141 + CONFIG_USB=y 142 + CONFIG_USB_EHCI_HCD=y 143 + # CONFIG_USB_EHCI_TT_NEWSCHED is not set 144 + CONFIG_USB_UHCI_HCD=y 145 + CONFIG_USB_STORAGE=y 146 + CONFIG_NEW_LEDS=y 147 + CONFIG_LEDS_CLASS=y 148 + CONFIG_LEDS_TRIGGERS=y 149 + CONFIG_LEDS_TRIGGER_TIMER=y 150 + CONFIG_LEDS_TRIGGER_IDE_DISK=y 151 + CONFIG_LEDS_TRIGGER_HEARTBEAT=y 152 + CONFIG_LEDS_TRIGGER_BACKLIGHT=y 153 + CONFIG_LEDS_TRIGGER_DEFAULT_ON=y 154 + CONFIG_RTC_CLASS=y 155 + CONFIG_RTC_DRV_CMOS=y 156 + CONFIG_EXT2_FS=y 157 + CONFIG_EXT3_FS=y 158 + # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 159 + CONFIG_XFS_FS=y 160 + CONFIG_XFS_QUOTA=y 161 + CONFIG_XFS_POSIX_ACL=y 162 + CONFIG_QUOTA=y 163 + CONFIG_QFMT_V2=y 164 + CONFIG_MSDOS_FS=m 165 + CONFIG_VFAT_FS=m 166 + CONFIG_PROC_KCORE=y 167 + CONFIG_TMPFS=y 168 + CONFIG_NFS_FS=y 169 + CONFIG_ROOT_NFS=y 170 + CONFIG_CIFS=m 171 + CONFIG_CIFS_WEAK_PW_HASH=y 172 + CONFIG_CIFS_XATTR=y 173 + CONFIG_CIFS_POSIX=y 174 + CONFIG_NLS_CODEPAGE_437=m 175 + CONFIG_NLS_ISO8859_1=m 176 + # CONFIG_FTRACE is not set 177 + CONFIG_CRYPTO_NULL=m 178 + CONFIG_CRYPTO_PCBC=m 179 + CONFIG_CRYPTO_HMAC=y 180 + CONFIG_CRYPTO_MICHAEL_MIC=m 181 + CONFIG_CRYPTO_SHA512=m 182 + CONFIG_CRYPTO_TGR192=m 183 + CONFIG_CRYPTO_WP512=m 184 + CONFIG_CRYPTO_ANUBIS=m 185 + CONFIG_CRYPTO_BLOWFISH=m 186 + CONFIG_CRYPTO_CAST5=m 187 + CONFIG_CRYPTO_CAST6=m 188 + CONFIG_CRYPTO_KHAZAD=m 189 + CONFIG_CRYPTO_SERPENT=m 190 + CONFIG_CRYPTO_TEA=m 191 + CONFIG_CRYPTO_TWOFISH=m 192 + # CONFIG_CRYPTO_ANSI_CPRNG is not set 193 + # CONFIG_CRYPTO_HW is not set
+16 -10
arch/mips/fw/arc/misc.c
··· 9 9 * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org) 10 10 * Copyright (C) 1999 Silicon Graphics, Inc. 11 11 */ 12 + #include <linux/compiler.h> 12 13 #include <linux/init.h> 13 14 #include <linux/kernel.h> 14 15 #include <linux/irqflags.h> ··· 20 19 #include <asm/sgialib.h> 21 20 #include <asm/bootinfo.h> 22 21 23 - VOID 22 + VOID __noreturn 24 23 ArcHalt(VOID) 25 24 { 26 25 bc_disable(); 27 26 local_irq_disable(); 28 27 ARC_CALL0(halt); 29 - never: goto never; 28 + 29 + unreachable(); 30 30 } 31 31 32 - VOID 32 + VOID __noreturn 33 33 ArcPowerDown(VOID) 34 34 { 35 35 bc_disable(); 36 36 local_irq_disable(); 37 37 ARC_CALL0(pdown); 38 - never: goto never; 38 + 39 + unreachable(); 39 40 } 40 41 41 42 /* XXX is this a soft reset basically? XXX */ 42 - VOID 43 + VOID __noreturn 43 44 ArcRestart(VOID) 44 45 { 45 46 bc_disable(); 46 47 local_irq_disable(); 47 48 ARC_CALL0(restart); 48 - never: goto never; 49 + 50 + unreachable(); 49 51 } 50 52 51 - VOID 53 + VOID __noreturn 52 54 ArcReboot(VOID) 53 55 { 54 56 bc_disable(); 55 57 local_irq_disable(); 56 58 ARC_CALL0(reboot); 57 - never: goto never; 59 + 60 + unreachable(); 58 61 } 59 62 60 - VOID 63 + VOID __noreturn 61 64 ArcEnterInteractiveMode(VOID) 62 65 { 63 66 bc_disable(); 64 67 local_irq_disable(); 65 68 ARC_CALL0(imode); 66 - never: goto never; 69 + 70 + unreachable(); 67 71 } 68 72 69 73 LONG
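For reference, the "never: goto never" spin these hunks delete was a
hand-rolled way to tell the compiler that the PROM call does not return;
__noreturn plus unreachable() states the same thing declaratively and lets
GCC discard the dead tail. A standalone sketch of the pattern, where
firmware_halt() is a stand-in for the ARC_CALL0() firmware entry:

	#include <stdlib.h>

	#define __noreturn __attribute__((__noreturn__))
	#define unreachable() __builtin_unreachable()

	static void firmware_halt(void)	/* stand-in for ARC_CALL0(halt) */
	{
		exit(0);
	}

	static __noreturn void my_halt(void)
	{
		firmware_halt();
		unreachable();	/* promise the optimizer we never get here */
	}

	int main(void)
	{
		my_halt();
	}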
+1
arch/mips/include/asm/Kbuild
··· 1 1 # MIPS headers
2 + generic-$(CONFIG_GENERIC_CSUM) += checksum.h
2 3 generic-y += cputime.h
3 4 generic-y += current.h
4 5 generic-y += dma-contiguous.h
+10 -8
arch/mips/include/asm/asmmacro.h
··· 19 19 #include <asm/asmmacro-64.h>
20 20 #endif
21 21 
22 - #ifdef CONFIG_CPU_MIPSR2
22 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
23 23 .macro local_irq_enable reg=t0
24 24 ei
25 25 irq_enable_hazard
··· 104 104 .endm
105 105 
106 106 .macro fpu_save_double thread status tmp
107 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
107 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
108 + defined(CONFIG_CPU_MIPS32_R6)
108 109 sll \tmp, \status, 5
109 110 bgez \tmp, 10f
110 111 fpu_save_16odd \thread
··· 161 160 .endm
162 161 
163 162 .macro fpu_restore_double thread status tmp
164 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
163 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
164 + defined(CONFIG_CPU_MIPS32_R6)
165 165 sll \tmp, \status, 5
166 166 bgez \tmp, 10f # 16 register mode?
167 167 
··· 172 170 fpu_restore_16even \thread \tmp
173 171 .endm
174 172 
175 - #ifdef CONFIG_CPU_MIPSR2
173 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
176 174 .macro _EXT rd, rs, p, s
177 175 ext \rd, \rs, \p, \s
178 176 .endm
179 - #else /* !CONFIG_CPU_MIPSR2 */
177 + #else /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
180 178 .macro _EXT rd, rs, p, s
181 179 srl \rd, \rs, \p
182 180 andi \rd, \rd, (1 << \s) - 1
183 181 .endm
184 - #endif /* !CONFIG_CPU_MIPSR2 */
182 + #endif /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
185 183 
186 184 /*
187 185 * Temporary until all gas have MT ASE support
··· 306 304 .set push
307 305 .set noat
308 306 SET_HARDFLOAT
309 - add $1, \base, \off
307 + addu $1, \base, \off
310 308 .word LDD_MSA_INSN | (\wd << 6)
311 309 .set pop
312 310 .endm
··· 315 313 .set push
316 314 .set noat
317 315 SET_HARDFLOAT
318 - add $1, \base, \off
316 + addu $1, \base, \off
319 317 .word STD_MSA_INSN | (\wd << 6)
320 318 .set pop
321 319 .endm
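For reference, the pre-R2 _EXT fallback above is the classic shift-and-mask
expansion of the ext instruction: ext rd, rs, p, s extracts s bits starting
at bit p. A quick standalone check of the equivalence (values illustrative):

	#include <assert.h>
	#include <stdint.h>

	/* (rs >> p) & ((1 << s) - 1), as in the srl/andi fallback */
	static uint32_t ext_sw(uint32_t rs, unsigned int p, unsigned int s)
	{
		return (rs >> p) & ((1u << s) - 1);
	}

	int main(void)
	{
		/* 4 bits at position 8 of 0xabcd1234 -> 0x2 */
		assert(ext_sw(0xabcd1234, 8, 4) == 0x2);
		return 0;
	}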
+21 -21
arch/mips/include/asm/atomic.h
··· 54 54 " sc %0, %1 \n" \ 55 55 " beqzl %0, 1b \n" \ 56 56 " .set mips0 \n" \ 57 - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 57 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 58 58 : "Ir" (i)); \ 59 59 } else if (kernel_uses_llsc) { \ 60 60 int temp; \ 61 61 \ 62 62 do { \ 63 63 __asm__ __volatile__( \ 64 - " .set arch=r4000 \n" \ 64 + " .set "MIPS_ISA_LEVEL" \n" \ 65 65 " ll %0, %1 # atomic_" #op "\n" \ 66 66 " " #asm_op " %0, %2 \n" \ 67 67 " sc %0, %1 \n" \ 68 68 " .set mips0 \n" \ 69 - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 69 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 70 70 : "Ir" (i)); \ 71 71 } while (unlikely(!temp)); \ 72 72 } else { \ ··· 97 97 " " #asm_op " %0, %1, %3 \n" \ 98 98 " .set mips0 \n" \ 99 99 : "=&r" (result), "=&r" (temp), \ 100 - "+" GCC_OFF12_ASM() (v->counter) \ 100 + "+" GCC_OFF_SMALL_ASM() (v->counter) \ 101 101 : "Ir" (i)); \ 102 102 } else if (kernel_uses_llsc) { \ 103 103 int temp; \ 104 104 \ 105 105 do { \ 106 106 __asm__ __volatile__( \ 107 - " .set arch=r4000 \n" \ 107 + " .set "MIPS_ISA_LEVEL" \n" \ 108 108 " ll %1, %2 # atomic_" #op "_return \n" \ 109 109 " " #asm_op " %0, %1, %3 \n" \ 110 110 " sc %0, %2 \n" \ 111 111 " .set mips0 \n" \ 112 112 : "=&r" (result), "=&r" (temp), \ 113 - "+" GCC_OFF12_ASM() (v->counter) \ 113 + "+" GCC_OFF_SMALL_ASM() (v->counter) \ 114 114 : "Ir" (i)); \ 115 115 } while (unlikely(!result)); \ 116 116 \ ··· 171 171 "1: \n" 172 172 " .set mips0 \n" 173 173 : "=&r" (result), "=&r" (temp), 174 - "+" GCC_OFF12_ASM() (v->counter) 175 - : "Ir" (i), GCC_OFF12_ASM() (v->counter) 174 + "+" GCC_OFF_SMALL_ASM() (v->counter) 175 + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) 176 176 : "memory"); 177 177 } else if (kernel_uses_llsc) { 178 178 int temp; 179 179 180 180 __asm__ __volatile__( 181 - " .set arch=r4000 \n" 181 + " .set "MIPS_ISA_LEVEL" \n" 182 182 "1: ll %1, %2 # atomic_sub_if_positive\n" 183 183 " subu %0, %1, %3 \n" 184 184 " bltz %0, 1f \n" ··· 190 190 "1: \n" 191 191 " .set mips0 \n" 192 192 : "=&r" (result), "=&r" (temp), 193 - "+" GCC_OFF12_ASM() (v->counter) 193 + "+" GCC_OFF_SMALL_ASM() (v->counter) 194 194 : "Ir" (i)); 195 195 } else { 196 196 unsigned long flags; ··· 333 333 " scd %0, %1 \n" \ 334 334 " beqzl %0, 1b \n" \ 335 335 " .set mips0 \n" \ 336 - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 336 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 337 337 : "Ir" (i)); \ 338 338 } else if (kernel_uses_llsc) { \ 339 339 long temp; \ 340 340 \ 341 341 do { \ 342 342 __asm__ __volatile__( \ 343 - " .set arch=r4000 \n" \ 343 + " .set "MIPS_ISA_LEVEL" \n" \ 344 344 " lld %0, %1 # atomic64_" #op "\n" \ 345 345 " " #asm_op " %0, %2 \n" \ 346 346 " scd %0, %1 \n" \ 347 347 " .set mips0 \n" \ 348 - : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \ 348 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \ 349 349 : "Ir" (i)); \ 350 350 } while (unlikely(!temp)); \ 351 351 } else { \ ··· 376 376 " " #asm_op " %0, %1, %3 \n" \ 377 377 " .set mips0 \n" \ 378 378 : "=&r" (result), "=&r" (temp), \ 379 - "+" GCC_OFF12_ASM() (v->counter) \ 379 + "+" GCC_OFF_SMALL_ASM() (v->counter) \ 380 380 : "Ir" (i)); \ 381 381 } else if (kernel_uses_llsc) { \ 382 382 long temp; \ 383 383 \ 384 384 do { \ 385 385 __asm__ __volatile__( \ 386 - " .set arch=r4000 \n" \ 386 + " .set "MIPS_ISA_LEVEL" \n" \ 387 387 " lld %1, %2 # atomic64_" #op "_return\n" \ 388 388 " " #asm_op " %0, %1, %3 \n" \ 389 389 " scd %0, %2 \n" \ 390 390 " .set mips0 \n" \ 391 391 : "=&r" (result), "=&r" 
(temp), \ 392 - "=" GCC_OFF12_ASM() (v->counter) \ 393 - : "Ir" (i), GCC_OFF12_ASM() (v->counter) \ 392 + "=" GCC_OFF_SMALL_ASM() (v->counter) \ 393 + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \ 394 394 : "memory"); \ 395 395 } while (unlikely(!result)); \ 396 396 \ ··· 452 452 "1: \n" 453 453 " .set mips0 \n" 454 454 : "=&r" (result), "=&r" (temp), 455 - "=" GCC_OFF12_ASM() (v->counter) 456 - : "Ir" (i), GCC_OFF12_ASM() (v->counter) 455 + "=" GCC_OFF_SMALL_ASM() (v->counter) 456 + : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) 457 457 : "memory"); 458 458 } else if (kernel_uses_llsc) { 459 459 long temp; 460 460 461 461 __asm__ __volatile__( 462 - " .set arch=r4000 \n" 462 + " .set "MIPS_ISA_LEVEL" \n" 463 463 "1: lld %1, %2 # atomic64_sub_if_positive\n" 464 464 " dsubu %0, %1, %3 \n" 465 465 " bltz %0, 1f \n" ··· 471 471 "1: \n" 472 472 " .set mips0 \n" 473 473 : "=&r" (result), "=&r" (temp), 474 - "+" GCC_OFF12_ASM() (v->counter) 474 + "+" GCC_OFF_SMALL_ASM() (v->counter) 475 475 : "Ir" (i)); 476 476 } else { 477 477 unsigned long flags;
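For reference, every hunk in this file makes the same substitution: the old
".set arch=r4000" override becomes the MIPS_ISA_LEVEL/MIPS_ISA_ARCH_LEVEL
string from <asm/compiler.h>, so R6 kernels assemble the ll/sc sequences
with their new (9-bit offset) encodings - which is also why GCC_OFF12_ASM
becomes GCC_OFF_SMALL_ASM. The retry loop itself is unchanged; its shape in
portable C11, as a sketch rather than the kernel's inline asm:

	#include <stdatomic.h>
	#include <stdio.h>

	/* ll/sc loop: reload, compute, try to store, retry if raced */
	static int atomic_add_return_sketch(int i, _Atomic int *v)
	{
		int old, new;

		do {
			old = atomic_load_explicit(v, memory_order_relaxed);
			new = old + i;
		} while (!atomic_compare_exchange_weak(v, &old, new));
		return new;
	}

	int main(void)
	{
		_Atomic int v = 40;

		printf("%d\n", atomic_add_return_sketch(2, &v));	/* 42 */
		return 0;
	}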
+32 -32
arch/mips/include/asm/bitops.h
··· 79 79 " " __SC "%0, %1 \n" 80 80 " beqzl %0, 1b \n" 81 81 " .set mips0 \n" 82 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*m) 83 - : "ir" (1UL << bit), GCC_OFF12_ASM() (*m)); 84 - #ifdef CONFIG_CPU_MIPSR2 82 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m) 83 + : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m)); 84 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 85 85 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 86 86 do { 87 87 __asm__ __volatile__( 88 88 " " __LL "%0, %1 # set_bit \n" 89 89 " " __INS "%0, %3, %2, 1 \n" 90 90 " " __SC "%0, %1 \n" 91 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 91 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 92 92 : "ir" (bit), "r" (~0)); 93 93 } while (unlikely(!temp)); 94 - #endif /* CONFIG_CPU_MIPSR2 */ 94 + #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 95 95 } else if (kernel_uses_llsc) { 96 96 do { 97 97 __asm__ __volatile__( 98 - " .set arch=r4000 \n" 98 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 99 99 " " __LL "%0, %1 # set_bit \n" 100 100 " or %0, %2 \n" 101 101 " " __SC "%0, %1 \n" 102 102 " .set mips0 \n" 103 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 103 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 104 104 : "ir" (1UL << bit)); 105 105 } while (unlikely(!temp)); 106 106 } else ··· 131 131 " " __SC "%0, %1 \n" 132 132 " beqzl %0, 1b \n" 133 133 " .set mips0 \n" 134 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 134 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 135 135 : "ir" (~(1UL << bit))); 136 - #ifdef CONFIG_CPU_MIPSR2 136 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 137 137 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 138 138 do { 139 139 __asm__ __volatile__( 140 140 " " __LL "%0, %1 # clear_bit \n" 141 141 " " __INS "%0, $0, %2, 1 \n" 142 142 " " __SC "%0, %1 \n" 143 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 143 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 144 144 : "ir" (bit)); 145 145 } while (unlikely(!temp)); 146 - #endif /* CONFIG_CPU_MIPSR2 */ 146 + #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 147 147 } else if (kernel_uses_llsc) { 148 148 do { 149 149 __asm__ __volatile__( 150 - " .set arch=r4000 \n" 150 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 151 151 " " __LL "%0, %1 # clear_bit \n" 152 152 " and %0, %2 \n" 153 153 " " __SC "%0, %1 \n" 154 154 " .set mips0 \n" 155 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 155 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 156 156 : "ir" (~(1UL << bit))); 157 157 } while (unlikely(!temp)); 158 158 } else ··· 197 197 " " __SC "%0, %1 \n" 198 198 " beqzl %0, 1b \n" 199 199 " .set mips0 \n" 200 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 200 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 201 201 : "ir" (1UL << bit)); 202 202 } else if (kernel_uses_llsc) { 203 203 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); ··· 205 205 206 206 do { 207 207 __asm__ __volatile__( 208 - " .set arch=r4000 \n" 208 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 209 209 " " __LL "%0, %1 # change_bit \n" 210 210 " xor %0, %2 \n" 211 211 " " __SC "%0, %1 \n" 212 212 " .set mips0 \n" 213 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m) 213 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m) 214 214 : "ir" (1UL << bit)); 215 215 } while (unlikely(!temp)); 216 216 } else ··· 245 245 " beqzl %2, 1b \n" 246 246 " and %2, %0, %3 \n" 247 247 " .set mips0 \n" 248 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 248 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 249 249 : "r" (1UL << bit) 250 250 : "memory"); 251 251 } else if (kernel_uses_llsc) { ··· 254 254 
255 255 do { 256 256 __asm__ __volatile__( 257 - " .set arch=r4000 \n" 257 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 258 258 " " __LL "%0, %1 # test_and_set_bit \n" 259 259 " or %2, %0, %3 \n" 260 260 " " __SC "%2, %1 \n" 261 261 " .set mips0 \n" 262 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 262 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 263 263 : "r" (1UL << bit) 264 264 : "memory"); 265 265 } while (unlikely(!res)); ··· 308 308 309 309 do { 310 310 __asm__ __volatile__( 311 - " .set arch=r4000 \n" 311 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 312 312 " " __LL "%0, %1 # test_and_set_bit \n" 313 313 " or %2, %0, %3 \n" 314 314 " " __SC "%2, %1 \n" 315 315 " .set mips0 \n" 316 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 316 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 317 317 : "r" (1UL << bit) 318 318 : "memory"); 319 319 } while (unlikely(!res)); ··· 355 355 " beqzl %2, 1b \n" 356 356 " and %2, %0, %3 \n" 357 357 " .set mips0 \n" 358 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 358 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 359 359 : "r" (1UL << bit) 360 360 : "memory"); 361 - #ifdef CONFIG_CPU_MIPSR2 361 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 362 362 } else if (kernel_uses_llsc && __builtin_constant_p(nr)) { 363 363 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 364 364 unsigned long temp; ··· 369 369 " " __EXT "%2, %0, %3, 1 \n" 370 370 " " __INS "%0, $0, %3, 1 \n" 371 371 " " __SC "%0, %1 \n" 372 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 372 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 373 373 : "ir" (bit) 374 374 : "memory"); 375 375 } while (unlikely(!temp)); ··· 380 380 381 381 do { 382 382 __asm__ __volatile__( 383 - " .set arch=r4000 \n" 383 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 384 384 " " __LL "%0, %1 # test_and_clear_bit \n" 385 385 " or %2, %0, %3 \n" 386 386 " xor %2, %3 \n" 387 387 " " __SC "%2, %1 \n" 388 388 " .set mips0 \n" 389 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 389 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 390 390 : "r" (1UL << bit) 391 391 : "memory"); 392 392 } while (unlikely(!res)); ··· 428 428 " beqzl %2, 1b \n" 429 429 " and %2, %0, %3 \n" 430 430 " .set mips0 \n" 431 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 431 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 432 432 : "r" (1UL << bit) 433 433 : "memory"); 434 434 } else if (kernel_uses_llsc) { ··· 437 437 438 438 do { 439 439 __asm__ __volatile__( 440 - " .set arch=r4000 \n" 440 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 441 441 " " __LL "%0, %1 # test_and_change_bit \n" 442 442 " xor %2, %0, %3 \n" 443 443 " " __SC "\t%2, %1 \n" 444 444 " .set mips0 \n" 445 - : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res) 445 + : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res) 446 446 : "r" (1UL << bit) 447 447 : "memory"); 448 448 } while (unlikely(!res)); ··· 485 485 __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { 486 486 __asm__( 487 487 " .set push \n" 488 - " .set mips32 \n" 488 + " .set "MIPS_ISA_LEVEL" \n" 489 489 " clz %0, %1 \n" 490 490 " .set pop \n" 491 491 : "=r" (num) ··· 498 498 __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) { 499 499 __asm__( 500 500 " .set push \n" 501 - " .set mips64 \n" 501 + " .set "MIPS_ISA_LEVEL" \n" 502 502 " dclz %0, %1 \n" 503 503 " .set pop \n" 504 504 : "=r" (num) ··· 562 562 if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) { 563 563 __asm__( 
564 564 " .set push \n" 565 - " .set mips32 \n" 565 + " .set "MIPS_ISA_LEVEL" \n" 566 566 " clz %0, %1 \n" 567 567 " .set pop \n" 568 568 : "=r" (x)
+19 -26
arch/mips/include/asm/checksum.h
··· 12 12 #ifndef _ASM_CHECKSUM_H
13 13 #define _ASM_CHECKSUM_H
14 14 
15 + #ifdef CONFIG_GENERIC_CSUM
16 + #include <asm-generic/checksum.h>
17 + #else
18 + 
15 19 #include <linux/in6.h>
16 20 
17 21 #include <asm/uaccess.h>
··· 103 99 */
104 100 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
105 101 int len, __wsum sum);
102 + #define csum_partial_copy_nocheck csum_partial_copy_nocheck
106 103 
107 104 /*
108 105 * Fold a partial checksum without adding pseudo headers
109 106 */
110 - static inline __sum16 csum_fold(__wsum sum)
107 + static inline __sum16 csum_fold(__wsum csum)
111 108 {
112 - __asm__(
113 - " .set push # csum_fold\n"
114 - " .set noat \n"
115 - " sll $1, %0, 16 \n"
116 - " addu %0, $1 \n"
117 - " sltu $1, %0, $1 \n"
118 - " srl %0, %0, 16 \n"
119 - " addu %0, $1 \n"
120 - " xori %0, 0xffff \n"
121 - " .set pop"
122 - : "=r" (sum)
123 - : "0" (sum));
109 + u32 sum = (__force u32)csum;
124 110 
125 - return (__force __sum16)sum;
111 + sum += (sum << 16);
112 + csum = (sum < csum);
113 + sum >>= 16;
114 + sum += csum;
115 + 
116 + return (__force __sum16)~sum;
126 117 }
118 + #define csum_fold csum_fold
127 119 
128 120 /*
129 121 * This is a version of ip_compute_csum() optimized for IP headers,
··· 158 158 
159 159 return csum_fold(csum);
160 160 }
161 + #define ip_fast_csum ip_fast_csum
161 162 
162 163 static inline __wsum csum_tcpudp_nofold(__be32 saddr,
163 164 __be32 daddr, unsigned short len, unsigned short proto,
··· 201 200 
202 201 return sum;
203 202 }
204 - 
205 - /*
206 - * computes the checksum of the TCP/UDP pseudo-header
207 - * returns a 16-bit checksum, already complemented
208 - */
209 - static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
210 - unsigned short len,
211 - unsigned short proto,
212 - __wsum sum)
213 - {
214 - return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
215 - }
203 + #define csum_tcpudp_nofold csum_tcpudp_nofold
216 204 
217 205 /*
218 206 * this routine is used for miscellaneous IP-like checksums, mainly
··· 276 286 
277 287 return csum_fold(sum);
278 288 }
289 + 
290 + #include <asm-generic/checksum.h>
291 + #endif /* CONFIG_GENERIC_CSUM */
279 292 
280 293 #endif /* _ASM_CHECKSUM_H */
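For reference, the new csum_fold() above replaces hand-written assembly with
C the compiler can schedule for any ISA revision: add the low half of the
32-bit sum into the high half, propagate the end-around carry, and return
the one's complement. A standalone worked check (the input value is
arbitrary):

	#include <assert.h>
	#include <stdint.h>

	static uint16_t fold(uint32_t csum)	/* same steps as csum_fold() */
	{
		uint32_t sum = csum;

		sum += sum << 16;	/* low half added into high half */
		csum = sum < csum;	/* carry out of the 32-bit add */
		sum >>= 16;
		sum += csum;		/* end-around carry */
		return (uint16_t)~sum;
	}

	int main(void)
	{
		/* 0x1234 + 0x5678 = 0x68ac; ~0x68ac = 0x9753 */
		assert(fold(0x12345678) == 0x9753);
		return 0;
	}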
+17 -17
arch/mips/include/asm/cmpxchg.h
··· 31 31 " sc %2, %1 \n" 32 32 " beqzl %2, 1b \n" 33 33 " .set mips0 \n" 34 - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) 35 - : GCC_OFF12_ASM() (*m), "Jr" (val) 34 + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) 35 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) 36 36 : "memory"); 37 37 } else if (kernel_uses_llsc) { 38 38 unsigned long dummy; 39 39 40 40 do { 41 41 __asm__ __volatile__( 42 - " .set arch=r4000 \n" 42 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 43 43 " ll %0, %3 # xchg_u32 \n" 44 44 " .set mips0 \n" 45 45 " move %2, %z4 \n" 46 - " .set arch=r4000 \n" 46 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 47 47 " sc %2, %1 \n" 48 48 " .set mips0 \n" 49 - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), 49 + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), 50 50 "=&r" (dummy) 51 - : GCC_OFF12_ASM() (*m), "Jr" (val) 51 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) 52 52 : "memory"); 53 53 } while (unlikely(!dummy)); 54 54 } else { ··· 82 82 " scd %2, %1 \n" 83 83 " beqzl %2, 1b \n" 84 84 " .set mips0 \n" 85 - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy) 86 - : GCC_OFF12_ASM() (*m), "Jr" (val) 85 + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy) 86 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) 87 87 : "memory"); 88 88 } else if (kernel_uses_llsc) { 89 89 unsigned long dummy; 90 90 91 91 do { 92 92 __asm__ __volatile__( 93 - " .set arch=r4000 \n" 93 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 94 94 " lld %0, %3 # xchg_u64 \n" 95 95 " move %2, %z4 \n" 96 96 " scd %2, %1 \n" 97 97 " .set mips0 \n" 98 - : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), 98 + : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), 99 99 "=&r" (dummy) 100 - : GCC_OFF12_ASM() (*m), "Jr" (val) 100 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (val) 101 101 : "memory"); 102 102 } while (unlikely(!dummy)); 103 103 } else { ··· 158 158 " beqzl $1, 1b \n" \ 159 159 "2: \n" \ 160 160 " .set pop \n" \ 161 - : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ 162 - : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ 161 + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ 162 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ 163 163 : "memory"); \ 164 164 } else if (kernel_uses_llsc) { \ 165 165 __asm__ __volatile__( \ 166 166 " .set push \n" \ 167 167 " .set noat \n" \ 168 - " .set arch=r4000 \n" \ 168 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 169 169 "1: " ld " %0, %2 # __cmpxchg_asm \n" \ 170 170 " bne %0, %z3, 2f \n" \ 171 171 " .set mips0 \n" \ 172 172 " move $1, %z4 \n" \ 173 - " .set arch=r4000 \n" \ 173 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 174 174 " " st " $1, %1 \n" \ 175 175 " beqz $1, 1b \n" \ 176 176 " .set pop \n" \ 177 177 "2: \n" \ 178 - : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \ 179 - : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \ 178 + : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m) \ 179 + : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new) \ 180 180 : "memory"); \ 181 181 } else { \ 182 182 unsigned long __flags; \
+21 -3
arch/mips/include/asm/compiler.h
··· 16 16 #define GCC_REG_ACCUM "accum"
17 17 #endif
18 18 
19 + #ifdef CONFIG_CPU_MIPSR6
20 + /* All MIPS R6 toolchains support the ZC constraint */
21 + #define GCC_OFF_SMALL_ASM() "ZC"
22 + #else
19 23 #ifndef CONFIG_CPU_MICROMIPS
20 24 #define GCC_OFF_SMALL_ASM() "R"
21 25 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
22 26 #define GCC_OFF_SMALL_ASM() "ZC"
23 27 #else
24 28 #error "microMIPS compilation unsupported with GCC older than 4.9"
25 29 #endif /* CONFIG_CPU_MICROMIPS */
26 30 #endif /* CONFIG_CPU_MIPSR6 */
31 + 
32 + #ifdef CONFIG_CPU_MIPSR6
33 + #define MIPS_ISA_LEVEL "mips64r6"
34 + #define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
35 + #define MIPS_ISA_LEVEL_RAW mips64r6
36 + #define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
37 + #else
38 + /* MIPS64 is a superset of MIPS32 */
39 + #define MIPS_ISA_LEVEL "mips64r2"
40 + #define MIPS_ISA_ARCH_LEVEL "arch=r4000"
41 + #define MIPS_ISA_LEVEL_RAW mips64r2
42 + #define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
43 + #endif /* CONFIG_CPU_MIPSR6 */
26 44 
27 45 #endif /* _ASM_COMPILER_H */
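For reference, MIPS_ISA_LEVEL and MIPS_ISA_ARCH_LEVEL are string literals
precisely so the asm templates elsewhere in this series can splice them:
adjacent string literals concatenate, turning " .set "MIPS_ISA_LEVEL" \n"
into a single directive. The _RAW variants stay unquoted for contexts that
paste tokens instead. A tiny standalone demonstration (showing the non-R6
value):

	#include <stdio.h>

	#define MIPS_ISA_LEVEL "mips64r2"	/* as in the non-R6 branch */

	int main(void)
	{
		/* adjacent literals merge at compile time */
		const char *tmpl = "	.set	" MIPS_ISA_LEVEL "	\n";

		fputs(tmpl, stdout);	/* prints:	.set	mips64r2 */
		return 0;
	}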
+23 -5
arch/mips/include/asm/cpu-features.h
··· 38 38 #ifndef cpu_has_maar 39 39 #define cpu_has_maar (cpu_data[0].options & MIPS_CPU_MAAR) 40 40 #endif 41 + #ifndef cpu_has_rw_llb 42 + #define cpu_has_rw_llb (cpu_data[0].options & MIPS_CPU_RW_LLB) 43 + #endif 41 44 42 45 /* 43 46 * For the moment we don't consider R6000 and R8000 so we can assume that ··· 174 171 #endif 175 172 #endif 176 173 174 + #ifndef cpu_has_mips_1 175 + # define cpu_has_mips_1 (!cpu_has_mips_r6) 176 + #endif 177 177 #ifndef cpu_has_mips_2 178 178 # define cpu_has_mips_2 (cpu_data[0].isa_level & MIPS_CPU_ISA_II) 179 179 #endif ··· 195 189 #ifndef cpu_has_mips32r2 196 190 # define cpu_has_mips32r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2) 197 191 #endif 192 + #ifndef cpu_has_mips32r6 193 + # define cpu_has_mips32r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6) 194 + #endif 198 195 #ifndef cpu_has_mips64r1 199 196 # define cpu_has_mips64r1 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1) 200 197 #endif 201 198 #ifndef cpu_has_mips64r2 202 199 # define cpu_has_mips64r2 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2) 200 + #endif 201 + #ifndef cpu_has_mips64r6 202 + # define cpu_has_mips64r6 (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6) 203 203 #endif 204 204 205 205 /* ··· 220 208 #define cpu_has_mips_4_5_r (cpu_has_mips_4 | cpu_has_mips_5_r) 221 209 #define cpu_has_mips_5_r (cpu_has_mips_5 | cpu_has_mips_r) 222 210 223 - #define cpu_has_mips_4_5_r2 (cpu_has_mips_4_5 | cpu_has_mips_r2) 211 + #define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \ 212 + cpu_has_mips_r6) 224 213 225 - #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2) 226 - #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2) 214 + #define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6) 215 + #define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6) 227 216 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1) 228 217 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2) 218 + #define cpu_has_mips_r6 (cpu_has_mips32r6 | cpu_has_mips64r6) 229 219 #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \ 230 - cpu_has_mips64r1 | cpu_has_mips64r2) 220 + cpu_has_mips32r6 | cpu_has_mips64r1 | \ 221 + cpu_has_mips64r2 | cpu_has_mips64r6) 222 + 223 + /* MIPSR2 and MIPSR6 have a lot of similarities */ 224 + #define cpu_has_mips_r2_r6 (cpu_has_mips_r2 | cpu_has_mips_r6) 231 225 232 226 #ifndef cpu_has_mips_r2_exec_hazard 233 - #define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2 227 + #define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6) 234 228 #endif 235 229 236 230 /*
+5
arch/mips/include/asm/cpu-info.h
··· 84 84 * (shifted by _CACHE_SHIFT) 85 85 */ 86 86 unsigned int writecombine; 87 + /* 88 + * Simple counter to prevent enabling HTW in nested 89 + * htw_start/htw_stop calls 90 + */ 91 + unsigned int htw_seq; 87 92 } __attribute__((aligned(SMP_CACHE_BYTES))); 88 93 89 94 extern struct cpuinfo_mips cpu_data[];
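For reference, htw_seq makes htw_stop()/htw_start() safe to nest: the
hardware page-table walker is switched off by the outermost stop and back
on only by the matching outermost start. A sketch of the counting
discipline (htw_disable()/htw_enable() are stand-ins, not the real
pgtable.h helpers, and the counter is per-CPU in the kernel):

	#include <stdio.h>

	static unsigned int htw_seq;

	static void htw_disable(void) { puts("HTW off"); }	/* stand-in */
	static void htw_enable(void) { puts("HTW on"); }	/* stand-in */

	static void htw_stop(void)
	{
		if (htw_seq++ == 0)	/* only the outermost stop disables */
			htw_disable();
	}

	static void htw_start(void)
	{
		if (--htw_seq == 0)	/* only the matching start re-enables */
			htw_enable();
	}

	int main(void)
	{
		htw_stop();	/* HTW off */
		htw_stop();	/* nested: no hardware access */
		htw_start();
		htw_start();	/* HTW on */
		return 0;
	}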
+7
arch/mips/include/asm/cpu-type.h
··· 54 54 case CPU_M5150: 55 55 #endif 56 56 57 + #if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \ 58 + defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \ 59 + defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \ 60 + defined(CONFIG_SYS_HAS_CPU_MIPS64_R6) 61 + case CPU_QEMU_GENERIC: 62 + #endif 63 + 57 64 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1 58 65 case CPU_5KC: 59 66 case CPU_5KE:
+9 -2
arch/mips/include/asm/cpu.h
··· 93 93 * These are the PRID's for when 23:16 == PRID_COMP_MIPS 94 94 */ 95 95 96 + #define PRID_IMP_QEMU_GENERIC 0x0000 96 97 #define PRID_IMP_4KC 0x8000 97 98 #define PRID_IMP_5KC 0x8100 98 99 #define PRID_IMP_20KC 0x8200 ··· 313 312 CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS, 314 313 CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP, 315 314 315 + CPU_QEMU_GENERIC, 316 + 316 317 CPU_LAST 317 318 }; 318 319 ··· 332 329 #define MIPS_CPU_ISA_M32R2 0x00000020 333 330 #define MIPS_CPU_ISA_M64R1 0x00000040 334 331 #define MIPS_CPU_ISA_M64R2 0x00000080 332 + #define MIPS_CPU_ISA_M32R6 0x00000100 333 + #define MIPS_CPU_ISA_M64R6 0x00000200 335 334 336 335 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \ 337 - MIPS_CPU_ISA_M32R2) 336 + MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6) 338 337 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \ 339 - MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2) 338 + MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \ 339 + MIPS_CPU_ISA_M64R6) 340 340 341 341 /* 342 342 * CPU Option encodings ··· 376 370 #define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ 377 371 #define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ 378 372 #define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */ 373 + #define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */ 379 374 380 375 /* 381 376 * CPU ASE encodings
+2 -2
arch/mips/include/asm/edac.h
··· 26 26 " sc %0, %1 \n" 27 27 " beqz %0, 1b \n" 28 28 " .set mips0 \n" 29 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr) 30 - : GCC_OFF12_ASM() (*virt_addr)); 29 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr) 30 + : GCC_OFF_SMALL_ASM() (*virt_addr)); 31 31 32 32 virt_addr++; 33 33 }
+6 -4
arch/mips/include/asm/elf.h
··· 417 417 struct arch_elf_state { 418 418 int fp_abi; 419 419 int interp_fp_abi; 420 - int overall_abi; 420 + int overall_fp_mode; 421 421 }; 422 422 423 + #define MIPS_ABI_FP_UNKNOWN (-1) /* Unknown FP ABI (kernel internal) */ 424 + 423 425 #define INIT_ARCH_ELF_STATE { \ 424 - .fp_abi = -1, \ 425 - .interp_fp_abi = -1, \ 426 - .overall_abi = -1, \ 426 + .fp_abi = MIPS_ABI_FP_UNKNOWN, \ 427 + .interp_fp_abi = MIPS_ABI_FP_UNKNOWN, \ 428 + .overall_fp_mode = -1, \ 427 429 } 428 430 429 431 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
+2 -1
arch/mips/include/asm/fpu.h
··· 68 68 goto fr_common; 69 69 70 70 case FPU_64BIT: 71 - #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) 71 + #if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \ 72 + || defined(CONFIG_64BIT)) 72 73 /* we only have a 32-bit FPU */ 73 74 return SIGFPE; 74 75 #endif
+12 -12
arch/mips/include/asm/futex.h
··· 45 45 " "__UA_ADDR "\t2b, 4b \n" \ 46 46 " .previous \n" \ 47 47 : "=r" (ret), "=&r" (oldval), \ 48 - "=" GCC_OFF12_ASM() (*uaddr) \ 49 - : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 48 + "=" GCC_OFF_SMALL_ASM() (*uaddr) \ 49 + : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ 50 50 "i" (-EFAULT) \ 51 51 : "memory"); \ 52 52 } else if (cpu_has_llsc) { \ 53 53 __asm__ __volatile__( \ 54 54 " .set push \n" \ 55 55 " .set noat \n" \ 56 - " .set arch=r4000 \n" \ 56 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 57 57 "1: "user_ll("%1", "%4")" # __futex_atomic_op\n" \ 58 58 " .set mips0 \n" \ 59 59 " " insn " \n" \ 60 - " .set arch=r4000 \n" \ 60 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 61 61 "2: "user_sc("$1", "%2")" \n" \ 62 62 " beqz $1, 1b \n" \ 63 63 __WEAK_LLSC_MB \ ··· 74 74 " "__UA_ADDR "\t2b, 4b \n" \ 75 75 " .previous \n" \ 76 76 : "=r" (ret), "=&r" (oldval), \ 77 - "=" GCC_OFF12_ASM() (*uaddr) \ 78 - : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \ 77 + "=" GCC_OFF_SMALL_ASM() (*uaddr) \ 78 + : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg), \ 79 79 "i" (-EFAULT) \ 80 80 : "memory"); \ 81 81 } else \ ··· 174 174 " "__UA_ADDR "\t1b, 4b \n" 175 175 " "__UA_ADDR "\t2b, 4b \n" 176 176 " .previous \n" 177 - : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 178 - : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 177 + : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) 178 + : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 179 179 "i" (-EFAULT) 180 180 : "memory"); 181 181 } else if (cpu_has_llsc) { ··· 183 183 "# futex_atomic_cmpxchg_inatomic \n" 184 184 " .set push \n" 185 185 " .set noat \n" 186 - " .set arch=r4000 \n" 186 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 187 187 "1: "user_ll("%1", "%3")" \n" 188 188 " bne %1, %z4, 3f \n" 189 189 " .set mips0 \n" 190 190 " move $1, %z5 \n" 191 - " .set arch=r4000 \n" 191 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 192 192 "2: "user_sc("$1", "%2")" \n" 193 193 " beqz $1, 1b \n" 194 194 __WEAK_LLSC_MB ··· 203 203 " "__UA_ADDR "\t1b, 4b \n" 204 204 " "__UA_ADDR "\t2b, 4b \n" 205 205 " .previous \n" 206 - : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr) 207 - : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 206 + : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr) 207 + : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval), 208 208 "i" (-EFAULT) 209 209 : "memory"); 210 210 } else
-2
arch/mips/include/asm/gio_device.h
··· 25 25 26 26 int (*probe)(struct gio_device *, const struct gio_device_id *); 27 27 void (*remove)(struct gio_device *); 28 - int (*suspend)(struct gio_device *, pm_message_t); 29 - int (*resume)(struct gio_device *); 30 28 void (*shutdown)(struct gio_device *); 31 29 32 30 struct device_driver driver;
+5 -4
arch/mips/include/asm/hazards.h
··· 11 11 #define _ASM_HAZARDS_H
12 12
13 13 #include <linux/stringify.h>
14 + #include <asm/compiler.h>
14 15
15 16 #define ___ssnop \
16 17 sll $0, $0, 1
··· 22 21 /*
23 22 * TLB hazards
24 23 */
25 - #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
24 + #if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
26 25
27 26 /*
28 27 * MIPSR2 defines ehb for hazard avoidance
··· 59 58 unsigned long tmp; \
60 59 \
61 60 __asm__ __volatile__( \
62 - " .set mips64r2 \n" \
61 + " .set "MIPS_ISA_LEVEL" \n" \
63 62 " dla %0, 1f \n" \
64 63 " jr.hb %0 \n" \
65 64 " .set mips0 \n" \
··· 133 132
134 133 #define instruction_hazard() \
135 134 do { \
136 - if (cpu_has_mips_r2) \
135 + if (cpu_has_mips_r2_r6) \
137 136 __instruction_hazard(); \
138 137 } while (0)
139 138
··· 241 240
242 241 #define __disable_fpu_hazard
243 242
244 - #elif defined(CONFIG_CPU_MIPSR2)
243 + #elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
245 244
246 245 #define __enable_fpu_hazard \
247 246 ___ehb
+4 -3
arch/mips/include/asm/irqflags.h
··· 15 15 16 16 #include <linux/compiler.h> 17 17 #include <linux/stringify.h> 18 + #include <asm/compiler.h> 18 19 #include <asm/hazards.h> 19 20 20 - #ifdef CONFIG_CPU_MIPSR2 21 + #if defined(CONFIG_CPU_MIPSR2) || defined (CONFIG_CPU_MIPSR6) 21 22 22 23 static inline void arch_local_irq_disable(void) 23 24 { ··· 119 118 unsigned long arch_local_irq_save(void); 120 119 void arch_local_irq_restore(unsigned long flags); 121 120 void __arch_local_irq_restore(unsigned long flags); 122 - #endif /* CONFIG_CPU_MIPSR2 */ 121 + #endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */ 123 122 124 123 static inline void arch_local_irq_enable(void) 125 124 { ··· 127 126 " .set push \n" 128 127 " .set reorder \n" 129 128 " .set noat \n" 130 - #if defined(CONFIG_CPU_MIPSR2) 129 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) 131 130 " ei \n" 132 131 #else 133 132 " mfc0 $1,$12 \n"
+3 -2
arch/mips/include/asm/local.h
··· 5 5 #include <linux/bitops.h> 6 6 #include <linux/atomic.h> 7 7 #include <asm/cmpxchg.h> 8 + #include <asm/compiler.h> 8 9 #include <asm/war.h> 9 10 10 11 typedef struct ··· 48 47 unsigned long temp; 49 48 50 49 __asm__ __volatile__( 51 - " .set arch=r4000 \n" 50 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 52 51 "1:" __LL "%1, %2 # local_add_return \n" 53 52 " addu %0, %1, %3 \n" 54 53 __SC "%0, %2 \n" ··· 93 92 unsigned long temp; 94 93 95 94 __asm__ __volatile__( 96 - " .set arch=r4000 \n" 95 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 97 96 "1:" __LL "%1, %2 # local_sub_return \n" 98 97 " subu %0, %1, %3 \n" 99 98 __SC "%0, %2 \n"
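The local_t primitives retargeted here keep their usual contract: cheap, interrupt-safe updates to data owned by one CPU. An illustrative (not in-tree) per-CPU counter following the documented get_cpu_var/put_cpu_var pattern:

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, demo_events);	/* invented name */

static void demo_count_event(void)
{
	/* get_cpu_var() disables preemption around the LL/SC update */
	local_inc(&get_cpu_var(demo_events));
	put_cpu_var(demo_events);
}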
+41 -23
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
··· 8 8 #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H 9 9 #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H 10 10 11 - 12 - #define CP0_CYCLE_COUNTER $9, 6 13 11 #define CP0_CVMCTL_REG $9, 7 14 12 #define CP0_CVMMEMCTL_REG $11,7 15 13 #define CP0_PRID_REG $15, 0 14 + #define CP0_DCACHE_ERR_REG $27, 1 16 15 #define CP0_PRID_OCTEON_PASS1 0x000d0000 17 16 #define CP0_PRID_OCTEON_CN30XX 0x000d0200 18 17 ··· 37 38 # Needed for octeon specific memcpy 38 39 or v0, v0, 0x5001 39 40 xor v0, v0, 0x1001 40 - # Read the processor ID register 41 - mfc0 v1, CP0_PRID_REG 42 - # Disable instruction prefetching (Octeon Pass1 errata) 43 - or v0, v0, 0x2000 44 - # Skip reenable of prefetching for Octeon Pass1 45 - beq v1, CP0_PRID_OCTEON_PASS1, skip 46 - nop 47 - # Reenable instruction prefetching, not on Pass1 48 - xor v0, v0, 0x2000 49 - # Strip off pass number off of processor id 50 - srl v1, 8 51 - sll v1, 8 52 - # CN30XX needs some extra stuff turned off for better performance 53 - bne v1, CP0_PRID_OCTEON_CN30XX, skip 54 - nop 55 - # CN30XX Use random Icache replacement 56 - or v0, v0, 0x400 57 - # CN30XX Disable instruction prefetching 58 - or v0, v0, 0x2000 59 - skip: 60 41 # First clear off CvmCtl[IPPCI] bit and move the performance 61 42 # counters interrupt to IRQ 6 62 - li v1, ~(7 << 7) 43 + dli v1, ~(7 << 7) 63 44 and v0, v0, v1 64 45 ori v0, v0, (6 << 7) 46 + 47 + mfc0 v1, CP0_PRID_REG 48 + and t1, v1, 0xfff8 49 + xor t1, t1, 0x9000 # 63-P1 50 + beqz t1, 4f 51 + and t1, v1, 0xfff8 52 + xor t1, t1, 0x9008 # 63-P2 53 + beqz t1, 4f 54 + and t1, v1, 0xfff8 55 + xor t1, t1, 0x9100 # 68-P1 56 + beqz t1, 4f 57 + and t1, v1, 0xff00 58 + xor t1, t1, 0x9200 # 66-PX 59 + bnez t1, 5f # Skip WAR for others. 60 + and t1, v1, 0x00ff 61 + slti t1, t1, 2 # 66-P1.2 and later good. 62 + beqz t1, 5f 63 + 64 + 4: # core-16057 work around 65 + or v0, v0, 0x2000 # Set IPREF bit. 66 + 67 + 5: # No core-16057 work around 65 68 # Write the cavium control register 66 69 dmtc0 v0, CP0_CVMCTL_REG 67 70 sync 68 71 # Flush dcache after config change 69 72 cache 9, 0($0) 73 + # Zero all of CVMSEG to make sure parity is correct 74 + dli v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 75 + dsll v0, 7 76 + beqz v0, 2f 77 + 1: dsubu v0, 8 78 + sd $0, -32768(v0) 79 + bnez v0, 1b 80 + 2: 81 + mfc0 v0, CP0_PRID_REG 82 + bbit0 v0, 15, 1f 83 + # OCTEON II or better have bit 15 set. Clear the error bits. 84 + and t1, v0, 0xff00 85 + dli v0, 0x9500 86 + bge t1, v0, 1f # OCTEON III has no DCACHE_ERR_REG COP0 87 + dli v0, 0x27 88 + dmtc0 v0, CP0_DCACHE_ERR_REG 89 + 1: 70 90 # Get my core id 71 91 rdhwr v0, $0 72 92 # Jump the master to kernel_entry
+3
arch/mips/include/asm/mach-cavium-octeon/war.h
··· 22 22 #define R10000_LLSC_WAR 0 23 23 #define MIPS34K_MISSED_ITLB_WAR 0 24 24 25 + #define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR \ 26 + OCTEON_IS_MODEL(OCTEON_CN6XXX) 27 + 25 28 #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
+12 -12
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
··· 85 85 " "__beqz"%0, 1b \n" 86 86 " nop \n" 87 87 " .set pop \n" 88 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 89 - : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr)); 88 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) 89 + : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr)); 90 90 } 91 91 92 92 /* ··· 106 106 " "__beqz"%0, 1b \n" 107 107 " nop \n" 108 108 " .set pop \n" 109 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 110 - : "ir" (mask), GCC_OFF12_ASM() (*addr)); 109 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) 110 + : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); 111 111 } 112 112 113 113 /* ··· 127 127 " "__beqz"%0, 1b \n" 128 128 " nop \n" 129 129 " .set pop \n" 130 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 131 - : "ir" (~mask), GCC_OFF12_ASM() (*addr)); 130 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) 131 + : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr)); 132 132 } 133 133 134 134 /* ··· 148 148 " "__beqz"%0, 1b \n" 149 149 " nop \n" 150 150 " .set pop \n" 151 - : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr) 152 - : "ir" (mask), GCC_OFF12_ASM() (*addr)); 151 + : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr) 152 + : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr)); 153 153 } 154 154 155 155 /* ··· 220 220 " .set arch=r4000 \n" \ 221 221 "1: ll %0, %1 #custom_read_reg32 \n" \ 222 222 " .set pop \n" \ 223 - : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 224 - : GCC_OFF12_ASM() (*address)) 223 + : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ 224 + : GCC_OFF_SMALL_ASM() (*address)) 225 225 226 226 #define custom_write_reg32(address, tmp) \ 227 227 __asm__ __volatile__( \ ··· 231 231 " "__beqz"%0, 1b \n" \ 232 232 " nop \n" \ 233 233 " .set pop \n" \ 234 - : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \ 235 - : "0" (tmp), GCC_OFF12_ASM() (*address)) 234 + : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address) \ 235 + : "0" (tmp), GCC_OFF_SMALL_ASM() (*address)) 236 236 237 237 #endif /* __ASM_REGOPS_H__ */
+96
arch/mips/include/asm/mips-r2-to-r6-emul.h
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2014 Imagination Technologies Ltd. 7 + * Author: Markos Chandras <markos.chandras@imgtec.com> 8 + */ 9 + 10 + #ifndef __ASM_MIPS_R2_TO_R6_EMUL_H 11 + #define __ASM_MIPS_R2_TO_R6_EMUL_H 12 + 13 + struct mips_r2_emulator_stats { 14 + u64 movs; 15 + u64 hilo; 16 + u64 muls; 17 + u64 divs; 18 + u64 dsps; 19 + u64 bops; 20 + u64 traps; 21 + u64 fpus; 22 + u64 loads; 23 + u64 stores; 24 + u64 llsc; 25 + u64 dsemul; 26 + }; 27 + 28 + struct mips_r2br_emulator_stats { 29 + u64 jrs; 30 + u64 bltzl; 31 + u64 bgezl; 32 + u64 bltzll; 33 + u64 bgezll; 34 + u64 bltzall; 35 + u64 bgezall; 36 + u64 bltzal; 37 + u64 bgezal; 38 + u64 beql; 39 + u64 bnel; 40 + u64 blezl; 41 + u64 bgtzl; 42 + }; 43 + 44 + #ifdef CONFIG_DEBUG_FS 45 + 46 + #define MIPS_R2_STATS(M) \ 47 + do { \ 48 + u32 nir; \ 49 + int err; \ 50 + \ 51 + preempt_disable(); \ 52 + __this_cpu_inc(mipsr2emustats.M); \ 53 + err = __get_user(nir, (u32 __user *)regs->cp0_epc); \ 54 + if (!err) { \ 55 + if (nir == BREAK_MATH) \ 56 + __this_cpu_inc(mipsr2bdemustats.M); \ 57 + } \ 58 + preempt_enable(); \ 59 + } while (0) 60 + 61 + #define MIPS_R2BR_STATS(M) \ 62 + do { \ 63 + preempt_disable(); \ 64 + __this_cpu_inc(mipsr2bremustats.M); \ 65 + preempt_enable(); \ 66 + } while (0) 67 + 68 + #else 69 + 70 + #define MIPS_R2_STATS(M) do { } while (0) 71 + #define MIPS_R2BR_STATS(M) do { } while (0) 72 + 73 + #endif /* CONFIG_DEBUG_FS */ 74 + 75 + struct r2_decoder_table { 76 + u32 mask; 77 + u32 code; 78 + int (*func)(struct pt_regs *regs, u32 inst); 79 + }; 80 + 81 + 82 + extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 83 + const char *str); 84 + 85 + #ifndef CONFIG_MIPSR2_TO_R6_EMULATOR 86 + static int mipsr2_emulation; 87 + static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; }; 88 + #else 89 + /* MIPS R2 Emulator ON/OFF */ 90 + extern int mipsr2_emulation; 91 + extern int mipsr2_decoder(struct pt_regs *regs, u32 inst); 92 + #endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */ 93 + 94 + #define NO_R6EMU (cpu_has_mips_r6 && !mipsr2_emulation) 95 + 96 + #endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
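A sketch of how an emulation path consumes these macros; the decode logic below is invented, and the per-CPU mipsr2emustats/mipsr2bdemustats variables are expected to live in the emulator proper (the new kernel/mips-r2-to-r6-emul.c):

static int demo_emulate_mul(struct pt_regs *regs, u32 inst)
{
	/* ... decode 'inst' and emulate the removed R2 MUL here ... */

	/* Account the event; with CONFIG_DEBUG_FS=n this compiles away.
	 * The macro peeks at regs->cp0_epc to spot a following
	 * BREAK_MATH, i.e. a branch-delay-slot emulation frame. */
	MIPS_R2_STATS(muls);
	return 0;
}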
+4
arch/mips/include/asm/mipsregs.h
··· 653 653 #define MIPS_CONF5_NF (_ULCAST_(1) << 0) 654 654 #define MIPS_CONF5_UFR (_ULCAST_(1) << 2) 655 655 #define MIPS_CONF5_MRP (_ULCAST_(1) << 3) 656 + #define MIPS_CONF5_LLB (_ULCAST_(1) << 4) 656 657 #define MIPS_CONF5_MVH (_ULCAST_(1) << 5) 657 658 #define MIPS_CONF5_FRE (_ULCAST_(1) << 8) 658 659 #define MIPS_CONF5_UFE (_ULCAST_(1) << 9) ··· 1128 1127 #define write_c0_config6(val) __write_32bit_c0_register($16, 6, val) 1129 1128 #define write_c0_config7(val) __write_32bit_c0_register($16, 7, val) 1130 1129 1130 + #define read_c0_lladdr() __read_ulong_c0_register($17, 0) 1131 + #define write_c0_lladdr(val) __write_ulong_c0_register($17, 0, val) 1131 1132 #define read_c0_maar() __read_ulong_c0_register($17, 1) 1132 1133 #define write_c0_maar(val) __write_ulong_c0_register($17, 1, val) 1133 1134 #define read_c0_maari() __read_32bit_c0_register($17, 2) ··· 1912 1909 __BUILD_SET_C0(intcontrol) 1913 1910 __BUILD_SET_C0(intctl) 1914 1911 __BUILD_SET_C0(srsmap) 1912 + __BUILD_SET_C0(pagegrain) 1915 1913 __BUILD_SET_C0(brcm_config_0) 1916 1914 __BUILD_SET_C0(brcm_bus_pll) 1917 1915 __BUILD_SET_C0(brcm_reset)
+3
arch/mips/include/asm/mmu.h
··· 1 1 #ifndef __ASM_MMU_H 2 2 #define __ASM_MMU_H 3 3 4 + #include <linux/atomic.h> 5 + 4 6 typedef struct { 5 7 unsigned long asid[NR_CPUS]; 6 8 void *vdso; 9 + atomic_t fp_mode_switching; 7 10 } mm_context_t; 8 11 9 12 #endif /* __ASM_MMU_H */
+8 -1
arch/mips/include/asm/mmu_context.h
··· 25 25 if (cpu_has_htw) { \ 26 26 write_c0_pwbase(pgd); \ 27 27 back_to_back_c0_hazard(); \ 28 - htw_reset(); \ 29 28 } \ 30 29 } while (0) 31 30 ··· 131 132 for_each_possible_cpu(i) 132 133 cpu_context(i, mm) = 0; 133 134 135 + atomic_set(&mm->context.fp_mode_switching, 0); 136 + 134 137 return 0; 135 138 } 136 139 ··· 143 142 unsigned long flags; 144 143 local_irq_save(flags); 145 144 145 + htw_stop(); 146 146 /* Check if our ASID is of an older version and thus invalid */ 147 147 if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK) 148 148 get_new_mmu_context(next, cpu); ··· 156 154 */ 157 155 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 158 156 cpumask_set_cpu(cpu, mm_cpumask(next)); 157 + htw_start(); 159 158 160 159 local_irq_restore(flags); 161 160 } ··· 183 180 184 181 local_irq_save(flags); 185 182 183 + htw_stop(); 186 184 /* Unconditionally get a new ASID. */ 187 185 get_new_mmu_context(next, cpu); 188 186 ··· 193 189 /* mark mmu ownership change */ 194 190 cpumask_clear_cpu(cpu, mm_cpumask(prev)); 195 191 cpumask_set_cpu(cpu, mm_cpumask(next)); 192 + htw_start(); 196 193 197 194 local_irq_restore(flags); 198 195 } ··· 208 203 unsigned long flags; 209 204 210 205 local_irq_save(flags); 206 + htw_stop(); 211 207 212 208 if (cpumask_test_cpu(cpu, mm_cpumask(mm))) { 213 209 get_new_mmu_context(mm, cpu); ··· 217 211 /* will get a new context next time */ 218 212 cpu_context(cpu, mm) = 0; 219 213 } 214 + htw_start(); 220 215 local_irq_restore(flags); 221 216 } 222 217
+4
arch/mips/include/asm/module.h
··· 88 88 #define MODULE_PROC_FAMILY "MIPS32_R1 " 89 89 #elif defined CONFIG_CPU_MIPS32_R2 90 90 #define MODULE_PROC_FAMILY "MIPS32_R2 " 91 + #elif defined CONFIG_CPU_MIPS32_R6 92 + #define MODULE_PROC_FAMILY "MIPS32_R6 " 91 93 #elif defined CONFIG_CPU_MIPS64_R1 92 94 #define MODULE_PROC_FAMILY "MIPS64_R1 " 93 95 #elif defined CONFIG_CPU_MIPS64_R2 94 96 #define MODULE_PROC_FAMILY "MIPS64_R2 " 97 + #elif defined CONFIG_CPU_MIPS64_R6 98 + #define MODULE_PROC_FAMILY "MIPS64_R6 " 95 99 #elif defined CONFIG_CPU_R3000 96 100 #define MODULE_PROC_FAMILY "R3000 " 97 101 #elif defined CONFIG_CPU_TX39XX
+1 -1
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
··· 275 275 " lbu %[ticket], %[now_serving]\n" 276 276 "4:\n" 277 277 ".set pop\n" : 278 - [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 278 + [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 279 279 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), 280 280 [my_ticket] "=r"(my_ticket) 281 281 );
+306
arch/mips/include/asm/octeon/cvmx-rst-defs.h
··· 1 + /***********************license start***************
2 + * Author: Cavium Inc.
3 + *
4 + * Contact: support@cavium.com
5 + * This file is part of the OCTEON SDK
6 + *
7 + * Copyright (c) 2003-2014 Cavium Inc.
8 + *
9 + * This file is free software; you can redistribute it and/or modify
10 + * it under the terms of the GNU General Public License, Version 2, as
11 + * published by the Free Software Foundation.
12 + *
13 + * This file is distributed in the hope that it will be useful, but
14 + * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 + * NONINFRINGEMENT. See the GNU General Public License for more
17 + * details.
18 + *
19 + * You should have received a copy of the GNU General Public License
20 + * along with this file; if not, write to the Free Software
21 + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 + * or visit http://www.gnu.org/licenses/.
23 + *
24 + * This file may also be available under a different license from Cavium.
25 + * Contact Cavium Inc. for more information
26 + ***********************license end**************************************/
27 +
28 + #ifndef __CVMX_RST_DEFS_H__
29 + #define __CVMX_RST_DEFS_H__
30 +
31 + #define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull))
32 + #define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull))
33 + #define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull))
34 + #define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8)
35 + #define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull))
36 + #define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull))
37 + #define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull))
38 + #define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull))
39 + #define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull))
40 + #define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull))
41 + #define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8)
42 + #define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull))
43 +
44 + union cvmx_rst_boot {
45 + uint64_t u64;
46 + struct cvmx_rst_boot_s {
47 + #ifdef __BIG_ENDIAN_BITFIELD
48 + uint64_t chipkill:1;
49 + uint64_t jtcsrdis:1;
50 + uint64_t ejtagdis:1;
51 + uint64_t romen:1;
52 + uint64_t ckill_ppdis:1;
53 + uint64_t jt_tstmode:1;
54 + uint64_t vrm_err:1;
55 + uint64_t reserved_37_56:20;
56 + uint64_t c_mul:7;
57 + uint64_t pnr_mul:6;
58 + uint64_t reserved_21_23:3;
59 + uint64_t lboot_oci:3;
60 + uint64_t lboot_ext:6;
61 + uint64_t lboot:10;
62 + uint64_t rboot:1;
63 + uint64_t rboot_pin:1;
64 + #else
65 + uint64_t rboot_pin:1;
66 + uint64_t rboot:1;
67 + uint64_t lboot:10;
68 + uint64_t lboot_ext:6;
69 + uint64_t lboot_oci:3;
70 + uint64_t reserved_21_23:3;
71 + uint64_t pnr_mul:6;
72 + uint64_t c_mul:7;
73 + uint64_t reserved_37_56:20;
74 + uint64_t vrm_err:1;
75 + uint64_t jt_tstmode:1;
76 + uint64_t ckill_ppdis:1;
77 + uint64_t romen:1;
78 + uint64_t ejtagdis:1;
79 + uint64_t jtcsrdis:1;
80 + uint64_t chipkill:1;
81 + #endif
82 + } s;
83 + struct cvmx_rst_boot_s cn70xx;
84 + struct cvmx_rst_boot_s cn70xxp1;
85 + struct cvmx_rst_boot_s cn78xx;
86 + };
87 +
88 + union cvmx_rst_cfg {
89 + uint64_t u64;
90 + struct cvmx_rst_cfg_s {
91 + #ifdef __BIG_ENDIAN_BITFIELD
92 + uint64_t bist_delay:58;
93 + uint64_t reserved_3_5:3;
94 + uint64_t cntl_clr_bist:1;
95 + uint64_t warm_clr_bist:1;
96 + uint64_t soft_clr_bist:1;
97 + #else
98 + uint64_t soft_clr_bist:1;
99 + uint64_t warm_clr_bist:1;
100 + uint64_t cntl_clr_bist:1;
101 + uint64_t reserved_3_5:3;
102 + uint64_t bist_delay:58;
103 + #endif
104 + } s;
105 + struct cvmx_rst_cfg_s cn70xx;
106 + struct cvmx_rst_cfg_s cn70xxp1;
107 + struct cvmx_rst_cfg_s cn78xx;
108 + };
109 +
110 + union cvmx_rst_ckill {
111 + uint64_t u64;
112 + struct cvmx_rst_ckill_s {
113 + #ifdef __BIG_ENDIAN_BITFIELD
114 + uint64_t reserved_47_63:17;
115 + uint64_t timer:47;
116 + #else
117 + uint64_t timer:47;
118 + uint64_t reserved_47_63:17;
119 + #endif
120 + } s;
121 + struct cvmx_rst_ckill_s cn70xx;
122 + struct cvmx_rst_ckill_s cn70xxp1;
123 + struct cvmx_rst_ckill_s cn78xx;
124 + };
125 +
126 + union cvmx_rst_ctlx {
127 + uint64_t u64;
128 + struct cvmx_rst_ctlx_s {
129 + #ifdef __BIG_ENDIAN_BITFIELD
130 + uint64_t reserved_10_63:54;
131 + uint64_t prst_link:1;
132 + uint64_t rst_done:1;
133 + uint64_t rst_link:1;
134 + uint64_t host_mode:1;
135 + uint64_t reserved_4_5:2;
136 + uint64_t rst_drv:1;
137 + uint64_t rst_rcv:1;
138 + uint64_t rst_chip:1;
139 + uint64_t rst_val:1;
140 + #else
141 + uint64_t rst_val:1;
142 + uint64_t rst_chip:1;
143 + uint64_t rst_rcv:1;
144 + uint64_t rst_drv:1;
145 + uint64_t reserved_4_5:2;
146 + uint64_t host_mode:1;
147 + uint64_t rst_link:1;
148 + uint64_t rst_done:1;
149 + uint64_t prst_link:1;
150 + uint64_t reserved_10_63:54;
151 + #endif
152 + } s;
153 + struct cvmx_rst_ctlx_s cn70xx;
154 + struct cvmx_rst_ctlx_s cn70xxp1;
155 + struct cvmx_rst_ctlx_s cn78xx;
156 + };
157 +
158 + union cvmx_rst_delay {
159 + uint64_t u64;
160 + struct cvmx_rst_delay_s {
161 + #ifdef __BIG_ENDIAN_BITFIELD
162 + uint64_t reserved_32_63:32;
163 + uint64_t warm_rst_dly:16;
164 + uint64_t soft_rst_dly:16;
165 + #else
166 + uint64_t soft_rst_dly:16;
167 + uint64_t warm_rst_dly:16;
168 + uint64_t reserved_32_63:32;
169 + #endif
170 + } s;
171 + struct cvmx_rst_delay_s cn70xx;
172 + struct cvmx_rst_delay_s cn70xxp1;
173 + struct cvmx_rst_delay_s cn78xx;
174 + };
175 +
176 + union cvmx_rst_eco {
177 + uint64_t u64;
178 + struct cvmx_rst_eco_s {
179 + #ifdef __BIG_ENDIAN_BITFIELD
180 + uint64_t reserved_32_63:32;
181 + uint64_t eco_rw:32;
182 + #else
183 + uint64_t eco_rw:32;
184 + uint64_t reserved_32_63:32;
185 + #endif
186 + } s;
187 + struct cvmx_rst_eco_s cn78xx;
188 + };
189 +
190 + union cvmx_rst_int {
191 + uint64_t u64;
192 + struct cvmx_rst_int_s {
193 + #ifdef __BIG_ENDIAN_BITFIELD
194 + uint64_t reserved_12_63:52;
195 + uint64_t perst:4;
196 + uint64_t reserved_4_7:4;
197 + uint64_t rst_link:4;
198 + #else
199 + uint64_t rst_link:4;
200 + uint64_t reserved_4_7:4;
201 + uint64_t perst:4;
202 + uint64_t reserved_12_63:52;
203 + #endif
204 + } s;
205 + struct cvmx_rst_int_cn70xx {
206 + #ifdef __BIG_ENDIAN_BITFIELD
207 + uint64_t reserved_11_63:53;
208 + uint64_t perst:3;
209 + uint64_t reserved_3_7:5;
210 + uint64_t rst_link:3;
211 + #else
212 + uint64_t rst_link:3;
213 + uint64_t reserved_3_7:5;
214 + uint64_t perst:3;
215 + uint64_t reserved_11_63:53;
216 + #endif
217 + } cn70xx;
218 + struct cvmx_rst_int_cn70xx cn70xxp1;
219 + struct cvmx_rst_int_s cn78xx;
220 + };
221 +
222 + union cvmx_rst_ocx {
223 + uint64_t u64;
224 + struct cvmx_rst_ocx_s {
225 + #ifdef __BIG_ENDIAN_BITFIELD
226 + uint64_t reserved_3_63:61;
227 + uint64_t rst_link:3;
228 + #else
229 + uint64_t rst_link:3;
230 + uint64_t reserved_3_63:61;
231 + #endif
232 + } s;
233 + struct cvmx_rst_ocx_s cn78xx;
234 + };
235 +
236 + union cvmx_rst_power_dbg {
237 + uint64_t u64;
238 + struct cvmx_rst_power_dbg_s {
239 + #ifdef __BIG_ENDIAN_BITFIELD
240 + uint64_t reserved_3_63:61;
241 + uint64_t str:3;
242 + #else
243 + uint64_t str:3;
244 + uint64_t reserved_3_63:61;
245 + #endif
246 + } s;
247 + struct cvmx_rst_power_dbg_s cn78xx;
248 + };
249 +
250 + union cvmx_rst_pp_power {
251 + uint64_t u64;
252 + struct cvmx_rst_pp_power_s {
253 + #ifdef __BIG_ENDIAN_BITFIELD
254 + uint64_t reserved_48_63:16;
255 + uint64_t gate:48;
256 + #else
257 + uint64_t gate:48;
258 + uint64_t reserved_48_63:16;
259 + #endif
260 + } s;
261 + struct cvmx_rst_pp_power_cn70xx {
262 + #ifdef __BIG_ENDIAN_BITFIELD
263 + uint64_t reserved_4_63:60;
264 + uint64_t gate:4;
265 + #else
266 + uint64_t gate:4;
267 + uint64_t reserved_4_63:60;
268 + #endif
269 + } cn70xx;
270 + struct cvmx_rst_pp_power_cn70xx cn70xxp1;
271 + struct cvmx_rst_pp_power_s cn78xx;
272 + };
273 +
274 + union cvmx_rst_soft_prstx {
275 + uint64_t u64;
276 + struct cvmx_rst_soft_prstx_s {
277 + #ifdef __BIG_ENDIAN_BITFIELD
278 + uint64_t reserved_1_63:63;
279 + uint64_t soft_prst:1;
280 + #else
281 + uint64_t soft_prst:1;
282 + uint64_t reserved_1_63:63;
283 + #endif
284 + } s;
285 + struct cvmx_rst_soft_prstx_s cn70xx;
286 + struct cvmx_rst_soft_prstx_s cn70xxp1;
287 + struct cvmx_rst_soft_prstx_s cn78xx;
288 + };
289 +
290 + union cvmx_rst_soft_rst {
291 + uint64_t u64;
292 + struct cvmx_rst_soft_rst_s {
293 + #ifdef __BIG_ENDIAN_BITFIELD
294 + uint64_t reserved_1_63:63;
295 + uint64_t soft_rst:1;
296 + #else
297 + uint64_t soft_rst:1;
298 + uint64_t reserved_1_63:63;
299 + #endif
300 + } s;
301 + struct cvmx_rst_soft_rst_s cn70xx;
302 + struct cvmx_rst_soft_rst_s cn70xxp1;
303 + struct cvmx_rst_soft_rst_s cn78xx;
304 + };
305 +
306 + #endif
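An illustrative consumer of the new definitions, decoding the boot-time clock multiplier out of RST_BOOT via the union's .u64/.s views (the helper name and the 50 MHz reference-clock assumption are invented for the example):

static inline uint64_t demo_octeon3_core_hz(void)
{
	union cvmx_rst_boot rst_boot;

	rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
	return 50000000ull * rst_boot.s.c_mul;	/* ref clock * core multiplier */
}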
+85 -22
arch/mips/include/asm/octeon/octeon-model.h
··· 45 45 */
46 46
47 47 #define OCTEON_FAMILY_MASK 0x00ffff00
48 + #define OCTEON_PRID_MASK 0x00ffffff
48 49
49 50 /* Flag bits in top byte */
50 51 /* Ignores revision in model checks */
··· 64 63 #define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
65 64 /* Match all cnf7XXX Octeon models. */
66 65 #define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
66 + /* Match all cn7XXX Octeon models. */
67 + #define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000
68 + #define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \
69 + OM_MATCH_6XXX_FAMILY_MODELS | \
70 + OM_MATCH_F7XXX_FAMILY_MODELS | \
71 + OM_MATCH_7XXX_FAMILY_MODELS)
72 + /*
73 + * CN7XXX models with new revision encoding
74 + */
75 +
76 + #define OCTEON_CN73XX_PASS1_0 0x000d9700
77 + #define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
78 + #define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \
79 + OM_IGNORE_MINOR_REVISION)
80 +
81 + #define OCTEON_CN70XX_PASS1_0 0x000d9600
82 + #define OCTEON_CN70XX_PASS1_1 0x000d9601
83 + #define OCTEON_CN70XX_PASS1_2 0x000d9602
84 +
85 + #define OCTEON_CN70XX_PASS2_0 0x000d9608
86 +
87 + #define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
88 + #define OCTEON_CN70XX_PASS1_X (OCTEON_CN70XX_PASS1_0 | \
89 + OM_IGNORE_MINOR_REVISION)
90 + #define OCTEON_CN70XX_PASS2_X (OCTEON_CN70XX_PASS2_0 | \
91 + OM_IGNORE_MINOR_REVISION)
92 +
93 + #define OCTEON_CN71XX OCTEON_CN70XX
94 +
95 + #define OCTEON_CN78XX_PASS1_0 0x000d9500
96 + #define OCTEON_CN78XX_PASS1_1 0x000d9501
97 + #define OCTEON_CN78XX_PASS2_0 0x000d9508
98 +
99 + #define OCTEON_CN78XX (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION)
100 + #define OCTEON_CN78XX_PASS1_X (OCTEON_CN78XX_PASS1_0 | \
101 + OM_IGNORE_MINOR_REVISION)
102 + #define OCTEON_CN78XX_PASS2_X (OCTEON_CN78XX_PASS2_0 | \
103 + OM_IGNORE_MINOR_REVISION)
104 +
105 + #define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL)
67 106
68 107 /*
69 108 * CNF7XXX models with new revision encoding
70 109 */
71 110 #define OCTEON_CNF71XX_PASS1_0 0x000d9400
111 + #define OCTEON_CNF71XX_PASS1_1 0x000d9401
72 112
73 113 #define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
74 114 #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
··· 121 79 #define OCTEON_CN68XX_PASS1_1 0x000d9101
122 80 #define OCTEON_CN68XX_PASS1_2 0x000d9102
123 81 #define OCTEON_CN68XX_PASS2_0 0x000d9108
82 + #define OCTEON_CN68XX_PASS2_1 0x000d9109
83 + #define OCTEON_CN68XX_PASS2_2 0x000d910a
124 84
125 85 #define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
126 86 #define OCTEON_CN68XX_PASS1_X (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
··· 148 104 #define OCTEON_CN63XX_PASS1_X (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
149 105 #define OCTEON_CN63XX_PASS2_X (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
150 106
107 + /* CN62XX is same as CN63XX with 1 MB cache */
108 + #define OCTEON_CN62XX OCTEON_CN63XX
109 +
151 110 #define OCTEON_CN61XX_PASS1_0 0x000d9300
111 + #define OCTEON_CN61XX_PASS1_1 0x000d9301
152 112
153 113 #define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
154 114 #define OCTEON_CN61XX_PASS1_X (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
115 +
116 + /* CN60XX is same as CN61XX with 512 KB cache */
117 + #define OCTEON_CN60XX OCTEON_CN61XX
155 118
156 119 /*
157 120 * CN5XXX models with new revision encoding
··· 171 120 #define OCTEON_CN58XX_PASS2_2 0x000d030a
172 121 #define OCTEON_CN58XX_PASS2_3 0x000d030b
173 122
174 - #define OCTEON_CN58XX (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
123 + #define OCTEON_CN58XX (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION)
175 124 #define OCTEON_CN58XX_PASS1_X (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
176 125 #define OCTEON_CN58XX_PASS2_X (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
177 126 #define OCTEON_CN58XX_PASS1 OCTEON_CN58XX_PASS1_X
··· 268 217 #define OCTEON_CN3XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
269 218 #define OCTEON_CN5XXX (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
270 219 #define OCTEON_CN6XXX (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
271 -
272 - /* These are used to cover entire families of OCTEON processors */
273 - #define OCTEON_FAM_1 (OCTEON_CN3XXX)
274 - #define OCTEON_FAM_PLUS (OCTEON_CN5XXX)
275 - #define OCTEON_FAM_1_PLUS (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
276 - #define OCTEON_FAM_2 (OCTEON_CN6XXX)
220 + #define OCTEON_CNF7XXX (OCTEON_CNF71XX_PASS1_0 | \
221 + OM_MATCH_F7XXX_FAMILY_MODELS)
222 + #define OCTEON_CN7XXX (OCTEON_CN78XX_PASS1_0 | \
223 + OM_MATCH_7XXX_FAMILY_MODELS)
277 224
278 225 /* The revision byte (low byte) has two different encodings.
279 226 * CN3XXX:
··· 281 232 * <4>: alternate package
282 233 * <3:0>: revision
283 234 *
284 - * CN5XXX:
235 + * CN5XXX and older models:
285 236 *
286 237 * bits
287 238 * <7>: reserved (0)
··· 300 251 /* CN5XXX and later use different layout of bits in the revision ID field */
301 252 #define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
302 253 #define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
303 - #define OCTEON_58XX_MODEL_MASK 0x00ffffc0
254 + #define OCTEON_58XX_MODEL_MASK 0x00ffff40
304 255 #define OCTEON_58XX_MODEL_REV_MASK (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
305 - #define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
256 + #define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
306 257 #define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
307 258
308 - /* forward declarations */
309 259 static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
310 260 static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
311 261
312 262 #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
313 263
264 + /*
265 + * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
266 + * returns true if chip_model is identical or belongs to the OCTEON
267 + * model group specified in arg_model.
268 + */
314 269 /* NOTE: This is for internal use only! */
315 270 #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model) \
316 271 ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0) && ( \
··· 339 286 ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
340 287 && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
341 288 ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL) \
342 - && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \
289 + && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
343 290 ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
344 - && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \
291 + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \
292 + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
345 293 ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
346 - && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) || \
294 + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \
295 + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
296 + ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
297 + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \
298 + && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
299 + ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \
300 + && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \
347 301 ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
348 302 && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
349 303 )))
··· 360 300 {
361 301 uint32_t cpuid = cvmx_get_proc_id();
362 302
363 - /*
364 - * Check for special case of mismarked 3005 samples. We only
365 - * need to check if the sub model isn't being ignored
366 - */
367 - if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
368 - if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
369 - cpuid |= 0x10;
370 - }
371 303 return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
372 304 }
··· 378 326 #define OCTEON_IS_COMMON_BINARY() 1
379 327 #undef OCTEON_MODEL
380 328
329 + #define OCTEON_IS_OCTEON1() OCTEON_IS_MODEL(OCTEON_CN3XXX)
330 + #define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX)
331 + #define OCTEON_IS_OCTEON2() \
332 + (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
333 +
334 + #define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX)
335 +
336 + #define OCTEON_IS_OCTEON1PLUS() (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS())
337 +
381 338 const char *__init octeon_model_get_string(uint32_t chip_id);
382 339
383 340 /*
384 341 * Return the octeon family, i.e., ProcessorID of the PrID register.
342 + *
343 + * @return the octeon family on success, ((uint32_t)-1) on error.
385 344 */
386 345 static inline uint32_t cvmx_get_octeon_family(void)
387 346 {
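Typical use of the model predicates; this is essentially how the new CAVIUM_OCTEON_DCACHE_PREFETCH_WAR test in mach-cavium-octeon/war.h above is built (the wrapper below is for illustration only):

static inline bool demo_needs_cn6xxx_dcache_war(void)
{
	/* True on every CN6XXX part; false on OCTEON III and older chips. */
	return OCTEON_IS_MODEL(OCTEON_CN6XXX);
}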
+118 -30
arch/mips/include/asm/octeon/octeon.h
··· 9 9 #define __ASM_OCTEON_OCTEON_H
10 10
11 11 #include <asm/octeon/cvmx.h>
12 + #include <asm/bitfield.h>
12 13
13 14 extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
14 15 uint64_t alignment,
··· 54 53 #define OCTOEN_SERIAL_LEN 20
55 54
56 55 struct octeon_boot_descriptor {
56 + #ifdef __BIG_ENDIAN_BITFIELD
57 57 /* Start of block referenced by assembly code - do not change! */
58 58 uint32_t desc_version;
59 59 uint32_t desc_size;
··· 106 104 uint8_t mac_addr_base[6];
107 105 uint8_t mac_addr_count;
108 106 uint64_t cvmx_desc_vaddr;
107 + #else
108 + uint32_t desc_size;
109 + uint32_t desc_version;
110 + uint64_t stack_top;
111 + uint64_t heap_base;
112 + uint64_t heap_end;
113 + /* Only used by bootloader */
114 + uint64_t entry_point;
115 + uint64_t desc_vaddr;
116 + /* End of This block referenced by assembly code - do not change! */
117 + uint32_t stack_size;
118 + uint32_t exception_base_addr;
119 + uint32_t argc;
120 + uint32_t heap_size;
121 + /*
122 + * Argc count for application.
123 + * Warning low bit scrambled in little-endian.
124 + */
125 + uint32_t argv[OCTEON_ARGV_MAX_ARGS];
126 +
127 + #define BOOT_FLAG_INIT_CORE (1 << 0)
128 + #define OCTEON_BL_FLAG_DEBUG (1 << 1)
129 + #define OCTEON_BL_FLAG_NO_MAGIC (1 << 2)
130 + /* If set, use uart1 for console */
131 + #define OCTEON_BL_FLAG_CONSOLE_UART1 (1 << 3)
132 + /* If set, use PCI console */
133 + #define OCTEON_BL_FLAG_CONSOLE_PCI (1 << 4)
134 + /* Call exit on break on serial port */
135 + #define OCTEON_BL_FLAG_BREAK (1 << 5)
136 +
137 + uint32_t core_mask;
138 + uint32_t flags;
139 + /* physical address of free memory descriptor block. */
140 + uint32_t phy_mem_desc_addr;
141 + /* DRAM size in megabytes. */
142 + uint32_t dram_size;
143 + /* CPU clock speed, in hz. */
144 + uint32_t eclock_hz;
145 + /* used to pass flags from app to debugger. */
146 + uint32_t debugger_flags_base_addr;
147 + /* SPI4 clock in hz. */
148 + uint32_t spi_clock_hz;
149 + /* DRAM clock speed, in hz. */
150 + uint32_t dclock_hz;
151 + uint8_t chip_rev_minor;
152 + uint8_t chip_rev_major;
153 + uint16_t chip_type;
154 + uint8_t board_rev_minor;
155 + uint8_t board_rev_major;
156 + uint16_t board_type;
157 +
158 + uint64_t unused1[4]; /* Not even filled in by bootloader. */
159 +
160 + uint64_t cvmx_desc_vaddr;
161 + #endif
109 162 };
110 163
111 164 union octeon_cvmemctl {
112 165 uint64_t u64;
113 166 struct {
114 167 /* RO 1 = BIST fail, 0 = BIST pass */
115 - uint64_t tlbbist:1;
168 + __BITFIELD_FIELD(uint64_t tlbbist:1,
116 169 /* RO 1 = BIST fail, 0 = BIST pass */
117 - uint64_t l1cbist:1;
170 + __BITFIELD_FIELD(uint64_t l1cbist:1,
118 171 /* RO 1 = BIST fail, 0 = BIST pass */
119 - uint64_t l1dbist:1;
172 + __BITFIELD_FIELD(uint64_t l1dbist:1,
120 173 /* RO 1 = BIST fail, 0 = BIST pass */
121 - uint64_t dcmbist:1;
174 + __BITFIELD_FIELD(uint64_t dcmbist:1,
122 175 /* RO 1 = BIST fail, 0 = BIST pass */
123 - uint64_t ptgbist:1;
176 + __BITFIELD_FIELD(uint64_t ptgbist:1,
124 177 /* RO 1 = BIST fail, 0 = BIST pass */
125 - uint64_t wbfbist:1;
178 + __BITFIELD_FIELD(uint64_t wbfbist:1,
126 179 /* Reserved */
127 - uint64_t reserved:22;
180 + __BITFIELD_FIELD(uint64_t reserved:17,
181 + /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU.
182 + * This field selects between the TLB replacement policies:
183 + * bitmask LRU or NLU. Bitmask LRU maintains a mask of
184 + * recently used TLB entries and avoids them as new entries
185 + * are allocated. NLU simply guarantees that the next
186 + * allocation is not the last used TLB entry. */
187 + __BITFIELD_FIELD(uint64_t tlbnlu:1,
188 + /* OCTEON II - Selects the bit in the counter used for
189 + * releasing a PAUSE. This counter trips every 2^(8+PAUSETIME)
190 + * cycles. If not already released, the cnMIPS II core will
191 + * always release a given PAUSE instruction within
192 + * 2^(8+PAUSETIME). If the counter trip happens to line up,
193 + * the cnMIPS II core may release the PAUSE instantly. */
194 + __BITFIELD_FIELD(uint64_t pausetime:3,
195 + /* OCTEON II - This field is an extension of
196 + * CvmMemCtl[DIDTTO] */
197 + __BITFIELD_FIELD(uint64_t didtto2:1,
128 198 /* R/W If set, marked write-buffer entries time out
129 199 * the same as other entries; if clear, marked
130 200 * write-buffer entries use the maximum timeout. */
131 - uint64_t dismarkwblongto:1;
201 + __BITFIELD_FIELD(uint64_t dismarkwblongto:1,
132 202 /* R/W If set, a merged store does not clear the
133 203 * write-buffer entry timeout state. */
134 - uint64_t dismrgclrwbto:1;
204 + __BITFIELD_FIELD(uint64_t dismrgclrwbto:1,
135 205 /* R/W Two bits that are the MSBs of the resultant
136 206 * CVMSEG LM word location for an IOBDMA. The other 8
137 207 * bits come from the SCRADDR field of the IOBDMA. */
138 - uint64_t iobdmascrmsb:2;
208 + __BITFIELD_FIELD(uint64_t iobdmascrmsb:2,
139 209 /* R/W If set, SYNCWS and SYNCS only order marked
140 210 * stores; if clear, SYNCWS and SYNCS only order
141 211 * unmarked stores. SYNCWSMARKED has no effect when
142 212 * DISSYNCWS is set. */
143 - uint64_t syncwsmarked:1;
213 + __BITFIELD_FIELD(uint64_t syncwsmarked:1,
144 214 /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
145 215 * SYNC. */
146 - uint64_t dissyncws:1;
216 + __BITFIELD_FIELD(uint64_t dissyncws:1,
147 217 /* R/W If set, no stall happens on write buffer
148 218 * full. */
149 - uint64_t diswbfst:1;
219 + __BITFIELD_FIELD(uint64_t diswbfst:1,
150 220 /* R/W If set (and SX set), supervisor-level
151 221 * loads/stores can use XKPHYS addresses with
152 222 * VA<48>==0 */
153 - uint64_t xkmemenas:1;
223 + __BITFIELD_FIELD(uint64_t xkmemenas:1,
154 224 /* R/W If set (and UX set), user-level loads/stores
155 225 * can use XKPHYS addresses with VA<48>==0 */
156 - uint64_t xkmemenau:1;
226 + __BITFIELD_FIELD(uint64_t xkmemenau:1,
157 227 /* R/W If set (and SX set), supervisor-level
158 228 * loads/stores can use XKPHYS addresses with
159 229 * VA<48>==1 */
160 - uint64_t xkioenas:1;
230 + __BITFIELD_FIELD(uint64_t xkioenas:1,
161 231 /* R/W If set (and UX set), user-level loads/stores
162 232 * can use XKPHYS addresses with VA<48>==1 */
163 - uint64_t xkioenau:1;
233 + __BITFIELD_FIELD(uint64_t xkioenau:1,
164 234 /* R/W If set, all stores act as SYNCW (NOMERGE must
165 235 * be set when this is set) RW, reset to 0. */
166 - uint64_t allsyncw:1;
236 + __BITFIELD_FIELD(uint64_t allsyncw:1,
167 237 /* R/W If set, no stores merge, and all stores reach
168 238 * the coherent bus in order. */
169 - uint64_t nomerge:1;
239 + __BITFIELD_FIELD(uint64_t nomerge:1,
170 240 /* R/W Selects the bit in the counter used for DID
171 241 * time-outs 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
172 242 * 2^14. Actual time-out is between 1x and 2x this
173 243 * interval. For example, with DIDTTO=3, expiration
174 244 * interval is between 16K and 32K. */
175 - uint64_t didtto:2;
245 + __BITFIELD_FIELD(uint64_t didtto:2,
176 246 /* R/W If set, the (mem) CSR clock never turns off. */
177 - uint64_t csrckalwys:1;
247 + __BITFIELD_FIELD(uint64_t csrckalwys:1,
178 248 /* R/W If set, mclk never turns off. */
179 - uint64_t mclkalwys:1;
249 + __BITFIELD_FIELD(uint64_t mclkalwys:1,
180 250 /* R/W Selects the bit in the counter used for write
181 251 * buffer flush time-outs (WBFLT+11) is the bit
182 252 * position in an internal counter used to determine
··· 256 182 * 2x this interval. For example, with WBFLT = 0, a
257 183 * write buffer expires between 2K and 4K cycles after
258 184 * the write buffer entry is allocated. */
259 - uint64_t wbfltime:3;
185 + __BITFIELD_FIELD(uint64_t wbfltime:3,
260 186 /* R/W If set, do not put Istream in the L2 cache. */
261 - uint64_t istrnol2:1;
187 + __BITFIELD_FIELD(uint64_t istrnol2:1,
262 188 /* R/W The write buffer threshold. */
263 - uint64_t wbthresh:4;
189 + __BITFIELD_FIELD(uint64_t wbthresh:4,
264 190 /* Reserved */
265 - uint64_t reserved2:2;
191 + __BITFIELD_FIELD(uint64_t reserved2:2,
266 192 /* R/W If set, CVMSEG is available for loads/stores in
267 193 * kernel/debug mode. */
268 - uint64_t cvmsegenak:1;
194 + __BITFIELD_FIELD(uint64_t cvmsegenak:1,
269 195 /* R/W If set, CVMSEG is available for loads/stores in
270 196 * supervisor mode. */
271 - uint64_t cvmsegenas:1;
197 + __BITFIELD_FIELD(uint64_t cvmsegenas:1,
272 198 /* R/W If set, CVMSEG is available for loads/stores in
273 199 * user mode. */
274 - uint64_t cvmsegenau:1;
200 + __BITFIELD_FIELD(uint64_t cvmsegenau:1,
275 201 /* R/W Size of local memory in cache blocks, 54 (6912
276 202 * bytes) is max legal value. */
277 - uint64_t lmemsz:6;
203 + __BITFIELD_FIELD(uint64_t lmemsz:6,
204 + ;)))))))))))))))))))))))))))))))))
278 205 } s;
279 206 };
··· 299 224 cvmx_read64_uint32(address ^ 4);
300 225 }
301 226
227 + /* Octeon multiplier save/restore routines from octeon_switch.S */
228 + void octeon_mult_save(void);
229 + void octeon_mult_restore(void);
230 + void octeon_mult_save_end(void);
231 + void octeon_mult_restore_end(void);
232 + void octeon_mult_save3(void);
233 + void octeon_mult_save3_end(void);
234 + void octeon_mult_save2(void);
235 + void octeon_mult_save2_end(void);
236 + void octeon_mult_restore3(void);
237 + void octeon_mult_restore3_end(void);
238 + void octeon_mult_restore2(void);
239 + void octeon_mult_restore2_end(void);
302 240
303 241 /**
304 242 * Read a 32bit value from the Octeon NPI register space
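The __BITFIELD_FIELD() wrapper comes from the asm/bitfield.h include added at the top of this hunk: each invocation emits its own field followed by the remainder of the list, and the little-endian variant emits the remainder first, which is what reverses the declaration order. A reduced illustration (demo type only, not part of the patch):

struct demo_pair {
	__BITFIELD_FIELD(uint64_t high:32,	/* MSBs of the register */
	__BITFIELD_FIELD(uint64_t low:32,	/* LSBs of the register */
	;))
};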
+2
arch/mips/include/asm/pci.h
··· 121 121 } 122 122 #endif 123 123 124 + #ifdef CONFIG_PCI_DOMAINS 124 125 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index 125 126 126 127 static inline int pci_proc_domain(struct pci_bus *bus) ··· 129 128 struct pci_controller *hose = bus->sysdata; 130 129 return hose->need_domain_info; 131 130 } 131 + #endif /* CONFIG_PCI_DOMAINS */ 132 132 133 133 #endif /* __KERNEL__ */ 134 134
+32 -51
arch/mips/include/asm/pgtable-bits.h
··· 35 35 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
36 36
37 37 /*
38 - * The following bits are directly used by the TLB hardware
38 + * The following bits are implemented by the TLB hardware
39 39 */
40 40 #define _PAGE_GLOBAL_SHIFT 0
41 41 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
··· 60 60 #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
61 61 #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
62 62
63 - #define _PAGE_SILENT_READ _PAGE_VALID
64 - #define _PAGE_SILENT_WRITE _PAGE_DIRTY
65 -
66 63 #define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
67 64
68 65 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
69 66
70 67 /*
71 - * The following are implemented by software
68 + * The following bits are implemented in software
72 69 */
73 - #define _PAGE_PRESENT_SHIFT 0
74 - #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
75 - #define _PAGE_READ_SHIFT 1
76 - #define _PAGE_READ (1 << _PAGE_READ_SHIFT)
77 - #define _PAGE_WRITE_SHIFT 2
78 - #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
79 - #define _PAGE_ACCESSED_SHIFT 3
80 - #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
81 - #define _PAGE_MODIFIED_SHIFT 4
82 - #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
70 + #define _PAGE_PRESENT_SHIFT (0)
71 + #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
72 + #define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
73 + #define _PAGE_READ (1 << _PAGE_READ_SHIFT)
74 + #define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
75 + #define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
76 + #define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
77 + #define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
78 + #define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
79 + #define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
83 80
84 81 /*
85 - * And these are the hardware TLB bits
82 + * The following bits are implemented by the TLB hardware
86 83 */
87 - #define _PAGE_GLOBAL_SHIFT 8
88 - #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
89 - #define _PAGE_VALID_SHIFT 9
90 - #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
91 - #define _PAGE_SILENT_READ (1 << _PAGE_VALID_SHIFT) /* synonym */
92 - #define _PAGE_DIRTY_SHIFT 10
84 + #define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4)
85 + #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
86 + #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
87 + #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
88 + #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
93 89 #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
94 - #define _PAGE_SILENT_WRITE (1 << _PAGE_DIRTY_SHIFT)
95 - #define _CACHE_UNCACHED_SHIFT 11
90 + #define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1)
96 91 #define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
97 - #define _CACHE_MASK (1 << _CACHE_UNCACHED_SHIFT)
92 + #define _CACHE_MASK _CACHE_UNCACHED
98 93
99 - #else /* 'Normal' r4K case */
94 + #define _PFN_SHIFT PAGE_SHIFT
95 +
96 + #else
100 97 /*
101 98 * When using the RI/XI bit support, we have 13 bits of flags below
102 99 * the physical address. The RI/XI bits are placed such that a SRL 5
··· 104 107
105 108 /*
106 109 * The following bits are implemented in software
107 - *
108 - * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
109 110 */
110 - #define _PAGE_PRESENT_SHIFT (0)
111 + #define _PAGE_PRESENT_SHIFT 0
111 112 #define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
112 113 #define _PAGE_READ_SHIFT (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
113 114 #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
··· 120 125 /* huge tlb page */
121 126 #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
122 127 #define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
123 - #else
124 - #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
125 - #define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
126 - #endif
127 -
128 - #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
129 - /* huge tlb page */
130 128 #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT + 1)
131 129 #define _PAGE_SPLITTING (1 << _PAGE_SPLITTING_SHIFT)
132 130 #else
131 + #define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT)
132 + #define _PAGE_HUGE ({BUG(); 1; }) /* Dummy value */
133 133 #define _PAGE_SPLITTING_SHIFT (_PAGE_HUGE_SHIFT)
134 134 #define _PAGE_SPLITTING ({BUG(); 1; }) /* Dummy value */
135 135 #endif
··· 139 149
140 150 #define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
141 151 #define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
142 -
143 152 #define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
144 153 #define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
145 - /* synonym */
146 - #define _PAGE_SILENT_READ (_PAGE_VALID)
147 -
148 - /* The MIPS dirty bit */
149 154 #define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
150 155 #define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
151 - #define _PAGE_SILENT_WRITE (_PAGE_DIRTY)
152 -
153 156 #define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
154 157 #define _CACHE_MASK (7 << _CACHE_SHIFT)
··· 150 167
151 168 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
152 169
153 - #ifndef _PFN_SHIFT
154 - #define _PFN_SHIFT PAGE_SHIFT
155 - #endif
170 + #define _PAGE_SILENT_READ _PAGE_VALID
171 + #define _PAGE_SILENT_WRITE _PAGE_DIRTY
172 +
156 173 #define _PFN_MASK (~((1 << (_PFN_SHIFT)) - 1))
157 174
158 175 #ifndef _PAGE_NO_READ
··· 161 178 #endif
162 179 #ifndef _PAGE_NO_EXEC
163 180 #define _PAGE_NO_EXEC ({BUG(); 0; })
164 - #endif
165 - #ifndef _PAGE_GLOBAL_SHIFT
166 - #define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
167 181 #endif
168 182
169 183
··· 246 266 #endif
247 267
248 268 #define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
249 - #define __WRITEABLE (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
269 + #define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
250 - #define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
271 + #define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
272 + _PFN_MASK | _CACHE_MASK)
252 273
253 274 #endif /* _ASM_PGTABLE_BITS_H */
+27 -19
arch/mips/include/asm/pgtable.h
··· 99 99 100 100 #define htw_stop() \ 101 101 do { \ 102 - if (cpu_has_htw) \ 103 - write_c0_pwctl(read_c0_pwctl() & \ 104 - ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ 102 + unsigned long flags; \ 103 + \ 104 + if (cpu_has_htw) { \ 105 + local_irq_save(flags); \ 106 + if(!raw_current_cpu_data.htw_seq++) { \ 107 + write_c0_pwctl(read_c0_pwctl() & \ 108 + ~(1 << MIPS_PWCTL_PWEN_SHIFT)); \ 109 + back_to_back_c0_hazard(); \ 110 + } \ 111 + local_irq_restore(flags); \ 112 + } \ 105 113 } while(0) 106 114 107 115 #define htw_start() \ 108 116 do { \ 109 - if (cpu_has_htw) \ 110 - write_c0_pwctl(read_c0_pwctl() | \ 111 - (1 << MIPS_PWCTL_PWEN_SHIFT)); \ 112 - } while(0) 113 - 114 - 115 - #define htw_reset() \ 116 - do { \ 117 + unsigned long flags; \ 118 + \ 117 119 if (cpu_has_htw) { \ 118 - htw_stop(); \ 119 - back_to_back_c0_hazard(); \ 120 - htw_start(); \ 121 - back_to_back_c0_hazard(); \ 120 + local_irq_save(flags); \ 121 + if (!--raw_current_cpu_data.htw_seq) { \ 122 + write_c0_pwctl(read_c0_pwctl() | \ 123 + (1 << MIPS_PWCTL_PWEN_SHIFT)); \ 124 + back_to_back_c0_hazard(); \ 125 + } \ 126 + local_irq_restore(flags); \ 122 127 } \ 123 128 } while(0) 129 + 124 130 125 131 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, 126 132 pte_t pteval); ··· 159 153 { 160 154 pte_t null = __pte(0); 161 155 156 + htw_stop(); 162 157 /* Preserve global status for the pair */ 163 158 if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL) 164 159 null.pte_low = null.pte_high = _PAGE_GLOBAL; 165 160 166 161 set_pte_at(mm, addr, ptep, null); 167 - htw_reset(); 162 + htw_start(); 168 163 } 169 164 #else 170 165 ··· 195 188 196 189 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) 197 190 { 191 + htw_stop(); 198 192 #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX) 199 193 /* Preserve global status for the pair */ 200 194 if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL) ··· 203 195 else 204 196 #endif 205 197 set_pte_at(mm, addr, ptep, __pte(0)); 206 - htw_reset(); 198 + htw_start(); 207 199 } 208 200 #endif 209 201 ··· 342 334 return pte; 343 335 } 344 336 345 - #ifdef _PAGE_HUGE 337 + #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 346 338 static inline int pte_huge(pte_t pte) { return pte_val(pte) & _PAGE_HUGE; } 347 339 348 340 static inline pte_t pte_mkhuge(pte_t pte) ··· 350 342 pte_val(pte) |= _PAGE_HUGE; 351 343 return pte; 352 344 } 353 - #endif /* _PAGE_HUGE */ 345 + #endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */ 354 346 #endif 355 347 static inline int pte_special(pte_t pte) { return 0; } 356 348 static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
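The htw_seq counter is what makes nesting safe: only the outermost stop/start pair touches PWCtl, so e.g. a pte_clear() inside an ASID switch (both bracketed above and in mmu_context.h) cannot re-enable the walker early. Invented call chain for illustration:

static void demo_inner(void)
{
	htw_stop();		/* htw_seq 1 -> 2: hardware untouched */
	/* ... rewrite a PTE pair ... */
	htw_start();		/* htw_seq 2 -> 1: still untouched */
}

static void demo_outer(void)
{
	htw_stop();		/* htw_seq 0 -> 1: walker disabled */
	demo_inner();
	htw_start();		/* htw_seq 1 -> 0: walker re-enabled */
}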
+13 -6
arch/mips/include/asm/processor.h
··· 54 54 #define TASK_SIZE 0x7fff8000UL 55 55 #endif 56 56 57 - #ifdef __KERNEL__ 58 57 #define STACK_TOP_MAX TASK_SIZE 59 - #endif 60 58 61 59 #define TASK_IS_32BIT_ADDR 1 62 60 ··· 71 73 #define TASK_SIZE32 0x7fff8000UL 72 74 #define TASK_SIZE64 0x10000000000UL 73 75 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) 74 - 75 - #ifdef __KERNEL__ 76 76 #define STACK_TOP_MAX TASK_SIZE64 77 - #endif 78 - 79 77 80 78 #define TASK_SIZE_OF(tsk) \ 81 79 (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64) ··· 205 211 unsigned long cop2_gfm_poly; 206 212 /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */ 207 213 unsigned long cop2_gfm_result[2]; 214 + /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */ 215 + unsigned long cop2_sha3[2]; 208 216 }; 209 217 #define COP2_INIT \ 210 218 .cp2 = {0,}, ··· 394 398 #define prefetchw(x) __builtin_prefetch((x), 1, 1) 395 399 396 400 #endif 401 + 402 + /* 403 + * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options 404 + * to the prctl syscall. 405 + */ 406 + extern int mips_get_process_fp_mode(struct task_struct *task); 407 + extern int mips_set_process_fp_mode(struct task_struct *task, 408 + unsigned int value); 409 + 410 + #define GET_FP_MODE(task) mips_get_process_fp_mode(task) 411 + #define SET_FP_MODE(task,value) mips_set_process_fp_mode(task, value) 397 412 398 413 #endif /* _ASM_PROCESSOR_H */
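User space reaches these hooks through prctl(); PR_SET_FP_MODE/PR_GET_FP_MODE and the PR_FP_MODE_FR flag are added by this series in uapi/linux/prctl.h. A minimal, illustrative caller that requests 64-bit FP registers:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
#define PR_SET_FP_MODE	45
#define PR_GET_FP_MODE	46
#define PR_FP_MODE_FR	(1 << 0)	/* 64-bit FP registers (FR=1) */
#endif

int main(void)
{
	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

	if (mode >= 0 && !(mode & PR_FP_MODE_FR))
		mode = prctl(PR_SET_FP_MODE, mode | PR_FP_MODE_FR, 0, 0, 0);

	printf("PR_GET_FP_MODE: %d\n", prctl(PR_GET_FP_MODE, 0, 0, 0, 0));
	return mode < 0;
}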
-7
arch/mips/include/asm/prom.h
··· 24 24 extern void __dt_setup_arch(void *bph); 25 25 extern int __dt_register_buses(const char *bus0, const char *bus1); 26 26 27 - #define dt_setup_arch(sym) \ 28 - ({ \ 29 - extern char __dtb_##sym##_begin[]; \ 30 - \ 31 - __dt_setup_arch(__dtb_##sym##_begin); \ 32 - }) 33 - 34 27 #else /* CONFIG_OF */ 35 28 static inline void device_tree_init(void) { } 36 29 #endif /* CONFIG_OF */
+2 -2
arch/mips/include/asm/ptrace.h
··· 40 40 unsigned long cp0_cause; 41 41 unsigned long cp0_epc; 42 42 #ifdef CONFIG_CPU_CAVIUM_OCTEON 43 - unsigned long long mpl[3]; /* MTM{0,1,2} */ 44 - unsigned long long mtp[3]; /* MTP{0,1,2} */ 43 + unsigned long long mpl[6]; /* MTM{0-5} */ 44 + unsigned long long mtp[6]; /* MTP{0-5} */ 45 45 #endif 46 46 } __aligned(8); 47 47
+148 -2
arch/mips/include/asm/r4kcache.h
··· 14 14 15 15 #include <asm/asm.h> 16 16 #include <asm/cacheops.h> 17 + #include <asm/compiler.h> 17 18 #include <asm/cpu-features.h> 18 19 #include <asm/cpu-type.h> 19 20 #include <asm/mipsmtregs.h> ··· 40 39 __asm__ __volatile__( \ 41 40 " .set push \n" \ 42 41 " .set noreorder \n" \ 43 - " .set arch=r4000 \n" \ 42 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 44 43 " cache %0, %1 \n" \ 45 44 " .set pop \n" \ 46 45 : \ ··· 148 147 __asm__ __volatile__( \ 149 148 " .set push \n" \ 150 149 " .set noreorder \n" \ 151 - " .set arch=r4000 \n" \ 150 + " .set "MIPS_ISA_ARCH_LEVEL" \n" \ 152 151 "1: cache %0, (%1) \n" \ 153 152 "2: .set pop \n" \ 154 153 " .section __ex_table,\"a\" \n" \ ··· 219 218 cache_op(Page_Invalidate_T, addr); 220 219 } 221 220 221 + #ifndef CONFIG_CPU_MIPSR6 222 222 #define cache16_unroll32(base,op) \ 223 223 __asm__ __volatile__( \ 224 224 " .set push \n" \ ··· 323 321 : \ 324 322 : "r" (base), \ 325 323 "i" (op)); 324 + 325 + #else 326 + /* 327 + * MIPS R6 changed the cache opcode and moved to a 8-bit offset field. 328 + * This means we now need to increment the base register before we flush 329 + * more cache lines 330 + */ 331 + #define cache16_unroll32(base,op) \ 332 + __asm__ __volatile__( \ 333 + " .set push\n" \ 334 + " .set noreorder\n" \ 335 + " .set mips64r6\n" \ 336 + " .set noat\n" \ 337 + " cache %1, 0x000(%0); cache %1, 0x010(%0)\n" \ 338 + " cache %1, 0x020(%0); cache %1, 0x030(%0)\n" \ 339 + " cache %1, 0x040(%0); cache %1, 0x050(%0)\n" \ 340 + " cache %1, 0x060(%0); cache %1, 0x070(%0)\n" \ 341 + " cache %1, 0x080(%0); cache %1, 0x090(%0)\n" \ 342 + " cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n" \ 343 + " cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n" \ 344 + " cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n" \ 345 + " addiu $1, $0, 0x100 \n" \ 346 + " cache %1, 0x000($1); cache %1, 0x010($1)\n" \ 347 + " cache %1, 0x020($1); cache %1, 0x030($1)\n" \ 348 + " cache %1, 0x040($1); cache %1, 0x050($1)\n" \ 349 + " cache %1, 0x060($1); cache %1, 0x070($1)\n" \ 350 + " cache %1, 0x080($1); cache %1, 0x090($1)\n" \ 351 + " cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n" \ 352 + " cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n" \ 353 + " cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n" \ 354 + " .set pop\n" \ 355 + : \ 356 + : "r" (base), \ 357 + "i" (op)); 358 + 359 + #define cache32_unroll32(base,op) \ 360 + __asm__ __volatile__( \ 361 + " .set push\n" \ 362 + " .set noreorder\n" \ 363 + " .set mips64r6\n" \ 364 + " .set noat\n" \ 365 + " cache %1, 0x000(%0); cache %1, 0x020(%0)\n" \ 366 + " cache %1, 0x040(%0); cache %1, 0x060(%0)\n" \ 367 + " cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n" \ 368 + " cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n" \ 369 + " addiu $1, %0, 0x100\n" \ 370 + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 371 + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 372 + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 373 + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 374 + " addiu $1, $1, 0x100\n" \ 375 + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 376 + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 377 + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 378 + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 379 + " addiu $1, $1, 0x100\n" \ 380 + " cache %1, 0x000($1); cache %1, 0x020($1)\n" \ 381 + " cache %1, 0x040($1); cache %1, 0x060($1)\n" \ 382 + " cache %1, 0x080($1); cache %1, 0x0a0($1)\n" \ 383 + " cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n" \ 384 + " .set pop\n" \ 385 + : \ 386 + : "r" (base), \ 387 + "i" (op)); 388 + 389 + #define 
cache64_unroll32(base,op) \ 390 + __asm__ __volatile__( \ 391 + " .set push\n" \ 392 + " .set noreorder\n" \ 393 + " .set mips64r6\n" \ 394 + " .set noat\n" \ 395 + " cache %1, 0x000(%0); cache %1, 0x040(%0)\n" \ 396 + " cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n" \ 397 + " addiu $1, %0, 0x100\n" \ 398 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 399 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 400 + " addiu $1, %0, 0x100\n" \ 401 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 402 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 403 + " addiu $1, %0, 0x100\n" \ 404 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 405 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 406 + " addiu $1, %0, 0x100\n" \ 407 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 408 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 409 + " addiu $1, %0, 0x100\n" \ 410 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 411 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 412 + " addiu $1, %0, 0x100\n" \ 413 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 414 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 415 + " addiu $1, %0, 0x100\n" \ 416 + " cache %1, 0x000($1); cache %1, 0x040($1)\n" \ 417 + " cache %1, 0x080($1); cache %1, 0x0c0($1)\n" \ 418 + " .set pop\n" \ 419 + : \ 420 + : "r" (base), \ 421 + "i" (op)); 422 + 423 + #define cache128_unroll32(base,op) \ 424 + __asm__ __volatile__( \ 425 + " .set push\n" \ 426 + " .set noreorder\n" \ 427 + " .set mips64r6\n" \ 428 + " .set noat\n" \ 429 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 430 + " addiu $1, %0, 0x100\n" \ 431 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 432 + " addiu $1, %0, 0x100\n" \ 433 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 434 + " addiu $1, %0, 0x100\n" \ 435 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 436 + " addiu $1, %0, 0x100\n" \ 437 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 438 + " addiu $1, %0, 0x100\n" \ 439 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 440 + " addiu $1, %0, 0x100\n" \ 441 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 442 + " addiu $1, %0, 0x100\n" \ 443 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 444 + " addiu $1, %0, 0x100\n" \ 445 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 446 + " addiu $1, %0, 0x100\n" \ 447 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 448 + " addiu $1, %0, 0x100\n" \ 449 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 450 + " addiu $1, %0, 0x100\n" \ 451 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 452 + " addiu $1, %0, 0x100\n" \ 453 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 454 + " addiu $1, %0, 0x100\n" \ 455 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 456 + " addiu $1, %0, 0x100\n" \ 457 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 458 + " addiu $1, %0, 0x100\n" \ 459 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 460 + " addiu $1, %0, 0x100\n" \ 461 + " cache %1, 0x000(%0); cache %1, 0x080(%0)\n" \ 462 + " addiu $1, %0, 0x100\n" \ 463 + " .set pop\n" \ 464 + : \ 465 + : "r" (base), \ 466 + "i" (op)); 467 + #endif /* CONFIG_CPU_MIPSR6 */ 326 468 327 469 /* 328 470 * Perform the cache operation specified by op using a user mode virtual
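The CONFIG_CPU_MIPSR6 variants above exist because, as the comment says, R6 shrank the CACHE instruction's offset field to 8 bits, so a single base register can no longer reach all 32 lines of a large flush; the macros instead walk $1 (the at register) through the range in 0x100-byte hops. The same addressing arithmetic in a hedged, stand-alone C sketch (cache_line_op() is a hypothetical stand-in for one CACHE instruction):

    #include <stdio.h>

    static void cache_line_op(unsigned long base, unsigned long off)
    {
            /* stand-in for a single "cache op, off(base)" instruction */
            printf("cache 0x%03lx(0x%08lx)\n", off, base);
    }

    /* Flush 32 lines of `lsize` bytes from `base`, keeping every immediate
     * offset within the reduced R6 field (<= 0x0ff), rebasing in 0x100-byte
     * hops just as the unrolled macros do with $1. */
    static void r6_flush32(unsigned long base, unsigned long lsize)
    {
            unsigned long reg = base, off = 0;
            int i;

            for (i = 0; i < 32; i++, off += lsize) {
                    while (off > 0x0ff) {   /* offset no longer encodable ... */
                            reg += 0x100;   /* ... so rebase: "addiu $1, $1, 0x100" */
                            off -= 0x100;
                    }
                    cache_line_op(reg, off);
            }
    }

    int main(void)
    {
            r6_flush32(0x80000000UL, 32);   /* the cache32_unroll32 pattern */
            return 0;
    }

Note the sketch chains every rebase off the previous one, the way cache32_unroll32 does with "addiu $1, $1, 0x100"; an increment recomputed from the original base each time would keep revisiting the same 0x100-byte block.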
+6 -2
arch/mips/include/asm/sgialib.h
··· 11 11 #ifndef _ASM_SGIALIB_H 12 12 #define _ASM_SGIALIB_H 13 13 14 + #include <linux/compiler.h> 14 15 #include <asm/sgiarcs.h> 15 16 16 17 extern struct linux_romvec *romvec; ··· 71 70 extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt); 72 71 73 72 /* Misc. routines. */ 74 - extern VOID ArcReboot(VOID) __attribute__((noreturn)); 75 - extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn)); 73 + extern VOID ArcHalt(VOID) __noreturn; 74 + extern VOID ArcPowerDown(VOID) __noreturn; 75 + extern VOID ArcRestart(VOID) __noreturn; 76 + extern VOID ArcReboot(VOID) __noreturn; 77 + extern VOID ArcEnterInteractiveMode(VOID) __noreturn; 76 78 extern VOID ArcFlushAllCaches(VOID); 77 79 extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID); 78 80
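The ARC prototypes now use the kernel's __noreturn from <linux/compiler.h> (hence the added include) rather than an open-coded attribute, and gain declarations for ArcHalt/ArcPowerDown/ArcRestart. What the annotation buys, in a tiny stand-alone example using the plain GCC spelling that __noreturn aliases:

    #include <stdio.h>
    #include <stdlib.h>

    __attribute__((noreturn)) static void die(void)
    {
            exit(1);
    }

    static int must_be_positive(int x)
    {
            if (x > 0)
                    return x;
            die();
            /* no return needed here: the compiler knows die() never returns */
    }

    int main(void)
    {
            printf("%d\n", must_be_positive(3));
            return 0;
    }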
-29
arch/mips/include/asm/siginfo.h
··· 1 - /* 2 - * This file is subject to the terms and conditions of the GNU General Public 3 - * License. See the file "COPYING" in the main directory of this archive 4 - * for more details. 5 - * 6 - * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle 7 - * Copyright (C) 2000, 2001 Silicon Graphics, Inc. 8 - */ 9 - #ifndef _ASM_SIGINFO_H 10 - #define _ASM_SIGINFO_H 11 - 12 - #include <uapi/asm/siginfo.h> 13 - 14 - 15 - /* 16 - * Duplicated here because of <asm-generic/siginfo.h> braindamage ... 17 - */ 18 - #include <linux/string.h> 19 - 20 - static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) 21 - { 22 - if (from->si_code < 0) 23 - memcpy(to, from, sizeof(*to)); 24 - else 25 - /* _sigchld is currently the largest know union member */ 26 - memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld)); 27 - } 28 - 29 - #endif /* _ASM_SIGINFO_H */
+26 -29
arch/mips/include/asm/spinlock.h
··· 89 89 " subu %[ticket], %[ticket], 1 \n" 90 90 " .previous \n" 91 91 " .set pop \n" 92 - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 92 + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), 93 93 [serving_now_ptr] "+m" (lock->h.serving_now), 94 94 [ticket] "=&r" (tmp), 95 95 [my_ticket] "=&r" (my_ticket) ··· 122 122 " subu %[ticket], %[ticket], 1 \n" 123 123 " .previous \n" 124 124 " .set pop \n" 125 - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 125 + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), 126 126 [serving_now_ptr] "+m" (lock->h.serving_now), 127 127 [ticket] "=&r" (tmp), 128 128 [my_ticket] "=&r" (my_ticket) ··· 164 164 " li %[ticket], 0 \n" 165 165 " .previous \n" 166 166 " .set pop \n" 167 - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 167 + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), 168 168 [ticket] "=&r" (tmp), 169 169 [my_ticket] "=&r" (tmp2), 170 170 [now_serving] "=&r" (tmp3) ··· 188 188 " li %[ticket], 0 \n" 189 189 " .previous \n" 190 190 " .set pop \n" 191 - : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock), 191 + : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock), 192 192 [ticket] "=&r" (tmp), 193 193 [my_ticket] "=&r" (tmp2), 194 194 [now_serving] "=&r" (tmp3) ··· 235 235 " beqzl %1, 1b \n" 236 236 " nop \n" 237 237 " .set reorder \n" 238 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 239 - : GCC_OFF12_ASM() (rw->lock) 238 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 239 + : GCC_OFF_SMALL_ASM() (rw->lock) 240 240 : "memory"); 241 241 } else { 242 242 do { ··· 245 245 " bltz %1, 1b \n" 246 246 " addu %1, 1 \n" 247 247 "2: sc %1, %0 \n" 248 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 249 - : GCC_OFF12_ASM() (rw->lock) 248 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 249 + : GCC_OFF_SMALL_ASM() (rw->lock) 250 250 : "memory"); 251 251 } while (unlikely(!tmp)); 252 252 } ··· 254 254 smp_llsc_mb(); 255 255 } 256 256 257 - /* Note the use of sub, not subu which will make the kernel die with an 258 - overflow exception if we ever try to unlock an rwlock that is already 259 - unlocked or is being held by a writer. 
*/ 260 257 static inline void arch_read_unlock(arch_rwlock_t *rw) 261 258 { 262 259 unsigned int tmp; ··· 263 266 if (R10000_LLSC_WAR) { 264 267 __asm__ __volatile__( 265 268 "1: ll %1, %2 # arch_read_unlock \n" 266 - " sub %1, 1 \n" 269 + " addiu %1, 1 \n" 267 270 " sc %1, %0 \n" 268 271 " beqzl %1, 1b \n" 269 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 270 - : GCC_OFF12_ASM() (rw->lock) 272 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 273 + : GCC_OFF_SMALL_ASM() (rw->lock) 271 274 : "memory"); 272 275 } else { 273 276 do { 274 277 __asm__ __volatile__( 275 278 "1: ll %1, %2 # arch_read_unlock \n" 276 - " sub %1, 1 \n" 279 + " addiu %1, -1 \n" 277 280 " sc %1, %0 \n" 278 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 279 - : GCC_OFF12_ASM() (rw->lock) 281 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 282 + : GCC_OFF_SMALL_ASM() (rw->lock) 280 283 : "memory"); 281 284 } while (unlikely(!tmp)); 282 285 } ··· 296 299 " beqzl %1, 1b \n" 297 300 " nop \n" 298 301 " .set reorder \n" 299 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 300 - : GCC_OFF12_ASM() (rw->lock) 302 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 303 + : GCC_OFF_SMALL_ASM() (rw->lock) 301 304 : "memory"); 302 305 } else { 303 306 do { ··· 306 309 " bnez %1, 1b \n" 307 310 " lui %1, 0x8000 \n" 308 311 "2: sc %1, %0 \n" 309 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp) 310 - : GCC_OFF12_ASM() (rw->lock) 312 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp) 313 + : GCC_OFF_SMALL_ASM() (rw->lock) 311 314 : "memory"); 312 315 } while (unlikely(!tmp)); 313 316 } ··· 346 349 __WEAK_LLSC_MB 347 350 " li %2, 1 \n" 348 351 "2: \n" 349 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 350 - : GCC_OFF12_ASM() (rw->lock) 352 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 353 + : GCC_OFF_SMALL_ASM() (rw->lock) 351 354 : "memory"); 352 355 } else { 353 356 __asm__ __volatile__( ··· 363 366 __WEAK_LLSC_MB 364 367 " li %2, 1 \n" 365 368 "2: \n" 366 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 367 - : GCC_OFF12_ASM() (rw->lock) 369 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 370 + : GCC_OFF_SMALL_ASM() (rw->lock) 368 371 : "memory"); 369 372 } 370 373 ··· 390 393 " li %2, 1 \n" 391 394 " .set reorder \n" 392 395 "2: \n" 393 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 394 - : GCC_OFF12_ASM() (rw->lock) 396 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret) 397 + : GCC_OFF_SMALL_ASM() (rw->lock) 395 398 : "memory"); 396 399 } else { 397 400 do { ··· 403 406 " sc %1, %0 \n" 404 407 " li %2, 1 \n" 405 408 "2: \n" 406 - : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), 409 + : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), 407 410 "=&r" (ret) 408 - : GCC_OFF12_ASM() (rw->lock) 411 + : GCC_OFF_SMALL_ASM() (rw->lock) 409 412 : "memory"); 410 413 } while (unlikely(!tmp)); 411 414
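Two mechanical changes run through the hunks above: GCC_OFF12_ASM() becomes GCC_OFF_SMALL_ASM() (with R6 the LL/SC offset field shrinks further, so "12" stopped describing every case), and the `sub reg, imm` pseudo-instruction in arch_read_unlock() becomes an explicit `addiu` with a negative immediate, since the assembler expands that `sub` through the trapping ADDI that R6 removed. The unlock itself is just an LL/SC retry loop around a decrement; a hedged C11 rendering of the same pattern:

    #include <stdatomic.h>
    #include <stdio.h>

    /* What the arch_read_unlock() loop computes, modelled with C11
     * atomics: retry the decrement until the conditional store wins. */
    static atomic_uint lock_word;

    static void read_unlock_model(void)
    {
            unsigned int old, new;

            do {
                    old = atomic_load_explicit(&lock_word, memory_order_relaxed);
                    new = old - 1;          /* the "addiu %1, -1" step */
            } while (!atomic_compare_exchange_weak_explicit(&lock_word, &old, new,
                                                            memory_order_release,
                                                            memory_order_relaxed));
    }

    int main(void)
    {
            atomic_store(&lock_word, 2);    /* two readers hold the lock */
            read_unlock_model();
            printf("readers left: %u\n", (unsigned)atomic_load(&lock_word));
            return 0;
    }

One behavioural note from the deleted comment: the old `sub` was chosen deliberately so that unlocking an already-unlocked rwlock would die with an overflow exception; the wrapping `addiu` gives up that sanity check.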
+2 -2
arch/mips/include/asm/spram.h
··· 1 1 #ifndef _MIPS_SPRAM_H 2 2 #define _MIPS_SPRAM_H 3 3 4 - #ifdef CONFIG_CPU_MIPSR2 4 + #if defined(CONFIG_MIPS_SPRAM) 5 5 extern __init void spram_config(void); 6 6 #else 7 7 static inline void spram_config(void) { }; 8 - #endif /* CONFIG_CPU_MIPSR2 */ 8 + #endif /* CONFIG_MIPS_SPRAM */ 9 9 10 10 #endif /* _MIPS_SPRAM_H */
+4 -4
arch/mips/include/asm/stackframe.h
··· 40 40 LONG_S v1, PT_HI(sp) 41 41 mflhxu v1 42 42 LONG_S v1, PT_ACX(sp) 43 - #else 43 + #elif !defined(CONFIG_CPU_MIPSR6) 44 44 mfhi v1 45 45 #endif 46 46 #ifdef CONFIG_32BIT ··· 50 50 LONG_S $10, PT_R10(sp) 51 51 LONG_S $11, PT_R11(sp) 52 52 LONG_S $12, PT_R12(sp) 53 - #ifndef CONFIG_CPU_HAS_SMARTMIPS 53 + #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) 54 54 LONG_S v1, PT_HI(sp) 55 55 mflo v1 56 56 #endif ··· 58 58 LONG_S $14, PT_R14(sp) 59 59 LONG_S $15, PT_R15(sp) 60 60 LONG_S $24, PT_R24(sp) 61 - #ifndef CONFIG_CPU_HAS_SMARTMIPS 61 + #if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6) 62 62 LONG_S v1, PT_LO(sp) 63 63 #endif 64 64 #ifdef CONFIG_CPU_CAVIUM_OCTEON ··· 226 226 mtlhx $24 227 227 LONG_L $24, PT_LO(sp) 228 228 mtlhx $24 229 - #else 229 + #elif !defined(CONFIG_CPU_MIPSR6) 230 230 LONG_L $24, PT_LO(sp) 231 231 mtlo $24 232 232 LONG_L $24, PT_HI(sp)
+6 -3
arch/mips/include/asm/switch_to.h
··· 75 75 #endif 76 76 77 77 #define __clear_software_ll_bit() \ 78 - do { \ 79 - if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc) \ 80 - ll_bit = 0; \ 78 + do { if (cpu_has_rw_llb) { \ 79 + write_c0_lladdr(0); \ 80 + } else { \ 81 + if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\ 82 + ll_bit = 0; \ 83 + } \ 81 84 } while (0) 82 85 83 86 #define switch_to(prev, next, last) \
+1 -1
arch/mips/include/asm/thread_info.h
··· 28 28 unsigned long tp_value; /* thread pointer */ 29 29 __u32 cpu; /* current CPU */ 30 30 int preempt_count; /* 0 => preemptable, <0 => BUG */ 31 - 31 + int r2_emul_return; /* 1 => Returning from R2 emulator */ 32 32 mm_segment_t addr_limit; /* 33 33 * thread address space limit: 34 34 * 0x7fffffff for user-thead
+14 -10
arch/mips/include/uapi/asm/inst.h
··· 21 21 enum major_op { 22 22 spec_op, bcond_op, j_op, jal_op, 23 23 beq_op, bne_op, blez_op, bgtz_op, 24 - addi_op, addiu_op, slti_op, sltiu_op, 24 + addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op, 25 25 andi_op, ori_op, xori_op, lui_op, 26 26 cop0_op, cop1_op, cop2_op, cop1x_op, 27 27 beql_op, bnel_op, blezl_op, bgtzl_op, 28 - daddi_op, daddiu_op, ldl_op, ldr_op, 28 + daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op, 29 29 spec2_op, jalx_op, mdmx_op, spec3_op, 30 30 lb_op, lh_op, lwl_op, lw_op, 31 31 lbu_op, lhu_op, lwr_op, lwu_op, 32 32 sb_op, sh_op, swl_op, sw_op, 33 33 sdl_op, sdr_op, swr_op, cache_op, 34 - ll_op, lwc1_op, lwc2_op, pref_op, 35 - lld_op, ldc1_op, ldc2_op, ld_op, 36 - sc_op, swc1_op, swc2_op, major_3b_op, 37 - scd_op, sdc1_op, sdc2_op, sd_op 34 + ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op, 35 + lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op, 36 + sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op, 37 + scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op 38 38 }; 39 39 40 40 /* ··· 83 83 swe_op = 0x1f, bshfl_op = 0x20, 84 84 swle_op = 0x21, swre_op = 0x22, 85 85 prefe_op = 0x23, dbshfl_op = 0x24, 86 - lbue_op = 0x28, lhue_op = 0x29, 87 - lbe_op = 0x2c, lhe_op = 0x2d, 88 - lle_op = 0x2e, lwe_op = 0x2f, 86 + cache6_op = 0x25, sc6_op = 0x26, 87 + scd6_op = 0x27, lbue_op = 0x28, 88 + lhue_op = 0x29, lbe_op = 0x2c, 89 + lhe_op = 0x2d, lle_op = 0x2e, 90 + lwe_op = 0x2f, pref6_op = 0x35, 91 + ll6_op = 0x36, lld6_op = 0x37, 89 92 rdhwr_op = 0x3b 90 93 }; 91 94 ··· 115 112 mfhc_op = 0x03, mtc_op = 0x04, 116 113 dmtc_op = 0x05, ctc_op = 0x06, 117 114 mthc0_op = 0x06, mthc_op = 0x07, 118 - bc_op = 0x08, cop_op = 0x10, 115 + bc_op = 0x08, bc1eqz_op = 0x09, 116 + bc1nez_op = 0x0d, cop_op = 0x10, 119 117 copm_op = 0x18 120 118 }; 121 119
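The aliased enumerators above (cbcond0_op = addi_op, bc6_op = lwc2_op, and so on) exist because R6 reassigned those primary-opcode slots to compact branches, so the same 6-bit value now names different instructions depending on the ISA. A hedged decoder sketch of what that means in practice:

    #include <stdbool.h>
    #include <stdio.h>

    enum major_op {                         /* subset of the enum above */
            addi_op = 0x08, cbcond0_op = addi_op,  /* one slot, two meanings */
            lwc2_op = 0x32, bc6_op = lwc2_op,
    };

    static const char *name_op(unsigned op, bool r6)
    {
            switch (op) {
            case addi_op:   /* == cbcond0_op */
                    return r6 ? "BOVC/BEQC/BEQZALC (cbcond0)" : "ADDI";
            case lwc2_op:   /* == bc6_op */
                    return r6 ? "BC (bc6)" : "LWC2";
            default:
                    return "?";
            }
    }

    int main(void)
    {
            printf("0x08 pre-R6: %s\n", name_op(0x08, false));
            printf("0x08 on R6:  %s\n", name_op(0x08, true));
            return 0;
    }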
+3 -8
arch/mips/include/uapi/asm/siginfo.h
··· 16 16 #define HAVE_ARCH_SIGINFO_T 17 17 18 18 /* 19 - * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked 20 - * by design ... 21 - */ 22 - #define HAVE_ARCH_COPY_SIGINFO 23 - struct siginfo; 24 - 25 - /* 26 19 * Careful to keep union _sifields from shifting ... 27 20 */ 28 21 #if _MIPS_SZLONG == 32 ··· 28 35 29 36 #define __ARCH_SIGSYS 30 37 31 - #include <asm-generic/siginfo.h> 38 + #include <uapi/asm-generic/siginfo.h> 32 39 40 + /* We can't use generic siginfo_t, because our si_code and si_errno are swapped */ 33 41 typedef struct siginfo { 34 42 int si_signo; 35 43 int si_code; ··· 118 124 #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */ 119 125 #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */ 120 126 127 + #include <asm-generic/siginfo.h> 121 128 122 129 #endif /* _UAPI_ASM_SIGINFO_H */
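The header now pulls in the uapi asm-generic definitions and keeps only what genuinely differs on MIPS: as the new comment says, si_code and si_errno are swapped relative to every other architecture, which is why the generic siginfo_t cannot simply be adopted. Schematically (fields abbreviated, the _sifields union follows both):

    struct siginfo_generic {        /* every other architecture */
            int si_signo;
            int si_errno;
            int si_code;
    };

    struct siginfo_mips {           /* MIPS: code and errno are swapped */
            int si_signo;
            int si_code;
            int si_errno;
    };

The kernel-internal copy_siginfo() duplicate that used to live in asm/siginfo.h is dropped in the same series; see the deleted file above.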
+2 -1
arch/mips/kernel/Makefile
··· 52 52 obj-$(CONFIG_MIPS_CMP) += smp-cmp.o 53 53 obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o 54 54 obj-$(CONFIG_MIPS_GIC_IPI) += smp-gic.o 55 - obj-$(CONFIG_CPU_MIPSR2) += spram.o 55 + obj-$(CONFIG_MIPS_SPRAM) += spram.o 56 56 57 57 obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o 58 58 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o ··· 90 90 obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o 91 91 obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o 92 92 obj-$(CONFIG_MIPS_MACHINE) += mips_machine.o 93 + obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o 93 94 94 95 CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) 95 96
+2
arch/mips/kernel/asm-offsets.c
··· 97 97 OFFSET(TI_TP_VALUE, thread_info, tp_value); 98 98 OFFSET(TI_CPU, thread_info, cpu); 99 99 OFFSET(TI_PRE_COUNT, thread_info, preempt_count); 100 + OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return); 100 101 OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); 101 102 OFFSET(TI_REGS, thread_info, regs); 102 103 DEFINE(_THREAD_SIZE, THREAD_SIZE); ··· 382 381 OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); 383 382 OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); 384 383 OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); 384 + OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3); 385 385 OFFSET(THREAD_CP2, task_struct, thread.cp2); 386 386 OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); 387 387 BLANK();
+250 -38
arch/mips/kernel/branch.c
··· 16 16 #include <asm/fpu.h> 17 17 #include <asm/fpu_emulator.h> 18 18 #include <asm/inst.h> 19 + #include <asm/mips-r2-to-r6-emul.h> 19 20 #include <asm/ptrace.h> 20 21 #include <asm/uaccess.h> 21 22 ··· 400 399 * @returns: -EFAULT on error and forces SIGBUS, and on success 401 400 * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after 402 401 * evaluating the branch. 402 + * 403 + * MIPS R6 Compact branches and forbidden slots: 404 + * Compact branches do not throw exceptions because they do 405 + * not have delay slots. The forbidden slot instruction ($PC+4) 406 + * is only executed if the branch was not taken. Otherwise the 407 + * forbidden slot is skipped entirely. This means that the 408 + * only possible reason to be here because of a MIPS R6 compact 409 + * branch instruction is that the forbidden slot has thrown one. 410 + * In that case the branch was not taken, so the EPC can be safely 411 + * set to EPC + 8. 403 412 */ 404 413 int __compute_return_epc_for_insn(struct pt_regs *regs, 405 414 union mips_instruction insn) 406 415 { 407 - unsigned int bit, fcr31, dspcontrol; 416 + unsigned int bit, fcr31, dspcontrol, reg; 408 417 long epc = regs->cp0_epc; 409 418 int ret = 0; 410 419 ··· 428 417 regs->regs[insn.r_format.rd] = epc + 8; 429 418 /* Fall through */ 430 419 case jr_op: 420 + if (NO_R6EMU && insn.r_format.func == jr_op) 421 + goto sigill_r6; 431 422 regs->cp0_epc = regs->regs[insn.r_format.rs]; 432 423 break; 433 424 } ··· 442 429 */ 443 430 case bcond_op: 444 431 switch (insn.i_format.rt) { 445 - case bltz_op: 446 432 case bltzl_op: 433 + if (NO_R6EMU) 434 + goto sigill_r6; 435 + case bltz_op: 447 436 if ((long)regs->regs[insn.i_format.rs] < 0) { 448 437 epc = epc + 4 + (insn.i_format.simmediate << 2); 449 438 if (insn.i_format.rt == bltzl_op) ··· 455 440 regs->cp0_epc = epc; 456 441 break; 457 442 458 - case bgez_op: 459 443 case bgezl_op: 444 + if (NO_R6EMU) 445 + goto sigill_r6; 446 + case bgez_op: 460 447 if ((long)regs->regs[insn.i_format.rs] >= 0) { 461 448 epc = epc + 4 + (insn.i_format.simmediate << 2); 462 449 if (insn.i_format.rt == bgezl_op) ··· 470 453 471 454 case bltzal_op: 472 455 case bltzall_op: 456 + if (NO_R6EMU && (insn.i_format.rs || 457 + insn.i_format.rt == bltzall_op)) { 458 + ret = -SIGILL; 459 + break; 460 + } 473 461 regs->regs[31] = epc + 8; 462 + /* 463 + * OK we are here either because we hit a NAL 464 + * instruction or because we are emulating an 465 + * old bltzal{,l} one. Lets figure out what the 466 + * case really is. 467 + */ 468 + if (!insn.i_format.rs) { 469 + /* 470 + * NAL or BLTZAL with rs == 0 471 + * Doesn't matter if we are R6 or not. The 472 + * result is the same 473 + */ 474 + regs->cp0_epc += 4 + 475 + (insn.i_format.simmediate << 2); 476 + break; 477 + } 478 + /* Now do the real thing for non-R6 BLTZAL{,L} */ 474 479 if ((long)regs->regs[insn.i_format.rs] < 0) { 475 480 epc = epc + 4 + (insn.i_format.simmediate << 2); 476 481 if (insn.i_format.rt == bltzall_op) ··· 504 465 505 466 case bgezal_op: 506 467 case bgezall_op: 468 + if (NO_R6EMU && (insn.i_format.rs || 469 + insn.i_format.rt == bgezall_op)) { 470 + ret = -SIGILL; 471 + break; 472 + } 507 473 regs->regs[31] = epc + 8; 474 + /* 475 + * OK we are here either because we hit a BAL 476 + * instruction or because we are emulating an 477 + * old bgezal{,l} one. Lets figure out what the 478 + * case really is. 479 + */ 480 + if (!insn.i_format.rs) { 481 + /* 482 + * BAL or BGEZAL with rs == 0 483 + * Doesn't matter if we are R6 or not. 
The 484 + * result is the same 485 + */ 486 + regs->cp0_epc += 4 + 487 + (insn.i_format.simmediate << 2); 488 + break; 489 + } 490 + /* Now do the real thing for non-R6 BGEZAL{,L} */ 508 491 if ((long)regs->regs[insn.i_format.rs] >= 0) { 509 492 epc = epc + 4 + (insn.i_format.simmediate << 2); 510 493 if (insn.i_format.rt == bgezall_op) ··· 538 477 539 478 case bposge32_op: 540 479 if (!cpu_has_dsp) 541 - goto sigill; 480 + goto sigill_dsp; 542 481 543 482 dspcontrol = rddsp(0x01); 544 483 ··· 569 508 /* 570 509 * These are conditional and in i_format. 571 510 */ 572 - case beq_op: 573 511 case beql_op: 512 + if (NO_R6EMU) 513 + goto sigill_r6; 514 + case beq_op: 574 515 if (regs->regs[insn.i_format.rs] == 575 516 regs->regs[insn.i_format.rt]) { 576 517 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 583 520 regs->cp0_epc = epc; 584 521 break; 585 522 586 - case bne_op: 587 523 case bnel_op: 524 + if (NO_R6EMU) 525 + goto sigill_r6; 526 + case bne_op: 588 527 if (regs->regs[insn.i_format.rs] != 589 528 regs->regs[insn.i_format.rt]) { 590 529 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 597 532 regs->cp0_epc = epc; 598 533 break; 599 534 600 - case blez_op: /* not really i_format */ 601 - case blezl_op: 535 + case blezl_op: /* not really i_format */ 536 + if (NO_R6EMU) 537 + goto sigill_r6; 538 + case blez_op: 539 + /* 540 + * Compact branches for R6 for the 541 + * blez and blezl opcodes. 542 + * BLEZ | rs = 0 | rt != 0 == BLEZALC 543 + * BLEZ | rs = rt != 0 == BGEZALC 544 + * BLEZ | rs != 0 | rt != 0 == BGEUC 545 + * BLEZL | rs = 0 | rt != 0 == BLEZC 546 + * BLEZL | rs = rt != 0 == BGEZC 547 + * BLEZL | rs != 0 | rt != 0 == BGEC 548 + * 549 + * For real BLEZ{,L}, rt is always 0. 550 + */ 551 + 552 + if (cpu_has_mips_r6 && insn.i_format.rt) { 553 + if ((insn.i_format.opcode == blez_op) && 554 + ((!insn.i_format.rs && insn.i_format.rt) || 555 + (insn.i_format.rs == insn.i_format.rt))) 556 + regs->regs[31] = epc + 4; 557 + regs->cp0_epc += 8; 558 + break; 559 + } 602 560 /* rt field assumed to be zero */ 603 561 if ((long)regs->regs[insn.i_format.rs] <= 0) { 604 562 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 632 544 regs->cp0_epc = epc; 633 545 break; 634 546 635 - case bgtz_op: 636 547 case bgtzl_op: 548 + if (NO_R6EMU) 549 + goto sigill_r6; 550 + case bgtz_op: 551 + /* 552 + * Compact branches for R6 for the 553 + * bgtz and bgtzl opcodes. 554 + * BGTZ | rs = 0 | rt != 0 == BGTZALC 555 + * BGTZ | rs = rt != 0 == BLTZALC 556 + * BGTZ | rs != 0 | rt != 0 == BLTUC 557 + * BGTZL | rs = 0 | rt != 0 == BGTZC 558 + * BGTZL | rs = rt != 0 == BLTZC 559 + * BGTZL | rs != 0 | rt != 0 == BLTC 560 + * 561 + * *ZALC varint for BGTZ &&& rt != 0 562 + * For real GTZ{,L}, rt is always 0. 563 + */ 564 + if (cpu_has_mips_r6 && insn.i_format.rt) { 565 + if ((insn.i_format.opcode == blez_op) && 566 + ((!insn.i_format.rs && insn.i_format.rt) || 567 + (insn.i_format.rs == insn.i_format.rt))) 568 + regs->regs[31] = epc + 4; 569 + regs->cp0_epc += 8; 570 + break; 571 + } 572 + 637 573 /* rt field assumed to be zero */ 638 574 if ((long)regs->regs[insn.i_format.rs] > 0) { 639 575 epc = epc + 4 + (insn.i_format.simmediate << 2); ··· 672 560 * And now the FPA/cp1 branch instructions. 
673 561 */ 674 562 case cop1_op: 675 - preempt_disable(); 676 - if (is_fpu_owner()) 677 - fcr31 = read_32bit_cp1_register(CP1_STATUS); 678 - else 679 - fcr31 = current->thread.fpu.fcr31; 680 - preempt_enable(); 681 - 682 - bit = (insn.i_format.rt >> 2); 683 - bit += (bit != 0); 684 - bit += 23; 685 - switch (insn.i_format.rt & 3) { 686 - case 0: /* bc1f */ 687 - case 2: /* bc1fl */ 688 - if (~fcr31 & (1 << bit)) { 689 - epc = epc + 4 + (insn.i_format.simmediate << 2); 690 - if (insn.i_format.rt == 2) 691 - ret = BRANCH_LIKELY_TAKEN; 692 - } else 563 + if (cpu_has_mips_r6 && 564 + ((insn.i_format.rs == bc1eqz_op) || 565 + (insn.i_format.rs == bc1nez_op))) { 566 + if (!used_math()) { /* First time FPU user */ 567 + ret = init_fpu(); 568 + if (ret && NO_R6EMU) { 569 + ret = -ret; 570 + break; 571 + } 572 + ret = 0; 573 + set_used_math(); 574 + } 575 + lose_fpu(1); /* Save FPU state for the emulator. */ 576 + reg = insn.i_format.rt; 577 + bit = 0; 578 + switch (insn.i_format.rs) { 579 + case bc1eqz_op: 580 + /* Test bit 0 */ 581 + if (get_fpr32(&current->thread.fpu.fpr[reg], 0) 582 + & 0x1) 583 + bit = 1; 584 + break; 585 + case bc1nez_op: 586 + /* Test bit 0 */ 587 + if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0) 588 + & 0x1)) 589 + bit = 1; 590 + break; 591 + } 592 + own_fpu(1); 593 + if (bit) 594 + epc = epc + 4 + 595 + (insn.i_format.simmediate << 2); 596 + else 693 597 epc += 8; 694 598 regs->cp0_epc = epc; 599 + 695 600 break; 601 + } else { 696 602 697 - case 1: /* bc1t */ 698 - case 3: /* bc1tl */ 699 - if (fcr31 & (1 << bit)) { 700 - epc = epc + 4 + (insn.i_format.simmediate << 2); 701 - if (insn.i_format.rt == 3) 702 - ret = BRANCH_LIKELY_TAKEN; 703 - } else 704 - epc += 8; 705 - regs->cp0_epc = epc; 603 + preempt_disable(); 604 + if (is_fpu_owner()) 605 + fcr31 = read_32bit_cp1_register(CP1_STATUS); 606 + else 607 + fcr31 = current->thread.fpu.fcr31; 608 + preempt_enable(); 609 + 610 + bit = (insn.i_format.rt >> 2); 611 + bit += (bit != 0); 612 + bit += 23; 613 + switch (insn.i_format.rt & 3) { 614 + case 0: /* bc1f */ 615 + case 2: /* bc1fl */ 616 + if (~fcr31 & (1 << bit)) { 617 + epc = epc + 4 + 618 + (insn.i_format.simmediate << 2); 619 + if (insn.i_format.rt == 2) 620 + ret = BRANCH_LIKELY_TAKEN; 621 + } else 622 + epc += 8; 623 + regs->cp0_epc = epc; 624 + break; 625 + 626 + case 1: /* bc1t */ 627 + case 3: /* bc1tl */ 628 + if (fcr31 & (1 << bit)) { 629 + epc = epc + 4 + 630 + (insn.i_format.simmediate << 2); 631 + if (insn.i_format.rt == 3) 632 + ret = BRANCH_LIKELY_TAKEN; 633 + } else 634 + epc += 8; 635 + regs->cp0_epc = epc; 636 + break; 637 + } 706 638 break; 707 639 } 708 - break; 709 640 #ifdef CONFIG_CPU_CAVIUM_OCTEON 710 641 case lwc2_op: /* This is bbit0 on Octeon */ 711 642 if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) ··· 781 626 epc += 8; 782 627 regs->cp0_epc = epc; 783 628 break; 629 + #else 630 + case bc6_op: 631 + /* Only valid for MIPS R6 */ 632 + if (!cpu_has_mips_r6) { 633 + ret = -SIGILL; 634 + break; 635 + } 636 + regs->cp0_epc += 8; 637 + break; 638 + case balc6_op: 639 + if (!cpu_has_mips_r6) { 640 + ret = -SIGILL; 641 + break; 642 + } 643 + /* Compact branch: BALC */ 644 + regs->regs[31] = epc + 4; 645 + epc += 4 + (insn.i_format.simmediate << 2); 646 + regs->cp0_epc = epc; 647 + break; 648 + case beqzcjic_op: 649 + if (!cpu_has_mips_r6) { 650 + ret = -SIGILL; 651 + break; 652 + } 653 + /* Compact branch: BEQZC || JIC */ 654 + regs->cp0_epc += 8; 655 + break; 656 + case bnezcjialc_op: 657 + if (!cpu_has_mips_r6) { 658 + ret = 
-SIGILL; 659 + break; 660 + } 661 + /* Compact branch: BNEZC || JIALC */ 662 + if (insn.i_format.rs) 663 + regs->regs[31] = epc + 4; 664 + regs->cp0_epc += 8; 665 + break; 784 666 #endif 667 + case cbcond0_op: 668 + case cbcond1_op: 669 + /* Only valid for MIPS R6 */ 670 + if (!cpu_has_mips_r6) { 671 + ret = -SIGILL; 672 + break; 673 + } 674 + /* 675 + * Compact branches: 676 + * bovc, beqc, beqzalc, bnvc, bnec, bnezalc 677 + */ 678 + if (insn.i_format.rt && !insn.i_format.rs) 679 + regs->regs[31] = epc + 4; 680 + regs->cp0_epc += 8; 681 + break; 785 682 } 786 683 787 684 return ret; 788 685 789 686 sigill_dsp: 790 687 printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm); 791 688 force_sig(SIGBUS, current); 689 + return -EFAULT; 690 + sigill_r6: 691 + pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n", 692 + current->comm); 693 + force_sig(SIGILL, current); 792 694 return -EFAULT; 793 695 } 794 696 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
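All of the new R6 cases above reduce to the rule spelled out in the function's comment block: a compact branch has no delay slot, only a forbidden slot that can fault exactly when the branch was not taken, so the fixup is EPC += 8 (skip the branch plus the slot), plus a link-register write for the BALC/...ALC variants. Condensed into a hedged sketch:

    #include <stdio.h>

    /* Model of the R6 compact-branch fixup: we only get here via a
     * forbidden-slot exception, which implies the branch was NOT taken. */
    static unsigned long fixup_compact_branch(unsigned long epc,
                                              unsigned long *ra, int links)
    {
            if (links)
                    *ra = epc + 4;  /* BALC/BEQZALC/... write the link register */
            return epc + 8;         /* skip branch + forbidden slot */
    }

    int main(void)
    {
            unsigned long ra = 0;
            unsigned long epc = fixup_compact_branch(0x400100, &ra, 1);

            printf("resume at 0x%lx, $31 = 0x%lx\n", epc, ra); /* 0x400108, 0x400104 */
            return 0;
    }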
+2 -6
arch/mips/kernel/cevt-r4k.c
··· 11 11 #include <linux/percpu.h> 12 12 #include <linux/smp.h> 13 13 #include <linux/irq.h> 14 - #include <linux/irqchip/mips-gic.h> 15 14 16 15 #include <asm/time.h> 17 16 #include <asm/cevt-r4k.h> ··· 39 40 40 41 irqreturn_t c0_compare_interrupt(int irq, void *dev_id) 41 42 { 42 - const int r2 = cpu_has_mips_r2; 43 + const int r2 = cpu_has_mips_r2_r6; 43 44 struct clock_event_device *cd; 44 45 int cpu = smp_processor_id(); 45 46 ··· 84 85 */ 85 86 static int c0_compare_int_pending(void) 86 87 { 87 - #ifdef CONFIG_MIPS_GIC 88 - if (gic_present) 89 - return gic_get_timer_pending(); 90 - #endif 88 + /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */ 91 89 return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); 92 90 } 93 91
+8 -8
arch/mips/kernel/cps-vec.S
··· 99 99 xori t2, t1, 0x7 100 100 beqz t2, 1f 101 101 li t3, 32 102 - addi t1, t1, 1 102 + addiu t1, t1, 1 103 103 sllv t1, t3, t1 104 104 1: /* At this point t1 == I-cache sets per way */ 105 105 _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ 106 - addi t2, t2, 1 106 + addiu t2, t2, 1 107 107 mul t1, t1, t0 108 108 mul t1, t1, t2 109 109 ··· 126 126 xori t2, t1, 0x7 127 127 beqz t2, 1f 128 128 li t3, 32 129 - addi t1, t1, 1 129 + addiu t1, t1, 1 130 130 sllv t1, t3, t1 131 131 1: /* At this point t1 == D-cache sets per way */ 132 132 _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ 133 - addi t2, t2, 1 133 + addiu t2, t2, 1 134 134 mul t1, t1, t0 135 135 mul t1, t1, t2 136 136 ··· 250 250 mfc0 t0, CP0_MVPCONF0 251 251 srl t0, t0, MVPCONF0_PVPE_SHIFT 252 252 andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) 253 - addi t7, t0, 1 253 + addiu t7, t0, 1 254 254 255 255 /* If there's only 1, we're done */ 256 256 beqz t0, 2f ··· 280 280 mttc0 t0, CP0_TCHALT 281 281 282 282 /* Next VPE */ 283 - addi t5, t5, 1 283 + addiu t5, t5, 1 284 284 slt t0, t5, t7 285 285 bnez t0, 1b 286 286 nop ··· 317 317 mfc0 t1, CP0_MVPCONF0 318 318 srl t1, t1, MVPCONF0_PVPE_SHIFT 319 319 andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT 320 - addi t1, t1, 1 320 + addiu t1, t1, 1 321 321 322 322 /* Calculate a mask for the VPE ID from EBase.CPUNum */ 323 323 clz t1, t1 ··· 424 424 425 425 /* Next VPE */ 426 426 2: srl t6, t6, 1 427 - addi t5, t5, 1 427 + addiu t5, t5, 1 428 428 bnez t6, 1b 429 429 nop 430 430
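The addi -> addiu conversions in cps-vec.S are R6-proofing: ADDI, the add-immediate that traps on signed overflow, was removed from R6, while the wrapping ADDIU remains, and for counter and address arithmetic like the loops above the non-trapping form is what was intended anyway. The semantic difference, sketched with the GCC/Clang overflow builtin:

    #include <stdio.h>
    #include <stdlib.h>

    /* ADDI-like: trap (here: abort) on signed 32-bit overflow */
    static int add_trapping(int a, int b)
    {
            int r;

            if (__builtin_add_overflow(a, b, &r))
                    abort();        /* ADDI raises an Integer Overflow exception */
            return r;
    }

    /* ADDIU-like: modulo-2^32 wrap, never traps */
    static unsigned add_wrapping(unsigned a, unsigned b)
    {
            return a + b;
    }

    int main(void)
    {
            printf("%u\n", add_wrapping(0x7fffffffu, 1));   /* wraps quietly */
            printf("%d\n", add_trapping(1, 2));             /* 3 */
            return 0;
    }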
+7 -4
arch/mips/kernel/cpu-bugs64.c
··· 244 244 panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); 245 245 } 246 246 247 - int daddiu_bug = -1; 247 + int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1; 248 248 249 249 static inline void check_daddiu(void) 250 250 { ··· 314 314 315 315 void __init check_bugs64_early(void) 316 316 { 317 - check_mult_sh(); 318 - check_daddiu(); 317 + if (!config_enabled(CONFIG_CPU_MIPSR6)) { 318 + check_mult_sh(); 319 + check_daddiu(); 320 + } 319 321 } 320 322 321 323 void __init check_bugs64(void) 322 324 { 323 - check_daddi(); 325 + if (!config_enabled(CONFIG_CPU_MIPSR6)) 326 + check_daddi(); 324 327 }
+27 -6
arch/mips/kernel/cpu-probe.c
··· 237 237 c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; 238 238 break; 239 239 240 + /* R6 incompatible with everything else */ 241 + case MIPS_CPU_ISA_M64R6: 242 + c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; 243 + case MIPS_CPU_ISA_M32R6: 244 + c->isa_level |= MIPS_CPU_ISA_M32R6; 245 + /* Break here so we don't add incompatible ISAs */ 246 + break; 240 247 case MIPS_CPU_ISA_M32R2: 241 248 c->isa_level |= MIPS_CPU_ISA_M32R2; 242 249 case MIPS_CPU_ISA_M32R1: ··· 333 326 case 1: 334 327 set_isa(c, MIPS_CPU_ISA_M32R2); 335 328 break; 329 + case 2: 330 + set_isa(c, MIPS_CPU_ISA_M32R6); 331 + break; 336 332 default: 337 333 goto unknown; 338 334 } ··· 347 337 break; 348 338 case 1: 349 339 set_isa(c, MIPS_CPU_ISA_M64R2); 340 + break; 341 + case 2: 342 + set_isa(c, MIPS_CPU_ISA_M64R6); 350 343 break; 351 344 default: 352 345 goto unknown; ··· 437 424 if (config3 & MIPS_CONF3_MSA) 438 425 c->ases |= MIPS_ASE_MSA; 439 426 /* Only tested on 32-bit cores */ 440 - if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) 427 + if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) { 428 + c->htw_seq = 0; 441 429 c->options |= MIPS_CPU_HTW; 430 + } 442 431 443 432 return config3 & MIPS_CONF_M; 444 433 } ··· 514 499 c->options |= MIPS_CPU_EVA; 515 500 if (config5 & MIPS_CONF5_MRP) 516 501 c->options |= MIPS_CPU_MAAR; 502 + if (config5 & MIPS_CONF5_LLB) 503 + c->options |= MIPS_CPU_RW_LLB; 517 504 518 505 return config5 & MIPS_CONF_M; 519 506 } ··· 550 533 551 534 if (cpu_has_rixi) { 552 535 /* Enable the RIXI exceptions */ 553 - write_c0_pagegrain(read_c0_pagegrain() | PG_IEC); 536 + set_c0_pagegrain(PG_IEC); 554 537 back_to_back_c0_hazard(); 555 538 /* Verify the IEC bit is set */ 556 539 if (read_c0_pagegrain() & PG_IEC) ··· 558 541 } 559 542 560 543 #ifndef CONFIG_MIPS_CPS 561 - if (cpu_has_mips_r2) { 544 + if (cpu_has_mips_r2_r6) { 562 545 c->core = get_ebase_cpunum(); 563 546 if (cpu_has_mipsmt) 564 547 c->core >>= fls(core_nvpes()) - 1; ··· 913 896 { 914 897 c->writecombine = _CACHE_UNCACHED_ACCELERATED; 915 898 switch (c->processor_id & PRID_IMP_MASK) { 899 + case PRID_IMP_QEMU_GENERIC: 900 + c->writecombine = _CACHE_UNCACHED; 901 + c->cputype = CPU_QEMU_GENERIC; 902 + __cpu_name[cpu] = "MIPS GENERIC QEMU"; 903 + break; 916 904 case PRID_IMP_4KC: 917 905 c->cputype = CPU_4KC; 918 906 c->writecombine = _CACHE_UNCACHED; ··· 1367 1345 if (c->options & MIPS_CPU_FPU) { 1368 1346 c->fpu_id = cpu_get_fpu_id(); 1369 1347 1370 - if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1371 - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { 1348 + if (c->isa_level & cpu_has_mips_r) { 1372 1349 if (c->fpu_id & MIPS_FPIR_3D) 1373 1350 c->ases |= MIPS_ASE_MIPS3D; 1374 1351 if (c->fpu_id & MIPS_FPIR_FREP) ··· 1375 1354 } 1376 1355 } 1377 1356 1378 - if (cpu_has_mips_r2) { 1357 + if (cpu_has_mips_r2_r6) { 1379 1358 c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; 1380 1359 /* R2 has Performance Counter Interrupt indicator */ 1381 1360 c->options |= MIPS_CPU_PCI;
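The probe code expresses ISA compatibility as a bitmask built by falling through the switch: each revision ORs in every older level it can execute, and the new R6 cases deliberately break out early because R6 dropped backwards compatibility with R2 and earlier. A miniature of the same pattern (mask values illustrative only):

    #include <stdio.h>

    #define ISA_M32R1       (1 << 0)
    #define ISA_M32R2       (1 << 1)
    #define ISA_M32R6       (1 << 2)

    static unsigned isa_level(int rev)
    {
            unsigned level = 0;

            switch (rev) {
            case 6:                 /* R6: no fall-through to R2/R1 */
                    level |= ISA_M32R6;
                    break;
            case 2:                 /* R2 implies R1, hence the fall-through */
                    level |= ISA_M32R2;
            case 1:
                    level |= ISA_M32R1;
            }
            return level;
    }

    int main(void)
    {
            printf("r2 mask: %#x\n", isa_level(2));  /* 0x3: R2 and R1 */
            printf("r6 mask: %#x\n", isa_level(6));  /* 0x4: R6 only */
            return 0;
    }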
+189 -116
arch/mips/kernel/elf.c
··· 11 11 #include <linux/elf.h> 12 12 #include <linux/sched.h> 13 13 14 + /* FPU modes */ 14 15 enum { 15 - FP_ERROR = -1, 16 - FP_DOUBLE_64A = -2, 16 + FP_FRE, 17 + FP_FR0, 18 + FP_FR1, 17 19 }; 20 + 21 + /** 22 + * struct mode_req - ABI FPU mode requirements 23 + * @single: The program being loaded needs an FPU but it will only issue 24 + * single precision instructions meaning that it can execute in 25 + * either FR0 or FR1. 26 + * @soft: The soft(-float) requirement means that the program being 27 + * loaded needs has no FPU dependency at all (i.e. it has no 28 + * FPU instructions). 29 + * @fr1: The program being loaded depends on FPU being in FR=1 mode. 30 + * @frdefault: The program being loaded depends on the default FPU mode. 31 + * That is FR0 for O32 and FR1 for N32/N64. 32 + * @fre: The program being loaded depends on FPU with FRE=1. This mode is 33 + * a bridge which uses FR=1 whilst still being able to maintain 34 + * full compatibility with pre-existing code using the O32 FP32 35 + * ABI. 36 + * 37 + * More information about the FP ABIs can be found here: 38 + * 39 + * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up 40 + * 41 + */ 42 + 43 + struct mode_req { 44 + bool single; 45 + bool soft; 46 + bool fr1; 47 + bool frdefault; 48 + bool fre; 49 + }; 50 + 51 + static const struct mode_req fpu_reqs[] = { 52 + [MIPS_ABI_FP_ANY] = { true, true, true, true, true }, 53 + [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true }, 54 + [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false }, 55 + [MIPS_ABI_FP_SOFT] = { false, true, false, false, false }, 56 + [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false }, 57 + [MIPS_ABI_FP_XX] = { false, false, true, true, true }, 58 + [MIPS_ABI_FP_64] = { false, false, true, false, false }, 59 + [MIPS_ABI_FP_64A] = { false, false, true, false, true } 60 + }; 61 + 62 + /* 63 + * Mode requirements when .MIPS.abiflags is not present in the ELF. 64 + * Not present means that everything is acceptable except FR1. 65 + */ 66 + static struct mode_req none_req = { true, true, false, true, true }; 18 67 19 68 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, 20 69 bool is_interp, struct arch_elf_state *state) 21 70 { 22 - struct elf32_hdr *ehdr = _ehdr; 23 - struct elf32_phdr *phdr = _phdr; 71 + struct elf32_hdr *ehdr32 = _ehdr; 72 + struct elf32_phdr *phdr32 = _phdr; 73 + struct elf64_phdr *phdr64 = _phdr; 24 74 struct mips_elf_abiflags_v0 abiflags; 25 75 int ret; 26 76 27 - if (config_enabled(CONFIG_64BIT) && 28 - (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) 29 - return 0; 30 - if (phdr->p_type != PT_MIPS_ABIFLAGS) 31 - return 0; 32 - if (phdr->p_filesz < sizeof(abiflags)) 33 - return -EINVAL; 77 + /* Lets see if this is an O32 ELF */ 78 + if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) { 79 + /* FR = 1 for N32 */ 80 + if (ehdr32->e_flags & EF_MIPS_ABI2) 81 + state->overall_fp_mode = FP_FR1; 82 + else 83 + /* Set a good default FPU mode for O32 */ 84 + state->overall_fp_mode = cpu_has_mips_r6 ? 85 + FP_FRE : FP_FR0; 34 86 35 - ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags, 36 - sizeof(abiflags)); 87 + if (ehdr32->e_flags & EF_MIPS_FP64) { 88 + /* 89 + * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. 
We will override it 90 + * later if needed 91 + */ 92 + if (is_interp) 93 + state->interp_fp_abi = MIPS_ABI_FP_OLD_64; 94 + else 95 + state->fp_abi = MIPS_ABI_FP_OLD_64; 96 + } 97 + if (phdr32->p_type != PT_MIPS_ABIFLAGS) 98 + return 0; 99 + 100 + if (phdr32->p_filesz < sizeof(abiflags)) 101 + return -EINVAL; 102 + 103 + ret = kernel_read(elf, phdr32->p_offset, 104 + (char *)&abiflags, 105 + sizeof(abiflags)); 106 + } else { 107 + /* FR=1 is really the only option for 64-bit */ 108 + state->overall_fp_mode = FP_FR1; 109 + 110 + if (phdr64->p_type != PT_MIPS_ABIFLAGS) 111 + return 0; 112 + if (phdr64->p_filesz < sizeof(abiflags)) 113 + return -EINVAL; 114 + 115 + ret = kernel_read(elf, phdr64->p_offset, 116 + (char *)&abiflags, 117 + sizeof(abiflags)); 118 + } 119 + 37 120 if (ret < 0) 38 121 return ret; 39 122 if (ret != sizeof(abiflags)) ··· 131 48 return 0; 132 49 } 133 50 134 - static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi) 51 + static inline unsigned get_fp_abi(int in_abi) 135 52 { 136 53 /* If the ABI requirement is provided, simply return that */ 137 - if (in_abi != -1) 54 + if (in_abi != MIPS_ABI_FP_UNKNOWN) 138 55 return in_abi; 139 56 140 - /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */ 141 - if (ehdr->e_flags & EF_MIPS_FP64) 142 - return MIPS_ABI_FP_64; 143 - 144 - /* Default to MIPS_ABI_FP_DOUBLE */ 145 - return MIPS_ABI_FP_DOUBLE; 57 + /* Unknown ABI */ 58 + return MIPS_ABI_FP_UNKNOWN; 146 59 } 147 60 148 61 int arch_check_elf(void *_ehdr, bool has_interpreter, 149 62 struct arch_elf_state *state) 150 63 { 151 64 struct elf32_hdr *ehdr = _ehdr; 152 - unsigned fp_abi, interp_fp_abi, abi0, abi1; 65 + struct mode_req prog_req, interp_req; 66 + int fp_abi, interp_fp_abi, abi0, abi1, max_abi; 153 67 154 - /* Ignore non-O32 binaries */ 155 - if (config_enabled(CONFIG_64BIT) && 156 - (ehdr->e_ident[EI_CLASS] != ELFCLASS32)) 68 + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 157 69 return 0; 158 70 159 - fp_abi = get_fp_abi(ehdr, state->fp_abi); 71 + fp_abi = get_fp_abi(state->fp_abi); 160 72 161 73 if (has_interpreter) { 162 - interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi); 74 + interp_fp_abi = get_fp_abi(state->interp_fp_abi); 163 75 164 76 abi0 = min(fp_abi, interp_fp_abi); 165 77 abi1 = max(fp_abi, interp_fp_abi); ··· 162 84 abi0 = abi1 = fp_abi; 163 85 } 164 86 165 - state->overall_abi = FP_ERROR; 87 + /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */ 88 + max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) && 89 + (!(ehdr->e_flags & EF_MIPS_ABI2))) ? 
90 + MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT; 166 91 167 - if (abi0 == abi1) { 168 - state->overall_abi = abi0; 169 - } else if (abi0 == MIPS_ABI_FP_ANY) { 170 - state->overall_abi = abi1; 171 - } else if (abi0 == MIPS_ABI_FP_DOUBLE) { 172 - switch (abi1) { 173 - case MIPS_ABI_FP_XX: 174 - state->overall_abi = MIPS_ABI_FP_DOUBLE; 175 - break; 176 - 177 - case MIPS_ABI_FP_64A: 178 - state->overall_abi = FP_DOUBLE_64A; 179 - break; 180 - } 181 - } else if (abi0 == MIPS_ABI_FP_SINGLE || 182 - abi0 == MIPS_ABI_FP_SOFT) { 183 - /* Cannot link with other ABIs */ 184 - } else if (abi0 == MIPS_ABI_FP_OLD_64) { 185 - switch (abi1) { 186 - case MIPS_ABI_FP_XX: 187 - case MIPS_ABI_FP_64: 188 - case MIPS_ABI_FP_64A: 189 - state->overall_abi = MIPS_ABI_FP_64; 190 - break; 191 - } 192 - } else if (abi0 == MIPS_ABI_FP_XX || 193 - abi0 == MIPS_ABI_FP_64 || 194 - abi0 == MIPS_ABI_FP_64A) { 195 - state->overall_abi = MIPS_ABI_FP_64; 196 - } 197 - 198 - switch (state->overall_abi) { 199 - case MIPS_ABI_FP_64: 200 - case MIPS_ABI_FP_64A: 201 - case FP_DOUBLE_64A: 202 - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 203 - return -ELIBBAD; 204 - break; 205 - 206 - case FP_ERROR: 92 + if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || 93 + (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) 207 94 return -ELIBBAD; 208 - } 95 + 96 + /* It's time to determine the FPU mode requirements */ 97 + prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0]; 98 + interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1]; 99 + 100 + /* 101 + * Check whether the program's and interp's ABIs have a matching FPU 102 + * mode requirement. 103 + */ 104 + prog_req.single = interp_req.single && prog_req.single; 105 + prog_req.soft = interp_req.soft && prog_req.soft; 106 + prog_req.fr1 = interp_req.fr1 && prog_req.fr1; 107 + prog_req.frdefault = interp_req.frdefault && prog_req.frdefault; 108 + prog_req.fre = interp_req.fre && prog_req.fre; 109 + 110 + /* 111 + * Determine the desired FPU mode 112 + * 113 + * Decision making: 114 + * 115 + * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This 116 + * means that we have a combination of program and interpreter 117 + * that inherently require the hybrid FP mode. 118 + * - If FR1 and FRDEFAULT is true, that means we hit the any-abi or 119 + * fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU 120 + * instructions so we don't care about the mode. We will simply use 121 + * the one preferred by the hardware. In fpxx case, that ABI can 122 + * handle both FR=1 and FR=0, so, again, we simply choose the one 123 + * preferred by the hardware. Next, if we only use single-precision 124 + * FPU instructions, and the default ABI FPU mode is not good 125 + * (ie single + any ABI combination), we set again the FPU mode to the 126 + * one is preferred by the hardware. Next, if we know that the code 127 + * will only use single-precision instructions, shown by single being 128 + * true but frdefault being false, then we again set the FPU mode to 129 + * the one that is preferred by the hardware. 130 + * - We want FP_FR1 if that's the only matching mode and the default one 131 + * is not good. 132 + * - Return with -ELIBADD if we can't find a matching FPU mode. 
133 + */ 134 + if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1) 135 + state->overall_fp_mode = FP_FRE; 136 + else if ((prog_req.fr1 && prog_req.frdefault) || 137 + (prog_req.single && !prog_req.frdefault)) 138 + /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */ 139 + state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) && 140 + cpu_has_mips_r2_r6) ? 141 + FP_FR1 : FP_FR0; 142 + else if (prog_req.fr1) 143 + state->overall_fp_mode = FP_FR1; 144 + else if (!prog_req.fre && !prog_req.frdefault && 145 + !prog_req.fr1 && !prog_req.single && !prog_req.soft) 146 + return -ELIBBAD; 209 147 210 148 return 0; 211 149 } 212 150 151 + static inline void set_thread_fp_mode(int hybrid, int regs32) 152 + { 153 + if (hybrid) 154 + set_thread_flag(TIF_HYBRID_FPREGS); 155 + else 156 + clear_thread_flag(TIF_HYBRID_FPREGS); 157 + if (regs32) 158 + set_thread_flag(TIF_32BIT_FPREGS); 159 + else 160 + clear_thread_flag(TIF_32BIT_FPREGS); 161 + } 162 + 213 163 void mips_set_personality_fp(struct arch_elf_state *state) 214 164 { 215 - if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) { 216 - /* 217 - * Use hybrid FPRs for all code which can correctly execute 218 - * with that mode. 219 - */ 220 - switch (state->overall_abi) { 221 - case MIPS_ABI_FP_DOUBLE: 222 - case MIPS_ABI_FP_SINGLE: 223 - case MIPS_ABI_FP_SOFT: 224 - case MIPS_ABI_FP_XX: 225 - case MIPS_ABI_FP_ANY: 226 - /* FR=1, FRE=1 */ 227 - clear_thread_flag(TIF_32BIT_FPREGS); 228 - set_thread_flag(TIF_HYBRID_FPREGS); 229 - return; 230 - } 231 - } 165 + /* 166 + * This function is only ever called for O32 ELFs so we should 167 + * not be worried about N32/N64 binaries. 168 + */ 232 169 233 - switch (state->overall_abi) { 234 - case MIPS_ABI_FP_DOUBLE: 235 - case MIPS_ABI_FP_SINGLE: 236 - case MIPS_ABI_FP_SOFT: 237 - /* FR=0 */ 238 - set_thread_flag(TIF_32BIT_FPREGS); 239 - clear_thread_flag(TIF_HYBRID_FPREGS); 170 + if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 171 + return; 172 + 173 + switch (state->overall_fp_mode) { 174 + case FP_FRE: 175 + set_thread_fp_mode(1, 0); 240 176 break; 241 - 242 - case FP_DOUBLE_64A: 243 - /* FR=1, FRE=1 */ 244 - clear_thread_flag(TIF_32BIT_FPREGS); 245 - set_thread_flag(TIF_HYBRID_FPREGS); 177 + case FP_FR0: 178 + set_thread_fp_mode(0, 1); 246 179 break; 247 - 248 - case MIPS_ABI_FP_64: 249 - case MIPS_ABI_FP_64A: 250 - /* FR=1, FRE=0 */ 251 - clear_thread_flag(TIF_32BIT_FPREGS); 252 - clear_thread_flag(TIF_HYBRID_FPREGS); 180 + case FP_FR1: 181 + set_thread_fp_mode(0, 0); 253 182 break; 254 - 255 - case MIPS_ABI_FP_XX: 256 - case MIPS_ABI_FP_ANY: 257 - if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT)) 258 - set_thread_flag(TIF_32BIT_FPREGS); 259 - else 260 - clear_thread_flag(TIF_32BIT_FPREGS); 261 - 262 - clear_thread_flag(TIF_HYBRID_FPREGS); 263 - break; 264 - 265 183 default: 266 - case FP_ERROR: 267 184 BUG(); 268 185 } 269 186 }
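The rewritten arch_check_elf()/mips_set_personality_fp() pipeline above boils down to: translate each FP ABI into a set of acceptable FPU modes (the fpu_reqs[] table), intersect the program's set with the interpreter's, then prefer FRE, then the hardware default, then FR1, failing with -ELIBBAD when nothing is left. A compact, hedged model of that selection (mode bits and table entries are illustrative, single/soft handling omitted):

    #include <stdio.h>

    #define M_FRDEFAULT     (1 << 0)
    #define M_FR1           (1 << 1)
    #define M_FRE           (1 << 2)

    /* illustrative subset of fpu_reqs[]: acceptable modes per FP ABI */
    static const unsigned req_double = M_FRDEFAULT | M_FRE;
    static const unsigned req_fpxx   = M_FRDEFAULT | M_FR1 | M_FRE;
    static const unsigned req_fp64a  = M_FR1 | M_FRE;

    static int choose_mode(unsigned prog, unsigned interp)
    {
            unsigned both = prog & interp;          /* requirement intersection */

            if (!both)
                    return -1;                      /* -ELIBBAD in the real code */
            if ((both & M_FRE) && !(both & (M_FRDEFAULT | M_FR1)))
                    return M_FRE;                   /* only the hybrid mode fits */
            if (both & M_FRDEFAULT)
                    return M_FRDEFAULT;             /* default mode is acceptable */
            return M_FR1;
    }

    int main(void)
    {
            /* FP64A program + FPDOUBLE interpreter: only FRE satisfies both */
            printf("mode: %#x\n", choose_mode(req_fp64a, req_double)); /* M_FRE */
            /* FPXX + FPDOUBLE: the default mode wins */
            printf("mode: %#x\n", choose_mode(req_fpxx, req_double));
            return 0;
    }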
+21 -2
arch/mips/kernel/entry.S
··· 46 46 local_irq_disable # make sure we dont miss an 47 47 # interrupt setting need_resched 48 48 # between sampling and return 49 + #ifdef CONFIG_MIPSR2_TO_R6_EMULATOR 50 + lw k0, TI_R2_EMUL_RET($28) 51 + bnez k0, restore_all_from_r2_emul 52 + #endif 53 + 49 54 LONG_L a2, TI_FLAGS($28) # current->work 50 55 andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) 51 56 bnez t0, work_pending ··· 119 114 RESTORE_SP_AND_RET 120 115 .set at 121 116 117 + #ifdef CONFIG_MIPSR2_TO_R6_EMULATOR 118 + restore_all_from_r2_emul: # restore full frame 119 + .set noat 120 + sw zero, TI_R2_EMUL_RET($28) # reset it 121 + RESTORE_TEMP 122 + RESTORE_AT 123 + RESTORE_STATIC 124 + RESTORE_SOME 125 + LONG_L sp, PT_R29(sp) 126 + eretnc 127 + .set at 128 + #endif 129 + 122 130 work_pending: 123 131 andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS 124 132 beqz t0, work_notifysig ··· 176 158 jal syscall_trace_leave 177 159 b resume_userspace 178 160 179 - #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT) 161 + #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \ 162 + defined(CONFIG_MIPS_MT) 180 163 181 164 /* 182 165 * MIPS32R2 Instruction Hazard Barrier - must be called ··· 190 171 nop 191 172 END(mips_ihb) 192 173 193 - #endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */ 174 + #endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
+1 -1
arch/mips/kernel/genex.S
··· 125 125 nop 126 126 nop 127 127 #endif 128 - .set arch=r4000 128 + .set MIPS_ISA_ARCH_LEVEL_RAW 129 129 wait 130 130 /* end of rollback region (the region size must be power of two) */ 131 131 1:
+1
arch/mips/kernel/idle.c
··· 186 186 case CPU_PROAPTIV: 187 187 case CPU_P5600: 188 188 case CPU_M5150: 189 + case CPU_QEMU_GENERIC: 189 190 cpu_wait = r4k_wait; 190 191 if (read_c0_config7() & MIPS_CONF7_WII) 191 192 cpu_wait = r4k_wait_irqoff;
+2378
arch/mips/kernel/mips-r2-to-r6-emul.c
··· 1 + /* 2 + * This file is subject to the terms and conditions of the GNU General Public 3 + * License. See the file "COPYING" in the main directory of this archive 4 + * for more details. 5 + * 6 + * Copyright (c) 2014 Imagination Technologies Ltd. 7 + * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com> 8 + * Author: Markos Chandras <markos.chandras@imgtec.com> 9 + * 10 + * MIPS R2 user space instruction emulator for MIPS R6 11 + * 12 + */ 13 + #include <linux/bug.h> 14 + #include <linux/compiler.h> 15 + #include <linux/debugfs.h> 16 + #include <linux/init.h> 17 + #include <linux/kernel.h> 18 + #include <linux/module.h> 19 + #include <linux/ptrace.h> 20 + #include <linux/seq_file.h> 21 + 22 + #include <asm/asm.h> 23 + #include <asm/branch.h> 24 + #include <asm/break.h> 25 + #include <asm/fpu.h> 26 + #include <asm/fpu_emulator.h> 27 + #include <asm/inst.h> 28 + #include <asm/mips-r2-to-r6-emul.h> 29 + #include <asm/local.h> 30 + #include <asm/ptrace.h> 31 + #include <asm/uaccess.h> 32 + 33 + #ifdef CONFIG_64BIT 34 + #define ADDIU "daddiu " 35 + #define INS "dins " 36 + #define EXT "dext " 37 + #else 38 + #define ADDIU "addiu " 39 + #define INS "ins " 40 + #define EXT "ext " 41 + #endif /* CONFIG_64BIT */ 42 + 43 + #define SB "sb " 44 + #define LB "lb " 45 + #define LL "ll " 46 + #define SC "sc " 47 + 48 + DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); 49 + DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); 50 + DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); 51 + 52 + extern const unsigned int fpucondbit[8]; 53 + 54 + #define MIPS_R2_EMUL_TOTAL_PASS 10 55 + 56 + int mipsr2_emulation = 0; 57 + 58 + static int __init mipsr2emu_enable(char *s) 59 + { 60 + mipsr2_emulation = 1; 61 + 62 + pr_info("MIPS R2-to-R6 Emulator Enabled!"); 63 + 64 + return 1; 65 + } 66 + __setup("mipsr2emu", mipsr2emu_enable); 67 + 68 + /** 69 + * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in delay slot 70 + * for performance instead of the traditional way of using a stack trampoline 71 + * which is rather slow. 
72 + * @regs: Process register set 73 + * @ir: Instruction 74 + */ 75 + static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) 76 + { 77 + switch (MIPSInst_OPCODE(ir)) { 78 + case addiu_op: 79 + if (MIPSInst_RT(ir)) 80 + regs->regs[MIPSInst_RT(ir)] = 81 + (s32)regs->regs[MIPSInst_RS(ir)] + 82 + (s32)MIPSInst_SIMM(ir); 83 + return 0; 84 + case daddiu_op: 85 + if (config_enabled(CONFIG_32BIT)) 86 + break; 87 + 88 + if (MIPSInst_RT(ir)) 89 + regs->regs[MIPSInst_RT(ir)] = 90 + (s64)regs->regs[MIPSInst_RS(ir)] + 91 + (s64)MIPSInst_SIMM(ir); 92 + return 0; 93 + case lwc1_op: 94 + case swc1_op: 95 + case cop1_op: 96 + case cop1x_op: 97 + /* FPU instructions in delay slot */ 98 + return -SIGFPE; 99 + case spec_op: 100 + switch (MIPSInst_FUNC(ir)) { 101 + case or_op: 102 + if (MIPSInst_RD(ir)) 103 + regs->regs[MIPSInst_RD(ir)] = 104 + regs->regs[MIPSInst_RS(ir)] | 105 + regs->regs[MIPSInst_RT(ir)]; 106 + return 0; 107 + case sll_op: 108 + if (MIPSInst_RS(ir)) 109 + break; 110 + 111 + if (MIPSInst_RD(ir)) 112 + regs->regs[MIPSInst_RD(ir)] = 113 + (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) << 114 + MIPSInst_FD(ir)); 115 + return 0; 116 + case srl_op: 117 + if (MIPSInst_RS(ir)) 118 + break; 119 + 120 + if (MIPSInst_RD(ir)) 121 + regs->regs[MIPSInst_RD(ir)] = 122 + (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >> 123 + MIPSInst_FD(ir)); 124 + return 0; 125 + case addu_op: 126 + if (MIPSInst_FD(ir)) 127 + break; 128 + 129 + if (MIPSInst_RD(ir)) 130 + regs->regs[MIPSInst_RD(ir)] = 131 + (s32)((u32)regs->regs[MIPSInst_RS(ir)] + 132 + (u32)regs->regs[MIPSInst_RT(ir)]); 133 + return 0; 134 + case subu_op: 135 + if (MIPSInst_FD(ir)) 136 + break; 137 + 138 + if (MIPSInst_RD(ir)) 139 + regs->regs[MIPSInst_RD(ir)] = 140 + (s32)((u32)regs->regs[MIPSInst_RS(ir)] - 141 + (u32)regs->regs[MIPSInst_RT(ir)]); 142 + return 0; 143 + case dsll_op: 144 + if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) 145 + break; 146 + 147 + if (MIPSInst_RD(ir)) 148 + regs->regs[MIPSInst_RD(ir)] = 149 + (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) << 150 + MIPSInst_FD(ir)); 151 + return 0; 152 + case dsrl_op: 153 + if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir)) 154 + break; 155 + 156 + if (MIPSInst_RD(ir)) 157 + regs->regs[MIPSInst_RD(ir)] = 158 + (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >> 159 + MIPSInst_FD(ir)); 160 + return 0; 161 + case daddu_op: 162 + if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) 163 + break; 164 + 165 + if (MIPSInst_RD(ir)) 166 + regs->regs[MIPSInst_RD(ir)] = 167 + (u64)regs->regs[MIPSInst_RS(ir)] + 168 + (u64)regs->regs[MIPSInst_RT(ir)]; 169 + return 0; 170 + case dsubu_op: 171 + if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir)) 172 + break; 173 + 174 + if (MIPSInst_RD(ir)) 175 + regs->regs[MIPSInst_RD(ir)] = 176 + (s64)((u64)regs->regs[MIPSInst_RS(ir)] - 177 + (u64)regs->regs[MIPSInst_RT(ir)]); 178 + return 0; 179 + } 180 + break; 181 + default: 182 + pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n", 183 + ir, MIPSInst_OPCODE(ir)); 184 + } 185 + 186 + return SIGILL; 187 + } 188 + 189 + /** 190 + * movt_func - Emulate a MOVT instruction 191 + * @regs: Process register set 192 + * @ir: Instruction 193 + * 194 + * Returns 0 since it always succeeds. 
195 + */
196 + static int movf_func(struct pt_regs *regs, u32 ir)
197 + {
198 + u32 csr;
199 + u32 cond;
200 +
201 + csr = current->thread.fpu.fcr31;
202 + cond = fpucondbit[MIPSInst_RT(ir) >> 2];
203 + if (((csr & cond) == 0) && MIPSInst_RD(ir))
204 + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
205 + MIPS_R2_STATS(movs);
206 + return 0;
207 + }
208 +
209 + /**
210 + * movt_func - Emulate a MOVT instruction
211 + * @regs: Process register set
212 + * @ir: Instruction
213 + *
214 + * Returns 0 since it always succeeds.
215 + */
216 + static int movt_func(struct pt_regs *regs, u32 ir)
217 + {
218 + u32 csr;
219 + u32 cond;
220 +
221 + csr = current->thread.fpu.fcr31;
222 + cond = fpucondbit[MIPSInst_RT(ir) >> 2];
223 +
224 + if (((csr & cond) != 0) && MIPSInst_RD(ir))
225 + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
226 +
227 + MIPS_R2_STATS(movs);
228 +
229 + return 0;
230 + }
231 +
232 + /**
233 + * jr_func - Emulate a JR instruction.
234 + * @regs: Process register set
235 + * @ir: Instruction
236 + *
237 + * Returns SIGILL if JR was in a delay slot, SIGEMT if we
238 + * can't compute the EPC, SIGSEGV if we can't access the
239 + * userland instruction or 0 on success.
240 + */
241 + static int jr_func(struct pt_regs *regs, u32 ir)
242 + {
243 + int err;
244 + unsigned long cepc, epc, nepc;
245 + u32 nir;
246 +
247 + if (delay_slot(regs))
248 + return SIGILL;
249 +
250 + /* EPC after the RI/JR instruction */
251 + nepc = regs->cp0_epc;
252 + /* Roll back to the reserved R2 JR instruction */
253 + regs->cp0_epc -= 4;
254 + epc = regs->cp0_epc;
255 + err = __compute_return_epc(regs);
256 +
257 + if (err < 0)
258 + return SIGEMT;
259 +
260 +
261 + /* Computed EPC */
262 + cepc = regs->cp0_epc;
263 +
264 + /* Get DS instruction */
265 + err = __get_user(nir, (u32 __user *)nepc);
266 + if (err)
267 + return SIGSEGV;
268 +
269 + MIPS_R2BR_STATS(jrs);
270 +
271 + /* If nir == 0 (NOP), then there is nothing else to do */
272 + if (nir) {
273 + /*
274 + * A negative err means an FPU instruction in the BD slot,
275 + * zero err means 'BD-slot emulation done'.
276 + * For anything else we go back to trampoline emulation.
277 + */
278 + err = mipsr6_emul(regs, nir);
279 + if (err > 0) {
280 + regs->cp0_epc = nepc;
281 + err = mips_dsemul(regs, nir, cepc);
282 + if (err == SIGILL)
283 + err = SIGEMT;
284 + MIPS_R2_STATS(dsemul);
285 + }
286 + }
287 +
288 + return err;
289 + }
290 +
291 + /**
292 + * movz_func - Emulate a MOVZ instruction
293 + * @regs: Process register set
294 + * @ir: Instruction
295 + *
296 + * Returns 0 since it always succeeds.
297 + */
298 + static int movz_func(struct pt_regs *regs, u32 ir)
299 + {
300 + if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
301 + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
302 + MIPS_R2_STATS(movs);
303 +
304 + return 0;
305 + }
306 +
307 + /**
308 + * movn_func - Emulate a MOVN instruction
309 + * @regs: Process register set
310 + * @ir: Instruction
311 + *
312 + * Returns 0 since it always succeeds.
313 + */
314 + static int movn_func(struct pt_regs *regs, u32 ir)
315 + {
316 + if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
317 + regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
318 + MIPS_R2_STATS(movs);
319 +
320 + return 0;
321 + }
322 +
323 + /**
324 + * mfhi_func - Emulate a MFHI instruction
325 + * @regs: Process register set
326 + * @ir: Instruction
327 + *
328 + * Returns 0 since it always succeeds.
329 + */ 330 + static int mfhi_func(struct pt_regs *regs, u32 ir) 331 + { 332 + if (MIPSInst_RD(ir)) 333 + regs->regs[MIPSInst_RD(ir)] = regs->hi; 334 + 335 + MIPS_R2_STATS(hilo); 336 + 337 + return 0; 338 + } 339 + 340 + /** 341 + * mthi_func - Emulate a MTHI instruction 342 + * @regs: Process register set 343 + * @ir: Instruction 344 + * 345 + * Returns 0 since it always succeeds. 346 + */ 347 + static int mthi_func(struct pt_regs *regs, u32 ir) 348 + { 349 + regs->hi = regs->regs[MIPSInst_RS(ir)]; 350 + 351 + MIPS_R2_STATS(hilo); 352 + 353 + return 0; 354 + } 355 + 356 + /** 357 + * mflo_func - Emulate a MFLO instruction 358 + * @regs: Process register set 359 + * @ir: Instruction 360 + * 361 + * Returns 0 since it always succeeds. 362 + */ 363 + static int mflo_func(struct pt_regs *regs, u32 ir) 364 + { 365 + if (MIPSInst_RD(ir)) 366 + regs->regs[MIPSInst_RD(ir)] = regs->lo; 367 + 368 + MIPS_R2_STATS(hilo); 369 + 370 + return 0; 371 + } 372 + 373 + /** 374 + * mtlo_func - Emulate a MTLO instruction 375 + * @regs: Process register set 376 + * @ir: Instruction 377 + * 378 + * Returns 0 since it always succeeds. 379 + */ 380 + static int mtlo_func(struct pt_regs *regs, u32 ir) 381 + { 382 + regs->lo = regs->regs[MIPSInst_RS(ir)]; 383 + 384 + MIPS_R2_STATS(hilo); 385 + 386 + return 0; 387 + } 388 + 389 + /** 390 + * mult_func - Emulate a MULT instruction 391 + * @regs: Process register set 392 + * @ir: Instruction 393 + * 394 + * Returns 0 since it always succeeds. 395 + */ 396 + static int mult_func(struct pt_regs *regs, u32 ir) 397 + { 398 + s64 res; 399 + s32 rt, rs; 400 + 401 + rt = regs->regs[MIPSInst_RT(ir)]; 402 + rs = regs->regs[MIPSInst_RS(ir)]; 403 + res = (s64)rt * (s64)rs; 404 + 405 + rs = res; 406 + regs->lo = (s64)rs; 407 + rt = res >> 32; 408 + res = (s64)rt; 409 + regs->hi = res; 410 + 411 + MIPS_R2_STATS(muls); 412 + 413 + return 0; 414 + } 415 + 416 + /** 417 + * multu_func - Emulate a MULTU instruction 418 + * @regs: Process register set 419 + * @ir: Instruction 420 + * 421 + * Returns 0 since it always succeeds. 422 + */ 423 + static int multu_func(struct pt_regs *regs, u32 ir) 424 + { 425 + u64 res; 426 + u32 rt, rs; 427 + 428 + rt = regs->regs[MIPSInst_RT(ir)]; 429 + rs = regs->regs[MIPSInst_RS(ir)]; 430 + res = (u64)rt * (u64)rs; 431 + rt = res; 432 + regs->lo = (s64)rt; 433 + regs->hi = (s64)(res >> 32); 434 + 435 + MIPS_R2_STATS(muls); 436 + 437 + return 0; 438 + } 439 + 440 + /** 441 + * div_func - Emulate a DIV instruction 442 + * @regs: Process register set 443 + * @ir: Instruction 444 + * 445 + * Returns 0 since it always succeeds. 446 + */ 447 + static int div_func(struct pt_regs *regs, u32 ir) 448 + { 449 + s32 rt, rs; 450 + 451 + rt = regs->regs[MIPSInst_RT(ir)]; 452 + rs = regs->regs[MIPSInst_RS(ir)]; 453 + 454 + regs->lo = (s64)(rs / rt); 455 + regs->hi = (s64)(rs % rt); 456 + 457 + MIPS_R2_STATS(divs); 458 + 459 + return 0; 460 + } 461 + 462 + /** 463 + * divu_func - Emulate a DIVU instruction 464 + * @regs: Process register set 465 + * @ir: Instruction 466 + * 467 + * Returns 0 since it always succeeds. 
468 + */ 469 + static int divu_func(struct pt_regs *regs, u32 ir) 470 + { 471 + u32 rt, rs; 472 + 473 + rt = regs->regs[MIPSInst_RT(ir)]; 474 + rs = regs->regs[MIPSInst_RS(ir)]; 475 + 476 + regs->lo = (s64)(rs / rt); 477 + regs->hi = (s64)(rs % rt); 478 + 479 + MIPS_R2_STATS(divs); 480 + 481 + return 0; 482 + } 483 + 484 + /** 485 + * dmult_func - Emulate a DMULT instruction 486 + * @regs: Process register set 487 + * @ir: Instruction 488 + * 489 + * Returns 0 on success or SIGILL for 32-bit kernels. 490 + */ 491 + static int dmult_func(struct pt_regs *regs, u32 ir) 492 + { 493 + s64 res; 494 + s64 rt, rs; 495 + 496 + if (config_enabled(CONFIG_32BIT)) 497 + return SIGILL; 498 + 499 + rt = regs->regs[MIPSInst_RT(ir)]; 500 + rs = regs->regs[MIPSInst_RS(ir)]; 501 + res = rt * rs; 502 + 503 + regs->lo = res; 504 + __asm__ __volatile__( 505 + "dmuh %0, %1, %2\t\n" 506 + : "=r"(res) 507 + : "r"(rt), "r"(rs)); 508 + 509 + regs->hi = res; 510 + 511 + MIPS_R2_STATS(muls); 512 + 513 + return 0; 514 + } 515 + 516 + /** 517 + * dmultu_func - Emulate a DMULTU instruction 518 + * @regs: Process register set 519 + * @ir: Instruction 520 + * 521 + * Returns 0 on success or SIGILL for 32-bit kernels. 522 + */ 523 + static int dmultu_func(struct pt_regs *regs, u32 ir) 524 + { 525 + u64 res; 526 + u64 rt, rs; 527 + 528 + if (config_enabled(CONFIG_32BIT)) 529 + return SIGILL; 530 + 531 + rt = regs->regs[MIPSInst_RT(ir)]; 532 + rs = regs->regs[MIPSInst_RS(ir)]; 533 + res = rt * rs; 534 + 535 + regs->lo = res; 536 + __asm__ __volatile__( 537 + "dmuhu %0, %1, %2\t\n" 538 + : "=r"(res) 539 + : "r"(rt), "r"(rs)); 540 + 541 + regs->hi = res; 542 + 543 + MIPS_R2_STATS(muls); 544 + 545 + return 0; 546 + } 547 + 548 + /** 549 + * ddiv_func - Emulate a DDIV instruction 550 + * @regs: Process register set 551 + * @ir: Instruction 552 + * 553 + * Returns 0 on success or SIGILL for 32-bit kernels. 554 + */ 555 + static int ddiv_func(struct pt_regs *regs, u32 ir) 556 + { 557 + s64 rt, rs; 558 + 559 + if (config_enabled(CONFIG_32BIT)) 560 + return SIGILL; 561 + 562 + rt = regs->regs[MIPSInst_RT(ir)]; 563 + rs = regs->regs[MIPSInst_RS(ir)]; 564 + 565 + regs->lo = rs / rt; 566 + regs->hi = rs % rt; 567 + 568 + MIPS_R2_STATS(divs); 569 + 570 + return 0; 571 + } 572 + 573 + /** 574 + * ddivu_func - Emulate a DDIVU instruction 575 + * @regs: Process register set 576 + * @ir: Instruction 577 + * 578 + * Returns 0 on success or SIGILL for 32-bit kernels. 
579 + */ 580 + static int ddivu_func(struct pt_regs *regs, u32 ir) 581 + { 582 + u64 rt, rs; 583 + 584 + if (config_enabled(CONFIG_32BIT)) 585 + return SIGILL; 586 + 587 + rt = regs->regs[MIPSInst_RT(ir)]; 588 + rs = regs->regs[MIPSInst_RS(ir)]; 589 + 590 + regs->lo = rs / rt; 591 + regs->hi = rs % rt; 592 + 593 + MIPS_R2_STATS(divs); 594 + 595 + return 0; 596 + } 597 + 598 + /* R6 removed instructions for the SPECIAL opcode */ 599 + static struct r2_decoder_table spec_op_table[] = { 600 + { 0xfc1ff83f, 0x00000008, jr_func }, 601 + { 0xfc00ffff, 0x00000018, mult_func }, 602 + { 0xfc00ffff, 0x00000019, multu_func }, 603 + { 0xfc00ffff, 0x0000001c, dmult_func }, 604 + { 0xfc00ffff, 0x0000001d, dmultu_func }, 605 + { 0xffff07ff, 0x00000010, mfhi_func }, 606 + { 0xfc1fffff, 0x00000011, mthi_func }, 607 + { 0xffff07ff, 0x00000012, mflo_func }, 608 + { 0xfc1fffff, 0x00000013, mtlo_func }, 609 + { 0xfc0307ff, 0x00000001, movf_func }, 610 + { 0xfc0307ff, 0x00010001, movt_func }, 611 + { 0xfc0007ff, 0x0000000a, movz_func }, 612 + { 0xfc0007ff, 0x0000000b, movn_func }, 613 + { 0xfc00ffff, 0x0000001a, div_func }, 614 + { 0xfc00ffff, 0x0000001b, divu_func }, 615 + { 0xfc00ffff, 0x0000001e, ddiv_func }, 616 + { 0xfc00ffff, 0x0000001f, ddivu_func }, 617 + {} 618 + }; 619 + 620 + /** 621 + * madd_func - Emulate a MADD instruction 622 + * @regs: Process register set 623 + * @ir: Instruction 624 + * 625 + * Returns 0 since it always succeeds. 626 + */ 627 + static int madd_func(struct pt_regs *regs, u32 ir) 628 + { 629 + s64 res; 630 + s32 rt, rs; 631 + 632 + rt = regs->regs[MIPSInst_RT(ir)]; 633 + rs = regs->regs[MIPSInst_RS(ir)]; 634 + res = (s64)rt * (s64)rs; 635 + rt = regs->hi; 636 + rs = regs->lo; 637 + res += ((((s64)rt) << 32) | (u32)rs); 638 + 639 + rt = res; 640 + regs->lo = (s64)rt; 641 + rs = res >> 32; 642 + regs->hi = (s64)rs; 643 + 644 + MIPS_R2_STATS(dsps); 645 + 646 + return 0; 647 + } 648 + 649 + /** 650 + * maddu_func - Emulate a MADDU instruction 651 + * @regs: Process register set 652 + * @ir: Instruction 653 + * 654 + * Returns 0 since it always succeeds. 655 + */ 656 + static int maddu_func(struct pt_regs *regs, u32 ir) 657 + { 658 + u64 res; 659 + u32 rt, rs; 660 + 661 + rt = regs->regs[MIPSInst_RT(ir)]; 662 + rs = regs->regs[MIPSInst_RS(ir)]; 663 + res = (u64)rt * (u64)rs; 664 + rt = regs->hi; 665 + rs = regs->lo; 666 + res += ((((s64)rt) << 32) | (u32)rs); 667 + 668 + rt = res; 669 + regs->lo = (s64)rt; 670 + rs = res >> 32; 671 + regs->hi = (s64)rs; 672 + 673 + MIPS_R2_STATS(dsps); 674 + 675 + return 0; 676 + } 677 + 678 + /** 679 + * msub_func - Emulate a MSUB instruction 680 + * @regs: Process register set 681 + * @ir: Instruction 682 + * 683 + * Returns 0 since it always succeeds. 684 + */ 685 + static int msub_func(struct pt_regs *regs, u32 ir) 686 + { 687 + s64 res; 688 + s32 rt, rs; 689 + 690 + rt = regs->regs[MIPSInst_RT(ir)]; 691 + rs = regs->regs[MIPSInst_RS(ir)]; 692 + res = (s64)rt * (s64)rs; 693 + rt = regs->hi; 694 + rs = regs->lo; 695 + res = ((((s64)rt) << 32) | (u32)rs) - res; 696 + 697 + rt = res; 698 + regs->lo = (s64)rt; 699 + rs = res >> 32; 700 + regs->hi = (s64)rs; 701 + 702 + MIPS_R2_STATS(dsps); 703 + 704 + return 0; 705 + } 706 + 707 + /** 708 + * msubu_func - Emulate a MSUBU instruction 709 + * @regs: Process register set 710 + * @ir: Instruction 711 + * 712 + * Returns 0 since it always succeeds. 
713 + */
714 + static int msubu_func(struct pt_regs *regs, u32 ir)
715 + {
716 + u64 res;
717 + u32 rt, rs;
718 +
719 + rt = regs->regs[MIPSInst_RT(ir)];
720 + rs = regs->regs[MIPSInst_RS(ir)];
721 + res = (u64)rt * (u64)rs;
722 + rt = regs->hi;
723 + rs = regs->lo;
724 + res = ((((s64)rt) << 32) | (u32)rs) - res;
725 +
726 + rt = res;
727 + regs->lo = (s64)rt;
728 + rs = res >> 32;
729 + regs->hi = (s64)rs;
730 +
731 + MIPS_R2_STATS(dsps);
732 +
733 + return 0;
734 + }
735 +
736 + /**
737 + * mul_func - Emulate a MUL instruction
738 + * @regs: Process register set
739 + * @ir: Instruction
740 + *
741 + * Returns 0 since it always succeeds.
742 + */
743 + static int mul_func(struct pt_regs *regs, u32 ir)
744 + {
745 + s64 res;
746 + s32 rt, rs;
747 +
748 + if (!MIPSInst_RD(ir))
749 + return 0;
750 + rt = regs->regs[MIPSInst_RT(ir)];
751 + rs = regs->regs[MIPSInst_RS(ir)];
752 + res = (s64)rt * (s64)rs;
753 +
754 + rs = res;
755 + regs->regs[MIPSInst_RD(ir)] = (s64)rs;
756 +
757 + MIPS_R2_STATS(muls);
758 +
759 + return 0;
760 + }
761 +
762 + /**
763 + * clz_func - Emulate a CLZ instruction
764 + * @regs: Process register set
765 + * @ir: Instruction
766 + *
767 + * Returns 0 since it always succeeds.
768 + */
769 + static int clz_func(struct pt_regs *regs, u32 ir)
770 + {
771 + u32 res;
772 + u32 rs;
773 +
774 + if (!MIPSInst_RD(ir))
775 + return 0;
776 +
777 + rs = regs->regs[MIPSInst_RS(ir)];
778 + __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
779 + regs->regs[MIPSInst_RD(ir)] = res;
780 +
781 + MIPS_R2_STATS(bops);
782 +
783 + return 0;
784 + }
785 +
786 + /**
787 + * clo_func - Emulate a CLO instruction
788 + * @regs: Process register set
789 + * @ir: Instruction
790 + *
791 + * Returns 0 since it always succeeds.
792 + */
793 +
794 + static int clo_func(struct pt_regs *regs, u32 ir)
795 + {
796 + u32 res;
797 + u32 rs;
798 +
799 + if (!MIPSInst_RD(ir))
800 + return 0;
801 +
802 + rs = regs->regs[MIPSInst_RS(ir)];
803 + __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
804 + regs->regs[MIPSInst_RD(ir)] = res;
805 +
806 + MIPS_R2_STATS(bops);
807 +
808 + return 0;
809 + }
810 +
811 + /**
812 + * dclz_func - Emulate a DCLZ instruction
813 + * @regs: Process register set
814 + * @ir: Instruction
815 + *
816 + * Returns 0 on success or SIGILL for 32-bit kernels.
817 + */
818 + static int dclz_func(struct pt_regs *regs, u32 ir)
819 + {
820 + u64 res;
821 + u64 rs;
822 +
823 + if (config_enabled(CONFIG_32BIT))
824 + return SIGILL;
825 +
826 + if (!MIPSInst_RD(ir))
827 + return 0;
828 +
829 + rs = regs->regs[MIPSInst_RS(ir)];
830 + __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
831 + regs->regs[MIPSInst_RD(ir)] = res;
832 +
833 + MIPS_R2_STATS(bops);
834 +
835 + return 0;
836 + }
837 +
838 + /**
839 + * dclo_func - Emulate a DCLO instruction
840 + * @regs: Process register set
841 + * @ir: Instruction
842 + *
843 + * Returns 0 on success or SIGILL for 32-bit kernels.
844 + */ 845 + static int dclo_func(struct pt_regs *regs, u32 ir) 846 + { 847 + u64 res; 848 + u64 rs; 849 + 850 + if (config_enabled(CONFIG_32BIT)) 851 + return SIGILL; 852 + 853 + if (!MIPSInst_RD(ir)) 854 + return 0; 855 + 856 + rs = regs->regs[MIPSInst_RS(ir)]; 857 + __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs)); 858 + regs->regs[MIPSInst_RD(ir)] = res; 859 + 860 + MIPS_R2_STATS(bops); 861 + 862 + return 0; 863 + } 864 + 865 + /* R6 removed instructions for the SPECIAL2 opcode */ 866 + static struct r2_decoder_table spec2_op_table[] = { 867 + { 0xfc00ffff, 0x70000000, madd_func }, 868 + { 0xfc00ffff, 0x70000001, maddu_func }, 869 + { 0xfc0007ff, 0x70000002, mul_func }, 870 + { 0xfc00ffff, 0x70000004, msub_func }, 871 + { 0xfc00ffff, 0x70000005, msubu_func }, 872 + { 0xfc0007ff, 0x70000020, clz_func }, 873 + { 0xfc0007ff, 0x70000021, clo_func }, 874 + { 0xfc0007ff, 0x70000024, dclz_func }, 875 + { 0xfc0007ff, 0x70000025, dclo_func }, 876 + { } 877 + }; 878 + 879 + static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, 880 + struct r2_decoder_table *table) 881 + { 882 + struct r2_decoder_table *p; 883 + int err; 884 + 885 + for (p = table; p->func; p++) { 886 + if ((inst & p->mask) == p->code) { 887 + err = (p->func)(regs, inst); 888 + return err; 889 + } 890 + } 891 + return SIGILL; 892 + } 893 + 894 + /** 895 + * mipsr2_decoder: Decode and emulate a MIPS R2 instruction 896 + * @regs: Process register set 897 + * @inst: Instruction to decode and emulate 898 + */ 899 + int mipsr2_decoder(struct pt_regs *regs, u32 inst) 900 + { 901 + int err = 0; 902 + unsigned long vaddr; 903 + u32 nir; 904 + unsigned long cpc, epc, nepc, r31, res, rs, rt; 905 + 906 + void __user *fault_addr = NULL; 907 + int pass = 0; 908 + 909 + repeat: 910 + r31 = regs->regs[31]; 911 + epc = regs->cp0_epc; 912 + err = compute_return_epc(regs); 913 + if (err < 0) { 914 + BUG(); 915 + return SIGEMT; 916 + } 917 + pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n", 918 + inst, epc, pass); 919 + 920 + switch (MIPSInst_OPCODE(inst)) { 921 + case spec_op: 922 + err = mipsr2_find_op_func(regs, inst, spec_op_table); 923 + if (err < 0) { 924 + /* FPU instruction under JR */ 925 + regs->cp0_cause |= CAUSEF_BD; 926 + goto fpu_emul; 927 + } 928 + break; 929 + case spec2_op: 930 + err = mipsr2_find_op_func(regs, inst, spec2_op_table); 931 + break; 932 + case bcond_op: 933 + rt = MIPSInst_RT(inst); 934 + rs = MIPSInst_RS(inst); 935 + switch (rt) { 936 + case tgei_op: 937 + if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) 938 + do_trap_or_bp(regs, 0, "TGEI"); 939 + 940 + MIPS_R2_STATS(traps); 941 + 942 + break; 943 + case tgeiu_op: 944 + if (regs->regs[rs] >= MIPSInst_UIMM(inst)) 945 + do_trap_or_bp(regs, 0, "TGEIU"); 946 + 947 + MIPS_R2_STATS(traps); 948 + 949 + break; 950 + case tlti_op: 951 + if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) 952 + do_trap_or_bp(regs, 0, "TLTI"); 953 + 954 + MIPS_R2_STATS(traps); 955 + 956 + break; 957 + case tltiu_op: 958 + if (regs->regs[rs] < MIPSInst_UIMM(inst)) 959 + do_trap_or_bp(regs, 0, "TLTIU"); 960 + 961 + MIPS_R2_STATS(traps); 962 + 963 + break; 964 + case teqi_op: 965 + if (regs->regs[rs] == MIPSInst_SIMM(inst)) 966 + do_trap_or_bp(regs, 0, "TEQI"); 967 + 968 + MIPS_R2_STATS(traps); 969 + 970 + break; 971 + case tnei_op: 972 + if (regs->regs[rs] != MIPSInst_SIMM(inst)) 973 + do_trap_or_bp(regs, 0, "TNEI"); 974 + 975 + MIPS_R2_STATS(traps); 976 + 977 + break; 978 + case bltzl_op: 979 + case bgezl_op: 980 + case bltzall_op: 981 + case 
bgezall_op: 982 + if (delay_slot(regs)) { 983 + err = SIGILL; 984 + break; 985 + } 986 + regs->regs[31] = r31; 987 + regs->cp0_epc = epc; 988 + err = __compute_return_epc(regs); 989 + if (err < 0) 990 + return SIGEMT; 991 + if (err != BRANCH_LIKELY_TAKEN) 992 + break; 993 + cpc = regs->cp0_epc; 994 + nepc = epc + 4; 995 + err = __get_user(nir, (u32 __user *)nepc); 996 + if (err) { 997 + err = SIGSEGV; 998 + break; 999 + } 1000 + /* 1001 + * This will probably be optimized away when 1002 + * CONFIG_DEBUG_FS is not enabled 1003 + */ 1004 + switch (rt) { 1005 + case bltzl_op: 1006 + MIPS_R2BR_STATS(bltzl); 1007 + break; 1008 + case bgezl_op: 1009 + MIPS_R2BR_STATS(bgezl); 1010 + break; 1011 + case bltzall_op: 1012 + MIPS_R2BR_STATS(bltzall); 1013 + break; 1014 + case bgezall_op: 1015 + MIPS_R2BR_STATS(bgezall); 1016 + break; 1017 + } 1018 + 1019 + switch (MIPSInst_OPCODE(nir)) { 1020 + case cop1_op: 1021 + case cop1x_op: 1022 + case lwc1_op: 1023 + case swc1_op: 1024 + regs->cp0_cause |= CAUSEF_BD; 1025 + goto fpu_emul; 1026 + } 1027 + if (nir) { 1028 + err = mipsr6_emul(regs, nir); 1029 + if (err > 0) { 1030 + err = mips_dsemul(regs, nir, cpc); 1031 + if (err == SIGILL) 1032 + err = SIGEMT; 1033 + MIPS_R2_STATS(dsemul); 1034 + } 1035 + } 1036 + break; 1037 + case bltzal_op: 1038 + case bgezal_op: 1039 + if (delay_slot(regs)) { 1040 + err = SIGILL; 1041 + break; 1042 + } 1043 + regs->regs[31] = r31; 1044 + regs->cp0_epc = epc; 1045 + err = __compute_return_epc(regs); 1046 + if (err < 0) 1047 + return SIGEMT; 1048 + cpc = regs->cp0_epc; 1049 + nepc = epc + 4; 1050 + err = __get_user(nir, (u32 __user *)nepc); 1051 + if (err) { 1052 + err = SIGSEGV; 1053 + break; 1054 + } 1055 + /* 1056 + * This will probably be optimized away when 1057 + * CONFIG_DEBUG_FS is not enabled 1058 + */ 1059 + switch (rt) { 1060 + case bltzal_op: 1061 + MIPS_R2BR_STATS(bltzal); 1062 + break; 1063 + case bgezal_op: 1064 + MIPS_R2BR_STATS(bgezal); 1065 + break; 1066 + } 1067 + 1068 + switch (MIPSInst_OPCODE(nir)) { 1069 + case cop1_op: 1070 + case cop1x_op: 1071 + case lwc1_op: 1072 + case swc1_op: 1073 + regs->cp0_cause |= CAUSEF_BD; 1074 + goto fpu_emul; 1075 + } 1076 + if (nir) { 1077 + err = mipsr6_emul(regs, nir); 1078 + if (err > 0) { 1079 + err = mips_dsemul(regs, nir, cpc); 1080 + if (err == SIGILL) 1081 + err = SIGEMT; 1082 + MIPS_R2_STATS(dsemul); 1083 + } 1084 + } 1085 + break; 1086 + default: 1087 + regs->regs[31] = r31; 1088 + regs->cp0_epc = epc; 1089 + err = SIGILL; 1090 + break; 1091 + } 1092 + break; 1093 + 1094 + case beql_op: 1095 + case bnel_op: 1096 + case blezl_op: 1097 + case bgtzl_op: 1098 + if (delay_slot(regs)) { 1099 + err = SIGILL; 1100 + break; 1101 + } 1102 + regs->regs[31] = r31; 1103 + regs->cp0_epc = epc; 1104 + err = __compute_return_epc(regs); 1105 + if (err < 0) 1106 + return SIGEMT; 1107 + if (err != BRANCH_LIKELY_TAKEN) 1108 + break; 1109 + cpc = regs->cp0_epc; 1110 + nepc = epc + 4; 1111 + err = __get_user(nir, (u32 __user *)nepc); 1112 + if (err) { 1113 + err = SIGSEGV; 1114 + break; 1115 + } 1116 + /* 1117 + * This will probably be optimized away when 1118 + * CONFIG_DEBUG_FS is not enabled 1119 + */ 1120 + switch (MIPSInst_OPCODE(inst)) { 1121 + case beql_op: 1122 + MIPS_R2BR_STATS(beql); 1123 + break; 1124 + case bnel_op: 1125 + MIPS_R2BR_STATS(bnel); 1126 + break; 1127 + case blezl_op: 1128 + MIPS_R2BR_STATS(blezl); 1129 + break; 1130 + case bgtzl_op: 1131 + MIPS_R2BR_STATS(bgtzl); 1132 + break; 1133 + } 1134 + 1135 + switch (MIPSInst_OPCODE(nir)) { 1136 + case cop1_op: 1137 + 
case cop1x_op:
1138 + case lwc1_op:
1139 + case swc1_op:
1140 + regs->cp0_cause |= CAUSEF_BD;
1141 + goto fpu_emul;
1142 + }
1143 + if (nir) {
1144 + err = mipsr6_emul(regs, nir);
1145 + if (err > 0) {
1146 + err = mips_dsemul(regs, nir, cpc);
1147 + if (err == SIGILL)
1148 + err = SIGEMT;
1149 + MIPS_R2_STATS(dsemul);
1150 + }
1151 + }
1152 + break;
1153 + case lwc1_op:
1154 + case swc1_op:
1155 + case cop1_op:
1156 + case cop1x_op:
1157 + fpu_emul:
1158 + regs->regs[31] = r31;
1159 + regs->cp0_epc = epc;
1160 + if (!used_math()) { /* First time FPU user. */
1161 + err = init_fpu();
1162 + set_used_math();
1163 + }
1164 + lose_fpu(1); /* Save FPU state for the emulator. */
1165 +
1166 + err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
1167 + &fault_addr);
1168 +
1169 + /*
1170 + * This is a tricky issue - lose_fpu() uses LL/SC atomics
1171 + * if the FPU is owned, which effectively cancels a user-level
1172 + * LL/SC, so it could be argued that FPU ownership should not
1173 + * be restored here. However, sequences of multiple FPU
1174 + * instructions are far more common than LL-FPU-SC, so keep the
1175 + * FPU owned until the next scheduler cycle cancels ownership.
1176 + */
1177 + own_fpu(1); /* Restore FPU state. */
1178 +
1179 + if (err)
1180 + current->thread.cp0_baduaddr = (unsigned long)fault_addr;
1181 +
1182 + MIPS_R2_STATS(fpus);
1183 +
1184 + break;
1185 +
1186 + case lwl_op:
1187 + rt = regs->regs[MIPSInst_RT(inst)];
1188 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1189 + if (!access_ok(VERIFY_READ, vaddr, 4)) {
1190 + current->thread.cp0_baduaddr = vaddr;
1191 + err = SIGSEGV;
1192 + break;
1193 + }
1194 + __asm__ __volatile__(
1195 + " .set push\n"
1196 + " .set reorder\n"
1197 + #ifdef CONFIG_CPU_LITTLE_ENDIAN
1198 + "1:" LB "%1, 0(%2)\n"
1199 + INS "%0, %1, 24, 8\n"
1200 + " andi %1, %2, 0x3\n"
1201 + " beq $0, %1, 9f\n"
1202 + ADDIU "%2, %2, -1\n"
1203 + "2:" LB "%1, 0(%2)\n"
1204 + INS "%0, %1, 16, 8\n"
1205 + " andi %1, %2, 0x3\n"
1206 + " beq $0, %1, 9f\n"
1207 + ADDIU "%2, %2, -1\n"
1208 + "3:" LB "%1, 0(%2)\n"
1209 + INS "%0, %1, 8, 8\n"
1210 + " andi %1, %2, 0x3\n"
1211 + " beq $0, %1, 9f\n"
1212 + ADDIU "%2, %2, -1\n"
1213 + "4:" LB "%1, 0(%2)\n"
1214 + INS "%0, %1, 0, 8\n"
1215 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */
1216 + "1:" LB "%1, 0(%2)\n"
1217 + INS "%0, %1, 24, 8\n"
1218 + ADDIU "%2, %2, 1\n"
1219 + " andi %1, %2, 0x3\n"
1220 + " beq $0, %1, 9f\n"
1221 + "2:" LB "%1, 0(%2)\n"
1222 + INS "%0, %1, 16, 8\n"
1223 + ADDIU "%2, %2, 1\n"
1224 + " andi %1, %2, 0x3\n"
1225 + " beq $0, %1, 9f\n"
1226 + "3:" LB "%1, 0(%2)\n"
1227 + INS "%0, %1, 8, 8\n"
1228 + ADDIU "%2, %2, 1\n"
1229 + " andi %1, %2, 0x3\n"
1230 + " beq $0, %1, 9f\n"
1231 + "4:" LB "%1, 0(%2)\n"
1232 + INS "%0, %1, 0, 8\n"
1233 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */
1234 + "9: sll %0, %0, 0\n"
1235 + "10:\n"
1236 + " .insn\n"
1237 + " .section .fixup,\"ax\"\n"
1238 + "8: li %3,%4\n"
1239 + " j 10b\n"
1240 + " .previous\n"
1241 + " .section __ex_table,\"a\"\n"
1242 + " .word 1b,8b\n"
1243 + " .word 2b,8b\n"
1244 + " .word 3b,8b\n"
1245 + " .word 4b,8b\n"
1246 + " .previous\n"
1247 + " .set pop\n"
1248 + : "+&r"(rt), "=&r"(rs),
1249 + "+&r"(vaddr), "+&r"(err)
1250 + : "i"(SIGSEGV));
1251 +
1252 + if (MIPSInst_RT(inst) && !err)
1253 + regs->regs[MIPSInst_RT(inst)] = rt;
1254 +
1255 + MIPS_R2_STATS(loads);
1256 +
1257 + break;
1258 +
1259 + case lwr_op:
1260 + rt = regs->regs[MIPSInst_RT(inst)];
1261 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
1262 + if
(!access_ok(VERIFY_READ, vaddr, 4)) { 1263 + current->thread.cp0_baduaddr = vaddr; 1264 + err = SIGSEGV; 1265 + break; 1266 + } 1267 + __asm__ __volatile__( 1268 + " .set push\n" 1269 + " .set reorder\n" 1270 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1271 + "1:" LB "%1, 0(%2)\n" 1272 + INS "%0, %1, 0, 8\n" 1273 + ADDIU "%2, %2, 1\n" 1274 + " andi %1, %2, 0x3\n" 1275 + " beq $0, %1, 9f\n" 1276 + "2:" LB "%1, 0(%2)\n" 1277 + INS "%0, %1, 8, 8\n" 1278 + ADDIU "%2, %2, 1\n" 1279 + " andi %1, %2, 0x3\n" 1280 + " beq $0, %1, 9f\n" 1281 + "3:" LB "%1, 0(%2)\n" 1282 + INS "%0, %1, 16, 8\n" 1283 + ADDIU "%2, %2, 1\n" 1284 + " andi %1, %2, 0x3\n" 1285 + " beq $0, %1, 9f\n" 1286 + "4:" LB "%1, 0(%2)\n" 1287 + INS "%0, %1, 24, 8\n" 1288 + " sll %0, %0, 0\n" 1289 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1290 + "1:" LB "%1, 0(%2)\n" 1291 + INS "%0, %1, 0, 8\n" 1292 + " andi %1, %2, 0x3\n" 1293 + " beq $0, %1, 9f\n" 1294 + ADDIU "%2, %2, -1\n" 1295 + "2:" LB "%1, 0(%2)\n" 1296 + INS "%0, %1, 8, 8\n" 1297 + " andi %1, %2, 0x3\n" 1298 + " beq $0, %1, 9f\n" 1299 + ADDIU "%2, %2, -1\n" 1300 + "3:" LB "%1, 0(%2)\n" 1301 + INS "%0, %1, 16, 8\n" 1302 + " andi %1, %2, 0x3\n" 1303 + " beq $0, %1, 9f\n" 1304 + ADDIU "%2, %2, -1\n" 1305 + "4:" LB "%1, 0(%2)\n" 1306 + INS "%0, %1, 24, 8\n" 1307 + " sll %0, %0, 0\n" 1308 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1309 + "9:\n" 1310 + "10:\n" 1311 + " .insn\n" 1312 + " .section .fixup,\"ax\"\n" 1313 + "8: li %3,%4\n" 1314 + " j 10b\n" 1315 + " .previous\n" 1316 + " .section __ex_table,\"a\"\n" 1317 + " .word 1b,8b\n" 1318 + " .word 2b,8b\n" 1319 + " .word 3b,8b\n" 1320 + " .word 4b,8b\n" 1321 + " .previous\n" 1322 + " .set pop\n" 1323 + : "+&r"(rt), "=&r"(rs), 1324 + "+&r"(vaddr), "+&r"(err) 1325 + : "i"(SIGSEGV)); 1326 + if (MIPSInst_RT(inst) && !err) 1327 + regs->regs[MIPSInst_RT(inst)] = rt; 1328 + 1329 + MIPS_R2_STATS(loads); 1330 + 1331 + break; 1332 + 1333 + case swl_op: 1334 + rt = regs->regs[MIPSInst_RT(inst)]; 1335 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1336 + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 1337 + current->thread.cp0_baduaddr = vaddr; 1338 + err = SIGSEGV; 1339 + break; 1340 + } 1341 + __asm__ __volatile__( 1342 + " .set push\n" 1343 + " .set reorder\n" 1344 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1345 + EXT "%1, %0, 24, 8\n" 1346 + "1:" SB "%1, 0(%2)\n" 1347 + " andi %1, %2, 0x3\n" 1348 + " beq $0, %1, 9f\n" 1349 + ADDIU "%2, %2, -1\n" 1350 + EXT "%1, %0, 16, 8\n" 1351 + "2:" SB "%1, 0(%2)\n" 1352 + " andi %1, %2, 0x3\n" 1353 + " beq $0, %1, 9f\n" 1354 + ADDIU "%2, %2, -1\n" 1355 + EXT "%1, %0, 8, 8\n" 1356 + "3:" SB "%1, 0(%2)\n" 1357 + " andi %1, %2, 0x3\n" 1358 + " beq $0, %1, 9f\n" 1359 + ADDIU "%2, %2, -1\n" 1360 + EXT "%1, %0, 0, 8\n" 1361 + "4:" SB "%1, 0(%2)\n" 1362 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1363 + EXT "%1, %0, 24, 8\n" 1364 + "1:" SB "%1, 0(%2)\n" 1365 + ADDIU "%2, %2, 1\n" 1366 + " andi %1, %2, 0x3\n" 1367 + " beq $0, %1, 9f\n" 1368 + EXT "%1, %0, 16, 8\n" 1369 + "2:" SB "%1, 0(%2)\n" 1370 + ADDIU "%2, %2, 1\n" 1371 + " andi %1, %2, 0x3\n" 1372 + " beq $0, %1, 9f\n" 1373 + EXT "%1, %0, 8, 8\n" 1374 + "3:" SB "%1, 0(%2)\n" 1375 + ADDIU "%2, %2, 1\n" 1376 + " andi %1, %2, 0x3\n" 1377 + " beq $0, %1, 9f\n" 1378 + EXT "%1, %0, 0, 8\n" 1379 + "4:" SB "%1, 0(%2)\n" 1380 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1381 + "9:\n" 1382 + " .insn\n" 1383 + " .section .fixup,\"ax\"\n" 1384 + "8: li %3,%4\n" 1385 + " j 9b\n" 1386 + " .previous\n" 1387 + " .section __ex_table,\"a\"\n" 1388 + " .word 1b,8b\n" 1389 + " .word 2b,8b\n" 
1390 + " .word 3b,8b\n" 1391 + " .word 4b,8b\n" 1392 + " .previous\n" 1393 + " .set pop\n" 1394 + : "+&r"(rt), "=&r"(rs), 1395 + "+&r"(vaddr), "+&r"(err) 1396 + : "i"(SIGSEGV) 1397 + : "memory"); 1398 + 1399 + MIPS_R2_STATS(stores); 1400 + 1401 + break; 1402 + 1403 + case swr_op: 1404 + rt = regs->regs[MIPSInst_RT(inst)]; 1405 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1406 + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 1407 + current->thread.cp0_baduaddr = vaddr; 1408 + err = SIGSEGV; 1409 + break; 1410 + } 1411 + __asm__ __volatile__( 1412 + " .set push\n" 1413 + " .set reorder\n" 1414 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1415 + EXT "%1, %0, 0, 8\n" 1416 + "1:" SB "%1, 0(%2)\n" 1417 + ADDIU "%2, %2, 1\n" 1418 + " andi %1, %2, 0x3\n" 1419 + " beq $0, %1, 9f\n" 1420 + EXT "%1, %0, 8, 8\n" 1421 + "2:" SB "%1, 0(%2)\n" 1422 + ADDIU "%2, %2, 1\n" 1423 + " andi %1, %2, 0x3\n" 1424 + " beq $0, %1, 9f\n" 1425 + EXT "%1, %0, 16, 8\n" 1426 + "3:" SB "%1, 0(%2)\n" 1427 + ADDIU "%2, %2, 1\n" 1428 + " andi %1, %2, 0x3\n" 1429 + " beq $0, %1, 9f\n" 1430 + EXT "%1, %0, 24, 8\n" 1431 + "4:" SB "%1, 0(%2)\n" 1432 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1433 + EXT "%1, %0, 0, 8\n" 1434 + "1:" SB "%1, 0(%2)\n" 1435 + " andi %1, %2, 0x3\n" 1436 + " beq $0, %1, 9f\n" 1437 + ADDIU "%2, %2, -1\n" 1438 + EXT "%1, %0, 8, 8\n" 1439 + "2:" SB "%1, 0(%2)\n" 1440 + " andi %1, %2, 0x3\n" 1441 + " beq $0, %1, 9f\n" 1442 + ADDIU "%2, %2, -1\n" 1443 + EXT "%1, %0, 16, 8\n" 1444 + "3:" SB "%1, 0(%2)\n" 1445 + " andi %1, %2, 0x3\n" 1446 + " beq $0, %1, 9f\n" 1447 + ADDIU "%2, %2, -1\n" 1448 + EXT "%1, %0, 24, 8\n" 1449 + "4:" SB "%1, 0(%2)\n" 1450 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1451 + "9:\n" 1452 + " .insn\n" 1453 + " .section .fixup,\"ax\"\n" 1454 + "8: li %3,%4\n" 1455 + " j 9b\n" 1456 + " .previous\n" 1457 + " .section __ex_table,\"a\"\n" 1458 + " .word 1b,8b\n" 1459 + " .word 2b,8b\n" 1460 + " .word 3b,8b\n" 1461 + " .word 4b,8b\n" 1462 + " .previous\n" 1463 + " .set pop\n" 1464 + : "+&r"(rt), "=&r"(rs), 1465 + "+&r"(vaddr), "+&r"(err) 1466 + : "i"(SIGSEGV) 1467 + : "memory"); 1468 + 1469 + MIPS_R2_STATS(stores); 1470 + 1471 + break; 1472 + 1473 + case ldl_op: 1474 + if (config_enabled(CONFIG_32BIT)) { 1475 + err = SIGILL; 1476 + break; 1477 + } 1478 + 1479 + rt = regs->regs[MIPSInst_RT(inst)]; 1480 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1481 + if (!access_ok(VERIFY_READ, vaddr, 8)) { 1482 + current->thread.cp0_baduaddr = vaddr; 1483 + err = SIGSEGV; 1484 + break; 1485 + } 1486 + __asm__ __volatile__( 1487 + " .set push\n" 1488 + " .set reorder\n" 1489 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1490 + "1: lb %1, 0(%2)\n" 1491 + " dinsu %0, %1, 56, 8\n" 1492 + " andi %1, %2, 0x7\n" 1493 + " beq $0, %1, 9f\n" 1494 + " daddiu %2, %2, -1\n" 1495 + "2: lb %1, 0(%2)\n" 1496 + " dinsu %0, %1, 48, 8\n" 1497 + " andi %1, %2, 0x7\n" 1498 + " beq $0, %1, 9f\n" 1499 + " daddiu %2, %2, -1\n" 1500 + "3: lb %1, 0(%2)\n" 1501 + " dinsu %0, %1, 40, 8\n" 1502 + " andi %1, %2, 0x7\n" 1503 + " beq $0, %1, 9f\n" 1504 + " daddiu %2, %2, -1\n" 1505 + "4: lb %1, 0(%2)\n" 1506 + " dinsu %0, %1, 32, 8\n" 1507 + " andi %1, %2, 0x7\n" 1508 + " beq $0, %1, 9f\n" 1509 + " daddiu %2, %2, -1\n" 1510 + "5: lb %1, 0(%2)\n" 1511 + " dins %0, %1, 24, 8\n" 1512 + " andi %1, %2, 0x7\n" 1513 + " beq $0, %1, 9f\n" 1514 + " daddiu %2, %2, -1\n" 1515 + "6: lb %1, 0(%2)\n" 1516 + " dins %0, %1, 16, 8\n" 1517 + " andi %1, %2, 0x7\n" 1518 + " beq $0, %1, 9f\n" 1519 + " daddiu %2, %2, -1\n" 1520 + "7: lb %1, 0(%2)\n" 1521 + " 
dins %0, %1, 8, 8\n" 1522 + " andi %1, %2, 0x7\n" 1523 + " beq $0, %1, 9f\n" 1524 + " daddiu %2, %2, -1\n" 1525 + "0: lb %1, 0(%2)\n" 1526 + " dins %0, %1, 0, 8\n" 1527 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1528 + "1: lb %1, 0(%2)\n" 1529 + " dinsu %0, %1, 56, 8\n" 1530 + " daddiu %2, %2, 1\n" 1531 + " andi %1, %2, 0x7\n" 1532 + " beq $0, %1, 9f\n" 1533 + "2: lb %1, 0(%2)\n" 1534 + " dinsu %0, %1, 48, 8\n" 1535 + " daddiu %2, %2, 1\n" 1536 + " andi %1, %2, 0x7\n" 1537 + " beq $0, %1, 9f\n" 1538 + "3: lb %1, 0(%2)\n" 1539 + " dinsu %0, %1, 40, 8\n" 1540 + " daddiu %2, %2, 1\n" 1541 + " andi %1, %2, 0x7\n" 1542 + " beq $0, %1, 9f\n" 1543 + "4: lb %1, 0(%2)\n" 1544 + " dinsu %0, %1, 32, 8\n" 1545 + " daddiu %2, %2, 1\n" 1546 + " andi %1, %2, 0x7\n" 1547 + " beq $0, %1, 9f\n" 1548 + "5: lb %1, 0(%2)\n" 1549 + " dins %0, %1, 24, 8\n" 1550 + " daddiu %2, %2, 1\n" 1551 + " andi %1, %2, 0x7\n" 1552 + " beq $0, %1, 9f\n" 1553 + "6: lb %1, 0(%2)\n" 1554 + " dins %0, %1, 16, 8\n" 1555 + " daddiu %2, %2, 1\n" 1556 + " andi %1, %2, 0x7\n" 1557 + " beq $0, %1, 9f\n" 1558 + "7: lb %1, 0(%2)\n" 1559 + " dins %0, %1, 8, 8\n" 1560 + " daddiu %2, %2, 1\n" 1561 + " andi %1, %2, 0x7\n" 1562 + " beq $0, %1, 9f\n" 1563 + "0: lb %1, 0(%2)\n" 1564 + " dins %0, %1, 0, 8\n" 1565 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1566 + "9:\n" 1567 + " .insn\n" 1568 + " .section .fixup,\"ax\"\n" 1569 + "8: li %3,%4\n" 1570 + " j 9b\n" 1571 + " .previous\n" 1572 + " .section __ex_table,\"a\"\n" 1573 + " .word 1b,8b\n" 1574 + " .word 2b,8b\n" 1575 + " .word 3b,8b\n" 1576 + " .word 4b,8b\n" 1577 + " .word 5b,8b\n" 1578 + " .word 6b,8b\n" 1579 + " .word 7b,8b\n" 1580 + " .word 0b,8b\n" 1581 + " .previous\n" 1582 + " .set pop\n" 1583 + : "+&r"(rt), "=&r"(rs), 1584 + "+&r"(vaddr), "+&r"(err) 1585 + : "i"(SIGSEGV)); 1586 + if (MIPSInst_RT(inst) && !err) 1587 + regs->regs[MIPSInst_RT(inst)] = rt; 1588 + 1589 + MIPS_R2_STATS(loads); 1590 + break; 1591 + 1592 + case ldr_op: 1593 + if (config_enabled(CONFIG_32BIT)) { 1594 + err = SIGILL; 1595 + break; 1596 + } 1597 + 1598 + rt = regs->regs[MIPSInst_RT(inst)]; 1599 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1600 + if (!access_ok(VERIFY_READ, vaddr, 8)) { 1601 + current->thread.cp0_baduaddr = vaddr; 1602 + err = SIGSEGV; 1603 + break; 1604 + } 1605 + __asm__ __volatile__( 1606 + " .set push\n" 1607 + " .set reorder\n" 1608 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1609 + "1: lb %1, 0(%2)\n" 1610 + " dins %0, %1, 0, 8\n" 1611 + " daddiu %2, %2, 1\n" 1612 + " andi %1, %2, 0x7\n" 1613 + " beq $0, %1, 9f\n" 1614 + "2: lb %1, 0(%2)\n" 1615 + " dins %0, %1, 8, 8\n" 1616 + " daddiu %2, %2, 1\n" 1617 + " andi %1, %2, 0x7\n" 1618 + " beq $0, %1, 9f\n" 1619 + "3: lb %1, 0(%2)\n" 1620 + " dins %0, %1, 16, 8\n" 1621 + " daddiu %2, %2, 1\n" 1622 + " andi %1, %2, 0x7\n" 1623 + " beq $0, %1, 9f\n" 1624 + "4: lb %1, 0(%2)\n" 1625 + " dins %0, %1, 24, 8\n" 1626 + " daddiu %2, %2, 1\n" 1627 + " andi %1, %2, 0x7\n" 1628 + " beq $0, %1, 9f\n" 1629 + "5: lb %1, 0(%2)\n" 1630 + " dinsu %0, %1, 32, 8\n" 1631 + " daddiu %2, %2, 1\n" 1632 + " andi %1, %2, 0x7\n" 1633 + " beq $0, %1, 9f\n" 1634 + "6: lb %1, 0(%2)\n" 1635 + " dinsu %0, %1, 40, 8\n" 1636 + " daddiu %2, %2, 1\n" 1637 + " andi %1, %2, 0x7\n" 1638 + " beq $0, %1, 9f\n" 1639 + "7: lb %1, 0(%2)\n" 1640 + " dinsu %0, %1, 48, 8\n" 1641 + " daddiu %2, %2, 1\n" 1642 + " andi %1, %2, 0x7\n" 1643 + " beq $0, %1, 9f\n" 1644 + "0: lb %1, 0(%2)\n" 1645 + " dinsu %0, %1, 56, 8\n" 1646 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1647 + "1: lb %1, 0(%2)\n" 
1648 + " dins %0, %1, 0, 8\n" 1649 + " andi %1, %2, 0x7\n" 1650 + " beq $0, %1, 9f\n" 1651 + " daddiu %2, %2, -1\n" 1652 + "2: lb %1, 0(%2)\n" 1653 + " dins %0, %1, 8, 8\n" 1654 + " andi %1, %2, 0x7\n" 1655 + " beq $0, %1, 9f\n" 1656 + " daddiu %2, %2, -1\n" 1657 + "3: lb %1, 0(%2)\n" 1658 + " dins %0, %1, 16, 8\n" 1659 + " andi %1, %2, 0x7\n" 1660 + " beq $0, %1, 9f\n" 1661 + " daddiu %2, %2, -1\n" 1662 + "4: lb %1, 0(%2)\n" 1663 + " dins %0, %1, 24, 8\n" 1664 + " andi %1, %2, 0x7\n" 1665 + " beq $0, %1, 9f\n" 1666 + " daddiu %2, %2, -1\n" 1667 + "5: lb %1, 0(%2)\n" 1668 + " dinsu %0, %1, 32, 8\n" 1669 + " andi %1, %2, 0x7\n" 1670 + " beq $0, %1, 9f\n" 1671 + " daddiu %2, %2, -1\n" 1672 + "6: lb %1, 0(%2)\n" 1673 + " dinsu %0, %1, 40, 8\n" 1674 + " andi %1, %2, 0x7\n" 1675 + " beq $0, %1, 9f\n" 1676 + " daddiu %2, %2, -1\n" 1677 + "7: lb %1, 0(%2)\n" 1678 + " dinsu %0, %1, 48, 8\n" 1679 + " andi %1, %2, 0x7\n" 1680 + " beq $0, %1, 9f\n" 1681 + " daddiu %2, %2, -1\n" 1682 + "0: lb %1, 0(%2)\n" 1683 + " dinsu %0, %1, 56, 8\n" 1684 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1685 + "9:\n" 1686 + " .insn\n" 1687 + " .section .fixup,\"ax\"\n" 1688 + "8: li %3,%4\n" 1689 + " j 9b\n" 1690 + " .previous\n" 1691 + " .section __ex_table,\"a\"\n" 1692 + " .word 1b,8b\n" 1693 + " .word 2b,8b\n" 1694 + " .word 3b,8b\n" 1695 + " .word 4b,8b\n" 1696 + " .word 5b,8b\n" 1697 + " .word 6b,8b\n" 1698 + " .word 7b,8b\n" 1699 + " .word 0b,8b\n" 1700 + " .previous\n" 1701 + " .set pop\n" 1702 + : "+&r"(rt), "=&r"(rs), 1703 + "+&r"(vaddr), "+&r"(err) 1704 + : "i"(SIGSEGV)); 1705 + if (MIPSInst_RT(inst) && !err) 1706 + regs->regs[MIPSInst_RT(inst)] = rt; 1707 + 1708 + MIPS_R2_STATS(loads); 1709 + break; 1710 + 1711 + case sdl_op: 1712 + if (config_enabled(CONFIG_32BIT)) { 1713 + err = SIGILL; 1714 + break; 1715 + } 1716 + 1717 + rt = regs->regs[MIPSInst_RT(inst)]; 1718 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1719 + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 1720 + current->thread.cp0_baduaddr = vaddr; 1721 + err = SIGSEGV; 1722 + break; 1723 + } 1724 + __asm__ __volatile__( 1725 + " .set push\n" 1726 + " .set reorder\n" 1727 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1728 + " dextu %1, %0, 56, 8\n" 1729 + "1: sb %1, 0(%2)\n" 1730 + " andi %1, %2, 0x7\n" 1731 + " beq $0, %1, 9f\n" 1732 + " daddiu %2, %2, -1\n" 1733 + " dextu %1, %0, 48, 8\n" 1734 + "2: sb %1, 0(%2)\n" 1735 + " andi %1, %2, 0x7\n" 1736 + " beq $0, %1, 9f\n" 1737 + " daddiu %2, %2, -1\n" 1738 + " dextu %1, %0, 40, 8\n" 1739 + "3: sb %1, 0(%2)\n" 1740 + " andi %1, %2, 0x7\n" 1741 + " beq $0, %1, 9f\n" 1742 + " daddiu %2, %2, -1\n" 1743 + " dextu %1, %0, 32, 8\n" 1744 + "4: sb %1, 0(%2)\n" 1745 + " andi %1, %2, 0x7\n" 1746 + " beq $0, %1, 9f\n" 1747 + " daddiu %2, %2, -1\n" 1748 + " dext %1, %0, 24, 8\n" 1749 + "5: sb %1, 0(%2)\n" 1750 + " andi %1, %2, 0x7\n" 1751 + " beq $0, %1, 9f\n" 1752 + " daddiu %2, %2, -1\n" 1753 + " dext %1, %0, 16, 8\n" 1754 + "6: sb %1, 0(%2)\n" 1755 + " andi %1, %2, 0x7\n" 1756 + " beq $0, %1, 9f\n" 1757 + " daddiu %2, %2, -1\n" 1758 + " dext %1, %0, 8, 8\n" 1759 + "7: sb %1, 0(%2)\n" 1760 + " andi %1, %2, 0x7\n" 1761 + " beq $0, %1, 9f\n" 1762 + " daddiu %2, %2, -1\n" 1763 + " dext %1, %0, 0, 8\n" 1764 + "0: sb %1, 0(%2)\n" 1765 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1766 + " dextu %1, %0, 56, 8\n" 1767 + "1: sb %1, 0(%2)\n" 1768 + " daddiu %2, %2, 1\n" 1769 + " andi %1, %2, 0x7\n" 1770 + " beq $0, %1, 9f\n" 1771 + " dextu %1, %0, 48, 8\n" 1772 + "2: sb %1, 0(%2)\n" 1773 + " daddiu %2, %2, 1\n" 1774 + " andi %1, 
%2, 0x7\n" 1775 + " beq $0, %1, 9f\n" 1776 + " dextu %1, %0, 40, 8\n" 1777 + "3: sb %1, 0(%2)\n" 1778 + " daddiu %2, %2, 1\n" 1779 + " andi %1, %2, 0x7\n" 1780 + " beq $0, %1, 9f\n" 1781 + " dextu %1, %0, 32, 8\n" 1782 + "4: sb %1, 0(%2)\n" 1783 + " daddiu %2, %2, 1\n" 1784 + " andi %1, %2, 0x7\n" 1785 + " beq $0, %1, 9f\n" 1786 + " dext %1, %0, 24, 8\n" 1787 + "5: sb %1, 0(%2)\n" 1788 + " daddiu %2, %2, 1\n" 1789 + " andi %1, %2, 0x7\n" 1790 + " beq $0, %1, 9f\n" 1791 + " dext %1, %0, 16, 8\n" 1792 + "6: sb %1, 0(%2)\n" 1793 + " daddiu %2, %2, 1\n" 1794 + " andi %1, %2, 0x7\n" 1795 + " beq $0, %1, 9f\n" 1796 + " dext %1, %0, 8, 8\n" 1797 + "7: sb %1, 0(%2)\n" 1798 + " daddiu %2, %2, 1\n" 1799 + " andi %1, %2, 0x7\n" 1800 + " beq $0, %1, 9f\n" 1801 + " dext %1, %0, 0, 8\n" 1802 + "0: sb %1, 0(%2)\n" 1803 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1804 + "9:\n" 1805 + " .insn\n" 1806 + " .section .fixup,\"ax\"\n" 1807 + "8: li %3,%4\n" 1808 + " j 9b\n" 1809 + " .previous\n" 1810 + " .section __ex_table,\"a\"\n" 1811 + " .word 1b,8b\n" 1812 + " .word 2b,8b\n" 1813 + " .word 3b,8b\n" 1814 + " .word 4b,8b\n" 1815 + " .word 5b,8b\n" 1816 + " .word 6b,8b\n" 1817 + " .word 7b,8b\n" 1818 + " .word 0b,8b\n" 1819 + " .previous\n" 1820 + " .set pop\n" 1821 + : "+&r"(rt), "=&r"(rs), 1822 + "+&r"(vaddr), "+&r"(err) 1823 + : "i"(SIGSEGV) 1824 + : "memory"); 1825 + 1826 + MIPS_R2_STATS(stores); 1827 + break; 1828 + 1829 + case sdr_op: 1830 + if (config_enabled(CONFIG_32BIT)) { 1831 + err = SIGILL; 1832 + break; 1833 + } 1834 + 1835 + rt = regs->regs[MIPSInst_RT(inst)]; 1836 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1837 + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 1838 + current->thread.cp0_baduaddr = vaddr; 1839 + err = SIGSEGV; 1840 + break; 1841 + } 1842 + __asm__ __volatile__( 1843 + " .set push\n" 1844 + " .set reorder\n" 1845 + #ifdef CONFIG_CPU_LITTLE_ENDIAN 1846 + " dext %1, %0, 0, 8\n" 1847 + "1: sb %1, 0(%2)\n" 1848 + " daddiu %2, %2, 1\n" 1849 + " andi %1, %2, 0x7\n" 1850 + " beq $0, %1, 9f\n" 1851 + " dext %1, %0, 8, 8\n" 1852 + "2: sb %1, 0(%2)\n" 1853 + " daddiu %2, %2, 1\n" 1854 + " andi %1, %2, 0x7\n" 1855 + " beq $0, %1, 9f\n" 1856 + " dext %1, %0, 16, 8\n" 1857 + "3: sb %1, 0(%2)\n" 1858 + " daddiu %2, %2, 1\n" 1859 + " andi %1, %2, 0x7\n" 1860 + " beq $0, %1, 9f\n" 1861 + " dext %1, %0, 24, 8\n" 1862 + "4: sb %1, 0(%2)\n" 1863 + " daddiu %2, %2, 1\n" 1864 + " andi %1, %2, 0x7\n" 1865 + " beq $0, %1, 9f\n" 1866 + " dextu %1, %0, 32, 8\n" 1867 + "5: sb %1, 0(%2)\n" 1868 + " daddiu %2, %2, 1\n" 1869 + " andi %1, %2, 0x7\n" 1870 + " beq $0, %1, 9f\n" 1871 + " dextu %1, %0, 40, 8\n" 1872 + "6: sb %1, 0(%2)\n" 1873 + " daddiu %2, %2, 1\n" 1874 + " andi %1, %2, 0x7\n" 1875 + " beq $0, %1, 9f\n" 1876 + " dextu %1, %0, 48, 8\n" 1877 + "7: sb %1, 0(%2)\n" 1878 + " daddiu %2, %2, 1\n" 1879 + " andi %1, %2, 0x7\n" 1880 + " beq $0, %1, 9f\n" 1881 + " dextu %1, %0, 56, 8\n" 1882 + "0: sb %1, 0(%2)\n" 1883 + #else /* !CONFIG_CPU_LITTLE_ENDIAN */ 1884 + " dext %1, %0, 0, 8\n" 1885 + "1: sb %1, 0(%2)\n" 1886 + " andi %1, %2, 0x7\n" 1887 + " beq $0, %1, 9f\n" 1888 + " daddiu %2, %2, -1\n" 1889 + " dext %1, %0, 8, 8\n" 1890 + "2: sb %1, 0(%2)\n" 1891 + " andi %1, %2, 0x7\n" 1892 + " beq $0, %1, 9f\n" 1893 + " daddiu %2, %2, -1\n" 1894 + " dext %1, %0, 16, 8\n" 1895 + "3: sb %1, 0(%2)\n" 1896 + " andi %1, %2, 0x7\n" 1897 + " beq $0, %1, 9f\n" 1898 + " daddiu %2, %2, -1\n" 1899 + " dext %1, %0, 24, 8\n" 1900 + "4: sb %1, 0(%2)\n" 1901 + " andi %1, %2, 0x7\n" 1902 + " beq $0, %1, 9f\n" 1903 
+ " daddiu %2, %2, -1\n" 1904 + " dextu %1, %0, 32, 8\n" 1905 + "5: sb %1, 0(%2)\n" 1906 + " andi %1, %2, 0x7\n" 1907 + " beq $0, %1, 9f\n" 1908 + " daddiu %2, %2, -1\n" 1909 + " dextu %1, %0, 40, 8\n" 1910 + "6: sb %1, 0(%2)\n" 1911 + " andi %1, %2, 0x7\n" 1912 + " beq $0, %1, 9f\n" 1913 + " daddiu %2, %2, -1\n" 1914 + " dextu %1, %0, 48, 8\n" 1915 + "7: sb %1, 0(%2)\n" 1916 + " andi %1, %2, 0x7\n" 1917 + " beq $0, %1, 9f\n" 1918 + " daddiu %2, %2, -1\n" 1919 + " dextu %1, %0, 56, 8\n" 1920 + "0: sb %1, 0(%2)\n" 1921 + #endif /* CONFIG_CPU_LITTLE_ENDIAN */ 1922 + "9:\n" 1923 + " .insn\n" 1924 + " .section .fixup,\"ax\"\n" 1925 + "8: li %3,%4\n" 1926 + " j 9b\n" 1927 + " .previous\n" 1928 + " .section __ex_table,\"a\"\n" 1929 + " .word 1b,8b\n" 1930 + " .word 2b,8b\n" 1931 + " .word 3b,8b\n" 1932 + " .word 4b,8b\n" 1933 + " .word 5b,8b\n" 1934 + " .word 6b,8b\n" 1935 + " .word 7b,8b\n" 1936 + " .word 0b,8b\n" 1937 + " .previous\n" 1938 + " .set pop\n" 1939 + : "+&r"(rt), "=&r"(rs), 1940 + "+&r"(vaddr), "+&r"(err) 1941 + : "i"(SIGSEGV) 1942 + : "memory"); 1943 + 1944 + MIPS_R2_STATS(stores); 1945 + 1946 + break; 1947 + case ll_op: 1948 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 1949 + if (vaddr & 0x3) { 1950 + current->thread.cp0_baduaddr = vaddr; 1951 + err = SIGBUS; 1952 + break; 1953 + } 1954 + if (!access_ok(VERIFY_READ, vaddr, 4)) { 1955 + current->thread.cp0_baduaddr = vaddr; 1956 + err = SIGBUS; 1957 + break; 1958 + } 1959 + 1960 + if (!cpu_has_rw_llb) { 1961 + /* 1962 + * An LL/SC block can't be safely emulated without 1963 + * a Config5/LLB availability. So it's probably time to 1964 + * kill our process before things get any worse. This is 1965 + * because Config5/LLB allows us to use ERETNC so that 1966 + * the LLAddr/LLB bit is not cleared when we return from 1967 + * an exception. MIPS R2 LL/SC instructions trap with an 1968 + * RI exception so once we emulate them here, we return 1969 + * back to userland with ERETNC. That preserves the 1970 + * LLAddr/LLB so the subsequent SC instruction will 1971 + * succeed preserving the atomic semantics of the LL/SC 1972 + * block. Without that, there is no safe way to emulate 1973 + * an LL/SC block in MIPSR2 userland. 1974 + */ 1975 + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 1976 + err = SIGKILL; 1977 + break; 1978 + } 1979 + 1980 + __asm__ __volatile__( 1981 + "1:\n" 1982 + "ll %0, 0(%2)\n" 1983 + "2:\n" 1984 + ".insn\n" 1985 + ".section .fixup,\"ax\"\n" 1986 + "3:\n" 1987 + "li %1, %3\n" 1988 + "j 2b\n" 1989 + ".previous\n" 1990 + ".section __ex_table,\"a\"\n" 1991 + ".word 1b, 3b\n" 1992 + ".previous\n" 1993 + : "=&r"(res), "+&r"(err) 1994 + : "r"(vaddr), "i"(SIGSEGV) 1995 + : "memory"); 1996 + 1997 + if (MIPSInst_RT(inst) && !err) 1998 + regs->regs[MIPSInst_RT(inst)] = res; 1999 + MIPS_R2_STATS(llsc); 2000 + 2001 + break; 2002 + 2003 + case sc_op: 2004 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2005 + if (vaddr & 0x3) { 2006 + current->thread.cp0_baduaddr = vaddr; 2007 + err = SIGBUS; 2008 + break; 2009 + } 2010 + if (!access_ok(VERIFY_WRITE, vaddr, 4)) { 2011 + current->thread.cp0_baduaddr = vaddr; 2012 + err = SIGBUS; 2013 + break; 2014 + } 2015 + 2016 + if (!cpu_has_rw_llb) { 2017 + /* 2018 + * An LL/SC block can't be safely emulated without 2019 + * a Config5/LLB availability. So it's probably time to 2020 + * kill our process before things get any worse. 
This is 2021 + * because Config5/LLB allows us to use ERETNC so that 2022 + * the LLAddr/LLB bit is not cleared when we return from 2023 + * an exception. MIPS R2 LL/SC instructions trap with an 2024 + * RI exception so once we emulate them here, we return 2025 + * back to userland with ERETNC. That preserves the 2026 + * LLAddr/LLB so the subsequent SC instruction will 2027 + * succeed preserving the atomic semantics of the LL/SC 2028 + * block. Without that, there is no safe way to emulate 2029 + * an LL/SC block in MIPSR2 userland. 2030 + */ 2031 + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 2032 + err = SIGKILL; 2033 + break; 2034 + } 2035 + 2036 + res = regs->regs[MIPSInst_RT(inst)]; 2037 + 2038 + __asm__ __volatile__( 2039 + "1:\n" 2040 + "sc %0, 0(%2)\n" 2041 + "2:\n" 2042 + ".insn\n" 2043 + ".section .fixup,\"ax\"\n" 2044 + "3:\n" 2045 + "li %1, %3\n" 2046 + "j 2b\n" 2047 + ".previous\n" 2048 + ".section __ex_table,\"a\"\n" 2049 + ".word 1b, 3b\n" 2050 + ".previous\n" 2051 + : "+&r"(res), "+&r"(err) 2052 + : "r"(vaddr), "i"(SIGSEGV)); 2053 + 2054 + if (MIPSInst_RT(inst) && !err) 2055 + regs->regs[MIPSInst_RT(inst)] = res; 2056 + 2057 + MIPS_R2_STATS(llsc); 2058 + 2059 + break; 2060 + 2061 + case lld_op: 2062 + if (config_enabled(CONFIG_32BIT)) { 2063 + err = SIGILL; 2064 + break; 2065 + } 2066 + 2067 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2068 + if (vaddr & 0x7) { 2069 + current->thread.cp0_baduaddr = vaddr; 2070 + err = SIGBUS; 2071 + break; 2072 + } 2073 + if (!access_ok(VERIFY_READ, vaddr, 8)) { 2074 + current->thread.cp0_baduaddr = vaddr; 2075 + err = SIGBUS; 2076 + break; 2077 + } 2078 + 2079 + if (!cpu_has_rw_llb) { 2080 + /* 2081 + * An LL/SC block can't be safely emulated without 2082 + * a Config5/LLB availability. So it's probably time to 2083 + * kill our process before things get any worse. This is 2084 + * because Config5/LLB allows us to use ERETNC so that 2085 + * the LLAddr/LLB bit is not cleared when we return from 2086 + * an exception. MIPS R2 LL/SC instructions trap with an 2087 + * RI exception so once we emulate them here, we return 2088 + * back to userland with ERETNC. That preserves the 2089 + * LLAddr/LLB so the subsequent SC instruction will 2090 + * succeed preserving the atomic semantics of the LL/SC 2091 + * block. Without that, there is no safe way to emulate 2092 + * an LL/SC block in MIPSR2 userland. 
2093 + */ 2094 + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 2095 + err = SIGKILL; 2096 + break; 2097 + } 2098 + 2099 + __asm__ __volatile__( 2100 + "1:\n" 2101 + "lld %0, 0(%2)\n" 2102 + "2:\n" 2103 + ".insn\n" 2104 + ".section .fixup,\"ax\"\n" 2105 + "3:\n" 2106 + "li %1, %3\n" 2107 + "j 2b\n" 2108 + ".previous\n" 2109 + ".section __ex_table,\"a\"\n" 2110 + ".word 1b, 3b\n" 2111 + ".previous\n" 2112 + : "=&r"(res), "+&r"(err) 2113 + : "r"(vaddr), "i"(SIGSEGV) 2114 + : "memory"); 2115 + if (MIPSInst_RT(inst) && !err) 2116 + regs->regs[MIPSInst_RT(inst)] = res; 2117 + 2118 + MIPS_R2_STATS(llsc); 2119 + 2120 + break; 2121 + 2122 + case scd_op: 2123 + if (config_enabled(CONFIG_32BIT)) { 2124 + err = SIGILL; 2125 + break; 2126 + } 2127 + 2128 + vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); 2129 + if (vaddr & 0x7) { 2130 + current->thread.cp0_baduaddr = vaddr; 2131 + err = SIGBUS; 2132 + break; 2133 + } 2134 + if (!access_ok(VERIFY_WRITE, vaddr, 8)) { 2135 + current->thread.cp0_baduaddr = vaddr; 2136 + err = SIGBUS; 2137 + break; 2138 + } 2139 + 2140 + if (!cpu_has_rw_llb) { 2141 + /* 2142 + * An LL/SC block can't be safely emulated without 2143 + * a Config5/LLB availability. So it's probably time to 2144 + * kill our process before things get any worse. This is 2145 + * because Config5/LLB allows us to use ERETNC so that 2146 + * the LLAddr/LLB bit is not cleared when we return from 2147 + * an exception. MIPS R2 LL/SC instructions trap with an 2148 + * RI exception so once we emulate them here, we return 2149 + * back to userland with ERETNC. That preserves the 2150 + * LLAddr/LLB so the subsequent SC instruction will 2151 + * succeed preserving the atomic semantics of the LL/SC 2152 + * block. Without that, there is no safe way to emulate 2153 + * an LL/SC block in MIPSR2 userland. 2154 + */ 2155 + pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); 2156 + err = SIGKILL; 2157 + break; 2158 + } 2159 + 2160 + res = regs->regs[MIPSInst_RT(inst)]; 2161 + 2162 + __asm__ __volatile__( 2163 + "1:\n" 2164 + "scd %0, 0(%2)\n" 2165 + "2:\n" 2166 + ".insn\n" 2167 + ".section .fixup,\"ax\"\n" 2168 + "3:\n" 2169 + "li %1, %3\n" 2170 + "j 2b\n" 2171 + ".previous\n" 2172 + ".section __ex_table,\"a\"\n" 2173 + ".word 1b, 3b\n" 2174 + ".previous\n" 2175 + : "+&r"(res), "+&r"(err) 2176 + : "r"(vaddr), "i"(SIGSEGV)); 2177 + 2178 + if (MIPSInst_RT(inst) && !err) 2179 + regs->regs[MIPSInst_RT(inst)] = res; 2180 + 2181 + MIPS_R2_STATS(llsc); 2182 + 2183 + break; 2184 + case pref_op: 2185 + /* skip it */ 2186 + break; 2187 + default: 2188 + err = SIGILL; 2189 + } 2190 + 2191 + /* 2192 + * Lets not return to userland just yet. 
It's costly and
2193 + * it is likely that we have more R2 instructions to emulate.
2194 + */
2195 + if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
2196 + regs->cp0_cause &= ~CAUSEF_BD;
2197 + err = get_user(inst, (u32 __user *)regs->cp0_epc);
2198 + if (!err)
2199 + goto repeat;
2200 +
2201 + if (err < 0)
2202 + err = SIGSEGV;
2203 + }
2204 +
2205 + if (err && (err != SIGEMT)) {
2206 + regs->regs[31] = r31;
2207 + regs->cp0_epc = epc;
2208 + }
2209 +
2210 + /* Likely a MIPS R6 compatible instruction */
2211 + if (pass && (err == SIGILL))
2212 + err = 0;
2213 +
2214 + return err;
2215 + }
2216 +
2217 + #ifdef CONFIG_DEBUG_FS
2218 +
2219 + static int mipsr2_stats_show(struct seq_file *s, void *unused)
2220 + {
2221 +
2222 + seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
2223 + seq_printf(s, "movs\t\t%ld\t%ld\n",
2224 + (unsigned long)__this_cpu_read(mipsr2emustats.movs),
2225 + (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
2226 + seq_printf(s, "hilo\t\t%ld\t%ld\n",
2227 + (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
2228 + (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
2229 + seq_printf(s, "muls\t\t%ld\t%ld\n",
2230 + (unsigned long)__this_cpu_read(mipsr2emustats.muls),
2231 + (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
2232 + seq_printf(s, "divs\t\t%ld\t%ld\n",
2233 + (unsigned long)__this_cpu_read(mipsr2emustats.divs),
2234 + (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
2235 + seq_printf(s, "dsps\t\t%ld\t%ld\n",
2236 + (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
2237 + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
2238 + seq_printf(s, "bops\t\t%ld\t%ld\n",
2239 + (unsigned long)__this_cpu_read(mipsr2emustats.bops),
2240 + (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
2241 + seq_printf(s, "traps\t\t%ld\t%ld\n",
2242 + (unsigned long)__this_cpu_read(mipsr2emustats.traps),
2243 + (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
2244 + seq_printf(s, "fpus\t\t%ld\t%ld\n",
2245 + (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
2246 + (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
2247 + seq_printf(s, "loads\t\t%ld\t%ld\n",
2248 + (unsigned long)__this_cpu_read(mipsr2emustats.loads),
2249 + (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
2250 + seq_printf(s, "stores\t\t%ld\t%ld\n",
2251 + (unsigned long)__this_cpu_read(mipsr2emustats.stores),
2252 + (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
2253 + seq_printf(s, "llsc\t\t%ld\t%ld\n",
2254 + (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
2255 + (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
2256 + seq_printf(s, "dsemul\t\t%ld\t%ld\n",
2257 + (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
2258 + (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
2259 + seq_printf(s, "jr\t\t%ld\n",
2260 + (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
2261 + seq_printf(s, "bltzl\t\t%ld\n",
2262 + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
2263 + seq_printf(s, "bgezl\t\t%ld\n",
2264 + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
2265 + seq_printf(s, "bltzll\t\t%ld\n",
2266 + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
2267 + seq_printf(s, "bgezll\t\t%ld\n",
2268 + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
2269 + seq_printf(s, "bltzal\t\t%ld\n",
2270 + (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
2271 + seq_printf(s, "bgezal\t\t%ld\n",
2272 + (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
2273 +
seq_printf(s, "beql\t\t%ld\n", 2274 + (unsigned long)__this_cpu_read(mipsr2bremustats.beql)); 2275 + seq_printf(s, "bnel\t\t%ld\n", 2276 + (unsigned long)__this_cpu_read(mipsr2bremustats.bnel)); 2277 + seq_printf(s, "blezl\t\t%ld\n", 2278 + (unsigned long)__this_cpu_read(mipsr2bremustats.blezl)); 2279 + seq_printf(s, "bgtzl\t\t%ld\n", 2280 + (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl)); 2281 + 2282 + return 0; 2283 + } 2284 + 2285 + static int mipsr2_stats_clear_show(struct seq_file *s, void *unused) 2286 + { 2287 + mipsr2_stats_show(s, unused); 2288 + 2289 + __this_cpu_write((mipsr2emustats).movs, 0); 2290 + __this_cpu_write((mipsr2bdemustats).movs, 0); 2291 + __this_cpu_write((mipsr2emustats).hilo, 0); 2292 + __this_cpu_write((mipsr2bdemustats).hilo, 0); 2293 + __this_cpu_write((mipsr2emustats).muls, 0); 2294 + __this_cpu_write((mipsr2bdemustats).muls, 0); 2295 + __this_cpu_write((mipsr2emustats).divs, 0); 2296 + __this_cpu_write((mipsr2bdemustats).divs, 0); 2297 + __this_cpu_write((mipsr2emustats).dsps, 0); 2298 + __this_cpu_write((mipsr2bdemustats).dsps, 0); 2299 + __this_cpu_write((mipsr2emustats).bops, 0); 2300 + __this_cpu_write((mipsr2bdemustats).bops, 0); 2301 + __this_cpu_write((mipsr2emustats).traps, 0); 2302 + __this_cpu_write((mipsr2bdemustats).traps, 0); 2303 + __this_cpu_write((mipsr2emustats).fpus, 0); 2304 + __this_cpu_write((mipsr2bdemustats).fpus, 0); 2305 + __this_cpu_write((mipsr2emustats).loads, 0); 2306 + __this_cpu_write((mipsr2bdemustats).loads, 0); 2307 + __this_cpu_write((mipsr2emustats).stores, 0); 2308 + __this_cpu_write((mipsr2bdemustats).stores, 0); 2309 + __this_cpu_write((mipsr2emustats).llsc, 0); 2310 + __this_cpu_write((mipsr2bdemustats).llsc, 0); 2311 + __this_cpu_write((mipsr2emustats).dsemul, 0); 2312 + __this_cpu_write((mipsr2bdemustats).dsemul, 0); 2313 + __this_cpu_write((mipsr2bremustats).jrs, 0); 2314 + __this_cpu_write((mipsr2bremustats).bltzl, 0); 2315 + __this_cpu_write((mipsr2bremustats).bgezl, 0); 2316 + __this_cpu_write((mipsr2bremustats).bltzll, 0); 2317 + __this_cpu_write((mipsr2bremustats).bgezll, 0); 2318 + __this_cpu_write((mipsr2bremustats).bltzal, 0); 2319 + __this_cpu_write((mipsr2bremustats).bgezal, 0); 2320 + __this_cpu_write((mipsr2bremustats).beql, 0); 2321 + __this_cpu_write((mipsr2bremustats).bnel, 0); 2322 + __this_cpu_write((mipsr2bremustats).blezl, 0); 2323 + __this_cpu_write((mipsr2bremustats).bgtzl, 0); 2324 + 2325 + return 0; 2326 + } 2327 + 2328 + static int mipsr2_stats_open(struct inode *inode, struct file *file) 2329 + { 2330 + return single_open(file, mipsr2_stats_show, inode->i_private); 2331 + } 2332 + 2333 + static int mipsr2_stats_clear_open(struct inode *inode, struct file *file) 2334 + { 2335 + return single_open(file, mipsr2_stats_clear_show, inode->i_private); 2336 + } 2337 + 2338 + static const struct file_operations mipsr2_emul_fops = { 2339 + .open = mipsr2_stats_open, 2340 + .read = seq_read, 2341 + .llseek = seq_lseek, 2342 + .release = single_release, 2343 + }; 2344 + 2345 + static const struct file_operations mipsr2_clear_fops = { 2346 + .open = mipsr2_stats_clear_open, 2347 + .read = seq_read, 2348 + .llseek = seq_lseek, 2349 + .release = single_release, 2350 + }; 2351 + 2352 + 2353 + static int __init mipsr2_init_debugfs(void) 2354 + { 2355 + extern struct dentry *mips_debugfs_dir; 2356 + struct dentry *mipsr2_emul; 2357 + 2358 + if (!mips_debugfs_dir) 2359 + return -ENODEV; 2360 + 2361 + mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO, 2362 + mips_debugfs_dir, NULL, 2363 + 
&mipsr2_emul_fops); 2364 + if (!mipsr2_emul) 2365 + return -ENOMEM; 2366 + 2367 + mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO, 2368 + mips_debugfs_dir, NULL, 2369 + &mipsr2_clear_fops); 2370 + if (!mipsr2_emul) 2371 + return -ENOMEM; 2372 + 2373 + return 0; 2374 + } 2375 + 2376 + device_initcall(mipsr2_init_debugfs); 2377 + 2378 + #endif /* CONFIG_DEBUG_FS */
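Both files created above live under the MIPS debugfs directory, normally /sys/kernel/debug/mips once debugfs is mounted (the mount point is an assumption here; the file names come from the code above). A minimal userspace sketch for dumping the counters:

#include <stdio.h>

int main(void)
{
	/* r2_emul_stats prints the per-CPU emulation counters;
	 * r2_emul_stats_clear prints the same table and then zeroes
	 * the counters, per mipsr2_stats_clear_show() above. */
	FILE *f = fopen("/sys/kernel/debug/mips/r2_emul_stats", "r");
	char line[128];

	if (!f) {
		perror("r2_emul_stats");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}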
+12
arch/mips/kernel/mips_ksyms.c
··· 14 14 #include <linux/mm.h> 15 15 #include <asm/uaccess.h> 16 16 #include <asm/ftrace.h> 17 + #include <asm/fpu.h> 18 + #include <asm/msa.h> 17 19 18 20 extern void *__bzero(void *__s, size_t __count); 19 21 extern long __strncpy_from_kernel_nocheck_asm(char *__to, ··· 32 30 extern long __strnlen_kernel_asm(const char *s); 33 31 extern long __strnlen_user_nocheck_asm(const char *s); 34 32 extern long __strnlen_user_asm(const char *s); 33 + 34 + /* 35 + * Core architecture code 36 + */ 37 + EXPORT_SYMBOL_GPL(_save_fp); 38 + #ifdef CONFIG_CPU_HAS_MSA 39 + EXPORT_SYMBOL_GPL(_save_msa); 40 + #endif 35 41 36 42 /* 37 43 * String functions ··· 77 67 EXPORT_SYMBOL(__strnlen_user_nocheck_asm); 78 68 EXPORT_SYMBOL(__strnlen_user_asm); 79 69 70 + #ifndef CONFIG_CPU_MIPSR6 80 71 EXPORT_SYMBOL(csum_partial); 81 72 EXPORT_SYMBOL(csum_partial_copy_nocheck); 82 73 EXPORT_SYMBOL(__csum_partial_copy_kernel); 83 74 EXPORT_SYMBOL(__csum_partial_copy_to_user); 84 75 EXPORT_SYMBOL(__csum_partial_copy_from_user); 76 + #endif 85 77 86 78 EXPORT_SYMBOL(invalid_pte_table); 87 79 #ifdef CONFIG_FUNCTION_TRACER
+139 -81
arch/mips/kernel/octeon_switch.S
··· 31 31 /* 32 32 * check if we need to save FPU registers 33 33 */ 34 - PTR_L t3, TASK_THREAD_INFO(a0) 35 - LONG_L t0, TI_FLAGS(t3) 36 - li t1, _TIF_USEDFPU 37 - and t2, t0, t1 38 - beqz t2, 1f 39 - nor t1, zero, t1 40 - 41 - and t0, t0, t1 42 - LONG_S t0, TI_FLAGS(t3) 34 + .set push 35 + .set noreorder 36 + beqz a3, 1f 37 + PTR_L t3, TASK_THREAD_INFO(a0) 38 + .set pop 43 39 44 40 /* 45 41 * clear saved user stack CU1 bit ··· 52 56 .set pop 53 57 1: 54 58 55 - /* check if we need to save COP2 registers */ 56 - PTR_L t2, TASK_THREAD_INFO(a0) 57 - LONG_L t0, ST_OFF(t2) 58 - bbit0 t0, 30, 1f 59 - 60 - /* Disable COP2 in the stored process state */ 61 - li t1, ST0_CU2 62 - xor t0, t1 63 - LONG_S t0, ST_OFF(t2) 64 - 65 - /* Enable COP2 so we can save it */ 66 - mfc0 t0, CP0_STATUS 67 - or t0, t1 68 - mtc0 t0, CP0_STATUS 69 - 70 - /* Save COP2 */ 71 - daddu a0, THREAD_CP2 72 - jal octeon_cop2_save 73 - dsubu a0, THREAD_CP2 74 - 75 - /* Disable COP2 now that we are done */ 76 - mfc0 t0, CP0_STATUS 77 - li t1, ST0_CU2 78 - xor t0, t1 79 - mtc0 t0, CP0_STATUS 80 - 81 - 1: 82 59 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 83 60 /* Check if we need to store CVMSEG state */ 84 - mfc0 t0, $11,7 /* CvmMemCtl */ 61 + dmfc0 t0, $11,7 /* CvmMemCtl */ 85 62 bbit0 t0, 6, 3f /* Is user access enabled? */ 86 63 87 64 /* Store the CVMSEG state */ ··· 78 109 .set reorder 79 110 80 111 /* Disable access to CVMSEG */ 81 - mfc0 t0, $11,7 /* CvmMemCtl */ 112 + dmfc0 t0, $11,7 /* CvmMemCtl */ 82 113 xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ 83 - mtc0 t0, $11,7 /* CvmMemCtl */ 114 + dmtc0 t0, $11,7 /* CvmMemCtl */ 84 115 #endif 85 116 3: 86 117 ··· 116 147 * void octeon_cop2_save(struct octeon_cop2_state *a0) 117 148 */ 118 149 .align 7 150 + .set push 151 + .set noreorder 119 152 LEAF(octeon_cop2_save) 120 153 121 154 dmfc0 t9, $9,7 /* CvmCtl register. 
*/ ··· 128 157 dmfc2 t2, 0x0200 129 158 sd t0, OCTEON_CP2_CRC_IV(a0) 130 159 sd t1, OCTEON_CP2_CRC_LENGTH(a0) 131 - sd t2, OCTEON_CP2_CRC_POLY(a0) 132 160 /* Skip next instructions if CvmCtl[NODFA_CP2] set */ 133 161 bbit1 t9, 28, 1f 162 + sd t2, OCTEON_CP2_CRC_POLY(a0) 134 163 135 164 /* Save the LLM state */ 136 165 dmfc2 t0, 0x0402 137 166 dmfc2 t1, 0x040A 138 167 sd t0, OCTEON_CP2_LLM_DAT(a0) 139 - sd t1, OCTEON_CP2_LLM_DAT+8(a0) 140 168 141 169 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ 170 + sd t1, OCTEON_CP2_LLM_DAT+8(a0) 142 171 143 172 /* Save the COP2 crypto state */ 144 173 /* this part is mostly common to both pass 1 and later revisions */ ··· 169 198 sd t2, OCTEON_CP2_AES_KEY+16(a0) 170 199 dmfc2 t2, 0x0101 171 200 sd t3, OCTEON_CP2_AES_KEY+24(a0) 172 - mfc0 t3, $15,0 /* Get the processor ID register */ 201 + mfc0 v0, $15,0 /* Get the processor ID register */ 173 202 sd t0, OCTEON_CP2_AES_KEYLEN(a0) 174 - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 203 + li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 175 204 sd t1, OCTEON_CP2_AES_RESULT(a0) 176 - sd t2, OCTEON_CP2_AES_RESULT+8(a0) 177 205 /* Skip to the Pass1 version of the remainder of the COP2 state */ 178 - beq t3, t0, 2f 206 + beq v0, v1, 2f 207 + sd t2, OCTEON_CP2_AES_RESULT+8(a0) 179 208 180 209 /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ 181 210 dmfc2 t1, 0x0240 182 211 dmfc2 t2, 0x0241 212 + ori v1, v1, 0x9500 /* lowest OCTEON III PrId*/ 183 213 dmfc2 t3, 0x0242 214 + subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ 184 215 dmfc2 t0, 0x0243 185 216 sd t1, OCTEON_CP2_HSH_DATW(a0) 186 217 dmfc2 t1, 0x0244 ··· 235 262 sd t1, OCTEON_CP2_GFM_MULT+8(a0) 236 263 sd t2, OCTEON_CP2_GFM_POLY(a0) 237 264 sd t3, OCTEON_CP2_GFM_RESULT(a0) 238 - sd t0, OCTEON_CP2_GFM_RESULT+8(a0) 265 + bltz v1, 4f 266 + sd t0, OCTEON_CP2_GFM_RESULT+8(a0) 267 + /* OCTEON III things*/ 268 + dmfc2 t0, 0x024F 269 + dmfc2 t1, 0x0050 270 + sd t0, OCTEON_CP2_SHA3(a0) 271 + sd t1, OCTEON_CP2_SHA3+8(a0) 272 + 4: 239 273 jr ra 274 + nop 240 275 241 276 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ 242 277 dmfc2 t3, 0x0040 ··· 270 289 271 290 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ 272 291 jr ra 292 + nop 273 293 END(octeon_cop2_save) 294 + .set pop 274 295 275 296 /* 276 297 * void octeon_cop2_restore(struct octeon_cop2_state *a0) ··· 337 354 ld t2, OCTEON_CP2_AES_RESULT+8(a0) 338 355 mfc0 t3, $15,0 /* Get the processor ID register */ 339 356 dmtc2 t0, 0x0110 340 - li t0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 357 + li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ 341 358 dmtc2 t1, 0x0100 342 - bne t0, t3, 3f /* Skip the next stuff for non-pass1 */ 359 + bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ 343 360 dmtc2 t2, 0x0101 344 361 345 362 /* this code is specific for pass 1 */ ··· 367 384 368 385 3: /* this is post-pass1 code */ 369 386 ld t2, OCTEON_CP2_HSH_DATW(a0) 387 + ori v0, v0, 0x9500 /* lowest OCTEON III PrId*/ 370 388 ld t0, OCTEON_CP2_HSH_DATW+8(a0) 371 389 ld t1, OCTEON_CP2_HSH_DATW+16(a0) 372 390 dmtc2 t2, 0x0240 ··· 421 437 dmtc2 t2, 0x0259 422 438 ld t2, OCTEON_CP2_GFM_RESULT+8(a0) 423 439 dmtc2 t0, 0x025E 440 + subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ 424 441 dmtc2 t1, 0x025A 425 - dmtc2 t2, 0x025B 426 - 442 + bltz v0, done_restore 443 + dmtc2 t2, 0x025B 444 + /* OCTEON III things*/ 445 + ld t0, OCTEON_CP2_SHA3(a0) 446 + ld t1, OCTEON_CP2_SHA3+8(a0) 447 + dmtc2 t0, 0x0051 448 + dmtc2 t1, 0x0050 427 449 done_restore: 428 450 
jr ra 429 451 nop ··· 440 450 * void octeon_mult_save() 441 451 * sp is assumed to point to a struct pt_regs 442 452 * 443 - * NOTE: This is called in SAVE_SOME in stackframe.h. It can only 444 - * safely modify k0 and k1. 453 + * NOTE: This is called in SAVE_TEMP in stackframe.h. It can 454 + * safely modify v1,k0, k1,$10-$15, and $24. It will 455 + * be overwritten with a processor specific version of the code. 445 456 */ 446 - .align 7 457 + .p2align 7 447 458 .set push 448 459 .set noreorder 449 460 LEAF(octeon_mult_save) 450 - dmfc0 k0, $9,7 /* CvmCtl register. */ 451 - bbit1 k0, 27, 1f /* Skip CvmCtl[NOMUL] */ 461 + jr ra 452 462 nop 463 + .space 30 * 4, 0 464 + octeon_mult_save_end: 465 + EXPORT(octeon_mult_save_end) 466 + END(octeon_mult_save) 453 467 454 - /* Save the multiplier state */ 468 + LEAF(octeon_mult_save2) 469 + /* Save the multiplier state OCTEON II and earlier*/ 455 470 v3mulu k0, $0, $0 456 471 v3mulu k1, $0, $0 457 472 sd k0, PT_MTP(sp) /* PT_MTP has P0 */ ··· 471 476 sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ 472 477 jr ra 473 478 sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ 479 + octeon_mult_save2_end: 480 + EXPORT(octeon_mult_save2_end) 481 + END(octeon_mult_save2) 474 482 475 - 1: /* Resume here if CvmCtl[NOMUL] */ 483 + LEAF(octeon_mult_save3) 484 + /* Save the multiplier state OCTEON III */ 485 + v3mulu $10, $0, $0 /* read P0 */ 486 + v3mulu $11, $0, $0 /* read P1 */ 487 + v3mulu $12, $0, $0 /* read P2 */ 488 + sd $10, PT_MTP+(0*8)(sp) /* store P0 */ 489 + v3mulu $10, $0, $0 /* read P3 */ 490 + sd $11, PT_MTP+(1*8)(sp) /* store P1 */ 491 + v3mulu $11, $0, $0 /* read P4 */ 492 + sd $12, PT_MTP+(2*8)(sp) /* store P2 */ 493 + ori $13, $0, 1 494 + v3mulu $12, $0, $0 /* read P5 */ 495 + sd $10, PT_MTP+(3*8)(sp) /* store P3 */ 496 + v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ 497 + sd $11, PT_MTP+(4*8)(sp) /* store P4 */ 498 + v3mulu $10, $0, $0 /* read MPL1 */ 499 + sd $12, PT_MTP+(5*8)(sp) /* store P5 */ 500 + v3mulu $11, $0, $0 /* read MPL2 */ 501 + sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ 502 + v3mulu $12, $0, $0 /* read MPL3 */ 503 + sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ 504 + v3mulu $10, $0, $0 /* read MPL4 */ 505 + sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ 506 + v3mulu $11, $0, $0 /* read MPL5 */ 507 + sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ 508 + sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ 476 509 jr ra 477 - END(octeon_mult_save) 510 + sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ 511 + octeon_mult_save3_end: 512 + EXPORT(octeon_mult_save3_end) 513 + END(octeon_mult_save3) 478 514 .set pop 479 515 480 516 /* 481 517 * void octeon_mult_restore() 482 518 * sp is assumed to point to a struct pt_regs 483 519 * 484 - * NOTE: This is called in RESTORE_SOME in stackframe.h. 520 + * NOTE: This is called in RESTORE_TEMP in stackframe.h. 485 521 */ 486 - .align 7 522 + .p2align 7 487 523 .set push 488 524 .set noreorder 489 525 LEAF(octeon_mult_restore) 490 - dmfc0 k1, $9,7 /* CvmCtl register. 
*/ 491 - ld v0, PT_MPL(sp) /* MPL0 */ 492 - ld v1, PT_MPL+8(sp) /* MPL1 */ 493 - ld k0, PT_MPL+16(sp) /* MPL2 */ 494 - bbit1 k1, 27, 1f /* Skip CvmCtl[NOMUL] */ 495 - /* Normally falls through, so no time wasted here */ 496 - nop 497 - 498 - /* Restore the multiplier state */ 499 - ld k1, PT_MTP+16(sp) /* P2 */ 500 - MTM0 v0 /* MPL0 */ 501 - ld v0, PT_MTP+8(sp) /* P1 */ 502 - MTM1 v1 /* MPL1 */ 503 - ld v1, PT_MTP(sp) /* P0 */ 504 - MTM2 k0 /* MPL2 */ 505 - MTP2 k1 /* P2 */ 506 - MTP1 v0 /* P1 */ 507 - jr ra 508 - MTP0 v1 /* P0 */ 509 - 510 - 1: /* Resume here if CvmCtl[NOMUL] */ 511 526 jr ra 512 527 nop 528 + .space 30 * 4, 0 529 + octeon_mult_restore_end: 530 + EXPORT(octeon_mult_restore_end) 513 531 END(octeon_mult_restore) 532 + 533 + LEAF(octeon_mult_restore2) 534 + ld v0, PT_MPL(sp) /* MPL0 */ 535 + ld v1, PT_MPL+8(sp) /* MPL1 */ 536 + ld k0, PT_MPL+16(sp) /* MPL2 */ 537 + /* Restore the multiplier state */ 538 + ld k1, PT_MTP+16(sp) /* P2 */ 539 + mtm0 v0 /* MPL0 */ 540 + ld v0, PT_MTP+8(sp) /* P1 */ 541 + mtm1 v1 /* MPL1 */ 542 + ld v1, PT_MTP(sp) /* P0 */ 543 + mtm2 k0 /* MPL2 */ 544 + mtp2 k1 /* P2 */ 545 + mtp1 v0 /* P1 */ 546 + jr ra 547 + mtp0 v1 /* P0 */ 548 + octeon_mult_restore2_end: 549 + EXPORT(octeon_mult_restore2_end) 550 + END(octeon_mult_restore2) 551 + 552 + LEAF(octeon_mult_restore3) 553 + ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ 554 + ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ 555 + ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ 556 + ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ 557 + .word 0x718d0008 558 + /* mtm0 $12, $13 restore MPL0 and MPL3 */ 559 + ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ 560 + .word 0x714b000c 561 + /* mtm1 $10, $11 restore MPL1 and MPL4 */ 562 + ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ 563 + ld $10, PT_MTP+(0*8)(sp) /* read P0 */ 564 + ld $11, PT_MTP+(3*8)(sp) /* read P3 */ 565 + .word 0x718d000d 566 + /* mtm2 $12, $13 restore MPL2 and MPL5 */ 567 + ld $12, PT_MTP+(1*8)(sp) /* read P1 */ 568 + .word 0x714b0009 569 + /* mtp0 $10, $11 restore P0 and P3 */ 570 + ld $13, PT_MTP+(4*8)(sp) /* read P4 */ 571 + ld $10, PT_MTP+(2*8)(sp) /* read P2 */ 572 + ld $11, PT_MTP+(5*8)(sp) /* read P5 */ 573 + .word 0x718d000a 574 + /* mtp1 $12, $13 restore P1 and P4 */ 575 + jr ra 576 + .word 0x714b000b 577 + /* mtp2 $10, $11 restore P2 and P5 */ 578 + 579 + octeon_mult_restore3_end: 580 + EXPORT(octeon_mult_restore3_end) 581 + END(octeon_mult_restore3) 514 582 .set pop
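octeon_mult_save and octeon_mult_restore above are reduced to jr ra stubs padded with .space, while the OCTEON II (save2/restore2) and OCTEON III (save3/restore3) bodies sit alongside with exported end markers; per the NOTE comment, the stub is overwritten with the processor-specific version at boot. A hedged sketch of such a fixup, assuming a plain memcpy plus I-cache flush (the symbol names are the real exports above; the function itself is illustrative, not code from this series):

#include <linux/string.h>	/* memcpy */
#include <asm/cacheflush.h>	/* local_flush_icache_range */

extern char octeon_mult_save[], octeon_mult_save_end[];
extern char octeon_mult_save2[], octeon_mult_save2_end[];
extern char octeon_mult_save3[], octeon_mult_save3_end[];

static void __init octeon_patch_mult_save(int is_octeon3)
{
	char *src = is_octeon3 ? octeon_mult_save3 : octeon_mult_save2;
	char *end = is_octeon3 ? octeon_mult_save3_end : octeon_mult_save2_end;

	/* Copy the model-specific body over the padded stub, then make
	 * sure the I-cache sees the new instructions. */
	memcpy(octeon_mult_save, src, end - src);
	local_flush_icache_range((unsigned long)octeon_mult_save,
				 (unsigned long)octeon_mult_save_end);
}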
+7 -1
arch/mips/kernel/proc.c
··· 82 82 seq_printf(m, "]\n"); 83 83 } 84 84 85 - seq_printf(m, "isa\t\t\t: mips1"); 85 + seq_printf(m, "isa\t\t\t:"); 86 + if (cpu_has_mips_r1) 87 + seq_printf(m, " mips1"); 86 88 if (cpu_has_mips_2) 87 89 seq_printf(m, "%s", " mips2"); 88 90 if (cpu_has_mips_3) ··· 97 95 seq_printf(m, "%s", " mips32r1"); 98 96 if (cpu_has_mips32r2) 99 97 seq_printf(m, "%s", " mips32r2"); 98 + if (cpu_has_mips32r6) 99 + seq_printf(m, "%s", " mips32r6"); 100 100 if (cpu_has_mips64r1) 101 101 seq_printf(m, "%s", " mips64r1"); 102 102 if (cpu_has_mips64r2) 103 103 seq_printf(m, "%s", " mips64r2"); 104 + if (cpu_has_mips64r6) 105 + seq_printf(m, "%s", " mips64r6"); 104 106 seq_printf(m, "\n"); 105 107 106 108 seq_printf(m, "ASEs implemented\t:");
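With the hunks above, the "isa" line in /proc/cpuinfo is assembled purely from cpu_has_* feature tests instead of hardcoding mips1, so an R6 core no longer claims ISAs it does not implement. On a MIPS64 R6 CPU the line would look something like this (illustrative; the exact set depends on the core):

isa			: mips32r6 mips64r6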
+96
arch/mips/kernel/process.c
··· 25 25 #include <linux/completion.h> 26 26 #include <linux/kallsyms.h> 27 27 #include <linux/random.h> 28 + #include <linux/prctl.h> 28 29 29 30 #include <asm/asm.h> 30 31 #include <asm/bootinfo.h> ··· 562 561 void arch_trigger_all_cpu_backtrace(bool include_self) 563 562 { 564 563 smp_call_function(arch_dump_stack, NULL, 1); 564 + } 565 + 566 + int mips_get_process_fp_mode(struct task_struct *task) 567 + { 568 + int value = 0; 569 + 570 + if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) 571 + value |= PR_FP_MODE_FR; 572 + if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) 573 + value |= PR_FP_MODE_FRE; 574 + 575 + return value; 576 + } 577 + 578 + int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) 579 + { 580 + const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; 581 + unsigned long switch_count; 582 + struct task_struct *t; 583 + 584 + /* Check the value is valid */ 585 + if (value & ~known_bits) 586 + return -EOPNOTSUPP; 587 + 588 + /* Avoid inadvertently triggering emulation */ 589 + if ((value & PR_FP_MODE_FR) && cpu_has_fpu && 590 + !(current_cpu_data.fpu_id & MIPS_FPIR_F64)) 591 + return -EOPNOTSUPP; 592 + if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre) 593 + return -EOPNOTSUPP; 594 + 595 + /* FR = 0 not supported in MIPS R6 */ 596 + if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6) 597 + return -EOPNOTSUPP; 598 + 599 + /* Save FP & vector context, then disable FPU & MSA */ 600 + if (task->signal == current->signal) 601 + lose_fpu(1); 602 + 603 + /* Prevent any threads from obtaining live FP context */ 604 + atomic_set(&task->mm->context.fp_mode_switching, 1); 605 + smp_mb__after_atomic(); 606 + 607 + /* 608 + * If there are multiple online CPUs then wait until all threads whose 609 + * FP mode is about to change have been context switched. This approach 610 + * allows us to only worry about whether an FP mode switch is in 611 + * progress when FP is first used in a task's time slice. Pretty much all 612 + * of the mode switch overhead can thus be confined to cases where mode 613 + * switches are actually occurring. That is, to here. However, for the 614 + * thread performing the mode switch it may take a while... 615 + */ 616 + if (num_online_cpus() > 1) { 617 + spin_lock_irq(&task->sighand->siglock); 618 + 619 + for_each_thread(task, t) { 620 + if (t == current) 621 + continue; 622 + 623 + switch_count = t->nvcsw + t->nivcsw; 624 + 625 + do { 626 + spin_unlock_irq(&task->sighand->siglock); 627 + cond_resched(); 628 + spin_lock_irq(&task->sighand->siglock); 629 + } while ((t->nvcsw + t->nivcsw) == switch_count); 630 + } 631 + 632 + spin_unlock_irq(&task->sighand->siglock); 633 + } 634 + 635 + /* 636 + * There are now no threads of the process with live FP context, so it 637 + * is safe to proceed with the FP mode switch. 638 + */ 639 + for_each_thread(task, t) { 640 + /* Update desired FP register width */ 641 + if (value & PR_FP_MODE_FR) { 642 + clear_tsk_thread_flag(t, TIF_32BIT_FPREGS); 643 + } else { 644 + set_tsk_thread_flag(t, TIF_32BIT_FPREGS); 645 + clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE); 646 + } 647 + 648 + /* Update desired FP single layout */ 649 + if (value & PR_FP_MODE_FRE) 650 + set_tsk_thread_flag(t, TIF_HYBRID_FPREGS); 651 + else 652 + clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS); 653 + } 654 + 655 + /* Allow threads to use FP again */ 656 + atomic_set(&task->mm->context.fp_mode_switching, 0); 657 + 658 + return 0; 565 659 }
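mips_get_process_fp_mode() and mips_set_process_fp_mode() are the backends for the PR_GET_FP_MODE/PR_SET_FP_MODE prctl(2) operations added by this series. A minimal userspace sketch, assuming kernel headers new enough to define the PR_FP_MODE_* constants:

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>	/* PR_{GET,SET}_FP_MODE, PR_FP_MODE_* */

int main(void)
{
	/* Request 64-bit FP registers (FR=1) together with the hybrid
	 * single-precision layout (FRE), i.e. both flags checked above. */
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE) != 0)
		perror("PR_SET_FP_MODE");

	printf("fp mode now: %d\n", prctl(PR_GET_FP_MODE));
	return 0;
}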
+9 -3
arch/mips/kernel/r4k_fpu.S
··· 34 34 .endm 35 35 36 36 .set noreorder 37 - .set arch=r4000 37 + .set MIPS_ISA_ARCH_LEVEL_RAW 38 38 39 39 LEAF(_save_fp_context) 40 40 .set push ··· 42 42 cfc1 t1, fcr31 43 43 .set pop 44 44 45 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 45 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 46 + defined(CONFIG_CPU_MIPS32_R6) 46 47 .set push 47 48 SET_HARDFLOAT 48 49 #ifdef CONFIG_CPU_MIPS32_R2 ··· 106 105 SET_HARDFLOAT 107 106 cfc1 t1, fcr31 108 107 108 + #ifndef CONFIG_CPU_MIPS64_R6 109 109 mfc0 t0, CP0_STATUS 110 110 sll t0, t0, 5 111 111 bgez t0, 1f # skip storing odd if FR=0 112 112 nop 113 + #endif 113 114 114 115 /* Store the 16 odd double precision registers */ 115 116 EX sdc1 $f1, SC32_FPREGS+8(a0) ··· 166 163 LEAF(_restore_fp_context) 167 164 EX lw t1, SC_FPC_CSR(a0) 168 165 169 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 166 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 167 + defined(CONFIG_CPU_MIPS32_R6) 170 168 .set push 171 169 SET_HARDFLOAT 172 170 #ifdef CONFIG_CPU_MIPS32_R2 ··· 227 223 SET_HARDFLOAT 228 224 EX lw t1, SC32_FPC_CSR(a0) 229 225 226 + #ifndef CONFIG_CPU_MIPS64_R6 230 227 mfc0 t0, CP0_STATUS 231 228 sll t0, t0, 5 232 229 bgez t0, 1f # skip loading odd if FR=0 233 230 nop 231 + #endif 234 232 235 233 EX ldc1 $f1, SC32_FPREGS+8(a0) 236 234 EX ldc1 $f3, SC32_FPREGS+24(a0)
+8 -6
arch/mips/kernel/r4k_switch.S
··· 115 115 * Save a thread's fp context. 116 116 */ 117 117 LEAF(_save_fp) 118 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 118 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 119 + defined(CONFIG_CPU_MIPS32_R6) 119 120 mfc0 t0, CP0_STATUS 120 121 #endif 121 122 fpu_save_double a0 t0 t1 # clobbers t1 ··· 127 126 * Restore a thread's fp context. 128 127 */ 129 128 LEAF(_restore_fp) 130 - #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 129 + #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \ 130 + defined(CONFIG_CPU_MIPS32_R6) 131 131 mfc0 t0, CP0_STATUS 132 132 #endif 133 133 fpu_restore_double a0 t0 t1 # clobbers t1 ··· 242 240 mtc1 t1, $f30 243 241 mtc1 t1, $f31 244 242 245 - #ifdef CONFIG_CPU_MIPS32_R2 243 + #if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) 246 244 .set push 247 - .set mips32r2 245 + .set MIPS_ISA_LEVEL_RAW 248 246 .set fp=64 249 247 sll t0, t0, 5 # is Status.FR set? 250 248 bgez t0, 1f # no: skip setting upper 32b ··· 282 280 mthc1 t1, $f30 283 281 mthc1 t1, $f31 284 282 1: .set pop 285 - #endif /* CONFIG_CPU_MIPS32_R2 */ 283 + #endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */ 286 284 #else 287 - .set arch=r4000 285 + .set MIPS_ISA_ARCH_LEVEL_RAW 288 286 dmtc1 t1, $f0 289 287 dmtc1 t1, $f2 290 288 dmtc1 t1, $f4
+1
arch/mips/kernel/spram.c
··· 208 208 case CPU_INTERAPTIV: 209 209 case CPU_PROAPTIV: 210 210 case CPU_P5600: 211 + case CPU_QEMU_GENERIC: 211 212 config0 = read_c0_config(); 212 213 /* FIXME: addresses are Malta specific */ 213 214 if (config0 & (1<<24)) {
+1 -1
arch/mips/kernel/syscall.c
··· 136 136 : "memory"); 137 137 } else if (cpu_has_llsc) { 138 138 __asm__ __volatile__ ( 139 - " .set arch=r4000 \n" 139 + " .set "MIPS_ISA_ARCH_LEVEL" \n" 140 140 " li %[err], 0 \n" 141 141 "1: ll %[old], (%[addr]) \n" 142 142 " move %[tmp], %[new] \n"
+54 -6
arch/mips/kernel/traps.c
··· 46 46 #include <asm/fpu.h> 47 47 #include <asm/fpu_emulator.h> 48 48 #include <asm/idle.h> 49 + #include <asm/mips-r2-to-r6-emul.h> 49 50 #include <asm/mipsregs.h> 50 51 #include <asm/mipsmtregs.h> 51 52 #include <asm/module.h> ··· 838 837 exception_exit(prev_state); 839 838 } 840 839 841 - static void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 840 + void do_trap_or_bp(struct pt_regs *regs, unsigned int code, 842 841 const char *str) 843 842 { 844 843 siginfo_t info; ··· 1028 1027 unsigned int opcode = 0; 1029 1028 int status = -1; 1030 1029 1030 + /* 1031 + * Avoid any kernel code. Just emulate the R2 instruction 1032 + * as quickly as possible. 1033 + */ 1034 + if (mipsr2_emulation && cpu_has_mips_r6 && 1035 + likely(user_mode(regs))) { 1036 + if (likely(get_user(opcode, epc) >= 0)) { 1037 + status = mipsr2_decoder(regs, opcode); 1038 + switch (status) { 1039 + case 0: 1040 + case SIGEMT: 1041 + task_thread_info(current)->r2_emul_return = 1; 1042 + return; 1043 + case SIGILL: 1044 + goto no_r2_instr; 1045 + default: 1046 + process_fpemu_return(status, 1047 + &current->thread.cp0_baduaddr); 1048 + task_thread_info(current)->r2_emul_return = 1; 1049 + return; 1050 + } 1051 + } 1052 + } 1053 + 1054 + no_r2_instr: 1055 + 1031 1056 prev_state = exception_enter(); 1057 + 1032 1058 if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs), 1033 1059 SIGILL) == NOTIFY_STOP) 1034 1060 goto out; ··· 1162 1134 return NOTIFY_OK; 1163 1135 } 1164 1136 1137 + static int wait_on_fp_mode_switch(atomic_t *p) 1138 + { 1139 + /* 1140 + * The FP mode for this task is currently being switched. That may 1141 + * involve modifications to the format of this task's FP context which 1142 + * make it unsafe to proceed with execution for the moment. Instead, 1143 + * schedule some other task. 1144 + */ 1145 + schedule(); 1146 + return 0; 1147 + } 1148 + 1165 1149 static int enable_restore_fp_context(int msa) 1166 1150 { 1167 1151 int err, was_fpu_owner, prior_msa; 1152 + 1153 + /* 1154 + * If an FP mode switch is currently underway, wait for it to 1155 + * complete before proceeding. 1156 + */ 1157 + wait_on_atomic_t(&current->mm->context.fp_mode_switching, 1158 + wait_on_fp_mode_switch, TASK_KILLABLE); 1168 1159 1169 1160 if (!used_math()) { 1170 1161 /* First time FP context user. */ ··· 1588 1541 case CPU_INTERAPTIV: 1589 1542 case CPU_PROAPTIV: 1590 1543 case CPU_P5600: 1544 + case CPU_QEMU_GENERIC: 1591 1545 { 1592 1546 #define ERRCTL_PE 0x80000000 1593 1547 #define ERRCTL_L2P 0x00800000 ··· 1678 1630 printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", 1679 1631 reg_val & (1<<30) ? "secondary" : "primary", 1680 1632 reg_val & (1<<31) ? "data" : "insn"); 1681 - if (cpu_has_mips_r2 && 1633 + if ((cpu_has_mips_r2_r6) && 1682 1634 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1683 1635 pr_err("Error bits: %s%s%s%s%s%s%s%s\n", 1684 1636 reg_val & (1<<29) ? "ED " : "", ··· 1718 1670 unsigned int reg_val; 1719 1671 1720 1672 /* For the moment, report the problem and hang. 
*/ 1721 - if (cpu_has_mips_r2 && 1673 + if ((cpu_has_mips_r2_r6) && 1722 1674 ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { 1723 1675 pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", 1724 1676 read_c0_ecc()); ··· 2007 1959 { 2008 1960 unsigned int hwrena = cpu_hwrena_impl_bits; 2009 1961 2010 - if (cpu_has_mips_r2) 1962 + if (cpu_has_mips_r2_r6) 2011 1963 hwrena |= 0x0000000f; 2012 1964 2013 1965 if (!noulri && cpu_has_userlocal) ··· 2051 2003 * o read IntCtl.IPTI to determine the timer interrupt 2052 2004 * o read IntCtl.IPPCI to determine the performance counter interrupt 2053 2005 */ 2054 - if (cpu_has_mips_r2) { 2006 + if (cpu_has_mips_r2_r6) { 2055 2007 cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; 2056 2008 cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; 2057 2009 cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; ··· 2142 2094 #else 2143 2095 ebase = CKSEG0; 2144 2096 #endif 2145 - if (cpu_has_mips_r2) 2097 + if (cpu_has_mips_r2_r6) 2146 2098 ebase += (read_c0_ebase() & 0x3ffff000); 2147 2099 } 2148 2100
+386 -4
arch/mips/kernel/unaligned.c
··· 129 129 : "=&r" (value), "=r" (res) \ 130 130 : "r" (addr), "i" (-EFAULT)); 131 131 132 + #ifndef CONFIG_CPU_MIPSR6 132 133 #define LoadW(addr, value, res) \ 133 134 __asm__ __volatile__ ( \ 134 135 "1:\t"user_lwl("%0", "(%2)")"\n" \ ··· 147 146 ".previous" \ 148 147 : "=&r" (value), "=r" (res) \ 149 148 : "r" (addr), "i" (-EFAULT)); 149 + #else 150 + /* MIPSR6 has no lwl instruction */ 151 + #define LoadW(addr, value, res) \ 152 + __asm__ __volatile__ ( \ 153 + ".set\tpush\n" \ 154 + ".set\tnoat\n\t" \ 155 + "1:"user_lb("%0", "0(%2)")"\n\t" \ 156 + "2:"user_lbu("$1", "1(%2)")"\n\t" \ 157 + "sll\t%0, 0x8\n\t" \ 158 + "or\t%0, $1\n\t" \ 159 + "3:"user_lbu("$1", "2(%2)")"\n\t" \ 160 + "sll\t%0, 0x8\n\t" \ 161 + "or\t%0, $1\n\t" \ 162 + "4:"user_lbu("$1", "3(%2)")"\n\t" \ 163 + "sll\t%0, 0x8\n\t" \ 164 + "or\t%0, $1\n\t" \ 165 + "li\t%1, 0\n" \ 166 + ".set\tpop\n" \ 167 + "10:\n\t" \ 168 + ".insn\n\t" \ 169 + ".section\t.fixup,\"ax\"\n\t" \ 170 + "11:\tli\t%1, %3\n\t" \ 171 + "j\t10b\n\t" \ 172 + ".previous\n\t" \ 173 + ".section\t__ex_table,\"a\"\n\t" \ 174 + STR(PTR)"\t1b, 11b\n\t" \ 175 + STR(PTR)"\t2b, 11b\n\t" \ 176 + STR(PTR)"\t3b, 11b\n\t" \ 177 + STR(PTR)"\t4b, 11b\n\t" \ 178 + ".previous" \ 179 + : "=&r" (value), "=r" (res) \ 180 + : "r" (addr), "i" (-EFAULT)); 181 + #endif /* CONFIG_CPU_MIPSR6 */ 150 182 151 183 #define LoadHWU(addr, value, res) \ 152 184 __asm__ __volatile__ ( \ ··· 203 169 : "=&r" (value), "=r" (res) \ 204 170 : "r" (addr), "i" (-EFAULT)); 205 171 172 + #ifndef CONFIG_CPU_MIPSR6 206 173 #define LoadWU(addr, value, res) \ 207 174 __asm__ __volatile__ ( \ 208 175 "1:\t"user_lwl("%0", "(%2)")"\n" \ ··· 241 206 ".previous" \ 242 207 : "=&r" (value), "=r" (res) \ 243 208 : "r" (addr), "i" (-EFAULT)); 209 + #else 210 + /* MIPSR6 has no lwl and ldl instructions */ 211 + #define LoadWU(addr, value, res) \ 212 + __asm__ __volatile__ ( \ 213 + ".set\tpush\n\t" \ 214 + ".set\tnoat\n\t" \ 215 + "1:"user_lbu("%0", "0(%2)")"\n\t" \ 216 + "2:"user_lbu("$1", "1(%2)")"\n\t" \ 217 + "sll\t%0, 0x8\n\t" \ 218 + "or\t%0, $1\n\t" \ 219 + "3:"user_lbu("$1", "2(%2)")"\n\t" \ 220 + "sll\t%0, 0x8\n\t" \ 221 + "or\t%0, $1\n\t" \ 222 + "4:"user_lbu("$1", "3(%2)")"\n\t" \ 223 + "sll\t%0, 0x8\n\t" \ 224 + "or\t%0, $1\n\t" \ 225 + "li\t%1, 0\n" \ 226 + ".set\tpop\n" \ 227 + "10:\n\t" \ 228 + ".insn\n\t" \ 229 + ".section\t.fixup,\"ax\"\n\t" \ 230 + "11:\tli\t%1, %3\n\t" \ 231 + "j\t10b\n\t" \ 232 + ".previous\n\t" \ 233 + ".section\t__ex_table,\"a\"\n\t" \ 234 + STR(PTR)"\t1b, 11b\n\t" \ 235 + STR(PTR)"\t2b, 11b\n\t" \ 236 + STR(PTR)"\t3b, 11b\n\t" \ 237 + STR(PTR)"\t4b, 11b\n\t" \ 238 + ".previous" \ 239 + : "=&r" (value), "=r" (res) \ 240 + : "r" (addr), "i" (-EFAULT)); 241 + 242 + #define LoadDW(addr, value, res) \ 243 + __asm__ __volatile__ ( \ 244 + ".set\tpush\n\t" \ 245 + ".set\tnoat\n\t" \ 246 + "1:lb\t%0, 0(%2)\n\t" \ 247 + "2:lbu\t $1, 1(%2)\n\t" \ 248 + "dsll\t%0, 0x8\n\t" \ 249 + "or\t%0, $1\n\t" \ 250 + "3:lbu\t$1, 2(%2)\n\t" \ 251 + "dsll\t%0, 0x8\n\t" \ 252 + "or\t%0, $1\n\t" \ 253 + "4:lbu\t$1, 3(%2)\n\t" \ 254 + "dsll\t%0, 0x8\n\t" \ 255 + "or\t%0, $1\n\t" \ 256 + "5:lbu\t$1, 4(%2)\n\t" \ 257 + "dsll\t%0, 0x8\n\t" \ 258 + "or\t%0, $1\n\t" \ 259 + "6:lbu\t$1, 5(%2)\n\t" \ 260 + "dsll\t%0, 0x8\n\t" \ 261 + "or\t%0, $1\n\t" \ 262 + "7:lbu\t$1, 6(%2)\n\t" \ 263 + "dsll\t%0, 0x8\n\t" \ 264 + "or\t%0, $1\n\t" \ 265 + "8:lbu\t$1, 7(%2)\n\t" \ 266 + "dsll\t%0, 0x8\n\t" \ 267 + "or\t%0, $1\n\t" \ 268 + "li\t%1, 0\n" \ 269 + ".set\tpop\n\t" \ 270 + "10:\n\t" \ 271 + ".insn\n\t" \
272 + ".section\t.fixup,\"ax\"\n\t" \ 273 + "11:\tli\t%1, %3\n\t" \ 274 + "j\t10b\n\t" \ 275 + ".previous\n\t" \ 276 + ".section\t__ex_table,\"a\"\n\t" \ 277 + STR(PTR)"\t1b, 11b\n\t" \ 278 + STR(PTR)"\t2b, 11b\n\t" \ 279 + STR(PTR)"\t3b, 11b\n\t" \ 280 + STR(PTR)"\t4b, 11b\n\t" \ 281 + STR(PTR)"\t5b, 11b\n\t" \ 282 + STR(PTR)"\t6b, 11b\n\t" \ 283 + STR(PTR)"\t7b, 11b\n\t" \ 284 + STR(PTR)"\t8b, 11b\n\t" \ 285 + ".previous" \ 286 + : "=&r" (value), "=r" (res) \ 287 + : "r" (addr), "i" (-EFAULT)); 288 + #endif /* CONFIG_CPU_MIPSR6 */ 289 + 244 290 245 291 #define StoreHW(addr, value, res) \ 246 292 __asm__ __volatile__ ( \ ··· 344 228 : "=r" (res) \ 345 229 : "r" (value), "r" (addr), "i" (-EFAULT)); 346 230 231 + #ifndef CONFIG_CPU_MIPSR6 347 232 #define StoreW(addr, value, res) \ 348 233 __asm__ __volatile__ ( \ 349 234 "1:\t"user_swl("%1", "(%2)")"\n" \ ··· 380 263 ".previous" \ 381 264 : "=r" (res) \ 382 265 : "r" (value), "r" (addr), "i" (-EFAULT)); 383 - #endif 266 + #else 267 + /* MIPSR6 has no swl and sdl instructions */ 268 + #define StoreW(addr, value, res) \ 269 + __asm__ __volatile__ ( \ 270 + ".set\tpush\n\t" \ 271 + ".set\tnoat\n\t" \ 272 + "1:"user_sb("%1", "3(%2)")"\n\t" \ 273 + "srl\t$1, %1, 0x8\n\t" \ 274 + "2:"user_sb("$1", "2(%2)")"\n\t" \ 275 + "srl\t$1, $1, 0x8\n\t" \ 276 + "3:"user_sb("$1", "1(%2)")"\n\t" \ 277 + "srl\t$1, $1, 0x8\n\t" \ 278 + "4:"user_sb("$1", "0(%2)")"\n\t" \ 279 + ".set\tpop\n\t" \ 280 + "li\t%0, 0\n" \ 281 + "10:\n\t" \ 282 + ".insn\n\t" \ 283 + ".section\t.fixup,\"ax\"\n\t" \ 284 + "11:\tli\t%0, %3\n\t" \ 285 + "j\t10b\n\t" \ 286 + ".previous\n\t" \ 287 + ".section\t__ex_table,\"a\"\n\t" \ 288 + STR(PTR)"\t1b, 11b\n\t" \ 289 + STR(PTR)"\t2b, 11b\n\t" \ 290 + STR(PTR)"\t3b, 11b\n\t" \ 291 + STR(PTR)"\t4b, 11b\n\t" \ 292 + ".previous" \ 293 + : "=&r" (res) \ 294 + : "r" (value), "r" (addr), "i" (-EFAULT) \ 295 + : "memory"); 384 296 385 - #ifdef __LITTLE_ENDIAN 297 + #define StoreDW(addr, value, res) \ 298 + __asm__ __volatile__ ( \ 299 + ".set\tpush\n\t" \ 300 + ".set\tnoat\n\t" \ 301 + "1:sb\t%1, 7(%2)\n\t" \ 302 + "dsrl\t$1, %1, 0x8\n\t" \ 303 + "2:sb\t$1, 6(%2)\n\t" \ 304 + "dsrl\t$1, $1, 0x8\n\t" \ 305 + "3:sb\t$1, 5(%2)\n\t" \ 306 + "dsrl\t$1, $1, 0x8\n\t" \ 307 + "4:sb\t$1, 4(%2)\n\t" \ 308 + "dsrl\t$1, $1, 0x8\n\t" \ 309 + "5:sb\t$1, 3(%2)\n\t" \ 310 + "dsrl\t$1, $1, 0x8\n\t" \ 311 + "6:sb\t$1, 2(%2)\n\t" \ 312 + "dsrl\t$1, $1, 0x8\n\t" \ 313 + "7:sb\t$1, 1(%2)\n\t" \ 314 + "dsrl\t$1, $1, 0x8\n\t" \ 315 + "8:sb\t$1, 0(%2)\n\t" \ 316 + "dsrl\t$1, $1, 0x8\n\t" \ 317 + ".set\tpop\n\t" \ 318 + "li\t%0, 0\n" \ 319 + "10:\n\t" \ 320 + ".insn\n\t" \ 321 + ".section\t.fixup,\"ax\"\n\t" \ 322 + "11:\tli\t%0, %3\n\t" \ 323 + "j\t10b\n\t" \ 324 + ".previous\n\t" \ 325 + ".section\t__ex_table,\"a\"\n\t" \ 326 + STR(PTR)"\t1b, 11b\n\t" \ 327 + STR(PTR)"\t2b, 11b\n\t" \ 328 + STR(PTR)"\t3b, 11b\n\t" \ 329 + STR(PTR)"\t4b, 11b\n\t" \ 330 + STR(PTR)"\t5b, 11b\n\t" \ 331 + STR(PTR)"\t6b, 11b\n\t" \ 332 + STR(PTR)"\t7b, 11b\n\t" \ 333 + STR(PTR)"\t8b, 11b\n\t" \ 334 + ".previous" \ 335 + : "=&r" (res) \ 336 + : "r" (value), "r" (addr), "i" (-EFAULT) \ 337 + : "memory"); 338 + #endif /* CONFIG_CPU_MIPSR6 */ 339 + 340 + #else /* __BIG_ENDIAN */ 341 + 386 342 #define LoadHW(addr, value, res) \ 387 343 __asm__ __volatile__ (".set\tnoat\n" \ 388 344 "1:\t"user_lb("%0", "1(%2)")"\n" \ ··· 476 286 : "=&r" (value), "=r" (res) \ 477 287 : "r" (addr), "i" (-EFAULT)); 478 288 289 + #ifndef CONFIG_CPU_MIPSR6 479 290 #define LoadW(addr, value, res) \ 480 291 __asm__ 
__volatile__ ( \ 481 292 "1:\t"user_lwl("%0", "3(%2)")"\n" \ ··· 494 303 ".previous" \ 495 304 : "=&r" (value), "=r" (res) \ 496 305 : "r" (addr), "i" (-EFAULT)); 306 + #else 307 + /* MIPSR6 has no lwl instruction */ 308 + #define LoadW(addr, value, res) \ 309 + __asm__ __volatile__ ( \ 310 + ".set\tpush\n" \ 311 + ".set\tnoat\n\t" \ 312 + "1:"user_lb("%0", "3(%2)")"\n\t" \ 313 + "2:"user_lbu("$1", "2(%2)")"\n\t" \ 314 + "sll\t%0, 0x8\n\t" \ 315 + "or\t%0, $1\n\t" \ 316 + "3:"user_lbu("$1", "1(%2)")"\n\t" \ 317 + "sll\t%0, 0x8\n\t" \ 318 + "or\t%0, $1\n\t" \ 319 + "4:"user_lbu("$1", "0(%2)")"\n\t" \ 320 + "sll\t%0, 0x8\n\t" \ 321 + "or\t%0, $1\n\t" \ 322 + "li\t%1, 0\n" \ 323 + ".set\tpop\n" \ 324 + "10:\n\t" \ 325 + ".insn\n\t" \ 326 + ".section\t.fixup,\"ax\"\n\t" \ 327 + "11:\tli\t%1, %3\n\t" \ 328 + "j\t10b\n\t" \ 329 + ".previous\n\t" \ 330 + ".section\t__ex_table,\"a\"\n\t" \ 331 + STR(PTR)"\t1b, 11b\n\t" \ 332 + STR(PTR)"\t2b, 11b\n\t" \ 333 + STR(PTR)"\t3b, 11b\n\t" \ 334 + STR(PTR)"\t4b, 11b\n\t" \ 335 + ".previous" \ 336 + : "=&r" (value), "=r" (res) \ 337 + : "r" (addr), "i" (-EFAULT)); 338 + #endif /* CONFIG_CPU_MIPSR6 */ 339 + 497 340 498 341 #define LoadHWU(addr, value, res) \ 499 342 __asm__ __volatile__ ( \ ··· 551 326 : "=&r" (value), "=r" (res) \ 552 327 : "r" (addr), "i" (-EFAULT)); 553 328 329 + #ifndef CONFIG_CPU_MIPSR6 554 330 #define LoadWU(addr, value, res) \ 555 331 __asm__ __volatile__ ( \ 556 332 "1:\t"user_lwl("%0", "3(%2)")"\n" \ ··· 589 363 ".previous" \ 590 364 : "=&r" (value), "=r" (res) \ 591 365 : "r" (addr), "i" (-EFAULT)); 366 + #else 367 + /* MIPSR6 has no lwl and ldl instructions */ 368 + #define LoadWU(addr, value, res) \ 369 + __asm__ __volatile__ ( \ 370 + ".set\tpush\n\t" \ 371 + ".set\tnoat\n\t" \ 372 + "1:"user_lbu("%0", "3(%2)")"\n\t" \ 373 + "2:"user_lbu("$1", "2(%2)")"\n\t" \ 374 + "sll\t%0, 0x8\n\t" \ 375 + "or\t%0, $1\n\t" \ 376 + "3:"user_lbu("$1", "1(%2)")"\n\t" \ 377 + "sll\t%0, 0x8\n\t" \ 378 + "or\t%0, $1\n\t" \ 379 + "4:"user_lbu("$1", "0(%2)")"\n\t" \ 380 + "sll\t%0, 0x8\n\t" \ 381 + "or\t%0, $1\n\t" \ 382 + "li\t%1, 0\n" \ 383 + ".set\tpop\n" \ 384 + "10:\n\t" \ 385 + ".insn\n\t" \ 386 + ".section\t.fixup,\"ax\"\n\t" \ 387 + "11:\tli\t%1, %3\n\t" \ 388 + "j\t10b\n\t" \ 389 + ".previous\n\t" \ 390 + ".section\t__ex_table,\"a\"\n\t" \ 391 + STR(PTR)"\t1b, 11b\n\t" \ 392 + STR(PTR)"\t2b, 11b\n\t" \ 393 + STR(PTR)"\t3b, 11b\n\t" \ 394 + STR(PTR)"\t4b, 11b\n\t" \ 395 + ".previous" \ 396 + : "=&r" (value), "=r" (res) \ 397 + : "r" (addr), "i" (-EFAULT)); 398 + 399 + #define LoadDW(addr, value, res) \ 400 + __asm__ __volatile__ ( \ 401 + ".set\tpush\n\t" \ 402 + ".set\tnoat\n\t" \ 403 + "1:lb\t%0, 7(%2)\n\t" \ 404 + "2:lbu\t$1, 6(%2)\n\t" \ 405 + "dsll\t%0, 0x8\n\t" \ 406 + "or\t%0, $1\n\t" \ 407 + "3:lbu\t$1, 5(%2)\n\t" \ 408 + "dsll\t%0, 0x8\n\t" \ 409 + "or\t%0, $1\n\t" \ 410 + "4:lbu\t$1, 4(%2)\n\t" \ 411 + "dsll\t%0, 0x8\n\t" \ 412 + "or\t%0, $1\n\t" \ 413 + "5:lbu\t$1, 3(%2)\n\t" \ 414 + "dsll\t%0, 0x8\n\t" \ 415 + "or\t%0, $1\n\t" \ 416 + "6:lbu\t$1, 2(%2)\n\t" \ 417 + "dsll\t%0, 0x8\n\t" \ 418 + "or\t%0, $1\n\t" \ 419 + "7:lbu\t$1, 1(%2)\n\t" \ 420 + "dsll\t%0, 0x8\n\t" \ 421 + "or\t%0, $1\n\t" \ 422 + "8:lbu\t$1, 0(%2)\n\t" \ 423 + "dsll\t%0, 0x8\n\t" \ 424 + "or\t%0, $1\n\t" \ 425 + "li\t%1, 0\n" \ 426 + ".set\tpop\n\t" \ 427 + "10:\n\t" \ 428 + ".insn\n\t" \ 429 + ".section\t.fixup,\"ax\"\n\t" \ 430 + "11:\tli\t%1, %3\n\t" \ 431 + "j\t10b\n\t" \ 432 + ".previous\n\t" \ 433 + ".section\t__ex_table,\"a\"\n\t" \ 434 + 
STR(PTR)"\t1b, 11b\n\t" \ 435 + STR(PTR)"\t2b, 11b\n\t" \ 436 + STR(PTR)"\t3b, 11b\n\t" \ 437 + STR(PTR)"\t4b, 11b\n\t" \ 438 + STR(PTR)"\t5b, 11b\n\t" \ 439 + STR(PTR)"\t6b, 11b\n\t" \ 440 + STR(PTR)"\t7b, 11b\n\t" \ 441 + STR(PTR)"\t8b, 11b\n\t" \ 442 + ".previous" \ 443 + : "=&r" (value), "=r" (res) \ 444 + : "r" (addr), "i" (-EFAULT)); 445 + #endif /* CONFIG_CPU_MIPSR6 */ 592 446 593 447 #define StoreHW(addr, value, res) \ 594 448 __asm__ __volatile__ ( \ ··· 690 384 ".previous" \ 691 385 : "=r" (res) \ 692 386 : "r" (value), "r" (addr), "i" (-EFAULT)); 693 - 387 + #ifndef CONFIG_CPU_MIPSR6 694 388 #define StoreW(addr, value, res) \ 695 389 __asm__ __volatile__ ( \ 696 390 "1:\t"user_swl("%1", "3(%2)")"\n" \ ··· 726 420 ".previous" \ 727 421 : "=r" (res) \ 728 422 : "r" (value), "r" (addr), "i" (-EFAULT)); 423 + #else 424 + /* MIPSR6 has no swl and sdl instructions */ 425 + #define StoreW(addr, value, res) \ 426 + __asm__ __volatile__ ( \ 427 + ".set\tpush\n\t" \ 428 + ".set\tnoat\n\t" \ 429 + "1:"user_sb("%1", "0(%2)")"\n\t" \ 430 + "srl\t$1, %1, 0x8\n\t" \ 431 + "2:"user_sb("$1", "1(%2)")"\n\t" \ 432 + "srl\t$1, $1, 0x8\n\t" \ 433 + "3:"user_sb("$1", "2(%2)")"\n\t" \ 434 + "srl\t$1, $1, 0x8\n\t" \ 435 + "4:"user_sb("$1", "3(%2)")"\n\t" \ 436 + ".set\tpop\n\t" \ 437 + "li\t%0, 0\n" \ 438 + "10:\n\t" \ 439 + ".insn\n\t" \ 440 + ".section\t.fixup,\"ax\"\n\t" \ 441 + "11:\tli\t%0, %3\n\t" \ 442 + "j\t10b\n\t" \ 443 + ".previous\n\t" \ 444 + ".section\t__ex_table,\"a\"\n\t" \ 445 + STR(PTR)"\t1b, 11b\n\t" \ 446 + STR(PTR)"\t2b, 11b\n\t" \ 447 + STR(PTR)"\t3b, 11b\n\t" \ 448 + STR(PTR)"\t4b, 11b\n\t" \ 449 + ".previous" \ 450 + : "=&r" (res) \ 451 + : "r" (value), "r" (addr), "i" (-EFAULT) \ 452 + : "memory"); 453 + 454 + #define StoreDW(addr, value, res) \ 455 + __asm__ __volatile__ ( \ 456 + ".set\tpush\n\t" \ 457 + ".set\tnoat\n\t" \ 458 + "1:sb\t%1, 0(%2)\n\t" \ 459 + "dsrl\t$1, %1, 0x8\n\t" \ 460 + "2:sb\t$1, 1(%2)\n\t" \ 461 + "dsrl\t$1, $1, 0x8\n\t" \ 462 + "3:sb\t$1, 2(%2)\n\t" \ 463 + "dsrl\t$1, $1, 0x8\n\t" \ 464 + "4:sb\t$1, 3(%2)\n\t" \ 465 + "dsrl\t$1, $1, 0x8\n\t" \ 466 + "5:sb\t$1, 4(%2)\n\t" \ 467 + "dsrl\t$1, $1, 0x8\n\t" \ 468 + "6:sb\t$1, 5(%2)\n\t" \ 469 + "dsrl\t$1, $1, 0x8\n\t" \ 470 + "7:sb\t$1, 6(%2)\n\t" \ 471 + "dsrl\t$1, $1, 0x8\n\t" \ 472 + "8:sb\t$1, 7(%2)\n\t" \ 473 + "dsrl\t$1, $1, 0x8\n\t" \ 474 + ".set\tpop\n\t" \ 475 + "li\t%0, 0\n" \ 476 + "10:\n\t" \ 477 + ".insn\n\t" \ 478 + ".section\t.fixup,\"ax\"\n\t" \ 479 + "11:\tli\t%0, %3\n\t" \ 480 + "j\t10b\n\t" \ 481 + ".previous\n\t" \ 482 + ".section\t__ex_table,\"a\"\n\t" \ 483 + STR(PTR)"\t1b, 11b\n\t" \ 484 + STR(PTR)"\t2b, 11b\n\t" \ 485 + STR(PTR)"\t3b, 11b\n\t" \ 486 + STR(PTR)"\t4b, 11b\n\t" \ 487 + STR(PTR)"\t5b, 11b\n\t" \ 488 + STR(PTR)"\t6b, 11b\n\t" \ 489 + STR(PTR)"\t7b, 11b\n\t" \ 490 + STR(PTR)"\t8b, 11b\n\t" \ 491 + ".previous" \ 492 + : "=&r" (res) \ 493 + : "r" (value), "r" (addr), "i" (-EFAULT) \ 494 + : "memory"); 495 + #endif /* CONFIG_CPU_MIPSR6 */ 729 496 #endif 730 497 731 498 static void emulate_load_store_insn(struct pt_regs *regs, ··· 1082 703 break; 1083 704 return; 1084 705 706 + #ifndef CONFIG_CPU_MIPSR6 1085 707 /* 1086 708 * COP2 is available to implementor for application specific use. 1087 709 * It's up to applications to register a notifier chain and do 1088 710 * whatever they have to do, including possible sending of signals. 
711 + * 712 + * This instruction has been reallocated in Release 6 1089 713 */ 1090 714 case lwc2_op: 1091 715 cu2_notifier_call_chain(CU2_LWC2_OP, regs); ··· 1105 723 case sdc2_op: 1106 724 cu2_notifier_call_chain(CU2_SDC2_OP, regs); 1107 725 break; 1108 - 726 + #endif 1109 727 default: 1110 728 /* 1111 729 * Pheeee... We encountered an yet unknown instruction or
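Since R6 drops lwl/lwr/swl/swr and their 64-bit forms, every replacement macro above assembles or scatters the value one byte at a time, with an __ex_table fixup per access. Stripped of the fixup machinery, the big-endian LoadW variant computes nothing more than the following (illustrative C, not kernel code):

#include <stdint.h>

/* Byte-wise big-endian 32-bit load: byte 0 ends up most significant,
 * matching the lb/lbu plus sll/or sequence in the macro above. */
static inline uint32_t load_w_be(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}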
+1
arch/mips/lib/Makefile
··· 8 8 9 9 obj-y += iomap.o 10 10 obj-$(CONFIG_PCI) += iomap-pci.o 11 + lib-$(CONFIG_GENERIC_CSUM) := $(filter-out csum_partial.o, $(lib-y)) 11 12 12 13 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o 13 14 obj-$(CONFIG_CPU_R3000) += r3k_dump_tlb.o
+23
arch/mips/lib/memcpy.S
··· 293 293 and t0, src, ADDRMASK 294 294 PREFS( 0, 2*32(src) ) 295 295 PREFD( 1, 2*32(dst) ) 296 + #ifndef CONFIG_CPU_MIPSR6 296 297 bnez t1, .Ldst_unaligned\@ 297 298 nop 298 299 bnez t0, .Lsrc_unaligned_dst_aligned\@ 300 + #else 301 + or t0, t0, t1 302 + bnez t0, .Lcopy_unaligned_bytes\@ 303 + #endif 299 304 /* 300 305 * use delay slot for fall-through 301 306 * src and dst are aligned; need to compute rem ··· 381 376 bne rem, len, 1b 382 377 .set noreorder 383 378 379 + #ifndef CONFIG_CPU_MIPSR6 384 380 /* 385 381 * src and dst are aligned, need to copy rem bytes (rem < NBYTES) 386 382 * A loop would do only a byte at a time with possible branch ··· 483 477 bne len, rem, 1b 484 478 .set noreorder 485 479 480 + #endif /* !CONFIG_CPU_MIPSR6 */ 486 481 .Lcopy_bytes_checklen\@: 487 482 beqz len, .Ldone\@ 488 483 nop ··· 511 504 .Ldone\@: 512 505 jr ra 513 506 nop 507 + 508 + #ifdef CONFIG_CPU_MIPSR6 509 + .Lcopy_unaligned_bytes\@: 510 + 1: 511 + COPY_BYTE(0) 512 + COPY_BYTE(1) 513 + COPY_BYTE(2) 514 + COPY_BYTE(3) 515 + COPY_BYTE(4) 516 + COPY_BYTE(5) 517 + COPY_BYTE(6) 518 + COPY_BYTE(7) 519 + ADD src, src, 8 520 + b 1b 521 + ADD dst, dst, 8 522 + #endif /* CONFIG_CPU_MIPSR6 */ 514 523 .if __memcpy == 1 515 524 END(memcpy) 516 525 .set __memcpy, 0
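On R6 the lwl/lwr-based partial-word paths above are compiled out, so the dispatch simply ORs the source and destination misalignment masks together and, if either is set, branches to .Lcopy_unaligned_bytes, an eight-byte-unrolled byte loop. The test, restated as illustrative C:

#include <stdint.h>

/* t0 | t1 in the hunk above: on R6 any misalignment on either side
 * sends the whole copy down the byte loop. */
static inline int needs_byte_loop(const void *dst, const void *src)
{
	return (((uintptr_t)dst | (uintptr_t)src) & (sizeof(long) - 1)) != 0;
}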
+47
arch/mips/lib/memset.S
··· 111 111 .set at 112 112 #endif 113 113 114 + #ifndef CONFIG_CPU_MIPSR6 114 115 R10KCBARRIER(0(ra)) 115 116 #ifdef __MIPSEB__ 116 117 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ ··· 121 120 PTR_SUBU a0, t0 /* long align ptr */ 122 121 PTR_ADDU a2, t0 /* correct size */ 123 122 123 + #else /* CONFIG_CPU_MIPSR6 */ 124 + #define STORE_BYTE(N) \ 125 + EX(sb, a1, N(a0), .Lbyte_fixup\@); \ 126 + beqz t0, 0f; \ 127 + PTR_ADDU t0, 1; 128 + 129 + PTR_ADDU a2, t0 /* correct size */ 130 + PTR_ADDU t0, 1 131 + STORE_BYTE(0) 132 + STORE_BYTE(1) 133 + #if LONGSIZE == 4 134 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) 135 + #else 136 + STORE_BYTE(2) 137 + STORE_BYTE(3) 138 + STORE_BYTE(4) 139 + STORE_BYTE(5) 140 + EX(sb, a1, 6(a0), .Lbyte_fixup\@) 141 + #endif 142 + 0: 143 + ori a0, STORMASK 144 + xori a0, STORMASK 145 + PTR_ADDIU a0, STORSIZE 146 + #endif /* CONFIG_CPU_MIPSR6 */ 124 147 1: ori t1, a2, 0x3f /* # of full blocks */ 125 148 xori t1, 0x3f 126 149 beqz t1, .Lmemset_partial\@ /* no block to fill */ ··· 184 159 andi a2, STORMASK /* At most one long to go */ 185 160 186 161 beqz a2, 1f 162 + #ifndef CONFIG_CPU_MIPSR6 187 163 PTR_ADDU a0, a2 /* What's left */ 188 164 R10KCBARRIER(0(ra)) 189 165 #ifdef __MIPSEB__ 190 166 EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) 191 167 #else 192 168 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) 169 + #endif 170 + #else 171 + PTR_SUBU t0, $0, a2 172 + PTR_ADDIU t0, 1 173 + STORE_BYTE(0) 174 + STORE_BYTE(1) 175 + #if LONGSIZE == 4 176 + EX(sb, a1, 2(a0), .Lbyte_fixup\@) 177 + #else 178 + STORE_BYTE(2) 179 + STORE_BYTE(3) 180 + STORE_BYTE(4) 181 + STORE_BYTE(5) 182 + EX(sb, a1, 6(a0), .Lbyte_fixup\@) 183 + #endif 184 + 0: 193 185 #endif 194 186 1: jr ra 195 187 move a2, zero ··· 227 185 .set __memset, 0 228 186 .hidden __memset 229 187 .endif 188 + 189 + .Lbyte_fixup\@: 190 + PTR_SUBU a2, $0, t0 191 + jr ra 192 + PTR_ADDIU a2, 1 230 193 231 194 .Lfirst_fixup\@: 232 195 jr ra
+1 -1
arch/mips/lib/mips-atomic.c
··· 15 15 #include <linux/export.h> 16 16 #include <linux/stringify.h> 17 17 18 - #ifndef CONFIG_CPU_MIPSR2 18 + #if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6) 19 19 20 20 /* 21 21 * For cli() we have to insert nops to make sure that the new value
+158 -11
arch/mips/math-emu/cp1emu.c
··· 48 48 #include <asm/processor.h> 49 49 #include <asm/fpu_emulator.h> 50 50 #include <asm/fpu.h> 51 + #include <asm/mips-r2-to-r6-emul.h> 51 52 52 53 #include "ieee754.h" 53 54 ··· 69 68 #define modeindex(v) ((v) & FPU_CSR_RM) 70 69 71 70 /* convert condition code register number to csr bit */ 72 - static const unsigned int fpucondbit[8] = { 71 + const unsigned int fpucondbit[8] = { 73 72 FPU_CSR_COND0, 74 73 FPU_CSR_COND1, 75 74 FPU_CSR_COND2, ··· 449 448 dec_insn.next_pc_inc; 450 449 /* Fall through */ 451 450 case jr_op: 451 + /* For R6, JR already emulated in jalr_op */ 452 + if (NO_R6EMU && insn.r_format.opcode == jr_op) 453 + break; 452 454 *contpc = regs->regs[insn.r_format.rs]; 453 455 return 1; 454 456 } ··· 460 456 switch (insn.i_format.rt) { 461 457 case bltzal_op: 462 458 case bltzall_op: 459 + if (NO_R6EMU && (insn.i_format.rs || 460 + insn.i_format.rt == bltzall_op)) 461 + break; 462 + 463 463 regs->regs[31] = regs->cp0_epc + 464 464 dec_insn.pc_inc + 465 465 dec_insn.next_pc_inc; 466 466 /* Fall through */ 467 - case bltz_op: 468 467 case bltzl_op: 468 + if (NO_R6EMU) 469 + break; 470 + case bltz_op: 469 471 if ((long)regs->regs[insn.i_format.rs] < 0) 470 472 *contpc = regs->cp0_epc + 471 473 dec_insn.pc_inc + ··· 483 473 return 1; 484 474 case bgezal_op: 485 475 case bgezall_op: 476 + if (NO_R6EMU && (insn.i_format.rs || 477 + insn.i_format.rt == bgezall_op)) 478 + break; 479 + 486 480 regs->regs[31] = regs->cp0_epc + 487 481 dec_insn.pc_inc + 488 482 dec_insn.next_pc_inc; 489 483 /* Fall through */ 490 - case bgez_op: 491 484 case bgezl_op: 485 + if (NO_R6EMU) 486 + break; 487 + case bgez_op: 492 488 if ((long)regs->regs[insn.i_format.rs] >= 0) 493 489 *contpc = regs->cp0_epc + 494 490 dec_insn.pc_inc + ··· 521 505 /* Set microMIPS mode bit: XOR for jalx. */ 522 506 *contpc ^= bit; 523 507 return 1; 524 - case beq_op: 525 508 case beql_op: 509 + if (NO_R6EMU) 510 + break; 511 + case beq_op: 526 512 if (regs->regs[insn.i_format.rs] == 527 513 regs->regs[insn.i_format.rt]) 528 514 *contpc = regs->cp0_epc + ··· 535 517 dec_insn.pc_inc + 536 518 dec_insn.next_pc_inc; 537 519 return 1; 538 - case bne_op: 539 520 case bnel_op: 521 + if (NO_R6EMU) 522 + break; 523 + case bne_op: 540 524 if (regs->regs[insn.i_format.rs] != 541 525 regs->regs[insn.i_format.rt]) 542 526 *contpc = regs->cp0_epc + ··· 549 529 dec_insn.pc_inc + 550 530 dec_insn.next_pc_inc; 551 531 return 1; 552 - case blez_op: 553 532 case blezl_op: 533 + if (NO_R6EMU) 534 + break; 535 + case blez_op: 536 + 537 + /* 538 + * Compact branches for R6 for the 539 + * blez and blezl opcodes. 540 + * BLEZ | rs = 0 | rt != 0 == BLEZALC 541 + * BLEZ | rs = rt != 0 == BGEZALC 542 + * BLEZ | rs != 0 | rt != 0 == BGEUC 543 + * BLEZL | rs = 0 | rt != 0 == BLEZC 544 + * BLEZL | rs = rt != 0 == BGEZC 545 + * BLEZL | rs != 0 | rt != 0 == BGEC 546 + * 547 + * For real BLEZ{,L}, rt is always 0. 
548 + */ 549 + if (cpu_has_mips_r6 && insn.i_format.rt) { 550 + if ((insn.i_format.opcode == blez_op) && 551 + ((!insn.i_format.rs && insn.i_format.rt) || 552 + (insn.i_format.rs == insn.i_format.rt))) 553 + regs->regs[31] = regs->cp0_epc + 554 + dec_insn.pc_inc; 555 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 556 + dec_insn.next_pc_inc; 557 + 558 + return 1; 559 + } 554 560 if ((long)regs->regs[insn.i_format.rs] <= 0) 555 561 *contpc = regs->cp0_epc + 556 562 dec_insn.pc_inc + ··· 586 540 dec_insn.pc_inc + 587 541 dec_insn.next_pc_inc; 588 542 return 1; 589 - case bgtz_op: 590 543 case bgtzl_op: 544 + if (NO_R6EMU) 545 + break; 546 + case bgtz_op: 547 + /* 548 + * Compact branches for R6 for the 549 + * bgtz and bgtzl opcodes. 550 + * BGTZ | rs = 0 | rt != 0 == BGTZALC 551 + * BGTZ | rs = rt != 0 == BLTZALC 552 + * BGTZ | rs != 0 | rt != 0 == BLTUC 553 + * BGTZL | rs = 0 | rt != 0 == BGTZC 554 + * BGTZL | rs = rt != 0 == BLTZC 555 + * BGTZL | rs != 0 | rt != 0 == BLTC 556 + * 557 + * *ZALC variant for BGTZ && rt != 0 558 + * For real BGTZ{,L}, rt is always 0. 559 + */ 560 + if (cpu_has_mips_r6 && insn.i_format.rt) { 561 + if ((insn.i_format.opcode == blez_op) && 562 + ((!insn.i_format.rs && insn.i_format.rt) || 563 + (insn.i_format.rs == insn.i_format.rt))) 564 + regs->regs[31] = regs->cp0_epc + 565 + dec_insn.pc_inc; 566 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 567 + dec_insn.next_pc_inc; 568 + 569 + return 1; 570 + } 571 + 591 572 if ((long)regs->regs[insn.i_format.rs] > 0) 592 573 *contpc = regs->cp0_epc + 593 574 dec_insn.pc_inc + ··· 623 550 *contpc = regs->cp0_epc + 624 551 dec_insn.pc_inc + 625 552 dec_insn.next_pc_inc; 553 + return 1; 554 + case cbcond0_op: 555 + case cbcond1_op: 556 + if (!cpu_has_mips_r6) 557 + break; 558 + if (insn.i_format.rt && !insn.i_format.rs) 559 + regs->regs[31] = regs->cp0_epc + 4; 560 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 561 + dec_insn.next_pc_inc; 562 + 626 563 return 1; 627 564 #ifdef CONFIG_CPU_CAVIUM_OCTEON 628 565 case lwc2_op: /* This is bbit0 on Octeon */ ··· 659 576 else 660 577 *contpc = regs->cp0_epc + 8; 661 578 return 1; 579 + #else 580 + case bc6_op: 581 + /* 582 + * Only valid for MIPS R6 but we can still end up 583 + * here from a broken userland so just tell the emulator 584 + * this is not a branch and let it break later on. 
585 + */ 586 + if (!cpu_has_mips_r6) 587 + break; 588 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 589 + dec_insn.next_pc_inc; 590 + 591 + return 1; 592 + case balc6_op: 593 + if (!cpu_has_mips_r6) 594 + break; 595 + regs->regs[31] = regs->cp0_epc + 4; 596 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 597 + dec_insn.next_pc_inc; 598 + 599 + return 1; 600 + case beqzcjic_op: 601 + if (!cpu_has_mips_r6) 602 + break; 603 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 604 + dec_insn.next_pc_inc; 605 + 606 + return 1; 607 + case bnezcjialc_op: 608 + if (!cpu_has_mips_r6) 609 + break; 610 + if (!insn.i_format.rs) 611 + regs->regs[31] = regs->cp0_epc + 4; 612 + *contpc = regs->cp0_epc + dec_insn.pc_inc + 613 + dec_insn.next_pc_inc; 614 + 615 + return 1; 662 616 #endif 663 617 case cop0_op: 664 618 case cop1_op: 619 + /* Need to check for R6 bc1nez and bc1eqz branches */ 620 + if (cpu_has_mips_r6 && 621 + ((insn.i_format.rs == bc1eqz_op) || 622 + (insn.i_format.rs == bc1nez_op))) { 623 + bit = 0; 624 + switch (insn.i_format.rs) { 625 + case bc1eqz_op: 626 + if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1) 627 + bit = 1; 628 + break; 629 + case bc1nez_op: 630 + if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)) 631 + bit = 1; 632 + break; 633 + } 634 + if (bit) 635 + *contpc = regs->cp0_epc + 636 + dec_insn.pc_inc + 637 + (insn.i_format.simmediate << 2); 638 + else 639 + *contpc = regs->cp0_epc + 640 + dec_insn.pc_inc + 641 + dec_insn.next_pc_inc; 642 + 643 + return 1; 644 + } 645 + /* R2/R6 compatible cop1 instruction. Fall through */ 665 646 case cop2_op: 666 647 case cop1x_op: 667 648 if (insn.i_format.rs == bc_op) { ··· 1561 1414 * achieve full IEEE-754 accuracy - however this emulator does. 1562 1415 */ 1563 1416 case frsqrt_op: 1564 - if (!cpu_has_mips_4_5_r2) 1417 + if (!cpu_has_mips_4_5_r2_r6) 1565 1418 return SIGILL; 1566 1419 1567 1420 handler.u = fpemu_sp_rsqrt; 1568 1421 goto scopuop; 1569 1422 1570 1423 case frecip_op: 1571 - if (!cpu_has_mips_4_5_r2) 1424 + if (!cpu_has_mips_4_5_r2_r6) 1572 1425 return SIGILL; 1573 1426 1574 1427 handler.u = fpemu_sp_recip; ··· 1763 1616 * achieve full IEEE-754 accuracy - however this emulator does. 1764 1617 */ 1765 1618 case frsqrt_op: 1766 - if (!cpu_has_mips_4_5_r2) 1619 + if (!cpu_has_mips_4_5_r2_r6) 1767 1620 return SIGILL; 1768 1621 1769 1622 handler.u = fpemu_dp_rsqrt; 1770 1623 goto dcopuop; 1771 1624 case frecip_op: 1772 - if (!cpu_has_mips_4_5_r2) 1625 + if (!cpu_has_mips_4_5_r2_r6) 1773 1626 return SIGILL; 1774 1627 1775 1628 handler.u = fpemu_dp_recip;
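The branch-likely opcodes that R6 recycles are all disambiguated the same way in this function: NO_R6EMU rejects the true R2 forms, and otherwise the rs/rt fields select the compact-branch meaning. The BLEZ/BLEZL table from the comment above, restated as illustrative C (not kernel code):

/* Illustrative restatement of the BLEZ{,L} reuse rules documented above. */
static const char *decode_blez_r6(unsigned int rs, unsigned int rt, int likely)
{
	if (rt == 0)
		return likely ? "blezl" : "blez";	/* the real R2 branches */
	if (rs == 0)
		return likely ? "blezc" : "blezalc";
	if (rs == rt)
		return likely ? "bgezc" : "bgezalc";
	return likely ? "bgec" : "bgeuc";
}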
+4 -2
arch/mips/mm/c-r4k.c
··· 794 794 __asm__ __volatile__ ( 795 795 ".set push\n\t" 796 796 ".set noat\n\t" 797 - ".set mips3\n\t" 797 + ".set "MIPS_ISA_LEVEL"\n\t" 798 798 #ifdef CONFIG_32BIT 799 799 "la $at,1f\n\t" 800 800 #endif ··· 1255 1255 case CPU_P5600: 1256 1256 case CPU_PROAPTIV: 1257 1257 case CPU_M5150: 1258 + case CPU_QEMU_GENERIC: 1258 1259 if (!(read_c0_config7() & MIPS_CONF7_IAR) && 1259 1260 (c->icache.waysize > PAGE_SIZE)) 1260 1261 c->icache.flags |= MIPS_CACHE_ALIASES; ··· 1473 1472 1474 1473 default: 1475 1474 if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 1476 - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) { 1475 + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | 1476 + MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) { 1477 1477 #ifdef CONFIG_MIPS_CPU_SCACHE 1478 1478 if (mips_sc_init ()) { 1479 1479 scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
+20 -9
arch/mips/mm/fault.c
··· 14 14 #include <linux/string.h> 15 15 #include <linux/types.h> 16 16 #include <linux/ptrace.h> 17 + #include <linux/ratelimit.h> 17 18 #include <linux/mman.h> 18 19 #include <linux/mm.h> 19 20 #include <linux/smp.h> ··· 28 27 #include <asm/ptrace.h> 29 28 #include <asm/highmem.h> /* For VMALLOC_END */ 30 29 #include <linux/kdebug.h> 30 + 31 + int show_unhandled_signals = 1; 31 32 32 33 /* 33 34 * This routine handles page faults. It determines the address, ··· 46 43 siginfo_t info; 47 44 int fault; 48 45 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; 46 + 47 + static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10); 49 48 50 49 #if 0 51 50 printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(), ··· 208 203 if (user_mode(regs)) { 209 204 tsk->thread.cp0_badvaddr = address; 210 205 tsk->thread.error_code = write; 211 - #if 0 212 - printk("do_page_fault() #2: sending SIGSEGV to %s for " 213 - "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n", 214 - tsk->comm, 215 - write ? "write access to" : "read access from", 216 - field, address, 217 - field, (unsigned long) regs->cp0_epc, 218 - field, (unsigned long) regs->regs[31]); 219 - #endif 206 + if (show_unhandled_signals && 207 + unhandled_signal(tsk, SIGSEGV) && 208 + __ratelimit(&ratelimit_state)) { 209 + pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx", 210 + tsk->comm, 211 + write ? "write access to" : "read access from", 212 + field, address); 213 + pr_info("epc = %0*lx in", field, 214 + (unsigned long) regs->cp0_epc); 215 + print_vma_addr(" ", regs->cp0_epc); 216 + pr_info("ra = %0*lx in", field, 217 + (unsigned long) regs->regs[31]); 218 + print_vma_addr(" ", regs->regs[31]); 219 + pr_info("\n"); 220 + } 220 221 info.si_signo = SIGSEGV; 221 222 info.si_errno = 0; 222 223 /* info.si_code has been set above */
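With show_unhandled_signals set (the new default above), a user task that dies on a bad access now leaves a ratelimited trace in the kernel log. Following the pr_info() format strings, the output looks roughly like this (illustrative names and addresses):

do_page_fault(): sending SIGSEGV to crasher for invalid read access from 0000000000000010
epc = 00000000004006f0 in crasher[400000+1000]
ra = 000000fff7ee62a8 in libc-2.19.so[fff7e68000+9b000]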
+26 -4
arch/mips/mm/page.c
··· 72 72 #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) 73 73 #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) 74 74 75 + /* 76 + * R6 limits the pref instruction offset to a 9-bit signed 77 + * immediate (-256 to 255). Skip emitting the prefetch if the 78 + * offset does not fit. 79 + */ 79 + #define _uasm_i_pref(a, b, c, d) \ 80 + do { \ 81 + if (cpu_has_mips_r6) { \ 82 + if (c <= 0xff && c >= -0x100) \ 83 + uasm_i_pref(a, b, c, d);\ 84 + } else { \ 85 + uasm_i_pref(a, b, c, d); \ 86 + } \ 87 + } while(0) 88 + 75 89 static int pref_bias_clear_store; 76 90 static int pref_bias_copy_load; 77 91 static int pref_bias_copy_store; ··· 192 178 pref_bias_copy_load = 256; 193 179 pref_bias_copy_store = 128; 194 180 pref_src_mode = Pref_LoadStreamed; 195 - pref_dst_mode = Pref_PrepareForStore; 181 + if (cpu_has_mips_r6) 182 + /* 183 + * Bit 30 (Pref_PrepareForStore) has been 184 + * removed from MIPS R6. Use bit 5 185 + * (Pref_StoreStreamed). 186 + */ 187 + pref_dst_mode = Pref_StoreStreamed; 188 + else 189 + pref_dst_mode = Pref_PrepareForStore; 196 190 break; 197 191 } 198 192 } else { ··· 236 214 return; 237 215 238 216 if (pref_bias_clear_store) { 239 - uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, 217 + _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, 240 218 A0); 241 219 } else if (cache_line_size == (half_clear_loop_size << 1)) { 242 220 if (cpu_has_cache_cdex_s) { ··· 379 357 return; 380 358 381 359 if (pref_bias_copy_load) 382 - uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); 360 + _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); 383 361 } 384 362 385 363 static inline void build_copy_store_pref(u32 **buf, int off) ··· 388 366 return; 389 367 390 368 if (pref_bias_copy_store) { 391 - uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, 369 + _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, 392 370 A0); 393 371 } else if (cache_line_size == (half_copy_loop_size << 1)) { 394 372 if (cpu_has_cache_cdex_s) {
+3 -1
arch/mips/mm/sc-mips.c
··· 81 81 case CPU_PROAPTIV: 82 82 case CPU_P5600: 83 83 case CPU_BMIPS5000: 84 + case CPU_QEMU_GENERIC: 84 85 if (config2 & (1 << 12)) 85 86 return 0; 86 87 } ··· 105 104 106 105 /* Ignore anything but MIPSxx processors */ 107 106 if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 | 108 - MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2))) 107 + MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 | 108 + MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6))) 109 109 return 0; 110 110 111 111 /* Does this MIPS32/MIPS64 CPU have a config2 register? */
+3 -5
arch/mips/mm/tlb-r4k.c
··· 485 485 * Enable the no read, no exec bits, and enable large virtual 486 486 * address. 487 487 */ 488 - u32 pg = PG_RIE | PG_XIE; 489 488 #ifdef CONFIG_64BIT 490 - pg |= PG_ELPA; 489 + set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA); 490 + #else 491 + set_c0_pagegrain(PG_RIE | PG_XIE); 491 492 #endif 492 - if (cpu_has_rixiex) 493 - pg |= PG_IEC; 494 - write_c0_pagegrain(pg); 495 493 } 496 494 497 495 temp_tlb_entry = current_cpu_data.tlbsize - 1;
+4 -3
arch/mips/mm/tlbex.c
··· 501 501 case tlb_indexed: tlbw = uasm_i_tlbwi; break; 502 502 } 503 503 504 - if (cpu_has_mips_r2) { 504 + if (cpu_has_mips_r2_exec_hazard) { 505 505 /* 506 506 * The architecture spec says an ehb is required here, 507 507 * but a number of cores do not have the hazard and ··· 514 514 case CPU_PROAPTIV: 515 515 case CPU_P5600: 516 516 case CPU_M5150: 517 + case CPU_QEMU_GENERIC: 517 518 break; 518 519 519 520 default: ··· 1953 1952 1954 1953 switch (current_cpu_type()) { 1955 1954 default: 1956 - if (cpu_has_mips_r2) { 1955 + if (cpu_has_mips_r2_exec_hazard) { 1957 1956 uasm_i_ehb(&p); 1958 1957 1959 1958 case CPU_CAVIUM_OCTEON: ··· 2020 2019 2021 2020 switch (current_cpu_type()) { 2022 2021 default: 2023 - if (cpu_has_mips_r2) { 2022 + if (cpu_has_mips_r2_exec_hazard) { 2024 2023 uasm_i_ehb(&p); 2025 2024 2026 2025 case CPU_CAVIUM_OCTEON:
-8
arch/mips/mm/uasm-micromips.c
··· 38 38 | (e) << RE_SH \ 39 39 | (f) << FUNC_SH) 40 40 41 - /* Define these when we are not the ISA the kernel is being compiled with. */ 42 - #ifndef CONFIG_CPU_MICROMIPS 43 - #define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) 44 - #define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) 45 - #define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) 46 - #define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) 47 - #endif 48 - 49 41 #include "uasm.c" 50 42 51 43 static struct insn insn_table_MM[] = {
+31 -7
arch/mips/mm/uasm-mips.c
··· 38 38 | (e) << RE_SH \ 39 39 | (f) << FUNC_SH) 40 40 41 - /* Define these when we are not the ISA the kernel is being compiled with. */ 42 - #ifdef CONFIG_CPU_MICROMIPS 43 - #define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off) 44 - #define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off) 45 - #define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off) 46 - #define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off) 47 - #endif 41 + /* This macro sets the non-variable bits of an R6 instruction. */ 42 + #define M6(a, b, c, d, e) \ 43 + ((a) << OP_SH \ 44 + | (b) << RS_SH \ 45 + | (c) << RT_SH \ 46 + | (d) << SIMM9_SH \ 47 + | (e) << FUNC_SH) 48 48 49 49 #include "uasm.c" 50 50 ··· 62 62 { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM }, 63 63 { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM }, 64 64 { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM }, 65 + #ifndef CONFIG_CPU_MIPSR6 65 66 { insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 67 + #else 68 + { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 }, 69 + #endif 66 70 { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 67 71 { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD }, 68 72 { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE }, ··· 89 85 { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, 90 86 { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, 91 87 { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, 88 + #ifndef CONFIG_CPU_MIPSR6 92 89 { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, 90 + #else 91 + { insn_jr, M(spec_op, 0, 0, 0, 0, jalr_op), RS }, 92 + #endif 93 93 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 94 94 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 95 95 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 96 96 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 97 + #ifndef CONFIG_CPU_MIPSR6 97 98 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 98 99 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 100 + #else 101 + { insn_lld, M6(spec3_op, 0, 0, 0, lld6_op), RS | RT | SIMM9 }, 102 + { insn_ll, M6(spec3_op, 0, 0, 0, ll6_op), RS | RT | SIMM9 }, 103 + #endif 99 104 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 100 105 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 101 106 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, ··· 117 104 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, 118 105 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 119 106 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, 107 + #ifndef CONFIG_CPU_MIPSR6 120 108 { insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 109 + #else 110 + { insn_pref, M6(spec3_op, 0, 0, 0, pref6_op), RS | RT | SIMM9 }, 111 + #endif 121 112 { insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 }, 122 113 { insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE }, 114 + #ifndef CONFIG_CPU_MIPSR6 123 115 { insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 124 116 { insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 117 + #else 118 + { insn_scd, M6(spec3_op, 0, 0, 0, scd6_op), RS | RT | SIMM9 }, 119 + { insn_sc, M6(spec3_op, 0, 0, 0, sc6_op), RS | RT | SIMM9 }, 120 + #endif 125 121 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 126 122 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 127 123 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, ··· 220 198 op |= build_set(va_arg(ap, u32)); 221 199 if (ip->fields & SCIMM) 222 200 op |= 
build_scimm(va_arg(ap, u32)); 201 + if (ip->fields & SIMM9) 202 + op |= build_scimm9(va_arg(ap, u32)); 223 203 va_end(ap); 224 204 225 205 **buf = op;
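With the table split above, the fixed bits of an R6 word come from M6() and build_insn() ORs in the variable fields, SIMM9 included. A self-contained sketch of the layout, using the field positions from uasm.c; the opcode values are placeholders, not taken from an R6 manual:

#include <stdint.h>

enum { OP_SH = 26, RS_SH = 21, RT_SH = 16, SIMM9_SH = 7, FUNC_SH = 0 };

static uint32_t encode_simm9_insn(uint32_t major, uint32_t minor,
				  uint32_t rs, uint32_t rt, int32_t off)
{
	uint32_t op = (major << OP_SH) | (minor << FUNC_SH);	/* as M6() */

	op |= (rs << RS_SH) | (rt << RT_SH);		/* register fields */
	op |= ((uint32_t)off & 0x1ff) << SIMM9_SH;	/* as build_scimm9() */
	return op;
}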
+13 -2
arch/mips/mm/uasm.c
··· 24 24 JIMM = 0x080, 25 25 FUNC = 0x100, 26 26 SET = 0x200, 27 - SCIMM = 0x400 27 + SCIMM = 0x400, 28 + SIMM9 = 0x800, 28 29 }; 29 30 30 31 #define OP_MASK 0x3f ··· 42 41 #define FUNC_SH 0 43 42 #define SET_MASK 0x7 44 43 #define SET_SH 0 44 + #define SIMM9_SH 7 45 + #define SIMM9_MASK 0x1ff 45 46 46 47 enum opcode { 47 48 insn_invalid, ··· 117 114 KERN_WARNING "Micro-assembler field overflow\n"); 118 115 119 116 return (arg & SCIMM_MASK) << SCIMM_SH; 117 + } 118 + 119 + static inline u32 build_scimm9(s32 arg) 120 + { 121 + WARN((arg > 0xff || arg < -0x100), 122 + KERN_WARNING "Micro-assembler field overflow\n"); 123 + 124 + return (arg & SIMM9_MASK) << SIMM9_SH; 120 125 } 121 126 122 127 static inline u32 build_func(u32 arg) ··· 341 330 void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b, 342 331 unsigned int c) 343 332 { 344 - if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5) 333 + if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5) 345 334 /* 346 335 * As per erratum Core-14449, replace prefetches 0-4, 347 336 * 6-24 with 'pref 28'.
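build_scimm9() packs a signed value into the nine bits at SIMM9_SH; recovering it means sign-extending from bit 8 of the field. A standalone round trip (illustrative):

#include <stdint.h>

static uint32_t simm9_encode(int32_t off)	/* what build_scimm9() emits */
{
	return ((uint32_t)off & 0x1ff) << 7;
}

static int32_t simm9_decode(uint32_t insn)	/* sign-extend bits 15:7 */
{
	return (int32_t)(insn << 16) >> 23;
}

/* simm9_decode(simm9_encode(-4)) == -4 */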
+1 -1
arch/mips/mti-sead3/sead3-time.c
··· 72 72 int get_c0_perfcount_int(void) 73 73 { 74 74 if (gic_present) 75 - return gic_get_c0_compare_int(); 75 + return gic_get_c0_perfcount_int(); 76 76 if (cp0_perfcount_irq >= 0) 77 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 78 78 return -1;
+2 -2
arch/mips/pci/pci-bcm1480.c
··· 173 173 } 174 174 175 175 struct pci_ops bcm1480_pci_ops = { 176 - .read = bcm1480_pcibios_read, 177 - .write = bcm1480_pcibios_write, 176 + .read = bcm1480_pcibios_read, 177 + .write = bcm1480_pcibios_write, 178 178 }; 179 179 180 180 static struct resource bcm1480_mem_resource = {
+2 -2
arch/mips/pci/pci-octeon.c
··· 327 327 328 328 329 329 static struct pci_ops octeon_pci_ops = { 330 - .read = octeon_read_config, 331 - .write = octeon_write_config, 330 + .read = octeon_read_config, 331 + .write = octeon_write_config, 332 332 }; 333 333 334 334 static struct resource octeon_pci_mem_resource = {
+6 -6
arch/mips/pci/pcie-octeon.c
··· 1792 1792 } 1793 1793 1794 1794 static struct pci_ops octeon_pcie0_ops = { 1795 - .read = octeon_pcie0_read_config, 1796 - .write = octeon_pcie0_write_config, 1795 + .read = octeon_pcie0_read_config, 1796 + .write = octeon_pcie0_write_config, 1797 1797 }; 1798 1798 1799 1799 static struct resource octeon_pcie0_mem_resource = { ··· 1813 1813 }; 1814 1814 1815 1815 static struct pci_ops octeon_pcie1_ops = { 1816 - .read = octeon_pcie1_read_config, 1817 - .write = octeon_pcie1_write_config, 1816 + .read = octeon_pcie1_read_config, 1817 + .write = octeon_pcie1_write_config, 1818 1818 }; 1819 1819 1820 1820 static struct resource octeon_pcie1_mem_resource = { ··· 1834 1834 }; 1835 1835 1836 1836 static struct pci_ops octeon_dummy_ops = { 1837 - .read = octeon_dummy_read_config, 1838 - .write = octeon_dummy_write_config, 1837 + .read = octeon_dummy_read_config, 1838 + .write = octeon_dummy_write_config, 1839 1839 }; 1840 1840 1841 1841 static struct resource octeon_dummy_mem_resource = {
-24
arch/mips/sgi-ip22/ip22-gio.c
··· 152 152 return 0; 153 153 } 154 154 155 - static int gio_device_suspend(struct device *dev, pm_message_t state) 156 - { 157 - struct gio_device *gio_dev = to_gio_device(dev); 158 - struct gio_driver *drv = to_gio_driver(dev->driver); 159 - int error = 0; 160 - 161 - if (dev->driver && drv->suspend) 162 - error = drv->suspend(gio_dev, state); 163 - return error; 164 - } 165 - 166 - static int gio_device_resume(struct device *dev) 167 - { 168 - struct gio_device *gio_dev = to_gio_device(dev); 169 - struct gio_driver *drv = to_gio_driver(dev->driver); 170 - int error = 0; 171 - 172 - if (dev->driver && drv->resume) 173 - error = drv->resume(gio_dev); 174 - return error; 175 - } 176 - 177 155 static void gio_device_shutdown(struct device *dev) 178 156 { 179 157 struct gio_device *gio_dev = to_gio_device(dev); ··· 378 400 .match = gio_bus_match, 379 401 .probe = gio_device_probe, 380 402 .remove = gio_device_remove, 381 - .suspend = gio_device_suspend, 382 - .resume = gio_device_resume, 383 403 .shutdown = gio_device_shutdown, 384 404 .uevent = gio_device_uevent, 385 405 };
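With the legacy bus-level hooks removed, a GIO driver that later needs power management would supply dev_pm_ops through its embedded struct device_driver instead; a hypothetical sketch, with all names illustrative:

static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume  = example_resume,
};
/* ...and the driver sets .driver.pm = &example_pm_ops */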
+4 -3
arch/mips/sgi-ip27/ip27-reset.c
··· 8 8 * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle 9 9 * Copyright (C) 1999, 2000 Silicon Graphics, Inc. 10 10 */ 11 + #include <linux/compiler.h> 11 12 #include <linux/kernel.h> 12 13 #include <linux/sched.h> 13 14 #include <linux/timer.h> ··· 26 25 #include <asm/sn/gda.h> 27 26 #include <asm/sn/sn0/hub.h> 28 27 29 - void machine_restart(char *command) __attribute__((noreturn)); 30 - void machine_halt(void) __attribute__((noreturn)); 31 - void machine_power_off(void) __attribute__((noreturn)); 28 + void machine_restart(char *command) __noreturn; 29 + void machine_halt(void) __noreturn; 30 + void machine_power_off(void) __noreturn; 32 31 33 32 #define noreturn while(1); /* Silence gcc. */ 34 33
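__noreturn comes from <linux/compiler.h> (via the gcc-specific header) and expands to the attribute that was previously spelled out by hand, so only the spelling changes:

#define __noreturn	__attribute__((noreturn))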
+4 -3
arch/mips/sgi-ip32/ip32-reset.c
··· 8 8 * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org> 9 9 */ 10 10 11 + #include <linux/compiler.h> 11 12 #include <linux/init.h> 12 13 #include <linux/kernel.h> 13 14 #include <linux/sched.h> ··· 36 35 static struct timer_list power_timer, blink_timer, debounce_timer; 37 36 static int has_panicked, shuting_down; 38 37 39 - static void ip32_machine_restart(char *command) __attribute__((noreturn)); 40 - static void ip32_machine_halt(void) __attribute__((noreturn)); 41 - static void ip32_machine_power_off(void) __attribute__((noreturn)); 38 + static void ip32_machine_restart(char *command) __noreturn; 39 + static void ip32_machine_halt(void) __noreturn; 40 + static void ip32_machine_power_off(void) __noreturn; 42 41 43 42 static void ip32_machine_restart(char *cmd) 44 43 {
-8
drivers/irqchip/irq-mips-gic.c
··· 192 192 } 193 193 } 194 194 195 - unsigned int gic_get_timer_pending(void) 196 - { 197 - unsigned int vpe_pending; 198 - 199 - vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND)); 200 - return vpe_pending & GIC_VPE_PEND_TIMER_MSK; 201 - } 202 - 203 195 static void gic_bind_eic_interrupt(int irq, int set) 204 196 { 205 197 /* Convert irq vector # to hw int # */
-1
include/linux/irqchip/mips-gic.h
··· 243 243 extern void gic_send_ipi(unsigned int intr); 244 244 extern unsigned int plat_ipi_call_int_xlate(unsigned int); 245 245 extern unsigned int plat_ipi_resched_int_xlate(unsigned int); 246 - extern unsigned int gic_get_timer_pending(void); 247 246 extern int gic_get_c0_compare_int(void); 248 247 extern int gic_get_c0_perfcount_int(void); 249 248 #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
+5
include/uapi/linux/prctl.h
··· 185 185 #define PR_MPX_ENABLE_MANAGEMENT 43 186 186 #define PR_MPX_DISABLE_MANAGEMENT 44 187 187 188 + #define PR_SET_FP_MODE 45 189 + #define PR_GET_FP_MODE 46 190 + # define PR_FP_MODE_FR (1 << 0) /* 64b FP registers */ 191 + # define PR_FP_MODE_FRE (1 << 1) /* 32b compatibility */ 192 + 188 193 #endif /* _LINUX_PRCTL_H */
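A sketch of how userspace would drive the new pair, assuming these constants have reached the installed headers; error handling is elided:

#include <sys/prctl.h>
#include <linux/prctl.h>

static void request_fr1(void)
{
	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

	if (mode >= 0 && !(mode & PR_FP_MODE_FR))
		(void)prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0);
}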
+12
kernel/sys.c
··· 97 97 #ifndef MPX_DISABLE_MANAGEMENT 98 98 # define MPX_DISABLE_MANAGEMENT(a) (-EINVAL) 99 99 #endif 100 + #ifndef GET_FP_MODE 101 + # define GET_FP_MODE(a) (-EINVAL) 102 + #endif 103 + #ifndef SET_FP_MODE 104 + # define SET_FP_MODE(a,b) (-EINVAL) 105 + #endif 100 106 101 107 /* 102 108 * this is where the system-wide overflow UID and GID are defined, for ··· 2224 2218 if (arg2 || arg3 || arg4 || arg5) 2225 2219 return -EINVAL; 2226 2220 error = MPX_DISABLE_MANAGEMENT(me); 2221 + break; 2222 + case PR_SET_FP_MODE: 2223 + error = SET_FP_MODE(me, arg2); 2224 + break; 2225 + case PR_GET_FP_MODE: 2226 + error = GET_FP_MODE(me); 2227 2227 break; 2228 2228 default: 2229 2229 error = -EINVAL;
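The -EINVAL fallbacks mirror the MPX macros just above: an architecture opts in by defining GET_FP_MODE/SET_FP_MODE in its own headers. On MIPS the series is expected to route them to per-task helpers, roughly as follows (shown for illustration):

#define GET_FP_MODE(task)		mips_get_process_fp_mode(task)
#define SET_FP_MODE(task, value)	mips_set_process_fp_mode(task, value)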