Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'mips_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux

Pull MIPS updates from Thomas Bogendoerfer:

- added support for Mobileye SoCs

- unified GPR/CP0 regs handling for uasm

- cleanups and fixes

* tag 'mips_6.9' of git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux: (56 commits)
mips: cm: Convert __mips_cm_phys_base() to weak function
mips: cm: Convert __mips_cm_l2sync_phys_base() to weak function
mips: dts: ralink: mt7621: add cell count properties to usb
mips: dts: ralink: mt7621: add serial1 and serial2 nodes
mips: dts: ralink: mt7621: reorder serial0 properties
mips: dts: ralink: mt7621: associate uart1_pins with serial0
MIPS: ralink: Don't use "proxy" headers
mips: sibyte: make tb_class constant
mips: mt: make mt_class constant
MIPS: ralink: Remove unused of_gpio.h
bus: bt1-apb: Remove duplicate include
MAINTAINERS: remove entry to non-existing file in MOBILEYE MIPS SOCS
MIPS: mipsregs: Parse fp and sp register by name in parse_r
tty: mips_ejtag_fdc: Fix passing incompatible pointer type warning
mips: zboot: Fix "no previous prototype" build warning
MIPS: mipsregs: Set proper ISA level for virt extensions
MIPS: Implement microMIPS MT ASE helpers
MIPS: Limit MIPS_MT_SMP support by ISA revision
MIPS: Loongson64: test for -march=loongson3a cflag
MIPS: BMIPS: Drop unnecessary assembler flag
...

+2014 -925
+8 -7
Documentation/devicetree/bindings/mips/cpus.yaml
··· 23 23 - brcm,bmips4380 24 24 - brcm,bmips5000 25 25 - brcm,bmips5200 26 - - ingenic,xburst-mxu1.0 26 + - img,i6500 27 27 - ingenic,xburst-fpu1.0-mxu1.1 28 28 - ingenic,xburst-fpu2.0-mxu2.0 29 + - ingenic,xburst-mxu1.0 29 30 - ingenic,xburst2-fpu2.1-mxu2.1-smt 30 31 - loongson,gs264 31 32 - mips,m14Kc 32 - - mips,mips4Kc 33 - - mips,mips4KEc 34 - - mips,mips24Kc 35 - - mips,mips24KEc 36 - - mips,mips74Kc 37 33 - mips,mips1004Kc 34 + - mips,mips24KEc 35 + - mips,mips24Kc 36 + - mips,mips4KEc 37 + - mips,mips4Kc 38 + - mips,mips74Kc 38 39 - mti,interaptiv 39 - - mti,mips24KEc 40 40 - mti,mips14KEc 41 41 - mti,mips14Kc 42 + - mti,mips24KEc 42 43 43 44 reg: 44 45 maxItems: 1
+32
Documentation/devicetree/bindings/mips/mobileye.yaml
··· 1 + # SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause 2 + # Copyright 2023 Mobileye Vision Technologies Ltd. 3 + %YAML 1.2 4 + --- 5 + $id: http://devicetree.org/schemas/mips/mobileye.yaml# 6 + $schema: http://devicetree.org/meta-schemas/core.yaml# 7 + 8 + title: Mobileye SoC series 9 + 10 + maintainers: 11 + - Vladimir Kondratiev <vladimir.kondratiev@intel.com> 12 + - Gregory CLEMENT <gregory.clement@bootlin.com> 13 + - Théo Lebrun <theo.lebrun@bootlin.com> 14 + 15 + description: 16 + Boards with a Mobileye SoC shall have the following properties. 17 + 18 + properties: 19 + $nodename: 20 + const: '/' 21 + 22 + compatible: 23 + oneOf: 24 + - description: Boards with Mobileye EyeQ5 SoC 25 + items: 26 + - enum: 27 + - mobileye,eyeq5-epm5 28 + - const: mobileye,eyeq5 29 + 30 + additionalProperties: true 31 + 32 + ...
+2
Documentation/devicetree/bindings/vendor-prefixes.yaml
··· 941 941 description: Miyoo 942 942 "^mntre,.*": 943 943 description: MNT Research GmbH 944 + "^mobileye,.*": 945 + description: Mobileye Vision Technologies Ltd. 944 946 "^modtronix,.*": 945 947 description: Modtronix Engineering 946 948 "^moortec,.*":
+22
MAINTAINERS
··· 14741 14741 F: drivers/platform/mips/ 14742 14742 F: include/dt-bindings/mips/ 14743 14743 14744 + MIPS BAIKAL-T1 PLATFORM 14745 + M: Serge Semin <fancer.lancer@gmail.com> 14746 + L: linux-mips@vger.kernel.org 14747 + S: Supported 14748 + F: Documentation/devicetree/bindings/bus/baikal,bt1-*.yaml 14749 + F: Documentation/devicetree/bindings/clock/baikal,bt1-*.yaml 14750 + F: drivers/bus/bt1-*.c 14751 + F: drivers/clk/baikal-t1/ 14752 + F: drivers/memory/bt1-l2-ctl.c 14753 + F: drivers/mtd/maps/physmap-bt1-rom.[ch] 14754 + 14744 14755 MIPS BOSTON DEVELOPMENT BOARD 14745 14756 M: Paul Burton <paulburton@kernel.org> 14746 14757 L: linux-mips@vger.kernel.org ··· 14871 14860 W: https://linuxtv.org 14872 14861 Q: http://patchwork.linuxtv.org/project/linux-media/list/ 14873 14862 F: drivers/media/dvb-frontends/mn88473* 14863 + 14864 + MOBILEYE MIPS SOCS 14865 + M: Vladimir Kondratiev <vladimir.kondratiev@mobileye.com> 14866 + M: Gregory CLEMENT <gregory.clement@bootlin.com> 14867 + M: Théo Lebrun <theo.lebrun@bootlin.com> 14868 + L: linux-mips@vger.kernel.org 14869 + S: Maintained 14870 + F: Documentation/devicetree/bindings/mips/mobileye.yaml 14871 + F: arch/mips/boot/dts/mobileye/ 14872 + F: arch/mips/configs/eyeq5_defconfig 14873 + F: arch/mips/mobileye/board-epm5.its.S 14874 14874 14875 14875 MODULE SUPPORT 14876 14876 M: Luis Chamberlain <mcgrof@kernel.org>
+1
arch/mips/Kbuild
··· 11 11 # mips object files 12 12 # The object files are linked as core-y files would be linked 13 13 14 + obj-y += generic/ 14 15 obj-y += kernel/ 15 16 obj-y += mm/ 16 17 obj-y += net/
+1
arch/mips/Kbuild.platforms
··· 17 17 platform-$(CONFIG_MACH_LOONGSON32) += loongson32/ 18 18 platform-$(CONFIG_MACH_LOONGSON64) += loongson64/ 19 19 platform-$(CONFIG_MIPS_MALTA) += mti-malta/ 20 + platform-$(CONFIG_MACH_EYEQ5) += mobileye/ 20 21 platform-$(CONFIG_MACH_NINTENDO64) += n64/ 21 22 platform-$(CONFIG_PIC32MZDA) += pic32/ 22 23 platform-$(CONFIG_RALINK) += ralink/
+99 -39
arch/mips/Kconfig
··· 113 113 config MIPS_GENERIC 114 114 bool 115 115 116 + config MACH_GENERIC_CORE 117 + bool 118 + 116 119 config MACH_INGENIC 117 120 bool 118 121 select SYS_SUPPORTS_32BIT_KERNEL ··· 152 149 select DMA_NONCOHERENT 153 150 select HAVE_PCI 154 151 select IRQ_MIPS_CPU 152 + select MACH_GENERIC_CORE 155 153 select MIPS_AUTO_PFN_OFFSET 156 154 select MIPS_CPU_SCACHE 157 155 select MIPS_GIC ··· 421 417 bool "Ingenic SoC based machines" 422 418 select MIPS_GENERIC 423 419 select MACH_INGENIC 420 + select MACH_GENERIC_CORE 424 421 select SYS_SUPPORTS_ZBOOT_UART16550 425 422 select CPU_SUPPORTS_CPUFREQ 426 423 select MIPS_EXTERNAL_TIMER ··· 575 570 Microchip PIC32 is a family of general-purpose 32 bit MIPS core 576 571 microcontrollers. 577 572 573 + config MACH_EYEQ5 574 + bool "Mobileye EyeQ5 SoC" 575 + select MACH_GENERIC_CORE 576 + select ARM_AMBA 577 + select PHYSICAL_START_BOOL 578 + select ARCH_SPARSEMEM_DEFAULT if 64BIT 579 + select BOOT_RAW 580 + select BUILTIN_DTB 581 + select CEVT_R4K 582 + select CLKSRC_MIPS_GIC 583 + select COMMON_CLK 584 + select CPU_MIPSR2_IRQ_EI 585 + select CPU_MIPSR2_IRQ_VI 586 + select CSRC_R4K 587 + select DMA_NONCOHERENT 588 + select HAVE_PCI 589 + select IRQ_MIPS_CPU 590 + select MIPS_AUTO_PFN_OFFSET 591 + select MIPS_CPU_SCACHE 592 + select MIPS_GIC 593 + select MIPS_L1_CACHE_SHIFT_7 594 + select PCI_DRIVERS_GENERIC 595 + select SMP_UP if SMP 596 + select SWAP_IO_SPACE 597 + select SYS_HAS_CPU_MIPS64_R6 598 + select SYS_SUPPORTS_64BIT_KERNEL 599 + select SYS_SUPPORTS_HIGHMEM 600 + select SYS_SUPPORTS_LITTLE_ENDIAN 601 + select SYS_SUPPORTS_MIPS_CPS 602 + select SYS_SUPPORTS_RELOCATABLE 603 + select SYS_SUPPORTS_ZBOOT 604 + select UHI_BOOT 605 + select USB_EHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN 606 + select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN 607 + select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN 608 + select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN 609 + select USB_UHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN 610 + select 
USB_UHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN 611 + select USE_OF 612 + help 613 + Select this to build a kernel supporting EyeQ5 SoC from Mobileye. 614 + 615 + bool 616 + 617 + config FIT_IMAGE_FDT_EPM5 618 + bool "Include FDT for Mobileye EyeQ5 development platforms" 619 + depends on MACH_EYEQ5 620 + default n 621 + help 622 + Enable this to include the FDT for the EyeQ5 development platforms 623 + from Mobileye in the FIT kernel image. 624 + This requires u-boot on the platform. 625 + 578 626 config MACH_NINTENDO64 579 627 bool "Nintendo 64 console" 580 628 select CEVT_R4K ··· 661 603 config MACH_REALTEK_RTL 662 604 bool "Realtek RTL838x/RTL839x based machines" 663 605 select MIPS_GENERIC 606 + select MACH_GENERIC_CORE 664 607 select DMA_NONCOHERENT 665 608 select IRQ_MIPS_CPU 666 609 select CSRC_R4K ··· 1332 1273 3B1000, 3B1500, 3A2000, 3A3000 and 3A4000) processors. However, old 1333 1274 Loongson-2E/2F is not covered here and will be removed in future. 1334 1275 1335 - config LOONGSON3_ENHANCEMENT 1336 - bool "New Loongson-3 CPU Enhancements" 1337 - default n 1338 - depends on CPU_LOONGSON64 1339 - help 1340 - New Loongson-3 cores (since Loongson-3A R2, as opposed to Loongson-3A 1341 - R1, Loongson-3B R1 and Loongson-3B R2) has many enhancements, such as 1342 - FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPr2 ASE, User 1343 - Local register, Read-Inhibit/Execute-Inhibit, SFB (Store Fill Buffer), 1344 - Fast TLB refill support, etc. 1345 - 1346 - This option enable those enhancements which are not probed at run 1347 - time. If you want a generic kernel to run on all Loongson 3 machines, 1348 - please say 'N' here. If you want a high-performance kernel to run on 1349 - new Loongson-3 machines only, please say 'Y' here. 1350 - 1351 - config CPU_LOONGSON3_WORKAROUNDS 1352 - bool "Loongson-3 LLSC Workarounds" 1353 - default y if SMP 1354 - depends on CPU_LOONGSON64 1355 - help 1356 - Loongson-3 processors have the llsc issues which require workarounds. 
1357 - Without workarounds the system may hang unexpectedly. 1358 - 1359 - Say Y, unless you know what you are doing. 1360 - 1361 - config CPU_LOONGSON3_CPUCFG_EMULATION 1362 - bool "Emulate the CPUCFG instruction on older Loongson cores" 1363 - default y 1364 - depends on CPU_LOONGSON64 1365 - help 1366 - Loongson-3A R4 and newer have the CPUCFG instruction available for 1367 - userland to query CPU capabilities, much like CPUID on x86. This 1368 - option provides emulation of the instruction on older Loongson 1369 - cores, back to Loongson-3A1000. 1370 - 1371 - If unsure, please say Y. 1372 - 1373 1276 config CPU_LOONGSON2E 1374 1277 bool "Loongson 2E" 1375 1278 depends on SYS_HAS_CPU_LOONGSON2E ··· 1670 1649 Support for BMIPS32/3300/4350/4380 and BMIPS5000 processors. 1671 1650 1672 1651 endchoice 1652 + 1653 + config LOONGSON3_ENHANCEMENT 1654 + bool "New Loongson-3 CPU Enhancements" 1655 + default n 1656 + depends on CPU_LOONGSON64 1657 + help 1658 + New Loongson-3 cores (since Loongson-3A R2, as opposed to Loongson-3A 1659 + R1, Loongson-3B R1 and Loongson-3B R2) has many enhancements, such as 1660 + FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPr2 ASE, User 1661 + Local register, Read-Inhibit/Execute-Inhibit, SFB (Store Fill Buffer), 1662 + Fast TLB refill support, etc. 1663 + 1664 + This option enable those enhancements which are not probed at run 1665 + time. If you want a generic kernel to run on all Loongson 3 machines, 1666 + please say 'N' here. If you want a high-performance kernel to run on 1667 + new Loongson-3 machines only, please say 'Y' here. 1668 + 1669 + config CPU_LOONGSON3_WORKAROUNDS 1670 + bool "Loongson-3 LLSC Workarounds" 1671 + default y if SMP 1672 + depends on CPU_LOONGSON64 1673 + help 1674 + Loongson-3 processors have the llsc issues which require workarounds. 1675 + Without workarounds the system may hang unexpectedly. 1676 + 1677 + Say Y, unless you know what you are doing. 
1678 + 1679 + config CPU_LOONGSON3_CPUCFG_EMULATION 1680 + bool "Emulate the CPUCFG instruction on older Loongson cores" 1681 + default y 1682 + depends on CPU_LOONGSON64 1683 + help 1684 + Loongson-3A R4 and newer have the CPUCFG instruction available for 1685 + userland to query CPU capabilities, much like CPUID on x86. This 1686 + option provides emulation of the instruction on older Loongson 1687 + cores, back to Loongson-3A1000. 1688 + 1689 + If unsure, please say Y. 1673 1690 1674 1691 config CPU_MIPS32_3_5_FEATURES 1675 1692 bool "MIPS32 Release 3.5 Features" ··· 2183 2124 config MIPS_MT_SMP 2184 2125 bool "MIPS MT SMP support (1 TC on each available VPE)" 2185 2126 default y 2186 - depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MIPSR6 && !CPU_MICROMIPS 2127 + depends on TARGET_ISA_REV > 0 && TARGET_ISA_REV < 6 2128 + depends on SYS_SUPPORTS_MULTITHREADING && !CPU_MICROMIPS 2187 2129 select CPU_MIPSR2_IRQ_VI 2188 2130 select CPU_MIPSR2_IRQ_EI 2189 2131 select SYNC_R4K
+22 -24
arch/mips/Makefile
··· 148 148 # 149 149 # CPU-dependent compiler/assembler options for optimization. 150 150 # 151 - cflags-$(CONFIG_CPU_R3000) += -march=r3000 152 - cflags-$(CONFIG_CPU_R4300) += -march=r4300 -Wa,--trap 153 - cflags-$(CONFIG_CPU_R4X00) += -march=r4600 -Wa,--trap 154 - cflags-$(CONFIG_CPU_TX49XX) += -march=r4600 -Wa,--trap 151 + cflags-$(CONFIG_CPU_R3000) += $(call cc-option,-march=r3000,-march=mips1) 152 + cflags-$(CONFIG_CPU_R4300) += $(call cc-option,-march=r4300,-march=mips3) -Wa,--trap 153 + cflags-$(CONFIG_CPU_R4X00) += $(call cc-option,-march=r4600,-march=mips3) -Wa,--trap 154 + cflags-$(CONFIG_CPU_TX49XX) += $(call cc-option,-march=r4600,-march=mips3) -Wa,--trap 155 155 cflags-$(CONFIG_CPU_MIPS32_R1) += -march=mips32 -Wa,--trap 156 156 cflags-$(CONFIG_CPU_MIPS32_R2) += -march=mips32r2 -Wa,--trap 157 157 cflags-$(CONFIG_CPU_MIPS32_R5) += -march=mips32r5 -Wa,--trap -modd-spreg ··· 160 160 cflags-$(CONFIG_CPU_MIPS64_R2) += -march=mips64r2 -Wa,--trap 161 161 cflags-$(CONFIG_CPU_MIPS64_R5) += -march=mips64r5 -Wa,--trap 162 162 cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap 163 - cflags-$(CONFIG_CPU_P5600) += -march=p5600 -Wa,--trap -modd-spreg 164 - cflags-$(CONFIG_CPU_R5000) += -march=r5000 -Wa,--trap 165 - cflags-$(CONFIG_CPU_R5500) += $(call cc-option,-march=r5500,-march=r5000) \ 163 + cflags-$(CONFIG_CPU_P5600) += $(call cc-option,-march=p5600,-march=mips32r5) \ 164 + -Wa,--trap -modd-spreg 165 + cflags-$(CONFIG_CPU_R5000) += $(call cc-option,-march=r5000,-march=mips4) \ 166 166 -Wa,--trap 167 - cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \ 167 + cflags-$(CONFIG_CPU_R5500) += $(call cc-option,-march=r5500,-march=mips4) \ 168 168 -Wa,--trap 169 - cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \ 169 + cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=mips4) \ 170 170 -Wa,--trap 171 - cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \ 171 + 
cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=mips4) \ 172 + -Wa,--trap 173 + cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=mips64r1) \ 172 174 -Wa,--trap 173 175 cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mdmx) 174 176 cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-mno-mips3d) 175 - cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \ 177 + cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=mips4) \ 176 178 -Wa,--trap 177 - cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += $(call cc-option,-march=octeon) -Wa,--trap 178 - ifeq (,$(findstring march=octeon, $(cflags-$(CONFIG_CPU_CAVIUM_OCTEON)))) 179 - cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon 180 - endif 179 + cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -march=octeon -Wa,--trap 181 180 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1 182 - cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,-mips32 -Wa,--trap 181 + cflags-$(CONFIG_CPU_BMIPS) += -march=mips32 -Wa,--trap 183 182 184 - cflags-$(CONFIG_CPU_LOONGSON2E) += -march=loongson2e -Wa,--trap 185 - cflags-$(CONFIG_CPU_LOONGSON2F) += -march=loongson2f -Wa,--trap 183 + cflags-$(CONFIG_CPU_LOONGSON2E) += \ 184 + $(call cc-option,-march=loongson2e,-march=mips3) -Wa,--trap 185 + cflags-$(CONFIG_CPU_LOONGSON2F) += \ 186 + $(call cc-option,-march=loongson2f,-march=mips3) -Wa,--trap 186 187 # Some -march= flags enable MMI instructions, and GCC complains about that 187 188 # support being enabled alongside -msoft-float. Thus explicitly disable MMI. 
188 189 cflags-$(CONFIG_CPU_LOONGSON2EF) += $(call cc-option,-mno-loongson-mmi) 189 - ifdef CONFIG_CPU_LOONGSON64 190 - cflags-$(CONFIG_CPU_LOONGSON64) += -Wa,--trap 191 - cflags-$(CONFIG_CC_IS_GCC) += -march=loongson3a 192 - cflags-$(CONFIG_CC_IS_CLANG) += -march=mips64r2 193 - endif 190 + cflags-$(CONFIG_CPU_LOONGSON64) += \ 191 + $(call cc-option,-march=loongson3a,-march=mips64r2) -Wa,--trap 194 192 cflags-$(CONFIG_CPU_LOONGSON64) += $(call cc-option,-mno-loongson-mmi) 195 193 196 194 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,) ··· 297 299 ifdef CONFIG_64BIT 298 300 ifndef KBUILD_SYM32 299 301 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000), 0) 300 - KBUILD_SYM32 = y 302 + KBUILD_SYM32 = $(call cc-option-yn, -msym32) 301 303 endif 302 304 endif 303 305
+1 -1
arch/mips/alchemy/common/clock.c
··· 771 771 } 772 772 id.flags = CLK_SET_RATE_PARENT | CLK_GET_RATE_NOCACHE; 773 773 774 - a = kzalloc((sizeof(*a)) * 6, GFP_KERNEL); 774 + a = kcalloc(6, sizeof(*a), GFP_KERNEL); 775 775 if (!a) 776 776 return -ENOMEM; 777 777
+2
arch/mips/boot/compressed/uart-16550.c
··· 8 8 9 9 #include <asm/addrspace.h> 10 10 11 + #include "decompress.h" 12 + 11 13 #if defined(CONFIG_MACH_LOONGSON64) || defined(CONFIG_MIPS_MALTA) 12 14 #define UART_BASE 0x1fd003f8 13 15 #define PORT(offset) (CKSEG1ADDR(UART_BASE) + (offset))
+2
arch/mips/boot/compressed/uart-alchemy.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <asm/mach-au1x00/au1000.h> 3 3 4 + #include "decompress.h" 5 + 4 6 void putc(char c) 5 7 { 6 8 alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
+2
arch/mips/boot/compressed/uart-prom.c
··· 1 1 // SPDX-License-Identifier: GPL-2.0 2 2 #include <asm/setup.h> 3 3 4 + #include "decompress.h" 5 + 4 6 void putc(char c) 5 7 { 6 8 prom_putchar(c);
+1
arch/mips/boot/dts/Makefile
··· 8 8 subdir-$(CONFIG_MACH_LOONGSON64) += loongson 9 9 subdir-$(CONFIG_SOC_VCOREIII) += mscc 10 10 subdir-$(CONFIG_MIPS_MALTA) += mti 11 + subdir-$(CONFIG_MACH_EYEQ5) += mobileye 11 12 subdir-$(CONFIG_LEGACY_BOARD_SEAD3) += mti 12 13 subdir-$(CONFIG_FIT_IMAGE_FDT_NI169445) += ni 13 14 subdir-$(CONFIG_MACH_PIC32) += pic32
+4
arch/mips/boot/dts/mobileye/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0-only 2 + # Copyright 2023 Mobileye Vision Technologies Ltd. 3 + 4 + dtb-$(CONFIG_MACH_EYEQ5) += eyeq5-epm5.dtb
+23
arch/mips/boot/dts/mobileye/eyeq5-epm5.dts
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* 3 + * Copyright 2023 Mobileye Vision Technologies Ltd. 4 + */ 5 + 6 + /dts-v1/; 7 + 8 + #include "eyeq5.dtsi" 9 + 10 + / { 11 + compatible = "mobileye,eyeq5-epm5", "mobileye,eyeq5"; 12 + model = "Mobile EyeQ5 MP5 Evaluation board"; 13 + 14 + chosen { 15 + stdout-path = "serial2:115200n8"; 16 + }; 17 + 18 + memory@0 { 19 + device_type = "memory"; 20 + reg = <0x0 0x40000000 0x0 0x02000000>, 21 + <0x8 0x02000000 0x0 0x7E000000>; 22 + }; 23 + };
+292
arch/mips/boot/dts/mobileye/eyeq5-fixed-clocks.dtsi
··· 1 + // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) 2 + /* 3 + * Copyright 2023 Mobileye Vision Technologies Ltd. 4 + */ 5 + 6 + / { 7 + /* Fixed clock */ 8 + pll_cpu: pll-cpu { 9 + compatible = "fixed-clock"; 10 + #clock-cells = <0>; 11 + clock-frequency = <1500000000>; 12 + }; 13 + 14 + pll_vdi: pll-vdi { 15 + compatible = "fixed-clock"; 16 + #clock-cells = <0>; 17 + clock-frequency = <1280000000>; 18 + }; 19 + 20 + pll_per: pll-per { 21 + compatible = "fixed-clock"; 22 + #clock-cells = <0>; 23 + clock-frequency = <2000000000>; 24 + }; 25 + 26 + pll_ddr0: pll-ddr0 { 27 + compatible = "fixed-clock"; 28 + #clock-cells = <0>; 29 + clock-frequency = <1857210000>; 30 + }; 31 + 32 + pll_ddr1: pll-ddr1 { 33 + compatible = "fixed-clock"; 34 + #clock-cells = <0>; 35 + clock-frequency = <1857210000>; 36 + }; 37 + 38 + /* PLL_CPU derivatives */ 39 + occ_cpu: occ-cpu { 40 + compatible = "fixed-factor-clock"; 41 + clocks = <&pll_cpu>; 42 + #clock-cells = <0>; 43 + clock-div = <1>; 44 + clock-mult = <1>; 45 + }; 46 + si_css0_ref_clk: si-css0-ref-clk { /* gate ClkRstGen_si_css0_ref */ 47 + compatible = "fixed-factor-clock"; 48 + clocks = <&occ_cpu>; 49 + #clock-cells = <0>; 50 + clock-div = <1>; 51 + clock-mult = <1>; 52 + }; 53 + cpc_clk: cpc-clk { 54 + compatible = "fixed-factor-clock"; 55 + clocks = <&si_css0_ref_clk>; 56 + #clock-cells = <0>; 57 + clock-div = <1>; 58 + clock-mult = <1>; 59 + }; 60 + core0_clk: core0-clk { 61 + compatible = "fixed-factor-clock"; 62 + clocks = <&si_css0_ref_clk>; 63 + #clock-cells = <0>; 64 + clock-div = <1>; 65 + clock-mult = <1>; 66 + }; 67 + core1_clk: core1-clk { 68 + compatible = "fixed-factor-clock"; 69 + clocks = <&si_css0_ref_clk>; 70 + #clock-cells = <0>; 71 + clock-div = <1>; 72 + clock-mult = <1>; 73 + }; 74 + core2_clk: core2-clk { 75 + compatible = "fixed-factor-clock"; 76 + clocks = <&si_css0_ref_clk>; 77 + #clock-cells = <0>; 78 + clock-div = <1>; 79 + clock-mult = <1>; 80 + }; 81 + core3_clk: core3-clk { 82 + 
compatible = "fixed-factor-clock"; 83 + clocks = <&si_css0_ref_clk>; 84 + #clock-cells = <0>; 85 + clock-div = <1>; 86 + clock-mult = <1>; 87 + }; 88 + cm_clk: cm-clk { 89 + compatible = "fixed-factor-clock"; 90 + clocks = <&si_css0_ref_clk>; 91 + #clock-cells = <0>; 92 + clock-div = <1>; 93 + clock-mult = <1>; 94 + }; 95 + mem_clk: mem-clk { 96 + compatible = "fixed-factor-clock"; 97 + clocks = <&si_css0_ref_clk>; 98 + #clock-cells = <0>; 99 + clock-div = <1>; 100 + clock-mult = <1>; 101 + }; 102 + occ_isram: occ-isram { 103 + compatible = "fixed-factor-clock"; 104 + clocks = <&pll_cpu>; 105 + #clock-cells = <0>; 106 + clock-div = <2>; 107 + clock-mult = <1>; 108 + }; 109 + isram_clk: isram-clk { /* gate ClkRstGen_isram */ 110 + compatible = "fixed-factor-clock"; 111 + clocks = <&occ_isram>; 112 + #clock-cells = <0>; 113 + clock-div = <1>; 114 + clock-mult = <1>; 115 + }; 116 + occ_dbu: occ-dbu { 117 + compatible = "fixed-factor-clock"; 118 + clocks = <&pll_cpu>; 119 + #clock-cells = <0>; 120 + clock-div = <10>; 121 + clock-mult = <1>; 122 + }; 123 + si_dbu_tp_pclk: si-dbu-tp-pclk { /* gate ClkRstGen_dbu */ 124 + compatible = "fixed-factor-clock"; 125 + clocks = <&occ_dbu>; 126 + #clock-cells = <0>; 127 + clock-div = <1>; 128 + clock-mult = <1>; 129 + }; 130 + /* PLL_VDI derivatives */ 131 + occ_vdi: occ-vdi { 132 + compatible = "fixed-factor-clock"; 133 + clocks = <&pll_vdi>; 134 + #clock-cells = <0>; 135 + clock-div = <2>; 136 + clock-mult = <1>; 137 + }; 138 + vdi_clk: vdi-clk { /* gate ClkRstGen_vdi */ 139 + compatible = "fixed-factor-clock"; 140 + clocks = <&occ_vdi>; 141 + #clock-cells = <0>; 142 + clock-div = <1>; 143 + clock-mult = <1>; 144 + }; 145 + occ_can_ser: occ-can-ser { 146 + compatible = "fixed-factor-clock"; 147 + clocks = <&pll_vdi>; 148 + #clock-cells = <0>; 149 + clock-div = <16>; 150 + clock-mult = <1>; 151 + }; 152 + can_ser_clk: can-ser-clk { /* gate ClkRstGen_can_ser */ 153 + compatible = "fixed-factor-clock"; 154 + clocks = 
<&occ_can_ser>; 155 + #clock-cells = <0>; 156 + clock-div = <1>; 157 + clock-mult = <1>; 158 + }; 159 + i2c_ser_clk: i2c-ser-clk { 160 + compatible = "fixed-factor-clock"; 161 + clocks = <&pll_vdi>; 162 + #clock-cells = <0>; 163 + clock-div = <20>; 164 + clock-mult = <1>; 165 + }; 166 + /* PLL_PER derivatives */ 167 + occ_periph: occ-periph { 168 + compatible = "fixed-factor-clock"; 169 + clocks = <&pll_per>; 170 + #clock-cells = <0>; 171 + clock-div = <16>; 172 + clock-mult = <1>; 173 + }; 174 + periph_clk: periph-clk { 175 + compatible = "fixed-factor-clock"; 176 + clocks = <&occ_periph>; 177 + #clock-cells = <0>; 178 + clock-div = <1>; 179 + clock-mult = <1>; 180 + }; 181 + can_clk: can-clk { 182 + compatible = "fixed-factor-clock"; 183 + clocks = <&occ_periph>; 184 + #clock-cells = <0>; 185 + clock-div = <1>; 186 + clock-mult = <1>; 187 + }; 188 + spi_clk: spi-clk { 189 + compatible = "fixed-factor-clock"; 190 + clocks = <&occ_periph>; 191 + #clock-cells = <0>; 192 + clock-div = <1>; 193 + clock-mult = <1>; 194 + }; 195 + uart_clk: uart-clk { 196 + compatible = "fixed-factor-clock"; 197 + clocks = <&occ_periph>; 198 + #clock-cells = <0>; 199 + clock-div = <1>; 200 + clock-mult = <1>; 201 + }; 202 + i2c_clk: i2c-clk { 203 + compatible = "fixed-factor-clock"; 204 + clocks = <&occ_periph>; 205 + #clock-cells = <0>; 206 + clock-div = <1>; 207 + clock-mult = <1>; 208 + clock-output-names = "i2c_clk"; 209 + }; 210 + timer_clk: timer-clk { 211 + compatible = "fixed-factor-clock"; 212 + clocks = <&occ_periph>; 213 + #clock-cells = <0>; 214 + clock-div = <1>; 215 + clock-mult = <1>; 216 + clock-output-names = "timer_clk"; 217 + }; 218 + gpio_clk: gpio-clk { 219 + compatible = "fixed-factor-clock"; 220 + clocks = <&occ_periph>; 221 + #clock-cells = <0>; 222 + clock-div = <1>; 223 + clock-mult = <1>; 224 + clock-output-names = "gpio_clk"; 225 + }; 226 + emmc_sys_clk: emmc-sys-clk { 227 + compatible = "fixed-factor-clock"; 228 + clocks = <&pll_per>; 229 + #clock-cells = 
<0>; 230 + clock-div = <10>; 231 + clock-mult = <1>; 232 + clock-output-names = "emmc_sys_clk"; 233 + }; 234 + ccf_ctrl_clk: ccf-ctrl-clk { 235 + compatible = "fixed-factor-clock"; 236 + clocks = <&pll_per>; 237 + #clock-cells = <0>; 238 + clock-div = <4>; 239 + clock-mult = <1>; 240 + clock-output-names = "ccf_ctrl_clk"; 241 + }; 242 + occ_mjpeg_core: occ-mjpeg-core { 243 + compatible = "fixed-factor-clock"; 244 + clocks = <&pll_per>; 245 + #clock-cells = <0>; 246 + clock-div = <2>; 247 + clock-mult = <1>; 248 + clock-output-names = "occ_mjpeg_core"; 249 + }; 250 + hsm_clk: hsm-clk { /* gate ClkRstGen_hsm */ 251 + compatible = "fixed-factor-clock"; 252 + clocks = <&occ_mjpeg_core>; 253 + #clock-cells = <0>; 254 + clock-div = <1>; 255 + clock-mult = <1>; 256 + clock-output-names = "hsm_clk"; 257 + }; 258 + mjpeg_core_clk: mjpeg-core-clk { /* gate ClkRstGen_mjpeg_gen */ 259 + compatible = "fixed-factor-clock"; 260 + clocks = <&occ_mjpeg_core>; 261 + #clock-cells = <0>; 262 + clock-div = <1>; 263 + clock-mult = <1>; 264 + clock-output-names = "mjpeg_core_clk"; 265 + }; 266 + fcmu_a_clk: fcmu-a-clk { 267 + compatible = "fixed-factor-clock"; 268 + clocks = <&pll_per>; 269 + #clock-cells = <0>; 270 + clock-div = <20>; 271 + clock-mult = <1>; 272 + clock-output-names = "fcmu_a_clk"; 273 + }; 274 + occ_pci_sys: occ-pci-sys { 275 + compatible = "fixed-factor-clock"; 276 + clocks = <&pll_per>; 277 + #clock-cells = <0>; 278 + clock-div = <8>; 279 + clock-mult = <1>; 280 + clock-output-names = "occ_pci_sys"; 281 + }; 282 + pclk: pclk { 283 + compatible = "fixed-clock"; 284 + #clock-cells = <0>; 285 + clock-frequency = <250000000>; /* 250MHz */ 286 + }; 287 + tsu_clk: tsu-clk { 288 + compatible = "fixed-clock"; 289 + #clock-cells = <0>; 290 + clock-frequency = <125000000>; /* 125MHz */ 291 + }; 292 + };
+124
arch/mips/boot/dts/mobileye/eyeq5.dtsi
··· 1 + // SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */ 2 + /* 3 + * Copyright 2023 Mobileye Vision Technologies Ltd. 4 + */ 5 + 6 + #include <dt-bindings/interrupt-controller/mips-gic.h> 7 + 8 + #include "eyeq5-fixed-clocks.dtsi" 9 + 10 + / { 11 + #address-cells = <2>; 12 + #size-cells = <2>; 13 + cpus { 14 + #address-cells = <1>; 15 + #size-cells = <0>; 16 + cpu@0 { 17 + device_type = "cpu"; 18 + compatible = "img,i6500"; 19 + reg = <0>; 20 + clocks = <&core0_clk>; 21 + }; 22 + }; 23 + 24 + reserved-memory { 25 + #address-cells = <2>; 26 + #size-cells = <2>; 27 + ranges; 28 + 29 + /* These reserved memory regions are also defined in bootmanager 30 + * for configuring inbound translation for BARS, don't change 31 + * these without syncing with bootmanager 32 + */ 33 + shmem0_reserved: shmem@804000000 { 34 + reg = <0x8 0x04000000 0x0 0x1000000>; 35 + }; 36 + shmem1_reserved: shmem@805000000 { 37 + reg = <0x8 0x05000000 0x0 0x1000000>; 38 + }; 39 + pci0_msi_reserved: pci0-msi@806000000 { 40 + reg = <0x8 0x06000000 0x0 0x100000>; 41 + }; 42 + pci1_msi_reserved: pci1-msi@806100000 { 43 + reg = <0x8 0x06100000 0x0 0x100000>; 44 + }; 45 + 46 + mini_coredump0_reserved: mini-coredump0@806200000 { 47 + reg = <0x8 0x06200000 0x0 0x100000>; 48 + }; 49 + mhm_reserved_0: the-mhm-reserved-0@0 { 50 + reg = <0x8 0x00000000 0x0 0x0000800>; 51 + }; 52 + }; 53 + 54 + aliases { 55 + serial0 = &uart0; 56 + serial1 = &uart1; 57 + serial2 = &uart2; 58 + }; 59 + 60 + cpu_intc: interrupt-controller { 61 + compatible = "mti,cpu-interrupt-controller"; 62 + interrupt-controller; 63 + #address-cells = <0>; 64 + #interrupt-cells = <1>; 65 + }; 66 + 67 + soc: soc { 68 + #address-cells = <2>; 69 + #size-cells = <2>; 70 + ranges; 71 + compatible = "simple-bus"; 72 + 73 + uart0: serial@800000 { 74 + compatible = "arm,pl011", "arm,primecell"; 75 + reg = <0 0x800000 0x0 0x1000>; 76 + reg-io-width = <4>; 77 + interrupt-parent = <&gic>; 78 + interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>; 
79 + clocks = <&uart_clk>, <&occ_periph>; 80 + clock-names = "uartclk", "apb_pclk"; 81 + }; 82 + 83 + uart1: serial@900000 { 84 + compatible = "arm,pl011", "arm,primecell"; 85 + reg = <0 0x900000 0x0 0x1000>; 86 + reg-io-width = <4>; 87 + interrupt-parent = <&gic>; 88 + interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>; 89 + clocks = <&uart_clk>, <&occ_periph>; 90 + clock-names = "uartclk", "apb_pclk"; 91 + }; 92 + 93 + uart2: serial@a00000 { 94 + compatible = "arm,pl011", "arm,primecell"; 95 + reg = <0 0xa00000 0x0 0x1000>; 96 + reg-io-width = <4>; 97 + interrupt-parent = <&gic>; 98 + interrupts = <GIC_SHARED 6 IRQ_TYPE_LEVEL_HIGH>; 99 + clocks = <&uart_clk>, <&occ_periph>; 100 + clock-names = "uartclk", "apb_pclk"; 101 + }; 102 + 103 + gic: interrupt-controller@140000 { 104 + compatible = "mti,gic"; 105 + reg = <0x0 0x140000 0x0 0x20000>; 106 + interrupt-controller; 107 + #interrupt-cells = <3>; 108 + 109 + /* 110 + * Declare the interrupt-parent even though the mti,gic 111 + * binding doesn't require it, such that the kernel can 112 + * figure out that cpu_intc is the root interrupt 113 + * controller & should be probed first. 114 + */ 115 + interrupt-parent = <&cpu_intc>; 116 + 117 + timer { 118 + compatible = "mti,gic-timer"; 119 + interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>; 120 + clocks = <&core0_clk>; 121 + }; 122 + }; 123 + }; 124 + };
+49 -2
arch/mips/boot/dts/ralink/mt7621.dtsi
··· 115 115 compatible = "ns16550a"; 116 116 reg = <0xc00 0x100>; 117 117 118 + reg-io-width = <4>; 119 + reg-shift = <2>; 120 + 118 121 clocks = <&sysc MT7621_CLK_UART1>; 119 122 120 123 interrupt-parent = <&gic>; 121 124 interrupts = <GIC_SHARED 26 IRQ_TYPE_LEVEL_HIGH>; 122 125 123 - reg-shift = <2>; 124 - reg-io-width = <4>; 125 126 no-loopback-test; 127 + 128 + pinctrl-names = "default"; 129 + pinctrl-0 = <&uart1_pins>; 130 + }; 131 + 132 + serial1: serial@d00 { 133 + compatible = "ns16550a"; 134 + reg = <0xd00 0x100>; 135 + 136 + reg-io-width = <4>; 137 + reg-shift = <2>; 138 + 139 + clocks = <&sysc MT7621_CLK_UART2>; 140 + 141 + interrupt-parent = <&gic>; 142 + interrupts = <GIC_SHARED 27 IRQ_TYPE_LEVEL_HIGH>; 143 + 144 + no-loopback-test; 145 + 146 + pinctrl-names = "default"; 147 + pinctrl-0 = <&uart2_pins>; 148 + 149 + status = "disabled"; 150 + }; 151 + 152 + serial2: serial@e00 { 153 + compatible = "ns16550a"; 154 + reg = <0xe00 0x100>; 155 + 156 + reg-io-width = <4>; 157 + reg-shift = <2>; 158 + 159 + clocks = <&sysc MT7621_CLK_UART3>; 160 + 161 + interrupt-parent = <&gic>; 162 + interrupts = <GIC_SHARED 28 IRQ_TYPE_LEVEL_HIGH>; 163 + 164 + no-loopback-test; 165 + 166 + pinctrl-names = "default"; 167 + pinctrl-0 = <&uart3_pins>; 168 + 169 + status = "disabled"; 126 170 }; 127 171 128 172 spi0: spi@b00 { ··· 306 262 reg = <0x1e1c0000 0x1000 307 263 0x1e1d0700 0x0100>; 308 264 reg-names = "mac", "ippc"; 265 + 266 + #address-cells = <1>; 267 + #size-cells = <0>; 309 268 310 269 clocks = <&sysc MT7621_CLK_XTAL>; 311 270 clock-names = "sys_ck";
+108
arch/mips/configs/eyeq5_defconfig
··· 1 + CONFIG_SYSVIPC=y 2 + CONFIG_NO_HZ_IDLE=y 3 + CONFIG_HIGH_RES_TIMERS=y 4 + CONFIG_BPF_SYSCALL=y 5 + CONFIG_TASKSTATS=y 6 + CONFIG_IKCONFIG=y 7 + CONFIG_IKCONFIG_PROC=y 8 + CONFIG_MEMCG=y 9 + CONFIG_BLK_CGROUP=y 10 + CONFIG_CFS_BANDWIDTH=y 11 + CONFIG_RT_GROUP_SCHED=y 12 + CONFIG_CGROUP_PIDS=y 13 + CONFIG_CGROUP_FREEZER=y 14 + CONFIG_CPUSETS=y 15 + CONFIG_CGROUP_DEVICE=y 16 + CONFIG_CGROUP_CPUACCT=y 17 + CONFIG_NAMESPACES=y 18 + CONFIG_USER_NS=y 19 + CONFIG_SCHED_AUTOGROUP=y 20 + CONFIG_BLK_DEV_INITRD=y 21 + CONFIG_EXPERT=y 22 + CONFIG_MACH_EYEQ5=y 23 + CONFIG_FIT_IMAGE_FDT_EPM5=y 24 + CONFIG_PAGE_SIZE_16KB=y 25 + CONFIG_MIPS_CPS=y 26 + CONFIG_CPU_HAS_MSA=y 27 + CONFIG_NR_CPUS=16 28 + CONFIG_MIPS_RAW_APPENDED_DTB=y 29 + CONFIG_JUMP_LABEL=y 30 + CONFIG_COMPAT_32BIT_TIME=y 31 + CONFIG_MODULES=y 32 + CONFIG_MODULE_UNLOAD=y 33 + CONFIG_TRIM_UNUSED_KSYMS=y 34 + # CONFIG_COMPAT_BRK is not set 35 + CONFIG_SPARSEMEM_MANUAL=y 36 + CONFIG_USERFAULTFD=y 37 + CONFIG_NET=y 38 + CONFIG_PACKET=y 39 + CONFIG_UNIX=y 40 + CONFIG_NET_KEY=y 41 + CONFIG_INET=y 42 + CONFIG_IP_PNP=y 43 + CONFIG_IP_PNP_DHCP=y 44 + CONFIG_NETFILTER=y 45 + CONFIG_CAN=y 46 + CONFIG_PCI=y 47 + CONFIG_PCI_MSI=y 48 + CONFIG_PCI_DEBUG=y 49 + CONFIG_PCI_ENDPOINT=y 50 + CONFIG_DEVTMPFS=y 51 + CONFIG_DEVTMPFS_MOUNT=y 52 + CONFIG_CONNECTOR=y 53 + CONFIG_MTD=y 54 + CONFIG_MTD_UBI=y 55 + CONFIG_MTD_UBI_BLOCK=y 56 + CONFIG_SCSI=y 57 + CONFIG_NETDEVICES=y 58 + CONFIG_MACVLAN=y 59 + CONFIG_IPVLAN=y 60 + CONFIG_MACB=y 61 + CONFIG_MARVELL_PHY=y 62 + CONFIG_MICREL_PHY=y 63 + CONFIG_CAN_M_CAN=y 64 + CONFIG_SERIAL_AMBA_PL011=y 65 + CONFIG_SERIAL_AMBA_PL011_CONSOLE=y 66 + CONFIG_HW_RANDOM=y 67 + # CONFIG_PTP_1588_CLOCK is not set 68 + CONFIG_PINCTRL=y 69 + CONFIG_MFD_SYSCON=y 70 + CONFIG_HID_A4TECH=y 71 + CONFIG_HID_BELKIN=y 72 + CONFIG_HID_CHERRY=y 73 + CONFIG_HID_CYPRESS=y 74 + CONFIG_HID_EZKEY=y 75 + CONFIG_HID_ITE=y 76 + CONFIG_HID_KENSINGTON=y 77 + CONFIG_HID_REDRAGON=y 78 + CONFIG_HID_MICROSOFT=y 79 + 
CONFIG_HID_MONTEREY=y 80 + CONFIG_MMC=y 81 + CONFIG_MMC_SDHCI=y 82 + # CONFIG_IOMMU_SUPPORT is not set 83 + CONFIG_RESET_CONTROLLER=y 84 + # CONFIG_NVMEM is not set 85 + CONFIG_EXT4_FS=y 86 + CONFIG_EXT4_FS_POSIX_ACL=y 87 + CONFIG_EXT4_FS_SECURITY=y 88 + CONFIG_FS_ENCRYPTION=y 89 + CONFIG_FUSE_FS=y 90 + CONFIG_CUSE=y 91 + CONFIG_MSDOS_FS=y 92 + CONFIG_VFAT_FS=y 93 + CONFIG_TMPFS=y 94 + CONFIG_TMPFS_POSIX_ACL=y 95 + CONFIG_UBIFS_FS=y 96 + CONFIG_NFS_FS=y 97 + CONFIG_NFS_V3_ACL=y 98 + CONFIG_NFS_V4=y 99 + CONFIG_NFS_V4_1=y 100 + CONFIG_NFS_V4_2=y 101 + CONFIG_ROOT_NFS=y 102 + CONFIG_CRYPTO_CRC32_MIPS=y 103 + CONFIG_FRAME_WARN=1024 104 + CONFIG_DEBUG_FS=y 105 + # CONFIG_RCU_TRACE is not set 106 + # CONFIG_FTRACE is not set 107 + CONFIG_CMDLINE_BOOL=y 108 + CONFIG_CMDLINE="earlycon"
+3 -3
arch/mips/generic/Makefile
··· 4 4 # Author: Paul Burton <paul.burton@mips.com> 5 5 # 6 6 7 - obj-y += init.o 8 - obj-y += irq.o 9 - obj-y += proc.o 7 + obj-$(CONFIG_MACH_GENERIC_CORE) += init.o 8 + obj-$(CONFIG_MACH_GENERIC_CORE) += irq.o 9 + obj-$(CONFIG_MACH_GENERIC_CORE) += proc.o 10 10 11 11 obj-$(CONFIG_YAMON_DT_SHIM) += yamon-dt.o 12 12 obj-$(CONFIG_LEGACY_BOARD_SEAD3) += board-sead3.o
+5
arch/mips/include/asm/addrspace.h
··· 48 48 #define KSEGX(a) ((_ACAST32_(a)) & _ACAST32_(0xe0000000)) 49 49 50 50 /* 51 + * Gives the size of each kernel segment 52 + */ 53 + #define CSEGX_SIZE 0x20000000 54 + 55 + /* 51 56 * Returns the physical address of a CKSEGx / XKPHYS address 52 57 */ 53 58 #define CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
+14 -8
arch/mips/include/asm/asmmacro.h
··· 216 216 * Temporary until all gas have MT ASE support 217 217 */ 218 218 .macro DMT reg=0 219 - .word 0x41600bc1 | (\reg << 16) 219 + insn_if_mips 0x41600bc1 | (\reg << 16) 220 + insn32_if_mm 0x0000057C | (\reg << 21) 220 221 .endm 221 222 222 223 .macro EMT reg=0 223 - .word 0x41600be1 | (\reg << 16) 224 + insn_if_mips 0x41600be1 | (\reg << 16) 225 + insn32_if_mm 0x0000257C | (\reg << 21) 224 226 .endm 225 227 226 228 .macro DVPE reg=0 227 - .word 0x41600001 | (\reg << 16) 229 + insn_if_mips 0x41600001 | (\reg << 16) 230 + insn32_if_mm 0x0000157C | (\reg << 21) 228 231 .endm 229 232 230 233 .macro EVPE reg=0 231 - .word 0x41600021 | (\reg << 16) 234 + insn_if_mips 0x41600021 | (\reg << 16) 235 + insn32_if_mm 0x0000357C | (\reg << 21) 232 236 .endm 233 237 234 - .macro MFTR rt=0, rd=0, u=0, sel=0 235 - .word 0x41000000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel) 238 + .macro MFTR rs=0, rt=0, u=0, sel=0 239 + insn_if_mips 0x41000000 | (\rt << 16) | (\rs << 11) | (\u << 5) | (\sel) 240 + insn32_if_mm 0x0000000E | (\rt << 21) | (\rs << 16) | (\u << 10) | (\sel << 4) 236 241 .endm 237 242 238 - .macro MTTR rt=0, rd=0, u=0, sel=0 239 - .word 0x41800000 | (\rt << 16) | (\rd << 11) | (\u << 5) | (\sel) 243 + .macro MTTR rt=0, rs=0, u=0, sel=0 244 + insn_if_mips 0x41800000 | (\rt << 16) | (\rs << 11) | (\u << 5) | (\sel) 245 + insn32_if_mm 0x00000006 | (\rt << 21) | (\rs << 16) | (\u << 10) | (\sel << 4) 240 246 .endm 241 247 242 248 #ifdef TOOLCHAIN_SUPPORTS_MSA
+1 -1
arch/mips/include/asm/cdmm.h
··· 63 63 */ 64 64 phys_addr_t mips_cdmm_phys_base(void); 65 65 66 - extern struct bus_type mips_cdmm_bustype; 66 + extern const struct bus_type mips_cdmm_bustype; 67 67 void __iomem *mips_cdmm_early_probe(unsigned int dev_type); 68 68 69 69 #define to_mips_cdmm_device(d) container_of(d, struct mips_cdmm_device, dev)
+4
arch/mips/include/asm/mach-generic/spaces.h
··· 49 49 #define HIGHMEM_START _AC(0x20000000, UL) 50 50 #endif 51 51 52 + #define CKSEG0ADDR_OR_64BIT(x) CKSEG0ADDR(x) 53 + #define CKSEG1ADDR_OR_64BIT(x) CKSEG1ADDR(x) 52 54 #endif /* CONFIG_32BIT */ 53 55 54 56 #ifdef CONFIG_64BIT ··· 84 82 #define TO_CAC(x) (CAC_BASE | ((x) & TO_PHYS_MASK)) 85 83 #define TO_UNCAC(x) (UNCAC_BASE | ((x) & TO_PHYS_MASK)) 86 84 85 + #define CKSEG0ADDR_OR_64BIT(x) TO_CAC(x) 86 + #define CKSEG1ADDR_OR_64BIT(x) TO_UNCAC(x) 87 87 #endif /* CONFIG_64BIT */ 88 88 89 89 /*
+17 -4
arch/mips/include/asm/mips-cm.h
··· 22 22 extern void __iomem *mips_cm_l2sync_base; 23 23 24 24 /** 25 - * __mips_cm_phys_base - retrieve the physical base address of the CM 25 + * mips_cm_phys_base - retrieve the physical base address of the CM 26 26 * 27 27 * This function returns the physical base address of the Coherence Manager 28 28 * global control block, or 0 if no Coherence Manager is present. It provides 29 29 * a default implementation which reads the CMGCRBase register where available, 30 30 * and may be overridden by platforms which determine this address in a 31 - * different way by defining a function with the same prototype except for the 32 - * name mips_cm_phys_base (without underscores). 31 + * different way by defining a function with the same prototype. 33 32 */ 34 - extern phys_addr_t __mips_cm_phys_base(void); 33 + extern phys_addr_t mips_cm_phys_base(void); 34 + 35 + /** 36 + * mips_cm_l2sync_phys_base - retrieve the physical base address of the CM 37 + * L2-sync region 38 + * 39 + * This function returns the physical base address of the Coherence Manager 40 + * L2-cache only region. It provides a default implementation which reads the 41 + * CMGCRL2OnlySyncBase register where available or returns a 4K region just 42 + * behind the CM GCR base address. It may be overridden by platforms which 43 + * determine this address in a different way by defining a function with the 44 + * same prototype. 45 + */ 46 + extern phys_addr_t mips_cm_l2sync_phys_base(void); 35 47 36 48 /* 37 49 * mips_cm_is64 - determine CM register width ··· 323 311 /* GCR_Cx_RESET_BASE - Configure where powered up cores will fetch from */ 324 312 GCR_CX_ACCESSOR_RW(32, 0x020, reset_base) 325 313 #define CM_GCR_Cx_RESET_BASE_BEVEXCBASE GENMASK(31, 12) 314 + #define CM_GCR_Cx_RESET_BASE_MODE BIT(1) 326 315 327 316 /* GCR_Cx_ID - Identify the current core */ 328 317 GCR_CX_ACCESSOR_RO(32, 0x028, id)
+1 -1
arch/mips/include/asm/mips_mt.h
··· 26 26 #endif 27 27 28 28 struct class; 29 - extern struct class *mt_class; 29 + extern const struct class mt_class; 30 30 31 31 #endif /* __ASM_MIPS_MT_H */
+154 -102
arch/mips/include/asm/mipsmtregs.h
··· 189 189 return ((conf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; 190 190 } 191 191 192 + #define _ASM_SET_DVPE \ 193 + _ASM_MACRO_1R(dvpe, rt, \ 194 + _ASM_INSN_IF_MIPS(0x41600001 | __rt << 16) \ 195 + _ASM_INSN32_IF_MM(0x0000157C | __rt << 21)) 196 + #define _ASM_UNSET_DVPE ".purgem dvpe\n\t" 197 + 192 198 static inline unsigned int dvpe(void) 193 199 { 194 200 int res = 0; 195 201 196 202 __asm__ __volatile__( 197 - " .set push \n" 198 - " .set noreorder \n" 199 - " .set noat \n" 200 - " .set mips32r2 \n" 201 - " .word 0x41610001 # dvpe $1 \n" 202 - " move %0, $1 \n" 203 - " ehb \n" 204 - " .set pop \n" 203 + " .set push \n" 204 + " .set "MIPS_ISA_LEVEL" \n" 205 + _ASM_SET_DVPE 206 + " dvpe %0 \n" 207 + " ehb \n" 208 + _ASM_UNSET_DVPE 209 + " .set pop \n" 205 210 : "=r" (res)); 206 211 207 212 instruction_hazard(); ··· 214 209 return res; 215 210 } 216 211 212 + #define _ASM_SET_EVPE \ 213 + _ASM_MACRO_1R(evpe, rt, \ 214 + _ASM_INSN_IF_MIPS(0x41600021 | __rt << 16) \ 215 + _ASM_INSN32_IF_MM(0x0000357C | __rt << 21)) 216 + #define _ASM_UNSET_EVPE ".purgem evpe\n\t" 217 + 217 218 static inline void __raw_evpe(void) 218 219 { 219 220 __asm__ __volatile__( 220 - " .set push \n" 221 - " .set noreorder \n" 222 - " .set noat \n" 223 - " .set mips32r2 \n" 224 - " .word 0x41600021 # evpe \n" 225 - " ehb \n" 226 - " .set pop \n"); 221 + " .set push \n" 222 + " .set "MIPS_ISA_LEVEL" \n" 223 + _ASM_SET_EVPE 224 + " evpe $0 \n" 225 + " ehb \n" 226 + _ASM_UNSET_EVPE 227 + " .set pop \n"); 227 228 } 228 229 229 230 /* Enable virtual processor execution if previous suggested it should be. 
··· 243 232 __raw_evpe(); 244 233 } 245 234 235 + #define _ASM_SET_DMT \ 236 + _ASM_MACRO_1R(dmt, rt, \ 237 + _ASM_INSN_IF_MIPS(0x41600bc1 | __rt << 16) \ 238 + _ASM_INSN32_IF_MM(0x0000057C | __rt << 21)) 239 + #define _ASM_UNSET_DMT ".purgem dmt\n\t" 240 + 246 241 static inline unsigned int dmt(void) 247 242 { 248 243 int res; 249 244 250 245 __asm__ __volatile__( 251 - " .set push \n" 252 - " .set mips32r2 \n" 253 - " .set noat \n" 254 - " .word 0x41610BC1 # dmt $1 \n" 255 - " ehb \n" 256 - " move %0, $1 \n" 257 - " .set pop \n" 246 + " .set push \n" 247 + " .set "MIPS_ISA_LEVEL" \n" 248 + _ASM_SET_DMT 249 + " dmt %0 \n" 250 + " ehb \n" 251 + _ASM_UNSET_DMT 252 + " .set pop \n" 258 253 : "=r" (res)); 259 254 260 255 instruction_hazard(); ··· 268 251 return res; 269 252 } 270 253 254 + #define _ASM_SET_EMT \ 255 + _ASM_MACRO_1R(emt, rt, \ 256 + _ASM_INSN_IF_MIPS(0x41600be1 | __rt << 16) \ 257 + _ASM_INSN32_IF_MM(0x0000257C | __rt << 21)) 258 + #define _ASM_UNSET_EMT ".purgem emt\n\t" 259 + 271 260 static inline void __raw_emt(void) 272 261 { 273 262 __asm__ __volatile__( 274 - " .set push \n" 275 - " .set noreorder \n" 276 - " .set mips32r2 \n" 277 - " .word 0x41600be1 # emt \n" 278 - " ehb \n" 263 + " .set push \n" 264 + " .set "MIPS_ISA_LEVEL" \n" 265 + _ASM_SET_EMT 266 + " emt $0 \n" 267 + _ASM_UNSET_EMT 268 + " ehb \n" 279 269 " .set pop"); 280 270 } 281 271 ··· 300 276 static inline void ehb(void) 301 277 { 302 278 __asm__ __volatile__( 303 - " .set push \n" 304 - " .set mips32r2 \n" 305 - " ehb \n" 306 - " .set pop \n"); 279 + " .set push \n" 280 + " .set "MIPS_ISA_LEVEL" \n" 281 + " ehb \n" 282 + " .set pop \n"); 307 283 } 308 284 309 - #define mftc0(rt,sel) \ 285 + #define _ASM_SET_MFTC0 \ 286 + _ASM_MACRO_2R_1S(mftc0, rs, rt, sel, \ 287 + _ASM_INSN_IF_MIPS(0x41000000 | __rt << 16 | \ 288 + __rs << 11 | \\sel) \ 289 + _ASM_INSN32_IF_MM(0x0000000E | __rt << 21 | \ 290 + __rs << 16 | \\sel << 4)) 291 + #define _ASM_UNSET_MFTC0 ".purgem mftc0\n\t" 292 + 293 + 
#define mftc0(rt, sel) \ 310 294 ({ \ 311 - unsigned long __res; \ 295 + unsigned long __res; \ 312 296 \ 313 297 __asm__ __volatile__( \ 314 - " .set push \n" \ 315 - " .set mips32r2 \n" \ 316 - " .set noat \n" \ 317 - " # mftc0 $1, $" #rt ", " #sel " \n" \ 318 - " .word 0x41000800 | (" #rt " << 16) | " #sel " \n" \ 319 - " move %0, $1 \n" \ 320 - " .set pop \n" \ 298 + " .set push \n" \ 299 + " .set "MIPS_ISA_LEVEL" \n" \ 300 + _ASM_SET_MFTC0 \ 301 + " mftc0 $1, " #rt ", " #sel " \n" \ 302 + _ASM_UNSET_MFTC0 \ 303 + " .set pop \n" \ 321 304 : "=r" (__res)); \ 322 305 \ 323 306 __res; \ 324 307 }) 308 + 309 + #define _ASM_SET_MFTGPR \ 310 + _ASM_MACRO_2R(mftgpr, rs, rt, \ 311 + _ASM_INSN_IF_MIPS(0x41000020 | __rt << 16 | \ 312 + __rs << 11) \ 313 + _ASM_INSN32_IF_MM(0x0000040E | __rt << 21 | \ 314 + __rs << 16)) 315 + #define _ASM_UNSET_MFTGPR ".purgem mftgpr\n\t" 325 316 326 317 #define mftgpr(rt) \ 327 318 ({ \ 328 319 unsigned long __res; \ 329 320 \ 330 321 __asm__ __volatile__( \ 331 - " .set push \n" \ 332 - " .set noat \n" \ 333 - " .set mips32r2 \n" \ 334 - " # mftgpr $1," #rt " \n" \ 335 - " .word 0x41000820 | (" #rt " << 16) \n" \ 336 - " move %0, $1 \n" \ 337 - " .set pop \n" \ 322 + " .set push \n" \ 323 + " .set "MIPS_ISA_LEVEL" \n" \ 324 + _ASM_SET_MFTGPR \ 325 + " mftgpr %0," #rt " \n" \ 326 + _ASM_UNSET_MFTGPR \ 327 + " .set pop \n" \ 338 328 : "=r" (__res)); \ 339 329 \ 340 330 __res; \ ··· 359 321 unsigned long __res; \ 360 322 \ 361 323 __asm__ __volatile__( \ 362 - " mftr %0, " #rt ", " #u ", " #sel " \n" \ 324 + " mftr %0, " #rt ", " #u ", " #sel " \n" \ 363 325 : "=r" (__res)); \ 364 326 \ 365 327 __res; \ 366 328 }) 367 329 368 - #define mttgpr(rd,v) \ 330 + #define _ASM_SET_MTTGPR \ 331 + _ASM_MACRO_2R(mttgpr, rt, rs, \ 332 + _ASM_INSN_IF_MIPS(0x41800020 | __rt << 16 | \ 333 + __rs << 11) \ 334 + _ASM_INSN32_IF_MM(0x00000406 | __rt << 21 | \ 335 + __rs << 16)) 336 + #define _ASM_UNSET_MTTGPR ".purgem mttgpr\n\t" 337 + 338 + #define 
mttgpr(rs, v) \ 369 339 do { \ 370 340 __asm__ __volatile__( \ 371 - " .set push \n" \ 372 - " .set mips32r2 \n" \ 373 - " .set noat \n" \ 374 - " move $1, %0 \n" \ 375 - " # mttgpr $1, " #rd " \n" \ 376 - " .word 0x41810020 | (" #rd " << 11) \n" \ 377 - " .set pop \n" \ 341 + " .set push \n" \ 342 + " .set "MIPS_ISA_LEVEL" \n" \ 343 + _ASM_SET_MTTGPR \ 344 + " mttgpr %0, " #rs " \n" \ 345 + _ASM_UNSET_MTTGPR \ 346 + " .set pop \n" \ 378 347 : : "r" (v)); \ 379 348 } while (0) 380 349 381 - #define mttc0(rd, sel, v) \ 350 + #define _ASM_SET_MTTC0 \ 351 + _ASM_MACRO_2R_1S(mttc0, rt, rs, sel, \ 352 + _ASM_INSN_IF_MIPS(0x41800000 | __rt << 16 | \ 353 + __rs << 11 | \\sel) \ 354 + _ASM_INSN32_IF_MM(0x0000040E | __rt << 21 | \ 355 + __rs << 16 | \\sel << 4)) 356 + #define _ASM_UNSET_MTTC0 ".purgem mttc0\n\t" 357 + 358 + #define mttc0(rs, sel, v) \ 382 359 ({ \ 383 360 __asm__ __volatile__( \ 384 - " .set push \n" \ 385 - " .set mips32r2 \n" \ 386 - " .set noat \n" \ 387 - " move $1, %0 \n" \ 388 - " # mttc0 %0," #rd ", " #sel " \n" \ 389 - " .word 0x41810000 | (" #rd " << 11) | " #sel " \n" \ 390 - " .set pop \n" \ 361 + " .set push \n" \ 362 + " .set "MIPS_ISA_LEVEL" \n" \ 363 + _ASM_SET_MTTC0 \ 364 + " mttc0 %0," #rs ", " #sel " \n" \ 365 + _ASM_UNSET_MTTC0 \ 366 + " .set pop \n" \ 391 367 : \ 392 368 : "r" (v)); \ 393 369 }) ··· 423 371 424 372 425 373 /* you *must* set the target tc (settc) before trying to use these */ 426 - #define read_vpe_c0_vpecontrol() mftc0(1, 1) 427 - #define write_vpe_c0_vpecontrol(val) mttc0(1, 1, val) 428 - #define read_vpe_c0_vpeconf0() mftc0(1, 2) 429 - #define write_vpe_c0_vpeconf0(val) mttc0(1, 2, val) 430 - #define read_vpe_c0_vpeconf1() mftc0(1, 3) 431 - #define write_vpe_c0_vpeconf1(val) mttc0(1, 3, val) 432 - #define read_vpe_c0_count() mftc0(9, 0) 433 - #define write_vpe_c0_count(val) mttc0(9, 0, val) 434 - #define read_vpe_c0_status() mftc0(12, 0) 435 - #define write_vpe_c0_status(val) mttc0(12, 0, val) 436 - #define 
read_vpe_c0_cause() mftc0(13, 0) 437 - #define write_vpe_c0_cause(val) mttc0(13, 0, val) 438 - #define read_vpe_c0_config() mftc0(16, 0) 439 - #define write_vpe_c0_config(val) mttc0(16, 0, val) 440 - #define read_vpe_c0_config1() mftc0(16, 1) 441 - #define write_vpe_c0_config1(val) mttc0(16, 1, val) 442 - #define read_vpe_c0_config7() mftc0(16, 7) 443 - #define write_vpe_c0_config7(val) mttc0(16, 7, val) 444 - #define read_vpe_c0_ebase() mftc0(15, 1) 445 - #define write_vpe_c0_ebase(val) mttc0(15, 1, val) 446 - #define write_vpe_c0_compare(val) mttc0(11, 0, val) 447 - #define read_vpe_c0_badvaddr() mftc0(8, 0) 448 - #define read_vpe_c0_epc() mftc0(14, 0) 449 - #define write_vpe_c0_epc(val) mttc0(14, 0, val) 374 + #define read_vpe_c0_vpecontrol() mftc0($1, 1) 375 + #define write_vpe_c0_vpecontrol(val) mttc0($1, 1, val) 376 + #define read_vpe_c0_vpeconf0() mftc0($1, 2) 377 + #define write_vpe_c0_vpeconf0(val) mttc0($1, 2, val) 378 + #define read_vpe_c0_vpeconf1() mftc0($1, 3) 379 + #define write_vpe_c0_vpeconf1(val) mttc0($1, 3, val) 380 + #define read_vpe_c0_count() mftc0($9, 0) 381 + #define write_vpe_c0_count(val) mttc0($9, 0, val) 382 + #define read_vpe_c0_status() mftc0($12, 0) 383 + #define write_vpe_c0_status(val) mttc0($12, 0, val) 384 + #define read_vpe_c0_cause() mftc0($13, 0) 385 + #define write_vpe_c0_cause(val) mttc0($13, 0, val) 386 + #define read_vpe_c0_config() mftc0($16, 0) 387 + #define write_vpe_c0_config(val) mttc0($16, 0, val) 388 + #define read_vpe_c0_config1() mftc0($16, 1) 389 + #define write_vpe_c0_config1(val) mttc0($16, 1, val) 390 + #define read_vpe_c0_config7() mftc0($16, 7) 391 + #define write_vpe_c0_config7(val) mttc0($16, 7, val) 392 + #define read_vpe_c0_ebase() mftc0($15, 1) 393 + #define write_vpe_c0_ebase(val) mttc0($15, 1, val) 394 + #define write_vpe_c0_compare(val) mttc0($11, 0, val) 395 + #define read_vpe_c0_badvaddr() mftc0($8, 0) 396 + #define read_vpe_c0_epc() mftc0($14, 0) 397 + #define write_vpe_c0_epc(val) mttc0($14, 0, 
val) 450 398 451 399 452 400 /* TC */ 453 - #define read_tc_c0_tcstatus() mftc0(2, 1) 454 - #define write_tc_c0_tcstatus(val) mttc0(2, 1, val) 455 - #define read_tc_c0_tcbind() mftc0(2, 2) 456 - #define write_tc_c0_tcbind(val) mttc0(2, 2, val) 457 - #define read_tc_c0_tcrestart() mftc0(2, 3) 458 - #define write_tc_c0_tcrestart(val) mttc0(2, 3, val) 459 - #define read_tc_c0_tchalt() mftc0(2, 4) 460 - #define write_tc_c0_tchalt(val) mttc0(2, 4, val) 461 - #define read_tc_c0_tccontext() mftc0(2, 5) 462 - #define write_tc_c0_tccontext(val) mttc0(2, 5, val) 401 + #define read_tc_c0_tcstatus() mftc0($2, 1) 402 + #define write_tc_c0_tcstatus(val) mttc0($2, 1, val) 403 + #define read_tc_c0_tcbind() mftc0($2, 2) 404 + #define write_tc_c0_tcbind(val) mttc0($2, 2, val) 405 + #define read_tc_c0_tcrestart() mftc0($2, 3) 406 + #define write_tc_c0_tcrestart(val) mttc0($2, 3, val) 407 + #define read_tc_c0_tchalt() mftc0($2, 4) 408 + #define write_tc_c0_tchalt(val) mttc0($2, 4, val) 409 + #define read_tc_c0_tccontext() mftc0($2, 5) 410 + #define write_tc_c0_tccontext(val) mttc0($2, 5, val) 463 411 464 412 /* GPR */ 465 - #define read_tc_gpr_sp() mftgpr(29) 466 - #define write_tc_gpr_sp(val) mttgpr(29, val) 467 - #define read_tc_gpr_gp() mftgpr(28) 468 - #define write_tc_gpr_gp(val) mttgpr(28, val) 413 + #define read_tc_gpr_sp() mftgpr($29) 414 + #define write_tc_gpr_sp(val) mttgpr($29, val) 415 + #define read_tc_gpr_gp() mftgpr($28) 416 + #define write_tc_gpr_gp(val) mttgpr($28, val) 469 417 470 418 __BUILD_SET_C0(mvpcontrol) 471 419
+219 -59
arch/mips/include/asm/mipsregs.h
··· 42 42 43 43 /* 44 44 * Coprocessor 0 register names 45 + * 46 + * CP0_REGISTER variant is meant to be used in assembly code, C0_REGISTER 47 + * variant is meant to be used in C (uasm) code. 45 48 */ 46 - #define CP0_INDEX $0 47 - #define CP0_RANDOM $1 48 - #define CP0_ENTRYLO0 $2 49 - #define CP0_ENTRYLO1 $3 50 - #define CP0_CONF $3 51 - #define CP0_GLOBALNUMBER $3, 1 52 - #define CP0_CONTEXT $4 53 - #define CP0_PAGEMASK $5 54 - #define CP0_PAGEGRAIN $5, 1 55 - #define CP0_SEGCTL0 $5, 2 56 - #define CP0_SEGCTL1 $5, 3 57 - #define CP0_SEGCTL2 $5, 4 58 - #define CP0_WIRED $6 59 - #define CP0_INFO $7 60 - #define CP0_HWRENA $7 61 - #define CP0_BADVADDR $8 62 - #define CP0_BADINSTR $8, 1 63 - #define CP0_COUNT $9 64 - #define CP0_ENTRYHI $10 65 - #define CP0_GUESTCTL1 $10, 4 66 - #define CP0_GUESTCTL2 $10, 5 67 - #define CP0_GUESTCTL3 $10, 6 68 - #define CP0_COMPARE $11 69 - #define CP0_GUESTCTL0EXT $11, 4 70 - #define CP0_STATUS $12 71 - #define CP0_GUESTCTL0 $12, 6 72 - #define CP0_GTOFFSET $12, 7 73 - #define CP0_CAUSE $13 74 - #define CP0_EPC $14 75 - #define CP0_PRID $15 76 - #define CP0_EBASE $15, 1 77 - #define CP0_CMGCRBASE $15, 3 78 - #define CP0_CONFIG $16 79 - #define CP0_CONFIG3 $16, 3 80 - #define CP0_CONFIG5 $16, 5 81 - #define CP0_CONFIG6 $16, 6 82 - #define CP0_LLADDR $17 83 - #define CP0_WATCHLO $18 84 - #define CP0_WATCHHI $19 85 - #define CP0_XCONTEXT $20 86 - #define CP0_FRAMEMASK $21 87 - #define CP0_DIAGNOSTIC $22 88 - #define CP0_DIAGNOSTIC1 $22, 1 89 - #define CP0_DEBUG $23 90 - #define CP0_DEPC $24 91 - #define CP0_PERFORMANCE $25 92 - #define CP0_ECC $26 93 - #define CP0_CACHEERR $27 94 - #define CP0_TAGLO $28 95 - #define CP0_TAGHI $29 96 - #define CP0_ERROREPC $30 97 - #define CP0_DESAVE $31 49 + #define CP0_INDEX $0 50 + #define C0_INDEX 0, 0 51 + 52 + #define CP0_RANDOM $1 53 + #define C0_RANDOM 1, 0 54 + 55 + #define CP0_ENTRYLO0 $2 56 + #define C0_ENTRYLO0 2, 0 57 + 58 + #define CP0_ENTRYLO1 $3 59 + #define C0_ENTRYLO1 3, 0 60 + 61 + 
#define CP0_CONF $3 62 + #define C0_CONF 3, 0 63 + 64 + #define CP0_GLOBALNUMBER $3, 1 65 + #define C0_GLOBALNUMBER 3, 1 66 + 67 + #define CP0_CONTEXT $4 68 + #define C0_CONTEXT 4, 0 69 + 70 + #define CP0_PAGEMASK $5 71 + #define C0_PAGEMASK 5, 0 72 + 73 + #define CP0_PAGEGRAIN $5, 1 74 + #define C0_PAGEGRAIN 5, 1 75 + 76 + #define CP0_SEGCTL0 $5, 2 77 + #define C0_SEGCTL0 5, 2 78 + 79 + #define CP0_SEGCTL1 $5, 3 80 + #define C0_SEGCTL1 5, 3 81 + 82 + #define CP0_SEGCTL2 $5, 4 83 + #define C0_SEGCTL2 5, 4 84 + 85 + #define CP0_PWBASE $5, 5 86 + #define C0_PWBASE 5, 5 87 + 88 + #define CP0_PWFIELD $5, 6 89 + #define C0_PWFIELD 5, 6 90 + 91 + #define CP0_PWCTL $5, 7 92 + #define C0_PWCTL 5, 7 93 + 94 + #define CP0_WIRED $6 95 + #define C0_WIRED 6, 0 96 + 97 + #define CP0_INFO $7 98 + #define C0_INFO 7, 0 99 + 100 + #define CP0_HWRENA $7 101 + #define C0_HWRENA 7, 0 102 + 103 + #define CP0_BADVADDR $8 104 + #define C0_BADVADDR 8, 0 105 + 106 + #define CP0_BADINSTR $8, 1 107 + #define C0_BADINSTR 8, 1 108 + 109 + #define CP0_BADINSTRP $8, 2 110 + #define C0_BADINSTRP 8, 2 111 + 112 + #define CP0_COUNT $9 113 + #define C0_COUNT 9, 0 114 + 115 + #define CP0_PGD $9, 7 116 + #define C0_PGD 9, 7 117 + 118 + #define CP0_ENTRYHI $10 119 + #define C0_ENTRYHI 10, 0 120 + 121 + #define CP0_GUESTCTL1 $10, 4 122 + #define C0_GUESTCTL1 10, 5 123 + 124 + #define CP0_GUESTCTL2 $10, 5 125 + #define C0_GUESTCTL2 10, 5 126 + 127 + #define CP0_GUESTCTL3 $10, 6 128 + #define C0_GUESTCTL3 10, 6 129 + 130 + #define CP0_COMPARE $11 131 + #define C0_COMPARE 11, 0 132 + 133 + #define CP0_GUESTCTL0EXT $11, 4 134 + #define C0_GUESTCTL0EXT 11, 4 135 + 136 + #define CP0_STATUS $12 137 + #define C0_STATUS 12, 0 138 + 139 + #define CP0_GUESTCTL0 $12, 6 140 + #define C0_GUESTCTL0 12, 6 141 + 142 + #define CP0_GTOFFSET $12, 7 143 + #define C0_GTOFFSET 12, 7 144 + 145 + #define CP0_CAUSE $13 146 + #define C0_CAUSE 13, 0 147 + 148 + #define CP0_EPC $14 149 + #define C0_EPC 14, 0 150 + 151 + #define 
CP0_PRID $15 152 + #define C0_PRID 15, 0 153 + 154 + #define CP0_EBASE $15, 1 155 + #define C0_EBASE 15, 1 156 + 157 + #define CP0_CMGCRBASE $15, 3 158 + #define C0_CMGCRBASE 15, 3 159 + 160 + #define CP0_CONFIG $16 161 + #define C0_CONFIG 16, 0 162 + 163 + #define CP0_CONFIG1 $16, 1 164 + #define C0_CONFIG1 16, 1 165 + 166 + #define CP0_CONFIG2 $16, 2 167 + #define C0_CONFIG2 16, 2 168 + 169 + #define CP0_CONFIG3 $16, 3 170 + #define C0_CONFIG3 16, 3 171 + 172 + #define CP0_CONFIG4 $16, 4 173 + #define C0_CONFIG4 16, 4 174 + 175 + #define CP0_CONFIG5 $16, 5 176 + #define C0_CONFIG5 16, 5 177 + 178 + #define CP0_CONFIG6 $16, 6 179 + #define C0_CONFIG6 16, 6 180 + 181 + #define CP0_LLADDR $17 182 + #define C0_LLADDR 17, 0 183 + 184 + #define CP0_WATCHLO $18 185 + #define C0_WATCHLO 18, 0 186 + 187 + #define CP0_WATCHHI $19 188 + #define C0_WATCHHI 19, 0 189 + 190 + #define CP0_XCONTEXT $20 191 + #define C0_XCONTEXT 20, 0 192 + 193 + #define CP0_FRAMEMASK $21 194 + #define C0_FRAMEMASK 21, 0 195 + 196 + #define CP0_DIAGNOSTIC $22 197 + #define C0_DIAGNOSTIC 22, 0 198 + 199 + #define CP0_DIAGNOSTIC1 $22, 1 200 + #define C0_DIAGNOSTIC1 22, 1 201 + 202 + #define CP0_DEBUG $23 203 + #define C0_DEBUG 23, 0 204 + 205 + #define CP0_DEPC $24 206 + #define C0_DEPC 24, 0 207 + 208 + #define CP0_PERFORMANCE $25 209 + #define C0_PERFORMANCE 25, 0 210 + 211 + #define CP0_ECC $26 212 + #define C0_ECC 26, 0 213 + 214 + #define CP0_CACHEERR $27 215 + #define C0_CACHEERR 27, 0 216 + 217 + #define CP0_TAGLO $28 218 + #define C0_TAGLO 28, 0 219 + 220 + #define CP0_DTAGLO $28, 2 221 + #define C0_DTAGLO 28, 2 222 + 223 + #define CP0_DDATALO $28, 3 224 + #define C0_DDATALO 28, 3 225 + 226 + #define CP0_STAGLO $28, 4 227 + #define C0_STAGLO 28, 4 228 + 229 + #define CP0_TAGHI $29 230 + #define C0_TAGHI 29, 0 231 + 232 + #define CP0_ERROREPC $30 233 + #define C0_ERROREPC 30, 0 234 + 235 + #define CP0_DESAVE $31 236 + #define C0_DESAVE 31, 0 98 237 99 238 /* 100 239 * R4640/R4650 cp0 
register names. These registers are listed ··· 429 290 #define ST0_KX 0x00000080 430 291 #define ST0_DE 0x00010000 431 292 #define ST0_CE 0x00020000 293 + 294 + #ifdef CONFIG_64BIT 295 + #define ST0_KX_IF_64 ST0_KX 296 + #else 297 + #define ST0_KX_IF_64 0 298 + #endif 432 299 433 300 /* 434 301 * Setting c0_status.co enables Hit_Writeback and Hit_Writeback_Invalidate ··· 1422 1277 */ 1423 1278 1424 1279 /* Match an individual register number and assign to \var */ 1425 - #define _IFC_REG(n) \ 1426 - ".ifc \\r, $" #n "\n\t" \ 1280 + #define _IFC_REG_NAME(name, n) \ 1281 + ".ifc \\r, $" #name "\n\t" \ 1427 1282 "\\var = " #n "\n\t" \ 1428 1283 ".endif\n\t" 1284 + 1285 + #define _IFC_REG(n) _IFC_REG_NAME(n, n) 1429 1286 1430 1287 #define _ASM_SET_PARSE_R \ 1431 1288 ".macro parse_r var r\n\t" \ ··· 1440 1293 _IFC_REG(20) _IFC_REG(21) _IFC_REG(22) _IFC_REG(23) \ 1441 1294 _IFC_REG(24) _IFC_REG(25) _IFC_REG(26) _IFC_REG(27) \ 1442 1295 _IFC_REG(28) _IFC_REG(29) _IFC_REG(30) _IFC_REG(31) \ 1296 + _IFC_REG_NAME(sp, 29) _IFC_REG_NAME(fp, 30) \ 1443 1297 ".iflt \\var\n\t" \ 1444 1298 ".error \"Unable to parse register name \\r\"\n\t" \ 1445 1299 ".endif\n\t" \ ··· 1454 1306 * register operand \<Rn> is assigned to __<Rn> where it can be accessed from 1455 1307 * the ENC encodings. 
1456 1308 */ 1309 + 1310 + /* Instructions with 1 register operand */ 1311 + #define _ASM_MACRO_1R(OP, R1, ENC) \ 1312 + ".macro " #OP " " #R1 "\n\t" \ 1313 + _ASM_SET_PARSE_R \ 1314 + "parse_r __" #R1 ", \\" #R1 "\n\t" \ 1315 + ENC \ 1316 + _ASM_UNSET_PARSE_R \ 1317 + ".endm\n\t" 1457 1318 1458 1319 /* Instructions with 1 register operand & 1 immediate operand */ 1459 1320 #define _ASM_MACRO_1R1I(OP, R1, I2, ENC) \ ··· 2235 2078 _ASM_INSN_IF_MIPS(0x4200000c) \ 2236 2079 _ASM_INSN32_IF_MM(0x0000517c) 2237 2080 #else /* !TOOLCHAIN_SUPPORTS_VIRT */ 2238 - #define _ASM_SET_VIRT ".set\tvirt\n\t" 2081 + #if MIPS_ISA_REV >= 5 2082 + #define _ASM_SET_VIRT_ISA 2083 + #elif defined(CONFIG_64BIT) 2084 + #define _ASM_SET_VIRT_ISA ".set\tmips64r5\n\t" 2085 + #else 2086 + #define _ASM_SET_VIRT_ISA ".set\tmips32r5\n\t" 2087 + #endif 2088 + #define _ASM_SET_VIRT _ASM_SET_VIRT_ISA ".set\tvirt\n\t" 2239 2089 #define _ASM_SET_MFGC0 _ASM_SET_VIRT 2240 2090 #define _ASM_SET_DMFGC0 _ASM_SET_VIRT 2241 2091 #define _ASM_SET_MTGC0 _ASM_SET_VIRT ··· 2263 2099 ({ int __res; \ 2264 2100 __asm__ __volatile__( \ 2265 2101 ".set\tpush\n\t" \ 2266 - ".set\tmips32r5\n\t" \ 2267 2102 _ASM_SET_MFGC0 \ 2268 2103 "mfgc0\t%0, " #source ", %1\n\t" \ 2269 2104 _ASM_UNSET_MFGC0 \ ··· 2276 2113 ({ unsigned long long __res; \ 2277 2114 __asm__ __volatile__( \ 2278 2115 ".set\tpush\n\t" \ 2279 - ".set\tmips64r5\n\t" \ 2280 2116 _ASM_SET_DMFGC0 \ 2281 2117 "dmfgc0\t%0, " #source ", %1\n\t" \ 2282 2118 _ASM_UNSET_DMFGC0 \ ··· 2289 2127 do { \ 2290 2128 __asm__ __volatile__( \ 2291 2129 ".set\tpush\n\t" \ 2292 - ".set\tmips32r5\n\t" \ 2293 2130 _ASM_SET_MTGC0 \ 2294 2131 "mtgc0\t%z0, " #register ", %1\n\t" \ 2295 2132 _ASM_UNSET_MTGC0 \ ··· 2301 2140 do { \ 2302 2141 __asm__ __volatile__( \ 2303 2142 ".set\tpush\n\t" \ 2304 - ".set\tmips64r5\n\t" \ 2305 2143 _ASM_SET_DMTGC0 \ 2306 2144 "dmtgc0\t%z0, " #register ", %1\n\t" \ 2307 2145 _ASM_UNSET_DMTGC0 \
+91
arch/mips/include/asm/regdef.h
··· 17 17 #if _MIPS_SIM == _MIPS_SIM_ABI32 18 18 19 19 /* 20 + * General purpose register numbers for 32 bit ABI 21 + */ 22 + #define GPR_ZERO 0 /* wired zero */ 23 + #define GPR_AT 1 /* assembler temp */ 24 + #define GPR_V0 2 /* return value */ 25 + #define GPR_V1 3 26 + #define GPR_A0 4 /* argument registers */ 27 + #define GPR_A1 5 28 + #define GPR_A2 6 29 + #define GPR_A3 7 30 + #define GPR_T0 8 /* caller saved */ 31 + #define GPR_T1 9 32 + #define GPR_T2 10 33 + #define GPR_T3 11 34 + #define GPR_T4 12 35 + #define GPR_TA0 12 36 + #define GPR_T5 13 37 + #define GPR_TA1 13 38 + #define GPR_T6 14 39 + #define GPR_TA2 14 40 + #define GPR_T7 15 41 + #define GPR_TA3 15 42 + #define GPR_S0 16 /* callee saved */ 43 + #define GPR_S1 17 44 + #define GPR_S2 18 45 + #define GPR_S3 19 46 + #define GPR_S4 20 47 + #define GPR_S5 21 48 + #define GPR_S6 22 49 + #define GPR_S7 23 50 + #define GPR_T8 24 /* caller saved */ 51 + #define GPR_T9 25 52 + #define GPR_JP 25 /* PIC jump register */ 53 + #define GPR_K0 26 /* kernel scratch */ 54 + #define GPR_K1 27 55 + #define GPR_GP 28 /* global pointer */ 56 + #define GPR_SP 29 /* stack pointer */ 57 + #define GPR_FP 30 /* frame pointer */ 58 + #define GPR_S8 30 /* same like fp! 
*/ 59 + #define GPR_RA 31 /* return address */ 60 + 61 + #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 62 + 63 + #if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 64 + 65 + #define GPR_ZERO 0 /* wired zero */ 66 + #define GPR_AT 1 /* assembler temp */ 67 + #define GPR_V0 2 /* return value - caller saved */ 68 + #define GPR_V1 3 69 + #define GPR_A0 4 /* argument registers */ 70 + #define GPR_A1 5 71 + #define GPR_A2 6 72 + #define GPR_A3 7 73 + #define GPR_A4 8 /* arg reg 64 bit; caller saved in 32 bit */ 74 + #define GPR_TA0 8 75 + #define GPR_A5 9 76 + #define GPR_TA1 9 77 + #define GPR_A6 10 78 + #define GPR_TA2 10 79 + #define GPR_A7 11 80 + #define GPR_TA3 11 81 + #define GPR_T0 12 /* caller saved */ 82 + #define GPR_T1 13 83 + #define GPR_T2 14 84 + #define GPR_T3 15 85 + #define GPR_S0 16 /* callee saved */ 86 + #define GPR_S1 17 87 + #define GPR_S2 18 88 + #define GPR_S3 19 89 + #define GPR_S4 20 90 + #define GPR_S5 21 91 + #define GPR_S6 22 92 + #define GPR_S7 23 93 + #define GPR_T8 24 /* caller saved */ 94 + #define GPR_T9 25 /* callee address for PIC/temp */ 95 + #define GPR_JP 25 /* PIC jump register */ 96 + #define GPR_K0 26 /* kernel temporary */ 97 + #define GPR_K1 27 98 + #define GPR_GP 28 /* global pointer - caller saved for PIC */ 99 + #define GPR_SP 29 /* stack pointer */ 100 + #define GPR_FP 30 /* frame pointer */ 101 + #define GPR_S8 30 /* callee saved */ 102 + #define GPR_RA 31 /* return address */ 103 + 104 + #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ 105 + 106 + #ifdef __ASSEMBLY__ 107 + #if _MIPS_SIM == _MIPS_SIM_ABI32 108 + 109 + /* 20 110 * Symbolic register names for 32 bit ABI 21 111 */ 22 112 #define zero $0 /* wired zero */ ··· 192 102 #define ra $31 /* return address */ 193 103 194 104 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ 105 + #endif /* __ASSEMBLY__ */ 195 106 196 107 #endif /* _ASM_REGDEF_H */
+7 -2
arch/mips/include/asm/smp-cps.h
··· 24 24 25 25 extern struct core_boot_config *mips_cps_core_bootcfg; 26 26 27 - extern void mips_cps_core_entry(void); 27 + extern void mips_cps_core_boot(int cca, void __iomem *gcr_base); 28 28 extern void mips_cps_core_init(void); 29 29 30 30 extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe); ··· 32 32 extern void mips_cps_pm_save(void); 33 33 extern void mips_cps_pm_restore(void); 34 34 35 - extern void *mips_cps_core_entry_patch_end; 35 + extern void excep_tlbfill(void); 36 + extern void excep_xtlbfill(void); 37 + extern void excep_cache(void); 38 + extern void excep_genex(void); 39 + extern void excep_intex(void); 40 + extern void excep_ejtag(void); 36 41 37 42 #ifdef CONFIG_MIPS_CPS 38 43
+11 -43
arch/mips/kernel/cps-vec.S
··· 4 4 * Author: Paul Burton <paul.burton@mips.com> 5 5 */ 6 6 7 + #include <linux/init.h> 7 8 #include <asm/addrspace.h> 8 9 #include <asm/asm.h> 9 10 #include <asm/asm-offsets.h> ··· 83 82 .endm 84 83 85 84 86 - .balign 0x1000 87 - 88 - LEAF(mips_cps_core_entry) 89 - /* 90 - * These first several instructions will be patched by cps_smp_setup to load the 91 - * CCA to use into register s0 and GCR base address to register s1. 92 - */ 93 - .rept CPS_ENTRY_PATCH_INSNS 94 - nop 95 - .endr 96 - 97 - .global mips_cps_core_entry_patch_end 98 - mips_cps_core_entry_patch_end: 99 - 100 - /* Check whether we're here due to an NMI */ 101 - mfc0 k0, CP0_STATUS 102 - and k0, k0, ST0_NMI 103 - beqz k0, not_nmi 104 - nop 105 - 106 - /* This is an NMI */ 107 - PTR_LA k0, nmi_handler 108 - jr k0 109 - nop 110 - 111 - not_nmi: 112 - /* Setup Cause */ 113 - li t0, CAUSEF_IV 114 - mtc0 t0, CP0_CAUSE 115 - 116 - /* Setup Status */ 117 - li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS 118 - mtc0 t0, CP0_STATUS 85 + LEAF(mips_cps_core_boot) 86 + /* Save CCA and GCR base */ 87 + move s0, a0 88 + move s1, a1 119 89 120 90 /* We don't know how to do coherence setup on earlier ISA */ 121 91 #if MIPS_ISA_REV > 0 ··· 150 178 PTR_L sp, VPEBOOTCFG_SP(v1) 151 179 jr t1 152 180 nop 153 - END(mips_cps_core_entry) 181 + END(mips_cps_core_boot) 154 182 155 - .org 0x200 183 + __INIT 156 184 LEAF(excep_tlbfill) 157 185 DUMP_EXCEP("TLB Fill") 158 186 b . 159 187 nop 160 188 END(excep_tlbfill) 161 189 162 - .org 0x280 163 190 LEAF(excep_xtlbfill) 164 191 DUMP_EXCEP("XTLB Fill") 165 192 b . 166 193 nop 167 194 END(excep_xtlbfill) 168 195 169 - .org 0x300 170 196 LEAF(excep_cache) 171 197 DUMP_EXCEP("Cache") 172 198 b . 173 199 nop 174 200 END(excep_cache) 175 201 176 - .org 0x380 177 202 LEAF(excep_genex) 178 203 DUMP_EXCEP("General") 179 204 b . 180 205 nop 181 206 END(excep_genex) 182 207 183 - .org 0x400 184 208 LEAF(excep_intex) 185 209 DUMP_EXCEP("Interrupt") 186 210 b . 
187 211 nop 188 212 END(excep_intex) 189 213 190 - .org 0x480 191 214 LEAF(excep_ejtag) 192 215 PTR_LA k0, ejtag_debug_handler 193 216 jr k0 194 217 nop 195 218 END(excep_ejtag) 219 + __FINIT 196 220 197 221 LEAF(mips_cps_core_init) 198 222 #ifdef CONFIG_MIPS_MT_SMP ··· 396 428 /* Calculate a pointer to the VPEs struct vpe_boot_config */ 397 429 li t0, VPEBOOTCFG_SIZE 398 430 mul t0, t0, ta1 399 - addu t0, t0, ta3 431 + PTR_ADDU t0, t0, ta3 400 432 401 433 /* Set the TC restart PC */ 402 434 lw t1, VPEBOOTCFG_PC(t0) ··· 571 603 lw $1, TI_CPU(gp) 572 604 sll $1, $1, LONGLOG 573 605 PTR_LA \dest, __per_cpu_offset 574 - addu $1, $1, \dest 606 + PTR_ADDU $1, $1, \dest 575 607 lw $1, 0($1) 576 608 PTR_LA \dest, cps_cpu_state 577 - addu \dest, \dest, $1 609 + PTR_ADDU \dest, \dest, $1 578 610 .set pop 579 611 .endm 580 612
+2 -8
arch/mips/kernel/mips-cm.c
··· 179 179 static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock); 180 180 static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags); 181 181 182 - phys_addr_t __mips_cm_phys_base(void) 182 + phys_addr_t __weak mips_cm_phys_base(void) 183 183 { 184 184 unsigned long cmgcr; 185 185 ··· 198 198 return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32); 199 199 } 200 200 201 - phys_addr_t mips_cm_phys_base(void) 202 - __attribute__((weak, alias("__mips_cm_phys_base"))); 203 - 204 - static phys_addr_t __mips_cm_l2sync_phys_base(void) 201 + phys_addr_t __weak mips_cm_l2sync_phys_base(void) 205 202 { 206 203 u32 base_reg; 207 204 ··· 213 216 /* Default to following the CM */ 214 217 return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; 215 218 } 216 - 217 - phys_addr_t mips_cm_l2sync_phys_base(void) 218 - __attribute__((weak, alias("__mips_cm_l2sync_phys_base"))); 219 219 220 220 static void mips_cm_probe_l2sync(void) 221 221 {
+4 -10
arch/mips/kernel/mips-mt.c
··· 229 229 } 230 230 } 231 231 232 - struct class *mt_class; 232 + const struct class mt_class = { 233 + .name = "mt", 234 + }; 233 235 234 236 static int __init mips_mt_init(void) 235 237 { 236 - struct class *mtc; 237 - 238 - mtc = class_create("mt"); 239 - if (IS_ERR(mtc)) 240 - return PTR_ERR(mtc); 241 - 242 - mt_class = mtc; 243 - 244 - return 0; 238 + return class_register(&mt_class); 245 239 } 246 240 247 241 subsys_initcall(mips_mt_init);
+64 -70
arch/mips/kernel/pm-cps.c
··· 18 18 #include <asm/mipsmtregs.h> 19 19 #include <asm/pm.h> 20 20 #include <asm/pm-cps.h> 21 + #include <asm/regdef.h> 21 22 #include <asm/smp-cps.h> 22 23 #include <asm/uasm.h> 23 24 ··· 69 68 /* A somewhat arbitrary number of labels & relocs for uasm */ 70 69 static struct uasm_label labels[32]; 71 70 static struct uasm_reloc relocs[32]; 72 - 73 - enum mips_reg { 74 - zero, at, v0, v1, a0, a1, a2, a3, 75 - t0, t1, t2, t3, t4, t5, t6, t7, 76 - s0, s1, s2, s3, s4, s5, s6, s7, 77 - t8, t9, k0, k1, gp, sp, fp, ra, 78 - }; 79 71 80 72 bool cps_pm_support_state(enum cps_pm_state state) 81 73 { ··· 197 203 return; 198 204 199 205 /* Load base address */ 200 - UASM_i_LA(pp, t0, (long)CKSEG0); 206 + UASM_i_LA(pp, GPR_T0, (long)CKSEG0); 201 207 202 208 /* Calculate end address */ 203 209 if (cache_size < 0x8000) 204 - uasm_i_addiu(pp, t1, t0, cache_size); 210 + uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size); 205 211 else 206 - UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); 212 + UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size)); 207 213 208 214 /* Start of cache op loop */ 209 215 uasm_build_label(pl, *pp, lbl); ··· 211 217 /* Generate the cache ops */ 212 218 for (i = 0; i < unroll_lines; i++) { 213 219 if (cpu_has_mips_r6) { 214 - uasm_i_cache(pp, op, 0, t0); 215 - uasm_i_addiu(pp, t0, t0, cache->linesz); 220 + uasm_i_cache(pp, op, 0, GPR_T0); 221 + uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz); 216 222 } else { 217 - uasm_i_cache(pp, op, i * cache->linesz, t0); 223 + uasm_i_cache(pp, op, i * cache->linesz, GPR_T0); 218 224 } 219 225 } 220 226 221 227 if (!cpu_has_mips_r6) 222 228 /* Update the base address */ 223 - uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); 229 + uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz); 224 230 225 231 /* Loop if we haven't reached the end address yet */ 226 - uasm_il_bne(pp, pr, t0, t1, lbl); 232 + uasm_il_bne(pp, pr, GPR_T0, GPR_T1, lbl); 227 233 uasm_i_nop(pp); 228 234 } 229 235 ··· 269 275 */ 270 276 271 277 
/* Preserve perf counter setup */ 272 - uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 273 - uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ 278 + uasm_i_mfc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 279 + uasm_i_mfc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ 274 280 275 281 /* Setup perf counter to count FSB full pipeline stalls */ 276 - uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); 277 - uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 282 + uasm_i_addiu(pp, GPR_T0, GPR_ZERO, (perf_event << 5) | 0xf); 283 + uasm_i_mtc0(pp, GPR_T0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 278 284 uasm_i_ehb(pp); 279 - uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ 285 + uasm_i_mtc0(pp, GPR_ZERO, 25, (perf_counter * 2) + 1); /* PerfCntN */ 280 286 uasm_i_ehb(pp); 281 287 282 288 /* Base address for loads */ 283 - UASM_i_LA(pp, t0, (long)CKSEG0); 289 + UASM_i_LA(pp, GPR_T0, (long)CKSEG0); 284 290 285 291 /* Start of clear loop */ 286 292 uasm_build_label(pl, *pp, lbl); 287 293 288 294 /* Perform some loads to fill the FSB */ 289 295 for (i = 0; i < num_loads; i++) 290 - uasm_i_lw(pp, zero, i * line_size * line_stride, t0); 296 + uasm_i_lw(pp, GPR_ZERO, i * line_size * line_stride, GPR_T0); 291 297 292 298 /* 293 299 * Invalidate the new D-cache entries so that the cache will need ··· 295 301 */ 296 302 for (i = 0; i < num_loads; i++) { 297 303 uasm_i_cache(pp, Hit_Invalidate_D, 298 - i * line_size * line_stride, t0); 304 + i * line_size * line_stride, GPR_T0); 299 305 uasm_i_cache(pp, Hit_Writeback_Inv_SD, 300 - i * line_size * line_stride, t0); 306 + i * line_size * line_stride, GPR_T0); 301 307 } 302 308 303 309 /* Barrier ensuring previous cache invalidates are complete */ ··· 305 311 uasm_i_ehb(pp); 306 312 307 313 /* Check whether the pipeline stalled due to the FSB being full */ 308 - uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ 314 + uasm_i_mfc0(pp, 
GPR_T1, 25, (perf_counter * 2) + 1); /* PerfCntN */ 309 315 310 316 /* Loop if it didn't */ 311 - uasm_il_beqz(pp, pr, t1, lbl); 317 + uasm_il_beqz(pp, pr, GPR_T1, lbl); 312 318 uasm_i_nop(pp); 313 319 314 320 /* Restore perf counter 1. The count may well now be wrong... */ 315 - uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 321 + uasm_i_mtc0(pp, GPR_T2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ 316 322 uasm_i_ehb(pp); 317 - uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ 323 + uasm_i_mtc0(pp, GPR_T3, 25, (perf_counter * 2) + 1); /* PerfCntN */ 318 324 uasm_i_ehb(pp); 319 325 320 326 return 0; ··· 324 330 struct uasm_reloc **pr, 325 331 unsigned r_addr, int lbl) 326 332 { 327 - uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); 333 + uasm_i_lui(pp, GPR_T0, uasm_rel_hi(0x80000000)); 328 334 uasm_build_label(pl, *pp, lbl); 329 - uasm_i_ll(pp, t1, 0, r_addr); 330 - uasm_i_or(pp, t1, t1, t0); 331 - uasm_i_sc(pp, t1, 0, r_addr); 332 - uasm_il_beqz(pp, pr, t1, lbl); 335 + uasm_i_ll(pp, GPR_T1, 0, r_addr); 336 + uasm_i_or(pp, GPR_T1, GPR_T1, GPR_T0); 337 + uasm_i_sc(pp, GPR_T1, 0, r_addr); 338 + uasm_il_beqz(pp, pr, GPR_T1, lbl); 333 339 uasm_i_nop(pp); 334 340 } 335 341 ··· 338 344 struct uasm_label *l = labels; 339 345 struct uasm_reloc *r = relocs; 340 346 u32 *buf, *p; 341 - const unsigned r_online = a0; 342 - const unsigned r_nc_count = a1; 343 - const unsigned r_pcohctl = t7; 347 + const unsigned r_online = GPR_A0; 348 + const unsigned r_nc_count = GPR_A1; 349 + const unsigned r_pcohctl = GPR_T8; 344 350 const unsigned max_instrs = 256; 345 351 unsigned cpc_cmd; 346 352 int err; ··· 377 383 * with the return address placed in v0 to avoid clobbering 378 384 * the ra register before it is saved. 
379 385 */ 380 - UASM_i_LA(&p, t0, (long)mips_cps_pm_save); 381 - uasm_i_jalr(&p, v0, t0); 386 + UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save); 387 + uasm_i_jalr(&p, GPR_V0, GPR_T0); 382 388 uasm_i_nop(&p); 383 389 } 384 390 ··· 393 399 /* Increment ready_count */ 394 400 uasm_i_sync(&p, __SYNC_mb); 395 401 uasm_build_label(&l, p, lbl_incready); 396 - uasm_i_ll(&p, t1, 0, r_nc_count); 397 - uasm_i_addiu(&p, t2, t1, 1); 398 - uasm_i_sc(&p, t2, 0, r_nc_count); 399 - uasm_il_beqz(&p, &r, t2, lbl_incready); 400 - uasm_i_addiu(&p, t1, t1, 1); 402 + uasm_i_ll(&p, GPR_T1, 0, r_nc_count); 403 + uasm_i_addiu(&p, GPR_T2, GPR_T1, 1); 404 + uasm_i_sc(&p, GPR_T2, 0, r_nc_count); 405 + uasm_il_beqz(&p, &r, GPR_T2, lbl_incready); 406 + uasm_i_addiu(&p, GPR_T1, GPR_T1, 1); 401 407 402 408 /* Barrier ensuring all CPUs see the updated r_nc_count value */ 403 409 uasm_i_sync(&p, __SYNC_mb); ··· 406 412 * If this is the last VPE to become ready for non-coherence 407 413 * then it should branch below. 408 414 */ 409 - uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); 415 + uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence); 410 416 uasm_i_nop(&p); 411 417 412 418 if (state < CPS_PM_POWER_GATED) { ··· 416 422 * has been disabled before proceeding, which it will do 417 423 * by polling for the top bit of ready_count being set. 
418 424 */ 419 - uasm_i_addiu(&p, t1, zero, -1); 425 + uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1); 420 426 uasm_build_label(&l, p, lbl_poll_cont); 421 - uasm_i_lw(&p, t0, 0, r_nc_count); 422 - uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); 427 + uasm_i_lw(&p, GPR_T0, 0, r_nc_count); 428 + uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont); 423 429 uasm_i_ehb(&p); 424 430 if (cpu_has_mipsmt) 425 - uasm_i_yield(&p, zero, t1); 431 + uasm_i_yield(&p, GPR_ZERO, GPR_T1); 426 432 uasm_il_b(&p, &r, lbl_poll_cont); 427 433 uasm_i_nop(&p); 428 434 } else { ··· 432 438 */ 433 439 if (cpu_has_mipsmt) { 434 440 /* Halt the VPE via C0 tchalt register */ 435 - uasm_i_addiu(&p, t0, zero, TCHALT_H); 436 - uasm_i_mtc0(&p, t0, 2, 4); 441 + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H); 442 + uasm_i_mtc0(&p, GPR_T0, 2, 4); 437 443 } else if (cpu_has_vp) { 438 444 /* Halt the VP via the CPC VP_STOP register */ 439 445 unsigned int vpe_id; 440 446 441 447 vpe_id = cpu_vpe_id(&cpu_data[cpu]); 442 - uasm_i_addiu(&p, t0, zero, 1 << vpe_id); 443 - UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop()); 444 - uasm_i_sw(&p, t0, 0, t1); 448 + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id); 449 + UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop()); 450 + uasm_i_sw(&p, GPR_T0, 0, GPR_T1); 445 451 } else { 446 452 BUG(); 447 453 } ··· 476 482 * defined by the interAptiv & proAptiv SUMs as ensuring that the 477 483 * operation resulting from the preceding store is complete. 
478 484 */ 479 - uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu])); 480 - uasm_i_sw(&p, t0, 0, r_pcohctl); 481 - uasm_i_lw(&p, t0, 0, r_pcohctl); 485 + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu])); 486 + uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); 487 + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); 482 488 483 489 /* Barrier to ensure write to coherence control is complete */ 484 490 uasm_i_sync(&p, __SYNC_full); ··· 486 492 } 487 493 488 494 /* Disable coherence */ 489 - uasm_i_sw(&p, zero, 0, r_pcohctl); 490 - uasm_i_lw(&p, t0, 0, r_pcohctl); 495 + uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl); 496 + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); 491 497 492 498 if (state >= CPS_PM_CLOCK_GATED) { 493 499 err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], ··· 509 515 } 510 516 511 517 /* Issue the CPC command */ 512 - UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); 513 - uasm_i_addiu(&p, t1, zero, cpc_cmd); 514 - uasm_i_sw(&p, t1, 0, t0); 518 + UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd()); 519 + uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd); 520 + uasm_i_sw(&p, GPR_T1, 0, GPR_T0); 515 521 516 522 if (state == CPS_PM_POWER_GATED) { 517 523 /* If anything goes wrong just hang */ ··· 558 564 * will run this. The first will actually re-enable coherence & the 559 565 * rest will just be performing a rather unusual nop. 560 566 */ 561 - uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3 567 + uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3 562 568 ? 
CM_GCR_Cx_COHERENCE_COHDOMAINEN 563 569 : CM3_GCR_Cx_COHERENCE_COHEN); 564 570 565 - uasm_i_sw(&p, t0, 0, r_pcohctl); 566 - uasm_i_lw(&p, t0, 0, r_pcohctl); 571 + uasm_i_sw(&p, GPR_T0, 0, r_pcohctl); 572 + uasm_i_lw(&p, GPR_T0, 0, r_pcohctl); 567 573 568 574 /* Barrier to ensure write to coherence control is complete */ 569 575 uasm_i_sync(&p, __SYNC_full); ··· 573 579 /* Decrement ready_count */ 574 580 uasm_build_label(&l, p, lbl_decready); 575 581 uasm_i_sync(&p, __SYNC_mb); 576 - uasm_i_ll(&p, t1, 0, r_nc_count); 577 - uasm_i_addiu(&p, t2, t1, -1); 578 - uasm_i_sc(&p, t2, 0, r_nc_count); 579 - uasm_il_beqz(&p, &r, t2, lbl_decready); 580 - uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); 582 + uasm_i_ll(&p, GPR_T1, 0, r_nc_count); 583 + uasm_i_addiu(&p, GPR_T2, GPR_T1, -1); 584 + uasm_i_sc(&p, GPR_T2, 0, r_nc_count); 585 + uasm_il_beqz(&p, &r, GPR_T2, lbl_decready); 586 + uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1); 581 587 582 588 /* Barrier ensuring all CPUs see the updated r_nc_count value */ 583 589 uasm_i_sync(&p, __SYNC_mb); ··· 606 612 } 607 613 608 614 /* The core is coherent, time to return to C code */ 609 - uasm_i_jr(&p, ra); 615 + uasm_i_jr(&p, GPR_RA); 610 616 uasm_i_nop(&p); 611 617 612 618 gen_done:
+4 -4
arch/mips/kernel/rtlx-mt.c
··· 95 95 atomic_set(&channel_wqs[i].in_open, 0); 96 96 mutex_init(&channel_wqs[i].mutex); 97 97 98 - dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, 98 + dev = device_create(&mt_class, NULL, MKDEV(major, i), NULL, 99 99 "%s%d", RTLX_MODULE_NAME, i); 100 100 if (IS_ERR(dev)) { 101 101 while (i--) 102 - device_destroy(mt_class, MKDEV(major, i)); 102 + device_destroy(&mt_class, MKDEV(major, i)); 103 103 104 104 err = PTR_ERR(dev); 105 105 goto out_chrdev; ··· 127 127 128 128 out_class: 129 129 for (i = 0; i < RTLX_CHANNELS; i++) 130 - device_destroy(mt_class, MKDEV(major, i)); 130 + device_destroy(&mt_class, MKDEV(major, i)); 131 131 out_chrdev: 132 132 unregister_chrdev(major, RTLX_MODULE_NAME); 133 133 ··· 139 139 int i; 140 140 141 141 for (i = 0; i < RTLX_CHANNELS; i++) 142 - device_destroy(mt_class, MKDEV(major, i)); 142 + device_destroy(&mt_class, MKDEV(major, i)); 143 143 144 144 unregister_chrdev(major, RTLX_MODULE_NAME); 145 145
+117 -24
arch/mips/kernel/smp-cps.c
··· 7 7 #include <linux/cpu.h> 8 8 #include <linux/delay.h> 9 9 #include <linux/io.h> 10 + #include <linux/memblock.h> 10 11 #include <linux/sched/task_stack.h> 11 12 #include <linux/sched/hotplug.h> 12 13 #include <linux/slab.h> ··· 21 20 #include <asm/mipsregs.h> 22 21 #include <asm/pm-cps.h> 23 22 #include <asm/r4kcache.h> 23 + #include <asm/regdef.h> 24 24 #include <asm/smp.h> 25 25 #include <asm/smp-cps.h> 26 26 #include <asm/time.h> 27 27 #include <asm/uasm.h> 28 28 29 + #define BEV_VEC_SIZE 0x500 30 + #define BEV_VEC_ALIGN 0x1000 31 + 32 + enum label_id { 33 + label_not_nmi = 1, 34 + }; 35 + 36 + UASM_L_LA(_not_nmi) 37 + 29 38 static DECLARE_BITMAP(core_power, NR_CPUS); 39 + static uint32_t core_entry_reg; 40 + static phys_addr_t cps_vec_pa; 30 41 31 42 struct core_boot_config *mips_cps_core_bootcfg; 32 43 ··· 47 34 return min(smp_max_threads, mips_cps_numvps(cluster, core)); 48 35 } 49 36 37 + static void __init *mips_cps_build_core_entry(void *addr) 38 + { 39 + extern void (*nmi_handler)(void); 40 + u32 *p = addr; 41 + u32 val; 42 + struct uasm_label labels[2]; 43 + struct uasm_reloc relocs[2]; 44 + struct uasm_label *l = labels; 45 + struct uasm_reloc *r = relocs; 46 + 47 + memset(labels, 0, sizeof(labels)); 48 + memset(relocs, 0, sizeof(relocs)); 49 + 50 + uasm_i_mfc0(&p, GPR_K0, C0_STATUS); 51 + UASM_i_LA(&p, GPR_T9, ST0_NMI); 52 + uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9); 53 + 54 + uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi); 55 + uasm_i_nop(&p); 56 + UASM_i_LA(&p, GPR_K0, (long)&nmi_handler); 57 + 58 + uasm_l_not_nmi(&l, p); 59 + 60 + val = CAUSEF_IV; 61 + uasm_i_lui(&p, GPR_K0, val >> 16); 62 + uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff); 63 + uasm_i_mtc0(&p, GPR_K0, C0_CAUSE); 64 + val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64; 65 + uasm_i_lui(&p, GPR_K0, val >> 16); 66 + uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff); 67 + uasm_i_mtc0(&p, GPR_K0, C0_STATUS); 68 + uasm_i_ehb(&p); 69 + uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK); 70 + 
UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base); 71 + #if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT) 72 + UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot))); 73 + #else 74 + UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot))); 75 + #endif 76 + uasm_i_jr(&p, GPR_T9); 77 + uasm_i_nop(&p); 78 + 79 + uasm_resolve_relocs(relocs, labels); 80 + 81 + return p; 82 + } 83 + 84 + static int __init allocate_cps_vecs(void) 85 + { 86 + /* Try to allocate in KSEG1 first */ 87 + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 88 + 0x0, CSEGX_SIZE - 1); 89 + 90 + if (cps_vec_pa) 91 + core_entry_reg = CKSEG1ADDR(cps_vec_pa) & 92 + CM_GCR_Cx_RESET_BASE_BEVEXCBASE; 93 + 94 + if (!cps_vec_pa && mips_cm_is64) { 95 + cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN, 96 + 0x0, SZ_4G - 1); 97 + if (cps_vec_pa) 98 + core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) | 99 + CM_GCR_Cx_RESET_BASE_MODE; 100 + } 101 + 102 + if (!cps_vec_pa) 103 + return -ENOMEM; 104 + 105 + return 0; 106 + } 107 + 108 + static void __init setup_cps_vecs(void) 109 + { 110 + void *cps_vec; 111 + 112 + cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa); 113 + mips_cps_build_core_entry(cps_vec); 114 + 115 + memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80); 116 + memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80); 117 + memcpy(cps_vec + 0x300, &excep_cache, 0x80); 118 + memcpy(cps_vec + 0x380, &excep_genex, 0x80); 119 + memcpy(cps_vec + 0x400, &excep_intex, 0x80); 120 + memcpy(cps_vec + 0x480, &excep_ejtag, 0x80); 121 + 122 + /* Make sure no prefetched data in cache */ 123 + blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE); 124 + bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE); 125 + __sync(); 126 + } 127 + 50 128 static void __init cps_smp_setup(void) 51 129 { 52 130 unsigned int nclusters, ncores, nvpes, core_vpes; 53 - unsigned long core_entry; 54 131 int cl, c, v; 55 132 56 133 
/* Detect & record VPE topology */ ··· 197 94 /* Make core 0 coherent with everything */ 198 95 write_gcr_cl_coherence(0xff); 199 96 200 - if (mips_cm_revision() >= CM_REV_CM3) { 201 - core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); 202 - write_gcr_bev_base(core_entry); 203 - } 97 + if (allocate_cps_vecs()) 98 + pr_err("Failed to allocate CPS vectors\n"); 99 + 100 + if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3) 101 + write_gcr_bev_base(core_entry_reg); 204 102 205 103 #ifdef CONFIG_MIPS_MT_FPAFF 206 104 /* If we have an FPU, enroll ourselves in the FPU-full mask */ ··· 214 110 { 215 111 unsigned ncores, core_vpes, c, cca; 216 112 bool cca_unsuitable, cores_limited; 217 - u32 *entry_code; 218 113 219 114 mips_mt_set_cpuoptions(); 115 + 116 + if (!core_entry_reg) { 117 + pr_err("core_entry address unsuitable, disabling smp-cps\n"); 118 + goto err_out; 119 + } 220 120 221 121 /* Detect whether the CCA is unsuited to multi-core SMP */ 222 122 cca = read_c0_config() & CONF_CM_CMASK; ··· 253 145 (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", 254 146 cpu_has_dc_aliases ? 
"dcache aliasing" : ""); 255 147 256 - /* 257 - * Patch the start of mips_cps_core_entry to provide: 258 - * 259 - * s0 = kseg0 CCA 260 - */ 261 - entry_code = (u32 *)&mips_cps_core_entry; 262 - uasm_i_addiu(&entry_code, 16, 0, cca); 263 - UASM_i_LA(&entry_code, 17, (long)mips_gcr_base); 264 - BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end); 265 - blast_dcache_range((unsigned long)&mips_cps_core_entry, 266 - (unsigned long)entry_code); 267 - bc_wback_inv((unsigned long)&mips_cps_core_entry, 268 - (void *)entry_code - (void *)&mips_cps_core_entry); 269 - __sync(); 148 + setup_cps_vecs(); 270 149 271 150 /* Allocate core boot configuration structs */ 272 151 ncores = mips_cps_numcores(0); ··· 308 213 mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 309 214 310 215 /* Set its reset vector */ 311 - write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); 216 + write_gcr_co_reset_base(core_entry_reg); 312 217 313 218 /* Ensure its coherency is disabled */ 314 219 write_gcr_co_coherence(0); ··· 385 290 unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); 386 291 struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; 387 292 struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; 388 - unsigned long core_entry; 389 293 unsigned int remote; 390 294 int err; 391 295 ··· 408 314 409 315 if (cpu_has_vp) { 410 316 mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); 411 - core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); 412 - write_gcr_co_reset_base(core_entry); 317 + write_gcr_co_reset_base(core_entry_reg); 413 318 mips_cm_unlock_other(); 414 319 } 415 320
+8 -5
arch/mips/kernel/traps.c
··· 58 58 #include <asm/module.h> 59 59 #include <asm/msa.h> 60 60 #include <asm/ptrace.h> 61 + #include <asm/regdef.h> 61 62 #include <asm/sections.h> 62 63 #include <asm/siginfo.h> 63 64 #include <asm/tlbdebug.h> ··· 2042 2041 unsigned long jump_mask = ~((1 << 28) - 1); 2043 2042 #endif 2044 2043 u32 *buf = (u32 *)(ebase + 0x200); 2045 - unsigned int k0 = 26; 2046 2044 if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { 2047 2045 uasm_i_j(&buf, handler & ~jump_mask); 2048 2046 uasm_i_nop(&buf); 2049 2047 } else { 2050 - UASM_i_LA(&buf, k0, handler); 2051 - uasm_i_jr(&buf, k0); 2048 + UASM_i_LA(&buf, GPR_K0, handler); 2049 + uasm_i_jr(&buf, GPR_K0); 2052 2050 uasm_i_nop(&buf); 2053 2051 } 2054 2052 local_flush_icache_range(ebase + 0x200, (unsigned long)buf); ··· 2299 2299 void set_uncached_handler(unsigned long offset, void *addr, 2300 2300 unsigned long size) 2301 2301 { 2302 - unsigned long uncached_ebase = CKSEG1ADDR(ebase); 2302 + unsigned long uncached_ebase = CKSEG1ADDR_OR_64BIT(__pa(ebase)); 2303 2303 2304 2304 if (!addr) 2305 2305 panic(panic_null_cerr); ··· 2351 2351 * EVA is special though as it allows segments to be rearranged 2352 2352 * and to become uncached during cache error handling. 2353 2353 */ 2354 - if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000)) 2354 + if (!IS_ENABLED(CONFIG_EVA) && ebase_pa < 0x20000000) 2355 2355 ebase = CKSEG0ADDR(ebase_pa); 2356 2356 else 2357 2357 ebase = (unsigned long)phys_to_virt(ebase_pa); 2358 + if (ebase_pa >= 0x20000000) 2359 + pr_warn("ebase(%pa) should better be in KSeg0", 2360 + &ebase_pa); 2358 2361 } 2359 2362 2360 2363 if (cpu_has_mmips) {
+2 -2
arch/mips/kernel/vpe-mt.c
··· 95 95 * We don't pass the memsize here, so VPE programs need to be 96 96 * compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined. 97 97 */ 98 - mttgpr(7, 0); 99 - mttgpr(6, v->ntcs); 98 + mttgpr($7, 0); 99 + mttgpr($6, v->ntcs); 100 100 101 101 /* set up VPE1 */ 102 102 /*
+189 -242
arch/mips/kvm/entry.c
··· 13 13 14 14 #include <linux/kvm_host.h> 15 15 #include <linux/log2.h> 16 + #include <asm/mipsregs.h> 16 17 #include <asm/mmu_context.h> 17 18 #include <asm/msa.h> 19 + #include <asm/regdef.h> 18 20 #include <asm/setup.h> 19 21 #include <asm/tlbex.h> 20 22 #include <asm/uasm.h> 21 23 22 - /* Register names */ 23 - #define ZERO 0 24 - #define AT 1 25 - #define V0 2 26 - #define V1 3 27 - #define A0 4 28 - #define A1 5 29 - 30 - #if _MIPS_SIM == _MIPS_SIM_ABI32 31 - #define T0 8 32 - #define T1 9 33 - #define T2 10 34 - #define T3 11 35 - #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 36 - 37 - #if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 38 - #define T0 12 39 - #define T1 13 40 - #define T2 14 41 - #define T3 15 42 - #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ 43 - 44 - #define S0 16 45 - #define S1 17 46 - #define T9 25 47 - #define K0 26 48 - #define K1 27 49 - #define GP 28 50 - #define SP 29 51 - #define RA 31 52 - 53 - /* Some CP0 registers */ 54 - #define C0_PWBASE 5, 5 55 - #define C0_HWRENA 7, 0 56 - #define C0_BADVADDR 8, 0 57 - #define C0_BADINSTR 8, 1 58 - #define C0_BADINSTRP 8, 2 59 - #define C0_PGD 9, 7 60 - #define C0_ENTRYHI 10, 0 61 - #define C0_GUESTCTL1 10, 4 62 - #define C0_STATUS 12, 0 63 - #define C0_GUESTCTL0 12, 6 64 - #define C0_CAUSE 13, 0 65 - #define C0_EPC 14, 0 66 - #define C0_EBASE 15, 1 67 - #define C0_CONFIG5 16, 5 68 - #define C0_DDATA_LO 28, 3 69 - #define C0_ERROREPC 30, 0 70 - 71 24 #define CALLFRAME_SIZ 32 72 25 73 - #ifdef CONFIG_64BIT 74 - #define ST0_KX_IF_64 ST0_KX 75 - #else 76 - #define ST0_KX_IF_64 0 77 - #endif 78 - 79 - static unsigned int scratch_vcpu[2] = { C0_DDATA_LO }; 26 + static unsigned int scratch_vcpu[2] = { C0_DDATALO }; 80 27 static unsigned int scratch_tmp[2] = { C0_ERROREPC }; 81 28 82 29 enum label_id { ··· 159 212 unsigned int i; 160 213 161 214 /* 162 - * A0: vcpu 215 + * GPR_A0: vcpu 163 216 */ 164 217 165 218 /* k0/k1 not being used in host kernel 
context */ 166 - UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs)); 219 + UASM_i_ADDIU(&p, GPR_K1, GPR_SP, -(int)sizeof(struct pt_regs)); 167 220 for (i = 16; i < 32; ++i) { 168 221 if (i == 24) 169 222 i = 28; 170 - UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1); 223 + UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1); 171 224 } 172 225 173 226 /* Save host status */ 174 - uasm_i_mfc0(&p, V0, C0_STATUS); 175 - UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1); 227 + uasm_i_mfc0(&p, GPR_V0, C0_STATUS); 228 + UASM_i_SW(&p, GPR_V0, offsetof(struct pt_regs, cp0_status), GPR_K1); 176 229 177 230 /* Save scratch registers, will be used to store pointer to vcpu etc */ 178 - kvm_mips_build_save_scratch(&p, V1, K1); 231 + kvm_mips_build_save_scratch(&p, GPR_V1, GPR_K1); 179 232 180 233 /* VCPU scratch register has pointer to vcpu */ 181 - UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]); 234 + UASM_i_MTC0(&p, GPR_A0, scratch_vcpu[0], scratch_vcpu[1]); 182 235 183 236 /* Offset into vcpu->arch */ 184 - UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch)); 237 + UASM_i_ADDIU(&p, GPR_K1, GPR_A0, offsetof(struct kvm_vcpu, arch)); 185 238 186 239 /* 187 240 * Save the host stack to VCPU, used for exception processing 188 241 * when we exit from the Guest 189 242 */ 190 - UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1); 243 + UASM_i_SW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1); 191 244 192 245 /* Save the kernel gp as well */ 193 - UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1); 246 + UASM_i_SW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1); 194 247 195 248 /* 196 249 * Setup status register for running the guest in UM, interrupts 197 250 * are disabled 198 251 */ 199 - UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64); 200 - uasm_i_mtc0(&p, K0, C0_STATUS); 252 + UASM_i_LA(&p, GPR_K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64); 253 + uasm_i_mtc0(&p, 
GPR_K0, C0_STATUS); 201 254 uasm_i_ehb(&p); 202 255 203 256 /* load up the new EBASE */ 204 - UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); 205 - build_set_exc_base(&p, K0); 257 + UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1); 258 + build_set_exc_base(&p, GPR_K0); 206 259 207 260 /* 208 261 * Now that the new EBASE has been loaded, unset BEV, set 209 262 * interrupt mask as it was but make sure that timer interrupts 210 263 * are enabled 211 264 */ 212 - uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64); 213 - uasm_i_andi(&p, V0, V0, ST0_IM); 214 - uasm_i_or(&p, K0, K0, V0); 215 - uasm_i_mtc0(&p, K0, C0_STATUS); 265 + uasm_i_addiu(&p, GPR_K0, GPR_ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64); 266 + uasm_i_andi(&p, GPR_V0, GPR_V0, ST0_IM); 267 + uasm_i_or(&p, GPR_K0, GPR_K0, GPR_V0); 268 + uasm_i_mtc0(&p, GPR_K0, C0_STATUS); 216 269 uasm_i_ehb(&p); 217 270 218 271 p = kvm_mips_build_enter_guest(p); ··· 243 296 memset(relocs, 0, sizeof(relocs)); 244 297 245 298 /* Set Guest EPC */ 246 - UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1); 247 - UASM_i_MTC0(&p, T0, C0_EPC); 299 + UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1); 300 + UASM_i_MTC0(&p, GPR_T0, C0_EPC); 248 301 249 302 /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */ 250 303 if (cpu_has_ldpte) 251 - UASM_i_MFC0(&p, K0, C0_PWBASE); 304 + UASM_i_MFC0(&p, GPR_K0, C0_PWBASE); 252 305 else 253 - UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg); 254 - UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1); 306 + UASM_i_MFC0(&p, GPR_K0, c0_kscratch(), pgd_reg); 307 + UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1); 255 308 256 309 /* 257 310 * Set up KVM GPA pgd. ··· 259 312 * - call tlbmiss_handler_setup_pgd(mm->pgd) 260 313 * - write mm->pgd into CP0_PWBase 261 314 * 262 - * We keep S0 pointing at struct kvm so we can load the ASID below. 
315 + * We keep GPR_S0 pointing at struct kvm so we can load the ASID below. 263 316 */ 264 - UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) - 265 - (int)offsetof(struct kvm_vcpu, arch), K1); 266 - UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0); 267 - UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); 268 - uasm_i_jalr(&p, RA, T9); 317 + UASM_i_LW(&p, GPR_S0, (int)offsetof(struct kvm_vcpu, kvm) - 318 + (int)offsetof(struct kvm_vcpu, arch), GPR_K1); 319 + UASM_i_LW(&p, GPR_A0, offsetof(struct kvm, arch.gpa_mm.pgd), GPR_S0); 320 + UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd); 321 + uasm_i_jalr(&p, GPR_RA, GPR_T9); 269 322 /* delay slot */ 270 323 if (cpu_has_htw) 271 - UASM_i_MTC0(&p, A0, C0_PWBASE); 324 + UASM_i_MTC0(&p, GPR_A0, C0_PWBASE); 272 325 else 273 326 uasm_i_nop(&p); 274 327 275 328 /* Set GM bit to setup eret to VZ guest context */ 276 - uasm_i_addiu(&p, V1, ZERO, 1); 277 - uasm_i_mfc0(&p, K0, C0_GUESTCTL0); 278 - uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1); 279 - uasm_i_mtc0(&p, K0, C0_GUESTCTL0); 329 + uasm_i_addiu(&p, GPR_V1, GPR_ZERO, 1); 330 + uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0); 331 + uasm_i_ins(&p, GPR_K0, GPR_V1, MIPS_GCTL0_GM_SHIFT, 1); 332 + uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0); 280 333 281 334 if (cpu_has_guestid) { 282 335 /* ··· 285 338 */ 286 339 287 340 /* Get current GuestID */ 288 - uasm_i_mfc0(&p, T0, C0_GUESTCTL1); 341 + uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1); 289 342 /* Set GuestCtl1.RID = GuestCtl1.ID */ 290 - uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT, 343 + uasm_i_ext(&p, GPR_T1, GPR_T0, MIPS_GCTL1_ID_SHIFT, 291 344 MIPS_GCTL1_ID_WIDTH); 292 - uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT, 345 + uasm_i_ins(&p, GPR_T0, GPR_T1, MIPS_GCTL1_RID_SHIFT, 293 346 MIPS_GCTL1_RID_WIDTH); 294 - uasm_i_mtc0(&p, T0, C0_GUESTCTL1); 347 + uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1); 295 348 296 349 /* GuestID handles dealiasing so we don't need to touch ASID */ 297 350 goto skip_asid_restore; ··· 300 
353 /* Root ASID Dealias (RAD) */ 301 354 302 355 /* Save host ASID */ 303 - UASM_i_MFC0(&p, K0, C0_ENTRYHI); 304 - UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi), 305 - K1); 356 + UASM_i_MFC0(&p, GPR_K0, C0_ENTRYHI); 357 + UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi), 358 + GPR_K1); 306 359 307 360 /* Set the root ASID for the Guest */ 308 - UASM_i_ADDIU(&p, T1, S0, 361 + UASM_i_ADDIU(&p, GPR_T1, GPR_S0, 309 362 offsetof(struct kvm, arch.gpa_mm.context.asid)); 310 363 311 364 /* t1: contains the base of the ASID array, need to get the cpu id */ 312 365 /* smp_processor_id */ 313 - uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP); 366 + uasm_i_lw(&p, GPR_T2, offsetof(struct thread_info, cpu), GPR_GP); 314 367 /* index the ASID array */ 315 - uasm_i_sll(&p, T2, T2, ilog2(sizeof(long))); 316 - UASM_i_ADDU(&p, T3, T1, T2); 317 - UASM_i_LW(&p, K0, 0, T3); 368 + uasm_i_sll(&p, GPR_T2, GPR_T2, ilog2(sizeof(long))); 369 + UASM_i_ADDU(&p, GPR_T3, GPR_T1, GPR_T2); 370 + UASM_i_LW(&p, GPR_K0, 0, GPR_T3); 318 371 #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE 319 372 /* 320 373 * reuse ASID array offset 321 374 * cpuinfo_mips is a multiple of sizeof(long) 322 375 */ 323 - uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long)); 324 - uasm_i_mul(&p, T2, T2, T3); 376 + uasm_i_addiu(&p, GPR_T3, GPR_ZERO, sizeof(struct cpuinfo_mips)/sizeof(long)); 377 + uasm_i_mul(&p, GPR_T2, GPR_T2, GPR_T3); 325 378 326 - UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask); 327 - UASM_i_ADDU(&p, AT, AT, T2); 328 - UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT); 329 - uasm_i_and(&p, K0, K0, T2); 379 + UASM_i_LA_mostly(&p, GPR_AT, (long)&cpu_data[0].asid_mask); 380 + UASM_i_ADDU(&p, GPR_AT, GPR_AT, GPR_T2); 381 + UASM_i_LW(&p, GPR_T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), GPR_AT); 382 + uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T2); 330 383 #else 331 - uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); 384 + uasm_i_andi(&p, GPR_K0, 
GPR_K0, MIPS_ENTRYHI_ASID); 332 385 #endif 333 386 334 387 /* Set up KVM VZ root ASID (!guestid) */ 335 - uasm_i_mtc0(&p, K0, C0_ENTRYHI); 388 + uasm_i_mtc0(&p, GPR_K0, C0_ENTRYHI); 336 389 skip_asid_restore: 337 390 uasm_i_ehb(&p); 338 391 339 392 /* Disable RDHWR access */ 340 - uasm_i_mtc0(&p, ZERO, C0_HWRENA); 393 + uasm_i_mtc0(&p, GPR_ZERO, C0_HWRENA); 341 394 342 395 /* load the guest context from VCPU and return */ 343 396 for (i = 1; i < 32; ++i) { 344 397 /* Guest k0/k1 loaded later */ 345 - if (i == K0 || i == K1) 398 + if (i == GPR_K0 || i == GPR_K1) 346 399 continue; 347 - UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); 400 + UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1); 348 401 } 349 402 350 403 #ifndef CONFIG_CPU_MIPSR6 351 404 /* Restore hi/lo */ 352 - UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1); 353 - uasm_i_mthi(&p, K0); 405 + UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1); 406 + uasm_i_mthi(&p, GPR_K0); 354 407 355 - UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1); 356 - uasm_i_mtlo(&p, K0); 408 + UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1); 409 + uasm_i_mtlo(&p, GPR_K0); 357 410 #endif 358 411 359 412 /* Restore the guest's k0/k1 registers */ 360 - UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); 361 - UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1); 413 + UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1); 414 + UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1); 362 415 363 416 /* Jump to guest */ 364 417 uasm_i_eret(&p); ··· 391 444 memset(relocs, 0, sizeof(relocs)); 392 445 393 446 /* Save guest k1 into scratch register */ 394 - UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]); 447 + UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]); 395 448 396 449 /* Get the VCPU pointer from the VCPU scratch register */ 397 - UASM_i_MFC0(&p, K1, scratch_vcpu[0], 
scratch_vcpu[1]); 450 + UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]); 398 451 399 452 /* Save guest k0 into VCPU structure */ 400 - UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1); 453 + UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1); 401 454 402 455 /* 403 456 * Some of the common tlbex code uses current_cpu_type(). For KVM we ··· 406 459 preempt_disable(); 407 460 408 461 #ifdef CONFIG_CPU_LOONGSON64 409 - UASM_i_MFC0(&p, K1, C0_PGD); 410 - uasm_i_lddir(&p, K0, K1, 3); /* global page dir */ 462 + UASM_i_MFC0(&p, GPR_K1, C0_PGD); 463 + uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */ 411 464 #ifndef __PAGETABLE_PMD_FOLDED 412 - uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */ 465 + uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */ 413 466 #endif 414 - uasm_i_ldpte(&p, K1, 0); /* even */ 415 - uasm_i_ldpte(&p, K1, 1); /* odd */ 467 + uasm_i_ldpte(&p, GPR_K1, 0); /* even */ 468 + uasm_i_ldpte(&p, GPR_K1, 1); /* odd */ 416 469 uasm_i_tlbwr(&p); 417 470 #else 418 471 /* ··· 427 480 */ 428 481 429 482 #ifdef CONFIG_64BIT 430 - build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ 483 + build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */ 431 484 #else 432 - build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 485 + build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */ 433 486 #endif 434 487 435 488 /* we don't support huge pages yet */ 436 489 437 - build_get_ptep(&p, K0, K1); 438 - build_update_entries(&p, K0, K1); 490 + build_get_ptep(&p, GPR_K0, GPR_K1); 491 + build_update_entries(&p, GPR_K0, GPR_K1); 439 492 build_tlb_write_entry(&p, &l, &r, tlb_random); 440 493 #endif 441 494 442 495 preempt_enable(); 443 496 444 497 /* Get the VCPU pointer from the VCPU scratch register again */ 445 - UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]); 498 + UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]); 446 499 447 500 /* Restore the guest's k0/k1 registers */ 
448 - UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1); 501 + UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu, arch.gprs[GPR_K0]), GPR_K1); 449 502 uasm_i_ehb(&p); 450 - UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]); 503 + UASM_i_MFC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]); 451 504 452 505 /* Jump to guest */ 453 506 uasm_i_eret(&p); ··· 477 530 memset(relocs, 0, sizeof(relocs)); 478 531 479 532 /* Save guest k1 into scratch register */ 480 - UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]); 533 + UASM_i_MTC0(&p, GPR_K1, scratch_tmp[0], scratch_tmp[1]); 481 534 482 535 /* Get the VCPU pointer from the VCPU scratch register */ 483 - UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]); 484 - UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch)); 536 + UASM_i_MFC0(&p, GPR_K1, scratch_vcpu[0], scratch_vcpu[1]); 537 + UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch)); 485 538 486 539 /* Save guest k0 into VCPU structure */ 487 - UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); 540 + UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K0]), GPR_K1); 488 541 489 542 /* Branch to the common handler */ 490 543 uasm_il_b(&p, &r, label_exit_common); ··· 532 585 /* Start saving Guest context to VCPU */ 533 586 for (i = 0; i < 32; ++i) { 534 587 /* Guest k0/k1 saved later */ 535 - if (i == K0 || i == K1) 588 + if (i == GPR_K0 || i == GPR_K1) 536 589 continue; 537 - UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); 590 + UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), GPR_K1); 538 591 } 539 592 540 593 #ifndef CONFIG_CPU_MIPSR6 541 594 /* We need to save hi/lo and restore them on the way out */ 542 - uasm_i_mfhi(&p, T0); 543 - UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1); 595 + uasm_i_mfhi(&p, GPR_T0); 596 + UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, hi), GPR_K1); 544 597 545 - uasm_i_mflo(&p, T0); 546 - UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), 
K1); 598 + uasm_i_mflo(&p, GPR_T0); 599 + UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, lo), GPR_K1); 547 600 #endif 548 601 549 602 /* Finally save guest k1 to VCPU */ 550 603 uasm_i_ehb(&p); 551 - UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]); 552 - UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1); 604 + UASM_i_MFC0(&p, GPR_T0, scratch_tmp[0], scratch_tmp[1]); 605 + UASM_i_SW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, gprs[GPR_K1]), GPR_K1); 553 606 554 607 /* Now that context has been saved, we can use other registers */ 555 608 556 609 /* Restore vcpu */ 557 - UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]); 610 + UASM_i_MFC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]); 558 611 559 612 /* 560 613 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process 561 614 * the exception 562 615 */ 563 - UASM_i_MFC0(&p, K0, C0_EPC); 564 - UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1); 616 + UASM_i_MFC0(&p, GPR_K0, C0_EPC); 617 + UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, pc), GPR_K1); 565 618 566 - UASM_i_MFC0(&p, K0, C0_BADVADDR); 567 - UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr), 568 - K1); 619 + UASM_i_MFC0(&p, GPR_K0, C0_BADVADDR); 620 + UASM_i_SW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr), 621 + GPR_K1); 569 622 570 - uasm_i_mfc0(&p, K0, C0_CAUSE); 571 - uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1); 623 + uasm_i_mfc0(&p, GPR_K0, C0_CAUSE); 624 + uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), GPR_K1); 572 625 573 626 if (cpu_has_badinstr) { 574 - uasm_i_mfc0(&p, K0, C0_BADINSTR); 575 - uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, 576 - host_cp0_badinstr), K1); 627 + uasm_i_mfc0(&p, GPR_K0, C0_BADINSTR); 628 + uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, 629 + host_cp0_badinstr), GPR_K1); 577 630 } 578 631 579 632 if (cpu_has_badinstrp) { 580 - uasm_i_mfc0(&p, K0, C0_BADINSTRP); 581 - uasm_i_sw(&p, 
K0, offsetof(struct kvm_vcpu_arch, 582 - host_cp0_badinstrp), K1); 633 + uasm_i_mfc0(&p, GPR_K0, C0_BADINSTRP); 634 + uasm_i_sw(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, 635 + host_cp0_badinstrp), GPR_K1); 583 636 } 584 637 585 638 /* Now restore the host state just enough to run the handlers */ 586 639 587 640 /* Switch EBASE to the one used by Linux */ 588 641 /* load up the host EBASE */ 589 - uasm_i_mfc0(&p, V0, C0_STATUS); 642 + uasm_i_mfc0(&p, GPR_V0, C0_STATUS); 590 643 591 - uasm_i_lui(&p, AT, ST0_BEV >> 16); 592 - uasm_i_or(&p, K0, V0, AT); 644 + uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16); 645 + uasm_i_or(&p, GPR_K0, GPR_V0, GPR_AT); 593 646 594 - uasm_i_mtc0(&p, K0, C0_STATUS); 647 + uasm_i_mtc0(&p, GPR_K0, C0_STATUS); 595 648 uasm_i_ehb(&p); 596 649 597 - UASM_i_LA_mostly(&p, K0, (long)&ebase); 598 - UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0); 599 - build_set_exc_base(&p, K0); 650 + UASM_i_LA_mostly(&p, GPR_K0, (long)&ebase); 651 + UASM_i_LW(&p, GPR_K0, uasm_rel_lo((long)&ebase), GPR_K0); 652 + build_set_exc_base(&p, GPR_K0); 600 653 601 654 if (raw_cpu_has_fpu) { 602 655 /* 603 656 * If FPU is enabled, save FCR31 and clear it so that later 604 657 * ctc1's don't trigger FPE for pending exceptions. 
605 658 */ 606 - uasm_i_lui(&p, AT, ST0_CU1 >> 16); 607 - uasm_i_and(&p, V1, V0, AT); 608 - uasm_il_beqz(&p, &r, V1, label_fpu_1); 659 + uasm_i_lui(&p, GPR_AT, ST0_CU1 >> 16); 660 + uasm_i_and(&p, GPR_V1, GPR_V0, GPR_AT); 661 + uasm_il_beqz(&p, &r, GPR_V1, label_fpu_1); 609 662 uasm_i_nop(&p); 610 - uasm_i_cfc1(&p, T0, 31); 611 - uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31), 612 - K1); 613 - uasm_i_ctc1(&p, ZERO, 31); 663 + uasm_i_cfc1(&p, GPR_T0, 31); 664 + uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31), 665 + GPR_K1); 666 + uasm_i_ctc1(&p, GPR_ZERO, 31); 614 667 uasm_l_fpu_1(&l, p); 615 668 } 616 669 ··· 619 672 * If MSA is enabled, save MSACSR and clear it so that later 620 673 * instructions don't trigger MSAFPE for pending exceptions. 621 674 */ 622 - uasm_i_mfc0(&p, T0, C0_CONFIG5); 623 - uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */ 624 - uasm_il_beqz(&p, &r, T0, label_msa_1); 675 + uasm_i_mfc0(&p, GPR_T0, C0_CONFIG5); 676 + uasm_i_ext(&p, GPR_T0, GPR_T0, 27, 1); /* MIPS_CONF5_MSAEN */ 677 + uasm_il_beqz(&p, &r, GPR_T0, label_msa_1); 625 678 uasm_i_nop(&p); 626 - uasm_i_cfcmsa(&p, T0, MSA_CSR); 627 - uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr), 628 - K1); 629 - uasm_i_ctcmsa(&p, MSA_CSR, ZERO); 679 + uasm_i_cfcmsa(&p, GPR_T0, MSA_CSR); 680 + uasm_i_sw(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr), 681 + GPR_K1); 682 + uasm_i_ctcmsa(&p, MSA_CSR, GPR_ZERO); 630 683 uasm_l_msa_1(&l, p); 631 684 } 632 685 633 686 /* Restore host ASID */ 634 687 if (!cpu_has_guestid) { 635 - UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi), 636 - K1); 637 - UASM_i_MTC0(&p, K0, C0_ENTRYHI); 688 + UASM_i_LW(&p, GPR_K0, offsetof(struct kvm_vcpu_arch, host_entryhi), 689 + GPR_K1); 690 + UASM_i_MTC0(&p, GPR_K0, C0_ENTRYHI); 638 691 } 639 692 640 693 /* ··· 643 696 * - call tlbmiss_handler_setup_pgd(mm->pgd) 644 697 * - write mm->pgd into CP0_PWBase 645 698 */ 646 - UASM_i_LW(&p, A0, 647 - offsetof(struct 
kvm_vcpu_arch, host_pgd), K1); 648 - UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); 649 - uasm_i_jalr(&p, RA, T9); 699 + UASM_i_LW(&p, GPR_A0, 700 + offsetof(struct kvm_vcpu_arch, host_pgd), GPR_K1); 701 + UASM_i_LA(&p, GPR_T9, (unsigned long)tlbmiss_handler_setup_pgd); 702 + uasm_i_jalr(&p, GPR_RA, GPR_T9); 650 703 /* delay slot */ 651 704 if (cpu_has_htw) 652 - UASM_i_MTC0(&p, A0, C0_PWBASE); 705 + UASM_i_MTC0(&p, GPR_A0, C0_PWBASE); 653 706 else 654 707 uasm_i_nop(&p); 655 708 656 709 /* Clear GM bit so we don't enter guest mode when EXL is cleared */ 657 - uasm_i_mfc0(&p, K0, C0_GUESTCTL0); 658 - uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1); 659 - uasm_i_mtc0(&p, K0, C0_GUESTCTL0); 710 + uasm_i_mfc0(&p, GPR_K0, C0_GUESTCTL0); 711 + uasm_i_ins(&p, GPR_K0, GPR_ZERO, MIPS_GCTL0_GM_SHIFT, 1); 712 + uasm_i_mtc0(&p, GPR_K0, C0_GUESTCTL0); 660 713 661 714 /* Save GuestCtl0 so we can access GExcCode after CPU migration */ 662 - uasm_i_sw(&p, K0, 663 - offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1); 715 + uasm_i_sw(&p, GPR_K0, 716 + offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), GPR_K1); 664 717 665 718 if (cpu_has_guestid) { 666 719 /* 667 720 * Clear root mode GuestID, so that root TLB operations use the 668 721 * root GuestID in the root TLB. 669 722 */ 670 - uasm_i_mfc0(&p, T0, C0_GUESTCTL1); 723 + uasm_i_mfc0(&p, GPR_T0, C0_GUESTCTL1); 671 724 /* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 
0) */ 672 - uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT, 725 + uasm_i_ins(&p, GPR_T0, GPR_ZERO, MIPS_GCTL1_RID_SHIFT, 673 726 MIPS_GCTL1_RID_WIDTH); 674 - uasm_i_mtc0(&p, T0, C0_GUESTCTL1); 727 + uasm_i_mtc0(&p, GPR_T0, C0_GUESTCTL1); 675 728 } 676 729 677 730 /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ 678 - uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE)); 679 - uasm_i_and(&p, V0, V0, AT); 680 - uasm_i_lui(&p, AT, ST0_CU0 >> 16); 681 - uasm_i_or(&p, V0, V0, AT); 731 + uasm_i_addiu(&p, GPR_AT, GPR_ZERO, ~(ST0_EXL | KSU_USER | ST0_IE)); 732 + uasm_i_and(&p, GPR_V0, GPR_V0, GPR_AT); 733 + uasm_i_lui(&p, GPR_AT, ST0_CU0 >> 16); 734 + uasm_i_or(&p, GPR_V0, GPR_V0, GPR_AT); 682 735 #ifdef CONFIG_64BIT 683 - uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX); 736 + uasm_i_ori(&p, GPR_V0, GPR_V0, ST0_SX | ST0_UX); 684 737 #endif 685 - uasm_i_mtc0(&p, V0, C0_STATUS); 738 + uasm_i_mtc0(&p, GPR_V0, C0_STATUS); 686 739 uasm_i_ehb(&p); 687 740 688 - /* Load up host GP */ 689 - UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1); 741 + /* Load up host GPR_GP */ 742 + UASM_i_LW(&p, GPR_GP, offsetof(struct kvm_vcpu_arch, host_gp), GPR_K1); 690 743 691 744 /* Need a stack before we can jump to "C" */ 692 - UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1); 745 + UASM_i_LW(&p, GPR_SP, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1); 693 746 694 747 /* Saved host state */ 695 - UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs)); 748 + UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -(int)sizeof(struct pt_regs)); 696 749 697 750 /* 698 751 * XXXKYMA do we need to load the host ASID, maybe not because the ··· 700 753 */ 701 754 702 755 /* Restore host scratch registers, as we'll have clobbered them */ 703 - kvm_mips_build_restore_scratch(&p, K0, SP); 756 + kvm_mips_build_restore_scratch(&p, GPR_K0, GPR_SP); 704 757 705 758 /* Restore RDHWR access */ 706 - UASM_i_LA_mostly(&p, K0, (long)&hwrena); 707 - uasm_i_lw(&p, K0, 
uasm_rel_lo((long)&hwrena), K0); 708 - uasm_i_mtc0(&p, K0, C0_HWRENA); 759 + UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena); 760 + uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0); 761 + uasm_i_mtc0(&p, GPR_K0, C0_HWRENA); 709 762 710 763 /* Jump to handler */ 711 764 /* ··· 713 766 * Now jump to the kvm_mips_handle_exit() to see if we can deal 714 767 * with this in the kernel 715 768 */ 716 - uasm_i_move(&p, A0, S0); 717 - UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit); 718 - uasm_i_jalr(&p, RA, T9); 719 - UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ); 769 + uasm_i_move(&p, GPR_A0, GPR_S0); 770 + UASM_i_LA(&p, GPR_T9, (unsigned long)kvm_mips_handle_exit); 771 + uasm_i_jalr(&p, GPR_RA, GPR_T9); 772 + UASM_i_ADDIU(&p, GPR_SP, GPR_SP, -CALLFRAME_SIZ); 720 773 721 774 uasm_resolve_relocs(relocs, labels); 722 775 ··· 746 799 memset(relocs, 0, sizeof(relocs)); 747 800 748 801 /* Return from handler Make sure interrupts are disabled */ 749 - uasm_i_di(&p, ZERO); 802 + uasm_i_di(&p, GPR_ZERO); 750 803 uasm_i_ehb(&p); 751 804 752 805 /* ··· 755 808 * guest, reload k1 756 809 */ 757 810 758 - uasm_i_move(&p, K1, S0); 759 - UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch)); 811 + uasm_i_move(&p, GPR_K1, GPR_S0); 812 + UASM_i_ADDIU(&p, GPR_K1, GPR_K1, offsetof(struct kvm_vcpu, arch)); 760 813 761 814 /* 762 815 * Check return value, should tell us if we are returning to the 763 816 * host (handle I/O etc)or resuming the guest 764 817 */ 765 - uasm_i_andi(&p, T0, V0, RESUME_HOST); 766 - uasm_il_bnez(&p, &r, T0, label_return_to_host); 818 + uasm_i_andi(&p, GPR_T0, GPR_V0, RESUME_HOST); 819 + uasm_il_bnez(&p, &r, GPR_T0, label_return_to_host); 767 820 uasm_i_nop(&p); 768 821 769 822 p = kvm_mips_build_ret_to_guest(p); ··· 790 843 u32 *p = addr; 791 844 792 845 /* Put the saved pointer to vcpu (s0) back into the scratch register */ 793 - UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]); 846 + UASM_i_MTC0(&p, GPR_S0, scratch_vcpu[0], scratch_vcpu[1]); 794 
847 795 848 /* Load up the Guest EBASE to minimize the window where BEV is set */ 796 - UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); 849 + UASM_i_LW(&p, GPR_T0, offsetof(struct kvm_vcpu_arch, guest_ebase), GPR_K1); 797 850 798 851 /* Switch EBASE back to the one used by KVM */ 799 - uasm_i_mfc0(&p, V1, C0_STATUS); 800 - uasm_i_lui(&p, AT, ST0_BEV >> 16); 801 - uasm_i_or(&p, K0, V1, AT); 802 - uasm_i_mtc0(&p, K0, C0_STATUS); 852 + uasm_i_mfc0(&p, GPR_V1, C0_STATUS); 853 + uasm_i_lui(&p, GPR_AT, ST0_BEV >> 16); 854 + uasm_i_or(&p, GPR_K0, GPR_V1, GPR_AT); 855 + uasm_i_mtc0(&p, GPR_K0, C0_STATUS); 803 856 uasm_i_ehb(&p); 804 - build_set_exc_base(&p, T0); 857 + build_set_exc_base(&p, GPR_T0); 805 858 806 859 /* Setup status register for running guest in UM */ 807 - uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE); 808 - UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX)); 809 - uasm_i_and(&p, V1, V1, AT); 810 - uasm_i_mtc0(&p, V1, C0_STATUS); 860 + uasm_i_ori(&p, GPR_V1, GPR_V1, ST0_EXL | KSU_USER | ST0_IE); 861 + UASM_i_LA(&p, GPR_AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX)); 862 + uasm_i_and(&p, GPR_V1, GPR_V1, GPR_AT); 863 + uasm_i_mtc0(&p, GPR_V1, C0_STATUS); 811 864 uasm_i_ehb(&p); 812 865 813 866 p = kvm_mips_build_enter_guest(p); ··· 831 884 unsigned int i; 832 885 833 886 /* EBASE is already pointing to Linux */ 834 - UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1); 835 - UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs)); 887 + UASM_i_LW(&p, GPR_K1, offsetof(struct kvm_vcpu_arch, host_stack), GPR_K1); 888 + UASM_i_ADDIU(&p, GPR_K1, GPR_K1, -(int)sizeof(struct pt_regs)); 836 889 837 890 /* 838 891 * r2/v0 is the return code, shift it down by 2 (arithmetic) 839 892 * to recover the err code 840 893 */ 841 - uasm_i_sra(&p, K0, V0, 2); 842 - uasm_i_move(&p, V0, K0); 894 + uasm_i_sra(&p, GPR_K0, GPR_V0, 2); 895 + uasm_i_move(&p, GPR_V0, GPR_K0); 843 896 844 897 /* Load context saved on the host stack */ 845 898 for 
(i = 16; i < 31; ++i) { 846 899 if (i == 24) 847 900 i = 28; 848 - UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1); 901 + UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), GPR_K1); 849 902 } 850 903 851 904 /* Restore RDHWR access */ 852 - UASM_i_LA_mostly(&p, K0, (long)&hwrena); 853 - uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0); 854 - uasm_i_mtc0(&p, K0, C0_HWRENA); 905 + UASM_i_LA_mostly(&p, GPR_K0, (long)&hwrena); 906 + uasm_i_lw(&p, GPR_K0, uasm_rel_lo((long)&hwrena), GPR_K0); 907 + uasm_i_mtc0(&p, GPR_K0, C0_HWRENA); 855 908 856 - /* Restore RA, which is the address we will return to */ 857 - UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1); 858 - uasm_i_jr(&p, RA); 909 + /* Restore GPR_RA, which is the address we will return to */ 910 + UASM_i_LW(&p, GPR_RA, offsetof(struct pt_regs, regs[GPR_RA]), GPR_K1); 911 + uasm_i_jr(&p, GPR_RA); 859 912 uasm_i_nop(&p); 860 913 861 914 return p;
+95 -107
arch/mips/mm/page.c
··· 24 24 #include <asm/bootinfo.h> 25 25 #include <asm/mipsregs.h> 26 26 #include <asm/mmu_context.h> 27 + #include <asm/regdef.h> 27 28 #include <asm/cpu.h> 28 29 29 30 #ifdef CONFIG_SIBYTE_DMA_PAGEOPS ··· 34 33 #endif 35 34 36 35 #include <asm/uasm.h> 37 - 38 - /* Registers used in the assembled routines. */ 39 - #define ZERO 0 40 - #define AT 2 41 - #define A0 4 42 - #define A1 5 43 - #define A2 6 44 - #define T0 8 45 - #define T1 9 46 - #define T2 10 47 - #define T3 11 48 - #define T9 25 49 - #define RA 31 50 36 51 37 /* Handle labels (which must be positive integers). */ 52 38 enum label_id { ··· 94 106 IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) && 95 107 r4k_daddiu_bug()) { 96 108 if (off > 0x7fff) { 97 - uasm_i_lui(buf, T9, uasm_rel_hi(off)); 98 - uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off)); 109 + uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off)); 110 + uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off)); 99 111 } else 100 - uasm_i_addiu(buf, T9, ZERO, off); 101 - uasm_i_daddu(buf, reg1, reg2, T9); 112 + uasm_i_addiu(buf, GPR_T9, GPR_ZERO, off); 113 + uasm_i_daddu(buf, reg1, reg2, GPR_T9); 102 114 } else { 103 115 if (off > 0x7fff) { 104 - uasm_i_lui(buf, T9, uasm_rel_hi(off)); 105 - uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off)); 106 - UASM_i_ADDU(buf, reg1, reg2, T9); 116 + uasm_i_lui(buf, GPR_T9, uasm_rel_hi(off)); 117 + uasm_i_addiu(buf, GPR_T9, GPR_T9, uasm_rel_lo(off)); 118 + UASM_i_ADDU(buf, reg1, reg2, GPR_T9); 107 119 } else 108 120 UASM_i_ADDIU(buf, reg1, reg2, off); 109 121 } ··· 221 233 static void build_clear_store(u32 **buf, int off) 222 234 { 223 235 if (cpu_has_64bit_gp_regs || cpu_has_64bit_zero_reg) { 224 - uasm_i_sd(buf, ZERO, off, A0); 236 + uasm_i_sd(buf, GPR_ZERO, off, GPR_A0); 225 237 } else { 226 - uasm_i_sw(buf, ZERO, off, A0); 238 + uasm_i_sw(buf, GPR_ZERO, off, GPR_A0); 227 239 } 228 240 } 229 241 ··· 234 246 235 247 if (pref_bias_clear_store) { 236 248 _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off, 237 - A0); 249 + GPR_A0); 238 
250 } else if (cache_line_size == (half_clear_loop_size << 1)) { 239 251 if (cpu_has_cache_cdex_s) { 240 - uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0); 252 + uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0); 241 253 } else if (cpu_has_cache_cdex_p) { 242 254 if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) && 243 255 cpu_is_r4600_v1_x()) { ··· 249 261 250 262 if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && 251 263 cpu_is_r4600_v2_x()) 252 - uasm_i_lw(buf, ZERO, ZERO, AT); 264 + uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT); 253 265 254 - uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); 266 + uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0); 255 267 } 256 268 } 257 269 } ··· 289 301 290 302 off = PAGE_SIZE - pref_bias_clear_store; 291 303 if (off > 0xffff || !pref_bias_clear_store) 292 - pg_addiu(&buf, A2, A0, off); 304 + pg_addiu(&buf, GPR_A2, GPR_A0, off); 293 305 else 294 - uasm_i_ori(&buf, A2, A0, off); 306 + uasm_i_ori(&buf, GPR_A2, GPR_A0, off); 295 307 296 308 if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) 297 - uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); 309 + uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000)); 298 310 299 311 off = cache_line_size ? 
min(8, pref_bias_clear_store / cache_line_size) 300 312 * cache_line_size : 0; ··· 308 320 build_clear_store(&buf, off); 309 321 off += clear_word_size; 310 322 } while (off < half_clear_loop_size); 311 - pg_addiu(&buf, A0, A0, 2 * off); 323 + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); 312 324 off = -off; 313 325 do { 314 326 build_clear_pref(&buf, off); 315 327 if (off == -clear_word_size) 316 - uasm_il_bne(&buf, &r, A0, A2, label_clear_pref); 328 + uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, label_clear_pref); 317 329 build_clear_store(&buf, off); 318 330 off += clear_word_size; 319 331 } while (off < 0); 320 332 321 333 if (pref_bias_clear_store) { 322 - pg_addiu(&buf, A2, A0, pref_bias_clear_store); 334 + pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_clear_store); 323 335 uasm_l_clear_nopref(&l, buf); 324 336 off = 0; 325 337 do { 326 338 build_clear_store(&buf, off); 327 339 off += clear_word_size; 328 340 } while (off < half_clear_loop_size); 329 - pg_addiu(&buf, A0, A0, 2 * off); 341 + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); 330 342 off = -off; 331 343 do { 332 344 if (off == -clear_word_size) 333 - uasm_il_bne(&buf, &r, A0, A2, 345 + uasm_il_bne(&buf, &r, GPR_A0, GPR_A2, 334 346 label_clear_nopref); 335 347 build_clear_store(&buf, off); 336 348 off += clear_word_size; 337 349 } while (off < 0); 338 350 } 339 351 340 - uasm_i_jr(&buf, RA); 352 + uasm_i_jr(&buf, GPR_RA); 341 353 uasm_i_nop(&buf); 342 354 343 355 BUG_ON(buf > &__clear_page_end); ··· 357 369 static void build_copy_load(u32 **buf, int reg, int off) 358 370 { 359 371 if (cpu_has_64bit_gp_regs) { 360 - uasm_i_ld(buf, reg, off, A1); 372 + uasm_i_ld(buf, reg, off, GPR_A1); 361 373 } else { 362 - uasm_i_lw(buf, reg, off, A1); 374 + uasm_i_lw(buf, reg, off, GPR_A1); 363 375 } 364 376 } 365 377 366 378 static void build_copy_store(u32 **buf, int reg, int off) 367 379 { 368 380 if (cpu_has_64bit_gp_regs) { 369 - uasm_i_sd(buf, reg, off, A0); 381 + uasm_i_sd(buf, reg, off, GPR_A0); 370 382 } else { 371 - 
uasm_i_sw(buf, reg, off, A0); 383 + uasm_i_sw(buf, reg, off, GPR_A0); 372 384 } 373 385 } 374 386 ··· 378 390 return; 379 391 380 392 if (pref_bias_copy_load) 381 - _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1); 393 + _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, GPR_A1); 382 394 } 383 395 384 396 static inline void build_copy_store_pref(u32 **buf, int off) ··· 388 400 389 401 if (pref_bias_copy_store) { 390 402 _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off, 391 - A0); 403 + GPR_A0); 392 404 } else if (cache_line_size == (half_copy_loop_size << 1)) { 393 405 if (cpu_has_cache_cdex_s) { 394 - uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0); 406 + uasm_i_cache(buf, Create_Dirty_Excl_SD, off, GPR_A0); 395 407 } else if (cpu_has_cache_cdex_p) { 396 408 if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP) && 397 409 cpu_is_r4600_v1_x()) { ··· 403 415 404 416 if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && 405 417 cpu_is_r4600_v2_x()) 406 - uasm_i_lw(buf, ZERO, ZERO, AT); 418 + uasm_i_lw(buf, GPR_ZERO, GPR_ZERO, GPR_AT); 407 419 408 - uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0); 420 + uasm_i_cache(buf, Create_Dirty_Excl_D, off, GPR_A0); 409 421 } 410 422 } 411 423 } ··· 442 454 443 455 off = PAGE_SIZE - pref_bias_copy_load; 444 456 if (off > 0xffff || !pref_bias_copy_load) 445 - pg_addiu(&buf, A2, A0, off); 457 + pg_addiu(&buf, GPR_A2, GPR_A0, off); 446 458 else 447 - uasm_i_ori(&buf, A2, A0, off); 459 + uasm_i_ori(&buf, GPR_A2, GPR_A0, off); 448 460 449 461 if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && cpu_is_r4600_v2_x()) 450 - uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000)); 462 + uasm_i_lui(&buf, GPR_AT, uasm_rel_hi(0xa0000000)); 451 463 452 464 off = cache_line_size ? 
min(8, pref_bias_copy_load / cache_line_size) * 453 465 cache_line_size : 0; ··· 464 476 uasm_l_copy_pref_both(&l, buf); 465 477 do { 466 478 build_copy_load_pref(&buf, off); 467 - build_copy_load(&buf, T0, off); 479 + build_copy_load(&buf, GPR_T0, off); 468 480 build_copy_load_pref(&buf, off + copy_word_size); 469 - build_copy_load(&buf, T1, off + copy_word_size); 481 + build_copy_load(&buf, GPR_T1, off + copy_word_size); 470 482 build_copy_load_pref(&buf, off + 2 * copy_word_size); 471 - build_copy_load(&buf, T2, off + 2 * copy_word_size); 483 + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); 472 484 build_copy_load_pref(&buf, off + 3 * copy_word_size); 473 - build_copy_load(&buf, T3, off + 3 * copy_word_size); 485 + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); 474 486 build_copy_store_pref(&buf, off); 475 - build_copy_store(&buf, T0, off); 487 + build_copy_store(&buf, GPR_T0, off); 476 488 build_copy_store_pref(&buf, off + copy_word_size); 477 - build_copy_store(&buf, T1, off + copy_word_size); 489 + build_copy_store(&buf, GPR_T1, off + copy_word_size); 478 490 build_copy_store_pref(&buf, off + 2 * copy_word_size); 479 - build_copy_store(&buf, T2, off + 2 * copy_word_size); 491 + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); 480 492 build_copy_store_pref(&buf, off + 3 * copy_word_size); 481 - build_copy_store(&buf, T3, off + 3 * copy_word_size); 493 + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); 482 494 off += 4 * copy_word_size; 483 495 } while (off < half_copy_loop_size); 484 - pg_addiu(&buf, A1, A1, 2 * off); 485 - pg_addiu(&buf, A0, A0, 2 * off); 496 + pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); 497 + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); 486 498 off = -off; 487 499 do { 488 500 build_copy_load_pref(&buf, off); 489 - build_copy_load(&buf, T0, off); 501 + build_copy_load(&buf, GPR_T0, off); 490 502 build_copy_load_pref(&buf, off + copy_word_size); 491 - build_copy_load(&buf, T1, off + copy_word_size); 503 + 
build_copy_load(&buf, GPR_T1, off + copy_word_size); 492 504 build_copy_load_pref(&buf, off + 2 * copy_word_size); 493 - build_copy_load(&buf, T2, off + 2 * copy_word_size); 505 + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); 494 506 build_copy_load_pref(&buf, off + 3 * copy_word_size); 495 - build_copy_load(&buf, T3, off + 3 * copy_word_size); 507 + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); 496 508 build_copy_store_pref(&buf, off); 497 - build_copy_store(&buf, T0, off); 509 + build_copy_store(&buf, GPR_T0, off); 498 510 build_copy_store_pref(&buf, off + copy_word_size); 499 - build_copy_store(&buf, T1, off + copy_word_size); 511 + build_copy_store(&buf, GPR_T1, off + copy_word_size); 500 512 build_copy_store_pref(&buf, off + 2 * copy_word_size); 501 - build_copy_store(&buf, T2, off + 2 * copy_word_size); 513 + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); 502 514 build_copy_store_pref(&buf, off + 3 * copy_word_size); 503 515 if (off == -(4 * copy_word_size)) 504 - uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both); 505 - build_copy_store(&buf, T3, off + 3 * copy_word_size); 516 + uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, label_copy_pref_both); 517 + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); 506 518 off += 4 * copy_word_size; 507 519 } while (off < 0); 508 520 509 521 if (pref_bias_copy_load - pref_bias_copy_store) { 510 - pg_addiu(&buf, A2, A0, 522 + pg_addiu(&buf, GPR_A2, GPR_A0, 511 523 pref_bias_copy_load - pref_bias_copy_store); 512 524 uasm_l_copy_pref_store(&l, buf); 513 525 off = 0; 514 526 do { 515 - build_copy_load(&buf, T0, off); 516 - build_copy_load(&buf, T1, off + copy_word_size); 517 - build_copy_load(&buf, T2, off + 2 * copy_word_size); 518 - build_copy_load(&buf, T3, off + 3 * copy_word_size); 527 + build_copy_load(&buf, GPR_T0, off); 528 + build_copy_load(&buf, GPR_T1, off + copy_word_size); 529 + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); 530 + build_copy_load(&buf, GPR_T3, off + 3 
* copy_word_size); 519 531 build_copy_store_pref(&buf, off); 520 - build_copy_store(&buf, T0, off); 532 + build_copy_store(&buf, GPR_T0, off); 521 533 build_copy_store_pref(&buf, off + copy_word_size); 522 - build_copy_store(&buf, T1, off + copy_word_size); 534 + build_copy_store(&buf, GPR_T1, off + copy_word_size); 523 535 build_copy_store_pref(&buf, off + 2 * copy_word_size); 524 - build_copy_store(&buf, T2, off + 2 * copy_word_size); 536 + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); 525 537 build_copy_store_pref(&buf, off + 3 * copy_word_size); 526 - build_copy_store(&buf, T3, off + 3 * copy_word_size); 538 + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); 527 539 off += 4 * copy_word_size; 528 540 } while (off < half_copy_loop_size); 529 - pg_addiu(&buf, A1, A1, 2 * off); 530 - pg_addiu(&buf, A0, A0, 2 * off); 541 + pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); 542 + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); 531 543 off = -off; 532 544 do { 533 - build_copy_load(&buf, T0, off); 534 - build_copy_load(&buf, T1, off + copy_word_size); 535 - build_copy_load(&buf, T2, off + 2 * copy_word_size); 536 - build_copy_load(&buf, T3, off + 3 * copy_word_size); 545 + build_copy_load(&buf, GPR_T0, off); 546 + build_copy_load(&buf, GPR_T1, off + copy_word_size); 547 + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); 548 + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); 537 549 build_copy_store_pref(&buf, off); 538 - build_copy_store(&buf, T0, off); 550 + build_copy_store(&buf, GPR_T0, off); 539 551 build_copy_store_pref(&buf, off + copy_word_size); 540 - build_copy_store(&buf, T1, off + copy_word_size); 552 + build_copy_store(&buf, GPR_T1, off + copy_word_size); 541 553 build_copy_store_pref(&buf, off + 2 * copy_word_size); 542 - build_copy_store(&buf, T2, off + 2 * copy_word_size); 554 + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); 543 555 build_copy_store_pref(&buf, off + 3 * copy_word_size); 544 556 if (off == -(4 * 
copy_word_size)) 545 - uasm_il_bne(&buf, &r, A2, A0, 557 + uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, 546 558 label_copy_pref_store); 547 - build_copy_store(&buf, T3, off + 3 * copy_word_size); 559 + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); 548 560 off += 4 * copy_word_size; 549 561 } while (off < 0); 550 562 } 551 563 552 564 if (pref_bias_copy_store) { 553 - pg_addiu(&buf, A2, A0, pref_bias_copy_store); 565 + pg_addiu(&buf, GPR_A2, GPR_A0, pref_bias_copy_store); 554 566 uasm_l_copy_nopref(&l, buf); 555 567 off = 0; 556 568 do { 557 - build_copy_load(&buf, T0, off); 558 - build_copy_load(&buf, T1, off + copy_word_size); 559 - build_copy_load(&buf, T2, off + 2 * copy_word_size); 560 - build_copy_load(&buf, T3, off + 3 * copy_word_size); 561 - build_copy_store(&buf, T0, off); 562 - build_copy_store(&buf, T1, off + copy_word_size); 563 - build_copy_store(&buf, T2, off + 2 * copy_word_size); 564 - build_copy_store(&buf, T3, off + 3 * copy_word_size); 569 + build_copy_load(&buf, GPR_T0, off); 570 + build_copy_load(&buf, GPR_T1, off + copy_word_size); 571 + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); 572 + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); 573 + build_copy_store(&buf, GPR_T0, off); 574 + build_copy_store(&buf, GPR_T1, off + copy_word_size); 575 + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); 576 + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); 565 577 off += 4 * copy_word_size; 566 578 } while (off < half_copy_loop_size); 567 - pg_addiu(&buf, A1, A1, 2 * off); 568 - pg_addiu(&buf, A0, A0, 2 * off); 579 + pg_addiu(&buf, GPR_A1, GPR_A1, 2 * off); 580 + pg_addiu(&buf, GPR_A0, GPR_A0, 2 * off); 569 581 off = -off; 570 582 do { 571 - build_copy_load(&buf, T0, off); 572 - build_copy_load(&buf, T1, off + copy_word_size); 573 - build_copy_load(&buf, T2, off + 2 * copy_word_size); 574 - build_copy_load(&buf, T3, off + 3 * copy_word_size); 575 - build_copy_store(&buf, T0, off); 576 - build_copy_store(&buf, T1, 
off + copy_word_size); 577 - build_copy_store(&buf, T2, off + 2 * copy_word_size); 583 + build_copy_load(&buf, GPR_T0, off); 584 + build_copy_load(&buf, GPR_T1, off + copy_word_size); 585 + build_copy_load(&buf, GPR_T2, off + 2 * copy_word_size); 586 + build_copy_load(&buf, GPR_T3, off + 3 * copy_word_size); 587 + build_copy_store(&buf, GPR_T0, off); 588 + build_copy_store(&buf, GPR_T1, off + copy_word_size); 589 + build_copy_store(&buf, GPR_T2, off + 2 * copy_word_size); 578 590 if (off == -(4 * copy_word_size)) 579 - uasm_il_bne(&buf, &r, A2, A0, 591 + uasm_il_bne(&buf, &r, GPR_A2, GPR_A0, 580 592 label_copy_nopref); 581 - build_copy_store(&buf, T3, off + 3 * copy_word_size); 593 + build_copy_store(&buf, GPR_T3, off + 3 * copy_word_size); 582 594 off += 4 * copy_word_size; 583 595 } while (off < 0); 584 596 } 585 597 586 - uasm_i_jr(&buf, RA); 598 + uasm_i_jr(&buf, GPR_RA); 587 599 uasm_i_nop(&buf); 588 600 589 601 BUG_ON(buf > &__copy_page_end);
+98 -116
arch/mips/mm/tlbex.c
··· 32 32 33 33 #include <asm/cacheflush.h> 34 34 #include <asm/cpu-type.h> 35 + #include <asm/mipsregs.h> 35 36 #include <asm/mmu_context.h> 37 + #include <asm/regdef.h> 36 38 #include <asm/uasm.h> 37 39 #include <asm/setup.h> 38 40 #include <asm/tlbex.h> ··· 278 276 pr_debug("\tEND(%s)\n", symbol); 279 277 } 280 278 281 - /* The only general purpose registers allowed in TLB handlers. */ 282 - #define K0 26 283 - #define K1 27 284 - 285 - /* Some CP0 registers */ 286 - #define C0_INDEX 0, 0 287 - #define C0_ENTRYLO0 2, 0 288 - #define C0_TCBIND 2, 2 289 - #define C0_ENTRYLO1 3, 0 290 - #define C0_CONTEXT 4, 0 291 - #define C0_PAGEMASK 5, 0 292 - #define C0_PWBASE 5, 5 293 - #define C0_PWFIELD 5, 6 294 - #define C0_PWSIZE 5, 7 295 - #define C0_PWCTL 6, 6 296 - #define C0_BADVADDR 8, 0 297 - #define C0_PGD 9, 7 298 - #define C0_ENTRYHI 10, 0 299 - #define C0_EPC 14, 0 300 - #define C0_XCONTEXT 20, 0 301 - 302 279 #ifdef CONFIG_64BIT 303 280 # define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT) 304 281 #else ··· 337 356 if (scratch_reg >= 0) { 338 357 /* Save in CPU local C0_KScratch? 
*/ 339 358 UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg); 340 - r.r1 = K0; 341 - r.r2 = K1; 342 - r.r3 = 1; 359 + r.r1 = GPR_K0; 360 + r.r2 = GPR_K1; 361 + r.r3 = GPR_AT; 343 362 return r; 344 363 } 345 364 346 365 if (num_possible_cpus() > 1) { 347 366 /* Get smp_processor_id */ 348 - UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG); 349 - UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT); 367 + UASM_i_CPUID_MFC0(p, GPR_K0, SMP_CPUID_REG); 368 + UASM_i_SRL_SAFE(p, GPR_K0, GPR_K0, SMP_CPUID_REGSHIFT); 350 369 351 - /* handler_reg_save index in K0 */ 352 - UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save))); 370 + /* handler_reg_save index in GPR_K0 */ 371 + UASM_i_SLL(p, GPR_K0, GPR_K0, ilog2(sizeof(struct tlb_reg_save))); 353 372 354 - UASM_i_LA(p, K1, (long)&handler_reg_save); 355 - UASM_i_ADDU(p, K0, K0, K1); 373 + UASM_i_LA(p, GPR_K1, (long)&handler_reg_save); 374 + UASM_i_ADDU(p, GPR_K0, GPR_K0, GPR_K1); 356 375 } else { 357 - UASM_i_LA(p, K0, (long)&handler_reg_save); 376 + UASM_i_LA(p, GPR_K0, (long)&handler_reg_save); 358 377 } 359 - /* K0 now points to save area, save $1 and $2 */ 360 - UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0); 361 - UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0); 378 + /* GPR_K0 now points to save area, save $1 and $2 */ 379 + UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0); 380 + UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0); 362 381 363 - r.r1 = K1; 382 + r.r1 = GPR_K1; 364 383 r.r2 = 1; 365 384 r.r3 = 2; 366 385 return r; ··· 373 392 UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg); 374 393 return; 375 394 } 376 - /* K0 already points to save area, restore $1 and $2 */ 377 - UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0); 378 - UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0); 395 + /* GPR_K0 already points to save area, restore $1 and $2 */ 396 + UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), GPR_K0); 397 + UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), GPR_K0); 379 398 } 380 399 381 
400 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT ··· 394 413 memset(tlb_handler, 0, sizeof(tlb_handler)); 395 414 p = tlb_handler; 396 415 397 - uasm_i_mfc0(&p, K0, C0_BADVADDR); 398 - uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 399 - uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1); 400 - uasm_i_srl(&p, K0, K0, 22); /* load delay */ 401 - uasm_i_sll(&p, K0, K0, 2); 402 - uasm_i_addu(&p, K1, K1, K0); 403 - uasm_i_mfc0(&p, K0, C0_CONTEXT); 404 - uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */ 405 - uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */ 406 - uasm_i_addu(&p, K1, K1, K0); 407 - uasm_i_lw(&p, K0, 0, K1); 416 + uasm_i_mfc0(&p, GPR_K0, C0_BADVADDR); 417 + uasm_i_lui(&p, GPR_K1, uasm_rel_hi(pgdc)); /* cp0 delay */ 418 + uasm_i_lw(&p, GPR_K1, uasm_rel_lo(pgdc), GPR_K1); 419 + uasm_i_srl(&p, GPR_K0, GPR_K0, 22); /* load delay */ 420 + uasm_i_sll(&p, GPR_K0, GPR_K0, 2); 421 + uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0); 422 + uasm_i_mfc0(&p, GPR_K0, C0_CONTEXT); 423 + uasm_i_lw(&p, GPR_K1, 0, GPR_K1); /* cp0 delay */ 424 + uasm_i_andi(&p, GPR_K0, GPR_K0, 0xffc); /* load delay */ 425 + uasm_i_addu(&p, GPR_K1, GPR_K1, GPR_K0); 426 + uasm_i_lw(&p, GPR_K0, 0, GPR_K1); 408 427 uasm_i_nop(&p); /* load delay */ 409 - uasm_i_mtc0(&p, K0, C0_ENTRYLO0); 410 - uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */ 428 + uasm_i_mtc0(&p, GPR_K0, C0_ENTRYLO0); 429 + uasm_i_mfc0(&p, GPR_K1, C0_EPC); /* cp0 delay */ 411 430 uasm_i_tlbwr(&p); /* cp0 delay */ 412 - uasm_i_jr(&p, K1); 431 + uasm_i_jr(&p, GPR_K1); 413 432 uasm_i_rfe(&p); /* branch delay */ 414 433 415 434 if (p > tlb_handler + 32) ··· 1257 1276 memset(final_handler, 0, sizeof(final_handler)); 1258 1277 1259 1278 if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) { 1260 - htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1, 1279 + htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, GPR_K0, GPR_K1, 1261 1280 scratch_reg); 1262 1281 vmalloc_mode = refill_scratch; 1263 1282 } else { 1264 - 
htlb_info.huge_pte = K0; 1283 + htlb_info.huge_pte = GPR_K0; 1265 1284 htlb_info.restore_scratch = 0; 1266 1285 htlb_info.need_reload_pte = true; 1267 1286 vmalloc_mode = refill_noscratch; ··· 1271 1290 if (bcm1250_m3_war()) { 1272 1291 unsigned int segbits = 44; 1273 1292 1274 - uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1275 - uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 1276 - uasm_i_xor(&p, K0, K0, K1); 1277 - uasm_i_dsrl_safe(&p, K1, K0, 62); 1278 - uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 1279 - uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 1280 - uasm_i_or(&p, K0, K0, K1); 1281 - uasm_il_bnez(&p, &r, K0, label_leave); 1293 + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); 1294 + uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI); 1295 + uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1); 1296 + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62); 1297 + uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1); 1298 + uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 12 + 1 - segbits); 1299 + uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1); 1300 + uasm_il_bnez(&p, &r, GPR_K0, label_leave); 1282 1301 /* No need for uasm_i_nop */ 1283 1302 } 1284 1303 1285 1304 #ifdef CONFIG_64BIT 1286 - build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ 1305 + build_get_pmde64(&p, &l, &r, GPR_K0, GPR_K1); /* get pmd in GPR_K1 */ 1287 1306 #else 1288 - build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ 1307 + build_get_pgde32(&p, GPR_K0, GPR_K1); /* get pgd in GPR_K1 */ 1289 1308 #endif 1290 1309 1291 1310 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1292 - build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update); 1311 + build_is_huge_pte(&p, &r, GPR_K0, GPR_K1, label_tlb_huge_update); 1293 1312 #endif 1294 1313 1295 - build_get_ptep(&p, K0, K1); 1296 - build_update_entries(&p, K0, K1); 1314 + build_get_ptep(&p, GPR_K0, GPR_K1); 1315 + build_update_entries(&p, GPR_K0, GPR_K1); 1297 1316 build_tlb_write_entry(&p, &l, &r, tlb_random); 1298 1317 uasm_l_leave(&l, p); 1299 1318 uasm_i_eret(&p); /* return from trap */ ··· 1301 1320 #ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1302 
1321 uasm_l_tlb_huge_update(&l, p); 1303 1322 if (htlb_info.need_reload_pte) 1304 - UASM_i_LW(&p, htlb_info.huge_pte, 0, K1); 1305 - build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1306 - build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, 1323 + UASM_i_LW(&p, htlb_info.huge_pte, 0, GPR_K1); 1324 + build_huge_update_entries(&p, htlb_info.huge_pte, GPR_K1); 1325 + build_huge_tlb_write_entry(&p, &l, &r, GPR_K0, tlb_random, 1307 1326 htlb_info.restore_scratch); 1308 1327 #endif 1309 1328 1310 1329 #ifdef CONFIG_64BIT 1311 - build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode); 1330 + build_get_pgd_vmalloc64(&p, &l, &r, GPR_K0, GPR_K1, vmalloc_mode); 1312 1331 #endif 1313 1332 1314 1333 /* ··· 1481 1500 memset(tlb_handler, 0, sizeof(tlb_handler)); 1482 1501 1483 1502 if (check_for_high_segbits) { 1484 - uasm_i_dmfc0(&p, K0, C0_BADVADDR); 1485 - uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); 1486 - uasm_il_beqz(&p, &r, K1, label_vmalloc); 1503 + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); 1504 + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 1505 + PGDIR_SHIFT + PGD_TABLE_ORDER + PAGE_SHIFT - 3); 1506 + uasm_il_beqz(&p, &r, GPR_K1, label_vmalloc); 1487 1507 uasm_i_nop(&p); 1488 1508 1489 - uasm_il_bgez(&p, &r, K0, label_large_segbits_fault); 1509 + uasm_il_bgez(&p, &r, GPR_K0, label_large_segbits_fault); 1490 1510 uasm_i_nop(&p); 1491 1511 uasm_l_vmalloc(&l, p); 1492 1512 } 1493 1513 1494 - uasm_i_dmfc0(&p, K1, C0_PGD); 1514 + uasm_i_dmfc0(&p, GPR_K1, C0_PGD); 1495 1515 1496 - uasm_i_lddir(&p, K0, K1, 3); /* global page dir */ 1516 + uasm_i_lddir(&p, GPR_K0, GPR_K1, 3); /* global page dir */ 1497 1517 #ifndef __PAGETABLE_PMD_FOLDED 1498 - uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */ 1518 + uasm_i_lddir(&p, GPR_K1, GPR_K0, 1); /* middle page dir */ 1499 1519 #endif 1500 - uasm_i_ldpte(&p, K1, 0); /* even */ 1501 - uasm_i_ldpte(&p, K1, 1); /* odd */ 1520 + uasm_i_ldpte(&p, GPR_K1, 0); /* even */ 1521 + uasm_i_ldpte(&p, GPR_K1, 1); /* 
odd */ 1502 1522 uasm_i_tlbwr(&p); 1503 1523 1504 1524 /* restore page mask */ 1505 1525 if (PM_DEFAULT_MASK >> 16) { 1506 - uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16); 1507 - uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff); 1508 - uasm_i_mtc0(&p, K0, C0_PAGEMASK); 1526 + uasm_i_lui(&p, GPR_K0, PM_DEFAULT_MASK >> 16); 1527 + uasm_i_ori(&p, GPR_K0, GPR_K0, PM_DEFAULT_MASK & 0xffff); 1528 + uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK); 1509 1529 } else if (PM_DEFAULT_MASK) { 1510 - uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK); 1511 - uasm_i_mtc0(&p, K0, C0_PAGEMASK); 1530 + uasm_i_ori(&p, GPR_K0, 0, PM_DEFAULT_MASK); 1531 + uasm_i_mtc0(&p, GPR_K0, C0_PAGEMASK); 1512 1532 } else { 1513 1533 uasm_i_mtc0(&p, 0, C0_PAGEMASK); 1514 1534 } ··· 1518 1536 1519 1537 if (check_for_high_segbits) { 1520 1538 uasm_l_large_segbits_fault(&l, p); 1521 - UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0); 1522 - uasm_i_jr(&p, K1); 1539 + UASM_i_LA(&p, GPR_K1, (unsigned long)tlb_do_page_fault_0); 1540 + uasm_i_jr(&p, GPR_K1); 1523 1541 uasm_i_nop(&p); 1524 1542 } 1525 1543 ··· 1885 1903 memset(labels, 0, sizeof(labels)); 1886 1904 memset(relocs, 0, sizeof(relocs)); 1887 1905 1888 - build_r3000_tlbchange_handler_head(&p, K0, K1); 1889 - build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl); 1906 + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); 1907 + build_pte_present(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbl); 1890 1908 uasm_i_nop(&p); /* load delay */ 1891 - build_make_valid(&p, &r, K0, K1, -1); 1892 - build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1909 + build_make_valid(&p, &r, GPR_K0, GPR_K1, -1); 1910 + build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1); 1893 1911 1894 1912 uasm_l_nopage_tlbl(&l, p); 1895 1913 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); ··· 1915 1933 memset(labels, 0, sizeof(labels)); 1916 1934 memset(relocs, 0, sizeof(relocs)); 1917 1935 1918 - build_r3000_tlbchange_handler_head(&p, K0, K1); 1919 - build_pte_writable(&p, &r, K0, 
K1, -1, label_nopage_tlbs); 1936 + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); 1937 + build_pte_writable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbs); 1920 1938 uasm_i_nop(&p); /* load delay */ 1921 - build_make_write(&p, &r, K0, K1, -1); 1922 - build_r3000_tlb_reload_write(&p, &l, &r, K0, K1); 1939 + build_make_write(&p, &r, GPR_K0, GPR_K1, -1); 1940 + build_r3000_tlb_reload_write(&p, &l, &r, GPR_K0, GPR_K1); 1923 1941 1924 1942 uasm_l_nopage_tlbs(&l, p); 1925 1943 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); ··· 1945 1963 memset(labels, 0, sizeof(labels)); 1946 1964 memset(relocs, 0, sizeof(relocs)); 1947 1965 1948 - build_r3000_tlbchange_handler_head(&p, K0, K1); 1949 - build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); 1966 + build_r3000_tlbchange_handler_head(&p, GPR_K0, GPR_K1); 1967 + build_pte_modifiable(&p, &r, GPR_K0, GPR_K1, -1, label_nopage_tlbm); 1950 1968 uasm_i_nop(&p); /* load delay */ 1951 - build_make_write(&p, &r, K0, K1, -1); 1952 - build_r3000_pte_reload_tlbwi(&p, K0, K1); 1969 + build_make_write(&p, &r, GPR_K0, GPR_K1, -1); 1970 + build_r3000_pte_reload_tlbwi(&p, GPR_K0, GPR_K1); 1953 1971 1954 1972 uasm_l_nopage_tlbm(&l, p); 1955 1973 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); ··· 2065 2083 if (bcm1250_m3_war()) { 2066 2084 unsigned int segbits = 44; 2067 2085 2068 - uasm_i_dmfc0(&p, K0, C0_BADVADDR); 2069 - uasm_i_dmfc0(&p, K1, C0_ENTRYHI); 2070 - uasm_i_xor(&p, K0, K0, K1); 2071 - uasm_i_dsrl_safe(&p, K1, K0, 62); 2072 - uasm_i_dsrl_safe(&p, K0, K0, 12 + 1); 2073 - uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits); 2074 - uasm_i_or(&p, K0, K0, K1); 2075 - uasm_il_bnez(&p, &r, K0, label_leave); 2086 + uasm_i_dmfc0(&p, GPR_K0, C0_BADVADDR); 2087 + uasm_i_dmfc0(&p, GPR_K1, C0_ENTRYHI); 2088 + uasm_i_xor(&p, GPR_K0, GPR_K0, GPR_K1); 2089 + uasm_i_dsrl_safe(&p, GPR_K1, GPR_K0, 62); 2090 + uasm_i_dsrl_safe(&p, GPR_K0, GPR_K0, 12 + 1); 2091 + uasm_i_dsll_safe(&p, GPR_K0, GPR_K0, 64 + 
12 + 1 - segbits); 2092 + uasm_i_or(&p, GPR_K0, GPR_K0, GPR_K1); 2093 + uasm_il_bnez(&p, &r, GPR_K0, label_leave); 2076 2094 /* No need for uasm_i_nop */ 2077 2095 } 2078 2096 ··· 2215 2233 build_restore_work_registers(&p); 2216 2234 #ifdef CONFIG_CPU_MICROMIPS 2217 2235 if ((unsigned long)tlb_do_page_fault_0 & 1) { 2218 - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0)); 2219 - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0)); 2220 - uasm_i_jr(&p, K0); 2236 + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_0)); 2237 + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_0)); 2238 + uasm_i_jr(&p, GPR_K0); 2221 2239 } else 2222 2240 #endif 2223 2241 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff); ··· 2271 2289 build_restore_work_registers(&p); 2272 2290 #ifdef CONFIG_CPU_MICROMIPS 2273 2291 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2274 - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2275 - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2276 - uasm_i_jr(&p, K0); 2292 + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2293 + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2294 + uasm_i_jr(&p, GPR_K0); 2277 2295 } else 2278 2296 #endif 2279 2297 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff); ··· 2328 2346 build_restore_work_registers(&p); 2329 2347 #ifdef CONFIG_CPU_MICROMIPS 2330 2348 if ((unsigned long)tlb_do_page_fault_1 & 1) { 2331 - uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2332 - uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2333 - uasm_i_jr(&p, K0); 2349 + uasm_i_lui(&p, GPR_K0, uasm_rel_hi((long)tlb_do_page_fault_1)); 2350 + uasm_i_addiu(&p, GPR_K0, GPR_K0, uasm_rel_lo((long)tlb_do_page_fault_1)); 2351 + uasm_i_jr(&p, GPR_K0); 2334 2352 } else 2335 2353 #endif 2336 2354 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
+1
arch/mips/mobileye/Makefile
··· 1 + # SPDX-License-Identifier: GPL-2.0-or-later
+15
arch/mips/mobileye/Platform
··· 1 + # 2 + # Copyright (C) 2016 Imagination Technologies 3 + # Author: Paul Burton <paul.burton@mips.com> 4 + # 5 + # This program is free software; you can redistribute it and/or modify it 6 + # under the terms of the GNU General Public License as published by the 7 + # Free Software Foundation; either version 2 of the License, or (at your 8 + # option) any later version. 9 + # 10 + 11 + load-$(CONFIG_MACH_EYEQ5) = 0xa800000808000000 12 + all-$(CONFIG_MACH_EYEQ5) += vmlinux.gz.itb 13 + 14 + its-y := vmlinux.its.S 15 + its-$(CONFIG_FIT_IMAGE_FDT_EPM5) += board-epm5.its.S
+24
arch/mips/mobileye/board-epm5.its.S
··· 1 + /* SPDX-License-Identifier: (GPL-2.0 OR MIT) */ 2 + / { 3 + images { 4 + fdt-mobileye-epm5 { 5 + description = "Mobileeye MP5 Device Tree"; 6 + data = /incbin/("boot/dts/mobileye/eyeq5-epm5.dtb"); 7 + type = "flat_dt"; 8 + arch = "mips"; 9 + compression = "none"; 10 + hash { 11 + algo = "sha1"; 12 + }; 13 + }; 14 + }; 15 + 16 + configurations { 17 + default = "conf-1"; 18 + conf-1 { 19 + description = "Mobileye EPM5 Linux kernel"; 20 + kernel = "kernel"; 21 + fdt = "fdt-mobileye-epm5"; 22 + }; 23 + }; 24 + };
+32
arch/mips/mobileye/vmlinux.its.S
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /dts-v1/; 3 + 4 + / { 5 + description = KERNEL_NAME; 6 + #address-cells = <ADDR_CELLS>; 7 + 8 + images { 9 + kernel { 10 + description = KERNEL_NAME; 11 + data = /incbin/(VMLINUX_BINARY); 12 + type = "kernel"; 13 + arch = "mips"; 14 + os = "linux"; 15 + compression = VMLINUX_COMPRESSION; 16 + load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>; 17 + entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>; 18 + hash { 19 + algo = "sha1"; 20 + }; 21 + }; 22 + }; 23 + 24 + configurations { 25 + default = "conf-default"; 26 + 27 + conf-default { 28 + description = "Generic Linux kernel"; 29 + kernel = "kernel"; 30 + }; 31 + }; 32 + };
+1 -1
arch/mips/pci/fixup-ath79.c
··· 9 9 10 10 int pcibios_plat_dev_init(struct pci_dev *dev) 11 11 { 12 - return PCIBIOS_SUCCESSFUL; 12 + return 0; 13 13 } 14 14 15 15 int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
-9
arch/mips/pci/fixup-lantiq.c
··· 7 7 #include <linux/of_pci.h> 8 8 #include <linux/pci.h> 9 9 10 - int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL; 11 - int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL; 12 - 13 10 int pcibios_plat_dev_init(struct pci_dev *dev) 14 11 { 15 - if (ltq_pci_plat_arch_init) 16 - return ltq_pci_plat_arch_init(dev); 17 - 18 - if (ltq_pci_plat_dev_init) 19 - return ltq_pci_plat_dev_init(dev); 20 - 21 12 return 0; 22 13 } 23 14
+11 -7
arch/mips/pci/ops-tx4927.c
··· 60 60 { 61 61 if (bus->parent == NULL && 62 62 devfn >= PCI_DEVFN(TX4927_PCIC_MAX_DEVNU, 0)) 63 - return -1; 63 + return PCIBIOS_DEVICE_NOT_FOUND; 64 64 __raw_writel(((bus->number & 0xff) << 0x10) 65 65 | ((devfn & 0xff) << 0x08) | (where & 0xfc) 66 66 | (bus->parent ? 1 : 0), ··· 69 69 __raw_writel((__raw_readl(&pcicptr->pcistatus) & 0x0000ffff) 70 70 | (PCI_STATUS_REC_MASTER_ABORT << 16), 71 71 &pcicptr->pcistatus); 72 - return 0; 72 + return PCIBIOS_SUCCESSFUL; 73 73 } 74 74 75 75 static int check_abort(struct tx4927_pcic_reg __iomem *pcicptr) ··· 140 140 int where, int size, u32 *val) 141 141 { 142 142 struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus); 143 + int ret; 143 144 144 - if (mkaddr(bus, devfn, where, pcicptr)) { 145 - *val = 0xffffffff; 146 - return -1; 145 + ret = mkaddr(bus, devfn, where, pcicptr); 146 + if (ret != PCIBIOS_SUCCESSFUL) { 147 + PCI_SET_ERROR_RESPONSE(val); 148 + return ret; 147 149 } 148 150 switch (size) { 149 151 case 1: ··· 164 162 int where, int size, u32 val) 165 163 { 166 164 struct tx4927_pcic_reg __iomem *pcicptr = pci_bus_to_pcicptr(bus); 165 + int ret; 167 166 168 - if (mkaddr(bus, devfn, where, pcicptr)) 169 - return -1; 167 + ret = mkaddr(bus, devfn, where, pcicptr); 168 + if (ret != PCIBIOS_SUCCESSFUL) 169 + return ret; 170 170 switch (size) { 171 171 case 1: 172 172 icd_writeb(val, where & 3, pcicptr);
+9 -4
arch/mips/ralink/timer.c
··· 6 6 * Copyright (C) 2013 John Crispin <john@phrozen.org> 7 7 */ 8 8 9 - #include <linux/platform_device.h> 10 - #include <linux/interrupt.h> 11 - #include <linux/timer.h> 12 - #include <linux/of_gpio.h> 9 + #include <linux/bits.h> 13 10 #include <linux/clk.h> 11 + #include <linux/device.h> 12 + #include <linux/err.h> 13 + #include <linux/interrupt.h> 14 + #include <linux/io.h> 15 + #include <linux/mod_devicetable.h> 16 + #include <linux/platform_device.h> 17 + #include <linux/timer.h> 18 + #include <linux/types.h> 14 19 15 20 #include <asm/mach-ralink/ralink_regs.h> 16 21
+2 -2
arch/mips/sgi-ip22/ip22-gio.c
··· 12 12 #include <asm/sgi/mc.h> 13 13 #include <asm/sgi/ip22.h> 14 14 15 - static struct bus_type gio_bus_type; 15 + static const struct bus_type gio_bus_type; 16 16 17 17 static struct { 18 18 const char *name; ··· 378 378 printk(KERN_INFO "GIO: slot %d : Empty\n", slotno); 379 379 } 380 380 381 - static struct bus_type gio_bus_type = { 381 + static const struct bus_type gio_bus_type = { 382 382 .name = "gio", 383 383 .dev_groups = gio_dev_groups, 384 384 .match = gio_bus_match,
+9 -12
arch/mips/sibyte/common/sb_tbprof.c
··· 535 535 .llseek = default_llseek, 536 536 }; 537 537 538 - static struct class *tb_class; 538 + static const struct class tb_class = { 539 + .name = "sb_tracebuffer", 540 + }; 539 541 static struct device *tb_dev; 540 542 541 543 static int __init sbprof_tb_init(void) 542 544 { 543 545 struct device *dev; 544 - struct class *tbc; 545 546 int err; 546 547 547 548 if (register_chrdev(SBPROF_TB_MAJOR, DEVNAME, &sbprof_tb_fops)) { ··· 551 550 return -EIO; 552 551 } 553 552 554 - tbc = class_create("sb_tracebuffer"); 555 - if (IS_ERR(tbc)) { 556 - err = PTR_ERR(tbc); 553 + err = class_register(&tb_class); 554 + if (err) 557 555 goto out_chrdev; 558 - } 559 556 560 - tb_class = tbc; 561 - 562 - dev = device_create(tbc, NULL, MKDEV(SBPROF_TB_MAJOR, 0), NULL, "tb"); 557 + dev = device_create(&tb_class, NULL, MKDEV(SBPROF_TB_MAJOR, 0), NULL, "tb"); 563 558 if (IS_ERR(dev)) { 564 559 err = PTR_ERR(dev); 565 560 goto out_class; ··· 570 573 return 0; 571 574 572 575 out_class: 573 - class_destroy(tb_class); 576 + class_unregister(&tb_class); 574 577 out_chrdev: 575 578 unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME); 576 579 ··· 579 582 580 583 static void __exit sbprof_tb_cleanup(void) 581 584 { 582 - device_destroy(tb_class, MKDEV(SBPROF_TB_MAJOR, 0)); 585 + device_destroy(&tb_class, MKDEV(SBPROF_TB_MAJOR, 0)); 583 586 unregister_chrdev(SBPROF_TB_MAJOR, DEVNAME); 584 - class_destroy(tb_class); 587 + class_unregister(&tb_class); 585 588 } 586 589 587 590 module_init(sbprof_tb_init);
+1 -1
arch/mips/txx9/generic/setup.c
··· 762 762 { 763 763 } 764 764 765 - static struct bus_type txx9_sramc_subsys = { 765 + static const struct bus_type txx9_sramc_subsys = { 766 766 .name = "txx9_sram", 767 767 .dev_name = "txx9_sram", 768 768 };
-1
drivers/bus/bt1-apb.c
··· 22 22 #include <linux/clk.h> 23 23 #include <linux/reset.h> 24 24 #include <linux/time64.h> 25 - #include <linux/clk.h> 26 25 #include <linux/sysfs.h> 27 26 28 27 #define APB_EHB_ISR 0x00
+1 -1
drivers/bus/mips_cdmm.c
··· 118 118 }; 119 119 ATTRIBUTE_GROUPS(mips_cdmm_dev); 120 120 121 - struct bus_type mips_cdmm_bustype = { 121 + const struct bus_type mips_cdmm_bustype = { 122 122 .name = "cdmm", 123 123 .dev_groups = mips_cdmm_dev_groups, 124 124 .match = mips_cdmm_match,
+1 -1
drivers/tc/tc-driver.c
··· 95 95 return 0; 96 96 } 97 97 98 - struct bus_type tc_bus_type = { 98 + const struct bus_type tc_bus_type = { 99 99 .name = "tc", 100 100 .match = tc_bus_match, 101 101 };
+1 -1
drivers/tty/mips_ejtag_fdc.c
··· 309 309 unsigned int i, buf_len, cpu; 310 310 bool done_cr = false; 311 311 char buf[4]; 312 - const char *buf_ptr = buf; 312 + const u8 *buf_ptr = buf; 313 313 /* Number of bytes of input data encoded up to each byte in buf */ 314 314 u8 inc[4]; 315 315
+1 -1
include/linux/tc.h
··· 120 120 121 121 #ifdef CONFIG_TC 122 122 123 - extern struct bus_type tc_bus_type; 123 + extern const struct bus_type tc_bus_type; 124 124 125 125 extern int tc_register_driver(struct tc_driver *tdrv); 126 126 extern void tc_unregister_driver(struct tc_driver *tdrv);